text stringlengths 8 6.05M |
|---|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import shlex
from dataclasses import dataclass
from pathlib import Path
from typing import Tuple
from pants.core.util_rules import external_tool
from pants.core.util_rules.external_tool import (
DownloadedExternalTool,
ExternalToolRequest,
TemplatedExternalTool,
)
from pants.engine.env_vars import EnvironmentVars, EnvironmentVarsRequest
from pants.engine.fs import EMPTY_DIGEST, Digest
from pants.engine.internals.selectors import Get
from pants.engine.platform import Platform
from pants.engine.process import Process
from pants.engine.rules import collect_rules, rule
from pants.option.option_types import ArgsListOption, BoolOption, StrListOption
from pants.util.logging import LogLevel
from pants.util.meta import classproperty
from pants.util.strutil import softwrap
class TerraformTool(TemplatedExternalTool):
    """Pants options scope for downloading and pinning the `terraform` binary."""

    options_scope = "download-terraform"
    name = "terraform"
    help = "Terraform (https://terraform.io)"

    # TODO: Possibly there should not be a default version, since terraform state is sensitive to
    # the version that created it, so you have to be deliberate about the version you select.
    default_version = "1.4.6"
    default_url_template = (
        "https://releases.hashicorp.com/terraform/{version}/terraform_{version}_{platform}.zip"
    )
    # Maps Pants platform names to the platform component of HashiCorp's download URLs.
    default_url_platform_mapping = {
        "macos_arm64": "darwin_arm64",
        "macos_x86_64": "darwin_amd64",
        "linux_x86_64": "linux_amd64",
        "linux_arm64": "linux_arm64",
    }

    @classproperty
    def default_known_versions(cls):
        # Each entry is "version|platform|sha256|byte size", the format the
        # external-tool machinery uses to verify downloads.
        return [
            "1.4.6|macos_x86_64|5d8332994b86411b049391d31ad1a0785dfb470db8b9c50617de28ddb5d1f25d|22051279",
            "1.4.6|macos_arm64|30a2f87298ff9f299452119bd14afaa8d5b000c572f62fa64baf432e35d9dec1|20613318",
            "1.4.6|linux_x86_64|e079db1a8945e39b1f8ba4e513946b3ab9f32bd5a2bdf19b9b186d22c5a3d53b|20779821",
            "1.4.6|linux_arm64|b38f5db944ac4942f11ceea465a91e365b0636febd9998c110fbbe95d61c3b26|18834675",
            "1.4.5|macos_x86_64|808e54d826737e9a0ca79bbe29330e50d3622bbeeb26066c63b371a291731711|22031074",
            "1.4.5|macos_arm64|7104d9d13632aa61b494a349c589048d21bd550e579404c3a41c4932e4d6aa97|20592841",
            "1.4.5|linux_x86_64|ce10e941cd11554b15a189cd00191c05abc20dff865599d361bdb863c5f406a9|20767621",
            "1.4.5|linux_arm64|ca2c48f518f72fef668255150cc5e63b92545edc62a05939bbff8a350bceb357|18813058",
            "1.4.4|macos_x86_64|0303ed9d7e5a225fc2e6fa9bf76fc6574c0c0359f22d5dfc04bc8b3234444f7c|22032187",
            "1.4.4|macos_arm64|75602d9ec491982ceabea813569579b2991093a4e0d76b7ca86ffd9b7a2a1d1e|20594012",
            "1.4.4|linux_x86_64|67541c1f6631befcc25b764028e5605e59234d4424e60a256518ee1e8dd50593|20767354",
            "1.4.4|linux_arm64|f0b4e092f2aa6de3324e5e4b5b51260ecf5e8c2f5335ff7a2ffdc4fb54a8922d|18814310",
            "1.4.3|macos_x86_64|89bdb242bfacf24167f365ef7a3bf0ad0e443ddd27ebde425fb71d77ce1a2597|22032267",
            "1.4.3|macos_arm64|20b9d484bf99ada6c0de89316176ba33f7c87f64c0738991188465147bba221b|20574247",
            "1.4.3|linux_x86_64|2252ee6ac8437b93db2b2ba341edc87951e2916afaeb50a88b858e80796e9111|20781685",
            "1.4.3|linux_arm64|d3d9464953d390970e7f4f7cbcd94dbf63136da6fe1cbb4955d944a9315bdcdb|18814307",
            "1.4.2|macos_x86_64|c218a6c0ef6692b25af16995c8c7bdf6739e9638fef9235c6aced3cd84afaf66|22030042",
            "1.4.2|macos_arm64|af8ff7576c8fc41496fdf97e9199b00d8d81729a6a0e821eaf4dfd08aa763540|20588400",
            "1.4.2|linux_x86_64|9f3ca33d04f5335472829d1df7785115b60176d610ae6f1583343b0a2221a931|20234129",
            "1.4.2|linux_arm64|39c182670c4e63e918e0a16080b1cc47bb16e158d7da96333d682d6a9cb8eb91|18206088",
            "1.4.1|macos_x86_64|96466364a7e66e3d456ecb6c85a63c83e124c004f8835fb8ea9b7bbb7542a9d0|22077050",
            "1.4.1|macos_arm64|61f76e130b97c8a9017d8aaff15d252af29117e35ea1a0fc30bcaab7ceafce73|20634145",
            "1.4.1|linux_x86_64|9e9f3e6752168dea8ecb3643ea9c18c65d5a52acc06c22453ebc4e3fc2d34421|20276168",
            "1.4.1|linux_arm64|53322cc70b6e50ac1985bf26a78ffa2814789a4704880f071eaf3e67a463d6f6|18248378",
            "1.4.0|macos_x86_64|e897a4217f1c3bfe37c694570dcc6371336fbda698790bb6b0547ec8daf1ffb3|21935694",
            "1.4.0|macos_arm64|d4a1e564714c6acf848e86dc020ff182477b49f932e3f550a5d9c8f5da7636fb|20508091",
            "1.4.0|linux_x86_64|5da60da508d6d1941ffa8b9216147456a16bbff6db7622ae9ad01d314cbdd188|20144407",
            "1.4.0|linux_arm64|33e0f4f0b75f507fc19012111de008308df343153cd6a3992507f4566c0bb723|18130960",
            "1.3.9|macos_x86_64|a73326ea8fb06f6976597e005f8047cbd55ac76ed1e517303d8f6395db6c7805|21194871",
            "1.3.9|macos_arm64|d8a59a794a7f99b484a07a0ed2aa6520921d146ac5a7f4b1b806dcf5c4af0525|19793371",
            "1.3.9|linux_x86_64|53048fa573effdd8f2a59b726234c6f450491fe0ded6931e9f4c6e3df6eece56|19477757",
            "1.3.9|linux_arm64|da571087268c5faf884912c4239c6b9c8e1ed8e8401ab1dcb45712df70f42f1b|17513770",
            "1.3.8|macos_x86_64|1a27a6fac31ecb05de610daf61a29fe83d304d7c519d773afbf56c11c3b6276b|21189878",
            "1.3.8|macos_arm64|873b05ac81645cd7289d6ccfd3e73d4735af1a453f2cd19da0650bdabf7d2eb6|19780134",
            "1.3.8|linux_x86_64|9d9e7d6a9b41cef8b837af688441d4fbbd84b503d24061d078ad662441c70240|19479266",
            "1.3.8|linux_arm64|a42bf3c7d6327f45d2b212b692ab4229285fb44dbb8adb7c39e18be2b26167c8|17507360",
            "1.3.7|macos_x86_64|eeae48adcd55212b34148ed203dd5843e9b2a84a852a9877f3386fadb0514980|21185288",
            "1.3.7|macos_arm64|01d553db5f7b4cf0729b725e4402643efde5884b1dabf5eb80af328ce5e447cf|19774151",
            "1.3.7|linux_x86_64|b8cf184dee15dfa89713fe56085313ab23db22e17284a9a27c0999c67ce3021e|19464102",
            "1.3.7|linux_arm64|5b491c555ea8a62dda551675fd9f27d369f5cdbe87608d2a7367d3da2d38ea38|17499971",
            "1.3.6|macos_x86_64|13881fe0100238577394243a90c0631783aad21b77a9a7ee830404f86c0d37bb|21183111",
            "1.3.6|macos_arm64|dbff0aeeaeee877c254f5414bef5c9d186e159aa0019223aac678abad9442c53|19779986",
            "1.3.6|linux_x86_64|bb44a4c2b0a832d49253b9034d8ccbd34f9feeb26eda71c665f6e7fa0861f49b|19466755",
            "1.3.6|linux_arm64|f4b1af29094290f1b3935c29033c4e5291664ee2c015ca251a020dd425c847c3|17501845",
            "1.3.5|macos_x86_64|e6c9836188265b20c2588e9c9d6b1727094b324a379337e68ba58a6d26be8b51|21182319",
            "1.3.5|macos_arm64|fcec1cbff229fbe59b03257ba2451d5ad1f5129714f08ccf6372b2737647c063|19780547",
            "1.3.5|linux_x86_64|ac28037216c3bc41de2c22724e863d883320a770056969b8d211ca8af3d477cf|19469337",
            "1.3.5|linux_arm64|ba5b1761046b899197bbfce3ad9b448d14550106d2cc37c52a60fc6822b584ed|17502759",
            "1.3.4|macos_x86_64|2a75c69ec5ed8506658b266a40075256b62a7d245ff6297df7e48fa72af23879|21181585",
            "1.3.4|macos_arm64|a1f740f92afac6db84421a3ec07d9061c34a32f88b4b0b47d243de16c961169f|19773343",
            "1.3.4|linux_x86_64|b24210f28191fa2a08efe69f54e3db2e87a63369ac4f5dcaf9f34dc9318eb1a8|19462529",
            "1.3.4|linux_arm64|65381c6b61b2d1a98892199f649a5764ff5a772080a73d70f8663245e6402c39|17494667",
            "1.3.3|macos_x86_64|2b3cf653cd106becdea562b6c8d3f8939641e5626c5278729cbef81678fa9f42|21163874",
            "1.3.3|macos_arm64|51e94ecf88059e8a53c363a048b658230f560574f99b0d8396ebacead894d159|19755200",
            "1.3.3|linux_x86_64|fa5cbf4274c67f2937cabf1a6544529d35d0b8b729ce814b40d0611fd26193c1|19451941",
            "1.3.3|linux_arm64|b940a080c698564df5e6a2f1c4e1b51b2c70a5115358d2361e3697d3985ecbfe|17488660",
            "1.3.2|macos_x86_64|3639461bbc712dc130913bbe632afb449fce8c0df692429d311e7cb808601901|21163990",
            "1.3.2|macos_arm64|80480acbfee2e2d0b094f721f7568a40b790603080d6612e19b797a16b8ba82d|19757201",
            "1.3.2|linux_x86_64|6372e02a7f04bef9dac4a7a12f4580a0ad96a37b5997e80738e070be330cb11c|19451510",
            "1.3.2|linux_arm64|ce1a8770aaf27736a3352c5c31e95fb10d0944729b9d81013bf6848f8657da5f|17485206",
            "1.3.1|macos_x86_64|4282ebe6d1d72ace0d93e8a4bcf9a6f3aceac107966216355bb516b1c49cc203|21161667",
            "1.3.1|macos_arm64|f0514f29b08da2f39ba4fff0d7eb40093915c9c69ddc700b6f39b78275207d96|19756039",
            "1.3.1|linux_x86_64|0847b14917536600ba743a759401c45196bf89937b51dd863152137f32791899|19450765",
            "1.3.1|linux_arm64|7ebb3d1ff94017fbef8acd0193e0bd29dec1a8925e2b573c05a92fdb743d1d5b|17486534",
            "1.3.0|macos_x86_64|80e55182d4495da867c93c25dc6ae29be83ece39d3225e6adedecd55b72d6bbf|21163947",
            "1.3.0|macos_arm64|df703317b5c7f80dc7c61e46de4697c9f440e650a893623351ab5e184995b404|19741011",
            "1.3.0|linux_x86_64|380ca822883176af928c80e5771d1c0ac9d69b13c6d746e6202482aedde7d457|19450952",
            "1.3.0|linux_arm64|0a15de6f934cf2217e5055412e7600d342b4f7dcc133564690776fece6213a9a|17488551",
            "1.2.9|macos_x86_64|84a678ece9929cebc34c7a9a1ba287c8b91820b336f4af8437af7feaa0117b7c|21672810",
            "1.2.9|macos_arm64|bc3b94b53cdf1be3c4988faa61aad343f48e013928c64bfc6ebeb61657f97baa|20280541",
            "1.2.9|linux_x86_64|0e0fc38641addac17103122e1953a9afad764a90e74daf4ff8ceeba4e362f2fb|19906116",
            "1.2.9|linux_arm64|6da7bf01f5a72e61255c2d80eddeba51998e2bb1f50a6d81b0d3b71e70e18531|17946045",
            "1.2.8|macos_x86_64|efd3e21a9bb1cfa68303f8d119ea8970dbb616f5f99caa0fe21d796e0cd70252|21678594",
            "1.2.8|macos_arm64|2c83bfea9e1c202c449e91bee06a804afb45cb8ba64a73da48fb0f61df51b327|20277152",
            "1.2.8|linux_x86_64|3e9c46d6f37338e90d5018c156d89961b0ffb0f355249679593aff99f9abe2a2|19907515",
            "1.2.8|linux_arm64|26c05cadb05cdaa8ac64b90b982b4e9350715ec2e9995a6b03bb964d230de055|17947439",
            "1.2.7|macos_x86_64|74e47b54ea78685be24c84e0e17b22b56220afcdb24ec853514b3863199f01e4|21673162",
            "1.2.7|macos_arm64|ec4e623914b411f8cc93a1e71396a1e7f1fe1e96bb2e532ba3e955d2ca5cc442|20278743",
            "1.2.7|linux_x86_64|dfd7c44a5b6832d62860a01095a15b53616fb3ea4441ab89542f9364e3fca718|19907183",
            "1.2.7|linux_arm64|80d064008d57ba5dc97e189215c87275bf39ca14b1234430eae2f114394ea229|17943724",
            "1.2.6|macos_x86_64|d896d2776af8b06cd4acd695ad75913040ce31234f5948688fd3c3fde53b1f75|21670957",
            "1.2.6|macos_arm64|c88ceb34f343a2bb86960e32925c5ec43b41922ee9ede1019c5cf7d7b4097718|20279669",
            "1.2.6|linux_x86_64|9fd445e7a191317dcfc99d012ab632f2cc01f12af14a44dfbaba82e0f9680365|19905977",
            "1.2.6|linux_arm64|322755d11f0da11169cdb234af74ada5599046c698dccc125859505f85da2a20|17943213",
            "1.2.5|macos_x86_64|2520fde736b43332b0c2648f4f6dde407335f322a3085114dc4f70e6e50eadc0|21659883",
            "1.2.5|macos_arm64|92ad40db4a0930bdf872d6336a7b3a18b17c6fd04d9fc769b554bf51c8add505|20266441",
            "1.2.5|linux_x86_64|281344ed7e2b49b3d6af300b1fe310beed8778c56f3563c4d60e5541c0978f1b|19897064",
            "1.2.5|linux_arm64|0544420eb29b792444014988018ae77a7c8df6b23d84983728695ba73e38f54a|17938208",
            "1.2.4|macos_x86_64|e7d2c66264a3da94854ae6ff692bbb9a1bc11c36bb5658e3ef19841388a07430|21658356",
            "1.2.4|macos_arm64|c31754ff5553707ef9fd2f913b833c779ab05ce192eb14913f51816a077c6798|20263133",
            "1.2.4|linux_x86_64|705ea62a44a0081594dad6b2b093eefefb12d54fa5a20a66562f9e082b00414c|19895510",
            "1.2.4|linux_arm64|11cfa2233dc708b51b16d5b923379db67e35c22b1b988773e5b31a7c2e251471|17936883",
            "1.2.3|macos_x86_64|bdc22658463237530dc120dadb0221762d9fb9116e7a6e0dc063d8ae649c431e|21658937",
            "1.2.3|macos_arm64|6f06debac2ac54951464bf490e1606f973ab53ad8ba5decea76646e8f9309512|20256836",
            "1.2.3|linux_x86_64|728b6fbcb288ad1b7b6590585410a98d3b7e05efe4601ef776c37e15e9a83a96|19891436",
            "1.2.3|linux_arm64|a48991e938a25bfe5d257f4b6cbbdc73d920cc34bbc8f0e685e28b9610ad75fe|17933271",
            "1.2.2|macos_x86_64|1d22663c1ab22ecea774ae63aee21eecfee0bbc23b953206d889a5ba3c08525a|21656824",
            "1.2.2|macos_arm64|b87716b55a3b10cced60db5285bae57aee9cc0f81c555dccdc4f54f62c2a3b60|20254768",
            "1.2.2|linux_x86_64|2934a0e8824925beb956b2edb5fef212a6141c089d29d8568150a43f95b3a626|19889133",
            "1.2.2|linux_arm64|9c6202237d7477412054dcd36fdc269da9ee66ecbc45bb07d0d63b7d36af7b21|17932829",
            "1.2.1|macos_x86_64|31c0fd4deb7c6a77c08d2fdf59c37950e6df7165088c004e1dd7f5e09fbf6307|21645582",
            "1.2.1|macos_arm64|70159b3e3eb49ee71193815943d9217c59203fd4ee8c6960aeded744094a2250|20253448",
            "1.2.1|linux_x86_64|8cf8eb7ed2d95a4213fbfd0459ab303f890e79220196d1c4aae9ecf22547302e|19881618",
            "1.2.1|linux_arm64|972ea512dac822274791dedceb6e7f8b9ac2ed36bd7759269b6806d0ab049128|17922073",
            "1.2.0|macos_x86_64|1b102ba3bf0c60ff6cbee74f721bf8105793c1107a1c6d03dcab98d7079f0c77|21645732",
            "1.2.0|macos_arm64|f5e46cabe5889b60597f0e9c365cbc663e4c952c90a16c10489897c2075ae4f0|20253335",
            "1.2.0|linux_x86_64|b87de03adbdfdff3c2552c8c8377552d0eecd787154465100cf4e29de4a7be1f|19880608",
            "1.2.0|linux_arm64|ee80b8635d8fdbaed57beffe281cf87b8b1fd1ddb29c08d20e25a152d9f0f871|17920355",
            "1.1.9|macos_x86_64|685258b525eae94fb0b406faf661aa056d31666256bf28e625365a251cb89fdc|20850638",
            "1.1.9|macos_arm64|39fac4be74462be86b2290dd09fe1092f73dfb48e2df92406af0e199cfa6a16c|20093184",
            "1.1.9|linux_x86_64|9d2d8a89f5cc8bc1c06cb6f34ce76ec4b99184b07eb776f8b39183b513d7798a|19262029",
            "1.1.9|linux_arm64|e8a09d1fe5a68ed75e5fabe26c609ad12a7e459002dea6543f1084993b87a266|17521011",
            "1.1.8|macos_x86_64|48f1f1e04d0aa8f5f1a661de95e3c2b8fd8ab16b3d44015372aff7693d36c2cf|20354970",
            "1.1.8|macos_arm64|943e1948c4eae82cf8b490bb274939fe666252bbc146f098e7da65b23416264a|19631574",
            "1.1.8|linux_x86_64|fbd37c1ec3d163f493075aa0fa85147e7e3f88dd98760ee7af7499783454f4c5|18796132",
            "1.1.8|linux_arm64|10b2c063dcff91329ee44bce9d71872825566b713308b3da1e5768c6998fb84f|17107405",
            "1.1.7|macos_x86_64|6e56eea328683541f6de0d5f449251a974d173e6d8161530956a20d9c239731a|20351873",
            "1.1.7|macos_arm64|8919ceee34f6bfb16a6e9ff61c95f4043c35c6d70b21de27e5a153c19c7eba9c|19625836",
            "1.1.7|linux_x86_64|e4add092a54ff6febd3325d1e0c109c9e590dc6c38f8bb7f9632e4e6bcca99d4|18795309",
            "1.1.7|linux_arm64|2f72982008c52d2d57294ea50794d7c6ae45d2948e08598bfec3e492bce8d96e|17109768",
            "1.1.6|macos_x86_64|7a499c1f08d89548ae4c0e829eea43845fa1bd7b464e7df46102b35e6081fe44|20303856",
            "1.1.6|macos_arm64|f06a14fdb610ec5a7f18bdbb2f67187230eb418329756732d970b6ca3dae12c3|19577273",
            "1.1.6|linux_x86_64|3e330ce4c8c0434cdd79fe04ed6f6e28e72db44c47ae50d01c342c8a2b05d331|18751464",
            "1.1.6|linux_arm64|a53fb63625af3572f7252b9fb61d787ab153132a8984b12f4bb84b8ee408ec53|17069580",
            "1.1.5|macos_x86_64|dcf7133ebf61d195e432ddcb70e604bf45056163d960e991881efbecdbd7892b|20300006",
            "1.1.5|macos_arm64|6e5a8d22343722dc8bfcf1d2fd7b742f5b46287f87171e8143fc9b87db32c3d4|19581167",
            "1.1.5|linux_x86_64|30942d5055c7151f051c8ea75481ff1dc95b2c4409dbb50196419c21168d6467|18748879",
            "1.1.5|linux_arm64|2fb6324c24c14523ae63cedcbc94a8e6c1c317987eced0abfca2f6218d217ca5|17069683",
            "1.1.4|macos_x86_64|4f3bc78fedd4aa17f67acc0db4eafdb6d70ba72392aaba65fe72855520f11f3d|20242050",
            "1.1.4|macos_arm64|5642b46e9c7fb692f05eba998cd4065fb2e48aa8b0aac9d2a116472fbabe34a1|19498408",
            "1.1.4|linux_x86_64|fca028d622f82788fdc35c1349e78d69ff07c7bb68c27d12f8b48c420e3ecdfb|18695508",
            "1.1.4|linux_arm64|3c1982cf0d16276c82960db60c998d79ba19e413af4fa2c7f6f86e4994379437|16996040",
            "1.1.3|macos_x86_64|016bab760c96d4e64d2140a5f25c614ccc13c3fe9b3889e70c564bd02099259f|20241648",
            "1.1.3|macos_arm64|02ba769bb0a8d4bc50ff60989b0f201ce54fd2afac2fb3544a0791aca5d3f6d5|19493636",
            "1.1.3|linux_x86_64|b215de2a18947fff41803716b1829a3c462c4f009b687c2cbdb52ceb51157c2f|18692580",
            "1.1.3|linux_arm64|ad5a1f2c132bedc5105e3f9900e4fe46858d582c0f2a2d74355da718bbcef65d|16996972",
            "1.1.2|macos_x86_64|78faa76db5dc0ecfe4bf7c6368dbf5cca019a806f9d203580a24a4e0f8cd8353|20240584",
            "1.1.2|macos_arm64|cc3bd03b72db6247c9105edfeb9c8f674cf603e08259075143ffad66f5c25a07|19486800",
            "1.1.2|linux_x86_64|734efa82e2d0d3df8f239ce17f7370dabd38e535d21e64d35c73e45f35dfa95c|18687805",
            "1.1.2|linux_arm64|088e2226d1ddb7f68a4f65c704022a1cfdbf20fe40f02e0c3646942f211fd746|16994702",
            "1.1.1|macos_x86_64|d125dd2e92b9245f2202199b52f234035f36bdcbcd9a06f08e647e14a9d9067a|20237718",
            "1.1.1|macos_arm64|4cb6e5eb4f6036924caf934c509a1dfd61cd2c651bb3ee8fbfe2e2914dd9ed17|19488315",
            "1.1.1|linux_x86_64|07b8dc444540918597a60db9351af861335c3941f28ea8774e168db97dd74557|18687006",
            "1.1.1|linux_arm64|d6fd14da47af9ec5fa3ad5962eaef8eed6ff2f8a5041671f9c90ec5f4f8bb554|16995635",
            "1.1.0|macos_x86_64|6e0ba9afb8795a544e70dc0459f0095fea7df15e38f5d88a7dd3f620d50f8bfe|20226329",
            "1.1.0|macos_arm64|7955e173c7eadb87123fc0633c3ee67d5ba3b7d6c7f485fe803efed9f99dce54|19491369",
            "1.1.0|linux_x86_64|763378aa75500ce5ba67d0cba8aa605670cd28bf8bafc709333a30908441acb5|18683106",
            "1.1.0|linux_arm64|6697e9a263e264310373f3c91bf83f4cbfeb67b13994d2a8f7bcc492b554552e|16987201",
            "1.0.11|macos_x86_64|551a16b612edaae1037925d0e2dba30d16504ff4bd66606955172c2ed8d76131|19422757",
            "1.0.11|macos_arm64|737e1765afbadb3d76e1929d4b4af8da55010839aa08e9e730d46791eb8ea5a6|18467868",
            "1.0.11|linux_x86_64|eeb46091a42dc303c3a3c300640c7774ab25cbee5083dafa5fd83b54c8aca664|18082446",
            "1.0.11|linux_arm64|30c650f4bc218659d43e07d911c00f08e420664a3d12c812228e66f666758645|16148492",
            "1.0.10|macos_x86_64|077479e98701bc9be88db21abeec684286fd85a3463ce437d7739d2a4e372f18|33140832",
            "1.0.10|macos_arm64|776f2e144039ece66ae326ebda0884254848a2e11f0590757d02e3a74f058c81|32013985",
            "1.0.10|linux_x86_64|a221682fcc9cbd7fde22f305ead99b3ad49d8303f152e118edda086a2807716d|32674953",
            "1.0.10|linux_arm64|b091dbe5c00785ae8b5cb64149d697d61adea75e495d9e3d910f61d8c9967226|30505040",
            "1.0.9|macos_x86_64|be122ff7fb925643c5ebf4e5704b18426e18d3ca49ab59ae33d208c908cb6d5a|33140006",
            "1.0.9|macos_arm64|89b2b4fd1a0c57fabc08ad3180ad148b1f7c1c0492ed865408f75f12e11a083b|32010657",
            "1.0.9|linux_x86_64|f06ac64c6a14ed6a923d255788e4a5daefa2b50e35f32d7a3b5a2f9a5a91e255|32674820",
            "1.0.9|linux_arm64|457ac590301126e7b151ea08c5b9586a882c60039a0605fb1e44b8d23d2624fd|30510941",
            "1.0.8|macos_x86_64|909781ee76250cf7445f3b7d2b82c701688725fa1db3fb5543dfeed8c47b59de|33140123",
            "1.0.8|macos_arm64|92fa31b93d736fab6f3d105beb502a9da908445ed942a3d46952eae88907c53e|32011344",
            "1.0.8|linux_x86_64|a73459d406067ce40a46f026dce610740d368c3b4a3d96591b10c7a577984c2e|32681118",
            "1.0.8|linux_arm64|01aaef769f4791f9b28530e750aadbc983a8eabd0d55909e26392b333a1a26e4|30515501",
            "1.0.7|macos_x86_64|23b85d914465882b027d3819cc05cd114a1aaf39b550de742e81a99daa998183|33140742",
            "1.0.7|macos_arm64|d9062959f28ba0f934bfe2b6e0b021e0c01a48fa065102554ca103b8274e8e0c|32012708",
            "1.0.7|linux_x86_64|bc79e47649e2529049a356f9e60e06b47462bf6743534a10a4c16594f443be7b|32671441",
            "1.0.7|linux_arm64|4e71a9e759578020750be41e945c086e387affb58568db6d259d80d123ac80d3|30529105",
            "1.0.6|macos_x86_64|5ac4f41d5e28f31817927f2c5766c5d9b98b68d7b342e25b22d053f9ecd5a9f1|33141677",
            "1.0.6|macos_arm64|613020f90a6a5d0b98ebeb4e7cdc4b392aa06ce738fbb700159a465cd27dcbfa|32024047",
            "1.0.6|linux_x86_64|6a454323d252d34e928785a3b7c52bfaff1192f82685dfee4da1279bb700b733|32677516",
            "1.0.6|linux_arm64|2047f8afc7d0d7b645a0422181ba3fe47b3547c4fe658f95eebeb872752ec129|30514636",
        ]

    # Extra environment variables forwarded to every Terraform invocation.
    extra_env_vars = StrListOption(
        help=softwrap(
            """
            Additional environment variables that would be made available to all Terraform processes.
            """
        ),
        advanced=True,
    )
    # Passthrough CLI args (e.g. `pants deploy :: -- -auto-approve`).
    args = ArgsListOption(
        example="-auto-approve",
        passthrough=True,
        extra_help=softwrap(
            """
            Additional arguments to pass to the Terraform command line.
            """
        ),
    )
    # Whether the `tailor` goal should generate `terraform_module` targets.
    tailor = BoolOption(
        default=True,
        help="If true, add `terraform_module` targets with the `tailor` goal.",
        advanced=True,
    )
@dataclass(frozen=True)
class TerraformProcess:
    """A request to invoke Terraform."""

    # Arguments for the terraform binary, appended after the `-chdir` flag.
    args: tuple[str, ...]
    # Human-readable description attached to the resulting engine Process.
    description: str
    # Sandbox inputs for the run; empty by default.
    input_digest: Digest = EMPTY_DIGEST
    # Output paths, given relative to `chdir`, to capture after the run.
    output_files: tuple[str, ...] = ()
    output_directories: tuple[str, ...] = ()
    chdir: str = "."  # directory for terraform's `-chdir` argument
@rule
async def setup_terraform_process(
    request: TerraformProcess, terraform: TerraformTool, platform: Platform
) -> Process:
    """Turn a TerraformProcess request into a runnable engine Process.

    Downloads the pinned terraform binary for the current platform, resolves
    the user-configured extra environment variables, and builds an argv that
    runs terraform with `-chdir` pointing at the requested directory.
    """
    downloaded_terraform = await Get(
        DownloadedExternalTool,
        ExternalToolRequest,
        terraform.get_request(platform),
    )
    env = await Get(EnvironmentVars, EnvironmentVarsRequest(terraform.extra_env_vars))
    # Mount the downloaded tool at a stable name so argv can reference it.
    immutable_input_digests = {"__terraform": downloaded_terraform.digest}

    def prepend_paths(paths: Tuple[str, ...]) -> Tuple[str, ...]:
        # Outputs are declared relative to `chdir`, but the engine captures
        # them relative to the sandbox root — re-anchor them here.
        return tuple((Path(request.chdir) / path).as_posix() for path in paths)

    # NOTE(review): argv entries are not shell-interpreted, so `shlex.quote`
    # here looks defensive rather than required — confirm a chdir containing
    # spaces/quotes behaves as intended.
    return Process(
        argv=("__terraform/terraform", f"-chdir={shlex.quote(request.chdir)}") + request.args,
        input_digest=request.input_digest,
        immutable_input_digests=immutable_input_digests,
        output_files=prepend_paths(request.output_files),
        output_directories=prepend_paths(request.output_directories),
        env=env,
        description=request.description,
        level=LogLevel.DEBUG,
    )
def rules():
    """Register this module's rules together with the external-tool helper rules."""
    return [
        *collect_rules(),
        *external_tool.rules(),
    ]
|
#coding=utf-8
#created by SamLee 2020/3/6
import os
import sys
import math
from elftools.elf.elffile import ELFFile
import zipfile
import tempfile
import shutil
import struct
import entropy
from concurrent.futures import ThreadPoolExecutor
import threading
from queue import Queue
import time
import multiprocessing
import importlib
import file_util
'''
Asset files are encrypted with RC4; the RC4 key material is defined in libsec.so's .rodata:
    bytes 0 : 20*4        index values used to map into the key data
    bytes 208 : 208+33    the apk signature string, used for verification
    bytes 208+33 : +9*4   the key data, stored as 4 segments that must be merged
2020/4/26
    JNI registration uses the class-name string constant
    "com/uzmap/pkg/uzcore/external/Enslecb". Immediately before that constant sit
    the 4 segments (9 bytes each) of key data and the 33-byte apk signature string;
    the earlier fixed-offset approach does not work for some binaries.
    After obtaining the key data, use the [0:20]-byte index array to pick out the
    20-byte key value.
2020/6/5
    Migrated from the original tools.py into uzm_util.py
'''
# Bytes of the JNI registration class-name constant; used as the anchor when
# scanning libsec.so's .rodata for the RC4 key material that precedes it.
JNI_PACKAGE_BYTES = 'com/uzmap/pkg/uzcore/external/Enslecb'.encode('utf-8')
# pycryptodome rc4 implementation (optional fast path); when it is not
# installed, decrypt() falls back to the pure-Python RC4 below.
CRYPTODOME_ARC4 = None
try:
    CRYPTODOME_ARC4 = importlib.import_module('Crypto.Cipher.ARC4')
except ImportError:
    # Fix: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; only a missing module should be ignored here.
    pass
def extractRC4Key(soFile):
    """Extract the 20-character RC4 key from a libsec.so ELF.

    soFile may be a filesystem path or an open binary file object. The .rodata
    section stores 9*4 bytes of NUL-padded key material immediately before the
    JNI class-name constant, and twenty 4-byte index values at its start; the
    key is the key material re-ordered by those indices.

    Returns the key string, or None when the file is missing or the key
    material cannot be located.

    Fix: the original set soFile to None for a missing path and then crashed
    on `with None as f`; we now return None early instead.
    """
    global JNI_PACKAGE_BYTES
    keyStr, keyIdx = None, None
    if isinstance(soFile, str):
        if not os.path.exists(soFile):
            return None
        soFile = open(soFile, 'rb')
    with soFile as f:
        elffile = ELFFile(f)
        littleEndian = elffile.little_endian
        dataSection, dataContent = elffile.get_section_by_name('.rodata'), None
        if dataSection:
            dataContent = dataSection.data()
        if dataContent:
            # hoisted: the original searched for the anchor twice
            pkgIdx = dataContent.find(JNI_PACKAGE_BYTES)
            if pkgIdx >= 80 + 9 * 4:
                # twenty little/big-endian uint32 indices at the section start
                keyIdx = [struct.unpack('<I' if littleEndian else '>I', dataContent[i:i+4])[0] for i in range(0, 20*4, 4)]
                # key material sits right before the anchor; strip NUL padding
                keyStr = dataContent[pkgIdx-9*4:pkgIdx].replace(b'\x00', b'').decode('utf-8')
    return ''.join([keyStr[idx] for idx in keyIdx]) if keyStr else None
'''
#sample data for rc4 key data source (not the rc4 key itself)
preKeyIdx = [0x13,0x6,0x1f,0xa,0x8,0x12,0x3,0x16,0xb,0x0,0x12,0xc,0x19,0x6,0x12,0x9,0xe,0x2,0x17,0x1a]
rawKeyData = '988f520873542ac4a8df3cbfa8937024'
'''
def getPreKey(rawKey, keyIdxArr):
    """Assemble the pre-key by picking characters of rawKey at the given indices."""
    pickedChars = [rawKey[position] for position in keyIdxArr]
    return ''.join(pickedChars)
def computeRC4KeyMap(rc4Key):
    """Run the RC4 key-scheduling step and return the 256-entry state table.

    rc4Key may be the key string itself, or a (rawKey, indexArray) tuple /
    None, in which case the key is first assembled via getPreKey.
    """
    seed = rc4Key
    if rc4Key is None or isinstance(rc4Key, tuple):
        seed = getPreKey(rc4Key[0] if rc4Key else None, rc4Key[1] if rc4Key else None)
    # repeat the seed until it covers 256 positions, then take its byte values
    repeats = math.ceil(256 / len(seed))
    keyBytes = [ord(ch) for ch in (seed * repeats)[0:256]]
    state = list(range(256))
    acc = 0
    for i in range(256):
        swapped = state[i]
        acc += keyBytes[i] + state[i]
        # reduce the accumulator modulo 256 exactly as the native code does
        overflow = (((acc >> 0x1F) >> 0x18) + acc) & 0xFFFFFF00
        acc -= overflow
        state[i] = state[acc]
        state[acc] = swapped
    return state
def decrypt(dataBytes, rc4Key):
    """RC4-decrypt dataBytes with rc4Key.

    Uses pycryptodome's ARC4 when available; otherwise falls back to a
    pure-Python RC4 whose state table comes from computeRC4KeyMap. The return
    type mirrors the input: bytes in, bytes out; bytearray in, bytearray out;
    otherwise a list of ints.
    """
    global CRYPTODOME_ARC4
    if CRYPTODOME_ARC4:
        # fast path: a str key is encoded, anything else is passed through
        rc4 = CRYPTODOME_ARC4.new(rc4Key.encode('utf-8') if isinstance(rc4Key, type(' ')) else rc4Key)
        return rc4.decrypt(dataBytes)
    isBytes, isByteArray = isinstance(dataBytes, bytes), isinstance(dataBytes, bytearray)
    decDataBytes = []
    keyMap = computeRC4KeyMap(rc4Key)
    # Pure-Python PRGA. The shift/mask pairs below reduce the R3/R4 registers
    # modulo 256 the same way the native code does.
    R3, R4 = 0, 0
    for i in range(len(dataBytes)):
        R3 += 1
        R5 = ((R3 >> 0x1f) >> 0x18)
        R6 = (R3 + R5) & 0xFFFFFF00
        R3 -= R6
        R6 = keyMap[R3]
        R4 = R4 + R6
        R5 = (((R4 >> 0x1f) >> 0x18) + R4) & 0xFFFFFF00
        R4 = R4 - R5
        # swap the two state entries, then pick the keystream byte
        keyMap[R3] = keyMap[R4]
        keyMap[R4] = R6
        R5 = (keyMap[R3] + R6) & 0xFF
        org = dataBytes[i]
        decDataBytes.append(org ^ keyMap[R5])
    return bytes(bytearray(decDataBytes)) if isBytes else bytearray(decDataBytes) if isByteArray else decDataBytes
'''
Only js / html / css files plus config.xml and key.xml are encrypted; other
files are stored as-is and need no decryption.
'''
# Extensions of asset kinds that the packer encrypts.
enc_exts = ['js','html','css']


def needDecryptFile(fileName):
    """Return True when the named asset is one of the encrypted file kinds."""
    global enc_exts
    if 'config.xml' in fileName or 'key.xml' in fileName:
        return True
    dotPos = fileName.rfind('.')
    if dotPos < 0:
        return False
    return fileName[dotPos + 1:] in enc_exts
def decryptSingleFile(targetFile, rc4Key, saveTo=None):
    """Decrypt one on-disk asset with rc4Key, optionally writing it to saveTo.

    Returns the decrypted bytes, or None when the file does not exist or is
    not one of the encrypted asset kinds.
    """
    if not os.path.exists(targetFile):
        return None
    if not needDecryptFile(targetFile):
        return None
    with open(targetFile, 'rb') as src:
        plainData = decrypt(src.read(), rc4Key)
    if saveTo:
        with open(saveTo, 'wb') as dst:
            dst.write(plainData)
    return plainData
def decryptResourceFiles(folder, rc4Key=None):
    """Decrypt every encrypted asset under folder (or a single file) on disk.

    Each decrypted file is written next to the original as
    `<name>_decrypted.<ext>`; already-existing outputs are skipped.

    Fix: the original called decryptSingleFile(tFile, saveTo) — passing the
    output path as the RC4 key and leaving saveTo=None, so nothing was ever
    written. rc4Key is now an explicit, backward-compatibly defaulted
    parameter that is forwarded correctly.
    """
    if not os.path.exists(folder):
        return
    targetFiles = []
    if os.path.isdir(folder):
        for root, dirs, files in os.walk(folder):
            targetFiles.extend(['{}/{}'.format(root, f) for f in files])
    else:
        targetFiles.append(folder)
    for tFile in targetFiles:
        extIdx = tFile.rfind('.')
        # insert "_decrypted" before the extension (or append when there is none)
        saveTo = '{}_decrypted.{}'.format(tFile[0:extIdx], tFile[extIdx+1:]) if extIdx > -1 else '{}_decrypted'.format(tFile)
        if os.path.exists(saveTo):
            continue
        decryptResult = decryptSingleFile(tFile, rc4Key, saveTo)
        if not decryptResult:
            continue
        print('decrypt:{} => {}'.format(tFile, saveTo))
# python3.7.0 zipfile bug: '_SharedFile'.seek calls 'writing' method instead
# of '_writing', breaking seeks on members opened from a ZipFile.
def isBuggyZipfile():
    """Return True only on the affected interpreter, exactly Python 3.7.0."""
    return sys.version_info[:3] == (3, 7, 0)
def extractRC4KeyFromApk(apkFilePath):
    """Locate libsec.so inside the apk and extract the RC4 key from it.

    Returns the key from the first libsec.so candidate, or None when the apk
    does not exist, contains no libsec.so, or the key cannot be recovered.
    """
    if not os.path.exists(apkFilePath):
        print('{} does not exists'.format(apkFilePath))
        return None
    with zipfile.ZipFile(apkFilePath) as apkFile:
        apkResList = apkFile.namelist()
        soFiles = []
        for fname in apkResList:
            if fname.startswith('lib/') and fname.endswith('libsec.so'):
                with apkFile.open(fname) as soContent:
                    elfHeader = soContent.read(6)
                    # check elffile format (https://en.wikipedia.org/wiki/Executable_and_Linkable_Format)
                    if elfHeader[1] == ord('E') and elfHeader[2] == ord('L') and elfHeader[3] == ord('F'):
                        soFiles.append(fname)
        if not soFiles:
            print('libsec.so file not exists in apk file')
            return None
        for soFile in soFiles:
            with apkFile.open(soFile, 'r') as soContent:
                soTmp = None
                # ELF parsing needs a seekable stream: copy the member to a
                # temp file when the zip stream cannot seek, or on the buggy
                # Python 3.7.0 zipfile (see isBuggyZipfile).
                if not soContent.seekable() or isBuggyZipfile():
                    soTmp = tempfile.mkstemp('.tmp', 'tmp', os.path.dirname(os.path.abspath(apkFilePath)))
                    with open(soTmp[1], 'wb') as soTmpC:
                        shutil.copyfileobj(soContent, soTmpC)
                    soContent.close()
                    soContent = open(soTmp[1], 'rb')
                rc4Key = extractRC4Key(soContent)
                if soTmp:
                    # clean up the temporary copy before returning
                    os.close(soTmp[0])
                    os.remove(soTmp[1])
                # note: returns after the first candidate, whether or not a
                # key was actually found
                return rc4Key
    return None
def iterateAllNeedDecryptAssets(apkFilePath):
    """Yield (name, open file object) for every widget asset needing decryption.

    This is a generator: the existence check runs lazily on first iteration,
    and the ZipFile stays open while iteration is in progress.
    """
    if not os.path.exists(apkFilePath):
        print('{} does not exists'.format(apkFilePath))
        return
    with zipfile.ZipFile(apkFilePath) as apkFile:
        for resName in apkFile.namelist():
            if resName.startswith('assets/widget/') and needDecryptFile(resName):
                yield resName, apkFile.open(resName)
def isResourceEncrypted(apkFilePath):
    """Return True when the apk's widget resources appear to be encrypted.

    Ideally we would read the boolean compile.Properties.smode from the apk's
    bytecode, but without parsing the dex we use a simpler heuristic: the
    first file the app decrypts is assets/widget/config.xml, so if that file
    is plain XML nothing is encrypted, and vice versa.

    Fix: narrowed the original bare `except:` (which also swallowed
    SystemExit/KeyboardInterrupt) to KeyError, the exception ZipFile.open
    raises for a missing member.
    """
    if not os.path.exists(apkFilePath):
        print('{} does not exists'.format(apkFilePath))
        return False
    confFile = 'assets/widget/config.xml'
    rawXmlFileHead = '<?xml'.encode('utf-8')
    with zipfile.ZipFile(apkFilePath) as apkFile:
        confFileBytes = None
        try:
            confFileBytes = apkFile.open(confFile).read()
        except KeyError:
            # member not present in the archive; handled as "missing" below
            pass
        if not confFileBytes:
            print('{} does not exists in apk'.format(confFile))
            return False
        # encrypted content will not start with a plain XML prolog
        return confFileBytes.find(rawXmlFileHead) == -1
'''
Entropy heuristic: encrypted files generally have entropy above 0.9.
(Media files are a known exception - their entropy usually falls in the
0.8-1 range as well.)
'''
def isVeryLikelyEncrypted(dataBytes):
    """Heuristic: payloads whose entropy is >= 0.9 are treated as encrypted.

    Small payloads (<= 512 bytes) use the Shannon estimate; larger ones use
    the gzip-compressibility estimate.
    """
    if len(dataBytes) <= 512:
        measured = entropy.shannonEntropy(dataBytes)
    else:
        measured = entropy.gzipEntropy(dataBytes)
    return measured >= 0.9
def decryptAllResourcesInApk(apkFilePath, saveTo=None, printLog=False):
    """Decrypt every widget asset in the apk and write results under saveTo.

    When saveTo is None or empty the apk's own directory is used. Returns a
    dict mapping each asset's name inside the apk to the path it was written
    to, or None when the resources are encrypted but no RC4 key could be
    recovered.

    Fix: saveTo defaults to None, so the original's unconditional
    `saveTo.strip()` raised AttributeError whenever saveTo was omitted; the
    strip is now guarded.
    """
    resEncrypted = isResourceEncrypted(apkFilePath)
    rc4Key = None
    if resEncrypted:
        rc4Key = extractRC4KeyFromApk(apkFilePath)
        if not rc4Key:
            if printLog:
                print('fail to extract rc4 key')
            return None
    allAssets = iterateAllNeedDecryptAssets(apkFilePath)
    decryptMap = {}
    if allAssets:
        storeFolder = os.path.dirname(os.path.abspath(apkFilePath))
        if saveTo:
            saveTo = saveTo.strip()
        if saveTo:
            if not os.path.exists(saveTo):
                os.makedirs(saveTo)
            storeFolder = saveTo
        if storeFolder.endswith('/') or storeFolder.endswith('\\'):
            storeFolder = storeFolder[0:-1]
        while True:
            assetFile = next(allAssets, None)
            if not assetFile:
                break
            fName, fileContent = assetFile
            rawContent = fileContent.read()
            # only decrypt when the archive is encrypted AND this payload looks encrypted
            decContent = decrypt(rawContent, rc4Key=rc4Key) if resEncrypted and isVeryLikelyEncrypted(rawContent) else rawContent
            fileContent.close()
            resDecrypted = file_util.legimateFileName('{}/{}'.format(storeFolder, fName))
            decryptMap[fName] = resDecrypted
            file_util.createDirectoryIfNotExist(resDecrypted)
            with open(resDecrypted, 'wb') as f:
                f.write(decContent)
            if printLog:
                sys.stdout.write('decrypt {}\r'.format(fName))
                sys.stdout.flush()
    if printLog:
        print()
    return decryptMap
def _decryptHandle(fName, rawContent, rc4Key, resEncrypted, msgQueue):
    """Pool-worker helper: decrypt one payload when needed and queue the result."""
    if resEncrypted and isVeryLikelyEncrypted(rawContent):
        payload = decrypt(rawContent, rc4Key)
    else:
        payload = rawContent
    msgQueue.put_nowait((fName, payload))
def decryptAllResourcesInApkParallel(apkFilePath, saveTo=None, printLog=False, procPool=None, msgQueue=None):
    """Parallel variant of decryptAllResourcesInApk using a process pool.

    A background thread streams assets out of the apk and submits them to
    procPool (workers run _decryptHandle); the main thread drains msgQueue and
    writes results to disk. Returns {asset name: written path}, or None when
    the resources are encrypted but no RC4 key could be recovered.

    Fix: saveTo defaults to None, so the original's unconditional
    `saveTo.strip()` raised AttributeError whenever saveTo was omitted; the
    strip is now guarded.
    """
    resEncrypted, rc4Key = isResourceEncrypted(apkFilePath), None
    if resEncrypted:
        rc4Key = extractRC4KeyFromApk(apkFilePath)
        if not rc4Key:
            if printLog:
                print('fail to extract rc4 key')
            return None
    allAssets = iterateAllNeedDecryptAssets(apkFilePath)
    decryptMap = {}
    if allAssets:
        storeFolder = os.path.dirname(os.path.abspath(apkFilePath))
        if saveTo:
            saveTo = saveTo.strip()
        if saveTo:
            if not os.path.exists(saveTo):
                os.makedirs(saveTo)
            storeFolder = saveTo
        if storeFolder.endswith('/') or storeFolder.endswith('\\'):
            storeFolder = storeFolder[0:-1]
        if not procPool:
            procPool = multiprocessing.Pool(processes=max(2, multiprocessing.cpu_count()))
        if not msgQueue:
            msgQueue = multiprocessing.Manager().Queue(0)

        def subHandle(allAssets, rc4Key, resEncrypted, procPool, msgQueue, globalStates):
            # Producer: read each asset and hand it to the pool, or queue it
            # directly when no decryption is needed.
            while True:
                assetFile = next(allAssets, None)
                if not assetFile:
                    break
                fName, fileContent = assetFile
                rawContent = fileContent.read()
                fileContent.close()
                if resEncrypted:
                    procPool.apply_async(_decryptHandle, args=(fName, rawContent, rc4Key, resEncrypted, msgQueue))
                else:
                    msgQueue.put_nowait((fName, rawContent))
                globalStates['submittedFiles'] += 1
            globalStates['submitCompleted'] = True

        globalStates = {'submittedFiles': 0, 'processedFiles': 0, 'submitCompleted': False}
        subTh = threading.Thread(target=subHandle, args=(allAssets, rc4Key, resEncrypted, procPool, msgQueue, globalStates))
        subTh.start()
        # Consumer: poll the queue until the producer is done and every
        # submitted file has been written out.
        while True:
            if globalStates['submitCompleted'] and globalStates['processedFiles'] >= globalStates['submittedFiles']:
                break
            if msgQueue.empty():
                time.sleep(0.01)
                continue
            fName, decContent = msgQueue.get_nowait()
            globalStates['processedFiles'] += 1
            msgQueue.task_done()
            resDecrypted = file_util.legimateFileName('{}/{}'.format(storeFolder, fName))
            decryptMap[fName] = resDecrypted
            file_util.createDirectoryIfNotExist(resDecrypted)
            with open(resDecrypted, 'wb') as f:
                f.write(decContent)
            if printLog:
                sys.stdout.write('{}/{} decrypt {}\r'.format(globalStates['processedFiles'], globalStates['submittedFiles'], fName))
                sys.stdout.flush()
    if printLog:
        print('completed')
    return decryptMap
|
"""
@description: 执行训练
"""
"""
import
"""
from config import ConfigTrain
import utils
from os.path import join as pjoin
import pandas as pd
import numpy as np
import cv2
import torch
import time
"""
main
"""
if __name__ == '__main__':
    # Load training configuration and pick the compute device.
    cfg = ConfigTrain()
    print('Pick device: ', cfg.DEVICE)
    device = torch.device(cfg.DEVICE)
    # Build the network.
    print('Generating net: ', cfg.NET_NAME)
    net = utils.create_net(3, cfg.NUM_CLASSES, net_name=cfg.NET_NAME)
    if cfg.PRETRAIN:  # load pretrained weights
        print('Load pretrain weights: ', cfg.PRETRAINED_WEIGHTS)
        net.load_state_dict(torch.load(cfg.PRETRAINED_WEIGHTS, map_location='cpu'))
    net.to(device)
    # Optimizer.
    optimizer = torch.optim.Adam(net.parameters(), lr=cfg.BASE_LR)
    # Training-data generator.
    print('Preparing data... batch_size: {}, image_size: {}, crop_offset: {}'.format(cfg.BATCH_SIZE, cfg.IMAGE_SIZE, cfg.HEIGHT_CROP_OFFSET))
    df_train = pd.read_csv(pjoin(cfg.DATA_LIST_ROOT, 'train.csv'))
    data_generator = utils.train_data_generator(np.array(df_train['image']),
                                                np.array(df_train['label']),
                                                cfg.BATCH_SIZE, cfg.IMAGE_SIZE, cfg.HEIGHT_CROP_OFFSET)
    # Training loop.
    print('Let us train ...')
    log_iters = 1  # print a log line every this many iterations
    epoch_size = int(len(df_train) / cfg.BATCH_SIZE)  # iterations per epoch
    for epoch in range(cfg.EPOCH_BEGIN, cfg.EPOCH_NUM):
        epoch_loss = 0.0
        epoch_miou = 0.0
        # NOTE(review): despite the name, this is updated every iteration to
        # the running epoch average rather than once per epoch — confirm
        # that is the intended baseline for the suspicious-batch check.
        last_epoch_miou = 0.0
        prev_time = time.time()
        for iteration in range(1, epoch_size + 1):
            images, labels, images_filename = next(data_generator)
            images = images.to(device)
            labels = labels.to(device)
            lr = utils.ajust_learning_rate(optimizer, cfg.LR_STRATEGY, epoch, iteration-1, epoch_size)
            predicts = net(images)
            optimizer.zero_grad()
            loss, mean_iou = utils.create_loss(predicts, labels, cfg.NUM_CLASSES)
            epoch_loss += loss.item()
            epoch_miou += mean_iou.item()
            print("[Epoch-%d Iter-%d] LR: %.4f: iter loss: %.3f, iter iou: %.3f, epoch loss: %.3f, epoch iou: %.3f, time cost: %.3f s"
                  % (epoch, iteration, lr, loss.item(), mean_iou.item(), epoch_loss / iteration, epoch_miou / iteration, time.time() - prev_time))
            prev_time = time.time()
            # Record filenames of batches whose iou drops well below the
            # running average — candidates for bad labels.
            if mean_iou.item() < last_epoch_miou * cfg.SUSPICIOUS_RATE:
                with open(cfg.LOG_SUSPICIOUS_FILES, 'a+') as f:
                    for filename in images_filename:
                        f.write("{}\n".format(filename))
                    f.flush()
            last_epoch_miou = epoch_miou / iteration
            loss.backward()
            optimizer.step()
        # Checkpoint once per epoch, tagging the file with loss and iou.
        torch.save(net.state_dict(),
                   pjoin(cfg.WEIGHTS_SAVE_ROOT, "weights_ep_%d_%.3f_%.3f.pth"
                         % (epoch, epoch_loss / epoch_size, epoch_miou / epoch_size)))
|
'''
Susan Hohenberger and Brent Waters (Pairing-based)
| From: "Constructing Verifiable Random Functions with Large Input Spaces"
| Published in: ePrint
| Available from: http://eprint.iacr.org/2010/102.pdf
| Notes: applications to resetable ZK proofs, micropayment schemes, updatable ZK DBs
and verifiable transaction escrow schemes to name a few
* type: verifiable random functions (family of pseudo random functions)
* setting: Pairing
:Authors: J Ayo Akinyele
:Date: 1/2012
'''
from charm.toolbox.pairinggroup import PairingGroup,ZR,G1,G2,pair
from charm.toolbox.iterate import dotprod
debug = False
class VRF10:
    """
    >>> from charm.toolbox.pairinggroup import PairingGroup
    >>> group = PairingGroup('MNT224')
    >>> vrf = VRF10(group)
    >>> statement = [0, 1, 1, 0, 1, 0, 1, 0]
    >>> n = len(statement)
    >>> (public_key, secret_key) = vrf.setup(n)
    >>> witness = vrf.prove(secret_key, statement)
    >>> vrf.verify(public_key, statement, witness)
    True
    """
    """Definition in paper: behave as Pseudo Random Functions (PRFs) with an additional property that party
    holding the seed will publish a commitment to the function and is able to non-interactively convince
    a verifier that a given evaluation is correct (matches pub commitment) without sacrificing pseudo-
    randomness property on other inputs."""

    def __init__(self, groupObj):
        # The pairing group and the dotprod() term lambda are shared at module
        # scope, following the pattern used throughout the charm schemes.
        global group, lam_func
        group = groupObj
        lam_func = lambda i, a, b: a[i] ** b[i]  # one factor of prod(a_i ^ b_i)

    def setup(self, n):
        """n = bit length of inputs.  Returns the (pk, sk) key pair."""
        g1 = group.random(G1)
        g2, h = group.random(G2), group.random(G2)
        u_t = group.random(ZR)
        # n+1 exponents: u[0] plays the special first-index role in F/prove.
        u = [group.random(ZR) for i in range(n+1)]
        U_t = g2 ** u_t
        U1 = [g1 ** u[i] for i in range(len(u))]
        U2 = [g2 ** u[i] for i in range(len(u))]
        pk = { 'U1':U1, 'U2':U2,'U_t':U_t, 'g1':g1, 'g2':g2, 'h':h,'n':n }
        sk = { 'u':u, 'u_t':u_t, 'g1':g1, 'h':h,'n':n }
        return (pk, sk)

    def F(self, sk, x):
        """Evaluate the VRF on ZR-encoded input x: e(g1^(u_t*u_0*prod u_i^x_i), h)."""
        result = dotprod(1, -1, sk['n'], lam_func, sk['u'], x)
        return pair(sk['g1'] ** (sk['u_t'] * sk['u'][0] * result), sk['h'])

    def prove(self, sk, x):
        """Return the VRF value y together with the proof elements (pi, pi0) for bit-list x."""
        # Convert the input bits to ZR once, up front.  The original code
        # re-converted inside the loop on every iteration and used the
        # Python 2-only long() builtin; int() works on Python 2 and 3.
        x = [group.init(ZR, int(j)) for j in x]
        pi = [None for _ in range(sk['n'])]
        for i in range(sk['n']):
            # pi[i] commits to the partial product over the first i+1 bits.
            result = dotprod(1, -1, i+1, lam_func, sk['u'], x)
            pi[i] = sk['g1'] ** (sk['u_t'] * result)
        result0 = dotprod(1, -1, sk['n'], lam_func, sk['u'], x)
        pi_0 = sk['g1'] ** (sk['u_t'] * sk['u'][0] * result0)
        y = self.F(sk, x)
        return { 'y':y, 'pi':pi, 'pi0':pi_0 }

    def verify(self, pk, x, st):
        """Check proof st against pk for bit-list x; True iff the chain of pairing checks holds."""
        n, y, pi, pi_0 = pk['n'], st['y'], st['pi'], st['pi0']
        # check first index
        check1 = pair(pi[0], pk['g2'])
        if x[0] == 0 and check1 == pair(pk['g1'], pk['U_t']):
            if debug: print("Verify: check 0 successful!\t\tcase:", x[0])
        elif x[0] == 1 and check1 == pair(pk['U1'][0], pk['U_t']):
            if debug: print("Verify: check 0 successful!\t\tcase:", x[0])
        else:
            if debug: print("Verify: check 0 FAILURE!\t\t failed case:", x[0])
            return False
        # Each subsequent pi[i] must extend pi[i-1] consistently with bit x[i].
        for i in range(1, len(x)):
            check2 = pair(pi[i], pk['g2'])
            if x[i] == 0 and check2 == pair(pi[i-1], pk['g2']):
                if debug: print("Verify: check", i ,"successful!\t\tcase:", x[i])
            elif x[i] == 1 and check2 == pair(pi[i-1], pk['U2'][i]):
                if debug: print("Verify: check", i ,"successful!\t\tcase:", x[i])
            else:
                if debug: print("Verify: check", i ,"FAILURE!\t\tcase:", x[i])
                return False
        # Final check ties pi_0 to the last chain element and to the output y.
        if pair(pi_0, pk['g2']) == pair(pi[n-1], pk['U2'][0]) and pair(pi_0, pk['h']) == y:
            if debug: print("Verify: all and final check successful!!!")
            return True
        else:
            return False
def main():
    """Demo driver: set up the VRF for 8-bit inputs, prove over a sample
    bit string and verify the resulting proof (requires the charm library)."""
    grp = PairingGroup('MNT224')
    # bits
    x1 = [0, 1, 1, 0, 1, 0, 1, 0]
    # x2 = [1, 1, 1, 0, 1, 0, 1, 0]
    # block of bits
    n = 8
    vrf = VRF10(grp)
    # setup the VRF to accept input blocks of 8-bits
    (pk, sk) = vrf.setup(n)
    # generate proof over block x (using sk)
    st = vrf.prove(sk, x1)
    # verify bits using pk and proof
    assert vrf.verify(pk, x1, st), "VRF failed verification"
    # assert vrf.verify(pk, x2, st), "VRF should FAIL verification!!!"
if __name__ == "__main__":
debug = True
main() |
###MODULES###
import numpy as np
import pandas as pd
import os, sys
import time as t
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.ticker import MaxNLocator
import pathlib
from matplotlib.colors import Normalize
from scipy import interpolate
norm = Normalize()
from resource import getrusage, RUSAGE_SELF
import random
import scipy.ndimage as ndimage
mpl.rcParams['axes.linewidth'] = 1.5 #set the value globally
mpl.rcParams['contour.negative_linestyle'] = 'solid'
#CONSTANTS
cwd_PYTHON = os.getcwd() + '/'
RHO = 1000.0          # fluid density (presumably kg/m^3 — confirm with solver setup)
NX = 512              # grid points per side of the field data
PERIOD = 0.1          # oscillation period (presumably seconds — confirm)
RADIUSLARGE = 0.002   # large disc radius; lengths below are scaled by this
RADIUSSMALL = 0.5*RADIUSLARGE
maxR = 0.025/RADIUSLARGE   # half-width of the domain in disc-radius units
csfont = {'fontname':'Times New Roman'}
#System Arguments: config name, Reynolds number, period index, local-run flag
config = sys.argv[1]
Re = sys.argv[2]#"2"
perNumber = int(sys.argv[3])#5
local = int(sys.argv[4])
minVal, maxVal = -6.0,6.0
dX = 2.0*maxR/(1.0*NX)  # grid spacing in scaled units
# Local runs use the test-field layout; cluster runs use the per-Re layout.
if local:
    cwd_FIGS = cwd_PYTHON+"../../Figures/VorticityDetection/{0}/".format(config)
    pathlib.Path(cwd_FIGS).mkdir(parents=True, exist_ok=True)
    cwd_Re = cwd_PYTHON+'../../FieldData/TestField/'
    cwd_POS = cwd_Re
else:
    cwd_FIGS = cwd_PYTHON+'../Figures/Bifurcation/{0}/'.format(config)
    pathlib.Path(cwd_FIGS).mkdir(parents=True, exist_ok=True)
    cwd_Re = cwd_PYTHON+'../{0}/Re{1}/VTK/AVG/'.format(config,Re)
    cwd_POS = cwd_PYTHON+'../PosData/{0}/Re{1}/'.format(config,Re)
def pname(cwd, idx):
    """Return the path of the rotated position-data CSV for frame *idx*."""
    return "{}pd_rot_{:04d}.csv".format(cwd, idx)
def GetPosData(cwd,idx):
    """Load the rotated swimmer position data for frame *idx* as a DataFrame."""
    # Files are space-delimited; see pname() for the naming scheme.
    data = pd.read_csv(pname(cwd,idx),delimiter=' ')
    return data
def GetAvgFieldData(cwd,idx):
    """Load one time-averaged field CSV and return its columns as (Nx, Ny) arrays.

    File columns (space-delimited): mx my avgW avgP avgUx avgUy.
    Returns (mxArr, myArr, WArr, UxArr, UyArr); avgP is not used here.
    """
    fieldData = pd.read_csv(cwd+'AVGRot_%04d.csv'%idx,delimiter=' ')
    print(fieldData.head())
    # Grid size is fixed by the simulation output (matches NX above).
    Nx, Ny = 512, 512

    def _grid(column):
        # One flat (Nx*Ny,) column -> (Nx, Ny) array.  Reshaping the numpy
        # values directly avoids the redundant tolist()/np.array round-trip
        # of the original implementation.
        return np.asarray(fieldData[column].values).reshape((Nx, Ny))

    mxArr = _grid('mx')
    myArr = _grid('my')
    WArr = _grid('avgW')
    UxArr = _grid('avgUx')
    UyArr = _grid('avgUy')
    return (mxArr, myArr, WArr, UxArr, UyArr)
def AddDiscsToPlot(ax,pos):
    """Draw the two two-disc swimmers (discs plus connecting 'springs') on *ax*.

    Positions come from row 0 of the *pos* DataFrame.  Swimmer 'a' is drawn
    in dark grey (0.25), swimmer 'b' in light grey (0.75); upper discs have
    radius 1.0 and lower discs 0.5 (lengths scaled by RADIUSLARGE).
    """
    # (x column, y column, radius, grey level) for each of the four discs.
    # Folding the four copy-pasted Circle calls into one loop preserves the
    # original draw order: a-upper, a-lower, b-upper, b-lower.
    discs = [
        ('aXU_rot', 'aYU_rot', 1.0, 0.25),
        ('aXL_rot', 'aYL_rot', 0.5, 0.25),
        ('bXU_rot', 'bYU_rot', 1.0, 0.75),
        ('bXL_rot', 'bYL_rot', 0.5, 0.75),
    ]
    for xcol, ycol, radius, grey in discs:
        circ = Circle((pos.loc[0, xcol], pos.loc[0, ycol]), radius,
                      facecolor=(grey,)*3, linewidth=1, alpha=1.0, zorder=6)
        ax.add_patch(circ)
    # Connect each swimmer's discs with a "spring" line in the same grey.
    for ux, uy, lx, ly, grey in (
            ('aXU_rot', 'aYU_rot', 'aXL_rot', 'aYL_rot', 0.25),
            ('bXU_rot', 'bYU_rot', 'bXL_rot', 'bYL_rot', 0.75)):
        ax.plot([pos.loc[0, ux], pos.loc[0, lx]],
                [pos.loc[0, uy], pos.loc[0, ly]],
                color=(grey,)*3, linewidth=3, zorder=6)
    return
def set_size(w,h, ax=None):
    """ w, h: width, height in inches """
    # Resize the parent figure so the AXES box (not the whole figure) ends up
    # exactly w x h inches, compensating for the subplot margins.
    if not ax: ax=plt.gca()
    l = ax.figure.subplotpars.left
    r = ax.figure.subplotpars.right
    # NOTE: local names t and b shadow the module-level 'time as t' alias
    # inside this function only.
    t = ax.figure.subplotpars.top
    b = ax.figure.subplotpars.bottom
    figw = float(w)/(r-l)
    figh = float(h)/(t-b)
    ax.figure.set_size_inches(figw, figh)
    return ax
def CalcPsi2D(fx, fy, NX, DX):
    """Integrate a 2-D stream function psi from velocity components (fx, fy).

    psi is anchored at psi[0, 0] == 0: the first column accumulates -fy*DX
    down the rows, then each subsequent column adds fx*DX to the previous
    column.  Returns an (NX, NX) array.
    """
    psi = np.zeros((NX, NX))
    # First column: cumulative integral of -fy along the row direction.
    for row in range(1, NX):
        psi[row, 0] = psi[row - 1, 0] - fy[row, 0] * DX
    # Remaining columns: march across, adding fx*DX column by column.
    for col in range(1, NX):
        psi[:, col] = psi[:, col - 1] + fx[:, col] * DX
    return psi
#Plot New mesh and interpolated velocity field Ux and Uy
def PlotAvgW(cwd,mx,my,W,Ux,Uy,pos,space,scale):
    """Render the time-averaged vorticity field with stream-function contours
    and the swimmer geometry overlaid, then save the figure to *cwd*.
    (space and scale are currently unused by the body.)"""
    global FIGNUM, PERIOD,minVal,maxVal, Re, perNumber
    #Here, we will visualize the velocity field on the new coordinate system
    nRows, nCols = 1, 1
    fig, ax = plt.subplots(nrows=nRows, ncols=nCols, num=0,figsize=(6,6),dpi=200)
    #ax.set_title(r'Average Velocity Field',fontsize=12)
    #Plot Streamlines
    #Use two grids and combine them
    # NOTE(review): UxT/UyT/WT are computed but never used below (W.T is
    # recomputed inline for imshow) — candidates for removal.
    UxT, UyT, WT = Ux.T, Uy.T, W.T
    psi = CalcPsi2D(Ux,Uy,NX,dX)
    print('psi.min() = ',psi.min())
    print('psi.max() = ',psi.max())
    sys.stdout.flush()
    #Psi Contour: smooth psi before contouring so the streamlines are clean.
    psi2 = ndimage.gaussian_filter(psi, sigma=5.0, order=0)
    # Symmetric contour levels about zero based on the extreme |psi2| value.
    levels = MaxNLocator(nbins=101).tick_values(-1.0*max(abs(psi2.min()),psi2.max()), max(abs(psi2.min()),psi2.max()))
    #levels = MaxNLocator(nbins=21).tick_values(-1.0*max(abs(psi2.min()),psi2.max()), max(abs(psi2.min()),psi2.max()))
    ax.contour(mx,my,psi2,colors='k',extend='both',levels=levels)
    #PlotVorticity with imshow (interpolate to smooth)
    ax.imshow(W.T,cmap='bwr',extent=(-1.0*maxR-0.5*dX,maxR+0.5*dX,
                                     -1.0*maxR-0.5*dX,maxR+0.5*dX),
              origin='lower',vmin=-1.0,vmax=1.0,interpolation='bilinear')
    #Add swimmer
    AddDiscsToPlot(ax,pos)
    # Frame the view around the swimmer bounding box with fixed padding.
    xmin = min(pos.loc[0,'aXU_rot'],pos.loc[0,'aXL_rot'],
               pos.loc[0,'bXU_rot'],pos.loc[0,'bXL_rot'])
    xmax = max(pos.loc[0,'aXU_rot'],pos.loc[0,'aXL_rot'],
               pos.loc[0,'bXU_rot'],pos.loc[0,'bXL_rot'])
    ymin = min(pos.loc[0,'aYU_rot'],pos.loc[0,'aYL_rot'],
               pos.loc[0,'bYU_rot'],pos.loc[0,'bYL_rot'])
    ymax = max(pos.loc[0,'aYU_rot'],pos.loc[0,'aYL_rot'],
               pos.loc[0,'bYU_rot'],pos.loc[0,'bYL_rot'])
    ax.axis([xmin-0.25,xmax+0.25,ymin-2.0,ymax+2.0])
    #ax.axis([-5.0,5.0,-5.0,5.0])
    fig.tight_layout()
    fig.savefig(cwd+'W_{0}_Re{1}_per{2}_.png'.format(config,Re,perNumber))
    fig.clf()
    plt.close()
    return
if __name__ == '__main__':
    # Read one period's time-averaged field data, pair it with the swimmer
    # position data, and render the vorticity/streamline figure.
    cwd_DATA = cwd_Re
    countPer = perNumber
    AVGPlot = pathlib.Path(cwd_DATA+'AVGRot_%04d.csv'%countPer)
    if AVGPlot.exists():
        # time.clock() was removed in Python 3.8; perf_counter() is the
        # recommended replacement for measuring elapsed intervals.
        start = t.perf_counter()
        #Get Avg Field Data
        mx,my,avgW,avgUx,avgUy = GetAvgFieldData(cwd_DATA,countPer)
        #Extract Position and Time Data
        posData = GetPosData(cwd_POS,countPer)
        #Plot Averaged Field Data
        #Vorticity And Streamlines
        stend = t.perf_counter()
        diff = stend - start
        print('Time to run for 1 period = %.5fs'%diff)
        sys.stdout.flush()
        #Plot Flow Field Visual
        PlotAvgW(cwd_FIGS,mx,my,avgW,avgUx,avgUy,posData,4,5)
|
import numpy as np
import pytest
from napari._qt.widgets.qt_color_swatch import (
TRANSPARENT,
QColorSwatch,
QColorSwatchEdit,
)
@pytest.mark.parametrize('color', [None, [1, 1, 1, 1]])
@pytest.mark.parametrize('tooltip', [None, 'This is a test'])
def test_succesfull_create_qcolorswatchedit(qtbot, color, tooltip):
    """QColorSwatchEdit uses TRANSPARENT and the default tooltip when the
    corresponding constructor arguments are None."""
    widget = QColorSwatchEdit(initial_color=color, tooltip=tooltip)
    qtbot.add_widget(widget)
    # Expected fallbacks when no explicit value was supplied.
    test_color = color or TRANSPARENT
    test_tooltip = tooltip or 'click to set color'
    assert widget.color_swatch.toolTip() == test_tooltip
    np.testing.assert_array_equal(widget.color, test_color)
@pytest.mark.parametrize('color', [None, [1, 1, 1, 1]])
@pytest.mark.parametrize('tooltip', [None, 'This is a test'])
def test_succesfull_create_qcolorswatch(qtbot, color, tooltip):
    """QColorSwatch uses TRANSPARENT and the default tooltip when the
    corresponding constructor arguments are None."""
    widget = QColorSwatch(initial_color=color, tooltip=tooltip)
    qtbot.add_widget(widget)
    # Expected fallbacks when no explicit value was supplied.
    test_color = color or TRANSPARENT
    test_tooltip = tooltip or 'click to set color'
    assert widget.toolTip() == test_tooltip
    np.testing.assert_array_equal(widget.color, test_color)
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# Given preorder and inorder traversal of a tree, construct the binary tree.
# Note:
# You may assume that duplicates do not exist in the tree.
# Definition for a binary tree node.
class TreeNode(object):
    """Plain binary-tree node: a value plus left/right child links."""

    def __init__(self, x):
        self.val = x
        self.left = self.right = None
# 202 / 202 test cases passed.
# Status: Accepted
# Runtime: 266 ms
# Your runtime beats 48.18 % of python submissions.
class Solution(object):
    def buildTree(self, preorder, inorder):
        """
        :type preorder: List[int]
        :type inorder: List[int]
        :rtype: TreeNode
        """
        # Guard clause: an exhausted traversal yields an empty subtree.
        if not preorder or not inorder:
            return None
        # The preorder head is the current root; its position in inorder
        # splits the remaining values into left and right subtrees.  Popping
        # from the shared preorder list advances it across recursive calls.
        root_val = preorder.pop(0)
        split = inorder.index(root_val)
        node = TreeNode(root_val)
        node.left = self.buildTree(preorder, inorder[:split])
        node.right = self.buildTree(preorder, inorder[split + 1:])
        return node
# 202 / 202 test cases passed.
# Status: Accepted
# Runtime: 62 ms
# Your runtime beats 95.01 % of python submissions.
class Solution(object):
    def buildTree(self, preorder, inorder):
        """
        :type preorder: List[int]
        :type inorder: List[int]
        :rtype: TreeNode

        Iterative O(n) construction: walk preorder, pushing left children
        onto a stack until the stack top matches the next inorder value,
        then pop finished ancestors and attach a right child.
        """
        if not (preorder or inorder):
            return None
        root = TreeNode(preorder[0])
        nodes_stack = [root]
        # node_index: next preorder value to turn into a node;
        # level_index: next inorder value whose left subtree is complete.
        node_index, level_index = 1, 0
        while node_index < len(preorder):
            temp_node = nodes_stack[-1]
            if temp_node.val != inorder[level_index]:
                # Still descending leftward: next preorder value is a left child.
                temp_node.left = TreeNode(preorder[node_index])
                nodes_stack.append(temp_node.left)
                node_index += 1
            else:
                # Stack top's left subtree is finished; retire it and, unless
                # another ancestor also matches, attach the next preorder
                # value as this node's right child.
                nodes_stack.pop()
                level_index += 1
                if not nodes_stack or nodes_stack[-1].val != inorder[level_index]:
                    temp_node.right = TreeNode(preorder[node_index])
                    nodes_stack.append(temp_node.right)
                    node_index += 1
        return root
if __name__ == '__main__':
    # Smoke test: prints the root TreeNode built from a sample
    # preorder/inorder pair (repr only; tree shape is not displayed).
    print(Solution().buildTree([1, 2, 3, 4, 5, 6],
                               [3, 2, 4, 1, 5, 6]))
|
from functools import partial
from operator import itemgetter
import numpy as np
from django.core.management import BaseCommand
from web.models import NotionDocument
from web.services.bert_service.read import get_bert_client
from web.services.notion_service.read import get_notion_client
from web.services.notion_service.read import to_plaintext
from web.utils import cosine_distance
from web.utils import get_text_chunks
class Command(BaseCommand):
    """Rank stored Notion documents by embedding similarity to a given page.

    Usage: manage.py <command> <notion_url>.  The target page is fetched,
    chunked, embedded with BERT (weighted average by chunk length), and
    compared against every stored document embedding.
    """

    def add_arguments(self, parser):
        # URL of the Notion page to compare against the stored corpus.
        parser.add_argument('notion_url')

    def handle(self, *args, **kwargs):
        url = kwargs['notion_url']
        notion_client = get_notion_client()
        print("Getting doc")
        block = notion_client.get_block(url)
        print("SIMILAR TO:", block.title)
        text = to_plaintext(block)
        text_chunks = get_text_chunks(text)
        bert_client = get_bert_client()
        print("Embedding")
        embeddings = bert_client.encode(text_chunks)
        document_embedding = np.average(embeddings, axis=0, weights=[len(x) for x in text_chunks]) # weighted avg by chunk length
        compute_similarity = partial(cosine_distance, document_embedding)
        print("Finding similar")
        # Only documents that already have an embedding; "People" pages excluded.
        relevant_texts = NotionDocument.objects\
            .filter(embedding__isnull=False)\
            .exclude(parent_notion_document__title="People")\
            .values('id', 'embedding')
        embeddings = {x['id']: x['embedding'] for x in relevant_texts}
        similarities = {text_id: compute_similarity(embedding) for text_id, embedding in embeddings.items()}
        # NOTE(review): reverse=True puts the LARGEST cosine_distance first;
        # if cosine_distance is a true distance (smaller == more similar)
        # this ranks least-similar first — confirm cosine_distance semantics.
        top_similar_ids = sorted(similarities.items(), key=itemgetter(1), reverse=True)
        ids, similarities = zip(*top_similar_ids)
        similarities = np.ravel(similarities)
        print(f'similarity range: {max(similarities):.3f} - {min(similarities):.3f}')
        # starting_index = len(ids) - 5
        starting_index = 0
        # NOTE(review): filter(id__in=...) does not preserve the order of
        # ids[:5], so similarities[starting_index + i] may not correspond to
        # the printed document — verify before trusting the printed scores.
        most_similar_text = NotionDocument.objects\
            .filter(id__in=ids[:5])\
            .values('title', 'parent_notion_document__title')
        for i, similar_text in enumerate(most_similar_text):
            print('-------------------')
            print(f'similarity: {similarities[(starting_index + i)]:.3f}')
            print()
            parent_title = similar_text['parent_notion_document__title']
            source_title = similar_text['title']
            # Prefix the parent page title when the document has one.
            title = f"{parent_title} > {source_title}" if parent_title else source_title
            print('#', title)
            # print(similar_text['text'])
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import json
from unittest.mock import Mock, patch
import pytest
from generate_json_schema import (
GENERATED_JSON_SCHEMA_FILENAME,
VERSION_MAJOR_MINOR,
main,
simplify_option_description,
)
@pytest.mark.parametrize(
    "description,output",
    [
        (
            "Sentence starts here and ends without a full stop",
            "Sentence starts here and ends without a full stop",
        ),
        ("Sentence starts here and ends here.", "Sentence starts here and ends here"),
        (
            "We run `./pants goal` and stop here, then continue.",
            "We run `./pants goal` and stop here, then continue",
        ),
        (
            "We run `./pants goal` and stop here. After that, we continue.",
            "We run `./pants goal` and stop here",
        ),
        (
            "We run `./pants goal` and then e.g. finish.",
            "We run `./pants goal` and then e.g. finish",
        ),
        (
            "We run `./pants goal` and then stop here.With a missing whitespace after dot, a new sentence starts here.",
            "We run `./pants goal` and then stop here.With a missing whitespace after dot, a new sentence starts here",
        ),
        (
            "Sentence starts here and ends here.\n\nA new sentence goes on in a new paragraph.",
            "Sentence starts here and ends here.\n\nA new sentence goes on in a new paragraph",
        ),
        (
            "Path to a .pypirc config. (https://packaging.python.org/specifications/pypirc/). Set this.",
            "Path to a .pypirc config. (https://packaging.python.org/specifications/pypirc/)",
        ),
        (
            "Use this (4-space indentation). ('AOSP' is the Android Open Source Project.)",
            "Use this (4-space indentation). ('AOSP' is the Android Open Source Project.)",
        ),
    ],
)
def test_simplify_option_description(description: str, output: str) -> None:
    """The simplifier keeps only the first sentence and drops its trailing
    full stop, while tolerating abbreviations (e.g.), embedded URLs/parens,
    missing whitespace after a dot, and paragraph breaks."""
    assert simplify_option_description(description) == output
def test_main():
    """Test generating a JSON schema using a simplified output of the `./pants help-all` command."""
    # Patch argument parsing so main() reads the checked-in sample help output.
    with patch(
        "generate_json_schema.get_args",
        lambda *args, **kwargs: Mock(
            all_help_file="build-support/bin/json_schema_testdata/all_help_sample_output.json"
        ),
    ):
        main()
    # main() writes the schema file; load it back for the assertions below.
    with open(GENERATED_JSON_SCHEMA_FILENAME) as fh:
        schema = json.load(fh)
    assert all((schema["$schema"], schema["description"]))
    collected_properties = schema["properties"]["GLOBAL"]["properties"].keys()
    # all options should be included
    assert all(
        key in collected_properties
        for key in ["log_show_rust_3rdparty", "ignore_warnings", "level"]
    )
    # deprecated fields shouldn't be included
    assert "process_cleanup" not in collected_properties
    # an option description should be a single sentence with a URL to the option docs section
    assert schema["properties"]["GLOBAL"]["properties"]["level"]["description"] == (
        f"Set the logging level\nhttps://www.pantsbuild.org/v{VERSION_MAJOR_MINOR}/docs/reference-global#level"
    )
    # options should be part of the enum
    # TODO(alte): ensure enum is sorted once implemented
    assert set(schema["properties"]["GLOBAL"]["properties"]["level"]["enum"]) == {
        "trace",
        "debug",
        "info",
        "warn",
        "error",
    }
|
#!/usr/bin/env python
"""
Small hack to iterate over all Wistar bridges and enable LLDP/LACP
(see https://github.com/Juniper/wistar/issues/12)
"""
import subprocess
import re
# return all bridges that contain _br*
# return all bridges that contain _br*
# decode() is required on Python 3, where .stdout.read() returns bytes and
# re.findall with a str pattern would raise TypeError; on Python 2 it simply
# yields a unicode string, so the script keeps working there too.
BR_INFO = subprocess.Popen("ifconfig | grep _br", shell=True,
                           stdout=subprocess.PIPE).stdout.read().decode()
# regex creates a list of only the bridge names
BRIDGES = re.findall('(t[0-9]+_br[0-9]+?) ', BR_INFO, re.DOTALL)
for bridge in BRIDGES:
    print("fixing bridge " + bridge)
    # Set all bits of group_fwd_mask so the bridge forwards link-local
    # frames (LLDP etc.) instead of consuming them.
    command = "echo 65535 > /sys/class/net/" + bridge + "/bridge/group_fwd_mask"
    subprocess.call(command, shell=True)
|
# Generated by Django 3.2.7 on 2021-10-06 17:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ResearchPaper.year to a plain (required) IntegerField."""

    dependencies = [
        ('lab', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='researchpaper',
            name='year',
            field=models.IntegerField(),
        ),
    ]
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Type.ui'
#
# Created by: PyQt4 UI code generator 4.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui,uic

# Compatibility shims emitted by pyuic4: fall back gracefully when
# QString/UnicodeUTF8 are unavailable (e.g. under the v2 SIP string API).
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # Strings are already unicode under the v2 API; pass through unchanged.
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Type(QtGui.QDialog):
    """Dialog asking for a column name, a data type (Int/Char/String/Double)
    and a primary-key flag.

    setupUi/retranslateUi are pyuic4-generated from Type.ui; __init__ instead
    loads the same .ui file at runtime via uic.loadUi, so the generated
    methods appear unused unless called explicitly — see note on __init__.
    """

    def setupUi(self, Dialog):
        # Generated layout code: window icon, OK button box, name line-edit,
        # type combo box, two labels and the primary-key checkbox.
        Dialog.setObjectName(_fromUtf8("Dialog"))
        Dialog.resize(284, 161)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8("Images/database.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        Dialog.setWindowIcon(icon)
        self.buttonBox = QtGui.QDialogButtonBox(Dialog)
        self.buttonBox.setGeometry(QtCore.QRect(160, 120, 91, 32))
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        self.lineEdit = QtGui.QLineEdit(Dialog)
        self.lineEdit.setGeometry(QtCore.QRect(130, 20, 121, 29))
        self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
        self.comboBox = QtGui.QComboBox(Dialog)
        self.comboBox.setGeometry(QtCore.QRect(130, 70, 121, 25))
        self.comboBox.setObjectName(_fromUtf8("comboBox"))
        # Four empty items; their texts are filled in by retranslateUi().
        self.comboBox.addItem(_fromUtf8(""))
        self.comboBox.addItem(_fromUtf8(""))
        self.comboBox.addItem(_fromUtf8(""))
        self.comboBox.addItem(_fromUtf8(""))
        self.label = QtGui.QLabel(Dialog)
        self.label.setGeometry(QtCore.QRect(80, 30, 66, 17))
        self.label.setObjectName(_fromUtf8("label"))
        self.label_2 = QtGui.QLabel(Dialog)
        self.label_2.setGeometry(QtCore.QRect(80, 70, 66, 17))
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.checkBox = QtGui.QCheckBox(Dialog)
        self.checkBox.setGeometry(QtCore.QRect(20, 100, 111, 22))
        self.checkBox.setObjectName(_fromUtf8("checkBox"))
        self.retranslateUi(Dialog)
        # Old-style signal/slot connections, as emitted by pyuic4.
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), Dialog.accept)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        # Assign all user-visible strings (translatable via Qt's tr machinery).
        Dialog.setWindowTitle(_translate("Dialog", "What Type", None))
        self.comboBox.setItemText(0, _translate("Dialog", "Int", None))
        self.comboBox.setItemText(1, _translate("Dialog", "Char", None))
        self.comboBox.setItemText(2, _translate("Dialog", "String", None))
        self.comboBox.setItemText(3, _translate("Dialog", "Double", None))
        self.label.setText(_translate("Dialog", "Name:", None))
        self.label_2.setText(_translate("Dialog", "Type:", None))
        self.checkBox.setText(_translate("Dialog", "Primary key", None))

    '''TODO: these getters still need fixing.'''
    def get_types(self):
        # Currently selected type name from the combo box.
        return str(self.comboBox.currentText())

    def get_name(self):
        # Column name typed by the user.
        return str(self.lineEdit.text())

    def __init__(self):
        QtGui.QDialog.__init__(self)
        # Loads Type.ui directly, bypassing the generated setupUi above.
        # NOTE(review): confirm whether setupUi is still needed at all.
        uic.loadUi("Type.ui", self)
if __name__ == "__main__":
    # Standalone smoke test: show the dialog and run the Qt event loop.
    import sys
    app = QtGui.QApplication(sys.argv)
    Dialog = Type()
    Dialog.show()
    sys.exit(app.exec_())
|
import numpy as np
import math
import torch
from torch.autograd import Function
class MaxDivideMin(Function):
    """Autograd op whose backward pass pushes down the condition number
    (sigma_max / sigma_min) of the input representation matrix."""

    @staticmethod
    def forward(ctx, input_rep, coef, count=None, f=None, logger=None):
        """
        calculate the condition number
        :param ctx:
        :param input_rep: (batch, dim_representation)
        :param coef: the coefficient for the gradients
        other params: for save sigmas to file
        :return: sigma_max / sigma_min when count is given, else a zero scalar
        """
        ctx.save_for_backward(input_rep, torch.from_numpy(np.array([coef])).type(torch.FloatTensor))
        if count is not None:
            # Full SVD only when the caller wants the sigmas logged.
            u, s, v = torch.svd(input_rep, some=False)
            logger.write_sigma(f, count, s.numpy().tolist())
            number = s[0] / s[-1]
            return number
        else:
            # Fast path: the forward value does not affect the custom
            # backward below, so return a cheap zero scalar.
            return torch.sum(torch.zeros(1))

    @staticmethod
    def backward(ctx, grad_norm):
        # saved_tensors replaces ctx.saved_variables, which was deprecated
        # long ago and has been removed from modern PyTorch.
        input_rep, coef = ctx.saved_tensors
        u, s, v = torch.svd(input_rep, some=True)  # reduced SVD handles non-square input
        max_sigma = s[0]
        # Avoid division by zero when the smallest singular value vanishes.
        min_sigma = s[-1] if float(s[-1]) != 0 else (s[-1] + 0.00000000000000000001)
        # d(sigma_max)/dA = u_1 v_1^T and d(sigma_min)/dA = u_n v_n^T.
        grad_max = u[:, 0].view(-1, 1).mm(v.t()[0, :].view(1, -1))
        grad_min = u[:, -1].view(-1, 1).mm(v.t()[-1, :].view(1, -1))
        # Quotient rule for sigma_max / sigma_min.
        grad_input = min_sigma.expand_as(grad_max) * grad_max - max_sigma.expand_as(grad_min) * grad_min
        grad_input = (1 / (min_sigma * min_sigma)).expand_as(grad_input) * grad_input
        # Scale by the user-supplied coefficient.
        grad_input = coef.expand_as(grad_input) * grad_input
        # Only input_rep receives a gradient; the other forward args do not.
        return grad_input, None, None, None, None
class MaxMinusMin(Function):
    """Autograd op whose backward pass shrinks the spread
    (sigma_max - sigma_min) of the input representation's singular values."""

    @staticmethod
    def forward(ctx, input_rep, coef, count=None, f=None, logger=None):
        """
        calculate the condition number
        :param ctx:
        :param input_rep: (batch, dim_representation)
        :param coef: the coefficient for the gradients
        other params: for save sigmas to file
        :return: sigma_max - sigma_min when count is given, else a zero scalar
        """
        ctx.save_for_backward(input_rep, torch.from_numpy(np.array([coef])).type(torch.FloatTensor))
        if count is not None:
            # Full SVD only when the caller wants the sigmas logged.
            u, s, v = torch.svd(input_rep, some=False)
            logger.write_sigma(f, count, s.numpy().tolist())
            number = s[0] - s[-1]
            return number
        else:
            # Fast path: the forward value does not affect the custom
            # backward below, so return a cheap zero scalar.
            return torch.sum(torch.zeros(1))

    @staticmethod
    def backward(ctx, grad_norm):
        # saved_tensors replaces ctx.saved_variables, which was deprecated
        # long ago and has been removed from modern PyTorch.
        input_rep, coef = ctx.saved_tensors
        u, s, v = torch.svd(input_rep, some=True)  # reduced SVD handles non-square input
        # d(sigma_max)/dA = u_1 v_1^T and d(sigma_min)/dA = u_n v_n^T, so the
        # gradient of (sigma_max - sigma_min) is their difference.
        grad_max = u[:, 0].view(-1, 1).mm(v.t()[0, :].view(1, -1))
        grad_min = u[:, -1].view(-1, 1).mm(v.t()[-1, :].view(1, -1))
        grad_input = grad_max - grad_min
        # Scale by the user-supplied coefficient.
        grad_input = coef.expand_as(grad_input) * grad_input
        # Only input_rep receives a gradient; the other forward args do not.
        return grad_input, None, None, None, None
|
# coding=utf-8
"""A serializer that extends pickle to change the default protocol"""
from __future__ import absolute_import
from .. import common
import pickle
protocol = common.select_pickle_protocol()
def dump(value, fp, sort_keys=False):
    """Serialize value as pickle to a byte-mode file object.

    When sort_keys is true and value is a dict, entries are written in
    sorted-key order so the output is deterministic.
    """
    if sort_keys and isinstance(value, dict):
        value = dict(sorted(value.items()))
    pickle.dump(value, fp, protocol=protocol)
def dumps(value, sort_keys=False):
    """Serialize value as pickle to bytes.

    When sort_keys is true and value is a dict, entries are serialized in
    sorted-key order so the output is deterministic.
    """
    if sort_keys and isinstance(value, dict):
        value = dict(sorted(value.items()))
    return pickle.dumps(value, protocol=protocol)
def load(fp):
    """Deserialize one pickle value from a byte-mode file object."""
    # Unpickler(fp).load() is documented as equivalent to pickle.load(fp).
    return pickle.Unpickler(fp).load()
def loads(bytes_value):
    """Deserialize one pickle value from a bytes object."""
    value = pickle.loads(bytes_value)
    return value
|
import board
import neopixel
import time

# Drive a 20-pixel NeoPixel strip on GPIO pin D18; (255, 69, 0) is orange-red.
pixels = neopixel.NeoPixel(board.D18, 20)
# NOTE(review): range(0) is empty, so this first loop never executes.
# It looks like a faster 0.1 s fill was intended (perhaps range(20)) —
# confirm intent before changing.
for i in range(0):
    pixels[i] = (255,69,0)
    time.sleep(.1)
# Light all 20 pixels one at a time, 0.25 s apart.
for x in range(20):
    pixels[x] = (255,69,0)
    time.sleep(.25)
|
import pygame
from pygame.locals import *
from shapes import Entity, RandEnt
# BLUE = (0, 0, 255)
# width, height = 300, 400
# screen = pygame.display.set_mode((width, height))
# background_color = (255, 255, 255)
# screen.fill(background_color)
# p = RandEnt()
# print(p.x)
# print(p.y)
# p.draw(screen)
# pygame.display.flip()
# running = True
# while running:
# for event in pygame.event.get():
# if event.type == pygame.QUIT:
# running = False
class Window:
    """Minimal pygame window: draws 10 random entities once, then idles
    in the event loop until the user closes the window."""

    def __init__(self):
        self.width, self.height = 400, 300
        # NOTE(review): pygame's set_mode takes (width, height); passing
        # (self.height, self.width) creates a 300x400 window.  That matches
        # the commented-out prototype above, but the swapped argument order
        # looks accidental — confirm the intended window size.
        self.screen = pygame.display.set_mode((self.height, self.width))
        self.background_color = (255, 255, 255)

    def main(self): # main application loop
        """Render once, then pump events until a QUIT event arrives."""
        running = True
        self.render()
        while running:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    running = False
            # pygame.display.flip()

    def render(self): # does whatever at the moment
        """Fill the background, draw 10 RandEnt entities, and flip the display."""
        self.screen.fill(self.background_color)
        num_particles = 10
        particle_list = []
        for i in range(num_particles):
            particle_list.append(RandEnt())
        for x in particle_list:
            x.draw(self.screen)
        pygame.display.flip()
if __name__ == '__main__':
    # Create the window and enter its event loop.
    app = Window()
    app.main()
"""Protocol Port Objects Class."""
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
import logging
import warnings
class ProtocolPortObjects(APIClassTemplate):
    """The ProtocolPortObjects in the FMC."""

    # JSON keys accepted from / sent to the FMC API for this object type.
    VALID_JSON_DATA = ["id", "name", "description", "port", "protocol", "type"]
    # Constructor keyword arguments mirror the JSON keys (no extras).
    VALID_FOR_KWARGS = VALID_JSON_DATA + []
    # REST endpoint suffix for this object type.
    URL_SUFFIX = "/object/protocolportobjects"
    # Fields the FMC requires when POSTing a new object.
    REQUIRED_FOR_POST = ["name", "port", "protocol"]

    def __init__(self, fmc, **kwargs):
        """
        Initialize ProtocolPortObjects object.

        :param fmc: (object) FMC object
        :param kwargs: Any other values passed during instantiation.
        :return: None
        """
        super().__init__(fmc, **kwargs)
        logging.debug("In __init__() for ProtocolPortObjects class.")
        self.parse_kwargs(**kwargs)
class ProtocolPort(ProtocolPortObjects):
    """
    Dispose of this Class after 20210101.

    Use ProtocolPortObjects() instead.
    """

    def __init__(self, fmc, **kwargs):
        # NOTE(review): resetwarnings() clears ALL global warning filters so
        # the message below is always shown — but it also discards filters
        # configured by the host application.  Consider
        # warnings.warn(..., DeprecationWarning) instead; confirm the
        # intended visibility before changing.
        warnings.resetwarnings()
        warnings.warn(
            "Deprecated: ProtocolPort() should be called via ProtocolPortObjects()."
        )
        super().__init__(fmc, **kwargs)
|
import pandas as pd
import func
import sys

# Lap-time bookkeeping CLI: each command operates on Times.csv through the
# func helper module.
track_length = 3900  # track length used for the average-speed calculation
filename = 'Times.csv'
df = pd.read_csv(filename)


def _drop_index_cols(frame):
    """Drop leftover 'Unnamed: *' index columns created by to_csv round-trips."""
    frame.drop(frame.filter(regex="Unnamed: "), axis=1, inplace=True)


print('Type "help" to see available commands')
user_input = input('').upper()
while True:
    # The original repeated this cleanup and the command prompt inside every
    # branch; hoisting them here removes the duplication without changing
    # the observable behavior of any command.
    _drop_index_cols(df)
    if user_input == 'HELP':
        func.help_info()
    elif user_input == 'ADD':
        func.new_entry()
        # df = df.to_csv(filename)
    elif user_input == 'DEL':
        func.delete_entry()
    elif user_input == 'EXIT':
        sys.exit()
    elif user_input == 'ALL':
        # Recompute derived columns and display the table.
        # NOTE(review): the original ran this read/calc sequence twice in a
        # row; kept verbatim in case func.calc_* writes the CSV between
        # passes — confirm and deduplicate if not.
        df = pd.read_csv(filename)
        _drop_index_cols(df)
        func.calc_diff()
        func.calc_avg_speed(track_length)
        df = pd.read_csv(filename)
        _drop_index_cols(df)
        func.calc_diff()
        func.calc_avg_speed(track_length)
        print(df)
    elif user_input == 'XLSX':
        xlsx_filename = input('Please enter file name fe. filename.xlsx: ')
        func.convert_to_xlsx(xlsx_filename)
    elif user_input == 'CLEAR':
        func.clear_data()
    else:
        print('Invalid command, try again')
    user_input = input('Write a command: ').upper()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 29 15:43:39 2019
This is a copy of the DC Eigen vector code, however, I am trying to paralelize it to run moore efficiently using Numba and the solution provided through stack overflow.
@author: matthewmorriss
"""
import numpy as np
import numba as nb
#
#@nb.njit()
#def isnan(win):
# for i in range(win.shape[0]):
# for j in range(win.shape[1]):
# if np.isnan(win[i,j]):
# return True
# return False
@nb.njit(parallel=True)
def DC_eig_par(DEM,w,cx,cy,cz,eps):
    """Moving-window eigenvalue roughness metric over a DEM (numba-parallel).

    :param DEM: 2-D elevation array; defines the output shape
    :param w: half-width of the (2w x 2w) moving window
    :param cx, cy, cz: surface-normal component grids, same shape as DEM
    :param eps: normalized eigenvalues below this threshold are zeroed
    :return: rms array; NaN near the borders and wherever a window held NaNs
    """
    [nrows, ncols] = np.shape(DEM)
    # Initiate a NaN-filled array the same size as the DEM.
    # BUGFIX: the original called rms.astype(np.float32) without assigning
    # the result, which is a no-op; assign it so the float32 cast sticks.
    rms = DEM*np.nan
    rms = rms.astype(np.float32)
    # Number of cells in each window, used to normalize the eigenvalues.
    nw = (w*2)**2
    # Compute RMS cycling through the DEM; outer loop runs in parallel (prange).
    for i in nb.prange(w+1, nrows-w):
        for j in range(w+1, (ncols-w)):
            tempx = cx[i-w:i+w, j-w:j+w]
            tx = tempx.flatten()
            tempy = cy[i-w:i+w, j-w:j+w]
            ty = tempy.flatten()
            tempz = cz[i-w:i+w, j-w:j+w]
            tz = tempz.flatten()
            # Only evaluate fully-valid windows (no NaNs in any component).
            if (np.isnan(np.concatenate((tx, ty, tz)))).sum() == 0:
                # Orientation (structure) tensor of the window's normals.
                T = np.array([[np.sum(tx**2), np.sum(tx*ty), np.sum(tx*tz)],
                              [np.sum(ty*tx), np.sum(ty**2), np.sum(ty*tz)],
                              [np.sum(tz*tx), np.sum(tz*ty), np.sum(tz**2)]])
                # np.linalg.eig returns (values, vectors); only values needed.
                # NOTE(review): unlike MATLAB, numpy does not guarantee any
                # eigenvalue ordering, so l[0]/l[1] may not be the two largest
                # — confirm against the original MATLAB code.
                [Te, _] = np.linalg.eig(T)
                l = (Te/nw)
                l[l < eps] = 0
                rms[i, j] = 1/np.log(l[0]/l[1])
            else:
                rms[i, j] = np.nan
    return(rms)
import sys
import pytest
import itertools
from models import *
from utils import assert_queries_equal, assert_query_count
from peewee import ModelQueryResultWrapper
from peewee import NaiveQueryResultWrapper
from aiopeewee.result import (AioNaiveQueryResultWrapper,
AioModelQueryResultWrapper)
from aiopeewee.utils import anext, alist
#from playhouse.tests.base import ModelTestCase
#from playhouse.tests.base import skip_test_if
#from playhouse.tests.base import test_db
#from playhouse.tests.models import *
pytestmark = pytest.mark.asyncio
async def test_iteration(flushdb):
    """Async iteration over a result wrapper: a partial pass followed by two
    full passes replayed from the populated row cache, all within one query."""
    await User.create_users(10)
    with assert_query_count(1):
        sq = User.select()
        qr = await sq.execute()
        # Consume only the first five rows, breaking mid-iteration.
        first_five = []
        i = 0
        async for u in qr:
            first_five.append(u.username)
            if i == 4:
                break
            i += 1
        assert first_five == ['u1', 'u2', 'u3', 'u4', 'u5']

        async def names(it):
            # Collect usernames from any async iterable.
            return [obj.username async for obj in it]

        # could be enabled when cache has been filled
        # assert await names(sq[5:]) == ['u6', 'u7', 'u8', 'u9', 'u10']
        # assert await names(sq[2:5]) == ['u3', 'u4', 'u5']

        # Re-iterating the same wrapper yields all ten rows — twice —
        # without issuing another query.
        another_iter = await names(qr)
        assert another_iter == ['u%d' % i for i in range(1, 11)]
        another_iter = await names(qr)
        assert another_iter == ['u%d' % i for i in range(1, 11)]
async def test_count(flushdb):
    """Result-wrapper ``count()`` is computed once and memoized per execution."""
    await User.create_users(5)
    with assert_query_count(1):
        query = User.select()
        qr = await query.execute()
        assert await qr.count() == 5
        # Calling again does not incur another query.
        assert await qr.count() == 5
    with assert_query_count(1):
        query = query.where(User.username != 'u1')
        qr = await query.execute()
        assert await qr.count() == 4
        # Calling again does not incur another query.
        assert await qr.count() == 4
# len is not async, asynclen could be used though
# def test_len(self):
# User.create_users(5)
# with assert_query_count(1):
# query = User.select()
# assert len(query), 5)
# qr = query.execute()
# assert len(qr), 5)
# with assert_query_count(1):
# query = query.where(User.username != 'u1')
# qr = query.execute()
# assert len(qr), 4)
# assert len(query), 4)
async def test_nested_iteration(flushdb):
    """Nested iteration over the same select reuses one query's cached rows."""
    await User.create_users(4)
    with assert_query_count(1):
        sq = User.select()
        outer = []
        inner = []
        # The inner loop re-iterates `sq` once per outer row; only the first
        # pass may touch the database, hence the query budget of 1.
        async for i_user in sq:
            outer.append(i_user.username)
            async for o_user in sq:
                inner.append(o_user.username)
        assert outer == ['u1', 'u2', 'u3', 'u4']
        assert inner == ['u1', 'u2', 'u3', 'u4'] * 4
async def test_iteration_protocol(flushdb):
    """The async-iteration protocol can be exhausted repeatedly without re-querying."""
    await User.create_users(3)
    with assert_query_count(1):
        query = User.select().order_by(User.id)
        qr = await query.execute()
        # Exhaust the wrapper twice; the second pass is served from cache.
        for _ in range(2):
            async for user in qr:
                pass
        # i = await aiter(qr)
        # async for obj in i:
        #     pass
        # with pytest.raises(StopAsyncIteration):
        #     await anext(i)
        assert [u.username async for u in qr] == ['u1', 'u2', 'u3']
        # assert query[0].username == 'u1'
        # assert query[2].username == 'u3'
        # with pytest.raises(StopAsyncIteration):
        #     await anext(i)
async def test_iterator(flushdb):
    """``iterator()`` streams rows without populating the result cache."""
    await User.create_users(10)
    with assert_query_count(1):
        qr = await User.select().order_by(User.id).execute()
        usernames = [u.username async for u in qr.iterator()]
        assert usernames == ['u%d' % i for i in range(1, 11)]
    # After streaming, the wrapper is exhausted but nothing was cached.
    assert qr._populated
    assert qr._result_cache == []
    with assert_query_count(0):
        # Re-iterating an exhausted, uncached wrapper yields nothing.
        again = [u.username async for u in qr]
        assert again == []
    with assert_query_count(1):
        qr = await User.select().where(User.username == 'xxx').execute()
        usernames = [u.username async for u in qr.iterator()]
        assert usernames == []
async def test_iterator_query_method(flushdb):
    """``iterator()`` called on the query itself also bypasses the cache."""
    await User.create_users(10)
    with assert_query_count(1):
        qr = User.select().order_by(User.id)
        usernames = [u.username async for u in qr.iterator()]
        assert usernames == ['u%d' % i for i in range(1, 11)]
    with assert_query_count(0):
        # Nothing was cached, so a second pass produces no rows and no query.
        again = [u.username async for u in qr]
        assert again == []
async def test_iterator_extended(flushdb):
    """``iterator()`` works with naive aggregate queries and with ``tuples()``."""
    await User.create_users(10)
    # u1 gets 1 blog, u2 gets 2, u3 gets 3.
    for i in range(1, 4):
        for j in range(i):
            await Blog.create(
                title='blog-%s-%s' % (i, j),
                user=await User.get(User.username == 'u%s' % i))
    qr = (User
          .select(
              User.username,
              fn.Count(Blog.pk).alias('ct'))
          .join(Blog)
          .where(User.username << ['u1', 'u2', 'u3'])
          .group_by(User)
          .order_by(User.id)
          .naive())
    accum = []
    with assert_query_count(1):
        async for user in qr.iterator():
            accum.append((user.username, user.ct))
    assert accum == [('u1', 1),
                     ('u2', 2),
                     ('u3', 3)]
    # Group by a boolean expression: 7 users outside the set, 3 inside.
    qr = (User
          .select(fn.Count(User.id).alias('ct'))
          .group_by(User.username << ['u1', 'u2', 'u3'])
          .order_by(fn.Count(User.id).desc()))
    accum = []
    with assert_query_count(1):
        async for ct, in qr.tuples().iterator():
            accum.append(ct)
    assert accum == [7, 3]
async def test_fill_cache(flushdb):
    """``fill_cache(n)`` buffers at most ``n`` rows and never over-fills."""
    def assert_usernames(qr, n):
        # The cache should hold exactly the first `n` users, in order.
        exp = ['u%d' % i for i in range(1, n + 1)]
        assert [u.username for u in qr._result_cache] == exp

    await User.create_users(20)
    with assert_query_count(1):
        qr = await User.select().execute()
        await qr.fill_cache(5)
        assert not qr._populated
        assert_usernames(qr, 5)
        # a subsequent call will not "over-fill"
        await qr.fill_cache(5)
        assert not qr._populated
        assert_usernames(qr, 5)
        # ask for one more and ye shall receive
        await qr.fill_cache(6)
        assert not qr._populated
        assert_usernames(qr, 6)
        # Requesting beyond the row count exhausts and marks populated.
        await qr.fill_cache(21)
        assert qr._populated
        assert_usernames(qr, 20)
    with pytest.raises(StopAsyncIteration):
        await anext(qr)
async def test_select_related(flushdb):
    """Selecting joined columns reconstructs related instances in one query.

    Exercises partial column selection (missing FK / missing PK), two-level
    joins, the naive fallback (one query per related fetch), and join aliases.
    """
    u1 = await User.create(username='u1')
    u2 = await User.create(username='u2')
    b1 = await Blog.create(user=u1, title='b1')
    b2 = await Blog.create(user=u2, title='b2')
    c11 = await Comment.create(blog=b1, comment='c11')
    c12 = await Comment.create(blog=b1, comment='c12')
    c21 = await Comment.create(blog=b2, comment='c21')
    c22 = await Comment.create(blog=b2, comment='c22')
    # missing comment.blog_id
    comments = (Comment
                .select(Comment.id, Comment.comment, Blog.pk, Blog.title)
                .join(Blog)
                .where(Blog.title == 'b1')
                .order_by(Comment.id))
    with assert_query_count(1):
        assert [c.blog.title async for c in comments] == ['b1', 'b1']
    # missing blog.pk
    comments = (Comment
                .select(Comment.id, Comment.comment, Comment.blog, Blog.title)
                .join(Blog)
                .where(Blog.title == 'b2')
                .order_by(Comment.id))
    with assert_query_count(1):
        assert [c.blog.title async for c in comments] == ['b2', 'b2']
    # both but going up 2 levels
    comments = (Comment
                .select(Comment, Blog, User)
                .join(Blog)
                .join(User)
                .where(User.username == 'u1')
                .order_by(Comment.id))
    with assert_query_count(1):
        assert [c.comment async for c in comments] == ['c11', 'c12']
        assert [c.blog.title async for c in comments] == ['b1', 'b1']
        assert [c.blog.user.username async for c in comments] == ['u1', 'u1']
    assert isinstance(comments._qr, AioModelQueryResultWrapper)
    # No related columns selected -> naive wrapper; each FK access costs a
    # query (1 select + 2 comments x (blog fetch + user fetch) = 5).
    comments = (Comment
                .select()
                .join(Blog)
                .join(User)
                .where(User.username == 'u1')
                .order_by(Comment.id))
    with assert_query_count(5):
        assert [(await (await c.blog).user).username
                async for c in comments] == ['u1', 'u1']
    assert isinstance(comments._qr, AioNaiveQueryResultWrapper)
    # Go up two levels and use aliases for the joined instances.
    comments = (Comment
                .select(Comment, Blog, User)
                .join(Blog, on=(Comment.blog == Blog.pk).alias('bx'))
                .join(User, on=(Blog.user == User.id).alias('ux'))
                .where(User.username == 'u1')
                .order_by(Comment.id))
    with assert_query_count(1):
        assert [c.comment async for c in comments] == ['c11', 'c12']
        assert [c.bx.title async for c in comments] == ['b1', 'b1']
        assert [c.bx.ux.username async for c in comments] == ['u1', 'u1']
async def test_naive(flushdb):
    """``naive()`` flattens joined columns onto a single model instance."""
    u1 = await User.create(username='u1')
    u2 = await User.create(username='u2')
    b1 = await Blog.create(user=u1, title='b1')
    b2 = await Blog.create(user=u2, title='b2')
    users = User.select().naive()
    assert [u.username async for u in users] == ['u1', 'u2']
    assert isinstance(users._qr, AioNaiveQueryResultWrapper)
    # With a join, Blog.title lands directly on the User instance.
    users = User.select(User, Blog).join(Blog).naive()
    assert [u.username async for u in users] == ['u1', 'u2']
    assert [u.title async for u in users] == ['b1', 'b2']
    query = Blog.select(Blog, User).join(User).order_by(Blog.title).naive()
    record = await query.get()
    assert await record.user == await User.get(User.username == 'u1')
async def test_tuples_dicts(flushdb):
    """Verify row shapes produced by ``tuples()`` and ``dicts()``.

    Bug fix: the original asserts were written ``assert [...], [...]`` —
    a comma instead of ``==`` — which merely asserts the left-hand list is
    truthy (the expected list becomes the assertion *message*), so nothing
    was ever compared.  Rewritten as real equality assertions.
    """
    u1 = await User.create(username='u1')
    u2 = await User.create(username='u2')
    b1 = await Blog.create(user=u1, title='b1')
    b2 = await Blog.create(user=u2, title='b2')
    users = User.select().tuples().order_by(User.id)
    assert [r async for r in users] == [(u1.id, 'u1'),
                                        (u2.id, 'u2')]
    users = User.select().dicts()
    assert [r async for r in users] == [
        {'id': u1.id, 'username': 'u1'},
        {'id': u2.id, 'username': 'u2'},
    ]
    # Joined selects expose all columns from both tables, in select order.
    users = User.select(User, Blog).join(Blog).order_by(User.id).tuples()
    assert [r async for r in users] == [
        (u1.id, 'u1', b1.pk, u1.id, 'b1', '', None),
        (u2.id, 'u2', b2.pk, u2.id, 'b2', '', None),
    ]
    users = User.select(User, Blog).join(Blog).order_by(User.id).dicts()
    assert [r async for r in users] == [
        {'id': u1.id, 'username': 'u1', 'pk': b1.pk,
         'user': u1.id, 'title': 'b1', 'content': '',
         'pub_date': None},
        {'id': u2.id, 'username': 'u2', 'pk': b2.pk,
         'user': u2.id, 'title': 'b2', 'content': '',
         'pub_date': None},
    ]
    # requres recent peewees
    # users = User.select().order_by(User.id).namedtuples()
    # exp = [(u1.id, 'u1'), (u2.id, 'u2')]
    # assert [(r.id, r.username) async for r in users] == exp
    # users = (User
    #          .select(
    #              User.id,
    #              User.username,
    #              fn.UPPER(User.username).alias('USERNAME'),
    #              (User.id + 2).alias('xid'))
    #          .order_by(User.id)
    #          .namedtuples())
    # exp = [(u1.id, 'u1', 'U1', u1.id + 2), (u2.id, 'u2', 'U2', u2.id + 2)]
    # assert [(r.id, r.username, r.USERNAME, r.xid) for r in users] == exp
# def test_slicing_dicing(self):
# def assertUsernames(users, nums):
# assert [u.username for u in users], ['u%d' % i for i in nums])
# User.create_users(10)
# with assert_query_count(1):
# uq = User.select().order_by(User.id)
# for i in range(2):
# res = uq[0]
# assert res.username, 'u1')
# with assert_query_count(0):
# for i in range(2):
# res = uq[1]
# assert res.username, 'u2')
# with assert_query_count(0):
# for i in range(2):
# res = uq[-1]
# assert res.username, 'u10')
# with assert_query_count(0):
# for i in range(2):
# res = uq[:3]
# assertUsernames(res, [1, 2, 3])
# with assert_query_count(0):
# for i in range(2):
# res = uq[2:5]
# assertUsernames(res, [3, 4, 5])
# with assert_query_count(0):
# for i in range(2):
# res = uq[5:]
# assertUsernames(res, [6, 7, 8, 9, 10])
# with assert_query_count(0):
# for i in range(2):
# res = uq[-3:]
# assertUsernames(res, [8, 9, 10])
# with assert_query_count(0):
# for i in range(2):
# res = uq[-5:-3]
# assertUsernames(res, [6, 7])
# with assert_query_count(0):
# for i in range(2):
# res = uq[:-3]
# assertUsernames(res, list(range(1, 8)))
# with assert_query_count(0):
# for i in range(2):
# res = uq[4:-4]
# assertUsernames(res, [5, 6])
# with assert_query_count(0):
# for i in range(2):
# res = uq[-6:6]
# assertUsernames(res, [5, 6])
# self.assertRaises(IndexError, uq.__getitem__, 10)
# with assert_query_count(0):
# res = uq[10:]
# assert res, [])
# uq = uq.clone()
# with assert_query_count(1):
# for _ in range(2):
# res = uq[-1]
# assert res.username, 'u10')
# def test_indexing_fill_cache(self):
# def assertUser(query_or_qr, idx):
# assert query_or_qr[idx].username, 'u%d' % (idx + 1))
# User.create_users(10)
# uq = User.select().order_by(User.id)
# with assert_query_count(1):
# # Ensure we can grab the first 5 users in 1 query.
# for i in range(5):
# assertUser(uq, i)
# # Iterate in reverse and ensure only costs 1 query.
# uq = User.select().order_by(User.id)
# with assert_query_count(1):
# for i in reversed(range(10)):
# assertUser(uq, i)
# # Execute the query and get reference to result wrapper.
# query = User.select().order_by(User.id)
# query.execute()
# qr = query._qr
# # Getting the first user will populate the result cache with 1 obj.
# assertUser(query, 0)
# assert len(qr._result_cache), 1)
# # Getting the last user will fill the cache.
# assertUser(query, 9)
# assert len(qr._result_cache), 10)
async def test_prepared(flushdb):
    """Model ``prepared()`` hooks fire for top-level and select-related rows."""
    for i in range(2):
        u = await User.create(username='u%d' % i)
        for j in range(2):
            await Blog.create(title='b%d-%d' % (i, j), user=u, content='')
    async for u in User.select():
        # check prepared was called
        assert u.foo == u.username
    async for b in Blog.select(Blog, User).join(User):
        # prepared is called for select-related instances
        assert b.foo == b.title
        assert b.user.foo == b.user.username
async def test_aliasing_values(flushdb):
    """Column aliases are exposed as attributes for dicts, models, and subqueries."""
    await User.create_users(2)
    q = User.select(User.username.alias('xx')).order_by(User.username)
    results = [row async for row in q.dicts()]
    assert results == [{'xx': 'u1'},
                       {'xx': 'u2'}]
    results = [user.xx async for user in q]
    assert results == ['u1', 'u2']
    # Force ModelQueryResultWrapper.
    q = (User
         .select(User.username.alias('xx'), Blog.pk)
         .join(Blog, JOIN.LEFT_OUTER)
         .order_by(User.username))
    results = [user.xx async for user in q]
    assert results == ['u1', 'u2']
    # Use Model and Field aliases.
    UA = User.alias()
    q = (User
         .select(
             User.username.alias('x'),
             UA.username.alias('y'))
         .join(UA, on=(User.id == UA.id).alias('z'))
         .order_by(User.username))
    # Aliased join instance hangs off attribute 'z'; its column off 'y'.
    results = [(user.x, user.z.y) async for user in q]
    assert results == [('u1', 'u1'), ('u2', 'u2')]
    q = q.naive()
    # Naive mode flattens 'y' directly onto the row instance.
    results = [(user.x, user.y) async for user in q]
    assert results == [('u1', 'u1'), ('u2', 'u2')]
    uq = User.select(User.id, User.username).alias('u2')
    q = (User
         .select(
             User.username.alias('x'),
             uq.c.username.alias('y'))
         .join(uq, on=(User.id == uq.c.id))
         .order_by(User.username))
    results = [(user.x, user.y) async for user in q]
    assert results == [('u1', 'u1'), ('u2', 'u2')]
async def create_users_blogs():
    """Seed two users ('u1', 'u2') and one blog apiece ('b1', 'b2')."""
    # Create both users first, then the blogs, matching the fixture's
    # original insertion order (u1, u2, b1, b2).
    owners = [await User.create(username='u%d' % n) for n in (1, 2)]
    for owner, title in zip(owners, ('b1', 'b2')):
        await Blog.create(user=owner, title=title)
async def test_fk_missing_pk(flushdb):
    """Without User.id selected, the related instance has no PK and the FK is unset."""
    await create_users_blogs()
    # Not enough information.
    with assert_query_count(1):
        q = (Blog
             .select(Blog.title, User.username)
             .join(User)
             .order_by(Blog.title, User.username))
        results = []
        async for blog in q:
            results.append((blog.title, blog.user.username))
            assert blog.user.id is None
            assert blog.user_id is None
    assert results == [('b1', 'u1'), ('b2', 'u2')]
async def test_fk_with_pk(flushdb):
    """Selecting User.id as well populates both the related PK and the FK value."""
    await create_users_blogs()
    with assert_query_count(1):
        q = (Blog
             .select(Blog.title, User.username, User.id)
             .join(User)
             .order_by(Blog.title, User.username))
        results = []
        async for blog in q:
            results.append((blog.title, blog.user.username))
            assert blog.user.id is not None
            assert blog.user_id is not None
    assert results == [('b1', 'u1'), ('b2', 'u2')]
async def test_backref_missing_pk(flushdb):
    """Joining through a backref without PKs leaves all key attributes unset."""
    await create_users_blogs()
    with assert_query_count(1):
        q = (User
             .select(User.username, Blog.title)
             .join(Blog)
             .order_by(User.username, Blog.title))
        results = []
        async for user in q:
            results.append((user.username, user.blog.title))
            assert user.id is None
            assert user.blog.pk is None
            assert user.blog.user_id is None
    assert results == [('u1', 'b1'), ('u2', 'b2')]
async def test_fk_join_expr(flushdb):
    """An aliased join expression attaches the related row under the alias name."""
    await create_users_blogs()
    with assert_query_count(1):
        q = (User
             .select(User.username, Blog.title)
             .join(Blog, on=(User.id == Blog.user).alias('bx'))
             .order_by(User.username))
        results = []
        async for user in q:
            # Blog row is reachable via the 'bx' alias, not 'blog'.
            results.append((user.username, user.bx.title))
        assert results == [('u1', 'b1'), ('u2', 'b2')]
    with assert_query_count(1):
        q = (Blog
             .select(Blog.title, User.username)
             .join(User, on=(Blog.user == User.id).alias('ux'))
             .order_by(Blog.title))
        results = []
        async for blog in q:
            results.append((blog.title, blog.ux.username))
        assert results == [('b1', 'u1'), ('b2', 'u2')]
async def test_aliases(flushdb):
    """Model aliases join correctly with explicit, reversed, and implicit conditions."""
    await create_users_blogs()
    B = Blog.alias()
    U = User.alias()
    with assert_query_count(1):
        q = (U.select(U.username, B.title)
             .join(B, on=(U.id == B.user))
             .order_by(U.username))
        results = []
        async for user in q:
            results.append((user.username, user.blog.title))
        assert results == [('u1', 'b1'), ('u2', 'b2')]
    with assert_query_count(1):
        q = (B.select(B.title, U.username)
             .join(U, on=(B.user == U.id))
             .order_by(B.title))
        results = []
        async for blog in q:
            results.append((blog.title, blog.user.username))
        assert results == [('b1', 'u1'), ('b2', 'u2')]
    # No explicit join condition.
    with assert_query_count(1):
        q = (B.select(B.title, U.username)
             .join(U, on=B.user)
             .order_by(B.title))
        results = [(blog.title, blog.user.username) async for blog in q]
        assert results == [('b1', 'u1'), ('b2', 'u2')]
    # No explicit condition, backref.
    await Blog.create(user=await User.get(User.username == 'u2'), title='b2-2')
    with assert_query_count(1):
        q = (U.select(U.username, B.title)
             .join(B, on=B.user)
             .order_by(U.username, B.title))
        results = [(user.username, user.blog.title) async for user in q]
        assert results == [('u1', 'b1'), ('u2', 'b2'), ('u2', 'b2-2')]
async def test_subqueries(flushdb):
    """Joining against an aliased subquery binds its columns onto the model.

    Cleanup: the original also built ``uq = User.select()`` which was never
    referenced — removed as a dead local.
    """
    await create_users_blogs()
    bq = Blog.select(Blog.title, Blog.user).alias('bq')
    with assert_query_count(1):
        q = (User
             .select(User, bq.c.title.bind_to(Blog))
             .join(bq, on=(User.id == bq.c.user_id).alias('blog'))
             .order_by(User.username))
        results = []
        async for user in q:
            # Subquery title is bound to Blog and exposed via the 'blog' alias.
            results.append((user.username, user.blog.title))
        assert results == [('u1', 'b1'), ('u2', 'b2')]
async def test_multiple_joins(flushdb):
    """Two joins to the same model via an alias resolve both FK endpoints."""
    users = [await User.create(username='u%s' % i) for i in range(4)]
    # One relationship row for each ordered pair (combinations are already
    # ordered by creation index).
    for from_user, to_user in itertools.combinations(users, 2):
        await Relationship.create(from_user=from_user, to_user=to_user)
    with assert_query_count(1):
        ToUser = User.alias()
        q = (Relationship
             .select(Relationship, User, ToUser)
             .join(User, on=Relationship.from_user)
             .switch(Relationship)
             .join(ToUser, on=Relationship.to_user)
             .order_by(User.username, ToUser.username))
        results = [(r.from_user.username, r.to_user.username) async for r in q]
        assert results == [
            ('u0', 'u1'),
            ('u0', 'u2'),
            ('u0', 'u3'),
            ('u1', 'u2'),
            ('u1', 'u3'),
            ('u2', 'u3'),
        ]
    with assert_query_count(1):
        ToUser = User.alias()
        # Same query expressed with explicit join expressions and an alias
        # on the second join.
        q = (Relationship
             .select(Relationship, User, ToUser)
             .join(User,
                   on=(Relationship.from_user == User.id))
             .switch(Relationship)
             .join(ToUser,
                   on=(Relationship.to_user == ToUser.id).alias('to_user'))
             .order_by(User.username, ToUser.username))
        results = [(r.from_user.username, r.to_user.username) async for r in q]
        assert results == [
            ('u0', 'u1'),
            ('u0', 'u2'),
            ('u0', 'u3'),
            ('u1', 'u2'),
            ('u1', 'u3'),
            ('u2', 'u3'),
        ]
async def create_users():
    """Insert three users named 'u0', 'u1', 'u2', in that order."""
    for name in ('u0', 'u1', 'u2'):
        await User.create(username=name)
async def assert_names(query, expected, attr='username'):
    """Order *query* by its model's id and assert each row's *attr* equals *expected*."""
    ordered = query.order_by(query.model_class.id)
    actual = []
    async for row in ordered:
        actual.append(getattr(row, attr))
    assert actual == expected
async def test_simple_select(flushdb):
    """UpperUser's column coercion upper-cases values; plain User does not."""
    await create_users()
    query = UpperUser.select()
    await assert_names(query, ['U0', 'U1', 'U2'])
    query = User.select()
    await assert_names(query, ['u0', 'u1', 'u2'])
async def test_with_alias(flushdb):
    """Coercion follows the column even when it is aliased to another attribute."""
    await create_users()
    # Even when aliased to a different attr, the column is coerced.
    query = UpperUser.select(UpperUser.username.alias('foo'))
    await assert_names(query, ['U0', 'U1', 'U2'], 'foo')
async def test_scalar(flushdb):
    """``scalar(convert=True)`` applies column coercion; plain ``scalar()`` does not."""
    await create_users()
    max_username = await (UpperUser.select(fn.Max(UpperUser.username))
                          .scalar(convert=True))
    assert max_username == 'U2'
    max_username = await (UpperUser.select(fn.Max(UpperUser.username))
                          .scalar())
    assert max_username == 'u2'
async def test_function(flushdb):
    """Coercion rules for function calls: first-arg special case, ``coerce(False)``,
    and aliasing a function result to an existing column name."""
    await create_users()
    substr = fn.SubStr(UpperUser.username, 1, 3)
    # Being the first parameter of the function, it meets the special-case
    # criteria.
    query = UpperUser.select(substr.alias('foo'))
    await assert_names(query, ['U0', 'U1', 'U2'], 'foo')
    query = UpperUser.select(substr.coerce(False).alias('foo'))
    await assert_names(query, ['u0', 'u1', 'u2'], 'foo')
    query = UpperUser.select(substr.coerce(False).alias('username'))
    await assert_names(query, ['u0', 'u1', 'u2'])
    query = UpperUser.select(fn.Lower(UpperUser.username).alias('username'))
    await assert_names(query, ['U0', 'U1', 'U2'])
    query = UpperUser.select(
        fn.Lower(UpperUser.username).alias('username').coerce(False))
    await assert_names(query, ['u0', 'u1', 'u2'])
    # Since it is aliased to an existing column, we will use that column's
    # coerce.
    query = UpperUser.select(
        fn.SubStr(fn.Lower(UpperUser.username), 1, 3).alias('username'))
    await assert_names(query, ['U0', 'U1', 'U2'])
    query = UpperUser.select(
        fn.SubStr(fn.Lower(UpperUser.username), 1, 3).alias('foo'))
    await assert_names(query, ['u0', 'u1', 'u2'], 'foo')
async def create_test_models():
    """Populate the A/B/C test models with their fixture rows.

    TestModelA and TestModelB each get ('pk1'..'pk3'); TestModelC only
    ('pk1', 'pk2') so outer-join tests have an unmatched 'pk3' row.

    Cleanup: the outer tuple was named ``data`` and was shadowed by the
    inner loop variable ``data`` — renamed the outer one to ``fixtures``.
    """
    fixtures = (
        (TestModelA, (
            ('pk1', 'a1'),
            ('pk2', 'a2'),
            ('pk3', 'a3'))),
        (TestModelB, (
            ('pk1', 'b1'),
            ('pk2', 'b2'),
            ('pk3', 'b3'))),
        (TestModelC, (
            ('pk1', 'c1'),
            ('pk2', 'c2'))),
    )
    for model_class, model_data in fixtures:
        for pk, data in model_data:
            await model_class.create(field=pk, data=data)
async def test_join_expr(flushdb):
    """Aliased join expressions resolve for both inner and left-outer joins."""
    def get_query(join_type=JOIN.INNER):
        # A -> B (aliased 'rel_b') -> C, with the B->C join type variable.
        sq = (TestModelA
              .select(TestModelA, TestModelB, TestModelC)
              .join(
                  TestModelB,
                  on=(TestModelA.field == TestModelB.field).alias('rel_b'))
              .join(
                  TestModelC,
                  join_type=join_type,
                  on=(TestModelB.field == TestModelC.field))
              .order_by(TestModelA.field))
        return sq

    await create_test_models()
    sq = get_query()
    # Inner join drops the 'pk3' row (no TestModelC match).
    assert await sq.count() == 2
    with assert_query_count(1):
        results = await alist(sq)
        expected = (('b1', 'c1'), ('b2', 'c2'))
        for i, (b_data, c_data) in enumerate(expected):
            assert results[i].rel_b.data == b_data
            assert results[i].rel_b.field.data == c_data
    sq = get_query(JOIN.LEFT_OUTER)
    # Left-outer join keeps all three A/B rows; the unmatched C is None.
    assert await sq.count() == 3
    with assert_query_count(1):
        results = await alist(sq)
        expected = (('b1', 'c1'), ('b2', 'c2'), ('b3', None))
        for i, (b_data, c_data) in enumerate(expected):
            assert results[i].rel_b.data == b_data
            assert results[i].rel_b.field.data == c_data
async def test_backward_join(flushdb):
    """Joining User -> Blog (backref) yields one row per blog, ordered."""
    u1 = await User.create(username='u1')
    u2 = await User.create(username='u2')
    for user in (u1, u2):
        await Blog.create(title='b-%s' % user.username, user=user)
    # Create an additional blog for user 2.
    await Blog.create(title='b-u2-2', user=u2)
    res = (User
           .select(User.username, Blog.title)
           .join(Blog)
           .order_by(User.username.asc(), Blog.title.asc()))
    expected = [('u1', 'b-u1'),
                ('u2', 'b-u2'),
                ('u2', 'b-u2-2')]
    assert [(u.username, u.blog.title) async for u in res] == expected
async def test_joins_with_aliases(flushdb):
    """Every combination of source model, alias, and join predicate yields the
    same user/blog pairs in a single query."""
    u1 = await User.create(username='u1')
    u2 = await User.create(username='u2')
    b1_1 = await Blog.create(user=u1, title='b1-1')
    b1_2 = await Blog.create(user=u1, title='b1-2')
    b2_1 = await Blog.create(user=u2, title='b2-1')
    UserAlias = User.alias()
    BlogAlias = Blog.alias()

    async def assert_expected_query(query, is_user_query):
        # Collect (username, title) pairs regardless of which side the
        # query starts from, then compare against the canonical fixture.
        accum = []
        with assert_query_count(1):
            if is_user_query:
                async for user in query:
                    accum.append((user.username, user.blog.title))
            else:
                async for blog in query:
                    accum.append((blog.user.username, blog.title))
        assert accum == [
            ('u1', 'b1-1'),
            ('u1', 'b1-2'),
            ('u2', 'b2-1'),
        ]

    # (source model, join target, predicate or None for implicit, user-side?)
    combinations = [
        (User, BlogAlias, User.id == BlogAlias.user, True),
        (User, BlogAlias, BlogAlias.user == User.id, True),
        (User, Blog, User.id == Blog.user, True),
        (User, Blog, Blog.user == User.id, True),
        (User, Blog, None, True),
        (Blog, UserAlias, UserAlias.id == Blog.user, False),
        (Blog, UserAlias, Blog.user == UserAlias.id, False),
        (Blog, User, User.id == Blog.user, False),
        (Blog, User, Blog.user == User.id, False),
        (Blog, User, None, False),
    ]
    for Src, JoinModel, predicate, is_user_query in combinations:
        query = (Src
                 .select(Src, JoinModel)
                 .join(JoinModel, on=predicate)
                 .order_by(SQL('1, 2')))
        await assert_expected_query(query, is_user_query)
# requires asssertJoins from base
# async def test_foreign_key_assignment(flushdb):
# parent = await Parent.create(data='p1')
# child = await Child.create(parent=parent, data='c1')
# ParentAlias = Parent.alias()
# query = Child.select(Child, ParentAlias)
# ljoin = (ParentAlias.id == Child.parent)
# rjoin = (Child.parent == ParentAlias.id)
# lhs_alias = query.join(ParentAlias, on=ljoin)
# rhs_alias = query.join(ParentAlias, on=rjoin)
# self.assertJoins(lhs_alias, [
# 'INNER JOIN "parent" AS parent '
# 'ON ("parent"."id" = "child"."parent_id")'])
# self.assertJoins(rhs_alias, [
# 'INNER JOIN "parent" AS parent '
# 'ON ("child"."parent_id" = "parent"."id")'])
# with assert_query_count(1):
# lchild = lhs_alias.get()
# assert lchild.id, child.id)
# assert lchild.parent.id, parent.id)
# with assert_query_count(1):
# rchild = rhs_alias.get()
# assert rchild.id, child.id)
# assert rchild.parent.id, parent.id)
# class TestSelectRelatedForeignKeyToNonPrimaryKey(ModelTestCase):
# requires = [Package, PackageItem]
async def test_select_related_non_pk_fk(flushdb):
    """Select-related works for a FK that references a non-primary-key column
    (PackageItem.package -> Package.barcode).

    Bug fix: this function was named ``test_select_related``, identical to an
    earlier test in this module; the later definition silently shadowed the
    earlier one so pytest never collected it.  Renamed to make both run.
    """
    p1 = await Package.create(barcode='101')
    p2 = await Package.create(barcode='102')
    pi11 = await PackageItem.create(title='p11', package='101')
    pi12 = await PackageItem.create(title='p12', package='101')
    pi21 = await PackageItem.create(title='p21', package='102')
    pi22 = await PackageItem.create(title='p22', package='102')
    # missing PackageItem.package_id.
    with assert_query_count(1):
        items = (PackageItem
                 .select(
                     PackageItem.id, PackageItem.title, Package.barcode)
                 .join(Package)
                 .where(Package.barcode == '101')
                 .order_by(PackageItem.id))
        assert [i.package.barcode async for i in items] == ['101', '101']
    with assert_query_count(1):
        items = (PackageItem
                 .select(
                     PackageItem.id, PackageItem.title, PackageItem.package, Package.id)
                 .join(Package)
                 .where(Package.barcode == '101')
                 .order_by(PackageItem.id))
        assert [i.package.id async for i in items] == [p1.id, p1.id]
# class BaseTestPrefetch(ModelTestCase):
# requires = [
# User,
# Blog,
# Comment,
# Parent,
# Child,
# Orphan,
# ChildPet,
# OrphanPet,
# Category,
# Post,
# Tag,
# TagPostThrough,
# TagPostThroughAlt,
# Category,
# UserCategory,
# Relationship,
# SpecialComment,
# ]
# user_data = [
# ('u1', (('b1', ('b1-c1', 'b1-c2')), ('b2', ('b2-c1',)))),
# ('u2', ()),
# ('u3', (('b3', ('b3-c1', 'b3-c2')), ('b4', ()))),
# ('u4', (('b5', ('b5-c1', 'b5-c2')), ('b6', ('b6-c1',)))),
# ]
# parent_data = [
# ('p1', (
# # children
# (
# ('c1', ('c1-p1', 'c1-p2')),
# ('c2', ('c2-p1',)),
# ('c3', ('c3-p1',)),
# ('c4', ()),
# ),
# # orphans
# (
# ('o1', ('o1-p1', 'o1-p2')),
# ('o2', ('o2-p1',)),
# ('o3', ('o3-p1',)),
# ('o4', ()),
# ),
# )),
# ('p2', ((), ())),
# ('p3', (
# # children
# (
# ('c6', ()),
# ('c7', ('c7-p1',)),
# ),
# # orphans
# (
# ('o6', ('o6-p1', 'o6-p2')),
# ('o7', ('o7-p1',)),
# ),
# )),
# ]
# category_tree = [
# ['root', ['p1', 'p2']],
# ['p1', ['p1-1', 'p1-2']],
# ['p2', ['p2-1', 'p2-2']],
# ['p1-1', []],
# ['p1-2', []],
# ['p2-1', []],
# ['p2-2', []],
# ]
# def setUp(self):
# super(BaseTestPrefetch, self).setUp()
# for parent, (children, orphans) in self.parent_data:
# p = Parent.create(data=parent)
# for child_pets in children:
# child, pets = child_pets
# c = Child.create(parent=p, data=child)
# for pet in pets:
# ChildPet.create(child=c, data=pet)
# for orphan_pets in orphans:
# orphan, pets = orphan_pets
# o = Orphan.create(parent=p, data=orphan)
# for pet in pets:
# OrphanPet.create(orphan=o, data=pet)
# for user, blog_comments in self.user_data:
# u = User.create(username=user)
# for blog, comments in blog_comments:
# b = Blog.create(user=u, title=blog, content='')
# for c in comments:
# Comment.create(blog=b, comment=c)
# def _build_category_tree(self):
# def cc(name, parent=None):
# return Category.create(name=name, parent=parent)
# root = cc('root')
# p1 = cc('p1', root)
# p2 = cc('p2', root)
# for p in (p1, p2):
# for i in range(2):
# cc('%s-%s' % (p.name, i + 1), p)
# class TestPrefetch(BaseTestPrefetch):
# def test_prefetch_simple(self):
# sq = User.select().where(User.username != 'u3')
# sq2 = Blog.select().where(Blog.title != 'b2')
# sq3 = Comment.select()
# with assert_query_count(3):
# prefetch_sq = prefetch(sq, sq2, sq3)
# results = []
# for user in prefetch_sq:
# results.append(user.username)
# for blog in user.blog_set_prefetch:
# results.append(blog.title)
# for comment in blog.comments_prefetch:
# results.append(comment.comment)
# assert results, [
# 'u1', 'b1', 'b1-c1', 'b1-c2',
# 'u2',
# 'u4', 'b5', 'b5-c1', 'b5-c2', 'b6', 'b6-c1',
# ])
# with assert_query_count(0):
# results = []
# for user in prefetch_sq:
# for blog in user.blog_set_prefetch:
# results.append(blog.user.username)
# for comment in blog.comments_prefetch:
# results.append(comment.blog.title)
# assert results, [
# 'u1', 'b1', 'b1', 'u4', 'b5', 'b5', 'u4', 'b6',
# ])
# def test_prefetch_reverse(self):
# sq = User.select()
# sq2 = Blog.select().where(Blog.title != 'b2').order_by(Blog.pk)
# with assert_query_count(2):
# prefetch_sq = prefetch(sq2, sq)
# results = []
# for blog in prefetch_sq:
# results.append(blog.title)
# results.append(blog.user.username)
# assert results, [
# 'b1', 'u1',
# 'b3', 'u3',
# 'b4', 'u3',
# 'b5', 'u4',
# 'b6', 'u4'])
# def test_prefetch_up_and_down(self):
# blogs = Blog.select(Blog, User).join(User).order_by(Blog.title)
# comments = Comment.select().order_by(Comment.comment.desc())
# with assert_query_count(2):
# query = prefetch(blogs, comments)
# results = []
# for blog in query:
# results.append((
# blog.user.username,
# blog.title,
# [comment.comment for comment in blog.comments_prefetch]))
# assert results, [
# ('u1', 'b1', ['b1-c2', 'b1-c1']),
# ('u1', 'b2', ['b2-c1']),
# ('u3', 'b3', ['b3-c2', 'b3-c1']),
# ('u3', 'b4', []),
# ('u4', 'b5', ['b5-c2', 'b5-c1']),
# ('u4', 'b6', ['b6-c1']),
# ])
# def test_prefetch_multi_depth(self):
# sq = Parent.select()
# sq2 = Child.select()
# sq3 = Orphan.select()
# sq4 = ChildPet.select()
# sq5 = OrphanPet.select()
# with assert_query_count(5):
# prefetch_sq = prefetch(sq, sq2, sq3, sq4, sq5)
# results = []
# for parent in prefetch_sq:
# results.append(parent.data)
# for child in parent.child_set_prefetch:
# results.append(child.data)
# for pet in child.childpet_set_prefetch:
# results.append(pet.data)
# for orphan in parent.orphan_set_prefetch:
# results.append(orphan.data)
# for pet in orphan.orphanpet_set_prefetch:
# results.append(pet.data)
# assert results, [
# 'p1', 'c1', 'c1-p1', 'c1-p2', 'c2', 'c2-p1', 'c3', 'c3-p1', 'c4',
# 'o1', 'o1-p1', 'o1-p2', 'o2', 'o2-p1', 'o3', 'o3-p1', 'o4',
# 'p2',
# 'p3', 'c6', 'c7', 'c7-p1', 'o6', 'o6-p1', 'o6-p2', 'o7', 'o7-p1',
# ])
# def test_prefetch_no_aggregate(self):
# with assert_query_count(1):
# query = (User
# .select(User, Blog)
# .join(Blog, JOIN.LEFT_OUTER)
# .order_by(User.username, Blog.title))
# results = []
# for user in query:
# results.append((
# user.username,
# user.blog.title))
# assert results, [
# ('u1', 'b1'),
# ('u1', 'b2'),
# ('u2', None),
# ('u3', 'b3'),
# ('u3', 'b4'),
# ('u4', 'b5'),
# ('u4', 'b6'),
# ])
# def test_prefetch_group_by(self):
# users = (User
# .select(User, fn.Max(fn.Length(Blog.content)).alias('max_content_len'))
# .join(Blog, JOIN_LEFT_OUTER)
# .group_by(User)
# .order_by(User.id))
# blogs = Blog.select()
# comments = Comment.select()
# with assert_query_count(3):
# result = prefetch(users, blogs, comments)
# assert len(result), 4)
# def test_prefetch_self_join(self):
# self._build_category_tree()
# Child = Category.alias()
# with assert_query_count(2):
# query = prefetch(Category.select().order_by(Category.id), Child)
# names_and_children = [
# [parent.name, [child.name for child in parent.children_prefetch]]
# for parent in query]
# assert names_and_children, self.category_tree)
# def test_prefetch_specific_model(self):
# # User -> Blog
# # -> SpecialComment (fk to user and blog)
# Comment.delete().execute()
# Blog.delete().execute()
# User.delete().execute()
# u1 = User.create(username='u1')
# u2 = User.create(username='u2')
# for i in range(1, 3):
# for user in (u1, u2):
# b = Blog.create(user=user, title='%s-b%s' % (user.username, i))
# SpecialComment.create(
# user=user,
# blog=b,
# name='%s-c%s' % (user.username, i))
# u3 = User.create(username='u3')
# SpecialComment.create(user=u3, name='u3-c1')
# u4 = User.create(username='u4')
# Blog.create(user=u4, title='u4-b1')
# u5 = User.create(username='u5')
# with assert_query_count(3):
# user_pf = prefetch(
# User.select(),
# Blog,
# (SpecialComment, User))
# results = []
# for user in user_pf:
# results.append((
# user.username,
# [b.title for b in user.blog_set_prefetch],
# [c.name for c in user.special_comments_prefetch]))
# assert results, [
# ('u1', ['u1-b1', 'u1-b2'], ['u1-c1', 'u1-c2']),
# ('u2', ['u2-b1', 'u2-b2'], ['u2-c1', 'u2-c2']),
# ('u3', [], ['u3-c1']),
# ('u4', ['u4-b1'], []),
# ('u5', [], []),
# ])
# class TestPrefetchMultipleFKs(ModelTestCase):
# requires = [
# User,
# Blog,
# Relationship,
# ]
# def create_users(self):
# names = ['charlie', 'huey', 'zaizee']
# return [User.create(username=username) for username in names]
# def create_relationships(self, charlie, huey, zaizee):
# r1 = Relationship.create(from_user=charlie, to_user=huey)
# r2 = Relationship.create(from_user=charlie, to_user=zaizee)
# r3 = Relationship.create(from_user=huey, to_user=charlie)
# r4 = Relationship.create(from_user=zaizee, to_user=charlie)
# return r1, r2, r3, r4
# def test_multiple_fks(self):
# charlie, huey, zaizee = self.create_users()
# r1, r2, r3, r4 = self.create_relationships(charlie, huey, zaizee)
# def assertRelationships(attr, values):
# for relationship, value in zip(attr, values):
# assert relationship._data, value)
# with assert_query_count(2):
# users = User.select().order_by(User.id)
# relationships = Relationship.select()
# query = prefetch(users, relationships)
# results = [row for row in query]
# assert len(results), 3)
# cp, hp, zp = results
# assertRelationships(cp.relationships_prefetch, [
# {'id': r1.id, 'from_user': charlie.id, 'to_user': huey.id},
# {'id': r2.id, 'from_user': charlie.id, 'to_user': zaizee.id}])
# assertRelationships(cp.related_to_prefetch, [
# {'id': r3.id, 'from_user': huey.id, 'to_user': charlie.id},
# {'id': r4.id, 'from_user': zaizee.id, 'to_user': charlie.id}])
# assertRelationships(hp.relationships_prefetch, [
# {'id': r3.id, 'from_user': huey.id, 'to_user': charlie.id}])
# assertRelationships(hp.related_to_prefetch, [
# {'id': r1.id, 'from_user': charlie.id, 'to_user': huey.id}])
# assertRelationships(zp.relationships_prefetch, [
# {'id': r4.id, 'from_user': zaizee.id, 'to_user': charlie.id}])
# assertRelationships(zp.related_to_prefetch, [
# {'id': r2.id, 'from_user': charlie.id, 'to_user': zaizee.id}])
# def test_prefetch_multiple_fk_reverse(self):
# charlie, huey, zaizee = self.create_users()
# r1, r2, r3, r4 = self.create_relationships(charlie, huey, zaizee)
# with assert_query_count(2):
# relationships = Relationship.select().order_by(Relationship.id)
# users = User.select()
# query = prefetch(relationships, users)
# results = [row for row in query]
# assert len(results), 4)
# expected = (
# ('charlie', 'huey'),
# ('charlie', 'zaizee'),
# ('huey', 'charlie'),
# ('zaizee', 'charlie'))
# for (from_user, to_user), relationship in zip(expected, results):
# assert relationship.from_user.username, from_user)
# assert relationship.to_user.username, to_user)
# class TestPrefetchThroughM2M(ModelTestCase):
# requires = [User, Note, Flag, NoteFlag]
# test_data = [
# ('charlie', [
# ('rewrite peewee', ['todo']),
# ('rice desktop', ['done']),
# ('test peewee', ['todo', 'urgent']),
# ('write window-manager', [])]),
# ('huey', [
# ('bite mickey', []),
# ('scratch furniture', ['todo', 'urgent']),
# ('vomit on carpet', ['done'])]),
# ('zaizee', []),
# ]
# def setUp(self):
# super(TestPrefetchThroughM2M, self).setUp()
# with test_db.atomic():
# for username, note_data in self.test_data:
# user = User.create(username=username)
# for note, flags in note_data:
# self.create_note(user, note, *flags)
# def create_note(self, user, text, *flags):
# note = Note.create(user=user, text=text)
# for flag in flags:
# try:
# flag = Flag.get(Flag.label == flag)
# except Flag.DoesNotExist:
# flag = Flag.create(label=flag)
# NoteFlag.create(note=note, flag=flag)
# return note
# def test_prefetch_through_m2m(self):
# # One query for each table being prefetched.
# with assert_query_count(4):
# users = User.select()
# notes = Note.select().order_by(Note.text)
# flags = Flag.select().order_by(Flag.label)
# query = prefetch(users, notes, NoteFlag, flags)
# accum = []
# for user in query:
# notes = []
# for note in user.notes_prefetch:
# flags = []
# for nf in note.flags_prefetch:
# assert nf.note_id, note.id)
# assert nf.note.id, note.id)
# flags.append(nf.flag.label)
# notes.append((note.text, flags))
# accum.append((user.username, notes))
# assert self.test_data, accum)
# def test_aggregate_through_m2m(self):
# with assert_query_count(1):
# query = (User
# .select(User, Note, NoteFlag, Flag)
# .join(Note, JOIN.LEFT_OUTER)
# .join(NoteFlag, JOIN.LEFT_OUTER)
# .join(Flag, JOIN.LEFT_OUTER)
# .order_by(User.id, Note.text, Flag.label)
# .aggregate_rows())
# accum = []
# for user in query:
# notes = []
# for note in user.notes:
# flags = []
# for nf in note.flags:
# assert nf.note_id, note.id)
# flags.append(nf.flag.label)
# notes.append((note.text, flags))
# accum.append((user.username, notes))
# assert self.test_data, accum)
# class TestAggregateRows(BaseTestPrefetch):
# def test_aggregate_users(self):
# with assert_query_count(1):
# query = (User
# .select(User, Blog, Comment)
# .join(Blog, JOIN.LEFT_OUTER)
# .join(Comment, JOIN.LEFT_OUTER)
# .order_by(User.username, Blog.title, Comment.id)
# .aggregate_rows())
# results = []
# for user in query:
# results.append((
# user.username,
# [(blog.title,
# [comment.comment for comment in blog.comments])
# for blog in user.blog_set]))
# assert results, [
# ('u1', [
# ('b1', ['b1-c1', 'b1-c2']),
# ('b2', ['b2-c1'])]),
# ('u2', []),
# ('u3', [
# ('b3', ['b3-c1', 'b3-c2']),
# ('b4', [])]),
# ('u4', [
# ('b5', ['b5-c1', 'b5-c2']),
# ('b6', ['b6-c1'])]),
# ])
# def test_aggregate_blogs(self):
# with assert_query_count(1):
# query = (Blog
# .select(Blog, User, Comment)
# .join(User)
# .switch(Blog)
# .join(Comment, JOIN.LEFT_OUTER)
# .order_by(Blog.title, User.username, Comment.id)
# .aggregate_rows())
# results = []
# for blog in query:
# results.append((
# blog.user.username,
# blog.title,
# [comment.comment for comment in blog.comments]))
# assert results, [
# ('u1', 'b1', ['b1-c1', 'b1-c2']),
# ('u1', 'b2', ['b2-c1']),
# ('u3', 'b3', ['b3-c1', 'b3-c2']),
# ('u3', 'b4', []),
# ('u4', 'b5', ['b5-c1', 'b5-c2']),
# ('u4', 'b6', ['b6-c1']),
# ])
# def test_aggregate_on_expression_join(self):
# with assert_query_count(1):
# join_expr = (User.id == Blog.user)
# query = (User
# .select(User, Blog)
# .join(Blog, JOIN.LEFT_OUTER, on=join_expr)
# .order_by(User.username, Blog.title)
# .aggregate_rows())
# results = []
# for user in query:
# results.append((
# user.username,
# [blog.title for blog in user.blog_set]))
# assert results, [
# ('u1', ['b1', 'b2']),
# ('u2', []),
# ('u3', ['b3', 'b4']),
# ('u4', ['b5', 'b6']),
# ])
# def test_aggregate_with_join_model_aliases(self):
# expected = [
# ('u1', ['b1', 'b2']),
# ('u2', []),
# ('u3', ['b3', 'b4']),
# ('u4', ['b5', 'b6']),
# ]
# with assert_query_count(1):
# query = (User
# .select(User, Blog)
# .join(
# Blog,
# JOIN.LEFT_OUTER,
# on=(User.id == Blog.user).alias('blogz'))
# .order_by(User.id, Blog.title)
# .aggregate_rows())
# results = [
# (user.username, [blog.title for blog in user.blogz])
# for user in query]
# assert results, expected)
# BlogAlias = Blog.alias()
# with assert_query_count(1):
# query = (User
# .select(User, BlogAlias)
# .join(
# BlogAlias,
# JOIN.LEFT_OUTER,
# on=(User.id == BlogAlias.user).alias('blogz'))
# .order_by(User.id, BlogAlias.title)
# .aggregate_rows())
# results = [
# (user.username, [blog.title for blog in user.blogz])
# for user in query]
# assert results, expected)
# def test_aggregate_unselected_join_backref(self):
# cat_1 = Category.create(name='category 1')
# cat_2 = Category.create(name='category 2')
# with test_db.transaction():
# for i, user in enumerate(User.select().order_by(User.username)):
# if i % 2 == 0:
# category = cat_2
# else:
# category = cat_1
# UserCategory.create(user=user, category=category)
# with assert_query_count(1):
# # The join on UserCategory is a backref join (since the FK is on
# # UserCategory). Additionally, UserCategory/Category are not
# # selected and are only used for filtering the result set.
# query = (User
# .select(User, Blog)
# .join(Blog, JOIN.LEFT_OUTER)
# .switch(User)
# .join(UserCategory)
# .join(Category)
# .where(Category.name == cat_1.name)
# .order_by(User.username, Blog.title)
# .aggregate_rows())
# results = []
# for user in query:
# results.append((
# user.username,
# [blog.title for blog in user.blog_set]))
# assert results, [
# ('u2', []),
# ('u4', ['b5', 'b6']),
# ])
# def test_aggregate_manytomany(self):
# p1 = Post.create(title='p1')
# p2 = Post.create(title='p2')
# Post.create(title='p3')
# p4 = Post.create(title='p4')
# t1 = Tag.create(tag='t1')
# t2 = Tag.create(tag='t2')
# t3 = Tag.create(tag='t3')
# TagPostThroughAlt.create(tag=t1, post=p1)
# TagPostThroughAlt.create(tag=t2, post=p1)
# TagPostThroughAlt.create(tag=t2, post=p2)
# TagPostThroughAlt.create(tag=t3, post=p2)
# TagPostThroughAlt.create(tag=t1, post=p4)
# TagPostThroughAlt.create(tag=t2, post=p4)
# TagPostThroughAlt.create(tag=t3, post=p4)
# with assert_query_count(1):
# query = (Post
# .select(Post, TagPostThroughAlt, Tag)
# .join(TagPostThroughAlt, JOIN.LEFT_OUTER)
# .join(Tag, JOIN.LEFT_OUTER)
# .order_by(Post.id, TagPostThroughAlt.post, Tag.id)
# .aggregate_rows())
# results = []
# for post in query:
# post_data = [post.title]
# for tpt in post.tags_alt:
# post_data.append(tpt.tag.tag)
# results.append(post_data)
# assert results, [
# ['p1', 't1', 't2'],
# ['p2', 't2', 't3'],
# ['p3'],
# ['p4', 't1', 't2', 't3'],
# ])
# def test_aggregate_parent_child(self):
# with assert_query_count(1):
# query = (Parent
# .select(Parent, Child, Orphan, ChildPet, OrphanPet)
# .join(Child, JOIN.LEFT_OUTER)
# .join(ChildPet, JOIN.LEFT_OUTER)
# .switch(Parent)
# .join(Orphan, JOIN.LEFT_OUTER)
# .join(OrphanPet, JOIN.LEFT_OUTER)
# .order_by(
# Parent.data,
# Child.data,
# ChildPet.id,
# Orphan.data,
# OrphanPet.id)
# .aggregate_rows())
# results = []
# for parent in query:
# results.append((
# parent.data,
# [(child.data, [pet.data for pet in child.childpet_set])
# for child in parent.child_set],
# [(orphan.data, [pet.data for pet in orphan.orphanpet_set])
# for orphan in parent.orphan_set]
# ))
# # Without the `.aggregate_rows()` call, this would be 289!!
# assert results, [
# ('p1',
# [('c1', ['c1-p1', 'c1-p2']),
# ('c2', ['c2-p1']),
# ('c3', ['c3-p1']),
# ('c4', [])],
# [('o1', ['o1-p1', 'o1-p2']),
# ('o2', ['o2-p1']),
# ('o3', ['o3-p1']),
# ('o4', [])],
# ),
# ('p2', [], []),
# ('p3',
# [('c6', []),
# ('c7', ['c7-p1'])],
# [('o6', ['o6-p1', 'o6-p2']),
# ('o7', ['o7-p1'])],)
# ])
# def test_aggregate_with_unselected_joins(self):
# with assert_query_count(1):
# query = (Child
# .select(Child, ChildPet, Parent)
# .join(ChildPet, JOIN.LEFT_OUTER)
# .switch(Child)
# .join(Parent)
# .join(Orphan)
# .join(OrphanPet)
# .where(OrphanPet.data == 'o6-p2')
# .order_by(Child.data, ChildPet.data)
# .aggregate_rows())
# results = []
# for child in query:
# results.append((
# child.data,
# child.parent.data,
# [child_pet.data for child_pet in child.childpet_set]))
# assert results, [
# ('c6', 'p3', []),
# ('c7', 'p3', ['c7-p1']),
# ])
# with assert_query_count(1):
# query = (Parent
# .select(Parent, Child, ChildPet)
# .join(Child, JOIN.LEFT_OUTER)
# .join(ChildPet, JOIN.LEFT_OUTER)
# .switch(Parent)
# .join(Orphan)
# .join(OrphanPet)
# .where(OrphanPet.data == 'o6-p2')
# .order_by(Parent.data, Child.data, ChildPet.data)
# .aggregate_rows())
# results = []
# for parent in query:
# results.append((
# parent.data,
# [(child.data, [pet.data for pet in child.childpet_set])
# for child in parent.child_set]))
# assert results, [('p3', [
# ('c6', []),
# ('c7', ['c7-p1']),
# ])])
# def test_aggregate_rows_ordering(self):
# # Refs github #519.
# with assert_query_count(1):
# query = (User
# .select(User, Blog)
# .join(Blog, JOIN.LEFT_OUTER)
# .order_by(User.username.desc(), Blog.title.desc())
# .aggregate_rows())
# accum = []
# for user in query:
# accum.append((
# user.username,
# [blog.title for blog in user.blog_set]))
# if sys.version_info[:2] > (2, 6):
# assert accum, [
# ('u4', ['b6', 'b5']),
# ('u3', ['b4', 'b3']),
# ('u2', []),
# ('u1', ['b2', 'b1']),
# ])
# def test_aggregate_rows_self_join(self):
# self._build_category_tree()
# Child = Category.alias()
# # Same query, but this time use an `alias` on the join expr.
# with assert_query_count(1):
# query = (Category
# .select(Category, Child)
# .join(
# Child,
# JOIN.LEFT_OUTER,
# on=(Category.id == Child.parent).alias('childrenx'))
# .order_by(Category.id, Child.id)
# .aggregate_rows())
# names_and_children = [
# [parent.name, [child.name for child in parent.childrenx]]
# for parent in query]
# assert names_and_children, self.category_tree)
# def test_multiple_fks(self):
# names = ['charlie', 'huey', 'zaizee']
# charlie, huey, zaizee = [
# User.create(username=username) for username in names]
# Relationship.create(from_user=charlie, to_user=huey)
# Relationship.create(from_user=charlie, to_user=zaizee)
# Relationship.create(from_user=huey, to_user=charlie)
# Relationship.create(from_user=zaizee, to_user=charlie)
# UserAlias = User.alias()
# with assert_query_count(1):
# query = (User
# .select(User, Relationship, UserAlias)
# .join(
# Relationship,
# JOIN.LEFT_OUTER,
# on=Relationship.from_user)
# .join(
# UserAlias,
# on=(
# Relationship.to_user == UserAlias.id
# ).alias('to_user'))
# .order_by(User.username, Relationship.id)
# .where(User.username == 'charlie')
# .aggregate_rows())
# results = [row for row in query]
# assert len(results), 1)
# user = results[0]
# assert user.username, 'charlie')
# assert len(user.relationships), 2)
# rh, rz = user.relationships
# assert rh.to_user.username, 'huey')
# assert rz.to_user.username, 'zaizee')
# FromUser = User.alias()
# ToUser = User.alias()
# from_join = (Relationship.from_user == FromUser.id)
# to_join = (Relationship.to_user == ToUser.id)
# with assert_query_count(1):
# query = (Relationship
# .select(Relationship, FromUser, ToUser)
# .join(FromUser, on=from_join.alias('from_user'))
# .switch(Relationship)
# .join(ToUser, on=to_join.alias('to_user'))
# .order_by(Relationship.id)
# .aggregate_rows())
# results = [
# (relationship.from_user.username,
# relationship.to_user.username)
# for relationship in query]
# assert results, [
# ('charlie', 'huey'),
# ('charlie', 'zaizee'),
# ('huey', 'charlie'),
# ('zaizee', 'charlie'),
# ])
# def test_multiple_fks_multi_depth(self):
# names = ['charlie', 'huey', 'zaizee']
# charlie, huey, zaizee = [
# User.create(username=username) for username in names]
# Relationship.create(from_user=charlie, to_user=huey)
# Relationship.create(from_user=charlie, to_user=zaizee)
# Relationship.create(from_user=huey, to_user=charlie)
# Relationship.create(from_user=zaizee, to_user=charlie)
# human = Category.create(name='human')
# kitty = Category.create(name='kitty')
# UserCategory.create(user=charlie, category=human)
# UserCategory.create(user=huey, category=kitty)
# UserCategory.create(user=zaizee, category=kitty)
# FromUser = User.alias()
# ToUser = User.alias()
# from_join = (Relationship.from_user == FromUser.id)
# to_join = (Relationship.to_user == ToUser.id)
# FromUserCategory = UserCategory.alias()
# ToUserCategory = UserCategory.alias()
# from_uc_join = (FromUser.id == FromUserCategory.user)
# to_uc_join = (ToUser.id == ToUserCategory.user)
# FromCategory = Category.alias()
# ToCategory = Category.alias()
# from_c_join = (FromUserCategory.category == FromCategory.id)
# to_c_join = (ToUserCategory.category == ToCategory.id)
# with assert_query_count(1):
# query = (Relationship
# .select(
# Relationship,
# FromUser,
# ToUser,
# FromUserCategory,
# ToUserCategory,
# FromCategory,
# ToCategory)
# .join(FromUser, on=from_join.alias('from_user'))
# .join(FromUserCategory, on=from_uc_join.alias('fuc'))
# .join(FromCategory, on=from_c_join.alias('category'))
# .switch(Relationship)
# .join(ToUser, on=to_join.alias('to_user'))
# .join(ToUserCategory, on=to_uc_join.alias('tuc'))
# .join(ToCategory, on=to_c_join.alias('category'))
# .order_by(Relationship.id)
# .aggregate_rows())
# results = []
# for obj in query:
# from_user = obj.from_user
# to_user = obj.to_user
# results.append((
# from_user.username,
# from_user.fuc[0].category.name,
# to_user.username,
# to_user.tuc[0].category.name))
# assert results, [
# ('charlie', 'human', 'huey', 'kitty'),
# ('charlie', 'human', 'zaizee', 'kitty'),
# ('huey', 'kitty', 'charlie', 'human'),
# ('zaizee', 'kitty', 'charlie', 'human'),
# ])
# class TestAggregateRowsRegression(ModelTestCase):
# requires = [
# User,
# Blog,
# Comment,
# Category,
# CommentCategory,
# BlogData]
# def setUp(self):
# super(TestAggregateRowsRegression, self).setUp()
# u = User.create(username='u1')
# b = Blog.create(title='b1', user=u)
# BlogData.create(blog=b)
# c1 = Comment.create(blog=b, comment='c1')
# c2 = Comment.create(blog=b, comment='c2')
# cat1 = Category.create(name='cat1')
# cat2 = Category.create(name='cat2')
# CommentCategory.create(category=cat1, comment=c1, sort_order=1)
# CommentCategory.create(category=cat2, comment=c1, sort_order=1)
# CommentCategory.create(category=cat1, comment=c2, sort_order=2)
# CommentCategory.create(category=cat2, comment=c2, sort_order=2)
# def test_aggregate_rows_regression(self):
# comments = (Comment
# .select(
# Comment,
# CommentCategory,
# Category,
# Blog,
# BlogData)
# .join(CommentCategory, JOIN.LEFT_OUTER)
# .join(Category, JOIN.LEFT_OUTER)
# .switch(Comment)
# .join(Blog)
# .join(BlogData, JOIN.LEFT_OUTER)
# .where(Category.id == 1)
# .order_by(CommentCategory.sort_order))
# with assert_query_count(1):
# c_list = list(comments.aggregate_rows())
# def test_regression_506(self):
# user = User.create(username='u2')
# for i in range(2):
# Blog.create(title='u2-%s' % i, user=user)
# users = (User
# .select()
# .order_by(User.id.desc())
# .paginate(1, 5)
# .alias('users'))
# with assert_query_count(1):
# query = (User
# .select(User, Blog)
# .join(Blog)
# .join(users, on=(User.id == users.c.id))
# .order_by(User.username, Blog.title)
# .aggregate_rows())
# results = []
# for user in query:
# results.append((
# user.username,
# [blog.title for blog in user.blog_set]))
# assert results, [
# ('u1', ['b1']),
# ('u2', ['u2-0', 'u2-1']),
# ])
# class TestPrefetchNonPKFK(ModelTestCase):
# requires = [Package, PackageItem]
# data = {
# '101': ['a', 'b'],
# '102': ['c'],
# '103': [],
# '104': ['a', 'b', 'c', 'd', 'e'],
# }
# def setUp(self):
# super(TestPrefetchNonPKFK, self).setUp()
# for barcode, titles in self.data.items():
# Package.create(barcode=barcode)
# for title in titles:
# PackageItem.create(package=barcode, title=title)
# def test_prefetch(self):
# packages = Package.select().order_by(Package.barcode)
# items = PackageItem.select().order_by(PackageItem.id)
# query = prefetch(packages, items)
# for package, (barcode, titles) in zip(query, sorted(self.data.items())):
# assert package.barcode, barcode)
# assert
# [item.title for item in package.items_prefetch],
# titles)
# packages = (Package
# .select()
# .where(Package.barcode << ['101', '104'])
# .order_by(Package.id))
# items = items.where(PackageItem.title << ['a', 'c', 'e'])
# query = prefetch(packages, items)
# accum = {}
# for package in query:
# accum[package.barcode] = [
# item.title for item in package.items_prefetch]
# assert accum, {
# '101': ['a'],
# '104': ['a', 'c','e'],
# })
|
import json
import numpy as np
import plotly.graph_objects as go
from dash import callback_context, dcc, html
from dash.dependencies import ALL, Input, Output
from rubicon_ml.viz.base import VizBase
from rubicon_ml.viz.common import dropdown_header
from rubicon_ml.viz.common.colors import get_rubicon_colorscale, light_blue, transparent
class MetricCorrelationPlot(VizBase):
    """Visualize the correlation between the parameters and metrics logged
    to the experiments `experiments` using a parallel coordinates plot.
    More info on parallel coordinates plots can be found here:
    https://plotly.com/python/parallel-coordinates-plot/
    Parameters
    ----------
    experiments : list of rubicon_ml.client.experiment.Experiment, optional
        The experiments to visualize. Defaults to None. Can be set as
        attribute after instantiation.
    metric_names : list of str
        The names of the metrics to load. Defaults to None, which loads all
        metrics logged to the experiments `experiments`.
    parameter_names : list of str
        The names of the parameters to load. Defaults to None, which loads all
        parameters logged to the experiments `experiments`.
    selected_metric : str
        The name of the metric to display at launch. Defaults to None, which
        selects the metric loaded first.
    """
    def __init__(
        self,
        experiments=None,
        metric_names=None,
        parameter_names=None,
        selected_metric=None,
    ):
        """Store the configuration; data is loaded later by `load_experiment_data`."""
        super().__init__(dash_title="plot metric correlation")
        self.experiments = experiments
        self.metric_names = metric_names
        self.parameter_names = parameter_names
        self.selected_metric = selected_metric
    def _get_dimension(self, label, values):
        """Transforms the input data for use with Plotly's parallel
        coordinates plot.

        Returns a dict suitable as one entry of `go.Parcoords(dimensions=...)`.
        Categorical values (any str or bool present) are mapped to integer
        codes via `np.unique(..., return_inverse=True)`, with the original
        strings kept as tick labels; purely numeric values pass through.
        """
        if len(values) > 0 and any([isinstance(v, str) or isinstance(v, bool) for v in values]):
            # Stringify everything first so mixed str/bool/numeric lists
            # can be uniqued consistently.
            values = [str(v) for v in values]
            unique_values, values = np.unique(values, return_inverse=True)
            dimension = {
                "label": label,
                "ticktext": unique_values,
                "tickvals": list(range(0, len(unique_values))),
                "values": values,
            }
        else:
            dimension = {
                "label": label,
                "values": values,
            }
        return dimension
    @property
    def layout(self):
        """Defines the layout for the metric correlation plot."""
        return html.Div(
            [
                # Header with a metric-selection dropdown and a summary of
                # how many experiments are shown.
                dropdown_header(
                    self.visible_metric_names,
                    self.selected_metric,
                    "comparing metric ",
                    f" over {len(self.experiments)} experiments",
                    "metric-correlation",
                ),
                # The graph itself, wrapped in a loading spinner.
                dcc.Loading(
                    html.Div(
                        dcc.Graph(
                            id="metric-correlation-plot",
                        ),
                        id="metric-correlation-plot-container",
                    ),
                    color=light_blue,
                ),
            ],
            id="metric-correlation-plot-layout-container",
        )
    def load_experiment_data(self):
        """Load the experiment data required for the metric correlation plot.
        Extracts parameter and metric metadata from each experiment in
        `self.experiments`. List metrics are ignored.

        Populates `self.experiment_records` (keyed by experiment id),
        `self.visible_metric_names` (sorted list) and
        `self.visible_parameter_names` (list). Raises ValueError when
        `self.selected_metric` was never seen on any experiment.
        """
        self.experiment_records = {}
        self.visible_metric_names = set()
        self.visible_parameter_names = set()
        for experiment in self.experiments:
            experiment_record = {"metrics": {}, "parameters": {}}
            for metric in experiment.metrics():
                # Keep the metric if it passed the name filter and its value
                # is scalar (list-valued metrics cannot be plotted here).
                if (
                    self.metric_names is None or metric.name in self.metric_names
                ) and not isinstance(metric.value, list):
                    experiment_record["metrics"][metric.name] = metric.value
                    self.visible_metric_names.add(metric.name)
                    # Default the selection to the first metric encountered.
                    if self.selected_metric is None:
                        self.selected_metric = metric.name
            for parameter in experiment.parameters():
                if self.parameter_names is None or parameter.name in self.parameter_names:
                    experiment_record["parameters"][parameter.name] = parameter.value
                    self.visible_parameter_names.add(parameter.name)
            self.experiment_records[experiment.id] = experiment_record
        if self.selected_metric not in self.visible_metric_names:
            raise ValueError(
                f"no metric named `selected_metric` '{self.selected_metric}'"
                " logged to any experiment in `experiments`."
            )
        # Convert to lists for stable iteration; metric names are sorted for
        # a deterministic dropdown order.
        self.visible_parameter_names = list(self.visible_parameter_names)
        self.visible_metric_names = list(self.visible_metric_names)
        self.visible_metric_names.sort()
    def register_callbacks(self, link_experiment_table=False):
        """Register the Dash callback that redraws the plot.

        When `link_experiment_table` is True, an extra input wires the plot
        to the experiment table's row selection.
        """
        outputs = [
            Output("metric-correlation-plot", "figure"),
            Output("metric-correlation-dropdown", "label"),
            Output("metric-correlation-header-right-text", "children"),
        ]
        inputs = [
            Input({"type": "metric-correlation-dropdown-button", "index": ALL}, "n_clicks"),
        ]
        states = []
        if link_experiment_table:
            inputs.append(
                Input("experiment-table", "derived_virtual_selected_row_ids"),
            )
        @self.app.callback(outputs, inputs, states)
        def update_metric_plot(*args):
            """Render the correlation plot based on the currently selected metric.
            Returns the Plotly `Parcoords` figure generated by the values of the
            experiments' selected metric, the name of the currently selected
            metric, and the header text with the metric's name.
            """
            if link_experiment_table:
                # The table's selected row ids arrive as the last input arg;
                # only the selected experiments are plotted.
                selected_row_ids = args[-1]
                selected_row_ids = selected_row_ids if selected_row_ids else []
                experiment_records = [
                    self.experiment_records[row_id] for row_id in selected_row_ids
                ]
            else:
                experiment_records = self.experiment_records.values()
            # `prop_id` looks like '<component-id>.<property>'; strip the
            # property to recover the triggering component's id.
            property_id = callback_context.triggered[0].get("prop_id")
            property_value = property_id[: property_id.index(".")]
            if not property_value or property_value == "experiment-table":
                # Initial render or table-driven update: keep current metric.
                selected_metric = self.selected_metric
            else:
                # Dropdown ids are pattern-matching dicts serialized as JSON;
                # the "index" entry carries the metric name.
                selected_metric = json.loads(property_value).get("index")
            # Remember the choice so table-driven refreshes keep it.
            self.selected_metric = selected_metric
            header_right_text = (
                f"over {len(experiment_records)} experiment"
                f"{'s' if len(experiment_records) != 1 else ''}"
            )
            parameter_values = {}
            for parameter_name in self.visible_parameter_names:
                parameter_values[parameter_name] = [
                    record["parameters"].get(parameter_name) for record in experiment_records
                ]
            metric_values = [
                record["metrics"].get(selected_metric) for record in experiment_records
            ]
            plot_dimensions = []
            for parameter_name, parameter_value in parameter_values.items():
                # Skip parameters logged on none of the shown experiments.
                if any([p is not None for p in parameter_value]):
                    plot_dimensions.append(self._get_dimension(parameter_name, parameter_value))
            # The selected metric is always the last (right-most) axis.
            plot_dimensions.append(self._get_dimension(selected_metric, metric_values))
            metric_correlation_plot = go.Figure(
                go.Parcoords(
                    line={
                        "color": [m for m in metric_values if m is not None],
                        "colorscale": get_rubicon_colorscale(len(experiment_records)),
                        "showscale": True,
                    },
                    dimensions=plot_dimensions,
                ),
            )
            metric_correlation_plot.update_layout(paper_bgcolor=transparent)
            return metric_correlation_plot, selected_metric, header_right_text
|
import numpy as np
def get_index(components, percentile=20):
    """Select regrouped rows whose values vary enough to be informative.

    The input is interpreted as consecutive groups of 200 rows x 4 columns.
    It is regrouped so each resulting row collects one position across all
    groups, and the per-row standard deviation is used as a variability
    score; rows at or below the `percentile`-th percentile are discarded.

    Parameters
    ----------
    components : np.ndarray
        Array reshapeable to ``(-1, 200, 4)``.
    percentile : float, optional
        Percentile of the std distribution used as the cut-off. Defaults
        to 20.

    Returns
    -------
    np.ndarray
        Indices of rows whose standard deviation strictly exceeds the
        threshold.
    """
    # Removed an unused `stds = []` accumulator from the original.
    # Regroup (-1, 200, 4) -> (200, -1, 4) -> (-1, 800) and score each row
    # by its spread.
    std = (
        components.reshape(-1, 200, 4)
        .transpose(1, 0, 2)
        .reshape(-1, 4 * 200)
        .std(axis=1)
    )
    threshold = np.percentile(std, percentile)
    usable_index = np.where(std > threshold)
    print("X_COLUMN:", len(usable_index[0]), threshold)
    return usable_index[0]
if __name__ == "__main__":
    # BUG FIX: the original line read `path="data/pca_200_components.npy",`
    # -- the trailing comma made `path` a 1-tuple, so np.load(path) raised.
    path = "data/pca_200_components.npy"
    components = np.load(path)
    usable_index = get_index(components, 20)
    # Persist as int32 so downstream consumers get a compact integer index.
    np.save("data/usable_index_200.npy", usable_index.astype("int32"))
|
# Generated by Django 2.2.13 on 2020-07-10 07:25
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: alters the `product` model's price fields
    # to CharField(max_length=50).
    dependencies = [
        ('product', '0009_auto_20200710_1238'),
    ]
    operations = [
        # NOTE(review): storing prices as CharField forfeits numeric ordering
        # and arithmetic at the DB level -- presumably deliberate here, but
        # DecimalField is the conventional choice; confirm before relying on
        # these columns for comparisons.
        migrations.AlterField(
            model_name='product',
            name='discount_price',
            field=models.CharField(max_length=50),
        ),
        migrations.AlterField(
            model_name='product',
            name='price',
            field=models.CharField(max_length=50),
        ),
    ]
|
'''
@Description: BinaryTree Library
@Date: 2020-06-04 00:02:32
@Author: Wong Symbol
@LastEditors: Wong Symbol
@LastEditTime: 2020-07-10 22:18:31
'''
class TreeNode():
    """One node of a binary tree: a payload plus left/right child links."""

    def __init__(self, data=None):
        """Create a detached node carrying `data`; both children start empty."""
        self.data = data
        self._left = None
        self._right = None

    def add2left(self, new_node):
        """Hang `new_node` off this node as its left child."""
        self._left = new_node

    def add2right(self, new_node):
        """Hang `new_node` off this node as its right child."""
        self._right = new_node
class BinaryTree():
    """Binary tree wrapper offering traversals and several depth algorithms.

    Nodes are duck-typed: anything exposing ``data``, ``_left`` and
    ``_right`` attributes works (see ``TreeNode``).
    """

    def __init__(self, root_node=None):
        # Root of the tree; None represents an empty tree.
        self._root = root_node

    def pre_order(self, tree_node):
        """Print node data in root, left, right order."""
        if tree_node is not None:
            print(tree_node.data)
            self.pre_order(tree_node._left)
            self.pre_order(tree_node._right)

    def mid_order(self, tree_node):
        """Print node data in left, root, right (in-order) order."""
        if tree_node is not None:
            self.mid_order(tree_node._left)
            print(tree_node.data)
            self.mid_order(tree_node._right)

    def post_order(self, tree_node):
        """Left, right, root traversal -- not implemented yet."""
        pass

    def min_depth(self, tree_node):
        """Return the minimum depth (nodes on the shortest root-to-leaf path).

        Uses level-order (BFS) traversal so the first leaf encountered is
        guaranteed to be the shallowest. Returns 0 for an empty tree.
        (Removed the original's leftover debug prints of the queue.)
        """
        if tree_node is None:
            return 0
        queue = [tree_node]
        depth = 1
        while queue:
            # Consume exactly one level per outer iteration so `depth`
            # advances once per level.
            for _ in range(len(queue)):
                current = queue.pop(0)
                if current._left is None and current._right is None:
                    return depth
                if current._left is not None:
                    queue.append(current._left)
                if current._right is not None:
                    queue.append(current._right)
            depth += 1
        return depth

    def max_depth(self, root):
        """Return the maximum depth (nodes on the longest path), recursively."""
        if not root:
            return 0
        left_depth = self.max_depth(root._left)
        right_depth = self.max_depth(root._right)
        return max(left_depth, right_depth) + 1

    def max_depth_(self, root):
        """Return the maximum depth iteratively with an explicit DFS stack.

        BUG FIX: the original guarded the initial push with
        ``if stack is not None:`` which is always true for a fresh list;
        the intended check is on `root`. (Observable results are unchanged:
        a pushed ``(1, None)`` was filtered out inside the loop.)
        """
        stack = []
        if root is not None:
            stack.append((1, root))
        depth = 0
        while stack:
            current_depth, node = stack.pop()
            if node is not None:
                depth = max(current_depth, depth)
                stack.append((current_depth + 1, node._left))
                stack.append((current_depth + 1, node._right))
        return depth

    def maxDepth(self, root):
        """Return the maximum depth via BFS, counting one level per pass."""
        if not root:
            return 0
        queue = [root]
        # Starts at 0 because the loop below increments once per level,
        # including the root's own level.
        depth = 0
        while queue:
            for _ in range(len(queue)):
                current = queue.pop(0)
                if current._left:
                    queue.append(current._left)
                if current._right:
                    queue.append(current._right)
            depth += 1
        return depth

    def maxHeight(self, root):
        """Return the height in *edges*: a lone root is 0, an empty tree -1."""
        if not root:
            return -1
        left = self.maxHeight(root._left)
        right = self.maxHeight(root._right)
        return max(left, right) + 1
if __name__ == '__main__':
    # Ad-hoc smoke test: build a handful of nodes by hand and exercise the
    # depth/height helpers. Only the root A is attached -- the child wiring
    # is commented out below, so B..E remain detached.
    A = TreeNode('A')
    B = TreeNode('B')
    C = TreeNode('C')
    D = TreeNode('D')
    E = TreeNode('E')
    bt = BinaryTree(A)
    # A.add2left(B)
    # A.add2right(C)
    # C.add2left(D)
    # C.add2right(E)
    '''
    print('pre order is:')
    bt.pre_order(bt._root)
    print('mid order is:')
    bt.mid_order(bt._root)
    '''
    # With only the root attached, height in edges is 0.
    m = bt.maxHeight(bt._root)
    print(m)
    # NOTE(review): exit() here makes everything below unreachable dead code.
    exit()
    d = bt.min_depth(bt._root)
    print('the binary tree min depth is :', d)
    w = bt.maxDepth(bt._root)
    print('the binary tree max depth is : ',w)
|
from common.run_method import RunMethod
import allure
@allure.step("JkyApp/登录")
def employee_login_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "JkyApp/登录"
url = f"/api-operation-app/employee/login"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("JkyAPP/登出")
def employee_logout_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "JkyAPP/登出"
url = f"/api-operation-app/employee/logout"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("JkyAPP/员工信息")
def employee_info_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "JkyAPP/员工信息"
url = f"/api-operation-app/employee/info"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("JkyAPP/获取短信验证码")
def employee_external_receiveVerificationCode_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "JkyAPP/获取短信验证码"
url = f"/api-operation-app/employee/external/receiveVerificationCode"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("JkyAPP/校验手机验证码")
def employee_external_validity_verificationCode_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "JkyAPP/校验手机验证码"
url = f"/api-operation-app/employee/external/validity/verificationCode"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("JkyAPP/修改员工密码")
def employee_signInPassword_patch(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "JkyAPP/修改员工密码"
url = f"/api-operation-app/employee/signInPassword"
res = RunMethod.run_request("PATCH", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("JkyAPP/查询登录用户的授权校区")
def employee_schools_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "JkyAPP/查询登录用户的授权校区"
url = f"/api-operation-app/employee/schools"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("JkyAPP/查询所有校区")
def employee_schools_all_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: url地址后面的参数
:body: 请求体
:return_json: 是否返回json格式的响应(默认是)
:header: 请求的header
:host: 请求的环境
:return: 默认json格式的响应, return_json=False返回原始响应
'''
name = "JkyAPP/查询所有校区"
url = f"/api-operation-app/employee/schools/all"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("JkyApp/查询老师列表")
def employee_teacher_queries_post(params=None, body=None, header=None, return_json=True, **kwargs):
    """Query the teacher list.

    POST /api-operation-app/employee/teacher/queries

    :param params: query parameters appended to the URL
    :param body: request body
    :param header: request headers
    :param return_json: when True (default) return the parsed JSON body,
        otherwise return the raw response object
    :param kwargs: extra options forwarded to RunMethod.run_request (e.g. host)
    :return: parsed JSON response, or the raw response when return_json=False
    """
    name = "JkyApp/查询老师列表"
    # The path has no placeholders, so the f-string prefix was redundant (flake8 F541).
    url = "/api-operation-app/employee/teacher/queries"
    return RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
@allure.step("JkyApp/查询校区下的课程顾问")
def employee_schoolArea_course_consultant_post(params=None, body=None, header=None, return_json=True, **kwargs):
    """Query the course consultants attached to a campus.

    POST /api-operation-app/employee/schoolArea/course-consultant

    :param params: query parameters appended to the URL
    :param body: request body
    :param header: request headers
    :param return_json: when True (default) return the parsed JSON body,
        otherwise return the raw response object
    :param kwargs: extra options forwarded to RunMethod.run_request (e.g. host)
    :return: parsed JSON response, or the raw response when return_json=False
    """
    name = "JkyApp/查询校区下的课程顾问"
    # The path has no placeholders, so the f-string prefix was redundant (flake8 F541).
    url = "/api-operation-app/employee/schoolArea/course-consultant"
    return RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
@allure.step("JkyAPP/查询课程顾问并返回校区")
def employee_schoolArea_course_consultant_and_school_area_post(params=None, body=None, header=None, return_json=True, **kwargs):
    """Query course consultants together with their campuses.

    POST /api-operation-app/employee/schoolArea/course-consultant-and-school-area

    :param params: query parameters appended to the URL
    :param body: request body
    :param header: request headers
    :param return_json: when True (default) return the parsed JSON body,
        otherwise return the raw response object
    :param kwargs: extra options forwarded to RunMethod.run_request (e.g. host)
    :return: parsed JSON response, or the raw response when return_json=False
    """
    name = "JkyAPP/查询课程顾问并返回校区"
    # The path has no placeholders, so the f-string prefix was redundant (flake8 F541).
    url = "/api-operation-app/employee/schoolArea/course-consultant-and-school-area"
    return RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
|
# -*- python -*-
from Product import *
class Store(object):
    """A retail store with an owner, a location and a product inventory."""

    def __init__(self, owner, location):
        self.owner = owner
        self.location = location
        self.products = []

    def add_product(self, product):
        """Add *product* to the inventory; returns self for chaining."""
        self.products.append(product)
        return self

    def remove_product(self, product_name):
        """Remove every product whose ``name`` equals *product_name*.

        The original called ``list.remove`` while iterating the same list,
        which skips the element after each removal; rebuilding the list
        removes all matches reliably.
        """
        self.products = [p for p in self.products if p.name != product_name]
        return self

    def inventory(self):
        """Print each product's info via its displayInfo(); returns self."""
        for product in self.products:
            product.displayInfo()
        return self
|
# How many distinct three-digit numbers can be built from the digits 1, 2, 3, 4
# without repeating a digit — and which are they?
import itertools

count = 0
# permutations(range(1, 5), 3) yields exactly the (i, j, k) triples with all
# digits distinct, in the same lexicographic order as the original nested loops.
for i, j, k in itertools.permutations(range(1, 5), 3):
    count += 1
    print("%d%d%d" % (i, j, k))
print("%d 种可能" % count)
# i, j, k are the hundreds, tens and units digits respectively
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# GYP build description: three builds of aliasing.cc that differ only in the
# strict-aliasing setting, to compare the optimizer's behavior.
{
  # Force clang as the toolchain for every target in this file.
  'make_global_settings': [
    ['CC', '/usr/bin/clang'],
    ['CXX', '/usr/bin/clang++'],
  ],
  'targets': [
    {
      # Strict aliasing explicitly enabled (-fstrict-aliasing).
      'target_name': 'aliasing_yes',
      'type': 'executable',
      'sources': [ 'aliasing.cc', ],
      'xcode_settings': {
        'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
        'GCC_STRICT_ALIASING': 'YES',
        'GCC_OPTIMIZATION_LEVEL': 2,
      },
    },
    {
      # Strict aliasing explicitly disabled (-fno-strict-aliasing).
      'target_name': 'aliasing_no',
      'type': 'executable',
      'sources': [ 'aliasing.cc', ],
      'xcode_settings': {
        'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
        'GCC_STRICT_ALIASING': 'NO',
        'GCC_OPTIMIZATION_LEVEL': 2,
      },
    },
    {
      # No GCC_STRICT_ALIASING key: exercises the toolchain's default at -O2.
      'target_name': 'aliasing_default',
      'type': 'executable',
      'sources': [ 'aliasing.cc', ],
      'xcode_settings': {
        'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
        'GCC_OPTIMIZATION_LEVEL': 2,
      },
    },
  ],
}
|
import unittest
from katas.beta.fix_the_base_conversion_function import convert_num
class ConvertNumTestCase(unittest.TestCase):
    """Unit tests for convert_num's base conversion and input validation."""

    def test_equal_1(self):
        # A non-string base (here a list) must be rejected.
        result = convert_num(12463, ['num'])
        self.assertEqual(result, 'Invalid base input')

    def test_equal_2(self):
        # Decimal 122 rendered in binary keeps the '0b' prefix.
        result = convert_num(122, 'bin')
        self.assertEqual(result, '0b1111010')

    def test_equal_3(self):
        # A non-numeric value must be rejected.
        result = convert_num('dog', 'bin')
        self.assertEqual(result, 'Invalid number input')

    def test_equal_4(self):
        # Zero in hexadecimal is rendered as '0x0'.
        result = convert_num(0, 'hex')
        self.assertEqual(result, '0x0')

    def test_equal_5(self):
        # An unknown base name must be rejected.
        result = convert_num(123, 'lol')
        self.assertEqual(result, 'Invalid base input')
|
import pandas as pd
import pickle

# vehicles.csv -> list of per-vehicle dicts.
vehicles = []
csv = pd.read_csv('../vehicles.csv')
for _, row in csv.iterrows():
    # .iloc is explicit positional access; plain row[0] relies on pandas'
    # deprecated integer-label fallback and warns on modern versions.
    vehicles.append(
        {
            'VIN': row.iloc[0],
            'TrainID': row.iloc[1],
            'Cabin': row.iloc[2],
            'Seat': row.iloc[3],
            'Transit': row.iloc[4]
        }
    )
# 'with' guarantees the handle is closed even if pickling raises;
# the original open(...) handles were never closed.
with open("vehicles.pkl", "wb") as fh:
    pickle.dump(vehicles, fh)
print("vehicles transformed")

# schedule.csv -> {first column: [next four columns]}.
schedule = {}
csv = pd.read_csv('../schedule.csv')
for _, row in csv.iterrows():
    schedule[row.iloc[0]] = [
        row.iloc[1], row.iloc[2], row.iloc[3], row.iloc[4]
    ]
with open("schedule.pkl", "wb") as fh:
    pickle.dump(schedule, fh)
print("schedule transformed")

# trains.csv -> list of [TrainID, TX_Wagon, DD_Wagon, DDA, TX_Time, DD_Time].
trains = []
csv = pd.read_csv('../trains.csv')
for _, row in csv.iterrows():
    trains.append(
        [
            row['TrainID'],
            row['TX_Wagon'],
            row['DD_Wagon'],
            row['DDA'],
            row['TX_Time'],
            row['DD_Time']
        ]
    )
with open("trains.pkl", "wb") as fh:
    pickle.dump(trains, fh)
print("trains transformed")

# trunks.csv -> list of [TrunkID, From, To, Time].
trunks = []
csv = pd.read_csv('../trunks.csv')
for _, row in csv.iterrows():
    trunks.append(
        [
            row['TrunkID'],
            row['From'],
            row['To'],
            row['Time']
        ]
    )
with open("trunks.pkl", "wb") as fh:
    pickle.dump(trunks, fh)
print("trunks transformed")
from django.conf.urls import url
from . import views
# Namespace for {% url 'active_learning:...' %} reversing.
app_name = 'active_learning'
urlpatterns = [
    # Root serves the iframe wrapper page.
    url(r'^$', views.iframe, name='iframe'),
    url(r'^article$', views.index, name='index'),
    # Numeric article id captured as the 'article_id' view argument.
    url(r'^article/(?P<article_id>[0-9]+)/$', views.detail, name='detail'),
    url(r'^learn$', views.learn),
    # Pusher authentication endpoint (no trailing anchor: matches any suffix).
    url(r'^pusher/auth', views.auth),
    url(r'^get_articles', views.get_articles),
    url(r'^load_four_university', views.load_four_university_dataset),
]
|
# examples of exception handling
# a few types of common errors
# print(yana) # NameError
# print(1 + 'yana') #TypeError
# handle errors using "try...except" blocks
"""
try:
statements
...
except ExceptionName:
statements evaluated in case ExceptionName happens
"""
# an example
def get_number():
    """Prompt the user for a decimal value and return it as a float."""
    return float(input("Enter a decimal value: "))
"""
while True: # ctrl+c to interrupt
try:
print(get_number())
except ValueError:
print("That's not a decimal, try again!")
"""
# an empty except statement can catch any exception
"""
try:
input()
except:
print("Unknown Exception")
"""
# ask ilija about raise...
"""
try:
raise ValueError("A value error occurred")
except ValueError:
print("ValueError in code")
"""
# using finally
# statements that must be executed under all circumstances, use
# the finally clause
import os

os.chdir("/home/yana/Documents/python/python-playground")
file = None  # bound up front so the finally clause cannot hit a NameError
try:
    file = open("sample.txt", "w")
    d = 1 / 0
except ZeroDivisionError:
    print("Stop trying to divide by zero!")
finally:
    # Only close if open() actually succeeded; the original crashed here with
    # NameError whenever open() itself raised.
    if file is not None:
        file.close()
    print("File is closed")
import os
from os import listdir
from os.path import isfile, join
def rename_file(path):
    """Strip all decimal digits from the names of the files directly inside *path*."""
    onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
    print(onlyfiles)  # Python 3 print() replaces the original py2 print statement
    # Python 3 removed str.translate(None, chars); str.maketrans('', '', chars)
    # builds the equivalent delete-these-characters table.
    digits_table = str.maketrans('', '', '0123456789')
    # chdir to the argument instead of a second hard-coded copy of the path.
    os.chdir(path)
    for file_name in onlyfiles:
        print("old name - " + file_name)
        print("new name - " + file_name.translate(digits_table))
        os.rename(file_name, file_name.translate(digits_table))


# Raw string: '\U' in a plain literal is a (syntax-error) unicode escape in Python 3.
rename_file(r'C:\Users\Administrator\Desktop\prank')
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
    # Facebook OAuth sign-in endpoint handled by the class-based SignInView.
    path("fb-login/", views.SignInView.as_view()),
]
|
#!/usr/bin/env python
import pygame
import mimo
from utils import utils
from utils import neopixelmatrix as graphics
from utils.NeoSprite import NeoSprite, AnimatedNeoSprite, TextNeoSprite, SpriteFromFrames
from utils import constants
from scenes.BaseScene import SceneBase
# Introduction Scene
# Available actions: back / next
# Description: "cinematic explain the player the ludovic experiment"
# Next: material section tutorial
class IntroductionScene(SceneBase):
    """Opening cinematic: reveals the intro subtitles one by one, then
    switches to the material-section tutorial scene ("TutorialMat")."""

    def __init__(self):
        SceneBase.__init__(self)
        self.HWSetup()
        subtitlefont = pygame.font.Font(constants.VCR_OSD_MONO, constants.FONT_SUBTITLE)
        self.subtitle = utils.Text("", subtitlefont)
        self.subtitle.SetPosition(constants.VIEWPORT_CENTER_X, 610)
        # Ordered subtitle "slides"; the "image" field is empty and unused here.
        self.intro_subtitles = [
            {
                "text": "M corp le da la bienvenida y agradece su participacion\nen esta prueba de seleccion.",
                "image": ""
            },
            {
                "text": "Esta prueba evaluara su capacidad para editar y presentar\nnoticias segun las necesidades propuestas por las directivas.",
                "image": ""
            },
            {
                "text": "Ante usted tiene la mas reciente version de nuestro modulador\n de mentes, M.i.M.o 3.2.\n\nRecibira una induccion basica y suficiente para operar esta maquina.",
                "image": ""
            },
            {
                "text": "Toda la operacion que haga sobre la maquina sera grabada\ny almacenada para nuestro posterior analisis.",
                "image": ""
            },
        ]
        # -1 so the first LoadNextSubtitle() call advances to index 0.
        self.intro_subtitles_index = -1
        self.textLoader = None
        self.LoadNextSubtitle()

    def HWSetup(self):
        """Configure the physical MiMo console for this scene.

        NOTE(review): the exact meaning of these button/LED argument lists is
        defined by the mimo firmware API — confirm against its documentation.
        """
        mimo.set_led_brightness(50)
        mimo.set_material_buttons_light([3, 255, 80, 80, 4, 80, 255, 80])
        mimo.set_material_buttons_mode([3, 1, 4, 1])
        mimo.set_material_buttons_lock_status([0,1, 1,1, 2,1, 5,1, 6,1, 7,1])
        mimo.set_tunners_enable_status(False)
        mimo.set_buttons_enable_status(True, False)

    def LoadNextSubtitle(self):
        """Advance to the next subtitle; switch scenes when all are shown."""
        if self.intro_subtitles_index + 1 == len(self.intro_subtitles):
            self.SwitchToScene("TutorialMat")
            return
        self.intro_subtitles_index += 1
        # 0.04 is the per-character reveal delay (seconds, presumably — verify
        # against utils.TextLoader).
        self.textLoader = utils.TextLoader(self.intro_subtitles[self.intro_subtitles_index]["text"], 0.04, False)

    def ProcessInput(self, events, pressed_keys):
        # The 'i' key drives the scene forward (debug/operator input).
        for event in events:
            if event.type == pygame.KEYDOWN and event.key == pygame.K_i:
                self.Next()

    def Next(self):
        """Finish the current subtitle reveal, or move to the next one."""
        if self.textLoader.finished:
            self.LoadNextSubtitle()
        else:
            # Skip the typewriter animation and show the full text at once.
            self.textLoader.complete()
            self.subtitle.SetText(self.textLoader.current_text)

    def Update(self, dt):
        SceneBase.Update(self, dt)
        # TextLoader.update returns truthy when more characters were revealed.
        if self.textLoader.update(dt):
            self.subtitle.SetText(self.textLoader.current_text)

    def Render(self, screen):
        screen.fill(constants.PALLETE_BACKGROUND_BLUE)
        self.subtitle.render_multiline(screen)
        graphics.render()
|
class Rectangle:
    """An x-by-y rectangle drawn on a text grid.

    ``xy_plane`` is a flat list of ``(x, y, flag)`` tuples; ``flag`` is 1 for
    a filled ("colored-in") cell and 0 for an empty one.
    """

    def __init__(self, x, plane, y=None):
        self.x = x
        # Backward-compatible generalization: the original always set y = x
        # (forcing a square); callers may now pass a distinct height.
        self.y = x if y is None else y
        self.xy_plane = plane
        self.user_input()
        self.draw_shape_chosen()
        self.draw_graphic_plane()

    def user_input(self):
        """Ask for grid dimensions (each < 20) and initialise the plane."""
        while True:
            xdim = int(input("Enter x dimension: \n"))
            ydim = int(input("Enter y dimension: \n"))
            if xdim < 20 and ydim < 20:
                break
            print("Both dimensions must be smaller than 20 to render properly.")
        self.init_plane(xdim, ydim)

    def init_plane(self, xdimension, ydimension):
        """Append every (x, y) cell to xy_plane, all initially un-colored."""
        for x in range(xdimension):
            for y in range(ydimension):
                self.xy_plane.append((x, y, 0))  # x, y and whether it is "colored-in"

    def draw_one_chosen(self, coord_capsule):
        """Mark the given un-colored (x, y, 0) cell as colored, in place."""
        x_coord, y_coord, _ = coord_capsule
        coord_index = self.xy_plane.index((x_coord, y_coord, 0))
        # Direct index assignment replaces the original pop + insert pair.
        self.xy_plane[coord_index] = (x_coord, y_coord, 1)

    def draw_shape_chosen(self):
        """Color every cell of the self.x by self.y rectangle."""
        for ycoord in range(self.y):
            for xcoord in range(self.x):
                self.draw_one_chosen((xcoord, ycoord, 0))

    def draw_graphic_plane(self):
        """Render the plane as ASCII art: '|##' filled, '|‾‾' empty."""
        current_x_value = 0
        for xcoord, ycoord, bincoord in self.xy_plane:
            if xcoord != current_x_value:
                print("\r")  # start a new display row when the x value changes
                current_x_value = xcoord
            if bincoord == 0:
                print("|‾‾", end="")
            elif bincoord == 1:
                print("|##", end="")
        print("\n")

    def draw_all(self):
        """Color in every cell."""
        for i, (xcoord, ycoord, _) in enumerate(self.xy_plane):
            self.xy_plane[i] = (xcoord, ycoord, 1)

    def clear_all(self):
        """Un-color every cell.

        Bug fix: the original set the flag to 1 — byte-for-byte identical to
        draw_all — so "clear" actually filled the grid.
        """
        for i, (xcoord, ycoord, _) in enumerate(self.xy_plane):
            self.xy_plane[i] = (xcoord, ycoord, 0)

    def get_coords(self):
        """Prompt for an (x, y) pair and return its index in xy_plane.

        Bug fix: the original searched for a 2-tuple in a list of 3-tuples,
        which can never match; we now compare only the x/y fields and ignore
        the color flag. Raises ValueError (as before) when not found.
        """
        x_coord = int(input("Enter x-coordinate you want found: \n"))
        y_coord = int(input("Enter y-coordinate you want found: \n"))
        for index, (xcoord, ycoord, _) in enumerate(self.xy_plane):
            if xcoord == x_coord and ycoord == y_coord:
                return index
        raise ValueError("coordinate not found")
|
#! /usr/bin/python
# Train an xgboost multi-class (softmax) model on a CSV with a 'label' column,
# report the hold-out error, then retrain with softprob to get probabilities.
import numpy as np
import xgboost as xgb
# label need to be 0 to num_class -1
# if col 33 is '?' let it be 1 else 0, col 34 substract 1
# data = np.loadtxt('data/201504_train.csv', delimiter=',')
# sz = data.shape
#
# train = data[:int(sz[0] * 0.7), :]  # take row 1-256 as training set
# test = data[int(sz[0] * 0.7):, :]  # take row 257-366 as testing set
# train_X = train[:, 0:33]
# train_Y = train[:, 34]
#
# test_X = test[:, 0:33]
# test_Y = test[:, 34]
import pandas as pd
# dataname = "201504"
# data_file = "data/" + dataname + "_train.csv"
dataname = "201504"
data_file = "data/" + dataname + "_data.csv"
data = pd.read_csv(data_file)
X = np.array(data.drop('label', axis=1))
y = data.label.values  # np.array
# Chronological 90/10 split — no shuffling.
train = data[:int(len(data) * 1 * 0.9)]
test = data[int(len(data) * 1 * 0.9):]
train_Y = train.label.values  # np.array
train_X = np.array(train.drop('label', axis=1))
test_Y = test.label.values
test_X = np.array((test.drop('label', axis=1)))
xg_train = xgb.DMatrix(train_X, label=train_Y)
xg_test = xgb.DMatrix(test_X, label=test_Y)
# setup parameters for xgboost
param = {}
param['booster'] = "gbtree"
# use softmax multi-class classification
param['objective'] = 'multi:softmax'
# param['objective'] = 'reg:logistic'
# scale weight of positive examples
param['eta'] = 0.1
param['max_depth'] = 6
param['silent'] = 1  # NOTE(review): 'silent' is removed in recent xgboost (use 'verbosity')
param['nthread'] = 4
param['num_class'] = 6
# Both matrices are watched so train/test metrics print each round.
watchlist = [(xg_train, 'train'), (xg_test, 'test')]
num_round = 5
bst = xgb.train(param, xg_train, num_round, watchlist);
# get prediction
pred = bst.predict(xg_test);
print ('predicting, classification error=%f' % (
    sum(int(pred[i]) != test_Y[i] for i in range(len(test_Y))) / float(len(test_Y))))
# do the same thing again, but output probabilities
param['objective'] = 'multi:softprob'
bst = xgb.train(param, xg_train, num_round, watchlist);
# Note: this convention has been changed since xgboost-unity
# get prediction, this is in 1D array, need reshape to (ndata, nclass)
yprob = bst.predict(xg_test).reshape(test_Y.shape[0], 6)
ylabel = np.argmax(yprob, axis=1)  # return the index of the biggest pro
print ('predicting, classification error=%f' % (
    sum(int(ylabel[i]) != test_Y[i] for i in range(len(test_Y))) / float(len(test_Y))))
|
# Demonstrate the default string form of a bare ``object`` instance.
obj = object()

print()
print(obj)
print(".. 以上是 object 型態物件的字串形式")
print()
print("程式結束 ....")
print()
|
# n participants; k bottles of l ml each (nl ml needed per person per round);
# c pizzas of d slices (1 slice per person); p of the last item (np per person).
# Print how many full rounds the supplies allow — limited by the scarcest one.
n, k, l, c, d, p, nl, np = map(int, input().split())

drink_rounds = (k * l) // (n * nl)
slice_rounds = (c * d) // n
last_rounds = p // (n * np)
print(min(drink_rounds, slice_rounds, last_rounds))
|
"""
Django settings for {{ cookiecutter.project_name }} project.
"""
from os import environ
from os.path import abspath, basename, dirname, join, normpath
from pathlib import Path
from sys import path
# PATH CONFIGURATION
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent.parent
# Absolute filesystem path to the config directory:
CONFIG_ROOT = Path(__file__).resolve(strict=True).parent.parent
# Absolute filesystem path to the project directory:
PROJECT_ROOT = Path(CONFIG_ROOT).resolve(strict=True).parent
# Absolute filesystem path to the django repo directory:
DJANGO_ROOT = Path(PROJECT_ROOT).resolve(strict=True).parent
# Project name:
PROJECT_NAME = PROJECT_ROOT.name.capitalize()
# Project folder:
PROJECT_FOLDER = PROJECT_ROOT.name
# Project domain:
PROJECT_DOMAIN = '%s.com' % PROJECT_NAME.lower()
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(str(PROJECT_ROOT))
# END PATH CONFIGURATION
# Base settings default to production-safe values; DEBUG/STAGING are expected
# to be overridden by environment-specific settings modules.
DEBUG = STAGING = False
# END DEBUG CONFIGURATION
ADMINS = (
    ("""{{cookiecutter.author_name}}""", '{{cookiecutter.email}}'),
)
MANAGERS = ADMINS
# Engine/credentials intentionally blank: filled in per environment.
DATABASES = {
    'default': {
        'CONN_MAX_AGE': 0,
        'ENGINE': 'django.db.backends.',  # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': '',  # Or path to database file if using sqlite3.
        'USER': '',
        'PASSWORD': '',
        'HOST': '',  # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '',  # Set to empty string for default.
    }
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
# NOTE(review): '*' disables host validation — tighten per deployment.
ALLOWED_HOSTS = ['*']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = '{{ cookiecutter.timezone }}'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = '{{cookiecutter.languages.strip().split(', ')[0]}}'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = PROJECT_ROOT.joinpath("media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = PROJECT_ROOT.joinpath("assets")
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    PROJECT_ROOT.joinpath("static"),
)
# Make this unique, and don't share it with anybody.
# Read from the environment; environ.get returns None when unset, which makes
# Django fail fast at startup rather than shipping a hard-coded key.
SECRET_KEY = environ.get('DJANGO_SECRET_KEY')
# List of callables that know how to import templates from various sources.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': (PROJECT_ROOT.joinpath("templates"),),
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.i18n',
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.template.context_processors.media',
                'django.template.context_processors.csrf',
                'django.template.context_processors.tz',
                'django.template.context_processors.static',
                'core.context_processor.settings',
            ]
        },
    },
]
MIDDLEWARE = (
    {% if cookiecutter.api == "y" or cookiecutter.api == "Y" %}
    # CORS headers must run before Django's common middleware.
    'corsheaders.middleware.CorsMiddleware',
    {% endif %}
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = '{{ cookiecutter.project_name }}.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = '{{ cookiecutter.project_name }}.wsgi.application'
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.admin',
    'django.contrib.sites',
    'django.contrib.sitemaps',
    'django.contrib.staticfiles',
    'django.contrib.messages',
    {% if cookiecutter.api == "y" or cookiecutter.api == "Y" %}
    'rest_framework',
    'django_filters',
    'drf_yasg',
    'corsheaders',
    {% endif %}
    'constance',
    'constance.backends.database',
    'core',
)
# Project-local apps appended to the framework/third-party apps above.
CUSTOM_APPS = ['user']
INSTALLED_APPS = list(INSTALLED_APPS) + CUSTOM_APPS
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
LOCALE_PATHS = (PROJECT_ROOT.joinpath("locale"),)
# Dummy gettext function
gettext = lambda s: s
LANGUAGES = [
    {% for language in cookiecutter.languages.strip().split(',') %}
    ('{{ language|trim }}', gettext('{{ language|trim }}')),
    {% endfor %}
]
# Custom User Model
AUTH_USER_MODEL = 'user.User'
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
# Django Constance
CONSTANCE_BACKEND = 'constance.backends.database.DatabaseBackend'
# Each entry is (default value, help text) editable from the admin at runtime.
CONSTANCE_CONFIG = {
    'SITE_NAME': ('Website title', ''),
    'SITE_DESCRIPTION': ('Website description', ''),
    'ADDRESS': ('Address', ''),
    'PHONE': ('Phone', ''),
    'EMAIL': ('Email', ''),
    'FACEBOOK': ('Facebook URL', ''),
    'INSTAGRAM': ('Instagram URL', ''),
    'TWITTER': ('Twitter URL', ''),
    'LINKEDIN': ('Linkedin URL', ''),
    'GOOGLE_ANALYTICS': ('UA-XXXXXXXXX-X', ''),
    'GOOGLE_TAG_MANAGER': ('GTM-XXXXXXX', ''),
    'GOOGLE_SITE_VERIFICATION': ('XXXXXXXXXXX', ''),
}
CONSTANCE_CONFIG_FIELDSETS = {
    'Website Detail': ('SITE_NAME', 'SITE_DESCRIPTION', 'ADDRESS', 'PHONE', 'EMAIL'),
    'Social Options': ('FACEBOOK', 'INSTAGRAM', 'TWITTER', 'LINKEDIN'),
    'SEO': ('GOOGLE_ANALYTICS', 'GOOGLE_TAG_MANAGER', 'GOOGLE_SITE_VERIFICATION')
}
{% if cookiecutter.api == "y" or cookiecutter.api == "Y" %}
# Django Rest Framework
REST_FRAMEWORK = {
    'DATETIME_FORMAT': '%Y-%m-%dT%H:%M:%S%z',
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
        'rest_framework.renderers.BrowsableAPIRenderer',
    ),
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.IsAuthenticated',
    ],
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.SessionAuthentication',
        'rest_framework.authentication.TokenAuthentication',
    ),
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    'PAGE_SIZE': 10
}
LOGIN_URL = 'rest_framework:login'
LOGOUT_URL = 'rest_framework:logout'
SWAGGER_SETTINGS = {
    'JSON_EDITOR': True,
    'SHOW_REQUEST_HEADERS': True
}
{% endif %}
|
#import modules
from keras.models import Model
from keras.layers import Input, LSTM, Dense
import os
import numpy as np
def convert_to_onehot(c):
    """Return a length-128 one-hot vector for the ASCII character *c*."""
    onehot = np.zeros(128)
    onehot[ord(c)] = 1
    return onehot
# Build (code -> comment) training tensors from paired files in ./code and
# ./comments, then train an LSTM encoder-decoder (seq2seq) over them.
code_tensors = []
vocab = set()
comments_dict = {}
comments = []
file_path = 'code'
for root, dirs, files in os.walk(file_path):
    for fl in files:
        code = open('code/' + fl).read()
        # First 1000 characters, space-padded to exactly 1000.
        code_tensor = [x for x in code][:1000]
        for i in range(1000 - len(code_tensor)):
            code_tensor.append(' ')
        code_tensor = list(map(convert_to_onehot, code_tensor))
        code_tensors.append(code_tensor)
        # Matching comment file shares the code file's name.
        comment = open('comments/' + fl).read()
        for com in comment.split():
            vocab.add(com)
        comments.append(comment)
# Assign each vocabulary word an integer id (index 0 is reserved for padding/EOS).
i = 0
for item in vocab:
    comments_dict[item] = i
    i = i + 1
comment_tensors_input = []
comment_tensors_target = []
for item in comments:
    comment_tensors = []
    comment = item.split()
    # Fixed decoder length of 10 words; shorter comments padded with the
    # reserved index-0 one-hot.
    for i in range(10):
        comment_tensor = np.zeros(len(vocab) + 1)
        if (i < len(comment)):
            comment_tensor[comments_dict[comment[i]] + 1] = 1
        else:
            comment_tensor[0] = 1
        comment_tensors.append(comment_tensor)
    comment_tensors_input.append(comment_tensors)
    # Targets are the inputs shifted left by one step, closed with index 0.
    comment_targets = comment_tensors[1:]
    zero_vec = np.zeros(len(vocab) + 1)
    zero_vec[0] = 1
    comment_targets.append(zero_vec)
    print(len(comment_targets))
    comment_tensors_target.append(comment_targets)
print(np.asarray(code_tensors).shape)
print(np.asarray(comment_tensors_input).shape)
print(np.asarray(comment_tensors_target).shape)
# Define an input sequence and process it.
encoder_inputs = Input(shape=(None, 128))
encoder = LSTM(500, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
# We discard `encoder_outputs` and only keep the states.
encoder_states = [state_h, state_c]
# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(None, len(vocab) + 1))
# We set up our decoder to return full output sequences,
# and to return internal states as well. We don't use the
# return states in the training model, but we will use them in inference.
decoder_lstm = LSTM(500, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
                                     initial_state=encoder_states)
decoder_dense = Dense(len(vocab) + 1, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
# Define the model that will turn
# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
# Separate inference models reuse the trained layers.
encoder_model = Model(encoder_inputs, encoder_states)
decoder_state_input_h = Input(shape=(500,))
decoder_state_input_c = Input(shape=(500,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = decoder_lstm(
    decoder_inputs, initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model(
    [decoder_inputs] + decoder_states_inputs,
    [decoder_outputs] + decoder_states)
# NOTE(review): Keras models saved with a '.py' extension — presumably these
# should be '.h5' like encdec.h5 below; confirm.
decoder_model.save("dec.py")
encoder_model.save("enc.py")
print(model.summary())
print(encoder_model.summary())
print(decoder_model.summary())
model.fit([np.asarray(code_tensors), np.asarray(comment_tensors_input)], np.asarray(comment_tensors_target),
          batch_size=5,
          epochs=100,
          validation_split=0.2)
model.save("encdec.h5")
# from keras.models import load_model
# model = load_model("encdec.h5")
# prediction = model.predict([np.asarray(code_tensors)[0]])
# print(prediction)
|
#Usage: python predict-multiclass.py
#https://github.com/tatsuyah/CNN-Image-Classifier
import os
import numpy as np
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from keras.models import Sequential, load_model
# Input size expected by the trained network.
img_width, img_height = 150, 150
model_path = './models/model.h5'
model_weights_path = './models/weights.h5'
# Load the trained classifier once at import time; predict() reuses it.
model = load_model(model_path)
model.load_weights(model_weights_path)
def predict(file):
    """Classify one image file and return the argmax class index."""
    image = load_img(file, target_size=(img_width, img_height))
    # Add the batch dimension the model expects: (1, H, W, C).
    tensor = np.expand_dims(img_to_array(image), axis=0)
    scores = model.predict(tensor)[0]
    answer = np.argmax(scores)
    labels = {
        0: "Label: Alaskan Malamute",
        1: "Label: Pitbull",
        2: "Label: Golden Retriever",
    }
    if answer in labels:
        print(labels[answer])
    return answer
def _evaluate_dir(directory, label_text, expected_class):
    """Run predict() on every non-hidden file under *directory*.

    Prints *label_text* before each prediction and the file path on a hit;
    returns (correct, wrong) counts. Replaces three copy-pasted loops.
    """
    correct = wrong = 0
    for ret in os.walk(directory):
        for filename in ret[2]:
            if filename.startswith("."):
                continue
            print(label_text)
            result = predict(ret[0] + '/' + filename)
            if result == expected_class:
                print(ret[0] + '/' + filename)
                correct += 1
            else:
                wrong += 1
    return correct, wrong


malamute_t, malamute_f = _evaluate_dir('./test-data/Malamut', "Label: Malamute", 0)
pitbull_t, pitbull_f = _evaluate_dir('./test-data/Pitbull', "Label: Pitbull", 1)
retriever_t, retriever_f = _evaluate_dir('./test-data/Retriever', "Label: Retriever", 2)

"""
Check metrics
"""
print("True Malamute: ", malamute_t)
print("False Malamute: ", malamute_f)
print("True Pitbull: ", pitbull_t)
print("False Pitbull: ", pitbull_f)
print("True Retriever: ", retriever_t)
print("False Retriever: ", retriever_f)
|
# Register a Windows Explorer right-click ("Total Time") context-menu entry
# that launches metadata.py with the current Python interpreter.
import os
import sys
import winreg as reg

# Get path of current working directory and python.exe
cwd = os.getcwd()
python_exe = sys.executable
# optional hide python terminal in windows: pythonw.exe runs without a console.
hidden_terminal = '\\'.join(python_exe.split('\\')[:-1]) + "\\pythonw.exe"
# Set the path of the context menu (right-click menu)
key_path = r'Directory\\Background\\shell\\Total Time\\'  # Change 'Total Time' to the name of your project
# Create outer key (the menu entry itself)
key = reg.CreateKey(reg.HKEY_CLASSES_ROOT, key_path)
reg.SetValue(key, '', reg.REG_SZ, '&Total Time')  # '&' marks the keyboard accelerator
# create inner key ('command' holds the command line Explorer will execute)
key1 = reg.CreateKey(key, r"command")
reg.SetValue(key1, '', reg.REG_SZ, python_exe + f' "{cwd}\\metadata.py"')  # change 'metadata.py' to the name of your script
#reg.SetValue(key1, '', reg.REG_SZ, hidden_terminal + f' "{cwd}\\file_organiser.py"')  # use to hide the terminal
#!/usr/local/bin/python3
# -*- conding: utf-8 -*-
from app import create_app

# Build the application via the factory.
app = create_app()

if __name__ == '__main__':
    # Listen on all interfaces on port 8432 with debug mode enabled.
    # NOTE(review): debug=True must not be used in production.
    # (Also removes a stray '|' artifact that had been fused onto this line.)
    app.run('0.0.0.0', 8432, debug=True)
import numpy as np
import tensorflow as tf
import os, sys, random, csv
def linear(x, output_size, name=None, nonlinearity=tf.nn.relu):
    """Fully connected layer (TF1 graph mode); returns (activation, W).

    NOTE(review): the bias is added *after* the nonlinearity
    (nonlinearity(x @ W) + b) rather than the conventional
    nonlinearity(x @ W + b) — confirm this ordering is intentional.
    """
    print("linear layer", x.get_shape()[1], "->", output_size, name)
    input_size = x.get_shape().as_list()[1]
    with tf.variable_scope(name or 'linear_layer', reuse=False):
        # Xavier-initialized weights, zero-initialized bias (tf.contrib API).
        W = tf.get_variable(dtype=tf.float32, shape=[input_size, output_size], initializer=tf.contrib.layers.xavier_initializer(), name='W')
        b = tf.get_variable(dtype=tf.float32, shape=[output_size], initializer=tf.constant_initializer(0.0), name='b')
        preactivation = tf.matmul(x, W, name='preactivation')
        y = tf.nn.bias_add(value=nonlinearity(preactivation), bias=b, name='acitvation')
    return y, W
def create_network(n_features, n_classes, is_training=False, model_name='network1', learning_rate=0.0001):
    """Build an MLP softmax classifier graph (TF1).

    Returns (X, network_output) when is_training is False, otherwise
    (X, network_output, Labels, loss, accuracy, optimizer, summary).
    """
    X = tf.placeholder(tf.float32, shape=[None, n_features])
    Labels = tf.placeholder(tf.float32, shape=[None, n_classes])
    layer_output = X
    #layers = [1024, 1024, 1024]
    # Tapering stack of hidden layers built with linear() above.
    layers = [1024, 1024, 512, 256, 128, 64]
    for layer_index, layer_size in enumerate(layers):
        layer_input = layer_output
        layer_output, W = linear(layer_input, layer_size, name='linear_' + str(layer_index + 1))
    logits, _ = linear(layer_output, n_classes, name='linear_classifier')
    network_output = tf.nn.softmax(logits)  # [None, n_classes]
    if not is_training:
        return X, network_output
    print("logits shape=", logits.get_shape())
    print("Labels shape=", Labels.get_shape())
    # softmax_cross_entropy_with_logits
    #cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, Labels) #[None, 1]
    # NOTE(review): positional (logits, labels) only works on pre-1.0 TF; TF>=1.0
    # requires keyword arguments here — confirm the targeted TF version.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, Labels)  #[None, 1]
    loss = tf.reduce_mean(cross_entropy)
    correct_predictions = tf.equal(tf.argmax(network_output, 1), tf.argmax(Labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
    with tf.name_scope(model_name):
        tf.summary.scalar('cross_entropy', loss)
        tf.summary.scalar('train_accuracy', accuracy)
    summary = tf.summary.merge_all()
    return X, network_output, Labels, loss, accuracy, optimizer, summary
|
#!/usr/bin/env python
import moksha.ctl.core.main as main

if __name__ == '__main__':
    # Delegate to moksha's ctl entry point; entry_point=False presumably marks
    # a direct script invocation rather than a console_script — verify against
    # moksha.ctl.core.main.
    main.main(entry_point=False)
|
import numpy as np
import matplotlib.pyplot as plt
import sys
# Make the project-level equation6 module importable.
sys.path.insert(0, "../..")
from equation6 import Shell
import argparse
import seaborn as sns
###################################
"""
The objective of this program is
#to make a figure that illustrates
how the shell shape changes with
$\beta$ and if possible compare it
with the result of other authors
"""
###################################
############ Figure 1 #############
# Shell shape vs $\beta$ ##########
beta = [0.01, 0.1, 0.99]
# Stop just short of pi to avoid the tail singularity.
theta = np.linspace(0, 0.99 * np.pi)
parser = argparse.ArgumentParser(description="choose figure output")
parser.add_argument("--fig", type=int, default=0, choices=[0, 1], help="figure output")
args = parser.parse_args()
# --fig 0 -> shell-shape figure; --fig 1 -> characteristic-radii figure.
flag = args.fig <= 0
inner_list = ["isotropic", "proplyd"]
xi_list = [1.0, 0.8, 0.2]
line_dict = {"isotropic": "--", "proplyd": "-"}
line_list = ["--", "-", ":", ]
color_list = ["r", "g", "b", "m", "c", "k"]
#label = {"isotropic":None,"proplyd":r"$\beta={}$".format(b)}
sns.set_style("whitegrid")
if flag:
    # One curve per (xi, beta) combination; color encodes beta, dash style xi.
    for xi, line in zip(xi_list, line_list):
        for b, col in zip(beta, color_list):
            shell = Shell(beta=b, innertype="anisotropic", xi=xi)
            R = shell.radius(theta)
            # Mask non-physical (non-positive) radii so they are not drawn.
            R[R <= 0] = np.nan
            # Legend entries only for the middle xi to avoid duplicates.
            if xi == 0.8:
                label = r"$\beta={}$".format(b)
            else:
                label = None
            plt.plot(R * np.cos(theta), R * np.sin(theta), color=col, linestyle=line,
                     label=label)
    fontsize = 15
    ticksize = 14
    plt.legend(fontsize="small")
    plt.xlabel(r"$z/D$", fontsize=fontsize)
    plt.ylabel(r"$r/D$", fontsize=fontsize)
    plt.tick_params(axis='both', which='major', labelsize=ticksize)
    plt.gca().set_aspect("equal", adjustable="box")
    fig = plt.gcf()
    fig.set_size_inches(7, 4.5)
    plt.xlim(-0.4, 1)
    plt.ylim(0, 1)
    plt.tight_layout()
    fig.savefig("figs/r-beta.pdf")
####### Figure 2 #################
# Show the characteristic Radii for a generic bowshock
else:
    b = 0.1
    t = np.linspace(-np.pi, np.pi)
    BS = Shell(beta=b, innertype="proplyd")
    # A is the curvature-radius scale factor at the shell apex (assumption —
    # verify against equation6).
    A = 1.5 / (1 - np.sqrt(b))
    R = BS.radius(theta)
    R[R <= 0] = np.nan
    # Shell outline (both halves, mirrored about the axis).
    plt.plot(R * np.cos(theta), R * np.sin(theta), "k-", R * np.cos(theta), -R * np.sin(theta), "k-", lw=3)
    # Osculating circle at the apex plus the source (star) and circle center.
    plt.plot(A * R[0] * np.cos(t) - R[0] * (A - 1), A * R[0] * np.sin(t))
    plt.plot([0], [0], "r*")
    plt.plot([-R[0] * (A - 1)], [0], "k.")
    plt.grid()
    plt.gca().set_aspect("equal", adjustable="box")
    plt.xlabel("z/D")
    plt.ylabel("r/D")
    plt.xlim(-R[0] * (A + 1) - 0.1, R[0] + 0.6)
    plt.ylim(-0.3, 1.5)
    plt.savefig("ch-radii.pdf")
|
"""
Created by Alex Wang
On 2018-11-30
"""
import traceback
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
def test_pca():
    """Fit a 3-component PCA on the iris data, reproduce the transform
    manually from ``mean_``/``components_``, and show a labelled 3D scatter."""
    np.random.seed(5)
    centers = [[1, 1], [-1, -1], [1, -1]]  # unused; kept from the original example
    iris = datasets.load_iris()
    X = iris.data
    y = iris.target
    fig = plt.figure(1, figsize=(4, 3))
    plt.clf()
    ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
    plt.cla()
    pca = decomposition.PCA(n_components=3)
    pca.fit(X)
    mean = pca.mean_
    components = pca.components_
    # Manual equivalent of pca.transform(X): centre, then project.
    X = X - mean
    X = np.dot(X, components.T)
    print('type of mean:{}, type of component:{}'.format(type(mean), type(components)))
    # X = pca.transform(X)
    for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
        ax.text3D(X[y == label, 0].mean(),
                  X[y == label, 1].mean() + 1.5,
                  X[y == label, 2].mean(), name,
                  horizontalalignment='center',
                  bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
    # Reorder the labels to have colors matching the cluster results.
    # Fix: np.float was removed in NumPy 1.24 — use the builtin float.
    y = np.choose(y, [1, 2, 0]).astype(float)
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.nipy_spectral,
               edgecolor='k')
    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
    plt.show()
def test_ipca():
    """Compare IncrementalPCA with PCA on iris: reproduce the IPCA transform
    manually from ``mean_``/``components_`` and plot both 2D projections."""
    iris = datasets.load_iris()
    X = iris.data
    y = iris.target
    n_components = 2
    ipca = decomposition.IncrementalPCA(n_components=n_components, batch_size=10)
    X_ipca_org = ipca.fit_transform(X)  # library-computed projection (reference)
    mean = ipca.mean_
    components = ipca.components_
    # Manual equivalent of ipca.transform(X): centre, then project.
    X_ipca = X - mean
    X_ipca = np.dot( X_ipca, components.T)
    pca = decomposition.PCA(n_components=n_components)
    X_pca = pca.fit_transform(X)
    colors = ['navy', 'turquoise', 'darkorange']
    for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
        plt.figure(figsize=(8, 8))
        for color, i, target_name in zip(colors, [0, 1, 2], iris.target_names):
            plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
                        color=color, lw=2, label=target_name)
        if "Incremental" in title:
            # Sign-insensitive mean difference (components may be flipped).
            err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
            plt.title(title + " of iris dataset\nMean absolute unsigned error "
                      "%.6f" % err)
        else:
            plt.title(title + " of iris dataset")
        plt.legend(loc="best", shadow=False, scatterpoints=1)
        plt.axis([-4, 4, -1.5, 1.5])
    plt.show()
if __name__ == '__main__':
    # test_pca()
    test_ipca()
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
class ExtrasPage:
    """Page object for the booking 'extras' page: passenger details, booker
    address, insurance options and transfer add-ons.

    Fix: the ``find_element_by_*`` helper methods were removed in Selenium 4;
    all lookups now use ``find_element(By.<STRATEGY>, value)``.
    """
    def __init__(self, driver):
        self.driver = driver
        # Wait (up to 60s) for the address and insurance sections to render
        # before any interaction is attempted.
        self.load_extras_page = WebDriverWait(self.driver.instance, 60).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "fieldset.form-section.address-section")))
        self.load_insurance_section = WebDriverWait(self.driver.instance, 60).until(
            EC.visibility_of_element_located((By.CSS_SELECTOR, "div.col-md-12.ancillary-group")))
        self.driver.instance.save_screenshot('extras.png')
    def validate_extras_loaded(self):
        assert self.load_extras_page.is_displayed()
    def validate_insurance_section_loaded(self):
        assert self.load_insurance_section.is_displayed()
    def fill_passanger(self):
        """Fill the first passenger's name and birth date."""
        self.driver.instance.find_element(By.NAME, 'passengers.0.firstName').send_keys("John")
        self.driver.instance.find_element(By.NAME, 'passengers.0.lastName').send_keys("Doe")
        self.driver.instance.find_element(By.CSS_SELECTOR, "input.form-control.birth-date").send_keys("19900101")
        self.driver.instance.save_screenshot('passengers.png')
    def fill_address(self):
        """Fill the booker's gender, address and contact details."""
        self.driver.instance.find_element(By.CSS_SELECTOR, "input[type='radio'][value='MALE']").click()
        self.driver.instance.find_element(By.NAME, 'bookerDetails.0.streetAddress').send_keys("testgatan 1")
        self.driver.instance.find_element(By.NAME, 'bookerDetails.0.zipCode').send_keys("12345")
        self.driver.instance.find_element(By.NAME, 'bookerDetails.0.city').send_keys("stockholm")
        self.driver.instance.find_element(By.NAME, 'bookerDetails.0.phoneNumber').send_keys("0733123456")
        self.driver.instance.find_element(By.NAME, 'bookerDetails.0.emailAddress').send_keys("some@email.com")
        self.driver.instance.find_element(By.NAME, 'bookerDetails.0.emailAddress2').send_keys("some@email.com")
        self.driver.instance.save_screenshot('address.png')
    def select_cancellation_insurance(self):
        """Decline cancellation insurance (the 'no thanks' option)."""
        self.driver.instance.find_element(By.CSS_SELECTOR,
            "input[type='radio'][value='variantProductCode=fake_no_thanks_option_code,sysInfo=']").click()
        self.driver.instance.save_screenshot('cancellation_insurance.png')
    def select_transfer(self):
        """Select the transfer add-on product."""
        self.driver.instance.find_element(By.CSS_SELECTOR,
            "input[type='radio'][value='variantProductCode=PC-000119876,sysInfo=D122 PLPATINDGD190228 01202604249']").click()
        self.driver.instance.save_screenshot('transfer.png')
    def select_travel_insurance(self):
        """Decline travel insurance (the 'no thanks' option)."""
        self.driver.instance.find_element(By.CSS_SELECTOR,
            "input[type='radio'][value='variantProductCode=fake_no_thanks_option_code,sysInfo=']").click()
        self.driver.instance.save_screenshot('travel_insurance.png')
    def click_summer_dropdown_button(self):
        """Expand the price summary dropdown."""
        self.driver.instance.find_element(By.ID, "price-summary-container").click()
        self.driver.instance.save_screenshot('finished.png')
|
from main import WKhtmlToPdf, wkhtmltopdf
import api
|
from abc import ABCMeta, abstractmethod
import logging
from random import randrange
import sys
from bitarray import bitarray
from indigox.config import (INFINITY, RUN_QBND, BASIS_LEVEL, ALLOW_HYPERVALENT,
ELECTRON_PAIRS, HYPERPENALTY, PREFILL_LOCATIONS,
COUNTERPOISE_CORRECTED)
from indigox.data import atom_enes, bond_enes, qbnd_enes, lp_prob, bo_prob
from indigox.exception import IndigoUnfeasibleComputation
from indigox.periodictable import PeriodicTable as PT
import networkx as nx
import openbabel as ob
# Index into the per-bond energy tuples: 0 selects the counterpoise-corrected
# value, 1 the uncorrected one.
BSSE = int(not COUNTERPOISE_CORRECTED)
def node_energy(G, n):
    # Calculate the energy of a node in a BOAssignment graph.
    # Atom nodes (len(n) == 1) use tabulated atom energies keyed by element
    # and formal charge; bond nodes (len(n) == 2) use QBND/bond energies.
    # Returns INFINITY when no tabulated energy applies (infeasible state).
    ene = INFINITY
    if len(n) == 1:
        e = G.node[n]['Z']
        fc = G.node[n]['fc']
        es = G.node[n]['e-']
        try:
            ene = atom_enes[BASIS_LEVEL][e][fc]
        except KeyError:
            pass  # no tabulated energy for this charge state -> stays INFINITY
        if HYPERPENALTY:
            # Penalise exceeding the (hyper)octet: atom electrons plus all
            # electrons in the bonds attached to this atom.
            val = es + sum(G.node[x]['e-'] for x in G.neighbors(n))
            octet = (PT[e].hyper if ALLOW_HYPERVALENT and G.degree(n) > 2
                     else PT[e].octet)
            if val > octet:
                ene = INFINITY
    elif len(n) == 2:
        a, b = n
        order = G.node[n]['e-']
        # Build charge-decorated element symbols (e.g. 'N+'), sorted for lookup.
        a_sign = '+' if int(G.node[(a,)]['fc']) > 0 else ''
        a_sign = '-' if int(G.node[(a,)]['fc']) < 0 else a_sign
        b_sign = '+' if int(G.node[(b,)]['fc']) > 0 else ''
        b_sign = '-' if int(G.node[(b,)]['fc']) < 0 else b_sign
        a = G.node[(a,)]['Z'] + a_sign
        b = G.node[(b,)]['Z'] + b_sign
        a, b = sorted((a, b))
        if order % 2:
            # Odd electron count: no tabulated bond energy; use half the count.
            ene = order / 2
        elif RUN_QBND and (a, b, order//2) in qbnd_enes[BASIS_LEVEL]:
            ene = qbnd_enes[BASIS_LEVEL][(a, b, order//2)][BSSE]
        else:
            # Fall back to charge-free bond energies (strip the sign suffix).
            a = a[:-1] if ('+' in a or '-' in a) else a
            b = b[:-1] if ('+' in b or '-' in b) else b
            try:
                ene = bond_enes[BASIS_LEVEL][(a, b, order//2)][BSSE]
            except KeyError:
                pass  # unknown bond type -> stays INFINITY
    return ene
def formal_charge(G, a):
    # Calculates the formal charge on an atom given the bonding environment.
    # Start from valence minus lone electrons, then subtract this atom's
    # share of the electrons in each attached bond node.
    fc = G.node[a]['valence'] - G.node[a]['e-']
    for n in G.neighbors(a):
        e = G.node[n]['e-']
        za = G.node[a]['Z']
        # Element of the atom at the other end of bond node n.
        zb = G.node[(n[0],) if n[1] == a[0] else (n[1],)]['Z']
        if not e % 2:
            # Even electron count: split equally between the two atoms.
            fc -= e // 2
        # split electrons from odd count bonds when elements are the same
        elif e % 2 and za == zb and a[0] == n[0]:
            fc -= (e + 1) // 2
        elif e % 2 and za == zb:
            fc -= (e - 1) // 2
        # give odd electron from bond to most electronegative atom when
        # element different
        elif e % 2 and PT[za].chi > PT[zb].chi:
            fc -= (e + 1) // 2
        else:
            fc -= (e - 1) // 2
    return fc
def graph_to_dist_graph(G):
    # Convert the molecular graph to bond order assignment graph:
    # atoms become 1-tuple nodes, bonds become sorted 2-tuple nodes, and
    # each bond node is linked to its two atom nodes.
    H = nx.Graph()
    for atom, dat in G.nodes(True):
        H.add_node((atom,), **{'Z': dat['element'],
                               'e-': 0,
                               'valence': PT[dat['element']].valence,
                               'fc': 0,})
    for a, b, dat in G.edges(data=True):
        a, b = sorted((b, a))
        # Every bond node starts with one electron pair.
        H.add_node((a, b), **{'e-': 2})
        H.add_edge((a,), (a, b))
        H.add_edge((a, b), (b,))
    if PREFILL_LOCATIONS:
        # Pre-assign lone pairs on atom nodes when the lone-pair probability
        # table marks them with value 1.0 (entry [4] appears to be a sample
        # count gating the data's reliability — confirm lp_prob layout).
        for v, d in H.nodes(True):
            if len(v) == 2:
                continue
            H.node[v]['prefill'] = 0
            if lp_prob[(d['Z'], H.degree(v))][4] > 1000:
                for i in lp_prob[(d['Z'], H.degree(v))][1:4]:
                    if i == 1.0:
                        H.node[v]['prefill'] += 2
    return H
def electron_spots(G):
    # Determines the places where electrons can be placed.
    # Returns 1-tuples (atom lone-pair slots) and 2-tuples (extra bond-order
    # slots); a location is repeated once per electron (or pair) it can take.
    spots = []
    # Atom slots: room up to the (hyper)octet, minus two electrons per
    # existing bond, minus any prefilled lone pairs.
    for n, d in G.nodes(True):
        if ALLOW_HYPERVALENT and G.degree(n) > 2:
            octet = PT[d['element']].hyper
        else:
            octet = PT[d['element']].octet
        bonded = G.degree(n) * 2
        missing_e = octet - bonded
        if PREFILL_LOCATIONS:
            if lp_prob[(d['element'], G.degree(n))][4] > 1000:
                for i in lp_prob[(d['element'], G.degree(n))][1:4]:
                    if i == 1.0:
                        missing_e -= 2
        while missing_e > 0:
            spots.append((n,))
            if ELECTRON_PAIRS:
                missing_e -= 2
            else:
                missing_e -= 1
    # Bond slots: allow higher bond orders while both atoms have room and an
    # energy is tabulated for the next order.
    for a, b in G.edges():
        a, b = sorted((a, b))
        if ALLOW_HYPERVALENT and G.degree(a) > 2:
            a_octet = PT[G.node[a]['element']].hyper
        else:
            a_octet = PT[G.node[a]['element']].octet
        if ALLOW_HYPERVALENT and G.degree(b) > 2:
            b_octet = PT[G.node[b]['element']].hyper
        else:
            b_octet = PT[G.node[b]['element']].octet
        a_bonded = G.degree(a) * 2
        b_bonded = G.degree(b) * 2
        a_missing = a_octet - a_bonded
        b_missing = b_octet - b_bonded
        order = 1
        a_e = G.node[a]['element']
        b_e = G.node[b]['element']
        a_e, b_e = sorted((a_e, b_e))
        while (a_missing > 0 and b_missing > 0
               and (a_e, b_e, order + 1) in bond_enes[BASIS_LEVEL]):
            spots.append((a, b))
            if not ELECTRON_PAIRS:
                # Single-electron mode: each extra order contributes two slots.
                spots.append((a, b))
            a_missing -= 2
            b_missing -= 2
            order += 1
    return spots
def electrons_to_add(G):
    # Determines how many electrons/electron pairs to add to the system:
    # total valence electrons, minus the molecular charge, minus the pair
    # already implicit in every bond, minus prefilled lone pairs.
    total_e = sum(PT[G.node[n]['element']].valence for n in G)
    total_e -= G.graph['total_charge']
    total_e -= G.size() * 2 # Implicitly have all bonds with an electron
    # pair in them.
    if PREFILL_LOCATIONS:
        for n, d in G.nodes(True):
            if lp_prob[(d['element'], G.degree(n))][4] > 1000:
                for i in lp_prob[(d['element'], G.degree(n))][1:4]:
                    if i == 1.0:
                        total_e -= 2
    if ELECTRON_PAIRS and total_e % 2:
        raise IndigoUnfeasibleComputation('Unable to handle odd number of '
                                          'electrons when using electron pairs.')
    elif ELECTRON_PAIRS:
        # Work in units of electron pairs.
        total_e = total_e // 2
    return total_e
def locs_sort(locs, G):
    """Sort the possible electron locations by fill probability, most
    probable first, preserving each location's multiplicity from *locs*.

    Note: *locs* is consumed (emptied) in the process.

    Fix: removed a leftover debug ``print(new_order[::-1])`` that dumped the
    whole ordering to stdout on every call.
    """
    def probability_sort(n, G=G):
        # Probability key: atom nodes use lone-pair probabilities, bond
        # nodes use bond-order probabilities keyed on canonical element/degree.
        if len(n) == 1:
            e = G.node[n]['Z']
            d = G.degree(n)
            return lp_prob[(e, d)]
        elif len(n) == 2:
            a_e = G.node[(n[0],)]['Z']
            b_e = G.node[(n[1],)]['Z']
            a_d = G.degree((n[0],))
            b_d = G.degree((n[1],))
            # Canonical order: lower element first; tie-break on degree.
            if a_e > b_e:
                a_e, b_e = b_e, a_e
                a_d, b_d = b_d, a_d
            elif a_e == b_e and b_d < a_d:
                a_d, b_d = b_d, a_d
            return bo_prob[(a_e, a_d, b_e, b_d)]
    everything = set(locs)
    everything = sorted(everything, key=probability_sort, reverse=True)
    new_order = []
    for n in everything:
        # Re-emit each location as many times as it appeared in locs.
        while n in locs:
            locs.remove(n)
            new_order.append(n)
    return new_order
def graph_setup(G, a, locs):
    # Sets up the BOAssign graph for heuristic and actual energy calculation:
    # reset all electron placements, apply the candidate assignment from
    # bitarray `a` over `locs`, then recompute atom formal charges.
    for n in G:
        if len(n) == 1 and not PREFILL_LOCATIONS:
            G.node[n]['e-'] = 0
        elif len(n) == 1 and PREFILL_LOCATIONS:
            G.node[n]['e-'] = G.node[n]['prefill']
        elif len(n) == 2:
            # Every bond starts from a single electron pair.
            G.node[n]['e-'] = 2
    for i in range(a.length()):
        if ELECTRON_PAIRS and a[i]:
            G.node[locs[i]]['e-'] += 2
        elif a[i]:
            G.node[locs[i]]['e-'] += 1
    for n in G:
        if len(n) == 1:
            G.node[n]['fc'] = formal_charge(G, n)
def bitarray_to_reallocs(a, locs):
    """Map the set bits of *a* onto their corresponding locations, sorted."""
    return sorted(locs[i] for i in range(a.length()) if a[i])
def bitarray_to_assignment(G, barry, locs):
    # Write a bitarray solution back onto the molecular graph G:
    # atom nodes get a 'formal_charge' attribute, edges get an integer 'order'.
    locs = bitarray_to_reallocs(barry, locs)
    H = graph_to_dist_graph(G)
    e_per_count = 2 if ELECTRON_PAIRS else 1
    for i in locs:
        H.node[i]['e-'] += e_per_count
    for n in H:
        if len(n) == 1:
            G.node[n[0]]['formal_charge'] = formal_charge(H, n)
        if len(n) == 2:
            G[n[0]][n[1]]['order'] = H.node[n]['e-'] // 2
def calculable_nodes(G, a, stop, locs, target):
    # Determines which nodes of a BOAssign graph are calculable.
    # Locations at or beyond `stop` in `locs` may still change, so only
    # nodes whose whole neighbourhood is fixed can be evaluated.
    more_locs = locs[stop:]
    if a.count() >= target:
        # All electrons placed: every node is final, hence calculable.
        return set(G.nodes())
    calculable = []
    for n in G:
        G.node[n]['changeable'] = True if n in more_locs else False
    # Visit atoms (len 1) before bonds (len 2) so the bond check can rely
    # on atoms already appended to `calculable`.
    for n in sorted(G, key=len):
        if G.node[n]['changeable']:
            continue
        if len(n) == 1:
            # atoms are calculable if they and their neighbours are unchangeable
            for nb in G[n]:
                if G.node[nb]['changeable']:
                    break
            else:
                calculable.append(n)
        elif len(n) == 2:
            # bonds are calculable if they and their neighbours are unchangeable
            for nb in G[n]:
                if nb not in calculable:
                    break
                elif G.node[nb]['changeable']:
                    break
            else:
                calculable.append(n)
    return set(calculable)
def obmol_to_graph(mol, total_charge):
    # Build a networkx molecular graph from an Open Babel molecule:
    # nodes are atom indices carrying element/name data, edges are bonds.
    name = str(mol.GetData("COMPND")).strip()
    G = nx.Graph(name=name, total_charge=total_charge)
    for obAtom in ob.OBMolAtomIter(mol):
        a = obAtom.GetIdx()
        element = PT[obAtom.GetAtomicNum()].symbol
        name = obAtom.GetTitle()
        G.add_node(a, **{'element': element, 'name': name})
    for obBond in ob.OBMolBondIter(mol):
        a = obBond.GetBeginAtomIdx()
        b = obBond.GetEndAtomIdx()
        G.add_edge(a, b)
    return G
def random_string(length=4, chars="ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"):
    """Return a random string of *length* characters drawn from *chars*."""
    picked = []
    for _ in range(length):
        picked.append(chars[randrange(0, len(chars))])
    return ''.join(picked)
def _get_logger():
    # Configure the root logger and return it.
    # Registers a custom VERBOSE level (5) plus a Logger.verbose() helper.
    VERBOSE = 5
    logging.addLevelName(VERBOSE, 'VERBOSE')
    logging.Logger.verbose = (lambda inst, msg, *args, **kwargs:
                              inst.log(VERBOSE, msg, *args, **kwargs))
    logging.captureWarnings(True)
    # Three formatter variants are defined; only _simple is attached below.
    _simple = logging.Formatter("[{levelname:5}]: {message}",style='{')
    _with_time = logging.Formatter("[{levelname:5}] {threadName}.{processName} "
                                   "{module}.{funcName}.{lineno}: {message}\n"
                                   "Time: {relativeCreated:.6f} ms",
                                   style='{')
    _long = logging.Formatter("[{levelname:5}] {threadName}.{processName} "
                              "{module}.{funcName}.{lineno}: {message}",
                              style='{')
    _log_level = 20  # logging.INFO
    _c_handler = logging.StreamHandler(stream=sys.stderr)
    _c_handler.setLevel(_log_level)
    _c_handler.setFormatter(_simple)
    log = logging.getLogger()
    log.setLevel(_log_level)
    log.addHandler(_c_handler)
    return log
class HashBitArray(bitarray):
    """A bitarray subclass that is hashable, keyed on its '0'/'1' string."""
    def __hash__(self):
        return hash(self.to01())
    def __eq__(self, x):
        # Equal only to another bitarray of identical length and contents.
        return (isinstance(x, bitarray)
                and self.length() == x.length()
                and self.to01() == x.to01())
class BondOrderAssignment(object):
    """Abstract interface for bond-order assignment algorithms."""
    # NOTE(review): '__metaclass__' is Python 2 syntax; under Python 3 it is
    # ignored, so @abstractmethod is not enforced there. Confirm the target
    # interpreter version before relying on ABC enforcement.
    __metaclass__ = ABCMeta
    # Shared, module-wide logger (root logger configured by _get_logger()).
    log = _get_logger()
    @abstractmethod
    def __init__(self, G, *args):
        pass
    @abstractmethod
    def run(self):
        pass
    @abstractmethod
    def initialise(self):
        pass
|
# Base Vehicle:
class Vehicle:
    """Base class for vehicles; subclasses must implement top_speed()."""
    # Constructor
    def __init__(self, owner):
        self.owner = owner
    def get_owner(self):
        """Return the owner's name."""
        return self.owner
    # Methods in which every subclass will be required to implement.
    def top_speed(self):
        # Fixed grammar in the error message: "it's" -> "its".
        raise NotImplementedError("Subclass is missing its top speed method.")
# Vehicles:
class Truck(Vehicle):
    def top_speed(self):
        """Return the truck's top-speed description."""
        return "The truck has a top speed of 120mph."
class Sedan(Vehicle):
    def top_speed(self):
        """Return the sedan's top-speed description."""
        return "The sedan has a top speed of 140mph."
class SportsCar(Vehicle):
    def top_speed(self):
        """Return the sportscar's top-speed description."""
        return "The sportscar has a top speed of 200mph."
# Demo: print each vehicle's owner followed by its top-speed description.
vehicles = [
    Truck("Chris"),
    Sedan("Kyle"),
    Sedan("Justin"),
    SportsCar("John"),
]
for vehicle in vehicles:
    print(vehicle.get_owner())
    print(" * " + vehicle.top_speed())
"""
Chris
* The truck has a top speed of 120mph.
Kyle
* The sedan has a top speed of 140mph.
Justin
* The sedan has a top speed of 140mph.
John
* The sportscar has a top speed of 200mph.
"""
|
import copy
import json
import sys

import h5py
import numpy as np
def json_parameter_lists_from_chain(json_dict,n_sigma=3):
    """
    Read in chain data and compute parameter stats and bounds.

    Returns (cini, cvini, spllo, splhi): per-parameter sample means,
    variances, and mean -/+ n_sigma*std bounds, each as a plain list.
    """
    # obtain number of parameters to estimate stats and limits for
    model_type = json_dict["mcmcopts"]["model_type"]
    nparams = 0
    if model_type == "oneWave":
        nparams = 4
    elif model_type == "twoWave":
        nparams = 8
    elif model_type == "threeWave":
        nparams = 12
    else:
        sys.exit("ERROR: did not recognize model_type from json file. Should be oneWave, twoWave, or threeWave")
    # get chain file name
    fchno = json_dict["regioninfo"]["fchain"]
    # retrieve MCMC chain
    file = h5py.File(fchno, 'r')
    chn = np.array(file["chain"])
    file.close()
    # sample MCMC chain: skip the first nstart rows (burn-in), then thin so
    # that roughly nsamples rows remain.
    nstart = json_dict["ppopts"]["nstart"]
    nsamples = json_dict["ppopts"]["nsamples"]
    nskip = (chn.shape[0]-nstart)//nsamples
    chnSamps=chn[nstart::nskip,:nparams]
    # set parameter statistics initial guesses
    cini = chnSamps.mean(axis=0)
    cvini = chnSamps.var(axis=0)
    # set parameter bounds
    spllo = cini - n_sigma * np.sqrt(cvini)
    splhi = cini + n_sigma * np.sqrt(cvini)
    # convert to lists
    cini = list(cini)
    cvini = list(cvini)
    spllo = list(spllo)
    splhi = list(splhi)
    return cini, cvini, spllo, splhi
def clip_json_parameter_lists(spllo, splhi, cini, nWaves=2):
    """
    Clamp the non-negative parameters' bounds and initial values at zero.
    Mutates the input lists in place and returns (spllo, splhi, cini).
    """
    # Parameters at offsets 1..3 within each 4-parameter wave block must be >= 0.
    clip_inds = [4 * wave + offset
                 for wave in range(nWaves)
                 for offset in (1, 2, 3)]
    for ind in clip_inds:
        spllo[ind] = max(spllo[ind], 0.0)
        splhi[ind] = max(splhi[ind], 0.0)
        cini[ind] = max(cini[ind], 0.0)
    return spllo, splhi, cini
class PrimeJsonCreator:
    """
    Handles the creation and modification of a new json dict based on an
    existing json dict.
    """
    def __init__(self, json_in):
        """
        Initialize by deep-copying the existing json dict.

        Fix: a deep copy is required here. The setters below mutate nested
        sub-dictionaries ("regioninfo", "mcmcopts", ...), so the original
        shallow ``json_in.copy()`` wrote those changes back into the
        caller's dict.

        :param json_in: dictionary read from json input file
        :type json_in: dict
        """
        self.json_dict = copy.deepcopy(json_in)
    def set_region_name(self, regionname, filesuffix=""):
        """
        Set the region name and derive all region-specific file names.

        :param regionname: region name to be used in new json dict
        :type regionname: string
        :param filesuffix: optional suffix appended to derived file names
        :type filesuffix: string
        """
        self.json_dict["regioninfo"]["regionname"] = regionname
        # set log filenames to include updated regionname
        self.json_dict["regioninfo"]["fchain"] = regionname+"_mcmc"+filesuffix+".h5"
        self.json_dict["mcmcopts"]["logfile"] = "logmcmc"+regionname+filesuffix+".txt"
        self.json_dict["ppopts"]["fpredout"] = regionname+"_epidemic_curve"+filesuffix
        self.json_dict["ppopts"]["fout_newcases"] = regionname+"_epidemic_curve"+filesuffix
        self.json_dict["infopts"]["finfout"] = regionname+"_infection_curve"+filesuffix
        self.json_dict["infopts"]["fout_inf"] = regionname+"_infection_curve"+filesuffix
        self.json_dict["csvout"]["finfcurve"] = regionname+"_infection_curve"+filesuffix
        self.json_dict["csvout"]["fnewcases"] = regionname+"_epidemic_curve"+filesuffix
    def set_model_type(self, model_type):
        """Set the MCMC model type (oneWave/twoWave/threeWave)."""
        self.json_dict["mcmcopts"]["model_type"] = model_type
    def set_gamma(self, gamma):
        """Set the MCMC gamma option."""
        self.json_dict["mcmcopts"]["gamma"] = gamma
    def set_parameter_limits(self, spllo, splhi):
        """Set the lower/upper parameter bounds."""
        self.json_dict["mcmcopts"]["spllo"] = spllo
        self.json_dict["mcmcopts"]["splhi"] = splhi
    def set_parameter_stats(self, cini, cvini):
        """Set the initial parameter means and variances."""
        self.json_dict["mcmcopts"]["cini"] = cini
        self.json_dict["mcmcopts"]["cvini"] = cvini
    def set_prior_distributions(self, prior_types, prior_info):
        """Set the Bayesian prior types and hyper-parameters."""
        self.json_dict["bayesmod"]["prior_types"] = prior_types
        self.json_dict["bayesmod"]["prior_info"] = prior_info
    def get_json_dict(self,filename):
        """
        Get modified json dict.

        Note: *filename* is unused; kept for backward compatibility.

        :return: json_out, a new dictionary to be written to a new json file
        :rtype: dict
        """
        return self.json_dict
    def write_json_dict(self, filename):
        """
        Write json dict to a new json file with a specified filename

        :param filename: file name to be used for new json file
        :type filename: string
        """
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(self.json_dict, f, ensure_ascii=False, indent=4)
|
# Generated by Django 3.2.4 on 2021-07-12 20:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the InformacaoPedido model, drop Pedido.cidade, and add the
    Pedido.lista foreign key pointing at InformacaoPedido."""
    dependencies = [
        ('churrasco', '0003_alter_produto_nome'),
    ]
    operations = [
        migrations.CreateModel(
            name='InformacaoPedido',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cidade', models.CharField(max_length=500, verbose_name='Cidade')),
                ('quantidadePessoa', models.DecimalField(decimal_places=2, max_digits=9, verbose_name='Quantidade pessoa')),
            ],
        ),
        migrations.RemoveField(
            model_name='pedido',
            name='cidade',
        ),
        migrations.AddField(
            model_name='pedido',
            name='lista',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='churrasco.informacaopedido'),
        ),
    ]
|
from collections import defaultdict, deque, Counter
from heapq import heapify, heappop, heappush
import math
from copy import deepcopy
from itertools import combinations, permutations, product, combinations_with_replacement
from bisect import bisect_left, bisect_right
import sys
def input():
    # Fast stdin reader; intentionally shadows the builtin input().
    return sys.stdin.readline().rstrip()
def getN():
    # Read one line as a single integer.
    return int(input())
def getNM():
    # Read one line of space-separated integers (as a map object).
    return map(int, input().split())
def getList():
    # Read one line of space-separated integers as a list.
    return list(map(int, input().split()))
def getListGraph():
    # Read 1-based vertex ids and convert them to 0-based.
    return list(map(lambda x:int(x) - 1, input().split()))
def getArray(intn):
    # Read `intn` lines, one integer per line.
    return [int(input()) for i in range(intn)]
mod = 10 ** 9 + 7  # common prime modulus
MOD = 998244353  # NTT-friendly prime modulus
sys.setrecursionlimit(10000000)
inf = float('inf')
eps = 10 ** (-10)  # tolerance for float comparisons
dy = [0, 1, 0, -1]  # 4-neighbour row offsets
dx = [1, 0, -1, 0]  # 4-neighbour column offsets
#############
# Main Code #
#############
# 鳩の巣原理
# 組み合わせの通りと比べてその取りうる値の種類が小さい時に使われる
"""
088 - Similar but Different Ways(★6)
二つの異なる部分集合を探す
条件1→条件2 or 条件2→条件1か
条件1→条件2は総和がkになる組み合わせを求めた後DPの復元が難しい 2^Nいる
条件2→条件1は条件Qを満たすのを求めるのが難しい
そもそも2^N通りをすべて出すのは無理では?
最適な方法で2つ構築していく
N <= 20ならできるか?計算量はN^2 * 2^Nぐらい
最大2^N通りの異なる総和が作れる
条件1→条件2をすることを考えると...
sum(A)が <= 8888なので結構denseじゃない?
この上限ならN <= 12で済むはず
鳩の巣原理
上限が小さければ意外と一回あたりの探索の量が少ない
"""
N, Q = getNM()
A = getList()
# ng[i]: indices that may not share a subset with element i (0-based).
ng = [set() for i in range(N)]
for _ in range(Q):
    x, y = getNM()
    ng[x - 1].add(y - 1)
    ng[y - 1].add(x - 1)
su = sum(A)
# L[j]: one valid subset (as a set of indices) with sum j, or empty if none.
# NOTE: all su+1 slots initially alias the SAME empty set; this is safe here
# because entries are only ever rebound (L[j + A[i]] = ...), never mutated.
L = [set()] * (su + 1)
for i in range(N):
    # Try to build new subsets that include element i.
    for j in range(su, -1, -1):
        if j > 0 and not L[j]:
            continue
        # No conflict: element i can join the subset with sum j.
        if not (L[j] & ng[i]):
            # A subset with this sum already exists -> two different
            # subsets share a sum (pigeonhole); report both and stop.
            if L[j + A[i]]:
                # old subset, new subset
                prev, next = L[j + A[i]], L[j] | set([i])
                print(len(prev))
                print(*[i + 1 for i in prev])
                print(len(next))
                print(*[i + 1 for i in next])
                exit()
            else:
                L[j + A[i]] = L[j] | set([i])
|
#test4.1.py
# Find every four-digit number equal to the sum of the 4th powers of its digits.
for num in range(1000, 10000):
    i, j, k, n = (int(ch) for ch in str(num))
    if i ** 4 + j ** 4 + k ** 4 + n ** 4 == num:
        print('{}{}{}{}'.format(i, j, k, n))
'''
s = ""
for i in range(1000, 10000):
t = str(i)
if pow(eval(t[0]),4) + pow(eval(t[1]),4) + pow(eval(t[2]),4)+pow(eval(t[3]),4) == i :
s += "{},".format(i)
print(s[:-1])
'''
|
# File: hw3_part3.py
# Author: Joel Okpara
# Date: 2/21/2016
# Section: 04
# E-mail: joelo1@umbc.edu
# Description: This program guesses what character the user is thinking of
def main():
    """Guess the user's character via a short yes/no decision tree.

    NOTE(review): answers other than exactly "y"/"n" fall through silently.
    """
    #This segment checks if the character is a woman
    woman = input("Is your character a woman?(y/n)")
    if woman == "y":
        blueEyes = input("Does your character have blue eyes?(y/n)")
        if blueEyes == "y":
            print("Your character is Jane!")
        else:
            print("Your character is Marni!")
    elif woman == "n":
        #If the character is not a woman, checks for glasses
        glasses = input("Does your character wear glasses?(y/n)")
        if glasses == "y":
            print("Your character is Adrian!")
        elif glasses == "n":
            #if the character does not wear glasses, checks for beard
            beard = input("Does your character have a beard?(y/n)")
            if beard == "n":
                print("Your character is Zhang!")
            else:
                print ("Your character is Peder!")
main()
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 21 00:14:54 2020
@author: Meng
"""
import pandas as pd #pandas处理数据
import seaborn as sb
import matplotlib.pyplot as plt
import numpy as np #numpy数学函数
from sklearn.linear_model import LinearRegression #导入线性回归
from sklearn.model_selection import KFold #导入交叉验证
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression#导入逻辑验证
from sklearn.ensemble import RandomForestClassifier#导入随机森林
from sklearn.feature_selection import SelectKBest#选择最佳特征
import re
pd.set_option('display.max_columns',None)  # show all columns when printing
titanic = pd.read_csv("train.csv")
print(titanic.head())
print(titanic.describe())
print(titanic.info())
# Fill missing values: use the column median for Age via fillna()
titanic["Age"] = titanic["Age"].fillna(titanic["Age"].median())
print(titanic.describe())
# Convert string values to numeric codes
# .loc selects by label: .loc[rows, columns]
titanic.loc[titanic["Sex"] == "male","Sex"] = 0
titanic.loc[titanic["Sex"] == "female","Sex"] = 1
print(titanic.describe())
# Count passengers per port of embarkation
print(titanic.groupby('Embarked').Name.count())
titanic["Embarked"] = titanic["Embarked"].fillna("S")
# Encode the Embarked column
titanic.loc[titanic["Embarked"] == "S","Embarked"] = 0
titanic.loc[titanic["Embarked"] == "C","Embarked"] = 1
titanic.loc[titanic["Embarked"] == "Q","Embarked"] = 2
# Scatter plots to inspect distributions
#sb.pairplot(titanic,hue="Age")
# Features used for classification
predictors = ["Pclass","Sex","Age","SibSp","Parch","Fare","Embarked"]
# Split the sample into 11 folds for cross-validation
#alg = RandomForestClassifier(random_state = 10,warm_start = True,n_estimators = 26,max_depth = 6,max_features ='sqrt')
alg = RandomForestClassifier(random_state = 1,n_estimators = 18)
# NOTE(review): recent scikit-learn raises unless shuffle=True accompanies
# random_state in KFold — confirm the pinned sklearn version.
kf = KFold(n_splits = 11,random_state = 1)
scores = model_selection.cross_val_score(alg,titanic[predictors],titanic["Survived"],cv=kf)
#alg = LogisticRegression(random_state = 1)
#scores = model_selection.cross_val_score(alg,titanic[predictors],titanic["survied"],cv = 11)
#print(scores)
print(scores.mean())
# Apply the same cleaning/encoding to the Kaggle test set.
titanic_test = pd.read_csv("test.csv")
titanic_test['Age'] = titanic_test['Age'].fillna(titanic_test['Age'].median())
titanic_test['Fare'] = titanic_test['Fare'].fillna(titanic_test['Fare'].median())
titanic_test.loc[titanic_test['Sex'] == 'male','Sex'] = 0
titanic_test.loc[titanic_test['Sex'] == 'female','Sex'] = 1
titanic_test['Embarked'] = titanic_test['Embarked'].fillna('S')
titanic_test.loc[titanic_test['Embarked'] == 'S', 'Embarked'] = 0
titanic_test.loc[titanic_test['Embarked'] == 'C', 'Embarked'] = 1
titanic_test.loc[titanic_test['Embarked'] == 'Q', 'Embarked'] = 2
# Initialize the algorithm class
alg = LogisticRegression(random_state=1)
# Train the algorithm using all the training data
alg.fit(titanic[predictors], titanic["Survived"])
# Make predictions using the test set.
predictions = alg.predict(titanic_test[predictors])
# Create a new dataframe with only the columns Kaggle wants from the dataset.
submission = pd.DataFrame({"PassengerId": titanic_test["PassengerId"],"Survived": predictions})
print(submission)
submission.to_csv("submission1.csv", index=False)
|
from django.test import TestCase
from .models import Tutorial
class TutorialTestCase(TestCase):
    """Verify that duplicate titles get de-duplicated slugs."""
    def setUp(self):
        # Keep references to the created rows instead of assuming pk=1/pk=2:
        # primary keys are not guaranteed to restart at 1 across test runs
        # or database backends with non-reset sequences.
        self.first = Tutorial.objects.create(title="this is a title")
        self.second = Tutorial.objects.create(title="this is a title")
    def test_check_slugs(self):
        # The second object with the same title must get a suffixed slug.
        self.assertEqual(self.first.slug, 'this-is-a-title')
        self.assertEqual(self.second.slug, 'this-is-a-title-2')
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import pytest
from pants.backend.kotlin.compile import kotlinc_plugins
from pants.backend.kotlin.compile.kotlinc import rules as kotlinc_rules
from pants.backend.kotlin.lint.ktlint import rules as ktlint_fmt_rules
from pants.backend.kotlin.lint.ktlint import skip_field
from pants.backend.kotlin.lint.ktlint.rules import KtlintFieldSet, KtlintRequest
from pants.backend.kotlin.target_types import KotlinSourcesGeneratorTarget, KotlinSourceTarget
from pants.backend.kotlin.target_types import rules as target_types_rules
from pants.build_graph.address import Address
from pants.core.goals.fmt import FmtResult
from pants.core.util_rules import config_files, source_files, system_binaries
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.rules import QueryRule
from pants.engine.target import Target
from pants.jvm import classpath, jdk_rules
from pants.jvm.resolve.coursier_fetch import rules as coursier_fetch_rules
from pants.jvm.resolve.coursier_setup import rules as coursier_setup_rules
from pants.jvm.strip_jar import strip_jar
from pants.jvm.util_rules import rules as util_rules
from pants.testutil.rule_runner import PYTHON_BOOTSTRAP_ENV, RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
    """Pytest fixture: a RuleRunner wired with everything Ktlint formatting
    needs (JVM/coursier resolution, kotlinc, ktlint rules, source files)."""
    rule_runner = RuleRunner(
        rules=[
            *config_files.rules(),
            *classpath.rules(),
            *coursier_fetch_rules(),
            *coursier_setup_rules(),
            *jdk_rules.rules(),
            *strip_jar.rules(),
            *kotlinc_rules(),
            *kotlinc_plugins.rules(),
            *util_rules(),
            *target_types_rules(),
            *ktlint_fmt_rules.rules(),
            *skip_field.rules(),
            *system_binaries.rules(),
            *source_files.rules(),
            QueryRule(FmtResult, (KtlintRequest.Batch,)),
            QueryRule(SourceFiles, (SourceFilesRequest,)),
        ],
        target_types=[KotlinSourceTarget, KotlinSourcesGeneratorTarget],
    )
    rule_runner.set_options(
        [],
        env_inherit=PYTHON_BOOTSTRAP_ENV,
    )
    return rule_runner
GOOD_FILE = """\
package org.pantsbuild.example
open class Foo {
val CONSTANT = "Constant changes"
}
"""
BAD_FILE = """\
package org.pantsbuild.example
open class Bar {
val CONSTANT = "Constant changes"
}
"""
FIXED_BAD_FILE = """\
package org.pantsbuild.example
open class Bar {
val CONSTANT = "Constant changes"
}
"""
def run_ktlint(rule_runner: RuleRunner, targets: list[Target]) -> FmtResult:
    """Run the Ktlint formatter over *targets* and return the FmtResult."""
    field_sets = [KtlintFieldSet.create(tgt) for tgt in targets]
    # Gather the targets' source files to build the formatter input snapshot.
    input_sources = rule_runner.request(
        SourceFiles,
        [
            SourceFilesRequest(field_set.source for field_set in field_sets),
        ],
    )
    fmt_result = rule_runner.request(
        FmtResult,
        [
            KtlintRequest.Batch(
                "",
                input_sources.snapshot.files,
                partition_metadata=None,
                snapshot=input_sources.snapshot,
            ),
        ],
    )
    return fmt_result
def test_passing(rule_runner: RuleRunner) -> None:
    """A well-formatted file passes through Ktlint unchanged."""
    rule_runner.write_files({"Foo.kt": GOOD_FILE, "BUILD": "kotlin_sources(name='t')"})
    tgt = rule_runner.get_target(Address("", target_name="t", relative_file_path="Foo.kt"))
    fmt_result = run_ktlint(rule_runner, [tgt])
    assert fmt_result.output == rule_runner.make_snapshot({"Foo.kt": GOOD_FILE})
    assert fmt_result.did_change is False
def test_failing(rule_runner: RuleRunner) -> None:
    """A badly formatted file is rewritten and reported as changed."""
    rule_runner.write_files({"Bar.kt": BAD_FILE, "BUILD": "kotlin_sources(name='t')"})
    tgt = rule_runner.get_target(Address("", target_name="t", relative_file_path="Bar.kt"))
    fmt_result = run_ktlint(rule_runner, [tgt])
    assert fmt_result.output == rule_runner.make_snapshot({"Bar.kt": FIXED_BAD_FILE})
    assert fmt_result.did_change is True
def test_multiple_targets(rule_runner: RuleRunner) -> None:
    """Formatting a mixed batch only rewrites the badly formatted file."""
    rule_runner.write_files(
        {"Foo.kt": GOOD_FILE, "Bar.kt": BAD_FILE, "BUILD": "kotlin_sources(name='t')"}
    )
    tgts = [
        rule_runner.get_target(Address("", target_name="t", relative_file_path="Foo.kt")),
        rule_runner.get_target(Address("", target_name="t", relative_file_path="Bar.kt")),
    ]
    fmt_result = run_ktlint(rule_runner, tgts)
    assert fmt_result.output == rule_runner.make_snapshot(
        {"Foo.kt": GOOD_FILE, "Bar.kt": FIXED_BAD_FILE}
    )
    assert fmt_result.did_change is True
|
import pprint
def raj_template_dump():
    """Print each template entry (a dict of media type plus browse modes)."""
    dump_array = [
        {
            "type": "movies",
            "modes": ["top250", "random", "list all movies", "my fav movies"],
        },
        {
            "type": "books",
            # NOTE(review): "my fav movies" under books looks like a
            # copy/paste slip -- left unchanged because it is runtime data.
            "modes": ["popular", "random", "list all books", "my fav movies"],
        },
        {
            "type": "series",
            "modes": ["top250_tv", "random", "list all series", "horrible shows"],
        },
    ]
    # Bug fix: the original used the Python 2 `print` statement
    # (`print dump_array[i]`), which is a syntax error under Python 3.
    # Iterate the entries directly instead of indexing by enumerate().
    for entry in dump_array:
        # pprint.pprint(dump_array)
        print(entry)

raj_template_dump()
|
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import string
import itertools as it
def comp_words(word1, word2):
    """Compare two words position by position.

    Each aligned letter pair becomes a frozenset ({x} when the letters match,
    {x, y} when they differ).  Returns True when there are exactly two
    distinct pair-sets of which one is a match, or when exactly two distinct
    matching pairs occur; False otherwise.
    """
    pair_sets = {frozenset(pair) for pair in zip(word1, word2)}
    matches = sum(1 for ps in pair_sets if len(ps) == 1)
    if len(pair_sets) == 2 and matches == 1:
        return True
    if matches == 2:
        return True
    return False
# Build an "intersection graph" from data_lines.txt and draw it circularly.
G = nx.Graph()
graph_options = {
    'with_labels': False,
    'node_color': 'k',
    'edge_color': 'r',
    'node_size': 500,
}
label_options = {
    'font_color': 'w',
    'font_size': 12,
}
fig, ax = plt.subplots(figsize=(10,5))
ax.set_title('Intersection graph')
# pos = nx.circular_layout(F)
# axes[0].set_title('Labelled graph')
# nx.draw(F, pos, with_labels=True, ax = axes[0])
# axes[1].set_title('Nonlabeled graph')
# nx.draw(F, pos, with_labels=False, ax = axes[1])
g_edges = []
# Line i of data_lines.txt holds a comma-separated list of node ids; an edge
# is added from node (i + 1) to each listed id.
with open('data_lines.txt', 'r') as f:
    for i, line in enumerate(f.readlines()):
        g_edges = list(it.chain(g_edges, it.product([i + 1],
            list(map(int,line.strip().split(','))))))
# Nodes 1..9.  NOTE(review): assumes the data file never references ids
# outside this range -- confirm against data_lines.txt.
G.add_nodes_from(list(range(1,10)))
# identity labels: node id shown as its own label
labels = {n:n for n,val in G.nodes.items()}
G.add_edges_from(g_edges)
pos = nx.circular_layout(G)
nx.draw_networkx_labels(G, pos, labels, **label_options)
nx.draw(G, pos, **graph_options)
# number of edges, number of nodes
print(G.size(), G.order())
plt.show()
|
# appconfig - remote control for DLCE apps
import pathlib
from . import config
__all__ = ['PKG_DIR', 'APPS_DIR', 'CONFIG_FILE', 'APPS']

# Directory containing this package.
PKG_DIR = pathlib.Path(__file__).parent
# Sibling 'apps' directory next to the package.
APPS_DIR = PKG_DIR.parent / 'apps'
# Central ini file describing the apps.
CONFIG_FILE = APPS_DIR / 'apps.ini'
# Parsed configuration (project-local config.Config).
APPS = config.Config.from_file(CONFIG_FILE)
# TODO: consider https://pypi.python.org/pypi/pyvbox
# for scripting tests with virtualbox
|
from os import path, mkdir
import json
from datetime import datetime
from modules.logmanager import LogManager
from modules.navigationmanager import NavigationManager
from modules.datamanager import DataManager
from multiprocessing import Process
def retrieve_incremental_cases_info_from_url(urlToBeRequested, startingUrlOfTheProvince, navigationManger, dataManager, dataFile, logManager, logFile, elaborationDate, limitDate):
    """Fetch one url, extract its cases and store them.

    Returns a (succeeded, limit_reached) pair; any failure is logged and
    reported as (False, False).
    """
    logManager.add_checkpoint_log(logFile, urlToBeRequested)
    try:
        page = navigationManger.request_url(urlToBeRequested)
        extracted = dataManager.extract_cases(page, startingUrlOfTheProvince, elaborationDate)
        cases_to_store, limit_reached = dataManager.check_incremetal_cases(extracted, limitDate)
        dataManager.store_cases(dataFile, cases_to_store)
        return True, limit_reached
    except Exception as err:
        logManager.add_error_log(logFile, str(err))
        return False, False
#everything that happens in this function is referred to a single url elaboration
def retrieve_online_negative_data(elaborationsDirectory, startingUrl, totPages, fixedPageSuffix, fixedPagePrefix, recoveryTentatives, limitDate):
    """Crawl one province site page by page until the date limit is reached,
    storing cases to a per-province data file and retrying failed urls.

    NOTE(review): totPages is accepted for interface compatibility but unused.
    """
    # working directory for this url elaboration, named after the province
    province = startingUrl[12:len(startingUrl)-6]
    elaborationDate = datetime.now().strftime('%m.%d.%YT%H.%M.%S')
    siteElaborationDirectory = elaborationsDirectory + province + '//'
    if not path.exists(siteElaborationDirectory):
        mkdir(siteElaborationDirectory)
    # log file and data file for this elaboration
    logManager = LogManager()
    logFile = logManager.create_incremental_log_file(siteElaborationDirectory, elaborationDate)
    dataManager = DataManager()
    dataFile = dataManager.create_incremental_data_file(siteElaborationDirectory, elaborationDate)
    # client session plus the recovery list
    # (bug fix: urlsToRecover was initialised twice in the original)
    navigationManger = NavigationManager()
    urlsToRecover = []
    limitReached = False
    # first page
    print(f'Requesting - {startingUrl}')
    retrieved_ok, limitReached = retrieve_incremental_cases_info_from_url(startingUrl, startingUrl, navigationManger, dataManager, dataFile, logManager, logFile, elaborationDate, limitDate)
    if not retrieved_ok:
        urlsToRecover.append(startingUrl)
    # re-sets the page number after the first page has been done
    pageNumber = 1
    # crawling loop: follow next pages until the date limit is hit
    while not limitReached:
        nextUrl, pageNumber = navigationManger.calculate_next_url(startingUrl, pageNumber, fixedPageSuffix, fixedPagePrefix)
        print(f'Requesting - {nextUrl}')
        retrieved_ok, limitReached = retrieve_incremental_cases_info_from_url(nextUrl, startingUrl, navigationManger, dataManager, dataFile, logManager, logFile, elaborationDate, limitDate)
        if not retrieved_ok:
            # bug fix: record the url that actually failed (the original
            # appended startingUrl for every failure)
            urlsToRecover.append(nextUrl)
    # retry the urls that failed, up to recoveryTentatives rounds
    if len(urlsToRecover) > 0:
        failedRecoveryFile = logManager.create_urls_failed_recovery_file(siteElaborationDirectory, elaborationDate)
        recoveryRound = 0
        while recoveryRound <= recoveryTentatives and urlsToRecover:
            # iterate over a copy: we remove recovered urls from the list
            for urlToRecover in list(urlsToRecover):
                print(f'Requesting - {urlToRecover}')
                retrieved_ok, _ = retrieve_incremental_cases_info_from_url(urlToRecover, startingUrl, navigationManger, dataManager, dataFile, logManager, logFile, elaborationDate, limitDate)
                if retrieved_ok:
                    # bug fix: the original removed startingUrl (not the
                    # recovered url) and mutated the list while iterating it
                    urlsToRecover.remove(urlToRecover)
            recoveryRound += 1
        # persist whatever could not be recovered
        failedRecoveryFile.write(str(urlsToRecover))
        failedRecoveryFile.close()
    logFile.close()
    dataFile.close()
if __name__ == '__main__':
    # loads crawler config from file
    with open('config.json', 'r+') as configFile:
        configData = json.load(configFile)
        elaborationsDirectory = configData['elaborationsDirectory']
        fixedPagePrefix = configData['fixedPagePrefix']
        recoveryTentatives = configData['recoveryTentatives']
        urls = configData['urlList']
    # creates elaborations root directory that will contain single elaboration results, if not exists
    if not path.exists(elaborationsDirectory):
        mkdir(elaborationsDirectory)
    # starts jobs parallelism on different urls using multiprocessing:
    # one worker process per configured url
    jobs = []
    for startingUrl in urls:
        p = Process(target=retrieve_online_negative_data, args=(elaborationsDirectory, startingUrl['url'], int(startingUrl['totPages']), startingUrl['fixedPageSuffix'], fixedPagePrefix, recoveryTentatives, startingUrl['limitDate']))
        p.start()
        jobs.append(p)
    # close the multiprocessing action once all the jobs are done
    for job in jobs:
        job.join()
class ShelfNotAvailable(Exception):
    """Raised when a requested item is not available on a product's shelf."""
|
import numpy as np
from tensorflow.keras.applications import VGG16
IMG_SIZE = 128

# Frozen VGG16 feature extractor.
# Bug fix: the original passed pooling="ave", which is not one of Keras's
# valid values ('avg', 'max', None) and was therefore silently treated as no
# pooling.  The reshape below relies on the un-pooled
# (IMG_SIZE/32, IMG_SIZE/32, 512) feature map, so make that explicit with
# pooling=None (passing 'avg' would break the reshape).
model = VGG16(include_top=False, weights="imagenet",
              input_shape=(IMG_SIZE, IMG_SIZE, 3), pooling=None)
for layer in model.layers:
    layer.trainable = False

# Load images -- assumed (N, IMG_SIZE, IMG_SIZE, 3) uint8 -- and scale to [0, 1].
in_npy = "gen_fig.npy"
data = np.load(f"./{in_npy}")
data = data / 255

# Flatten each (4, 4, 512) feature map into one vector per image and save.
pred = model.predict(data)
pred = pred.reshape((len(data), int((IMG_SIZE / 32) ** 2 * 512)))
np.save("human_feature.npy", pred)
import requests
import os
import gitlab
import sys
import configparser
# Credential resolution order: ini file named by GITLAB_CREDENTIALS, then the
# plain GITLAB_URL / GITLAB_PRIVATE_TOKEN environment variables.
gitlab_url = None
gitlab_private_token = None
gitlab_file = os.environ.get('GITLAB_CREDENTIALS', None)
if gitlab_file:
    config = configparser.ConfigParser()
    try:
        config.read(gitlab_file)
        gitlab_url = config["GITLAB"]["GITLAB_URL"]
        gitlab_private_token = config["GITLAB"]["GITLAB_PRIVATE_TOKEN"]
    except configparser.MissingSectionHeaderError as e:
        # NOTE(review): configparser exceptions expose `.message` only on some
        # versions -- confirm before relying on it here and below.
        print(
            f"invalid config file. Use [HEADER] and key=value \n {e.message}")
    except configparser.ParsingError as e:
        print(f"invalid config file. Use key=value notation \n {e.message}")
    except KeyError as e:
        print(
            f"config file exception \n {e} not provided. Trying to read from environment..")
# fall back to environment variables for anything still unset
if not gitlab_url:
    gitlab_url = os.environ.get('GITLAB_URL')
if not gitlab_private_token:
    gitlab_private_token = os.environ.get('GITLAB_PRIVATE_TOKEN')
# both settings are mandatory; abort with a non-zero exit code otherwise
if not gitlab_url:
    print("missing GITLAB_URL. Use GITLAB_URL environment variable")
    sys.exit(1)
if not gitlab_private_token:
    print("missing GITLAB_PRIVATE_TOKEN. Use GITLAB_PRIVATE_TOKEN environment variable")
    sys.exit(1)
# authenticated client, ready for API calls
gl = gitlab.Gitlab(gitlab_url, private_token=gitlab_private_token)
gl.auth()
|
from utils.function.setup import *
from utils.lib.user_data import *
from main.activity.desktop_v3.activity_login import *
from main.activity.desktop_v3.activity_logout import *
from main.activity.desktop_v3.activity_user_settings import *
import unittest
class TestNotification(unittest.TestCase):
    """UI test: log in, edit the notification settings, log out."""
    #Instance
    # target environment label passed to the activity helpers
    _site = "live"

    def setUp(self):
        # NOTE(review): test_driver is assigned but never used.
        test_driver = ""
        # fresh firefox driver per test (tsetup is a project helper)
        self.driver = tsetup("firefox")
        self.flag = 0

    def test_edit_notification(self):
        """Log in as user8, change notification settings, then log out."""
        print("> ::TEST EDIT NOTIFICATION::")
        print("============================")
        driver = self.driver
        # user8 comes from utils.lib.user_data
        self.user= user8
        email = self.user['email']
        pwd = self.user['password']
        #Object Activity
        loginValidate = loginActivity()
        logoutValidate = logoutActivity()
        editNotification = settingNotificationActivity()
        #--
        loginValidate.do_login(driver, self.user, email, pwd, self._site)
        editNotification.notif_setting(driver)
        logoutValidate.do_logout(driver, self._site)

    def tearDown(self):
        print("> ::Testing has done, the browser window will be closed soon::")
        self.driver.quit()
if __name__ == '__main__':
    # suppress ResourceWarning noise from the webdriver sockets
    unittest.main(warnings = 'ignore')
# -*- coding: utf-8 -*-
# Feed/price-list constants (meanings inferred from the names -- confirm
# against the code that consumes them).
SPECIAL_PRICE = 40
IS_NEW = 42
RECOMMEND_PRICE = 10
# "Под заказ" = "made to order"; runtime string, deliberately not translated.
BY_ORDER = "Под заказ"
EMPTY_STRING = ""
DOUBLE_DASH = "--"
|
#!/usr/bin/python3
import json
import typing
import argparse
from classes import Student, Room
from controllers import FilesController, DBController
from models import Model
def get_input_arguments():
    """Parse the three positional CLI arguments: students file, rooms file
    and output file (json or xml)."""
    parser = argparse.ArgumentParser(description='give 3 arguments - path to students.json, path to rooms.json, output_path path (<name>.json,xml)')
    positionals = (
        ('students', '[path to students.json]', 'path to students.json file'),
        ('rooms', '[path to rooms.json]', 'path to rooms.json file'),
        ('output_file', '[output file (xml or json)]', 'output file ( xml or json) file'),
    )
    for dest, metavar, help_text in positionals:
        parser.add_argument(dest, metavar=metavar, type=str, help=help_text)
    return parser.parse_args()
if __name__ == "__main__":
args = get_input_arguments()
# init controller
File_controller = FilesController(students_path = args.students, rooms_path = args.rooms)
# students to rooms & file export
File_controller.concatinate_students_to_rooms_from_json()
if args.output_file[3][-3:] == 'xml':
File_controller.export_xml(output_path = args.output_file)
else:
File_controller.export_json(output_path = args.output_file)
DB_controll = DBController(students_path = args.students, rooms_path = args.rooms)
# db import
DB_controll.import_to_db()
# selects
DB_controll.show_all_selects() |
# coding: utf-8
import redis
import json
import hashlib
import urllib.parse
import lglass.database.base
import lglass.rpsl
@lglass.database.base.register("redis")
class RedisDatabase(lglass.database.base.Database):
    """ Caching database layer which uses redis to cache the objects and search
    results, but without redundant caching like CachedDatabase """

    # hash algorithm used to derive redis keys from lookup strings
    hash_algorithm = "sha1"
    # optional format string for cache keys; when None, prefix + hexdigest
    key_format = None

    def __init__(self, db, _redis, timeout=600, prefix="lglass:"):
        # Accept an existing client, connection kwargs, positional args,
        # or a redis URL.
        if isinstance(_redis, redis.Redis):
            self.redis = _redis
        elif isinstance(_redis, dict):
            self.redis = redis.Redis(**_redis)
        elif isinstance(_redis, tuple):
            self.redis = redis.Redis(*_redis)
        elif isinstance(_redis, str):
            self.redis = redis.Redis.from_url(_redis)
        else:
            raise TypeError("Expected redis.Redis, dict or tuple as redis instance, got {}".format(type(_redis)))
        self.database = db      # backing (authoritative) database
        self.timeout = timeout  # cache TTL in seconds
        self.prefix = prefix    # key namespace prefix

    def get(self, type, primary_key):
        """Read-through fetch of a single object, caching the result."""
        obj = self.redis.get(self._key_for(type, primary_key))
        if obj is None:
            obj = self.database.get(type, primary_key)
            self.redis.set(self._key_for(type, primary_key),
                    self._serialize(obj),
                    ex=self.timeout)
        else:
            obj = self._deserialize(obj.decode())
        return obj

    def list(self):
        """Read-through listing of all (type, primary_key) entries."""
        listing = self.redis.get(self._key_for_list())
        if listing is None:
            listing = list(self.database.list())
            self.redis.set(self._key_for_list(),
                    self._serialize_listing(listing),
                    ex=self.timeout)
        else:
            listing = self._deserialize_listing(listing.decode())
        return listing

    def find(self, key, types=None):
        """Read-through search; results are cached per (key, types) pair."""
        if types is None:
            types = self.object_types
        results = self.redis.get(self._key_for_find(key, types))
        if results is None:
            results = list(self.database.find(key, types))
            self.redis.set(self._key_for_find(key, types),
                    self._serialize_find(results),
                    ex=self.timeout)
        else:
            results = self._deserialize_find(results.decode())
        return results

    def save(self, obj):
        """Write-through save: persist to the backing db, refresh the cache."""
        self.database.save(obj)
        self.redis.set(self._key_for(obj.type, obj.primary_key),
                self._serialize(obj),
                ex=self.timeout)

    def delete(self, type, primary_key):
        """Delete from the backing db and invalidate the cache entry."""
        self.database.delete(type, primary_key)
        self.redis.delete(self._key_for(type, primary_key))

    def _serialize(self, obj):
        # objects are cached as their JSON form
        return json.dumps(obj.to_json_form())

    def _deserialize(self, string):
        return lglass.rpsl.Object(json.loads(string))

    def _serialize_listing(self, listing):
        # listing entries are (key, value) pairs; JSON needs lists
        result = [[key, value] for key, value in listing]
        return json.dumps(result)

    def _deserialize_listing(self, string):
        listing = json.loads(string)
        return [(key, value) for key, value in listing]

    def _serialize_find(self, finds):
        # cache only each object's spec; _deserialize_find re-fetches by spec
        result = [list(obj.real_spec) for obj in finds]
        return json.dumps(result)

    def _deserialize_find(self, string):
        finds = json.loads(string)
        result = []
        for spec in finds:
            result.append(self.get(*spec))
        return result

    def _key_hash(self, key):
        """Derive the redis key for *key* via the configured hash."""
        h = hashlib.new(self.hash_algorithm)
        h.update(key.encode())
        if self.key_format:
            return self.key_format.format(h.hexdigest())
        else:
            return self.prefix + h.hexdigest()

    def _key_for_find(self, key, types):
        return self._key_hash("find+{key}+{types}".format(
            key=key, types=",".join(types)))

    def _key_for_list(self):
        return self._key_hash("list")

    def _key_for(self, type, primary_key):
        # NOTE(review): the prefix= argument is unused by this template (there
        # is no {prefix} placeholder); harmless but misleading.
        return self._key_hash("{type}+{primary_key}".format(prefix=self.prefix,
            type=type, primary_key=primary_key))

    @classmethod
    def from_url(cls, url):
        """ Create instance from URL which has the form
        whois+redis://{host}:{port}/{database}?timeout={n}&format={format}
        """
        # rewrite the scheme to plain "redis" before handing it to redis-py
        rurl = list(url)
        rurl[0] = "redis"
        rurl = urllib.parse.urlunparse(rurl)
        # NOTE(review): the backing database is None here -- presumably set
        # by the caller afterwards; confirm against the factory that uses
        # the "redis" registration.
        self = cls(None, redis.Redis.from_url(rurl))
        if url.query:
            query = urllib.parse.parse_qs(url.query)
            if "timeout" in query:
                self.timeout = int(query["timeout"][-1])
            if "format" in query:
                self.key_format = query["format"][-1]
        return self
|
# network = input("enter network id: ")
# subnet = list(map(int,input("enter subnet id: ").split('.')))
from prettytable import PrettyTable
def dec_to_bin(x):
    """Return x's binary digits as a plain int (e.g. 5 -> 101)."""
    digits = bin(x)[2:]
    return int(digits)
def bin_to_dec(n):
    # NOTE(review): despite the name, this converts a DECIMAL int into its
    # BINARY digit string (e.g. 5 -> "101").  It is unused in this file;
    # rename or fix with care since callers elsewhere may rely on it.
    return bin(n).replace("0b", "")
# Interactive classful subnet calculator: reads a network (optionally with a
# CIDR prefix), derives subnet/host counts, and prints a per-subnet table.
x = PrettyTable()
is_cidr = input('is you network include CIDR? Y/n: ')
if is_cidr == 'Y':
    print("Example of network: 172.168.12.0/24 ")
    net = input("Enter your network id: ").split('/')
    network = net[0].split(".")
    # Build the 32-bit mask from the prefix length, then split into octets.
    prefix_len = int(net[1])
    a = ''.join('1' if bit < prefix_len else '0' for bit in range(32))
    subnet = [int(a[start:start + 8], 2) for start in range(0, 32, 8)]
else:
    network = input("Enter your network id: ").split(".")
    subnet = list(map(int, input("Enter your subnet id: ").split(".")))
    for octet in subnet:
        if octet > 255:
            raise ValueError("Please enter a valid subnet mask")
# Classful ranges.  Bug fix: class B starts at 128 and class C at 192; the
# original used strict '<' and misclassified 128.x and 192.x addresses.
first_octet = int(network[0])
if 1 <= first_octet <= 127:
    print("Network is A group member")
elif 128 <= first_octet <= 191:
    print("Network is B group member")
elif 192 <= first_octet <= 223:
    print("Network is C group member")
# block_size comes from the first non-255 octet of the mask; only non-255
# octets contribute borrowed (1) and host (0) bits to the counts below.
block_size = 0
sub_bin_only_cidr = []
for octet in subnet:
    if octet < 255:
        if block_size == 0:
            block_size = 256 - octet
        # Bug fix: pad to 8 bits -- the unpadded form of octet 0 is just '0',
        # which undercounted the host bits (1 instead of 8).
        sub_bin_only_cidr.append(format(octet, '08b'))
zero = 0
one = 0
for bits in sub_bin_only_cidr:
    for ch in str(bits):
        if ch == '1':
            one += 1
        elif ch == '0':
            zero += 1
total_subnet = 2 ** one        # 2^(borrowed bits)
total_host = 2 ** zero         # 2^(host bits)
total_valid_host = total_host - 2  # minus network id and broadcast
print(f'total block size: {block_size}')
print(f'total number of subnet/network is: {total_subnet}')
print(f'total host is: {total_host}')
print(f'total valid host is: {total_valid_host}')
# Enumerate every subnet: id, first/last valid host, broadcast.
current_subnet = 0
fast_valid_host = current_subnet + 1
calculate = [{"subnet_id": current_subnet, "first_valid_host": fast_valid_host}]
for _ in range(total_subnet - 1):
    current_subnet += block_size
    calculate.append({"subnet_id": current_subnet, "first_valid_host": current_subnet + 1})
for idx in range(total_subnet):
    if idx == total_subnet - 1:
        # last subnet's broadcast is the top of the octet
        broadcast = 255
    else:
        broadcast = calculate[idx + 1].get("subnet_id") - 1
    calculate[idx]["last_valid_id"] = broadcast - 1
    calculate[idx]["broadcast_address"] = broadcast
x.field_names = ["Subnet id", "First valid host", "Last valid id", "Broadcast address"]
for row in calculate:
    x.add_row(row.values())
print(x)
|
# -*- coding: utf-8 -*-
class Solution:
    def strWithout3a3b(self, A, B):
        """Build a string of A 'a's and B 'b's with no "aaa" or "bbb" run.

        Greedy by chunks: while one letter is in surplus, emit it doubled
        with a single minority letter appended; otherwise alternate.
        """
        if A >= 2 * B:
            parts = B * ["aab"] + (A - 2 * B) * ["a"]
        elif A >= B:
            parts = (A - B) * ["aab"] + (2 * B - A) * ["ab"]
        elif B >= 2 * A:
            parts = A * ["bba"] + (B - 2 * A) * ["b"]
        else:
            parts = (B - A) * ["bba"] + (2 * A - B) * ["ab"]
        return "".join(parts)
if __name__ == "__main__":
solution = Solution()
assert "bba" == solution.strWithout3a3b(1, 2)
assert "aabaa" == solution.strWithout3a3b(4, 1)
|
# Small tutorial script exercising the list API (comments translated to
# English from the Russian original).
print('Hello')
# Create a list and print it right away.
spisok2 = ['Gosha','Max','Denis']
print(spisok2)
# Append one value.
spisok2.append('Alex')
print(spisok2)
# Create a second list and merge all its contents into the first.
spisok1 = ['Gordon']
spisok2.extend(spisok1)
print(spisok2)
# Remove a specific value.
spisok2.remove('Alex')
print(spisok2)
# Delete the second list entirely.
del spisok1
print('')
# Reverse the list in place.
print(spisok2)
spisok2.reverse()
print(spisok2)
# Create a new list and sort it (string sort: '1' < '67' < '83').
print('')
spisok = ['83','1','67']
print(spisok)
spisok.sort()
print(spisok)
# Clear it.
spisok.clear()
print(spisok)
# Print the 1st and 3rd elements.  Bug fix: the variable was named `str`,
# shadowing the built-in; renamed to `names`.
print('')
names = ['gosha','alex','masha','valera']
print(names[0], names[2])
|
import argparse, logging, json, sys
from ..algorithms.utils import get_blocks_shape, get_named_volumes, numeric_to_3d_pos, get_theta
DEBUG=False

def compute_max_mem(R, B, O, nb_bytes_per_voxel):
    """ Algorithm to compute the maximum amount of memory to be consumed by the keep algorithm.

    Sweeps the buffers of the R-image partition in C order, tracking the
    remainder fragments kept along each axis, and returns the peak number of
    voxels held at any point (multiply by nb_bytes_per_voxel for bytes).
    """
    buffers_partition = get_blocks_shape(R, B)
    buffers_volumes = get_named_volumes(buffers_partition, B)
    # initialization of lists of remainders
    # (one slot reused along k, one per k-row for j, one per (j,k) cell for i)
    k_remainder_list = [0]
    j_remainder_list = [0] * buffers_partition[2]
    i_remainder_list = [0] * (buffers_partition[2] * buffers_partition[1])
    if DEBUG:
        print(f"Image partition by B: {buffers_partition}")
        print(f"Lists initialization...")
        print(f"k: {k_remainder_list}")
        print(f"j: {j_remainder_list}")
        print(f"i: {i_remainder_list}")
    # start from the volume of one full buffer
    nb_voxels_max = B[0] * B[1] * B[2]
    nb_voxels = B[0] * B[1] * B[2]
    if DEBUG:
        print(f"Initialization nb voxels (=1 buffer): {nb_voxels}")
    # axis indices into theta/omega/O
    i, j, k = 0, 1, 2
    for buffer_index in buffers_volumes.keys():
        if DEBUG:
            print(f"Processing buffer {buffer_index}")
        _3d_index = numeric_to_3d_pos(buffer_index, buffers_partition, order='C')
        theta, omega = get_theta(buffers_volumes, buffer_index, _3d_index, O, B)
        if DEBUG:
            print(f"3d buffer index: {_3d_index}")
        # compute size of remainders: the seven partial-overlap fragments
        F1 = omega[k] * theta[j] * theta[i]
        F2 = theta[k] * omega[j] * theta[i]
        F3 = omega[k] * omega[j] * theta[i]
        F4 = theta[k] * theta[j] * omega[i]
        F5 = omega[k] * theta[j] * omega[i]
        # NOTE(review): omega[1] below equals omega[j] (j is bound to 1
        # above); written inconsistently but numerically identical.
        F6 = theta[k] * omega[1] * omega[i]
        F7 = omega[k] * omega[j] * omega[i]
        # a fragment already covering the overlap O on every axis is complete
        # and does not need to be kept
        if theta[i] >= O[i] and theta[j] >= O[j] and omega[k] >= O[k]:
            F1 = 0
        if theta[i] >= O[i] and omega[j] >= O[j] and theta[k] >= O[k]:
            F2 = 0
        if theta[i] >= O[i] and omega[j] >= O[j] and omega[k] >= O[k]:
            F3 = 0
        if omega[i] >= O[i] and theta[j] >= O[j] and theta[k] >= O[k]:
            F4 = 0
        if omega[i] >= O[i] and theta[j] >= O[j] and omega[k] >= O[k]:
            F5 = 0
        if omega[i] >= O[i] and omega[j] >= O[j] and theta[k] >= O[k]:
            F6 = 0
        if omega[i] >= O[i] and omega[j] >= O[j] and omega[k] >= O[k]:
            F7 = 0
        # remainders grouped by the axis along which they are carried over
        k_remainder = F1
        j_remainder = F2 + F3
        i_remainder = F4 + F5 + F6 + F7
        index_j = _3d_index[2]
        index_i = _3d_index[1]*len(j_remainder_list) + _3d_index[2]
        if DEBUG:
            print(f"Indices: {index_j}, {index_i}")
            print(f"Lengths: {len(j_remainder_list)}, {len(i_remainder_list)}")
        # line 20 of algorithm in paper: release the previously kept
        # remainders for this position...
        nb_voxels -= k_remainder_list[0] + j_remainder_list[index_j] + i_remainder_list[index_i]
        k_remainder_list[0] = k_remainder
        j_remainder_list[index_j] = j_remainder
        i_remainder_list[index_i] = i_remainder
        # line 25 of algorithm in paper: ...and account for the new ones
        nb_voxels += k_remainder_list[0] + j_remainder_list[index_j] + i_remainder_list[index_i]
        if DEBUG:
            print(f"k: {k_remainder_list}")
            print(f"j: {j_remainder_list}")
            print(f"i: {i_remainder_list}")
            print(f"Number of voxels: {nb_voxels}")
        # track the running maximum
        if nb_voxels > nb_voxels_max:
            nb_voxels_max = nb_voxels
    if DEBUG:
        print(f"Number of voxels max: {nb_voxels_max}")
        print(f"RAM consumed: {nb_voxels_max * nb_bytes_per_voxel}")
    return nb_voxels_max
import logging
import random as rand
from enum import Enum
import numpy as np
from numpy import array as arr
from numpy import concatenate as cat
import scipy as sy
import scipy.io as sio
from scipy.misc import imread, imresize
# Compare predicted joint coordinates against the ground-truth dataset
# (both stored as MATLAB .mat files).
pred = sio.loadmat('predictions.mat')
pred = pred['joints']
mlab = sio.loadmat('dataset.mat')
mlab = mlab['dataset']
#print(pred[0,0])
#print(pred[0,0][:,1])
#print(mlab[0,0][2][0])
#print(type(mlab[0,0][2][0]))
a = mlab[0,0][2][0][0]
num_images = mlab.shape[1]
data = []
# pull the per-image joint array out of the nested .mat cell structure
for i in range(num_images):
    ele = mlab[0,i][2][0][0] #array with joint data
    data.append(ele)
#print(len(data))
#compute distance between points
dist = np.zeros(num_images)
for i in range(num_images):
    #x-data
    img = data[i]
    act_x = img[:,1]
    act_y = img[:,2]
    #print(act_y.shape)
    pred_img = pred[0,i]
    pred_x = pred_img[:,0]
    pred_y = pred_img[:,1]
    #print(pred_y.shape)
    pred_weight = pred_img[:,2]
    # NOTE(review): only the x coordinates enter the distance; act_y, pred_y
    # and pred_weight are loaded but unused -- confirm whether intended.
    # NOTE(review): `import scipy as sy` does not import scipy.spatial; this
    # line relies on another import pulling it in, otherwise AttributeError.
    # Only images with all 14 annotated joints are scored.
    if(len(act_y)==14):
        dist[i] += sy.spatial.distance.euclidean(act_x,pred_x)
# keep only the images that produced a non-zero distance
eucl = []
for i in range(num_images):
    if dist[i]!= 0.0:
        eucl.append(dist[i])
print(eucl)
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
    """Render test.html with a hard-coded username in the context."""
    context = {'username': 'Keith Yue'}
    template = 'test.html'
    return render(request, template, context)
def test_bootstrap(request):
    """Render the bootstrap sample page (no context needed)."""
    template = 'bootstrapSample.html'
    return render(request, template)
|
#!/usr/bin/python
import os
import time
f = open("all.txt",'r+a')
f.write("hello file")
print(f.readline())
|
import os
import threading
import time
from concurrent.futures.thread import ThreadPoolExecutor
from urllib.parse import urlencode
import chardet
import redis
import requests
from soupsieve.util import string
from Cookie_pool.account_saver import RedisClient
# Shared account pool (project-local RedisClient wrapper).
CONN = RedisClient('account', 'pkulaw_v5')
# Number of byte ranges / worker threads per document download.
NUM = 16
# Local redis (db 1) holding the 'downloadreqdata' request hash.
pool = redis.ConnectionPool(host = 'localhost', port = 6379, db = 1, password = '')
r_pool = redis.StrictRedis(connection_pool = pool, charset = 'UTF-8', errors = 'strict', decode_responses = True,
                           unix_socket_path = None)
# NOTE(review): r_pipe is created but never used in this file.
r_pipe = r_pool.pipeline( )
# Local rotating-proxy endpoint.
proxy_pool_url = 'http://127.0.0.1:5010/get'
class downloader( ):
    """Multi-threaded document downloader for pkulaw.cn.

    Reads pending (name -> gid) requests from the redis hash
    'downloadreqdata', logs in with an account taken from the shared account
    pool, and downloads each document in NUM parallel byte-range threads.
    """

    def __init__(self):
        # pending request names (bytes) from the redis hash
        self.names = r_pool.hkeys('downloadreqdata')
        self.names_list = None     # {index: decoded name} of the current item
        self.username = None       # account credentials, set by account_update()
        self.userpassword = None
        self.gid = None            # document id of the current item
        self.cookieId = None       # CookieId returned by the first login
        self.cookie = None         # raw Cookie header value
        # NOTE(review): the dicts below are built while self.username,
        # self.cookieId, self.cookie and self.gid are still None; the live
        # values are patched in later via dict.update() calls.
        # First-stage login form (username/password).
        self.data1 = {
            'Usrlogtype': '1',
            'ExitLogin': '',
            'menu': 'case',
            'CookieId': '',
            'UserName': self.username,
            'PassWord': self.userpassword,
            'jz_id': '0',
            'jz_pwd': '0',
            'auto_log': '0'
        }
        # Second-stage login form (session CookieId).
        self.data2 = {
            'Usrlogtype': '1',
            'ExitLogin': '',
            'menu': 'case',
            'CookieId': self.cookieId,
            'UserName': '',
            'PassWord': '',
            'jz_id': '',
            'jz_pwd': '',
            'auto_log': ''
        }
        # Headers for the first login POST (no cookie yet).
        self.headers1 = {
            'Accept': '*/*',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
            'Connection': 'keep-alive',
            'Content-Length': '113',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            # 'DNT': '1',
            'Host': 'www.pkulaw.cn',
            'Origin': 'https://www.pkulaw.cn',
            'Referer': 'https://www.pkulaw.cn/Case/',
            # 'sec-ch-ua': '"Google Chrome 79"',
            # 'Sec-Fetch-Dest': 'empty',
            # 'Sec-Fetch-Mode': 'cors',
            # 'Sec-Fetch-Site': 'same-origin',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
            'X-Requested-With': 'XMLHttpRequest'
        }
        # Headers for the second login POST (carries the cookie).
        self.headers2 = {
            'Accept': '*/*',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
            'Connection': 'keep-alive',
            'Content-Length': '112',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'Cookie': self.cookie,
            # 'DNT': '1',
            'Host': 'www.pkulaw.cn',
            'Origin': 'https://www.pkulaw.cn',
            'Referer': 'https://www.pkulaw.cn/Case/',
            # 'sec-ch-ua': '"Google Chrome 79"',
            # 'Sec-Fetch-Dest': 'empty',
            # 'Sec-Fetch-Mode': 'cors',
            # 'Sec-Fetch-Site': 'same-origin',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
            'X-Requested-With': 'XMLHttpRequest'
        }
        # Headers for the actual document download.
        self.headers3 = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
            'Connection': 'keep-alive',
            'Cookie': self.cookie,
            'Host': 'www.pkulaw.cn',
            'Referer': 'https://www.pkulaw.cn/case/pfnl_a6bdb3332ec0adc4bf6da0b52d04589a8445f45b7079568dbdfb.html?match=Exact',
            # 'sec-ch-ua': 'Google Chrome 79',
            # 'Sec-Fetch-Dest': 'document',
            # 'Sec-Fetch-Mode': 'navigate',
            # 'Sec-Fetch-Site': 'same-origin',
            # 'Sec-Fetch-User': '?1',
            # 'Sec-Origin-Policy': '0',
            # 'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'
        }
        # Query parameters of the download endpoint.
        self.data3 = {
            'library': 'pfnl',
            'gid': self.gid,
            'type': 'txt',
            'jiamizi': ''
        }
        self.url1 = 'https://www.pkulaw.cn/case/CheckLogin/Login'
        # NOTE(review): url2 is frozen here with gid=None; later
        # data3.update(gid=...) calls do not rebuild it -- confirm intended.
        self.url2 = 'https://www.pkulaw.cn/case/FullText/DownloadFile?' + urlencode(self.data3)

    def get_proxy(self):
        """Fetch one proxy address from the local proxy pool, or None."""
        try:
            response = requests.get(proxy_pool_url)
            if response.status_code == 200:
                proxy_url_content = response.content
                # guess the encoding of the pool's reply before decoding
                encoding = chardet.detect(proxy_url_content)
                proxy_url_context = proxy_url_content.decode(encoding['encoding'], 'ignore')
                # NOTE(review): eval() on a network response is unsafe --
                # json.loads / ast.literal_eval would be the safe equivalent.
                proxy_url_context1 = eval(proxy_url_context)
                proxy_url = proxy_url_context1.get('proxy')
                print(proxy_url)
                return proxy_url
        except ConnectionError:
            return None

    def singeldownload(self, name):
        """Download document self.gid to ./download/<name>.txt in NUM byte
        ranges, then check the content for a logged-out marker.

        Returns True when the account is logged in and the file looks valid,
        False otherwise.
        """
        global proxy
        try:
            print("Requesting Pages...")
            self.headers3.update(Cookie = self.cookie)
            print('headers3.getcookie: ' + string(self.headers3.get('Cookie')))
            self.data3.update(gid = self.gid)
            proxy = self.get_proxy( )
            proxies = {
                'http': 'http://' + proxy
            }
            ses = requests.Session( )
            # HEAD request to learn the total size for range splitting
            r = ses.head(self.url2)
            total = int(r.headers['Content-Length'])
            print(total)
            # print(r.status_code)
            while r.status_code != 500:
                # with ThreadPoolExecutor(max_workers = 30) as executor:
                # executor.map(self.download, )
                thread_list = []
                # counter used only to label threads in the log output
                n = 0
                for ran in self.get_range(total):
                    start, end = ran
                    # progress info
                    print('thread %d start:%s,end:%s' % (n, start, end))
                    n += 1
                    # NOTE(review): target = self.download(...) CALLS download
                    # right here, serially, and hands Thread target=None; for
                    # real parallelism it should be target=self.download with
                    # the arguments passed via args=(...).
                    thread = threading.Thread(
                        target = self.download(name, start, end, self.headers3, ses, self.url2, self.data3, proxies),
                        args = (start, end))
                    # start the worker
                    thread.start( )
                    thread_list.append(thread)
                for i in thread_list:
                    # wait for all range workers
                    i.join( )
                print('download %s load success' % name)
                # (commented-out single-threaded / streaming variants kept
                # from the original)
                # with open('./download/' + name + '.txt', 'wb') as f4:
                #     for ran in get_range(total):
                #         headers4['Range'] = 'Bytes=%s-%s' % ran
                #         r = ses.get(url = url1, headers = headers4, data = data1, stream = True, proxies = proxies)
                #         f4.seek(ran[0])
                #         f4.write(r.content)
                #     f4.close( )
                # res = ses.get(url = url1, headers = headers4, data = data1, stream = True, proxies = proxies)
                #
                # print('Using proxy : ' + proxy)
                # print(res.status_code)
                # while res.status_code == 200:
                #     with open('./download/'+name+'.txt', 'wb') as f4:
                #         for chunk in res.iter_content(chunk_size = 32):  # chunk_size sets the size of each downloaded piece
                #             f4.write(chunk)  # write each chunk as it arrives
                # Inspect the first line: the portal returns a "not logged in"
                # page ("尚未登录") instead of the document when the session died.
                with open('./download/' + name + '.txt', 'r', encoding = 'GBK') as f5:
                    lines = f5.readlines( )
                    first_line = lines[0]
                    key = "尚未登录"
                    if key in first_line:
                        print(first_line + "请先登录获取cookie")
                        return False
                    else:
                        print('您的账号已经登陆')
                        return True
            else:
                # while/else: reached only when the HEAD came back 500
                print("unable to download...")
                return False
        except Exception as e:
            print(e)
            return False

    def download(self, name, start, end, headers, ses, url, data, proxies):
        """Fetch one byte range and write it at its offset in the file.

        NOTE(review): mode 'wb' truncates the file on every call, so
        concurrent/successive ranges clobber each other; 'r+b' (with the file
        pre-created) would preserve previously written ranges.
        """
        with open('./download/' + name + '.txt', 'wb') as f4:
            headers['Range'] = 'Bytes=%s-%s' % (start, end)
            r = ses.get(url = url, headers = headers, data = data, stream = True, proxies = proxies)
            f4.seek(start)
            f4.write(r.content)
            f4.close( )

    def get_range(self, total):
        """Split `total` bytes into NUM (start, end) ranges; the last range
        is open-ended ('') so integer rounding never drops the tail."""
        ranges = []
        offset = int(total / NUM)
        for i in range(NUM):
            if i == NUM - 1:
                ranges.append((i * offset, ''))
            else:
                ranges.append((i * offset, (i + 1) * offset))
        return ranges

    def first_login_reqck(self):
        """Two-stage login: post credentials, persist the returned cookies to
        disk, then re-post with the CookieId to activate the session."""
        try:
            response = requests.Session( )
            self.data1.update(UserName = self.username, PassWord = self.userpassword)
            res = response.post(url = self.url1, data = self.data1, headers = self.headers1, timeout = 10)
            cookies1 = res.cookies.get_dict( )
            self.cookieId = cookies1.get('CookieId')
            print('CookieId: ' + string(self.cookieId))
            print('firstlogcookie: ' + string(cookies1))
            # persist cookies as "k=v; k=v; " ...
            with open('./Cookies/firstlogCookie.txt', 'w', encoding = 'utf-8') as f:
                for key, value in cookies1.items( ):
                    f.write(key + "=" + string(value) + "; ")
                f.close( )
            # ... then chop off the trailing "; " separator
            with open('./Cookies/firstlogCookie.txt', 'rb+') as f1:
                f1.seek(-2, os.SEEK_END)
                f1.truncate( )
                f1.close( )
        except Exception as e1:
            print("Error1: " + string(e1))
            pass
        try:
            # reload the cookie from disk and run the CookieId login step
            self.cookieupdate( )
            self.headers2.update(Cookie = self.cookie)
            self.data2.update(CookieId = self.cookieId)
            response1 = requests.Session( )
            res1 = response1.post(url = self.url1, data = self.data2, headers = self.headers2, timeout = 10)
        except Exception as e2:
            print("error2: " + string(e2))
            pass

    def account_update(self):
        """Pick an account from the pool; password equals the username."""
        self.username = CONN.random_key( )
        self.userpassword = self.username

    def autocookiecheck(self):
        """Return True when a non-empty cookie is currently loaded."""
        try:
            print(self.cookie)
            # NOTE(review): `is ''` is an identity comparison against a
            # literal and is not guaranteed to match; should be `== ''`.
            if self.cookie is None or self.cookie is '':
                print('cookie is None')
                return False
            else:
                print('cookie exists')
                return True
        except Exception as e:
            print(e)
            pass
        return False

    def cookieupdate(self):
        """Reload self.cookie from the on-disk cookie file."""
        try:
            with open('./Cookies/firstlogCookie.txt', 'r', encoding = 'utf-8') as f2:
                self.cookie = f2.readline( )
                print("cookie: " + string(self.cookie))
                f2.close( )
        except Exception as e:
            print(e)
            pass

    def download_data(self):
        """Main loop: walk the pending names, re-logging in with a fresh
        account whenever the cookie check fails."""
        global FLAG
        self.cookieupdate( )
        FLAG = self.autocookiecheck( )
        print(FLAG)
        try:
            for i in range(len(self.names)):
                # while/else used as an if/else: `break` skips the else;
                # when FLAG is falsy the else (re-login path) runs instead.
                while FLAG:
                    self.names_list = {i: self.names[i].decode( )}
                    self.gid = r_pool.hget('downloadreqdata', self.names_list[i]).decode( )
                    print(self.names_list[i])
                    print(self.gid)
                    # self.cookieupdate( )
                    FLAG = self.singeldownload(name = self.names_list[i])
                    # NOTE(review): this i += 1 has no effect -- the for loop
                    # rebinds i on the next iteration.
                    i += 1
                    time.sleep(5)
                    break
                else:
                    print('cookie expired')
                    # rotate account, redo the two-stage login, retry this item
                    self.account_update( )
                    self.first_login_reqck( )
                    self.cookieupdate( )
                    self.names_list = {i: self.names[i].decode( )}
                    self.gid = r_pool.hget('downloadreqdata', self.names_list[i]).decode( )
                    print(self.names_list[i])
                    print(self.gid)
                    FLAG = self.singeldownload(name = self.names_list[i])
                    i += 1
                    time.sleep(5)
                    continue
        except Exception as e:
            print(e)
            pass
# Script entry: drain the 'downloadreqdata' hash and download each document.
dl = downloader( )
dl.download_data( )
|
__author__ = 'aoboturov'
import networkx as nx
def session_transition_graph(ds):
    """Build an undirected referrer->url transition graph from a session DataFrame.

    Each row contributes an edge (referrer, url). Node attributes record the
    page 'type' (the row's layer) and mark order pages with a positive
    order_price with label 1.
    """
    g = nx.Graph()
    # itertuples() yields the index at position 0, hence the +1 offsets.
    url_id_idx = ds.columns.get_loc('url') + 1
    referrer_idx = ds.columns.get_loc('referrer') + 1
    layer_idx = ds.columns.get_loc('layer') + 1
    order_price_idx = ds.columns.get_loc('order_price') + 1
    for t in ds.itertuples():
        to_node = t[url_id_idx]
        g.add_edge(t[referrer_idx], to_node)
        # Fix: Graph.node was removed in networkx 2.4 — Graph.nodes is the
        # supported mapping view on all networkx 2.x releases.
        g.nodes[to_node]['type'] = t[layer_idx]
        if t[layer_idx] == 'order' and t[order_price_idx] > 0.:
            g.nodes[to_node]['label'] = 1
    return g
|
# List of quiz questions (Hebrew) and the expected answers.
questions_list = ['האם חדשנות טובה לכלכלה? : ',
                  'האם וודגווד היה בעל מפעל לכלי חרס? : ',
                  'האם ניהול וטכנו זה מעפן? : ']
admin_answers_list = ['yes', 'yes', 'no']
user_answers_list = []
right_answers = []
for i in range(len(questions_list)):
    user_answers_list.append(input(questions_list[i]))
    print(user_answers_list)
    # Bug fix: compare the CURRENT answer — the original always compared
    # index 0, so every correct later answer was scored against question 1.
    if user_answers_list[i] == admin_answers_list[i]:
        right_answers.append(1)
print(right_answers)
import csv
import json
import pathlib
import pandas as pd
import os
"""
This script convert .json file to .csv using Pandas
Input: All .json files in current directory
Output: .csv converted files to current directory
"""
# Collect all .json files in the current working directory.
# Bug fix: start from an empty list — the original seeded it with ' ',
# which convert_json_to_csv would then try (and fail) to read as a file.
json_filenames = []
for fname in os.listdir():
    if fname.endswith(".json"):
        json_filenames.append(fname)
print("The following json files will be processed: {}".format(json_filenames))
def convert_json_to_csv(json_filenames = json_filenames):
    """Convert each JSON-lines file in *json_filenames* to a same-named .csv.

    Defaults to the list of .json files discovered at import time; output is
    written next to each input with the extension replaced.
    """
    for file in json_filenames:
        df = pd.read_json(file, lines = True)
        # os.path.splitext keeps dotted names like 'a.b.json' intact,
        # unlike the original file.split('.')[0].
        df.to_csv(os.path.splitext(file)[0] + ".csv", index = False)
if __name__ == "__main__":
    convert_json_to_csv()
|
from pymel.core.datatypes import Vector
from pymel.util import path
from functools import partial
import maya.cmds as cmds
import pymel.core as pm
class BaseDragger(object):
    """Base wrapper around a Maya draggerContext.

    Subclasses implement the drag_* handlers to react to LMB/MMB drags with
    ctrl/shift modifiers. The scaled drag delta is exposed as self.value;
    its x component is clamped to [min_value, max_value] when those are set.
    """
    def __init__(self, name="pbDragger", title="Base", default_value=0,
                 min_value=None, max_value=None, multiplier=0.01, cursor="crossHair", image="NEXCtx"):
        # Per-drag state; populated by press()/drag().
        self.title = title
        self.default_value = default_value
        self.min_value = min_value
        self.max_value = max_value
        self.multiplier = multiplier
        self.default_multiplier = multiplier
        self.anchor_point = None
        self.drag_point = None
        self.value = None
        self.button = None
        self.modifier = None
        self._context = name
        # Create the dragger context only if it does not already exist.
        if not cmds.draggerContext(self._context, exists=True):
            self._context = cmds.draggerContext(self._context)
        # self.context(...) queries by default; call with q=False, e=True to edit.
        self.context = partial(cmds.draggerContext, self._context, q=True, e=False)
        self.context(q=False, e=True,
                     pressCommand=lambda *args: self.press(),
                     dragCommand=lambda *args: self.drag(),
                     releaseCommand=lambda *args: self.release(),
                     initialize=lambda *args: self.initialize(),
                     finalize=lambda *args: self.finalize(),
                     cursor=cursor,
                     drawString=self.title,
                     image1=image,
                     undoMode="all")
        # Call self.set_tool() to make this dragger the active tool.
        self.set_tool = partial(cmds.setToolTo, name)
    def press(self):
        """Record the press position and button, and open one undo chunk."""
        self.anchor_point = Vector(*self.context(anchorPoint=True))
        self.button = self.context(button=True)
        cmds.undoInfo(openChunk=True)
    def drag(self):
        """Compute self.value from the drag delta and dispatch to the
        modifier/button-specific handler, then force a viewport refresh."""
        self.drag_point = Vector(*self.context(dragPoint=True))
        self.modifier = self.context(modifier=True)
        self.value = ((self.drag_point - self.anchor_point) * self.multiplier) + self.default_value # type: Vector
        # Only the x component (the primary drag axis) is clamped.
        if self.min_value is not None and self.value.x < self.min_value:
            self.value.x = self.min_value
        if self.max_value is not None and self.value.x > self.max_value:
            self.value.x = self.max_value
        if self.modifier == "ctrl":
            if self.button == 1:
                self.drag_control_lmb()
            elif self.button == 2:
                self.drag_control_mmb()
        elif self.modifier == "shift":
            if self.button == 1:
                self.drag_shift_lmb()
            elif self.button == 2:
                self.drag_shift_mmb()
        else:
            if self.button == 1:
                self.drag_lmb()
            elif self.button == 2:
                self.drag_mmb()
        cmds.refresh()
    def initialize(self):
        """Hook called when the tool is entered; no-op by default."""
        pass
    def release(self):
        """Close the undo chunk opened in press()."""
        cmds.undoInfo(closeChunk=True)
    def draw_string(self, message):
        """Update the heads-up string shown while dragging."""
        self.context(q=False, e=True, drawString=message)
    # Handlers below must be provided by concrete subclasses.
    def drag_lmb(self):
        raise NotImplementedError
    def drag_mmb(self):
        raise NotImplementedError
    def drag_control_lmb(self):
        raise NotImplementedError
    def drag_control_mmb(self):
        raise NotImplementedError
    def drag_shift_lmb(self):
        raise NotImplementedError
    def drag_shift_mmb(self):
        raise NotImplementedError
    def finalize(self):
        """Hook called when the tool is exited; no-op by default."""
        pass
def source_marking_menus():
    """Source the marking-menu MEL scripts that live beside this module."""
    working_dir = path(__file__).parent
    menu_files = ["contextPolyToolsEdgeMM.mel"]
    for menu_file in menu_files:
        # MEL expects forward slashes even on Windows.
        mel_path = working_dir.joinpath(menu_file).replace("\\", "/")
        pm.mel.source(mel_path)
def get_faces():
    """Convert the current selection to faces and return the expanded face list."""
    faces = pm.polyListComponentConversion(tf=True)
    pm.select(faces, r=True)
    return pm.filterExpand(ex=True, sm=34)
def polyChamferVtx(ch=True, width=0.25, deleteFace=False):
    # type: (bool, float, bool) -> pm.nodetypes.PolyExtrudeVertex
    """Chamfer the selected vertices via a zero-length vertex extrude.

    Locks divisions/length so only the chamfer width remains editable, and
    renames the history node to polyChamfer#.

    NOTE(review): with deleteFace=True the generated faces are deleted; the
    else branch calls cmds.DeleteVertex() instead — presumably intentional
    (mimicking Maya's chamfer option), but confirm the branches are not
    inverted before relying on this flag.
    """
    node = pm.polyExtrudeVertex(ch=ch, divisions=1, length=0, width=width)[0]
    node.divisions.lock(True)
    node.length.lock(True)
    node = pm.rename(node, "polyChamfer#")
    if deleteFace:
        faces = get_faces()
        pm.delete(faces)
    else:
        cmds.DeleteVertex()
    return node
|
# Generated by Django 2.2 on 2019-10-19 17:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the boolean 'finalizado' (finished) flag to the pedidos model."""
    dependencies = [
        ('app', '0007_auto_20191018_1809'),
    ]
    operations = [
        migrations.AddField(
            model_name='pedidos',
            name='finalizado',
            field=models.BooleanField(default=False),
        ),
    ]
|
def _celsius_to_fahrenheit(celsius):
    """Convert a temperature in degrees Celsius to degrees Fahrenheit."""
    return (celsius * 9 / 5) + 32

# Interactive driver (prompts in Portuguese, as in the original).
print('Converte graus Celsius para Farenheit')
celsius = float(input('Informe os graus em Celsius: '))
farenheit = _celsius_to_fahrenheit(celsius)
print('Convertidos, são {:.1f} graus Farenheit'.format(farenheit))
|
# How to get the remainder of a division (the % modulo operator).
print (6%2)
print (2%3)
print (4%2)
print (5%2)
print (7%3.1)
print (900%100 == 0)
# Read two numbers and show their quotient and remainder (Portuguese prompts).
num1 = float(input("Digite um numero: "))
num2 = float(input ("Digite outro numero: "))
resultado = num1 / num2
resto = num1 % num2
print (num1, "dividido por ",num2," resulta em", resultado)
print (num1, "dividido por ",num2," tem resto", resto)
#coding:utf-8
import CRFPP
import os
# Load the pre-trained CRF address model once; shared by crf_segmenter().
tagger = CRFPP.Tagger('-m '+ os.path.join('data','address_model'))
def crf_segmenter(address_str):
    """Tag each character of *address_str* with the shared CRF tagger.

    Returns a list of (char, tag) pairs, e.g. (u'成', 'B_city'), suitable
    for gen_result().
    """
    tags_list = []
    tagger.clear()
    # CRF++ expects one UTF-8-encoded token (here: one character) per add().
    for word in address_str:
        tagger.add(word.encode('utf-8'))
    tagger.parse()
    size = tagger.size()
    xsize = tagger.xsize()
    for i in range(0,size):
        for j in range(0,xsize):
            char = tagger.x(i, j).decode('utf-8')
            tag = tagger.y2(i)
            tags_list.append((char,tag))
    return tags_list
def gen_result(tags_list):
    """Assemble (char, tag) pairs into an address-component dict.

    Items of *tags_list* look like (char, 'B_city'): a BIO prefix and a
    component name joined by '_'. Consecutive characters of one component
    are concatenated; a 'B' prefix restarts that component. Every known
    component key is present in the result, defaulting to ''.
    """
    tags = {}
    for char, label in tags_list:
        parts = label.split('_')
        tag = parts[1]
        # A 'B' (begin) prefix restarts the component's accumulator.
        if tag not in tags or parts[0] == 'B':
            tags[tag] = ''
        tags[tag] += char
    # Emit every known component with '' as fallback — replaces the
    # original's eleven copy-pasted conditional assignments.
    fields = ('province', 'city', 'district', 'street', 'road', 'roadnum',
              'community', 'building', 'unit', 'floor', 'house')
    return {field: tags.get(field, '') for field in fields}
if __name__=="__main__":
    # Demo: segment a sample Chengdu address and print the parsed components.
    # NOTE: Python 2 print statement — this file predates Python 3.
    tags_list = crf_segmenter(u'成都市金牛区金房苑东路28号8栋1单元6楼2号')
    print gen_result(tags_list)
import datetime
from django.db import models
from django.utils import timezone
from accounts.models import Person
# Coupon Class Specifier
class Coupon(models.Model):
    """A redeemable coupon owned by one Person and claimable by many."""
    # Creator of the coupon; deleting the owner cascades to their coupons.
    owner = models.ForeignKey(
        Person,
        on_delete=models.CASCADE,
        editable=False)
    title = models.CharField(max_length=100)
    # Short unique redemption code.
    code = models.CharField(max_length=10, unique=True)
    terms = models.TextField()
    # Users who claimed this coupon, recorded via the Claim through-model.
    claimants = models.ManyToManyField(Person, through='Claim',
                                       related_name='Claimed')
    create_date = models.DateTimeField(
        default=timezone.now, editable=False)
    # When the coupon becomes ready for use
    publish_date = models.DateTimeField(
        default=timezone.now)
    # When this coupon expires
    validity = models.DateTimeField(
        default=timezone.now)
    '''
    # Use this method when we get to postgres
    validity = models.DurationField(
        default=datetime.timedelta(days=7), help_text="1 12:00 = 1 day + 12 hours")'''
    # Method to determine if coupon is currently published or not
    def published(self):
        """True while now lies between publish_date and validity (expiry)."""
        return self.publish_date <= timezone.now() <= self.validity
    published.admin_order_field = 'publish_date'
    published.boolean = True
    published.short_description = 'Currently Running'
    # Human-readable name for Coupon
    def __str__(self):
        return self.title
# Relationship between Coupon and User
class Claim(models.Model):
    """Through-model recording when a Person claimed a Coupon."""
    coupon = models.ForeignKey(Coupon, on_delete=models.CASCADE)
    user = models.ForeignKey(Person, on_delete=models.CASCADE)
    date_claimed = models.DateTimeField(
        default=timezone.now, editable=False)
|
from django.contrib import admin
from .models import Cryptocurrency
# Expose Cryptocurrency in the Django admin with the default ModelAdmin.
admin.site.register(Cryptocurrency)
|
# def listsum(numList):
# theSum = 0
# for i in numList:
# theSum = theSum + i
# return theSum
# print(listsum([1,3,5,6,10,2000]))
#tast1
#Write a function called display_message() that prints one sentence telling everyone what you are learning about in this chapter.
#Call the function, and make sure the message displays correctly.
def say_hello():
    """Print a fixed demonstration message (chapter exercise 1)."""
    print("function do this !")
say_hello()
#task2
# Exercise 2: What’s Your Favorite Book ?
# Write a function called favorite_book() that accepts one parameter, title.
# The function should print a message, such as “One of my favorite books is Alice in Wonderland”.
# Call the function, making sure to include a book title as an argument in the function call.
# def favorite_book(title):
# print("my fav book is " + title)
# favorite_book("1984")
# task3
#Write a function that accepts one parameter (a number X) and returns the value of X+XX+XXX+XXXX.
# def tast3():
# digit = 1
# n = 5
# ones = [ int("1" * i) for i in range(1, n+1)]
# print(ones)
# def sumx(x):
# test =[str(x)*i for i in range(1, x+1)]
# print "+".join(test)
# return sum(map(int, test))
# sumx(1)
# ones = [ int("1" * i) for i in range(1, n+1)]
# print(ones)
#almoste couldnt manage to make it work with x need to convert it to strings but dont know how tto do thatt right now will come back to it later
# Exercise 4 : Some Geography
# Write a function called describe_city() that accepts the name of a city and its country.
# The function should print a simple sentence, such as “Reykjavik is in Iceland”.
# Give the parameter for the country a default value.
# Call your function for three different cities, at least one of which is not in the default country.
#def describe_city(city="Reykjavik", country='Iceland'):
# output = city + " is in " + country + "."
# print(output)
#describe_city()
#describe_city("Berlin", "Germany ")
#describe_city("London", "uk")
# Exercise 5 : Let’s Create Some Personalized Shirts !
# Write a function called make_shirt() that accepts a size and the text of a message that should be printed on the shirt.
# The function should print a sentence summarizing the size of the shirt and the message printed on it.
# Call the function once using positional arguments to make a shirt.
# Call the function a second time using keyword arguments.
# Modify the make_shirt() function so that shirts are large by default with a message that reads I love Python.
# Make a large shirt and a medium shirt with the default message, and a shirt of any size with a different message.
# def make_shirt(size="large", message="I love python"):
# summarizing = (size + message)
# print(summarizing)
# make_shirt()
# make_shirt("48 ", "my new message" )
# make_shirt(message="my message.", size="Small ")
# make_shirt("large ", "first statment")
# print(make_shirt)
# Exercise 6 : Magicians …
# Make a list of magician’s names.
# Pass the list to a function called show_magicians(), which prints the name of each magician in the list.
# Write a function called make_great() that modifies the list of magicians by adding the phrase "the Great" to each magician’s name.
# Call show_magicians() to see that the list has actually been modified.
# def show_magicians(magicians):
# """Print the name of each magician in the list."""
# for magician in magicians:
# print(magician + " Harry is the greates ")
# magicians = ["Merlin", "Harry", "Peter"]
# show_magicians(magicians)
#Exercise7
# The point of the exercise is to check is a person can retire depending on his age and his gender.
# Note : Retirement age in Israel is 67 for men, and 62 for women (born after April, 1947).
# Create a function get_age(year, month, day)
# Hard-code the current year and month in your code (there are better ways of doing this, but for now it will be enough.)
# After calculating the age of a person, the function should return it (the age is an integer).
# Create a function can_retire(gender, date_of_birth).
# It should call the get_age function (with what arguments?) in order to receive an age back.
# Now it has all the information it needs in order to determine if the person with the given gender and date of birth is able to retire or not.
# Calculate. You may need to do a little more hard-coding here.
# Return True if the person can retire, and False if he/she can’t.
# Some Hints
# Ask for the user’s gender as “m” or “f”.
# Ask for the user’s date of birth in the form “yyyy/mm/dd”, eg. “1993/09/21”.
# Call can_retire to get a definite value for whether the person can or can’t retire.
# Display a message to the user informing them whether they can retire or not.
# As always, test your code to ensure it works.
# import datetime
# year = None
# while year is None:
# try:
# user_input =input('Enter your date of birth (YYYY): ')
# year = int(user_input)
# except ValueError:
# print('try again!')
# print('You have been born {} years ago'.format(datetime.datetime.now().year - year))
# numbers = []
# def loop_function(numbers):
# x = 6
# i = 0
# while i < x:
# print "At the top i is %d" % i
# numbers.append(i)
# i = i + 1
# print "Numbers now: ", numbers
# print "At the bottom i is %d\n" % i
# return numbers
# loop_function(numbers)
# print "The numbers: "
# for num in numbers:
# print num
|
from django.shortcuts import get_object_or_404,render
from django.http import HttpResponse,HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.template import Context, loader
from polls.models import *
# Create your views here.
# def index(request):
# latest_poll_list = Poll.objects.order_by('-pub_date')[:5]
# context = {
# 'latest_poll_list' : latest_poll_list,
# }
# return render(request,'polls/index.html',context)
# def detail(request, poll_id):
# return HttpResponse("You're looking at poll %s." % poll_id)
# def result(request, poll_id):
# poll = get_object_or_404(Poll, pk=poll_id)
# return render(request,'polls/results.html',{'poll':poll})
def vote(request, poll_id):
    """Record one vote for a choice of the given poll.

    Re-renders the detail page with an error message when no (or an
    unknown) choice was submitted; on success increments the vote count
    and redirects to the results page.
    """
    p = get_object_or_404(Poll, pk=poll_id)
    try:
        selected_choice = p.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Either 'choice' was absent from the POST data or its id is stale.
        return render(request, 'polls/detail.html', {
            'poll':p,
            'error_message':"You didn't select a choice.",
        })
    else:
        # NOTE(review): read-modify-write is racy under concurrent votes;
        # consider F('votes') + 1 — confirm before changing behavior.
        selected_choice.votes += 1
        selected_choice.save()
        # Redirect after POST so a browser refresh cannot double-vote.
        return HttpResponseRedirect(reverse('polls:results', args=(p.id,)))
|
"""
이코테 p303
선수 강의가있다.
ex) 알고리즘 강의의 선수강의로 자료구조와 컴퓨터 기초가 있다면 자료구조, 컴퓨터 기초를 모두 들은 후 알고리즘 강의를 들을수 있다.
총 N개의 강의를 듣고자 한다.
동시에 여러 강의를 들을수 있다.
첫번째 줄에 듣고자 하는 강의의 수 N(1 <= N <= 500)이 주어진다.,
다음 N개의 줄에는 강의의 시간과 강의를 듣기 위해서 먼저 들어야하는 강의들의 번호가 자연수로 주어진다.
각 강의번호는 1부터 N까지로 구성되며 각 줄은 -1로 끝난다
5
10 -1
10 1 -1
4 1 -1
4 3 1 -1
3 3 -1
->
10
20
14
18
17
-> 토폴로지를 이용해서 풀어볼 예정이다.
"""
from collections import deque
import copy
# Read the number of course nodes.
v = int(input())
# In-degree of every node starts at 0 (index 0 unused; nodes are 1..v).
indegree = [0] * (v+1)
# Adjacency list: graph[x] holds the courses that have x as a prerequisite.
graph = [[] for i in range(v + 1)]
# Duration of each course, initialised to 0.
time = [0] * (v+1)
# Read every line of the directed prerequisite graph.
for i in range(1, v+1):
    data = list(map(int, input().split()))
    # First number is the course duration; the trailing -1 is a sentinel,
    # everything in between lists prerequisite course numbers.
    time[i] = data[0]
    for x in data[1:-1:]:
        indegree[i] +=1
        graph[x].append(i)
def topology_sort():
    """Kahn's algorithm: print, for each course 1..v, the earliest finish
    time (its own duration plus the longest prerequisite chain before it)."""
    # Start each course's result at its own duration.
    result = copy.deepcopy(time)
    q = deque()
    # Seed the queue with courses that have no prerequisites.
    for i in range(1, v+1):
        if indegree[i] == 0:
            q.append(i)
    while q:
        now = q.popleft()
        for i in graph[now]:
            # Course i cannot finish before (finish of now) + duration of i.
            result[i] = max(result[i], result[now] + time[i])
            indegree[i] -= 1
            if indegree[i] == 0:
                q.append(i)
    for i in range(1, v+1):
        print(result[i])
topology_sort()
|
def reverse(string):
    """Return *string* with the order of its whitespace-separated words reversed."""
    words = string.split()
    return " ".join(reversed(words))
# Read one line from stdin (Python 2 raw_input) and echo it word-reversed.
arg = raw_input()
res = reverse(arg)
print res
|
import turtle
# NOTE(review): speed() documents 0..10; -1 is out of range — presumably
# "fastest" (0) was intended. Confirm before changing.
turtle.speed(-1)
# Board geometry: 8x8 squares of `width` px, centred via the (xRef, yRef) offset.
width = 60
xRef = -4 * width
yRef = -4 * width
def draw_squre(x, y):
    """Draw one filled square of side `width` whose lower-left corner is at
    board coordinate (x, y), offset by (xRef, yRef) to centre the board."""
    turtle.goto(x + xRef, y + yRef)
    turtle.begin_fill()
    turtle.down() # Pen down before tracing the outline
    for i in range(4): # For each edge of the shape
        turtle.forward(width) # Move forward one edge (`width` units)
        turtle.left(90) # Turn ready for the next edge
    turtle.up() # Pen up
    turtle.end_fill() # End fill.
def draw_board():
    """Draw the 9+9 grid lines of an 8x8 board, then fill alternating
    squares to produce the checker pattern."""
    screen = turtle.getscreen()
    screen.setup(width=width * 10, height=width * 10)
    for row in range(9):
        # Vertical grid line for this index.
        turtle.up() # "Pen" down?
        turtle.goto(row * width + xRef, 0 + yRef)
        turtle.down() # "Pen" down?
        turtle.goto(row * width + xRef, width * 8 + yRef)
        turtle.up()
        # Horizontal grid line for the same index.
        turtle.up() # "Pen" down?
        turtle.goto(0+xRef, row * width+yRef)
        turtle.down() # "Pen" down?
        turtle.goto(width * 8+xRef, row * width+yRef)
        turtle.up()
    for col in range(4):
        for row in range(4):
            # Two filled squares per 2x2 cell give the checker pattern.
            draw_squre(col * width * 2, row * width * 2)
            draw_squre(col * width * 2 + width, row * width * 2 + width)
def draw_queen(row, col, erase):
    """Draw (or erase) a queen marker — an outlined circle — on square
    (row, col).

    When erasing, the pen colour is matched to the square's colour (derived
    from the parity of `index`) so the previous circle is painted over.
    """
    index = col * 8 + row + col
    turtle.pencolor('black')
    turtle.pensize(4)
    if erase:
        # Slightly thicker stroke so the old outline is fully covered.
        turtle.pensize(6)
    if erase:
        if index % 2 == 1:
            turtle.pencolor('white')
    else:
        if index % 2 == 0:
            turtle.pencolor('white')
    turtle.up()
    # Circle geometry: centred in the square; turtle.circle starts drawing
    # from the bottom of the circle, hence the `- rad` on y.
    rad = width / 3
    x = col * width + width / 2
    y = row * width + width / 2 - rad
    turtle.goto(x+xRef, y+yRef)
    turtle.down()
    turtle.circle(rad)
    turtle.color('black')
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify that the xcode-ninja GYP_GENERATOR runs and builds correctly.
"""
import TestGyp
import os
import sys
# The xcode-ninja generator only exists on macOS, so the test is mac-only.
if sys.platform == 'darwin':
    test = TestGyp.TestGyp(formats=['xcode'])
    # Run ninja and xcode-ninja
    test.formats = ['ninja', 'xcode-ninja']
    test.run_gyp('test.gyp', chdir='app-bundle')
    # If it builds the target, it works.
    test.build('test.ninja.gyp', chdir='app-bundle')
    test.pass_test()
|
from django.contrib import admin
from asignacionc.models import Alumno, AlumnoAdmin, Materia, MateriaAdmin
# Register both models with their custom ModelAdmin classes.
# NOTE(review): the *Admin classes are imported from asignacionc.models —
# unusual placement (they normally live in admin.py); confirm intentional.
admin.site.register(Alumno, AlumnoAdmin)
admin.site.register(Materia, MateriaAdmin)
|
import os,sys
sys.path.append("/Users/twongjirad/working/uboone/vireviewer")
from vireviewer import getmw
import numpy as np
import pandas as pd
from channelmap import getChannelMap
from hoot import gethootdb
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg
import math
def get_pulsed_list(run):
    """Parse runmeta/run<run>_pulsed_ch.txt into a dict.

    Each line holds five ints: subrun1 subrun2 crate slot femch. The result
    maps (subrun1, subrun2) -> list of [crate, slot, femch] entries.
    """
    # Fix: use a context manager so the handle is closed even on error —
    # the original leaked the open file object.
    with open('runmeta/run%03d_pulsed_ch.txt'%(run),'r') as f:
        lines = f.readlines()
    pulsed_dict = {}
    for l in lines:
        data = l.strip().split()
        subrun1 = int(data[0])
        subrun2 = int(data[1])
        crate = int(data[2])
        slot = int(data[3])
        femch = int(data[4])
        if (subrun1,subrun2) not in pulsed_dict:
            pulsed_dict[(subrun1,subrun2)] = []
        pulsed_dict[(subrun1,subrun2)].append( [crate,slot,femch] )
    return pulsed_dict
def plot_run( mw, run, subrun1, subrun2, plotfft=True, subbg=True ):
    """Colour the wires of the 3D viewer `mw` for one (run, subrun) slice.

    plotfft=True maps per-channel FFT r/g/b values onto wire colours
    (optionally background-subtracted); otherwise wires are coloured by
    pulse amplitude relative to the known pulsed channel. Python 2 code.
    """
    pulsed_list = get_pulsed_list(run)[(subrun1,subrun2)]
    print pulsed_list
    # loading in noise data (fixed reference run 95/0/19)
    if subbg==True:
        bgzp = np.load( 'output/run%03d_subrun%03d_%03d.npz'%(95,0,19) )
        bgarr = bgzp['wffftrgba']
        bgdf = pd.DataFrame( bgarr )
        bgdf.drop( 'index', axis=1, inplace=True )
        # changing column names: prefix everything but the channel key with bg_
        bg_cols = []
        for col in bgdf.columns:
            if col not in ['crate','slot','femch']:
                col = "bg_"+col
            bg_cols.append( col )
        bgdf.columns = bg_cols
    # open data we are focuing on
    npzfile = np.load( 'output/run%03d_subrun%03d_%03d.npz'%(run,subrun1,subrun2) )
    arr = npzfile['wffftrgba']
    df = pd.DataFrame( arr )
    df.drop( 'index', axis=1, inplace=True )
    if subbg:
        # Join signal and background rows on the (crate, slot, femch) key.
        df.set_index(['crate','slot','femch'],inplace=True)
        bgdf.set_index(['crate','slot','femch'],inplace=True)
        df = df.join( bgdf )
    hootdb = gethootdb()
    chmap = getChannelMap()
    hootdb.set_index( ['crate','slot','femch'], inplace=True )
    df = df.join( hootdb ).reset_index()
    # now have supertable
    print df.columns
    print len(df)
    print len(chmap)
    maxamp = np.max( df['max_amp'].values )
    arr = df.to_records()
    print pulsed_list[0]
    # Row of the first known pulsed channel — used as the normalisation reference.
    pulsed_row = df.query( '(crate==%d) & (slot==%d) & (femch==%d)'%(pulsed_list[0][0], pulsed_list[0][1],pulsed_list[0][2]) )
    max_ampratio = pulsed_row['max_amp'].values[0]/pulsed_row['ped_rms'].values[0]
    pulsed_maxamp = pulsed_row['max_amp'].values[0]
    print "maxamp (overall): ",maxamp
    print "pulsed maxamp: ",pulsed_maxamp
    ampratio = np.zeros( len(df['max_amp'].values) )
    ampratio[:] = df['max_smooth'].values[:]
    ampratio[:] /= df['rms_smooth'].values[:]
    max_smooth = pulsed_row['max_smooth'].values[0]
    max_smooth_ratio = pulsed_row['max_smooth'].values[0]/pulsed_row['rms_smooth'].values[0]
    rmax = pulsed_row['rval'].values[0]
    gmax = pulsed_row['gval'].values[0]
    bmax = pulsed_row['bval'].values[0]
    # NOTE(review): the data-driven maxima above are immediately overridden
    # by these hard-coded normalisation constants — presumably a tuning
    # choice; confirm before removing the dead assignments.
    rmax = 5000.0
    gmax = 3000.0
    bmax = 1000.0
    rgbmax = max( (rmax,gmax,bmax) )
    print "RGB Max: ",rmax,gmax,bmax,rgbmax
    bg_rmax = np.max( df['bg_rval'].values )
    bg_gmax = np.max( df['bg_gval'].values )
    bg_bmax = np.max( df['bg_bval'].values )
    candidates = []
    for r in arr:
        # Channels without a mapped wire are dimmed and skipped.
        if math.isnan(r['wireid']):
            #print "skipping: ",r['crate'],r['slot'],r['femch']
            mw.vires.setWireColorByCSF( r['crate'],r['slot'],r['femch'], (0.01, 0.01, 0.01, 0.01) )
            continue
        pulsed = False
        if [r['crate'],r['slot'],r['femch']] in pulsed_list:
            pulsed = True
        # hack to fix unknown problem
        if (r['crate'],r['slot'],r['femch'])==(1,8,0):
            #mw.vires.setWireColorByCSF( r['crate'],r['slot'],r['femch'], (0.01, 0.01, 0.01, 0.01) )
            mw.vires.setWireColor( 'U',640, (0.01, 0.01, 0.01, 0.05) )
            continue
        alpha = 0.95
        # FFT
        if plotfft:
            red = r['rval']
            g = r['gval']
            b = r['bval']
            if subbg:
                # Background-subtract, clamping each channel at zero.
                red -= r['bg_rval']
                g -= r['bg_gval']
                b -= r['bg_bval']
                if red<0:
                    red = 0
                if g<0:
                    g = 0
                if b<0:
                    b = 0
            red /= rmax
            g /= gmax
            b /= bmax
            # Any channel above 10% of its normalisation (or a pulsed
            # channel) is highlighted and recorded as a candidate.
            if red>0.1 or g>0.1 or b>0.1 or pulsed:
                alpha = 0.8
                if subbg:
                    print r['crate'],r['slot'],r['femch'],r['plane'],int(r['wireid']),red,g,b,"bg=(",r['bg_rval']/rmax,r['bg_gval']/gmax,r['bg_bval']/bmax,")"
                else:
                    # NOTE(review): ['wireid'] here prints a literal list,
                    # not r['wireid'] — looks like a typo; preserved as-is.
                    print r['crate'],r['slot'],r['femch'],r['plane'],['wireid'],red,g,b
                candidates.append( (r['crate'],r['slot'],r['femch']) )
            # pulsed wire color
            #if (r['crate'],r['slot'],r['femch'])==(6,9,0):
            #    mw.vires.setWireColor( plane, wireid, ( 1.0, 1.0, 1.0, 1.0 ) )
            #if above_thresh:
            mw.vires.setWireColor( r['plane'], int(r['wireid']), ( (0.1+red), (0.1+g), (0.1+b), alpha ) )
        # AMP
        else:
            red = 0.01 + 0.99*r['max_amp']/pulsed_maxamp
            if not pulsed:
                # Significance cut: smoothed peak > 5 sigma and raw amp > 1.
                if r['max_smooth']/r['rms_smooth']>5.0 and r['max_amp']>1.0:
                    #mw.vires.setWireColor( r['plane'], int(r['wireid']), ( red, 0.01, 0.01, alpha ) )
                    mw.vires.setWireColorByCSF( int(r['crate']),int(r['slot']),int(r['femch']), ( red, 0.01, 0.01, alpha ) )
                    print r['crate'],r['slot'],r['femch'],r['plane'],int(r['wireid']),red,r['max_amp'],pulsed
                else:
                    mw.vires.setWireColorByCSF( int(r['crate']),int(r['slot']),int(r['femch']), ( 0.01, 0.01, 0.01, alpha ) )
                    #mw.vires.setWireColor( r['plane'], int(r['wireid']), ( 0.01, 0.01, 0.01, alpha ) )
            else:
                print "Pulsed: ",int(r['crate']),int(r['slot']),int(r['femch'])
                mw.vires.setWireColorByCSF( int(r['crate']),int(r['slot']),int(r['femch']), ( red, red, red, 1.0 ) )
                #mw.vires.setWireColor( r['plane'], int(r['wireid']), ( red, red, red, 1.0 ) )
    print candidates
# Build the viewer window at import time and show the wire display.
mw = getmw()
mw.vires.show()
if __name__ == "__main__":
    mw = getmw()
    plot_run( mw, 95, 44, 55 )
    #plot_run( mw, 83, 0, 0 )
    #plot_run( mw, 83, 0, 0 )
    mw.vires.show()
    #if sys.flags.interactive != 1 or not hasattr(QtCore, 'PYQT_VERSION'):
    pg.QtGui.QApplication.exec_()
    # Keep the process alive until the user presses Enter (Python 2).
    raw_input()
|
# import discord
from redbot.core import commands
class Greetings(commands.Cog):
    """Redbot cog that welcomes new members of the Mystic Valley guild."""
    def __init__(self, bot):
        self.bot = bot
    @commands.Cog.listener()
    async def on_member_join(self, member):
        # NOTE: both channel ids are hard-coded for one specific guild.
        channel = self.bot.get_channel(971361677348044862) # the channel ID
        await channel.send(f"Welcome to Mystic Valley {member.mention}, enjoy your stay. Please do read the rules in {self.bot.get_channel(971361336594403368).mention}")
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 28 22:51:33 2018
@author: shooter
"""
import dash
import dash_core_components as dcc
import dash_html_components as html
# NOTE(review): hard-coded Mapbox access token committed to source — move it
# to an environment variable / secret store and rotate the exposed token.
token = 'pk.eyJ1Ijoicm9iaW5zdW5ueSIsImEiOiJjam50Nmh3ZW0wcW9zM3BwNmRjcjgyNjJ3In0.BL1Gs2sYSrUkEJ7soat4jg'
app = dash.Dash()
# Boostrap CSS.
app.css.append_css({'external_url': 'https://codepen.io/amyoshino/pen/jzXypZ.css'})
# Page layout: one row with the project title and three partner logos floated right.
app.layout = html.Div(
    html.Div([
        html.Div([
            html.Div([
                html.H1(children='Ending NTD’s thru women led WASH',className='eight columns',
                        style={'position': 'relative','padding-top': 20,'padding-left': 20})
            ]),
            html.Div([
                html.Img(src="https://www.iapb.org/wp-content/uploads/PHFI.png_0-400x177.jpg",
                         className='one columns',
                         style={'height': '30%','width': '30%','float': 'right','position': 'relative','margin-top': 10,},),
                html.Img(src="https://www.leprosy.org/wp-content/themes/blankslate/img/logo.png",
                         className='one columns',
                         style={'height': '30%','width': '30%','float': 'right','position': 'relative','margin-top': 20,},),
                html.Img(src="https://allngoindia.files.wordpress.com/2015/04/lepra-society.jpg",
                         className='one columns',
                         style={'height': '30%','width': '30%','float': 'right','position': 'relative','margin-top': 10,},)
            ], className = 'three columns')
        ], className = 'row')
    ])
)
if __name__ == '__main__':
    # debug=True enables the reloader/devtools — disable for production.
    app.run_server(debug=True)
from io import StringIO
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import scipy.stats as stats
import numpy as np
from sklearn import preprocessing, tree
from sklearn.tree import export_graphviz
from sklearn.model_selection import train_test_split
from scipy.stats import chi2
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from collections import Counter
import pydotplus
from six import StringIO
def plot_correlation(df, categorical, attributes):
    """Label-encode the given categorical columns, then show an annotated
    heatmap of the correlation matrix over *attributes*.

    NOTE(review): the encoded columns replace the originals IN PLACE on the
    caller's DataFrame — confirm callers rely on this before changing.
    """
    size = len(attributes)
    le = preprocessing.LabelEncoder()
    for category in categorical:
        df[category] = le.fit_transform(df[category])
    df = df[attributes]
    corr = df.corr()
    # _, ax = plt.subplots(figsize=(size,size))
    #ax.grid(False)
    #ax.matshow(corr)
    #plt.xticks(range(len(corr.columns)),corr.columns)
    #plt.yticks(range(len(corr.columns)),corr.columns)
    sns.heatmap(corr, annot=True)
    plt.show()
def plot_hist(df, attribute):
    """Display a histogram of one DataFrame column."""
    column = df[attribute]
    column.hist()
    plt.show()
def plot_box(df, catAttribute, numAttribute):
    """Display a box plot of *numAttribute* grouped by *catAttribute*."""
    sns.boxplot(y=numAttribute, x=catAttribute, data=df)
    plt.show()
def relative_mean(df, catAttribute, numAttribute, isBothCat=False):
    """Print the mean of *numAttribute* grouped by *catAttribute*.

    When isBothCat is True, *numAttribute* is label-encoded first.
    NOTE(review): that encoding mutates the caller's DataFrame in place —
    confirm callers expect this.
    """
    if (isBothCat):
        le = preprocessing.LabelEncoder()
        df[numAttribute] = le.fit_transform(df[numAttribute])
    print(df[[catAttribute, numAttribute]].groupby([catAttribute]).mean())
def plot_count(df, attribute, hue=None):
    """Display a count plot (bar chart of value frequencies) for one column,
    optionally split by a *hue* column."""
    # Idiom fix: compare against None with `is not` (PEP 8), not `!=`.
    if hue is not None:
        sns.countplot(x=attribute, data=df, hue=hue)
    else:
        sns.countplot(x=attribute, data=df)
    plt.show()
def subplot_categs(dfs, titles, category, fignum=1):
    """Draw side-by-side bar charts of `category` value counts, one subplot
    per DataFrame, sharing the y-axis with the first subplot.

    dfs and titles are parallel sequences; fignum selects the pyplot figure.
    """
    plt.figure(fignum, figsize=(12, 6))
    first_axis = None
    for df_index, df in enumerate(dfs):
        title = titles[df_index]
        uniques = list(sorted(df[category].unique()))
        counts = [df[df[category]==value].shape[0] for value in uniques]
        size = len(uniques)
        xcoords = list(range(1, size+1))
        if df_index == 0:
            first_axis = plt.subplot(1, 2, df_index+1)
        else:
            # Share the y-scale so panels are visually comparable.
            # (Unused locals `number_of_dfs` / `new_axis` removed.)
            plt.subplot(1, 2, df_index + 1, sharey=first_axis)
        plt.bar(xcoords, counts)
        # Rotate labels when there are many categories to avoid overlap.
        plt.xticks(xcoords, uniques, rotation='vertical' if size >= 5 else 'horizontal')
        plt.title((title if title else ''))
    plt.tight_layout()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.