blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4722f6e3ab467de61e0fd91c6670670ce714fca3 | e547f7a92e7a1c1d79f8631f9e8ee8a93879a4eb | /src/repairs/repair_11.py | 5a2c890a527ffcef78c4ae88cc36fc201cd5a01e | [] | no_license | gsakkas/seq2parse | 3c33ec7bc6cc6e4abd9e4981e53efdc173b7a7b9 | 7ae0681f1139cb873868727f035c1b7a369c3eb9 | refs/heads/main | 2023-04-09T12:29:37.902066 | 2023-01-18T21:32:12 | 2023-01-18T21:32:12 | 417,597,310 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 56 | py | a = 5
if a == 0 < 0 < a < 10 :
print("test passé")
| [
"george.p.sakkas@gmail.com"
] | george.p.sakkas@gmail.com |
d1c2f341fc99b55e1a5ffcadc8c8515ed99a0411 | 3106db3841c4f1a0ab01f291426ac43a41a6b7b9 | /example/HTG9200/fpga_10g/tb/fpga_core/test_fpga_core.py | e1eb8dea90cf98204613b0a7ff2ed243518ad142 | [
"MIT"
] | permissive | NP95/verilog-ethernet | 011a4886b34c1cfdbc707e05a8d924328aa97963 | 40acee1bc59c8091c65cedfa470cc16cbce8e6bb | refs/heads/master | 2022-09-30T01:55:39.411808 | 2022-07-25T23:35:26 | 2022-07-25T23:35:26 | 218,686,188 | 0 | 0 | MIT | 2019-10-31T04:46:11 | 2019-10-31T04:46:11 | null | UTF-8 | Python | false | false | 30,238 | py | """
Copyright (c) 2020 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
import os
from scapy.layers.l2 import Ether, ARP
from scapy.layers.inet import IP, UDP
import cocotb_test.simulator
import cocotb
from cocotb.log import SimLog
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge
from cocotbext.eth import XgmiiFrame, XgmiiSource, XgmiiSink
class TB:
    """Testbench harness for the HTG9200 10G fpga_core design.

    Starts the core clock plus per-lane RX/TX clocks for every 10G lane of
    the nine QSFP modules, and attaches one XGMII source (driving the DUT RX
    side) and one XGMII sink (capturing the DUT TX side) per lane.  The
    per-lane endpoints keep their historical attribute names, e.g.
    ``qsfp_3_2_source`` / ``qsfp_3_2_sink`` for QSFP module 3, lane 2, so
    existing tests keep working unchanged.
    """

    QSFP_PORTS = range(1, 10)  # QSFP modules 1..9
    QSFP_LANES = range(1, 5)   # four 10G lanes per module

    def __init__(self, dut):
        self.dut = dut

        self.log = SimLog("cocotb.tb")
        self.log.setLevel(logging.DEBUG)

        # 6.4 ns period == 156.25 MHz, the 64-bit XGMII word clock rate.
        cocotb.start_soon(Clock(dut.clk, 6.4, units="ns").start())

        dut.btn.setimmediatevalue(0)
        dut.sw.setimmediatevalue(0)
        dut.uart_txd.setimmediatevalue(1)
        dut.uart_rts.setimmediatevalue(1)

        # Ethernet: per-lane clocks plus XGMII source/sink endpoints.
        # Same construction order as the original hand-unrolled code:
        # rx clock, source, tx clock, sink, lane by lane, port by port.
        for port in self.QSFP_PORTS:
            for lane in self.QSFP_LANES:
                rx_clk = getattr(dut, f"qsfp_{port}_rx_clk_{lane}")
                rx_rst = getattr(dut, f"qsfp_{port}_rx_rst_{lane}")
                rxd = getattr(dut, f"qsfp_{port}_rxd_{lane}")
                rxc = getattr(dut, f"qsfp_{port}_rxc_{lane}")
                cocotb.start_soon(Clock(rx_clk, 6.4, units="ns").start())
                setattr(self, f"qsfp_{port}_{lane}_source",
                        XgmiiSource(rxd, rxc, rx_clk, rx_rst))

                tx_clk = getattr(dut, f"qsfp_{port}_tx_clk_{lane}")
                tx_rst = getattr(dut, f"qsfp_{port}_tx_rst_{lane}")
                txd = getattr(dut, f"qsfp_{port}_txd_{lane}")
                txc = getattr(dut, f"qsfp_{port}_txc_{lane}")
                cocotb.start_soon(Clock(tx_clk, 6.4, units="ns").start())
                setattr(self, f"qsfp_{port}_{lane}_sink",
                        XgmiiSink(txd, txc, tx_clk, tx_rst))

    def _reset_signals(self):
        """Yield the core reset followed by every per-lane rx/tx reset.

        Yields in the same order the original unrolled code drove them:
        ``rst`` first, then for each port: rx_1, tx_1, rx_2, tx_2, ...
        """
        yield self.dut.rst
        for port in self.QSFP_PORTS:
            for lane in self.QSFP_LANES:
                yield getattr(self.dut, f"qsfp_{port}_rx_rst_{lane}")
                yield getattr(self.dut, f"qsfp_{port}_tx_rst_{lane}")

    async def init(self):
        """Pulse all resets: 10 cycles deasserted, 10 asserted, then release."""
        for rst in self._reset_signals():
            rst.setimmediatevalue(0)

        for _ in range(10):
            await RisingEdge(self.dut.clk)

        # NOTE: assignment via ``.value`` instead of the deprecated
        # ``sig <= val`` form; ``start_soon`` implies cocotb >= 1.6 where
        # the overloaded ``<=`` is deprecated (removed in cocotb 2.0).
        for rst in self._reset_signals():
            rst.value = 1

        for _ in range(10):
            await RisingEdge(self.dut.clk)

        for rst in self._reset_signals():
            rst.value = 0
@cocotb.test()
async def run_test(dut):
    """End-to-end UDP echo test on QSFP 1, lane 1.

    Sends a UDP packet to the DUT, answers the DUT's ARP request for the
    sender address, and checks that the DUT echoes the UDP payload back
    with source/destination addresses and ports swapped.
    """
    tb = TB(dut)

    await tb.init()

    tb.log.info("test UDP RX packet")

    # Stimulus: Ethernet / IPv4 / UDP carrying a 256-byte incrementing
    # payload, addressed to the DUT (02:00:00:00:00:00 / 192.168.1.128).
    payload = bytes([x % 256 for x in range(256)])
    eth = Ether(src='5a:51:52:53:54:55', dst='02:00:00:00:00:00')
    ip = IP(src='192.168.1.100', dst='192.168.1.128')
    udp = UDP(sport=5678, dport=1234)
    test_pkt = eth / ip / udp / payload

    test_frame = XgmiiFrame.from_payload(test_pkt.build())
    await tb.qsfp_1_1_source.send(test_frame)

    tb.log.info("receive ARP request")

    # Before replying, the DUT broadcasts an ARP request (op == 1) asking
    # for the sender's IP (192.168.1.100), as the asserts below verify.
    rx_frame = await tb.qsfp_1_1_sink.recv()
    rx_pkt = Ether(bytes(rx_frame.get_payload()))

    tb.log.info("RX packet: %s", repr(rx_pkt))

    assert rx_pkt.dst == 'ff:ff:ff:ff:ff:ff'
    assert rx_pkt.src == test_pkt.dst
    assert rx_pkt[ARP].hwtype == 1
    assert rx_pkt[ARP].ptype == 0x0800
    assert rx_pkt[ARP].hwlen == 6
    assert rx_pkt[ARP].plen == 4
    assert rx_pkt[ARP].op == 1  # 1 == ARP request
    assert rx_pkt[ARP].hwsrc == test_pkt.dst
    assert rx_pkt[ARP].psrc == test_pkt[IP].dst
    assert rx_pkt[ARP].hwdst == '00:00:00:00:00:00'
    assert rx_pkt[ARP].pdst == test_pkt[IP].src

    tb.log.info("send ARP response")

    # Answer the request (op=2) so the DUT can resolve the sender's MAC.
    eth = Ether(src=test_pkt.src, dst=test_pkt.dst)
    arp = ARP(hwtype=1, ptype=0x0800, hwlen=6, plen=4, op=2,
              hwsrc=test_pkt.src, psrc=test_pkt[IP].src,
              hwdst=test_pkt.dst, pdst=test_pkt[IP].dst)
    resp_pkt = eth / arp

    resp_frame = XgmiiFrame.from_payload(resp_pkt.build())
    await tb.qsfp_1_1_source.send(resp_frame)

    tb.log.info("receive UDP packet")

    # The echoed packet must mirror addresses/ports and carry the payload.
    rx_frame = await tb.qsfp_1_1_sink.recv()
    rx_pkt = Ether(bytes(rx_frame.get_payload()))

    tb.log.info("RX packet: %s", repr(rx_pkt))

    assert rx_pkt.dst == test_pkt.src
    assert rx_pkt.src == test_pkt.dst
    assert rx_pkt[IP].dst == test_pkt[IP].src
    assert rx_pkt[IP].src == test_pkt[IP].dst
    assert rx_pkt[UDP].dport == test_pkt[UDP].sport
    assert rx_pkt[UDP].sport == test_pkt[UDP].dport
    assert rx_pkt[UDP].payload == test_pkt[UDP].payload

    await RisingEdge(dut.clk)
    await RisingEdge(dut.clk)
# cocotb-test

# Directory layout (relative to this file, presumably tb/fpga_core/ — see
# repo path): DUT RTL two levels up in rtl/, shared eth/axis libraries
# under the sibling lib/ tree.
tests_dir = os.path.abspath(os.path.dirname(__file__))
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
lib_dir = os.path.abspath(os.path.join(rtl_dir, '..', 'lib'))
axis_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'eth', 'lib', 'axis', 'rtl'))
eth_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'eth', 'rtl'))
def test_fpga_core(request):
    """Pytest entry point: compile the HDL and run this module's cocotb tests."""
    dut = "fpga_core"
    module = os.path.splitext(os.path.basename(__file__))[0]
    toplevel = dut

    # HDL sources, grouped by the directory each file lives in.
    eth_files = [
        "eth_mac_10g_fifo.v",
        "eth_mac_10g.v",
        "axis_xgmii_rx_64.v",
        "axis_xgmii_tx_64.v",
        "lfsr.v",
        "eth_axis_rx.v",
        "eth_axis_tx.v",
        "udp_complete_64.v",
        "udp_checksum_gen_64.v",
        "udp_64.v",
        "udp_ip_rx_64.v",
        "udp_ip_tx_64.v",
        "ip_complete_64.v",
        "ip_64.v",
        "ip_eth_rx_64.v",
        "ip_eth_tx_64.v",
        "ip_arb_mux.v",
        "arp.v",
        "arp_cache.v",
        "arp_eth_rx.v",
        "arp_eth_tx.v",
        "eth_arb_mux.v",
    ]
    axis_files = [
        "arbiter.v",
        "priority_encoder.v",
        "axis_fifo.v",
        "axis_async_fifo.v",
        "axis_async_fifo_adapter.v",
    ]

    verilog_sources = [os.path.join(rtl_dir, f"{dut}.v")]
    verilog_sources.extend(os.path.join(eth_rtl_dir, name) for name in eth_files)
    verilog_sources.extend(os.path.join(axis_rtl_dir, name) for name in axis_files)

    parameters = {}
    # parameters['A'] = val

    # Forward HDL parameters to the simulation environment.
    extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}

    # One build directory per pytest test id (brackets are not path-safe).
    sim_build = os.path.join(tests_dir, "sim_build",
        request.node.name.replace('[', '-').replace(']', ''))

    cocotb_test.simulator.run(
        python_search=[tests_dir],
        verilog_sources=verilog_sources,
        toplevel=toplevel,
        module=module,
        parameters=parameters,
        sim_build=sim_build,
        extra_env=extra_env,
    )
| [
"alex@alexforencich.com"
] | alex@alexforencich.com |
e6070a073f733d9b54b40d15cfacc66e992b4c2b | 20acb8c4bd5f29e6ecc9006f6228b787b6f71c73 | /app/travel_borders_api/asgi.py | eaa6393c06d5e5e1eeabb5d4ed12b51b0b4c0234 | [] | no_license | datainvestor/TravelBordersApi | 3e6dd8c331c08603f16790aa52a4eb131754423e | 75cd5936f7c121ab8f90430f455095337eb5c141 | refs/heads/master | 2023-05-09T05:24:40.843955 | 2021-06-03T19:11:33 | 2021-06-03T19:11:33 | 371,000,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | """
ASGI config for travel_borders_api project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings unless the deployment overrides it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'travel_borders_api.settings')

# Module-level ASGI callable picked up by the ASGI server.
application = get_asgi_application()
| [
"you@example.com"
] | you@example.com |
14fa51b5951a609f30d6f5382c6dc0df6719efa5 | 2bc8f66fd34ba1b93de82c67954a10f8b300b07e | /general_backbone/models/layers/space_to_depth.py | 48e73a718a9b86f6a483b8172b52ba33c6fde35b | [] | no_license | DoDucNhan/general_backbone | 7dabffed5a74e622ba23bf275358ca2d09faddc1 | 686c92ab811221d594816207d86a0b97c9b4bc73 | refs/heads/main | 2023-08-31T14:59:23.873555 | 2021-10-23T06:34:14 | 2021-10-23T06:34:14 | 420,419,141 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,805 | py | # Copyright (c) general_backbone. All rights reserved.
import torch
import torch.nn as nn
class SpaceToDepth(nn.Module):
    """Rearrange spatial blocks into channels (space-to-depth).

    A ``(N, C, H, W)`` input becomes
    ``(N, C * block_size**2, H // block_size, W // block_size)``; H and W
    must be divisible by ``block_size``.  The output channel ordering is
    ``(block_row, block_col, C)``, matching :class:`SpaceToDepthJit`.

    Note: the original implementation asserted ``block_size == 4`` even
    though the math is fully general; the restriction is lifted here while
    keeping 4 as the default.
    """

    def __init__(self, block_size=4):
        super().__init__()
        if block_size <= 0:
            raise ValueError(f"block_size must be positive, got {block_size}")
        self.bs = block_size

    def forward(self, x):
        N, C, H, W = x.size()
        bs = self.bs
        x = x.view(N, C, H // bs, bs, W // bs, bs)    # (N, C, H//bs, bs, W//bs, bs)
        x = x.permute(0, 3, 5, 1, 2, 4).contiguous()  # (N, bs, bs, C, H//bs, W//bs)
        x = x.view(N, C * (bs ** 2), H // bs, W // bs)  # (N, C*bs^2, H//bs, W//bs)
        return x
@torch.jit.script
class SpaceToDepthJit(object):
    # TorchScript-compiled space-to-depth with block_size fixed at 4 so the
    # reshape arguments are compile-time constants.  Produces the same
    # (block_row, block_col, C) channel ordering as SpaceToDepth.
    def __call__(self, x: torch.Tensor):
        # assuming hard-coded that block_size==4 for acceleration
        N, C, H, W = x.size()
        x = x.view(N, C, H // 4, 4, W // 4, 4)  # (N, C, H//bs, bs, W//bs, bs)
        x = x.permute(0, 3, 5, 1, 2, 4).contiguous()  # (N, bs, bs, C, H//bs, W//bs)
        x = x.view(N, C * 16, H // 4, W // 4)  # (N, C*bs^2, H//bs, W//bs)
        return x
class SpaceToDepthModule(nn.Module):
    """Wrapper that selects the scripted or the plain space-to-depth op.

    Both variants use block_size == 4; the scripted one trades flexibility
    for TorchScript acceleration.
    """

    def __init__(self, no_jit=False):
        super().__init__()
        self.op = SpaceToDepth() if no_jit else SpaceToDepthJit()

    def forward(self, x):
        return self.op(x)
class DepthToSpace(nn.Module):
    """Move channel blocks back into the spatial dims (inverse of SpaceToDepth).

    A ``(N, C, H, W)`` input becomes
    ``(N, C // block_size**2, H * block_size, W * block_size)``, undoing the
    ``(block_row, block_col, C)`` channel packing used by SpaceToDepth.
    """

    def __init__(self, block_size):
        super().__init__()
        self.bs = block_size

    def forward(self, x):
        batch, chans, height, width = x.size()
        bs = self.bs
        out_chans = chans // (bs * bs)
        # (N, bs, bs, C', H, W): split the channel dim into block offsets.
        y = x.view(batch, bs, bs, out_chans, height, width)
        # (N, C', H, bs, W, bs): interleave block offsets with spatial dims.
        y = y.permute(0, 3, 4, 1, 5, 2).contiguous()
        # (N, C', H*bs, W*bs): merge each block back into the spatial grid.
        return y.view(batch, out_chans, height * bs, width * bs)
| [
"phamdinhkhanh.tkt53.neu@gmail.com"
] | phamdinhkhanh.tkt53.neu@gmail.com |
690d1fb3811c94ca85e0333746c7a3bd66f82987 | 3b9b4049a8e7d38b49e07bb752780b2f1d792851 | /src/chrome/browser/ui/views/DEPS | 4637b49c44569e29c5e05fa5798cb3fe21e9680c | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | webosce/chromium53 | f8e745e91363586aee9620c609aacf15b3261540 | 9171447efcf0bb393d41d1dc877c7c13c46d8e38 | refs/heads/webosce | 2020-03-26T23:08:14.416858 | 2018-08-23T08:35:17 | 2018-09-20T14:25:18 | 145,513,343 | 0 | 2 | Apache-2.0 | 2019-08-21T22:44:55 | 2018-08-21T05:52:31 | null | UTF-8 | Python | false | false | 196 | include_rules = [
"+chrome/browser/ui/views",
"+components/constrained_window",
"+components/mus/public/cpp",
"+components/user_manager",
"+content/app/resources/grit/content_resources.h",
]
| [
"changhyeok.bae@lge.com"
] | changhyeok.bae@lge.com | |
eb3bae2cd6c429b345069cfc2bfb1afabf8a250c | cfb1073b578e94315bb824e1ee659950fd99b91f | /web/budgets/urls.py | 4463366aef415bd538988d6f33ba3e6bcf64f8fb | [] | no_license | madelinepet/budget_tool | 1cf8e910a5def4a13d4b491214fefd4d02e2409f | 8d682907f98959b88c191a06abba92e4f1c3fd46 | refs/heads/master | 2021-06-13T18:53:39.701832 | 2019-08-02T17:17:12 | 2019-08-02T17:17:12 | 151,135,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | from django.urls import path
from .views import (
    BudgetListView,
    TransactionDetailView,
    BudgetCreateView,
    TransactionCreateView,
)

# URL routes for the budgets app; the ``name`` values are referenced by
# reverse() / {% url %} lookups elsewhere in the project.
urlpatterns = [
    # List all budgets.
    path('budget', BudgetListView.as_view(), name='budget_view'),
    # Create a new budget.
    path('budget/new', BudgetCreateView.as_view(), name='budget_create'),
    # Show one transaction by integer id.
    path(
        'transaction/<int:id>',
        TransactionDetailView.as_view(),
        name='transaction_detail'
    ),
    # Create a new transaction.
    path(
        'transaction/new',
        TransactionCreateView.as_view(),
        name="transaction_create"
    )
]
| [
"madelinepet@hotmail.com"
] | madelinepet@hotmail.com |
a3227dc045f8e76677e6a561e74f403e6a16d8b4 | 05780fe9a74b116832611a35fce38fa24b4d4ffc | /madgraph/madgraph_binaries/models/OLD_loopModels_backup/smQCDNLOmass/lorentz.py | 3ea7fb70b89a58b93d5c0d64a9d5496a9aee5e20 | [] | no_license | cesarotti/Dark-Photons | d810658190297528470abe757c4a678075ef48f6 | c6dce1df70c660555bf039a78765e4efbffb4877 | refs/heads/master | 2021-01-22T19:26:13.892225 | 2015-01-28T05:43:20 | 2015-01-28T05:49:54 | 20,692,647 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,518 | py | # This file was automatically created by FeynRules $Revision: 535 $
# Mathematica version: 7.0 for Mac OS X x86 (64-bit) (November 11, 2008)
# Date: Fri 18 Mar 2011 18:40:51
from object_library import all_lorentz, Lorentz
from function_library import complexconjugate, re, im, csc, sec, acsc, asec
# Lorentz index structures for the model's vertices (UFO format).
# ``spins`` lists 2S+1 per leg (here: 1 = scalar-like/ghost, 2 = fermion,
# 3 = vector); ``structure`` is the analytic expression in UFO notation
# (P = momentum, Gamma = Dirac matrix, Metric = metric tensor).

# R2 counterterm structures with two vector legs (gluon two-point function).
R2_GG_1 = Lorentz(name = 'R2_GG_1',
                  spins = [ 3, 3 ],
                  structure = 'P(-1,1)*P(-1,1)*Metric(1,2)')

R2_GG_2 = Lorentz(name = 'R2_GG_2',
                  spins = [ 3, 3 ],
                  structure = 'P(1,1)*P(2,1)')

R2_GG_3 = Lorentz(name = 'R2_GG_3',
                  spins = [ 3, 3 ],
                  structure = 'Metric(1,2)')

# R2 counterterm structures with two fermion legs (quark two-point function).
R2_QQ_1 = Lorentz(name = 'R2_QQ_1',
                  spins = [ 2, 2 ],
                  structure = 'P(-1,1)*Gamma(-1,2,1)')

R2_QQ_2 = Lorentz(name = 'R2_QQ_2',
                  spins = [ 2, 2 ],
                  structure = 'Identity(1,2)')

# Fermion-fermion-vector vertex.
FFV1 = Lorentz(name = 'FFV1',
               spins = [ 2, 2, 3 ],
               structure = 'Gamma(3,2,1)')

# Triple-vector (Yang-Mills) vertex.
VVV1 = Lorentz(name = 'VVV1',
               spins = [ 3, 3, 3 ],
               structure = 'P(3,1)*Metric(1,2) - P(3,2)*Metric(1,2) - P(2,1)*Metric(1,3) + P(2,3)*Metric(1,3) + P(1,2)*Metric(2,3) - P(1,3)*Metric(2,3)')

# Quartic-vector contractions.
VVVV1 = Lorentz(name = 'VVVV1',
                spins = [ 3, 3, 3, 3 ],
                structure = 'Metric(1,4)*Metric(2,3) - Metric(1,3)*Metric(2,4)')

VVVV3 = Lorentz(name = 'VVVV3',
                spins = [ 3, 3, 3, 3 ],
                structure = 'Metric(1,4)*Metric(2,3) - Metric(1,2)*Metric(3,4)')

VVVV4 = Lorentz(name = 'VVVV4',
                spins = [ 3, 3, 3, 3 ],
                structure = 'Metric(1,3)*Metric(2,4) - Metric(1,2)*Metric(3,4)')

# Ghost-ghost-gluon vertex (presumably: name reads GH-GH-G; two spin-1
# ghost-like legs plus one vector leg).
GHGHG = Lorentz(name = 'GHGHG',
                spins = [ 1, 1, 3 ],
                structure = 'P(3,1)')

#=============================================================================================
# 4-gluon R2 vertex
#=============================================================================================

R2_4G_1234 = Lorentz(name = 'R2_4G_1234',
                     spins = [ 3, 3, 3, 3 ],
                     structure = 'Metric(1,2)*Metric(3,4)')

R2_4G_1324 = Lorentz(name = 'R2_4G_1324',
                     spins = [ 3, 3, 3, 3 ],
                     structure = 'Metric(1,3)*Metric(2,4)')

R2_4G_1423 = Lorentz(name = 'R2_4G_1423',
                     spins = [ 3, 3, 3, 3 ],
                     structure = 'Metric(1,4)*Metric(2,3)')
#=============================================================================================
| [
"eyvind.niklasson@gmail.com"
] | eyvind.niklasson@gmail.com |
8ef864cb9d779223b9c72360d7e995c16611736f | 4a8bfa3407aa98a04ede3162f85467b1b5012fe7 | /aiogram/api/types/animation.py | bf9a29a321fea81e8096895f80a8b30531f09602 | [] | no_license | aiogram/tg-codegen | 07ec80814eec46f464d2490fd27b7b6b27257f1b | ba3c2f893591d45dda418dd16e0646e260afdf14 | refs/heads/master | 2022-12-09T10:44:10.781570 | 2021-11-07T23:33:25 | 2021-11-07T23:33:25 | 218,523,371 | 24 | 5 | null | 2022-12-08T08:47:43 | 2019-10-30T12:33:21 | Python | UTF-8 | Python | false | false | 1,276 | py | from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from .base import TelegramObject
if TYPE_CHECKING:
from .photo_size import PhotoSize
# === Generated region: Animation ===
class Animation(TelegramObject):
    """
    This object represents an animation file (GIF or H.264/MPEG-4 AVC video without sound).

    Source: https://core.telegram.org/bots/api#animation
    """

    # NOTE: generated from the Telegram Bot API schema ("Generated region"
    # marker above); field names/order mirror the API object — do not edit
    # by hand.
    file_id: str
    """Identifier for this file, which can be used to download or reuse the file"""
    file_unique_id: str
    """Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file."""
    width: int
    """Video width as defined by sender"""
    height: int
    """Video height as defined by sender"""
    duration: int
    """Duration of the video in seconds as defined by sender"""
    thumb: Optional[PhotoSize] = None
    """*Optional*. Animation thumbnail as defined by sender"""
    file_name: Optional[str] = None
    """*Optional*. Original animation filename as defined by sender"""
    mime_type: Optional[str] = None
    """*Optional*. MIME type of the file as defined by sender"""
    file_size: Optional[int] = None
    """*Optional*. File size in bytes"""
| [
"jroot.junior@gmail.com"
] | jroot.junior@gmail.com |
338b83160b9e57f4812825f9e2c52a813242d952 | bdbc9cd8c64cfa92efffb9e138cb282d36f69b0a | /addons/website_mail/__openerp__.py | a160ac0bab52188a3a20c2e1a6e1298140191e9f | [] | no_license | clebaresu/impra-adns | d330cece1b710643625627bfd7ed66bac7d233ef | 8b9889d86c6ea194cfb7b0db8bdc3284635cc081 | refs/heads/master | 2020-05-02T16:51:41.798969 | 2019-03-27T22:03:32 | 2019-03-27T22:03:32 | 178,080,681 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,666 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Website Mail',
    # 'Hidden' keeps this glue module out of the Apps listing.
    'category': 'Hidden',
    'summary': 'Website Module for Mail',
    'version': '0.1',
    'description': """Glue module holding mail improvements for website.""",
    'author': 'OpenERP SA',
    'depends': ['website', 'mail', 'email_template'],
    # XML data files loaded on install/update (views, snippets, security).
    'data': [
        'views/snippets.xml',
        'views/website_mail.xml',
        'views/website_email_designer.xml',
        'views/email_template_view.xml',
        'data/mail_groups.xml',
        'security/website_mail.xml',
    ],
    # Client-side QWeb templates.
    'qweb': [
        'static/src/xml/website_mail.xml'
    ],
    'installable': True,
    # Installed automatically as soon as all of 'depends' are installed.
    'auto_install': True,
}
| [
"clebaresu@gmail.com"
] | clebaresu@gmail.com |
d66af368067a0416e5b48e860e22e11825d0f57c | 2432996ac1615cd36d61f0feeff8a359d2b438d8 | /env/lib/python3.8/site-packages/pip/_internal/utils/misc.py | 24a7455628db76d02442451e002b6dc729deec31 | [
"Apache-2.0"
] | permissive | Parveshdhull/AutoTyper | dd65d53ece7c13fbc1ead7ce372947483e05e2e3 | 7fabb30e15b770d790b69c2e4eaf9bbf5a4d180c | refs/heads/main | 2023-05-08T14:10:35.404160 | 2023-05-07T20:43:15 | 2023-05-07T20:43:15 | 315,415,751 | 26 | 18 | Apache-2.0 | 2023-05-07T20:43:16 | 2020-11-23T19:13:05 | Python | UTF-8 | Python | false | false | 27,394 | py | # The following comment should be removed at some point in the future.
# mypy: strict-optional=False
# mypy: disallow-untyped-defs=False
from __future__ import absolute_import
import contextlib
import errno
import getpass
import hashlib
import io
import logging
import os
import posixpath
import shutil
import stat
import sys
from collections import deque
from itertools import tee
from pip._vendor import pkg_resources
from pip._vendor.packaging.utils import canonicalize_name
# NOTE: retrying is not annotated in typeshed as on 2017-07-17, which is
# why we ignore the type on this import.
from pip._vendor.retrying import retry # type: ignore
from pip._vendor.six import PY2, text_type
from pip._vendor.six.moves import filter, filterfalse, input, map, zip_longest
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib.parse import unquote as urllib_unquote
from pip import __version__
from pip._internal.exceptions import CommandError
from pip._internal.locations import (
get_major_minor_version,
site_packages,
user_site,
)
from pip._internal.utils.compat import (
WINDOWS,
expanduser,
stdlib_pkgs,
str_to_display,
)
from pip._internal.utils.typing import MYPY_CHECK_RUNNING, cast
from pip._internal.utils.virtualenv import (
running_under_virtualenv,
virtualenv_no_global,
)
if PY2:
from io import BytesIO as StringIO
else:
from io import StringIO
if MYPY_CHECK_RUNNING:
from typing import (
Any, AnyStr, Callable, Container, Iterable, Iterator, List, Optional,
Text, Tuple, TypeVar, Union,
)
from pip._vendor.pkg_resources import Distribution
VersionInfo = Tuple[int, int, int]
T = TypeVar("T")
# Public surface of this utility module (kept explicit for `import *`).
__all__ = ['rmtree', 'display_path', 'backup_dir',
           'ask', 'splitext',
           'format_size', 'is_installable_dir',
           'normalize_path',
           'renames', 'get_prog',
           'captured_stdout', 'ensure_dir',
           'get_installed_version', 'remove_auth_from_url']


# Module-level logger for pip's internal utilities.
logger = logging.getLogger(__name__)
def get_pip_version():
    # type: () -> str
    # Human-readable banner: "pip <version> from <package dir> (python X.Y)".
    pip_pkg_dir = os.path.join(os.path.dirname(__file__), "..", "..")
    pip_pkg_dir = os.path.abspath(pip_pkg_dir)

    return (
        'pip {} from {} (python {})'.format(
            __version__, pip_pkg_dir, get_major_minor_version(),
        )
    )
def normalize_version_info(py_version_info):
    # type: (Tuple[int, ...]) -> Tuple[int, int, int]
    """
    Pad or truncate a Python version tuple to exactly three components.

    :param py_version_info: a tuple of ints representing a Python version;
        the tuple can have any length.
    :return: a tuple of length three — missing parts are filled with zeros
        and extra parts are dropped.
    """
    trimmed = tuple(py_version_info[:3])
    padded = trimmed + (0,) * (3 - len(trimmed))
    return cast('VersionInfo', padded)
def ensure_dir(path):
    # type: (AnyStr) -> None
    """Create *path* (and parents), tolerating an already-existing directory."""
    try:
        os.makedirs(path)
    except OSError as exc:
        # EEXIST: the directory is already there.  ENOTEMPTY: Windows can
        # raise this spuriously for existing directories (pip issue #6426).
        if exc.errno not in (errno.EEXIST, errno.ENOTEMPTY):
            raise
def get_prog():
    # type: () -> str
    """Return the program name to show in usage/help text.

    When invoked via ``python -m pip`` (argv[0] is ``__main__.py``) or
    ``python -c``, the raw argv[0] is useless, so show the equivalent
    ``<python> -m pip`` invocation instead.  Falls back to 'pip' when
    argv is unusable.
    """
    try:
        prog = os.path.basename(sys.argv[0])
        if prog in ('__main__.py', '-c'):
            return "{} -m pip".format(sys.executable)
        return prog
    except (AttributeError, TypeError, IndexError):
        return 'pip'
# Retry every half second for up to 3 seconds
@retry(stop_max_delay=3000, wait_fixed=500)
def rmtree(dir, ignore_errors=False):
    # type: (Text, bool) -> None
    # Retrying wrapper around shutil.rmtree: transient failures (e.g. files
    # briefly held open by another process on Windows) are retried for up
    # to 3 seconds before giving up.
    shutil.rmtree(dir, ignore_errors=ignore_errors,
                  onerror=rmtree_errorhandler)


def rmtree_errorhandler(func, path, exc_info):
    """On Windows, the files in .svn are read-only, so when rmtree() tries to
    remove them, an exception is thrown. We catch that here, remove the
    read-only attribute, and hopefully continue without problems."""
    try:
        has_attr_readonly = not (os.stat(path).st_mode & stat.S_IWRITE)
    except (IOError, OSError):
        # it's equivalent to os.path.exists
        return

    if has_attr_readonly:
        # convert to read/write
        os.chmod(path, stat.S_IWRITE)
        # use the original function to repeat the operation
        func(path)
        return
    else:
        raise
def path_to_display(path):
    # type: (Optional[Union[str, Text]]) -> Optional[Text]
    """
    Convert a bytes (or text) path to text (unicode in Python 2) for display
    and logging purposes.

    This function should never error out. Also, this function is mainly needed
    for Python 2 since in Python 3 str paths are already text.
    """
    if path is None:
        return None
    if isinstance(path, text_type):
        return path
    # Otherwise, path is a bytes object (str in Python 2).
    try:
        display_path = path.decode(sys.getfilesystemencoding(), 'strict')
    except UnicodeDecodeError:
        # Include the full bytes to make troubleshooting easier, even though
        # it may not be very human readable.
        if PY2:
            # Convert the bytes to a readable str representation using
            # repr(), and then convert the str to unicode.
            # Also, we add the prefix "b" to the repr() return value both
            # to make the Python 2 output look like the Python 3 output, and
            # to signal to the user that this is a bytes representation.
            display_path = str_to_display('b{!r}'.format(path))
        else:
            # Silence the "F821 undefined name 'ascii'" flake8 error since
            # in Python 3 ascii() is a built-in.
            display_path = ascii(path)  # noqa: F821

    return display_path


def display_path(path):
    # type: (Union[str, Text]) -> str
    """Gives the display value for a given path, making it relative to cwd
    if possible."""
    path = os.path.normcase(os.path.abspath(path))
    if sys.version_info[0] == 2:
        # Python 2 only: round-trip through the filesystem/default encodings
        # so the path can be printed without UnicodeEncodeError.
        path = path.decode(sys.getfilesystemencoding(), 'replace')
        path = path.encode(sys.getdefaultencoding(), 'replace')
    if path.startswith(os.getcwd() + os.path.sep):
        path = '.' + path[len(os.getcwd()):]
    return path
def backup_dir(dir, ext='.bak'):
    # type: (str, str) -> str
    """Return a not-yet-existing backup name for *dir*: dir.bak, dir.bak2, ..."""
    n = 1
    candidate = dir + ext
    while os.path.exists(candidate):
        n += 1
        candidate = dir + ext + str(n)
    return candidate
def ask_path_exists(message, options):
    # type: (str, Iterable[str]) -> str
    # Honour PIP_EXISTS_ACTION so non-interactive runs can pre-answer the
    # "path already exists" prompt.
    for action in os.environ.get('PIP_EXISTS_ACTION', '').split():
        if action in options:
            return action
    return ask(message, options)


def _check_no_input(message):
    # type: (str) -> None
    """Raise an error if no input is allowed."""
    if os.environ.get('PIP_NO_INPUT'):
        raise Exception(
            'No input was expected ($PIP_NO_INPUT set); question: {}'.format(
                message)
        )


def ask(message, options):
    # type: (str, Iterable[str]) -> str
    """Ask the message interactively, with the given possible responses"""
    # Loops until one of the expected responses is typed.
    while 1:
        _check_no_input(message)
        response = input(message)
        response = response.strip().lower()
        if response not in options:
            print(
                'Your response ({!r}) was not one of the expected responses: '
                '{}'.format(response, ', '.join(options))
            )
        else:
            return response


def ask_input(message):
    # type: (str) -> str
    """Ask for input interactively."""
    _check_no_input(message)
    return input(message)


def ask_password(message):
    # type: (str) -> str
    """Ask for a password interactively."""
    _check_no_input(message)
    return getpass.getpass(message)
def format_size(bytes):
    # type: (float) -> str
    """Render a byte count in human-readable MB/kB/bytes form."""
    if bytes > 1000 * 1000:
        return '{:.1f} MB'.format(bytes / 1000.0 / 1000)
    if bytes > 10 * 1000:
        # Whole kilobytes are precise enough above 10 kB.
        return '{} kB'.format(int(bytes / 1000))
    if bytes > 1000:
        return '{:.1f} kB'.format(bytes / 1000.0)
    return '{} bytes'.format(int(bytes))
def tabulate(rows):
    # type: (Iterable[Iterable[Any]]) -> Tuple[List[str], List[int]]
    """Return a list of formatted rows and a list of column sizes.

    Each cell is stringified, each column is padded to the width of its
    widest cell, and trailing whitespace is stripped per row.
    """
    str_rows = [tuple(str(cell) for cell in row) for row in rows]
    sizes = [max(len(cell) for cell in col)
             for col in zip_longest(*str_rows, fillvalue='')]
    table = [
        " ".join(cell.ljust(width) for cell, width in zip(row, sizes)).rstrip()
        for row in str_rows
    ]
    return table, sizes
def is_installable_dir(path):
    # type: (str) -> bool
    """Return True if *path* is a directory containing setup.py or pyproject.toml."""
    if not os.path.isdir(path):
        return False
    markers = ('setup.py', 'pyproject.toml')
    return any(os.path.isfile(os.path.join(path, name)) for name in markers)
def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE):
    """Yield pieces of data from a file-like object until EOF."""
    chunk = file.read(size)
    while chunk:
        yield chunk
        chunk = file.read(size)
def normalize_path(path, resolve_symlinks=True):
    # type: (str, bool) -> str
    """
    Convert a path to its canonical, case-normalized, absolute version.
    """
    # NOTE: expanduser here is pip's compat shim, not os.path.expanduser.
    path = expanduser(path)
    if resolve_symlinks:
        path = os.path.realpath(path)
    else:
        path = os.path.abspath(path)

    return os.path.normcase(path)
def splitext(path):
    # type: (str) -> Tuple[str, str]
    """Like os.path.splitext, but treats ``.tar.<ext>`` as a single extension."""
    base, ext = posixpath.splitext(path)
    if base.lower().endswith('.tar'):
        return base[:-4], base[-4:] + ext
    return base, ext
def renames(old, new):
    # type: (str, str) -> None
    """Like os.renames(), but handles renaming across devices."""
    # Create the destination's parent directories, then move; shutil.move
    # copes with cross-device renames, unlike os.rename.
    new_dir, new_name = os.path.split(new)
    if new_dir and new_name and not os.path.exists(new_dir):
        os.makedirs(new_dir)

    shutil.move(old, new)

    # Prune any directories left empty at the source.
    old_dir, old_name = os.path.split(old)
    if old_dir and old_name:
        try:
            os.removedirs(old_dir)
        except OSError:
            pass
def is_local(path):
    # type: (str) -> bool
    """
    Return True if path is within sys.prefix, if we're running in a virtualenv.

    If we're not in a virtualenv, all paths are considered "local."

    Caution: this function assumes the head of path has been normalized
    with normalize_path.
    """
    if not running_under_virtualenv():
        return True
    return path.startswith(normalize_path(sys.prefix))


def dist_is_local(dist):
    # type: (Distribution) -> bool
    """
    Return True if given Distribution object is installed locally
    (i.e. within current virtualenv).

    Always True if we're not in a virtualenv.

    """
    return is_local(dist_location(dist))


def dist_in_usersite(dist):
    # type: (Distribution) -> bool
    """
    Return True if given Distribution is installed in user site.
    """
    return dist_location(dist).startswith(normalize_path(user_site))


def dist_in_site_packages(dist):
    # type: (Distribution) -> bool
    """
    Return True if given Distribution is installed in
    sysconfig.get_python_lib().
    """
    return dist_location(dist).startswith(normalize_path(site_packages))


def dist_is_editable(dist):
    # type: (Distribution) -> bool
    """
    Return True if given Distribution is an editable install.
    """
    # An editable ("develop") install leaves a "<name>.egg-link" file on
    # sys.path pointing back at the source checkout.
    for path_item in sys.path:
        egg_link = os.path.join(path_item, dist.project_name + '.egg-link')
        if os.path.isfile(egg_link):
            return True
    return False
def get_installed_distributions(
        local_only=True,  # type: bool
        skip=stdlib_pkgs,  # type: Container[str]
        include_editables=True,  # type: bool
        editables_only=False,  # type: bool
        user_only=False,  # type: bool
        paths=None  # type: Optional[List[str]]
):
    # type: (...) -> List[Distribution]
    """
    Return a list of installed Distribution objects.

    If ``local_only`` is True (default), only return installations
    local to the current virtualenv, if in a virtualenv.

    ``skip`` argument is an iterable of lower-case project names to
    ignore; defaults to stdlib_pkgs

    If ``include_editables`` is False, don't report editables.

    If ``editables_only`` is True , only report editables.

    If ``user_only`` is True , only report installations in the user
    site directory.

    If ``paths`` is set, only report the distributions present at the
    specified list of locations.
    """
    if paths:
        working_set = pkg_resources.WorkingSet(paths)
    else:
        working_set = pkg_resources.working_set

    # Each predicate below defaults to "accept everything" unless the
    # corresponding filter was requested by the caller.
    if local_only:
        local_test = dist_is_local
    else:
        def local_test(d):
            return True

    if include_editables:
        def editable_test(d):
            return True
    else:
        def editable_test(d):
            return not dist_is_editable(d)

    if editables_only:
        def editables_only_test(d):
            return dist_is_editable(d)
    else:
        def editables_only_test(d):
            return True

    if user_only:
        user_test = dist_in_usersite
    else:
        def user_test(d):
            return True

    return [d for d in working_set
            if local_test(d) and
            d.key not in skip and
            editable_test(d) and
            editables_only_test(d) and
            user_test(d)
            ]
def search_distribution(req_name):
    # Canonicalize the name before searching in the list of
    # installed distributions and also while creating the package
    # dictionary to get the Distribution object
    req_name = canonicalize_name(req_name)
    packages = get_installed_distributions(skip=())
    pkg_dict = {canonicalize_name(p.key): p for p in packages}
    return pkg_dict.get(req_name)


def get_distribution(req_name):
    """Given a requirement name, return the installed Distribution object"""

    # Search the distribution by looking through the working set
    dist = search_distribution(req_name)

    # If distribution could not be found, call working_set.require
    # to update the working set, and try to find the distribution
    # again.
    # This might happen for e.g. when you install a package
    # twice, once using setup.py develop and again using setup.py install.
    # Now when run pip uninstall twice, the package gets removed
    # from the working set in the first uninstall, so we have to populate
    # the working set again so that pip knows about it and the packages
    # gets picked up and is successfully uninstalled the second time too.
    if not dist:
        try:
            pkg_resources.working_set.require(req_name)
        except pkg_resources.DistributionNotFound:
            return None
    return search_distribution(req_name)
def egg_link_path(dist):
    # type: (Distribution) -> Optional[str]
    """
    Return the path for the .egg-link file if it exists, otherwise, None.

    There's 3 scenarios:
    1) not in a virtualenv
       try to find in site.USER_SITE, then site_packages
    2) in a no-global virtualenv
       try to find in site_packages
    3) in a yes-global virtualenv
       try to find in site_packages, then site.USER_SITE
       (don't look in global location)

    For #1 and #3, there could be odd cases, where there's an egg-link in 2
    locations.

    This method will just return the first one found.
    """
    sites = []
    if running_under_virtualenv():
        sites.append(site_packages)
        if not virtualenv_no_global() and user_site:
            sites.append(user_site)
    else:
        if user_site:
            sites.append(user_site)
        sites.append(site_packages)

    for site in sites:
        egglink = os.path.join(site, dist.project_name) + '.egg-link'
        if os.path.isfile(egglink):
            return egglink
    return None


def dist_location(dist):
    # type: (Distribution) -> str
    """
    Get the site-packages location of this distribution. Generally
    this is dist.location, except in the case of develop-installed
    packages, where dist.location is the source code location, and we
    want to know where the egg-link file is.

    The returned location is normalized (in particular, with symlinks removed).
    """
    egg_link = egg_link_path(dist)
    if egg_link:
        return normalize_path(egg_link)
    return normalize_path(dist.location)


def write_output(msg, *args):
    # type: (Any, Any) -> None
    # Thin wrapper so user-facing output is greppable and routed through
    # the module logger at INFO level (msg may contain %-style placeholders).
    logger.info(msg, *args)
class FakeFile(object):
    """Wrap a list of lines in an object with readline() to make
    ConfigParser happy."""

    def __init__(self, lines):
        self._gen = iter(lines)

    def readline(self):
        # Like real file objects, keep returning '' once exhausted.
        return next(self._gen, '')

    def __iter__(self):
        return self._gen
class StreamWrapper(StringIO):
    # In-memory stream that remembers the original stream it replaced so
    # attributes of the original (e.g. encoding) remain reachable.

    @classmethod
    def from_stream(cls, orig_stream):
        cls.orig_stream = orig_stream
        return cls()

    # compileall.compile_dir() needs stdout.encoding to print to stdout
    @property
    def encoding(self):
        return self.orig_stream.encoding


@contextlib.contextmanager
def captured_output(stream_name):
    """Return a context manager used by captured_stdout/stdin/stderr
    that temporarily replaces the sys stream *stream_name* with a StringIO.

    Taken from Lib/support/__init__.py in the CPython repo.
    """
    orig_stdout = getattr(sys, stream_name)
    setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout))
    try:
        yield getattr(sys, stream_name)
    finally:
        setattr(sys, stream_name, orig_stdout)


def captured_stdout():
    """Capture the output of sys.stdout:

       with captured_stdout() as stdout:
           print('hello')
       self.assertEqual(stdout.getvalue(), 'hello\n')

    Taken from Lib/support/__init__.py in the CPython repo.
    """
    return captured_output('stdout')


def captured_stderr():
    """
    See captured_stdout().
    """
    return captured_output('stderr')
def get_installed_version(dist_name, working_set=None):
    """Get the installed version of dist_name avoiding pkg_resources cache"""
    # Create a requirement that we'll look for inside of setuptools.
    req = pkg_resources.Requirement.parse(dist_name)

    if working_set is None:
        # We want to avoid having this cached, so we need to construct a new
        # working set each time.
        working_set = pkg_resources.WorkingSet()

    # Get the installed distribution from our working set
    dist = working_set.find(req)

    # Check to see if we got an installed distribution or not, if we did
    # we want to return it's version.
    return dist.version if dist else None


def consume(iterator):
    """Consume an iterable at C speed."""
    # deque with maxlen=0 discards every element without a Python-level loop.
    deque(iterator, maxlen=0)
# Simulates an enum
def enum(*sequential, **named):
    """Build a simple enum-like class: positional names get 0, 1, 2, ...,
    keyword names keep their given values, and ``reverse_mapping`` maps
    values back to names."""
    members = dict(zip(sequential, range(len(sequential))), **named)
    members['reverse_mapping'] = {value: name for name, value in members.items()}
    return type('Enum', (), members)
def build_netloc(host, port):
    # type: (str, Optional[int]) -> str
    """
    Build a netloc from a host-port pair, bracketing bare IPv6 hosts.
    """
    if port is None:
        return host
    # A colon in the host means IPv6, which must be bracketed before the
    # port is appended.
    bracketed = '[{}]'.format(host) if ':' in host else host
    return '{}:{}'.format(bracketed, port)
def build_url_from_netloc(netloc, scheme='https'):
    # type: (str, str) -> str
    """
    Build a full URL from a netloc.
    """
    # Two or more colons with no userinfo and no brackets can only be a
    # bare IPv6 address; wrap it so urllib parses it correctly.
    looks_like_bare_ipv6 = (
        netloc.count(':') >= 2 and '@' not in netloc and '[' not in netloc
    )
    host = '[{}]'.format(netloc) if looks_like_bare_ipv6 else netloc
    return '{}://{}'.format(scheme, host)
def parse_netloc(netloc):
    # type: (str) -> Tuple[str, Optional[int]]
    """
    Return the host-port pair from a netloc.
    """
    # Delegate parsing to urllib by first embedding the netloc in a URL.
    url = build_url_from_netloc(netloc)
    parsed = urllib_parse.urlparse(url)
    return parsed.hostname, parsed.port
def split_auth_from_netloc(netloc):
    """
    Parse out and remove the auth information from a netloc.

    Returns: (netloc, (username, password)).
    """
    if '@' not in netloc:
        return netloc, (None, None)

    # rsplit on '@' and left-split on ':' mirror how urllib.parse.urlsplit()
    # treats repeated occurrences of either character.
    auth, host = netloc.rsplit('@', 1)
    if ':' in auth:
        user, password = auth.split(':', 1)
    else:
        user, password = auth, None

    credentials = tuple(
        None if part is None else urllib_unquote(part)
        for part in (user, password)
    )
    return host, credentials
def redact_netloc(netloc):
    # type: (str) -> str
    """
    Replace the sensitive data in a netloc with "****", if it exists.

    For example:
        - "user:pass@example.com" returns "user:****@example.com"
        - "accesstoken@example.com" returns "****@example.com"
    """
    netloc, (user, password) = split_auth_from_netloc(netloc)
    if user is None:
        return netloc
    if password is None:
        # A lone user with no password is treated as a token and hidden.
        user = '****'
        password = ''
    else:
        user = urllib_parse.quote(user)
        password = ':****'
    return '{user}{password}@{netloc}'.format(user=user,
                                              password=password,
                                              netloc=netloc)
def _transform_url(url, transform_netloc):
    """Transform and replace netloc in a url.

    transform_netloc is a function taking the netloc and returning a
    tuple. The first element of this tuple is the new netloc. The
    entire tuple is returned.

    Returns a tuple containing the transformed url as item 0 and the
    original tuple returned by transform_netloc as item 1.
    """
    purl = urllib_parse.urlsplit(url)
    netloc_tuple = transform_netloc(purl.netloc)
    # stripped url
    url_pieces = (
        purl.scheme, netloc_tuple[0], purl.path, purl.query, purl.fragment
    )
    surl = urllib_parse.urlunsplit(url_pieces)
    return surl, netloc_tuple


def _get_netloc(netloc):
    # Adapter so split_auth_from_netloc can be used with _transform_url.
    return split_auth_from_netloc(netloc)


def _redact_netloc(netloc):
    # Adapter so redact_netloc can be used with _transform_url.
    return (redact_netloc(netloc),)


def split_auth_netloc_from_url(url):
    # type: (str) -> Tuple[str, str, Tuple[str, str]]
    """
    Parse a url into separate netloc, auth, and url with no auth.

    Returns: (url_without_auth, netloc, (username, password))
    """
    url_without_auth, (netloc, auth) = _transform_url(url, _get_netloc)
    return url_without_auth, netloc, auth


def remove_auth_from_url(url):
    # type: (str) -> str
    """Return a copy of url with 'username:password@' removed."""
    # username/pass params are passed to subversion through flags
    # and are not recognized in the url.
    return _transform_url(url, _get_netloc)[0]


def redact_auth_from_url(url):
    # type: (str) -> str
    """Replace the password in a given url with ****."""
    return _transform_url(url, _redact_netloc)[0]
class HiddenText(object):
    """Wraps a secret string so that str()/repr() show only a redacted form."""

    def __init__(
        self,
        secret,    # type: str
        redacted,  # type: str
    ):
        # type: (...) -> None
        self.secret = secret
        self.redacted = redacted

    def __repr__(self):
        # type: (...) -> str
        return '<HiddenText {!r}>'.format(str(self))

    def __str__(self):
        # type: (...) -> str
        return self.redacted

    # This is useful for testing.
    def __eq__(self, other):
        # type: (Any) -> bool
        # Only the raw secret matters for equality; the redaction text used
        # for display does not have to match.
        return type(self) == type(other) and self.secret == other.secret

    # We need to provide an explicit __ne__ implementation for Python 2.
    # TODO: remove this when we drop PY2 support.
    def __ne__(self, other):
        # type: (Any) -> bool
        return not self == other
def hide_value(value):
    # type: (str) -> HiddenText
    # Wrap an arbitrary secret so its string form is always "****".
    return HiddenText(value, redacted='****')


def hide_url(url):
    # type: (str) -> HiddenText
    # Wrap a URL, redacting only its credential component for display.
    redacted = redact_auth_from_url(url)
    return HiddenText(url, redacted=redacted)
def protect_pip_from_modification_on_windows(modifying_pip):
    # type: (bool) -> None
    """Protection of pip.exe from modification on Windows

    On Windows, any operation modifying pip should be run as:
        python -m pip ...
    """
    # The launcher names pip may be running under (pip.exe, pip3.exe,
    # pip3.8.exe, ...).
    pip_names = [
        "pip.exe",
        "pip{}.exe".format(sys.version_info[0]),
        "pip{}.{}.exe".format(*sys.version_info[:2])
    ]

    # See https://github.com/pypa/pip/issues/1299 for more discussion
    should_show_use_python_msg = (
        modifying_pip and
        WINDOWS and
        os.path.basename(sys.argv[0]) in pip_names
    )

    if should_show_use_python_msg:
        new_command = [
            sys.executable, "-m", "pip"
        ] + sys.argv[1:]
        raise CommandError(
            'To modify pip, please run the following command:\n{}'
            .format(" ".join(new_command))
        )
def is_console_interactive():
    # type: () -> bool
    """Return True when stdin exists and is attached to a terminal."""
    stdin = sys.stdin
    return stdin is not None and stdin.isatty()
def hash_file(path, blocksize=1 << 20):
    # type: (Text, int) -> Tuple[Any, int]
    """Return (sha256-hash-object, byte-length) for the file at *path*."""
    digest = hashlib.sha256()
    total = 0
    with open(path, 'rb') as f:
        for block in read_chunks(f, size=blocksize):
            total += len(block)
            digest.update(block)
    return digest, total
def is_wheel_installed():
    """
    Return whether the wheel package is installed.
    """
    try:
        import wheel  # noqa: F401
        return True
    except ImportError:
        return False
def pairwise(iterable):
    # type: (Iterable[Any]) -> Iterator[Tuple[Any, Any]]
    """
    Return non-overlapping paired elements.

    For example:
        s -> (s0, s1), (s2, s3), (s4, s5), ...

    An odd trailing element is paired with None (zip_longest fill value).
    """
    it = iter(iterable)
    # both zip arguments share one iterator, so items are consumed two at a time
    return zip_longest(it, it)
def partition(
    pred,  # type: Callable[[T], bool]
    iterable,  # type: Iterable[T]
):
    # type: (...) -> Tuple[Iterable[T], Iterable[T]]
    """
    Use a predicate to partition entries into false entries and true entries,
    like

        partition(is_odd, range(10)) --> 0 2 4 6 8   and  1 3 5 7 9
    """
    # tee so each output stream iterates the input independently
    falsy_src, truthy_src = tee(iterable)
    falsy = filterfalse(pred, falsy_src)
    truthy = filter(pred, truthy_src)
    return falsy, truthy
| [
"parvesh.dhullmonu@gmail.com"
] | parvesh.dhullmonu@gmail.com |
1aed9706512090afb363b8b4ed3d72448e09f2ae | f11ecb59dab63af605c6e5f256ee59e00447ecc1 | /658-find-k-closest-elements.py | bdc8d9afda5c7990ddfaa5b37493cfc0919ea122 | [] | no_license | floydchenchen/leetcode | 626d55f72ec914764385ce82b0f3c57f5a7e9de8 | 9d9e0c08992ef7dbd9ac517821faa9de17f49b0e | refs/heads/master | 2022-10-07T20:33:55.728141 | 2020-06-08T16:09:17 | 2020-06-08T16:09:17 | 269,525,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,655 | py | # 658. Find K Closest Elements
# Given a sorted array, two integers k and x, find the k closest elements to x in the array.
# The result should also be sorted in ascending order. If there is a tie, the smaller
# elements are always preferred.
#
# Example 1:
# Input: [1,2,3,4,5], k=4, x=3
# Output: [1,2,3,4]
# Example 2:
# Input: [1,2,3,4,5], k=4, x=-1
# Output: [1,2,3,4]
# 1. Use python's custom sorting to sort the nums by each num's distance to x,
# if there is a tie we put smaller num before. For example,
# if we have [1,2,3,4,5], k=4, x=3, then the sorted array becomes [3,2,4,1,5].
# ==> sorted(nums, key=lambda num: (abs(num - x), num))
# 2. We return the first k elements in the sorted array in an ascending order.
# For example, the sorted array is [3,2,4,1,5], so we need to return [1,2,3,4].
# ==> sorted(sorted(nums, key=lambda num: (abs(num - x), num))[:k])
class Solution:
    """Find the k elements of a sorted array closest to x; ties prefer the
    smaller value, and the answer is returned in ascending order."""

    # O(n log n): sort by (distance to x, value) so ties favor smaller
    # numbers, keep the first k, then restore ascending order.
    # Note: sorts nums in place, matching the original behavior.
    def findClosestElements(self, nums, k, x):
        nums.sort(key=lambda v: (abs(v - x), v))
        return sorted(nums[:k])

    # O(log n): binary-search the left edge of the k-wide answer window.
    def findClosestElements1(self, nums, k, x):
        lo, hi = 0, len(nums) - k - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            # nums[mid] is farther from x than nums[mid + k]: window moves right
            if x - nums[mid] > nums[mid + k] - x:
                lo = mid + 1
            else:
                hi = mid - 1
        return nums[lo:lo + k]
# Quick manual smoke checks of the binary-search variant.
# print(Solution().findClosestElements1([1,2,3,4,5],4,3))
print(Solution().findClosestElements1([1], 1, 1))
| [
"chen2918@umn.edu"
] | chen2918@umn.edu |
985a206f9c8a4ee9e27bbe5543558e87b38d7bbe | 0feb9799532328d2eb5c9673751bf44a06652375 | /logic/falsifiability.py | c1472cd4f55454103672fe465c572608f1fd38dc | [] | no_license | krishnakatyal/philosophy | ebc78947508f12a9d06356d2cc8d38f6afb0510a | f7735e9adc9ba609894d89384562dbda2f794548 | refs/heads/master | 2022-03-28T14:00:52.460599 | 2020-01-25T00:28:55 | 2020-01-25T00:28:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,120 | py |
"""
We say that a theory is falsified only if we have accepted basic statements which contradict
it (cf. section 11, rule 2). This condition is necessary, but not sufficient; for we have seen
that non-reproducible single occurrences are of no significance to science.
Thus a few stray basic statements contradicting a theory will hardly induce us to reject it as falsified.
We shall take it as falsified only if we discover a reproducible effect which refutes the theory. In other words,
we only accept the falsification if a low-level empirical hypothesis which describes such an effect is
proposed and corroborated. This kind of hypothesis may be called a falsifying hypothesis.
The requirement that the falsifying hypothesis must be empirical, and so falsifiable, only means that it must
stand in a certain logical relationship to possible basic statements; thus this requirement only concerns the
logical form of the hypothesis. The rider that the hypothesis should be corroborated refers to tests which it
ought to have passed—tests which confront it with accepted basic statements.
The following is an example of inconsistent (logically false) statement - that is - one in which
p · ~p can be deduced. This is not an example of a falsifiable statement.
1. p -> (p v q) # From Bertrand Russell's "primitive propositions"
2. ~p -> (p -> q)      # From substituting ~p for p and then p -> q for ~p v q
3. ~p · p -> q # By importation
Consider a class α of a finite number of occurrences, for example the class of throws made yesterday with this
particular die. This class α, which is assumed to be non-empty, serves, as it were, as a frame of reference, and
will be called a (finite) reference-class. The number of elements belonging to α, i.e. its cardinal number, is
denoted by ‘N(α)’, to be read ‘the number of α’. Now let there be another class, β, which may be finite or not.
We call β our property-class: it may be, for example, the class of all throws which show a five, or (as we shall say)
which have the property five.
The class of those elements which belong to both α and β, for example the class of throws made yesterday with this
particular die and having the property five, is called the product-class of α and β, and is denoted by ‘α.β’, to be
read ‘α and β’. Since α.β is a subclass of α, it can at most contain a finite number of elements (it may be empty).
The number of elements in α.β is denoted by ‘N(α.β)’.
Whilst we symbolize (finite) numbers of elements by N, the relative frequencies are symbolized by F′′. For example,
‘the relative frequency of the property β within the finite reference-class α’ is written ‘αF′′(β)’, which may be read
‘the α-frequency of β’. We can now define the relative frequency.
"""
def relfreq(n, alpha, beta):
    """
    Relative frequency: For some function n that returns the number of fives thrown yesterday with this die when
    given alpha and beta, and, when given only alpha, it returns the total number of throws yesterday.
    """
    joint = n(alpha, beta)   # N(alpha.beta): elements in both classes
    total = n(alpha)         # N(alpha): size of the reference class
    return joint / total
| [
"shussainather@gmail.com"
] | shussainather@gmail.com |
0b010569aebbcdd67493cf345ecda79053b7a947 | 71116fe7b18634a61c3593b04f59454b4311f2c8 | /fullLength.py | c84b2166894a57023933244db0f4d69210f3fa25 | [] | no_license | frankligy/NeoAntigenWorkflow | 79edf814798717467b57850322fa36843472806b | c481b654072b213a033cda1d06a5c6853a0d86fa | refs/heads/master | 2022-12-25T08:54:01.155626 | 2020-10-05T04:21:39 | 2020-10-05T04:21:39 | 250,943,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52,231 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 28 22:37:01 2020
@author: ligk2e
"""
import numpy as np
import os
import sys
import pandas as pd
import re
from Bio.SeqIO.FastaIO import SimpleFastaParser
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
from decimal import Decimal as D
import argparse
import bisect
import requests
import bz2
import _pickle as cpickle
def fasta_to_dict(path):
    """Parse a gene FASTA file into {EnsGID: [chromosome, start, end, sequence]}.

    Each FASTA header is expected to look like 'EnsID|chr|start|end'.
    """
    mapping = {}
    with open(path, 'r') as in_handle:
        for title, seq in SimpleFastaParser(in_handle):
            ens_id, chromosome, start, end = title.split('|')[:4]
            mapping[ens_id] = [chromosome, start, end, seq]
    return mapping
def query_from_dict_fa(dict_fa, abs_start, abs_end, EnsID, strand):
    """Slice the genomic sequence of EnsID between absolute coordinates
    abs_start..abs_end (inclusive).

    dict_fa stores [chr, start, end, seq] per gene; the stored sequence
    carries a 2000 bp pad before the gene start, hence the +2000 offset when
    mapping absolute coordinates to string indices.  For '-' strand genes the
    stored sequence is the reverse strand, so it is flipped to the forward
    strand before slicing and the slice is reverse-complemented back.
    """
    gene_start = int(dict_fa[EnsID][1])
    lo = int(abs_start) - gene_start + 2000
    hi = int(abs_end) - gene_start + 1 + 2000  # python slice end is exclusive
    if strand == '+':
        return dict_fa[EnsID][3][lo:hi]
    # minus strand
    forward = str(Seq(dict_fa[EnsID][3], generic_dna).reverse_complement())
    piece = forward[lo:hi]
    return str(Seq(piece, generic_dna).reverse_complement())
def exonCoords_to_dict(path, delimiter):
    """Read the exon-coordinate table into
    {EnsGID: {subexon: (chr, strand, start, end)}}.

    The first (header) line is skipped.  ``delimiter`` is accepted for
    interface compatibility, but the file is always split on tabs, matching
    the original behavior.
    """
    dict_exonCoords = {}
    with open(path, 'r') as file:
        next(file)  # skip header
        for line in file:
            items = line.split('\t')
            coords = (items[2], items[3], items[4], items[5])
            dict_exonCoords.setdefault(items[0], {})[items[1]] = coords
    # final structure {'EnsID':{E1:(chr,strand,start,end),E2:(chr,strand,start,end)}}
    return dict_exonCoords
def convertExonList(df):
    """Build {EnsGID: {EnsTID: 'E1.1|E1.2|...'}} from the exon-list dataframe.

    Column positions assumed: 0 = EnsGID, 1 = EnsTID, 3 = exon list string.
    """
    dictExonList = {}
    for row in range(df.shape[0]):
        gene = df.iat[row, 0]
        transcript = df.iat[row, 1]
        exon_string = df.iat[row, 3]
        dictExonList.setdefault(gene, {})[transcript] = exon_string
    return dictExonList
def convertExonList_pep(df):
    """Build {EnsGID: [(EnsPID, 'E1.1|...'), ...]} from the exon-list dataframe.

    Column positions assumed: 0 = EnsGID, 2 = EnsPID, 3 = exon list string.
    """
    dictExonList = {}
    for row in range(df.shape[0]):
        gene = df.iat[row, 0]
        protein = df.iat[row, 2]
        exon_string = df.iat[row, 3]
        dictExonList.setdefault(gene, []).append((protein, exon_string))
    # {EnsGID:[(EnsPID,exonList),(EnsPID,exonList)]}
    return dictExonList
def uid(df, i):
    """Parse row *i*'s UID column into {'gene:EnsGID': [exam_event, background_event]}.

    A UID looks like 'GENE:ENSG..:E2.3-E3.1|ENSG..:E2.3-E4.1'.  Trans-splicing
    (fusion) events carry one extra ':'-separated field, which is re-joined
    into the event string (e.g. 'E2.3-ENSG..:E3.1').
    """
    raw = list(df['UID'])[i]
    halves = raw.split('|')
    # key is the gene symbol plus the EnsGID taken from the background event
    key = raw.split(':')[0] + ':' + halves[1].split(':')[0]

    exam_fields = halves[0].split(':')
    if len(exam_fields) > 3:  # fusion event keeps its embedded ':'
        event = exam_fields[2] + ':' + exam_fields[3]
    else:
        event = exam_fields[2]

    back_fields = halves[1].split(':')
    if len(back_fields) > 2:
        backEvent = back_fields[1] + ':' + back_fields[2]
    else:
        backEvent = back_fields[1]

    #{'gene:ENSid':[E22-E33,E34-E56]}
    # if fusion gene: E22-ENSG:E31
    return {key: [event, backEvent]}
def matchWithExonlist(df, df_exonlist, dict_exonCoords):
    """First-round matching: for every event row, collect full transcript
    sequences whose documented exon chain contains the exam junction.

    Adds the column 'exam_first_whole_transcripts' to *df* and returns it.
    The background-event matches are still computed (for parity with the
    original) but, as before, not stored.
    """
    exam_matches, back_matches = [], []
    for i in range(df.shape[0]):
        parsed = uid(df, i)
        ens_gid = list(parsed.keys())[0].split(':')[1]
        exam_junction = exon_extract(parsed, 0, ens_gid)
        back_junction = exon_extract(parsed, 1, ens_gid)
        exam_matches.append(core_match(df_exonlist, dict_exonCoords, ens_gid, exam_junction))
        back_matches.append(core_match(df_exonlist, dict_exonCoords, ens_gid, back_junction))
    df['exam_first_whole_transcripts'] = exam_matches
    #df['back_first_whole_transcripts'] = back_matches
    return df
def exon_extract(temp, pos, EnsID):
    """Turn event *pos* of the parsed uid dict ('E2.3-E3.1') into 'E2.3|E3.1'.

    EnsID is accepted for signature compatibility; it is not used here.
    """
    event = list(temp.values())[0][pos]
    pieces = event.split('-')
    return pieces[0] + '|' + pieces[1]
def core_match(df_exonlist,dict_exonCoords,EnsID,Exons):
    """For gene EnsID, scan every documented transcript's exon chain; when a
    chain contains the junction string ``Exons`` (e.g. 'E2.3|E3.1'), stitch
    the full transcript sequence from the module-global ``dict_fa``;
    otherwise record '' for that transcript.

    Returns a list aligned with the gene's transcripts, e.g.
    ['','ATTTTT','TTTGGCC']; [] when the gene has no transcripts listed.
    """
    try:
        df_certain = df_exonlist[df_exonlist['EnsGID'] == EnsID]
    except: full_transcript_store = [] # EnsGID is absent in df_exonlist
    # NOTE(review): if the except branch ever fires, df_certain is undefined
    # and the loop below raises NameError; the assignment above is also
    # immediately clobbered by the next line -- presumably that branch is
    # never reached in practice.  TODO confirm.
    full_transcript_store = []
    for item in list(df_certain['Exons']):
        full_transcript=''
        if Exons in item:
            Exonlist = item.split('|')
            for j in range(len(Exonlist)):
                coords = dict_exonCoords[EnsID][Exonlist[j]]
                strand = coords[1]
                # judge==False means the next subexon of the same exon is also
                # present, so one overlapping base must be trimmed at the seam
                judge = check_exonlist_general(Exonlist,j,strand)
                if strand == '+' and judge:
                    frag = query_from_dict_fa(dict_fa,coords[2],coords[3],EnsID,coords[1]) # corresponds to abs_start, abs_end, strand
                elif strand == '+' and not judge:
                    frag = query_from_dict_fa(dict_fa,coords[2],int(coords[3])-1,EnsID,coords[1])
                elif strand == '-' and judge:
                    frag = query_from_dict_fa(dict_fa,coords[2],coords[3],EnsID,coords[1])
                elif strand == '-' and not judge:
                    frag = query_from_dict_fa(dict_fa,int(coords[2])+1,coords[3],EnsID,coords[1]) # because of the weird
                # expression of minus strand, need to draw an illustrator to visulize that.
                full_transcript += frag
            full_transcript = full_transcript.replace('\n','')
            full_transcript_store.append(full_transcript)
        else:
            full_transcript_store.append('')
    return full_transcript_store # ['','ATTTTT','TTTGGCC'], # [] if EnsGID is not present in exonlist
def check_exonlist_general(exonlist, index, strand):
    """Return True when exonlist[index] is the last stored subexon of its exon.

    Subexons of one exon (E14.1, E14.2, ...) abut each other; when subexon
    N+1 is also present in the list, the caller has to trim one overlapping
    base, so we return False.  ``strand`` is kept for interface
    compatibility: in the original both strand branches apply the same rule.
    """
    per_exon = {}
    for subexon in exonlist:
        exon_num, subexon_num = subexon.split('.')
        per_exon.setdefault(exon_num, []).append(subexon_num)  # E14 > ['1','2','4','5']

    query_exon, query_sub = exonlist[index].split('.')
    successor = str(int(query_sub) + 1)
    return successor not in per_exon[query_exon]
def third_round(df_second): # after second run
    '''
    Recover events that survived the first two rounds unmatched.

    1. [''] means trans-splicing(non-trailing), novel ordinal, intron retention
    2. ['','','','',...''] means newsplicingsite, trans-splicing(trailing) or Alt5,3 but even trimming the trailing part can not match them to existing ones
    '''
    col = []
    for i in range(df_second.shape[0]):
        second = df_second.iloc[i]['second_round']
        temp=uid(df_second,i)
        EnsGID_this = list(temp.keys())[0].split(':')[1]
        exam1 = list(temp.values())[0][0].split('-')[0]  # E22
        exam2 = list(temp.values())[0][0].split('-')[1]  # ENSG:E31
        if second == [''] and 'ENSG' in exam2:  # trans-splicing(non_trailing)
            # left half comes from this gene, right half from the partner gene
            EnsGID_trans = exam2.split(':')[0]
            exam2_trans = exam2.split(':')[1]
            full_left = single_left_match(exam1,EnsGID_this)
            full_right = single_right_match(exam2_trans,EnsGID_trans)
            full = cat_left_right_asym(full_left,full_right)
            col.append(full)
        elif 'I' in (exam1 + exam2) and second == ['']:  # intron retention
            col.append(['intron'])
        elif second == ['']:  # novel ordinal # E3.4(exam1) - E5.1(exam2)
            full_left = single_left_match(exam1,EnsGID_this)
            full_right = single_right_match(exam2,EnsGID_this)
            full = cat_left_right_sym(full_left,full_right)
            col.append(full)
        else:  # ['skipped'] or ['TTTTAAA','AATTGGCC'] has matches or ['','',''] that we don't want to recover
            hits = sum([True if item else False for item in second])
            if hits > 0: col.append(['skipped'])
            elif hits == 0: col.append(['unrecoverable'])  # means point 2 in comments above
    df_second['third_round'] = col  # if third_round is still ['','',''], means trans and novel ordinal, their single subexon still don't match up with any
    return df_second
def cat_left_right_sym(full_left, full_right):
    """Concatenate left/right sequence lists pairwise (same transcript index).

    An entry becomes '' when either side is missing for that transcript
    (no cross-transcript concatenation here).
    """
    combined = []
    for idx, left_piece in enumerate(full_left):
        right_piece = full_right[idx]
        combined.append(left_piece + right_piece if left_piece and right_piece else '')
    return combined
def cat_left_right_asym(full_left, full_right):
    """Cross-product concatenation ('left,right') over all former/latter
    sequence pairs, used for trans-splicing events; '' for any pair with a
    missing side."""
    combos = []
    for left_piece in full_left:
        for right_piece in full_right:
            combos.append(left_piece + ',' + right_piece if left_piece and right_piece else '')
    return combos
def single_left_match(exon,EnsGID): # give you E22, return all sequence E1,E2,...E22
    """For each documented transcript of EnsGID containing *exon*, stitch the
    sequence from the transcript's first exon up to and including *exon*;
    '' for transcripts that do not contain it.

    Reads the module globals dictExonList, dict_exonCoords and dict_fa.
    """
    # NOTE(review): 'dict_exonCocords' below is a typo (never assigned); it is
    # harmless because the function only *reads* dict_exonCoords, which needs
    # no global declaration.
    global dict_exonCocords
    global dictExonList
    global dict_fa
    transcripts = dictExonList[EnsGID]
    result = []
    for tran,item in transcripts.items():
        # match the exon with a '|' on either side so e.g. 'E2' can't hit 'E22'
        Exons1 = '|' + exon #|E22
        Exons2 = exon + '|' # E22|
        if re.search(rf'{re.escape(Exons1)}',item) or re.search(rf'{re.escape(Exons2)}',item):
            exons = item.split('|')
            dict_judge = {}
            for j in range(len(exons)):
                coords = dict_exonCoords[EnsGID][exons[j]]
                strand = coords[1]
                judge = check_exonlist_general(exons,j,strand)
                dict_judge[exons[j]] = judge
            # collect every exon up to and including the queried one
            bucket = []
            for k in range(len(exons)):
                if not exons[k] == exon:
                    bucket.append(exons[k])
                else:
                    bucket.append(exons[k])
                    break
            full_left = ''
            for m in range(len(bucket)):
                coords = dict_exonCoords[EnsGID][bucket[m]]
                strand = coords[1]
                judge = dict_judge[bucket[m]]
                if strand == '+' and judge:
                    frag = query_from_dict_fa(dict_fa,coords[2],coords[3],EnsGID,coords[1]) # corresponds to abs_start, abs_end, strand
                elif strand == '+' and not judge:
                    frag = query_from_dict_fa(dict_fa,coords[2],int(coords[3])-1,EnsGID,coords[1])
                elif strand == '-' and judge:
                    frag = query_from_dict_fa(dict_fa,coords[2],coords[3],EnsGID,coords[1])
                elif strand == '-' and not judge:
                    frag = query_from_dict_fa(dict_fa,int(coords[2])+1,coords[3],EnsGID,coords[1]) # because of the weird
                # expression of minus strand, need to draw an illustrator to visulize that.
                full_left += frag
            full_left = full_left.replace('\n','')
            result.append(full_left)
        else:
            result.append('')
    return result
def single_right_match(exon,EnsGID): # give you E22, return all sequence E22,E23,.....
    """For each documented transcript of EnsGID containing *exon*, stitch the
    sequence from *exon* (inclusive) to the end of the transcript;
    '' for transcripts that do not contain it.

    Reads the module globals dictExonList, dict_exonCoords and dict_fa.
    """
    # NOTE(review): 'dict_exonCocords' below is a typo (never assigned); it is
    # harmless because the function only *reads* dict_exonCoords.
    global dict_exonCocords
    global dictExonList
    global dict_fa
    transcripts = dictExonList[EnsGID]
    result = []
    for tran,item in transcripts.items():
        # match the exon with a '|' on either side so e.g. 'E2' can't hit 'E22'
        Exons1 = '|' + exon #|E22
        Exons2 = exon + '|' # E22|
        if re.search(rf'{re.escape(Exons1)}',item) or re.search(rf'{re.escape(Exons2)}',item):
            exons = item.split('|')
            dict_judge = {}
            for j in range(len(exons)):
                coords = dict_exonCoords[EnsGID][exons[j]]
                strand = coords[1]
                judge = check_exonlist_general(exons,j,strand)
                dict_judge[exons[j]] = judge
            # collect the queried exon and everything after it
            bucket = []
            for k in range(len(exons)):
                if not exons[k] == exon:
                    continue
                else:
                    bucket.extend(exons[k:])
                    break
            full_right = ''
            for m in range(len(bucket)):
                coords = dict_exonCoords[EnsGID][bucket[m]]
                strand = coords[1]
                judge = dict_judge[bucket[m]]
                if strand == '+' and judge:
                    frag = query_from_dict_fa(dict_fa,coords[2],coords[3],EnsGID,coords[1]) # corresponds to abs_start, abs_end, strand
                elif strand == '+' and not judge:
                    frag = query_from_dict_fa(dict_fa,coords[2],int(coords[3])-1,EnsGID,coords[1])
                elif strand == '-' and judge:
                    frag = query_from_dict_fa(dict_fa,coords[2],coords[3],EnsGID,coords[1])
                elif strand == '-' and not judge:
                    frag = query_from_dict_fa(dict_fa,int(coords[2])+1,coords[3],EnsGID,coords[1]) # because of the weird
                # expression of minus strand, need to draw an illustrator to visulize that.
                full_right += frag
            full_right = full_right.replace('\n','')
            result.append(full_right)
        else:
            result.append('')
    return result
def second_round(df_first):
    """Second-round matching for events with trailing coordinates
    ('E22_12345'): strip the trailing part, re-query, and rebuild the full
    transcript with the partial exon.  Rows already matched in round one get
    ['skipped']; rows without any trailing part get [''] for round three.

    Adds the column 'second_round' to df_first and returns it.
    """
    col = []
    for i in range(df_first.shape[0]):
        first = df_first.iloc[i]['exam_first_whole_transcripts']
        hits = sum([True if match else False for match in first])
        if hits > 0: col.append(['skipped']) # already matched in first round, not a novel event
        else:
            temp=uid(df_first,i)
            #{'gene:ENSid':[E22-E33,E34-E56]}
            # if fusion gene: E22-ENSG:E31
            EnsID=list(temp.keys())[0].split(':')[1]
            exam1 = list(temp.values())[0][0].split('-')[0]
            exam2 = list(temp.values())[0][0].split('-')[1]
            if '_' in exam1 and not '_' in exam2: # mode 1: trailing on former exon
                exam1_exon = exam1.split('_')[0]
                exam1_coord = exam1.split('_')[1]
                query = exam1_exon + '|' + exam2
                result = second_match(EnsID,query,exam1_coord=exam1_coord)
                col.append(result)
            if '_' not in exam1 and '_' in exam2: # mode 2: trailing on latter exon
                exam2_exon = exam2.split('_')[0]
                exam2_coord = exam2.split('_')[1]
                query = exam1 + '|' + exam2_exon
                result = second_match(EnsID,query,exam2_coord=exam2_coord)
                col.append(result)
            if '_' in exam1 and '_' in exam2: # mode 3: trailing on both exons
                exam1_exon = exam1.split('_')[0]
                exam1_coord = exam1.split('_')[1]
                exam2_exon = exam2.split('_')[0]
                exam2_coord = exam2.split('_')[1]
                query = exam1_exon + '|' + exam2_exon
                result = second_match(EnsID,query,exam1_coord=exam1_coord,exam2_coord=exam2_coord)
                col.append(result)
            if not '_' in exam1 and not '_' in exam2:
                result = [''] # novel ordinal and intron retention and tran-splicing(non-trailing)
                col.append(result)
    df_first['second_round'] = col
    return df_first
def second_match(EnsID,query,exam1_coord=False,exam2_coord=False): # dictExonList {EnsGID:{EnsTID:exonlist,EnsTID:exonlist}}
    """Recover full transcript sequences for trailing splice events whose
    junction only matches a documented transcript after the trailing
    coordinate is trimmed off.

    Parameters
    ----------
    EnsID : Ensembl gene ID.
    query : junction with trailing parts removed, e.g. 'E22|E33'.
    exam1_coord / exam2_coord : genomic coordinate of the trailing part on the
        former / latter exon, or False when that side has no trailing part.

    Returns a list with one entry per documented transcript: the stitched
    full-length cDNA when the transcript contains the query junction, else ''.

    Reads the module globals dict_exonCoords, dictExonList and dict_fa.

    BUG FIX: the mode-3 branch previously referenced the undefined names
    ``strand_query`` and ``coords_query`` (defined only in modes 1/2),
    raising NameError whenever both exons carried trailing coordinates;
    they are now ``strand_query1``/``strand_query2`` and ``coords_query2``.
    """
    global dict_exonCoords
    global dictExonList
    global dict_fa
    if exam1_coord==False: mode = 2 # trailing occur in latter one
    if exam2_coord==False: mode = 1 # trailing occur in former one
    if not exam1_coord==False and not exam2_coord==False: mode = 3 # trailing occur in both ones
    #print(mode)
    exam1 = query.split('|')[0]
    exam2 = query.split('|')[1]
    transcripts = dictExonList[EnsID]
    result = []
    for tran,item in transcripts.items():
        Exons1 = '|' + query
        Exons2 = query + '|'
        if re.search(rf'{re.escape(Exons1)}',item) or re.search(rf'{re.escape(Exons2)}',item) or re.search(rf'{re.escape(query)}$',item):
            exons = item.split('|')
            # judge==False means the next subexon of the same exon is present,
            # so one overlapping base must be trimmed at the seam
            dict_judge = {}
            for j in range(len(exons)):
                coords = dict_exonCoords[EnsID][exons[j]]
                strand = coords[1]
                judge = check_exonlist_general(exons,j,strand)
                dict_judge[exons[j]] = judge
            if mode == 1:
                # left exons | truncated exam1 (up to exam1_coord) | right exons
                bucket_left, bucket_right = [],[]
                for i in range(len(exons)):
                    if not exons[i] == exam1:
                        bucket_left.append(exons[i])
                    else:
                        i += 1
                        bucket_right.extend(exons[i:])
                        break # till now, we throw the exons before queried one to bucket_left, after queried one to bucket_right
                if bucket_left:
                    full_left = ''
                    for k in range(len(bucket_left)):
                        coords = dict_exonCoords[EnsID][bucket_left[k]]
                        strand = coords[1]
                        judge = dict_judge[bucket_left[k]]
                        if strand == '+' and judge:
                            frag = query_from_dict_fa(dict_fa,coords[2],coords[3],EnsID,coords[1]) # corresponds to abs_start, abs_end, strand
                        elif strand == '+' and not judge:
                            frag = query_from_dict_fa(dict_fa,coords[2],int(coords[3])-1,EnsID,coords[1])
                        elif strand == '-' and judge:
                            frag = query_from_dict_fa(dict_fa,coords[2],coords[3],EnsID,coords[1])
                        elif strand == '-' and not judge:
                            frag = query_from_dict_fa(dict_fa,int(coords[2])+1,coords[3],EnsID,coords[1]) # because of the weird
                        # expression of minus strand, need to draw an illustrator to visulize that.
                        full_left += frag
                    full_left = full_left.replace('\n','')
                else:
                    full_left = ''
                coords_query = dict_exonCoords[EnsID][exam1]
                strand_query = coords_query[1]
                start = int(coords_query[2])
                judge_query = dict_judge[exam1]
                if strand_query == '+':
                    query_frag = query_from_dict_fa(dict_fa,start,int(exam1_coord),EnsID,strand_query)
                elif strand_query == '-':
                    if not judge_query: start = int(coords_query[2])+1
                    query_frag = query_from_dict_fa(dict_fa,start,int(exam1_coord),EnsID,strand_query)
                query_frag = query_frag.replace('\n','')
                if bucket_right:
                    full_right = ''
                    for k in range(len(bucket_right)):
                        coords = dict_exonCoords[EnsID][bucket_right[k]]
                        strand = coords[1]
                        judge = dict_judge[bucket_right[k]]
                        if strand == '+' and judge:
                            frag = query_from_dict_fa(dict_fa,coords[2],coords[3],EnsID,coords[1]) # corresponds to abs_start, abs_end, strand
                        elif strand == '+' and not judge:
                            frag = query_from_dict_fa(dict_fa,coords[2],int(coords[3])-1,EnsID,coords[1])
                        elif strand == '-' and judge:
                            frag = query_from_dict_fa(dict_fa,coords[2],coords[3],EnsID,coords[1])
                        elif strand == '-' and not judge:
                            frag = query_from_dict_fa(dict_fa,int(coords[2])+1,coords[3],EnsID,coords[1]) # because of the weird
                        # expression of minus strand, need to draw an illustrator to visulize that.
                        full_right += frag
                    full_right = full_right.replace('\n','')
                else:
                    full_right = ''
                full = full_left + query_frag + full_right
                result.append(full)
            if mode == 2:
                # left exons | truncated exam2 (from exam2_coord on) | right exons
                bucket_left, bucket_right = [],[]
                for i in range(len(exons)):
                    if not exons[i] == exam2:
                        bucket_left.append(exons[i])
                    else:
                        i += 1
                        bucket_right.extend(exons[i:])
                        break # till now, we throw the exons before queried one to bucket_left, after queried one to bucket_right
                #print(bucket_left,bucket_right)
                if bucket_left:
                    full_left = ''
                    for k in range(len(bucket_left)):
                        coords = dict_exonCoords[EnsID][bucket_left[k]]
                        strand = coords[1]
                        judge = dict_judge[bucket_left[k]]
                        if strand == '+' and judge:
                            frag = query_from_dict_fa(dict_fa,coords[2],coords[3],EnsID,coords[1]) # corresponds to abs_start, abs_end, strand
                        elif strand == '+' and not judge:
                            frag = query_from_dict_fa(dict_fa,coords[2],int(coords[3])-1,EnsID,coords[1])
                        elif strand == '-' and judge:
                            frag = query_from_dict_fa(dict_fa,coords[2],coords[3],EnsID,coords[1])
                        elif strand == '-' and not judge:
                            frag = query_from_dict_fa(dict_fa,int(coords[2])+1,coords[3],EnsID,coords[1]) # because of the weird
                        # expression of minus strand, need to draw an illustrator to visulize that.
                        full_left += frag
                    full_left = full_left.replace('\n','')
                else:
                    full_left = ''
                #print(full_left)
                coords_query = dict_exonCoords[EnsID][exam2]
                #print(coords_query)
                strand_query = coords_query[1]
                judge_query = dict_judge[exam2]
                end = int(coords_query[3])
                if strand_query == '+':
                    if not judge_query: end = int(coords_query[3])-1
                    query_frag = query_from_dict_fa(dict_fa,int(exam2_coord),end,EnsID,strand_query)
                elif strand_query == '-':
                    query_frag = query_from_dict_fa(dict_fa,int(exam2_coord),end,EnsID,strand_query)
                query_frag = query_frag.replace('\n','')
                if bucket_right:
                    full_right = ''
                    for k in range(len(bucket_right)):
                        coords = dict_exonCoords[EnsID][bucket_right[k]]
                        strand = coords[1]
                        judge = dict_judge[bucket_right[k]]
                        if strand == '+' and judge:
                            frag = query_from_dict_fa(dict_fa,coords[2],coords[3],EnsID,coords[1]) # corresponds to abs_start, abs_end, strand
                        elif strand == '+' and not judge:
                            frag = query_from_dict_fa(dict_fa,coords[2],int(coords[3])-1,EnsID,coords[1])
                        elif strand == '-' and judge:
                            frag = query_from_dict_fa(dict_fa,coords[2],coords[3],EnsID,coords[1])
                        elif strand == '-' and not judge:
                            frag = query_from_dict_fa(dict_fa,int(coords[2])+1,coords[3],EnsID,coords[1]) # because of the weird
                        # expression of minus strand, need to draw an illustrator to visulize that.
                        full_right += frag
                    full_right = full_right.replace('\n','')
                else:
                    full_right = ''
                full = full_left + query_frag + full_right
                #print(full)
                result.append(full)
            if mode == 3:
                # left exons | truncated exam1 | truncated exam2 | right exons
                bucket_left, bucket_right = [],[]
                for i in range(len(exons)):
                    if not exons[i] == exam1:
                        bucket_left.append(exons[i])
                    else:
                        i += 2
                        bucket_right.extend(exons[i:])
                        break # till now, we throw the exons before queried one to bucket_left, after queried one to bucket_right
                if bucket_left:
                    full_left = ''
                    for k in range(len(bucket_left)):
                        coords = dict_exonCoords[EnsID][bucket_left[k]]
                        strand = coords[1]
                        judge = dict_judge[bucket_left[k]]
                        if strand == '+' and judge:
                            frag = query_from_dict_fa(dict_fa,coords[2],coords[3],EnsID,coords[1]) # corresponds to abs_start, abs_end, strand
                        elif strand == '+' and not judge:
                            frag = query_from_dict_fa(dict_fa,coords[2],int(coords[3])-1,EnsID,coords[1])
                        elif strand == '-' and judge:
                            frag = query_from_dict_fa(dict_fa,coords[2],coords[3],EnsID,coords[1])
                        elif strand == '-' and not judge:
                            frag = query_from_dict_fa(dict_fa,int(coords[2])+1,coords[3],EnsID,coords[1]) # because of the weird
                        # expression of minus strand, need to draw an illustrator to visulize that.
                        full_left += frag
                    full_left = full_left.replace('\n','')
                else:
                    full_left = ''
                coords_query1 = dict_exonCoords[EnsID][exam1]
                start1 = int(coords_query1[2])
                strand_query1 = coords_query1[1]
                judge_query1 = dict_judge[exam1]
                if strand_query1 == '+':
                    query_frag1 = query_from_dict_fa(dict_fa,start1,int(exam1_coord),EnsID,strand_query1)
                elif strand_query1 == '-':
                    if not judge_query1: start1 = int(coords_query1[2]) + 1
                    query_frag1 = query_from_dict_fa(dict_fa,start1,int(exam1_coord),EnsID,strand_query1)
                query_frag1 = query_frag1.replace('\n','')
                coords_query2 = dict_exonCoords[EnsID][exam2]
                strand_query2 = coords_query2[1]
                judge_query2 = dict_judge[exam2]
                end2 = int(coords_query2[3])
                if strand_query2 == '+':
                    if not judge_query2: end2 = int(coords_query2[3])-1
                    query_frag2 = query_from_dict_fa(dict_fa,int(exam2_coord),end2,EnsID,strand_query2)
                elif strand_query2 == '-':
                    query_frag2 = query_from_dict_fa(dict_fa,int(exam2_coord),end2,EnsID,strand_query2)
                query_frag2 = query_frag2.replace('\n','')
                '''
                Remember: the arguments to query_from_dict_fa is very simple, it is just the coord[2] and coord[3],
                no matter which strand it is on. The positive position of the start and end of a segment.

                if judge is false:
                    1. '+': coords[3] - 1
                    2. '-': coords[2] + 1
                '''
                if bucket_right:
                    full_right = ''
                    for k in range(len(bucket_right)):
                        coords = dict_exonCoords[EnsID][bucket_right[k]]
                        strand = coords[1]
                        judge = dict_judge[bucket_right[k]]
                        if strand == '+' and judge:
                            frag = query_from_dict_fa(dict_fa,coords[2],coords[3],EnsID,coords[1]) # corresponds to abs_start, abs_end, strand
                        elif strand == '+' and not judge:
                            frag = query_from_dict_fa(dict_fa,coords[2],int(coords[3])-1,EnsID,coords[1])
                        elif strand == '-' and judge:
                            frag = query_from_dict_fa(dict_fa,coords[2],coords[3],EnsID,coords[1])
                        elif strand == '-' and not judge:
                            frag = query_from_dict_fa(dict_fa,int(coords[2])+1,coords[3],EnsID,coords[1]) # because of the weird
                        # expression of minus strand, need to draw an illustrator to visulize that.
                        full_right += frag
                    full_right = full_right.replace('\n','')
                else:
                    full_right = ''
                full = full_left + query_frag1 + query_frag2 + full_right
                result.append(full)
        else:
            result.append('')
    return result
def getORF(df):
    """For each event, pick the transcript set produced by the latest
    meaningful round (third > second > first) and predict each transcript's
    ORF via transcript2peptide (defined elsewhere in this file).

    Adds the column 'ORF' to df and returns it.  ['None'] marks intron
    retention / unrecoverable events.
    """
    col = []
    for i in range(df.shape[0]):
        first_round = df.iloc[i]['exam_first_whole_transcripts']
        second_round = df.iloc[i]['second_round']
        third_round = df.iloc[i]['third_round']
        if third_round == ['intron'] or third_round == ['unrecoverable']:
            col.append(['None'])
        elif not third_round == ['skipped']: # transcripts are in third_round
            tempArray = []
            for transcript in third_round:
                if not transcript: tempArray.append('')
                else:
                    # trans-splicing results join the two halves with ','
                    transcript = transcript.replace(',','')
                    maxTran = transcript2peptide(transcript)
                    tempArray.append(maxTran)
            col.append(tempArray) # so ['','',''] ORF could be either unrecoverable third round or the predicted ORF is too short
        elif third_round == ['skipped']:
            if not second_round == ['skipped']: # transripts are in second_round
                tempArray = []
                for transcript in second_round:
                    if not transcript: tempArray.append('')
                    else:
                        maxTran = transcript2peptide(transcript)
                        tempArray.append(maxTran)
                col.append(tempArray)
            elif second_round == ['skipped']: # transcripts are in first_round
                tempArray = []
                for transcript in first_round:
                    if not transcript: tempArray.append('')
                    else:
                        maxTran = transcript2peptide(transcript)
                        tempArray.append(maxTran)
                col.append(tempArray)
    df['ORF'] = col
    return df
def ORF_check(df):
    """Annotate each predicted ORF with an NMD flag and a translatability flag.

    NMD rule: if the ORF's stop lands more than one exon boundary before the
    transcript's last exon, mark '*' (potential nonsense-mediated decay),
    else '#'.  Translation is checked via check_translation against the
    global dict_biotype.  Reads the module global dictExonList_p and calls
    list_check / build_sorted_exons (defined elsewhere in this file).

    Adds the columns 'NMD_check' and 'translate_check' to df and returns it.
    """
    col1,col2 = [],[]
    for i in range(df.shape[0]):
        print('The {}th run'.format(i))
        temp=uid(df,i)
        EnsGID = list(temp.keys())[0].split(':')[1]
        space = dictExonList_p[EnsGID] # [('ENSP',exonlists),('ENSP',exonlists)...]
        ORF = df.iloc[i]['ORF']
        first = df.iloc[i]['exam_first_whole_transcripts']
        second = df.iloc[i]['second_round']
        third =df.iloc[i]['third_round']
        if list_check(first): whole = first
        elif not list_check(first) and list_check(second): whole = second
        elif not list_check(first) and not list_check(second) and list_check(third): whole = third
        else:
            # NOTE(review): these ['None'] defaults are clobbered by the
            # unconditional resets two lines below, and 'whole' is left
            # undefined on this path -- presumably ORF is ['None'] here so
            # whole is never read.  TODO confirm.
            NMD = ['None'] # intron, unrecoverable or third round is still all empty
            translate = ['None']
        NMD = []
        translate = []
        print(len(ORF),len(space))
        if len(ORF) == len(space): # not trans-splicing events
            for j in range(len(ORF)):
                orf = ORF[j]
                if not orf:
                    NMD.append('')
                    translate.append('')
                elif orf:
                    whole_ = whole[j]
                    space_ENSP = space[j][0]
                    space_exons = space[j][1]
                    print(EnsGID,space_ENSP)
                    #result = grabEnsemblTranscriptTable(space_ENST)
                    result = check_translation(EnsGID,space_ENSP)
                    translate.append(result)
                    series = build_sorted_exons(EnsGID,space_exons)
                    num_exon = len(series) - 1
                    #print(series,num_exon)
                    #print(orf,type(orf))
                    orf_end_pos = whole_.find(orf)+len(orf)-1
                    residing = bisect.bisect_left(series,orf_end_pos) # which exon it resides on
                    #print(residing)
                    if residing <= num_exon-2: NMD.append('*') # potentially NMD
                    else: NMD.append('#') # good candidate
        else: # trans-splicing events
            for j in range(len(ORF)):
                orf = ORF[j]
                if not orf:
                    NMD.append('')
                    translate.append('')
                elif orf :
                    if orf=='None':
                        NMD.append('None')
                        translate.append('None')
                    else:
                        NMD.append('#') # currently don't support interrogation of NMD for tran-splicing event
                        translate.append('#')
        col1.append(NMD)
        col2.append(translate)
    df['NMD_check'] = col1
    df['translate_check'] = col2
    return df
def check_translation(EnsGID, EnsPID):
    """Classify a peptide ID: '#' = annotated protein-coding, '*' otherwise.

    'None' (usually from the RefSeq dataset) and '-PEP'-suffixed IDs
    (e.g. 'ENSP854949-PEP') are immediately '*'; any other ID is looked up in
    the module-global dict_biotype ({EnsGID: {ENSP: annotation}}).
    """
    if EnsPID == 'None':  # usually from RefSeq dataset
        return '*'
    if 'PEP' in EnsPID:  # e.g. ENSP854949-PEP
        return '*'
    # protein coding gene or NMD: consult the biotype annotation
    pepAnno = dict_biotype[EnsGID]  # {ENSP:anno,ENSP:anno}
    return '#' if pepAnno[EnsPID] == 'protein_coding' else '*'
# https://rest.ensembl.org/documentation/info/lookup
def grabEnsemblTranscriptTable(EnsTID):
    """Classify a transcript via the Ensembl REST lookup endpoint.

    Returns '#' for protein_coding transcripts (and for undecodable
    responses, preserving the original best-effort behaviour), '*' for
    non-protein-coding transcripts or IDs with no 'biotype' field.
    """
    server = "https://rest.ensembl.org"
    ext = "/lookup/id/{0}?expand=1".format(EnsTID)
    r = requests.get(server+ext, headers={ "Content-Type" : "application/json"})
    try:
        decoded = r.json()
    except ValueError:  # body was not valid JSON
        print('JSON unknown error')
        result = '#' # I don't think if running on local, this condition will ever be reached
    else:
        try:
            biotype = decoded['biotype']
        except KeyError:
            result = '*' # might be an invalid Ensembl ID, or an error payload
        else:
            # BUG FIX: previously this comparison ran even when 'biotype' was
            # never bound (missing key), raising NameError and/or clobbering
            # the '*' result set in the except branch.
            if biotype == 'protein_coding': result = '#'
            else: result = '*' # non-protein coding genes or data from other source
    return result
def build_sorted_exons(EnsGID,exonlists): # E1.2|E1.3|E2.3|E3.4
    """Return the ascending cumulative end-positions of each exon run.

    exonlists is a '|'-separated list of sub-exons ("E1.2|E1.3|...").
    Consecutive sub-exons of the same exon are merged (via
    check_consecutive) and the cumulative transcript-relative end position
    of every run is collected, sorted ascending, and prefixed with 0.
    Reads the module-global dict_exonCoords:
    {'EnsID': {exon: [chr, strand, start, end]}}.
    """
    series = [] # store sorted position for each exon
    start_exon = exonlists.split('|')[0]
    strand = dict_exonCoords[EnsGID][start_exon][1]
    if strand == '+':
        start = dict_exonCoords[EnsGID][start_exon][2]
    else:
        start = dict_exonCoords[EnsGID][start_exon][3] # negative strand, the right most position will be the start, also the largest number
    # NOTE(review): 'start' above is computed but never used afterwards —
    # looks like leftover code; confirm before removing.
    # final structure {'EnsID':{E1:[chr,strand,start,end],E2:[chr,strand,start,end]}}
    exonlist = exonlists.split('|')
    # dict_judge: sub-exon -> whether its stated end coordinate is kept
    # as-is (see check_exonlist_general, defined elsewhere in this module).
    dict_judge = {}
    for j in range(len(exonlist)):
        coords = dict_exonCoords[EnsGID][exonlist[j]]
        strand = coords[1]
        judge = check_exonlist_general(exonlist,j,strand)
        dict_judge[exonlist[j]] = judge
    # Group sub-exon numbers by exon number, e.g. {'E14': ['1','2','4','5']}.
    dic = {}
    for subexon in exonlist:
        exon_num = subexon.split('.')[0]
        subexon_num = subexon.split('.')[1]
        if exon_num in dic:
            dic[exon_num].append(subexon_num)
        else:
            dic[exon_num] = []
            dic[exon_num].append(subexon_num) # E14 > [1,2,4,5]
    # Accumulate per-exon lengths so positions are transcript-relative.
    accum = 0
    for exon,sub in dic.items():
        incre,position = check_consecutive(exon,sub,dict_judge,EnsGID,strand,accum)
        accum += incre
        series.extend(position)
    series.sort() # ascending order [5,9,15,...]
    series = [0]+series
    return series
def check_consecutive(exon,sub,dict_judge,EnsGID,strand,accum): # E14 > [1,2,4,5]
    """Split sub-exon numbers into consecutive runs and measure each run.

    For each consecutive run of sub-exon indices, computes its genomic
    length (strand-aware, adjusted by dict_judge at the run's end) and
    appends length+accum to position.  Returns (relaPos, position).
    NOTE(review): when there are multiple runs, only the LAST run's relaPos
    is returned (and 'accum' is not advanced between runs inside the loop) —
    this looks suspicious; confirm against the caller's accumulation logic.
    """
    #print(exon,sub,dict_judge,accum)
    position = []
    lis_int = [int(x) for x in sub]
    diff1 = np.diff(lis_int,1) # array([1,2,1])
    diff1 = [int(x)-1 for x in diff1] # [0,1,0]
    # Non-zero entries mark gaps between sub-exon numbers (breaking points).
    split = np.nonzero(diff1)[0].tolist() # if pos=1, it means in original list, after index 1 will have a breaking point
    #print(split)
    if split: # have breaking point
        split = [y + 1 for y in split]
        # lis_int contains original list, split contains all the indices that identical to the first one in each subgroup
        result=[lis_int[i:j] for i,j in zip([0]+split,split+[None])]
        for chunk in result: # chunk[1,2], chunk[4,5]
            query_s = str(exon)+'.'+str(chunk[0])
            query_e = str(exon)+'.'+str(chunk[-1])
            if strand=='+':
                start = dict_exonCoords[EnsGID][query_s][2]
                end = dict_exonCoords[EnsGID][query_e][3] if dict_judge[query_e] else int(dict_exonCoords[EnsGID][query_e][3])-1
                relaPos = int(end) - int(start) + 1 # think 5-1=4, but 5 will be the 5th one
                position.append(relaPos+accum)
            elif strand == '-':
                start = dict_exonCoords[EnsGID][query_s][3]
                end = dict_exonCoords[EnsGID][query_e][2] if dict_judge[query_e] else int(dict_exonCoords[EnsGID][query_e][2])+1
                relaPos = int(start) - int(end) + 1
                position.append(relaPos+accum)
    else: # E15 > [1,2,3] 3 consecutive
        query_s = str(exon) + '.' + str(sub[0])
        query_e = str(exon) +'.'+ str(sub[-1])
        #print(query_s,query_e)
        if strand=='+':
            start = dict_exonCoords[EnsGID][query_s][2]
            end = dict_exonCoords[EnsGID][query_e][3] if dict_judge[query_e] else int(dict_exonCoords[EnsGID][query_e][3])-1
            relaPos = int(end) - int(start) + 1 # think 5-1=4, but 5 will be the 5th one
            position.append(relaPos+accum)
        elif strand=='-':
            start = dict_exonCoords[EnsGID][query_s][3]
            end = dict_exonCoords[EnsGID][query_e][2] if dict_judge[query_e] else int(dict_exonCoords[EnsGID][query_e][2])+1
            relaPos = int(start) - int(end) + 1
            position.append(relaPos+accum)
    #print(relaPos)
    return relaPos,position
def list_check(lis):
    """Return True iff *lis* is not a sentinel list and has a truthy item.

    Sentinels (['skipped'], ['intron'], ['unrecoverable']) and lists whose
    elements are all empty yield False.
    """
    if lis in (['skipped'], ['intron'], ['unrecoverable']):
        return False
    return any(bool(item) for item in lis)
def transcript2peptide(cdna_sequence):   # actually to ORF
    """Predict the most likely ORF (nucleotide string) in *cdna_sequence*.

    Scans all three reading frames, splits each frame at in-frame stop
    codons (pos_to_frags), collects every in-frame ATG-initiated candidate,
    then picks a winner: a candidate >= 8 nt longer always wins; within 8 nt
    the GC-content + codon-usage score (score_GC + score_coding_bias)
    breaks the tie.  Returns '' when no candidate is found.
    """
    reading_manners = []
    reading_manners.append(cdna_sequence[0:])
    reading_manners.append(cdna_sequence[1:])
    reading_manners.append(cdna_sequence[2:])
    frag_comp_array = []
    for manner in reading_manners:
        # Positions of in-frame stop codons in this frame.
        pos = []
        for m in re.finditer(r'(TAA|TGA|TAG)',manner):  # for multiple instances
            if m.start() % 3 == 0:
                pos.append(m.start())
        if pos == []:
            pos = rescue_position(pos,manner)
        frag_array,last_seq = pos_to_frags(pos,manner)
        for frag in frag_array:
            if 'ATG' not in frag or len(frag) == 0:
                continue
            else:
                for n in re.finditer(r'ATG',frag):
                    # Keep only ATGs in frame with the fragment's end.
                    if (len(frag) - n.start()) % 3 == 0:
                        frag_comp = frag[n.start():]
                        frag_comp_array.append(frag_comp)
                        break   # might have multiple 'ATG' so it is necessary to break when find first 'ATG'
                    else:
                        continue
        # process last_seq:
        for n in re.finditer(r'ATG',last_seq):
            if n.start() % 3 == 0:
                last_frag = last_seq[n.start():]
                # Trim the trailing bases that do not complete a codon.
                protruding = len(last_frag) % 3
                end = -1 - protruding + 1 # python end exclusive, so + 1
                # NOTE(review): when protruding == 0 this makes end == 0 and
                # last_frag[:0] is empty, silently dropping an exact-multiple
                # trailing ORF — confirm whether that is intended.
                last_frag_real = last_frag[:end]
                frag_comp_array.append(last_frag_real)
    #######################   # We think if you only has longer length(0-7) but add_score is not higher than original one, you are FAlSE
    max_seq = ''
    max_length = 0
    max_item_score = 0
    for item in frag_comp_array:
        temp1 = len(item)
        if temp1==0: continue
        else:
            add_score = score_GC(item) + score_coding_bias(item)
            if (temp1 - max_length) >= 8:
                max_length = temp1
                max_item_score = add_score
                max_seq = item
            elif (temp1 - max_length) >= 0 and (temp1 - max_length) < 8:
                if add_score >= max_item_score:
                    max_length = temp1
                    max_item_score = add_score
                    max_seq = item
#        else:
#            print('equal length but less likely to be a true ORF or longer length but less likely to be a true ORF',add_score,max_item_score)
    max_seq_tran = max_seq
    return max_seq_tran
def rescue_position(pos,manner):
    """Fallback when a reading frame contains no in-frame stop codon.

    NOTE(review): this appends codon-trimmed fragment STRINGS to *pos*,
    but the caller feeds *pos* to pos_to_frags, which slices the sequence
    with the elements of *pos* as integer indices — these two usages look
    inconsistent (a string index would raise TypeError).  The intent may
    have been to append m.start() instead; confirm before changing.
    """
    for m in re.finditer(r'ATG',manner):
        if m.start() % 3 ==0:
            # Trim the tail so the fragment length is a multiple of 3.
            span = len(manner) - m.start()
            protruding = span % 3
            end = -1 - protruding
            frag = manner[m.start():end]
            pos.append(frag)
    return pos
def pos_to_frags(pos, sequence):
    """Split *sequence* at the stop-codon positions in *pos*.

    pos -- ascending 0-based indices of in-frame stop codons (3 nt each).
    Returns (frag_array, last_seq): the fragments strictly between stop
    codons, and everything after the final stop codon.  The trailing
    sequence is returned separately because it may still contain an ORF
    running off the end of a truncated transcript.

    BUG FIX: the original left 'last_seq' unbound when *pos* was empty
    (NameError at return); now the whole sequence is the trailing fragment.
    """
    frag_array = []
    last_seq = sequence  # defined even when there are no stop codons
    if pos:
        frag_array.append(sequence[0:pos[0]])
        # Fragments between consecutive stop codons (skip the 3-nt codon).
        for i in range(len(pos) - 1):
            frag_array.append(sequence[pos[i] + 3:pos[i + 1]])
        last_seq = sequence[pos[-1] + 3:]
    return frag_array, last_seq
def score_GC(sequence):
    """Return the G+C fraction of *sequence* (a nucleotide string).

    Raises ValueError on an empty sequence.  (The original printed the
    sequence and re-raised a bare Exception from a broad except clause;
    callers filter out zero-length items, so this path is exceptional.)
    """
    if not sequence:
        raise ValueError('score_GC called with an empty sequence')
    gc_count = sum(1 for nt in sequence if nt == 'G' or nt == 'C')
    return gc_count / len(sequence)
def score_coding_bias(sequence):
    """Codon-usage score for *sequence*, averaged over its triplets.

    Each codon's human usage frequency (per-thousand, GenScript table) is
    min-max normalised to [0, 1]; the mean normalised frequency over all
    complete triplets is returned.
    """
    # coding frequency table is from GenScript webpage
    usage_dict = {'TTT':16.9,'TTC':20.4,'TTA':7.2,'TTG':12.6,'TAT':12.0,'TAC':15.6,'TAA':0.7,'TAG':0.5,
                  'CTT':12.8,'CTC':19.4,'CTA':6.9,'CTG':40.3,'CAT':10.4,'CAC':14.9,'CAA':11.8,'CAG':34.6,
                  'ATT':15.7,'ATC':21.4,'ATA':7.1,'ATG':22.3,'AAT':16.7,'AAC':19.5,'AAA':24.0,'AAG':32.9,
                  'GTT':10.9,'GTC':14.6,'GTA':7.0,'GTG':28.9,'GAT':22.3,'GAC':26.0,'GAA':29.0,'GAG':40.8,
                  'TCT':14.6,'TCC':17.4,'TCA':11.7,'TCG':4.5,'TGT':9.9,'TGC':12.2,'TGA':1.3,'TGG':12.8,
                  'CCT':17.3,'CCC':20.0,'CCA':16.7,'CCG':7.0,'CGT':4.7,'CGC':10.9,'CGA':6.3,'CGG':11.9,
                  'ACT':12.8,'ACC':19.2,'ACA':14.8,'ACG':6.2,'AGT':11.9,'AGC':19.4,'AGA':11.5,'AGG':11.4,
                  'GCT':18.6,'GCC':28.5,'GCA':16.0,'GCG':7.6,'GGT':10.8,'GGC':22.8,'GGA':16.3,'GGG':16.4}
    min_freq = 4.5
    max_freq = 40.8
    # Min-max normalise each codon frequency; Decimal keeps the arithmetic
    # free of binary-float artefacts before the final float() conversion.
    norm_usage_dict = {
        codon: float((D(freq) - D(min_freq)) / (D(max_freq) - D(min_freq)))
        for codon, freq in usage_dict.items()
    }
    # Walk the sequence codon by codon, accumulating normalised scores.
    score = 0
    for offset in range(0, len(sequence) - 2, 3):
        score += norm_usage_dict[sequence[offset:offset + 3]]
    # Scale by the number of triplets in the sequence.
    return score / (len(sequence) / 3)
def getORFaa(df):
    """Translate every predicted ORF in df['ORF'] and add an 'ORFaa' column.

    Each row's ORF is a list of nucleotide strings (or ['None'] / '' for
    missing entries); each non-empty string is translated with Biopython's
    Seq.translate (to_stop=False, so internal stops appear as '*').
    Mutates and returns *df*.
    """
    col = []
    for i in range(df.shape[0]):
        ORF = df.iloc[i]['ORF']
        if ORF == ['None']:
            col.append('None')
        else:
            tempArray = []
            for transcript in ORF:
                # Preserve placeholder entries as empty strings.
                if not transcript: tempArray.append('')
                else:
                    maxAA = str(Seq(transcript,generic_dna).translate(to_stop=False))
                    tempArray.append(maxAA)
            col.append(tempArray)
    df['ORFaa'] = col
    return df
def biotype(df):
    """Collapse a (gene, protein, annotation) table into a nested dict.

    Returns {EnsGID: {EnsPID: annotation}} from the first three columns;
    a later row with the same (gene, protein) pair overwrites the earlier
    annotation, as before.
    """
    mapping = {}
    for row in df.itertuples(index=False):
        gene_id, protein_id, annotation = row[0], row[1], row[2]
        mapping.setdefault(gene_id, {})[protein_id] = annotation
    return mapping
# {EnsGID:{EnsPID:Anno,EnsPID:Anno}}
def check_GTEx(df,cutoff_PSI,cutoff_sampleRatio,cutoff_tissueRatio):
    """Drop events that are broadly expressed across GTEx normal tissues.

    A row is kept when its foreground event is absent from dicTissueExp,
    or is "expressed" (PSI > cutoff_PSI in more than cutoff_sampleRatio of
    a tissue's samples) in no more than cutoff_tissueRatio of the 51
    non-cancer tissues.  Returns a re-indexed filtered copy of *df*.
    """
    # dicTissueExp: {event: {tissue: ndarray of per-sample PSI values}}
    with bz2.BZ2File(os.path.join(dataFolder,'dicTissueExp.pbz2'),'rb') as f1:
        dicTissueExp = cpickle.load(f1)
    col = []
    for i in range(df.shape[0]):
        UID = df.iloc[i]['UID']
        event = UID.split('|')[0]  # foreground event
        try:
            tissueExp = dicTissueExp[event]  # {heart:[],brain:[]}  # values are ndarray
        except KeyError:
            # Event never observed in GTEx -> treat as tumor-specific, keep.
            cond = True
            col.append(cond)
        else:
            tissueCounter = 0
            for tis,exp in tissueExp.items():
                if tis == 'Cells - Cultured fibroblasts' or tis == 'Cells - Leukemia cell line (CML)' or tis == 'Cells - EBV-transformed lymphocyte':  # these tissue are tumor tissue, should be excluded
                    continue
                else:
                    exp = exp.astype('float64')
                    exp[np.isnan(exp)] = 0.0   # nan means can not detect the gene expression
                    hits = sum([True if i > cutoff_PSI else False for i in exp])   # in a tissue, how many samples have PSI > cutoff value
                    total = exp.size    # how many samples for each tissue type
                    sampleRatio = hits/total    # percentage of sampels that are expressing this event
                    if sampleRatio > cutoff_sampleRatio: tissueCounter += 1   # this tissue is expressiing this event
            tissueRatio = tissueCounter/51    # 51 tissue types in total,excluding three cancer cell lines
            if tissueRatio > cutoff_tissueRatio:
                cond = False
                col.append(cond)
            else:
                cond = True
                col.append(cond)
    # Filter by the temporary boolean column, then drop it and re-index.
    df['cond'] = col
    new_df = df[df['cond']]
    new_df = new_df.drop(columns = ['cond'])
    new_df = new_df.set_index(pd.Index(np.arange(new_df.shape[0])))
    return new_df
def main(args):
    """Pipeline driver.

    Loads the reference tables, optionally pre-filters events against GTEx,
    then runs the three transcript-matching rounds, ORF prediction, the
    NMD/translatability checks and in-silico translation, writing results
    under <outFolder>/result_<taskName>/.
    """
    # Expose CLI arguments as module globals consumed by the worker functions.
    global intFile
    global dataFolder
    global outFolder
    global taskName
    global check
    intFile = args.intFile
    dataFolder = args.dataFolder
    outFolder = args.outFolder
    taskName = args.taskName
    check = args.check
    if not os.path.exists(os.path.join(outFolder,'result_{0}'.format(taskName))): os.makedirs(os.path.join(outFolder,'result_{0}'.format(taskName)))
    #print(intFile,dataFolder,outFolder)
    # doesn't consider if EnsGID doesn't exist in existing ones, so if KeyError pops up, be aware of that
    global df_exonlist
    global dict_exonCoords
    global dict_fa
    global dictExonList
    global dictExonList_p
    global df_biotype
    global dict_biotype
    print('loading data...')
    # Reference tables: transcript->exon lists, exon coordinates, gene
    # sequences (with 2 kb flanks), and transcript biotype annotations.
    df_exonlist = pd.read_csv(os.path.join(dataFolder,'mRNA-ExonIDs.txt'),sep='\t',header=None,names=['EnsGID','EnsTID','EnsPID','Exons'])
    dict_exonCoords = exonCoords_to_dict(os.path.join(dataFolder,'Hs_Ensembl_exon.txt'),'\t')
    dict_fa = fasta_to_dict(os.path.join(dataFolder,'Hs_gene-seq-2000_flank.fa'))
    dictExonList = convertExonList(df_exonlist)
    dictExonList_p = convertExonList_pep(df_exonlist)
    df_biotype = pd.read_csv(os.path.join(dataFolder,'Hs_Ensembl_transcript-biotypes.txt'),sep='\t')
    dict_biotype = biotype(df_biotype)
    print('loding input file...')
    df = pd.read_csv(intFile,sep='\t')  # only one column name is 'UID'
    print('GTEx check...')
    # Note: 'check' is a string flag from argparse, compared literally.
    if check == 'True':
        df = check_GTEx(df,cutoff_PSI=0.1,cutoff_sampleRatio=0.1,cutoff_tissueRatio=0.1)
        if df.shape[0] == 0:
            raise Exception('After checking GTEx, no event remains')
        df.to_csv(os.path.join(outFolder,'result_{0}'.format(taskName),'after_check.txt'),sep='\t',index=None)
    print('first round matching...')
    df_first = matchWithExonlist(df,df_exonlist,dict_exonCoords)
    print('second round matching...')
    df_second = second_round(df_first)
    print('third round matching...')
    df_third = third_round(df_second)
    print('predicting most likely ORFs...')
    df_ORF = getORF(df_third)
    print('labelling potential ORFs that will subjected to NMD and non-translatable ones...')
    df_ORF_check = ORF_check(df_ORF)
    print('in-silico translation...')
    df_ORF_aa = getORFaa(df_ORF_check)
    print('writing output file...')
    df_ORF_aa.to_csv(os.path.join(outFolder,'result_{0}'.format(taskName),'ORF_aa.txt'),sep='\t',index=None)
if __name__ == '__main__':
    # CLI entry point: all options are strings except the implicit defaults;
    # '--check' expects the literal string 'True' to enable the GTEx filter.
    parser = argparse.ArgumentParser(description='get full-length isoforms')  # ArgumentParser object
    parser.add_argument('--intFile',type=str,default='.',help='input file path')
    parser.add_argument('--dataFolder',type=str,default='./data',help='data folder path')
    parser.add_argument('--outFolder',type=str,default='.',help='output folder path')
    parser.add_argument('--taskName',type=str,default='.',help='task Name')
    parser.add_argument('--check',type=str,default='.',help='check tumor specificity or not')
    args = parser.parse_args()  # namespace object
    main(args)
# df_exonlist = pd.read_csv('./data/mRNA-ExonIDs.txt',sep='\t',header=None,names=['EnsGID','EnsTID','EnsPID','Exons'])
# dict_exonCoords = exonCoords_to_dict('./data/Hs_Ensembl_exon.txt','\t')
# dict_fa = fasta_to_dict('./data/Hs_gene-seq-2000_flank.fa')
# dictExonList = convertExonList(df_exonlist)
#
# df = pd.read_csv('downsampled.txt',sep='\t') # only one column name is 'UID'
# df_first = matchWithExonlist(df,df_exonlist,dict_exonCoords)
# df_second = second_round(df_first)
# df_third = third_round(df_second)
# df_ORF = getORF(df_third)
#
# df_ORF_check = ORF_check(df_ORF)
#
#
#
# df_ORF_aa = getORFaa(df_ORF)
# df_ORF_aa.to_csv('ORF_aa_downsampled.txt',sep='\t',index=None)
| [
"Frank Li"
] | Frank Li |
549f5cdd2f1ceb5c3304e98da3e3fb0df1f12544 | 8f4f9a07fa25490289b76253971b2ae8c386e8cd | /huaweicloud-sdk-kafka/setup.py | 3f33f92192faaa07344e21f9595dfb8db145ea7d | [
"Apache-2.0"
] | permissive | xingkongcwb/huaweicloud-sdk-python-v3 | 5c635af84a9fb4fb37c45a8de38f691724ca5e31 | 007d5c54ff71f0a2d7b52dcc53e3d38dec4fe775 | refs/heads/master | 2023-03-23T09:56:10.606710 | 2021-03-19T12:47:29 | 2021-03-19T12:47:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,421 | py | # coding: utf-8
from os import path
from setuptools import setup, find_packages

# Packaging metadata for the Huawei Cloud Kafka SDK wheel.
NAME = "huaweicloudsdkkafka"
VERSION = "3.0.37-rc"
AUTHOR = "HuaweiCloud SDK"
AUTHOR_EMAIL = "hwcloudsdk@huawei.com"
URL = "https://github.com/huaweicloud/huaweicloud-sdk-python-v3"

DESCRIPTION = "Kafka"
# Long description is taken verbatim from the PyPI-facing README.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README_PYPI.md'), encoding='utf-8') as f:
    LONG_DESCRIPTION = f.read()

# Only hard runtime dependency is the shared core SDK package.
REQUIRES = ["huaweicloudsdkcore"]

setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    long_description_content_type='text/markdown',
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    license="Apache LICENSE 2.0",
    url=URL,
    keywords=["huaweicloud", "sdk", "Kafka"],
    packages=find_packages(exclude=["tests*"]),
    install_requires=REQUIRES,
    include_package_data=True,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Software Development'
    ]
)
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
4a5516634363232525aa64ac4cf02350279fd5aa | e20ff12b280bcae1ee6436a0a2285a10fce7baf0 | /Proposal_Extraction_Code/py-faster-2/tools/demo.py | 7d08a57f24a2fb7f4b7618a1e657f7c46e2e9a66 | [] | no_license | SeokHeel/face_classification_ccbr2016 | e5037c86d9ed33bf375101b0ce1eab97c45b4199 | 05f5664d41ebffb89389902423479db2a64e2501 | refs/heads/master | 2021-06-08T20:55:50.079297 | 2016-10-16T10:28:11 | 2016-10-16T10:28:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,651 | py | #!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
# PASCAL VOC object classes; index 0 is the background class.
CLASSES = ('__background__',
           'aeroplane', 'bicycle', 'bird', 'boat',
           'bottle', 'bus', 'car', 'cat', 'chair',
           'cow', 'diningtable', 'dog', 'horse',
           'motorbike', 'person', 'pottedplant',
           'sheep', 'sofa', 'train', 'tvmonitor')

# Supported backbones: CLI key -> (model directory name, trained weights file).
NETS = {'vgg16': ('VGG16',
                  'VGG16_faster_rcnn_final.caffemodel'),
        'zf': ('ZF',
                  'ZF_faster_rcnn_final.caffemodel')}
def vis_proposal(im, dets):
    """Draw every proposal box in *dets* on image *im* as green rectangles.

    im is a BGR (OpenCV) image; dets rows are [x1, y1, x2, y2, ...].
    """
    rgb = im[:, :, (2, 1, 0)]  # OpenCV BGR -> matplotlib RGB
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(rgb, aspect='equal')
    for proposal in dets:
        ax.add_patch(
            plt.Rectangle((proposal[0], proposal[1]),
                          proposal[2] - proposal[0],
                          proposal[3] - proposal[1],
                          fill=False, edgecolor='green', linewidth=3.5)
        )
    plt.axis('off')
    plt.tight_layout()
    plt.draw()
def vis_detections(im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes."""
    # dets rows are [x1, y1, x2, y2, score]; keep only confident detections.
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return

    # transfer BGR to RGB
    im = im[:, :, (2, 1, 0)]
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im, aspect='equal')
    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]

        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1], fill=False,
                          edgecolor='red', linewidth=3.5)
            )
        # Class label + confidence just above the box's top-left corner.
        ax.text(bbox[0], bbox[1] - 2,
                '{:s} {:.3f}'.format(class_name, score),
                bbox=dict(facecolor='blue', alpha=0.5),
                fontsize=14, color='white')

    ax.set_title(('{} detections with '
                  'p({} | box) >= {:.1f}').format(class_name, class_name,
                                                  thresh),
                  fontsize=14)
    plt.axis('off')
    plt.tight_layout()
    plt.draw()
def demo(net, image_name):
    """Detect object classes in an image using pre-computed object proposals."""

    # Load the demo image
    im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
    im = cv2.imread(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    # scores: (num_proposals, num_classes); boxes: 4 coords per class per row.
    scores, boxes = im_detect(net, im)
    #debug for proposal
    # vis_proposal(im,proposal)
    timer.toc()
    # Python 2 print statement (this file targets py-faster-rcnn's Python 2).
    print ('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0])

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1 # because we skipped background
        # Per-class box columns, stacked with the class score for NMS.
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        vis_detections(im, cls, dets, thresh=CONF_THRESH)
def parse_args():
    """Parse input arguments (GPU id, CPU fallback, backbone choice)."""
    parser = argparse.ArgumentParser(description='Faster R-CNN demo')
    parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
                        default=0, type=int)
    parser.add_argument('--cpu', dest='cpu_mode',
                        help='Use CPU mode (overrides --gpu)',
                        action='store_true')
    # Valid choices come from the module-level NETS table.
    parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16]',
                        choices=NETS.keys(), default='vgg16')

    args = parser.parse_args()

    return args
if __name__ == '__main__':
    cfg.TEST.HAS_RPN = True  # Use RPN for proposals

    args = parse_args()

    # Resolve the test prototxt and trained weights for the chosen backbone.
    prototxt = os.path.join(cfg.MODELS_DIR, NETS[args.demo_net][0],
                            'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
    caffemodel = os.path.join(cfg.DATA_DIR, 'faster_rcnn_models',
                              NETS[args.demo_net][1])

    if not os.path.isfile(caffemodel):
        raise IOError(('{:s} not found.\nDid you run ./data/script/'
                       'fetch_faster_rcnn_models.sh?').format(caffemodel))

    if args.cpu_mode:
        caffe.set_mode_cpu()
    else:
        caffe.set_mode_gpu()
        caffe.set_device(args.gpu_id)
        cfg.GPU_ID = args.gpu_id
    net = caffe.Net(prototxt, caffemodel, caffe.TEST)

    print '\n\nLoaded network {:s}'.format(caffemodel)

    # Warmup on a dummy image
    im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
    for i in xrange(2):
        _, _= im_detect(net, im)

    # im_names = ['000456.jpg', '000542.jpg', '001150.jpg',
    #             '001763.jpg', '004545.jpg']
    im_names=['000456.jpg']
    for im_name in im_names:
        print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
        print 'Demo for data/demo/{}'.format(im_name)
        demo(net, im_name)

    plt.show()
| [
"davidsonic@163.com"
] | davidsonic@163.com |
ec3ad99b79a2d058b98fd32d979c31f5dc59e36b | 4af281a1b2992de4dceb37ef91f635c44e4f7dcd | /keymaster/server/model/entry_tag.py | f0dafd5d3a14eec05412335222adc013d1a49fe0 | [
"Apache-2.0"
] | permissive | shiroyuki/keymaster | e9772b50c4966ef2ee00860c934a161af60007e3 | 1efee54427378394ab04d0e53247eb38c28bc97c | refs/heads/master | 2020-12-23T11:29:19.262198 | 2020-02-09T06:20:50 | 2020-02-09T06:20:50 | 237,137,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | from dataclasses import dataclass
from uuid import uuid4
from xmode.db.analyzer import default, constraint, identified_by, stored_in
from xmode.db.definitions import UUID, String
# Declarative persistence metadata consumed by the xmode.db analyzer
# (table name, primary key, indexes, uniqueness constraints, defaults).
@stored_in('entry_tags')
@identified_by('id') # This is a PK.
@constraint('index', ('owner_id',))
@constraint('unique', ('owner_id', 'entry_id',))
@constraint('unique', ('owner_id', 'name',))
@default('id', lambda: str(uuid4()))
@dataclass
class EntryTag:
    """A tag attached to one entry, scoped to a single owner."""
    id: UUID        # primary key; defaults to a random UUID4 string
    owner_id: UUID  # owning user; indexed
    entry_id: UUID  # tagged entry; unique per (owner_id, entry_id)
    name: String    # tag label; unique per (owner_id, name)
"jnopporn@shiroyuki.com"
] | jnopporn@shiroyuki.com |
d5c63e22901bf27c426b46d99840236846c3fc62 | ccc86a5029ff00b478685fe8ae365db141096927 | /shop/migrations/0001_initial.py | 6b30a5fe0d0b92676ee132213ecea53afdab2dbe | [] | no_license | jaishivnani/MyAwesomeCart | d0359744d80aa5c29fb77b91eb93434dbf72fd20 | 7ea29cb190b281a3ec2a5385e783a6660f0f905e | refs/heads/main | 2023-01-04T20:45:22.229027 | 2020-10-24T18:27:16 | 2020-10-24T18:27:16 | 305,470,988 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | # Generated by Django 3.1 on 2020-08-27 16:12
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('product_name', models.CharField(max_length=50)),
('desc', models.CharField(max_length=300)),
('pub_date', models.DateField()),
],
),
]
| [
"jaishivnani30@gmail.com"
] | jaishivnani30@gmail.com |
e15b690026e60540430f1e6f9ef59773e11ad73b | d67bd00f8fe819bd3011ce154c19cbc765d59f1d | /branches/3.2_buildout/il/sapl/skins/consultas/protocolo/protocolo_index_html.py | e345773faec152ee9eb9e8df142a93e8c00973f7 | [] | no_license | openlegis-br/sagl | 90f87bdbbaa8a6efe0ccb5691ea8424575288c46 | eabf7529eefe13a53ed088250d179a92218af1ed | refs/heads/master | 2023-08-31T12:29:39.382474 | 2023-08-29T16:12:01 | 2023-08-29T16:12:01 | 32,593,838 | 17 | 1 | null | 2023-08-29T06:16:55 | 2015-03-20T16:11:04 | Python | UTF-8 | Python | false | false | 371 | py | from Products.CMFCore.utils import getToolByName
request=context.REQUEST
mt = getToolByName(context, 'portal_membership')
if mt.isAnonymousUser():
redirect_url=context.portal_url()+'/consultas/protocolo/pesquisa_publica_form'
else:
redirect_url=context.portal_url()+'/consultas/protocolo/protocolo_pesquisar_form'
request.RESPONSE.redirect(redirect_url)
| [
"contato@openlegis.com.br"
] | contato@openlegis.com.br |
31e8ef8e14aef22b0ad325db319efbf0418237b6 | f5b36f10e6c9c1dbe70208c291c7af5f3e0d39c9 | /client_src/client.py | c15f8f6b879e881dc1deceddbc90b73c589a7112 | [] | no_license | gandolfreddy/esp8266_project | 3a1eabd70f41dcf76b4c8e62cde1a926794841ab | 08625955fba09e14d6d3b18e391824fcf3f2456a | refs/heads/main | 2023-05-29T18:10:32.509840 | 2021-06-17T19:18:00 | 2021-06-17T19:18:00 | 377,606,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 934 | py | import socket
from machine import Pin
from time import sleep_ms
def start():
    """Poll two push buttons and relay commands to the AP at 192.168.4.1:13326.

    Button on GPIO0 sends "led change"; button on GPIO12 sends "bye".
    Buttons are pull-up inputs, so a press reads 0.  Loops until the server
    replies b'quit', then closes the socket.
    """
    # NOTE(review): 'led' is configured but never toggled in this function —
    # presumably controlled elsewhere or leftover; confirm before removing.
    led = Pin(2, Pin.OUT, value=1)
    sw_led = Pin(0, Pin.IN, Pin.PULL_UP)
    sw_bye = Pin(12, Pin.IN, Pin.PULL_UP)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.connect(("192.168.4.1", 13326))
    while True:
        print("Press any button")
        # Busy-wait until either button is pressed (value() drops to 0).
        while sw_led.value() and sw_bye.value():
            pass
        if not sw_led.value():
            msg = "led change"
            # Wait for release so one press sends exactly one message.
            while not sw_led.value():
                pass
        if not sw_bye.value():
            msg = "bye"
            while not sw_bye.value():
                pass
        s.send(msg.encode("utf-8"))
        reply = s.recv(128)
        if reply == b'quit':
            print("Disconnect")
            s.close()
            break
        print(str(reply))
| [
"noreply@github.com"
] | gandolfreddy.noreply@github.com |
57580b731fc5be2d1f3b6c50e77441d76337e23f | 3cef23043a4bf3bc2a37d952e51b1a9faeb76d0b | /tests/widgets/test_mdselect.py | f103357802890708ffc5494965f13e07c3cde412 | [
"MIT"
] | permissive | hiroaki-yamamoto/django-nghelp | 794bc103ecf5bb652363e3a1df530afa971ac46a | e15dc408a4a9205d23f9d68b6d10d7b9648dbd2e | refs/heads/master | 2020-07-29T21:41:23.972244 | 2018-01-15T04:30:49 | 2018-01-15T04:30:49 | 73,657,569 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,463 | py | #!/usr/bin/env python
# coding=utf-8
"""MDSelect Tests."""
from django import setup
from django.test import TestCase
from django_nghelp.widgets import MDSelect
setup()
class SimpleMDSelectTest(TestCase):
    """Simple MDSelect Usage test (flat choices, including a None value)."""

    def setUp(self):
        """Setup."""
        self.field = MDSelect(choices=(
            ("test", "Test"), ("test2", "Test2"), (None, "Test3")
        ))

    def test_render(self):
        """The generated html should be correct."""
        # Newlines are stripped so the comparison is layout-independent.
        result = str(self.field.render("result", None)).replace("\n", "")
        # The None-valued choice matches value=None and renders as selected.
        data = (
            "<md-select data-name=\"result\">"
            "<md-option data-value=\"test\">Test</md-option>"
            "<md-option data-value=\"test2\">Test2</md-option>"
            "<md-option data-selected>Test3</md-option>"
            "</md-select>"
        )
        self.assertEqual(result, data)

    def test_render_has_value(self):
        """The generated html should be correct."""
        result = str(self.field.render("result", "test")).replace("\n", "")
        data = (
            "<md-select data-name=\"result\">"
            "<md-option data-value=\"test\" data-selected>"
            "Test</md-option>"
            "<md-option data-value=\"test2\">Test2</md-option>"
            "<md-option>Test3</md-option>"
            "</md-select>"
        )
        self.assertEqual(result, data)

    def test_render_unselectable_value(self):
        """The generated html should be correct."""
        # A value not present in choices selects nothing.
        result = str(self.field.render("result", "test_a")).replace("\n", "")
        data = (
            "<md-select data-name=\"result\">"
            "<md-option data-value=\"test\">Test</md-option>"
            "<md-option data-value=\"test2\">Test2</md-option>"
            "<md-option>Test3</md-option>"
            "</md-select>"
        )
        self.assertEqual(result, data)
class MDSelectGroupingTest(TestCase):
    """MDSelect Grouping test (nested choices render as md-optgroup)."""

    def setUp(self):
        """Setup."""
        self.field = MDSelect(
            choices=(
                ("test", (
                    ("testTest1", "Test Test 1"),
                    ("testTest2", "Test Test 2")
                )),
                ("test2", "Test2")
            )
        )

    def test_render(self):
        """The generated html should be correct."""
        result = str(self.field.render("result", None)).replace("\n", "")
        data = (
            "<md-select data-name=\"result\">"
            "<md-optgroup data-label=\"test\">"
            "<md-option data-value=\"testTest1\">Test Test 1</md-option>"
            "<md-option data-value=\"testTest2\">Test Test 2</md-option>"
            "</md-optgroup>"
            "<md-option data-value=\"test2\">Test2</md-option>"
            "</md-select>"
        )
        self.assertEqual(result, data)
class MDSelectEmptyFieldTest(TestCase):
    """MDSelect Test without any options."""

    def setUp(self):
        """Setup."""
        self.field = MDSelect()

    def test_render(self):
        """The generated html should be correct."""
        # With no choices the element renders empty.
        result = str(self.field.render("result", None)).replace("\n", "")
        data = "<md-select data-name=\"result\"></md-select>"
        self.assertEqual(result, data)
class MDSelectDisableSelectTest(TestCase):
    """MDSelect test with disabled selection (no data-selected emitted)."""

    def setUp(self):
        """Setup."""
        self.field = MDSelect(disable_select=True, choices=(
            ("test", "Test"), ("test2", "Test2"), (None, "Test3")
        ))

    def test_render(self):
        """The generated html should be correct."""
        result = str(self.field.render("result", None)).replace("\n", "")
        data = (
            "<md-select data-name=\"result\">"
            "<md-option data-value=\"test\">Test</md-option>"
            "<md-option data-value=\"test2\">Test2</md-option>"
            "<md-option>Test3</md-option>"
            "</md-select>"
        )
        self.assertEqual(result, data)

    def test_render_has_value(self):
        """The generated html should be correct."""
        # Even a matching value must NOT be marked selected.
        result = str(self.field.render("result", "test")).replace("\n", "")
        data = (
            "<md-select data-name=\"result\">"
            "<md-option data-value=\"test\">Test</md-option>"
            "<md-option data-value=\"test2\">Test2</md-option>"
            "<md-option>Test3</md-option>"
            "</md-select>"
        )
        self.assertEqual(result, data)
| [
"hiroaki@hysoftware.net"
] | hiroaki@hysoftware.net |
3439cd72da514cd08f996686a5271e11db6ec5df | 2f989d067213e7a1e19904d482a8f9c15590804c | /lib/python3.4/site-packages/faker/utils/decorators.py | 4dc5f2fa92aff4383077799a7f5457c4e9b98750 | [
"MIT"
] | permissive | levabd/smart4-portal | beb1cf8847134fdf169ab01c38eed7e874c66473 | 2c18ba593ce7e9a1e17c3559e6343a14a13ab88c | refs/heads/master | 2023-02-18T05:49:40.612697 | 2022-08-02T09:35:34 | 2022-08-02T09:35:34 | 116,001,098 | 0 | 1 | MIT | 2023-02-15T21:34:01 | 2018-01-02T10:00:07 | Roff | UTF-8 | Python | false | false | 534 | py | # coding=utf-8
from functools import wraps
from faker.utils import text
def slugify(fn):
    """Decorate *fn* so its string result is slugified."""
    @wraps(fn)
    def decorated(*args, **kwargs):
        raw = fn(*args, **kwargs)
        return text.slugify(raw)
    return decorated
def slugify_domain(fn):
    """Decorate *fn* so its string result is slugified, keeping dots."""
    @wraps(fn)
    def decorated(*args, **kwargs):
        raw = fn(*args, **kwargs)
        return text.slugify(raw, allow_dots=True)
    return decorated
def slugify_unicode(fn):
    """Decorate *fn* so its string result is slugified, keeping unicode."""
    @wraps(fn)
    def decorated(*args, **kwargs):
        raw = fn(*args, **kwargs)
        return text.slugify(raw, allow_unicode=True)
    return decorated
| [
"levabd@gmail.com"
] | levabd@gmail.com |
1b739b8ddc0c3ea06a70f43ebd20060c45f6d936 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /dlm_write_f/resource_tag.py | ea3068d36bd5cd92a43e321bfe732414ac7ef3c8 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | #!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
"""
untag-resource : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/dlm/untag-resource.html
"""
write_parameter("dlm", "tag-resource") | [
"hcseo77@gmail.com"
] | hcseo77@gmail.com |
aebfaa7b26235eadfb4ca58a1c720541daabd68a | 29da2ca6def1270be13a3096685a8e5d82828dff | /CIM14/IEC61970/LoadModel/ConformLoadGroup.py | 8d8d955dd5c605b9e577b17866563deec9ca5a69 | [
"MIT"
] | permissive | rimbendhaou/PyCIM | 75eb3bcd3729b2410c03f3d5c66d6f1e05e21df3 | d578bb0bf1af344342bd23344385ed9c06c2d0ee | refs/heads/master | 2022-04-28T01:16:12.673867 | 2020-04-16T02:19:09 | 2020-04-16T02:19:09 | 256,085,381 | 0 | 0 | MIT | 2020-04-16T02:15:20 | 2020-04-16T02:08:14 | null | UTF-8 | Python | false | false | 3,544 | py | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61970.LoadModel.LoadGroup import LoadGroup
class ConformLoadGroup(LoadGroup):
    """A collection of loads that all follow one allocation pattern.

    Maintains two bidirectional one-to-many relationships in the standard
    PyCIM style: the public properties keep both ends of each link in sync,
    while the underscore-prefixed attributes hold the raw backing lists.
    """

    def __init__(self, ConformLoadSchedules=None, EnergyConsumers=None, *args, **kw_args):
        """Initialises a new 'ConformLoadGroup' instance.

        @param ConformLoadSchedules: The ConformLoadSchedules in the ConformLoadGroup.
        @param EnergyConsumers: Conform loads assigned to this ConformLoadGroup.
        """
        self._ConformLoadSchedules = []
        self.ConformLoadSchedules = ConformLoadSchedules if ConformLoadSchedules is not None else []

        self._EnergyConsumers = []
        self.EnergyConsumers = EnergyConsumers if EnergyConsumers is not None else []

        super(ConformLoadGroup, self).__init__(*args, **kw_args)

    # PyCIM reflection metadata: which members are plain attributes and which
    # are (many-valued) references to other CIM objects.
    _attrs = []
    _attr_types = {}
    _defaults = {}
    _enums = {}
    _refs = ["ConformLoadSchedules", "EnergyConsumers"]
    _many_refs = ["ConformLoadSchedules", "EnergyConsumers"]

    def getConformLoadSchedules(self):
        """The ConformLoadSchedules in the ConformLoadGroup.
        """
        return self._ConformLoadSchedules

    def setConformLoadSchedules(self, value):
        # Detach the currently linked schedules through their public property,
        # then attach the replacements directly via the private back-reference.
        # NOTE(review): the detach loop deliberately iterates the live backing
        # list, matching the generated PyCIM pattern — do not copy it.
        for old_schedule in self._ConformLoadSchedules:
            old_schedule.ConformLoadGroup = None
        for new_schedule in value:
            new_schedule._ConformLoadGroup = self
        self._ConformLoadSchedules = value

    ConformLoadSchedules = property(getConformLoadSchedules, setConformLoadSchedules)

    def addConformLoadSchedules(self, *ConformLoadSchedules):
        for schedule in ConformLoadSchedules:
            schedule.ConformLoadGroup = self

    def removeConformLoadSchedules(self, *ConformLoadSchedules):
        for schedule in ConformLoadSchedules:
            schedule.ConformLoadGroup = None

    def getEnergyConsumers(self):
        """Conform loads assigned to this ConformLoadGroup.
        """
        return self._EnergyConsumers

    def setEnergyConsumers(self, value):
        for old_consumer in self._EnergyConsumers:
            old_consumer.LoadGroup = None
        for new_consumer in value:
            new_consumer._LoadGroup = self
        self._EnergyConsumers = value

    EnergyConsumers = property(getEnergyConsumers, setEnergyConsumers)

    def addEnergyConsumers(self, *EnergyConsumers):
        for consumer in EnergyConsumers:
            consumer.LoadGroup = self

    def removeEnergyConsumers(self, *EnergyConsumers):
        for consumer in EnergyConsumers:
            consumer.LoadGroup = None
"rwl@thinker.cable.virginmedia.net"
] | rwl@thinker.cable.virginmedia.net |
3b3b7fb2927e145132ea14c11ad0d36645415be1 | f5377cebd671c743cb44dc76b1ab8ea502c47849 | /scripts/getCommitLink.py | 94ff04a509bd5ff0a3e1ec70bc715f5ac019e677 | [] | no_license | Kechegomz/propMining | 67a38f6b3d62d043c18591381bde15769fd8f72f | fc66861567473e4491f78290f5fcc034bdfc099b | refs/heads/master | 2023-05-04T07:11:32.712962 | 2021-05-28T10:15:56 | 2021-05-28T10:15:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,018 | py | import urllib.request
import urllib.error
from bs4 import BeautifulSoup
import ast
import time
import sys
path = sys.argv[1]
def getHtml(url):
    """Open *url*, retrying after a pause whenever GitHub rate-limits us.

    Returns the open file-like HTTP response; the caller is responsible for
    closing it.  HTTP errors other than 429 propagate to the caller.
    """
    try:
        # BUGFIX: the original opened the module-level global `link` instead of
        # the `url` parameter; it only worked because every call site happened
        # to pass that same global.
        return urllib.request.urlopen(url)
    except urllib.error.HTTPError as e:
        if e.getcode() == 429:
            # 429 Too Many Requests: back off briefly and retry the same URL.
            time.sleep(5)
            return getHtml(url)
        # Previously non-429 errors fell through and returned None, which made
        # the caller crash later on fp.read(); fail loudly at the source instead.
        raise
# Main loop: for each input line of the form "<field> <commit-hash>", run a
# GitHub commit search for the hash and print the commit URLs found on the
# results page.
for line in open(path, "r").readlines():
    hsh = line.strip().split(" ")[1]  # second whitespace-separated token is the hash
    link = "https://github.com/search?q={}&type=commits".format(hsh)
    fp = getHtml(link)
    mybytes = fp.read()
    mystr = mybytes.decode("utf8")
    fp.close()
    soup = BeautifulSoup(mystr, features="html.parser")
    # Each search hit is an <a> with this class triple; its "data-hydro-click"
    # attribute is a JSON blob containing (among other things) a "url" field.
    for hyper in soup.find_all("a", {"class": "message markdown-title js-navigation-open"}):
        # NOTE(review): the JSON is picked apart with naive comma/colon
        # splitting; this assumes the "url" value itself contains no commas.
        # Consider json.loads on the attribute instead — confirm its schema.
        for attrib in hyper["data-hydro-click"].split(","):
            tokens = attrib.split(":")
            if tokens[0] == "\"url\"":
                print(":".join(tokens[1:]).replace("\"","").replace("}",""))
                break
    time.sleep(2)  # stay polite to GitHub's rate limiter between searches
| [
"MY_NAME@example.com"
] | MY_NAME@example.com |
acb88a6a631c84abe713ab8ff108d14fd1fcadc7 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /i6hY9JSjQK4jcaB6i_15.py | 95a6029e2c33323357c374c9d6f328a290d2d2d5 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py |
def color_invert(rgb):
  """Return the complement of an (r, g, b) triple, mirroring each channel about 255."""
  red, green, blue = rgb
  return tuple(abs(255 - channel) for channel in (red, green, blue))
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
7b9f9b5bfad5c0d7ee7eef3d0d08b5cde41e18c4 | ae7d5d11351af9201ce6181c48b8c60363c7ed00 | /lib/galaxy/model/migrate/versions/0157_rework_dataset_validation.py | f315961a724527975006d0548c3d77274c30a0dd | [
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | natefoo/galaxy | 818037d03f39ccfb3714c7e784fd64d7ad8f4d2e | 64150c5bd803e75ed032e9f15acd003bae92b5ef | refs/heads/master | 2023-08-17T02:57:02.580487 | 2020-03-26T13:33:01 | 2020-03-26T13:33:01 | 31,212,836 | 2 | 1 | NOASSERTION | 2019-04-25T12:30:28 | 2015-02-23T15:01:46 | Python | UTF-8 | Python | false | false | 2,218 | py | """
Rework dataset validation in database.
"""
from __future__ import print_function
import logging
from sqlalchemy import (
Column,
ForeignKey,
Integer,
MetaData,
Table,
TEXT,
)
from galaxy.model.custom_types import TrimmedString
from galaxy.model.migrate.versions.util import add_column, create_table, drop_column, drop_table
log = logging.getLogger(__name__)
metadata = MetaData()
# Schema of the legacy per-dataset validation_error table.  Kept here so the
# downgrade path can recreate the table exactly as it existed before this
# revision dropped it.
validation_error_table = Table("validation_error", metadata,
                               Column("id", Integer, primary_key=True),
                               Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
                               Column("message", TrimmedString(255)),
                               Column("err_type", TrimmedString(64)),
                               Column("attributes", TEXT))
def upgrade(migrate_engine):
    """Apply this revision: drop the old validation_error table, then add the
    validated_state / validated_state_message columns to both dataset
    association tables."""
    print(__doc__)
    metadata.bind = migrate_engine
    metadata.reflect()

    drop_table(validation_error_table)

    hda_table = Table("history_dataset_association", metadata, autoload=True)
    ldda_table = Table("library_dataset_dataset_association", metadata, autoload=True)
    for target_table in (hda_table, ldda_table):
        # A fresh Column object is built per table: SQLAlchemy columns cannot
        # be shared between table definitions.
        add_column(
            Column('validated_state', TrimmedString(64), default='unknown', server_default="unknown", nullable=False),
            target_table,
            metadata,
        )
        add_column(Column('validated_state_message', TEXT), target_table, metadata)
def downgrade(migrate_engine):
    """Revert this revision: recreate the validation_error table and drop the
    two validated_state columns from both dataset association tables."""
    metadata.bind = migrate_engine
    metadata.reflect()

    create_table(validation_error_table)

    hda_table = Table("history_dataset_association", metadata, autoload=True)
    ldda_table = Table("library_dataset_dataset_association", metadata, autoload=True)
    for target_table in (hda_table, ldda_table):
        for column_name in ('validated_state', 'validated_state_message'):
            drop_column(column_name, target_table, metadata)
| [
"jmchilton@gmail.com"
] | jmchilton@gmail.com |
c54c7a0ee2d6eb61dd7d6c8c135cf70421843605 | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/Simulation/RunDependentSim/RunDependentSimData/share/configLumi_run310000.py | 6ef4ed7b55e96f42b38eab14ecb9ee0e83fb6e69 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,045 | py | ####################
## File configLumi_run310000.py: autogenerated configuration file from command
##../athena/Simulation/RunDependentSim/RunDependentSimComps/share/RunDepProfileGenerator.py -r 310000 -s 1550000000 -c 2000 -o configLumi_run310000.py -e {0.5:1,1.5:1,2.5:1,3.5:1,4.5:1,5.5:1,6.5:1,7.5:1,8.5:1,9.5:1,10.5:1,11.5:2,12.5:2,13.5:4,14.5:6,15.5:8,16.5:12,17.5:15,18.5:20,19.5:24,20.5:29,21.5:34,22.5:38,23.5:41,24.5:45,25.5:47,26.5:49,27.5:51,28.5:53,29.5:55,30.5:56,31.5:57,32.5:58,33.5:59,34.5:60,35.5:60,36.5:60,37.5:60,38.5:61,39.5:61,40.5:61,41.5:61,42.5:60,43.5:59,44.5:57,45.5:55,46.5:53,47.5:50,48.5:47,49.5:43,50.5:40,51.5:36,52.5:32,53.5:29,54.5:25,55.5:22,56.5:18,57.5:16,58.5:13,59.5:11,60.5:9,61.5:7,62.5:6,63.5:5,64.5:4,65.5:3,66.5:3,67.5:2,68.5:2,69.5:2,70.5:2,71.5:1,72.5:1,73.5:1,74.5:1,75.5:1,76.5:1,77.5:1,78.5:1,79.5:1,80.5:1,81.5:1,82.5:1,83.5:1,84.5:1,85.5:1,86.5:1,87.5:1,88.5:1,89.5:1,90.5:1,91.5:1,92.5:1,93.5:1,94.5:1,95.5:1,96.5:1,97.5:1,98.5:1,99.5:1}
## Created on Thu Jul 12 17:10:50 2018
####################
#Run-dependent digi job configuration file.
#RunDependentSimData/OverrideRunLBLumiDigitConfig.py
#We need to be able to adjust for different dataset sizes.
# Allow the driving job to pre-set ScaleTaskLength before including this file;
# default to 1 (no scaling) otherwise.
if 'ScaleTaskLength' not in dir():
    ScaleTaskLength = 1

def _evts(x):
    """Scale a per-lumiblock event count by the task-length factor, truncated to int."""
    return int(ScaleTaskLength * x)

if 'logging' not in dir():
    import logging
digilog = logging.getLogger('Digi_trf')
digilog.info('doing RunLumiOverride configuration from file.')
JobMaker=[
{'run':310000, 'lb':1, 'starttstamp':1550000000, 'dt':0.000, 'evts':_evts(1), 'mu':0.500, 'force_new':False},
{'run':310000, 'lb':2, 'starttstamp':1550000060, 'dt':0.000, 'evts':_evts(1), 'mu':1.500, 'force_new':False},
{'run':310000, 'lb':3, 'starttstamp':1550000120, 'dt':0.000, 'evts':_evts(1), 'mu':2.500, 'force_new':False},
{'run':310000, 'lb':4, 'starttstamp':1550000180, 'dt':0.000, 'evts':_evts(1), 'mu':3.500, 'force_new':False},
{'run':310000, 'lb':5, 'starttstamp':1550000240, 'dt':0.000, 'evts':_evts(1), 'mu':4.500, 'force_new':False},
{'run':310000, 'lb':6, 'starttstamp':1550000300, 'dt':0.000, 'evts':_evts(1), 'mu':5.500, 'force_new':False},
{'run':310000, 'lb':7, 'starttstamp':1550000360, 'dt':0.000, 'evts':_evts(1), 'mu':6.500, 'force_new':False},
{'run':310000, 'lb':8, 'starttstamp':1550000420, 'dt':0.000, 'evts':_evts(1), 'mu':7.500, 'force_new':False},
{'run':310000, 'lb':9, 'starttstamp':1550000480, 'dt':0.000, 'evts':_evts(1), 'mu':8.500, 'force_new':False},
{'run':310000, 'lb':10, 'starttstamp':1550000540, 'dt':0.000, 'evts':_evts(1), 'mu':9.500, 'force_new':False},
{'run':310000, 'lb':11, 'starttstamp':1550000600, 'dt':0.000, 'evts':_evts(1), 'mu':10.500, 'force_new':False},
{'run':310000, 'lb':12, 'starttstamp':1550000660, 'dt':0.000, 'evts':_evts(2), 'mu':11.500, 'force_new':False},
{'run':310000, 'lb':13, 'starttstamp':1550000720, 'dt':0.000, 'evts':_evts(2), 'mu':12.500, 'force_new':False},
{'run':310000, 'lb':14, 'starttstamp':1550000780, 'dt':0.000, 'evts':_evts(4), 'mu':13.500, 'force_new':False},
{'run':310000, 'lb':15, 'starttstamp':1550000840, 'dt':0.000, 'evts':_evts(6), 'mu':14.500, 'force_new':False},
{'run':310000, 'lb':16, 'starttstamp':1550000900, 'dt':0.000, 'evts':_evts(8), 'mu':15.500, 'force_new':False},
{'run':310000, 'lb':17, 'starttstamp':1550000960, 'dt':0.000, 'evts':_evts(12), 'mu':16.500, 'force_new':False},
{'run':310000, 'lb':18, 'starttstamp':1550001020, 'dt':0.000, 'evts':_evts(15), 'mu':17.500, 'force_new':False},
{'run':310000, 'lb':19, 'starttstamp':1550001080, 'dt':0.000, 'evts':_evts(20), 'mu':18.500, 'force_new':False},
{'run':310000, 'lb':20, 'starttstamp':1550001140, 'dt':0.000, 'evts':_evts(24), 'mu':19.500, 'force_new':False},
{'run':310000, 'lb':21, 'starttstamp':1550001200, 'dt':0.000, 'evts':_evts(29), 'mu':20.500, 'force_new':False},
{'run':310000, 'lb':22, 'starttstamp':1550001260, 'dt':0.000, 'evts':_evts(34), 'mu':21.500, 'force_new':False},
{'run':310000, 'lb':23, 'starttstamp':1550001320, 'dt':0.000, 'evts':_evts(38), 'mu':22.500, 'force_new':False},
{'run':310000, 'lb':24, 'starttstamp':1550001380, 'dt':0.000, 'evts':_evts(41), 'mu':23.500, 'force_new':False},
{'run':310000, 'lb':25, 'starttstamp':1550001440, 'dt':0.000, 'evts':_evts(45), 'mu':24.500, 'force_new':False},
{'run':310000, 'lb':26, 'starttstamp':1550001500, 'dt':0.000, 'evts':_evts(47), 'mu':25.500, 'force_new':False},
{'run':310000, 'lb':27, 'starttstamp':1550001560, 'dt':0.000, 'evts':_evts(49), 'mu':26.500, 'force_new':False},
{'run':310000, 'lb':28, 'starttstamp':1550001620, 'dt':0.000, 'evts':_evts(51), 'mu':27.500, 'force_new':False},
{'run':310000, 'lb':29, 'starttstamp':1550001680, 'dt':0.000, 'evts':_evts(53), 'mu':28.500, 'force_new':False},
{'run':310000, 'lb':30, 'starttstamp':1550001740, 'dt':0.000, 'evts':_evts(55), 'mu':29.500, 'force_new':False},
{'run':310000, 'lb':31, 'starttstamp':1550001800, 'dt':0.000, 'evts':_evts(56), 'mu':30.500, 'force_new':False},
{'run':310000, 'lb':32, 'starttstamp':1550001860, 'dt':0.000, 'evts':_evts(57), 'mu':31.500, 'force_new':False},
{'run':310000, 'lb':33, 'starttstamp':1550001920, 'dt':0.000, 'evts':_evts(58), 'mu':32.500, 'force_new':False},
{'run':310000, 'lb':34, 'starttstamp':1550001980, 'dt':0.000, 'evts':_evts(59), 'mu':33.500, 'force_new':False},
{'run':310000, 'lb':35, 'starttstamp':1550002040, 'dt':0.000, 'evts':_evts(60), 'mu':34.500, 'force_new':False},
{'run':310000, 'lb':36, 'starttstamp':1550002100, 'dt':0.000, 'evts':_evts(60), 'mu':35.500, 'force_new':False},
{'run':310000, 'lb':37, 'starttstamp':1550002160, 'dt':0.000, 'evts':_evts(60), 'mu':36.500, 'force_new':False},
{'run':310000, 'lb':38, 'starttstamp':1550002220, 'dt':0.000, 'evts':_evts(60), 'mu':37.500, 'force_new':False},
{'run':310000, 'lb':39, 'starttstamp':1550002280, 'dt':0.000, 'evts':_evts(61), 'mu':38.500, 'force_new':False},
{'run':310000, 'lb':40, 'starttstamp':1550002340, 'dt':0.000, 'evts':_evts(61), 'mu':39.500, 'force_new':False},
{'run':310000, 'lb':41, 'starttstamp':1550002400, 'dt':0.000, 'evts':_evts(61), 'mu':40.500, 'force_new':False},
{'run':310000, 'lb':42, 'starttstamp':1550002460, 'dt':0.000, 'evts':_evts(61), 'mu':41.500, 'force_new':False},
{'run':310000, 'lb':43, 'starttstamp':1550002520, 'dt':0.000, 'evts':_evts(60), 'mu':42.500, 'force_new':False},
{'run':310000, 'lb':44, 'starttstamp':1550002580, 'dt':0.000, 'evts':_evts(59), 'mu':43.500, 'force_new':False},
{'run':310000, 'lb':45, 'starttstamp':1550002640, 'dt':0.000, 'evts':_evts(57), 'mu':44.500, 'force_new':False},
{'run':310000, 'lb':46, 'starttstamp':1550002700, 'dt':0.000, 'evts':_evts(55), 'mu':45.500, 'force_new':False},
{'run':310000, 'lb':47, 'starttstamp':1550002760, 'dt':0.000, 'evts':_evts(53), 'mu':46.500, 'force_new':False},
{'run':310000, 'lb':48, 'starttstamp':1550002820, 'dt':0.000, 'evts':_evts(50), 'mu':47.500, 'force_new':False},
{'run':310000, 'lb':49, 'starttstamp':1550002880, 'dt':0.000, 'evts':_evts(47), 'mu':48.500, 'force_new':False},
{'run':310000, 'lb':50, 'starttstamp':1550002940, 'dt':0.000, 'evts':_evts(43), 'mu':49.500, 'force_new':False},
{'run':310000, 'lb':51, 'starttstamp':1550003000, 'dt':0.000, 'evts':_evts(40), 'mu':50.500, 'force_new':False},
{'run':310000, 'lb':52, 'starttstamp':1550003060, 'dt':0.000, 'evts':_evts(36), 'mu':51.500, 'force_new':False},
{'run':310000, 'lb':53, 'starttstamp':1550003120, 'dt':0.000, 'evts':_evts(32), 'mu':52.500, 'force_new':False},
{'run':310000, 'lb':54, 'starttstamp':1550003180, 'dt':0.000, 'evts':_evts(29), 'mu':53.500, 'force_new':False},
{'run':310000, 'lb':55, 'starttstamp':1550003240, 'dt':0.000, 'evts':_evts(25), 'mu':54.500, 'force_new':False},
{'run':310000, 'lb':56, 'starttstamp':1550003300, 'dt':0.000, 'evts':_evts(22), 'mu':55.500, 'force_new':False},
{'run':310000, 'lb':57, 'starttstamp':1550003360, 'dt':0.000, 'evts':_evts(18), 'mu':56.500, 'force_new':False},
{'run':310000, 'lb':58, 'starttstamp':1550003420, 'dt':0.000, 'evts':_evts(16), 'mu':57.500, 'force_new':False},
{'run':310000, 'lb':59, 'starttstamp':1550003480, 'dt':0.000, 'evts':_evts(13), 'mu':58.500, 'force_new':False},
{'run':310000, 'lb':60, 'starttstamp':1550003540, 'dt':0.000, 'evts':_evts(11), 'mu':59.500, 'force_new':False},
{'run':310000, 'lb':61, 'starttstamp':1550003600, 'dt':0.000, 'evts':_evts(9), 'mu':60.500, 'force_new':False},
{'run':310000, 'lb':62, 'starttstamp':1550003660, 'dt':0.000, 'evts':_evts(7), 'mu':61.500, 'force_new':False},
{'run':310000, 'lb':63, 'starttstamp':1550003720, 'dt':0.000, 'evts':_evts(6), 'mu':62.500, 'force_new':False},
{'run':310000, 'lb':64, 'starttstamp':1550003780, 'dt':0.000, 'evts':_evts(5), 'mu':63.500, 'force_new':False},
{'run':310000, 'lb':65, 'starttstamp':1550003840, 'dt':0.000, 'evts':_evts(4), 'mu':64.500, 'force_new':False},
{'run':310000, 'lb':66, 'starttstamp':1550003900, 'dt':0.000, 'evts':_evts(3), 'mu':65.500, 'force_new':False},
{'run':310000, 'lb':67, 'starttstamp':1550003960, 'dt':0.000, 'evts':_evts(3), 'mu':66.500, 'force_new':False},
{'run':310000, 'lb':68, 'starttstamp':1550004020, 'dt':0.000, 'evts':_evts(2), 'mu':67.500, 'force_new':False},
{'run':310000, 'lb':69, 'starttstamp':1550004080, 'dt':0.000, 'evts':_evts(2), 'mu':68.500, 'force_new':False},
{'run':310000, 'lb':70, 'starttstamp':1550004140, 'dt':0.000, 'evts':_evts(2), 'mu':69.500, 'force_new':False},
{'run':310000, 'lb':71, 'starttstamp':1550004200, 'dt':0.000, 'evts':_evts(2), 'mu':70.500, 'force_new':False},
{'run':310000, 'lb':72, 'starttstamp':1550004260, 'dt':0.000, 'evts':_evts(1), 'mu':71.500, 'force_new':False},
{'run':310000, 'lb':73, 'starttstamp':1550004320, 'dt':0.000, 'evts':_evts(1), 'mu':72.500, 'force_new':False},
{'run':310000, 'lb':74, 'starttstamp':1550004380, 'dt':0.000, 'evts':_evts(1), 'mu':73.500, 'force_new':False},
{'run':310000, 'lb':75, 'starttstamp':1550004440, 'dt':0.000, 'evts':_evts(1), 'mu':74.500, 'force_new':False},
{'run':310000, 'lb':76, 'starttstamp':1550004500, 'dt':0.000, 'evts':_evts(1), 'mu':75.500, 'force_new':False},
{'run':310000, 'lb':77, 'starttstamp':1550004560, 'dt':0.000, 'evts':_evts(1), 'mu':76.500, 'force_new':False},
{'run':310000, 'lb':78, 'starttstamp':1550004620, 'dt':0.000, 'evts':_evts(1), 'mu':77.500, 'force_new':False},
{'run':310000, 'lb':79, 'starttstamp':1550004680, 'dt':0.000, 'evts':_evts(1), 'mu':78.500, 'force_new':False},
{'run':310000, 'lb':80, 'starttstamp':1550004740, 'dt':0.000, 'evts':_evts(1), 'mu':79.500, 'force_new':False},
{'run':310000, 'lb':81, 'starttstamp':1550004800, 'dt':0.000, 'evts':_evts(1), 'mu':80.500, 'force_new':False},
{'run':310000, 'lb':82, 'starttstamp':1550004860, 'dt':0.000, 'evts':_evts(1), 'mu':81.500, 'force_new':False},
{'run':310000, 'lb':83, 'starttstamp':1550004920, 'dt':0.000, 'evts':_evts(1), 'mu':82.500, 'force_new':False},
{'run':310000, 'lb':84, 'starttstamp':1550004980, 'dt':0.000, 'evts':_evts(1), 'mu':83.500, 'force_new':False},
{'run':310000, 'lb':85, 'starttstamp':1550005040, 'dt':0.000, 'evts':_evts(1), 'mu':84.500, 'force_new':False},
{'run':310000, 'lb':86, 'starttstamp':1550005100, 'dt':0.000, 'evts':_evts(1), 'mu':85.500, 'force_new':False},
{'run':310000, 'lb':87, 'starttstamp':1550005160, 'dt':0.000, 'evts':_evts(1), 'mu':86.500, 'force_new':False},
{'run':310000, 'lb':88, 'starttstamp':1550005220, 'dt':0.000, 'evts':_evts(1), 'mu':87.500, 'force_new':False},
{'run':310000, 'lb':89, 'starttstamp':1550005280, 'dt':0.000, 'evts':_evts(1), 'mu':88.500, 'force_new':False},
{'run':310000, 'lb':90, 'starttstamp':1550005340, 'dt':0.000, 'evts':_evts(1), 'mu':89.500, 'force_new':False},
{'run':310000, 'lb':91, 'starttstamp':1550005400, 'dt':0.000, 'evts':_evts(1), 'mu':90.500, 'force_new':False},
{'run':310000, 'lb':92, 'starttstamp':1550005460, 'dt':0.000, 'evts':_evts(1), 'mu':91.500, 'force_new':False},
{'run':310000, 'lb':93, 'starttstamp':1550005520, 'dt':0.000, 'evts':_evts(1), 'mu':92.500, 'force_new':False},
{'run':310000, 'lb':94, 'starttstamp':1550005580, 'dt':0.000, 'evts':_evts(1), 'mu':93.500, 'force_new':False},
{'run':310000, 'lb':95, 'starttstamp':1550005640, 'dt':0.000, 'evts':_evts(1), 'mu':94.500, 'force_new':False},
{'run':310000, 'lb':96, 'starttstamp':1550005700, 'dt':0.000, 'evts':_evts(1), 'mu':95.500, 'force_new':False},
{'run':310000, 'lb':97, 'starttstamp':1550005760, 'dt':0.000, 'evts':_evts(1), 'mu':96.500, 'force_new':False},
{'run':310000, 'lb':98, 'starttstamp':1550005820, 'dt':0.000, 'evts':_evts(1), 'mu':97.500, 'force_new':False},
{'run':310000, 'lb':99, 'starttstamp':1550005880, 'dt':0.000, 'evts':_evts(1), 'mu':98.500, 'force_new':False},
{'run':310000, 'lb':100, 'starttstamp':1550005940, 'dt':0.000, 'evts':_evts(1), 'mu':99.500, 'force_new':False},
#--> end hiding
]
include('RunDependentSimData/configCommon.py')
#cleanup python memory
if not "RunDMC_testing_configuration" in dir():
del JobMaker
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
4caba39636403bb61752fffbb21005e82c319669 | 78b44dff4ca19aba4815a383f0e715a7ce178703 | /src/sort/leetcode242_ValidAnagram.py | 868c94ea3964fe2a13d4813167b243696b106043 | [] | no_license | apepkuss/Cracking-Leetcode-in-Python | 80e5e9fd407441db77652fc480f523d3636281c1 | cbe6a7e7f05eccb4f9c5fce8651c0d87e5168516 | refs/heads/master | 2021-09-03T00:11:05.434202 | 2018-01-04T07:38:02 | 2018-01-04T07:38:02 | 85,363,605 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,384 | py |
class Solution(object):
    """
    @ Amazon, Uber, Yelp
    Hash Table
    Given two strings s and t, write a function to determine if t is an anagram of s.

    For example,
    s = "anagram", t = "nagaram", return true.
    s = "rat", t = "car", return false.

    Note:
    You may assume the string contains only lowercase alphabets.

    Follow up:
    What if the inputs contain unicode characters? How would you adapt your solution to such case?
    """
    def isAnagram(self, s, t):
        """
        :type s: str
        :type t: str
        :rtype: bool
        """
        from collections import Counter
        # Two strings are anagrams iff they contain exactly the same multiset
        # of characters.  Counter builds those multisets in O(n) and compares
        # them directly, replacing the two hand-rolled hash tables; unequal
        # lengths automatically compare unequal.  This also handles unicode
        # input unchanged (the follow-up question).
        return Counter(s) == Counter(t)
if __name__ == "__main__":
s = "a"
t = "a"
res = Solution().isAnagram(s, t)
print res | [
"xin.sam.liu@hotmail.com"
] | xin.sam.liu@hotmail.com |
6a2040a23f3477f3b8496aa64df9e566e6215326 | ba72d59f5291973c987e86a2755c28bf4ae854b5 | /pbge/rpgmenu.py | e886d26bf60c98b8f765b649d4f7d8596a9d0263 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | kaol/gearhead-caramel | 62ee9df79abf24422a80c39ac5b90bbc8051a77e | a286be2f1160930d7f394f52add289edf18a4066 | refs/heads/master | 2021-01-25T14:16:56.464327 | 2018-02-28T16:10:50 | 2018-02-28T16:10:50 | 123,679,617 | 0 | 0 | null | 2018-03-03T09:55:31 | 2018-03-03T09:55:30 | null | UTF-8 | Python | false | false | 10,730 | py | import pygame
import glob
import util
from frects import Frect,ANCHOR_CENTER,ANCHOR_UPPERLEFT
from . import default_border,render_text,wait_event,TIMEREVENT,my_state
class MenuItem(object):
    """One selectable menu entry: display text, payload value, and an optional
    description shown by the menu's description box."""
    def __init__(self, msg, value, desc=None):
        self.msg = msg      # text rendered in the menu list
        self.value = value  # payload returned by Menu.query() when chosen
        self.desc = desc    # optional description for the DescBox, if any
    def __lt__(self, other):
        """Menu items order alphabetically by their display text."""
        return self.msg < other.msg
# The DescBox is the default MenuDesc. It takes a string stored in the menu
# item and displays it. However, it is not the only menu description possible!
# Any object with a render_desc(menu_item) method will work.
# Also note that the desc associated with each menu item doesn't need to be
# a string- it all depends on the needs of the descobj you're using.
class DescBox( Frect ):
    """Default description panel for a Menu: a bordered Frect that renders the
    selected MenuItem's `desc` string."""
    def __init__(self, menu, dx, dy, w=300, h=100, anchor=ANCHOR_CENTER, border=default_border, justify=-1, font=None):
        self.menu = menu
        self.border = border
        self.justify = justify
        if not anchor:
            # No explicit anchor given: inherit the owning menu's anchor point.
            anchor = menu.anchor
        self.font = font or my_state.small_font
        super(DescBox, self).__init__(dx, dy, w, h, anchor)

    def render_desc(self, menu_item):
        """Draw the border and, when the item carries a description, its text."""
        target_rect = self.get_rect()
        if self.border:
            # NOTE(review): called here as border.render(screen, rect) while
            # Menu.render calls border.render(rect) — confirm which signature
            # the shared border object actually expects.
            self.border.render( my_state.screen , target_rect )
        if menu_item and menu_item.desc:
            rendered = render_text( self.font, menu_item.desc, self.w, justify = self.justify )
            my_state.screen.blit( rendered , target_rect )
class Menu( Frect ):
    """A scrollable list menu rendered inside a Frect.

    Items are MenuItem objects; query() runs a modal event loop (keyboard and
    mouse) and returns the selected item's value, or False if the menu was
    cancelled.  An optional description object (self.descobj) with a
    render_desc(item) method is asked to draw the current item's description
    on every frame.
    """
    def __init__(self,dx,dy,w=300,h=100,anchor=ANCHOR_CENTER,menuitem=(150,145,130),menuselect=(128,250,230),border=default_border,predraw=None,font=None):
        super(Menu, self).__init__(dx,dy,w,h,anchor)
        self.menuitem = menuitem        # RGB color of unselected entries
        self.menuselect = menuselect    # RGB color of the highlighted entry
        self.border = border
        self.font = font or my_state.small_font
        self.items = []
        self.top_item = 0               # index of the first visible entry (scroll offset)
        self.selected_item = 0
        self.can_cancel = True
        self.descobj = None             # any object with a render_desc(item) method
        self.quick_keys = {}            # key -> value shortcuts handled in query()
        # predraw is a function that
        # redraws/clears the screen before the menu is rendered.
        self.predraw = predraw

    def add_item(self,msg,value,desc=None):
        """Append a new MenuItem with the given label, payload and description."""
        item = MenuItem( msg , value , desc )
        self.items.append( item )

    def add_descbox(self,x,y,w=30,h=10,justify=-1):
        """Attach a DescBox that displays the selected item's description.

        BUGFIX: the previous positional call passed self.border into DescBox's
        `anchor` parameter and `justify` into its `border` parameter, breaking
        both the box's placement and its frame.  Keyword arguments keep every
        value in its intended slot; anchor=None makes DescBox fall back to
        this menu's own anchor (its documented behavior for a falsy anchor).
        """
        self.descobj = DescBox( self, x, y, w, h, anchor=None, border=self.border, justify=justify )

    def render(self,do_extras=True):
        """Draw the menu.  With do_extras, also run predraw (or the default
        view), the widgets, the border, the selection highlight, and the
        description box."""
        mydest = self.get_rect()
        if do_extras:
            if self.predraw:
                self.predraw()
            else:
                my_state.view()
            my_state.render_widgets()
            if self.border:
                self.border.render( mydest )
        # Clip so long item lists cannot draw outside the menu rectangle.
        my_state.screen.set_clip(mydest)
        item_num = self.top_item
        y = mydest.top
        while y < mydest.bottom:
            if item_num < len( self.items ):
                # The color of this item depends on whether or not it's the selected one.
                if ( item_num == self.selected_item ) and do_extras:
                    color = self.menuselect
                else:
                    color = self.menuitem
                img = self.font.render(self.items[item_num].msg, True, color )
                my_state.screen.blit( img , ( mydest.left , y ) )
                y += self.font.get_linesize()
            else:
                break
            item_num += 1
        my_state.screen.set_clip(None)
        if self.descobj != None:
            self.descobj.render_desc(self.get_current_item())

    def get_mouseover_item( self , pos ):
        """Return the index of the menu item under this mouse position, or
        None if the position is outside the menu or past the last item."""
        mydest = self.get_rect()
        x,y = pos
        if mydest.collidepoint( pos ):
            the_item = ( y - mydest.top ) // self.font.get_linesize() + self.top_item
            if the_item >= len( self.items ):
                the_item = None
            return the_item
        else:
            return None

    def query(self):
        """Run the modal selection loop.

        Returns the chosen item's value, or False if the selection was
        cancelled (Escape/Backspace while can_cancel, or a QUIT event).
        Widgets are disabled for the duration of the loop and restored after.
        """
        # A return of False means selection was cancelled.
        if not self.items :
            return False
        elif self.selected_item >= len( self.items ):
            self.selected_item = 0
        no_choice_made = True
        choice = False

        # Disable widgets while menuing.
        push_widget_state = my_state.widgets_active
        my_state.widgets_active = False

        menu_height = self.menu_height()

        mouse_button_down = False
        first_mouse_selection = None
        first_mouse_y = 0
        current_mouse_selection = None

        while no_choice_made:
            pc_input = wait_event()

            if pc_input.type == TIMEREVENT:
                # Redraw the menu on each timer event.
                self.render()
                pygame.display.flip()

                # Also deal with mouse stuff then: dragging with the button
                # held scrolls the list and tracks the hovered item.
                if mouse_button_down:
                    pos = pygame.mouse.get_pos()
                    dy = pos[1] - first_mouse_y
                    if dy > 10 and self.top_item > 0:
                        self.top_item += -1
                        first_mouse_selection = None
                    elif dy < -10 and self.top_item < len( self.items ) - menu_height:
                        self.top_item += 1
                        first_mouse_selection = None
                    current_mouse_selection = self.get_mouseover_item( pos )
                    if current_mouse_selection != None:
                        self.selected_item = current_mouse_selection

            elif pc_input.type == pygame.KEYDOWN:
                # A key was pressed, oh happy day! See what key it was and act
                # accordingly.
                if pc_input.key == pygame.K_UP:
                    self.selected_item -= 1
                    if self.selected_item < 0:
                        self.selected_item = len( self.items ) - 1
                    if ( self.selected_item < self.top_item ) or ( self.selected_item >= self.top_item + menu_height ):
                        self.top_item = self.selected_item
                elif pc_input.key == pygame.K_DOWN:
                    self.selected_item += 1
                    if self.selected_item >= len( self.items ):
                        self.selected_item = 0
                    if ( self.selected_item < self.top_item ) or ( self.selected_item >= self.top_item + menu_height ):
                        self.top_item = self.selected_item
                elif pc_input.key == pygame.K_SPACE or pc_input.key == pygame.K_RETURN:
                    choice = self.items[ self.selected_item ].value
                    no_choice_made = False
                elif ( pc_input.key == pygame.K_ESCAPE or pc_input.key == pygame.K_BACKSPACE ) and self.can_cancel:
                    no_choice_made = False
                elif pc_input.key >= 0 and pc_input.key < 256 and chr( pc_input.key ) in self.quick_keys:
                    # Single-character quick key (e.g. installed by add_alpha_keys).
                    choice = self.quick_keys[ chr(pc_input.key) ]
                    no_choice_made = False
                elif pc_input.key > 255 and pc_input.key in self.quick_keys:
                    # Non-printable quick key stored by its pygame keycode.
                    choice = self.quick_keys[ pc_input.key ]
                    no_choice_made = False

            elif pc_input.type == pygame.MOUSEBUTTONDOWN and not mouse_button_down:
                # Mouse down does nothing but set the first mouse selection, and a
                # counter telling that the button is down.
                first_mouse_selection = self.get_mouseover_item( pc_input.pos )
                first_mouse_y = pc_input.pos[1]
                if first_mouse_selection != None:
                    self.selected_item = first_mouse_selection
                mouse_button_down = True

            elif pc_input.type == pygame.MOUSEBUTTONUP:
                # Mouse button up makes a selection, as long as your finger is still
                # on the first item selected.
                mouse_button_down = False
                current_mouse_selection = self.get_mouseover_item( pc_input.pos )
                if current_mouse_selection == first_mouse_selection and first_mouse_selection != None:
                    self.selected_item = current_mouse_selection
                    choice = self.items[ current_mouse_selection ].value
                    no_choice_made = False

            elif pc_input.type == pygame.QUIT:
                no_choice_made = False

        # Restore the widgets.
        my_state.widgets_active = push_widget_state

        return( choice )

    def sort(self):
        """Sort items alphabetically by their display text (MenuItem.__lt__)."""
        self.items.sort()

    alpha_key_sequence = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"

    def add_alpha_keys(self):
        """Prefix each item with "a) ", "b) ", ... and register the matching
        single-character quick key for every item currently in the menu."""
        key_num = 0
        for item in self.items:
            item.msg = self.alpha_key_sequence[ key_num ] + ') ' + item.msg
            self.quick_keys[ self.alpha_key_sequence[ key_num ] ] = item.value
            key_num += 1
            if key_num >= len( self.alpha_key_sequence ):
                break

    def add_files( self , filepat ):
        """Add one item per file matching the glob pattern (value == filename),
        then sort the menu."""
        file_list = glob.glob( filepat )
        for f in file_list:
            self.add_item( f , f )
        self.sort()

    def menu_height( self ):
        """Number of item rows that fit inside the menu rectangle."""
        return self.h // self.font.get_linesize()

    def reposition( self ):
        """Scroll so that the selected item is visible."""
        if self.selected_item < self.top_item:
            self.top_item = self.selected_item
        elif self.selected_item > ( self.top_item + self.menu_height() - 1 ):
            self.top_item = max( self.selected_item - self.menu_height() + 1 , 0 )

    def set_item_by_value( self , v ):
        """Select the first item whose value equals v, scrolling it into view."""
        for n,i in enumerate( self.items ):
            if i.value == v:
                self.selected_item = n
        self.reposition()

    def set_item_by_position( self , n ):
        """Select the item at index n (ignored if out of range), scrolling it into view."""
        if n < len( self.items ):
            self.selected_item = n
        self.reposition()

    def get_current_item( self ):
        """Return the currently selected MenuItem, or None if out of range."""
        if self.selected_item < len( self.items ):
            return self.items[self.selected_item]
class PopUpMenu( Menu ):
    """A small menu that opens just beside the current mouse position, flipped
    to the other side of the cursor if it would overflow the screen."""
    WIDTH = 200
    HEIGHT = 250
    def __init__( self, predraw=None, border=default_border ):
        x, y = pygame.mouse.get_pos()
        x += 8
        y += 8
        sw, sh = my_state.screen.get_size()
        # Would the menu run off the right or bottom edge?  Flip it to the
        # other side of the cursor instead.
        if x + self.WIDTH + 32 > sw:
            x -= self.WIDTH + 32
        if y + self.HEIGHT + 32 > sh:
            y -= self.HEIGHT + 32
        super(PopUpMenu, self).__init__(x, y, self.WIDTH, self.HEIGHT, ANCHOR_UPPERLEFT, border=border, predraw=predraw)
| [
"pyrrho12@yahoo.ca"
] | pyrrho12@yahoo.ca |
8bbefd6a56845709609dab1c37ecd62b46095ef9 | 6189f34eff2831e3e727cd7c5e43bc5b591adffc | /WebMirror/OutputFilters/RoyalRoadL/RRLSeriesPageFilter.py | 17e6fc2891eda98e353fcbb78ed381fc9f58b24f | [
"BSD-3-Clause"
] | permissive | fake-name/ReadableWebProxy | 24603660b204a9e7965cfdd4a942ff62d7711e27 | ca2e086818433abc08c014dd06bfd22d4985ea2a | refs/heads/master | 2023-09-04T03:54:50.043051 | 2023-08-26T16:08:46 | 2023-08-26T16:08:46 | 39,611,770 | 207 | 20 | BSD-3-Clause | 2023-09-11T15:48:15 | 2015-07-24T04:30:43 | Python | UTF-8 | Python | false | false | 8,493 | py |
import bs4
import re
import calendar
import datetime
import time
import json
import os.path
import parsedatetime
import bleach
import WebRequest
import common.util.urlFuncs
import WebMirror.OutputFilters.FilterBase
import WebMirror.OutputFilters.util.TitleParsers as titleParsers
import WebMirror.OutputFilters.util.MessageConstructors as msgpackers
from .. import SeriesPageCommon
class RRLSeriesPageFilter(WebMirror.OutputFilters.FilterBase.FilterBase):
    """Output filter that parses RoyalRoadL series pages, extracting the
    series metadata and every listed chapter release, and emits them as
    message packets into the local AMQP queue."""

    wanted_mimetypes = [
        'text/html',
    ]
    want_priority = 55

    loggerPath = "Main.Filter.RoyalRoad.Page"

    # Matches series pages such as http://royalroadl.com/fiction/1234 or
    # http://www.royalroad.com/fiction/1234/some-slug (captures the numeric id).
    match_re = re.compile(r"^https?://(?:www\.)?royalroadl?\.com/fiction/(\d+)(?:/?$|/[a-zA-Z0-9\-]+/?$)", flags=re.IGNORECASE)

    @classmethod
    def wantsUrl(cls, url):
        # True when the URL looks like a RoyalRoadL series page.
        if cls.match_re.search(url):
            print("RRLSeriesPageFilter Wants url: '%s'" % url)
            return True
        # else:
        #     print("RRLSeriesPageFilter doesn't want url: '%s'" % url)
        return False

    def __init__(self, **kwargs):
        # Page URL and raw page content are supplied by the crawler engine.
        self.kwargs = kwargs
        self.pageUrl = kwargs['pageUrl']
        self.content = kwargs['pgContent']
        self.type = kwargs['type']
        self.log.info("Processing RoyalRoadL Item")
        super().__init__(**kwargs)

    ##################################################################################################################################

    def extractSeriesReleases(self, seriesPageUrl, soup):
        """Parse one series page and return a list of release/metadata
        packets, or an empty list when the page is unusable or the series
        does not meet the upload thresholds."""
        match = self.match_re.search(seriesPageUrl)
        series_id = match.group(1)
        header = soup.find("div", class_='fic-title')
        if not header:
            self.log.warning("Series page %s contains no releases. Is this series removed?", seriesPageUrl)
            return []

        titletg = header.find("h1")
        authortg = header.find("h4")
        # Drop the "by" label span so only the author name remains.
        authortg.find("span").decompose()

        rating_val = soup.find("meta", property='books:rating:value')
        rating_scale = soup.find("meta", property='books:rating:scale')
        if not rating_val or not rating_scale:
            return []
        rval_f = float(rating_val.get('content', "0"))
        rscale_f = float(rating_scale.get('content', "999999"))
        # Normalize the page's rating to a 0-5 star scale.
        rating = 5 * (rval_f / rscale_f)
        if rating < SeriesPageCommon.MIN_RATING_STARS:
            self.log.error("Item rating below upload threshold: %s", rating)
            return []

        if not titletg:
            self.log.error("Could not find title tag!")
            return []
        if not authortg:
            self.log.error("Could not find author tag!")
            return []

        title = titletg.get_text().strip()
        author = authortg.get_text().strip()
        # Strip all markup from the extracted strings.
        title = bleach.clean(title, tags=[], attributes=[], styles=[], strip=True, strip_comments=True)
        author = bleach.clean(author, tags=[], attributes=[], styles=[], strip=True, strip_comments=True)

        descDiv = soup.find('div', class_='description')
        if not descDiv or not descDiv.div:
            self.log.error("Incomplete or broken description?")
            return []

        # Flatten the description div into a list of plain-text paragraphs.
        desc = []
        for segment in descDiv.div:
            if isinstance(segment, bs4.NavigableString):
                desc.append(str(segment).strip())
            else:
                if segment.get_text().strip():
                    desc.append(segment.get_text().strip())
        desc = ['<p>{}</p>'.format(line) for line in desc if line.strip()]

        # Collect the series tag labels, normalized to lowercase-hyphenated.
        tags = []
        tagdiv = soup.find('span', class_='tags')
        for tag in tagdiv.find_all('span', class_='label'):
            tagtxt = tag.get_text().strip().lower().replace(" ", "-")
            tagtxt = SeriesPageCommon.fix_tag(tagtxt)
            tags.append(tagtxt)

        # Content warnings (red list in the fiction-info box) become tags too.
        info_div = soup.find("div", class_='fiction-info')
        warning_div = info_div.find("div", class_='font-red-sunglo')
        if warning_div:
            for warning_tag in warning_div.find_all('li'):
                tagtxt = warning_tag.get_text().strip().lower().replace(" ", "-")
                tagtxt = SeriesPageCommon.fix_tag(tagtxt)
                tags.append(tagtxt)

        seriesmeta = {}
        seriesmeta['title'] = msgpackers.fix_string(title)
        seriesmeta['author'] = msgpackers.fix_string(author)
        seriesmeta['tags'] = tags
        seriesmeta['homepage'] = seriesPageUrl
        seriesmeta['desc'] = "\r\n".join(desc)
        seriesmeta['tl_type'] = 'oel'
        seriesmeta['sourcesite'] = 'RoyalRoadL'
        seriesmeta['create_tags'] = True

        meta_pkt = msgpackers.createSeriesInfoPacket(seriesmeta, matchAuthor=True)

        extra = {}
        extra['tags'] = tags
        extra['homepage'] = seriesPageUrl
        extra['sourcesite'] = 'RoyalRoadL'

        # Each chapter row carries a data-url attribute and two cells:
        # chapter name and release date.
        chapters = soup.find_all("tr", attrs={"data-url" : True})
        raw_retval = []
        for chapter in chapters:
            if len(chapter.find_all("td")) != 2:
                self.log.warning("Row with invalid number of entries?")
                continue
            cname, cdate = chapter.find_all("td")
            if not cdate.time:
                self.log.error("No time entry?")
                continue
            timestr = cdate.time.get("title").strip()
            itemDate, status = parsedatetime.Calendar().parse(timestr)
            if status < 1:
                self.log.warning("Failure processing date: %s", timestr)
                continue
            reldate = time.mktime(itemDate)
            relurl = common.util.urlFuncs.rebaseUrl(cname.a['href'], seriesPageUrl)
            chp_title = cname.get_text().strip()
            vol, chp, frag, _ = titleParsers.extractTitle(chp_title + " " + title)
            raw_item = {}
            raw_item['srcname'] = "RoyalRoadL"
            raw_item['published'] = float(reldate)
            raw_item['linkUrl'] = relurl
            raw_msg = msgpackers._buildReleaseMessage(
                raw_item,
                title,
                vol,
                chp,
                frag,
                author = author,
                postfix = chp_title,
                tl_type = 'oel',
                extraData = extra,
                matchAuthor = True
            )
            raw_retval.append(raw_msg)

        raw_retval = SeriesPageCommon.check_fix_numbering(self.log, raw_retval, series_id, rrl=True)

        # Do not add series without 3 chapters.
        if len(raw_retval) < 3:
            self.log.info("Less then three chapters!")
            return []

        if not raw_retval:
            self.log.info("Retval empty?!")
            return []

        retval = [msgpackers.createReleasePacket(raw_msg) for raw_msg in raw_retval] + [meta_pkt]

        self.put_measurement(
            measurement_name = 'chapter_releases',
            measurement = len(retval),
            fields = {},
            extra_tags = {"site" : "RoyalRoadL"},
        )

        self.log.info("Found %s chapter releases on series page for %s (with rating %s)!", len(retval), msgpackers.fix_string(title), rating)
        return retval

    def sendReleases(self, releases):
        """Push every extracted packet into the local AMQP queue."""
        self.log.info("Total releases found on page: %s. Emitting messages into AMQP local queue.", len(releases))
        self.amqp_put_many(releases)

    def processPage(self, url, content):
        # Ignore 404 chapters
        if "<title>Not Found | RoyalRoadL</title>" in content:
            return

        soup = WebRequest.as_soup(self.content)
        releases = self.extractSeriesReleases(self.pageUrl, soup)
        if releases:
            self.sendReleases(releases)

    ##################################################################################################################################

    def extractContent(self):
        # Entry point called by the filter framework.
        self.processPage(self.pageUrl, self.content)
def test():
    """Manual smoke test: spin up a crawler and fetch one RoyalRoadL
    series page end-to-end.  Not run by the automated test suite."""
    print("Test mode!")
    import logSetup
    import WebMirror.rules
    import WebMirror.Engine
    import WebMirror.Runner
    import multiprocessing
    logSetup.initLogging()

    crawler = WebMirror.Runner.Crawler()
    crawler.start_aggregator()
    c_lok = cookie_lock = multiprocessing.Lock()
    engine = WebMirror.Engine.SiteArchiver(cookie_lock=c_lok, response_queue=crawler.agg_queue)
    # NOTE(review): testJobFromUrl is not defined in this module --
    # presumably provided elsewhere; verify before running.
    engine.dispatchRequest(testJobFromUrl('http://royalroadl.com/fiction/3333'))
    # engine.dispatchRequest(testJobFromUrl('http://www.royalroadl.com/fiction/2850'))
    # engine.dispatchRequest(testJobFromUrl('http://www.royalroadl.com/fictions/latest-updates/'))
    # engine.dispatchRequest(testJobFromUrl('http://www.royalroadl.com/fictions/best-rated/'))
    # engine.dispatchRequest(testJobFromUrl('http://www.royalroadl.com/fictions/latest-updates/'))
    # engine.dispatchRequest(testJobFromUrl('http://www.royalroadl.com/fictions/active-top-50/'))
    # engine.dispatchRequest(testJobFromUrl('http://www.royalroadl.com/fictions/weekly-views-top-50/'))
    # engine.dispatchRequest(testJobFromUrl('http://www.royalroadl.com/fictions/newest/'))
    crawler.join_aggregator()


if __name__ == "__main__":
    test()
| [
"something@fake-url.com"
] | something@fake-url.com |
74d56f3d4e8a30326e6d5c28fe262e9b50446a41 | 64b2026bd0b6f3be8b6e881ec9ddd9ca432e20f6 | /gendiff/formats/__init__.py | b101a79de81280debfef4fe44ae7d4992275692f | [] | no_license | akocur/python-project-lvl2 | e8a973902fbbbbb29a5081f1f76c6d33a13e8996 | 706fe3b9b48679e1cf02763c2459883be4bf028f | refs/heads/main | 2023-07-16T04:16:37.107677 | 2021-09-03T09:44:26 | 2021-09-03T09:44:26 | 396,692,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | from gendiff.formats.plain import plain
from gendiff.formats.stylish import stylish
from gendiff.formats.json import formatted_to_json
# Identifiers for the supported output formats.
FORMAT_PLAIN = 'plain'
FORMAT_JSON = 'json'
FORMAT_STYLISH = 'stylish'
def get_formatter(format_name):
    """
    Return the formatting function registered under *format_name*.

    :param format_name: str
        one of the available formatter names.
    :return: formatting function, or None for an unknown name
    """
    formatters = {
        FORMAT_STYLISH: stylish,
        FORMAT_PLAIN: plain,
        FORMAT_JSON: formatted_to_json,
    }
    return formatters.get(format_name)
def get_default_format_name():
    """Return the name of the formatter used when none is specified."""
    return FORMAT_STYLISH
def get_available_formatters():
    """Return the list of supported formatter names (default one first)."""
    return [FORMAT_STYLISH, FORMAT_PLAIN, FORMAT_JSON]
| [
"akocur@yandex.ru"
] | akocur@yandex.ru |
5ea90cb07677aae40edaea3b4f2b8ca14d93ff57 | 3834a683bc7f3eb66615fad8c95d2f9400ca825a | /Palinlink.py | c89bde0f7828267cfa4484e449cfee06748108c6 | [] | no_license | balajimanikandanm/python3 | 5d0ae2a0fd2e20426ee9ac5dfc8a26eb0117aa29 | ed151ee91c935dc7ecb2c0e54c4e7b107a32c5e7 | refs/heads/master | 2020-05-31T22:37:44.534017 | 2019-08-14T10:24:00 | 2019-08-14T10:24:00 | 190,523,807 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | class Stack:
def __init__(self):
    # Backing store; the top of the stack is the tail of the list.
    self.items = []
def is_empty(self):
    """Return True when the stack holds no elements."""
    return len(self.items) == 0
def push(self, data):
    # Place data on top of the stack.
    self.items.append(data)
def pop(self):
    # Remove and return the top item; raises IndexError when empty.
    return self.items.pop()
sb = Stack()
text = input()
# Push every character, then pop them all back: LIFO order reverses the
# string.
for character in text:
    sb.push(character)
reversed_text = ''
while not sb.is_empty():
    reversed_text = reversed_text + sb.pop()
# A palindrome reads identically forwards and backwards.
if text == reversed_text:
    print("YES")
else:
    print("NO")
| [
"noreply@github.com"
] | balajimanikandanm.noreply@github.com |
692205fd486af2933f22524eb23723379f93900b | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/63/usersdata/160/30989/submittedfiles/swamee.py | 26285a5016005edac6a49bc82c98cdb39ca1a975 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | # -*- coding: utf-8 -*-
import math
#COMECE SEU CÓDIGO AQUI
# Pipe-flow sizing: read the inputs, compute the diameter from the
# head-loss relation, the Reynolds number, and the Swamee-Jain friction
# factor, then print each with four decimal places.
f = float(input('Digite f:'))
L = float(input('Digite L:'))
Q = float(input('Digite Q:'))
DeltaH = float(input('Digite DeltaH:'))
v = float(input('Digite v:'))
g = 9.81       # gravitational acceleration (m/s^2)
E = 0.000002   # absolute pipe roughness (m)
# Diameter solved from the head-loss formula.
D = ((8 * f * L * (Q * Q)) / ((math.pi * math.pi) * g * DeltaH)) ** (1 / 5)
# Reynolds number for flow rate Q through a pipe of diameter D.
Rey = (4 * Q) / (math.pi * D * v)
# Swamee-Jain friction factor.  BUGFIX: the original line read
# `math.log*10(...` with an unbalanced parenthesis (a SyntaxError);
# the intended function is math.log10, squared in the denominator.
K = 0.25 / (math.log10(E / (3.7 * D) + 5.74 / Rey ** 0.9)) ** 2
print('%.4f' % D)
print('%.4f' % Rey)
print('%.4f' % K)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
8d6f84d3ad7afe3a91c3e23423f26b79f70a9767 | e953ae5da775a934b86379cfa3d864bb7376fe36 | /08 tkinter_python/17.py | a8791e8dce3ded8dac4247414aba0ab26a08bdac | [] | no_license | agyenes/greenfox-exercises | 1481f17d1ddd78099d17022aa1800955ae39d92b | a2c7912c61708c6ebc53c9a22f8c09550432d4c3 | refs/heads/master | 2020-04-11T00:42:17.842170 | 2016-10-19T06:10:22 | 2016-10-19T06:10:22 | 68,081,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | from tkinter import *
import math
root = Tk()
canvas = Canvas(root, width='500', height='500', bg='white')
canvas.pack()
size = 20
# String-art style pattern: 23 iterations, each drawing one green, one
# blue and one red line; the math.sqrt(3) * 10 step appears to move the
# endpoints along 60-degree edges of a triangle -- geometry inferred from
# the coordinates, not documented by the author.
for i in range(23):
    canvas.create_line(20 + i * size, 420, 250 + i * size/2, 20 + i * math.sqrt(3) * 10, fill='green')
    canvas.create_line(250 - i * size/2, 20 + i * math.sqrt(3) * 10, 480 - i * size, 420, fill='blue')
    canvas.create_line(480 - i * size/2, 420 - i * math.sqrt(3) * 10, 20 + i * size/2, 420 - i * math.sqrt(3) * 10, fill='red')
canvas.pack()
root.mainloop()
| [
"aron.gyenes@gmail.com"
] | aron.gyenes@gmail.com |
da534bbca9d72e0ec477c30ac4656daf831758e7 | f135ace9411167259588fc5f52dd2f300d4b1551 | /C++/trans.py | 5b3a7ae492a330370ece256ba40069c60ebe7d2e | [] | no_license | celestialized/FPGA_feedforward-neural-network_for_qubit_discrimination | c0cd74704bd6a63d3be2fb22db371c995ebe836f | 1ad7fd0b37a45fe249b28b8b38f25be152b56d45 | refs/heads/master | 2021-09-20T15:56:24.582204 | 2018-08-11T11:26:32 | 2018-08-11T11:26:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | import numpy as np
import random
import cPickle as pickle
import matplotlib.pyplot as plt
import argparse
import math
import gzip
f =gzip.open('./DetectionBinsData_pickle615_clean.gzip','rb') #49664*100 times measurement
fb=open('bright.txt','w')
fd=open('dark.txt','w')
num_bins=100
# NOTE(review): each pickle.load appears to yield a 2-D array where
# columns 1..100 hold "dark" counts and columns 102..201 hold "bright"
# counts -- layout inferred from the slices, confirm against the data.
# Python 2 code (cPickle import above); indentation reconstructed.
for k in range(10000):
    print(k)
    d_data=pickle.load(f)[:,1:num_bins+1]
    b_data=pickle.load(f)[:,102:102+num_bins]
    #print(d_data,b_data)
    for i in range(100):
        for j in range(num_bins):
            fd.write(str(d_data[i][j]))
            fb.write(str(b_data[i][j]))
        fd.write('\n')
        fb.write('\n')
fd.close()
fb.close() | [
"1402434478@qq.com"
] | 1402434478@qq.com |
01c64bfc776426e804788775152425b3336662ac | f62fd455e593a7ad203a5c268e23129473d968b6 | /congress-5.0.0/congress/tests/dse2/test_datasource.py | 6da7d55a88178d7614fc9fbc59a18d219d14d275 | [
"Apache-2.0"
] | permissive | MinbinGong/OpenStack-Ocata | 5d17bcd47a46d48ff9e71e2055f667836174242f | 8b7650128cfd2fdf5d6c8bc4613ac2e396fb2fb3 | refs/heads/master | 2021-06-23T05:24:37.799927 | 2017-08-14T04:33:05 | 2017-08-14T04:33:05 | 99,709,985 | 0 | 2 | null | 2020-07-22T22:06:22 | 2017-08-08T15:48:44 | Python | UTF-8 | Python | false | false | 8,631 | py | # Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import mock
from oslo_config import cfg
from oslo_db import exception as db_exc
from congress.db import datasources as datasource_db
from congress.dse2 import dse_node
from congress import exception as congressException
from congress.tests.api import base as api_base
from congress.tests import base
from congress.tests import fake_datasource
class TestDataSource(base.SqlTestCase):
    """Lifecycle tests for datasources on a DSE node: creation, lookup,
    secret hiding, duplicate detection and deletion."""

    def setUp(self):
        super(TestDataSource, self).setUp()
        config = api_base.setup_config(with_fake_datasource=False, api=False,
                                       policy=False)
        self.dseNode = config['node']
        self.ds_manager = config['ds_manager']

    def _get_datasource_request(self):
        # leave ID out--generated during creation
        return {'name': 'aaron',
                'driver': 'fake_datasource',
                'description': 'hello world!',
                'enabled': True,
                'type': None,
                'config': {'auth_url': 'foo',
                           'username': 'armax',
                           'password': 'password',
                           'tenant_name': 'armax'}}

    def test_add_datasource(self):
        req = self._get_datasource_request()
        result = self.ds_manager.add_datasource(req)
        # test equality of return value except for 'id' field
        del(result['id'])
        self.assertEqual(req, result)
        # check that service actually on dseNode
        services = self.dseNode.get_services()
        self.assertEqual(len(services), 1)
        self.assertEqual(services[0].service_id, req['name'])
        self.assertIsInstance(services[0],
                              fake_datasource.FakeDataSource)
        obj = self.dseNode.invoke_service_rpc(
            req['name'], 'get_status', {'source_id': None, 'params': None})
        self.assertIsNotNone(obj)

    @mock.patch.object(datasource_db, 'add_datasource')
    def test_add_datasource_db_error(self, add_ds):
        # A database failure must surface as DatasourceCreationError.
        add_ds.side_effect = db_exc.DBError('Error in db.')
        req = self._get_datasource_request()
        self.assertRaises(congressException.DatasourceCreationError,
                          self.ds_manager.add_datasource, req)

    @mock.patch.object(dse_node.DseNode, 'register_service')
    def test_add_datasource_synchronizer_error(self, register_ds):
        # When service registration fails, no DB row may be left behind.
        register_ds.side_effect = Exception('Error in registering service')
        req = self._get_datasource_request()
        self.assertRaises(congressException.DatasourceCreationError,
                          self.ds_manager.add_datasource, req)
        ds = datasource_db.get_datasource_by_name(req['name'])
        self.assertIsNone(ds)

    def test_get_datasource(self):
        req = self._get_datasource_request()
        ds = self.ds_manager.add_datasource(req)
        result = self.dseNode.get_datasource(ds['id'])
        # test equality except for 'id' field
        del(result['id'])
        self.assertEqual(req, result)

    def test_get_datasources(self):
        req = self._get_datasource_request()
        self.ds_manager.add_datasource(req)
        result = self.dseNode.get_datasources()
        self.assertEqual(len(result), 1)
        result = result[0]
        # test equality except for 'id' field
        del(result['id'])
        self.assertEqual(req, result)

    def test_get_datasources2(self):
        req1 = self._get_datasource_request()
        req1['name'] = 'datasource1'
        result1 = self.ds_manager.add_datasource(req1)
        req2 = self._get_datasource_request()
        req2['name'] = 'datasource2'
        result2 = self.ds_manager.add_datasource(req2)
        # check results of add_datasource
        for key, value in req1.items():
            self.assertEqual(value, result1[key])
        for key, value in req2.items():
            self.assertEqual(value, result2[key])
        # check services actually on dseNode
        services = self.dseNode.get_services()
        self.assertEqual(len(services), 2)
        self.assertEqual(set([s.service_id for s in services]),
                         set(['datasource1', 'datasource2']))
        self.assertIsInstance(services[0],
                              fake_datasource.FakeDataSource)
        self.assertIsInstance(services[1],
                              fake_datasource.FakeDataSource)
        # check results of get_datasources
        resultall = self.dseNode.get_datasources()
        self.assertEqual(len(resultall), 2)
        # check equality except for 'id' field
        byname = {x['name']: x for x in resultall}
        for x in byname.values():
            del(x['id'])
        self.assertEqual(byname, {'datasource1': req1, 'datasource2': req2})

    def test_get_datasources_hide_secret(self):
        req = self._get_datasource_request()
        self.ds_manager.add_datasource(req)
        result = self.dseNode.get_datasources(filter_secret=True)
        result = result[0]
        # check equality except that 'config'/'password' is hidden
        req['config']['password'] = "<hidden>"
        del(result['id'])
        self.assertEqual(result, req)

    def test_create_datasource_duplicate_name(self):
        req = self._get_datasource_request()
        self.ds_manager.add_datasource(req)
        self.assertRaises(congressException.DatasourceNameInUse,
                          self.ds_manager.add_datasource, req)

    def test_delete_datasource(self):
        req = self._get_datasource_request()
        result = self.ds_manager.add_datasource(req)
        self.ds_manager.delete_datasource(result)
        # check that service is actually deleted
        services = self.dseNode.get_services()
        self.assertEqual(len(services), 0)
        self.assertRaises(
            congressException.NotFound, self.dseNode.invoke_service_rpc,
            req['name'], 'get_status', {'source_id': None, 'params': None})
        # TODO(thinrichs): test that we've actually removed
        #   the row from the DB

    # TODO(dse2): this test relies on coordination between dseNode and
    #   policy engine.  Much harder in distributed system.  Need to decide
    #   if we want that kind of invariant and if so implement it.
    # def test_delete_datasource_error(self):
    #     req = self._get_datasource_request()
    #     req['driver'] = 'fake_datasource'
    #     req['config'] = {'auth_url': 'foo',
    #                      'username': 'armax',
    #                      'password': 'password',
    #                      'tenant_name': 'armax'}
    #     # let driver generate this for us.
    #     del req['id']
    #     result = self.datasource_mgr.add_datasource(req)
    #     engine = self.dseNode.service_object('engine')
    #     engine.create_policy('alice')
    #     engine.insert('p(x) :- %s:q(x)' % req['name'], 'alice')
    #     self.assertRaises(exception.DanglingReference,
    #                       self.datasource_mgr.delete_datasource,
    #                       result['id'])

    def test_delete_invalid_datasource(self):
        req = self._get_datasource_request()
        req['id'] = 'fake-id'
        self.assertRaises(congressException.DatasourceNotFound,
                          self.ds_manager.delete_datasource, req)

    # TODO(dse2): Doesn't seem like we need this (or it will be moved to API).
    # def test_get_driver_schema(self):
    #     schema = self.datasource_mgr.get_driver_schema(
    #         'fake_datasource')
    #     self.assertEqual(
    #         schema,
    #         fake_datasource.FakeDataSource.get_schema())

    def test_duplicate_driver_name_raises(self):
        # Load the driver twice
        cfg.CONF.set_override(
            'drivers',
            ['congress.tests.fake_datasource.FakeDataSource',
             'congress.tests.fake_datasource.FakeDataSource'])
        self.assertRaises(congressException.BadConfig,
                          self.dseNode.load_drivers)
| [
"gongwayne@hotmail.com"
] | gongwayne@hotmail.com |
a4d8a3b9ba56807058e9ba8b2d56bcaf4d272a41 | ae5bb043439d2bad96a7017a57c6b83dd66c6ffb | /setup.py | a6cf117b5fddab5ce77c4a21b22fc5d0511e5d7f | [
"MIT"
] | permissive | kyleabeauchamp/xopen | e27be7a2de5ca665d5ea394aa9b8aff5bf4649a3 | c12a94e9f9af49da37edb20d6e82fb0bd5f4a08a | refs/heads/master | 2020-12-25T22:48:38.628011 | 2016-09-09T15:56:46 | 2016-09-09T15:56:46 | 68,322,235 | 0 | 0 | null | 2016-09-15T18:53:50 | 2016-09-15T18:53:49 | null | UTF-8 | Python | false | false | 784 | py | import sys
from setuptools import setup
# Refuse installation on interpreters older than Python 2.6.
if sys.version_info < (2, 6):
    sys.stdout.write("At least Python 2.6 is required.\n")
    sys.exit(1)


# Package metadata for the xopen distribution.
setup(
    name = 'xopen',
    version = '0.1.0',
    author = 'Marcel Martin',
    author_email = 'mail@marcelm.net',
    url = 'https://github.com/marcelm/xopen/',
    description = 'Open compressed files transparently',
    license = 'MIT',
    py_modules = ['xopen'],
    classifiers = [
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Cython",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
    ]
)
| [
"marcel.martin@scilifelab.se"
] | marcel.martin@scilifelab.se |
29286514242944ff7946769d77ea552685d38ff7 | b4cc610bbd069c2b3e1f50c82303d48de21843a4 | /ce/c008_test.py | 694bc09965957756b549d1d77cfaaf1fa9801360 | [] | no_license | AakashKumarNain/pythonesque | d47b890ff42fa7baa3f25f9569d8a7310c7aa710 | 3225aaf878c52962becafd60a50243a91f92b264 | refs/heads/master | 2020-03-18T00:07:00.624695 | 2018-05-19T09:24:16 | 2018-05-19T09:24:16 | 134,078,646 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | """
CodeEval Reverse Words
author: Manny egalli64@gmail.com
info: http://thisthread.blogspot.com/2017/02/codeeval-reverse-words.html
https://www.codeeval.com/open_challenges/8/
"""
import unittest
from ce.c008 import solution
class TestCodeEval(unittest.TestCase):
    """Check solution() against the sample provided by the challenge."""

    def test_provided_1(self):
        # "Hello World" with its words reversed.
        self.assertEqual('World Hello', solution('Hello World'))
# Run the sample check when executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"egalli64@gmail.com"
] | egalli64@gmail.com |
8a8b5e3641d7d2725ccfaf4069df1791c26f75fa | 6f4d104a5d87fa6e7a113139224158dae791eb18 | /models/vanilla_vae_bak.py | 46e8a7e7488e87f3856357259913ecc62d38be42 | [] | no_license | yellowbeango/VAE_NSF | b7c30996764d7d7f12499111e8e9db93c6d201c0 | 3bfc068b3363ffe53ceddc6f1adb0fa25afd4d31 | refs/heads/master | 2023-03-11T14:38:33.377391 | 2021-03-04T04:07:36 | 2021-03-04T04:07:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,668 | py | import torch
from .base import BaseVAE
from torch import nn
from torch.nn import functional as F
from abc import abstractmethod
from typing import List, Callable, Union, Any, TypeVar, Tuple
# from torch import tensor as Tensor
Tensor = TypeVar('torch.tensor')
__all__ = ['VanillaVAE']
class VanillaVAE(BaseVAE):
    """Standard convolutional VAE with a Gaussian prior and posterior."""

    def __init__(self,
                 in_channels: int,
                 latent_dim: int,
                 hidden_dims: List = None,
                 **kwargs) -> None:
        """
        :param in_channels: number of channels of the input images
        :param latent_dim: dimensionality of the latent vector z
        :param hidden_dims: channel widths of the conv stages
                            (defaults to [32, 64, 128, 256, 512])
        """
        super(VanillaVAE, self).__init__()

        self.latent_dim = latent_dim

        modules = []
        if hidden_dims is None:
            hidden_dims = [32, 64, 128, 256, 512]

        # Build Encoder: stride-2 convs halve the spatial size per stage.
        for h_dim in hidden_dims:
            modules.append(
                nn.Sequential(
                    nn.Conv2d(in_channels, out_channels=h_dim,
                              kernel_size=3, stride=2, padding=1),
                    nn.BatchNorm2d(h_dim),
                    nn.LeakyReLU())
            )
            in_channels = h_dim

        self.encoder = nn.Sequential(*modules)
        # NOTE(review): the "* 4" assumes the encoder output is 2x2
        # spatially (e.g. a 64x64 input through 5 stride-2 stages) --
        # confirm against the training input size.
        self.fc_mu = nn.Linear(hidden_dims[-1] * 4, latent_dim)
        self.fc_var = nn.Linear(hidden_dims[-1] * 4, latent_dim)

        # Build Decoder
        modules = []

        self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)

        # NOTE(review): reverse() mutates the caller-supplied list in place.
        hidden_dims.reverse()

        for i in range(len(hidden_dims) - 1):
            modules.append(
                nn.Sequential(
                    nn.ConvTranspose2d(hidden_dims[i],
                                       hidden_dims[i + 1],
                                       kernel_size=3,
                                       stride=2,
                                       padding=1,
                                       output_padding=1),
                    nn.BatchNorm2d(hidden_dims[i + 1]),
                    nn.LeakyReLU())
            )

        self.decoder = nn.Sequential(*modules)

        # Final upsampling stage mapping back to a single-channel image in
        # [-1, 1] (Tanh output).
        self.final_layer = nn.Sequential(
            nn.ConvTranspose2d(hidden_dims[-1],
                               hidden_dims[-1],
                               kernel_size=3,
                               stride=2,
                               padding=1,
                               output_padding=1),
            nn.BatchNorm2d(hidden_dims[-1]),
            nn.LeakyReLU(),
            nn.Conv2d(hidden_dims[-1], out_channels=1,
                      kernel_size=3, padding=1),
            nn.Tanh())

    def encode(self, input: Tensor) -> List[Tensor]:
        """
        Encodes the input by passing through the encoder network
        and returns the latent codes.
        :param input: (Tensor) Input tensor to encoder [N x C x H x W]
        :return: (Tensor) List of latent codes
        """
        result = self.encoder(input)
        result = torch.flatten(result, start_dim=1)

        # Split the result into mu and var components
        # of the latent Gaussian distribution
        mu = self.fc_mu(result)
        log_var = self.fc_var(result)

        return [mu, log_var]

    def decode(self, z: Tensor) -> Tensor:
        """
        Maps the given latent codes
        onto the image space.
        :param z: (Tensor) [B x D]
        :return: (Tensor) [B x C x H x W]
        """
        result = self.decoder_input(z)
        # NOTE(review): hard-coded (512, 2, 2) only matches the default
        # hidden_dims; a custom hidden_dims would break this reshape.
        result = result.view(-1, 512, 2, 2)
        result = self.decoder(result)
        result = self.final_layer(result)
        return result

    def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:
        """
        Reparameterization trick to sample from N(mu, var) from
        N(0,1).
        :param mu: (Tensor) Mean of the latent Gaussian [B x D]
        :param logvar: (Tensor) Standard deviation of the latent Gaussian [B x D]
        :return: (Tensor) [B x D]
        """
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return eps * std + mu

    def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
        # Returns [reconstruction, input, mu, log_var] for loss_function.
        mu, log_var = self.encode(input)
        z = self.reparameterize(mu, log_var)
        return [self.decode(z), input, mu, log_var]

    def loss_function(self,
                      *args,
                      **kwargs) -> dict:
        """
        Computes the VAE loss function.
        KL(N(\mu, \sigma), N(0, 1)) = \log \frac{1}{\sigma} + \frac{\sigma^2 + \mu^2}{2} - \frac{1}{2}
        :param args: [reconstruction, input, mu, log_var] as produced by forward()
        :param kwargs: must contain 'M_N', the minibatch KLD weight
        :return: dict with 'loss', 'Reconstruction_Loss' and 'KLD'
        """
        recons = args[0]
        input = args[1]
        mu = args[2]
        log_var = args[3]

        kld_weight = kwargs['M_N']  # Account for the minibatch samples from the dataset
        recons_loss = F.mse_loss(recons, input)

        kld_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1), dim=0)

        loss = recons_loss + kld_weight * kld_loss
        return {'loss': loss, 'Reconstruction_Loss': recons_loss, 'KLD': -kld_loss}

    def sample(self, z, **kwargs) -> Tensor:
        """
        Samples from the latent space and return the corresponding
        image space map.  Unlike the usual signature, the latent codes z
        are supplied directly by the caller here.
        :param z: (Tensor) latent codes [B x D]
        :return: (Tensor) decoded images
        """
        # z = torch.randn(num_samples,
        #                 self.latent_dim)
        #
        # z = z.to(current_device)

        samples = self.decode(z)
        return samples

    def generate(self, x: Tensor, **kwargs) -> Tensor:
        """
        Given an input image x, returns the reconstructed image
        :param x: (Tensor) [B x C x H x W]
        :return: (Tensor) [B x C x H x W]
        """
        return self.forward(x)[0]
| [
"xuma@my.unt.edu"
] | xuma@my.unt.edu |
79b286b760c71c260b3537fdac6de52e607aa4c1 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part004022.py | 30655fd8714685bb1fb65a8edca5cb26a2b8e4e4 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher62483(CommutativeMatcher):
    """Auto-generated matchpy many-to-one matcher for one Rubi integration
    pattern.  Machine-generated code -- do not edit by hand."""
    _instance = None
    patterns = {
        0: (0, Multiset({}), [
            (VariableWithCount('i2.2.2.1.0', 1, 1, None), Mul),
            (VariableWithCount('i2.2.2.1.0_1', 1, 1, S(1)), Mul)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()

    def __init__(self):
        self.add_subject(None)

    @staticmethod
    def get():
        # Lazily-created process-wide singleton instance.
        if CommutativeMatcher62483._instance is None:
            CommutativeMatcher62483._instance = CommutativeMatcher62483()
        return CommutativeMatcher62483._instance

    @staticmethod
    def get_match_iter(subject):
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 62482
        # `return` before `yield` makes this an empty generator: the
        # unreachable `yield` only marks the function as a generator.
        return
        yield
from collections import deque | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
2aa2527ba85cd0449c48f47f0ffdcf8648fb2356 | ff5892487c262ce845a9996a282d3a2fdb1a3b15 | /URI_1921.py | e4cfb5214ddbd3a268b43b0528f2cc9748bc71b7 | [] | no_license | dankoga/URIOnlineJudge--Python-3.9 | d424a47671f106d665a4e255382fc0ec3059096a | f1c99521caeff59be0843af5f63a74013b63f7f0 | refs/heads/master | 2023-07-15T08:32:11.040426 | 2021-09-03T13:27:17 | 2021-09-03T13:27:17 | 393,991,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | if __name__ == '__main__':
sides_qty = int(input())
print((sides_qty * (sides_qty - 1)) // 2 - sides_qty)
| [
"dankoga2@gmail.com"
] | dankoga2@gmail.com |
0de5dd2b9f24eb71dd2be78f458863fd9dcb879c | 2293c76c3d18e2fcd44ded90bd40113d26285663 | /pyeccodes/defs/grib2/tables/10/4_2_2_4_table.py | 7eb24c3ccf8358f9e6b0619628378ca739ad3f6f | [
"Apache-2.0"
] | permissive | ecmwf/pyeccodes | b1f121dbddf68d176a03805ed5144ba0b37ac211 | dce2c72d3adcc0cb801731366be53327ce13a00b | refs/heads/master | 2022-04-23T10:37:40.524078 | 2020-04-18T06:30:29 | 2020-04-18T06:30:29 | 255,554,540 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | def load(h):
    # GRIB2 code table 4.2.2.4 entries (fire weather parameters); the `h`
    # argument of load() is unused by this table.
    return ({'abbr': 0, 'code': 0, 'title': 'Fire outlook', 'units': 'Code table 4.224'},
            {'abbr': 1,
             'code': 1,
             'title': 'Fire outlook due to dry thunderstorm',
             'units': 'Code table 4.224'},
            {'abbr': 2, 'code': 2, 'title': 'Haines Index', 'units': 'Numeric'},
            {'abbr': 3, 'code': 3, 'title': 'Fire burned area', 'units': '%'},
            {'abbr': None, 'code': 255, 'title': 'Missing'})
| [
"baudouin.raoult@ecmwf.int"
] | baudouin.raoult@ecmwf.int |
3ba33009a6ff59d33e9edeafa1a50d110a7cee0b | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_120/763.py | f7e5aa872796b1b2cae4a6e30617c689add81644 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | f = open("A-small-attempt0.in", "r")
o = open("Bullseye-A-small-attempt0-out.txt", "w")
T = int(f.readline())  # number of test cases
for t in range(T):
    count = 0
    randt = f.readline().split()
    r = int(randt[0]) #white circle radius
    paint = int(randt[1]) #mL of paint
    # The first black ring spans radii [r, r+1]; rings alternate colors.
    rIn = r
    rOut = r + 1
    # Cost of painting ring [rIn, rOut] is proportional to rOut^2 - rIn^2;
    # the pi factor cancels given the problem's paint units.
    nextArea = rOut**2 - rIn**2
    while paint >= nextArea:
        count += 1
        paint -= nextArea
        # Next black ring starts one white ring further out.
        rOut += 2
        rIn += 2
        nextArea = rOut**2 - rIn**2
    o.write("Case #" + str(t+1) + ": " + str(count) + "\n")
f.close()
o.close()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
e8d80ddc0ce9ff77bb3c1ba7f309bd2c74e9afc6 | a79ed9e33fe76af58b34082e4fe762716e38e80d | /Stage3/method2_feature_vector.py | 48cfb9871fb375683d208d9912c8fc9122d123c0 | [] | no_license | xwang322/Data-Science | c7a5a617cbb97787905c79cbed9cefd4362cd77f | 6ae6fb912cf8b1b743ae4a4e3fffb99b2f496376 | refs/heads/master | 2021-01-12T09:53:15.596743 | 2016-12-19T22:39:28 | 2016-12-19T22:39:28 | 76,287,261 | 0 | 0 | null | 2016-12-18T00:12:52 | 2016-12-12T19:13:53 | Python | UTF-8 | Python | false | false | 6,236 | py | import re
import random
import json
from random import choice, sample, randint
from py_stringmatching import simfunctions, tokenizers
import string_process2
from sklearn.preprocessing import Imputer
import numpy as np
# Attributes extracted from each product's JSON payload, in matrix-column order.
feature_attr = ['id','product type','product name','product segment','brand','category']
#with open('X.txt','r') as f:
# First pass: count the lines in the input file (i ends up as last index).
with open('5000Test.txt','r') as f:
    for i,l in enumerate(f):
        pass
f.close()
#print(i)
# Two matrix rows per input pair (one per product); one label per pair.
matrix = [['null' for j in range(6)] for j in range (2*(i+1))]
result = ['null' for j in range(i+1)]
#with open('X.txt','r') as f:
l = 0
# Second pass: parse each '?'-separated line into two product records
# (id + JSON attributes each) plus a MATCH/MISMATCH label.
with open('5000Test.txt','r') as f:
    lines = f.readlines()
    r = 0
    m = 0
    for i in lines:
        dict_tmp1 = {}
        dict_tmp2 = {}
        items = i.split('?')
        json_id1 = items[1]
        json_id2 = items[3]
        # Pull the numeric id (digits, possibly '|'-separated) out of the id field.
        id1 = re.findall(r'[\d|]+', json_id1)
        id2 = re.findall(r'[\d|]+', json_id2)
        json_data1 = json.loads(items[2])
        # Lower-case and keep only the attributes we care about.
        for each in json_data1.keys():
            aname = each
            bname = json_data1.get(aname)
            cname = ''.join(bname)
            if aname.lower() in feature_attr:
                attrPost = feature_attr.index(aname.lower())
                dict_tmp1.setdefault(aname.lower(), cname.lower())
        # Fill missing attributes with empty strings so .get() below is total.
        for each in feature_attr:
            if each not in dict_tmp1.keys():
                dict_tmp1.setdefault(each, '')
        matrix[r][0] = id1
        matrix[r][1] = dict_tmp1.get('product type')
        matrix[r][2] = dict_tmp1.get('product name')
        matrix[r][3] = dict_tmp1.get('product segment')
        matrix[r][4] = dict_tmp1.get('brand')
        matrix[r][5] = dict_tmp1.get('category')
        # for product 2
        json_data2 = json.loads(items[4])
        for each in json_data2.keys():
            aname = each
            bname = json_data2.get(aname)
            cname = ''.join(bname)
            if aname.lower() in feature_attr:
                attrPost = feature_attr.index(aname.lower())
                dict_tmp2.setdefault(aname.lower(), cname.lower())
        for each in feature_attr:
            if each not in dict_tmp2.keys():
                dict_tmp2.setdefault(each, '')
        matrix[r+1][0] = id2
        matrix[r+1][1] = dict_tmp2.get('product type')
        matrix[r+1][2] = dict_tmp2.get('product name')
        matrix[r+1][3] = dict_tmp2.get('product segment')
        matrix[r+1][4] = dict_tmp2.get('brand')
        matrix[r+1][5] = dict_tmp2.get('category')
        # items[5] holds the pair label, e.g. 'MATCH\n'.
        result[m] = items[5]
        r += 2
        m += 1
#print(len(matrix))
#print(result)
f.close()
# Dump the attribute matrix and the labels to intermediate files.
#x = open('X_matrix.txt','w')
x = open('5000Test_matrix.txt','w')
for each in matrix:
    print(each, file = x)
x.close()
#x = open('X_matrix_class.txt','w')
x = open('5000Test_matrix_class.txt','w')
for each in result:
    print(each, file = x)
x.close()
# Third pass: turn each pair of matrix rows (2*r, 2*r+1) into an
# 11-element feature vector: 10 string-similarity features + class label.
# 999 is used as a sentinel when both values of an attribute are empty.
i = int(len(matrix)/2-1)
#print(i)
FVmatrix = [[0 for j in range(11)] for j in range(i+1)]
r = 0
for r in range(i+1):
    # product type: needleman_wunsch distance
    if(matrix[2*r][1] == '' and matrix[2*r+1][1] == ''):
        FVmatrix[r][0] = 999
    else:
        FVmatrix[r][0] = simfunctions.needleman_wunsch(matrix[2*r][1], matrix[2*r+1][1])
    # product type: soft tfidf distance
    if(matrix[2*r][1] == '' and matrix[2*r+1][1] == ''):
        FVmatrix[r][1] = 999
    else:
        product_type1 = string_process2.string_process2(matrix[2*r][1])
        product_type2 = string_process2.string_process2(matrix[2*r+1][1])
        FVmatrix[r][1] = simfunctions.soft_tfidf(set(product_type1), set(product_type2))
    product_name1 = string_process2.string_process2(matrix[2*r][2])
    product_name2 = string_process2.string_process2(matrix[2*r+1][2])
    #print(product_name1, product_name2)
    # product name: soft TF/IDF
    FVmatrix[r][2] = simfunctions.soft_tfidf(set(product_name1), set(product_name2))
    #product name: jaccard score
    FVmatrix[r][3] = simfunctions.jaccard(set(product_name1), set(product_name2))
    #product segment: needleman_wunsch distance
    if(matrix[2*r][3] == '' and matrix[2*r+1][3] == ''):
        FVmatrix[r][4] = 999
    else:
        FVmatrix[r][4] = simfunctions.needleman_wunsch(matrix[2*r][3], matrix[2*r+1][3])
    #product segment: soft tfidf distance
    if(matrix[2*r][3] == '' and matrix[2*r+1][3] == ''):
        FVmatrix[r][5] = 999
    else:
        product_seg1 = string_process2.string_process2(matrix[2*r][3])
        product_seg2 = string_process2.string_process2(matrix[2*r+1][3])
        FVmatrix[r][5] = simfunctions.soft_tfidf(set(product_seg1), set(product_seg2))
    #brand: needleman_wunsch distance
    if(matrix[2*r][4] == '' and matrix[2*r+1][4] == ''):
        FVmatrix[r][6] = 999
    else:
        FVmatrix[r][6] = simfunctions.needleman_wunsch(matrix[2*r][4], matrix[2*r+1][4])
    #brand: soft tfidf distance
    if(matrix[2*r][4] == '' and matrix[2*r+1][4] == ''):
        FVmatrix[r][7] = 999
    else:
        product_brand1 = string_process2.string_process2(matrix[2*r][4])
        product_brand2 = string_process2.string_process2(matrix[2*r+1][4])
        FVmatrix[r][7] = simfunctions.soft_tfidf(set(product_brand1), set(product_brand2))
    # category: needleman_wunsch distance
    if(matrix[2*r][5] == '' and matrix[2*r+1][5] == ''):
        FVmatrix[r][8] = 999
    else:
        FVmatrix[r][8] = simfunctions.needleman_wunsch(matrix[2*r][5], matrix[2*r+1][5])
    # category: soft tfidf distance
    if(matrix[2*r][5] == '' and matrix[2*r+1][5] == ''):
        FVmatrix[r][9] = 999
    else:
        product_category1 = string_process2.string_process2(matrix[2*r][5])
        product_category2 = string_process2.string_process2(matrix[2*r+1][5])
        FVmatrix[r][9] = simfunctions.soft_tfidf(set(product_category1), set(product_category2))
    # Last column: binary class label (1 = MATCH).
    if(result[r] == 'MATCH\n'):
        FVmatrix[r][10] = 1
    else:
        FVmatrix[r][10] = 0
#print(FVmatrix)
#x = open('X_feature_vector.txt','w')
x = open('5000Test2_string_processed_feature_vector.txt','w')
for each in FVmatrix:
    print(each, file = x)
x.close()
| [
"noreply@github.com"
] | xwang322.noreply@github.com |
1cb23cdd8301f2345e6cfe91689a1091d605faf8 | 727cdc7c9af6fdf6b4eb8444197718e5c6760019 | /review_qa_collect/translate.py | 14afdd26ccfc51a61f3881d1b1c7c8b9b073d693 | [] | no_license | newer027/amazon_crawler | 0cc6feb30f9180ae48ac936eeb6af41ec06eadfd | 39d6867a8dd56b90dae5e98aa44e6df274439f8e | refs/heads/master | 2022-11-23T17:04:33.995126 | 2020-04-03T15:42:42 | 2020-04-03T15:42:42 | 252,774,253 | 1 | 0 | null | 2022-11-22T01:44:53 | 2020-04-03T15:42:31 | CSS | UTF-8 | Python | false | false | 4,117 | py | from django.shortcuts import render
from django.http import HttpResponse
from django.template import RequestContext, Template
from django.views.decorators.csrf import csrf_exempt
from django.utils.encoding import smart_str, smart_unicode
import xml.etree.ElementTree as ET
import urllib,urllib2,time,hashlib
# WeChat public-account token used for the signature handshake (placeholder).
TOKEN = "your token"
# NOTE(review): bare name, not a string — this raises NameError at import time
# unless `your_youdao_key` is defined elsewhere; it must be replaced with a
# real key string before deployment.
YOUDAO_KEY = your_youdao_key
# Youdao API 'keyfrom' application identifier (placeholder).
YOUDAO_KEY_FROM = "your_youdao_key_from"
# Response format requested from the Youdao translation API.
YOUDAO_DOC_TYPE = "xml"
@csrf_exempt
def handleRequest(request):
    """WeChat webhook entry point.

    GET  -> server URL verification: echo back ``echostr`` when the
            signature validates (see checkSignature).
    POST -> incoming message: translate its content via Youdao and
            return the reply XML.
    """
    if request.method == 'GET':
        #response = HttpResponse(request.GET['echostr'],content_type="text/plain")
        response = HttpResponse(checkSignature(request),content_type="text/plain")
        return response
    elif request.method == 'POST':
        #c = RequestContext(request,{'result':responseMsg(request)})
        #t = Template('{{result}}')
        #response = HttpResponse(t.render(c),content_type="application/xml")
        response = HttpResponse(responseMsg(request),content_type="application/xml")
        return response
    else:
        return None
def checkSignature(request):
    """Validate the WeChat server signature for a GET verification request.

    Returns the ``echostr`` query value when the signature checks out,
    otherwise None.
    """
    params = request.GET
    candidate = params.get("signature", None)
    echo_str = params.get("echostr", None)
    # WeChat protocol: sort token/timestamp/nonce lexicographically,
    # concatenate, and compare the SHA1 hex digest with the signature.
    pieces = sorted([TOKEN, params.get("timestamp", None), params.get("nonce", None)])
    digest = hashlib.sha1("%s%s%s" % tuple(pieces)).hexdigest()
    if digest == candidate:
        return echo_str
    return None
def responseMsg(request):
    """Handle a POSTed WeChat message: translate its Content via the Youdao
    openapi and return the reply XML string.

    NOTE(review): Python 2 only (``urllib2`` and ``request.raw_post_data``).
    """
    rawStr = smart_str(request.raw_post_data)
    #rawStr = smart_str(request.POST['XML'])
    msg = paraseMsgXml(ET.fromstring(rawStr))
    # Fall back to a friendly prompt when the message carries no text.
    queryStr = msg.get('Content','You have input nothing~')
    raw_youdaoURL = "http://fanyi.youdao.com/openapi.do?keyfrom=%s&key=%s&type=data&doctype=%s&version=1.1&q=" % (YOUDAO_KEY_FROM,YOUDAO_KEY,YOUDAO_DOC_TYPE)
    # URL-encode the query text before appending it to the request URL.
    youdaoURL = "%s%s" % (raw_youdaoURL,urllib2.quote(queryStr))
    req = urllib2.Request(url=youdaoURL)
    result = urllib2.urlopen(req).read()
    replyContent = paraseYouDaoXml(ET.fromstring(result))
    return getReplyXml(msg,replyContent)
def paraseMsgXml(rootElem):
    """Flatten a WeChat ``<xml>`` message element into a {tag: text} dict.

    Returns an empty dict when the root element is not ``<xml>``.
    """
    if rootElem.tag != 'xml':
        return {}
    return dict((child.tag, smart_str(child.text)) for child in rootElem)
def paraseYouDaoXml(rootElem):
    """Render a parsed ``youdao-fanyi`` XML response as human-readable text.

    Known error codes short-circuit with a one-line message; otherwise the
    query, the machine translation, the basic dictionary entry and the web
    definitions are appended in document order.
    """
    error_messages = {
        '20': 'too long to translate\n',
        '30': 'can not be able to translate with effect\n',
        '40': 'can not be able to support this language\n',
        '50': 'invalid key\n',
    }
    reply = ''
    if rootElem.tag != 'youdao-fanyi':
        return reply
    for child in rootElem:
        tag = child.tag
        if tag == 'errorCode':
            # Known failure codes replace the whole reply; code '0' falls through.
            if child.text in error_messages:
                return error_messages[child.text]
        elif tag == 'query':
            # Echo the queried string first.
            reply += "%s\n" % child.text
        elif tag == 'translation':
            # Machine translation section.
            reply += '%s\n%s\n' % ('-' * 3 + u'有道翻译' + '-' * 3, child[0].text)
        elif tag == 'basic':
            # Basic dictionary: phonetic transcription plus explanations.
            reply += "%s\n" % ('-' * 3 + u'基本词典' + '-' * 3)
            for entry in child:
                if entry.tag == 'phonetic':
                    reply += '%s\n' % entry.text
                elif entry.tag == 'explains':
                    for ex in entry.findall('ex'):
                        reply += '%s\n' % ex.text
        elif tag == 'web':
            # Web definitions: key phrase followed by its example values.
            reply += "%s\n" % ('-' * 3 + u'网络释义' + '-' * 3)
            for explain in child.findall('explain'):
                for key in explain.findall('key'):
                    reply += '%s\n' % key.text
                for value in explain.findall('value'):
                    for ex in value.findall('ex'):
                        reply += '%s\n' % ex.text
                reply += '%s\n' % '--'
    return reply
def getReplyXml(msg, replyContent):
    """Build the WeChat text-reply XML.

    Sender and receiver from the incoming *msg* dict are swapped, the
    timestamp is the current epoch second, and *replyContent* becomes the
    message body.
    """
    template = ("<xml><ToUserName><![CDATA[%s]]></ToUserName>"
                "<FromUserName><![CDATA[%s]]></FromUserName>"
                "<CreateTime>%s</CreateTime>"
                "<MsgType><![CDATA[%s]]></MsgType>"
                "<Content><![CDATA[%s]]></Content>"
                "<FuncFlag>0</FuncFlag></xml>")
    fields = (msg['FromUserName'], msg['ToUserName'],
              str(int(time.time())), 'text', replyContent)
    return template % fields
"newer027@gmail.com"
] | newer027@gmail.com |
cf70617373fa7f876ca1d7667a0cfe82a5512398 | 5b93663bbec958a03d8c670f6e9eb24872533a7d | /tensor2tensor/trax/jaxboard.py | 278d3abba0fbb055c7a0e2c1f0ca5df894b7e416 | [
"Apache-2.0"
] | permissive | maggie0830/tensor2tensor | 9538cc82cd3157b8eeb6c47da9a66a86b570903b | 77673a0cd2cee3a1568a69d7ff0108e6501d9ffb | refs/heads/master | 2020-04-25T20:46:12.579151 | 2019-02-28T02:39:22 | 2019-02-28T02:39:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,021 | py | # coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Write Summaries from JAX for use with Tensorboard.
See jaxboard_demo.py for example usage.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import struct
import warnings
import wave
import matplotlib as mpl
# Necessary to prevent attempted Tk import:
# Force the non-interactive Agg backend before pyplot is imported below, so
# jaxboard works on headless machines; the warnings guard silences
# matplotlib's complaint if a backend was already selected.
with warnings.catch_warnings():
  warnings.simplefilter('ignore')
  mpl.use('Agg')
# pylint: disable=g-import-not-at-top
import matplotlib.pyplot as plt
import numpy as onp
import tensorflow as tf
from tensorflow import gfile
from tensorflow import HistogramProto
from tensorflow import Summary
from tensorflow import SummaryMetadata
def _pack_images(images, rows, cols):
"""Helper utility to make a tiled field of images from numpy arrays.
Args:
images: Image tensor in shape [N, W, H, C].
rows: Number of images per row in tiled image.
cols: Number of images per column in tiled image.
Returns:
A tiled image of shape [W * rows, H * cols, C].
Truncates incomplete rows.
"""
shape = onp.shape(images)
width, height, depth = shape[-3:]
images = onp.reshape(images, (-1, width, height, depth))
batch = onp.shape(images)[0]
rows = onp.minimum(rows, batch)
cols = onp.minimum(batch // rows, cols)
images = images[:rows * cols]
images = onp.reshape(images, (rows, cols, width, height, depth))
images = onp.transpose(images, [0, 2, 1, 3, 4])
images = onp.reshape(images, [rows * width, cols * height, depth])
return images
class SummaryWriter(object):
    """Saves data in event and summary protos for tensorboard.

    Wraps a ``tf.summary.FileWriter``; each method converts its input into
    the corresponding Summary proto and appends it to the event file.
    """

    def __init__(self, log_dir):
        """Create a new SummaryWriter.

        Args:
          log_dir: path to record tfevents files in.
        """
        # If needed, create log_dir directory as well as missing parent directories.
        if not gfile.IsDirectory(log_dir):
            gfile.MakeDirs(log_dir)
        self.writer = tf.summary.FileWriter(log_dir, graph=None)
        # Summaries queued to be flushed at close() time.
        self.end_summaries = []
        # Last training step seen; used when a method is called without `step`.
        self.step = 0
        self.closed = False

    def close(self):
        """Close SummaryWriter. Final!

        Flushes any queued end-of-run summaries; safe to call more than once.
        """
        if not self.closed:
            for summary in self.end_summaries:
                self.writer.add_summary(summary, self.step)
            self.writer.close()
            self.closed = True
            del self.writer

    def __del__(self):
        # Best-effort close on garbage collection; close() itself is idempotent.
        self.close()

    def scalar(self, tag, value, step=None):
        """Saves scalar value.

        Args:
          tag: str: label for this data
          value: int/float: number to log
          step: int: training step
        """
        value = float(onp.array(value))
        if step is None:
            step = self.step
        else:
            self.step = step
        summary = Summary(value=[Summary.Value(tag=tag, simple_value=value)])
        self.writer.add_summary(summary, step)

    def image(self, tag, image, step=None):
        """Saves RGB image summary from onp.ndarray [H,W], [H,W,1], or [H,W,3].

        Args:
          tag: str: label for this data
          image: ndarray: [H,W], [H,W,1], [H,W,3] save image in greyscale or colors
          step: int: training step
        """
        image = onp.array(image)
        if step is None:
            step = self.step
        else:
            self.step = step
        # Promote greyscale [H,W] / [H,W,1] inputs to 3-channel RGB.
        if len(onp.shape(image)) == 2:
            image = image[:, :, onp.newaxis]
        if onp.shape(image)[-1] == 1:
            image = onp.repeat(image, 3, axis=-1)
        # Encode as PNG in memory via matplotlib.
        image_strio = io.BytesIO()
        plt.imsave(image_strio, image, format='png')
        image_summary = Summary.Image(
            encoded_image_string=image_strio.getvalue(),
            colorspace=3,
            height=image.shape[0],
            width=image.shape[1])
        summary = Summary(value=[Summary.Value(tag=tag, image=image_summary)])
        self.writer.add_summary(summary, step)

    def images(self, tag, images, step=None, rows=None, cols=None):
        """Saves (rows, cols) tiled images from onp.ndarray.

        If either rows or cols aren't given, they are determined automatically
        from the size of the image batch, if neither are given a long column
        of images is produced. This truncates the image batch rather than padding
        if it doesn't fill the final row.

        Args:
          tag: str: label for this data
          images: ndarray: [N,H,W,1] or [N,H,W,3] to tile in 2d
          step: int: training step
          rows: int: number of rows in tile
          cols: int: number of columns in tile
        """
        images = onp.array(images)
        if step is None:
            step = self.step
        else:
            self.step = step
        n_images = onp.shape(images)[0]
        # Derive the missing grid dimension(s) from the batch size.
        if rows is None and cols is None:
            rows = 1
            cols = n_images
        elif rows is None:
            rows = n_images // cols
        elif cols is None:
            cols = n_images // rows
        tiled_images = _pack_images(images, rows, cols)
        self.image(tag, tiled_images, step=step)

    def plot(self, tag, mpl_plt, step=None, close_plot=True):
        """Saves matplotlib plot output to summary image.

        Args:
          tag: str: label for this data
          mpl_plt: matplotlib stateful pyplot object with prepared plotting state
          step: int: training step
          close_plot: bool: automatically closes plot
        """
        if step is None:
            step = self.step
        else:
            self.step = step
        fig = mpl_plt.get_current_fig_manager()
        img_w, img_h = fig.canvas.get_width_height()
        image_buf = io.BytesIO()
        mpl_plt.savefig(image_buf, format='png')
        image_summary = Summary.Image(
            encoded_image_string=image_buf.getvalue(),
            colorspace=4,  # RGBA
            height=img_h,
            width=img_w)
        summary = Summary(value=[Summary.Value(tag=tag, image=image_summary)])
        self.writer.add_summary(summary, step)
        if close_plot:
            mpl_plt.close()

    def audio(self, tag, audiodata, step=None, sample_rate=44100):
        """Saves audio.

        NB: single channel only right now.

        Args:
          tag: str: label for this data
          audiodata: ndarray [Nsamples,]: data between (-1.0,1.0) to save as wave
          step: int: training step
          sample_rate: sample rate of passed in audio buffer
        """
        audiodata = onp.array(audiodata)
        if step is None:
            step = self.step
        else:
            self.step = step
        audiodata = onp.clip(onp.squeeze(audiodata), -1, 1)
        if audiodata.ndim != 1:
            raise ValueError('Audio data must be 1D.')
        # Scale floats in [-1, 1] to signed 16-bit PCM samples.
        sample_list = (32767.0 * audiodata).astype(int).tolist()
        wio = io.BytesIO()
        wav_buf = wave.open(wio, 'wb')
        wav_buf.setnchannels(1)
        wav_buf.setsampwidth(2)
        wav_buf.setframerate(sample_rate)
        # Little-endian 16-bit frames, as declared by setsampwidth(2).
        enc = b''.join([struct.pack('<h', v) for v in sample_list])
        wav_buf.writeframes(enc)
        wav_buf.close()
        encoded_audio_bytes = wio.getvalue()
        wio.close()
        audio = Summary.Audio(
            sample_rate=sample_rate,
            num_channels=1,
            length_frames=len(sample_list),
            encoded_audio_string=encoded_audio_bytes,
            content_type='audio/wav')
        summary = Summary(value=[Summary.Value(tag=tag, audio=audio)])
        self.writer.add_summary(summary, step)

    def histogram(self, tag, values, bins, step=None):
        """Saves histogram of values.

        Args:
          tag: str: label for this data
          values: ndarray: will be flattened by this routine
          bins: number of bins in histogram, or array of bins for onp.histogram
          step: int: training step
        """
        if step is None:
            step = self.step
        else:
            self.step = step
        values = onp.array(values)
        bins = onp.array(bins)
        values = onp.reshape(values, -1)
        counts, limits = onp.histogram(values, bins=bins)
        # boundary logic: trim leading/trailing empty buckets, keeping one
        # zero-count bucket at the front so tensorboard renders edges correctly.
        cum_counts = onp.cumsum(onp.greater(counts, 0, dtype=onp.int32))
        start, end = onp.searchsorted(
            cum_counts, [0, cum_counts[-1] - 1], side='right')
        start, end = int(start), int(end) + 1
        counts = (
            counts[start -
                   1:end] if start > 0 else onp.concatenate([[0], counts[:end]]))
        limits = limits[start:end + 1]
        sum_sq = values.dot(values)
        histo = HistogramProto(
            min=values.min(),
            max=values.max(),
            num=len(values),
            sum=values.sum(),
            sum_squares=sum_sq,
            bucket_limit=limits.tolist(),
            bucket=counts.tolist())
        summary = Summary(value=[Summary.Value(tag=tag, histo=histo)])
        self.writer.add_summary(summary, step)

    def text(self, tag, textdata, step=None):
        """Saves a text summary.

        Args:
          tag: str: label for this data
          textdata: string, or 1D/2D list/numpy array of strings
          step: int: training step
        Note: markdown formatting is rendered by tensorboard.
        """
        if step is None:
            step = self.step
        else:
            self.step = step
        smd = SummaryMetadata(
            plugin_data=SummaryMetadata.PluginData(plugin_name='text'))
        if isinstance(textdata, (str, bytes)):
            tensor = tf.make_tensor_proto(
                values=[textdata.encode(encoding='utf_8')], shape=(1,))
        else:
            textdata = onp.array(textdata)  # convert lists, jax arrays, etc.
            datashape = onp.shape(textdata)
            # NOTE(review): inputs with rank > 2 leave `tensor` unbound and
            # raise UnboundLocalError below — confirm whether that is intended.
            if len(datashape) == 1:
                tensor = tf.make_tensor_proto(
                    values=[td.encode(encoding='utf_8') for td in textdata],
                    shape=(datashape[0],))
            elif len(datashape) == 2:
                tensor = tf.make_tensor_proto(
                    values=[
                        td.encode(encoding='utf_8') for td in onp.reshape(textdata, -1)
                    ],
                    shape=(datashape[0], datashape[1]))
        summary = Summary(
            value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)])
        self.writer.add_summary(summary, step)
| [
"copybara-piper@google.com"
] | copybara-piper@google.com |
4f3dee7c5f1e1f739b536197af999ea9e784bf3a | 01a33634195c48794ebb46bd19c785283ca7e885 | /backend/run_fast_20841/wsgi.py | 11f4f62c1c3dd85630639fc1ceb4cca1e89bca0e | [] | no_license | crowdbotics-apps/run-fast-20841 | db388a66a2590ae085db76d37ec712edb7892d0a | 7cc3a759d1399aa378903f8db7d85e2c4fcd07bc | refs/heads/master | 2022-12-23T15:50:17.562389 | 2020-09-29T19:26:58 | 2020-09-29T19:26:58 | 299,716,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for run_fast_20841 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before the application is constructed.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "run_fast_20841.settings")
# The WSGI callable that application servers (gunicorn, uWSGI, ...) serve.
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
9a9f3d2d56c06b6670be827785e25eb6bc99eb98 | 6a95dc7ee1c583119c892f193cd683499b50a706 | /tests/unit/fixtures/logging.py | c2cc4e6d048f32c78e16acd2ca9122ea32912417 | [
"BSD-3-Clause"
] | permissive | Steffanic/alice-jet-hadron | 735cba4d440f5f87364bf8d47147a0eccf1e1471 | 8526567935c0339cebb9ef224b09a551a0b96932 | refs/heads/master | 2022-12-28T23:02:20.560475 | 2020-10-09T08:03:55 | 2020-10-09T08:03:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | #!/usr/bin/env python
""" Logging related fixtures to aid testing.
.. codeauthor:: Raymond Ehlers <raymond.ehlers@yale.edu>, Yale University
"""
import logging
import pytest
# Set logging level as a global variable to simplify configuration.
# This is not ideal, but fine for simple tests.
logging_level = logging.DEBUG


@pytest.fixture
def logging_mixin(caplog):
    """ Logging mixin to capture logging messages from modules.

    It logs at the debug level, which is probably most useful for when a test fails.
    """
    # pytest's caplog fixture captures records emitted at or above this level.
    caplog.set_level(logging_level)
| [
"raymond.ehlers@gmail.com"
] | raymond.ehlers@gmail.com |
c97290f97b929f33265f2e8efdbd481a678ab48b | 5d97cf2d275a0636d8ac3b98c222b6864d1c992e | /server/walt/server/threads/main/snmp/ipsetup.py | ceacf3ff17a93ca4efd81348c6a486d5e1e035d1 | [
"BSD-3-Clause"
] | permissive | ManonBillet/walt-python-packages | 51d57bf710dc6f981040b4295b8bb7811d4462e6 | b778992e241d54b684f54715d83c4aff98a01db7 | refs/heads/master | 2023-05-04T14:58:24.265660 | 2021-02-01T15:16:44 | 2021-02-01T15:16:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,432 | py | #!/usr/bin/env python
from walt.server.threads.main.snmp.mibs import load_mib
class IPSetupProxy(object):
    """SNMP helper that manages the IP bootstrap configuration of a
    Netgear switch (NETGEAR-SWITCHING-MIB)."""

    def __init__(self, snmp_proxy):
        """Wrap an SNMP proxy and load the Netgear switching MIB."""
        self.snmp = snmp_proxy
        load_mib(b"NETGEAR-SWITCHING-MIB")

    def perform_dhcp_setup(self):
        """(Re)trigger the switch's DHCP address acquisition."""
        # the switch is already configured to boot using DHCP
        # by default, but affecting this value again causes
        # the switch to restart the DHCP procedure, which is
        # exactly what we expect.
        self.snmp.agentNetworkConfigProtocol = 3 # dhcp
    def record_current_ip_config_as_static(self):
        """Freeze the switch's current DHCP-assigned IP as a static config."""
        # if server and switches are restarted, the switches may
        # send a DHCP request before the DHCP server of the WalT server
        # is started.
        # This causes the switches to choose a default address,
        # e.g. 192.168.0.239 for Netgear switches.
        # This causes major issues because several switches
        # may get this same address.
        # Thus, the first time a switch is detected with a DHCP IP belonging
        # to the WalT network, we statically set this IP in its bootup
        # procedure.
        current_ip = str(self.snmp.agentNetworkIPAddress)
        current_netmask = str(self.snmp.agentNetworkSubnetMask)
        # Write protocol=static plus the captured address in one SNMP batch.
        with self.snmp as batch:
            batch.agentNetworkConfigProtocol = 1
            batch.agentNetworkIPAddress = current_ip
            batch.agentNetworkSubnetMask = current_netmask
"etienne.duble@imag.fr"
] | etienne.duble@imag.fr |
e2da8ecd31dc1d1ec3354ba5182d031423db4939 | 753f729f33a1b00a0a7f5c78d217cc4c609aee6f | /n13_GenericViewApiAndMixin/api/serializers.py | 325212ac2364203cfaf9ee76c32a3276784d9f4a | [] | no_license | nayan-gujju/DRF-Code | 874114a861042d558112f1a8ec95daf1356d5493 | 6fb3fdd5dde352e7b6e3a7363da0e7a3057b1ede | refs/heads/master | 2023-08-06T12:42:23.551603 | 2021-10-06T11:34:54 | 2021-10-06T11:34:54 | 404,650,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | from rest_framework import serializers
from .models import Student
class StudentSerializer(serializers.ModelSerializer):
    """DRF ModelSerializer exposing every field of the Student model."""
    class Meta:
        model = Student
        # '__all__' serializes every model field, including the primary key.
        fields = '__all__'
| [
"nayangujarati007@gmail.com"
] | nayangujarati007@gmail.com |
91e81dfa4a16fc532038b1b7a075518ec9676dee | c69c3167819efdded3cdde7783514b971a98f25a | /services/parse_file.py | 100f8922a3dd36561f1abb318ace04790ec12cf3 | [] | no_license | sehovizko/sel_parser_rashodnika | 8a03ed6a3ccb4500a227848947fbb7774c7aae4c | 3510df57256e5775a55cafaf70e790196c475e21 | refs/heads/master | 2023-07-26T02:53:31.324946 | 2021-09-07T15:58:25 | 2021-09-07T15:58:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,357 | py | import ast
import re
import pandas
from db_utils import *
async def parse(file):
    """Parse one semicolon-separated supply row from *file* into a dict.

    Expected columns: brand ("Brand / extra"), English name, Russian/local
    name (contains brand + partcode), a Python-literal list of compatible
    models, and a Python-literal list of [caption, value] option pairs.

    Returns a dict with keys: brand, partcode, name_en, name_ru,
    model_analogs, options. name_en / name_ru are None when the source
    field is empty ('nan' after pandas parsing).
    """
    file_list = pandas.read_csv(file, sep=';', header=None).values.tolist()[0]
    # Brand is everything before the first '/'.
    brand = re.sub(r'/.*', '', file_list[0]).strip()
    # Partcode: strip the brand (case-insensitively), keep the first word.
    partcode = re.sub(r'(\s.+)', '', re.sub(rf'{brand}', '', file_list[2], flags=re.IGNORECASE).strip()).strip()
    if 'nan' in str(file_list[2]):
        name_ru = None
    else:
        # Bug fix: the original passed re.I as the 4th *positional* argument
        # of re.sub, which is `count`, not `flags` — the strip was silently
        # case-sensitive. Flags must be passed by keyword.
        name_ru = re.sub(f'{brand}', '',
                         re.sub(f'{partcode}', '', str(file_list[2]), flags=re.I),
                         flags=re.I).strip()
    if 'nan' in str(file_list[1]):
        name_en = None
    else:
        # Drop leading '/' or '-', remove parenthesised notes, normalize quotes.
        name_en = re.sub(r"'", '`', re.sub(r'^/|^-|\([^)]*\)', '', str(file_list[1]))).strip()
    model_analogs = ast.literal_eval(file_list[3])
    options = ast.literal_eval(file_list[4])
    return {'brand': brand, 'partcode': partcode, 'name_en': name_en, 'name_ru': name_ru,
            'model_analogs': model_analogs, 'options': options}
async def set_option(options, code):
    """Link each [caption, value] option pair to the supply row for *code*.

    Looks up dictionary ids for both halves of the pair and records the
    association in the database; unresolved ids are only logged to stdout.
    """
    print(code, options)
    pid = get_supplies_id(code)
    if pid:
        for opt in options:
            # opt[0] = option caption, opt[1] = option value.
            dic_caption_id = get_dict_partcode_option_id(opt[0])
            dic_option_id = get_dict_partcode_option_id(opt[1])
            if dic_caption_id and dic_option_id:
                option_id = get_option_id(dic_caption_id, dic_option_id)
                link_partcode_options(option_id, pid)
            else:
                print('nod ids for', opt)
    else:
        print('no id for', code)
async def set_model_analog(models, code, brand, brands):
    """Link compatible *models* to the supply row for *code*.

    NOTE(review): the actual linking logic is currently commented out —
    this only prints HP rows for debugging.
    """
    if brand == 'HP':
        # if brand == 'HP' and code == 'CE390A':
        print(models, code, brand, brands)
    # Disabled linking logic kept for reference:
    # if brand == 'Konica Minolta':
    # b_id = brands['Konica-Minolta']
    # else:
    # b_id = brands[brand]
    # partcode_id = get_supplies_id(code)
    # if partcode_id:
    # for model in models:
    # model_id = get_model_id(b_id, model, model.replace('-', ' '))
    # print(b_id, model, model_id, partcode_id)
    # link_partcode_analog(model_id, partcode_id)
| [
"server.ares@gmail.com"
] | server.ares@gmail.com |
d0626e62ea7ce7766ea90dde59f828c2367dc570 | 008ea0c503829f33840495373ad3d60794575af3 | /PYDayByDay/Tkinter_ST/Canvas_TK/Canvas2.py | 074b7463ba665c4567fe3564e8e421627ebab75a | [] | no_license | JyHu/PYStudy | 6515bea47ca6f80e336f3b6a7a14b1159fde872f | ec0855c414237bdd7d0cb28f79a81c02ccd52d45 | refs/heads/master | 2016-08-12T19:44:06.723361 | 2016-04-11T10:38:59 | 2016-04-11T10:38:59 | 45,384,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | #coding=utf-8
__author__ = 'JinyouHU'
'''
创建一个矩形,指定画布的颜色为白色
'''
# (Module note above: create a rectangle; set the canvas background to white.)
from Tkinter import *

root = Tk()
# Create a Canvas with a white background.
cv = Canvas(root, bg='white')
# Create a rectangle with coordinates (10, 10, 110, 110).
# cv.create_rectangle(10, 10, 110, 110)
cv.create_rectangle(
    10,
    10,
    110,
    110,
    fill='red',       # interior fill color (optional)
    outline='green',  # border color (optional)
    width=5,          # border width (optional)
    dash=10,          # dashed border (optional)
    )
cv.pack()
cv.create_rectangle(120, 10, 220, 110,
                    outline='red',
                    stipple='gray12',  # fill using a stipple brush pattern
                    fill='green'
                    )
# Keep a handle to this item so it can be reconfigured later.
rt = cv.create_rectangle(10, 120, 110, 220,
                         outline='red',
                         stipple='gray12',
                         fill='green'
                         )
cv.coords(rt,(120, 120, 220, 220))  # move the recorded item to a new position
root.mainloop()
"auu.aug@gmail.com"
] | auu.aug@gmail.com |
364ddbcc20a70f33f677da22070d40aa986f898b | e864543de486f7b9bad577a5c2c3c1820bb670d9 | /zvt/recorders/eastmoney/finance/base_china_stock_finance_recorder.py | 5d09d41ee24589c37f8edec3f0813b76fca47164 | [
"MIT"
] | permissive | licq201/zvt | 9b113c4018e908e55388910023ef73956460d9b8 | 5d70f4cbcc53c7e809b6b5683441bd6d1445794b | refs/heads/master | 2021-01-25T23:04:41.714691 | 2020-02-26T09:52:36 | 2020-02-26T09:52:36 | 243,219,441 | 0 | 0 | MIT | 2020-02-26T09:11:13 | 2020-02-26T09:11:12 | null | UTF-8 | Python | false | false | 8,573 | py | # -*- coding: utf-8 -*-
import pandas as pd
from jqdatasdk import auth, query, indicator, get_fundamentals, logout
from zvdata.api import get_data
from zvdata.utils.pd_utils import index_df
from zvdata.utils.pd_utils import pd_is_not_null
from zvdata.utils.time_utils import to_time_str, to_pd_timestamp
from zvt import zvt_env
from zvt.api.api import get_finance_factor
from zvt.api.common import to_jq_report_period
from zvt.domain import FinanceFactor
from zvt.recorders.eastmoney.common import company_type_flag, get_fc, EastmoneyTimestampsDataRecorder, \
call_eastmoney_api, get_from_path_fields
from zvt.recorders.joinquant.common import to_jq_entity_id
class BaseChinaStockFinanceRecorder(EastmoneyTimestampsDataRecorder):
finance_report_type = None
data_type = 1
timestamps_fetching_url = 'https://emh5.eastmoney.com/api/CaiWuFenXi/GetCompanyReportDateList'
timestamp_list_path_fields = ['CompanyReportDateList']
timestamp_path_fields = ['ReportDate']
def __init__(self, entity_type='stock', exchanges=['sh', 'sz'], entity_ids=None, codes=None, batch_size=10,
force_update=False, sleeping_time=5, default_size=2000, real_time=False,
fix_duplicate_way='add', start_timestamp=None, end_timestamp=None, close_hour=0,
close_minute=0) -> None:
super().__init__(entity_type, exchanges, entity_ids, codes, batch_size, force_update, sleeping_time,
default_size, real_time, fix_duplicate_way, start_timestamp, end_timestamp, close_hour,
close_minute)
try:
auth(zvt_env['jq_username'], zvt_env['jq_password'])
self.fetch_jq_timestamp = True
except Exception as e:
self.fetch_jq_timestamp = False
self.logger.warning(
f'joinquant account not ok,the timestamp(publish date) for finance would be not correct', e)
def init_timestamps(self, entity):
param = {
"color": "w",
"fc": get_fc(entity),
"DataType": self.data_type
}
if self.finance_report_type == 'LiRunBiaoList' or self.finance_report_type == 'XianJinLiuLiangBiaoList':
param['ReportType'] = 1
timestamp_json_list = call_eastmoney_api(url=self.timestamps_fetching_url,
path_fields=self.timestamp_list_path_fields,
param=param)
if self.timestamp_path_fields:
timestamps = [get_from_path_fields(data, self.timestamp_path_fields) for data in timestamp_json_list]
return [to_pd_timestamp(t) for t in timestamps]
def generate_request_param(self, security_item, start, end, size, timestamps):
if len(timestamps) <= 10:
param = {
"color": "w",
"fc": get_fc(security_item),
"corpType": company_type_flag(security_item),
# 0 means get all types
"reportDateType": 0,
"endDate": '',
"latestCount": size
}
else:
param = {
"color": "w",
"fc": get_fc(security_item),
"corpType": company_type_flag(security_item),
# 0 means get all types
"reportDateType": 0,
"endDate": to_time_str(timestamps[10]),
"latestCount": 10
}
if self.finance_report_type == 'LiRunBiaoList' or self.finance_report_type == 'XianJinLiuLiangBiaoList':
param['reportType'] = 1
return param
def generate_path_fields(self, security_item):
comp_type = company_type_flag(security_item)
if comp_type == "3":
return ['{}_YinHang'.format(self.finance_report_type)]
elif comp_type == "2":
return ['{}_BaoXian'.format(self.finance_report_type)]
elif comp_type == "1":
return ['{}_QuanShang'.format(self.finance_report_type)]
elif comp_type == "4":
return ['{}_QiYe'.format(self.finance_report_type)]
def record(self, entity, start, end, size, timestamps):
# different with the default timestamps handling
param = self.generate_request_param(entity, start, end, size, timestamps)
self.logger.info('request param:{}'.format(param))
return self.api_wrapper.request(url=self.url, param=param, method=self.request_method,
path_fields=self.generate_path_fields(entity))
def get_original_time_field(self):
return 'ReportDate'
def fill_timestamp_with_jq(self, security_item, the_data):
# get report published date from jq
try:
q = query(
indicator.pubDate
).filter(
indicator.code == to_jq_entity_id(security_item),
)
df = get_fundamentals(q, statDate=to_jq_report_period(the_data.report_date))
if not df.empty and pd.isna(df).empty:
the_data.timestamp = to_pd_timestamp(df['pubDate'][0])
self.logger.info(
'jq fill {} {} timestamp:{} for report_date:{}'.format(self.data_schema, security_item.id,
the_data.timestamp,
the_data.report_date))
self.session.commit()
except Exception as e:
self.logger.error(e)
def on_finish_entity(self, entity):
    """After recording one entity, back-fill report published dates.

    Rows whose ``timestamp`` still equals ``report_date`` have not been
    given a real published date yet.  For the FinanceFactor schema the
    date is fetched from joinquant directly; for other schemas it is
    first copied from already-filled FinanceFactor rows, falling back to
    joinquant for any report date not found there.
    """
    super().on_finish_entity(entity)

    if not self.fetch_jq_timestamp:
        return

    # fill the timestamp for report published date
    the_data_list = get_data(data_schema=self.data_schema,
                             provider=self.provider,
                             entity_id=entity.id,
                             order=self.data_schema.timestamp.asc(),
                             return_type='domain',
                             session=self.session,
                             # timestamp == report_date marks "not yet filled"
                             filters=[self.data_schema.timestamp == self.data_schema.report_date,
                                      self.data_schema.timestamp >= to_pd_timestamp('2005-01-01')])
    if the_data_list:
        if self.data_schema == FinanceFactor:
            # FinanceFactor is the reference schema: always ask joinquant
            for the_data in the_data_list:
                self.fill_timestamp_with_jq(entity, the_data)
        else:
            # Reuse published dates already resolved on FinanceFactor rows
            # covering the same report-date range.
            df = get_finance_factor(entity_id=entity.id,
                                    columns=[FinanceFactor.timestamp, FinanceFactor.report_date, FinanceFactor.id],
                                    filters=[FinanceFactor.timestamp != FinanceFactor.report_date,
                                             FinanceFactor.timestamp >= to_pd_timestamp('2005-01-01'),
                                             FinanceFactor.report_date >= the_data_list[0].report_date,
                                             FinanceFactor.report_date <= the_data_list[-1].report_date, ])

            if pd_is_not_null(df):
                # index by report_date for O(1) lookups below
                index_df(df, index='report_date')

            for the_data in the_data_list:
                if (df is not None) and (not df.empty) and the_data.report_date in df.index:
                    the_data.timestamp = df.at[the_data.report_date, 'timestamp']
                    self.logger.info(
                        'db fill {} {} timestamp:{} for report_date:{}'.format(self.data_schema, entity.id,
                                                                               the_data.timestamp,
                                                                               the_data.report_date))
                    self.session.commit()
                else:
                    # self.logger.info(
                    #     'waiting jq fill {} {} timestamp:{} for report_date:{}'.format(self.data_schema,
                    #                                                                    security_item.id,
                    #                                                                    the_data.timestamp,
                    #                                                                    the_data.report_date))

                    # not resolvable from the DB: fall back to joinquant
                    self.fill_timestamp_with_jq(entity, the_data)
def on_finish(self):
    """Release the joinquant session once the whole recording run ends."""
    super().on_finish()
    logout()
| [
"5533061@qq.com"
] | 5533061@qq.com |
141c446c73f075c9846edd09f6e366d5cac0b2fb | da5ef82554c6c0413193b7c99192edd70fed58dd | /mozdns/mozbind/serial_utils.py | 1f1396c4089231f75470b24922d12641a2f23a6e | [] | no_license | rtucker-mozilla/mozilla_inventory | d643c7713c65aa870e732e18aaf19ce677e277b7 | bf9154b0d77705d8c0fe1a9a35ce9c1bd60fcbea | refs/heads/master | 2020-12-24T17:17:37.621418 | 2013-04-11T10:39:41 | 2013-04-11T10:39:41 | 2,709,399 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,226 | py | import re
import os
def get_serial(file_):
    """Return the serial number of the zone stored in *file_*.

    An empty string is returned when the file does not exist (or, via
    the parser, when no serial can be located).

    :param file_: path of the file containing the SOA record
    :type file_: str
    """
    if not os.path.exists(file_):
        return ''
    with open(file_, 'r') as zone_fd:
        return _str_get_serial(zone_fd)
def _str_get_serial(text):
    """Read in a zone file and find the serial number.

    Scans line by line for the SOA record, lexing each line by hand, and
    returns the first number after the opening parenthesis (the serial).
    Returns '' when the field found is not numeric.

    :param text: the zone file.
    :type text: A file-ish object (StringIO or actual file descriptor)
    :returns serial: The serial number
    :serial: str
    """
    # We already know it's in valid format.
    isSOA = False
    done = False
    for raw_line in text.readlines():
        if done:
            break
        line = raw_line.strip()
        ll = LexLine(line)
        if isSOA:
            # The SOA line ended with '(' and no serial; the first word
            # of this continuation line should be the serial.
            serial = _lex_word(ll)
            if serial.isdigit():
                return serial
            else:
                return ''
        # Skip blank lines, $-directives and comments.
        if not line or line[0] == '$' or line[0] == ';':
            continue
        # name ttl class rr name-server email-addr (sn ref ret ex min)
        # 1    2   3     4  5           6           7  8   9   10 11
        # Everything up through 6 needs to be on the same line.
        _lex_word(ll)  # name
        _lex_ws(ll)
        c = ll.pop()
        if c.isdigit():
            # optional TTL field is present
            _lex_word(ll)  # ttl
            _lex_ws(ll)
        else:
            ll.unpop()
        _lex_word(ll)  # class
        _lex_ws(ll)
        rr = _lex_word(ll)
        if rr.upper() != 'SOA':
            continue  # It's not an soa, keep going.
        isSOA = True
        _lex_ws(ll)
        _lex_word(ll)  # ns
        _lex_ws(ll)
        email = _lex_word(ll)  # email
        # The '(' may be glued to the email or stand alone after it.
        if email[-1:] == '(':
            _lex_ws(ll)
        else:
            _lex_ws(ll)
            next = ll.peek()
            if next == '(':
                ll.pop()
        # We are into the numbers.
        _lex_ws(ll)
        serial = _lex_word(ll)
        if not serial:
            # The serial must be on the next line
            continue
        if serial.isdigit():
            return serial
        else:
            return ''
def _lex_word(ll):
word = ''
while True:
# Read in name
c = ll.pop()
if c is None:
if word:
return word
else:
return None
if re.match('\s', c):
ll.unpop()
break
else:
word = word + c
return word
def _lex_ws(ll):
while True:
# Read in name
c = ll.pop()
if c is None:
return
if re.match('\s', c):
continue
else:
ll.unpop()
break
return
class LexLine(object):
    """A single line of text with one-character pull/push-back access.

    ``pop`` yields the next character (or None at end of line), ``unpop``
    rewinds one character, and ``peek`` looks at the next character
    without consuming it.
    """

    def __init__(self, line):
        self.line = line
        self.length = len(line)
        self.pos = 0

    def pop(self):
        """Return the next character, or None when the line is exhausted."""
        if self.pos >= self.length:
            return None
        ch = self.line[self.pos]
        self.pos += 1
        return ch

    def unpop(self):
        """Step back one character (no-op at the start of the line)."""
        self.pos = max(0, self.pos - 1)

    def peek(self):
        """Return the next character without advancing the position."""
        return self.line[self.pos]
| [
"uberj@onid.orst.edu"
] | uberj@onid.orst.edu |
ec31feb5138d44c92d5c755e9f0d84a7bf08bd59 | 04c824bb23b3c0ee378a5e915ab9467d5a4d4de7 | /metasub_utils/metadata/metasub_utils/metadata/metadata.py | 6ae55ec9f9c539cf553f957044cf8576e4db09f8 | [
"MIT"
] | permissive | MetaSUB/metasub_utils | a37a8eb79fabd4a922617744e91c9e3e6df2b2f8 | c52c5dde816d710db5ac8dc6f8804bb795a992e4 | refs/heads/master | 2023-01-06T11:14:20.095512 | 2020-02-24T14:23:08 | 2020-02-24T14:23:08 | 143,024,096 | 9 | 2 | MIT | 2022-12-26T20:44:28 | 2018-07-31T14:16:42 | Python | UTF-8 | Python | false | false | 1,540 | py | """Functions for handling metadata."""
import pandas as pd
from .constants import UPLOADABLE_TABLE_URL, COMPLETE_TABLE_URL, CANONICAL_CITIES_URL, IDS
def normalize_sample_name(name_in, default=None, tbl=None):
    """Resolve *name_in* (any known ID type) to the canonical sample name.

    Tries each ID column listed in ``IDS`` against the metadata table and
    returns the index value (canonical name) of the first matching row.
    Falls back to *default* when no ID column contains *name_in*.

    :param tbl: pre-loaded metadata table; fetched remotely when None.
    """
    tbl = get_complete_metadata() if tbl is None else tbl
    for id_type in IDS:
        # exact-match lookup of name_in in this ID column
        mytbl = tbl.query(f'{id_type} == "{name_in}"')
        if mytbl.shape[0]:
            return list(mytbl.index)[0]
    return default
def get_complete_metadata(uploadable=False):
    """Fetch the MetaSUB metadata table as a pandas dataframe.

    The *uploadable* flag selects the trimmed table meant for uploads
    instead of the complete one; both are read as all-string columns
    with the first column as the index.
    """
    url = UPLOADABLE_TABLE_URL if uploadable else COMPLETE_TABLE_URL
    return pd.read_csv(url, dtype=str, index_col=0)
def get_canonical_city_names(lower=False):
    """Return the set of canonical city names, optionally lower-cased."""
    city_tbl = pd.read_csv(CANONICAL_CITIES_URL, dtype=str)
    # DataFrame.ix was deprecated and then removed from pandas; iloc keeps
    # the original "first column" positional semantics.
    city_names = set(city_tbl.iloc[:, 0])
    if lower:
        city_names = {city_name.lower() for city_name in city_names}
    return city_names
def get_samples_from_city(city_name, project_name=None):
    """List sample names, optionally filtered by city and/or project.

    A falsy *city_name* disables the city filter, so all samples are
    considered.  City names are compared lower-case and project names
    upper-case, matching how the metadata table stores them.
    """
    selection = get_complete_metadata()
    if city_name:
        selection = selection[selection['city'] == city_name.lower()]
    if project_name:
        selection = selection[selection['project'] == project_name.upper()]
    return list(selection.index)
| [
"dcdanko@gmail.com"
] | dcdanko@gmail.com |
439a07cf5fa1351ea4f1de0562d17edc972dd626 | 31e8b777b8b6da1ef8d172d2c7b5271a892e7dc9 | /frappe/website/doctype/blog_settings/test_blog_settings.py | b7659d58a4901a70c3bcc5a8d2260f7f8053950c | [
"MIT"
] | permissive | Anurag810/frappe | a4d2f6f3a14cc600cced7146a02303cd1cb347f0 | 620cad18d60f090f5f9c13a5eefb56e86615de06 | refs/heads/develop | 2021-09-28T03:57:02.456172 | 2021-09-07T06:05:46 | 2021-09-07T06:05:46 | 157,325,015 | 5 | 0 | MIT | 2019-09-11T09:20:20 | 2018-11-13T05:25:01 | Python | UTF-8 | Python | false | false | 193 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies and Contributors
# License: MIT. See LICENSE
# import frappe
import unittest
class TestBlogSettings(unittest.TestCase):
    # Placeholder suite: Blog Settings is a single (settings) DocType and
    # currently has no custom server-side behaviour to exercise.
    pass
| [
"scm.mymail@gmail.com"
] | scm.mymail@gmail.com |
5e370b0535189e817d1e7ce86559aebc8a85e11f | cc0e5613f1532e9922269530057970eb4f320a1a | /tests/test_concurrency/test_mainloopscheduler/py3_asyncioscheduler.py | 096256bf95b6a49bfd0616a63e23d81f3913478b | [
"Apache-2.0"
] | permissive | Huskyeder/RxPY | 9e353e20f850ce8e031bacafa91187ff2d0d83e4 | 8060b9ef79d2fe6654c0265860af6e8829524131 | refs/heads/master | 2021-01-15T10:18:31.831559 | 2015-04-15T04:34:05 | 2015-04-15T04:34:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,028 | py | try:
import asyncio
except ImportError:
raise SkipTest("asyncio not available")
import unittest
from datetime import datetime, timedelta
from time import sleep
from rx.concurrency import AsyncIOScheduler
class TestAsyncIOScheduler(unittest.TestCase):
    """Exercises AsyncIOScheduler against a real asyncio event loop.

    The coroutines use ``async def``/``await``: the old
    ``@asyncio.coroutine`` decorator and ``yield from`` were removed in
    Python 3.11, and the ``loop=`` argument of ``asyncio.sleep`` was
    removed in Python 3.10.
    """

    def test_asyncio_schedule_now(self):
        # now() should track the wall clock closely.
        loop = asyncio.get_event_loop()
        scheduler = AsyncIOScheduler(loop)
        res = scheduler.now() - datetime.now()
        assert res < timedelta(seconds=1)

    def test_asyncio_schedule_action(self):
        # An immediately scheduled action runs on the loop.
        loop = asyncio.get_event_loop()

        async def go():
            scheduler = AsyncIOScheduler(loop)
            ran = False

            def action(scheduler, state):
                nonlocal ran
                ran = True

            scheduler.schedule(action)
            # Give the loop a chance to run the scheduled callback.
            await asyncio.sleep(0.1)
            assert ran

        loop.run_until_complete(go())

    def test_asyncio_schedule_action_due(self):
        # A relative schedule fires no earlier than its due time.
        loop = asyncio.get_event_loop()

        async def go():
            scheduler = AsyncIOScheduler(loop)
            starttime = loop.time()
            endtime = None

            def action(scheduler, state):
                nonlocal endtime
                endtime = loop.time()

            scheduler.schedule_relative(0.2, action)
            await asyncio.sleep(0.3)
            # Allow a little scheduling jitter below the nominal 0.2s.
            diff = endtime - starttime
            assert diff > 0.18

        loop.run_until_complete(go())

    def test_asyncio_schedule_action_cancel(self):
        # Disposing before the due time must prevent the action.
        loop = asyncio.get_event_loop()

        async def go():
            ran = False
            scheduler = AsyncIOScheduler(loop)

            def action(scheduler, state):
                nonlocal ran
                ran = True

            d = scheduler.schedule_relative(0.01, action)
            d.dispose()
            await asyncio.sleep(0.1)
            assert not ran

        loop.run_until_complete(go())
| [
"dag@brattli.net"
] | dag@brattli.net |
c537aa38ea4535afc9f71643e2aa07bf33963b72 | c67831f476cb530fc0c26e0bf4258ce18e986749 | /module_intent/control/serializers.py | 8f7e50f0f76fbee43f2d8511cb68a59bfd354ccf | [
"MIT"
] | permissive | cz-qq/bk-chatbot | a3ce4b86452b3de0ff35430c1c85b91d6b23a3e6 | da37fb2197142eae32158cdb5c2b658100133fff | refs/heads/master | 2023-06-05T05:48:22.083008 | 2021-06-15T10:21:30 | 2021-06-15T10:21:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,846 | py | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making
蓝鲸智云PaaS平台社区版 (BlueKing PaaSCommunity Edition) available.
Copyright (C) 2017-2018 THL A29 Limited,
a Tencent company. All rights reserved.
Licensed under the MIT License (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from rest_framework.fields import JSONField
from common.constants import CHAT_BOT_TYPES
from common.constants import TASK_PLATFORM_CHOICES
from module_intent.models import Bot
from module_intent.models import ExecutionLog
from module_intent.models import Intent
from module_intent.models import Task
from module_intent.models import Utterances
class BotSerializer(serializers.ModelSerializer):
    """Serializer for chat-bot binding records (bot <-> business)."""

    biz_id = serializers.IntegerField(required=True, label=_("业务ID"))
    biz_name = serializers.CharField(required=True, label=_("业务名称"))
    # NOTE(review): the three labels below repeat "业务名称" (business name);
    # they presumably should describe the bot id/name/type instead --
    # confirm with the original author before changing user-facing text.
    bot_id = serializers.CharField(required=True, label=_("业务名称"))
    bot_name = serializers.CharField(required=True, label=_("业务名称"))
    bot_type = serializers.ChoiceField(
        required=True,
        label=_("业务名称"),
        choices=CHAT_BOT_TYPES,
    )

    class Meta:
        model = Bot
        fields = (
            "id",
            "biz_id",
            "biz_name",
            "bot_id",
            "bot_name",
            "bot_type",
            "created_by",
            "created_at",
            "updated_at",
        )
class IntentSerializer(serializers.ModelSerializer):
    """Serializer for intent (skill) records, exposing all model fields."""

    biz_id = serializers.IntegerField(required=True, label=_("业务ID"))
    index_id = serializers.IntegerField(required=True, label=_("索引ID"))
    intent_name = serializers.CharField(required=True, label=_("技能名称"))
    status = serializers.BooleanField(required=True, label=_("意图状态"))
    # JSON lists of users/groups allowed to trigger this intent
    available_user = JSONField(required=True, label=_("可执行用户"))
    available_group = JSONField(required=True, label=_("可执行群组"))
    is_commit = serializers.BooleanField(required=True, label=_("执行确认"))

    class Meta:
        model = Intent
        fields = "__all__"
class UtterancesSerializer(serializers.ModelSerializer):
    """Serializer for the training utterances attached to an intent."""

    biz_id = serializers.IntegerField(required=True, label=_("业务ID"))
    index_id = serializers.IntegerField(required=True, label=_("索引ID"))
    # JSON list of utterance strings
    content = JSONField(required=True, label=_("语料列表"))

    class Meta:
        model = Utterances
        fields = "__all__"
class TaskSerializer(serializers.ModelSerializer):
    """Serializer for the platform task bound to an intent."""

    biz_id = serializers.IntegerField(required=True, label=_("业务ID"))
    index_id = serializers.IntegerField(required=True, label=_("索引ID"))
    # which automation platform the task belongs to
    platform = serializers.ChoiceField(
        required=True,
        label=_("平台名称"),
        choices=TASK_PLATFORM_CHOICES,
    )
    task_id = serializers.CharField(required=True, label=_("任务ID"))
    # JSON payloads describing the task graph and its inputs
    activities = JSONField(required=True, label=_("节点信息"))
    slots = JSONField(required=True, label=_("槽位信息"))
    source = JSONField(required=True, label=_("任务元数据"))
    script = JSONField(required=True, label=_("执行脚本信息"))

    class Meta:
        model = Task
        fields = "__all__"
class ExecutionLogSerializer(serializers.ModelSerializer):
    """Plain pass-through serializer for execution log records."""

    class Meta:
        model = ExecutionLog
        fields = "__all__"
| [
"123@qq.com"
] | 123@qq.com |
8c8f5f6ce7c4d6b47f77b25521a3c866fb059012 | e74e89592d8a3b1a0b465a7b1595708b224362d2 | /pset_classes/dogs/p4.py | 8919df926c4f0fbbe8a11a8e4b7b3ff001f07552 | [
"MIT"
] | permissive | mottaquikarim/pydev-psets | 016f60f1e9d9a534bd9a66ecde8eb412beee37d1 | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | refs/heads/master | 2023-01-10T11:15:57.041287 | 2021-06-07T23:38:34 | 2021-06-07T23:38:34 | 178,547,933 | 5 | 2 | MIT | 2023-01-03T22:28:27 | 2019-03-30T11:09:08 | Jupyter Notebook | UTF-8 | Python | false | false | 638 | py | """
Dogs IV - Tricks (CHALLENGE!)
"""
# Many dogs know how to do common tricks or follow common commands. You could create methods for each trick/command in the Dog parent class, but the problem is that not all dogs know all tricks/commands.
# However, it would be inefficient to define a custom set of instance methods for tricks/commands every time you instantiate a unique Collie (or SiberianHuskey or Pekingese etc.).
# Find an efficient way to specify which tricks each unique dog knows and to call them. You can use "roll_over", "fetch", "shake_hands", and "spin". Secondly, find a way to teach a dog a new trick from this set.
| [
"jgarreffa112@gmail.com"
] | jgarreffa112@gmail.com |
ebfbac8ad5f1f89f5043471096bbff8170a8ac5e | 13556b5ff9d000b707e089f0c1be5451f20fe3fb | /stocks/settings.py | 1dec11126b357d8893492c6129e22229600f1195 | [] | no_license | LeoKnox/Django_Stock | 35a693bb1765e95a6c32b0d8ce622f226bd25ae8 | ac0e93af2be3047d1212909a3587c59b4be81dca | refs/heads/master | 2022-12-08T12:34:34.588551 | 2020-08-30T22:26:35 | 2020-08-30T22:26:35 | 290,670,535 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,295 | py | """
Django settings for DjangoStock project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Read the key from the environment when available so the real production
# key never has to live in source control; the literal below is a
# development-only fallback that preserves the previous behaviour.
SECRET_KEY = os.environ.get(
    'DJANGO_SECRET_KEY',
    '4c$zue)#x8%bhz3)6+5z$1roh%=nj0h7c0+h61yx4t1gl(vv_c'
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'stocks',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'DjangoStock.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'DjangoStock.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
] | [
"noreply@github.com"
] | LeoKnox.noreply@github.com |
b89ba760610de6bfde2b410bd653af63fb1cb307 | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /netex/models/charging_moment_ref_structure.py | 50e10eb3a30889d5ce8a8ee36e95e4a2106992f6 | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 229 | py | from dataclasses import dataclass
from .type_of_value_ref_structure import TypeOfValueRefStructure
__NAMESPACE__ = "http://www.netex.org.uk/netex"
@dataclass
class ChargingMomentRefStructure(TypeOfValueRefStructure):
    """Generated NeTEx binding: reference to a charging-moment value.

    Pure marker subclass -- all fields come from TypeOfValueRefStructure.
    """
    pass
| [
"chris@komposta.net"
] | chris@komposta.net |
44f9bbd957a4f18db099ffbe535fdb1922751935 | 2af94f8a7609d47fdcea28a2132c4f8bacb103e3 | /src/devices/device_base.py | e3c07e88e1cde6d8dc945a5a253097d7bee18fbd | [] | no_license | bernhara/DigiGateway4Raph | 685527723f0b306f387233c78d27fe9d78717c38 | f36ba29ef883d70f94b8609ff734b5dcde786c66 | refs/heads/master | 2020-07-05T19:56:27.027547 | 2019-08-19T06:10:46 | 2019-08-19T06:10:46 | 202,756,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,192 | py | ############################################################################
# #
# Copyright (c)2008-2012, Digi International (Digi). All Rights Reserved. #
# #
# Permission to use, copy, modify, and distribute this software and its #
# documentation, without fee and without a signed licensing agreement, is #
# hereby granted, provided that the software is used on Digi products only #
# and that the software contain this copyright notice, and the following #
# two paragraphs appear in all copies, modifications, and distributions as #
# well. Contact Product Management, Digi International, Inc., 11001 Bren #
# Road East, Minnetonka, MN, +1 952-912-3444, for commercial licensing #
# opportunities for non-Digi products. #
# #
# DIGI SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED #
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A #
# PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, #
# PROVIDED HEREUNDER IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND. #
# DIGI HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, #
# ENHANCEMENTS, OR MODIFICATIONS. #
# #
# IN NO EVENT SHALL DIGI BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, #
# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, #
# ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF #
# DIGI HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. #
# #
############################################################################
# imports
from core.tracing import get_tracer
from settings.settings_base import SettingsBase, Setting
from channels.channel_source_device_property import ChannelSourceDeviceProperty
import traceback
# constants
# exception classes
class DeviceBasePropertyNotFound(KeyError):
    """Raised when a named channel property does not exist on a device."""
    pass
# interface functions
# classes
class DeviceBase(SettingsBase):
"""
Base class that any device driver must derive from.
The :class:`DeviceBase` class is extended in order to create new
DIA device drivers. :class:`DeviceBase` defines several properties
and methods for use in DIA devices including a name for the
device, a set of property channels that can be populated with
information about the device as well as the methods for
interacting with those channels, and virtual *start* and *stop*
methods that must be implemented in each driver.
Parameters:
* *name*: the name of the device
* *settings*: configures device settings. Used to initialize
:class:`~settings.settings_base.SettingsBase`
* *core_services*: The system
:class:`~core.core_services.CoreServices` object.
"""
DEF_TRACE = '' # None - no change
    def __init__(self, name, core_services, settings, properties):
        """Initialize the driver: settings, property channels and tracing.

        :param name: the device instance name
        :param core_services: the system CoreServices object
        :param settings: settings list merged into SettingsBase
        :param properties: iterable of ChannelSourceDeviceProperty objects
            (may be None)
        """
        # save these for use of sub-classed device drivers
        self._name = name
        self._core = core_services
        self._tracer = get_tracer(name)

        ## local variables

        # These are to be used by 'health monitoring' functions - all drivers
        # should correctly manage these (or leave set to None to mark as N/A)
        #
        # use self.get_time_of_last_data() and
        # self.set_time_of_last_data() to access!
        self.__last_data_timestamp = None

        # use self.get_data_update_rate_seconds() and
        # self.set_data_update_rate_seconds() to access!
        self.__data_update_rate = None

        # cache the channel DB reference
        self._channel_db = None

        # Initialize settings:

        ## Settings Table Definition:
        settings_list = [
            Setting(
                name='trace', type=str, required=False,
                default_value=self.DEF_TRACE),
        ]

        # Add our settings_list entries into the settings passed to us.
        settings = self.merge_settings(settings, settings_list)
        self.__settings = settings

        SettingsBase.__init__(self, binding=("devices", (name,), "settings"),
                                    setting_defs=settings)

        # Initialize properties:
        self.__properties = { }
        if properties is not None:
            for property in properties:
                self.add_property(property)

        # pre_start - check if special trace level requested
        trace = SettingsBase.get_setting(self, "trace")
        try:
            self._tracer.set_level(trace)
        # NOTE(review): bare except also swallows SystemExit and
        # KeyboardInterrupt; 'except Exception:' would be safer here.
        except:
            self._tracer.warning("Ignoring bad trace level \'%s\' for this device", trace)

        self._tracer.calls("DeviceBase.__init__()")
# def __del__(self):
# channel_db = \
# self._core.get_service("channel_manager").channel_database_get()
# # Walk the pending registry, if this device is in there, remove it.
# try:
# for tmp in self._settings_global_pending_registry['devices']['instance_list']:
# if tmp['name'] == self._name:
# try:
    def apply_settings(self):
        """\
        Called when new configuration settings are available.

        Must return tuple of three dictionaries: a dictionary of
        accepted settings, a dictionary of rejected settings,
        and a dictionary of required settings that were not found.
        """
        self._tracer.calls("DeviceBase.apply_settings()")

        SettingsBase.merge_settings(self)
        accepted, rejected, not_found = SettingsBase.verify_settings(self)

        if len(rejected) or len(not_found):
            # there were problems with settings, terminate early:
            # (nothing is committed when any setting is bad or missing)
            return (accepted, rejected, not_found)

        SettingsBase.commit_settings(self, accepted)

        return (accepted, rejected, not_found)
    def start(self):
        """
        Start the device driver.  Returns bool.

        Base implementation only traces the call and reports success;
        concrete drivers override this to acquire their resources.
        """
        self._tracer.calls("DeviceBase.start()")
        return True
    def pre_start(self):
        """
        Hook invoked at the beginning of start(); the normal
        DeviceBase.start() is called at the end.  Returns bool.
        """
        self._tracer.calls("DeviceBase.pre_start()")
        return True
    def stop(self):
        """
        Stop the device driver.  Returns bool.

        Drops references to settings, properties and core services so
        they can be garbage collected during shutdown.
        """
        self._tracer.calls("DeviceBase.stop()")
        self.__settings = None
        self.__properties = None
        self._name = None
        self._core = None
        self._channel_db = None
        ## leave self._tracer, deleting here is problematic during shutdown
        return True
## These functions are inherited by derived classes and need not be changed:
    def get_core_services(self):
        """
        Returns the core_services handle registered for this device.
        """
        return self._core
    def get_name(self):
        """
        Returns the name of the device.
        """
        return self._name
def get_channel_database(self):
"""
Cache and return the name of the channel database.
"""
if self._channel_db is None:
# cache the channel DB reference
self._channel_db = \
self._core.get_service("channel_manager").channel_database_get()
return self._channel_db
def __get_property_channel(self, name):
"""
Returns channel designated by property *name*.
"""
channel_db = self.get_channel_database()
channel_db.channel_get(self._name + '.' + name)
if name not in self.__properties:
raise DeviceBasePropertyNotFound, \
"channel device property '%s' not found." % (name)
return self.__properties[name]
def add_property(self, channel_source_device_property):
"""
Adds a channel to the set of device properties.
"""
channel_db = self.get_channel_database()
channel_name = "%s.%s" % \
(self._name, channel_source_device_property.name)
channel = channel_db.channel_add(
channel_name,
channel_source_device_property)
self.__properties[channel_source_device_property.name] = channel
return channel
def property_get(self, name):
"""
Returns the current :class:`~samples.sample.Sample` specified
by *name* from the devices property list.
"""
channel = self.__get_property_channel(name)
return channel.producer_get()
def property_set(self, name, sample):
"""
Sets property specified by the string *name* to the
:class:`~samples.sample.Sample` object *sample* and returns
that value.
"""
channel = self.__get_property_channel(name)
return channel.producer_set(sample)
def property_exists(self, name):
"""
Determines if a property specified by *name* exists.
"""
if name in self.__properties:
return True
return False
def property_list(self):
"""
Returns a list of all properties for the device.
"""
return [name for name in self.__properties]
def remove_all_properties(self):
"""
Removes all properties from the set of device properties.
"""
channel_db = self.get_channel_database()
for chan in self.__properties:
channel_name = "%s.%s" % (self._name, chan)
chan_obj = channel_db.channel_remove(channel_name)
if chan_obj:
del chan_obj
self.__properties = { }
def remove_one_property(self, chan):
"""
Removes one named property from the set of device properties.
"""
channel_db = self.get_channel_database()
channel_name = "%s.%s" % (self._name, chan)
try:
chan_obj = channel_db.channel_remove(channel_name)
if chan_obj:
del chan_obj
self.__properties.pop(chan)
except:
self._tracer.debug(traceback.format_exc())
pass
return
    def get_time_of_last_data(self):
        """Get the time of last data update, in time.time() format.

        Return is None if the device does not support this feature.
        """
        return self.__last_data_timestamp
    def set_time_of_last_data(self, t=None):
        """Update the time of last data update.

        :param t: timestamp in time.time() format; defaults to "now".
        """
        if t is None:
            # NOTE(review): 'digitime' is not imported in this module, so
            # calling this without an explicit t raises NameError unless
            # the platform injects it -- confirm and add the import.
            t = digitime.time()
        self.__last_data_timestamp = t
    def get_data_update_rate_seconds(self):
        """Get the expected data refresh rate (in seconds). This
        is used by various routines to monitor device health.

        Return is None if the device does not support this feature.
        """
        return self.__data_update_rate
    def set_data_update_rate_seconds(self, rate):
        """Update the expected data refresh rate (in seconds)."""
        self.__data_update_rate = rate
    def merge_settings(self, orig, addin):
        """Merge *addin* settings into *orig*, keeping originals on clash.

        Returns the combined settings list.
        """
        # safely add-in settings to those from derived classes
        #
        # NOTE: If a setting with the same name is found,
        #       save the original and discard the new/add-in one
        if orig is None or len(orig) == 0:
            # then there are no original-class settings
            return addin

        for add1 in addin:
            # for each new setting
            use = True
            for orig1 in orig:
                # compare to those from original/derived class
                if orig1.name == add1.name:
                    # then ignore new setting, use original/derived classes
                    try:
                        self._tracer.warning("Discard Duplicate Setting: %s", add1.name)
                    except AttributeError:
                        # tracer may not yet be initialized
                        pass
                    use = False
                    break

            if use: # else append new setting to derived classes
                orig.append(add1)

        return orig
def merge_properties(self, orig, addin):
# safely add-in properties to those from from derived classes
if orig is None or len(orig) == 0:
# then there are no original/derived-class settings
orig = addin
else:
orig.extend(addin)
return orig
# internal functions & classes
| [
"ORBA6563@S-ORBA65630.rd.francetelecom.fr"
] | ORBA6563@S-ORBA65630.rd.francetelecom.fr |
d63b165689c63f2123a8b40265384ae8db8134c2 | c43a113f55687ccb38591e42ce729b6de87cc244 | /every_election/apps/elections/migrations/0046_update_status.py | 365892e62950a09e62dd7ba56d668dc5994cafe3 | [] | permissive | DemocracyClub/EveryElection | 9428d00bf725e02c21acd60c7125f6704fcf998a | cbcedc2b236a9287c8272f2596aae3f7a03cf45c | refs/heads/master | 2023-08-23T23:41:13.664059 | 2023-08-21T15:35:03 | 2023-08-21T15:35:03 | 70,236,537 | 11 | 12 | BSD-3-Clause | 2023-09-07T07:54:19 | 2016-10-07T10:22:20 | Python | UTF-8 | Python | false | false | 546 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("elections", "0045_auto_20181001_1437")]
operations = [
# assume all elections that already exist are approved
migrations.RunSQL(
"""
UPDATE elections_election SET suggested_status='approved'
""",
reverse_sql="""
UPDATE elections_election SET suggested_status='suggested'
""",
)
]
| [
"chris.shaw480@gmail.com"
] | chris.shaw480@gmail.com |
069b58680800dd92bb6b403794b834cb87edf8c4 | ec59f44931527284f872de0f313499046cbb602e | /source-builder/sb/bootstrap.py | e56612bb9fb30bf754f7b75c0a1f0bbc18cc7857 | [
"ISC"
] | permissive | KinseyMoore/rtems-source-builder | 2c4e8d8f5677eafc4a26010763ea0f288cf40bd2 | 9c825f0b9a4eff4f87d22e12d3c94072712c3918 | refs/heads/master | 2020-04-30T09:34:56.236727 | 2019-03-14T18:19:28 | 2019-03-14T18:44:36 | 176,752,287 | 0 | 0 | NOASSERTION | 2019-03-20T14:30:36 | 2019-03-20T14:30:36 | null | UTF-8 | Python | false | false | 9,651 | py | #
# RTEMS Tools Project (http://www.rtems.org/)
# Copyright 2013-2016 Chris Johns (chrisj@rtems.org)
# All rights reserved.
#
# This file is part of the RTEMS Tools package in 'rtems-tools'.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
from __future__ import print_function
import datetime
import operator
import os
import re
import sys
import threading
import time
import error
import log
import options
import path
import version
def _collect(path_, file):
    """Walk *path_* and return shell-form paths of every file named *file*."""
    found = []
    for root, dirs, files in os.walk(path.host(path_), topdown = True):
        found += [path.shell(path.join(root, f)) for f in files if f == file]
    return found
def _grep(file, pattern):
    """Return True if any line of *file* matches *pattern* (anchored at
    the start of the line, like re.match)."""
    rege = re.compile(pattern)
    try:
        # 'with' guarantees the handle is closed even if readlines raises,
        # unlike the original open/close pair.
        with open(path.host(file), 'r') as f:
            return any(rege.match(l) is not None for l in f.readlines())
    except IOError as err:
        raise error.general('reading: %s' % (file))
class command:
    """Run a shell command on a background thread and capture the result.

    Output and exit code are recorded on the instance; any exception raised
    inside the thread is stashed in self.result so the caller can re-raise
    it later via reraise().
    """
    def __init__(self, cmd, cwd):
        self.exit_code = 0
        self.thread = None
        self.output = None
        self.cmd = cmd
        self.cwd = cwd
        self.result = None
    def runner(self):
        # Thread body: execute self.cmd in self.cwd, recording output,
        # exit code and timing.
        import subprocess
        #
        # Support Python 2.6
        #
        if "check_output" not in dir(subprocess):
            # Backport of subprocess.check_output for Python 2.6.
            def f(*popenargs, **kwargs):
                if 'stdout' in kwargs:
                    raise ValueError('stdout argument not allowed, it will be overridden.')
                process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
                output, unused_err = process.communicate()
                retcode = process.poll()
                if retcode:
                    cmd = kwargs.get("args")
                    if cmd is None:
                        cmd = popenargs[0]
                    raise subprocess.CalledProcessError(retcode, cmd)
                return output
            subprocess.check_output = f
        self.start_time = datetime.datetime.now()
        self.exit_code = 0
        try:
            try:
                if os.name == 'nt':
                    # On Windows the command is run through an 'sh' shell.
                    cmd = ['sh', '-c'] + self.cmd
                else:
                    cmd = self.cmd
                self.output = subprocess.check_output(cmd, cwd = path.host(self.cwd))
            except subprocess.CalledProcessError as cpe:
                # A non-zero exit is not fatal here; post-processing decides.
                self.exit_code = cpe.returncode
                self.output = cpe.output
            except OSError as ose:
                raise error.general('bootstrap failed: %s in %s: %s' % \
                                    (' '.join(cmd), path.host(self.cwd), (str(ose))))
            except KeyboardInterrupt:
                pass
            except:
                raise
        except:
            # Never let an exception escape the thread; keep it for reraise().
            self.result = sys.exc_info()
        self.end_time = datetime.datetime.now()
    def run(self):
        # Start runner() on a background thread; pair with is_alive()/reraise().
        self.thread = threading.Thread(target = self.runner)
        self.thread.start()
    def is_alive(self):
        # True while the worker thread is still running.
        return self.thread and self.thread.is_alive()
    def reraise(self):
        # Re-raise, in the caller's thread, an exception captured by runner().
        if self.result is not None:
            raise self.result[0](self.result[1]).with_traceback(self.result[2])
class autoreconf:
    """Run 'autoreconf -i --no-recursive' for one configure.ac, asynchronously.

    The command starts in __init__; poll is_alive() and then call
    post_process() to check the result.
    """
    def __init__(self, topdir, configure):
        self.topdir = topdir
        self.configure = configure
        self.cwd = path.dirname(self.configure)
        self.command = command(['autoreconf', '-i', '--no-recursive'], self.cwd)
        self.command.run()
    def is_alive(self):
        return self.command.is_alive()
    def post_process(self):
        # Propagate any exception from the worker thread, then check exit code.
        if self.command is not None:
            self.command.reraise()
            if self.command.exit_code != 0:
                raise error.general('error: autoreconf: %s' % (' '.join(self.command.cmd)))
            makefile = path.join(self.cwd, 'Makefile.am')
            if path.exists(makefile):
                if _grep(makefile, 'stamp-h\.in'):
                    # Makefile.am references stamp-h.in; create it so the
                    # generated build does not regenerate config headers.
                    stamp_h = path.join(self.cwd, 'stamp-h.in')
                    try:
                        t = open(path.host(stamp_h), 'w')
                        t.write('timestamp')
                        t.close()
                    except IOError as err:
                        raise error.general('writing: %s' % (stamp_h))
def generate(topdir, jobs):
    """Run autoreconf over every configure.ac under *topdir*.

    At most *jobs* autoreconf processes run concurrently; *jobs* may be
    given as a string and is coerced to int.
    """
    if type(jobs) is str:
        jobs = int(jobs)
    start_time = datetime.datetime.now()
    confs = _collect(topdir, 'configure.ac')
    next = 0
    autoreconfs = []
    while next < len(confs) or len(autoreconfs) > 0:
        if next < len(confs) and len(autoreconfs) < jobs:
            log.notice('%3d/%3d: autoreconf: %s' % \
                           (next + 1, len(confs), confs[next][len(topdir) + 1:]))
            autoreconfs += [autoreconf(topdir, confs[next])]
            next += 1
        else:
            # Iterate over a snapshot: removing from the list while
            # iterating it skips the element following each removal.
            for ac in autoreconfs[:]:
                if not ac.is_alive():
                    ac.post_process()
                    autoreconfs.remove(ac)
                    del ac
            if len(autoreconfs) >= jobs:
                time.sleep(1)
    end_time = datetime.datetime.now()
    log.notice('Bootstrap time: %s' % (str(end_time - start_time)))
class ampolish3:
    """Run the 'ampolish3' tool for one Makefile.am, asynchronously.

    post_process() writes the tool's output to preinstall.am next to the
    Makefile.am.
    """
    def __init__(self, topdir, makefile):
        self.topdir = topdir
        self.makefile = makefile
        self.preinstall = path.join(path.dirname(makefile), 'preinstall.am')
        self.command = command([path.join(topdir, 'ampolish3'), makefile], self.topdir)
        self.command.run()
    def is_alive(self):
        return self.command.is_alive()
    def post_process(self):
        # NOTE(review): unlike autoreconf.post_process, this does not call
        # self.command.reraise(), so thread exceptions go unnoticed -- confirm.
        if self.command is not None:
            if self.command.exit_code != 0:
                raise error.general('error: ampolish3: %s' % (' '.join(self.command.cmd)))
            try:
                p = open(path.host(self.preinstall), 'w')
                # Iterating command.output writes it element by element.
                for l in self.command.output:
                    p.write(l)
                p.close()
            except IOError as err:
                raise error.general('writing: %s' % (self.preinstall))
def preinstall(topdir, jobs):
    """Generate preinstall.am files for every Makefile.am that includes one.

    At most *jobs* ampolish3 processes run concurrently; *jobs* may be
    given as a string and is coerced to int.
    """
    if type(jobs) is str:
        jobs = int(jobs)
    start_time = datetime.datetime.now()
    makes = []
    for am in _collect(topdir, 'Makefile.am'):
        if _grep(am, 'include .*/preinstall\.am'):
            makes += [am]
    next = 0
    ampolish3s = []
    while next < len(makes) or len(ampolish3s) > 0:
        if next < len(makes) and len(ampolish3s) < jobs:
            log.notice('%3d/%3d: ampolish3: %s' % \
                           (next + 1, len(makes), makes[next][len(topdir) + 1:]))
            ampolish3s += [ampolish3(topdir, makes[next])]
            next += 1
        else:
            # Iterate over a snapshot: removing from the list while
            # iterating it skips the element following each removal.
            for ap in ampolish3s[:]:
                if not ap.is_alive():
                    ap.post_process()
                    ampolish3s.remove(ap)
                    del ap
            if len(ampolish3s) >= jobs:
                time.sleep(1)
    end_time = datetime.datetime.now()
    log.notice('Preinstall time: %s' % (str(end_time - start_time)))
def run(args):
    """Command-line entry point for the RTEMS bootstrap.

    NOTE(review): *args* is unused -- options are parsed from sys.argv.
    Exits the process with 0 on success, 1 on failure.
    """
    try:
        #
        # On Windows MSYS2 prepends a path to itself to the environment
        # path. This means the RTEMS specific automake is not found and which
        # breaks the bootstrap. We need to remove the prepended path. Also
        # remove any ACLOCAL paths from the environment.
        #
        if os.name == 'nt':
            cspath = os.environ['PATH'].split(os.pathsep)
            if 'msys' in cspath[0] and cspath[0].endswith('bin'):
                os.environ['PATH'] = os.pathsep.join(cspath[1:])
            if 'ACLOCAL_PATH' in os.environ:
                #
                # The clear fails on a current MSYS2 python (Feb 2016). Delete
                # the entry if the clear fails.
                #
                try:
                    os.environ['ACLOCAL_PATH'].clear()
                except:
                    del os.environ['ACLOCAL_PATH']
        optargs = { '--rtems': 'The RTEMS source directory',
                    '--preinstall': 'Preinstall AM generation' }
        log.notice('RTEMS Source Builder - RTEMS Bootstrap, %s' % (version.str()))
        opts = options.load(sys.argv, optargs, logfile = False)
        # Default to the current directory when --rtems is not given.
        if opts.get_arg('--rtems'):
            topdir = opts.get_arg('--rtems')
        else:
            topdir = os.getcwd()
        # --preinstall generates preinstall.am files; otherwise run autoreconf.
        if opts.get_arg('--preinstall'):
            preinstall(topdir, opts.jobs(opts.defaults['_ncpus']))
        else:
            generate(topdir, opts.jobs(opts.defaults['_ncpus']))
    except error.general as gerr:
        print(gerr)
        print('Bootstrap FAILED', file = sys.stderr)
        sys.exit(1)
    except error.internal as ierr:
        print(ierr)
        print('Bootstrap FAILED', file = sys.stderr)
        sys.exit(1)
    except error.exit as eerr:
        # error.exit means a clean, already-reported termination.
        pass
    except KeyboardInterrupt:
        log.notice('abort: user terminated')
        sys.exit(1)
    sys.exit(0)
if __name__ == "__main__":
run(sys.argv)
| [
"chrisj@rtems.org"
] | chrisj@rtems.org |
7059aebf40a5fdc792be9c88d86ab6e4b8bd4650 | 55d560fe6678a3edc9232ef14de8fafd7b7ece12 | /tools/build/test/resolution.py | 3af66b46daf757644cc94491ecadab6b821e9fce | [
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | stardog-union/boost | ec3abeeef1b45389228df031bf25b470d3d123c5 | caa4a540db892caa92e5346e0094c63dea51cbfb | refs/heads/stardog/develop | 2021-06-25T02:15:10.697006 | 2020-11-17T19:50:35 | 2020-11-17T19:50:35 | 148,681,713 | 0 | 0 | BSL-1.0 | 2020-11-17T19:50:36 | 2018-09-13T18:38:54 | C++ | UTF-8 | Python | false | false | 924 | py | #!/usr/bin/python
# Copyright (C) Vladimir Prus 2006.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Tests for the target id resolution process.
import BoostBuild
# Create a temporary working directory.
# Create a temporary working directory.
t = BoostBuild.Tester(use_test_config=False)
# Create the needed files
t.write("jamroot.jam", """\
exe hello : hello.cpp ;
# This should use the 'hello' target, even if there is a 'hello' file in the
# current dir.
install s : hello : <location>. ;
""")
t.write("hello.cpp", "int main() {}\n")
# First build: the object file for hello must appear.
t.run_build_system()
t.expect_addition("bin/$toolset/debug*/hello.obj")
t.touch("hello.cpp")
t.run_build_system(["s"])
# If 'hello' in the 's' target resolved to file in the current dir, nothing
# will be rebuilt.
# expect_touch asserts the object WAS rebuilt, i.e. 'hello' resolved to
# the target, not to a same-named file.
t.expect_touch("bin/$toolset/debug*/hello.obj")
t.cleanup()
| [
"james.pack@stardog.com"
] | james.pack@stardog.com |
d6d3f58daf810bd24bdb5ca0a4ad0a20d4a1425f | 00b405a49ac6108d24986243c4b52fa53fb58acc | /0591_tag_validator.py | fb66855422bdf55713952f38d524a0a35966598e | [] | no_license | Shin-jay7/LeetCode | 0325983fff95bfbc43a528812582cbf9b7c0c2f2 | 953b0b19764744753f01c661da969bdab6521504 | refs/heads/master | 2023-07-19T07:17:21.513531 | 2023-07-15T06:05:06 | 2023-07-15T06:05:06 | 231,285,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,969 | py | from __future__ import annotations
import re
class Solution:
    def isValid(self, code: str) -> bool:
        """Validate tag nesting via repeated regex reduction.

        CDATA sections and the literal letter 't' are first neutralised to
        '-', so 't' can serve as a unique placeholder for an already
        validated tag pair.  Matched <TAG>...</TAG> pairs (uppercase names,
        1-9 chars, no '<' inside) are then repeatedly collapsed to 't'
        until a fixed point; a fully valid document reduces to exactly 't'.
        """
        reduced = re.sub(r'<!\[CDATA\[.*?\]\]>|t', '-', code)
        while True:
            collapsed = re.sub(r'<([A-Z]{1,9})>[^<]*</\1>', 't', reduced)
            if collapsed == reduced:
                break
            reduced = collapsed
        return reduced == 't'
class Solution:
    def isValid(self, code: str) -> bool:
        """Validate tag nesting with an explicit character-level state machine.

        States: 'plain' (between tags), 'open' (inside <TAG), 'close'
        (inside </TAG), 'cdata' (inside <![CDATA[ ... ]]>).  A stack of
        open tag names enforces matching and nesting.
        """
        # state_machine = ["plain", "open", "close", "cdata"]
        curr = "plain"
        stack, open_tag, close_tag = [], [], []
        idx = 0
        while idx < len(code):
            char = code[idx]
            if curr == "plain":
                if not stack and idx != 0:
                    # code is not in a closed tag: text after the root closed
                    return False
                if code[idx:idx+9] == "<![CDATA[":
                    curr = "cdata"
                    idx += 9
                    continue
                elif code[idx:idx+2] == '</':
                    curr = 'close'
                    idx += 2
                    continue
                elif char == '<':
                    curr = "open"
            elif curr == "open":
                if char == '>':
                    if len(open_tag) > 9 or len(open_tag) < 1:
                        # open tag name length not valid
                        return False
                    stack.append("".join(open_tag))
                    open_tag = []
                    curr = 'plain'
                    idx += 1
                    continue
                if not char.isupper():
                    # open tag is not upper
                    return False
                open_tag.append(char)
            elif curr == 'close':
                if char == '>':
                    if len(close_tag) > 9 or len(close_tag) < 1:
                        # close tag name length not valid
                        return False
                    close_tag_str = "".join(close_tag)
                    if not stack or close_tag_str != stack[-1]:
                        # tag no match
                        return False
                    else:
                        stack.pop()
                        close_tag = []
                        curr = 'plain'
                        idx += 1
                        continue
                if not char.isupper():
                    # close tag is not upper
                    return False
                close_tag.append(char)
            elif curr == "cdata":
                # Only ']]>' leaves CDATA; everything else is skipped below.
                if code[idx:idx+3] == ']]>':
                    idx += 3
                    curr = "plain"
                    continue
            idx += 1
        # Valid only if every tag was closed and we ended outside any tag.
        if stack or curr != "plain":
            return False
        return True
# Ad-hoc smoke calls (return values are not checked).
test = Solution()
test.isValid("<DIV>This is the first line <![CDATA[<div>]]></DIV>") # True
# test = Solution()
# test.isValid("<DIV>>> ![cdata[]] <![CDATA[<div>]>]]>]]>>]</DIV>") # True
# test = Solution()
# test.isValid("<A> <B> </A> </B>") # False
| [
"shin@jay7.net"
] | shin@jay7.net |
0928bcf7842c1df47ae48a1e23aa21d7bdac7f51 | afb16c3188bf06af65ae0d998e114c72342bd8be | /note/demo/pydantic_demo/dict2model.py | 5ab2e3aeae2f1224a3cf061a5f8cff325e4a6eb9 | [] | no_license | onsunsl/onsunsl.github.io | aa75f399f1c647bc2e62314633bfe35187e59ad4 | 4ed2b1b9a2407afcbffdf304020d42b81c4c8cdc | refs/heads/master | 2023-05-26T12:33:11.167270 | 2023-04-01T10:18:05 | 2023-04-01T10:18:05 | 237,595,319 | 1 | 0 | null | 2023-05-23T20:13:11 | 2020-02-01T10:02:58 | Python | UTF-8 | Python | false | false | 503 | py | from typing import List
import pydantic
from note.demo.pydantic_demo.my_list import GenericList
class A(pydantic.BaseModel):
    # Leaf model: a single required string field.
    a: str
class ListA(GenericList[A]):
    # Concrete specialisation of the custom GenericList holding A items.
    pass
class B(pydantic.BaseModel):
    # a1 is annotated with typing.List, a2 with the custom ListA type;
    # both default to an empty ListA via default_factory.
    a1: List[A] = pydantic.Field(default_factory=ListA)
    a2: ListA = pydantic.Field(default_factory=ListA)
    b: str
# Build a B from nested plain dicts and show both list fields validate.
b = B.parse_obj(dict(b="123",
                     a1=[dict(a="aa1"), dict(a="aa2")],
                     a2=[dict(a="aa1"), dict(a="aa2")]))
print(b)
print(b.dict())
| [
"onsunsl@foxmail.com"
] | onsunsl@foxmail.com |
918f9042356ae2bdde7f3ab106d057dac1da5860 | b47b530ced544ec4180b2e8ddc8d3bff4b8b97ba | /141/e.py | b50432cbaefa1e3817ff064c5f2575cc055a9294 | [] | no_license | shionhonda/AtCoder | 1069f272490c45d60945a86392642d434a44ee52 | afd254d569505ee38ba3307d0e0e7437fca40814 | refs/heads/master | 2020-03-28T08:00:20.937794 | 2019-11-16T14:30:26 | 2019-11-16T14:30:26 | 147,939,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | N = int(input())
S = input()
def z_algorithm(s):
    """Z-array of *s*: A[i] = length of the longest common prefix of s and s[i:].

    NOTE(review): assumes s is non-empty -- 'A[0] = 0' would raise
    IndexError on an empty string (main() never passes one).
    """
    l = len(s)
    A = [0]*l
    A[0] = 0
    j = 0  # current match length, carried across iterations of the for loop
    for i in range(1,l):
        # Extend the common prefix of s and s[i:].
        while i+j<l and s[j]==s[i+j]:
            j += 1
        if j<1:
            continue
        A[i] = j
        k = 1
        # Copy already-computed Z values inside the matched box.
        while k < l-i and k < j-A[k]:
            A[i+k] = A[k]
            k += 1
        # NOTE: reassigning the loop variable has no effect on a Python
        # range-based for; only 'j -= k' below carries state forward.
        i += k
        j -= k
    #print(A, s)
    return A
def main():
    """Print the maximum Z-value found over every suffix of S."""
    best = 0
    for start in range(N):
        best = max(best, max(z_algorithm(S[start:])))
    print(best)
main()
| [
"26x.orc.ed5.1hs@gmail.com"
] | 26x.orc.ed5.1hs@gmail.com |
b6c5e83d08aa1892f5291581dd3e2a97d2f4a9e1 | fa4b2b4ce915b4e58737f65efe7d18d1f45cbe27 | /accounts/admin.py | 602d8c011ad760f251464f14af4f49a61ca8a121 | [] | no_license | Wishez/cosmeticsyou-v2.0 | 0fde09158944415b2471cb07dcf1e2cd1df85923 | a0f6a1b11622cb36a5084781ad35f4eed2778f66 | refs/heads/master | 2022-12-26T12:47:53.693887 | 2020-10-12T20:22:30 | 2020-10-12T20:27:54 | 293,092,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,864 | py | # -*- coding: utf-8 -*-
from django.contrib import admin
from myadmin.admin import admin_site
from django.contrib.admin import DateFieldListFilter
from .models import *
from rangefilter.filter import DateRangeFilter
class ConsultantAdmin(admin.ModelAdmin):
    """Admin for consultant accounts: list columns, filters (including a
    registered-date range), referral m2m widgets and grouped field sets.
    Section titles in fieldsets are user-facing Russian strings."""
    list_per_page = 10
    list_display = ('last_name', 'first_name', 'consultant_num', 'phone_number', 'email', 'status', 'refferal_url', 'url_to_personal_room',)
    # date_hierarchy = 'last_name'
    list_filter = ('status', 'last_name', 'first_name', 'middle_name', 'citizenship', 'city', 'region', ('registered_date', DateRangeFilter),)
    filter_horizontal = ('user_lead', 'user_lead_1', 'user_lead_2',)
    search_fields = (
        'last_name',
        'first_name',
        'middle_name',
        'city',
        'region',
        'consultant_num',
        'passport_data',
        'birthday',
        'street',
        'num_home',
        'num_apartment',
        'email',
        'phone_number',
        'user_led',
        'user_led_1',
        'user_led_2',
    )
    fieldsets = (
        ('Персональные данные', {
            'fields': (
                ('email',),
                ('last_name',),
                ('first_name',),
                ('middle_name',),
                # ('passport_data',),
                ('birthday',),
                ('phone_number',),
                ('citizenship',),
            ),
        },),
        ('Адрес', {
            'fields': (
                ('region',),
                ('city',),
                ('street',),
                ('num_home',),
                ('num_apartment',),
            ),
        },),
        ('Технические данные', {
            'fields': (
                ('consultant_num', 'status',),
            ),
        },),
        ('Рферальные данные', {
            'fields': (
                ('refferal_url','url_to_personal_room',),
                ('user_led', 'user_led_1', 'user_led_2',),
            ),
        },),
        ('Списки рефералов консультанта', {
            'fields': (
                ('user_lead',), ('user_lead_1',), ('user_lead_2',),
            ),
        },),
    )
class RelatedConsultantAdmin(admin.ModelAdmin):
    """Admin for related consultants: a reduced variant of ConsultantAdmin
    (fewer columns, no address section)."""
    list_per_page = 10
    list_display = ('last_name', 'first_name', 'middle_name', 'consultant_num', 'refferal_url', 'url_to_personal_room', 'email',)
    list_filter = ('last_name', 'first_name', 'middle_name', 'consultant_num', ('registered_date', DateRangeFilter))
    filter_horizontal = ('user_lead', 'user_lead_1', 'user_lead_2',)
    search_fields = (
        'last_name',
        'first_name',
        'middle_name',
        'email',
        'user_led',
        'user_led_1',
        'user_led_2',
    )
    fieldsets = (
        ('Персональная данные', {
            'fields': (
                ('email',),
                ('last_name',),
                ('first_name',),
            ),
        },),
        ('Технические данные', {
            'fields': (
                ('consultant_num', 'status',),
            ),
        },),
        ('Рферальные данные', {
            'fields': (
                ('refferal_url', 'url_to_personal_room',),
                ('user_led', 'user_led_1', 'user_led_2',),
            ),
        },),
        ('Списки рефералов консультанта', {
            'fields': (
                ('user_lead',), ('user_lead_1',), ('user_lead_2',),
            ),
        },),
    )
# Register your models here.
admin_site.register(User, ConsultantAdmin)
# RefferalConsultant reuses the same admin configuration as User.
admin_site.register(RefferalConsultant, ConsultantAdmin)
admin_site.register(RelatedConsultant, RelatedConsultantAdmin) | [
"shiningfinger@list.ru"
] | shiningfinger@list.ru |
b0672f06262344af3f8c8023edd3a85cf64b28f9 | 185f30795be9a8fec6539fe17753fb909e258e4c | /ljy_03函数/ljy_sum1.py | bf763d93261c09589c8b2f04f0bb367167e231b2 | [] | no_license | OPBrother/LearningPython | bd375430ce013abd9a4279f60e5f9457e965bdf7 | 9d264acb269a6191f7ec49abba25c98002f4fcd1 | refs/heads/main | 2023-03-31T06:47:43.071370 | 2021-04-12T07:09:16 | 2021-04-12T07:09:16 | 350,307,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | def sum_num(num1=1, num2=2):
# global num1
# global num2
"""
简简单单的函数加法
:param num1:
:param num2:
:return:
"""
return num1 + num2
# Compute the default sum and print it as a decimal integer.
total = sum_num()
print('%d' % total)
| [
"2276720277@qq.com"
] | 2276720277@qq.com |
889924e6e4c0a39b0451cb1799c08c5073a212ac | 48832d27da16256ee62c364add45f21b968ee669 | /res_bw/scripts/common/lib/encodings/utf_32.py | e64e23103e1b600efc009acbda30ea9639b13392 | [] | no_license | webiumsk/WOT-0.9.15.1 | 0752d5bbd7c6fafdd7f714af939ae7bcf654faf7 | 17ca3550fef25e430534d079876a14fbbcccb9b4 | refs/heads/master | 2021-01-20T18:24:10.349144 | 2016-08-04T18:08:34 | 2016-08-04T18:08:34 | 64,955,694 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 4,801 | py | # 2016.08.04 19:59:25 Střední Evropa (letní čas)
# Embedded file name: scripts/common/Lib/encodings/utf_32.py
"""
Python 'utf-32' Codec
"""
import codecs, sys
encode = codecs.utf_32_encode
def decode(input, errors = 'strict'):
    """Decode UTF-32 *input*, honouring (and consuming) a leading BOM.

    Returns the usual (text, bytes_consumed) codec tuple.
    """
    decoded, consumed = codecs.utf_32_decode(input, errors, True)
    return (decoded, consumed)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental UTF-32 encoder.

    The first encode() call goes through codecs.utf_32_encode (which emits
    the BOM); subsequent calls use the native-endian encoder directly.
    """
    def __init__(self, errors = 'strict'):
        codecs.IncrementalEncoder.__init__(self, errors)
        self.encoder = None
        return
    def encode(self, input, final = False):
        if self.encoder is None:
            # First chunk: BOM-emitting encode, then lock in native order.
            result = codecs.utf_32_encode(input, self.errors)[0]
            if sys.byteorder == 'little':
                self.encoder = codecs.utf_32_le_encode
            else:
                self.encoder = codecs.utf_32_be_encode
            return result
        else:
            return self.encoder(input, self.errors)[0]
    def reset(self):
        codecs.IncrementalEncoder.reset(self)
        self.encoder = None
        return
    def getstate(self):
        # State 2 = BOM not yet written, 0 = BOM already written.
        if self.encoder is None:
            return 2
        else:
            return 0
    def setstate(self, state):
        if state:
            self.encoder = None
        elif sys.byteorder == 'little':
            self.encoder = codecs.utf_32_le_encode
        else:
            self.encoder = codecs.utf_32_be_encode
        return
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    """Incremental UTF-32 decoder that detects byte order from the BOM."""
    def __init__(self, errors = 'strict'):
        codecs.BufferedIncrementalDecoder.__init__(self, errors)
        self.decoder = None
        return
    def _buffer_decode(self, input, errors, final):
        if self.decoder is None:
            # Let utf_32_ex_decode report the BOM's byte order (-1 LE, 1 BE).
            output, consumed, byteorder = codecs.utf_32_ex_decode(input, errors, 0, final)
            if byteorder == -1:
                self.decoder = codecs.utf_32_le_decode
            elif byteorder == 1:
                self.decoder = codecs.utf_32_be_decode
            elif consumed >= 4:
                # Enough bytes to hold a BOM but none found.
                raise UnicodeError('UTF-32 stream does not start with BOM')
            return (output, consumed)
        else:
            return self.decoder(input, self.errors, final)
    def reset(self):
        codecs.BufferedIncrementalDecoder.reset(self)
        self.decoder = None
        return
    def getstate(self):
        # Second element: 2 = BOM pending, 0 = native order, 1 = swapped order.
        state = codecs.BufferedIncrementalDecoder.getstate(self)[0]
        if self.decoder is None:
            return (state, 2)
        else:
            addstate = int((sys.byteorder == 'big') != (self.decoder is codecs.utf_32_be_decode))
            return (state, addstate)
    def setstate(self, state):
        codecs.BufferedIncrementalDecoder.setstate(self, state)
        state = state[1]
        if state == 0:
            self.decoder = codecs.utf_32_be_decode if sys.byteorder == 'big' else codecs.utf_32_le_decode
        elif state == 1:
            self.decoder = codecs.utf_32_le_decode if sys.byteorder == 'big' else codecs.utf_32_be_decode
        else:
            self.decoder = None
        return
class StreamWriter(codecs.StreamWriter):
    """Stream writer: first write emits the BOM, later writes are BOM-less."""
    def __init__(self, stream, errors = 'strict'):
        self.encoder = None
        codecs.StreamWriter.__init__(self, stream, errors)
        return
    def reset(self):
        codecs.StreamWriter.reset(self)
        self.encoder = None
        return
    def encode(self, input, errors = 'strict'):
        if self.encoder is None:
            # First chunk: BOM-emitting encode, then lock in native order.
            result = codecs.utf_32_encode(input, errors)
            if sys.byteorder == 'little':
                self.encoder = codecs.utf_32_le_encode
            else:
                self.encoder = codecs.utf_32_be_encode
            return result
        else:
            return self.encoder(input, errors)
        return
class StreamReader(codecs.StreamReader):
    """Stream reader: the bound 'decode' attribute is replaced with the
    endian-specific decoder once the BOM has been seen.

    NOTE: the 'raise X, msg' below is Python 2 syntax; this module is
    decompiled Python 2 code.
    """
    def reset(self):
        codecs.StreamReader.reset(self)
        try:
            # Drop the instance-level decoder so BOM detection runs again.
            del self.decode
        except AttributeError:
            pass
    def decode(self, input, errors = 'strict'):
        object, consumed, byteorder = codecs.utf_32_ex_decode(input, errors, 0, False)
        if byteorder == -1:
            self.decode = codecs.utf_32_le_decode
        elif byteorder == 1:
            self.decode = codecs.utf_32_be_decode
        elif consumed >= 4:
            raise UnicodeError, 'UTF-32 stream does not start with BOM'
        return (object, consumed)
def getregentry():
    """Return the CodecInfo entry registering this module as 'utf-32'."""
    return codecs.CodecInfo(name='utf-32', encode=encode, decode=decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\encodings\utf_32.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.08.04 19:59:25 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
fd4490a8aea18addd987c1bc80c11bdd94fd8553 | 9c2ba4f1a2d75b1916e6f20fa95c5fb32d0497d9 | /ScrapingWithPython2/code/myScrapy/todayMovie/todayMovie/spiders/wuHanMovieSpider.py | 1ea663d1da0a6cb675492cc3b984b1b928fedbd1 | [] | no_license | PowerDG/DgCoreInit | abe4b15e38b730c25424f71e6927db982af27a72 | 84e6b7833ddc083b90fcc172c3812dd6f8b51e3d | refs/heads/master | 2023-07-19T11:58:09.220460 | 2019-06-07T14:43:24 | 2019-06-07T14:43:24 | 163,091,619 | 0 | 1 | null | 2023-07-06T21:20:15 | 2018-12-25T14:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 1,203 | py | # -*- coding: utf-8 -*-
import scrapy
import re
import sys
import os
from todayMovie.items import TodaymovieItem
class WuhanmoviespiderSpider(scrapy.Spider):
    """Scrapy spider for today's movies at a Wuhan (Wuchang) cinema on mtime.com."""
    name = 'wuHanMovieSpider'
    allowed_domains = ['mtime.com']
    # start_urls = ['http://mtime.com/']
    # def parse(self, response):
    #     pass
    start_urls = ['http://theater.mtime.com/China_Hubei_Province_Wuhan_Wuchang/4316/']
    # Cinema home page for Wuhan.
    def parse(self, response):
        # response is the downloaded page.
        # The movie data lives in the third <script> tag under <body>.
        selector = response.xpath('/html/body/script[3]/text()')[0].extract()
        # print(selector)
        moviesStr = re.search('"movies":\[.*?\]', selector).group()
        moviesList = re.findall('{.*?}', moviesStr)
        items = []
        for movie in moviesList:
            # SECURITY: eval() on scraped page content executes arbitrary
            # code if the page is malicious; consider json/ast.literal_eval.
            mDic = eval(movie)
            item = TodaymovieItem()
            item['movieTitleCn'] = mDic.get('movieTitleCn')
            item['movieTitleEn'] = mDic.get('movieTitleEn')
            item['director'] = mDic.get('director')
            item['runtime'] = mDic.get('runtime')
            items.append(item)
        # print(items.count())
        return items
| [
"1049365046@qq.com"
] | 1049365046@qq.com |
292fd601726ed3c66db99db9c9ace19128c64869 | c50fb310d8c52284be2c636f951de796eededae9 | /63.py | 190f91e0abdb366118d5410628ca971438ae7a7f | [] | no_license | Deepakdk7/Playerset3 | 6f46f638f22d894b9cc93d81b27c221f9dcdaad3 | 636e1feed0f97bbc9e9495a5dbb81a512ed980c5 | refs/heads/master | 2020-06-03T07:35:23.203780 | 2019-08-06T08:56:16 | 2019-08-06T08:56:16 | 191,497,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | ax=int(input())
c=[]
# Read two ax-element integer lists and print, in first-list order, every
# value that also occurs in the second list (duplicates suppressed).
a=list(map(int,input().split()))
b=list(map(int,input().split()))
for i in range(0,ax):
    for j in range(0,ax):
        # 'a[i] not in c' keeps the output duplicate-free (linear scan).
        if a[i]==b[j] and a[i] not in c:
            c.append(a[i])
for i in c:
    print(i,"",end="")
| [
"noreply@github.com"
] | Deepakdk7.noreply@github.com |
9e32b28bf39bcc7331dbe33a356913015282a6c3 | 40b420e982ecb0a22b44888c192f531bb5fc91fa | /studies/models/trial.py | 60679c75fdbe6107390f652eb15caad34bb2c449 | [] | no_license | davidmcclure/field-poetics | 06d64f3b3db1dee1d86e799868fbf7df3c4479a9 | edd25f998a4253dedc1d88d8159ad92b3b5a6b40 | refs/heads/master | 2020-05-30T07:17:36.802749 | 2016-10-18T22:24:59 | 2016-10-18T22:24:59 | 70,184,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py |
from django.db import models
class Trial(models.Model):
    # Each trial belongs to exactly one Study.
    study = models.ForeignKey('Study')
| [
"davidwilliammcclure@gmail.com"
] | davidwilliammcclure@gmail.com |
class Solution(object):
    def longestSubarray(self, nums, limit):
        """Length of the longest contiguous subarray with max - min <= limit.

        Maintains a window (list) that never shrinks: when the next value
        would violate the limit, the window slides forward by one instead
        of growing, so its final length equals the best window seen.
        """
        if not nums:
            return 0
        window = []
        window_max = window_min = nums[0]
        for value in nums:
            fits = (abs(value - window_max) <= limit
                    and abs(value - window_min) <= limit
                    and abs(window_max - window_min) <= limit)
            if fits:
                window_max = max(window_max, value)
                window_min = min(window_min, value)
                window.append(value)
            else:
                # Slide: append the new value, drop the oldest, then
                # recompute the window extremes from scratch.
                window.append(value)
                window.pop(0)
                window_max = max(window)
                window_min = min(window)
        return len(window)
Solution.longestSubarray(None,nums = [8,2,4,7], limit = 4) | [
"2892211452aa@gmail.com"
] | 2892211452aa@gmail.com |
070cddb15ffb012a7b728cd8e739baf89d2f0b4b | a99a44aee5cfc5e080f6d83d2bcc1c3d273a3426 | /scripts/ingestors/rwis/process_traffic.py | 1a9456b736a38d76f8f227d31d692573117d3685 | [
"MIT"
] | permissive | ragesah/iem | 1513929c8bc7f254048271d61b4c4cf27a5731d7 | 8ed970d426bddeaa3e7ded593665d22f0f9f6e87 | refs/heads/main | 2023-08-20T20:01:15.480833 | 2021-10-12T15:44:52 | 2021-10-12T15:44:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,657 | py | """Ingest Iowa DOT RWIS data provided by DTN.
called from RUN_10_AFTER.sh
"""
import datetime
from pyiem.network import Table as NetworkTable
from pyiem.util import get_properties, get_dbconn, utc, logger
import pandas as pd
import requests
LOG = logger()
DBCONN = get_dbconn("iem")
NT = NetworkTable("IA_RWIS")
def load_metadata():
    """Load up what we know about these traffic sites.

    Returns a dict mapping '<location_id>_<lane_id>' to the sensor id,
    read from the rwis_traffic_meta table.
    """
    cursor = DBCONN.cursor()
    cursor.execute(
        "SELECT location_id, lane_id, sensor_id from rwis_traffic_meta"
    )
    records = cursor.fetchall()
    cursor.close()
    return {f"{loc}_{lane}": sensor for loc, lane, sensor in records}
def create_sensor(cursor, key, row, meta):
    """create an entry.

    Inserts a new rwis_traffic_sensors row for this station/lane, seeds a
    matching rwis_traffic_data row, and records the new id in *meta* under
    *key* so later rows reuse it.
    """
    cursor.execute(
        "INSERT into rwis_traffic_sensors(location_id, lane_id, name) "
        "VALUES (%s, %s, %s) RETURNING id",
        (
            row["stationId"].replace("IA", ""),
            row["sensorId"],
            row["sensorName"],
        ),
    )
    sensor_id = cursor.fetchone()[0]
    LOG.info(
        "Adding RWIS Traffic Sensor: %s Lane: %s Name: %s DB_SENSOR_ID: %s",
        row["stationId"],
        row["sensorId"],
        row["sensorName"],
        sensor_id,
    )
    meta[key] = sensor_id
    # Seed the data table so the UPDATE in process() has a row to hit.
    cursor.execute(
        "INSERT into rwis_traffic_data(sensor_id) VALUES (%s)", (sensor_id,)
    )
def process(cursor, df, meta):
    """Process our data.

    Maps each observation row to its sensor id (creating unknown sensors
    on the fly) and bulk-updates rwis_traffic_data with the newest values.
    """
    rows = []
    for _, row in df.iterrows():
        data = dict(row)
        if "stationId" not in data:
            LOG.info("hit data quirk with row %s", row)
            continue
        key = f"{int(row['stationId'].replace('IA', ''))}_{row['sensorId']}"
        if key not in meta:
            create_sensor(cursor, key, row, meta)
        data["sensor_id"] = meta[key]
        rows.append(data)
    # 'volume',
    # 'occupancy',
    # 'normalLength', 'longLength', 'unclassifiedLength', 'qcFailures'
    # 'valid < utcTime' makes the update monotonic: older data never
    # overwrites newer data.
    cursor.executemany(
        "UPDATE rwis_traffic_data SET valid = %(utcTime)s, "
        "avg_speed = %(avgSpeed)s, normal_vol = %(normalLength)s, "
        "long_vol = %(longLength)s, occupancy = %(occupancy)s "
        "WHERE sensor_id = %(sensor_id)s and valid < %(utcTime)s",
        rows,
    )
def main():
    """Go Main Go.

    Fetches the last four hours of DTN traffic observations for every
    Iowa RWIS site with a remote id and stores them via process().
    """
    # prevent a clock drift issue
    ets = utc() - datetime.timedelta(minutes=1)
    sts = ets - datetime.timedelta(hours=4)
    edate = ets.strftime("%Y-%m-%dT%H:%M:%SZ")
    sdate = sts.strftime("%Y-%m-%dT%H:%M:%SZ")
    meta = load_metadata()
    props = get_properties()
    apikey = props["dtn.apikey"]
    headers = {"accept": "application/json", "apikey": apikey}
    for nwsli in NT.sts:
        idot_id = NT.sts[nwsli]["remote_id"]
        if idot_id is None:
            continue
        URI = (
            f"https://api.dtn.com/weather/stations/IA{idot_id:03}/"
            f"traffic-observations?startDate={sdate}"
            f"&endDate={edate}&units=us&precision=0"
        )
        req = requests.get(URI, timeout=60, headers=headers)
        if req.status_code != 200:
            # HACK
            # Only log failures for low-numbered sites.
            if idot_id < 73:
                LOG.info("Fetch %s got status_code %s", URI, req.status_code)
            continue
        res = req.json()
        if not res:
            continue
        try:
            df = pd.DataFrame(res)
        except Exception as exp:
            LOG.info(
                "DataFrame construction failed with %s\n res: %s", exp, res
            )
            continue
        # Commit per-station so one bad station does not roll back the rest.
        cursor = DBCONN.cursor()
        process(cursor, df, meta)
        cursor.close()
        DBCONN.commit()
if __name__ == "__main__":
main()
| [
"akrherz@iastate.edu"
] | akrherz@iastate.edu |
206383854ad7ed2c4a4c906be142b4f1f5a53f0a | 2b8c88dfee5c5a784357515eafe8cd5f997c8774 | /learn_ppdai/learn_sqlite.py | 5117e136ead3ef8863fa6945912a78c800468fea | [] | no_license | archenRen/learnpy | e060f3aa2f77c35fc1b12345720af6c8b528da57 | 934ef76b97297f746a722a48c76672c7bc744cd9 | refs/heads/master | 2022-04-28T20:25:59.114036 | 2020-05-03T02:16:03 | 2020-05-03T02:16:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | import pandas as pd
from sqlalchemy import create_engine
import json
import sys
# NOTE: Python 2 script (print statements below).
if sys.platform == 'win32':
    path = r'C:\Users\wangdi03\Downloads\history.db'
else:
    path = './history.db'
# SQLAlchemy engine over the local SQLite file.
engine = create_engine('sqlite:///' + path)
df = pd.read_sql_query(
    """
    select * from usermmvs
    """,
    engine
)
# Each row stores a JSON blob; pull two fields out of every record.
for var in df['variablejson']:
    dic = json.loads(var)
    print dic['userid'], dic['pc_credit_edu']
print('ok')
| [
"wangdi03@ppdai.com"
] | wangdi03@ppdai.com |
fc294056e5978d3fb4a4b61abe28a0ba09e92183 | 2d82d4c6574bd6d32f2cf1c781615f7951f55f66 | /muntjac/demo/sampler/features/windows/SubwindowModal.py | 17e9f816cadf2ee1565cb1e6c0cf44512e3c4d13 | [
"Apache-2.0"
] | permissive | metaperl/muntjac | f83f745ee03942a61af92ee7fba7285aa9c46f3c | 8db97712edd81b4d25deaaa48587d2a08010f2c8 | refs/heads/master | 2021-01-15T22:04:25.057862 | 2012-11-09T03:52:59 | 2012-11-09T03:52:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,122 | py |
from muntjac.demo.sampler.NamedExternalResource import NamedExternalResource
from muntjac.demo.sampler.APIResource import APIResource
from muntjac.demo.sampler.Feature import Feature, Version
from muntjac.ui.window import Window
class SubwindowModal(Feature):
    """Sampler feature entry describing the modal window demo."""
    def getSinceVersion(self):
        return Version.OLD
    def getName(self):
        return 'Modal window'
    def getDescription(self):
        return ('A <i>modal window</i> blocks access to the rest of the '
            'application until the window is closed (or made non-modal).<br/>'
            'Use modal windows when the user must finish the task in the '
            'window before continuing.')
    def getRelatedAPI(self):
        return [APIResource(Window)]
    def getRelatedFeatures(self):
        # Imported locally to avoid circular imports at module load time.
        from muntjac.demo.sampler.features.windows.Subwindow import Subwindow
        from muntjac.demo.sampler.FeatureSet import Windows
        return [Subwindow, Windows]
    def getRelatedResources(self):
        return [NamedExternalResource('Wikipedia: Modal window',
            'http://en.wikipedia.org/wiki/Modal_window')]
| [
"r.w.lincoln@gmail.com"
] | r.w.lincoln@gmail.com |
4197cdda503cccf0d608f684b3a945810598daaa | 8e2b2aa7d7405ed351072874d75e947619379cdb | /src/billing/migrations/0006_charge.py | 0f9995f46654d802c83ca1ba6337ad9b30e6248d | [] | no_license | hoanguyen-rozer/learn-django-ecommerce--2hand | f0bc11b4331fae6d060e24c29a5293170342ff2b | b7c67e6a5703edfe922d519f576d7d87f16a7dba | refs/heads/master | 2022-06-14T10:13:17.800193 | 2020-05-05T09:49:17 | 2020-05-05T09:49:17 | 256,304,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,150 | py | # Generated by Django 3.0.5 on 2020-04-25 07:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated: creates the Charge table mirroring Stripe charge fields.
    dependencies = [
        ('billing', '0005_card_default'),
    ]
    operations = [
        migrations.CreateModel(
            name='Charge',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('stripe_id', models.CharField(max_length=120)),
                ('paid', models.BooleanField(default=False)),
                ('refunded', models.BooleanField(default=False)),
                ('outcome', models.TextField(blank=True, null=True)),
                ('outcome_type', models.CharField(blank=True, max_length=120, null=True)),
                ('seller_message', models.CharField(blank=True, max_length=120, null=True)),
                ('risk_level', models.CharField(blank=True, max_length=120, null=True)),
                ('billing_profile', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='billing.BillingProfile')),
            ],
        ),
    ]
| [
"phuchoa099@gmail.com"
] | phuchoa099@gmail.com |
2794a116155b8d4c376b9759d4623ae07de36c4b | 14a1312dfb7c4d5e2b76f49b0837cc024f5a1295 | /python/gate/background/digester.py | dd893310ce429b07bb65d21afd9209bd468b288b | [] | no_license | bropony/gamit | b3a493c55407efa83ae20286b1e624b280b46494 | 47811e2cfe67c3c0de4c4be7394dd30e48732799 | refs/heads/master | 2020-05-17T01:51:38.887194 | 2015-11-05T12:57:13 | 2015-11-05T12:57:13 | 36,106,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,620 | py | """
@author: mahanzhou
@date: 8/6/15
@file:
@desc:
"""
from gamit.log.logger import Logger
from gamit.utils.myuuid import MyUuid
from gamit.mongodb.database import MongoDatabase
from message.db.systemcommand import ESysCommandType
from message.db.mongodb.posttables import TSysTopic
import json
from social.systopicmanager import SysTopicManager
class __SystemCommandDigester:
    """Dispatches persisted system commands to their concrete handlers.

    Use the module-level ``SystemCommandDigester`` singleton below rather
    than instantiating this class directly.
    """
    def digest(self, systemCommand):
        """Execute a single system command.

        :type systemCommand: message.db.mongodb.utiltables.TSystemCommand
        :rtype: bool -- True when the command was handled successfully.
        """
        operStatus = False
        if systemCommand.commandType == ESysCommandType.AddSysTopic:
            operStatus = self.__addSysTopic(systemCommand)
        elif systemCommand.commandType == ESysCommandType.AddCommercialAd:
            # Not implemented yet; deliberately treated as an unhandled no-op.
            pass
        else:
            Logger.logInfo("__SystemCommandDigester.digest: undigested command type:", systemCommand.commandType)
        return operStatus
    def __addSysTopic(self, scmd):
        """Create a new system topic from the JSON payload in ``scmd.stringVal``.

        :type scmd: message.db.mongodb.utiltables.TSystemCommand
        :rtype: bool
        """
        # json.loads decodes UTF-8 by default; the old positional "UTF8"
        # argument was the Python-2-only ``encoding`` parameter and raises
        # TypeError on Python 3, so it was dropped (behavior unchanged).
        jsTopic = json.loads(scmd.stringVal)
        tsysTopic = TSysTopic()
        tsysTopic._fromJson(jsTopic)
        tsysTopic.topicId = MyUuid.getUuid()
        tb = MongoDatabase.findTableByMessageType(TSysTopic)
        if not tb:
            Logger.logInfo("__SystemCommandDigester.__addSysTopic. Table not found: ", TSysTopic.__name__)
            # Fix: previously returned None; digest() documents a bool result.
            return False
        tb.save(tsysTopic)
        SysTopicManager.addNewSysTopics([tsysTopic])
        return True
SystemCommandDigester = __SystemCommandDigester()
| [
"ahda@qq.com"
] | ahda@qq.com |
e8a2cf0901af06fe8256b304d58206bea59f42a6 | 1e8142725aa06844713d18fa38c6779aff8f8171 | /tndata_backend/goals/migrations/0047_auto_20150531_1646.py | 252f0df2ecd3c85d51b521c3169888dc1e1bf6e7 | [
"MIT"
] | permissive | tndatacommons/tndata_backend | 8f4db3e5cf5272901c9087a85e21d7560240bb3b | 3d22179c581ab3da18900483930d5ecc0a5fca73 | refs/heads/master | 2020-12-03T07:53:17.339769 | 2017-03-27T06:18:58 | 2017-03-27T06:18:58 | 68,407,220 | 1 | 2 | null | 2017-03-27T06:18:59 | 2016-09-16T18:59:16 | Python | UTF-8 | Python | false | false | 472 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('goals', '0046_behaviorprogress_goalprogress'),
]
operations = [
migrations.AlterField(
model_name='behaviorprogress',
name='status',
field=models.IntegerField(choices=[(1, 'Off Course'), (2, 'Seeking'), (3, 'On Course')]),
),
]
| [
"brad@bradmontgomery.net"
] | brad@bradmontgomery.net |
40bd087b309f78f9719a323572d870a3bf279dc9 | d860a2c1fa8fffc76a9101e4f91cecc80c27e802 | /leetcode/859_Buddy_Strings.py | 9d27eb3c71441e478d0179fbd66d1bd7781c8c35 | [] | no_license | heroming/algorithm | 80ea8f00ac049b0bc815140253568484e49c39e3 | 18e510f02bff92bc45cceb7090a79fbd40c209ec | refs/heads/master | 2021-01-19T01:27:31.676356 | 2019-06-09T08:51:16 | 2019-06-09T08:51:16 | 62,952,889 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | class Solution(object):
def buddyStrings(self, s, t) :
if len(s) != len(t) :
return False
dif = []
for i in xrange(len(s)) :
if s[i] != t[i] :
if len(dif) >= 2 :
return False
dif.append(i)
if len(dif) == 2 :
a, b = dif[0], dif[1]
return s[a] == t[b] and s[b] == t[a]
elif len(dif) == 0 :
dic = {}
for c in s :
if c in dic :
return True
else :
dic[c] = True
return False
else :
return False
| [
"heroming7788@gmail.com"
] | heroming7788@gmail.com |
7d750b7fca20b099d974eacf28cd9a5fa0f30070 | 2dbadf8d7c26b3dda69328229b60df160b69f917 | /nyuv2/weighted_histogram_matching/evaluate_midas.py | b8574abf164de94708ddbf88b99af815a8f4927c | [] | no_license | computational-imaging/spad_single | a17c31d0564a16f08f4768dcc27c064272a5f70d | 54e18e26a6f3c33837da032063e8cf9cc287569e | refs/heads/master | 2022-11-18T08:32:37.513981 | 2020-07-19T04:44:56 | 2020-07-19T04:44:56 | 152,368,443 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,489 | py | import os
import numpy as np
import torch
import pandas as pd
from sacred import Experiment
from weighted_histogram_matching import image_histogram_match, image_histogram_match_variable_bin
from models.data.data_utils.sid_utils import SID
from spad_utils import rescale_bins
from remove_dc_from_spad import remove_dc_from_spad_poisson, remove_dc_from_spad_ambient_estimate, remove_dc_from_spad_edge
from nyuv2_labeled_dataset import nyuv2_labeled_ingredient, load_data
from models.loss import get_depth_metrics
# Sacred experiment; wires in the NYUv2 labeled-dataset ingredient's config.
ex = Experiment("midas_weighted_hist_match", ingredients=[nyuv2_labeled_ingredient])
@ex.config
def cfg(data_config):
    """Sacred config: every local defined here becomes a config entry.

    NOTE: sacred captures these local variables by introspection, so names
    and assignment order are significant — do not refactor into returns.
    ``data_config`` is the ingredient's config dict (presumably from
    ``nyuv2_labeled_ingredient``) — unused directly here; confirm it is
    only needed for injection.
    """
    data_dir = "data"
    dataset_type = "test"
    # SPAD simulation flags; must match how the .npy files were generated.
    use_intensity = True
    use_squared_falloff = True
    lambertian = True
    dc_count = 1e5
    use_jitter = True
    use_poisson = True
    # Encodes all simulation options so input/output files pair up.
    hyper_string = "{}_int_{}_fall_{}_lamb_{}_dc_{}_jit_{}_poiss_{}".format(
        dataset_type,
        use_intensity,
        use_squared_falloff,
        lambertian,
        dc_count,
        use_jitter,
        use_poisson)
    # spad_file = os.path.join(data_dir, "{}_spad{}.npy".format(hyper_string,
    #                                                           "_denoised" if dc_count > 0. and use_poisson else ""))
    spad_file = os.path.join(data_dir, "{}_spad.npy".format(hyper_string))
    midas_depth_file = os.path.join(data_dir, "midas_{}_outputs.npy".format(dataset_type))
    # SID (spacing-increasing discretization) params
    sid_bins = 140
    # alpha = 0.6569154266167957
    # beta = 9.972175646365525
    alpha = 0.1
    beta = 10.
    offset = 0
    # SPAD denoising params (lambda depends on whether Poisson noise was simulated)
    lam = 1e1 if use_poisson else 1e-1
    eps_rel = 1e-5
    n_std = 0.5
    # entry=None -> evaluate the whole split; otherwise a single dataset id.
    entry = None
    save_outputs = True
    small_run = 0
    output_dir = "results"
@ex.automain
def run(dataset_type,
        spad_file,
        midas_depth_file,
        hyper_string,
        sid_bins, alpha, beta, offset,
        lam, eps_rel, n_std,
        entry, save_outputs, small_run, output_dir):
    """Evaluate MiDaS depth maps refined by weighted histogram matching.

    Two modes:
      * ``entry is None`` — loop over the whole split, denoise each SPAD
        histogram, histogram-match the MiDaS prediction against it, and
        write per-image metrics plus weighted averages to ``output_dir``.
      * otherwise — evaluate a single dataset entry and print before/after
        metrics. NOTE(review): this branch references ``sid_obj``, which is
        never defined in this function (only ``sid_obj_init``/``sid_obj_pred``
        exist) and will raise NameError — confirm the intended object.
    """
    # Load all the data:
    print("Loading SPAD data from {}".format(spad_file))
    spad_dict = np.load(spad_file, allow_pickle=True).item()
    spad_data = spad_dict["spad"]
    intensity_data = spad_dict["intensity"]
    spad_config = spad_dict["config"]
    print("Loading depth data from {}".format(midas_depth_file))
    depth_data = np.load(midas_depth_file)
    dataset = load_data(channels_first=True, dataset_type=dataset_type)
    # Read SPAD config and determine proper course of action
    dc_count = spad_config["dc_count"]
    # Ambient (dark-count) photons per histogram bin.
    ambient = spad_config["dc_count"]/spad_config["spad_bins"]
    use_intensity = spad_config["use_intensity"]
    use_squared_falloff = spad_config["use_squared_falloff"]
    lambertian = spad_config["lambertian"]
    use_poisson = spad_config["use_poisson"]
    min_depth = spad_config["min_depth"]
    max_depth = spad_config["max_depth"]
    print("ambient: ", ambient)
    print("dc_count: ", dc_count)
    print("use_intensity: ", use_intensity)
    print("use_squared_falloff:", use_squared_falloff)
    print("lambertian:", lambertian)
    print("ambient")
    print("spad_data.shape", spad_data.shape)
    print("depth_data.shape", depth_data.shape)
    print("intensity_data.shape", intensity_data.shape)
    sid_obj_init = SID(sid_bins, alpha, beta, offset)
    if entry is None:
        # Whole-split evaluation; last column holds per-image pixel counts
        # used as weights for the final averages.
        metric_list = ["delta1", "delta2", "delta3", "rel_abs_diff", "rmse", "mse", "log10", "weight"]
        metrics = np.zeros((len(dataset) if not small_run else small_run, len(metric_list)))
        entry_list = []
        outputs = []
        for i in range(depth_data.shape[0]):
            if small_run and i == small_run:
                break
            entry_list.append(i)
            print("Evaluating {}[{}]".format(dataset_type, i))
            spad = spad_data[i,...]
            # spad = preprocess_spad_ambient_estimate(spad, min_depth, max_depth,
            #                                         correct_falloff=use_squared_falloff,
            #                                         remove_dc= dc_count > 0.,
            #                                         global_min_depth=np.min(depth_data),
            #                                         n_std=1. if use_poisson else 0.01)
            # Rescale SPAD_data
            # spad_rescaled = rescale_bins(spad, min_depth, max_depth, sid_obj)
            weights = np.ones_like(depth_data[i, 0, ...])
            if use_intensity:
                weights = intensity_data[i, 0, ...]
            # spad_rescaled = preprocess_spad_sid_gmm(spad_rescaled, sid_obj, use_squared_falloff, dc_count > 0.)
            if dc_count > 0.:
                # spad_rescaled = remove_dc_from_spad(spad_rescaled,
                #                                     sid_obj.sid_bin_edges,
                #                                     sid_obj.sid_bin_values[:-2]**2,
                #                                     lam=1e1 if spad_config["use_poisson"] else 1e-1,
                #                                     eps_rel=1e-5)
                # spad_rescaled = remove_dc_from_spad_poisson(spad_rescaled,
                #                                             sid_obj.sid_bin_edges,
                #                                             lam=lam)
                # Edge-based background subtraction; threshold presumably
                # ~5 sigma of the Poisson background — TODO confirm.
                spad = remove_dc_from_spad_edge(spad,
                                                ambient=ambient,
                                                grad_th=5*np.sqrt(2*ambient))
            # print(spad[:10])
            # print(spad)
            bin_edges = np.linspace(min_depth, max_depth, len(spad) + 1)
            bin_values = (bin_edges[1:] + bin_edges[:-1])/2
            if use_squared_falloff:
                # Undo radiometric falloff: r^4 for Lambertian, r^2 otherwise.
                if lambertian:
                    spad = spad * bin_values ** 4
                else:
                    spad = spad * bin_values ** 2
            # Scale SID object to maximize bin utilization
            nonzeros = np.nonzero(spad)[0]
            if nonzeros.size > 0:
                min_depth_bin = np.min(nonzeros)
                max_depth_bin = np.max(nonzeros) + 1
                if max_depth_bin > len(bin_edges) - 2:
                    max_depth_bin = len(bin_edges) - 2
            else:
                min_depth_bin = 0
                max_depth_bin = len(bin_edges) - 2
            min_depth_pred = np.clip(bin_edges[min_depth_bin], a_min=1e-2, a_max=None)
            max_depth_pred = np.clip(bin_edges[max_depth_bin+1], a_min=1e-2, a_max=None)
            sid_obj_pred = SID(sid_bins=sid_obj_init.sid_bins,
                               alpha=min_depth_pred,
                               beta=max_depth_pred,
                               offset=0.)
            spad_rescaled = rescale_bins(spad[min_depth_bin:max_depth_bin+1],
                                         min_depth_pred, max_depth_pred, sid_obj_pred)
            pred, t = image_histogram_match_variable_bin(depth_data[i, 0, ...], spad_rescaled, weights,
                                                         sid_obj_init, sid_obj_pred)
            # break
            # Calculate metrics
            gt = dataset[i]["depth_cropped"].unsqueeze(0)
            # print(gt.dtype)
            # print(pred.shape)
            # print(pred[20:30, 20:30])
            pred_metrics = get_depth_metrics(torch.from_numpy(pred).unsqueeze(0).unsqueeze(0).float(),
                                             gt,
                                             torch.ones_like(gt))
            for j, metric_name in enumerate(metric_list[:-1]):
                metrics[i, j] = pred_metrics[metric_name]
            metrics[i, -1] = np.size(pred)
            # Option to save outputs:
            if save_outputs:
                outputs.append(pred)
            print("\tAvg RMSE = {}".format(np.mean(metrics[:i+1, metric_list.index("rmse")])))
        if save_outputs:
            np.save(os.path.join(output_dir, "midas_{}_outputs.npy".format(hyper_string)), np.array(outputs))
        # Save metrics using pandas
        metrics_df = pd.DataFrame(data=metrics, index=entry_list, columns=metric_list)
        metrics_df.to_pickle(path=os.path.join(output_dir, "midas_{}_metrics.pkl".format(hyper_string)))
        # Compute weighted averages:
        # NOTE(review): DataFrame.ix was removed in pandas 1.0 — this needs
        # .iloc/.loc on modern pandas; confirm the pinned pandas version.
        average_metrics = np.average(metrics_df.ix[:, :-1], weights=metrics_df.weight, axis=0)
        average_df = pd.Series(data=average_metrics, index=metric_list[:-1])
        average_df.to_csv(os.path.join(output_dir, "midas_{}_avg_metrics.csv".format(hyper_string)), header=True)
        print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".format('d1', 'd2', 'd3', 'rel', 'rmse', 'log_10'))
        # Index 5 (mse) is intentionally skipped in the printout.
        print(
            "{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}".format(average_metrics[0],
                                                                                average_metrics[1],
                                                                                average_metrics[2],
                                                                                average_metrics[3],
                                                                                average_metrics[4],
                                                                                average_metrics[6]))
        print("wrote results to {} ({})".format(output_dir, hyper_string))
    else:
        # Single-entry debug path.
        input_unbatched = dataset.get_item_by_id(entry)
        # for key in ["rgb", "albedo", "rawdepth", "spad", "mask", "rawdepth_orig", "mask_orig", "albedo_orig"]:
        #     input_[key] = input_[key].unsqueeze(0)
        from torch.utils.data._utils.collate import default_collate
        data = default_collate([input_unbatched])
        # Checks
        entry = data["entry"][0]
        i = int(entry)
        entry = entry if isinstance(entry, str) else entry.item()
        print("Evaluating {}[{}]".format(dataset_type, i))
        # Rescale SPAD
        spad = spad_data[i, ...]
        # NOTE(review): ``sid_obj`` is undefined here — NameError at runtime.
        spad_rescaled = rescale_bins(spad, min_depth, max_depth, sid_obj)
        print("spad_rescaled", spad_rescaled)
        weights = np.ones_like(depth_data[i, 0, ...])
        if use_intensity:
            weights = intensity_data[i, 0, ...]
        # spad_rescaled = preprocess_spad_sid_gmm(spad_rescaled, sid_obj, use_squared_falloff, dc_count > 0.)
        # spad_rescaled = preprocess_spad_sid(spad_rescaled, sid_obj, use_squared_falloff, dc_count > 0.
        #                                     )
        if dc_count > 0.:
            spad_rescaled = remove_dc_from_spad(spad_rescaled,
                                                sid_obj.sid_bin_edges,
                                                sid_obj.sid_bin_values[:-2] ** 2,
                                                lam=1e1 if use_poisson else 1e-1,
                                                eps_rel=1e-5)
        if use_squared_falloff:
            spad_rescaled = spad_rescaled * sid_obj.sid_bin_values[:-2] ** 2
        # print(spad_rescaled)
        pred, _ = image_histogram_match(depth_data[i, 0, ...], spad_rescaled, weights, sid_obj)
        # break
        # Calculate metrics
        gt = data["depth_cropped"]
        print(gt.shape)
        print(pred.shape)
        print(gt[:,:,40, 60])
        print(depth_data[i,0,40,60])
        print("before rmse: ", np.sqrt(np.mean((gt.numpy() - depth_data[i,0,...])**2)))
        before_metrics = get_depth_metrics(torch.from_numpy(depth_data[i,0,...]).unsqueeze(0).unsqueeze(0).float(),
                                           gt,
                                           torch.ones_like(gt))
        pred_metrics = get_depth_metrics(torch.from_numpy(pred).unsqueeze(0).unsqueeze(0).float(),
                                         gt,
                                         torch.ones_like(gt))
        if save_outputs:
            np.save(os.path.join(output_dir, "midas_{}[{}]_{}_out.npy".format(dataset_type, entry, hyper_string)),
                    pred)
        print("before:")
        print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".format('d1', 'd2', 'd3', 'rel', 'rmse', 'log_10'))
        print(
            "{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}".format(before_metrics["delta1"],
                                                                                before_metrics["delta2"],
                                                                                before_metrics["delta3"],
                                                                                before_metrics["rel_abs_diff"],
                                                                                before_metrics["rmse"],
                                                                                before_metrics["log10"]))
        print("after:")
        print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".format('d1', 'd2', 'd3', 'rel', 'rmse', 'log_10'))
        print(
            "{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}".format(pred_metrics["delta1"],
                                                                                pred_metrics["delta2"],
                                                                                pred_metrics["delta3"],
                                                                                pred_metrics["rel_abs_diff"],
                                                                                pred_metrics["rmse"],
                                                                                pred_metrics["log10"]))
| [
"nishimuramarky@yahoo.com"
] | nishimuramarky@yahoo.com |
8bbde69a27066cd21cc393c9b6560021a483176e | 99249dad36df26a712ae8d900041d53acf3901ea | /test/ps_delay_test-3.3.py | cb4c41fb0c53441f4683cf1318517a72829878b5 | [
"MIT"
] | permissive | bopopescu/Lauecollect | f1f79c2cc5ff106df0dedbd6939ec92630d2b305 | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | refs/heads/master | 2022-11-29T00:40:28.384831 | 2019-06-05T01:21:36 | 2019-06-05T01:21:36 | 280,989,300 | 0 | 0 | MIT | 2020-07-20T02:03:22 | 2020-07-20T02:03:22 | null | UTF-8 | Python | false | false | 2,748 | py | """Delay line linearity characterization
Friedrich Schotte, Jul 22, 2015 - Apr 20, 2015
Setup:
Ramsay-100B RF Generator, 351.93398 MHz +10 dBm -> FPGA RF IN
FPGA 1: X-scope trig -> CH1, DC50, 500 mV/div
FPGA 13: ps L oscill -> DC block -> 90-MHz low-pass -> CH2, DC50, 500 mV/div
Timebase 5 ns/div
Measurement P1 CH2, time@level, Absolute, 0, Slope Pos, Gate Start 4.5 div,
Stop 5.5 div
Waiting time: 97.8 ms
"""
__version__ = "3.3"
from instrumentation import timing_system,timing_sequencer,lecroy_scope
from timing_sequence import lxd,Sequence
from scan import rscan,timescan as tscan
from sleep import sleep
from numpy import arange
# Scope measurement P2 (time@level on CH2) used as the scan readout.
delay = lecroy_scope().measurement(2)
# Full dial range and step count of the FPGA clock-shift register.
tmax = timing_system.clk_shift.max_dial
nsteps = (timing_system.clk_shift.max_count+1)
class Clk_shift_count(object):
    """Scan-parameter adapter exposing ``timing_system.clk_shift.count``.

    Provides the ``name``/``value`` interface the scan helpers expect.
    """
    name = "clk_shift.count"
    def get_value(self): return timing_system.clk_shift.count
    def set_value(self,value): timing_system.clk_shift.count = value
    value = property(get_value,set_value)
# Shared instance handed to rscan() as a logged parameter.
clk_shift_count = Clk_shift_count()
def scan():
    """Scan the laser-to-X-ray delay (lxd) over 5 bunch-clock periods.

    The scope gate start/stop track the delay (note the negative spans),
    recording the clk_shift count and measurement P1 at each step.
    """
    delay = lecroy_scope().measurement(1)
    tmax = 5*timing_system.bct
    nsteps = tmax/timing_system.clk_shift.stepsize
    lxd.value = 0
    data = rscan([lxd,delay.gate.start,delay.gate.stop],0,[tmax,-tmax,-tmax],
        nsteps,[clk_shift_count,delay],averaging_time=10.0,logfile="logfiles/scan.log")
def scan_delayline():
    """Characterize the delay line: sweep clk_shift over its full dial range.

    Stops the sequencer and enables the X-ray oscillator output first so the
    scope sees a stable trigger; statement order is intentional.
    """
    delay = lecroy_scope().measurement(2)
    tmax = timing_system.clk_shift.max_dial
    nsteps = tmax/timing_system.clk_shift.stepsize
    timing_sequencer.running = False
    timing_system.xosct.enable.count = 1
    timing_system.clk_shift.dial = 0
    data = rscan([timing_system.clk_shift,delay.gate.start,delay.gate.stop],
        [0,0,0],[tmax,tmax,tmax],nsteps,[clk_shift_count,delay],
        averaging_time=10.0,logfile="logfiles/scan_delayline.log")
def timescan():
    """Log the scope delay measurement over time (stability check)."""
    data = tscan(delay,averaging_time=10.0,logfile="logfiles/timescan.log")
def register_counts():
    """Sample pso and clk_shift register counts across 50 ps_lxd delays.

    Returns a ``(pso, clk_shift)`` pair of lists, one entry per delay in
    ``[0, tmax)``. Each Sequence is built afresh per sample, first for the
    pso register (index 16), then for clk_shift (index 17), matching the
    original two-pass evaluation order.
    """
    sample_delays = arange(0, tmax, tmax / 50)
    pso_counts = []
    for delay_value in sample_delays:
        pso_counts.append(Sequence(ps_lxd=delay_value).register_counts[1][16][0])
    clk_shift_counts = []
    for delay_value in sample_delays:
        clk_shift_counts.append(Sequence(ps_lxd=delay_value).register_counts[1][17][0])
    return pso_counts, clk_shift_counts
def reset_dcm():
    """Pulse the clk_shift reset line: assert, hold 0.2 s, deassert."""
    timing_system.clk_shift_reset.count = 1
    sleep(0.2)
    timing_system.clk_shift_reset.count = 0
def peridiocally_reset_dcm(wait_time=60):
    """Reset the DCM every ``wait_time`` seconds until interrupted.

    (Name typo "peridiocally" is kept for call-site compatibility.)
    On Ctrl-C the reset line is forced low before exiting.
    """
    while True:
        try:
            reset_dcm()
            sleep(wait_time)
        except KeyboardInterrupt:
            timing_system.clk_shift_reset.count = 0
            break
if __name__ == "__main__":
    # Print connection info and the available scan commands (run manually
    # from an interactive session).
    print('timing_system.ip_address = %r' % timing_system.ip_address)
    print('lecroy_scope().ip_address = %r' % lecroy_scope().ip_address)
    print('scan_delayline()')
    print('scan()')
| [
"friedrich.schotte@gmail.com"
] | friedrich.schotte@gmail.com |
cb51a14333b36709a28fef261b38acdb99cd369c | da3b03b013c00450dff6686b02d71209b3c6a271 | /pannier/pannier/tests.py | d9125299877aacf20c5513ae83a56aa624289f2f | [] | no_license | domestique/pannier | cf425e30bb4dd1999169a2375b9e2b2da599a376 | b92d40cc3e9a2523962d39895e58b9e1f7f36ae2 | refs/heads/master | 2021-01-20T19:14:16.201440 | 2016-12-22T20:46:11 | 2016-12-22T20:46:11 | 63,989,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,270 | py | import os
from mock import patch
from django.core import mail
from django.test import TestCase, override_settings
from django.core.urlresolvers import reverse
from pannier import forms, models
# Directory of this test module and the folder holding JSON fixtures.
CUR_DIR = os.path.dirname(os.path.realpath(__file__))
TEST_DIR = os.path.join(CUR_DIR, 'test_data')
class BaseCase(TestCase):
    """Shared TestCase base with an HTTP status-code assertion helper."""
    def assertStatusCode(self, response, status_code=200):
        self.assertEqual(response.status_code, status_code)
class TestLeadModel(BaseCase):
    """Unit tests for the Lead model's derived fields."""
    def _create_lead(self, lead_details=None):
        # Build a Lead from defaults, overridable per-field via lead_details.
        lead_details = lead_details if lead_details else {}
        return models.Lead.create_lead(
            first_name=lead_details.get('first_name', 'Klea'),
            last_name=lead_details.get('last_name', 'Ridley'),
            company_name=lead_details.get('company_name', 'Domestique Studios'),
            domain_name=lead_details.get('domain_name', 'domestique'),
            email_address=lead_details.get('email_address', 'support@domestiquestudios.com'),
            phone_number=lead_details.get('phone_number', '123-123-1234'),
            team_size=lead_details.get('team_size', '1-10'),
        )
    def test_full_name(self):
        """full_name concatenates first and last name."""
        lead = self._create_lead()
        self.assertEqual(lead.full_name, 'Klea Ridley')
    def test__str(self):
        """__str__ renders as 'Lead: <full name>'."""
        lead = self._create_lead()
        self.assertEqual(lead.__str__(), 'Lead: {}'.format(lead.full_name))
class TestPannierViews(BaseCase):
    """View tests: lead signup form, thanks page, and the Docker webhook."""
    def test_lead_create_get(self):
        """GET renders the lead form on the expected templates."""
        response = self.client.get(reverse('lead-create'))
        self.assertStatusCode(response, 200)
        self.assertTrue(
            isinstance(response.context['form'], forms.LeadForm),
        )
        self.assertTemplateUsed('base.html')
        self.assertTemplateUsed('lead.html')
    def test_lead_create(self):
        """Valid POST creates a Lead, redirects to thanks, and emails staff."""
        response = self.client.post(reverse('lead-create'), {
            'first_name': 'Pat',
            'last_name': 'Patterson',
            'company_name': 'Patty Cakes',
            'domain_name': 'itspat',
            'email_address': 'pat@pattycakes.com',
            'phone_number': '321-321-4321',
            'team_size': '10-30',
        })
        self.assertStatusCode(response, 302)
        self.assertRedirects(response, reverse('thanks'))
        lead = models.Lead.objects.get(first_name='Pat')
        self.assertEqual(lead.last_name, 'Patterson')
        self.assertEqual(lead.company_name, 'Patty Cakes')
        self.assertEqual(lead.domain_name, 'itspat')
        self.assertEqual(lead.email_address, 'pat@pattycakes.com')
        self.assertEqual(lead.phone_number, '321-321-4321')
        self.assertEqual(lead.team_size, '10-30')
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'New Invite Signup!')
    def test_lead_create_error(self):
        """Invalid team_size re-renders the form with a field error."""
        response = self.client.post(reverse('lead-create'), {
            'first_name': 'Pat',
            'last_name': 'Patterson',
            'company_name': 'Patty Cakes',
            'domain_name': 'itspat',
            'email_address': 'pat@pattycakes.com',
            'phone_number': '321-321-4321',
            'team_size': 'BAD TEAM SIZE',
        })
        self.assertStatusCode(response, 200)
        self.assertTemplateUsed('base.html')
        self.assertTemplateUsed('lead.html')
        form = response.context['form']
        self.assertEqual(
            form.errors['team_size'],
            ['Select a valid choice. BAD TEAM SIZE is not one of the available choices.']
        )
    def test_thanks_get(self):
        """Thanks page renders successfully."""
        response = self.client.get(reverse('thanks'))
        self.assertStatusCode(response, 200)
        self.assertTemplateUsed('base.html')
        self.assertTemplateUsed('thanks.html')
    @override_settings(PANNIER_WORKSPACE='/home/workspace/')
    @patch('pannier.views.call')
    def test_docker_webhook(self, call_mock):
        """Docker webhook shells out to the tag script in the workspace."""
        # Fix: use a context manager so the fixture file handle is closed
        # (the original open(...).read() leaked it).
        with open(os.path.join(TEST_DIR, 'docker_hook.json')) as fixture:
            json_data = fixture.read()
        response = self.client.post(reverse('docker'), content_type='application/json', data=json_data)
        self.assertStatusCode(response, 200)
        call_mock.assert_called_with(
            './tag_new_version.sh', shell=True, cwd='/home/workspace/'
        )
        self.assertEqual(len(mail.outbox), 1)
| [
"f4nt@f4ntasmic.com"
] | f4nt@f4ntasmic.com |
44fed3a3ec344dc97f314b02c3c8e824d51ba32f | 4be5c172c84e04c35677f5a327ab0ba592849676 | /python/snippets/unpacking_args_kwargs.py | a7209cfedb745c049a9ad5658e6f9531d6481c2d | [] | no_license | niranjan-nagaraju/Development | 3a16b547b030182867b7a44ac96a878c14058016 | d193ae12863971ac48a5ec9c0b35bfdf53b473b5 | refs/heads/master | 2023-04-06T20:42:57.882882 | 2023-03-31T18:38:40 | 2023-03-31T18:38:40 | 889,620 | 9 | 2 | null | 2019-05-27T17:00:29 | 2010-09-05T15:58:46 | Python | UTF-8 | Python | false | false | 660 | py | #!/usr/local/bin/python3
'''
Test program to test *args and **kwargs unpacking in function calls
'''
def f1(a, b, c):
return a+b+c
def f2(a):
return a+10
def f3():
return 100
def handler(fn, *args, **kwargs):
return fn(*args, **kwargs)
if __name__ == '__main__':
assert(handler(f1, 1, 2, 3) == 6)
assert(handler(f2, 3) == 13)
assert(handler(f3) == 100)
t = (2,3,5)
assert((f1(*t) == 10))
p = [6,4,3]
assert(f1(*p) == 13)
assert(f2(*[1]) == 11)
assert(f3(*[]) == 100)
assert(handler(f1, c="3", b="2", a="1") == "123")
d = {'a': 1, 'c': 4, 'b': 2}
assert(handler(f1, **d) == 7)
x = {'a': 1}
assert(handler(f2, **x) == 11)
| [
"vinithepooh@gmail.com"
] | vinithepooh@gmail.com |
90f1114b75076faa067987410e79c0e2fca5b744 | 1fb87c2038ea178ab8b7d600da6a105ccd35b44a | /ucscsdk/mometa/gl/GlVlan.py | 6fade91aaf3af0da4d65621f4af627cdba227649 | [
"Apache-2.0"
] | permissive | hrupprecht/ucscsdk | 72fe255dfb2d68b620b52793eae38e4d1b1ed7e7 | 1a62184548300ad1071780a2519c60552f0a21a2 | refs/heads/master | 2020-09-28T23:30:23.074800 | 2019-12-17T08:20:28 | 2019-12-17T08:20:28 | 226,891,866 | 0 | 0 | NOASSERTION | 2019-12-09T14:32:53 | 2019-12-09T14:32:52 | null | UTF-8 | Python | false | false | 6,814 | py | """This module contains the general information for GlVlan ManagedObject."""
from ...ucscmo import ManagedObject
from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta
from ...ucscmeta import VersionMeta
class GlVlanConsts():
    """Generated constants for glVlan property values (do not edit by hand)."""
    # ifRole values
    IF_ROLE_DIAG = "diag"
    IF_ROLE_FCOE_NAS_STORAGE = "fcoe-nas-storage"
    IF_ROLE_FCOE_STORAGE = "fcoe-storage"
    IF_ROLE_FCOE_UPLINK = "fcoe-uplink"
    IF_ROLE_MGMT = "mgmt"
    IF_ROLE_MONITOR = "monitor"
    IF_ROLE_NAS_STORAGE = "nas-storage"
    IF_ROLE_NETWORK = "network"
    IF_ROLE_NETWORK_FCOE_UPLINK = "network-fcoe-uplink"
    IF_ROLE_SERVER = "server"
    IF_ROLE_SERVICE = "service"
    IF_ROLE_STORAGE = "storage"
    IF_ROLE_UNKNOWN = "unknown"
    # operState values
    OPER_STATE_CONFLICT = "Conflict"
    OPER_STATE_CONFLICT_RESOLVED = "ConflictResolved"
    OPER_STATE_FAILED_TO_GLOBALIZE = "FailedToGlobalize"
    OPER_STATE_GLOBALIZED = "Globalized"
    OPER_STATE_GLOBALIZING = "Globalizing"
    OPER_STATE_NOT_CONFLICT = "NotConflict"
    OPER_STATE_NOT_EVALUATED = "NotEvaluated"
    # policyOwner values
    POLICY_OWNER_LOCAL = "local"
    POLICY_OWNER_PENDING_POLICY = "pending-policy"
    POLICY_OWNER_POLICY = "policy"
    POLICY_OWNER_UNSPECIFIED = "unspecified"
    # sharing values
    SHARING_COMMUNITY = "community"
    SHARING_ISOLATED = "isolated"
    SHARING_NONE = "none"
    SHARING_PRIMARY = "primary"
    # switchId values
    SWITCH_ID_A = "A"
    SWITCH_ID_B = "B"
    SWITCH_ID_NONE = "NONE"
    SWITCH_ID_MGMT = "mgmt"
class GlVlan(ManagedObject):
    """Generated managed object for the ``glVlan`` class (rn ``vlan-[id]``).

    Represents a global VLAN in UCS Central inventory. The ``mo_meta`` /
    ``prop_meta`` / ``prop_map`` tables are emitted by the SDK generator and
    must stay in sync with the model schema — do not edit by hand.
    """
    consts = GlVlanConsts()
    naming_props = set([u'id'])
    # Class-level metadata: XML class id, rn pattern, version, access, parents/children.
    mo_meta = MoMeta("GlVlan", "glVlan", "vlan-[id]", VersionMeta.Version201b, "InputOutput", 0x3f, [], ["admin"], [u'glVnetInvHolder'], [u'messageEp'], [None])
    # Per-property metadata: name, xml name, type, version, access, mask, sizes, regex, enums, ranges.
    prop_meta = {
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version201b, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "context_dn": MoPropertyMeta("context_dn", "contextDn", "string", VersionMeta.Version201b, MoPropertyMeta.READ_WRITE, 0x2, 0, 256, None, [], []),
        "deploy_dn": MoPropertyMeta("deploy_dn", "deployDn", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
        "globalized_dn": MoPropertyMeta("globalized_dn", "globalizedDn", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
        "id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version201b, MoPropertyMeta.NAMING, 0x8, None, None, None, [], []),
        "if_role": MoPropertyMeta("if_role", "ifRole", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["diag", "fcoe-nas-storage", "fcoe-storage", "fcoe-uplink", "mgmt", "monitor", "nas-storage", "network", "network-fcoe-uplink", "server", "service", "storage", "unknown"], []),
        "inv_dn": MoPropertyMeta("inv_dn", "invDn", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
        "mcast_policy_dn": MoPropertyMeta("mcast_policy_dn", "mcastPolicyDn", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
        "name": MoPropertyMeta("name", "name", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
        "oper_state": MoPropertyMeta("oper_state", "operState", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["Conflict", "ConflictResolved", "FailedToGlobalize", "Globalized", "Globalizing", "NotConflict", "NotEvaluated"], []),
        "policy_class_name": MoPropertyMeta("policy_class_name", "policyClassName", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
        "policy_owner": MoPropertyMeta("policy_owner", "policyOwner", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["local", "pending-policy", "policy", "unspecified"], []),
        "pub_nw_dn": MoPropertyMeta("pub_nw_dn", "pubNwDn", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
        "sharing": MoPropertyMeta("sharing", "sharing", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["community", "isolated", "none", "primary"], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version201b, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
        "switch_id": MoPropertyMeta("switch_id", "switchId", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, None, None, None, ["A", "B", "NONE", "mgmt"], []),
        "type": MoPropertyMeta("type", "type", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|lan|san|ipc),){0,4}(defaultValue|unknown|lan|san|ipc){0,1}""", [], []),
        "vnet_id": MoPropertyMeta("vnet_id", "vnetId", "uint", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, None, None, None, [], ["1-4093"]),
    }
    # XML attribute name -> python attribute name mapping.
    prop_map = {
        "childAction": "child_action",
        "contextDn": "context_dn",
        "deployDn": "deploy_dn",
        "dn": "dn",
        "globalizedDn": "globalized_dn",
        "id": "id",
        "ifRole": "if_role",
        "invDn": "inv_dn",
        "mcastPolicyDn": "mcast_policy_dn",
        "name": "name",
        "operState": "oper_state",
        "policyClassName": "policy_class_name",
        "policyOwner": "policy_owner",
        "pubNwDn": "pub_nw_dn",
        "rn": "rn",
        "sharing": "sharing",
        "status": "status",
        "switchId": "switch_id",
        "type": "type",
        "vnetId": "vnet_id",
    }
    def __init__(self, parent_mo_or_dn, id, **kwargs):
        """Create a GlVlan under *parent_mo_or_dn*; ``id`` is the naming property."""
        self._dirty_mask = 0
        self.id = id
        self.child_action = None
        self.context_dn = None
        self.deploy_dn = None
        self.globalized_dn = None
        self.if_role = None
        self.inv_dn = None
        self.mcast_policy_dn = None
        self.name = None
        self.oper_state = None
        self.policy_class_name = None
        self.policy_owner = None
        self.pub_nw_dn = None
        self.sharing = None
        self.status = None
        self.switch_id = None
        self.type = None
        self.vnet_id = None
        ManagedObject.__init__(self, "GlVlan", parent_mo_or_dn, **kwargs)
| [
"paragsh@cisco.com"
] | paragsh@cisco.com |
7bfd5dfee9ff148327031ee7d52e2318c6260188 | ce6fc44470dcb5fca78cdd3349a7be70d75f2e3a | /AtCoder/Beginner 147/D.py | b54187e86e75040999da26a123dde94613b6de5d | [] | no_license | cormackikkert/competitive-programming | f3fa287fcb74248ba218ecd763f8f6df31d57424 | 3a1200b8ff9b6941c422371961a127d7be8f2e00 | refs/heads/master | 2022-12-17T02:02:40.892608 | 2020-09-20T11:47:15 | 2020-09-20T11:47:15 | 266,775,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | N = int(input())
C1 = [0 for i in range(60)]
C0 = [0 for i in range(60)]
V = list(map(int, input().split()))
for n in V:
i = 0
for i in range(60):
if (n % 2 == 1):
C1[i] += 1
else:
C0[i] += 1
n //= 2
MOD = int(pow(10, 9) + 7)
total = 0;
for i in range(60):
total += pow(2, i) * C1[i] * C0[i]
total %= MOD
print(total)
| [
"u6427001@anu.edu.au"
] | u6427001@anu.edu.au |
31184b42fe6be39f479fd65591764c7419f647dc | 7b2a3ea853dc44aea204f02abedaad6a2029f4ff | /sw4_test001.py | 7ca8ff6610e40c64e60948828f1a40e7e5490344 | [] | no_license | NoisyLeon/SW4Py | 7d45503282dc988b5f886c039706bd79fdd6b339 | 7029f18eb526bcb46b4aa244da1e088ca57a56aa | refs/heads/master | 2020-12-22T14:57:11.265397 | 2016-12-20T18:27:18 | 2016-12-20T18:27:18 | 56,792,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,394 | py | import obspy
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
infname = '/lustre/janus_scratch/life9360/sw4_working_dir_Q/field_R_200km_zmax_4km_qs_ak135_vs_2000/Tgr_10.0.txt'
inArr=np.loadtxt(infname)
plt.figure();
T=inArr[:,2]
DistArr=inArr[:,3]
VgrArr=DistArr/T
mindist=DistArr.min()
indexmin=DistArr.argmin()
plt.plot(DistArr, T,'o' );
# plt.plot(DistArr, (VgrArr-VgrArr[indexmin])/VgrArr[indexmin]*100.,'o' );
# plt.ylabel('Relative Difference in Vgr (%)');
plt.ylabel('Vgr(km/s)');
plt.ylabel('Travel time(sec)');
plt.xlabel('Distance(km)');
infname = '/lustre/janus_scratch/life9360/sw4_working_dir_Q/field_R_200km_zmax_4km_qs_ak135_vs_2000/Amp_10.0.txt'
inArr2=np.loadtxt(infname)
AmpArr=inArr2[:,2]
DistArr=inArr2[:,3]
fig, ax=plt.subplots()
mindist=DistArr.min()
indexmin=DistArr.argmin()
maxamp=AmpArr[indexmin]
# plt.plot(DistArr, AmpArr*1e9,'o' );
plt.ylabel('Amplitude');
plt.xlabel('Distance(km)')
CampArr=AmpArr*np.sqrt(DistArr/mindist ) /AmpArr[indexmin]
# CampArr=AmpArr*DistArr/ DistArr[0]
# plt.plot(DistArr, (CampArr-CampArr[indexmin])/CampArr[indexmin]*100.,'o' );
plt.plot(DistArr, CampArr,'o' );
y1=900
y2=1300
ax.fill_betweenx(np.array([0.2, 1.1]), y1, y2, facecolor='red', alpha=0.5)
plt.show()
slope, intercept, r_value, p_value, std_err = stats.linregress(DistArr, DistArr/VgrArr);
print slope, intercept, r_value, p_value, std_err
| [
"lili.feng@colorado.edu"
] | lili.feng@colorado.edu |
c5895fe28be33a134db7f26565c1fbe352b452f3 | a12c090eb57da4c8e1f543a1a9d497abad763ccd | /django-stubs/contrib/sessions/backends/signed_cookies.pyi | 7e91615680389abac85e9324dc87e7511fd3f27d | [
"BSD-3-Clause"
] | permissive | debuggerpk/django-stubs | be12eb6b43354a18675de3f70c491e534d065b78 | bbdaebb244bd82544553f4547157e4f694f7ae99 | refs/heads/master | 2020-04-04T08:33:52.358704 | 2018-09-26T19:32:19 | 2018-09-26T19:32:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | pyi | from datetime import datetime
from typing import Any, Dict, Optional, Union
from django.contrib.sessions.backends.base import SessionBase
class SessionStore(SessionBase):
    """Stub for the signed-cookie session backend (data lives in the cookie).

    Fix: the ``serializer`` annotation referenced ``Type`` and
    ``django.core.signing`` without importing either, making the stub
    unresolvable; both are now imported at the top of the module.
    """
    accessed: bool
    serializer: Type[JSONSerializer]
    def load(self) -> Dict[str, Union[datetime, str]]: ...
    modified: bool = ...
    def create(self) -> None: ...
    def save(self, must_create: bool = ...) -> None: ...
    def exists(self, session_key: Optional[str] = ...) -> bool: ...
    def delete(self, session_key: Optional[str] = ...) -> None: ...
    def cycle_key(self) -> None: ...
    @classmethod
    def clear_expired(cls) -> None: ...
| [
"maxim.kurnikov@gmail.com"
] | maxim.kurnikov@gmail.com |
cabab6d3e0cab16ba441df7fc625d4a3f9fe771a | c915508209ea0c6ced34ea2cc751f6f14456758d | /owllook/views/operate_blueprint.py | 47aa87f7d254c80b287f7aeb42d97cedcda4c121 | [
"Apache-2.0"
] | permissive | mscststs/owllook | eb1468c7cd0256c0a58bb76c04f28a7b285eacf7 | 7709144a01aa411735f0155729f5e42484ecf6a4 | refs/heads/master | 2021-08-23T02:09:55.070423 | 2017-12-02T11:37:50 | 2017-12-02T11:37:50 | 106,533,249 | 1 | 1 | null | 2017-12-02T11:29:26 | 2017-10-11T09:24:30 | Python | UTF-8 | Python | false | false | 15,181 | py | #!/usr/bin/env python
import hashlib
import datetime
from jinja2 import Environment, PackageLoader, select_autoescape
from urllib.parse import parse_qs, unquote
from sanic import Blueprint
from sanic.response import html, json
try:
from ujson import dumps as json_dumps
except:
from json import dumps as json_dumps
from owllook.database.mongodb import MotorBase
from owllook.fetcher.function import get_time
from owllook.utils import get_real_answer
from owllook.config import CONFIG, LOGGER
operate_bp = Blueprint('operate_blueprint', url_prefix='operate')
@operate_bp.listener('before_server_start')
def setup_db(operate_bp, loop):
    # Sanic "before_server_start" hook: create the shared MotorBase handle
    # once per process and expose it as a module-level global used by every
    # route handler in this blueprint.
    global motor_base
    motor_base = MotorBase()
@operate_bp.listener('after_server_stop')
def close_connection(operate_bp, loop):
    # Sanic "after_server_stop" hook: drop the shared MotorBase handle.
    # Bug fix: the original assigned to a *local* `motor_base`, which was a
    # no-op — the module-level global (set in setup_db) was never cleared.
    global motor_base
    motor_base = None
# jinjia2 config
env = Environment(
loader=PackageLoader('views.operate_blueprint', '../templates/operate'),
autoescape=select_autoescape(['html', 'xml', 'tpl']))
def template(tpl, **kwargs):
    """Render the named Jinja2 template with *kwargs* and wrap it in an HTML response."""
    rendered = env.get_template(tpl).render(kwargs)
    return html(rendered)
@operate_bp.route("/login", methods=['POST'])
async def owllook_login(request):
"""
用户登录
:param request:
:return:
: -1 用户名或密码不能为空
: 0 用户名或密码错误
: 1 登陆成功
"""
login_data = parse_qs(str(request.body, encoding='utf-8'))
user = login_data.get('user', [None])[0]
pwd = login_data.get('pwd', [None])[0]
if user and pwd:
motor_db = motor_base.get_db()
data = await motor_db.user.find_one({'user': user})
if data:
pass_first = hashlib.md5((CONFIG.WEBSITE["TOKEN"] + pwd).encode("utf-8")).hexdigest()
password = hashlib.md5(pass_first.encode("utf-8")).hexdigest()
if password == data.get('password'):
response = json({'status': 1})
# 将session_id存于cokies
date = datetime.datetime.now()
response.cookies['owl_sid'] = request['session'].sid
response.cookies['owl_sid']['expires'] = date + datetime.timedelta(days=30)
response.cookies['owl_sid']['httponly'] = True
# 此处设置存于服务器session的user值
request['session']['user'] = user
# response.cookies['user'] = user
# response.cookies['user']['expires'] = date + datetime.timedelta(days=30)
# response.cookies['user']['httponly'] = True
# response = json({'status': 1})
# response.cookies['user'] = user
return response
else:
return json({'status': -2})
return json({'status': -1})
else:
return json({'status': 0})
@operate_bp.route("/register", methods=['POST'])
async def owllook_register(request):
"""
用户注册 不允许重名
:param request:
:return:
: -1 用户名已存在
: 0 用户名或密码不能为空
: 1 注册成功
"""
register_data = parse_qs(str(request.body, encoding='utf-8'))
user = register_data.get('user', [None])[0]
pwd = register_data.get('pwd', [None])[0]
email = register_data.get('email', [None])[0]
answer = register_data.get('answer', [None])[0]
reg_index = request.cookies['reg_index']
if user and pwd and email and answer and reg_index:
motor_db = motor_base.get_db()
is_exist = await motor_db.user.find_one({'user': user})
if not is_exist:
# 验证问题答案是否准确
real_answer = get_real_answer(str(reg_index))
if real_answer and real_answer == answer:
pass_first = hashlib.md5((CONFIG.WEBSITE["TOKEN"] + pwd).encode("utf-8")).hexdigest()
password = hashlib.md5(pass_first.encode("utf-8")).hexdigest()
time = get_time()
data = {
"user": user,
"password": password,
"email": email,
"register_time": time,
}
await motor_db.user.save(data)
return json({'status': 1})
else:
return json({'status': -2})
else:
return json({'status': -1})
else:
return json({'status': 0})
@operate_bp.route("/logout", methods=['GET'])
async def owllook_logout(request):
"""
用户登出
:param request:
:return:
: 0 退出失败
: 1 退出成功
"""
user = request['session'].get('user', None)
if user:
response = json({'status': 1})
del response.cookies['user']
del response.cookies['owl_sid']
return response
else:
return json({'status': 0})
@operate_bp.route("/add_bookmark", methods=['POST'])
async def owllook_add_bookmark(request):
"""
添加书签
:param request:
:return:
: -1 用户session失效 需要重新登录
: 0 添加书签失败
: 1 添加书签成功
"""
user = request['session'].get('user', None)
data = parse_qs(str(request.body, encoding='utf-8'))
bookmark_url = data.get('bookmark_url', '')
if user and bookmark_url:
url = unquote(bookmark_url[0])
time = get_time()
try:
motor_db = motor_base.get_db()
res = await motor_db.user_message.update_one({'user': user}, {'$set': {'last_update_time': time}},
upsert=True)
if res:
await motor_db.user_message.update_one(
{'user': user, 'bookmarks.bookmark': {'$ne': url}},
{'$push': {'bookmarks': {'bookmark': url, 'add_time': time}}})
LOGGER.info('书签添加成功')
return json({'status': 1})
except Exception as e:
LOGGER.exception(e)
return json({'status': 0})
else:
return json({'status': -1})
@operate_bp.route("/delete_bookmark", methods=['POST'])
async def owllook_delete_bookmark(request):
"""
删除书签
:param request:
:return:
: -1 用户session失效 需要重新登录
: 0 删除书签失败
: 1 删除书签成功
"""
user = request['session'].get('user', None)
data = parse_qs(str(request.body, encoding='utf-8'))
bookmarkurl = data.get('bookmarkurl', '')
if user and bookmarkurl:
bookmark = unquote(bookmarkurl[0])
try:
motor_db = motor_base.get_db()
await motor_db.user_message.update_one({'user': user},
{'$pull': {'bookmarks': {"bookmark": bookmark}}})
LOGGER.info('删除书签成功')
return json({'status': 1})
except Exception as e:
LOGGER.exception(e)
return json({'status': 0})
else:
return json({'status': -1})
@operate_bp.route("/add_book", methods=['POST'])
async def owllook_add_book(request):
"""
添加书架
:param request:
:return:
: -1 用户session失效 需要重新登录
: 0 添加书架失败
: 1 添加书架成功
"""
user = request['session'].get('user', None)
data = parse_qs(str(request.body, encoding='utf-8'))
novels_name = data.get('novels_name', '')
chapter_url = data.get('chapter_url', '')
last_read_url = data.get('last_read_url', '')
if user and novels_name and chapter_url:
url = "/chapter?url={chapter_url}&novels_name={novels_name}".format(chapter_url=chapter_url[0],
novels_name=novels_name[0])
time = get_time()
try:
motor_db = motor_base.get_db()
res = await motor_db.user_message.update_one({'user': user}, {'$set': {'last_update_time': time}},
upsert=True)
if res:
await motor_db.user_message.update_one(
{'user': user, 'books_url.book_url': {'$ne': url}},
{'$push': {
'books_url': {'book_url': url, 'add_time': time, 'last_read_url': unquote(last_read_url[0])}}})
LOGGER.info('书架添加成功')
return json({'status': 1})
except Exception as e:
LOGGER.exception(e)
return json({'status': 0})
else:
return json({'status': -1})
@operate_bp.route("/delete_book", methods=['POST'])
async def owllook_delete_book(request):
"""
删除书架
:param request:
:return:
: -1 用户session失效 需要重新登录
: 0 删除书架失败
: 1 删除书架成功
"""
user = request['session'].get('user', None)
data = parse_qs(str(request.body, encoding='utf-8'))
if user:
if data.get('book_url', None):
book_url = data.get('book_url', None)[0]
else:
novels_name = data.get('novels_name', '')
chapter_url = data.get('chapter_url', '')
book_url = "/chapter?url={chapter_url}&novels_name={novels_name}".format(chapter_url=chapter_url[0],
novels_name=novels_name[0])
try:
motor_db = motor_base.get_db()
await motor_db.user_message.update_one({'user': user},
{'$pull': {'books_url': {"book_url": unquote(book_url)}}})
LOGGER.info('删除书架成功')
return json({'status': 1})
except Exception as e:
LOGGER.exception(e)
return json({'status': 0})
else:
return json({'status': -1})
@operate_bp.route("/change_email", methods=['POST'])
async def change_email(request):
"""
修改用户邮箱
:param request:
:return:
: -1 用户session失效 需要重新登录
: 0 修改邮箱失败
: 1 添加邮箱成功
"""
user = request['session'].get('user', None)
data = parse_qs(str(request.body, encoding='utf-8'))
if user:
try:
email = data.get('email', None)[0]
motor_db = motor_base.get_db()
await motor_db.user.update_one({'user': user},
{'$set': {'email': email}})
LOGGER.info('修改邮箱成功')
return json({'status': 1})
except Exception as e:
LOGGER.exception(e)
return json({'status': 0})
else:
return json({'status': -1})
@operate_bp.route("/change_pass", methods=['POST'])
async def change_pass(request):
"""
修改用户密码
:param request:
:return:
: -1 用户session失效 需要重新登录
: 0 修改密码失败
: 1 添加密码成功
: -2 原始密码错误
"""
user = request['session'].get('user', None)
data = parse_qs(str(request.body, encoding='utf-8'))
if user:
try:
new_pass = data.get('new_pass', None)[0]
old_pass = data.get('old_pass', None)[0]
motor_db = motor_base.get_db()
user_data = await motor_db.user.find_one({'user': user})
if user_data:
pass_first = hashlib.md5((CONFIG.WEBSITE["TOKEN"] + old_pass).encode("utf-8")).hexdigest()
pass_second = hashlib.md5((CONFIG.WEBSITE["TOKEN"] + new_pass).encode("utf-8")).hexdigest()
new_password = hashlib.md5(pass_second.encode("utf-8")).hexdigest()
password = hashlib.md5(pass_first.encode("utf-8")).hexdigest()
if password == user_data.get('password'):
await motor_db.user.update_one({'user': user},
{'$set': {'password': new_password}})
LOGGER.info('修改密码成功')
return json({'status': 1})
else:
return json({'status': -2})
except Exception as e:
LOGGER.exception(e)
return json({'status': 0})
else:
return json({'status': -1})
@operate_bp.route("/author_notification", methods=['POST'])
async def author_notification(request):
"""
作者新书通知
:param request:
:return:
: -1 用户session失效 需要重新登录
: 2 无该作者信息
: 3 作者已经添加
: 4 超过添加的上限
: 0 操作失败
: 1 操作成功
"""
user = request['session'].get('user', None)
user_data = parse_qs(str(request.body, encoding='utf-8'))
if user:
try:
motor_db = motor_base.get_db()
all_authors = await motor_db.user_message.find_one({'user': user}, {'author_latest': 1, '_id': 0})
count = len(all_authors.get('author_latest', []))
if count == CONFIG.WEBSITE.get("AUTHOR_LATEST_COUNT", 5):
return json({'status': 4})
author_name = user_data.get('author_name', None)[0]
data = []
author_cursor = motor_db.all_books.find({'author': author_name}, {'name': 1, 'url': 1, '_id': 0})
async for document in author_cursor:
data.append(document)
if data:
time = get_time()
res = await motor_db.user_message.update_one({'user': user}, {'$set': {'last_update_time': time}},
upsert=True)
is_exist = await motor_db.user_message.find_one(
{'user': user, 'author_latest.author_name': author_name})
if is_exist:
return json({'status': 3})
if res:
await motor_db.user_message.update_one(
{'user': user, 'author_latest.author_name': {'$ne': author_name}},
{'$push': {
'author_latest': {'author_name': author_name, 'add_time': time}}})
is_author_exist = await motor_db.author_message.find_one({'name': author_name})
if not is_author_exist:
author_data = {
"author_name": author_name,
"nums": len(data),
"updated_time": get_time(),
}
await motor_db.author_message.save(author_data)
LOGGER.info('作者添加成功')
return json({'status': 1})
else:
return json({'status': 2})
else:
return json({'status': 2})
except Exception as e:
LOGGER.exception(e)
return json({'status': 0})
else:
return json({'status': -1})
| [
"xiaozizayang@gmail.com"
] | xiaozizayang@gmail.com |
95941fdc60a6774a6323af41a5f6e8115a00c243 | 2345b3388ca9322e26974b6dd06d592a3b19c6b5 | /python/sysdesign/designFileSystem.py | 9312f30309dfe901a2ba8778d9147e9cae185ec5 | [] | no_license | XifeiNi/LeetCode-Traversal | b94db963cce782dfa641ca04e70876053d53f00d | fdb6bcb4c721e03e853890dd89122f2c4196a1ea | refs/heads/master | 2021-07-23T21:07:24.562063 | 2021-07-05T16:52:12 | 2021-07-05T16:52:12 | 190,349,692 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | class FileSystem:
def __init__(self):
self.val = {}
def createPath(self, path: str, value: int) -> bool:
words = path.split('/')[1:-1]
s = ""
for word in words:
s += "/" + word
if s not in self.val:
return False
if path in self.val and self.val[path] != value:
return False
self.val[path] = value
return True
def get(self, path: str) -> int:
if path not in self.val:
return -1
return self.val[path]
# Your FileSystem object will be instantiated and called as such:
# obj = FileSystem()
# param_1 = obj.createPath(path,value)
# param_2 = obj.get(path)
| [
"cecilia990@outlook.com"
] | cecilia990@outlook.com |
2d04eb5998aa6e20f7eab5b3ced0cb89f3d537a0 | 4c2c084a57ce514ed5f41877f372c6d1426c823b | /grr/server/grr_response_server/db_signed_binaries_test.py | 42deafa44b8b3e496c3e5412cfe43d4aa9a00000 | [
"Apache-2.0"
] | permissive | 4ndygu/grr | db1a8c781f52345aa21b580b3754d41a140e27f9 | cfc725b5ee3a2626ac4cdae7fb14471612da4522 | refs/heads/master | 2020-04-18T09:09:42.076738 | 2019-01-24T19:30:55 | 2019-01-24T19:30:55 | 164,693,051 | 0 | 0 | Apache-2.0 | 2019-01-09T21:10:50 | 2019-01-08T16:48:11 | Python | UTF-8 | Python | false | false | 2,783 | py | #!/usr/bin/env python
"""Tests for signed-binary DB functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from grr_response_server import db
from grr_response_server.rdfvalues import objects as rdf_objects
_test_id1 = rdf_objects.SignedBinaryID(
binary_type=rdf_objects.SignedBinaryID.BinaryType.EXECUTABLE,
path="linux/test/hello")
_test_id2 = rdf_objects.SignedBinaryID(
binary_type=rdf_objects.SignedBinaryID.BinaryType.PYTHON_HACK,
path="windows/test/hello")
_test_references1 = rdf_objects.BlobReferences(items=[
rdf_objects.BlobReference(offset=0, size=2, blob_id=b"\xaa" * 32),
rdf_objects.BlobReference(offset=2, size=3, blob_id=b"\xbb" * 32),
])
_test_references2 = rdf_objects.BlobReferences(items=[
rdf_objects.BlobReference(offset=0, size=3, blob_id=b"\xcc" * 32),
rdf_objects.BlobReference(offset=3, size=2, blob_id=b"\xdd" * 32),
])
class DatabaseTestSignedBinariesMixin(object):
  """Mixin that adds tests for signed binary DB functionality.

  Assumes the host test class provides ``self.db`` (a database implementation
  under test) and the unittest/absltest assertion methods.
  """

  def testReadSignedBinaryReferences(self):
    # A written reference set must round-trip intact and carry a positive
    # write timestamp.
    self.db.WriteSignedBinaryReferences(_test_id1, _test_references1)
    stored_hash_id, stored_timestamp = self.db.ReadSignedBinaryReferences(
        _test_id1)
    self.assertEqual(stored_hash_id, _test_references1)
    self.assertGreater(stored_timestamp.AsMicrosecondsSinceEpoch(), 0)

  def testUpdateSignedBinaryReferences(self):
    # Rewriting the same binary ID replaces the references and advances the
    # stored timestamp.
    self.db.WriteSignedBinaryReferences(_test_id1, _test_references1)
    stored_references1, timestamp1 = self.db.ReadSignedBinaryReferences(
        _test_id1)
    self.assertEqual(stored_references1, _test_references1)
    self.db.WriteSignedBinaryReferences(_test_id1, _test_references2)
    stored_references2, timestamp2 = self.db.ReadSignedBinaryReferences(
        _test_id1)
    self.assertEqual(stored_references2, _test_references2)
    self.assertGreater(timestamp2, timestamp1)

  def testUnknownSignedBinary(self):
    # Reading an ID that was never written raises UnknownSignedBinaryError.
    with self.assertRaises(db.UnknownSignedBinaryError):
      self.db.ReadSignedBinaryReferences(_test_id1)

  def testReadIDsForAllSignedBinaries(self):
    self.db.WriteSignedBinaryReferences(_test_id1, _test_references1)
    self.db.WriteSignedBinaryReferences(_test_id2, _test_references2)
    self.assertCountEqual(self.db.ReadIDsForAllSignedBinaries(),
                          [_test_id1, _test_id2])

  def testDeleteSignedBinaryReferences(self):
    self.db.WriteSignedBinaryReferences(_test_id1, _test_references1)
    self.assertNotEmpty(self.db.ReadIDsForAllSignedBinaries())
    self.db.DeleteSignedBinaryReferences(_test_id1)
    self.assertEmpty(self.db.ReadIDsForAllSignedBinaries())
    # Trying to delete again shouldn't raise.
    self.db.DeleteSignedBinaryReferences(_test_id1)
| [
"realbushman@gmail.com"
] | realbushman@gmail.com |
8e5f20a19d1a66e2a925c5334c5b378f6d25ebeb | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/list/unsignedInt/Schema+Instance/NISTXML-SV-IV-list-unsignedInt-length-4-5.py | 16305329233288f206fde998286f6d1c62dad41b | [
"MIT"
] | permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 429 | py | from output.models.nist_data.list_pkg.unsigned_int.schema_instance.nistschema_sv_iv_list_unsigned_int_length_4_xsd.nistschema_sv_iv_list_unsigned_int_length_4 import NistschemaSvIvListUnsignedIntLength4
# Generated xsdata instance document for the NIST list-of-unsignedInt schema
# test case. The value list carries eight unsigned-int items.
# NOTE(review): the schema name says "length-4" while eight items are listed
# here — presumably the facet is exercised differently by this instance;
# confirm against the corresponding .xsd.
obj = NistschemaSvIvListUnsignedIntLength4(
    value=[
        4181655182,
        4161446171,
        4162425164,
        4161223171,
        4122532161,
        4171446151,
        4143223171,
        4163645122,
    ]
)
| [
"tsoulloftas@gmail.com"
] | tsoulloftas@gmail.com |
5821e591b24ac99aeb3838f49ef97c8f5476ad06 | e8404eb4d9aa8b483083823c3d7720ce0958e6ce | /practice20h.py | f2b2241bc3f258801305a19ee9cf5612d912854d | [] | no_license | harmansehmbi/Project20 | cdd0c98f23e1ad6f1adf22a2c18168b102e37fc4 | 22e43d4fb4ea33bcc18ed946cd031a171aba13df | refs/heads/master | 2020-06-12T10:30:02.893088 | 2019-06-28T12:42:28 | 2019-06-28T12:42:28 | 194,271,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | import matplotlib.pyplot as plt
X = list(range(1,11))
# List Comprehension
Y1 = [n for n in X]
Y2 = [n*n for n in X]
Y3 = [n*n*n for n in X]
print(X)
print(Y1)
print(Y2)
print(Y3)
plt.plot(X, Y1, label="Y1")
plt.plot(X, Y2, label="Y2")
plt.plot(X, Y3, label="Y3")
plt.legend() # How we can place a legend on different positions -> Explore
plt.xlabel("X-Axis")
plt.xlabel("Y-Axis")
plt.title("Polynomial Graph")
plt.grid(True)
plt.show() | [
"51370954+harmansehmbi@users.noreply.github.com"
] | 51370954+harmansehmbi@users.noreply.github.com |
d1dc6e13993bfda14d67288b9c9a190e1f21545d | 6bc418ab81581337aef6ab504a27fc2bf691d0dc | /environments/lab_environment.py | f09f1f575643892c1840a6905500d110d89d4772 | [
"Apache-2.0"
] | permissive | ddayzzz/rl_a3c_pytorch | 66e7dcc6c50bdfa59d33d57ac70ad03932ce2f6f | 3cb522992aec8f945c9219b7cbc4aa98325ea4de | refs/heads/master | 2020-06-19T23:01:32.584879 | 2019-08-03T09:10:00 | 2019-08-03T09:10:00 | 196,906,082 | 0 | 0 | null | 2019-07-15T01:59:18 | 2019-07-15T01:59:18 | null | UTF-8 | Python | false | false | 14,671 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from multiprocessing import Pipe, Process
from environments.utils import LabWorkCommand
from gym import spaces
import gym
try:
import deepmind_lab
from pygame import locals
except ImportError:
pass
import numpy as np
def _action(*entries):
return np.array(entries, dtype=np.intc)
ACTION_LIST = [
_action(-20, 0, 0, 0, 0, 0, 0), # look_left
_action(20, 0, 0, 0, 0, 0, 0), # look_right
# _action( 0, 10, 0, 0, 0, 0, 0), # look_up
# _action( 0, -10, 0, 0, 0, 0, 0), # look_down
_action(0, 0, -1, 0, 0, 0, 0), # strafe_left
_action(0, 0, 1, 0, 0, 0, 0), # strafe_right
_action(0, 0, 0, 1, 0, 0, 0), # forward
_action(0, 0, 0, -1, 0, 0, 0), # backward
# _action( 0, 0, 0, 0, 1, 0, 0), # fire
# _action( 0, 0, 0, 0, 0, 1, 0), # jump
# _action( 0, 0, 0, 0, 0, 0, 1) # crouch
# _action(0, 0, 0, 0, 0, 0, 0) # -1, default action
]
DEFAULT_ACTION = _action(0, 0, 0, 0, 0, 0, 0)
ACTION_MEANING = {
0: "LOOK_LEFT",
1: "LOOK_RIGHT",
2: "STRAFE_LEFT",
3: "STRAFE_RIGHT",
4: "FORWARD",
5: "BACKWARD"
}
ACTION_NAME_TO_KEY = {
'LOOK_LEFT': locals.K_4,
'LOOK_RIGHT': locals.K_6,
'STRAFE_LEFT': locals.K_LEFT,
'STRAFE_RIGHT': locals.K_RIGHT,
'FORWARD': locals.K_UP,
'BACKWARD': locals.K_DOWN
}
ACTION_TO_KEY = {
0: locals.K_4,
1: locals.K_6,
2: locals.K_LEFT,
3: locals.K_RIGHT,
4: locals.K_UP,
5: locals.K_DOWN
}
KEY_TO_ACTION = {
(locals.K_4,): 0,
(locals.K_6,): 1,
(locals.K_LEFT,): 2,
(locals.K_RIGHT,): 3,
(locals.K_UP,): 4,
(locals.K_DOWN,): 5
}
# todo 布局信息来自于 deepmind 自带的脚本. 一个方法是把 level 拷贝到本地目录, 修改 debug_observation, 使其包含 layout 信息
MAZE_INFO = {
'nav_maze_random_goal_02': {
'maze': '******************************** * * * ** * *** *********** *** * * * ** * * * * * ** *** *** ************* ***** ** * * * * * **** *** * * * ***** * ** * * * * * ** *** ***** * * **** * * * * * ** * * * * * * ** * * * * ** * * ********* * * ** * * * * * ** ***** ******* * *** ******* ** * * * * * **** *** ***** * * ******* ****** * * ********************************'
, 'height': 19
, 'width': 31},
'nav_maze_random_goal_01':{
'maze': '********************** * * ** * * ***** ***** * ** * * * * * * ** * * * * * * ** * * * * * ** ***** * * * * ** * * * * * ****** * *********** ** **********************',
'width': 21,
'height': 11
},
'nav_maze_random_goal_03':{
'maze': '****************************************** ** ***************** *********** ******* ** * * * * * * ** *** * * * * * * ** * * * * * * **** * * * *** * * ** * * * * * * ** *** * *** * * * ** * * * * * * ** *** * * *** * * ** * * * * ** ********************* *************** ** * * * * ** * * *************** * ******* * ******** * * * * * * * ****** * * * * *** *** ** * * * * * * ** * *** * * * ***** * ** * * * * * * * * ** *** * * * * * * * * ** * * * * * * * **** * *************** * ***** * * ** * * * * * * * * * ** *** * *** * *** * * * ******* * *** * ** * * * * * * ******************************************',
'width': 41,
'height': 27
}
}
# 环境在另外的进程上运行, 经过验证, 可以提高CPU的利用率
# Worker loop run in a separate process; owns the DeepMind Lab environment and
# serves RESET/ACTION/TERMINATE commands over the pipe. Running the env in its
# own process improves CPU utilisation (see module comment above).
def LabWorkProcess(conn, env_name, frame_skip, enable_vel=False, enable_debug_observation=False,
                   enable_encode_depth=False, maxAltCameraWidth=160, maxAltCameraHeight=160):
    # lab
    basic_obs = [
        'RGB_INTERLEAVED'
    ]  # baseline observation: the RGB frame
    config = {
        'fps': str(60),
        'width': str(84),
        'height': str(84),
    }
    if enable_debug_observation:
        # Top-down debug camera plus the agent's world position.
        basic_obs.extend(['DEBUG.CAMERA.TOP_DOWN', 'DEBUG.POS.TRANS', ])
        config.update(maxAltCameraWidth=str(maxAltCameraWidth), maxAltCameraHeight=str(maxAltCameraHeight), hasAltCameras='true')
    if enable_encode_depth:
        basic_obs.append('RGBD_INTERLEAVED')
        # del basic_obs[0] # todo possibly problematic: would drop RGB
    if enable_vel:
        basic_obs.extend(['VEL.TRANS', 'VEL.ROT'])  # translational + rotational velocity
    env = deepmind_lab.Lab(env_name,
                           basic_obs,
                           config=config)
    # Handshake: tell the parent the environment is ready.
    conn.send(0)
    while True:
        # Message-receiving loop: block until the parent sends a command.
        command, arg = conn.recv()
        if command == LabWorkCommand.RESET:
            env.reset()
            obs = env.observations()
            conn.send(obs)
        elif command == LabWorkCommand.ACTION:
            # frame_skip repeats the action for several engine steps.
            reward = env.step(arg, num_steps=frame_skip)
            terminal = not env.is_running()
            if not terminal:
                obs = env.observations()
            else:
                # No observation is available once the episode has ended.
                obs = 0
            conn.send([obs, reward, terminal])
        elif command == LabWorkCommand.TERMINATE:
            break
        else:
            print("bad command: {}".format(command))
    env.close()
    conn.send(0)
    conn.close()
class DeepmindLabEnvironment(gym.Env):
    """Gym wrapper around DeepMind Lab, running the env in a child process.

    Commands are exchanged with :func:`LabWorkProcess` over a
    ``multiprocessing.Pipe``.
    """

    metadata = {'render.modes': ['rgb_array', 'human']}

    def __init__(self,
                 env_name,
                 frame_skip=4,
                 enable_encode_depth=False,
                 enable_vel=False,
                 enable_debug_observation=False,
                 debug_width=160,
                 debug_height=160,
                 seed=None,
                 **kwargs):
        super(DeepmindLabEnvironment, self).__init__()
        # Basic attributes.
        self.env_name = env_name
        self.env_type = 'lab'
        self.viewer = None
        self.enable_debug_observation = enable_debug_observation
        self.enable_vel = enable_vel
        # Whether the depth channel is exposed in observations.
        self.enable_encode_depth = enable_encode_depth
        # Open the pipe and spawn the worker process.
        self.connection, child_connection = Pipe()
        # Bug fix: the original passed enable_encode_depth into the
        # enable_vel positional slot of LabWorkProcess (and never passed
        # enable_vel at all), so velocity observations were tied to the
        # depth flag instead of their own switch.
        self.process = Process(target=LabWorkProcess, args=(
            child_connection, env_name, frame_skip, enable_vel, enable_debug_observation, enable_encode_depth,
            debug_width, debug_height))
        # Start the lab environment and wait for the readiness handshake.
        self.process.start()
        self.connection.recv()
        # Declare the action/observation spaces.
        self.action_space = gym.spaces.Discrete(len(ACTION_LIST))
        obs_spaces = {'rgb': spaces.Box(0, 255, shape=[84, 84, 3], dtype=np.uint8)}
        if enable_debug_observation:
            # top_down = [w, h, 3]; word_position = [x, y, z] doubles
            obs_spaces['top_down'] = spaces.Box(0, 255, shape=[debug_width, debug_height, 3], dtype=np.uint8)
            obs_spaces['word_position'] = spaces.Box(low=0.0, high=np.finfo(np.float).max, shape=[1, 3], dtype=np.float)
        if enable_encode_depth:
            obs_spaces['depth'] = spaces.Box(0, 255, shape=[84, 84], dtype=np.uint8)
        if enable_vel:
            obs_spaces['vel'] = spaces.Box(0.0, high=np.finfo(np.float).max, shape=[1, 6], dtype=np.float)
        self.observation_space = spaces.Dict(spaces=obs_spaces)

    def reset(self):
        """Reset the episode and return the initial observation dict."""
        self.connection.send([LabWorkCommand.RESET, 0])
        obs = self.connection.recv()
        state = self._get_state(obs)
        return state

    def _get_state(self, obs):
        """Convert a raw lab observation dict into this env's state dict."""
        returned = dict(rgb=obs['RGB_INTERLEAVED'])
        if self.enable_debug_observation:
            returned['top_down'] = obs['DEBUG.CAMERA.TOP_DOWN']
            returned['word_position'] = obs['DEBUG.POS.TRANS']
        if self.enable_encode_depth:
            # Last channel of RGBD is the depth map.
            returned['depth'] = obs['RGBD_INTERLEAVED'][:, :, 3]
        if self.enable_vel:
            returned['vel'] = np.concatenate([obs['VEL.TRANS'], obs['VEL.ROT']])
        return returned

    def step(self, action):
        """Perform one action.

        :param action: index into ACTION_LIST; a negative value means
                       "no-op" (DEFAULT_ACTION).
        :return: (state_or_None, reward, terminated, None)
        """
        if action < 0:
            real_action = DEFAULT_ACTION
        else:
            real_action = ACTION_LIST[action]
        self.connection.send([LabWorkCommand.ACTION, real_action])
        obs, reward, terminated = self.connection.recv()
        # No state can be produced after the episode ends.
        if terminated:
            state = None
        else:
            state = self._get_state(obs=obs)
        return state, reward, terminated, None

    def close(self):
        """Ask the worker to shut down and join the child process."""
        self.connection.send([LabWorkCommand.TERMINATE, 0])
        ret = self.connection.recv()
        self.connection.close()
        self.process.join()
        print("lab environment stopped, returned ", ret)

    def get_keys_to_action(self):
        """Return the keyboard mapping used by gym's play utilities."""
        return KEY_TO_ACTION

    @staticmethod
    def get_maze_info(env_name):
        """Return the static layout info for *env_name* (empty dict if unknown)."""
        return MAZE_INFO.get(env_name, dict())
class DeepmindLabEnvironmentForUnreal(gym.Wrapper):
    """UNREAL-style wrapper: normalises frames to [0, 1], optionally converts
    to channel-first layout, and optionally adds a 20x20 pixel-change map.
    """

    def __init__(self, env: gym.Env, channel_first=False, enable_pixel_change=False):
        super(DeepmindLabEnvironmentForUnreal, self).__init__(env=env)
        # Extra bookkeeping used by UNREAL's auxiliary tasks.
        self.last_state = None  # processed state dict from the previous step
        self.last_action = 0
        self.last_reward = 0
        # Redefine the observation space for normalised float frames.
        self.channel_first = channel_first
        self.enable_pixel_change = enable_pixel_change
        if channel_first:
            obs_spaces = {'rgb': spaces.Box(0, 1, shape=[3, 84, 84], dtype=np.float32)}
        else:
            obs_spaces = {'rgb': spaces.Box(0, 1, shape=[84, 84, 3], dtype=np.float32)}
        # Mirror the wrapped env's optional observations.
        # NOTE: accesses attributes on the wrapped env (gym Wrapper attribute
        # forwarding; gym < 0.13 lacks get_attr — see original todo).
        if self.env.enable_debug_observation:
            pass  # top_down/word_position spaces intentionally not declared yet
        if self.env.enable_encode_depth:
            obs_spaces['depth'] = spaces.Box(0, 1, shape=[84, 84], dtype=np.float32)
        if self.env.enable_vel:
            pass
        # pixel change
        if enable_pixel_change:
            obs_spaces['pixel_change'] = spaces.Box(0, 1, shape=[20, 20], dtype=np.float32)
        self.observation_space = spaces.Dict(spaces=obs_spaces)

    # pixel change
    @staticmethod
    def _subsample(a, average_width):
        """Average-pool a 2-D array over average_width x average_width tiles."""
        s = a.shape
        sh = s[0] // average_width, average_width, s[1] // average_width, average_width
        return a.reshape(sh).mean(-1).mean(1)

    def calc_pixel_change(self, state, last_state):
        """Return the 20x20 mean absolute pixel difference between two frames
        (2-pixel border cropped, then 4x4 average-pooled)."""
        if self.channel_first:
            d = np.absolute(state[:, 2:-2, 2:-2] - last_state[:, 2:-2, 2:-2])
            # (3,80,80) -> mean over channels
            m = np.mean(d, 0)
        else:
            d = np.absolute(state[2:-2, 2:-2, :] - last_state[2:-2, 2:-2, :])
            # (80,80,3) -> mean over channels
            m = np.mean(d, 2)
        c = self._subsample(m, 4)
        return c

    def process_frame(self, frame):
        """Scale a uint8 frame to float32 in [0, 1]; transpose to CHW if requested."""
        frame = frame.astype(np.float32)
        frame = frame / 255.
        if self.channel_first:
            frame = np.transpose(frame, [2, 0, 1])
        return frame

    def process_state(self, state, last_frame=None):
        """Normalise the raw state dict; add pixel_change if last_frame given."""
        new_state = dict(state)
        new_state['rgb'] = self.process_frame(state['rgb'])
        if self.env.enable_encode_depth:
            new_state['depth'] = self.process_frame(state['depth'])
        if self.enable_pixel_change and last_frame is not None:
            frame = new_state['rgb']
            pc = self.calc_pixel_change(frame, last_frame)
            new_state['pixel_change'] = pc
        # top_down rendering may arrive channel-first; normalise to HWC.
        if self.env.enable_debug_observation:
            top_down = state['top_down']
            if top_down.shape[0] == 3:
                if not self.channel_first:
                    top_down = np.transpose(top_down, [1, 2, 0])  # CHW TO HWC
                    top_down = np.ascontiguousarray(top_down)  # C-contiguous copy
                else:
                    pass  # todo channel_first: no transform should be needed
            new_state['top_down'] = top_down
        return new_state

    def reset(self, **kwargs):
        state = self.env.reset(**kwargs)
        # Cache the processed state for the next step's pixel-change calc.
        # NOTE(review): the raw (unprocessed) state is returned while the
        # processed one is cached — confirm callers expect the raw dict here.
        self.last_state = self.process_state(state)  # todo: reset's pc is normally unused
        self.last_action = 0
        self.last_reward = 0
        return state

    def step(self, action):
        # Bug fix: last_frame was only bound when enable_pixel_change was
        # True, but it was read unconditionally below, raising NameError on
        # every non-terminal step when pixel change was disabled.
        if self.enable_pixel_change:
            last_frame = self.last_state['rgb']  # saved before it is overwritten below
        else:
            last_frame = None
        # Run the wrapped environment.
        state, reward, terminated, info = self.env.step(action)
        if terminated:
            state = self.last_state  # last_state is already processed
        else:
            # Build the new processed state.
            state = self.process_state(state=state, last_frame=last_frame)
        self.last_action = action
        self.last_reward = reward
        self.last_state = state
        return state, reward, terminated, info

    def render(self, mode='human', **kwargs):
        """Render from the cached last_state (so reset/step must have run).

        :param mode: 'rgb_array' returns the float frame; 'human' opens a window.
        """
        rgb = self.last_state['rgb']
        if mode == 'rgb_array':
            return rgb
        elif mode == 'human':  # bug fix: was `mode is 'human'` (string identity compare)
            # pop up a window and render
            from gym.envs.classic_control import rendering
            if self.viewer is None:
                self.viewer = rendering.SimpleImageViewer()
            self.viewer.imshow((rgb * 255).astype(np.uint8))
            return self.viewer.isopen
        else:
            super(DeepmindLabEnvironmentForUnreal, self).render(mode=mode)  # just raise an exception
if __name__ == "__main__":
# human control
pass
| [
"wangshu214@live.cn"
] | wangshu214@live.cn |
18013e2b65ef9381809646e3867756c3f08eefef | b378950c5ec10db8c7921df1261d62b5b74581e8 | /gae/lib/django/tests/modeltests/many_to_one_null/models.py | fb0f6ac3b7707feb5ea92d8d875ed9300209d290 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | towerjoo/CS-notes | 11e78395423ec097840e354c6571400db149e807 | a5704ceea72caab2a458b0f212d69041c4c2a3ce | refs/heads/master | 2020-06-02T19:26:11.524649 | 2010-09-08T02:08:16 | 2010-09-08T02:08:16 | 895,186 | 6 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,102 | py | """
16. Many-to-one relationships that can be null
To define a many-to-one relationship that can have a null foreign key, use
``ForeignKey()`` with ``null=True``.
"""
from django.db import models
class Reporter(models.Model):
    # Reporter's display name; also used as the string representation.
    # NOTE(review): `maxlength` (not `max_length`) dates this to a pre-1.0
    # Django API — keep as-is for this historical test suite.
    name = models.CharField(maxlength=30)

    def __str__(self):
        return self.name
class Article(models.Model):
    headline = models.CharField(maxlength=100)
    # Nullable FK: an Article may exist without a Reporter (the point of this
    # test module).
    reporter = models.ForeignKey(Reporter, null=True)

    class Meta:
        # Default queryset ordering, relied on by the doctests below.
        ordering = ('headline',)

    def __str__(self):
        return self.headline
__test__ = {'API_TESTS':"""
# Create a Reporter.
>>> r = Reporter(name='John Smith')
>>> r.save()
# Create an Article.
>>> a = Article(headline="First", reporter=r)
>>> a.save()
>>> a.reporter.id
1
>>> a.reporter
<Reporter: John Smith>
# Article objects have access to their related Reporter objects.
>>> r = a.reporter
# Create an Article via the Reporter object.
>>> a2 = r.article_set.create(headline="Second")
>>> a2
<Article: Second>
>>> a2.reporter.id
1
# Reporter objects have access to their related Article objects.
>>> r.article_set.all()
[<Article: First>, <Article: Second>]
>>> r.article_set.filter(headline__startswith='Fir')
[<Article: First>]
>>> r.article_set.count()
2
# Create an Article with no Reporter by passing "reporter=None".
>>> a3 = Article(headline="Third", reporter=None)
>>> a3.save()
>>> a3.id
3
>>> print a3.reporter
None
# Need to reget a3 to refresh the cache
>>> a3 = Article.objects.get(pk=3)
>>> print a3.reporter.id
Traceback (most recent call last):
...
AttributeError: 'NoneType' object has no attribute 'id'
# Accessing an article's 'reporter' attribute returns None
# if the reporter is set to None.
>>> print a3.reporter
None
# To retrieve the articles with no reporters set, use "reporter__isnull=True".
>>> Article.objects.filter(reporter__isnull=True)
[<Article: Third>]
# Set the reporter for the Third article
>>> r.article_set.add(a3)
>>> r.article_set.all()
[<Article: First>, <Article: Second>, <Article: Third>]
# Remove an article from the set, and check that it was removed.
>>> r.article_set.remove(a3)
>>> r.article_set.all()
[<Article: First>, <Article: Second>]
>>> Article.objects.filter(reporter__isnull=True)
[<Article: Third>]
# Create another article and reporter
>>> r2 = Reporter(name='Paul Jones')
>>> r2.save()
>>> a4 = r2.article_set.create(headline='Fourth')
>>> r2.article_set.all()
[<Article: Fourth>]
# Try to remove a4 from a set it does not belong to
>>> r.article_set.remove(a4)
Traceback (most recent call last):
...
DoesNotExist: <Article: Fourth> is not related to <Reporter: John Smith>.
>>> r2.article_set.all()
[<Article: Fourth>]
# Use descriptor assignment to allocate ForeignKey. Null is legal, so
# existing members of set that are not in the assignment set are set null
>>> r2.article_set = [a2, a3]
>>> r2.article_set.all()
[<Article: Second>, <Article: Third>]
# Clear the rest of the set
>>> r.article_set.clear()
>>> r.article_set.all()
[]
>>> Article.objects.filter(reporter__isnull=True)
[<Article: First>, <Article: Fourth>]
"""}
| [
"zhutao@halfquest.com"
] | zhutao@halfquest.com |
77c436d784307c76055161cb9812c3027cfe4c00 | f43c49e41e61714617fb7442ea408485a20d81a5 | /dfirtrack_main/importer/file/markdown.py | 0ada04d8bdf823f7958a9a508c335515d871ccf4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | PiterPentester/dfirtrack | 2408181e2058133da2cc64c43756552e9b04a532 | e6baaa580dbc9fa26b582397834c0a0855da0940 | refs/heads/master | 2020-05-31T17:25:09.023170 | 2019-05-23T19:12:15 | 2019-05-23T19:12:15 | 190,407,697 | 1 | 0 | NOASSERTION | 2020-04-10T10:15:07 | 2019-06-05T14:13:29 | HTML | UTF-8 | Python | false | false | 4,617 | py | from dateutil.parser import parse
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render
from dfirtrack_main.forms import EntryFileImport
from dfirtrack_main.logger.default_logger import debug_logger, warning_logger
from dfirtrack_main.models import Entry
import hashlib
from io import TextIOWrapper
@login_required(login_url="/login")
def entrys(request):
    """Parse an uploaded markdown-table file and import entries for one system.

    Each usable row of the file is a former markdown table line of the form
    ``| date | utc | system | type | content |``.  The first two lines
    (table header and separator) are skipped.  A SHA1 over the concatenated
    row values is used to skip rows that already exist for the selected
    system.

    GET renders the upload form (optionally preselecting ``system`` from the
    query string); POST processes the uploaded file row by row and redirects
    to the system's detail page.

    NOTE(review): indentation of this block was lost in transit and has been
    reconstructed from the control-flow statements — confirm against the
    upstream repository.
    """
    # form was valid to post
    if request.method == "POST":
        # call logger
        debug_logger(str(request.user), " ENTRY_TXT_IMPORTER_BEGIN")

        # get text out of file (file upload field 'entryfile' in request object)
        entryfile = TextIOWrapper(request.FILES['entryfile'].file, encoding=request.encoding)

        # system_id as string from POST object; fetched once before the loop so
        # the redirect below works even when the uploaded file has no rows
        system = request.POST['system']

        # row counter (needed for logger)
        i = 0

        # iterate over rows in file
        for row in entryfile:
            # autoincrement row counter
            i += 1

            # skip markdown table header and separator lines
            # TODO: remove first two lines from parsing script
            if i == 1 or i == 2:
                continue

            # split line from markdown table format to single values
            column = row.split("|")

            # check row for empty value
            if len(column) < 6:
                warning_logger(str(request.user), " ENTRY_TXT_IMPORTER_SYSTEM_COLUMN " + "row_" + str(i) + ":empty_row")
                continue

            # get values of former markdown tables, stripped of
            # trailing and leading whitespaces
            entry_date = column[1].strip()
            entry_utc = column[2].strip()
            entry_system = column[3].strip()
            entry_type = column[4].strip()
            entry_content = column[5].strip()

            # concatenate all relevant entry values and hash them so
            # duplicate rows can be recognized
            entry_string = entry_date + entry_utc + entry_system + entry_type + entry_content
            entry_sha1 = hashlib.sha1(entry_string.encode('utf8')).hexdigest()

            # check for existing entry_sha1 for this system and skip if it
            # already exists (explicit query instead of a bare try/except)
            if Entry.objects.filter(system=system, entry_sha1=entry_sha1).exists():
                warning_logger(str(request.user), " ENTRY_TXT_IMPORTER_ENTRY_EXISTS " + "row_" + str(i) + ":entry_exists")
                continue

            # convert timing information to datetime object (explicit UTC offset)
            entry_time = parse(entry_date + " " + entry_utc + "+00:00")

            # create form with request data
            form = EntryFileImport(request.POST, request.FILES)

            # create entry only when the form validates
            if form.is_valid():
                # don't save form yet
                entry = form.save(commit=False)

                # set values from file (row / column)
                entry.entry_time = entry_time
                entry.entry_sha1 = entry_sha1
                entry.entry_date = entry_date
                entry.entry_utc = entry_utc
                entry.entry_system = entry_system
                entry.entry_type = entry_type
                entry.entry_content = entry_content

                # set auto values
                entry.entry_created_by_user_id = request.user
                entry.entry_modified_by_user_id = request.user

                # save object
                entry.save()

                # call logger
                entry.logger(str(request.user), ' ENTRY_TXT_IMPORTER_EXECUTED')

        # call logger
        debug_logger(str(request.user), " ENTRY_TXT_IMPORTER_END")
        return redirect('/systems/' + system)
    else:
        # show empty form with preselected system
        if request.method == 'GET' and 'system' in request.GET:
            system = request.GET['system']
            form = EntryFileImport(initial={
                'system': system,
            })
        else:
            # show empty form
            form = EntryFileImport()

        # call logger
        debug_logger(str(request.user), " ENTRY_TXT_IMPORTER_ENTERED")
        return render(request, 'dfirtrack_main/entry/entrys_file_importer.html', {'form': form})
| [
"mathias.stuhlmacher@gmx.de"
] | mathias.stuhlmacher@gmx.de |
7dcdf7f5efd7878a74aff1e9ef7205eedeab6d3d | 86f8bf3933208329eb73bfcba5e1318dbb2ddafa | /hello_world/django/benckmark/wsgi.py | 39449f5a6e5611a43eef292c969e3ffe19f098ea | [] | no_license | TakesxiSximada/benchmarks | 42ce5466c813e45db78f87ca391806fbb845a16c | 9cd2fc732ed006fd3554e01b1fc71bfcb3ada312 | refs/heads/master | 2021-01-15T23:02:14.063157 | 2015-05-30T18:52:08 | 2015-05-30T18:52:08 | 36,551,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
WSGI config for benckmark project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os

# Django must know which settings module to use BEFORE the WSGI application
# is created, so the environment variable is set ahead of the django import.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "benckmark.settings")
from django.core.wsgi import get_wsgi_application

# Module-level WSGI callable picked up by application servers
# (gunicorn, uWSGI, mod_wsgi, ...).
application = get_wsgi_application()
| [
"takesxi.sximada@gmail.com"
] | takesxi.sximada@gmail.com |
d09894a4407767279783f2300c505fdeee58d511 | 0cbc02dd7d1efbe61de04dcf1c6eccb6496bf074 | /month02/AID1912/day07/baidu.py | d26b8dd65faba071eedd46919db85150e638f315 | [] | no_license | fsym-fs/Python_AID | 0b1755c15e20b214940041e81bedb2d5ec99e3f9 | f806bb02cdb1670cfbea6e57846abddf3972b73b | refs/heads/master | 2021-03-20T06:57:45.441245 | 2020-05-27T14:13:45 | 2020-05-27T14:13:45 | 247,187,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | import requests
import base64
import json
# Maps a human-readable category key (Chinese: plant / animal / other) to the
# Baidu image-classify API endpoint path used for that kind of recognition.
ai_list = {'植物':'/v1/plant','动物':'/v1/animal','其他':'/v2/advanced_general'}
def baidu(type, path):
    """Classify an image with the Baidu AI image-recognition API.

    Sends the base64-encoded image at *path* to the endpoint mapped from the
    requested category and returns the parsed recognition result.

    :param type: category key, one of the ``ai_list`` keys
                 ('植物', '动物', '其他').  The name shadows the ``type``
                 builtin but is kept for interface compatibility.
    :param path: filesystem path of the image file to classify
    :return: the ``result`` payload of the API response, the string
             ``'Error'`` if the response has no ``result`` key, or ``None``
             when *type* is not a known category
    """
    # Guard clause: unknown categories have no endpoint mapping.
    if type not in ai_list:
        return None

    # Example endpoint:
    # https://aip.baidubce.com/rest/2.0/image-classify/v1/animal
    # NOTE(review): the access_token is hard-coded and expires; it should be
    # refreshed/configured externally -- confirm before production use.
    url = ("https://aip.baidubce.com/rest/2.0/image-classify%s"
           "?access_token=24.c36ae190ea9865133bbc1bf1e2d921d4.2592000."
           "1577462210.282335-17874022") % (ai_list[type])

    header = {
        'Content-Type': 'application/x-www-form-urlencoded'
    }

    # The API expects the image as a base64-encoded string in the form body.
    with open(path, 'rb') as f:
        image = base64.b64encode(f.read())
    data = {'image': str(image, 'utf-8')}

    res = requests.post(url=url, data=data, headers=header).text
    return json.loads(res).get('result', 'Error')
if __name__ == '__main__':
    # Manual smoke test: classify a local sample image as an animal ('动物').
    print(baidu('动物','kl.jpg'))
| [
"1085414029@qq.com"
] | 1085414029@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.