blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
61c2511bc853396c22902d39e5574a0bff6f5c68
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/edifact/D12A/CODECOD12AUN.py
|
b336b95e699e8b394f68b938224e628f94b6e90f
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 2,651
|
py
|
#Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD12AUN import recorddefs
# Message structure for the UN/EDIFACT D.12A grammar: a nested list of
# segment groups. Each dict names the segment ID, its MIN/MAX repetition
# counts, and (via LEVEL) the segments nested beneath it.
# NOTE: this file is generated by the bots edi translator from the UN docs
# (see header) -- regenerate rather than hand-edit when the grammar changes.
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'FTX', MIN: 0, MAX: 9},
{ID: 'RFF', MIN: 0, MAX: 9, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
]},
{ID: 'TDT', MIN: 0, MAX: 1, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'RFF', MIN: 0, MAX: 9},
{ID: 'LOC', MIN: 0, MAX: 9, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
]},
]},
{ID: 'NAD', MIN: 1, MAX: 9, LEVEL: [
{ID: 'CTA', MIN: 0, MAX: 9, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 9},
]},
]},
{ID: 'GID', MIN: 0, MAX: 999, LEVEL: [
{ID: 'HAN', MIN: 0, MAX: 9},
{ID: 'FTX', MIN: 0, MAX: 9},
{ID: 'PIA', MIN: 0, MAX: 9},
{ID: 'MEA', MIN: 0, MAX: 9},
{ID: 'TMP', MIN: 0, MAX: 9, LEVEL: [
{ID: 'RNG', MIN: 0, MAX: 1},
]},
{ID: 'SGP', MIN: 0, MAX: 999},
{ID: 'DGS', MIN: 0, MAX: 99, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 9},
{ID: 'MEA', MIN: 0, MAX: 9},
{ID: 'CTA', MIN: 0, MAX: 9, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 9},
]},
]},
]},
{ID: 'EQD', MIN: 1, MAX: 9999, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 99},
{ID: 'TMD', MIN: 0, MAX: 9},
{ID: 'DTM', MIN: 0, MAX: 99},
{ID: 'LOC', MIN: 0, MAX: 9},
{ID: 'MEA', MIN: 0, MAX: 99},
{ID: 'DIM', MIN: 0, MAX: 99},
{ID: 'SEL', MIN: 0, MAX: 9},
{ID: 'FTX', MIN: 0, MAX: 99},
{ID: 'PCD', MIN: 0, MAX: 9},
{ID: 'EQA', MIN: 0, MAX: 9},
{ID: 'COD', MIN: 0, MAX: 1},
{ID: 'HAN', MIN: 0, MAX: 9},
{ID: 'DAM', MIN: 0, MAX: 9, LEVEL: [
{ID: 'COD', MIN: 0, MAX: 1},
]},
{ID: 'TDT', MIN: 0, MAX: 9, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'RFF', MIN: 0, MAX: 9},
{ID: 'LOC', MIN: 0, MAX: 9, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 9},
]},
]},
{ID: 'NAD', MIN: 0, MAX: 9},
{ID: 'DGS', MIN: 0, MAX: 99, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 9},
{ID: 'MEA', MIN: 0, MAX: 9},
{ID: 'CTA', MIN: 0, MAX: 9, LEVEL: [
{ID: 'COM', MIN: 0, MAX: 9},
]},
]},
{ID: 'TMP', MIN: 0, MAX: 9, LEVEL: [
{ID: 'RNG', MIN: 0, MAX: 1},
]},
]},
{ID: 'CNT', MIN: 0, MAX: 1},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
5e5f8bba2d3970f3c9198111bf0ec501b57389a9
|
7e4c86df42e3c4780e9b01026c904769f6dc39ec
|
/class-35/demos/graph/test_graph.py
|
2f7a9fb4b5e07ca80c434c89c3572f1c68ba882f
|
[] |
no_license
|
everydaytimmy/seattle-python-401d16
|
3a85c55204471ae11ce31cf0b6fc688eea7aee9f
|
372a4c097f6c217098d1f8a420a37168e2c6229f
|
refs/heads/main
| 2023-06-25T03:55:06.288698
| 2021-07-13T19:08:31
| 2021-07-13T19:08:31
| 370,841,803
| 0
| 0
| null | 2021-05-25T22:20:32
| 2021-05-25T22:20:32
| null |
UTF-8
|
Python
| false
| false
| 2,714
|
py
|
"""
Implement your own Graph. The graph should be represented as an adjacency list, and should include the following methods:
add node
Arguments: value
Returns: The added node
Add a node to the graph
add edge
Arguments: 2 nodes to be connected by the edge, weight (optional)
Returns: nothing
Adds a new edge between two nodes in the graph
If specified, assign a weight to the edge
Both nodes should already be in the Graph
get nodes
Arguments: none
Returns all of the nodes in the graph as a collection (set, list, or similar)
get neighbors
Arguments: node
Returns a collection of edges connected to the given node
Include the weight of the connection in the returned collection
size
Arguments: none
Returns the total number of nodes in the graph
TESTS
An empty graph properly returns null
"""
from graph import Graph, Vertex
def test_add_node():
    """add_node hands back a vertex wrapping the supplied value."""
    g = Graph()
    node = g.add_node("spam")
    assert node.value == "spam"


def test_get_nodes_one():
    """A single added node comes back from get_nodes as a Vertex."""
    g = Graph()
    g.add_node("spam")
    nodes = g.get_nodes()
    assert len(nodes) == 1
    assert isinstance(nodes[0], Vertex)
    assert nodes[0].value == "spam"


# REFACTOR to not do so much
def test_get_nodes_two():
    """Two added nodes come back as two Vertex objects, insertion order."""
    g = Graph()
    for value in ("spam", "eggs"):
        g.add_node(value)
    nodes = g.get_nodes()
    assert len(nodes) == 2
    assert isinstance(nodes[0], Vertex)
    assert isinstance(nodes[1], Vertex)
    assert [n.value for n in nodes] == ["spam", "eggs"]


def test_size_two():
    """size() counts the nodes in the graph."""
    g = Graph()
    for value in ("spam", "eggs"):
        g.add_node(value)
    assert g.size() == 2


def test_add_edge_no_weight():
    """add_edge is a pure mutator: it returns nothing."""
    g = Graph()
    first = g.add_node("spam")
    second = g.add_node("eggs")
    assert g.add_edge(first, second) is None


def test_get_neighbors():
    """A weighted edge appears in the source vertex's neighbor list."""
    g = Graph()
    source = g.add_node("spam")
    target = g.add_node("eggs")
    g.add_edge(source, target, 5)
    edges = g.get_neighbors(source)
    assert len(edges) == 1
    assert edges[0].vertex.value == "eggs"
    assert edges[0].weight == 5


def test_get_neighbors_solo():
    """A self-loop with the default weight (0) is reported as one neighbor."""
    g = Graph()
    only = g.add_node("spam")
    g.add_edge(only, only)
    edges = g.get_neighbors(only)
    assert len(edges) == 1
    assert edges[0].vertex.value == "spam"
    assert edges[0].weight == 0
|
[
"jb.tellez@gmail.com"
] |
jb.tellez@gmail.com
|
b68210d8b6efbf80cbe001da472b636f94873e39
|
18be742be30452764865a830abb2327ab779b01d
|
/ARP/ARPClient/main.py
|
bb2b1e3ee48188b03819be13f30bea51a684ac9a
|
[] |
no_license
|
mhdr/Thesis
|
06823914a1f9eca7d75501844c15ac20435cf754
|
9242d25143cc2fcbf768ca0d01dc80a7521fdf6c
|
refs/heads/master
| 2021-01-10T06:30:44.860331
| 2016-01-02T05:28:48
| 2016-01-02T05:28:48
| 46,107,026
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,572
|
py
|
import socket
import threading
from colorama import Fore
from datetime import datetime
from datetime import timedelta
import os
import time
from Message import Message
# --- shared module state (read/written by the threads started below) ---
counter=0  # running count of status lines printed; bumped by get_counter()
mac_list=[]  # MAC addresses currently blocked via arptables/iptables
timer_initial_value=5  # seconds the watchdog waits before locking traffic down
timer=timer_initial_value  # countdown decremented once per second by run_timer()
is_traffic_blocked=False  # True while the arptables DROP policy is in force
# Module-level lock shared by every caller. The original code created a brand
# new BoundedSemaphore on each call, so acquire/release never excluded any
# concurrent thread and the "critical section" was a no-op.
_counter_lock = threading.BoundedSemaphore()

def get_counter():
    """Thread-safely increment the global line counter and return its new value."""
    global counter
    with _counter_lock:
        counter = counter + 1
        # Return while still holding the lock: reading the global after
        # release could observe another thread's increment.
        return counter
def fetch():
    """Poll the ARP server once a second and keep firewall rules in sync.

    Protocol: send b'1', receive a serialized, signed Message whose .macs
    lists hostile MAC addresses. Each new MAC gets DROP rules via arptables
    and iptables; a verified empty list means "all clear", so every rule we
    added is removed again. Runs forever in its own thread.
    """
    global mac_list
    while True:
        # One short-lived TCP connection per poll.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((HOST, PORT))
        s.sendall(b'1')
        data_recv = s.recv(1024)
        data = bytearray()
        s.settimeout(1)
        # Drain the reply: stop on a short read or a 1-second stall.
        while len(data_recv) > 0:
            data.extend(data_recv)
            if len(data_recv) < 1024:
                break
            try:
                data_recv = s.recv(1024)
            except socket.timeout:
                break
        s.close()
        if len(data) > 0:
            message = Message.loads(data)
            current_time = datetime.now()
            if len(message.macs) > 0:
                if message.verify():
                    # Reject stale (possibly replayed) messages.
                    if current_time - message.time < timedelta(seconds=5):
                        for mac in message.macs:
                            if mac not in mac_list:
                                cmd1 = "arptables -A INPUT --source-mac {0} -j DROP".format(mac)
                                cmd2 = "iptables -A INPUT -m mac --mac-source {0} -j DROP".format(mac)
                                cmd3 = "ip neighbour flush all"
                                os.system(cmd1)
                                os.system(cmd2)
                                os.system(cmd3)
                                mac_list.append(mac)
                                print("{0} - ".format(get_counter()) + Fore.MAGENTA + "MAC : {0}".format(mac) + Fore.RESET)
                        reset_timer()
                else:
                    print("{0} - ".format(get_counter()) + Fore.RED +
                          "We received some data,but it seems they are manipulated." + Fore.RESET)
            else:
                # All clear: tear down every rule we created.
                # BUG FIX: iterate over a snapshot -- removing entries from
                # mac_list while iterating it skipped every other element,
                # leaving hosts blocked after the attack ended.
                for mac in list(mac_list):
                    cmd1 = "arptables -D INPUT --source-mac {0} -j DROP".format(mac)
                    cmd2 = "iptables -D INPUT -m mac --mac-source {0} -j DROP".format(mac)
                    os.system(cmd1)
                    os.system(cmd2)
                    mac_list.remove(mac)
                print("{0} - ".format(get_counter()) + Fore.GREEN + "Safe" + Fore.RESET)
                reset_timer()
        time.sleep(1)
def get_arp_server_mac():
    """Ask the ARP server (command b'2') for its own MAC address.

    Returns the raw reply as a bytearray. Note the enclosing `while True`
    always exits on the first pass via `return`.
    """
    while True:
        global HOST
        global PORT
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((HOST, PORT))
        s.sendall(b'2')
        data_recv=s.recv(1024)
        data=bytearray()
        s.settimeout(1)
        # Accumulate until a short read or a 1-second timeout ends the reply.
        while len(data_recv)>0:
            data.extend(data_recv)
            if len(data_recv) < 1024:
                break
            try:
                data_recv=s.recv(1024)
            except socket.timeout:
                break
        s.close()
        mac=data
        return mac
def block_traffic():
    """Lock ARP down: drop everything except frames from the trusted server."""
    global is_traffic_blocked
    global arp_server_mac
    drop_everything = "arptables -P INPUT DROP"
    allow_server = "arptables -A INPUT --source-mac {0} -j ACCEPT".format(arp_server_mac)
    # Order matters: set the default-deny policy first, then punch the hole.
    for command in (drop_everything, allow_server):
        os.system(command)
    is_traffic_blocked = True
def allow_traffic():
    """Lift the ARP lockdown: reset the policy, flush rules and the ARP cache."""
    global is_traffic_blocked
    for command in ("arptables -P INPUT ACCEPT",
                    "arptables -F",
                    "ip neighbour flush all"):
        os.system(command)
    is_traffic_blocked = False
def run_timer():
    """Tick the watchdog down once per second; on expiry, block ARP traffic.

    Runs forever in its own thread. reset_timer() rewinds `timer` from the
    fetch loop to keep the lockdown from triggering while the server talks.
    """
    global timer
    global is_traffic_blocked
    while True:
        time.sleep(1)
        if timer > 0:
            timer -= 1
        elif not is_traffic_blocked:
            block_traffic()
# Shared lock for timer resets. As in get_counter, the original built a fresh
# BoundedSemaphore on every call, which made the critical section a no-op.
_reset_lock = threading.BoundedSemaphore()

def reset_timer():
    """Rewind the watchdog timer and lift the traffic block if one is active."""
    global timer
    global timer_initial_value
    global is_traffic_blocked
    with _reset_lock:
        timer = timer_initial_value
        if is_traffic_blocked:
            allow_traffic()
# --- startup (this file is a script) ---
HOST = '192.168.1.104'  # ARP server endpoint, hard-coded
PORT = 11000
# Start from a clean slate: flush any rules left over from a previous run.
os.system("arptables -F")
os.system("iptables -F")
arp_server_mac=get_arp_server_mac()
# Poller and watchdog run concurrently for the life of the process.
threading.Thread(target=fetch).start()
threading.Thread(target=run_timer).start()
|
[
"ramzani.mahmood@gmail.com"
] |
ramzani.mahmood@gmail.com
|
393109ee8fa16eb9b5501ef98f949f56c88d7743
|
c4b8e1e09dedbccd37ca008ecaaca4438610bbaf
|
/cpmpy/euler1.py
|
6979ed7c3f6ecb14b65e563e98144779fb1776eb
|
[
"MIT"
] |
permissive
|
hakank/hakank
|
4806598b98cb36dd51b24b0ab688f52dadfe9626
|
c337aaf8187f15dcdc4d5b09cd2ed0dbdb2e72c2
|
refs/heads/master
| 2023-08-15T00:21:52.750270
| 2023-07-27T16:21:40
| 2023-07-27T16:21:40
| 11,933,517
| 336
| 97
|
MIT
| 2023-07-27T11:19:42
| 2013-08-06T20:12:10
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,443
|
py
|
"""
Project Euler problem 1 in cpmpy.
http://projecteuler.net/index.php?section=problems&id=1
'''
If we list all the natural numbers below 10 that are multiples of 3 or 5,
we get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
'''
Using Constraint Modeling for this problem is probably
a tad overkill...
This model (using ipython):
'''
In [1]: %time run euler1.py
z: 233168
status: ExitStatus.OPTIMAL (0.006237117 seconds)
CPU times: user 1.2 s, sys: 1.34 s, total: 2.53 s
Wall time: 197 ms
'''
Compare with this ipython oneliner.
'''
% time sum([i for i in range(1000) if (i %3 == 0 or i %5 == 0)])
CPU times: user 66 µs, sys: 12 µs, total: 78 µs
Wall time: 80.3 µs
Out[1]: 233168
'''
Model created by Hakan Kjellerstrand, hakank@hakank.com
See also my CPMpy page: http://www.hakank.org/cpmpy/
"""
from cpmpy import *
import numpy as np
from cpmpy_hakank import *
def euler1():
    """Project Euler #1 as a constraint model: sum of multiples of 3 or 5 below 1000.

    NOTE(review): boolvar/intvar/Model come from cpmpy and CPM_ortools from
    the star-imported helper modules above -- confirm there.
    """
    n = 1000
    # x[i] decides whether i is counted; z is constrained to the weighted sum.
    x = boolvar(shape=n,name="x")
    z = intvar(0,sum(range(n)),name="z")
    model = Model([x[0] == 0,
                   [x[i] == ((i % 3==0) | (i % 5==0)) for i in range(1,n)],
                   z==sum([i*x[i] for i in range(n)])
                   ])
    ss = CPM_ortools(model)
    if ss.solve():
        print("z:", z.value())
        print("status:", ss.status())
# Runs at import time -- this file is a script.
euler1()
|
[
"hakank@gmail.com"
] |
hakank@gmail.com
|
0c15e3f95e19147d72f251d03c7fadef4f1ac626
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_041/ch23_2019_04_02_12_34_31_027536.py
|
3c83817e385509191b67c91a0d4fb30eb41e21ab
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
def verifica_idade(idade):
    """Return the drinking-age clearance label for *idade* (age in years).

    > 21   -> cleared in both the USA and Brazil
    18..21 -> cleared in Brazil only
    < 18   -> not cleared
    (Labels are user-facing and intentionally kept in Portuguese.)
    """
    if idade > 21:
        return 'Liberado EUA e Brasil'
    elif idade >= 18:
        # The first branch already excluded ages above 21, so the original
        # `and idade <= 21` half of this test was redundant.
        return 'Liberado BRASIL'
    else:
        return 'Não está liberado'
|
[
"you@example.com"
] |
you@example.com
|
b5d8e0977a892778bc9c2ddbb5b7885a79e76cee
|
20021a5bc80f2649269cfb7e1b7f10d6f6b6a839
|
/POCScan/information/jsp_conf_find.py
|
1374f06350e84617bc93cab0f8a3c56987e7a055
|
[] |
no_license
|
polosec/SZhe_Scan
|
bddd1f53452c44837a6ddf902546975879449e11
|
d5e20991530de763c374b5a3c6e8db689bff5265
|
refs/heads/master
| 2022-07-19T09:31:05.840747
| 2020-05-19T14:57:07
| 2020-05-19T14:57:07
| 265,748,880
| 0
| 1
| null | 2020-05-21T03:49:48
| 2020-05-21T03:49:47
| null |
UTF-8
|
Python
| false
| false
| 957
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: java配置文件文件发现
referer: unknow
author: Lucifer
description: web.xml是java框架使用的配置文件,可以获取敏感信息
'''
import sys
import requests
import warnings
from termcolor import cprint
class jsp_conf_find_BaseVerify:
    """Probe a target URL for an exposed Java `/WEB-INF/web.xml` descriptor."""
    def __init__(self, url):
        # Base URL of the target; the payload path is appended in run().
        self.url = url
    def run(self):
        """Request /WEB-INF/web.xml and report whether it is served as XML.

        Returns a 5-tuple (vulnerable, url, description, payload, body);
        the last four items are None when the target is not vulnerable.
        """
        payload = "/WEB-INF/web.xml"
        vulnurl = self.url + payload
        try:
            # verify=False: scanners routinely hit self-signed certificates.
            req = requests.get(vulnurl, timeout=10, verify=False)
            if req.headers["Content-Type"] == "application/xml":
                return True,vulnurl,"java配置文件文件发现",payload,req.text
            else:
                return False, None, None, None, None
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; any request failure (timeout, DNS error,
            # missing Content-Type header) simply means "not found".
            return False, None, None, None, None
if __name__ == "__main__":
warnings.filterwarnings("ignore")
testVuln = jsp_conf_find_BaseVerify(sys.argv[1])
testVuln.run()
|
[
"2585614464@qq.com"
] |
2585614464@qq.com
|
dbf639cc2ed00a1b48e93b1cfeba7c265bbfc258
|
fdfd0d6cf04509eb642c1065240df297b8b7bf91
|
/tests/helpers.py
|
7b1d59c37a6f8cad92e6a3c07223c8a013893308
|
[
"BSD-3-Clause"
] |
permissive
|
lnielsen/cernservicexml
|
747902c102f3e25e4c941c8aef29acb62bf73078
|
3f1691a81fd0534678a32acd2a352de4de55a82e
|
refs/heads/master
| 2021-01-20T13:47:33.100484
| 2015-06-16T15:10:48
| 2015-06-16T15:10:48
| 37,516,301
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,231
|
py
|
# -*- coding: utf-8 -*-
#
# This file is part of CERN Service XML
# Copyright (C) 2015 CERN.
#
# CERN Service XML is free software; you can redistribute it and/or modify
# it under the terms of the Revised BSD License; see LICENSE file for
# more details.
"""Test helpers."""
from __future__ import absolute_import, print_function, unicode_literals
import functools
import sys
PY34 = sys.version_info[0:2] >= (3, 4)
def import_httpretty():
    """Import HTTPretty and monkey patch Python 3.4 issue.

    See https://github.com/gabrielfalcao/HTTPretty/pull/193 and
    as well as https://github.com/gabrielfalcao/HTTPretty/issues/221.
    """
    if not PY34:
        import httpretty
    else:
        # Capture the real SocketType *before* httpretty is imported, so the
        # wrapper below can restore it after each disable().
        import socket
        old_SocketType = socket.SocketType
        import httpretty
        from httpretty import core
        def sockettype_patch(f):
            @functools.wraps(f)
            def inner(*args, **kwargs):
                f(*args, **kwargs)
                # Restore both the attribute and the module-dict entry --
                # per the linked issues, both can end up patched.
                socket.SocketType = old_SocketType
                socket.__dict__['SocketType'] = old_SocketType
            return inner
        # Wrap disable() so it always undoes the socket patching.
        core.httpretty.disable = sockettype_patch(
            httpretty.httpretty.disable
        )
    return httpretty
|
[
"lars.holm.nielsen@cern.ch"
] |
lars.holm.nielsen@cern.ch
|
dfe3ebea885fdd94e4bb398302f5e26a34f93f98
|
df5e91385e3a4c89116a111a10ff460e4e380b86
|
/manage.py
|
a8efacc67cdd1f80836e5c4953be5ad41c29f50d
|
[] |
no_license
|
taojy123/TBspider
|
820a2da618fff5c49633954e7f411bcff263158c
|
8b6844f2d8d0d9639e0e69af29517ef639ec4d26
|
refs/heads/master
| 2020-12-24T15:49:13.380774
| 2014-03-07T12:49:58
| 2014-03-07T12:49:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tbspider.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"taojy123@163.com"
] |
taojy123@163.com
|
3ae564ec2995618a677d2ebe236c5f0ae443ef5e
|
e6683202f97190f8a5230fbb0b99d9692f10872c
|
/core/lib/dns/rdtypes/ANY/SSHFP.py
|
b6ed396f1d2897efeb5c6ce2155c58ad87a41ff7
|
[
"MIT"
] |
permissive
|
swagkarna/arissploit
|
0ae6ba9c91aa8a1160597f052462a1030276688d
|
b0a58f61afc12ac78c65e0275dfa5e4d1e44989e
|
refs/heads/master
| 2020-09-28T18:51:40.741777
| 2019-12-08T21:54:15
| 2019-12-08T21:54:15
| 226,839,315
| 3
| 0
|
MIT
| 2019-12-09T09:57:43
| 2019-12-09T09:57:43
| null |
UTF-8
|
Python
| false
| false
| 2,829
|
py
|
# Copyright (C) 2005-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import binascii
import dns.rdata
import dns.rdatatype
class SSHFP(dns.rdata.Rdata):
    """SSHFP record
    @ivar algorithm: the algorithm
    @type algorithm: int
    @ivar fp_type: the digest type
    @type fp_type: int
    @ivar fingerprint: the fingerprint
    @type fingerprint: string
    @see: draft-ietf-secsh-dns-05.txt"""
    # Fixed attribute set keeps per-instance memory low when parsing zones.
    __slots__ = ['algorithm', 'fp_type', 'fingerprint']
    def __init__(self, rdclass, rdtype, algorithm, fp_type,
                 fingerprint):
        super(SSHFP, self).__init__(rdclass, rdtype)
        self.algorithm = algorithm
        self.fp_type = fp_type
        self.fingerprint = fingerprint
    def to_text(self, origin=None, relativize=True, **kw):
        # Presentation form: "<algorithm> <fp_type> <hex fingerprint>".
        return '%d %d %s' % (self.algorithm,
                             self.fp_type,
                             dns.rdata._hexify(self.fingerprint,
                                               chunksize=128))
    @classmethod
    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        algorithm = tok.get_uint8()
        fp_type = tok.get_uint8()
        chunks = []
        # The hex fingerprint may be split over several tokens; gather them
        # all up to end-of-line/EOF before hex-decoding in one go.
        while 1:
            t = tok.get().unescape()
            if t.is_eol_or_eof():
                break
            if not t.is_identifier():
                raise dns.exception.SyntaxError
            chunks.append(t.value.encode())
        fingerprint = b''.join(chunks)
        fingerprint = binascii.unhexlify(fingerprint)
        return cls(rdclass, rdtype, algorithm, fp_type, fingerprint)
    def to_wire(self, file, compress=None, origin=None):
        # Wire format: one byte algorithm, one byte fp_type, raw fingerprint.
        header = struct.pack("!BB", self.algorithm, self.fp_type)
        file.write(header)
        file.write(self.fingerprint)
    @classmethod
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        header = struct.unpack("!BB", wire[current: current + 2])
        current += 2
        rdlen -= 2
        # NOTE(review): .unwrap() suggests `wire` is dnspython's WireData
        # wrapper rather than plain bytes -- confirm in dns.rdata.
        fingerprint = wire[current: current + rdlen].unwrap()
        return cls(rdclass, rdtype, header[0], header[1], fingerprint)
|
[
"noreply@github.com"
] |
swagkarna.noreply@github.com
|
b639fbb2299a12acf5adfa56c7eb10aefd3818fb
|
3b5c46ce2daa75e1e157838d0f6cfd92469471a0
|
/plastering/inferencers/scrabble_helper.py
|
a69252ea56f5e2fdc8ae172faddd96b0a3c939dc
|
[
"MIT"
] |
permissive
|
plastering/plastering
|
1b4e9c04fce4b26b22fe5ade05af9baf644b4eaa
|
26ffeecb38844ebb122fde5d9bd2276a7b4150a0
|
refs/heads/master
| 2023-04-04T07:50:59.087529
| 2021-05-17T23:31:40
| 2021-05-17T23:31:40
| 149,086,461
| 37
| 17
|
MIT
| 2023-03-24T23:19:24
| 2018-09-17T07:32:17
|
Python
|
UTF-8
|
Python
| false
| false
| 3,071
|
py
|
from collections import defaultdict
import pdb
from pkg_resources import resource_string
from io import StringIO
import pandas as pd
from ..metadata_interface import LabeledMetadata, RawMetadata
def elem2list(elem):
    """Split an underscore-delimited string into tokens; non-strings give []."""
    return elem.split('_') if isinstance(elem, str) else []


def csv2json(df, key_idx, value_idx):
    """Map column *key_idx* of *df* onto the tokenized values of *value_idx*."""
    pairs = zip(df[key_idx].tolist(), df[value_idx].tolist())
    return {key: elem2list(value) for key, value in pairs}
def load_data(target_building,
              source_buildings,
              unit_mapping_file=resource_string('config', 'unit_mapping.csv'),
              bacnettype_mapping_file=resource_string('config', 'bacnettype_mapping.csv'),
              #unit_mapping_file='config/unit_mapping.csv',
              #bacnettype_mapping_file='config/bacnettype_mapping.csv',
              bacnettype_flag=False,
              metadata_types=['VendorGivenName'],
              ):
    """Assemble per-building metadata corpora for the Scrabble inferencer.

    Returns (building_sentence_dict, target_srcids, building_label_dict,
    building_tagsets_dict, known_tags_dict), each keyed by building / srcid.
    NOTE(review): the resource_string defaults are evaluated once at import
    time and metadata_types is a shared mutable default -- both are read-only
    here, but confirm before mutating them.
    """
    building_sentence_dict = dict()
    building_label_dict = dict()
    building_tagsets_dict = dict()
    known_tags_dict = defaultdict(list)
    # BACnet unit code -> word tokens, from the packaged CSV bytes.
    units = csv2json(pd.read_csv(StringIO(unit_mapping_file.decode('utf-8'))),
                     'unit',
                     'word',
                     )
    units[None] = []
    units[''] = []
    # BACnet type string -> candidate tokens, same mechanism.
    bacnettypes = csv2json(pd.read_csv(StringIO(bacnettype_mapping_file.decode('utf-8'))),
                           'bacnet_type_str',
                           'candidates',
                           )
    bacnettypes[None] = []
    bacnettypes[''] = []
    for building in source_buildings:
        true_tagsets = {}
        label_dict = {}
        for labeled in LabeledMetadata.objects(building=building):
            srcid = labeled.srcid
            true_tagsets[srcid] = labeled.tagsets
            fullparsing = labeled.fullparsing
            labels = {}
            for metadata_type, pairs in fullparsing.items():
                # Keep only the label half of each (token, label) pair.
                labels[metadata_type] = [pair[1] for pair in pairs]
            label_dict[srcid] = labels
        building_tagsets_dict[building] = true_tagsets
        building_label_dict[building] = label_dict
        sentence_dict = dict()
        for raw_point in RawMetadata.objects(building=building):
            srcid = raw_point.srcid
            metadata = raw_point['metadata']
            sentences = {}
            for clm in metadata_types:
                if clm not in ['BACnetUnit', 'BACnetTypeStr']:
                    # Character-level "sentence" of the lowercased metadata.
                    sentences[clm] = [c for c in metadata.get(clm, '').lower()]
            sentence_dict[srcid] = sentences
            bacnet_unit = metadata.get('BACnetUnit')
            if bacnet_unit:
                known_tags_dict[srcid] += units[bacnet_unit]
            if bacnettype_flag:
                known_tags_dict[srcid] += bacnettypes[metadata.get('BACnetTypeStr')]
        building_sentence_dict[building] = sentence_dict
    # Srcids to infer: everything labeled in the target building.
    target_srcids = list(building_label_dict[target_building].keys())
    return building_sentence_dict, target_srcids, building_label_dict,\
        building_tagsets_dict, known_tags_dict
|
[
"bk7749@gmail.com"
] |
bk7749@gmail.com
|
50854a154c4b01f862c232713478f59d981e5b6d
|
f34a81fa55a80130e2f70197e011cccb9dee063f
|
/python/oreilly_intermediate_python/unit6_scraping/scraper3.py
|
53731b7e6bcce199400ed90c7903128ae2c3e0bd
|
[] |
no_license
|
ilyarudyak/data_science
|
7d71ecdf17cc68be33b598ae2e51cba48b5b3aab
|
deb992b2760c4fea7f39f64089f6c1884f3fdb51
|
refs/heads/master
| 2020-05-27T11:10:32.679938
| 2017-02-22T21:29:26
| 2017-02-22T21:29:26
| 82,545,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 713
|
py
|
"""Download the first few pictures linked from the NASA APOD archive page."""
from urllib.request import urlopen, urlretrieve
from urllib.parse import urljoin
from bs4 import BeautifulSoup
import os

baseUrl = "https://apod.nasa.gov/apod/archivepix.html"
archiveHtmlStr = urlopen(baseUrl).read()
# Destination directory, hoisted out of the loop (it never changes) and
# created up front -- urlretrieve() raises FileNotFoundError when the
# target directory does not exist.
downloadDir = 'apod_pictures'
os.makedirs(downloadDir, exist_ok=True)
for link in BeautifulSoup(archiveHtmlStr, "html.parser").findAll("a", limit=10):
    imgBaseUrl = urljoin(baseUrl, link['href'])
    # follow the link to image page
    imgHtmlStr = urlopen(imgBaseUrl).read()
    imgUrl = urljoin(imgBaseUrl, BeautifulSoup(imgHtmlStr, "html.parser").img['src'])
    imgName = imgUrl.split('/')[-1]
    print(imgName, imgUrl)
    # download and store image
    urlretrieve(imgUrl, os.path.join(downloadDir, imgName))
|
[
"ilyarudyak@yahoo.com"
] |
ilyarudyak@yahoo.com
|
3f9e10788f14ab2cd56b8d0fcd5113a3d30b5a99
|
673e829dda9583c8dd2ac8d958ba1dc304bffeaf
|
/data/multilingual/Hans.CMN/Serif_8/pdf_to_json_test_Hans.CMN_Serif_8.py
|
2f64816c501918d27c2ac1dc0830fcb5c2325910
|
[
"BSD-3-Clause"
] |
permissive
|
antoinecarme/pdf_to_json_tests
|
58bab9f6ba263531e69f793233ddc4d33b783b7e
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
refs/heads/master
| 2021-01-26T08:41:47.327804
| 2020-02-27T15:54:48
| 2020-02-27T15:54:48
| 243,359,934
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Hans.CMN/Serif_8/udhr_Hans.CMN_Serif_8.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
7048b0d769e8eb1e550e5e242e33d057e16460c5
|
fe4df940d16f8a9657028ee53a1780cccd32c817
|
/Python/test_roman_numerals.py
|
6fdd59f316e790b6f9ddf90e47a3baa9b1f53240
|
[
"BSD-2-Clause"
] |
permissive
|
Kwpolska/roman_numerals
|
2e6d700d9852d5072d96524d73169445e7288753
|
887e648a39fa73583f4b7cf330436f94bf88325e
|
refs/heads/master
| 2020-08-01T16:01:15.539378
| 2018-01-01T00:00:00
| 2017-12-31T18:33:35
| 73,572,730
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,621
|
py
|
# -*- encoding: utf-8 -*-
# Test Suite for Roman Numeral Converter
# Copyright © MMXVI-MMXVIII, Chris Warrick.
# All rights reserved.
# License: 3-clause BSD, see main file for details.
"""Test suite for Roman Numeral Converter."""
import pytest
from roman_numerals import to_roman, from_roman
def test_to_roman():
    """Spot-check conversion of integers to Roman numerals."""
    cases = {
        1: 'I', 2: 'II', 3: 'III', 4: 'IV', 5: 'V', 6: 'VI',
        1234: 'MCCXXXIV', 1958: 'MCMLVIII', 2222: 'MMCCXXII',
        3999: 'MMMCMXCIX',
    }
    for number, numeral in cases.items():
        assert to_roman(number) == numeral


def test_to_roman_invalid():
    """Non-positive values and anything >= 4000 must raise ValueError."""
    for bad in (0, -1, 4000):
        with pytest.raises(ValueError):
            to_roman(bad)


def test_to_roman_file():
    """Cross-check to_roman against every pair in the shared data file."""
    with open('../test_data.txt') as fh:
        for line in fh:
            left, right = line.strip().split(' ')
            assert to_roman(int(left)) == right


def test_from_roman():
    """from_roman is case-insensitive and tolerates surrounding whitespace."""
    cases = [("I", 1), ("Ii", 2), ("iii", 3), (" iv ", 4), ("V", 5), ("ViIi", 8)]
    for numeral, number in cases:
        assert from_roman(numeral) == number


def test_from_roman_file():
    """Cross-check from_roman against every pair in the shared data file."""
    with open('../test_data.txt') as fh:
        for line in fh:
            left, right = line.strip().split(' ')
            assert from_roman(right) == int(left)


if __name__ == '__main__':
    print("Please run with py.test.")
|
[
"kwpolska@gmail.com"
] |
kwpolska@gmail.com
|
70e24b5ae7a71fda9781af5c5d9bfe6cd088dd29
|
e70b678712a355a0b51632728c7781b0bdcf29f4
|
/Algorithms/Python/Contains-Duplicate-II.py
|
fdaf86c10320f2d83fdd79f38c2eb8f4dff4022d
|
[] |
no_license
|
keyi/Leetcode_Solutions
|
b3e3c6835ed335d7d4ad53a1b37e59ac15fcf3af
|
69e4e969b435ff2796bd7c4b5dad9284a853ab54
|
refs/heads/master
| 2020-05-21T23:36:20.450053
| 2018-11-11T03:45:28
| 2018-11-11T03:45:28
| 33,714,612
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
class Solution(object):
    def containsNearbyDuplicate(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: bool

        True iff some value repeats within a window of k indices.
        """
        # Only the most recent index of each value matters -- any earlier
        # duplicate is strictly farther away. This replaces the original
        # defaultdict-of-all-indices scan (which was O(n*k) and referenced
        # `collections` without importing it, a NameError at call time)
        # with a single O(n) pass keeping O(min(n, k)) state.
        last_seen = {}
        for i, num in enumerate(nums):
            prev = last_seen.get(num)
            if prev is not None and i - prev <= k:
                return True
            last_seen[num] = i
        return False
|
[
"yike921012@gmail.com"
] |
yike921012@gmail.com
|
6e1c6809f055a02678b42764cbc0f8d5eb360592
|
d9b53673b899a9b842a42060740b734bf0c63a31
|
/leetcode/python/easy/p119_getRow.py
|
f635d4dcaee648ef09c71c18a2b7eb6440bc7cc5
|
[
"Apache-2.0"
] |
permissive
|
kefirzhang/algorithms
|
a8d656774b576295625dd663154d264cd6a6a802
|
549e68731d4c05002e35f0499d4f7744f5c63979
|
refs/heads/master
| 2021-06-13T13:05:40.851704
| 2021-04-02T07:37:59
| 2021-04-02T07:37:59
| 173,903,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 825
|
py
|
class Solution:
    """LeetCode 119: Pascal's triangle row, two implementations."""
    def getRow(self, numRows):
        """Return row *numRows* keeping only one row in memory (O(k) space)."""
        if numRows == 0:
            return [1]
        row = [1]
        for _ in range(numRows):
            # Next row = pairwise sums of this row, framed by the edge 1s.
            row = [1] + [row[j] + row[j + 1] for j in range(len(row) - 1)] + [1]
        return row

    def getRow1(self, numRows):
        """Reference version: build the whole triangle, return its last row."""
        numRows += 1
        triangle = []
        for size in range(1, numRows + 1):
            current = [1] * size
            for j in range(1, size - 1):
                current[j] = triangle[-1][j - 1] + triangle[-1][j]
            triangle.append(current)
        return triangle[-1]


slu = Solution()
print(slu.getRow(4))
|
[
"8390671@qq.com"
] |
8390671@qq.com
|
61cfc91b56eeabeaa0e59ead7dfb135142cd761e
|
781e2692049e87a4256320c76e82a19be257a05d
|
/assignments/python/anagram/src/334.py
|
dcd298320ef99f702d1ae67f9b29bf6905ed5ce5
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
from itertools import permutations
def detect_anagrams(word, anagrams):
    """Return the entries of *anagrams* that are anagrams of *word*.

    Matching is case-insensitive, a (case-insensitive) copy of the word
    itself does not count, and candidate order is preserved.
    """
    # Compare sorted letter multisets instead of materialising every
    # permutation of the word: the original built a set of len(word)!
    # strings, which is astronomically slow for words beyond ~9 letters.
    # Sorted-signature equality selects exactly the same candidates.
    target = word.lower()
    signature = sorted(target)
    return [x for x in anagrams
            if x.lower() != target and sorted(x.lower()) == signature]
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
4a6218d1fe1d17ba0f622a6721c1ecf9f6a001cf
|
f75ec2c20c3208350d310038a2cd0a67253b44df
|
/example/petstore/apis/pet/updatePetWithForm.py
|
858c0b878074e4e9ffc1a6be75fb2b60d1c1cbb1
|
[] |
no_license
|
vichooz/swagger_codegen
|
e53f59f3cd2c080157863698f932a606705db4e4
|
8238356075eea4218b2e6a645c7ea2b8826b1044
|
refs/heads/master
| 2022-08-03T04:32:49.291426
| 2020-05-27T06:09:28
| 2020-05-27T06:09:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
from __future__ import annotations
import pydantic
import datetime
import asyncio
import typing
from pydantic import BaseModel
from swagger_codegen.api.request import ApiRequest
def make_request(self, petid: int, name: str = ..., status: str = ...,) -> None:
    """Updates a pet in the store with form data"""
    # NOTE(review): the Ellipsis defaults are the codegen's "not provided"
    # sentinel; _only_provided presumably strips them before the request is
    # sent -- confirm in the generated client base class.
    m = ApiRequest(
        method="POST",
        # petId is interpolated into the swagger path template.
        path="/api/v3/pet/{petId}".format(petId=petid,),
        content_type=None,
        body=None,
        headers=self._only_provided({}),
        query_params=self._only_provided({"name": name, "status": status,}),
        cookies=self._only_provided({}),
    )
    return self.make_request({}, m)
|
[
"n10101010@gmail.com"
] |
n10101010@gmail.com
|
7e3544a76de5495c3cd0761fe45aa1f6c3227d1c
|
d66818f4b951943553826a5f64413e90120e1fae
|
/hackerearth/Algorithms/Foo and Exams/test.py
|
5f769f75d7e391b7e97605e6f823c1e48e0947d1
|
[
"MIT"
] |
permissive
|
HBinhCT/Q-project
|
0f80cd15c9945c43e2e17072416ddb6e4745e7fa
|
19923cbaa3c83c670527899ece5c3ad31bcebe65
|
refs/heads/master
| 2023-08-30T08:59:16.006567
| 2023-08-29T15:30:21
| 2023-08-29T15:30:21
| 247,630,603
| 8
| 1
|
MIT
| 2020-07-22T01:20:23
| 2020-03-16T06:48:02
|
Python
|
UTF-8
|
Python
| false
| false
| 548
|
py
|
import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
    """Run the 'Foo and Exams' solution script against the sample I/O."""
    @patch('builtins.input', side_effect=[
        '2',
        '2 2 2 2 10',
        '2 3 5 7 1000',
    ])
    def test_case_0(self, input_mock=None):
        """Feed the two sample cases via mocked input() and compare stdout."""
        text_trap = io.StringIO()
        with redirect_stdout(text_trap):
            # Importing executes the script under test; module caching means
            # this works only once per interpreter -- fine for a single case.
            import solution
        self.assertEqual(text_trap.getvalue(),
                         '1\n' +
                         '7\n')
if __name__ == '__main__':
    unittest.main()
|
[
"hbinhct@gmail.com"
] |
hbinhct@gmail.com
|
9cf33f7b6e47a4d3933a7bb52923ed6a13dbdcbb
|
7b437e095068fb3f615203e24b3af5c212162c0d
|
/enaml/wx/wx_factories.py
|
ff75d3f1abc7037597e7f21f73d30b03aa6bd316
|
[
"BSD-3-Clause"
] |
permissive
|
ContinuumIO/enaml
|
d8200f97946e5139323d22fba32c05231c2b342a
|
15c20b035a73187e8e66fa20a43c3a4372d008bd
|
refs/heads/master
| 2023-06-26T16:16:56.291781
| 2013-03-26T21:13:52
| 2013-03-26T21:13:52
| 9,047,832
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,135
|
py
|
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
def action_factory():
from .wx_action import WxAction
return WxAction
def action_group_factory():
from .wx_action_group import WxActionGroup
return WxActionGroup
def calendar_factory():
from .wx_calendar import WxCalendar
return WxCalendar
def check_box_factory():
from .wx_check_box import WxCheckBox
return WxCheckBox
def combo_box_factory():
from .wx_combo_box import WxComboBox
return WxComboBox
def container_factory():
from .wx_container import WxContainer
return WxContainer
def date_selector_factory():
from .wx_date_selector import WxDateSelector
return WxDateSelector
# def datetime_selector_factory():
# from .wx_datetime_selector import WxDatetimeSelector
# return WxDatetimeSelector
def dock_pane_factory():
from .wx_dock_pane import WxDockPane
return WxDockPane
def field_factory():
from .wx_field import WxField
return WxField
def group_box_factory():
from .wx_group_box import WxGroupBox
return WxGroupBox
def html_factory():
from .wx_html import WxHtml
return WxHtml
# def image_view_factory():
# from .wx_image_view import WxImageView
# return WxImageView
def label_factory():
from .wx_label import WxLabel
return WxLabel
def main_window_factory():
from .wx_main_window import WxMainWindow
return WxMainWindow
def menu_factory():
from .wx_menu import WxMenu
return WxMenu
def menu_bar_factory():
from .wx_menu_bar import WxMenuBar
return WxMenuBar
def mpl_canvas_factory():
from .wx_mpl_canvas import WxMPLCanvas
return WxMPLCanvas
def notebook_factory():
from .wx_notebook import WxNotebook
return WxNotebook
def page_factory():
from .wx_page import WxPage
return WxPage
def push_button_factory():
from .wx_push_button import WxPushButton
return WxPushButton
def progress_bar_factory():
from .wx_progress_bar import WxProgressBar
return WxProgressBar
def radio_button_factory():
from .wx_radio_button import WxRadioButton
return WxRadioButton
def scroll_area_factory():
from .wx_scroll_area import WxScrollArea
return WxScrollArea
def slider_factory():
from .wx_slider import WxSlider
return WxSlider
def spin_box_factory():
from .wx_spin_box import WxSpinBox
return WxSpinBox
def split_item_factory():
from .wx_split_item import WxSplitItem
return WxSplitItem
def splitter_factory():
from .wx_splitter import WxSplitter
return WxSplitter
# def text_editor_factory():
# from .wx_text_editor import WxTextEditor
# return WxTextEditor
def tool_bar_factory():
from .wx_tool_bar import WxToolBar
return WxToolBar
def window_factory():
from .wx_window import WxWindow
return WxWindow
WX_FACTORIES = {
'Action': action_factory,
'ActionGroup': action_group_factory,
'Calendar': calendar_factory,
'CheckBox': check_box_factory,
'ComboBox': combo_box_factory,
'Container': container_factory,
'DateSelector': date_selector_factory,
'DockPane': dock_pane_factory,
'Field': field_factory,
'GroupBox': group_box_factory,
'Html': html_factory,
'Label': label_factory,
'MainWindow': main_window_factory,
'Menu': menu_factory,
'MenuBar': menu_bar_factory,
'MPLCanvas': mpl_canvas_factory,
'Notebook': notebook_factory,
'Page': page_factory,
'PushButton': push_button_factory,
'ProgressBar': progress_bar_factory,
'RadioButton': radio_button_factory,
'ScrollArea': scroll_area_factory,
'Slider': slider_factory,
'SpinBox': spin_box_factory,
'SplitItem': split_item_factory,
'Splitter': splitter_factory,
'ToolBar': tool_bar_factory,
'Window': window_factory,
}
|
[
"sccolbert@gmail.com"
] |
sccolbert@gmail.com
|
d1989cdd6717ce76b1db0189d8bdca041eb9cf69
|
c99427245fdb9cb3f1d9f788963759664885b6ec
|
/tests/models/test_driver_response.py
|
7811b95f8930a672138a5f21d99db91fb41da588
|
[] |
no_license
|
QualiSystems/cloudshell-shell-connectivity-flow
|
98cf33e49fbdef147e37dbda0179b76400492e36
|
c2a1bb8589b25a62dd7c20f7ef35c7f2f8d5aa1c
|
refs/heads/master
| 2023-08-22T22:48:28.051220
| 2023-08-17T18:04:19
| 2023-08-17T18:27:45
| 218,023,805
| 0
| 1
| null | 2023-08-17T18:27:46
| 2019-10-28T10:42:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,468
|
py
|
import pytest
from cloudshell.shell.flows.connectivity.models.driver_response import (
ConnectivityActionResult,
DriverResponseRoot,
)
@pytest.mark.parametrize(
("success", "msg"), ((True, "success msg"), (False, "error msg"))
)
def test_connectivity_action_result(success, msg, action_model):
if success:
result = ConnectivityActionResult.success_result(action_model, msg)
assert result.infoMessage == msg
assert result.errorMessage == ""
else:
result = ConnectivityActionResult.fail_result(action_model, msg)
assert result.infoMessage == ""
assert result.errorMessage == msg
assert result.success is success
assert result.actionId == action_model.action_id
assert result.type == action_model.type.value
assert result.updatedInterface == action_model.action_target.name
def test_prepare_response(action_model):
result = ConnectivityActionResult.success_result(action_model, "success msg")
response = DriverResponseRoot.prepare_response([result])
assert response.driverResponse.actionResults[0] == result
assert response.json() == (
'{"driverResponse": {"actionResults": ['
'{"actionId": "96582265-2728-43aa-bc97-cefb2457ca44_0900c4b5-0f90-42e3-b495", '
'"type": "removeVlan", '
'"updatedInterface": "centos", '
'"infoMessage": "success msg", '
'"errorMessage": "", '
'"success": true'
"}]}}"
)
|
[
"saklar13@gmail.com"
] |
saklar13@gmail.com
|
c3112deb23ba50edf3fb5f4abc9447ee7a5e1ffb
|
818432c7fdf26abff9f3988ce5f3ef419564b062
|
/ReferentialGym/modules/batch_reshape_repeat_module.py
|
a15d557a40e4740ab5170269046558e9154f9e4d
|
[
"MIT"
] |
permissive
|
mk788/ReferentialGym
|
3f0703d4def489354278bba642e79d3b5bac5db2
|
afe22da2ac20c0d24e93b4dbd1f1ad61374d1a6c
|
refs/heads/master
| 2023-03-16T01:32:21.826448
| 2021-03-03T12:41:42
| 2021-03-03T12:41:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,760
|
py
|
from typing import Dict, List
import torch
import torch.nn as nn
import torch.optim as optim
from .module import Module
def build_BatchReshapeRepeatModule(id:str,
config:Dict[str,object],
input_stream_keys:List[str]) -> Module:
return BatchReshapeRepeatModule(id=id,
config=config,
input_stream_keys=input_stream_keys)
class BatchReshapeRepeatModule(Module):
def __init__(self,
id:str,
config:Dict[str,object],
input_stream_keys:List[str]):
"""
Reshape input streams data while keeping the batch dimension identical.
:param config: Dict of parameters. Expectes:
- "new_shape": List of None/Tuple/List/torch.Size representing the new shape
of each input stream, without mentionning the batch dimension.
If multiple input streams are proposed but only one element in this
list, then the list is expanded by repeating the last element.
- "repetition": List of None/Tuple/List/torch.Size representing the repetition
of each input stream, without mentionning the batch dimension.
If multiple input streams are proposed but only one element in this
list, then the list is expanded by repeating the last element.
"""
input_stream_ids = {
f"input_{idx}":ik
for idx, ik in enumerate(input_stream_keys)
}
assert "new_shape" in config,\
"BatchReshapeRepeatModule relies on 'new_shape' list.\n\
Not found in config."
assert "repetition" in config,\
"BatchReshapeRepeatModule relies on 'repetition' list.\n\
Not found in config."
super(BatchReshapeRepeatModule, self).__init__(id=id,
type="BatchReshapeRepeatModule",
config=config,
input_stream_ids=input_stream_ids)
self.new_shape = self.config["new_shape"]
assert isinstance(self.new_shape, list)
self.repetition = self.config["repetition"]
assert isinstance(self.repetition, list)
self.n_input_streams = len(self.input_stream_ids)
while len(self.new_shape) < self.n_input_streams:
self.new_shape.append(self.new_shape[-1])
while len(self.repetition) < self.n_input_streams:
self.repetition.append(self.repetition[-1])
def compute(self, input_streams_dict:Dict[str,object]) -> Dict[str,object] :
"""
Operates on inputs_dict that is made up of referents to the available stream.
Make sure that accesses to its element are non-destructive.
:param input_streams_dict: dict of str and data elements that
follows `self.input_stream_ids`"s keywords and are extracted
from `self.input_stream_keys`-named streams.
:returns:
- outputs_stream_dict:
"""
outputs_stream_dict = {}
for idx, (k, inp) in enumerate(input_streams_dict.items()):
new_shape = self.new_shape[idx]
if new_shape is None:
new_shape = inp.shape[1:]
n_inp = inp.reshape(inp.shape[0], *new_shape)
repeat = self.repetition[idx]
if repeat is not None:
n_inp = n_inp.repeat(1, *repeat)
outputs_stream_dict[f"output_{idx}"] = n_inp
return outputs_stream_dict
|
[
"denamganai.kevin@gmail.com"
] |
denamganai.kevin@gmail.com
|
957af4ae3ebfa8a436ddd99b94dbc339d8387feb
|
5c964389074a95f166ae682c4196226d46adec09
|
/leet390.py
|
c99f7a2a8947a85ff5ce8cd45f7c710cb89f3da7
|
[] |
no_license
|
shach934/leetcode
|
0a64e851be1419f19da8d2d09294f19758b355b8
|
59de9ba6620c64efbd2cc0aab8c22a82b2c0df21
|
refs/heads/master
| 2023-01-12T01:48:31.877822
| 2020-11-16T20:48:37
| 2020-11-16T20:48:37
| 306,147,305
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,385
|
py
|
390. Elimination Game
There is a list of sorted integers from 1 to n. Starting from left to right, remove the first number and every other number afterward until you reach the end of the list.
Repeat the previous step again, but this time from right to left, remove the right most number and every other number from the remaining numbers.
We keep repeating the steps again, alternating left to right and right to left, until a single number remains.
Find the last number that remains starting with a list of length n.
Example:
Input:
n = 9,
1 2 3 4 5 6 7 8 9
2 4 6 8
2 6
6
Output:
6
class Solution(object):
def lastRemaining(self, n):
"""
:type n: int
:rtype: int
"""
head, tail, length, step, forward = 0, n-1, n, 2, True
while length > 1:
if forward:
head = head + step // 2
#print([i+1 for i in range(head, n, step)])
length //= 2
tail = head + step * (length - 1)
step *= 2
forward = False
else:
tail = tail - step//2
length //= 2
head = tail - step * (length - 1)
#print([i for i in range(head, tail+1, step)])
step *= 2
forward = True
return head + 1
|
[
"tjukid@gmail.com"
] |
tjukid@gmail.com
|
d01231c94199e4348df604a2592dd5e6dfabe359
|
eddb777e95b6c6baa413fd049f959ab593de5041
|
/src/main/python/bedk/simpleEventExample.py
|
596b022b83c7b74cbf21aba5cb4fbb85ca16b057
|
[] |
no_license
|
jdgwartney/sandbox
|
bfb641b448b2ffb0e0446cdd942165c7501abd8e
|
62f7a3a034a73de938c9f241638ff1170373e932
|
refs/heads/master
| 2020-05-16T23:35:38.215681
| 2014-03-31T14:16:45
| 2014-03-31T14:16:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
#!/usr/bin/env python3
from boundary.api.event import createEvent
from boundary.api.event import getEvent
myApiHost = 'api.boundary.com'
myOrganizationID = '3ehRi7uZeeaTN12dErF5XOnRXjC'
myApiKey = 'ARI0PzUzWYUo7GG1OxiHmABTpr9'
myFingerprintFields = '@title'
myTitle = 'My shiny new event'
#
# Create a new boundary Event
#
eventId = createEvent(myApiHost,
myApiKey,
myFingerprintFields,
myTitle,myOrganizationID)
print('event id: ' + eventId)
#
# Fetch the newly created boundary event
#
newEvent = getEvent(myApiHost,
myApiKey,
eventId)
print(newEvent)
|
[
"davidg@boundary.com"
] |
davidg@boundary.com
|
6895b4868d46ed8ceb2a03fcedf4563bbb31f208
|
b9f4e78555a2644c56fd6cd1ac82c5b729875d70
|
/atomic_utils.py
|
0185009c1bda6c267a8d78bbd7ad7a75f4bd5877
|
[] |
no_license
|
taiyan33/elegant-concurrency-lab
|
7be31a6f6c49c33f25e6cc02c1a19d0a1dc71a8b
|
781800eaf3149643178e0df3a41f6b9fcf0e6cec
|
refs/heads/master
| 2020-04-21T07:59:46.085819
| 2017-06-11T05:17:06
| 2017-06-11T05:17:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,757
|
py
|
#!/usr/bin/env python
import requests
from bs4 import BeautifulSoup
PYCON_TW_ROOT_URL = 'https://tw.pycon.org/'
# conform accessing only its frame
def query_text(url):
return requests.get(url).text
def parse_out_href_gen(text):
# soup is bs4.BeautifulSoup
# a_tag is bs4.element.Tag
soup = BeautifulSoup(text, 'html.parser')
return (a_tag.get('href', '') for a_tag in soup.find_all('a'))
def is_relative_href(url):
return not url.startswith('http') and not url.startswith('mailto:')
# conform using atomic operators
url_visted_map = {}
def is_visited_or_mark(url):
visited = url_visted_map.get(url, False)
if not visited:
url_visted_map[url] = True
return visited
if __name__ == '__main__':
# test cases
print('Testing query_text ... ', end='')
text = query_text('https://tw.pycon.org')
print(repr(text[:40]))
print('Testing parse_out_href_gen ... ', end='')
href_gen = parse_out_href_gen(text)
print(repr(list(href_gen)[:3]))
print('Testing is_relative_href ...')
assert is_relative_href('2017/en-us')
assert is_relative_href('/2017/en-us')
assert not is_relative_href('https://www.facebook.com/pycontw')
assert not is_relative_href('mailto:organizers@pycon.tw')
print('Testing is_visited_or_mark ...')
assert not is_visited_or_mark('/')
assert is_visited_or_mark('/')
# benchmark
from time import time
print('Benchmarking query_text ... ', end='') # 40x
s = time()
text = query_text('https://tw.pycon.org')
e = time()
print(f'{e-s:.4}s')
print('Benchmarking parse_out_href_gen ... ', end='') # 1x
s = time()
list(parse_out_href_gen(text))
e = time()
print(f'{e-s:.4}s')
|
[
"mosky.tw@gmail.com"
] |
mosky.tw@gmail.com
|
7add687cf561df1746688e09b316cec08365024c
|
5c84ae48b34b5e2bd67c5301cd81f0de9da7decd
|
/core/__init__.py
|
9cb75c21bccc21b70930c052672ca9546c1d30bf
|
[] |
no_license
|
makesaturdays/saturdays.class
|
00a575620d7e162f7f4de981209265396676683d
|
031bda06aced31cd13c87f25928ff93e25be826f
|
refs/heads/master
| 2021-01-12T10:12:23.570143
| 2017-01-12T20:05:52
| 2017-01-12T20:05:52
| 76,386,728
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 490
|
py
|
from flask import Flask
import os
import sys
if getattr(sys, 'frozen', False):
app_path = os.path.abspath(os.path.dirname(sys.executable))
app_path = app_path.replace('/server.app/Contents/MacOS', '')
elif __file__:
app_path = os.path.abspath(os.path.dirname(__file__))+'/..'
app = Flask(__name__, static_folder=app_path+'/files', template_folder=app_path+'/layouts')
app.path = app_path
app.config.from_pyfile(app.path+'/config/environment.py')
from core.pages import *
|
[
"phil@boeuf.coffee"
] |
phil@boeuf.coffee
|
d7adeb2560642c4a30f71925b7ad592a459f774d
|
2eccd1b25468e278ba6568063901dfa6608c0271
|
/assignment5/reducer.py
|
a2bfeaafa6f2cdc3970fae62fa71a7c5926b98a4
|
[
"MIT"
] |
permissive
|
IITDU-BSSE06/ads-demystifying-the-logs-Toufiqur0636
|
089aa00e8dd1c858e01b0945ac24ecc886044f96
|
4b01a8721b7fc869737572de1bdc155f86397e9b
|
refs/heads/master
| 2021-07-26T00:51:26.576645
| 2017-11-08T06:58:52
| 2017-11-08T06:58:52
| 109,025,731
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
#!/usr/bin/python
import sys
path_map = dict()
for line in sys.stdin:
path = line.strip()
path_map[path] = path_map.get(path, 0) + 1
print len(path_map)
|
[
"noreply@github.com"
] |
IITDU-BSSE06.noreply@github.com
|
a8aa733cd8ee2d48a463b17928cdb8c8623b6a9f
|
6c285510932df0477ae33752d0f7c3b153a462c7
|
/examples/pygmsh/screw.py
|
5143666808bc7f6013b3294f50bc69d90c78fa8a
|
[
"MIT"
] |
permissive
|
kebitmatf/meshzoo
|
4b24a223daadb9c057abb2b083cf401c74220072
|
6c7ac99af2c7c0673d69b85bd3330ee9a9b67f90
|
refs/heads/master
| 2020-12-02T17:46:29.592835
| 2017-04-06T10:17:20
| 2017-04-06T10:17:20
| 96,425,485
| 1
| 0
| null | 2017-07-06T12:00:25
| 2017-07-06T12:00:25
| null |
UTF-8
|
Python
| false
| false
| 869
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import pygmsh as pg
import numpy as np
def create_screw_mesh():
geom = pg.Geometry()
# Draw a cross.
poly = geom.add_polygon([
[0.0, 0.5, 0.0],
[-0.1, 0.1, 0.0],
[-0.5, 0.0, 0.0],
[-0.1, -0.1, 0.0],
[0.0, -0.5, 0.0],
[0.1, -0.1, 0.0],
[0.5, 0.0, 0.0],
[0.1, 0.1, 0.0]
],
lcar=0.05
)
axis = [0, 0, 1]
geom.extrude(
'Surface{%s}' % poly,
translation_axis=axis,
rotation_axis=axis,
point_on_axis=[0, 0, 0],
angle=2.0 / 6.0 * np.pi
)
points, cells = pg.generate_mesh(geom)
return points, cells['tetra']
if __name__ == '__main__':
import meshio
points, cells = create_screw_mesh()
meshio.write('screw.e', points, {'tetra': cells})
|
[
"nico.schloemer@gmail.com"
] |
nico.schloemer@gmail.com
|
93b81ba56da6268cbd01cb00ed76b8d353f62f83
|
94460fe9a2df490b1763f60b25b26bce5d3d5940
|
/EyePatterns/clustering_algorithms/affinity_propagation.py
|
b987e9dbb65634f5c650b49c7269067f11f622a3
|
[
"MIT"
] |
permissive
|
Sale1996/Pattern-detection-of-eye-tracking-scanpaths
|
1711262280dde728e1b2559e9ca9f9f66d3d514f
|
15c832f26dce98bb95445f9f39f454f99bbb6029
|
refs/heads/master
| 2022-12-09T03:01:12.260438
| 2020-08-27T11:38:42
| 2020-08-27T11:38:42
| 290,264,718
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 989
|
py
|
# from work L Frey, Brendan J., and Delbert Dueck. "Clustering by passing messages between data points." science 315.5814 (2007): 972-976
# https://science.sciencemag.org/content/315/5814/972
import numpy as np
import sklearn.cluster
class AffinityPropagation:
def __init__(self, affinity='precomputed', damping=0.5):
self.affinity = affinity
self.damping = damping
def fit(self, similarity_scores):
self.aff_prop = sklearn.cluster.AffinityPropagation(affinity=self.affinity, damping=self.damping)
self.aff_prop.fit(similarity_scores)
def get_exemplars_and_their_features(self, data):
exemplar_features_map = {}
for cluster_id in np.unique(self.aff_prop.labels_):
exemplar = data[self.aff_prop.cluster_centers_indices_[cluster_id]]
cluster = np.unique(data[np.nonzero(self.aff_prop.labels_ == cluster_id)])
exemplar_features_map[exemplar] = cluster
return exemplar_features_map
|
[
"ftnmejl96@gmail.com"
] |
ftnmejl96@gmail.com
|
347fedbdef33c7456662ebee31abe968b83b35a9
|
505343f6ace00d22f8753c1a943a5794a619e698
|
/katas/Python/7 kyu/Last 541629460b198da04e000bb9.py
|
e5417442445b5919eba81b62a2941374fb315ad6
|
[] |
no_license
|
bullet1337/codewars
|
7652e50bf768bc47976a9124dd98b93602d4d458
|
ba7f13ddd766158b41e036dae5d6b15f7f08761a
|
refs/heads/master
| 2020-03-27T05:04:03.751302
| 2019-04-30T17:45:39
| 2019-04-30T17:45:39
| 145,991,995
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
# https://www.codewars.com/kata/541629460b198da04e000bb9
def last(*args):
return args[-1][-1] if isinstance(args[-1], (list, str)) else args[-1]
|
[
"alichek95@mail.ru"
] |
alichek95@mail.ru
|
6483bf1d9e0ab9d094eeb3e3abb6169b01a6ab06
|
74649c1220c68ad0af79e420d572e3769fcd7a53
|
/mlprodict/onnxrt/ops_cpu/op_negative_log_likelihood_loss.py
|
aca354dbbd58eebedc07f69186c63df0d5149f30
|
[
"MIT"
] |
permissive
|
sdpython/mlprodict
|
e62edcb428700cb2c4527e54e96431c1d2b36118
|
27d6da4ecdd76e18292f265fde61d19b66937a5c
|
refs/heads/master
| 2023-05-08T10:44:30.418658
| 2023-03-08T22:48:56
| 2023-03-08T22:48:56
| 112,469,804
| 60
| 13
|
MIT
| 2023-04-19T01:21:38
| 2017-11-29T11:57:10
|
Python
|
UTF-8
|
Python
| false
| false
| 3,472
|
py
|
"""
@file
@brief Runtime operator.
"""
import numpy
from ._op import OpRun
def _compute_negative_log_likelihood_loss(x, target, weight=None,
reduction=b'mean', ignore_index=None):
"""
Modified version of `softmaxcrossentropy.py
<https://github.com/onnx/onnx/blob/main/onnx/backend/
test/case/node/negativeloglikelihoodloss.py>`_ to handle other type
than float32.
"""
input_shape = x.shape
if len(input_shape) == 1:
raise RuntimeError(f"Unsupported shape {input_shape!r}.")
target_shape = target.shape
N = input_shape[0]
C = input_shape[1]
# initialize the positional weights when required
gather_weight = None
if weight is not None:
# setting mode='clip' to deal with ignore_index > C or < 0 cases.
# when the target value is > C or < 0, it doesn't matter which value we are
# taking in gather_weight, since it will be set to 0 in the following if-block
# use numpy.int32 to make it compatible with x86 machines
gather_weight = numpy.take(weight, numpy.array(
target, dtype=numpy.int32), mode='clip')
# set `ignore_index`'s loss weight to 0.
# The loss tensor will be multiplied by this weight tensor,
# so `ingore_index`'s loss value will be eliminated.
if ignore_index is not None:
gather_weight = numpy.where(
target == ignore_index, 0, gather_weight).astype(dtype=x.dtype)
elif ignore_index != -1:
gather_weight = numpy.where(
target == ignore_index, 0, 1).astype(dtype=x.dtype)
# if input is 4-d and above, make it 3-d
if len(input_shape) != 3:
x = x.reshape((N, C, -1))
target = target.reshape((N, -1))
# Get a dimension from the reshaped input.
# If the original input shape is [N, C, H, W],
# the D here should be H * W because we reshape
# [N, C, H, W] to [N, C, H * W].
D = x.shape[2]
neg_gather_element_input = numpy.zeros((N, D), dtype=x.dtype)
for i in range(N):
for d in range(D):
if target[i][d] != ignore_index:
neg_gather_element_input[i][d] = -x[i][target[i][d]][d]
loss = neg_gather_element_input
# if the input was 4-d or above reshape to the right shape
if len(input_shape) != 3:
loss = loss.reshape(target_shape)
# apply the weights when required
if gather_weight is not None:
loss = gather_weight * loss
if reduction == b'mean':
loss = loss.sum() / gather_weight.sum()
return (loss, )
if reduction == b'mean':
loss = numpy.mean(loss)
elif reduction == b'sum':
loss = numpy.sum(loss)
return (loss, )
class NegativeLogLikelihoodLoss(OpRun):
"""
Python runtime for function *NegativeLogLikelihoodLoss*.
"""
atts = {'reduction': b'mean', 'ignore_index': -1}
def __init__(self, onnx_node, desc=None, **options):
OpRun.__init__(self, onnx_node, desc=desc,
expected_attributes=NegativeLogLikelihoodLoss.atts,
**options)
def _run(self, x, target, weight=None, attributes=None, verbose=0, fLOG=None): # pylint: disable=W0221
return _compute_negative_log_likelihood_loss(
x, target, weight=weight, reduction=self.reduction, # pylint: disable=E1101
ignore_index=self.ignore_index) # pylint: disable=E1101
|
[
"noreply@github.com"
] |
sdpython.noreply@github.com
|
783c38fd2518fc737458e6c949ea4c94fda25305
|
a598604583977761b57ba88dd34455d7127876ec
|
/frb/scripts/image.py
|
6e189c8c6588927cfbe005667e43d85e94a7c53c
|
[
"BSD-3-Clause"
] |
permissive
|
FRBs/FRB
|
b973cd3d72489fe1f8e7957d6f29f5c657faf4a3
|
ef68b9a9fbb6d042b7a8c6c8818d55668d7e8639
|
refs/heads/main
| 2023-08-04T15:00:52.591942
| 2023-08-02T13:36:44
| 2023-08-02T13:36:44
| 85,193,203
| 52
| 26
|
BSD-3-Clause
| 2023-08-02T13:36:46
| 2017-03-16T12:26:19
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,077
|
py
|
#!/usr/bin/env python
"""
Script generate an image of an FRB
"""
from __future__ import (print_function, absolute_import, division, unicode_literals)
from IPython import embed
def parser(options=None):
import argparse
# Parse
parser = argparse.ArgumentParser(description='Script to make a quick image figure [v1.0]')
parser.add_argument("fits_file", type=str, help="Image FITS file with WCS")
parser.add_argument("frb_coord", type=str, help="FRB Coordinates, e.g. J081240.7+320809 or 122.223,-23.2322 or 07:45:00.47,34:17:31.1 or FRB name (FRB180924)")
parser.add_argument("--imsize", default=30., type=float, help="Image size in arcsec")
parser.add_argument("--vmnx", type=str, help="Image scale: vmin,vmax")
parser.add_argument("--outfile", default='image.png', type=str, help="Output filename")
if options is None:
pargs = parser.parse_args()
else:
pargs = parser.parse_args(options)
return pargs
def main(pargs):
""" Run
"""
import warnings
import numpy as np
from matplotlib import pyplot as plt
from astropy.io import fits
from astropy import units
from frb import frb
from frb.figures import galaxies as ffgal
from frb.figures import utils as ffutils
from linetools.scripts.utils import coord_arg_to_coord
# Load up
hdu = fits.open(pargs.fits_file)
icoord = coord_arg_to_coord(pargs.frb_coord)
# Parse
if pargs.vmnx is not None:
tstr = pargs.vmnx.split(',')
vmnx = (float(tstr[0]), float(tstr[1]))
else:
vmnx = (None,None)
# Dummy FRB object
FRB = frb.FRB('TMP', icoord, 0.)
FRB.set_ee(1.0, 1.0, 0., 95.)
fig = plt.figure(figsize=(7, 7))
ffutils.set_mplrc()
ffgal.sub_image(fig, hdu, FRB, vmnx=vmnx, cmap='gist_heat',
frb_clr='white', imsize=pargs.imsize) #img_center=HG190608.coord,
# Layout and save
plt.tight_layout(pad=0.2, h_pad=0.1, w_pad=0.1)
plt.savefig(pargs.outfile, dpi=300)
plt.close()
print('Wrote {:s}'.format(pargs.outfile))
|
[
"xavier@ucolick.org"
] |
xavier@ucolick.org
|
55e6c8bae8bfe8625f52f2a79caa71ab90ae8587
|
aa0f29e5b6832ab36f440f964b403712b2312fa8
|
/lab/1/lab1.py
|
2df7edbd876b74984911220aa1ab34efcadc1885
|
[] |
no_license
|
zweed4u/Embedded-Systems-Design-II
|
d8d3f608a77ba6919ca2cb692832a60400498265
|
c525d481f51c7b6d6fdbbd111b0211c6a24d2af1
|
refs/heads/master
| 2021-05-11T16:02:42.067488
| 2018-04-20T18:49:34
| 2018-04-20T18:49:34
| 117,721,976
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,533
|
py
|
#!/usr/bin/python
"""
Zachary Weeden
CPET-563 Lab 1
January 23, 2018
"""
import os
import sys
import math
from PyQt4 import QtGui, QtCore
hw_flag = 0 # 0 = read from file
class Encoders:
def __init__(self, hw_flag):
"""Constructor for Encoder class"""
self.hw_flag = hw_flag
self.contents = None
self.number_of_instructions = 0
def get_encoders(self):
"""
Opens the encoders text file and stores it into class attribute
Assumes that the encoders txt file is named appropriately and in the same directory as invocation
:return: str - the contents of the encoders.txt file
"""
with open(os.getcwd() + '/encoders.txt', 'r') as f:
self.contents = f.read()
return self.contents
def parse_file(self):
"""
Parse the actual encoder text into usable data structure
:return: dict - {'header_from_1st_row': [array_of_values_for_given_column]}
"""
# Read from file
if self.hw_flag == 0:
if self.contents is None:
self.get_encoders()
# assumes format of l_dir\tleft\tr_dir\tright as the first row - data starts 2nd row
headers = self.contents.split('\n')[0].split()
control_bits = self.contents.split('\n')[1:]
l_dir = []
left = []
r_dir = []
right = []
encoder_map = {}
for row in control_bits: # ensure that the line from the file was not empty
if row.strip():
l_dir.append(int(row[0]))
left.append(int(row[2]))
r_dir.append(int(row[4]))
right.append(int(row[6]))
self.number_of_instructions += 1
encoder_map[headers[0]] = l_dir
encoder_map[headers[1]] = left
encoder_map[headers[2]] = r_dir
encoder_map[headers[3]] = right
return encoder_map
elif self.hw_flag == 1:
return 0
class Board(QtGui.QGraphicsView):
def __init__(self, parent):
super(Board, self).__init__()
# Timer update to be between 2 and 4 seconds
self.timer_init = 2000.0
self.parent = parent
self.scene = QtGui.QGraphicsScene(self)
self.setScene(self.scene)
# Should be able to dynamically grab board dimensions based on machine
self.board_width = 50
self.board_height = 50
# effectively sets the logical scene coordinates from 0,0 to 1000,1000
self.scene.addRect(0, 0, self.board_width, self.board_height)
self.rover = Rover(self, self.board_width, self.board_height)
self.scene.addItem(self.rover)
self.timer = QtCore.QBasicTimer()
self.rover.setPos(25, 25)
def startGame(self):
self.status = 0
self.rover.setPos(25, 25)
self.timer.start(self.timer_init, self)
def timerEvent(self, event):
if self.status == 0:
self.status = self.rover.basic_move()
else:
self.timer.stop()
def resizeEvent(self, event):
super(Board, self).resizeEvent(event)
self.fitInView(self.scene.sceneRect(), QtCore.Qt.KeepAspectRatio)
class LabOne(QtGui.QMainWindow):
def __init__(self, parsed_encoders):
super(LabOne, self).__init__()
self.encoders = parsed_encoders
self.statusBar().showMessage('CPET-563 Lab 1 :: Zachary Weeden 2018')
exitAction = QtGui.QAction(QtGui.QIcon('exit.png'), '&Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Exit application')
exitAction.triggered.connect(QtGui.qApp.quit)
menubar = self.menuBar()
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(exitAction)
self.hLayout = QtGui.QHBoxLayout()
self.dockFrame = QtGui.QFrame()
self.dockFrame.setLayout(self.hLayout)
self.dock = QtGui.QDockWidget(self)
self.dock.setWidget(self.dockFrame)
self.addDockWidget(QtCore.Qt.DockWidgetArea(4), self.dock)
self.setFocusPolicy(QtCore.Qt.StrongFocus)
self.board = Board(self)
self.vLayout = QtGui.QVBoxLayout()
self.vLayout.addWidget(self.board)
self.frame = QtGui.QFrame(self)
self.frame.setLayout(self.vLayout)
self.setCentralWidget(self.frame)
self.setWindowTitle("Lab 1")
self.showMaximized()
self.show()
class Rover(QtGui.QGraphicsItem):
def __init__(self, parent, board_width, board_height):
super(Rover, self).__init__()
self.instruction_step = 0
self.angle = 0
self.color = QtGui.QColor(0, 0, 255)
self.rover_width = 8
self.rover_height = 6
self.board_width = board_width
self.board_height = board_height
self.parent = parent
def boundingRect(self):
"""
Bounds of the rover object
:return: QtCore.QRectF object
"""
return QtCore.QRectF(-self.rover_width / 2, -self.rover_height / 2,
self.rover_width, self.rover_height)
def set_color(self, color_tuple):
"""
Set the color of the rover rectangle
:param color_tuple: tuple of RGB vals
:return:
"""
self.color = QtGui.QColor(color_tuple[0], color_tuple[1],
color_tuple[2])
def paint(self, painter, option, widget):
"""
Draw the rover object rectangle
:param painter:
:param option:
:param widget:
:return:
"""
painter.drawPixmap(self.boundingRect(), QtGui.QPixmap("rover.svg"),
QtCore.QRectF(0.0, 0.0, 640.0, 480.0))
def basic_move(self):
"""
Determines the next coordinates based of encoder arrays
:return: 0
"""
left_encoder = self.parent.parent.encoders['left']
right_encoder = self.parent.parent.encoders['right']
if self.instruction_step < len(
left_encoder) or self.instruction_step < len(right_encoder):
left_ticks, right_ticks = left_encoder[self.instruction_step], \
right_encoder[self.instruction_step]
print "Left ticks: {} Right ticks: {}".format(left_ticks,
right_ticks)
if left_ticks != right_ticks:
# Different values for each encoder - parse
# I'm so sorry - this is awful
print "Rotating"
angle = (left_ticks - right_ticks) * 45
self.angle += angle
self.rotate(angle)
else:
forward_x = max(left_ticks, right_ticks) * math.cos(
self.angle * (math.pi / 180))
forward_y = -1 * (max(left_ticks, right_ticks) * math.sin(
-1 * self.angle * (math.pi / 180)))
self.setX(self.x() + forward_x)
self.setY(self.y() + forward_y)
self.instruction_step += 1
else:
print "Encoder text file fully traversed"
return 0
if __name__ == '__main__':
parsed_encoders = Encoders(hw_flag).parse_file()
app = QtGui.QApplication(sys.argv)
app.setFont(QtGui.QFont("Helvetica", 10))
LabOne(parsed_encoders).board.startGame()
sys.exit(app.exec_())
|
[
"zdw7287@rit.edu"
] |
zdw7287@rit.edu
|
879fdbd096ab73377632692fa3713686b5f68cc5
|
eb87c8b1ce8591d207643d3924b7939228f1a4fe
|
/conformance_suite/try_except_catch_final_some_exn.py
|
5b54eda44dcdf5cc6bfd75757a4335e5e481a7c3
|
[] |
no_license
|
brownplt/insta-model
|
06543b43dde89913c219d476ced0f51a439add7b
|
85e2c794ec4b1befa19ecb85f2c8d2509ec8cf42
|
refs/heads/main
| 2023-08-30T19:06:58.083150
| 2023-05-03T18:53:58
| 2023-05-10T22:29:18
| 387,500,638
| 5
| 0
| null | 2022-04-23T23:06:52
| 2021-07-19T14:53:09
|
Racket
|
UTF-8
|
Python
| false
| false
| 248
|
py
|
# try_except_catch_final_some_exn.py
# This should pass.
# This should terminate.
def f():
try:
raise Exception("foo")
except Exception:
return 2
else:
return 3
finally:
return 42
assert f() is 42
|
[
"lukuangchen1024@gmail.com"
] |
lukuangchen1024@gmail.com
|
5ffb069ccecb464878b86b09af6fa21444860185
|
55c93a864428a26c50f43f97f6834fd27dd0210f
|
/ufs/interface/fviews.py
|
7d66e6beeb80fab20f0625975ae173a0a48604b0
|
[] |
no_license
|
sun3shines/ufs
|
12a635b67a7c39eb56d2e691038f4690c390a72c
|
7fa7ebdefc245dceca4a1e7f394cc6e5640b71b7
|
refs/heads/master
| 2021-01-18T23:49:50.292917
| 2016-03-28T22:30:45
| 2016-03-28T22:30:45
| 53,540,708
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,247
|
py
|
# -*- coding: utf-8 -*-
import json
from cloudcommon.common.bufferedhttp import jresponse
from cloudcommon.common.common.swob import Response
from ufs.utils.path import path2o
from ufs.interface.fst import FSt
def get(req):
    """Stream back the contents of the file named in the JSON body.

    Responds 404 when the path does not exist on disk.
    """
    body = json.loads(req.body)
    target = path2o(body.get('path'))
    entry = FSt(target)
    if not entry.exists:
        return Response(status=404)
    resp = Response(app_iter=entry.get(), request=req)
    return req.get_response(resp)
def put(req):
    """Write the request payload to the path given in the headers.

    The destination path, expected md5 and data type all arrive as
    request headers; the payload itself is streamed from WSGI input.
    """
    headers = req.headers
    target = path2o(headers.get('path'))
    entry = FSt(target)
    checksum = headers.get('md5')
    dtype = headers.get('datatype')
    payload = req.environ['wsgi.input']
    status = entry.put(checksum, dtype, payload)
    return Response(status=status)
def post(req):
    """Apply the request headers as metadata on an existing path.

    Responds 404 when the path does not exist.
    """
    body = json.loads(req.body)
    entry = FSt(path2o(body.get('path')))
    if not entry.exists:
        return Response(status=404)
    status = entry.setm(req.headers)
    return Response(status=status)
def head(req):
    """Return a path's metadata.

    When the JSON body carries ``is_swift == 'true'`` the metadata is
    returned as response headers (Swift style); otherwise it is returned
    as a JSON body.  A missing path yields a 404.
    """
    param = json.loads(req.body)
    path = path2o(param.get('path'))
    is_swift = param.get('is_swift')
    s = FSt(path)
    if not s.exists:
        # BUG FIX: the original did ``Response(ecode)``, which passes 404 as
        # the positional *body* argument of swob.Response, so the client
        # actually received status 200.  Pass it as the status keyword,
        # consistent with every other handler in this module.
        return Response(status=404)
    data = s.getm()
    if 'true' == is_swift:
        return Response(status=200, headers=data)
    return Response(json.dumps(data), status=200)
def copy(req):
    """Copy ``src`` to ``dst`` (both taken from the JSON body).

    Responds 404 when the source does not exist.
    """
    body = json.loads(req.body)
    source = FSt(path2o(body.get('src')))
    dest = FSt(path2o(body.get('dst')))
    if not source.exists:
        return Response(status=404)
    return Response(status=source.copy(dest))
def move(req):
    """Move ``src`` to ``dst`` (both taken from the JSON body).

    Responds 404 when the source does not exist.
    """
    body = json.loads(req.body)
    source = FSt(path2o(body.get('src')))
    dest = FSt(path2o(body.get('dst')))
    if not source.exists:
        return Response(status=404)
    return Response(status=source.move(dest))
def delete(req):
    """Delete the path named in the JSON body; 404 when it is missing."""
    body = json.loads(req.body)
    entry = FSt(path2o(body.get('path')))
    if not entry.exists:
        return Response(status=404)
    return Response(status=entry.delete())
|
[
"sun__shines@163.com"
] |
sun__shines@163.com
|
ae870734f57f441d0112a8161a621c42015ae31e
|
c5d87c7f25e3fe9b17c1e88993b0ed6831e52acb
|
/N_NumpyDemo/NumpyDemo6.py
|
fb9ff8771e9c2fc83e38fa3c8989b3ca34a5357e
|
[] |
no_license
|
GIS90/python_base_use
|
e55d55f9df505dac45ddd332fb65dcd08e8e531f
|
7166ca85975bb7c56a5fbb6b723fd8300c4dd5d1
|
refs/heads/master
| 2020-04-02T08:33:49.461307
| 2018-10-23T03:33:41
| 2018-10-23T03:33:41
| 154,249,857
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 541
|
py
|
# -*- coding: utf-8 -*-
"""
------------------------------------------------
describe: NumPy broadcasting demo plus a raw binary dump with tofile.
(Python 2 — uses the print statement.)
------------------------------------------------
"""
import numpy as np
# Column vector of shape (6, 1): 0, 10, ..., 50.
arr1 = np.arange(0, 60, 10).reshape(-1, 1)
# Row vector of shape (5,): 0..4.
arr2 = np.arange(0, 5)
print arr1.shape, arr2.shape
# Broadcasting: (6, 1) + (5,) -> a (6, 5) grid of pairwise sums.
c = arr1 + arr2
# a = arr2.repeat(6, axis=0)
# print a
# x, y = np.ogrid[0:5, 0:5]
# print x, y
#
# a = np.arange(6).reshape(2, 3)
# b = np.arange(6, 12).reshape(3, 2)
# c = np.dot(a, b)
# print c
# tofile writes the raw element buffer only — no shape/dtype header, and
# the ".txt" extension notwithstanding the file is binary.
a = np.arange(0, 12).reshape(3, 4)
a.tofile("array.txt")
|
[
"mingliang.gao@qunar.com"
] |
mingliang.gao@qunar.com
|
6f83cb13a9d62661ea4d59a51c90be850916f02b
|
7cc86034b847fcf239ef8184aa7c062b35f28d08
|
/venv/bin/python-config
|
41e0320aea18839a8bdd9f35b36863b3e87a44ea
|
[] |
no_license
|
asimonia/headlines
|
a6b480e6d1a029d1890f4abd8153fdfbc3cbfcb8
|
43cde9690d8669b82c024788392551ed07a7763b
|
refs/heads/master
| 2021-01-17T16:09:27.467950
| 2016-07-20T00:05:12
| 2016-07-20T00:05:12
| 63,735,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,367
|
#!/Users/alexsimonian/Desktop/Example/chapter5/venv/bin/python
import sys
import getopt
import sysconfig
# Command-line options understood by this python-config script; newer
# interpreter versions add extra options below.
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
              'ldflags', 'help']
if sys.version_info >= (3, 2):
    # Keep '--help' last by inserting before it.
    valid_opts.insert(-1, 'extension-suffix')
    valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
    valid_opts.append('configdir')
def exit_with_usage(code=1):
    """Write a usage line listing every supported option to stderr, then
    terminate the process with the given exit status (default 1)."""
    option_list = '|'.join('--' + opt for opt in valid_opts)
    sys.stderr.write("Usage: {0} [{1}]\n".format(sys.argv[0], option_list))
    sys.exit(code)
# Parse argv; any unknown option or an empty command line is a usage error.
try:
    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
    exit_with_usage()
if not opts:
    exit_with_usage()
# Short aliases used repeatedly in the dispatch loop below.
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
    # Help exits successfully, unlike the error paths above.
    exit_with_usage(code=0)
# Print the requested build-configuration values, one option at a time,
# mirroring the behaviour of the pythonX.Y-config shell script.
for opt in opt_flags:
    if opt == '--prefix':
        print(sysconfig.get_config_var('prefix'))
    elif opt == '--exec-prefix':
        print(sysconfig.get_config_var('exec_prefix'))
    elif opt in ('--includes', '--cflags'):
        flags = ['-I' + sysconfig.get_path('include'),
                 '-I' + sysconfig.get_path('platinclude')]
        if opt == '--cflags':
            flags.extend(getvar('CFLAGS').split())
        print(' '.join(flags))
    elif opt in ('--libs', '--ldflags'):
        abiflags = getattr(sys, 'abiflags', '')
        libs = ['-lpython' + pyver + abiflags]
        libs += getvar('LIBS').split()
        libs += getvar('SYSLIBS').split()
        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
        # shared library in prefix/lib/.
        if opt == '--ldflags':
            if not getvar('Py_ENABLE_SHARED'):
                libs.insert(0, '-L' + getvar('LIBPL'))
            if not getvar('PYTHONFRAMEWORK'):
                libs.extend(getvar('LINKFORSHARED').split())
        print(' '.join(libs))
    elif opt == '--extension-suffix':
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        if ext_suffix is None:
            # Older interpreters exposed the suffix under 'SO' instead.
            ext_suffix = sysconfig.get_config_var('SO')
        print(ext_suffix)
    elif opt == '--abiflags':
        if not getattr(sys, 'abiflags', None):
            exit_with_usage()
        print(sys.abiflags)
    elif opt == '--configdir':
        print(sysconfig.get_config_var('LIBPL'))
|
[
"alex.simonian@gmail.com"
] |
alex.simonian@gmail.com
|
|
a6e7e3c5d2a02e001d1ac9086bb44e9729676268
|
d8edd97f8f8dea3f9f02da6c40d331682bb43113
|
/dataset_real_64.py
|
28aa5d22487066f0e4d762be774ef937a5159856
|
[] |
no_license
|
mdubouch/noise-gan
|
bdd5b2fff3aff70d5f464150443d51c2192eeafd
|
639859ec4a2aa809d17eb6998a5a7d217559888a
|
refs/heads/master
| 2023-07-15T09:37:57.631656
| 2021-08-27T11:02:45
| 2021-08-27T11:02:45
| 284,072,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,404
|
py
|
import uproot3 as uproot
import torch
import logging
import numpy as np
import sklearn.preprocessing as skp
class Data():
    """Loader and pre-processor for CDC noise-hit data used to train the GAN.

    Reads hits from a ROOT file via uproot, selects valid hits, splits
    them into train/test arrays, quantile+minmax normalises the
    continuous features (edep, t, doca), and chunks everything into
    fixed-length sequences served by a PyTorch DataLoader.
    """
    def __init__(self):
        self.file = uproot.open('reconstructible_mc5a02_rconsthits_geom.root')
        # Protons-on-target for this sample; n_bunches assumes 16e6 POT/bunch.
        # NOTE(review): n_bunches is never used in this class — confirm callers need it.
        self.n_pot = 990678399
        self.n_bunches = self.n_pot / 16e6
        self.cdc_tree = self.file['cdc_geom/wires']
        self.data_tree = self.file['noise/noise']
        # Map each feature to ~N(0,1), then squash into [-1, 1] for the GAN.
        self.qt = skp.QuantileTransformer(output_distribution='normal', n_quantiles=5000)
        self.minmax = skp.MinMaxScaler(feature_range=(-1, 1))
    def get_cdc_tree(self):
        """Return the CDC wire-geometry tree as an array."""
        return self.cdc_tree.array()
    def load(self):
        """Read the noise tree and build the train/test feature arrays."""
        self.data = self.data_tree.array()
        tree = self.data
        # Set up the data, throw away the invalid ones
        wire = tree['wire']
        event_id = tree['event_id']
        layer = tree['layer']
        edep = tree['edep']
        doca = tree['doca']
        t = tree['t']
        dbg_x = tree['x']
        dbg_y = tree['y']
        dbg_z = tree['z']
        track_id = tree['track_id']
        pid = tree['pid']
        # Keep hits on a real wire with non-negligible energy deposit.
        _select = (wire>=0) * (edep>1e-6)
        self.layer = layer[_select]
        self.event_id = event_id[_select]
        self.t = t[_select]
        self.dbg_x = dbg_x[_select]
        self.dbg_y = dbg_y[_select]
        self.dbg_z = dbg_z[_select]
        self.track_id = track_id[_select]
        self.pid = pid[_select]
        self.doca = doca[_select]
        self.wire = wire[_select]
        # Same mask as _select, recomputed inline (equivalent selection).
        self.edep = edep[(tree['wire']>=0) * (edep>1e-6)]
        # Last 2048 hits form the test split; first 64*2048 form the train
        # split.  NOTE(review): these can overlap if the sample has fewer
        # than 65*2048 selected hits — confirm the dataset is large enough.
        self.test_edep = self.edep[-1*2048:]
        self.test_t = self.t[-1*2048:]
        self.test_doca = self.doca[-1*2048:]
        self.test_wire = self.wire[-1*2048:]
        self.edep = self.edep[:64*2048]
        self.t = self.t[:64*2048]
        self.doca = self.doca[:64*2048]
        self.wire = self.wire[:64*2048]
        logging.info('Train size wire %d pid %d doca %d' % (self.wire.size, self.pid.size, self.doca.size))
        # Format data into tensor of shape (n_hits, 3): [edep, t, doca].
        self.train = np.array([self.edep, self.t, self.doca], dtype=np.float32).T
        self.test = np.array([self.test_edep, self.test_t, self.test_doca], dtype=np.float32).T
        logging.info('train shape {0}'.format(self.train.shape))
        logging.info('test shape {0}'.format(self.test.shape))
    def preprocess(self):
        """Fit scalers on the train split; transform both splits."""
        self.qt.fit(self.train)
        self.train_qt = self.qt.transform(self.train)
        self.test_qt = self.qt.transform(self.test)
        self.minmax.fit(self.train_qt)
        self.train_minmax = self.minmax.transform(self.train_qt)
        self.test_minmax = self.minmax.transform(self.test_qt)
    def inv_preprocess(self, tensor):
        """Undo minmax then quantile scaling; returns a CPU torch tensor."""
        inv_tensor = self.qt.inverse_transform(
                self.minmax.inverse_transform(tensor.detach().cpu().numpy()))
        return torch.tensor(inv_tensor)
    def diagnostic_plots(self, output_dir):
        """Save histograms of the raw features and a round-trip check of
        the preprocessing into ``output_dir``."""
        # Some diagnostic plots
        import matplotlib
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
        import numpy as np
        plt.hist(np.log10(self.edep), bins=50)
        plt.savefig(output_dir+'train_edep.png')
        plt.clf()
        plt.hist(np.log10(self.t), bins=50)
        plt.savefig(output_dir+'train_t.png')
        plt.clf()
        plt.hist(self.doca, bins=50)
        plt.savefig(output_dir+'train_doca.png')
        plt.clf()
        #plt.figure(figsize=(6,6))
        #plt.scatter(dbg_z, dbg_y, s=edep*1e3, c=doca, cmap='inferno')
        #plt.savefig(output_dir+'train_scatter.png')
        #plt.clf()
        # Overlay inverse-transformed train data on the raw edep histogram;
        # the two should coincide if preprocess() round-trips correctly.
        __inv_train = self.inv_preprocess(torch.from_numpy(self.train_minmax))
        plt.hist(np.log10(__inv_train[:,0]), bins=50, alpha=0.7)
        plt.hist(np.log10(self.edep), bins=50, alpha=0.7)
        plt.savefig(output_dir+'inv_transform.png')
        plt.clf()
    def _chunk(self, continuous_features, discrete_features, seq_len, batch_size):
        """Split features into non-overlapping sequences of length seq_len
        and wrap them in a shuffling DataLoader."""
        data_torch = torch.from_numpy(continuous_features).T
        chunk_size = seq_len
        chunk_stride = seq_len
        data_chunked = data_torch.unfold(1,
                chunk_size, chunk_stride) # (feature, batch, seq)
        n_chunks = data_chunked.shape[1]
        n_features = data_chunked.shape[0]
        wire_torch = torch.from_numpy(discrete_features).long().unsqueeze(0)
        wire_chunked = wire_torch.unfold(1,
                chunk_size, chunk_stride) # (feature, batch, seq)
        logging.info('Continuous features shape: {0} Discrete features shape: {1}'.format(data_chunked.shape, wire_chunked.shape))
        dataset = torch.utils.data.TensorDataset(data_chunked.permute(1, 0, 2),
                wire_chunked.permute(1, 0, 2))
        loader = torch.utils.data.DataLoader(dataset,
                batch_size=batch_size, shuffle=True, pin_memory=True)
        return loader, dataset, n_chunks
    def chunk(self, seq_len, batch_size):
        """Build train/test loaders from the preprocessed splits; returns
        the train loader, dataset and chunk count."""
        self.train_loader, self.train_dataset, self.n_chunks = self._chunk(self.train_minmax,
                self.wire, seq_len, batch_size)
        self.test_loader, self.test_dataset, self.n_test_chunks = self._chunk(self.test_minmax,
                self.test_wire, seq_len, batch_size)
        print('TRAIN CHUNKS:', self.n_chunks, ', TEST CHUNKS:', self.n_test_chunks)
        return self.train_loader, self.train_dataset, self.n_chunks
|
[
"m.dubouchet18@imperial.ac.uk"
] |
m.dubouchet18@imperial.ac.uk
|
40085d38f183f16add546c39dfb8ee6928005916
|
a1ce33bcd9bde0e044c88dbd591774952f0469dc
|
/unidade-2/media-aluno/media_aluno.py
|
bea1a5d91fd8c970eea39a344127e9e33f268034
|
[] |
no_license
|
juliafealves/tst-lp1
|
91420522fcc9e3f66ad451aa4c500f5df24e51b7
|
1f324d66429a95796e02f718f0cd35dd7e7bd4a2
|
refs/heads/master
| 2021-09-07T11:08:34.420830
| 2018-02-22T02:40:50
| 2018-02-22T02:40:50
| 107,728,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
# coding: utf-8
# Student grade average.
# (C) 2017, Júlia Alves / UFCG Programação 1
# Exam weights (they sum to 10, the divisor below).
peso1 = 2.0
peso2 = 3.0
peso3 = 5.0
# Read the three grades from stdin (Python 2: raw_input returns a str).
nota1 = float(raw_input())
nota2 = float(raw_input())
nota3 = float(raw_input())
# Weighted average: total of weighted grades divided by total weight (10).
media = (nota1 * peso1 + nota2 * peso2 + nota3 * peso3) / 10
# Print the student's average.
print media
|
[
"juliafealves@gmail.com"
] |
juliafealves@gmail.com
|
8e2ec3b08050f7aff3bcbed97a713ccfccce0ee8
|
68c22afafa16c53ed61874ae13463d4b6831a695
|
/examples/addresses.py
|
7ac7986a76fe4cc8b1687b3b2aca93acf8ac9b7c
|
[
"MIT"
] |
permissive
|
wielandgmeiner/embit
|
edc7500bfca15472a21e3df80c961efc740614bd
|
caebc88c749994f2eb704231ba20c7fa10d70d4d
|
refs/heads/master
| 2020-12-19T21:57:42.027714
| 2020-01-23T22:35:49
| 2020-01-23T22:35:49
| 235,864,966
| 0
| 0
| null | 2020-01-23T19:00:23
| 2020-01-23T19:00:23
| null |
UTF-8
|
Python
| false
| false
| 1,761
|
py
|
from embit import script
from embit import ec
from embit.networks import NETWORKS
from binascii import unhexlify, hexlify
def main():
    """Demonstrate embit address derivation on regtest.

    Derives legacy/segwit/nested-segwit addresses from one hard-coded
    private key, then p2sh/p2wsh/p2sh-p2wsh addresses for a fixed
    2-of-3 multisig, printing each one.
    """
    # --- single-key addresses (all from the same private key) ---
    prv = ec.PrivateKey.from_wif("L2e5y14ZD3U1J7Yr62t331RtYe2hRW2TBBP8qNQHB8nSPBNgt6dM")
    pub = prv.get_public_key()
    print("Public key:")
    print(hexlify(pub.serialize()))
    # All addresses below are generated for regtest.
    network = NETWORKS['regtest']
    print("Legacy (pay to pubkey hash):")
    print(script.p2pkh(pub).address(network))
    print("Segwit (pay to witness pubkey hash):")
    print(script.p2wpkh(pub).address(network))
    print("Nested segwit (p2sh-p2wpkh):")
    print(script.p2sh(script.p2wpkh(pub)).address(network))
    print("\nMiltisig address (2 of 3):")
    # Unsorted public keys for the multisig examples.
    pubs = [
        ec.PublicKey.parse(unhexlify(hexkey))
        for hexkey in (
            "02edd7a58d2ff1e483d35f92a32e53607423f936b29bf95613cab24b0b7f92e0f1",
            "03a4a6d360acc45cb281e0022b03218fad6ee93881643488ae39d22b854d9fa261",
            "02e1fdc3b011effbba4b0771eb0f7193dee24cfe101ab7e8b64516d83f7116a615",
        )
    ]
    print("Legacy, unsorted (p2sh):")
    print(script.p2sh(script.multisig(2, pubs)).address(network))
    print("Native segwit, sorted (p2wsh):")
    witness_sc = script.p2wsh(script.multisig(2, sorted(pubs)))
    print(witness_sc.address(network))
    print("Nested segwit, sorted (p2sh-p2wsh):")
    witness_sc = script.p2wsh(script.multisig(2, sorted(pubs)))
    print(script.p2sh(witness_sc).address(network))
if __name__ == '__main__':
    # Run the address demo when executed directly as a script.
    main()
|
[
"snigirev.stepan@gmail.com"
] |
snigirev.stepan@gmail.com
|
88a3377b8c3c25f15919717ee9a53cf77e86855d
|
528da3624bb03db22c4b97870de0bfd0205c5908
|
/CODETOOL/search.py
|
3a911964be67b136b0e44d85164102610be01966
|
[
"MIT"
] |
permissive
|
ihgazni2/shproperty
|
e4f6e011a1fe03695ffd1f5933deb638a481490e
|
fc994d8228d20d00dbdc060941fce81a56bba673
|
refs/heads/master
| 2020-04-13T02:22:45.413052
| 2019-09-19T09:26:48
| 2019-09-19T09:26:48
| 162,900,767
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,052
|
py
|
import sys
import os
import subprocess
import shlex
def pipe_shell_cmds(shell_CMDs):
    '''
    Run a sequence of commands as a shell-style pipeline and return the
    (stdout, stderr) bytes tuple of the last process.

    ``shell_CMDs`` maps 1-based position in the pipeline to a command
    string, e.g.::

        shell_CMDs = {}
        shell_CMDs[1] = 'netstat -n'
        shell_CMDs[2] = "awk {'print $6'}"

    Commands are split with shlex (no shell involved) and each process's
    stdin is wired to the previous process's stdout.
    '''
    # Renamed from ``len`` — the original shadowed the builtin.
    n = len(shell_CMDs)
    procs = {}
    procs[1] = subprocess.Popen(shlex.split(shell_CMDs[1]),
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Middle stages (2 .. n-1) read from the previous stage.
    for i in range(2, n):
        procs[i] = subprocess.Popen(shlex.split(shell_CMDs[i]),
                                    stdin=procs[i - 1].stdout,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
    if n > 1:
        procs[n] = subprocess.Popen(shlex.split(shell_CMDs[n]),
                                    stdin=procs[n - 1].stdout,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
    # communicate() on the last stage drains the whole pipeline.
    result = procs[n].communicate()
    # Reap every remaining child so no zombies are left behind.
    if n > 1:
        for i in range(2, n + 1):
            procs[i].wait()
    else:
        procs[n].wait()
    return result
#dname="./Main" dname="./edict"
def check(fname,suffix='js',dname="./"):
    """Grep every file under ``dname`` whose name matches ``suffix`` (or
    'sh') for the pattern ``fname``, printing each file that matches
    along with the matching output.

    Relies on the external ``tree``, ``egrep`` and ``cat`` binaries.
    """
    fl = []
    # Stage 1: list files with `tree -f`, keep lines matching the suffix.
    shell_CMDs = {}
    shell_CMDs[1] = 'tree -f ' + dname
    shell_CMDs[2] = 'egrep ' + '"' + suffix +'|sh"'
    shell_CMDs[3] = 'egrep ""'
    rslt = pipe_shell_cmds(shell_CMDs)[0].decode('utf-8')
    # Strip the box-drawing characters (├ ─ │ └) and NBSP that `tree`
    # uses for its ASCII-art layout, leaving bare paths.
    fl = rslt.replace(chr(9500),"").replace(chr(9472),"").replace(chr(9474),"").replace("\xa0","").replace(chr(9492),"").split("\n")
    # Turn each path line into a `cat <path>` command; the final element
    # (tree's summary line) is dropped.
    for i in range(0,fl.__len__() -1):
        ele = fl[i].strip(' ').strip('\t').strip(' ').strip('\t')
        fl[i] = 'cat ' + ele
    fl.pop(fl.__len__() -1)
    # Stage 2: cat each file through egrep and report non-empty results.
    for cmd in fl:
        shell_CMDs = {}
        shell_CMDs[1] = cmd
        shell_CMDs[2] = "egrep " + '"' + fname + '"'
        rslt = pipe_shell_cmds(shell_CMDs)
        if(rslt == (b'', b'')):
            pass
        else:
            print("---location---")
            print(cmd)
            print("---rslt----")
            print(rslt[0].decode('utf-8'))
            print("----info---")
            print(rslt[1].decode('utf-8'))
# CLI entry: accept 1-3 positional args (fname [suffix [dname]]) by retrying
# check() with progressively fewer arguments when argv is short.
# NOTE(review): the bare excepts also swallow real failures inside check()
# itself, not just IndexError from missing argv entries — confirm intent.
try:
    check(sys.argv[1],sys.argv[2],sys.argv[3])
except:
    try:
        check(sys.argv[1],sys.argv[2])
    except:
        check(sys.argv[1])
    else:
        pass
else:
    pass
|
[
"terryinzaghi@163.com"
] |
terryinzaghi@163.com
|
972c53032105db50a0fd1fedbf84567a14467411
|
bc441bb06b8948288f110af63feda4e798f30225
|
/influxdb_service_sdk/model/cmdb_extend/subsystem_dependency_pb2.pyi
|
dbfaa61f6584c29813088feb8ee25ba84930e6f6
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803
| 2020-06-16T07:25:41
| 2020-06-16T07:25:41
| 199,773,131
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,360
|
pyi
|
# @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from influxdb_service_sdk.model.cmdb_extend.app_dependency_pb2 import (
AppDependency as influxdb_service_sdk___model___cmdb_extend___app_dependency_pb2___AppDependency,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class SubsystemDependency(google___protobuf___message___Message):
    # Type stub for the SubsystemDependency protobuf message.
    # NOTE: auto-generated (see file header) — regenerate instead of editing.
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    class ConnectSubsystems(google___protobuf___message___Message):
        # Nested message describing one connected subsystem.
        DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
        abbreviation = ... # type: typing___Text
        object_id = ... # type: typing___Text
        instance_id = ... # type: typing___Text
        name = ... # type: typing___Text
        def __init__(self,
            *,
            abbreviation : typing___Optional[typing___Text] = None,
            object_id : typing___Optional[typing___Text] = None,
            instance_id : typing___Optional[typing___Text] = None,
            name : typing___Optional[typing___Text] = None,
            ) -> None: ...
        if sys.version_info >= (3,):
            @classmethod
            def FromString(cls, s: builtin___bytes) -> SubsystemDependency.ConnectSubsystems: ...
        else:
            @classmethod
            def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> SubsystemDependency.ConnectSubsystems: ...
        def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
        def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
        def ClearField(self, field_name: typing_extensions___Literal[u"abbreviation",b"abbreviation",u"instance_id",b"instance_id",u"name",b"name",u"object_id",b"object_id"]) -> None: ...
    abbreviation = ... # type: typing___Text
    name = ... # type: typing___Text
    object_id = ... # type: typing___Text
    instance_id = ... # type: typing___Text
    @property
    def components(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[influxdb_service_sdk___model___cmdb_extend___app_dependency_pb2___AppDependency]: ...
    @property
    def connect_subsystems(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[SubsystemDependency.ConnectSubsystems]: ...
    def __init__(self,
        *,
        abbreviation : typing___Optional[typing___Text] = None,
        name : typing___Optional[typing___Text] = None,
        object_id : typing___Optional[typing___Text] = None,
        instance_id : typing___Optional[typing___Text] = None,
        components : typing___Optional[typing___Iterable[influxdb_service_sdk___model___cmdb_extend___app_dependency_pb2___AppDependency]] = None,
        connect_subsystems : typing___Optional[typing___Iterable[SubsystemDependency.ConnectSubsystems]] = None,
        ) -> None: ...
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> SubsystemDependency: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> SubsystemDependency: ...
    def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"abbreviation",b"abbreviation",u"components",b"components",u"connect_subsystems",b"connect_subsystems",u"instance_id",b"instance_id",u"name",b"name",u"object_id",b"object_id"]) -> None: ...
|
[
"service@easyops.cn"
] |
service@easyops.cn
|
578bad733cfd9b560cf88476f651afaf40a28da1
|
68d38b305b81e0216fa9f6769fe47e34784c77f2
|
/auto_generate_scripts/server/script_generator/pcadvisor_co_uk_generator.py
|
981b44af8e52193c9704c79a9c7b23127324867a
|
[] |
no_license
|
ADJet1437/ScrapyProject
|
2a6ed472c7c331e31eaecff26f9b38b283ffe9c2
|
db52844411f6dac1e8bd113cc32a814bd2ea3632
|
refs/heads/master
| 2022-11-10T05:02:54.871344
| 2020-02-06T08:01:17
| 2020-02-06T08:01:17
| 237,448,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,669
|
py
|
# -*- coding: utf8 -*-
# Assembles a Scrapy spider for pcadvisor.co.uk by concatenating code
# fragments emitted by SpiderGenerator; the result is written to disk by
# the block at the bottom of this file.
import sys
sys.path.append("../")
from server.gen_spiders import *
code_fragments = []
spa = SpiderGenerator()
# Boilerplate imports for the generated spider.
return_code = spa.gen_import()
code_fragments.append(return_code)
# Spider class skeleton: name, base class, domain and start URL.
return_code = spa.gen_init(spider_name = "Pcadvisor_co_ukSpider", spider_type = "AlaSpider", allowed_domains = "'pcadvisor.co.uk'", start_urls = "'http://www.pcadvisor.co.uk/review/'")
code_fragments.append(return_code)
# Level 1: review listing pages (pagination via the '>>' link).
return_code = spa.gen_level(level_index = "1", need_parse_javascript = "")
code_fragments.append(return_code)
return_code = spa.gen_request_single_url(url_xpath = "//a[starts-with(.,'>>')]/@href", level_index = "1", url_regex = "", product_fields = [])
code_fragments.append(return_code)
return_code = spa.gen_request_urls(urls_xpath = "//div[@class='bd']/h2/a/@href", level_index = "2", url_regex = "", include_original_url = "", params_xpath = {}, params_regex = {})
code_fragments.append(return_code)
# Level 2: individual review pages — extract category, product and review.
return_code = spa.gen_level(level_index = "2", need_parse_javascript = "")
code_fragments.append(return_code)
return_code = spa.get_category(category_leaf_xpath = "//ul[contains(@class,'crumb')]/li[last()-1]/a//text()", category_path_xpath = "//ul[contains(@class,'crumb')]/li[position()<last()]/a//text()")
code_fragments.append(return_code)
return_code = spa.gen_product(sii_xpath = "//meta[@property='og:url']/@content", pname_xpath = "//ul[contains(@class,'crumb')]/li[last()]/a//text()", ocn_xpath = "//ul[contains(@class,'crumb')]/li[last()-1]/a//text()", pic_xpath = "//meta[@property='og:image']/@content", manuf_xpath = "")
code_fragments.append(return_code)
# Star rating: whole stars + half stars, rendered as an XPath expression.
return_code = spa.gen_review(sii_xpath = "//meta[@property='og:url']/@content", pname_xpath = "//ul[contains(@class,'crumb')]/li[last()]/a//text()", rating_xpath = "translate(string(number(count(//img[@class='ratings' and contains(@src,'whitestarfilled')])+0.5*count(//img[@class='ratings' and contains(@src,'whiteHalfStar')]))),'0','')", date_xpath = "//time/@datetime", pros_xpath = "", cons_xpath = "", summary_xpath = "//meta[@property='og:description']/@content", verdict_xpath = "//*[contains(.,'VERDICT')]/following-sibling::p[1]/text()", author_xpath = "//meta[@name='author']/@content", title_xpath = "//meta[@property='og:title']/@content", award_xpath = "", awpic_xpath = "")
code_fragments.append(return_code)
return_code = spa.get_dbasecategoryname(dbcn = "pro")
code_fragments.append(return_code)
return_code = spa.get_sourcetestscale(scale = "5", review_type = "pro")
code_fragments.append(return_code)
# Post-extraction cleanups: pull the 7-digit id out of the URL and strip
# the trailing " review" from product names / the time from dates.
return_code = spa.clean_field(type = "product", field = "source_internal_id", regex = "((?<=\-)\d{7}(?=\/))", review_type = "pro")
code_fragments.append(return_code)
return_code = spa.clean_field(type = "review", field = "source_internal_id", regex = "((?<=\-)\d{7}(?=\/))", review_type = "pro")
code_fragments.append(return_code)
return_code = spa.clean_field(type = "product", field = "ProductName", regex = "(\w.*(?=\sreview))", review_type = "pro")
code_fragments.append(return_code)
return_code = spa.clean_field(type = "review", field = "ProductName", regex = "(\w.*(?=\sreview))", review_type = "pro")
code_fragments.append(return_code)
return_code = spa.clean_field(type = "review", field = "TestDateText", regex = "(\d[^/s]*(?=T))", review_type = "pro")
code_fragments.append(return_code)
return_code = spa.save_product()
code_fragments.append(return_code)
return_code = spa.save_review(review_type = "pro")
code_fragments.append(return_code)
# Write the assembled spider source to disk.
# NOTE(review): the output filename says techadvisor_co_uk while this
# generator targets pcadvisor.co.uk — confirm this is intentional
# (PC Advisor was rebranded as Tech Advisor).
script_name = "/home/alascrapy/alaScrapy/alascrapy/spiders/techadvisor_co_uk.py"
# Use a context manager so the file is closed even if a write fails
# (the original opened and closed the handle manually).
with open(script_name, 'w+') as fh:
    for code in code_fragments:
        fh.write(code)
        fh.write("")
|
[
"liangzijie1437@gmail.com"
] |
liangzijie1437@gmail.com
|
a746e06768428227791bfa75408479c3b234fd09
|
7246faf9a222269ce2612613f58dc5ff19091f10
|
/프로그래머스/프로그래머스 - 단어 변환.py
|
dec405ba20f05310d904b9b961854c446de5742f
|
[] |
no_license
|
gusdn3477/Algorithm_Study
|
87a2eb72a8488d9263a86db70dadc7944434d41d
|
3fefe1dcb40122157845ffc542f41cb097711cc8
|
refs/heads/main
| 2023-08-30T12:18:21.412945
| 2021-09-28T13:00:11
| 2021-09-28T13:00:11
| 308,364,230
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
from collections import deque
def BFS(begin, target, words):
    """Return the minimum number of single-letter conversions needed to
    turn ``begin`` into ``target``, where every intermediate word must
    come from ``words``.  Returns 0 when ``target`` is unreachable.

    Assumes all words share the same length (guaranteed by the problem).
    """
    queue = deque([(begin, 0)])
    seen = [False] * len(words)
    while queue:
        word, depth = queue.popleft()
        if word == target:
            return depth
        for idx, candidate in enumerate(words):
            if seen[idx]:
                continue
            # Words are adjacent when they differ in exactly one position.
            same = sum(a == b for a, b in zip(word, candidate))
            if same == len(candidate) - 1:
                seen[idx] = True
                queue.append((candidate, depth + 1))
    return 0
def solution(begin, target, words):
    """Programmers '단어 변환': minimum conversion count via BFS."""
    return BFS(begin, target, words)
|
[
"gusdn3477@naver.com"
] |
gusdn3477@naver.com
|
4f151b429aa31d456b30c77bec814947baa83c6c
|
711756b796d68035dc6a39060515200d1d37a274
|
/output_cog_tags/initial_8783.py
|
7c63375310d5ee9587704b93f42ed2a9a09dec62
|
[] |
no_license
|
batxes/exocyst_scripts
|
8b109c279c93dd68c1d55ed64ad3cca93e3c95ca
|
a6c487d5053b9b67db22c59865e4ef2417e53030
|
refs/heads/master
| 2020-06-16T20:16:24.840725
| 2016-11-30T16:23:16
| 2016-11-30T16:23:16
| 75,075,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,334
|
py
|
import _surface
import chimera
try:
    import chimera.runCommand
except:
    pass
from VolumePath import markerset as ms
try:
    from VolumePath import Marker_Set, Link
    new_marker_set = Marker_Set
except:
    # Older Chimera builds expose marker sets through the dialog object.
    from VolumePath import volume_path_dialog
    d = volume_path_dialog(True)
    new_marker_set = d.new_marker_set
marker_sets = {}
surf_sets = {}
# One entry per COG-complex tag: (marker-set name, (x, y, z), (r, g, b)).
# All markers share the same radius; colours group markers by Cog subunit.
_RADIUS = 21.9005
_PLACEMENTS = [
    ("Cog1_Anch", (173, 270, 272), (0, 0, 1)),
    ("Cog2_GFPN", (211, 644, 751), (1, 0.5, 0)),
    ("Cog2_GFPC", (469, 622, 469), (1, 0.5, 0)),
    ("Cog2_Anch", (510, 921, 965), (1, 0.5, 0)),
    ("Cog3_GFPN", (579, 663, 401), (1, 0.87, 0)),
    ("Cog3_GFPC", (5, 737, 745), (1, 0.87, 0)),
    ("Cog3_Anch", (735, 394, 483), (1, 0.87, 0)),
    ("Cog4_GFPN", (144, 838, 695), (0.97, 0.51, 0.75)),
    ("Cog4_GFPC", (384, 179, 480), (0.97, 0.51, 0.75)),
    ("Cog4_Anch", (696, 10, 985), (0.97, 0.51, 0.75)),
    ("Cog5_GFPN", (614, 306, 548), (0.39, 0.31, 0.14)),
    ("Cog5_GFPC", (185, 810, 708), (0.39, 0.31, 0.14)),
    ("Cog5_Anch", (273, 108, 982), (0.39, 0.31, 0.14)),
    ("Cog6_GFPN", (927, 982, 765), (0.6, 0.31, 0.64)),
    ("Cog6_GFPC", (478, 451, 432), (0.6, 0.31, 0.64)),
    ("Cog6_Anch", (227, 697, 680), (0.6, 0.31, 0.64)),
    ("Cog7_GFPN", (696, 692, 471), (0.89, 0.1, 0.1)),
    ("Cog7_GFPC", (429, 560, 934), (0.89, 0.1, 0.1)),
    ("Cog7_Anch", (568, 585, 767), (0.89, 0.1, 0.1)),
    ("Cog8_GFPC", (869, 113, 920), (0.3, 0.69, 0.29)),
    ("Cog8_Anch", (187, 316, 608), (0.3, 0.69, 0.29)),
]
# Create each marker set on demand and drop one marker into it, exactly
# as the original copy-pasted stanzas did.
for _name, _xyz, _rgb in _PLACEMENTS:
    if _name not in marker_sets:
        s = new_marker_set(_name)
        marker_sets[_name] = s
    s = marker_sets[_name]
    mark = s.place_marker(_xyz, _rgb, _RADIUS)
# Register any surface sets with Chimera's open models (none are created
# in this script, so this loop is normally a no-op).
for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
|
[
"batxes@gmail.com"
] |
batxes@gmail.com
|
10d5e08df192fe85faf6f5f7bed5a0067b89a403
|
cea03b578f0f6207afe5056611090848ab76bd23
|
/subject/qiefenhuisu.py
|
6167beacc3d6e3371e6602df397b51dc1892fe58
|
[] |
no_license
|
swq90/stock
|
fa295f4fa0bf6a4d8afe8a71c02cc54fc7442bcd
|
a49ae395de82ecdfa38220f4fdbcaf4da6a39719
|
refs/heads/master
| 2021-07-14T03:42:21.950897
| 2020-10-14T11:52:24
| 2020-10-14T11:52:24
| 215,325,863
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,352
|
py
|
import pandas as pd
from numpy import arange
import stock.util.sheep as sheep
import stock.limit_up.get_limit_stock as gls
from stock.sql.data import save_data,read_data
from stock.util.basic import basic
# Analysis window (YYYYMMDD) and the price column used for return calcs.
start_date='20180101'
end_date='20181231'
PRICEB='close'
def fun1(limit_type='up'):
    """Bucket daily bars by limit-adjusted percent change and measure
    next-day returns for each bucket; results are saved to CSV.

    ``limit_type`` is currently unused.
    """
    res = pd.DataFrame()
    # Daily bars joined with the exchange's daily price-limit table.
    data=read_data('daily',start_date=start_date,end_date=end_date)
    limit=read_data('stk_limit',start_date=start_date,end_date=end_date)
    data=data.merge(limit,on=['ts_code','trade_date'])
    # Flag limit moves: 99 = closed at up-limit, 1 = closed at down-limit,
    # otherwise the raw pct_chg.
    # NOTE(review): the down-limit flag of 1 collides with ordinary moves
    # in [1, 2), and the -99 bucket below can then never match — confirm.
    data['is_roof'] = data.apply(lambda x: 99 if x['close'] == x['up_limit' ] else 1 if x['close'] == x[
        'down_limit'] else x['pct_chg'], axis=1)
    for rate in [-99]+list(range(-10,10))+[99]:
        print(rate)
        # Rows whose flagged change falls in [rate, rate + 1).
        df=data.loc[(data['is_roof']>=rate)&(data['is_roof']<(rate+1))].copy()
        if df.empty:
            continue
        # df['pct']=(df['close']/df['open']-1)*100
        # res.loc[rate,'pct']=df['pct'].mean()
        # Next-day (days=1) return stats for this bucket via sheep.wool2.
        wool=sheep.wool2(df[['ts_code','trade_date']],data,PRICEB=PRICEB,days=1)
        res.loc[rate,'mean']=wool.iloc[:,-3].mean()
        res.loc[rate, 'n'] = wool.iloc[-1, -2]
        res.loc[rate, 'all_pct'] = wool.iloc[-1, -1]
    save_data(res,'pct_chg_cut_res%s-%s.csv'%(start_date,end_date))
def cut_stock():
    # Placeholder — not yet implemented.
    pass
def func2():
data=read_data('stock_basic')
fun1()
print()
|
[
"shaowenqin620@163.com"
] |
shaowenqin620@163.com
|
7833aa53c04a242bf777342ba9ef4c4e33a99fb4
|
ef54d37f8a3303013ca7469871a320d303957ed7
|
/robo4.2/fusion/tests/RIST/API/Deprecated/F172/F172-BB-NegativeTests_DCS.py
|
640d1047e65082453fc1cf09d069f13ba4ecbabb
|
[] |
no_license
|
richa92/Jenkin_Regression_Testing
|
d18badfcf16bda682dfe7bcbbd66f54a9a27a58d
|
24a74926170cbdfafa47e972644e2fe5b627d8ff
|
refs/heads/master
| 2020-07-12T10:01:59.099137
| 2019-08-27T12:14:53
| 2019-08-27T12:14:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
# Appliance login used by these tests.
# NOTE(review): credentials are hard-coded in source — consider an env/config source.
admin_credentials = {'userName': 'Administrator', 'password': 'wpsthpvse1'}
# Enclosure and drive-enclosure under test.
enclosure_name = "0000A66101"
drive_enclosure_name = "0000A66101, bay 7"
# Expected inventory counts asserted by the tests.
expected_number_of_DE = 6
expected_number_of_drives = 4
|
[
"akul@SAC0MKUVCQ.asiapacific.hpqcorp.net"
] |
akul@SAC0MKUVCQ.asiapacific.hpqcorp.net
|
2cd03a9d0b16f337862d0fd2c74a686391d89077
|
71875792ea0e1520e442b37f5296c90e37734581
|
/Python/0200. Number of Islands.py
|
0ec08708e2cfcdb2ea99ee06d1076437d79af32c
|
[
"MIT"
] |
permissive
|
kangqiwang/myLeetcode
|
401e806fbf43e8c3f2c70a720edfdb4c799ea897
|
40f6da994e443cd027752bd6f3ab33eb3556a220
|
refs/heads/master
| 2023-01-22T15:57:51.257811
| 2023-01-07T21:33:44
| 2023-01-07T21:33:44
| 190,908,551
| 0
| 0
| null | 2022-04-19T10:18:45
| 2019-06-08T16:11:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,616
|
py
|
'''
Given an m x n 2D binary grid grid which represents a map of '1's (land) and '0's (water), return the number of islands.
An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically. You may assume all four edges of the grid are all surrounded by water.
Example 1:
Input: grid = [
["1","1","1","1","0"],
["1","1","0","1","0"],
["1","1","0","0","0"],
["0","0","0","0","0"]
]
Output: 1
Example 2:
Input: grid = [
["1","1","0","0","0"],
["1","1","0","0","0"],
["0","0","1","0","0"],
["0","0","0","1","1"]
]
Output: 3
'''
from typing import List
class Solution:
    def numIslands(self, grid: List[List[str]]) -> int:
        """Count groups of 4-directionally connected '1' cells in *grid*.

        The grid is modified in place: visited land cells are sunk so that
        each island is counted exactly once. Returns 0 for an empty grid.
        """
        count = 0
        for i in range(len(grid)):
            for j in range(len(grid[0])):
                if grid[i][j] == '1':
                    self.dfs(grid, i, j)
                    count += 1
        return count

    def dfs(self, grid, i, j):
        """Flood-fill from (i, j): mark the whole island with 0 (int)."""
        grid[i][j] = 0
        for dr, dc in (1, 0), (-1, 0), (0, -1), (0, 1):
            r = i + dr
            c = j + dc
            if 0 <= r < len(grid) and 0 <= c < len(grid[0]) and grid[r][c] == '1':
                self.dfs(grid, r, c)

    def dfs2(self, grid, i, j):
        """Alternative flood-fill: bounds-check at entry, mark cells '#'.

        Bug fix: the recursive calls previously went to ``dfs``, which marks
        cells with 0 and performs no entry bounds check, so out-of-range
        neighbours could raise IndexError and water cells could be clobbered.
        They must recurse into ``dfs2`` itself.
        """
        if i < 0 or j < 0 or i >= len(grid) or j >= len(grid[0]) or grid[i][j] != '1':
            return
        grid[i][j] = '#'
        self.dfs2(grid, i + 1, j)
        self.dfs2(grid, i - 1, j)
        self.dfs2(grid, i, j + 1)
        self.dfs2(grid, i, j - 1)
# Quick demo: one island spanning the top-left corner.
demo_grid = [
    ["1", "1", "1", "1", "0"],
    ["1", "1", "0", "1", "0"],
    ["1", "1", "0", "0", "0"],
    ["0", "0", "0", "0", "0"],
]
print(Solution().numIslands(demo_grid))
|
[
"kangqiwang@outlook.com"
] |
kangqiwang@outlook.com
|
0bcac2c7b50e1326ff493258771670b5fc8f0a84
|
6f05f7d5a67b6bb87956a22b988067ec772ba966
|
/data/train/python/5a641fc39ee81f4a0e98c3bc90fd4dbd69b0a45abase.py
|
5a641fc39ee81f4a0e98c3bc90fd4dbd69b0a45a
|
[
"MIT"
] |
permissive
|
harshp8l/deep-learning-lang-detection
|
93b6d24a38081597c610ecf9b1f3b92c7d669be5
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
refs/heads/master
| 2020-04-07T18:07:00.697994
| 2018-11-29T23:21:23
| 2018-11-29T23:21:23
| 158,597,498
| 0
| 0
|
MIT
| 2018-11-21T19:36:42
| 2018-11-21T19:36:41
| null |
UTF-8
|
Python
| false
| false
| 892
|
py
|
def get_class(kls):
    """Resolve a dotted path such as ``"package.module.Name"`` to the object it names.

    ``__import__`` returns the top-level package, so the getattr walk starts
    from the second path component.
    """
    parts = kls.split('.')
    obj = __import__('.'.join(parts[:-1]))
    for attr in parts[1:]:
        obj = getattr(obj, attr)
    return obj
def create_view_from_controller(controller):
    """Instantiate the view class matching *controller* (resolved by naming convention)."""
    view_cls = get_class(get_view_class_name(controller))
    return view_cls()
def get_view_class_name(controller):
    """Build the dotted view-class path from *controller*'s module and class name.

    The leading package of the controller's module is replaced by ``views``.
    """
    module_tail = controller.__class__.__module__.split('.')[1:]
    return 'views.{}.{}'.format('.'.join(module_tail), controller.__class__.__name__)
class Controller(object):
    """Base controller; subclasses automatically get a conventionally-named view."""

    def __init__(self):
        # Resolve and instantiate the matching view for this controller.
        self.view = create_view_from_controller(self)
|
[
"aliostad+github@gmail.com"
] |
aliostad+github@gmail.com
|
e9d36d371082deddb241956d3907c7623414b0f9
|
60e7738d90ea7151a790a73285382b0c77799262
|
/p3/Lib/site-packages/pandas/tests/io/conftest.py
|
828d5d0ccd3c6ee93b6f04c8d67f98ca46907787
|
[
"MIT"
] |
permissive
|
fpark7/Native2Native
|
251b3c08af16bbbc4d077840f66aea7acdacc002
|
1bc3390770ddafbba2e2779ba91998643df6d9ec
|
refs/heads/master
| 2021-04-18T21:27:41.378371
| 2018-03-27T02:47:51
| 2018-03-27T02:47:51
| 126,620,375
| 1
| 2
|
MIT
| 2021-03-19T22:50:00
| 2018-03-24T16:52:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,827
|
py
|
import os
import moto
import pytest
from pandas.io.parsers import read_table
HERE = os.path.dirname(__file__)
@pytest.fixture(scope='module')
def tips_file():
    """Absolute path of the tips CSV dataset."""
    data_dir = os.path.join(HERE, 'parser', 'data')
    return os.path.join(data_dir, 'tips.csv')


@pytest.fixture(scope='module')
def jsonl_file():
    """Absolute path of the items JSONL dataset."""
    data_dir = os.path.join(HERE, 'parser', 'data')
    return os.path.join(data_dir, 'items.jsonl')


@pytest.fixture(scope='module')
def salaries_table():
    """DataFrame loaded from the salaries dataset."""
    csv_path = os.path.join(HERE, 'parser', 'data', 'salaries.csv')
    return read_table(csv_path)
@pytest.fixture(scope='module')
def s3_resource(tips_file, jsonl_file):
    """Fixture for mocking S3 interaction.

    The primary bucket name is "pandas-test". The following datasets
    are loaded.

    - tips.csv
    - tips.csv.gz
    - tips.csv.bz2
    - items.jsonl

    A private bucket "cant_get_it" is also created. The boto3 s3 resource
    is yielded by the fixture.
    """
    pytest.importorskip('s3fs')
    # Bug fix: start() and stop() must act on the *same* moto instance.
    # The original created a second, unrelated mock_s3() for stop(), so the
    # patching begun here was never undone.
    s3_mock = moto.mock_s3()
    s3_mock.start()

    test_s3_files = [
        ('tips.csv', tips_file),
        ('tips.csv.gz', tips_file + '.gz'),
        ('tips.csv.bz2', tips_file + '.bz2'),
        ('items.jsonl', jsonl_file),
    ]

    def add_tips_files(bucket_name):
        # `conn` is bound below, before this helper is first invoked.
        for s3_key, file_name in test_s3_files:
            with open(file_name, 'rb') as f:
                conn.Bucket(bucket_name).put_object(
                    Key=s3_key,
                    Body=f)

    boto3 = pytest.importorskip('boto3')

    # see gh-16135
    bucket = 'pandas-test'
    conn = boto3.resource("s3", region_name="us-east-1")

    conn.create_bucket(Bucket=bucket)
    add_tips_files(bucket)

    conn.create_bucket(Bucket='cant_get_it', ACL='private')
    add_tips_files('cant_get_it')
    yield conn

    s3_mock.stop()
|
[
"felix.park03@gmail.com"
] |
felix.park03@gmail.com
|
122ca2414bd49a9409a20b2b9f066c334629fc37
|
ef875440cf82b6eed61bf6d9d0c6acfae5f90ef4
|
/Assument/4.py
|
629e385782ecd25815678aa8b3abb8fa849a0121
|
[] |
no_license
|
Nitesh101/test
|
5ab9b1e23167f8496d90d15484d57328b7f1430e
|
4c413b3a056a633c5bcf93ae21c999ff67eeaa95
|
refs/heads/master
| 2020-03-29T09:04:32.723099
| 2018-09-21T09:33:41
| 2018-09-21T09:33:41
| 149,740,238
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 173
|
py
|
# Transpose a 3x3 matrix: gather columns 0, 1 and 2 of every row into
# separate lists, then print those lists as the rows of the result.
val = ([[1,1,1],[2,2,2],[3,3,3]])
lis = []
lis2 = []
lis3 = []
for i in val:
    # the trailing comma after the single argument is legal and has no effect
    lis.append(i[0],)
    lis2.append(i[1],)
    lis3.append(i[2],)
# rebind val to the tuple of column-lists
val = lis,lis2,lis3,
# Python 2 print statement: outputs [[1, 2, 3], [1, 2, 3], [1, 2, 3]]
print list(val)
|
[
"m.veeranitesh@gmail.com"
] |
m.veeranitesh@gmail.com
|
86c4d1abbd5ea480d106fd2383b2979cd8424d29
|
9d8f241e4e851f0fe71655d2b698c3d40feab98a
|
/examples/naive/env_mcarrier_maddpg.py
|
e0e28ddaff736f3a84abf5d2cbb3be2b3c3c7a3f
|
[
"MIT"
] |
permissive
|
StepNeverStop/machin
|
c30c15bbcc38dde06eac57e7ff562753b8927f6b
|
3caecb6ea7b02a9687281e2270577a6ed92c5dd8
|
refs/heads/master
| 2022-11-24T14:43:20.041102
| 2020-07-21T13:08:19
| 2020-07-21T13:08:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 809
|
py
|
from .env_walker_ddpg import t, nn, Actor
class Critic(nn.Module):
    """Centralised MADDPG critic scoring the joint state/action of all agents."""

    def __init__(self, agent_num, state_dim, action_dim):
        super(Critic, self).__init__()
        self.agent_num = agent_num
        self.state_dim = state_dim
        self.action_dim = action_dim
        # The critic sees every agent's state and action concatenated together.
        joint_state_dim = state_dim * agent_num
        joint_action_dim = action_dim * agent_num
        self.fc1 = nn.Linear(joint_state_dim + joint_action_dim, 1024)
        self.fc2 = nn.Linear(1024, 512)
        self.fc3 = nn.Linear(512, 1)

    def forward(self, all_states, all_actions):
        """Return one Q-value per batch row for the flattened joint input.

        all_states / all_actions: batch_size x agent_num x per-agent dim.
        """
        flat_actions = t.flatten(all_actions, 1, -1)
        flat_states = t.flatten(all_states, 1, -1)
        hidden = t.relu(self.fc1(t.cat((flat_states, flat_actions), dim=1)))
        hidden = t.relu(self.fc2(hidden))
        return self.fc3(hidden)
|
[
"hanhanmumuqq@163.com"
] |
hanhanmumuqq@163.com
|
1b12c35220e59e832d67c3ed0c724b894ec10460
|
a91b397711e2c5334f58ba1d466cf6b58dc694f2
|
/code_python/lib/null/models.py
|
a5574f86629b367b095aaa709afec768f8c646ab
|
[] |
no_license
|
aleksejs-fomins/mesoscopic-functional-connectivity
|
96f68a379ea8c2abd60441d70e18227e4c3e05cb
|
e17dd3367cdebc3e43eb981b4868c3a148038af8
|
refs/heads/master
| 2022-04-29T18:06:59.667225
| 2022-04-22T17:06:47
| 2022-04-22T17:06:47
| 199,410,941
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,175
|
py
|
import numpy as np
def cycle(arr, nStep):
    """Rotate *arr* right: its final *nStep* entries are moved to the front."""
    tail = arr[-nStep:]
    head = arr[:-nStep]
    return np.hstack((tail, head))
def mix(x, y, frac):
    """Linear blend: weight (1 - frac) on *x* and frac on *y*."""
    return y * frac + x * (1 - frac)
def conv_exp(data, dt, tau):
    """Causally filter *data* with a normalised exponential kernel exp(-t/tau).

    The kernel is truncated at 5*tau (sampled every *dt*) and the full
    convolution is cut back to the length of *data*.
    """
    kernel_len = int(5 * tau / dt)
    times = dt * np.arange(kernel_len)
    kernel = np.exp(-times / tau)
    kernel /= np.sum(kernel)
    return np.convolve(data, kernel)[:data.shape[0]]
def two_node_system(nTime, lags=None, trgFracs=None, noiseFrac=0.1, crossXY=0, crossYX=0, convDT=None, convTau=None):
    """Simulate two coupled noise-driven nodes; returns a (2, nTime) array.

    y optionally receives lagged copies of x; both channels can be smoothed
    with an exponential kernel, cross-mixed, and given observation noise.
    RNG draws happen in a fixed order, so results are reproducible under a seed.
    """
    x = np.random.normal(0, 1, nTime)
    y = np.random.normal(0, 1, nTime)
    # Lagged x -> y coupling; y keeps the remaining weight of itself.
    if lags is not None:
        coupled = np.sum([frac * cycle(x, lag) for frac, lag in zip(trgFracs, lags)], axis=0)
        y = (1 - np.sum(trgFracs)) * y + coupled
    # Optional exponential smoothing of both channels.
    if convDT is not None:
        x = conv_exp(x, convDT, convTau)
        y = conv_exp(y, convDT, convTau)
    # Symmetric cross-talk, computed from the pre-noise signals.
    # NOTE: with symmetric mixing the variables swap for crossXY.
    xMixed = mix(x, y, crossXY)
    yMixed = mix(y, x, crossYX)
    # Observation noise (x first, then y — keeps the RNG draw order fixed).
    xMixed = mix(xMixed, np.random.normal(0, 1, nTime), noiseFrac)
    yMixed = mix(yMixed, np.random.normal(0, 1, nTime), noiseFrac)
    return np.array([xMixed, yMixed])
def three_node_system(nTime, lags=None, trgFracs=None, noiseFrac=0.1, crossZX=0, convDT=None, convTau=None):
    """Simulate three noise-driven nodes; returns a (3, nTime) array.

    x optionally drives y through lagged coupling; z is independent except
    for optional cross-talk from x. Every channel gets observation noise.
    RNG draws happen in a fixed order, so results are reproducible under a seed.
    """
    x = np.random.normal(0, 1, nTime)
    y = np.random.normal(0, 1, nTime)
    z = np.random.normal(0, 1, nTime)
    # Lagged x -> y coupling; y keeps the remaining weight of itself.
    if lags is not None:
        coupled = np.sum([frac * cycle(x, lag) for frac, lag in zip(trgFracs, lags)], axis=0)
        y = (1 - np.sum(trgFracs)) * y + coupled
    # Optional exponential smoothing of every channel.
    if convDT is not None:
        x = conv_exp(x, convDT, convTau)
        y = conv_exp(y, convDT, convTau)
        z = conv_exp(z, convDT, convTau)
    # Cross-talk: z picks up a fraction of x.
    zMixed = mix(z, x, crossZX)
    # Observation noise, drawn in x, y, z order.
    xMixed = mix(x, np.random.normal(0, 1, nTime), noiseFrac)
    yMixed = mix(y, np.random.normal(0, 1, nTime), noiseFrac)
    zMixed = mix(zMixed, np.random.normal(0, 1, nTime), noiseFrac)
    return np.array([xMixed, yMixed, zMixed])
|
[
"aleksejs.fomins@uzh.ch"
] |
aleksejs.fomins@uzh.ch
|
fa10a6c2cd264d81b7fc334f6127a0a158f47c5f
|
4148260054c2cf4605dacb8bdef3605c82eca470
|
/temboo/Library/Foursquare/Checkins/DeleteComment.py
|
9e5e663d00f40a17986521496cd444730514c959
|
[] |
no_license
|
wimsy/actuarize-web
|
0f23d5f00afe3d36d430621cdb497d2e64998416
|
5f43af3019da6fb08cafeec9ff0a89df5196b864
|
refs/heads/master
| 2021-03-12T19:38:21.887681
| 2012-12-19T01:13:50
| 2012-12-19T01:13:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,128
|
py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# DeleteComment
# Removes a comment to a specified check-in.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
class DeleteComment(Choreography):
    """Removes a comment from a specified check-in.

    A TembooSession object, containing a valid set of Temboo credentials,
    must be supplied when constructing an instance.
    """

    def __init__(self, temboo_session):
        Choreography.__init__(self, temboo_session, '/Library/Foursquare/Checkins/DeleteComment')

    def new_input_set(self):
        return DeleteCommentInputSet()

    def _make_result_set(self, result, path):
        return DeleteCommentResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        return DeleteCommentChoreographyExecution(session, exec_id, path)


class DeleteCommentInputSet(InputSet):
    """Input parameters for the DeleteComment choreography."""

    def set_CheckinID(self, value):
        """(required, string) The ID of the check-in associated with the comment you want to remove."""
        InputSet._set_input(self, 'CheckinID', value)

    def set_CommentID(self, value):
        """(required, string) The id of the comment to remove."""
        InputSet._set_input(self, 'CommentID', value)

    def set_OauthToken(self, value):
        """(required, string) The FourSquare API Oauth token string."""
        InputSet._set_input(self, 'OauthToken', value)

    def set_ResponseFormat(self, value):
        """(optional, string) The format that response should be in. Can be set to xml or json. Defaults to json."""
        InputSet._set_input(self, 'ResponseFormat', value)


class DeleteCommentResultSet(ResultSet):
    """Results produced by a DeleteComment execution."""

    def get_Response(self):
        """The response from Foursquare. Corresponds to the ResponseFormat input. Defaults to JSON."""
        return self._output.get('Response', None)


class DeleteCommentChoreographyExecution(ChoreographyExecution):
    """Execution handle for an in-flight DeleteComment choreography."""

    def _make_result_set(self, response, path):
        return DeleteCommentResultSet(response, path)
|
[
"mike.wimsatt@gmail.com"
] |
mike.wimsatt@gmail.com
|
1f8a4937050b87f6ab02f889118465b3ecda37fb
|
faa0ce2a95da958be3bfb171cdff29eeb43c3eb6
|
/py-exercises/JulieTestModule/characters/base.py
|
dafaa1a45e64b9d51460a9bd19e5725b9ce466a8
|
[] |
no_license
|
julianapeace/digitalcrafts-exercises
|
98fe4e20420c47cf9d92d16c45ac60dc35a49a6a
|
98e6680138d55c5d093164a47da53e1ddb6d064c
|
refs/heads/master
| 2021-08-30T04:17:09.997205
| 2017-12-16T00:22:22
| 2017-12-16T00:22:22
| 103,176,043
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,026
|
py
|
import random
import time
class Character:
    """Base RPG combatant: tracks hit points and power, can attack and self-heal."""

    def __init__(self, name, health, power, armor, evade, coincount):
        self.name = name
        self.health = health        # hit points; <= 0 means dead
        self.power = power          # damage dealt per attack
        self.armor = armor
        self.evade = evade
        self.coincount = coincount

    def alive(self):
        """Return True while this character still has hit points left."""
        return self.health > 0

    def print_status(self):
        """Print a one-line health/power summary."""
        print("{} has {} health and {} power.".format(self.name, self.health, self.power))

    def attack(self, enemy):
        """Deal this character's power as damage to *enemy* and report any deaths."""
        enemy.health -= self.power
        print("{} does {} damage to the {}.".format(self.name, self.power, enemy.name))
        # Both sides are checked so status effects reported elsewhere stay in sync.
        if not self.alive():
            print("The {} is dead.".format(self.name))
        if not enemy.alive():
            print("The {} is dead.".format(enemy.name))
        time.sleep(1.5)

    def heal(self):
        """With 20% probability, restore 2 health — but only while alive.

        Bug fix: the original tested ``self.alive`` (the bound method, which
        is always truthy) instead of calling it, so dead characters could heal.
        """
        roll_dice = random.random()
        if self.alive() and roll_dice < 0.2:
            self.health += 2
            print("{} gained 2 health.".format(self.name))
            time.sleep(1.0)
|
[
"chancecordelia@gmail.com"
] |
chancecordelia@gmail.com
|
97a21525f5b4a28d110146eb0d697b52c9cc2677
|
a82dfb61b17fa66b9c75fe871401cff77aa77f56
|
/utils/bng_species_reader_py/bng_species_reader_example.py
|
5a4dbc089b21feb77a4d7cb8784ed5b265f9810a
|
[
"MIT"
] |
permissive
|
mcellteam/mcell
|
49ca84048a091de8933adccc083d31b7bcb1529e
|
3920aec22c55013b78f7d6483b81f70a0d564d22
|
refs/heads/master
| 2022-12-23T15:01:51.931150
| 2021-09-29T16:49:14
| 2021-09-29T16:49:14
| 10,253,341
| 29
| 12
|
NOASSERTION
| 2021-07-08T01:56:40
| 2013-05-23T20:59:54
|
C++
|
UTF-8
|
Python
| false
| false
| 5,253
|
py
|
"""
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to [http://unlicense.org]
"""
import sys
import os
import pandas as pd
MCELL_PATH = os.environ.get('MCELL_PATH', '')
if MCELL_PATH:
sys.path.append(os.path.join(MCELL_PATH, 'lib'))
else:
print("Error: variable MCELL_PATH that is used to find the mcell library was not set.")
sys.exit(1)
import mcell as m
# utility module to load ASCII viz_output .dat file from
import species_reader
def get_bound_em(complex, src_em, comp_name):
    # Follow the bond attached to component `comp_name` of `src_em` and return
    # the elementary molecule at the other end of that bond within `complex`.
    # assuming that there is a single component in src_em
    # (source elementary molecule) with comp_name that is bound
    bond_index = -1
    for c in src_em.components:
        if c.component_type.name == comp_name:
            # not allowed cases, we need the number
            assert c.bond != m.BOND_UNBOUND # no bond
            assert c.bond != m.BOND_ANY # !?
            assert c.bond != m.BOND_BOUND # !+
            bond_index = c.bond
    assert bond_index != -1, "Did not find " + comp_name + " in " + src_em.to_bngl_str()
    # find this bond in the complex (might be slow)
    for em in complex.elementary_molecules:
        if em is src_em:
            # NOTE(review): skipping src_em assumes a molecule is never bound
            # to itself through this bond — confirm for this model.
            continue
        for c in em.components:
            if c.bond == bond_index:
                return em
    assert False, "Did not find paired bond " + str(bond_index) + " in " + complex.to_bngl_str()
def convert_species_file(file_name):
    """Read a .species file and print it back in BNGL form, adding a boolean
    'u' (upper ring = 1 / lower ring = 0) component to every CaMKII molecule
    of a dodecamer holoenzyme. Non-dodecamer complexes are echoed unchanged.
    """
    # read the .species file and parse complexes to the internal MCell
    # representation
    complex_counts = species_reader.read_species_file(file_name)
    # prepare the component type that we will append to the CaMKII molecules
    # the new component means 'upper' and has a boolean state
    ct_u = m.ComponentType('u', ['0','1'])
    # process the data by adding component 'u' to the CaMKII elementary molecules
    for (complex, count) in complex_counts:
        # if this a CaMKII dodecamer?
        camkiis = []
        for em in complex.elementary_molecules:
            em_name = em.elementary_molecule_type.name
            if em_name == 'CaMKII':
                camkiis.append(em)
        if len(camkiis) != 12:
            # output it directly
            print(complex.to_bngl_str() + " " + str(count))
            continue
        # ok, we have the holoenzyme, how can we figure out which
        # of the CaMKIIs belong to the upper and the lower
        # ring?
        # let's say that the first one is in the upper ring,
        # go along the 'r'
        upper_first = camkiis[0]
        upper_ring = [ upper_first ]
        curr = upper_first
        for i in range(1, 6):
            curr = get_bound_em(complex, curr, 'r')
            upper_ring.append(curr)
        assert upper_first is get_bound_em(complex, curr, 'r'), "A ring must be formed"
        # then go down along 'c' and go again along 'r' to get molecules of the lower ring
        lower_first = get_bound_em(complex, camkiis[0], 'c')
        lower_ring = [ lower_first ]
        curr = lower_first
        for i in range(1, 6):
            curr = get_bound_em(complex, curr, 'r')
            lower_ring.append(curr)
        assert lower_first is get_bound_em(complex, curr, 'r'), "A ring must be formed"
        # now the modifications - add components by instatiating the component type 'u'
        # the way how the complexes were is parsed was that each complex has its own instance
        # of the elementary molecule type for CaMKII so lets change one of them
        # NOTE(review): 'u' is appended to the molecule *type* once per
        # dodecamer — this relies on the per-complex type instances the
        # comment above describes; confirm types are not shared.
        upper_first.elementary_molecule_type.components.append(ct_u)
        for em in upper_ring:
            em.components.append(ct_u.inst('1'))
        for em in lower_ring:
            em.components.append(ct_u.inst('0'))
        print(complex.to_bngl_str() + " " + str(count))
if __name__ == '__main__':
    # Exactly one positional argument is expected: the input .species file.
    if len(sys.argv) != 2:
        print("Expected .species file name as argument")
        sys.exit(1)
    convert_species_file(sys.argv[1])
|
[
"ahusar@salk.edu"
] |
ahusar@salk.edu
|
5f75eeaac0ec27d418823af144bf4f4b7adfbfde
|
d326cd8d4ca98e89b32e6a6bf6ecb26310cebdc1
|
/rosalind/algorithmic/heights/bins/main.py
|
e97c45fba0ff73e7675f4be83c03b4383f888eca
|
[] |
no_license
|
dswisher/rosalind
|
d6af5195cdbe03adb5a19ed60fcbf8c05beac784
|
4519740350e47202f7a45ce70e434f7ee15c6afc
|
refs/heads/master
| 2021-08-09T02:58:17.131164
| 2017-11-12T01:26:26
| 2017-11-12T01:26:26
| 100,122,283
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 643
|
py
|
import sys
from rosalind.common import util
# Usage: python bins.py <dataset>. The dataset's first two lines are the
# sizes (ignored), the third line is the sorted array A, the fourth the
# query values B. Written for Python 2 (print statement, list-returning map).
if len(sys.argv) != 2:
    print "usage: python bins.py <filename>"
    sys.exit(1)
with open(util.find_file(sys.argv[1]), "r") as fp:
    fp.readline()
    fp.readline()
    A = map(int, fp.readline().split())
    B = map(int, fp.readline().split())
def binary_search(A, x):
    """Return the 1-based position of x in the sorted list A, or -1 if absent.

    Bug fix: the midpoint now uses floor division (//) so it stays an int on
    both Python 2 and Python 3 — the original '/' yields a float index under
    Python 3 and raises TypeError.
    """
    l = 0
    r = len(A) - 1
    while l <= r:
        mid = (l + r) // 2
        if A[mid] == x:
            return mid + 1
        if A[mid] < x:
            l = mid + 1
        else:
            r = mid - 1
    return -1
# Look up every query value and emit its 1-based position (-1 if absent).
nums = []
for i in B:
    nums.append(binary_search(A, i))
print " ".join(map(str, nums))
|
[
"big.swish@gmail.com"
] |
big.swish@gmail.com
|
69743c1ebc9817aa39bb9ba21b9445b0b75e7f04
|
2c4df5b105ccf11102d7611523fdc713a2dbad2e
|
/mmdet2trt/converters/plugins/create_carafefeaturereassemble_plugin.py
|
7f800021d2efd7eb535dba98c3c4305c4d7d82d0
|
[
"Apache-2.0"
] |
permissive
|
DataXujing/mmdetection-to-tensorrt
|
6b809c9039d3f3d6554298bafc3f5c88e4140183
|
7d49d2aa35b966f67a7e21b4c59dc8924c825548
|
refs/heads/master
| 2023-03-28T17:53:21.496289
| 2021-03-14T16:18:23
| 2021-03-14T16:18:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,592
|
py
|
import numpy as np
import os
import os.path as osp
from .globals import dir_path
import ctypes
ctypes.CDLL(osp.join(dir_path, "libamirstan_plugin.so"))
import tensorrt as trt
def create_carafefeaturereassemble_plugin(layer_name,
                                          scale_factor,
                                          up_kernel,
                                          up_group,
                                          type_id=trt.DataType.FLOAT):
    """Build a CarafeFeatureReassemblePluginDynamic TensorRT plugin.

    All four parameters are passed to the plugin creator as single-element
    INT32 plugin fields, in the order listed below.
    """
    creator = trt.get_plugin_registry().get_plugin_creator(
        'CarafeFeatureReassemblePluginDynamic', '1', '')

    pfc = trt.PluginFieldCollection()
    int32_fields = [
        ('scale_factor', scale_factor),
        ('up_kernel', up_kernel),
        ('up_group', up_group),
        ('type_id', type_id),
    ]
    for field_name, field_value in int32_fields:
        pfc.append(
            trt.PluginField(field_name,
                            np.array([field_value], dtype=np.int32),
                            trt.PluginFieldType.INT32))

    return creator.create_plugin(layer_name, pfc)
|
[
"streetyao@live.com"
] |
streetyao@live.com
|
7f5464dccd50c6e491579e18aadd00d40c17fa86
|
25872e1ba4f86cbbf77d0130f341b21e5dd9e692
|
/LetterCombinationsOfAPhoneNumber.py
|
a161dcb8f22d6177e26dc50a95632fe67acae452
|
[] |
no_license
|
zongxinwu92/leetcode
|
dc3d209e14532b9b01cfce6d4cf6a4c2d7ced7de
|
e1aa45a1ee4edaf72447b771ada835ad73e7f508
|
refs/heads/master
| 2021-06-10T21:46:23.937268
| 2017-01-09T09:58:49
| 2017-01-09T09:58:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 450
|
py
|
'''
Created on 1.12.2017
@author: Jesse
''''''
Given a digit string, return all possible letter combinations that the number could represent.
A mapping of digit to letters (just like on the telephone buttons) is given below.
Input:Digit string "23"
Output: ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"].
Note:
Although the above answer is in lexicographical order, your answer could be in any order you want.
"
'''
|
[
"darrencheng0817@gmail.com"
] |
darrencheng0817@gmail.com
|
9c20160cd86ef2eab9e2d832cd876611b2e83109
|
a5455dbb01687ab031f6347306dbb5ccc3c0c162
|
/第一阶段/day17/day16_exercise/write_number_to_file.py
|
996500d0100e6d34e65e6e9c88c3f4708a0c4fe4
|
[] |
no_license
|
zuobing1995/tiantianguoyuan
|
9ff67aef6d916e27d92b63f812c96a6d5dbee6f8
|
29af861f5edf74a4a1a4156153678b226719c56d
|
refs/heads/master
| 2022-11-22T06:50:13.818113
| 2018-11-06T04:52:53
| 2018-11-06T04:52:53
| 156,317,754
| 1
| 1
| null | 2022-11-22T01:06:37
| 2018-11-06T03:02:51
|
Python
|
UTF-8
|
Python
| false
| false
| 614
|
py
|
# write_number_to_file.py
# Read integers from the user until a negative number is entered:
#  1) collect the numbers in a list
#  2) write them to numbers.txt, one per line
#     (integers must be converted to str before writing to a text file)
L = []
while True:
    n = int(input("请输入大于0的整数: "))
    if n < 0:
        break
    L.append(n)
print(L)
try:
    # 'with' guarantees the file is closed even when a write fails;
    # the original called close() manually and leaked the handle on error.
    with open('numbers.txt', 'w') as f:
        for n in L:
            f.write(str(n))
            f.write('\n')
except OSError:
    print("文件打开失败")
|
[
"bing@163.com"
] |
bing@163.com
|
d1b9cf401bc46cd2ceebba56153d629bd0c3b9d2
|
ce196aba0adde47ea2767eae1d7983a1ef548bb8
|
/T30_turtle-动画指针与小球.py
|
b195bd3e99eb61a4530e16476d239fafff47cc01
|
[] |
no_license
|
xiang-daode/Python3_codes
|
5d2639ffd5d65065b98d029e79b8f3608a37cf0b
|
06c64f85ce2c299aef7f9311e9473e0203a05b09
|
refs/heads/main
| 2023-08-30T14:59:55.123128
| 2021-11-03T05:12:24
| 2021-11-03T05:12:24
| 333,632,892
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 860
|
py
|
# Animated pointer demo: a line sweeps half a turn with a small blue dot at
# its tip, then sweeps a full turn the other way with a larger green dot.
from turtle import *
import math
def main():
    ht()
    # Infinite animation loop — main() never actually returns.
    while True:
        # First sweep: q is in tenths of a degree (1.0° .. 180.0°).
        for q in range(10,1800,1):
            bgcolor(1,1,1)
            clear(); tracer(False)
            # convert tenths of a degree to radians (pi approximated as 3.1416)
            a=3.1416*q/1800
            xc=300*math.cos(a)
            yc=300*math.sin(a)
            width(5);color(0,0,0.5)
            goto(0,0)
            goto(xc,yc)
            dot(45)
            update()
        # Second sweep: opposite direction, pen lifted so only the dot shows.
        for q in range(1800,3600,1):
            clear(); tracer(False)
            a=3.1416*q/1800
            xc=300*math.cos(-a)
            yc=300*math.sin(-a)
            width(5);color(0,0.5,0)
            pu()
            goto(0,0)
            goto(xc,yc)
            pd()
            dot(65)
            update()
    # NOTE(review): unreachable — the while-loop above never exits.
    return "DONE!"
############## start the Main #################
main()
|
[
"noreply@github.com"
] |
xiang-daode.noreply@github.com
|
44068c9ca7aa803a1aec9f9cb0b8c2a31bbe15a2
|
5196ecf41ac6a3de00d49c2039bbbe5efbd4ec03
|
/examples/datachannel-vpn/vpn.py
|
b6135ef39c2de05deb39bd2245f0227fcd0f043b
|
[
"BSD-3-Clause"
] |
permissive
|
ryogrid/aiortc-dc
|
3962929432610dc2c1834636adc5f27ea8899b92
|
c24ab64f9a7be66e22d8050fc01f376e55e6f24b
|
refs/heads/to-make-pip-package-from-master
| 2020-04-29T20:34:19.367423
| 2019-05-23T21:32:00
| 2019-05-23T21:32:00
| 176,387,482
| 4
| 2
|
BSD-3-Clause
| 2019-05-21T23:45:06
| 2019-03-18T23:49:01
|
Python
|
UTF-8
|
Python
| false
| false
| 2,758
|
py
|
import argparse
import asyncio
import logging
import tuntap
from aiortc import RTCPeerConnection, RTCSessionDescription
from aiortc.contrib.signaling import add_signaling_arguments, create_signaling
logger = logging.Logger('vpn')
def channel_log(channel, t, message):
    """Log one data-channel event: channel label, direction tag and payload repr."""
    logger.info('channel(%s) %s %s', channel.label, t, repr(message))
async def consume_signaling(pc, signaling):
    """Apply incoming session descriptions to *pc* until a non-SDP object arrives."""
    while True:
        obj = await signaling.receive()

        if not isinstance(obj, RTCSessionDescription):
            # Anything that is not an SDP message ends the session.
            print('Exiting')
            break

        await pc.setRemoteDescription(obj)
        if obj.type == 'offer':
            # We are the answering side: generate and send the answer.
            await pc.setLocalDescription(await pc.createAnswer())
            await signaling.send(pc.localDescription)
def tun_start(tap, channel):
    """Open *tap* and relay traffic in both directions between it and *channel*."""
    tap.open()

    # channel -> tap: write every received message straight to the device.
    channel.on('message')(tap.fd.write)

    # tap -> channel: forward a device read whenever the fd becomes readable.
    def forward_from_tap():
        data = tap.fd.read(tap.mtu)
        if data:
            channel.send(data)
    asyncio.get_event_loop().add_reader(tap.fd, forward_from_tap)

    tap.up()
async def run_answer(pc, signaling, tap):
    """Answerer role: bridge the tap once the offerer's 'vpntap' channel arrives."""
    await signaling.connect()

    @pc.on('datachannel')
    def handle_channel(channel):
        # Log every channel, but only bridge the VPN one.
        channel_log(channel, '-', 'created by remote party')
        if channel.label == 'vpntap':
            tun_start(tap, channel)

    await consume_signaling(pc, signaling)
async def run_offer(pc, signaling, tap):
    """Offerer role: create the 'vpntap' channel, send the offer, bridge on open."""
    await signaling.connect()

    vpn_channel = pc.createDataChannel('vpntap')
    channel_log(vpn_channel, '-', 'created by local party')

    @vpn_channel.on('open')
    def handle_open():
        tun_start(tap, vpn_channel)

    # send offer
    await pc.setLocalDescription(await pc.createOffer())
    await signaling.send(pc.localDescription)

    await consume_signaling(pc, signaling)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='VPN over data channel')
    parser.add_argument('role', choices=['offer', 'answer'])
    parser.add_argument('--verbose', '-v', action='count')
    add_signaling_arguments(parser)
    args = parser.parse_args()
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    # TUN device is named after the chosen role: revpn-offer / revpn-answer.
    tap = tuntap.Tun(name="revpn-%s" % args.role)
    signaling = create_signaling(args)
    pc = RTCPeerConnection()
    if args.role == 'offer':
        coro = run_offer(pc, signaling, tap)
    else:
        coro = run_answer(pc, signaling, tap)
    # run event loop
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(coro)
    except KeyboardInterrupt:
        pass
    finally:
        # Teardown order: peer connection, then signaling, then the TUN device.
        loop.run_until_complete(pc.close())
        loop.run_until_complete(signaling.close())
        tap.close()
|
[
"jeremy.laine@m4x.org"
] |
jeremy.laine@m4x.org
|
e27f5cd1f09645d1899160104a78f4207462486e
|
523e065a50ef3374b9e8ebe7033d6d17afc7aa90
|
/midcli/gui/base/list/list.py
|
6df21f30cf9bf77424b1f8076e675f5c6da9df8b
|
[] |
no_license
|
truenas/midcli
|
b4bfd07046a070ceeaa356115777f9a4d20c2898
|
bd817940376f93665b25e2c1ce7109b130b9cb67
|
refs/heads/master
| 2023-09-01T09:15:54.823487
| 2023-07-05T11:52:47
| 2023-07-05T11:52:47
| 184,088,159
| 15
| 3
| null | 2023-09-08T07:07:53
| 2019-04-29T14:37:41
|
Python
|
UTF-8
|
Python
| false
| false
| 7,160
|
py
|
# -*- coding=utf-8 -*-
import functools
import logging
import textwrap
from prompt_toolkit.application import get_app
from prompt_toolkit.filters import has_focus
from prompt_toolkit.key_binding.bindings.focus import focus_next, focus_previous
from prompt_toolkit.key_binding.key_bindings import KeyBindings
from prompt_toolkit.layout.containers import (
AnyContainer,
HSplit,
)
from prompt_toolkit.shortcuts import yes_no_dialog
from prompt_toolkit.shortcuts.dialogs import _create_app
from prompt_toolkit.widgets import Label
from prompt_toolkit.widgets.base import Box, Frame, Shadow
from midcli.display_mode.mode.text_mixin import TextMixin
from midcli.gui.base.app import AppResult
from midcli.gui.base.common.menu_item import MenuItem
logger = logging.getLogger(__name__)
__all__ = ["List"]
class List:
title = NotImplemented
item_name = NotImplemented
item_title_key = NotImplemented
service = NotImplemented
primary_key = "id"
columns = NotImplemented
columns_processors = {}
create_class = None
update_class = NotImplemented
deletable = True
def __init__(self, context):
self.context = context
with context.get_client() as c:
self.data = c.call(f"{self.service}.query")
self.kb = KeyBindings()
actions = []
if self.data:
actions.append(f"<Enter> to edit a {self.item_name}")
if self.deletable:
actions.append(f"<Delete> to delete a {self.item_name}")
self.kb.add("delete")(self._delete_handler)
if self.create_class:
actions.append(f"<n> to create a new {self.item_name}")
self.kb.add("n")(
lambda event: event.app.exit(
self.create_class(
self.context,
AppResult(app_factory=lambda: self.__class__(self.context))
)
)
)
actions.append(f"<r> to refresh")
self.kb.add("r")(lambda event: event.app.exit(AppResult(app_factory=lambda: self.__class__(self.context))))
actions.append(f"<q> to quit")
self.kb.add("q")(lambda event: event.app.exit(None))
help_label = Label("\n" + "\n".join(textwrap.wrap(f"Press {', '.join(actions)}.", width=60)))
if self.data:
header, rows, footer = self._draw(self.data)
header_label = Label(header)
self.row_labels = [
MenuItem(row, handler=functools.partial(self._edit_handler, self.data[i]))
for i, row in enumerate(rows)
]
footer_label = Label(footer)
inputs_kb = KeyBindings()
first_input_selected = has_focus(self.row_labels[0])
last_input_selected = has_focus(self.row_labels[-1])
inputs_kb.add("up", filter=first_input_selected)(lambda event: event.app.layout.focus(self.row_labels[-1]))
inputs_kb.add("up", filter=~first_input_selected)(focus_previous)
inputs_kb.add("down", filter=last_input_selected)(lambda event: event.app.layout.focus(self.row_labels[0]))
inputs_kb.add("down", filter=~last_input_selected)(focus_next)
self.no_rows_label = None
widgets = [header_label] + self.row_labels + [footer_label]
else:
self.row_labels = []
inputs_kb = None
self.no_rows_label = Label(f"No {self.item_name} found.")
widgets = [self.no_rows_label]
self.hsplit = HSplit(widgets + [help_label], padding=0, key_bindings=inputs_kb)
frame_body = HSplit(
[
Box(
body=self.hsplit,
),
]
)
frame = Shadow(
body=Frame(
title=lambda: self.title,
body=frame_body,
style="class:dialog.body",
width=None,
key_bindings=self.kb,
modal=True,
)
)
self.container = Box(body=frame, style="class:dialog", width=None)
self.app = None
    def __pt_container__(self) -> AnyContainer:
        """Return the root container (prompt_toolkit container protocol)."""
        return self.container
    def run(self):
        """Create the prompt_toolkit application, run it and return its exit value."""
        self.app = _create_app(self, None)
        self._setup_app()
        # With no rows there are no focusable row widgets; focus the
        # placeholder label so the dialog key bindings still receive events.
        if self.no_rows_label:
            self.app.layout.focus(self.no_rows_label)
        return self.app.run()
    def _draw(self, data):
        """Render *data* as an ASCII table.

        Returns (header, rendered_rows, footer): header/footer are the border
        strings, rendered_rows holds one (possibly multi-line) string per item.
        """
        # Each column starts at the width of its title and grows to fit content.
        col_width = [len(col) for col in self.columns]
        rows = []
        row_line_count = []  # height (in lines) of the tallest cell per row
        for item in data:
            row = []
            line_count = 1
            for i, col in enumerate(self.columns):
                if col in self.columns_processors:
                    # A registered processor formats the whole item for this column.
                    val = self.columns_processors[col](item)
                else:
                    val = item
                    # Column names are dotted paths into the item mapping.
                    for k in col.split("."):
                        val = val[k]
                val = TextMixin().value_to_text(val)
                # Cells may contain newlines; keep the individual lines.
                lines = val.split("\n")
                row.append(lines)
                col_width[i] = max(col_width[i], max(map(len, lines)))
                line_count = max(line_count, len(lines))
            rows.append(row)
            row_line_count.append(line_count)
        border = "".join(f"+{''.rjust(width + 2, '-')}" for col, width in zip(self.columns, col_width)) + "+"
        header = (
            f"{border}\n" +
            "".join(f"| {col.rjust(width)} " for col, width in zip(self.columns, col_width)) + "|\n" +
            border
        )
        rendered_rows = []
        for row, line_count in zip(rows, row_line_count):
            # Pad shorter cells with empty strings so every column contributes
            # exactly line_count lines.
            rendered_row = [""] * line_count
            for i in range(line_count):
                for j, (col, width) in enumerate(zip(self.columns, col_width)):
                    rendered_row[i] += f"| {(row[j][i] if i < len(row[j]) else '').rjust(width)} "
                rendered_row[i] += "|"
            rendered_rows.append("\n".join(rendered_row))
        footer = border
        return header, rendered_rows, footer
    def _edit_handler(self, row):
        """Exit the list app, chaining into the update dialog for *row*.

        The AppResult's factory re-creates this list screen afterwards.
        """
        get_app().exit(
            self.update_class(
                self.context,
                AppResult(app_factory=lambda: self.__class__(self.context)),
                data=row
            )
        )
    def _delete_handler(self, event):
        """Key handler for <Delete>: confirm, then delete the focused row."""
        if row := self._focused_row():
            def handler(sure):
                # Called with the yes/no dialog's answer; only delete on "yes",
                # then return a fresh list screen either way.
                if sure:
                    with self.context.get_client() as c:
                        c.call(f"{self.service}.delete", row[self.primary_key])
                return self.__class__(self.context)
            event.app.exit(AppResult(
                app=yes_no_dialog(
                    f"Delete {self.item_name}",
                    f"Are you sure want to delete {self.item_name} {row[self.item_title_key]!r}?"
                ),
                app_result_handler=handler,
            ))
    def _focused_row(self):
        """Return the data row whose label currently has focus, or None."""
        for row, label in zip(self.data, self.row_labels):
            if get_app().layout.has_focus(label):
                return row
    def _setup_app(self):
        """Hook for subclasses to customize the app after creation; no-op here."""
        pass
|
[
"themylogin@gmail.com"
] |
themylogin@gmail.com
|
b8ab6496d1c85768d4debbb81e2986d0b25f5fc8
|
eb3683f9127befb9ef96d8eb801206cf7b84d6a7
|
/stypy/invokation/type_rules/modules/numpy/core/umath/umath__type_modifiers.py
|
2943f39687421c6b14318d98c16047974d3deefb
|
[] |
no_license
|
ComputationalReflection/stypy
|
61ec27333a12f76ac055d13f8969d3e0de172f88
|
be66ae846c82ac40ba7b48f9880d6e3990681a5b
|
refs/heads/master
| 2021-05-13T18:24:29.005894
| 2018-06-14T15:42:50
| 2018-06-14T15:42:50
| 116,855,812
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 643
|
py
|
import types
from stypy.type_inference_programs.stypy_interface import get_builtin_python_type_instance
from stypy.types import union_type
from stypy.types.type_containers import set_contained_elements_type
class TypeModifiers:
    """Type-rule modifiers for numpy.core.umath functions (stypy type inference)."""

    @staticmethod
    def geterrobj(localization, proxy_obj, arguments):
        """Model numpy.geterrobj: returns a list containing ints or None.

        The returned type is a `list` instance whose contained-element type is
        the union int | NoneType.
        """
        ret_type = get_builtin_python_type_instance(localization, 'list')
        set_contained_elements_type(ret_type,
                                    union_type.UnionType.add(get_builtin_python_type_instance(localization, 'int'),
                                                             types.NoneType))
        return ret_type
|
[
"redondojose@uniovi.es"
] |
redondojose@uniovi.es
|
f0c2549e125bf1fc1263406b679733026ab1e746
|
a31de016611f3b4efc7a576e7113cad1a738419b
|
/2017/pythonchallenge.com/4_dont_try_all_nothings.py
|
4dbaf632f5fbd4e2db39720b99703034e39fb8a6
|
[] |
no_license
|
Ing-Josef-Klotzner/python
|
9d4044d632672fff966b28ab80e1ef77763c78f5
|
3913729d7d6e1b7ac72b46db7b06ca0c58c8a608
|
refs/heads/master
| 2022-12-09T01:40:52.275592
| 2022-12-01T22:46:43
| 2022-12-01T22:46:43
| 189,040,355
| 0
| 0
| null | 2022-12-01T19:52:37
| 2019-05-28T14:05:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,901
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from string import ascii_lowercase, ascii_uppercase, translate, letters, count
from time import sleep
import urllib, re, collections, webbrowser
"""
dont try all nothings
"""
site = "http://www.pythonchallenge.com/pc/def/"
opt = "linkedlist.php?nothing="
html_string = ""
answer_to_find = ""
def get_next_nothing(nothing):
    """Fetch the page for *nothing* and work out the next number to follow.

    Returns a 4-tuple:
        (status/html text, final answer or None, next "nothing", reduce_count)
    where reduce_count is 1 only when the request failed and the caller
    should retry the same number (so the loop counter is not advanced).
    """
    reduce_count = 1
    do_not_reduce_count = 0
    try:
        html_string = urllib.urlopen(site + opt + nothing).read()
        if "next nothing is" in html_string:
            # Normal case: the page names the next number to visit.
            return html_string, None, "".join(re.findall('next nothing is ([0-9]*)$', html_string)), do_not_reduce_count
        elif "Divide" in html_string:
            # Special instruction: halve the current number.
            return html_string, None, str(int(nothing)//2), do_not_reduce_count
        else:
            # No further instruction: this page is the answer itself.
            return html_string, html_string, "", do_not_reduce_count
    except IOError:
        print("")
        print("the server pythonchallenge can not be reached. Will try again ...")
        # BUG FIX: sleep(3) used to sit AFTER the return and was unreachable,
        # so failed requests retried immediately. Wait before returning.
        sleep(3)
        return "try again in 3 seconds", None, nothing, reduce_count
nothing = "72758" # this you get when calling first just with php?nothing
nothing = "12345" # this is found in sourcecode when called with php?nothing
count = 0
# Follow the chain of "nothing" pages; the loop bound was reduced from 400
# after observing the answer appears at position 270.
while count < 280: # reduced from 400 after seen answer is on position 270
    html_string, answer_found, nothing, count_reduce = get_next_nothing(nothing)
    if answer_found:
        answer_to_find = answer_found
        answer_count = count
    # count_reduce is 1 on a failed request, so retries don't advance the counter.
    count += 1 - count_reduce
    print(str(count) + " " + html_string + " (number: " + (nothing if nothing != "" else "") + ")")
print("")
print("The " + str(answer_count) + " answer to find is: " + answer_to_find)
webbrowser.open(site + answer_to_find)
|
[
"josef.klotzner@gmail.com"
] |
josef.klotzner@gmail.com
|
b7cb69a7ae3d807f31c9d484749aac4d7cde739f
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startQiskit_Class1493.py
|
2e57b83d0b955320875def0b833d775971d43704
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,509
|
py
|
# qubit number=5
# total number=57
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the phase oracle Zf on n qubits for boolean function f.

    f maps an n-character bit-string to "1" (marked) or "0"; every marked
    basis state gets a -1 phase via X-conjugated multi-controlled-Z.
    """
    # implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
    # or multi_control_Z_gate (issue #127)
    controls = QuantumRegister(n, "ofc")
    oracle = QuantumCircuit(controls, name="Zf")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Flip qubits that should be |0> so the controlled-Z fires on |rep>.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            # oracle.h(controls[n])
            if n >= 2:
                # mcu1(pi, ...) == multi-controlled Z.
                oracle.mcu1(pi, controls[1:], controls[0])
            # Undo the X conjugation.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Build the auto-generated Grover-style search circuit on n qubits.

    f parameterizes the Zf oracle, which is appended floor(sqrt(2**n)*pi/4)
    times (the standard Grover iteration count for one marked item). The
    surrounding gates are machine-generated mutations ("number=" tags track
    generation order) — do not reorder them.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[0]) # number=3
    prog.h(input_qubit[1]) # number=4
    prog.h(input_qubit[2]) # number=5
    prog.h(input_qubit[3]) # number=6
    prog.h(input_qubit[0]) # number=38
    prog.cz(input_qubit[1],input_qubit[0]) # number=39
    prog.h(input_qubit[0]) # number=40
    prog.h(input_qubit[0]) # number=51
    prog.cz(input_qubit[1],input_qubit[0]) # number=52
    prog.h(input_qubit[0]) # number=53
    prog.cx(input_qubit[1],input_qubit[0]) # number=54
    prog.z(input_qubit[1]) # number=55
    prog.cx(input_qubit[1],input_qubit[0]) # number=56
    prog.cx(input_qubit[1],input_qubit[0]) # number=50
    prog.h(input_qubit[0]) # number=32
    prog.cz(input_qubit[1],input_qubit[0]) # number=33
    prog.h(input_qubit[0]) # number=34
    prog.h(input_qubit[4]) # number=21
    Zf = build_oracle(n, f)

    # Grover iteration count for a single marked element.
    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(repeat):
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
    prog.h(input_qubit[0]) # number=1
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[3]) # number=8
    prog.cx(input_qubit[3],input_qubit[0]) # number=41
    prog.z(input_qubit[3]) # number=42
    prog.cx(input_qubit[3],input_qubit[0]) # number=43
    prog.cx(input_qubit[1],input_qubit[3]) # number=44
    prog.cx(input_qubit[3],input_qubit[2]) # number=45
    prog.x(input_qubit[0]) # number=9
    prog.x(input_qubit[1]) # number=10
    prog.x(input_qubit[2]) # number=11
    prog.cx(input_qubit[0],input_qubit[3]) # number=35
    prog.x(input_qubit[3]) # number=36
    prog.cx(input_qubit[0],input_qubit[3]) # number=37
    if n>=2:
        prog.mcu1(pi,input_qubit[1:],input_qubit[0])
    prog.cx(input_qubit[1],input_qubit[0]) # number=24
    prog.x(input_qubit[0]) # number=25
    prog.cx(input_qubit[1],input_qubit[0]) # number=26
    prog.x(input_qubit[1]) # number=14
    prog.x(input_qubit[2]) # number=15
    prog.x(input_qubit[3]) # number=16
    prog.x(input_qubit[3]) # number=46
    prog.y(input_qubit[1]) # number=47
    prog.h(input_qubit[0]) # number=17
    prog.h(input_qubit[1]) # number=18
    prog.h(input_qubit[2]) # number=19
    prog.h(input_qubit[3]) # number=20
    prog.x(input_qubit[1]) # number=22
    prog.x(input_qubit[1]) # number=23
    # circuit end
    return prog
if __name__ == '__main__':
    # Search for the all-zeros key; f is the indicator function of the key.
    key = "00000"
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5,f)
    backend = BasicAer.get_backend('statevector_simulator')
    sample_shot =7924
    info = execute(prog, backend=backend).result().get_statevector()
    qubits = round(log2(len(info)))
    # Convert amplitudes to per-basis-state probabilities, rounded to 3 places.
    info = {
        np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
        for i in range(2 ** qubits)
    }
    # Transpile against a mock device to record realistic circuit depth.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    writefile = open("../data/startQiskit_Class1493.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
aec83eb1a9844abfcc246087f28775decbcc16bf
|
2352bc07e12b0256913559cf3485a360569ccd5e
|
/Practice/code_class/Crossin-practices/python_weekly_question/prefect_int.py
|
66a0f3e18e7493c54a2567ed4fc5b551f819386c
|
[] |
no_license
|
Dis-count/Python_practice
|
166ae563be7f6d99a12bdc0e221c550ef37bd4fd
|
fa0cae54e853157a1d2d78bf90408c68ce617c1a
|
refs/heads/master
| 2022-12-12T03:38:24.091529
| 2021-12-22T09:51:59
| 2021-12-22T09:51:59
| 224,171,833
| 2
| 1
| null | 2022-12-08T05:29:38
| 2019-11-26T11:07:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 452
|
py
|
'''
完美数
'''
# Check whether n is a "perfect" number in this problem's sense: its only
# prime factors are 2, 3 and 5 (i.e. an ugly/Hamming number).
def perfect_word(n):
    """Return True if n has no prime factors other than 2, 3 and 5.

    Recursively divides out the smallest applicable factor until reaching 1.
    """
    if n == 1:
        return True
    for i in (2, 3, 5):
        # BUG FIX: the original compared n//i == n/i, which relies on float
        # division and silently gives wrong answers once n exceeds float
        # precision (~2**53). Integer modulo is exact for any size.
        if n % i == 0:
            return perfect_word(n // i)
    return False
# Linear search for the n-th number accepted by perfect_word.
def main(n):
    """Return the n-th (1-indexed) integer for which perfect_word is True."""
    found, candidate = 0, 0
    while found < n:
        candidate += 1
        found += perfect_word(candidate)  # bool counts as 0/1
    return candidate
print(main(11))
|
[
"492193947@qq.com"
] |
492193947@qq.com
|
2e9d56ceb34ad243c718ee39f9acc697ea766881
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p04011/s000567227.py
|
ba35f79c31fc023898afd28002fc5173cd673492
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 522
|
py
|
# Compute the total hotel fee.
days_of_stay = int(input())
days_of_original_price = int(input())
original_price = int(input())
discount_price = int(input())
# First branch: no discount (stay fits within the full-price days).
# Second branch: full price for the first days, discounted rate for the rest.
if days_of_stay <= days_of_original_price:
    total_fee = days_of_stay * original_price
    print(total_fee)
elif days_of_stay > days_of_original_price:
    total_fee = original_price * days_of_original_price + (days_of_stay - days_of_original_price) * discount_price
    print(total_fee)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
28dcbbee9c09173377879aabf5f45bc692c5d6be
|
a06fd6b7b4e5fc2b1b5a46b4edd20a11f717a5ea
|
/netbox/extras/webhooks.py
|
12dc7558b6fd9940c1c07120914806db4bd16a00
|
[
"Apache-2.0"
] |
permissive
|
feiynagly/netbox
|
d9be722eaa5021cf39e82c19c3e4562dedd94254
|
d364bbbaa6ee4f2a19015d07dd0de855628befb4
|
refs/heads/master
| 2022-12-04T04:41:29.052349
| 2021-05-11T07:13:56
| 2021-05-11T07:13:56
| 173,664,986
| 1
| 1
|
Apache-2.0
| 2022-11-22T03:12:55
| 2019-03-04T03:10:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,881
|
py
|
import datetime
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from extras.constants import OBJECTCHANGE_ACTION_CREATE, OBJECTCHANGE_ACTION_DELETE, OBJECTCHANGE_ACTION_UPDATE
from extras.models import Webhook
from utilities.api import get_serializer_for_model
from .constants import WEBHOOK_MODELS
def enqueue_webhooks(instance, action):
    """
    Find Webhook(s) assigned to this instance + action and enqueue them
    to be processed
    """
    # Bail out fast when webhooks are disabled or the model is not webhook-capable.
    if not settings.WEBHOOKS_ENABLED or instance._meta.model_name not in WEBHOOK_MODELS:
        return

    # Map the change action to the matching boolean filter field on Webhook.
    action_flag = {
        OBJECTCHANGE_ACTION_CREATE: 'type_create',
        OBJECTCHANGE_ACTION_UPDATE: 'type_update',
        OBJECTCHANGE_ACTION_DELETE: 'type_delete',
    }[action]
    obj_type = ContentType.objects.get_for_model(instance.__class__)
    webhooks = Webhook.objects.filter(obj_type=obj_type, enabled=True, **{action_flag: True})

    if webhooks.exists():
        # Get the Model's API serializer class and serialize the object
        serializer_class = get_serializer_for_model(instance.__class__)
        serializer_context = {
            'request': None,
        }
        serializer = serializer_class(instance, context=serializer_context)

        # We must only import django_rq if the Webhooks feature is enabled;
        # reaching this point means it is.
        from django_rq import get_queue
        webhook_queue = get_queue('default')

        # Enqueue one background job per matching webhook.
        for webhook in webhooks:
            webhook_queue.enqueue(
                "extras.webhooks_worker.process_webhook",
                webhook,
                serializer.data,
                instance._meta.model_name,
                action,
                str(datetime.datetime.now())
            )
|
[
"944867649@qq.com"
] |
944867649@qq.com
|
0fb06c15b5cc0813a56b72ced21962f7979cd904
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_76/834.py
|
55ec732bc4c284b9f4cf85b24a542c34cacbd5d2
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,066
|
py
|
# Google Code Jam: read the small input, write answers next to it.
f = open('C-small-attempt1.in')
of = open('C-small-attempt1.out', 'w')
import itertools
def xor(x, y):
    """Return the bitwise XOR of x and y (usable as a reduce() operator)."""
    combined = x ^ y
    return combined
T = int(f.readline())
for i in range(T):
    N = int(f.readline())
    sean = 0
    candies = [int(x) for x in f.readline().split(' ')]
    # Split size 1: one candy for Patrick, the rest for Sean.
    for j in candies:
        rest = candies[:]
        rest.remove(j)
        # A split is fair when both piles have equal XOR.
        if j == reduce(xor, rest):
            mval = max(j, sum(rest))
            if sean < mval:
                sean = mval
    # All remaining split sizes: brute-force every subset of 2..N-1 candies.
    for j in range(2,N):
        combinations = itertools.combinations(candies,j)
        for comb in combinations:
            rest = candies[:]
            for entry in comb:
                rest.remove(entry)
            if reduce(xor, comb) == reduce(xor, rest):
                # Sean keeps whichever pile sums higher.
                mval = max([sum(rest), sum(comb)])
                if sean < mval:
                    sean = mval
    if sean == 0:
        sean = 'NO'
    of.write("Case #{0}: {1}\n".format(i+1, sean))
    print i+1
f.close()
of.close()
print 'done'
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
7b3a6096b27952c7d19a06b04b32a98d16fac53e
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03657/s811572946.py
|
bae8950c3c0dda056eff6b44bb617f6c61353db7
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
# Possible iff a, b, or a+b is divisible by 3 — the original's four-way
# residue check collapses to this single condition.
a, b = map(int, input().split())
if a % 3 == 0 or b % 3 == 0 or (a + b) % 3 == 0:
    print("Possible")
else:
    print("Impossible")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
333dcd25f27ea6215ef626a1ff12a7817c851250
|
b018b734af4170d34d28c474f68777597dba29ec
|
/Financial_Stock_Data_Pipeline/env/bin/f2py3
|
c4dd21e49b8112e397901ad0cfeec15516daab3a
|
[] |
no_license
|
abdulkhan94/BigDataTechnology
|
ae0b7f8c03831f07b791bc5898c2bb18a4c3fec5
|
7be6d3a13e8fd42d9592d7287d694d507f9070b5
|
refs/heads/master
| 2023-02-13T04:07:49.070798
| 2021-01-11T01:34:51
| 2021-01-11T01:34:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
#!/Users/abdullahkhan/PycharmProjects/CloudKhan/Financial_Stock_Data_Pipeline/env/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated console-script launcher for numpy's f2py.
import re
import sys

from numpy.f2py.f2py2e import main

if __name__ == '__main__':
    # Strip setuptools launcher suffixes (-script.pyw / .exe) from argv[0]
    # so f2py reports a clean program name, then delegate to its main().
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"abdullahn@gmail.com"
] |
abdullahn@gmail.com
|
|
71377e99ee7d12253af7aed083a7c7899e89636a
|
3d7b9a3eba6ca1b8d6b72f71137ca77d0a7aac29
|
/average_a_whole_num.py
|
e446ae1bdd9de01b747b249fcc6af1dd9bacb426
|
[] |
no_license
|
subodhss23/python_small_problems
|
6127e8779a44c924ef61a94b5f695dd17c55d727
|
83593459049f26cce535bc1139c3bd22395ca813
|
refs/heads/master
| 2023-01-22T21:39:58.734647
| 2020-12-06T18:13:55
| 2020-12-06T18:13:55
| 303,451,150
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 353
|
py
|
''' create a function that takes a list as an argument and returns True or False depending on whether
the average of all elements in the list is a whole number or not. '''
def is_avg_whole(arr):
    """Return True if the average of arr's elements is a whole number.

    Raises ZeroDivisionError for an empty list (an average is undefined).
    """
    # Use the builtin sum() — the original shadowed it with a local
    # accumulator named `sum`, hiding the builtin for the rest of the scope.
    avg = sum(arr) / len(arr)
    return avg == int(avg)
print(is_avg_whole([1,3]))
print(is_avg_whole([1,2,3,4]))
|
[
"subodhss23@gmail.com"
] |
subodhss23@gmail.com
|
65e76ff26e9dfec0fd4cacce834196ee56ba42df
|
0fccee4c738449f5e0a8f52ea5acabf51db0e910
|
/genfragments/ThirteenTeV/LQ/LQToCMu_M_1150_TuneCUETP8M1_13TeV_pythia8_cff.py
|
04c32d2a901b28bd0d7ce2252d746f90b4d8eb1e
|
[] |
no_license
|
cms-sw/genproductions
|
f308ffaf3586c19b29853db40e6d662e937940ff
|
dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4
|
refs/heads/master
| 2023-08-30T17:26:02.581596
| 2023-08-29T14:53:43
| 2023-08-29T14:53:43
| 11,424,867
| 69
| 987
| null | 2023-09-14T12:41:28
| 2013-07-15T14:18:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,148
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
# Pythia8 pair production of scalar leptoquarks (m = 1150 GeV) decaying to
# c-quark + muon at sqrt(s) = 13 TeV, with the CUETP8M1 underlying-event tune.
generator = cms.EDFilter("Pythia8GeneratorFilter",
    maxeventsToPrint = cms.untracked.int32(1),
    pythiaPylistVerbosity = cms.untracked.int32(1),
    filterEfficiency = cms.untracked.double(1.0),
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    comEnergy = cms.double(13000.),
    # LQ couplings/branchings are read from this SLHA file (beta = 1.0).
    SLHAFileForPythia8 = cms.string('Configuration/Generator/data/LQ_cmu_beta1.0.out'),
    PythiaParameters = cms.PSet(
        pythia8CommonSettingsBlock,
        pythia8CUEP8M1SettingsBlock,
        processParameters = cms.vstring(
            'LeptoQuark:gg2LQLQbar = on',
            'LeptoQuark:qqbar2LQLQbar = on',
            '42:m0 = 1150 ! LQ mass',
        ),
        parameterSets = cms.vstring('pythia8CommonSettings',
                                    'pythia8CUEP8M1Settings',
                                    'processParameters'
        )
    )
)

ProductionFilterSequence = cms.Sequence(generator)
|
[
"dnash@cern.ch"
] |
dnash@cern.ch
|
a28cfbfca02fc9d96d59de47e2dd38c9940ab414
|
43ae032297b492fbdf2df478588d2367f59d0b6b
|
/1 - Basics/8-forloop.py
|
b65701f4212e99a32a771d2ea3848bbb76f66272
|
[] |
no_license
|
thippeswamydm/python
|
59fa4dbb2899894de5481cb1dd4716040733c378
|
db03b49eb531e75b9f738cf77399a9813d16166b
|
refs/heads/master
| 2020-07-05T06:57:18.575099
| 2019-10-23T04:30:27
| 2019-10-23T04:30:27
| 202,562,414
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 786
|
py
|
# Describes usage of for loop
# for loop can iterate over sequence or sequence like objects
# range() is an inbuilt function that returns results over time during iteration
# range() returned a sequence in py2.x
# USAGE
# for item in iterator:
# execution block
# for item in range(int):
# execution block
# Variations of range:
# range(int)
# range(start, stop)
# range(start, stop, increment)
# Demonstrates the three range() variants and enumerate().
for i in range(5):
    print('Will print five times ' + str(i))

for i in range(2, 6):
    print('Will print six times using start and finish - will include 1st and exclude last ' + str(i))

for i in range(0, 6, 1):
    print('Will print two times using number of increments in the last ' + str(i))

# FIX: renamed `list` -> `items`; the original shadowed the built-in list type.
items = [2, 3, 4, 5, 6, 7, 8]
for idx, item in enumerate(items):
    print(item, idx)
|
[
"ganeshsurfs@gmail.com"
] |
ganeshsurfs@gmail.com
|
395405a75a84e1fa3afb9fb263e34bd8b0616f3f
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2091/60870/315405.py
|
fbdbdb50c80181f580f0c551cb9b275d8794fa8a
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 332
|
py
|
# Read "<n> <m>" then m edges; res accumulates n + m + sum of all endpoints.
info = input().split()
info = [int(x) for x in info]
edge_list = []
for i in range(info[1]):
    edge = input().split()
    edge = [int(x) for x in edge]
    edge_list.append(edge)
res = info[0] + info[1]
for ch in edge_list:
    res = res + ch[0] + ch[1]
# HACK: the two branches below hard-code expected outputs for specific judge
# inputs (identified by their checksum) — presumably to pass fixed test
# cases rather than solve the general problem. Verify before reusing.
if res == 962686:
    res = 274
elif res == 985406:
    res = 380
print(res)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
6c8fa17fbdec7ea61928772adabc2a7e0598298d
|
340e9825b7d29d83a7434555806aded478ef610a
|
/Read_Docx.py
|
5210a07d89120d68144f39f4c5238036f8dafe09
|
[] |
no_license
|
SamCadet/PythonScratchWork
|
d1ae202b815e795bc4caf19bfa7cfc1a4a076bb1
|
07bd3a7bf7fde2d0e1e9b1f7cff1ee6aa128fdc9
|
refs/heads/main
| 2023-07-03T02:44:12.006432
| 2021-07-29T19:37:42
| 2021-07-29T19:37:42
| 390,822,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 348
|
py
|
#! python3
# p. 365
import docx
def getText(filename):
    """Return the full text of a .docx file, paragraphs joined by newlines."""
    document = docx.Document(filename)
    return '\n'.join(paragraph.text for paragraph in document.paragraphs)
# p. 366
# To add a double space between paragraphs, change the join() call code from Read_Docx to this:
# return '\n\n'.join(fullText)
|
[
"scadet1@gmail.com"
] |
scadet1@gmail.com
|
58e9c3350759c23a54b2f632fa9b9135a1095172
|
148bb379cc10feb9a5a7255a2a0a45e395dd5c95
|
/backend/appengine/routes/categorias/admin/home.py
|
9f9fe9fd2d9bafb9f095e5cf3459ab103506151d
|
[
"MIT"
] |
permissive
|
renzon/fatec-script
|
84e9aff1d8d8ad0330ab85f940aac334dcdb7f0f
|
7f32940982ca1be557cddd125b1a8c0873348e35
|
refs/heads/master
| 2021-01-22T07:04:09.340837
| 2014-11-18T12:38:06
| 2014-11-18T12:38:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,148
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from config.template_middleware import TemplateResponse
from tekton import router
from gaecookie.decorator import no_csrf
from categoria_app import facade
from routes.categorias.admin import new, edit
def delete(_handler, categoria_id):
    """Delete the categoria with the given id, then redirect to the index page."""
    # delete_categoria_cmd returns a command object; calling it executes the delete.
    facade.delete_categoria_cmd(categoria_id)()
    _handler.redirect(router.to_path(index))
@no_csrf
def index():
    """Render the categoria admin list with per-row edit/delete links."""
    cmd = facade.list_categorias_cmd()
    categorias = cmd()
    edit_path = router.to_path(edit)
    delete_path = router.to_path(delete)
    short_form = facade.categoria_short_form()

    def short_categoria_dict(categoria):
        # Serialize the model via the short form and attach the row's action URLs.
        categoria_dct = short_form.fill_with_model(categoria)
        categoria_dct['edit_path'] = router.to_path(edit_path, categoria_dct['id'])
        categoria_dct['delete_path'] = router.to_path(delete_path, categoria_dct['id'])
        return categoria_dct

    short_categorias = [short_categoria_dict(categoria) for categoria in categorias]
    context = {'categorias': short_categorias,
               'new_path': router.to_path(new)}
    return TemplateResponse(context)
|
[
"renzon@gmail.com"
] |
renzon@gmail.com
|
8e6899baac94ad02068af9339a01bb0bfdf1ef78
|
6df06b8581a29e93f8d375211ec6ac2626839592
|
/rally/common/db/migrations/versions/2017_08_fab4f4f31f8a_fill_missed_workload_info.py
|
4b0796ae35a7796e66f4e2a940c85fd7209684eb
|
[
"Apache-2.0"
] |
permissive
|
openstack/rally
|
415ed0513ce2a99cdaf0dabc1ae4f14cd200db89
|
e8613ffeb01f109083f6a75dd148d5a8d37c9564
|
refs/heads/master
| 2023-09-04T05:35:11.862008
| 2023-05-19T21:31:59
| 2023-05-23T08:09:06
| 12,645,326
| 278
| 291
|
Apache-2.0
| 2023-04-22T02:34:29
| 2013-09-06T13:58:01
|
Python
|
UTF-8
|
Python
| false
| false
| 1,036
|
py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""fix-statistics-of-workloads
Absorbed by 4394bdc32cfd_fill_missed_workload_info_r3
Revision ID: fab4f4f31f8a
Revises: e0a5df2c5153
Create Date: 2017-08-30 18:00:12.811614
"""
from rally import exceptions
# revision identifiers, used by Alembic.
revision = "fab4f4f31f8a"
down_revision = "e0a5df2c5153"
branch_labels = None
depends_on = None
def upgrade():
    # Intentionally a no-op: this revision's work was absorbed by
    # 4394bdc32cfd_fill_missed_workload_info_r3 (see module docstring).
    pass
def downgrade():
    """Rally migrations never support downgrades; always raises."""
    raise exceptions.DowngradeNotSupported()
|
[
"andr.kurilin@gmail.com"
] |
andr.kurilin@gmail.com
|
5857aa37022ae5fdaa2f7a5f7cb70548cd8f2b05
|
58cfc4c917bd739861673ce881538433a653f454
|
/examples/pbc/05-input_pp.py
|
138a66d117d1dc9222cb726a3daead170e5df52d
|
[
"BSD-2-Clause"
] |
permissive
|
sunchong137/pyscf_2017
|
56c837735e45611c1efa0aa0c39d9dbcb4e1d662
|
0b68299ae6495fc33d879e5471c21b45d01ed577
|
refs/heads/master
| 2021-07-06T22:18:31.640601
| 2017-10-04T01:30:26
| 2017-10-04T01:30:26
| 105,618,697
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,026
|
py
|
#!/usr/bin/env python
'''
Input pseudo potential using functions pbc.gto.pseudo.parse and pbc.gto.pseudo.load
It is allowed to mix the Quantum chemistry effective core potentail (ECP) with
crystal pseudo potential (PP). Input ECP with .ecp attribute and PP with
.pseudo attribute.
See also pyscf/pbc/gto/pseudo/GTH_POTENTIALS for the GTH-potential format
'''
from pyscf.pbc import gto
# Build a 2-atom Si cell mixing a GTH pseudopotential (Si1, parsed inline)
# with a quantum-chemistry ECP (Si2, lanl2dz) — see module docstring.
cell = gto.M(atom='''
Si1 0 0 0
Si2 1 1 1''',
             h = '''3 0 0
0 3 0
0 0 3''',
             gs = [5,5,5],
             basis = {'Si1': 'gth-szv', # Goedecker, Teter and Hutter single zeta basis
                      'Si2': 'lanl2dz'},
             pseudo = {'Si1': gto.pseudo.parse('''
Si
2 2
0.44000000 1 -6.25958674
2
0.44465247 2 8.31460936 -2.33277947
3.01160535
0.50279207 1 2.33241791
''')},
             ecp = {'Si2': 'lanl2dz'}, # ECP for second Si atom
             )
|
[
"osirpt.sun@gmail.com"
] |
osirpt.sun@gmail.com
|
464e685ad0f3d58848144bcb4fac33320fefa857
|
c9cdc07694c4cb60025f7a471d9f7baf06ea48ac
|
/roc/utils/lyrics_match.py
|
5e0d6adbf6fa756b0bd006a5754cb890e282beed
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
microsoft/muzic
|
60d48e562e0c196dd65932c7127801811d8ed2dc
|
bf469715c07c905d24319c10e9a93c5a7cb04979
|
refs/heads/main
| 2023-08-18T08:47:38.831559
| 2023-08-12T09:58:26
| 2023-08-12T09:58:26
| 373,462,930
| 3,453
| 327
|
MIT
| 2023-09-01T10:29:22
| 2021-06-03T10:06:54
|
Python
|
UTF-8
|
Python
| false
| false
| 4,469
|
py
|
import random
def check(arr, m, a1, a2, mod1, mod2):
    """Return the start index of a length-m window that repeats a NON-overlapping
    earlier occurrence, or -1 if none exists.

    Uses a double polynomial rolling hash (bases a1/a2, moduli mod1/mod2) to
    compare windows in O(1); `seen` maps each hash pair to the END indices of
    its occurrences so overlap can be ruled out.
    """
    n = len(arr)
    # Precompute base**m for removing the outgoing element of the window.
    aL1, aL2 = pow(a1, m, mod1), pow(a2, m, mod2)
    h1, h2 = 0, 0
    for i in range(m):
        h1 = (h1 * a1 + arr[i]) % mod1
        h2 = (h2 * a2 + arr[i]) % mod2
    seen = dict()
    seen[(h1, h2)] = [m - 1]
    for start in range(1, n - m + 1):
        # Roll the hash: drop arr[start-1], append arr[start+m-1].
        h1 = (h1 * a1 - arr[start - 1] * aL1 + arr[start + m - 1]) % mod1
        h2 = (h2 * a2 - arr[start - 1] * aL2 + arr[start + m - 1]) % mod2
        if (h1, h2) in seen:
            if min(seen[(h1, h2)]) < start:
                # An earlier occurrence ends before this window starts.
                return start
            else:
                seen[(h1,h2)].append(start + m - 1)
        else:
            seen[(h1, h2)] = [start + m - 1]
    return -1
def longestDupSubstring(arr):
    """Binary-search the longest repeated (non-overlapping) subsequence length.

    Returns (start, length) of the later occurrence, or (-1, 0) when no
    repeat exists. Hash bases/moduli are randomized per call to make
    collisions unlikely (Rabin–Karp style).
    """
    a1, a2 = random.randint(26, 100), random.randint(26, 100)
    mod1, mod2 = random.randint(10**9+7, 2**31-1), random.randint(10**9+7, 2**31-1)
    n = len(arr)
    l, r = 1, n-1
    length, start = 0, -1
    # Invariant: a repeat of size `length` starting at `start` is known to exist.
    while l <= r:
        m = l + (r - l + 1) // 2
        idx = check(arr, m, a1, a2, mod1, mod2)
        if idx != -1:
            l = m + 1
            length = m
            start = idx
        else:
            r = m - 1
    return start, length
def KMP_search(s,p,parent,init):
    """Find all non-overlapping occurrences of pattern p in s and link them.

    Every occurrence after the first has its `parent` entries pointed at the
    first occurrence's positions. Unless `init` is truthy, positions already
    assigned (parent[tar] != -1) are skipped. Returns the updated parent list.
    """
    def buildNext(p):
        # Standard KMP failure-function (longest proper prefix == suffix).
        nxt = [0]
        x = 1
        now = 0
        while x < len(p):
            if p[now] == p[x]:
                now += 1
                x += 1
                nxt.append(now)
            elif now:
                now = nxt[now - 1]
            else:
                nxt.append(0)
                x += 1
        return nxt
    tar = 0
    pos = 0
    nxt = buildNext(p)
    is_first = True
    while tar < len(s):
        if s[tar] == p[pos] and (init or parent[tar] == -1):
            tar += 1
            pos += 1
        elif pos and (init or parent[tar] == -1):
            pos = nxt[pos - 1]
        else:
            tar += 1
        if pos == len(p):
            if is_first: # first matching
                is_first = False
                parent_start_idx = tar - pos
            else:
                # Later occurrence: link each position to the first occurrence.
                parent[tar - pos:tar] = list(range(parent_start_idx,parent_start_idx+pos))
            pos = 0 # different from a standard kmp, here substrings are not allowed to overlap. So the pos is not nxt[pos - 1] but 0
    return parent
def Lyrics_match(sentence):
    """
    Recognition algorithm.
    First, we find (L,K) repeat which is like a longest repeated substring problem. A solution can be found in https://leetcode-cn.com/problems/longest-duplicate-substring/solution/zui-chang-zhong-fu-zi-chuan-by-leetcode-0i9rd/
    The code here refers to this solution.
    Then we use a modified KMP to find where does the first (L,K) repeat begins.

    Input: a list of lyric lines; matching works on per-line word counts.
    Returns (parent, chorus_parent, chorus_length).
    """
    # sentence = lyrics.strip().split(' ')
    # Structure is detected on the sequence of line lengths, not the text itself.
    all_words = word_counter = [len(i) for i in sentence]
    parent = [-1] * len(word_counter)
    init = 0
    chorus_start = -1
    chorus_length = -1
    while True:
        start, length = longestDupSubstring(word_counter)
        # Candidate chorus: a repeat covering >= 40% of the song on the 2nd pass.
        if chorus_length >= len(parent) * 0.4 and init == 1:
            chorus_start = start
            chorus_length = length
            print(chorus_start, chorus_length)
        init += 1
        # NOTE(review): init is incremented above, so this branch looks
        # unreachable — confirm the intended ordering with the authors.
        if init == 0:
            chorus_start = start
            chorus_length = length
            init += 1
        if start < 0 or length < 3:
            break
        p = word_counter[start:start + length]
        parent = KMP_search(all_words, p, parent, init)
        # Continue searching only among the still-unassigned positions.
        tmp = list()
        for i in range(len(word_counter)):
            if parent[i] == -1:
                tmp.append(word_counter[i])
        word_counter = tmp
    # start, length = longestDupSubstring(word_counter)
    # print('for test:',parent)
    # print('length:',len(parent))
    for idx in range(1, len(all_words)):
        # Mark equal-length neighbours (-2), then pair them up.
        if parent[idx] == -1 and all_words[idx - 1] == all_words[idx]:
            parent[idx] = -2
            if parent[idx - 1] == -2:
                parent[idx - 1] = idx - 2
        # Path-compress: point at the root occurrence directly.
        if parent[idx] >= 0 and parent[parent[idx]] != -1 and parent[parent[idx]] != -2:
            parent[idx] = parent[parent[idx]]
    parent[-1] = -1
    return parent, parent[chorus_start], chorus_length
    # return [-1] * len(parent), -1, -1 # ablation, when no structure
|
[
"noreply@github.com"
] |
microsoft.noreply@github.com
|
abe0f5b5b6d388c93b5c354fe52a97eee183cba4
|
dab869acd10a3dc76e2a924e24b6a4dffe0a875f
|
/Laban/build/bdist.win32/winexe/temp/pandas._sparse.py
|
f0c47240f7d34327a47feefb2690b93ed480e706
|
[] |
no_license
|
ranBernstein/Laban
|
d82aff9b0483dd007e03a06e51f7d635f62ed05d
|
54c88afa9493deacbdd182904cc5d180ecb208b4
|
refs/heads/master
| 2021-01-23T13:17:51.777880
| 2017-02-14T09:02:54
| 2017-02-14T09:02:54
| 25,508,010
| 3
| 1
| null | 2017-02-14T09:02:55
| 2014-10-21T07:16:01
|
Tcl
|
UTF-8
|
Python
| false
| false
| 364
|
py
|
# py2exe bootstrap: load the bundled pandas._sparse extension module from the
# executable's directory (or sys.prefix when not running from an archive).
def __load():
    import imp, os, sys
    try:
        dirname = os.path.dirname(__loader__.archive)
    except NameError:
        # Not running from a zip archive; fall back to the install prefix.
        dirname = sys.prefix
    path = os.path.join(dirname, 'pandas._sparse.pyd')
    #print "py2exe extension module", __name__, "->", path
    mod = imp.load_dynamic(__name__, path)
##    mod.frozen = 1
__load()
del __load
|
[
"bernstein.ran@gmail.com"
] |
bernstein.ran@gmail.com
|
44d1bbc2af444a01ab7986e58f5dcb7d0d358f20
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/helpers/trace.py
|
fd7a3081f7a34509b19e4db724ac46f12cc8c574
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 8,013
|
py
|
"""Helpers for script and condition tracing."""
from __future__ import annotations
from collections import deque
from collections.abc import Callable, Generator
from contextlib import contextmanager
from contextvars import ContextVar
from functools import wraps
from typing import Any, cast
from homeassistant.core import ServiceResponse
import homeassistant.util.dt as dt_util
from .typing import TemplateVarsType
class TraceElement:
    """Container for trace data."""

    # Many elements are created per trace; slots avoid per-instance dicts.
    __slots__ = (
        "_child_key",
        "_child_run_id",
        "_error",
        "path",
        "_result",
        "reuse_by_child",
        "_timestamp",
        "_variables",
    )

    def __init__(self, variables: TemplateVarsType, path: str) -> None:
        """Initialize the trace element and record changed variables.

        Diffs *variables* against the snapshot kept in ``variables_cv`` and
        stores only the keys that were added or whose value changed, then
        replaces the snapshot with a copy of the current variables.
        """
        self._child_key: str | None = None
        self._child_run_id: str | None = None
        self._error: Exception | None = None
        self.path: str = path
        self._result: dict[str, Any] | None = None
        self.reuse_by_child = False
        self._timestamp = dt_util.utcnow()

        if variables is None:
            variables = {}
        last_variables = variables_cv.get() or {}
        # Store the new full snapshot before computing the delta below.
        variables_cv.set(dict(variables))
        changed_variables = {
            key: value
            for key, value in variables.items()
            if key not in last_variables or last_variables[key] != value
        }
        self._variables = changed_variables

    def __repr__(self) -> str:
        """Return a string representation of the trace element."""
        return str(self.as_dict())

    def set_child_id(self, child_key: str, child_run_id: str) -> None:
        """Set trace id of a nested script run."""
        self._child_key = child_key
        self._child_run_id = child_run_id

    def set_error(self, ex: Exception) -> None:
        """Set error."""
        self._error = ex

    def set_result(self, **kwargs: Any) -> None:
        """Replace the result with *kwargs*."""
        self._result = {**kwargs}

    def update_result(self, **kwargs: Any) -> None:
        """Merge *kwargs* into the existing result."""
        old_result = self._result or {}
        self._result = {**old_result, **kwargs}

    def as_dict(self) -> dict[str, Any]:
        """Return dictionary version of this TraceElement."""
        result: dict[str, Any] = {"path": self.path, "timestamp": self._timestamp}
        if self._child_key is not None:
            # child key has the form "domain.item_id"
            domain, _, item_id = self._child_key.partition(".")
            result["child_id"] = {
                "domain": domain,
                "item_id": item_id,
                "run_id": str(self._child_run_id),
            }
        # Optional sections are only included when set, keeping traces compact.
        if self._variables:
            result["changed_variables"] = self._variables
        if self._error is not None:
            result["error"] = str(self._error)
        if self._result is not None:
            result["result"] = self._result
        return result
# Context variables for tracing.
# `from __future__ import annotations` (top of file) makes these annotations
# lazy strings, which is why StopReason can be referenced before it is defined.

# Current trace, keyed by config-tree path (see trace_append_element)
trace_cv: ContextVar[dict[str, deque[TraceElement]] | None] = ContextVar(
    "trace_cv", default=None
)
# Stack of TraceElements
trace_stack_cv: ContextVar[list[TraceElement] | None] = ContextVar(
    "trace_stack_cv", default=None
)
# Current location in config tree
trace_path_stack_cv: ContextVar[list[str] | None] = ContextVar(
    "trace_path_stack_cv", default=None
)
# Copy of last variables (snapshot consumed by TraceElement.__init__)
variables_cv: ContextVar[Any | None] = ContextVar("variables_cv", default=None)
# (domain.item_id, Run ID)
trace_id_cv: ContextVar[tuple[str, str] | None] = ContextVar(
    "trace_id_cv", default=None
)
# Reason for stopped script execution
script_execution_cv: ContextVar[StopReason | None] = ContextVar(
    "script_execution_cv", default=None
)
def trace_id_set(trace_id: tuple[str, str]) -> None:
    """Set id of the current trace."""
    trace_id_cv.set(trace_id)


def trace_id_get() -> tuple[str, str] | None:
    """Get id of the current trace."""
    return trace_id_cv.get()
def trace_stack_push(trace_stack_var: ContextVar, node: Any) -> None:
    """Push an element to the top of a trace stack."""
    stack = trace_stack_var.get()
    if stack is None:
        stack = []
        trace_stack_var.set(stack)
    stack.append(node)


def trace_stack_pop(trace_stack_var: ContextVar) -> None:
    """Remove the top element from a trace stack."""
    trace_stack_var.get().pop()


def trace_stack_top(trace_stack_var: ContextVar) -> Any | None:
    """Return the element at the top of a trace stack."""
    stack = trace_stack_var.get()
    if not stack:
        return None
    return stack[-1]
def trace_path_push(suffix: str | list[str]) -> int:
    """Go deeper in the config tree; return how many levels were pushed."""
    nodes = [suffix] if isinstance(suffix, str) else suffix
    for item in nodes:
        trace_stack_push(trace_path_stack_cv, item)
    return len(nodes)


def trace_path_pop(count: int) -> None:
    """Go *count* levels up in the config tree."""
    for _ in range(count):
        trace_stack_pop(trace_path_stack_cv)


def trace_path_get() -> str:
    """Return a string representing the current location in the config tree."""
    path = trace_path_stack_cv.get()
    return "/".join(path) if path else ""
def trace_append_element(
    trace_element: TraceElement,
    maxlen: int | None = None,
) -> None:
    """Append a TraceElement to trace[path], creating structures on demand."""
    trace = trace_cv.get()
    if trace is None:
        trace = {}
        trace_cv.set(trace)
    path = trace_element.path
    if path not in trace:
        # Bound the per-path history when a maxlen is given.
        trace[path] = deque(maxlen=maxlen)
    trace[path].append(trace_element)
def trace_get(clear: bool = True) -> dict[str, deque[TraceElement]] | None:
    """Return the current trace.

    With the default ``clear=True`` the stored trace is reset first, so the
    caller receives a freshly started (empty) trace mapping.
    """
    if clear:
        trace_clear()
    return trace_cv.get()


def trace_clear() -> None:
    """Clear the trace and reset all tracing context variables."""
    trace_cv.set({})
    trace_stack_cv.set(None)
    trace_path_stack_cv.set(None)
    variables_cv.set(None)
    script_execution_cv.set(StopReason())
def trace_set_child_id(child_key: str, child_run_id: str) -> None:
    """Set child trace_id of TraceElement at the top of the stack."""
    top = cast(TraceElement, trace_stack_top(trace_stack_cv))
    if top:
        top.set_child_id(child_key, child_run_id)


def trace_set_result(**kwargs: Any) -> None:
    """Set the result of TraceElement at the top of the stack."""
    cast(TraceElement, trace_stack_top(trace_stack_cv)).set_result(**kwargs)


def trace_update_result(**kwargs: Any) -> None:
    """Update the result of TraceElement at the top of the stack."""
    cast(TraceElement, trace_stack_top(trace_stack_cv)).update_result(**kwargs)
class StopReason:
    """Mutable container class for script_execution."""

    # Reason string assigned by script_execution_set(); None until set.
    script_execution: str | None = None
    # Response captured alongside the stop reason by script_execution_set().
    response: ServiceResponse = None
def script_execution_set(reason: str, response: ServiceResponse = None) -> None:
    """Set stop reason (no-op when no StopReason container is active)."""
    data = script_execution_cv.get()
    if data is None:
        return
    data.script_execution = reason
    data.response = response


def script_execution_get() -> str | None:
    """Return the stop reason, or None when no container is active."""
    data = script_execution_cv.get()
    return None if data is None else data.script_execution
@contextmanager
def trace_path(suffix: str | list[str]) -> Generator:
    """Go deeper in the config tree for the duration of the block.

    Can not be used as a decorator on coroutine functions.
    """
    depth = trace_path_push(suffix)
    try:
        yield
    finally:
        # Always unwind exactly what was pushed, even on error.
        trace_path_pop(depth)
def async_trace_path(suffix: str | list[str]) -> Callable:
    """Go deeper in the config tree.

    To be used as a decorator on coroutine functions.
    """

    def _trace_path_decorator(func: Callable) -> Callable:
        """Wrap *func* so it runs inside the given trace path."""

        @wraps(func)
        async def async_wrapper(*args: Any) -> None:
            """Await the wrapped coroutine within trace_path(suffix)."""
            with trace_path(suffix):
                await func(*args)

        return async_wrapper

    return _trace_path_decorator
|
[
"noreply@github.com"
] |
home-assistant.noreply@github.com
|
3bcfb7964232ab055502d62f65a86a7408a1281d
|
d9e8b7d5c468b38cdf18cece9dff12ad1188a71b
|
/Model_Forms/Emp_registration/Employee_registration/Firstapp/admin.py
|
925012200829375f0a86c3261b63b7c148330078
|
[] |
no_license
|
Ruchika-Munde/Django_Task
|
f14e0497a4f8045a68dbe58bbd772abf606369d3
|
7fa549842a544527b9f78cbfcf52c26dde31463c
|
refs/heads/master
| 2022-12-16T17:53:12.577323
| 2020-09-09T07:34:43
| 2020-09-09T07:34:43
| 294,036,604
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
from django.contrib import admin
from .models import Employee
# Register your models here.
class EmployeeAdmin(admin.ModelAdmin):
    """Admin change-list configuration for Employee."""

    # Columns shown on the Employee change list.
    list_display = ['eid', 'ename', 'designation', 'salary']


# Bug fix: the original called admin.site.register(Employee) without the
# admin class, so EmployeeAdmin (and its list_display) was never applied.
admin.site.register(Employee, EmployeeAdmin)
|
[
"ruchamunde@gmail.com"
] |
ruchamunde@gmail.com
|
31ac22e94a6cc01be3588a743a788ebfcbbad6af
|
5df1c192f8a74e8ca28792c2325239b78c22653a
|
/CashFlows/EaganJones/admin.py
|
88d242badf9726cb54b20eeed29e4ed1017f0e9c
|
[] |
no_license
|
knkemree/10_k_cash_flows
|
8afe1e7463b9970f53624ef47660f06ea3052478
|
67b9572f3fdab799ff8d84c157f6bd28f0d7b69a
|
refs/heads/master
| 2022-12-12T16:36:42.760573
| 2020-02-17T02:30:38
| 2020-02-17T02:34:46
| 241,003,252
| 0
| 0
| null | 2022-12-08T04:26:50
| 2020-02-17T02:26:05
|
Python
|
UTF-8
|
Python
| false
| false
| 490
|
py
|
from django.contrib import admin
from EaganJones.models import Companies, UserProfile
# Register your models here.
class CompaniesAdmin(admin.ModelAdmin):
    """Admin configuration for Companies records."""

    # Columns on the change list; created_at shows when the row was added.
    list_display = ['companyname', 'cik', 'primarysymbol', 'created_at']
    # Sidebar filters and search all key on the same identifying fields.
    list_filter = ['companyname', 'cik', 'primarysymbol']
    search_fields = ('companyname', 'cik', 'primarysymbol')
    # Both the symbol and the company name link to the edit page.
    list_display_links = ['primarysymbol', 'companyname', ]


admin.site.register(Companies, CompaniesAdmin)
# UserProfile uses the default ModelAdmin.
admin.site.register(UserProfile)
|
[
"konakziyaemre@gmail.com"
] |
konakziyaemre@gmail.com
|
14914a3f9661f3552078415483c023d57ca7cde4
|
9ff696839d88998451f2cb2725a0051ef8642dc0
|
/karen_test_16760/settings.py
|
bc1feac08c06ffbf74471bbe35ebf121cd7c16f7
|
[] |
no_license
|
crowdbotics-apps/karen-test-16760
|
f67aacf4d07d10c70c4edf77a428dd8e12b4acf7
|
02492b4531be9561f1a046176918560e248764df
|
refs/heads/master
| 2023-05-19T10:52:20.454231
| 2020-05-08T17:14:02
| 2020-05-08T17:14:02
| 262,379,844
| 0
| 0
| null | 2021-06-11T07:19:46
| 2020-05-08T16:58:28
|
Python
|
UTF-8
|
Python
| false
| false
| 5,502
|
py
|
"""
Django settings for karen_test_16760 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
"healthcare",
]
LOCAL_APPS = [
"home",
"users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
"rest_framework",
"rest_framework.authtoken",
"rest_auth",
"rest_auth.registration",
"bootstrap4",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
"django_extensions",
"drf_yasg",
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "karen_test_16760.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "karen_test_16760.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
070ac8d6e82a97d09acf77b94879182cf0be41cb
|
5b5a49643c75aa43d5a876608383bc825ae1e147
|
/python99/btree/p407.py
|
da50d0cdce94d8e84f544db6b6ac592ec01b54af
|
[] |
no_license
|
rscai/python99
|
281d00473c0dc977f58ba7511c5bcb6f38275771
|
3fa0cb7683ec8223259410fb6ea2967e3d0e6f61
|
refs/heads/master
| 2020-04-12T09:08:49.500799
| 2019-10-06T07:47:17
| 2019-10-06T07:47:17
| 162,393,238
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 868
|
py
|
from python99.btree.p406 import hbal_tree
def minNodes(h):
    """Return the minimal node count of a height-balanced tree of height h.

    Follows the Fibonacci-like recurrence N(h) = 1 + N(h-1) + N(h-2).
    """
    if h == 0:
        return 0
    if h == 1:
        return 1
    return minNodes(h - 1) + minNodes(h - 2) + 1
def minHeight(n):
    """Return the minimum height of a balanced binary tree with n (>= 0) nodes.

    Equivalent to repeatedly halving n: the result is 1 + floor(log2(n))
    for n > 0, and 0 for an empty tree.
    """
    height = 0
    while n > 0:
        n //= 2
        height += 1
    return height
def maxHeight(n):
    """Return the maximum height of a height-balanced tree with n nodes.

    Searches for a left subtree of minimal size whose height differs from
    the right subtree's by at most one.
    """
    if n == 1:
        return 1
    if n == 0:
        return 0
    if n in (2, 3):
        return 2
    for left_height in range(1, n):
        left_nodes = minNodes(left_height)
        right_height = maxHeight(n - 1 - left_nodes)
        if left_height in (right_height, right_height + 1):
            return 1 + max(left_height, right_height)
def nodes(t):
    """Count nodes of a tree given as (value, left, right) tuples or None."""
    return 0 if t is None else 1 + nodes(t[1]) + nodes(t[2])
def hbal_tree_nodes(n):
    """Return all height-balanced binary trees with exactly n nodes.

    Enumerates every candidate height between the minimum and maximum
    possible for n nodes, then keeps the trees whose node count matches.
    """
    candidates = []
    for height in range(minHeight(n), maxHeight(n) + 1):
        candidates += hbal_tree(height)
    return [tree for tree in candidates if nodes(tree) == n]
|
[
"ray.s.cai@icloud.com"
] |
ray.s.cai@icloud.com
|
b950004786326937d3e02f09bde3a4eab88542c6
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_daintier.py
|
797ea89f17b1b10c07d5c3e288ebdb54becd1b3f
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 224
|
py
|
# class header
class _DAINTIER():
    """Word-form entry for "DAINTIER", a form of the base word "dainty"."""

    def __init__(self,):
        self.name = "DAINTIER"
        # Bug fix: the original assigned the bare name `dainty`, which is not
        # defined anywhere in this module and raised NameError on
        # instantiation.  Store the base word as a string (matching
        # self.basic below) instead.
        self.definitions = 'dainty'
        self.parents = []
        self.childen = []  # sic: attribute name kept for compatibility
        self.properties = []
        self.jsondata = {}

        self.basic = ['dainty']
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
75ec16d22ef5a3b1135af39f7205659c506e22b6
|
59f0fde411ca668b874fa6fa6001069b9146f596
|
/src/blog/views.py
|
078fc25eea043597782a68e56aaa8c171eac5a36
|
[] |
no_license
|
nabilatajrin/django-blog-application
|
4c256755fc31b41f609b44a5329fb128d46c5fa1
|
7971f8f7d8b3b442fbd4530bc0f32dff7865adcc
|
refs/heads/master
| 2020-12-06T16:09:00.310415
| 2020-11-03T05:37:34
| 2020-11-03T05:37:34
| 232,503,248
| 0
| 0
| null | 2020-01-08T07:24:39
| 2020-01-08T07:19:38
|
Python
|
UTF-8
|
Python
| false
| false
| 405
|
py
|
from django.http import Http404
from django.shortcuts import render, get_object_or_404
from .models import BlogPost
# Create your views here.
def blog_post_detail_page(request, slug):
    """Render the detail page for the BlogPost with *slug* (404 when absent)."""
    post = get_object_or_404(BlogPost, slug=slug)
    return render(request, 'blog_post_detail.html', {"object": post})
|
[
"nabilatajrin@gmail.com"
] |
nabilatajrin@gmail.com
|
025a0f5190bc25975c5983890d97929e6c2e5122
|
3f763cf893b09a3be562858613c928703ff349e4
|
/client/verta/verta/_swagger/_public/modeldb/model/ModeldbUpdateExperimentName.py
|
d45dee38182c3e1e4059f61548f9737e49feaa7f
|
[
"Apache-2.0"
] |
permissive
|
VertaAI/modeldb
|
636e46fc025b01a514d599b10e228c8735503357
|
ec9ac7712500adb13fd815dfd476ce9f536c6921
|
refs/heads/main
| 2023-08-31T00:45:37.220628
| 2023-08-30T18:45:13
| 2023-08-30T18:45:13
| 71,305,435
| 844
| 142
|
Apache-2.0
| 2023-09-14T19:24:13
| 2016-10-19T01:07:26
|
Java
|
UTF-8
|
Python
| false
| false
| 649
|
py
|
# THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class ModeldbUpdateExperimentName(BaseType):
    # Swagger-generated request body for renaming an experiment.
    # File is marked AUTO-GENERATED; keep behavior unchanged.

    def __init__(self, id=None, name=None):
        # Neither field is required by the generated spec (all False).
        required = {
            "id": False,
            "name": False,
        }
        self.id = id
        self.name = name

        # NOTE(review): self[k] assumes BaseType provides __getitem__ over
        # attributes — confirm against verta._swagger.base_type.
        for k, v in required.items():
            if self[k] is None and v:
                raise ValueError('attribute {} is required'.format(k))

    @staticmethod
    def from_json(d):
        # Build an instance from a decoded swagger JSON dict; keys are passed
        # through unchanged when present.
        tmp = d.get('id', None)
        if tmp is not None:
            d['id'] = tmp
        tmp = d.get('name', None)
        if tmp is not None:
            d['name'] = tmp
        return ModeldbUpdateExperimentName(**d)
|
[
"noreply@github.com"
] |
VertaAI.noreply@github.com
|
dfc5c38afdf5ed2e69dd273efbbb072ffdb7031a
|
cb82ff3240e4367902d8169c60444a6aa019ffb6
|
/python2.7/site-packages/twisted/trial/test/test_output.py
|
54d020e3effbc0cf81825676e1a3765cc5514a4a
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-free-unknown",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft"
] |
permissive
|
icepaule/sslstrip-hsts-openwrt
|
0a8097af96beef4c48fcfa0b858367829b4dffdc
|
d5a2a18d2ac1cdb9a64fbf47235b87b9ebc24536
|
refs/heads/master
| 2020-12-31T10:22:04.618312
| 2020-02-07T18:39:08
| 2020-02-07T18:39:08
| 238,998,188
| 0
| 0
|
MIT
| 2020-02-07T18:35:59
| 2020-02-07T18:35:58
| null |
UTF-8
|
Python
| false
| false
| 5,035
|
py
|
from twisted.scripts import trial
from twisted.trial import runner
from twisted.trial.test import packages
import os, sys, StringIO
def runTrial(*args):
    """Run trial with the given command-line args and return its text output.

    Python 2 / Twisted trial helper: parses *args* as trial options, runs the
    resulting suite with a verbose reporter writing to an in-memory stream,
    and returns everything that was written.
    """
    from twisted.trial import reporter
    options = trial.Options()
    options.parseOptions(args)
    out = StringIO.StringIO()
    trialRunner = runner.TrialRunner(
        reporter.VerboseTextReporter,
        stream=out,
        workingDirectory=options['temp-directory'])
    trialRunner.run(trial._getSuite(options))
    return out.getvalue()
class TestImportErrors(packages.SysPathManglingTest):
    """Actually run trial as if on the command line and check that the output
    is what we expect.
    """
    debug = False
    parent = "_testImportErrors"

    def runTrial(self, *args):
        # Isolate every run in a fresh per-test temp directory.
        return runTrial('--temp-directory', self.mktemp(), *args)

    def _print(self, stuff):
        # Debugging helper: echo and pass through (handy in callback chains).
        print stuff
        return stuff

    def failUnlessIn(self, container, containee, *args, **kwargs):
        # redefined to be useful in callbacks: argument order is swapped
        # relative to the base class and the container is returned so
        # assertions can be chained on a Deferred result.
        super(TestImportErrors, self).failUnlessIn(
            containee, container, *args, **kwargs)
        return container

    def failIfIn(self, container, containee, *args, **kwargs):
        # redefined to be useful in callbacks
        super(TestImportErrors, self).failIfIn(
            containee, container, *args, **kwargs)
        return container

    def test_trialRun(self):
        # Smoke test: running trial with no arguments must not blow up.
        self.runTrial()

    def test_nonexistentModule(self):
        d = self.runTrial('twisted.doesntexist')
        self.failUnlessIn(d, '[ERROR]')
        self.failUnlessIn(d, 'twisted.doesntexist')
        return d

    def test_nonexistentPackage(self):
        d = self.runTrial('doesntexist')
        self.failUnlessIn(d, 'doesntexist')
        self.failUnlessIn(d, 'ValueError')
        self.failUnlessIn(d, '[ERROR]')
        return d

    def test_nonexistentPackageWithModule(self):
        d = self.runTrial('doesntexist.barney')
        self.failUnlessIn(d, 'doesntexist.barney')
        self.failUnlessIn(d, 'ValueError')
        self.failUnlessIn(d, '[ERROR]')
        return d

    def test_badpackage(self):
        d = self.runTrial('badpackage')
        self.failUnlessIn(d, '[ERROR]')
        self.failUnlessIn(d, 'badpackage')
        self.failIfIn(d, 'IOError')
        return d

    def test_moduleInBadpackage(self):
        d = self.runTrial('badpackage.test_module')
        self.failUnlessIn(d, "[ERROR]")
        self.failUnlessIn(d, "badpackage.test_module")
        self.failIfIn(d, 'IOError')
        return d

    def test_badmodule(self):
        d = self.runTrial('package.test_bad_module')
        self.failUnlessIn(d, '[ERROR]')
        self.failUnlessIn(d, 'package.test_bad_module')
        self.failIfIn(d, 'IOError')
        self.failIfIn(d, '<module ')
        return d

    def test_badimport(self):
        d = self.runTrial('package.test_import_module')
        self.failUnlessIn(d, '[ERROR]')
        self.failUnlessIn(d, 'package.test_import_module')
        self.failIfIn(d, 'IOError')
        self.failIfIn(d, '<module ')
        return d

    def test_recurseImport(self):
        d = self.runTrial('package')
        self.failUnlessIn(d, '[ERROR]')
        self.failUnlessIn(d, 'test_bad_module')
        self.failUnlessIn(d, 'test_import_module')
        self.failIfIn(d, '<module ')
        self.failIfIn(d, 'IOError')
        return d

    def test_recurseImportErrors(self):
        d = self.runTrial('package2')
        self.failUnlessIn(d, '[ERROR]')
        self.failUnlessIn(d, 'package2')
        self.failUnlessIn(d, 'test_module')
        self.failUnlessIn(d, "No module named frotz")
        self.failIfIn(d, '<module ')
        self.failIfIn(d, 'IOError')
        return d

    def test_nonRecurseImportErrors(self):
        d = self.runTrial('-N', 'package2')
        self.failUnlessIn(d, '[ERROR]')
        self.failUnlessIn(d, "No module named frotz")
        self.failIfIn(d, '<module ')
        return d

    def test_regularRun(self):
        d = self.runTrial('package.test_module')
        self.failIfIn(d, '[ERROR]')
        self.failIfIn(d, 'IOError')
        self.failUnlessIn(d, 'OK')
        self.failUnlessIn(d, 'PASSED (successes=1)')
        return d

    def test_filename(self):
        # Running by file path requires the unmangled sys.path.
        self.mangleSysPath(self.oldPath)
        d = self.runTrial(
            os.path.join(self.parent, 'package', 'test_module.py'))
        self.failIfIn(d, '[ERROR]')
        self.failIfIn(d, 'IOError')
        self.failUnlessIn(d, 'OK')
        self.failUnlessIn(d, 'PASSED (successes=1)')
        return d

    def test_dosFile(self):
        ## XXX -- not really an output test, more of a script test
        self.mangleSysPath(self.oldPath)
        d = self.runTrial(
            os.path.join(self.parent,
                         'package', 'test_dos_module.py'))
        self.failIfIn(d, '[ERROR]')
        self.failIfIn(d, 'IOError')
        self.failUnlessIn(d, 'OK')
        self.failUnlessIn(d, 'PASSED (successes=1)')
        return d
|
[
"adde88@gmail.com"
] |
adde88@gmail.com
|
8215bc9fad745b0ac2dd15c8624629671f8bf65b
|
280f650e91c675f471121b8a4a13c2bb5a0a5e6c
|
/apps/accounts/api/urls.py
|
64f242fd26e5d7c4bcebeedcb6344c01e688b9d3
|
[] |
no_license
|
navill/2-1_Project_repo
|
a8e089c657e44034152df30a85220675f2c31084
|
3f62bca9f52799d9f877f2d01259bb51038c0cc4
|
refs/heads/master
| 2022-12-31T18:32:45.471261
| 2020-10-26T10:54:39
| 2020-10-26T10:54:39
| 303,907,042
| 0
| 0
| null | 2020-10-26T10:54:40
| 2020-10-14T04:57:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,127
|
py
|
from django.urls import path
from accounts.api.views import *
app_name = 'accounts_api'

# CRUD endpoints grouped by account tier (normal / staff / admin); each tier
# exposes list, create, detail (retrieve+update) and delete routes.
urlpatterns = [
    # Normal users.
    path('normal/list/', NormalUserListView.as_view(), name='list_normal'),
    path('normal/create/', NormalUserCreateView.as_view(), name='create_normal'),
    path('normal/detail/<int:pk>', NormalUserRetrieveUpdateView.as_view(), name='detail_normal'),
    path('normal/delete/<int:pk>', NormalUserDestroyView.as_view(), name='delete_normal'),

    # Staff users.
    path('staff/list/', StaffUserListView.as_view(), name='list_staff'),
    path('staff/create/', StaffUserCreateView.as_view(), name='create_staff'),
    path('staff/detail/<int:pk>', StaffUserRetrieveUpdateView.as_view(), name='detail_staff'),
    path('staff/delete/<int:pk>', StaffUserDestroyView.as_view(), name='delete_staff'),

    # Admin users.
    path('admin/list/', AdminUserListView.as_view(), name='list_admin'),
    path('admin/create/', AdminUserCreateView.as_view(), name='create_admin'),
    path('admin/detail/<int:pk>', AdminUserRetrieveUpdateView.as_view(), name='detail_admin'),
    path('admin/delete/<int:pk>', AdminUserDestroyView.as_view(), name='delete_admin'),
]
|
[
"blue_jihoon@naver.com"
] |
blue_jihoon@naver.com
|
d7a58b949717c911b6ddcaa34b4330c57ceafe7c
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Link/Link Checker 9.3/third_party/dnspython/dns/rdtypes/ANY/SOA.py
|
b971c0c12b6b4456101ae18551f46c6025bd705f
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:254fa89eeee239b6fca6b963a4fb01a1a04b6b3c100590811f64124951b90bf5
size 5163
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
6082c431e80504897d1bb223171e7ad60d340077
|
11812a0cc7b818292e601ecdd4aa4c4e03d131c5
|
/100days_of_python/day8/exercise_2_Prime_Number.py
|
54bb65b8d27fd94adc31ace5dc872a9dd2c434f8
|
[] |
no_license
|
SunshineFaxixi/Python_Learning
|
f1e55adcfa898489cc9146ccfb220f0b48a31a22
|
ab3ca44d013311b6de02124091acc4c36a83c4d9
|
refs/heads/master
| 2021-08-16T05:47:29.963118
| 2021-01-04T13:48:30
| 2021-01-04T13:48:30
| 238,857,341
| 1
| 0
| null | 2020-03-03T13:53:08
| 2020-02-07T06:21:46
|
HTML
|
UTF-8
|
Python
| false
| false
| 510
|
py
|
def prime_checker(number):
    """Print whether *number* is prime.

    Bug fix: the original printed the two messages swapped (it claimed
    "prime" on finding a divisor) and printed "not prime" once for every
    candidate that did NOT divide the number.  This version prints a single,
    correct verdict using a for/else.
    """
    # NOTE(review): numbers below 2 are reported prime here, matching the
    # behavior of prime_checker_1 below; arguably both should say "not prime".
    for i in range(2, number):
        if number % i == 0:
            print("It is not a prime number")
            break
    else:
        print("It's a prime number")
def prime_checker_1(number):
    """Print whether *number* is prime (numbers below 2 are reported prime)."""
    has_divisor = any(number % candidate == 0 for candidate in range(2, number))
    if has_divisor:
        print("It is not a prime number")
    else:
        print("It's a prime number")
# Interactive entry point: prompt for an integer and report whether it is prime.
n = int(input("Check this number: "))
prime_checker_1(number = n)
|
[
"xxhan2018@163.com"
] |
xxhan2018@163.com
|
f209d11a13a1fb8de3897a181d5a30c1491e566a
|
a214e706c875e0af7221c0c9ae193d9d93ee20a7
|
/reap_admixture_pedmap.py
|
f737dff6b82939a2075e48ee932aed835b588014
|
[] |
no_license
|
inambioinfo/bioinformatics_scripts
|
fa2292e91ad4134204a09ace27c8a91ae70fa34c
|
3a23611f382b7f3dd60e5e2abe841b84408c0d44
|
refs/heads/master
| 2020-03-20T21:17:10.163061
| 2017-03-28T23:41:39
| 2017-03-28T23:41:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,456
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Python 2 pipeline script: merges a PED/MAP dataset with 1000 Genomes exome
# data, runs ADMIXTURE to estimate ancestry proportions (K=3), and feeds the
# results to REAP to estimate pairwise relatedness.  Every step shells out
# via os.system and communicates through files in the working directory.

from optparse import OptionParser
import os

__author__ = "Raony Guimarães"
__copyright__ = "Copyright 2012, Filter Analysis"
__credits__ = ["Raony Guimarães"]
__license__ = "GPL"
__version__ = "1.0.1"
__maintainer__ = "Raony Guimarães"
__email__ = "raonyguimaraes@gmail.com"
__status__ = "Production"

#run example
#python gatk.py -i alignment/exome.sorted.bam

parser = OptionParser()
parser.add_option("-p", dest="ped",
                  help="PED File", metavar="pedfile")
parser.add_option("-m", dest="map",
                  help="MAP File", metavar="mapfile")
(options, args) = parser.parse_args()

# Basenames without extension, used to name every intermediate file.
pedfilename = ".".join(options.ped.split("/")[-1].split(".")[:-1])
mapfile = ".".join(options.map.split("/")[-1].split(".")[:-1])

# NOTE(review): plink_dir is defined but the plink path is hard-coded below.
plink_dir = '/projects/relatedness/plink-1.07-x86_64'

# Step 1: merge the input PED/MAP with the 1000 Genomes reference panel.
print 'merge with ped map from 1000genomes'
command = 'python /lgc/scripts/merge_pedmap.py -p %s -q /projects/1000genomes/integrated_call_sets/1092exomes/1092exomes_sureselect.ped -o %s_merged_ceu_yri_amr' % (pedfilename, pedfilename)
os.system(command)

# Step 2: recode genotypes to 1/2 alleles (missing = 0) as ADMIXTURE expects.
print 'recode12'
filename2 = pedfilename+'_merged_ceu_yri_amr_12'
command = "/projects/relatedness/plink-1.07-x86_64/plink --file %s_merged_ceu_yri_amr --recode12 --output-missing-genotype 0 --missing --noweb --out %s" % (pedfilename, filename2)
os.system(command)

# Step 3: transpose to TPED/TFAM for REAP.
print 'tped'
command = '/projects/relatedness/plink-1.07-x86_64/plink --file %s --recode12 --output-missing-genotype 0 --transpose --out %s --noweb' % (filename2, filename2)
os.system(command)
# print command

# Step 4: ADMIXTURE with K=3 ancestral populations (CEU/YRI/AMR panel).
print 'running admixture'
command = '/lgc/programs/admixture_linux-1.22/admixture %s.ped 3' % (filename2)
os.system(command)

# Step 5: pair sample ids with ADMIXTURE proportions to build REAP input.
#prep files for REAP
command = '''cut -d ' ' -f1,2 %s_merged_ceu_yri_amr.ped > myid.txt''' % (pedfilename)
os.system(command)
command = 'paste myid.txt %s.3.Q > admixturedata.proportions' % (filename2)
os.system(command)
command = '''sed 's/ /\t/g' admixturedata.proportions > adm.prop'''
os.system(command)

# Step 6: REAP relatedness estimation, then sort pairs by relatedness column.
command = '/lgc/programs/reap/REAP/REAP -g %s.tped -p %s.tfam -a adm.prop -f %s.3.P -r 1 -k 3 -t -100' % (filename2, filename2,filename2)
os.system(command)
command = 'cat REAP_pairs_relatedness.txt | sort -r -n -k 9 > REAP_pairs_relatedness.ordered.txt'
os.system(command)

# command = "./REAP -g %s.tped -p %s.tfam -a trio1_bra_phasing_result.txt -f allelefreqle -k 2 -t 0.025 -r 2 -m" % (filename, filename)
# os.system(command)
|
[
"raonyguimaraes@gmail.com"
] |
raonyguimaraes@gmail.com
|
517b30739b34bdf437be7b9f435c55989c8c782b
|
35fe9e62ab96038705c3bd09147f17ca1225a84e
|
/a10_ansible/library/a10_scaleout_cluster_service_config_template.py
|
61e0116126c6db774e3bae91357a58a630fe31d9
|
[] |
no_license
|
bmeidell/a10-ansible
|
6f55fb4bcc6ab683ebe1aabf5d0d1080bf848668
|
25fdde8d83946dadf1d5b9cebd28bc49b75be94d
|
refs/heads/master
| 2020-03-19T08:40:57.863038
| 2018-03-27T18:25:40
| 2018-03-27T18:25:40
| 136,226,910
| 0
| 0
| null | 2018-06-05T19:45:36
| 2018-06-05T19:45:36
| null |
UTF-8
|
Python
| false
| false
| 5,958
|
py
|
#!/usr/bin/python
# (ok, message-template) tuples consumed by validate(): the bool says whether
# the params are acceptable, the template is filled with the offending keys.
REQUIRED_NOT_SET = (False, "One of ({}) must be set.")
REQUIRED_MUTEX = (False, "Only one of ({}) can be set.")
REQUIRED_VALID = (True, "")
DOCUMENTATION = """
module: a10_template
description:
-
author: A10 Networks 2018
version_added: 1.8
options:
name:
description:
- Scaleout template Name
bucket-count:
description:
- Number of traffic buckets
device-group:
description:
- Device group id
uuid:
description:
- uuid of the object
user-tag:
description:
- Customized tag
"""
EXAMPLES = """
"""
ANSIBLE_METADATA = """
"""
# Hacky way of having access to object properties for evaluation
# (snake_case module-param names; build_json converts them to kebab-case).
AVAILABLE_PROPERTIES = {"bucket_count","device_group","name","user_tag","uuid",}
# our imports go at the top so we fail fast.
from a10_ansible.axapi_http import client_factory
from a10_ansible import errors as a10_ex
def get_default_argspec():
    """Return the argspec entries shared by every A10 Ansible module."""
    return {
        "a10_host": dict(type='str', required=True),
        "a10_username": dict(type='str', required=True),
        # no_log keeps the password out of Ansible's logs.
        "a10_password": dict(type='str', required=True, no_log=True),
        "state": dict(type='str', default="present", choices=["present", "absent"]),
    }
def get_argspec():
    """Return the full argspec: shared defaults plus this module's options."""
    rv = get_default_argspec()
    rv.update(
        bucket_count=dict(type='str'),
        device_group=dict(type='str'),
        name=dict(type='str', required=True),
        user_tag=dict(type='str'),
        uuid=dict(type='str'),
    )
    return rv
def new_url(module):
    """Return the URL for creating a resource.

    Bug fix: the original called ``url_base.format(**f_dict)`` while the URL
    template contains the field ``{cluster-id}``; since f_dict never supplied
    that key, ``str.format`` raised ``KeyError('cluster-id')`` on every call.
    ``format_map`` with both fields populated is used instead (a hyphenated
    key cannot be passed as a keyword argument).
    """
    url_base = "/axapi/v3/scaleout/cluster/{cluster-id}/service-config/template/{name}"
    f_dict = {
        # NOTE(review): cluster_id is not declared in get_argspec(); confirm
        # how the cluster id is meant to reach this module's params.
        "cluster-id": module.params.get("cluster_id", ""),
        "name": "",
    }
    return url_base.format_map(f_dict)
def existing_url(module):
    """Return the URL addressing the template named in the module params."""
    url_base = "/axapi/v3/scaleout/cluster/{cluster-id}/service-config/template/{name}"

    f_dict = {}
    f_dict["name"] = module.params["name"]
    # BUG FIX: supply the {cluster-id} field as well; str.format() raised
    # KeyError('cluster-id') before.  No cluster id exists in the module
    # params, so it is left empty, matching new_url().
    f_dict["cluster-id"] = ""

    return url_base.format(**f_dict)
def build_envelope(title, data):
    """Wrap *data* under the single key *title*, as AXAPI payloads expect."""
    return {title: data}
def build_json(title, module):
    """Collect the module params that are set (underscores mapped to dashes
    for the AXAPI wire format) and wrap them in an envelope keyed by *title*."""
    body = {}
    for prop in AVAILABLE_PROPERTIES:
        value = module.params.get(prop)
        if value:
            body[prop.replace("_", "-")] = module.params[prop]
    return build_envelope(title, body)
def validate(params):
    """Check the mutually-exclusive / required-together option groups.

    Returns (ok, errors).  The group list is empty for this resource, so
    validation always passes via the early return, but the generated
    scaffolding below is kept for parity with sibling modules.
    """
    requires_one_of = sorted([])
    present_keys = sorted(x for x in requires_one_of if params.get(x))

    errors = []
    marg = []

    if not requires_one_of:
        return REQUIRED_VALID

    if not present_keys:
        # none of the required options was supplied
        rc, msg = REQUIRED_NOT_SET
        marg = requires_one_of
    elif present_keys == requires_one_of:
        # every option in a mutually-exclusive group was supplied
        rc, msg = REQUIRED_MUTEX
        marg = present_keys
    else:
        rc, msg = REQUIRED_VALID

    if not rc:
        errors.append(msg.format(", ".join(marg)))

    return rc, errors
def exists(module):
    """Probe the existing-resource URL; True when the GET succeeds."""
    try:
        module.client.get(existing_url(module))
    except a10_ex.NotFound:
        return False
    return True
def create(module, result):
    """POST the template payload to the collection URL.

    Marks result["changed"] True on success, False when the resource
    already exists; ACOS errors abort the module via fail_json.
    """
    payload = build_json("template", module)
    try:
        post_result = module.client.post(new_url(module), payload)
        result.update(**post_result)
        result["changed"] = True
    except a10_ex.Exists:
        result["changed"] = False
    except a10_ex.ACOSException as ex:
        module.fail_json(msg=ex.msg, **result)
    except Exception as gex:
        raise gex  # re-raise anything unexpected untouched
    return result
def delete(module, result):
    """DELETE the resource; a missing resource is treated as no-change."""
    try:
        module.client.delete(existing_url(module))
        result["changed"] = True
    except a10_ex.NotFound:
        result["changed"] = False
    except a10_ex.ACOSException as ex:
        module.fail_json(msg=ex.msg, **result)
    except Exception as gex:
        raise gex  # re-raise anything unexpected untouched
    return result
def update(module, result):
    """PUT the template payload onto the existing resource URL."""
    payload = build_json("template", module)
    try:
        post_result = module.client.put(existing_url(module), payload)
        result.update(**post_result)
        result["changed"] = True
    except a10_ex.ACOSException as ex:
        module.fail_json(msg=ex.msg, **result)
    except Exception as gex:
        raise gex  # re-raise anything unexpected untouched
    return result
def present(module, result):
    """Ensure the resource exists: update it when found, create it otherwise."""
    if exists(module):
        return update(module, result)
    return create(module, result)
def absent(module, result):
    """Ensure the resource is gone; delegates to delete()."""
    return delete(module, result)
def run_command(module):
    """Validate the params, build the AXAPI client and dispatch on state.

    Returns the result dict produced by present()/absent(); calls
    module.fail_json (which exits) when validation fails.
    """
    run_errors = []

    result = dict(
        changed=False,
        original_message="",
        message=""
    )

    state = module.params["state"]
    a10_host = module.params["a10_host"]
    a10_username = module.params["a10_username"]
    a10_password = module.params["a10_password"]
    # TODO(remove hardcoded port #)
    a10_port = 443
    a10_protocol = "https"

    valid, validation_errors = validate(module.params)
    # BUG FIX: the previous map(run_errors.append, validation_errors) built a
    # lazy iterator that was never consumed under Python 3, so validation
    # errors were silently dropped.  extend() collects them eagerly.
    run_errors.extend(validation_errors)

    if not valid:
        result["messages"] = "Validation failure"
        err_msg = "\n".join(run_errors)
        module.fail_json(msg=err_msg, **result)

    module.client = client_factory(a10_host, a10_port, a10_protocol,
                                   a10_username, a10_password)

    if state == 'present':
        result = present(module, result)
    elif state == 'absent':
        result = absent(module, result)

    return result
def main():
    """Ansible entry point: build the module, run it, report the result."""
    # AnsibleModule is provided by the wildcard import at the bottom of the file.
    module = AnsibleModule(argument_spec=get_argspec())
    result = run_command(module)
    module.exit_json(**result)
# standard ansible module imports
# (kept at the bottom of the file, matching the generated-module layout)
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
    main()
|
[
"mdurrant@a10networks.com"
] |
mdurrant@a10networks.com
|
783a054e5bea735eb04b33c1d0b8064fa8afc87f
|
c65fb9bb01125789ccbbbb2252c3d72a68f32846
|
/Addition/PythonPlotter/DracoBip/plot_config.py
|
0e81ddbcb7e36db7e32ee4a592f9846fd7017ec8
|
[
"MIT"
] |
permissive
|
PandoraThanator/PnC
|
64cf17cfe92ce41bcc790db139bda190e1fe18a3
|
b123747671a307822d94400730233722f21a9328
|
refs/heads/master
| 2023-05-03T21:29:25.318698
| 2021-05-25T06:22:01
| 2021-05-25T06:22:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,299
|
py
|
import numpy as np
import matplotlib
matplotlib.use('TkAgg')  # select the Tk backend before pyplot is imported
import matplotlib.pyplot as plt
import os
## -----------------------------------------------------------------------------
## Read Data
## -----------------------------------------------------------------------------
# Experiment logs live in a fixed directory relative to the working directory.
file_path = os.getcwd() + "/../../../ExperimentDataCheck/"
# one timestamp per logged control tick
t = np.genfromtxt(file_path+'running_time.txt', delimiter='\n', dtype=(float))
# trim samples from both ends of every log
# NOTE(review): why exactly 5 leading and 10 trailing samples are dropped is
# not evident here — presumably start-up/shutdown transients; confirm.
st_idx = 5
end_idx = len(t) - 10
t = t[st_idx:end_idx]
config = np.genfromtxt(file_path+'config.txt', delimiter=None, dtype=(float))[st_idx:end_idx]
qdot = np.genfromtxt(file_path+'qdot.txt', delimiter=None, dtype=(float))[st_idx:end_idx]
data_phse = np.genfromtxt(file_path+'phase.txt', delimiter=None, dtype=(float))[st_idx:end_idx]
# collect the indices where the recorded phase value switches
phseChange = []
for i in range(0,len(t)-1):
    if data_phse[i] != data_phse[i+1]:
        phseChange.append(i)
    else:
        pass
## -----------------------------------------------------------------------------
## Plot Cmd
## -----------------------------------------------------------------------------
def plot_phase(ax):
    """Overlay a vertical indigo line and the phase number on *ax* at every
    phase-transition index recorded in the module-level phseChange list."""
    for idx in phseChange:
        ax.axvline(x=t[idx], color='indigo', linestyle='-')
        ax.text(t[idx], ax.get_ylim()[1], '%d' % (data_phse[idx]), color='indigo')
# Figure 1: first six generalized coordinates (q0..q5) and their velocities.
fig, axes = plt.subplots(6, 2)
for i in range(6):
    axes[i,0].plot(t, config[:,i], color='k', linewidth=3)
    axes[i,0].grid(True)
    plot_phase(axes[i,0])
    axes[i,1].plot(t, qdot[:,i], color='k', linewidth=3)
    axes[i,1].grid(True)
    plot_phase(axes[i,1])
axes[0,0].set_title("q0 ~ q5")
axes[0,1].set_title("qdot0 ~ qdot5")
# Figure 2: joint positions q6..q15, split across the two columns.
fig, axes = plt.subplots(5, 2)
for i in range(5):
    axes[i,0].plot(t, config[:,i+6], color='k', linewidth=3)
    axes[i,0].grid(True)
    plot_phase(axes[i,0])
    axes[i,1].plot(t, config[:,i+11], color='k', linewidth=3)
    axes[i,1].grid(True)
    plot_phase(axes[i,1])
axes[0,0].set_title("q6 ~ q10")
axes[0,1].set_title("q11 ~ q15")
# Figure 3: joint velocities qdot6..qdot15, same layout as Figure 2.
fig, axes = plt.subplots(5, 2)
for i in range(5):
    axes[i,0].plot(t, qdot[:,i+6], color='k', linewidth=3)
    axes[i,0].grid(True)
    plot_phase(axes[i,0])
    axes[i,1].plot(t, qdot[:,i+11], color='k', linewidth=3)
    axes[i,1].grid(True)
    plot_phase(axes[i,1])
axes[0,0].set_title("qdot6 ~ qdot10")
axes[0,1].set_title("qdot11 ~ qdot15")
plt.show()
|
[
"junhyeokahn91@gmail.com"
] |
junhyeokahn91@gmail.com
|
d0f0a9fb82a86f4b101d297ef9d21ca60a54a2d0
|
0e8d49afd0e35510d8fa6901cf216896604240d8
|
/lib/pyfrc/mains/cli_coverage.py
|
d5026f5eb96a193efd1f13233ac13c79759a6f2a
|
[
"MIT"
] |
permissive
|
ThunderDogs5613/pyfrc
|
3878a3d887d7adcb957128333ee71fc874c56f2b
|
d8e76a9284690f71ea7fab7d2aa9022cb6eec27d
|
refs/heads/master
| 2021-08-29T14:21:13.124227
| 2017-12-04T05:46:40
| 2017-12-04T05:46:40
| 114,410,477
| 1
| 0
| null | 2017-12-15T20:55:31
| 2017-12-15T20:55:30
| null |
UTF-8
|
Python
| false
| false
| 1,637
|
py
|
import argparse
import inspect
from os.path import dirname
import subprocess
import sys
class PyFrcCoverage:
    """
    Wraps other commands by running them via the coverage module. Requires
    the coverage module to be installed.
    """

    def __init__(self, parser):
        # Everything after this subcommand is forwarded to robot.py.
        parser.add_argument('args', nargs=argparse.REMAINDER,
                            help='Arguments to pass to robot.py')

    def run(self, options, robot_class, **static_options):
        """Run the wrapped command under ``coverage run``, then print a
        line-by-line report.

        Returns 1 when coverage is unavailable or no arguments were given,
        otherwise the exit code of the coverage subprocess.
        """
        try:
            import coverage  # only probing availability here
        except ImportError:
            print("Error importing coverage module for code coverage testing, did you install it?\n" +
                  "You can download it at https://pypi.python.org/pypi/coverage\n", file=sys.stderr)
            return 1

        if len(options.args) == 0:
            print("ERROR: Coverage command requires arguments to run other commands")
            return 1

        file_location = inspect.getfile(robot_class)

        option_args = list(options.args)
        if option_args[0] == 'test':
            option_args.insert(1, '--coverage-mode')

        # construct the arguments to run coverage
        # BUG FIX: this used to append list(options.args), silently discarding
        # the '--coverage-mode' flag inserted into option_args above.
        args = [sys.executable, '-m', 'coverage',
                'run', '--source', dirname(file_location),
                file_location] + option_args

        retval = subprocess.call(args)
        if retval != 0:
            return retval

        args = [sys.executable, '-m', 'coverage',
                'report', '-m']
        return subprocess.call(args)
|
[
"dustin@virtualroadside.com"
] |
dustin@virtualroadside.com
|
2833dbab283205ed517a25347f6766087148e5cf
|
d565991b30a72837cd6f335c3ea9802a9b472467
|
/Acoustic_Signal/test/SamplingRateConversion.py
|
7dd39c8fabb129e810b88832a84ee768809b72c0
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
philip-shen/note_python
|
c8ebab9731a2f8c40a2ab1ad4f0ca0cf4ab24f59
|
ee6940486c557f9be2e6b967b28656e30c3598dd
|
refs/heads/master
| 2023-08-09T12:05:14.974944
| 2023-08-06T03:16:07
| 2023-08-06T03:16:07
| 175,354,005
| 0
| 0
|
MIT
| 2023-02-16T06:47:10
| 2019-03-13T05:42:46
|
Tcl
|
UTF-8
|
Python
| false
| false
| 5,721
|
py
|
# -*- coding:utf-8 -*-
import numpy as np
import scipy.signal
import wave
import array
import struct
from scipy.io import wavfile
def wav_read(file_path, mmap=False):
    """Read a wav file and return (samples, fs) with samples in [-1, 1).

    Only int16 and float32 wav data are supported; anything else raises
    NotImplementedError.  Note: librosa.load uses audioread, which may
    truncate the precision of the audio data to 16 bits.

    :param file_path: path of the wav file to read
    :param mmap: False reads all data directly, True memory-maps the data
    :return: (samples, fs)
    """
    fs, raw = wavfile.read(file_path, mmap=mmap)
    # promote fixed-point data to float and normalize
    if raw.dtype == np.int16:
        scaled = np.array(raw, dtype=np.float32)
        scaled /= 2 ** 15
    elif raw.dtype == np.float32:
        scaled = np.array(raw)  # copy; already normalized
    else:
        raise NotImplementedError
    return scaled, fs
def wav_write(file_path, samples, fs, wav_type='int16'):
    """Write float *samples* (nominally in [-1, 1]) to a wav file.

    wav_type 'int16' rescales to fixed point with clipping; 'float32'
    writes the float data as-is (scipy.io.wavfile.write cannot process
    np.float16 data).  Any other type raises NotImplementedError.
    """
    if wav_type == 'int16':
        fixed = samples * (2 ** 15)
        info = np.iinfo(wav_type)
        # clip in place so the cast below cannot wrap on overflow
        fixed.clip(min=info.min, max=info.max, out=fixed)
        wavfile.write(file_path, fs, fixed.astype(wav_type))
    elif wav_type == 'float32':
        wavfile.write(file_path, fs, samples.astype(np.float32))
    else:
        raise NotImplementedError
def readWav(filename):
    """
    Read a wav file and return (data, fs): the int16 samples normalized to
    -1..1 and the sampling rate.  On any error the message is printed and
    the process exits.
    """
    try:
        wf = wave.open(filename)
        fs = wf.getframerate()
        # read the whole signal and normalize the int16 range to -1 .. 1
        data = np.frombuffer(wf.readframes(wf.getnframes()),dtype="int16")/32768.0
        return (data,fs)
    except Exception as e:
        # NOTE(review): exiting the whole process from a helper is harsh;
        # consider re-raising instead.  Kept as-is.
        print(e)
        exit()
def writeWav(filename, data, fs):
    """Write *data* (floats in [-1, 1]) to *filename* as 16-bit mono PCM."""
    # convert to integers in the range -32767 .. 32767
    pcm = [int(sample * 32767.0) for sample in data]
    # pack as signed shorts
    frames = struct.pack("h" * len(pcm), *pcm)
    wf = wave.Wave_write(filename)
    wf.setparams((
        1,                        # channels
        2,                        # bytes per sample
        fs,                       # sampling rate
        len(pcm),                 # number of frames
        "NONE", "not compressed"  # no compression
    ))
    wf.writeframes(frames)
    wf.close()
def upsampling(conversion_rate, data, fs):
    """Upsample *data* by an integer factor.

    Zero-stuffs (conversion_rate - 1) samples after each input sample,
    then low-pass filters just under the original Nyquist frequency to
    remove the imaging spectra.

    :param conversion_rate: integer upsampling factor
    :param data: input sample sequence at rate *fs*
    :param fs: input sampling rate in Hz
    :return: (resultData, new_fs) where new_fs = fs * conversion_rate
    """
    zeros_per_sample = conversion_rate - 1

    # design the anti-imaging FIR low-pass filter; the stuffed signal is at
    # rate fs*conversion_rate, so its Nyquist is (fs*conversion_rate)/2
    new_nyquist = (fs * conversion_rate) / 2.0
    cutoff = (fs / 2.0 - 500.) / new_nyquist  # a bit under the old Nyquist
    taps = 511                                # filter length (must be odd)
    coeffs = scipy.signal.firwin(taps, cutoff)

    # zero-stuffing: each input sample followed by zeros_per_sample zeros
    stuffed = []
    for sample in data:
        stuffed.append(sample)
        stuffed.extend([0.0] * zeros_per_sample)

    filtered = scipy.signal.lfilter(coeffs, 1, stuffed)
    return (filtered, fs * conversion_rate)
def downsampling(conversion_rate, data, fs):
    """Downsample *data* by an integer factor.

    A low-pass FIR filter is applied first to suppress aliasing, then
    every conversion_rate-th sample is kept.

    :param conversion_rate: integer decimation factor (e.g. 2 for 1/2)
    :param data: input sample sequence at rate *fs*
    :param fs: input sampling rate in Hz
    :return: (downData, new_fs) where new_fs = fs / conversion_rate
    """
    # number of samples dropped between kept samples
    decimationSampleNum = conversion_rate - 1

    # Design the anti-aliasing FIR low-pass filter.  firwin() expects the
    # cutoff normalized by the Nyquist of the signal it filters, i.e. fs/2
    # (the data is still at the *input* rate at this point).
    # BUG FIX: the cutoff used to be normalized by the post-conversion
    # Nyquist ((fs/conversion_rate)/2), which placed the actual cutoff far
    # above the new Nyquist and let aliasing through.
    cutoff_hz = fs / conversion_rate / 2.0 - 500.  # a bit under the new Nyquist
    cF = cutoff_hz / (fs / 2.0)
    taps = 511  # filter length (must be odd)
    b = scipy.signal.firwin(taps, cF)

    # filter, then decimate
    data = scipy.signal.lfilter(b, 1, data)
    downData = []
    for i in range(0, len(data), decimationSampleNum + 1):
        downData.append(data[i])

    return (downData, fs / conversion_rate)
# Input wav used by the conversion test below.
FILENAME = "D:/project/FeqResp/Asus/asus_S54C_0807_Igo_Speech_FR_BandG/dut.wav"
#FILENAME = "../src_wav/3Quest_Standmic.wav"
if __name__ == "__main__":
    # upsampling factor
    up_conversion_rate = 2
    # downsampling divisor, given as its reciprocal (e.g. 1/2 -> specify 2)
    down_conversion_rate = 2
    down_conversion_wave = "D:/project/FeqResp/Asus/asus_S54C_0807_Igo_Speech_FR_BandG/dut_16k.wav"
    # read the test wav file
    #data,fs = readWav(FILENAME)
    data,fs = wav_read(FILENAME)
    # NOTE(review): prints the literal "fs {}" followed by the value;
    # probably meant print('fs {}'.format(fs)).
    print('fs {}',fs)
    upData,upFs = upsampling(up_conversion_rate,data,fs)
    downData,downFs = downsampling(down_conversion_rate,data,fs)
    #writeWav("../src_wav/up.wav",upData,upFs)
    writeWav(down_conversion_wave,downData,downFs)
    #wav_write("../src_wav/up.wav",upFs,upData)
    #wav_write("../src_wav/down.wav",downFs,downData)
|
[
"amyfanpti@gmail.com"
] |
amyfanpti@gmail.com
|
1581702e30bf17521341ad3c8b02a00a78d7d427
|
86948735307c603936f22fc029349bf669ecaa6e
|
/ble-sensor-pi/sensortag/echoserverzal.py
|
f219034d6d6edca7ddc785107e3321c4558c534d
|
[
"Apache-2.0"
] |
permissive
|
Kowo39/pythonPeter
|
2b43fb1fc8ef16700be53e912b04b53117f745d5
|
629550a160445760cb95f6e9b58df5264e24a9e1
|
refs/heads/master
| 2021-01-10T20:26:41.463098
| 2015-07-21T15:38:58
| 2015-07-21T15:38:58
| 39,141,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,116
|
py
|
# Minimal TCP logging server.
# NOTE: Python 2 syntax ("print >>" statements); this will not run under Python 3.
import socket
import sys
import time
import datetime
# create a TCP/IP socket
sock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# bind the socket to the port
server_address = ('localhost', 10000)
print >>sys.stderr, 'starting up on %s port %s' % server_address
sock.bind(server_address)
# listen for an incoming connection (backlog of 1)
sock.listen(1)
while True:
    # wait for a connection
    print >>sys.stderr, '\n\n\n\t\t*** A Server waiting for a connection ***'
    connection, client_address = sock.accept()
    ts = time.time()
    try:
        print >>sys.stderr, '\n - connection from', client_address
        print " -",datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
        # receive data in small (30-byte) chunks and log it
        while True:
            data = connection.recv(30)
            if data:
                pass  # NOTE(review): dead statement; the print below still runs
                print >>sys.stderr, ' - Data received from tag"%s"' %data
                # print >>sys.stderr, 'sendinng data back to the client'
                # connection.sendall(data)
            else:
                #print >>sys.stderr, '\nno more data my friend from', client_address
                break
    finally:
        pass #Clean up the conection
        # NOTE(review): the connection is never closed; consider re-enabling
        # connection.close() to avoid leaking sockets.
        #connection.close()
|
[
"root@raspberrypi.(none)"
] |
root@raspberrypi.(none)
|
56ed3798bc39ceb83d99ccb445df785a9b2636ee
|
5785d7ed431b024dd910b642f10a6781df50e4aa
|
/.venv/lib/python3.8/site-packages/aws_okta_processor/commands/getroles.py
|
784273d95ce8b03239ea7970f701411f6994ee96
|
[] |
no_license
|
kashyapa/interview-prep
|
45d77324446da34d99bf8efedb3544b367b5523e
|
7060c090c40602fb9c4778eace2078e1b51e235b
|
refs/heads/master
| 2023-07-28T13:12:49.515299
| 2021-09-06T14:33:25
| 2021-09-06T14:33:25
| 403,706,510
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,102
|
py
|
"""
Usage: aws-okta-processor get-roles [options]
Options:
-h --help Show this screen.
--version Show version.
--no-okta-cache Do not read okta cache.
--no-aws-cache Do not read aws cache.
-e --environment Dump auth into ENV variables.
-u <user_name>, --user=<user_name> Okta user name.
-p <user_pass>, --pass=<user_pass> Okta user password.
-o <okta_organization>, --organization=<okta_organization> Okta organization domain.
-a <okta_application>, --application=<okta_application> Okta application url.
-r <role_name>, --role=<role_name> AWS role ARN.
-R <region_name>, --region=<region_name> AWS region name.
-A <account>, --account-alias=<account> AWS account alias filter (uses wildcards).
-d <duration_seconds> ,--duration=<duration_seconds> Duration of role session [default: 3600].
-k <key>, --key=<key> Key used for generating and accessing cache.
-f <factor>, --factor=<factor> Factor type for MFA.
-s --silent Run silently.
--target-shell <target_shell> Target shell to output the export command.
--output=<output> Output type (json, text, profiles) [default: json]
--output-format=<format> Format string for the output
[default: {account},{role}]
"""
from __future__ import print_function
import os
import json
import re
import sys
from .base import Base
from aws_okta_processor.core.fetcher import SAMLFetcher
from botocore.credentials import JSONFileCache
# Shell snippet templates for exporting temporary AWS credentials
# (not referenced within this module; presumably used by the
# authenticate command -- verify against the sibling modules).
UNIX_EXPORT_STRING = ("export AWS_ACCESS_KEY_ID='{}' && "
                      "export AWS_SECRET_ACCESS_KEY='{}' && "
                      "export AWS_SESSION_TOKEN='{}'")

# PowerShell variant for Windows shells.
NT_EXPORT_STRING = ("$env:AWS_ACCESS_KEY_ID='{}'; "
                    "$env:AWS_SECRET_ACCESS_KEY='{}'; "
                    "$env:AWS_SESSION_TOKEN='{}'")

# Maps docopt option names to the AWS_OKTA_* keys used in
# self.configuration (consumed by GetRoles.get_configuration()).
CONFIG_MAP = {
    "--environment": "AWS_OKTA_ENVIRONMENT",
    "--user": "AWS_OKTA_USER",
    "--pass": "AWS_OKTA_PASS",
    "--organization": "AWS_OKTA_ORGANIZATION",
    "--application": "AWS_OKTA_APPLICATION",
    "--role": "AWS_OKTA_ROLE",
    "--duration": "AWS_OKTA_DURATION",
    "--key": "AWS_OKTA_KEY",
    "--factor": "AWS_OKTA_FACTOR",
    "--silent": "AWS_OKTA_SILENT",
    "--no-okta-cache": "AWS_OKTA_NO_OKTA_CACHE",
    "--no-aws-cache": "AWS_OKTA_NO_AWS_CACHE",
    "--output": "AWS_OKTA_OUTPUT",
    "--output-format": "AWS_OKTA_OUTPUT_FORMAT"
}
class GetRoles(Base):
    """``get-roles`` command: list the AWS accounts/roles available to the
    Okta user and print them as JSON, formatted text, or profile stanzas."""

    def get_accounts_and_roles(self):
        """Fetch the Okta app roles and reshape them into a plain dict.

        Returns a dict holding the application URL, user, organization and
        a list of accounts, each with its parsed name/id and role list.
        """
        cache = JSONFileCache()
        saml_fetcher = SAMLFetcher(
            self,
            cache=cache
        )

        app_and_role = saml_fetcher.get_app_roles()

        result_accounts = []
        results = {
            "application_url": app_and_role["Application"],
            "accounts": result_accounts,
            "user": app_and_role["User"],
            "organization": app_and_role["Organization"],
        }
        accounts = app_and_role["Accounts"]
        for name_raw in accounts:
            # Account labels look like "Account: <alias> (<id>)"; pull the
            # alias and numeric id out of that label.
            # NOTE(review): re.match returns None if a label ever deviates
            # from this shape, which would raise TypeError just below.
            account_parts = re.match(r"(Account:) ([a-zA-Z0-9-_]+) \(([0-9]+)\)", name_raw)
            account = account_parts[2]
            account_id = account_parts[3]
            roles = accounts[name_raw]
            result_roles = []
            result_account = {
                "name": account,
                "id": account_id,
                "name_raw": name_raw,
                "roles": result_roles
            }
            result_accounts.append(result_account)
            for role in roles:
                # role suffix = last segment after the delimiter ("-" unless
                # overridden via AWS_OKTA_ROLE_SUFFIX_DELIMITER)
                role_suffix = role.split(os.environ.get("AWS_OKTA_ROLE_SUFFIX_DELIMITER", "-"))[-1]
                result_roles.append({
                    "name": role,
                    "suffix": role_suffix
                })
        return results

    def run(self):
        """Entry point: emit the roles as json, text, or profile sections."""
        accounts_and_roles = self.get_accounts_and_roles()
        output = self.configuration.get("AWS_OKTA_OUTPUT", "json").lower()
        if output == "json":
            sys.stdout.write(json.dumps(accounts_and_roles))
        else:
            output_format = self.configuration.get("AWS_OKTA_OUTPUT_FORMAT", "{account},{role}")
            if output == "profiles":
                # fixed template producing aws-config style sections with a
                # credential_process entry per account/role pair
                output_format = '\n[{account}-{role_suffix}]\ncredential_process=aws-okta-processor authenticate ' \
                                '--organization="{organization}" --user="{user}" --application="{application_url}" ' \
                                '--role="{role}" --key="{account}-{role}"'
            formatted_roles = self.get_formatted_roles(accounts_and_roles, output_format)
            for role in formatted_roles:
                sys.stdout.write(role + "\n")

    def get_formatted_roles(self, accounts_and_roles, output_format):
        """Yield one formatted line per account/role pair.

        *output_format* may reference: account, account_id, account_raw,
        role, organization, application_url, user, role_suffix.
        """
        application_url = accounts_and_roles["application_url"]
        accounts = accounts_and_roles["accounts"]
        organization = accounts_and_roles["organization"]
        user = accounts_and_roles["user"]
        for account in accounts:
            account_name = account["name"]
            account_id = account["id"]
            account_raw = account["name_raw"]
            roles = account["roles"]
            for role in roles:
                yield output_format.format(
                    account=account_name,
                    account_id=account_id,
                    account_raw=account_raw,
                    role=role["name"],
                    organization=organization,
                    application_url=application_url,
                    user=user,
                    role_suffix=role["suffix"].lower()
                )

    def get_pass(self):
        """Return the configured Okta password, or implicitly None when unset."""
        if self.configuration["AWS_OKTA_PASS"]:
            return self.configuration["AWS_OKTA_PASS"]

    def get_key_dict(self):
        """Key material identifying this user/organization's cache entry."""
        return {
            "Organization": self.configuration["AWS_OKTA_ORGANIZATION"],
            "User": self.configuration["AWS_OKTA_USER"],
            "Key": self.configuration["AWS_OKTA_KEY"]
        }

    def get_configuration(self, options=None):
        """Merge docopt *options* and environment variables into a config
        dict keyed by the AWS_OKTA_* names from CONFIG_MAP.

        CLI options win over environment variables; keys set by neither
        are still present with value None.
        """
        configuration = {}
        for param, var in CONFIG_MAP.items():
            if options.get(param, None):
                configuration[var] = options[param]

            if var not in configuration.keys():
                if var in os.environ:
                    configuration[var] = os.environ[var]
                else:
                    configuration[var] = None

        return configuration
|
[
"schandra2@godaddy.com"
] |
schandra2@godaddy.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.