hexsha
stringlengths 40
40
| size
int64 5
2.06M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
248
| max_stars_repo_name
stringlengths 5
125
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
248
| max_issues_repo_name
stringlengths 5
125
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
248
| max_forks_repo_name
stringlengths 5
125
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 5
2.06M
| avg_line_length
float64 1
1.02M
| max_line_length
int64 3
1.03M
| alphanum_fraction
float64 0
1
| count_classes
int64 0
1.6M
| score_classes
float64 0
1
| count_generators
int64 0
651k
| score_generators
float64 0
1
| count_decorators
int64 0
990k
| score_decorators
float64 0
1
| count_async_functions
int64 0
235k
| score_async_functions
float64 0
1
| count_documentation
int64 0
1.04M
| score_documentation
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
58bb3bdee68016c8f1865176bbbb0531b4055727
| 855
|
py
|
Python
|
lintcode/1375.2.py
|
jianershi/algorithm
|
c3c38723b9c5f1cc745550d89e228f92fd4abfb2
|
[
"MIT"
] | 1
|
2021-01-08T06:57:49.000Z
|
2021-01-08T06:57:49.000Z
|
lintcode/1375.2.py
|
jianershi/algorithm
|
c3c38723b9c5f1cc745550d89e228f92fd4abfb2
|
[
"MIT"
] | null | null | null |
lintcode/1375.2.py
|
jianershi/algorithm
|
c3c38723b9c5f1cc745550d89e228f92fd4abfb2
|
[
"MIT"
] | 1
|
2021-01-08T06:57:52.000Z
|
2021-01-08T06:57:52.000Z
|
"""
1375. Substring With At Least K Distinct Characters
"""
class Solution:
"""
@param s: a string
@param k: an integer
    @return: the number of substrings that contain at least k distinct characters
"""
def kDistinctCharacters(self, s, k):
# Write your code here
n = len(s)
left = 0
count = [0] * 256
distinct_count = 0
substring_count = 0
for right in range(n):
count[ord(s[right])] += 1
if count[ord(s[right])] == 1:
distinct_count += 1
while left <= right and distinct_count >= k:
substring_count += n - right
count[ord(s[left])] -= 1
if count[ord(s[left])] == 0:
distinct_count -= 1
left += 1
return substring_count
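# A hypothetical usage sketch (not part of the original submission; LintCode
# normally instantiates Solution itself). The sample string and k below are
# illustrative; 36 is the count this sliding-window implementation returns.
if __name__ == "__main__":
    print(Solution().kDistinctCharacters("abcabcabca", 3))  # -> 36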
| 28.5
| 91
| 0.512281
| 793
| 0.927485
| 0
| 0
| 0
| 0
| 0
| 0
| 232
| 0.271345
|
58bb7d04b96141208c9caee423f5f2553e1e7354
| 390
|
py
|
Python
|
ex29_half.py
|
youknowone/learn-python3-thw-code-ko
|
3b7fccaf3eed7427e437004cfe3c4908823f5e41
|
[
"MIT"
] | null | null | null |
ex29_half.py
|
youknowone/learn-python3-thw-code-ko
|
3b7fccaf3eed7427e437004cfe3c4908823f5e41
|
[
"MIT"
] | null | null | null |
ex29_half.py
|
youknowone/learn-python3-thw-code-ko
|
3b7fccaf3eed7427e437004cfe3c4908823f5e41
|
[
"MIT"
] | null | null | null |
people = 20
cats = 30
dogs = 15
if people < cats:
    print("Too many cats! The world is doomed!")
if people > cats:
    print("Not many cats! The world is saved!")
if people < dogs:
    print("The world is drooled on!")
if people > dogs:
    print("The world is dry!")
dogs += 5
if people >= dogs:
    print("People are greater than or equal to dogs.")
if people <= dogs:
    print("People are less than or equal to dogs.")
if people == dogs:
    print("People are dogs.")
| 13
| 36
| 0.584615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 292
| 0.523297
|
58bbe52ab96a55b367459bffd53e878ab429b0e4
| 1,019
|
py
|
Python
|
env/lib/python3.6/site-packages/traits/util/tests/test_import_symbol.py
|
Raniac/NEURO-LEARN
|
3c3acc55de8ba741e673063378e6cbaf10b64c7a
|
[
"Apache-2.0"
] | 8
|
2019-05-29T09:38:30.000Z
|
2021-01-20T03:36:59.000Z
|
env/lib/python3.6/site-packages/traits/util/tests/test_import_symbol.py
|
Raniac/neurolearn_dev
|
3c3acc55de8ba741e673063378e6cbaf10b64c7a
|
[
"Apache-2.0"
] | 12
|
2021-03-09T03:01:16.000Z
|
2022-03-11T23:59:36.000Z
|
env/lib/python3.6/site-packages/traits/util/tests/test_import_symbol.py
|
Raniac/NEURO-LEARN
|
3c3acc55de8ba741e673063378e6cbaf10b64c7a
|
[
"Apache-2.0"
] | 1
|
2020-07-17T12:49:49.000Z
|
2020-07-17T12:49:49.000Z
|
""" Tests for the import manager. """
from traits.util.api import import_symbol
from traits.testing.unittest_tools import unittest
class TestImportSymbol(unittest.TestCase):
""" Tests for the import manager. """
def test_import_dotted_symbol(self):
""" import dotted symbol """
import tarfile
symbol = import_symbol("tarfile.TarFile")
self.assertEqual(symbol, tarfile.TarFile)
return
def test_import_nested_symbol(self):
""" import nested symbol """
import tarfile
symbol = import_symbol("tarfile:TarFile.open")
self.assertEqual(symbol, tarfile.TarFile.open)
return
def test_import_dotted_module(self):
""" import dotted module """
symbol = import_symbol("traits.util.import_symbol:import_symbol")
self.assertEqual(symbol, import_symbol)
return
if __name__ == "__main__":
unittest.main()
#### EOF ######################################################################
| 22.644444
| 79
| 0.614328
| 753
| 0.73896
| 0
| 0
| 0
| 0
| 0
| 0
| 327
| 0.320903
|
58bc5c4ebc0423782cb9f4ff3dd73ea7e914bd8c
| 7,692
|
py
|
Python
|
cubecode/ไบ้ถๆฎต็ฎๆณๅ้/python็/RubiksCube-TwophaseSolver-master/client_gui.py
|
YuYuCong/Color-recognition-of-Rubik-s-Cube
|
35d5af5383ed56d38e596983aaeda98540fdb646
|
[
"CC0-1.0"
] | 11
|
2018-07-28T03:20:26.000Z
|
2022-02-18T07:36:35.000Z
|
cubecode/ไบ้ถๆฎต็ฎๆณๅ้/python็/RubiksCube-TwophaseSolver-master/client_gui.py
|
technicianliu/Color-recognition-of-Rubik-s-Cube
|
35d5af5383ed56d38e596983aaeda98540fdb646
|
[
"CC0-1.0"
] | null | null | null |
cubecode/ไบ้ถๆฎต็ฎๆณๅ้/python็/RubiksCube-TwophaseSolver-master/client_gui.py
|
technicianliu/Color-recognition-of-Rubik-s-Cube
|
35d5af5383ed56d38e596983aaeda98540fdb646
|
[
"CC0-1.0"
] | 9
|
2018-07-28T03:20:29.000Z
|
2021-05-09T05:54:30.000Z
|
# ################ A simple graphical interface which communicates with the server #####################################
from tkinter import *
import socket
import face
import cubie
# ################################## some global variables and constants ###############################################
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = '8080'
width = 60 # width of a facelet in pixels
facelet_id = [[[0 for col in range(3)] for row in range(3)] for face in range(6)]
colorpick_id = [0 for i in range(6)]
curcol = None
t = ("U", "R", "F", "D", "L", "B")
cols = ("yellow", "green", "red", "white", "blue", "orange")
########################################################################################################################
# ################################################ Diverse functions ###################################################
def show_text(txt):
"""Displays messages."""
print(txt)
display.insert(INSERT, txt)
root.update_idletasks()
def create_facelet_rects(a):
"""Initializes the facelet grid on the canvas."""
offset = ((1, 0), (2, 1), (1, 1), (1, 2), (0, 1), (3, 1))
for f in range(6):
for row in range(3):
y = 10 + offset[f][1] * 3 * a + row * a
for col in range(3):
x = 10 + offset[f][0] * 3 * a + col * a
facelet_id[f][row][col] = canvas.create_rectangle(x, y, x + a, y + a, fill="grey")
if row == 1 and col == 1:
canvas.create_text(x + width // 2, y + width // 2, font=("", 14), text=t[f], state=DISABLED)
for f in range(6):
canvas.itemconfig(facelet_id[f][1][1], fill=cols[f])
def create_colorpick_rects(a):
"""Initializes the "paintbox" on the canvas"""
global curcol
global cols
for i in range(6):
x = (i % 3)*(a+5) + 7*a
y = (i // 3)*(a+5) + 7*a
colorpick_id[i] = canvas.create_rectangle(x, y, x + a, y + a, fill=cols[i])
canvas.itemconfig(colorpick_id[0], width=4)
curcol = cols[0]
def get_definition_string():
"""Generates the cube definition string from the facelet colors."""
color_to_facelet = {}
for i in range(6):
color_to_facelet.update({canvas.itemcget(facelet_id[i][1][1], "fill"): t[i]})
s = ''
for f in range(6):
for row in range(3):
for col in range(3):
s += color_to_facelet[canvas.itemcget(facelet_id[f][row][col], "fill")]
return s
########################################################################################################################
# ############################### Solve the displayed cube with a local or remote server ###############################
def solve():
"""Connects to the server and returns the solving maneuver."""
display.delete(1.0, END) # clear output window
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error:
show_text('Failed to create socket')
return
# host = 'f9f0b2jt6zmzyo6b.myfritz.net' # my RaspberryPi, if online
host = txt_host.get(1.0, END).rstrip() # default is localhost
port = int(txt_port.get(1.0, END)) # default is port 8080
try:
remote_ip = socket.gethostbyname(host)
except socket.gaierror:
show_text('Hostname could not be resolved.')
return
try:
s.connect((remote_ip, port))
except:
show_text('Cannot connect to server!')
return
show_text('Connected with ' + remote_ip + '\n')
try:
defstr = get_definition_string()+'\n'
except:
show_text('Invalid facelet configuration.\nWrong or missing colors.')
return
show_text(defstr)
try:
s.sendall((defstr+'\n').encode())
except:
show_text('Cannot send cube configuration to server.')
return
show_text(s.recv(2048).decode())
########################################################################################################################
# ################################# Functions to change the facelet colors #############################################
def clean():
"""Restores the cube to a clean cube."""
for f in range(6):
for row in range(3):
for col in range(3):
canvas.itemconfig(facelet_id[f][row][col], fill=canvas.itemcget(facelet_id[f][1][1], "fill"))
def empty():
"""Removes the facelet colors except the center facelets colors."""
for f in range(6):
for row in range(3):
for col in range(3):
if row != 1 or col != 1:
canvas.itemconfig(facelet_id[f][row][col], fill="grey")
def random():
"""Generates a random cube and sets the corresponding facelet colors."""
cc = cubie.CubieCube()
cc.randomize()
fc = cc.to_facelet_cube()
idx = 0
for f in range(6):
for row in range(3):
for col in range(3):
canvas.itemconfig(facelet_id[f][row][col], fill=cols[fc.f[idx]] )
idx += 1
########################################################################################################################
# ################################### Edit the facelet colors ##########################################################
def click(event):
"""Defines how to react on left mouse clicks"""
global curcol
idlist = canvas.find_withtag("current")
if len(idlist) > 0:
if idlist[0] in colorpick_id:
curcol = canvas.itemcget("current", "fill")
for i in range(6):
canvas.itemconfig(colorpick_id[i], width=1)
canvas.itemconfig("current", width=5)
else:
canvas.itemconfig("current", fill=curcol)
########################################################################################################################
# ###################################### Generate and display the TK_widgets ##########################################
root = Tk()
root.wm_title("Solver Client")
canvas = Canvas(root, width=12 * width + 20, height=9 * width + 20)
canvas.pack()
bsolve = Button(text="Solve", height=2, width=10, relief=RAISED, command=solve)
bsolve_window = canvas.create_window(10 + 10.5 * width, 10 + 6.5 * width, anchor=NW, window=bsolve)
bclean = Button(text="Clean", height=1, width=10, relief=RAISED, command=clean)
bclean_window = canvas.create_window(10 + 10.5 * width, 10 + 7.5 * width, anchor=NW, window=bclean)
bempty = Button(text="Empty", height=1, width=10, relief=RAISED, command=empty)
bempty_window = canvas.create_window(10 + 10.5 * width, 10 + 8 * width, anchor=NW, window=bempty)
brandom = Button(text="Random", height=1, width=10, relief=RAISED, command=random)
brandom_window = canvas.create_window(10 + 10.5 * width, 10 + 8.5 * width, anchor=NW, window=brandom)
display = Text(height=7, width=39)
text_window = canvas.create_window(10 + 6.5 * width, 10 + .5 * width, anchor=NW, window=display)
hp = Label(text=' Hostname and Port')
hp_window = canvas.create_window(10 + 0 * width, 10 + 0.6 * width, anchor=NW, window=hp)
txt_host = Text(height=1, width=20)
txt_host_window = canvas.create_window(10 + 0 * width, 10 + 1 * width, anchor=NW, window=txt_host)
txt_host.insert(INSERT, DEFAULT_HOST)
txt_port = Text(height=1, width=20)
txt_port_window = canvas.create_window(10 + 0 * width, 10 + 1.5 * width, anchor=NW, window=txt_port)
txt_port.insert(INSERT, DEFAULT_PORT)
canvas.bind("<Button-1>", click)
create_facelet_rects(width)
create_colorpick_rects(width)
root.mainloop()
########################################################################################################################
| 40.484211
| 120
| 0.518201
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,643
| 0.343604
|
58bc76fe979d8a17599711a8021f4425b357315a
| 1,159
|
py
|
Python
|
bootcamp/wiki/core/compat.py
|
basiltiger/easy_bootcamp
|
875b9ed287f1a7824bb38f142dbe2f3b1ce54389
|
[
"MIT"
] | null | null | null |
bootcamp/wiki/core/compat.py
|
basiltiger/easy_bootcamp
|
875b9ed287f1a7824bb38f142dbe2f3b1ce54389
|
[
"MIT"
] | null | null | null |
bootcamp/wiki/core/compat.py
|
basiltiger/easy_bootcamp
|
875b9ed287f1a7824bb38f142dbe2f3b1ce54389
|
[
"MIT"
] | null | null | null |
"""Abstraction layer to deal with Django related changes in order to keep
compatibility with several Django versions simultaneously."""
from __future__ import unicode_literals
from django.conf import settings as django_settings
USER_MODEL = getattr(django_settings, 'AUTH_USER_MODEL', 'auth.User')
# Django 1.11 Widget.build_attrs has a different signature, designed for the new
# template based rendering. The previous version was more useful for our needs,
# so we restore that version.
# When support for Django < 1.11 is dropped, we should look at using the
# new template based rendering, at which point this probably won't be needed at all.
class BuildAttrsCompat(object):
def build_attrs_compat(self, extra_attrs=None, **kwargs):
"Helper function for building an attribute dictionary."
attrs = self.attrs.copy()
if extra_attrs is not None:
attrs.update(extra_attrs)
if kwargs is not None:
attrs.update(kwargs)
return attrs
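# Hedged usage sketch (not part of the original module): any widget can mix in
# BuildAttrsCompat ahead of its Django base class and call build_attrs_compat()
# wherever the pre-1.11 build_attrs() behaviour is wanted. The class name and
# attribute values below are illustrative only.
class ExampleCompatWidget(BuildAttrsCompat):
    attrs = {"class": "wiki-input"}

    def example_attrs(self):
        # merges self.attrs with the extras, mirroring the old signature
        return self.build_attrs_compat({"placeholder": "Search"}, maxlength=80)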
try:
# Python 3
from urllib.parse import urljoin # noqa
except ImportError:
# Python 2
from urlparse import urljoin # noqa @UnusedImport
| 36.21875
| 84
| 0.734254
| 350
| 0.301984
| 0
| 0
| 0
| 0
| 0
| 0
| 608
| 0.52459
|
58bd378d11da26df2129a68edbd47f10e2375dc5
| 10,649
|
py
|
Python
|
ethereum.py/ethereum/clients/ethereum.py
|
dixonwhitmire/connect-clients
|
09bf6f53f0a4fc923d1fb18f75ce86521880517c
|
[
"Apache-2.0"
] | null | null | null |
ethereum.py/ethereum/clients/ethereum.py
|
dixonwhitmire/connect-clients
|
09bf6f53f0a4fc923d1fb18f75ce86521880517c
|
[
"Apache-2.0"
] | 6
|
2021-07-13T19:58:58.000Z
|
2021-11-02T21:25:14.000Z
|
ethereum.py/ethereum/clients/ethereum.py
|
dixonwhitmire/connect-clients
|
09bf6f53f0a4fc923d1fb18f75ce86521880517c
|
[
"Apache-2.0"
] | 1
|
2021-07-13T19:22:04.000Z
|
2021-07-13T19:22:04.000Z
|
"""
ethereum.py
ethereum.py contains an EthereumClient class that provides functions for interacting
with the Coverage.sol solidity contract on an Ethereum blockchain network.
"""
import asyncio
import datetime
import json
import logging
import os
from ethereum.clients.nats import get_nats_client
from ethereum.config import get_settings, nats_eligibility_subject
from ethereum.exceptions import EthereumNetworkConnectionError
from hexbytes import HexBytes
from typing import Optional, Any, List
from web3 import Web3
logger = logging.getLogger(__name__)
# client instance
eth_client = None
class EthereumClient:
"""
Ethereum client for LFH that utilizes the Web3 library for interacting
with an Ethereum blockchain network.
"""
def __init__(self, **qwargs):
logger.debug("Initializing EthereumClient")
self.eth_network_uri = qwargs["eth_network_uri"]
logger.debug("Initializing Web3")
self.client: Optional[Web3] = Web3(Web3.HTTPProvider(self.eth_network_uri))
self.from_acct = {"from": self.client.eth.accounts[0]}
if (self.client and self.client.isConnected()):
logger.info(f"Connected to the Ethereum network at: {self.eth_network_uri}")
self.contract = self.client.eth.contract(address=qwargs["contract_address"],
abi=qwargs["contract_abi"])
event_filter = self.contract.events.EligibilityResult.createFilter(fromBlock="latest")
self.cancelled = False
contract_event_loop = asyncio.get_event_loop()
contract_event_loop.create_task(self.event_loop(event_filter, qwargs["event_poll_interval"]))
logger.info(f"Connected to the contract at: {qwargs['contract_address']}")
else:
error_msg = f"Failed to connect to the Ethereum network at: {self.eth_network_uri}"
logger.error(error_msg)
raise EthereumNetworkConnectionError(error_msg)
def add_coverage_resource(self, path: str, fhir_json: Any, payor_ref: str,
subscriber_ref: str, coverage_start: int, coverage_end: int):
"""
Send a Coverage FHIR resource to the Coverage.sol contract.
:param path: FHIR path of the resource, e.g. /Coverage/001
:param fhir_json: The string representation of the FHIR resource
:param payor_ref: coverage.payor[0].reference
:param subscriber_ref: coverage.subscriber.reference
:param coverage_start: coverage.period.start converted to a timestamp
:param coverage_end: coverage.period.end converted to a timestamp
:return: The hash of the submitted transaction or None
"""
if not self.client.isConnected():
error = f"Not connected to the Ethereum network"
logger.error(error)
return {"error": error}
try:
tx_hash = self.contract.functions.add_coverage_resource(path,
json.dumps(fhir_json),
payor_ref,
subscriber_ref,
coverage_start,
coverage_end).transact(self.from_acct)
tx_receipt = self.client.eth.waitForTransactionReceipt(tx_hash)
receipt_dict = dict(tx_receipt)
hash_str = receipt_dict["transactionHash"].hex()
logger.info(f"tx hash: {hash_str}")
return {"result": hash_str}
except Exception as ex:
error = f"Transaction error {ex}"
logger.error(error)
return {"error": error}
def check_eligibility(self, path: str, fhir_json: Any, insurer_ref: str,
patient_ref: str, coverage_ref: str, coverage_date: int):
"""
Send a CoverageEligibilityRequest FHIR resource to the Coverage.sol contract.
:param path: FHIR path of the resource, e.g. /CoverageEligibilityRequest/001
:param fhir_json: The string representation of the FHIR resource
:param insurer_ref: coverageeligibilityrequest.insurer.reference
:param patient_ref: coverageeligibilityrequest.patient.reference
:param coverage_ref: coverageeligibilityrequest.insurance[0].coverage
:param coverage_date: coverageeligibilityrequest.created converted to a timestamp
:return: The hash of the submitted transaction or None
"""
if not self.client.isConnected():
error = f"Not connected to the Ethereum network"
logger.error(error)
return {"error": error}
try:
tx_hash = self.contract.functions.check_eligibility(path,
json.dumps(fhir_json),
insurer_ref,
patient_ref,
coverage_ref,
coverage_date).transact(self.from_acct)
tx_receipt = self.client.eth.waitForTransactionReceipt(tx_hash)
receipt_dict = dict(tx_receipt)
hash_str = receipt_dict["transactionHash"].hex()
logger.info(f"tx hash: {hash_str}")
return {"result": hash_str}
except Exception as ex:
error = f"Transaction error {ex}"
logger.error(error)
return {"error": error}
def add_fhir_resource(self, fhir_type: str, path: str, fhir_json: Any):
"""
Send a Patient or Organization FHIR resource to the Coverage.sol contract.
:param fhir_type: FHIR type of the resource, e.g. Patient
:param path: FHIR path of the resource, e.g. /Patient/001
:param fhir_json: The string representation of the FHIR resource
:return: The hash of the submitted transaction or None
"""
if not self.client.isConnected():
error = f"Not connected to the Ethereum network"
logger.error(error)
return {"error": error}
try:
tx_hash = self.contract.functions.add_fhir_resource(fhir_type,
path,
json.dumps(fhir_json)).transact(self.from_acct)
tx_receipt = self.client.eth.waitForTransactionReceipt(tx_hash)
receipt_dict = dict(tx_receipt)
hash_str = receipt_dict["transactionHash"].hex()
logger.info(f"tx hash: {hash_str}")
return {"result": hash_str}
except Exception as ex:
error = f"Transaction error {ex}"
logger.error(error)
return {"error": error}
def close(self):
self.cancelled = True
async def event_loop(self, event_filter, poll_interval: int):
while not self.cancelled:
for event in event_filter.get_new_entries():
await self.handle_event(json.loads(Web3.toJSON(event)))
await asyncio.sleep(poll_interval)
async def handle_event(self, event: dict):
"""
Send a FHIR CoverageEligibilityResponse based on the eligibility decision from the contract.
:param event: The JSON contract event containing the eligibility decision and supporting info.
"""
logger.trace(f"Received contract event: {event}")
path: List[str] = event["args"]["path"].split("/")
request_id: str = path[1]
result: bool = event["args"]["result"]
disposition: str = "Policy is currently in effect."
if not result:
disposition = "Policy is not in effect."
today: str = datetime.date.today().isoformat()
message: Any = {
"resourceType": "CoverageEligibilityResponse",
"id": request_id,
"text": {
"status": "generated",
"div": "<div xmlns=\"http://www.w3.org/1999/xhtml\">A human-readable rendering of the CoverageEligibilityResponse.</div>"
},
"identifier": [
{
"system": "http://localhost:5000/fhir/coverageeligibilityresponse/" + request_id,
"value": request_id
}
],
"status": "active",
"purpose": [
"validation"
],
"patient": {
"reference": event["args"]["patient_ref"]
},
"created": today,
"request": {
"reference": "http://www.BenefitsInc.com/fhir/coverageeligibilityrequest/" + request_id
},
"outcome": "complete",
"disposition": disposition,
"insurer": {
"reference": event["args"]["insurer_ref"]
},
"insurance": [
{
"coverage": {
"reference": event["args"]["coverage_ref"]
},
"inforce": result
}
]
        }
nats_client = await get_nats_client()
msg_str = json.dumps(message)
logger.info(f"CoverageEligibilityResponse: {msg_str}")
await nats_client.publish(nats_eligibility_subject, bytearray(msg_str, "utf-8"))
logger.trace("Sent CoverageEligibilityResponse via NATS")
def get_ethereum_client() -> Optional[EthereumClient]:
"""
:return: a connected EthereumClient instance
"""
global eth_client
if not eth_client:
settings = get_settings()
# load ABI file
abi_file: str = os.path.join(settings.ethereum_config_directory, settings.ethereum_contract_abi)
contract_info = json.load(open(abi_file))
eth_client = EthereumClient(
eth_network_uri=settings.ethereum_network_uri,
contract_address=settings.ethereum_contract_address,
contract_abi=contract_info["abi"],
event_poll_interval=settings.ethereum_event_poll_seconds
)
return eth_client
def stop_ethereum_client():
client = get_ethereum_client()
client.close()
class HexJsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, HexBytes):
return obj.hex()
return super().default(obj)
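# Hedged usage sketch (not part of the original module): assumes the settings
# loaded by get_ethereum_client() point at a reachable Ethereum node and a
# deployed Coverage.sol contract; the FHIR path and resource below are made up.
if __name__ == "__main__":
    client = get_ethereum_client()
    outcome = client.add_fhir_resource(
        "Patient", "/Patient/001", {"resourceType": "Patient", "id": "001"})
    logger.info("add_fhir_resource outcome: %s", outcome)
    stop_ethereum_client()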
| 42.426295
| 137
| 0.582966
| 9,273
| 0.870786
| 0
| 0
| 0
| 0
| 2,580
| 0.242276
| 3,673
| 0.344915
|
58bd44d4180b36c4fc8b719cc1462f1b22fa94a6
| 1,759
|
py
|
Python
|
cli/actions/mc_combination_action.py
|
daneshvar-amrollahi/polar
|
b72254e1a8354e6a10135cd3990b8edfda02559e
|
[
"MIT"
] | 1
|
2021-11-14T05:52:21.000Z
|
2021-11-14T05:52:21.000Z
|
cli/actions/mc_combination_action.py
|
daneshvar-amrollahi/polar
|
b72254e1a8354e6a10135cd3990b8edfda02559e
|
[
"MIT"
] | null | null | null |
cli/actions/mc_combination_action.py
|
daneshvar-amrollahi/polar
|
b72254e1a8354e6a10135cd3990b8edfda02559e
|
[
"MIT"
] | null | null | null |
from argparse import Namespace
from .action import Action
from symengine.lib.symengine_wrapper import sympify
from termcolor import colored
from program.mc_comb_finder import MCCombFinder
from cli.common import prepare_program
class MCCombinationAction(Action):
cli_args: Namespace
def __init__(self, cli_args: Namespace):
self.cli_args = cli_args
def __call__(self, *args, **kwargs):
benchmark = args[0]
combination_deg = self.cli_args.mc_comb_deg
program = prepare_program(benchmark, self.cli_args)
if len(program.non_mc_variables) == 0:
print(f"--mc_comb not applicable to {benchmark} since all variables are already moment computable.")
return
combination_vars = []
if len(combination_vars) == 0:
for var in program.non_mc_variables:
if var in program.original_variables:
combination_vars.append(var)
else:
combination_vars = [sympify(v) for v in self.cli_args.mc_comb]
print(colored("-------------------", "cyan"))
print(colored("- Analysis Result -", "cyan"))
print(colored("-------------------", "cyan"))
print()
combinations = MCCombFinder.find_good_combination(
combination_vars, combination_deg, program, self.cli_args.numeric_roots, self.cli_args.numeric_croots,
self.cli_args.numeric_eps
)
if combinations is None:
print(f"No combination found with degree {combination_deg}. Try using other degrees.")
else:
for combination in combinations:
candidate, solution = combination[0], combination[1]
print(f"E({candidate})[n] = {solution}")
| 37.425532
| 114
| 0.637862
| 1,529
| 0.869244
| 0
| 0
| 0
| 0
| 0
| 0
| 286
| 0.162592
|
58c073b6ae4e5dbeb4eb910c743f8e1c8773b328
| 494
|
py
|
Python
|
docker/gunicorn.py
|
admariner/madewithwagtail
|
a43b3263c0f151ece4994fccd561b0575db4979f
|
[
"MIT"
] | null | null | null |
docker/gunicorn.py
|
admariner/madewithwagtail
|
a43b3263c0f151ece4994fccd561b0575db4979f
|
[
"MIT"
] | null | null | null |
docker/gunicorn.py
|
admariner/madewithwagtail
|
a43b3263c0f151ece4994fccd561b0575db4979f
|
[
"MIT"
] | null | null | null |
import gunicorn
accesslog = "-"
errorlog = "-"
access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s" "%({X-Forwarded-For}i)s"'
capture_output = True
forwarded_allow_ips = "*"
secure_scheme_headers = {"X-CLOUDFRONT": "yes"}
workers = 2
worker_class = "gthread"
worker_connections = 5
bind = ":8000"
keep_alive = 75
chdir = "/madewithwagtail"
# Obfuscate the Server header (to the md5sum of "Springload")
gunicorn.SERVER_SOFTWARE = "04e96149a2f64d6135c82d199ab62122"
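# The hash above can presumably be reproduced with something along these lines
# (illustrative, not part of the original config):
#   python -c "import hashlib; print(hashlib.md5(b'Springload').hexdigest())"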
| 27.444444
| 106
| 0.690283
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 243
| 0.491903
|
58c135e6998a8525b0faabf5c07d8105ddf708e8
| 1,596
|
py
|
Python
|
Lista 2/Questao_1.py
|
flaviomelo10/Python-para-PLN
|
845da043c2618f3aace655cf065fca3d866342d5
|
[
"MIT"
] | null | null | null |
Lista 2/Questao_1.py
|
flaviomelo10/Python-para-PLN
|
845da043c2618f3aace655cf065fca3d866342d5
|
[
"MIT"
] | null | null | null |
Lista 2/Questao_1.py
|
flaviomelo10/Python-para-PLN
|
845da043c2618f3aace655cf065fca3d866342d5
|
[
"MIT"
] | null | null | null |
# -- encoding:utf-8 -- #
'''
Create a variable with the string " instituto de ciências matemáticas e de computação" and do the following:
a. Concatenate (append) another string, "usp"
b. Concatenate (append) another piece of information: 2021
c. Check the length of the new string (with the information added in items a and b), counting characters and spaces
d. Convert the string entirely to uppercase
e. Convert the string entirely to lowercase
f. Remove the space at the beginning of the string and print the string
g. Replace every letter "a" with "x"
h. Split the string into individual words
i. Check how many words the string contains
j. Split the string on the word "de"
k. Now check how many words/phrases were formed by the split on the word "de"
l. Join the words that were split (you may use the result of item h or j)
m. Join the words that were split, but now separated by a slash rather than spaces (you may use the result of item h or j)
'''
texto = " instituto de ciências matemáticas e de computação"
#a)
texto = texto + " usp"
print(texto)
#b)
texto = texto + " 2021"
print(texto)
#c)
tamanho = len(texto)
print(tamanho)
#d)
print(texto.upper())
#e)
print(texto.lower())
#f)
print(texto[1:])
print(texto.strip())
#g)
print(texto.replace('a', 'x'))
#h
separar = texto.split()
print(separar)
#i)
print(len(separar))
#j)
separar2 = texto.split('de')
print(separar2)
#k)
print(len(separar2))
#l)
juntar = " ".join(separar)
print(juntar)
#m)
juntar2 = "/".join(separar)
print(juntar2)
| 24.181818
| 155
| 0.734962
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,221
| 0.738657
|
58c1cf2e7948459916f7782bc8f6b76e361226be
| 753
|
py
|
Python
|
breadcrumbs/templatetags/breadcrumbs_tags.py
|
LinuxOSsk/Shakal-NG
|
c4091c7972cffd86f64aa9f9a058d2907a56e5eb
|
[
"MIT"
] | 10
|
2016-02-06T15:40:30.000Z
|
2018-09-27T15:15:13.000Z
|
breadcrumbs/templatetags/breadcrumbs_tags.py
|
LinuxOSsk/Shakal-NG
|
c4091c7972cffd86f64aa9f9a058d2907a56e5eb
|
[
"MIT"
] | 94
|
2016-02-04T18:39:36.000Z
|
2022-01-20T05:25:00.000Z
|
breadcrumbs/templatetags/breadcrumbs_tags.py
|
LinuxOSsk/Shakal-NG
|
c4091c7972cffd86f64aa9f9a058d2907a56e5eb
|
[
"MIT"
] | 8
|
2016-05-10T20:29:53.000Z
|
2021-02-07T00:50:31.000Z
|
# -*- coding: utf-8 -*-
from django.shortcuts import resolve_url
from django.template.loader import render_to_string
from django_jinja import library
from jinja2 import contextfunction
@contextfunction
@library.global_function
def breadcrumb(context, contents, *args, **kwargs):
class_name = kwargs.pop('class', False)
url = kwargs.pop('url', False)
if url is not False:
url = resolve_url(url, *args, **kwargs)
breadcrumb_context = {
'contents': contents,
'url': url,
'class': class_name
}
context['breadcrumbs'].append(breadcrumb_context)
return ''
@library.global_function
def render_breadcrumbs(breadcrumbs):
breadcrumbs.reverse()
ctx = {'breadcrumbs': breadcrumbs}
return render_to_string('breadcrumbs/breadcrumbs.html', ctx)
| 25.1
| 61
| 0.755644
| 0
| 0
| 0
| 0
| 562
| 0.746348
| 0
| 0
| 115
| 0.152722
|
58c23fc6ab7f8d080ab7dfae6e27ec6257ea2869
| 1,334
|
py
|
Python
|
contrib/opencensus-ext-datadog/opencensus/ext/datadog/transport.py
|
Flared/opencensus-python
|
e2535e688a50c7a06be8af93ca3b987d387da605
|
[
"Apache-2.0"
] | 650
|
2017-07-09T02:08:10.000Z
|
2022-03-22T20:39:54.000Z
|
contrib/opencensus-ext-datadog/opencensus/ext/datadog/transport.py
|
Flared/opencensus-python
|
e2535e688a50c7a06be8af93ca3b987d387da605
|
[
"Apache-2.0"
] | 735
|
2017-07-26T01:15:16.000Z
|
2022-03-29T20:17:20.000Z
|
contrib/opencensus-ext-datadog/opencensus/ext/datadog/transport.py
|
Flared/opencensus-python
|
e2535e688a50c7a06be8af93ca3b987d387da605
|
[
"Apache-2.0"
] | 256
|
2017-07-24T18:29:15.000Z
|
2022-03-15T15:33:03.000Z
|
import platform
import requests
class DDTransport(object):
""" DDTransport contains all the logic for sending Traces to Datadog
:type trace_addr: str
:param trace_addr: trace_addr specifies the host[:port] address of the
Datadog Trace Agent.
"""
def __init__(self, trace_addr):
self._trace_addr = trace_addr
self._headers = {
"Datadog-Meta-Lang": "python",
"Datadog-Meta-Lang-Interpreter": platform.platform(),
# Following the example of the Golang version it is prefixed
# OC for Opencensus.
"Datadog-Meta-Tracer-Version": "OC/0.0.1",
"Content-Type": "application/json",
}
@property
def trace_addr(self):
""" specifies the host[:port] address of the Datadog Trace Agent.
"""
return self._trace_addr
@property
def headers(self):
""" specifies the headers that will be attached to HTTP request sent to DD.
"""
return self._headers
def send_traces(self, trace):
""" Sends traces to the Datadog Tracing Agent
        :type trace: dict
:param trace: Trace dictionary
"""
requests.post("http://" + self.trace_addr + "/v0.4/traces",
json=trace,
headers=self.headers)
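# Hedged usage sketch (not part of the original module): the agent address and
# trace payload below are illustrative, and sending requires a Datadog trace
# agent listening at that address.
if __name__ == "__main__":
    transport = DDTransport("localhost:8126")
    example_trace = [[{"trace_id": 1, "span_id": 1, "parent_id": 0,
                       "name": "example.request", "resource": "/demo",
                       "service": "demo-service", "start": 0, "duration": 1000}]]
    transport.send_traces(example_trace)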
| 28.382979
| 83
| 0.595202
| 1,298
| 0.973013
| 0
| 0
| 310
| 0.232384
| 0
| 0
| 721
| 0.54048
|
58c4071d4471ff72fd95738a79b453160bfc2e4b
| 252
|
py
|
Python
|
credsweeper/file_handler/analysis_target.py
|
ARKAD97/CredSweeper
|
0f613cded13d6c28c19c57eac54dd245b2c318ea
|
[
"MIT"
] | null | null | null |
credsweeper/file_handler/analysis_target.py
|
ARKAD97/CredSweeper
|
0f613cded13d6c28c19c57eac54dd245b2c318ea
|
[
"MIT"
] | null | null | null |
credsweeper/file_handler/analysis_target.py
|
ARKAD97/CredSweeper
|
0f613cded13d6c28c19c57eac54dd245b2c318ea
|
[
"MIT"
] | null | null | null |
from typing import List
class AnalysisTarget:
def __init__(self, line: str, line_num: int, lines: List[str], file_path: str):
self.line = line
self.line_num = line_num
self.lines = lines
self.file_path = file_path
| 25.2
| 83
| 0.650794
| 225
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
58c4179e5713c05abfe6169f74df8cd9ca6987a4
| 5,558
|
py
|
Python
|
model/vgg_deeplab.py
|
ireina7/zero-shot-segmentation
|
870d08ad7ea3965f006d0eb44667f6ecd87ef205
|
[
"MIT"
] | null | null | null |
model/vgg_deeplab.py
|
ireina7/zero-shot-segmentation
|
870d08ad7ea3965f006d0eb44667f6ecd87ef205
|
[
"MIT"
] | null | null | null |
model/vgg_deeplab.py
|
ireina7/zero-shot-segmentation
|
870d08ad7ea3965f006d0eb44667f6ecd87ef205
|
[
"MIT"
] | null | null | null |
import torchvision
import torch
import torch.nn as nn
import torch.nn.functional as F
class Vgg_Deeplab(nn.Module):
def __init__(self,*args, **kwargs):
super(Vgg_Deeplab, self).__init__()
vgg16 = torchvision.models.vgg16()
layers = []
layers.append(nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.MaxPool2d(3, stride=2, padding=1))
layers.append(nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.MaxPool2d(3, stride=2, padding=1))
layers.append(nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.MaxPool2d(3, stride=2, padding=1))
layers.append(nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.MaxPool2d(3, stride=1, padding=1))
layers.append(nn.Conv2d(512,
512,
kernel_size=3,
stride=1,
padding=2,
dilation=2))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Conv2d(512,
512,
kernel_size=3,
stride=1,
padding=2,
dilation=2))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Conv2d(512,
512,
kernel_size=3,
stride=1,
padding=2,
dilation=2))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.MaxPool2d(3, stride=1, padding=1))
self.features = nn.Sequential(*layers)
classifier = []
classifier.append(nn.AvgPool2d(3, stride=1, padding=1))
classifier.append(nn.Conv2d(512,
1024,
kernel_size=3,
stride=1,
padding=12,
dilation=12))
classifier.append(nn.ReLU(inplace=True))
classifier.append(nn.Dropout(p=0.5))
self.classifier = nn.Sequential(*classifier)
self.init_weights()
def forward(self, x):
x = self.features(x)
x = self.classifier(x)
return x
def init_weights(self):
vgg = torchvision.models.vgg16(pretrained=True)
state_vgg = vgg.features.state_dict()
self.features.load_state_dict(state_vgg)
for ly in self.classifier.children():
if isinstance(ly, nn.Conv2d):
nn.init.kaiming_normal_(ly.weight, a=1)
nn.init.constant_(ly.bias, 0)
def get_1x_lr_params(self):
"""
        This generator yields the parameters of the backbone feature layers,
        which are trained with the base (1x) learning rate.
"""
# b = []
#
# b.append(self.conv1)
# b.append(self.bn1)
# b.append(self.layer1)
# b.append(self.layer2)
# b.append(self.layer3)
# b.append(self.layer4)
for i in self.features:
#for j in self.features[i].modules():
jj = 0
for k in i.parameters():
jj += 1
if k.requires_grad:
yield k
def optim_parameters_1x(self, args):
return [{"params": self.get_1x_lr_params(), "lr": 1 * args.learning_rate}]
def get_10x_lr_params(self):
"""
        This generator yields the parameters of the classifier head (the last
        layers of the net, which do the pixel classification) and which are
        trained with a 10x learning rate.
"""
# b = []
# b.append(self.layer.parameters())
for i in self.classifier:
#for j in self.classifier[i].modules():
jj = 0
for k in i.parameters():
jj += 1
if k.requires_grad:
yield k
def optim_parameters_10x(self, args):
return [{"params": self.get_10x_lr_params(), "lr": 10 * args.learning_rate}]
if __name__ == "__main__":
net = Vgg_Deeplab(3, 10)
in_ten = torch.randn(1, 3, 224, 224)
out = net(in_ten)
print(net)
print(out.size())
in_ten = torch.randn(1, 3, 64, 64)
mod = nn.Conv2d(3,
512,
kernel_size=3,
stride=1,
padding=2,
dilation=2)
out = mod(in_ten)
print(out.shape)
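    # Hedged add-on to this demo (not in the original file): wire the 1x/10x
    # parameter groups into an SGD optimizer; the learning rate and the simple
    # namespace standing in for argparse args are illustrative values.
    from types import SimpleNamespace
    args = SimpleNamespace(learning_rate=2.5e-4)
    optimizer = torch.optim.SGD(
        net.optim_parameters_1x(args) + net.optim_parameters_10x(args),
        lr=args.learning_rate, momentum=0.9)
    print(optimizer)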
| 35.858065
| 84
| 0.526268
| 5,052
| 0.90896
| 1,119
| 0.201331
| 0
| 0
| 0
| 0
| 608
| 0.109392
|
58c5f8a4b5b023272ffe37f64fa23b186bdc1cd0
| 510
|
py
|
Python
|
web/app.py
|
erberlin/themepark-times-API
|
0b6a44224c3d5e1f98399ac30e96e35bad38849e
|
[
"MIT"
] | 7
|
2019-04-19T16:32:24.000Z
|
2019-07-10T12:41:50.000Z
|
web/app.py
|
erberlin/themepark-times-API
|
0b6a44224c3d5e1f98399ac30e96e35bad38849e
|
[
"MIT"
] | null | null | null |
web/app.py
|
erberlin/themepark-times-API
|
0b6a44224c3d5e1f98399ac30e96e35bad38849e
|
[
"MIT"
] | 1
|
2019-07-10T04:36:43.000Z
|
2019-07-10T04:36:43.000Z
|
# -*- coding: utf-8 -*-
"""
This module defines a connexion app object and configures the API
endpoints based on the swagger.yml configuration file.
copyright: © 2019 by Erik R Berlin.
license: MIT, see LICENSE for more details.
"""
import connexion
app = connexion.App(__name__, specification_dir="./")
app.app.url_map.strict_slashes = False
app.add_api("swagger.yml")
if __name__ == "__main__":
# FLASK_ENV=development & FLASK_DEBUG=1 w/ Docker don't seem to enable debug mode.
app.run(debug=True)
| 25.5
| 86
| 0.731373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 340
| 0.665362
|
58c6d6c03c23a334c302f4903855ceb65421ce9b
| 2,341
|
py
|
Python
|
CLIMATExScience/air-pollution-index/data-visualization/pollutant-freq.py
|
MY-Climate-Observatory/myco-data
|
5203fa63c7ce609bbc9bbc4186f55da78befdc50
|
[
"CC-BY-4.0"
] | null | null | null |
CLIMATExScience/air-pollution-index/data-visualization/pollutant-freq.py
|
MY-Climate-Observatory/myco-data
|
5203fa63c7ce609bbc9bbc4186f55da78befdc50
|
[
"CC-BY-4.0"
] | null | null | null |
CLIMATExScience/air-pollution-index/data-visualization/pollutant-freq.py
|
MY-Climate-Observatory/myco-data
|
5203fa63c7ce609bbc9bbc4186f55da78befdc50
|
[
"CC-BY-4.0"
] | 1
|
2021-12-16T04:56:09.000Z
|
2021-12-16T04:56:09.000Z
|
# -*- coding: utf-8 -*-
"""
17 June 2020
Author: Xiandi Ooi
Visualizing the types of pollutants.
"""
import pandas as pd
from plotly.offline import plot
import plotly.graph_objects as go
# Get the file from us
df = pd.read_csv("https://www.dropbox.com/s/u0ymg0ufne0an60/api-20200713.csv?dl=1", sep = ";")
# Make the selection
selected_area = "Sandakan"
df_select = df.loc[(df.Area == selected_area),
["Area", "Dominant", "Datetime"]]
# Data wrangling for this particular visual
df_update = df_select.set_index(pd.DatetimeIndex(df_select["Datetime"]))
df_update.drop(df_update.columns[2], axis = 1, inplace = True)
# Wrangling
df_group_time = df_update.groupby(pd.Grouper(freq = "Q")).size().reset_index(name = "Total")
df_group = df_update.groupby([pd.Grouper(freq = "Q"),
pd.Grouper("Dominant")]).size().reset_index(name = "Count")
df_output = df_group.set_index("Datetime").join(df_group_time.set_index("Datetime"))
df_output["Frequency"] = df_output["Count"] / df_output["Total"]
# Creating df subset for the stacked bars, here we are only dealing with the main dominant pollutants
df_pm2_5 = df_output.loc[(df_output.Dominant == "**")]
df_pm10 = df_output.loc[(df_output.Dominant == "*")]
df_so2 = df_output.loc[(df_output.Dominant == "a")]
df_no2 = df_output.loc[(df_output.Dominant == "b")]
df_o3 = df_output.loc[(df_output.Dominant == "c")]
df_co = df_output.loc[(df_output.Dominant == "d")]
# Now comes the bar chart
fig = go.Figure()
fig.add_trace(go.Bar(x = df_pm2_5.index,
y = df_pm2_5["Frequency"],
name = "PM 2.5"))
fig.add_trace(go.Bar(x = df_pm10.index,
y = df_pm10["Frequency"],
name = "PM 10"))
fig.add_trace(go.Bar(x = df_so2.index,
y = df_so2["Frequency"],
name = "SO2"))
fig.add_trace(go.Bar(x = df_no2.index,
y = df_no2["Frequency"],
name = "NO2"))
fig.add_trace(go.Bar(x = df_o3.index,
y = df_o3["Frequency"],
name = "O3"))
fig.add_trace(go.Bar(x = df_co.index,
y = df_co["Frequency"],
name = "CO"))
fig.update_layout(barmode = "stack", title_text="Frequency of Detected Pollutants")
plot(fig)
| 32.971831
| 101
| 0.612986
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 614
| 0.262281
|
58c6e236acba1419c8019e6e9d0019c26bbbfc7f
| 3,977
|
py
|
Python
|
tests/bs3/test_block_fields.py
|
rpkilby/django-template-forms
|
5099d87d661a6a313df49fa484afd94f145e65bc
|
[
"BSD-3-Clause"
] | 1
|
2021-01-29T11:53:32.000Z
|
2021-01-29T11:53:32.000Z
|
tests/bs3/test_block_fields.py
|
rpkilby/django-template-forms
|
5099d87d661a6a313df49fa484afd94f145e65bc
|
[
"BSD-3-Clause"
] | 5
|
2017-11-29T11:01:56.000Z
|
2018-02-05T23:34:08.000Z
|
tests/bs3/test_block_fields.py
|
rpkilby/django-template-forms
|
5099d87d661a6a313df49fa484afd94f145e65bc
|
[
"BSD-3-Clause"
] | null | null | null |
from django import forms
from django.test import TestCase
from template_forms import bs3
def startswith_a(value):
if value.startswith('a'):
return value
raise forms.ValidationError('Value must start with "a".')
def not_now(value):
if value:
raise forms.ValidationError('I cannot let you do that right now.')
class StandardFieldTests(TestCase):
class Form(bs3.BlockForm, forms.Form):
field = forms.CharField(required=False, validators=[startswith_a], help_text='Example text.', )
def get_attrs(self, bf):
return {
'name': bf.html_name,
'id': bf.auto_id,
'label': bf.label,
}
def test_field(self):
form = self.Form()
field = form['field']
template = """
<div class="form-group">
<label for="{id}" class="control-label">{label}:</label>
<input id="{id}" name="{name}" type="text" class="form-control">
<small class="help-block">Example text.</small>
</div>
"""
self.assertHTMLEqual(
template.format(**self.get_attrs(field)),
form.render_field(field, field.errors)
)
def test_field_bound(self):
form = self.Form({'field': 'a value'})
field = form['field']
template = """
<div class="form-group">
<label for="{id}" class="control-label">{label}:</label>
<input id="{id}" name="{name}" type="text" class="form-control" value="a value">
<small class="help-block">Example text.</small>
</div>
"""
self.assertHTMLEqual(
template.format(**self.get_attrs(field)),
form.render_field(field, field.errors)
)
def test_field_error(self):
form = self.Form({'field': 'error'})
field = form['field']
template = """
<div class="form-group has-error">
<label for="{id}" class="control-label">{label}:</label>
<input id="{id}" name="{name}" type="text" class="form-control has-error" value="error">
<small class="help-block">Value must start with "a".</small>
<small class="help-block">Example text.</small>
</div>
"""
self.assertHTMLEqual(
template.format(**self.get_attrs(field)),
form.render_field(field, field.errors)
)
class CheckboxFieldTests(TestCase):
class Form(bs3.BlockForm, forms.Form):
field = forms.BooleanField(required=False, validators=[not_now], help_text='Example text.')
def get_attrs(self, bf):
return {
'name': bf.html_name,
'id': bf.auto_id,
'label': bf.label,
}
def test_field(self):
form = self.Form()
field = form['field']
template = """
<div class="form-group">
<div class="checkbox">
<label>
<input id="{id}" name="{name}" type="checkbox"> {label}
</label>
</div>
<small class="help-block">Example text.</small>
</div>
"""
self.assertHTMLEqual(
template.format(**self.get_attrs(field)),
form.render_field(field, field.errors)
)
def test_field_error(self):
form = self.Form({'field': 'on'})
field = form['field']
template = """
<div class="form-group has-error">
<div class="checkbox">
<label>
<input id="{id}" name="{name}" type="checkbox" checked> {label}
</label>
</div>
<small class="help-block">I cannot let you do that right now.</small>
<small class="help-block">Example text.</small>
</div>
"""
self.assertHTMLEqual(
template.format(**self.get_attrs(field)),
form.render_field(field, field.errors)
)
| 30.829457
| 103
| 0.538094
| 3,629
| 0.912497
| 0
| 0
| 0
| 0
| 0
| 0
| 1,852
| 0.465678
|
58c773feba3d980c07a404541fff29ea0e07df10
| 19,463
|
py
|
Python
|
bzt/modules/java.py
|
3dgiordano/taurus
|
77cb31b6f0e5c27545094f600ac2b595fa76d992
|
[
"Apache-2.0"
] | 1
|
2018-02-17T16:00:34.000Z
|
2018-02-17T16:00:34.000Z
|
bzt/modules/java.py
|
3dgiordano/taurus
|
77cb31b6f0e5c27545094f600ac2b595fa76d992
|
[
"Apache-2.0"
] | 5
|
2018-03-10T20:50:24.000Z
|
2021-08-20T15:07:32.000Z
|
bzt/modules/java.py
|
3dgiordano/taurus
|
77cb31b6f0e5c27545094f600ac2b595fa76d992
|
[
"Apache-2.0"
] | 1
|
2018-05-04T23:06:15.000Z
|
2018-05-04T23:06:15.000Z
|
"""
Copyright 2017 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import os
import shutil
import subprocess
import time
from os import listdir
from os.path import join
from bzt import ToolError, TaurusConfigError
from bzt.engine import HavingInstallableTools, Scenario
from bzt.modules import SubprocessedExecutor
from bzt.utils import get_full_path, shell_exec, TclLibrary, JavaVM, RequiredTool, MirrorsManager
SELENIUM_DOWNLOAD_LINK = "http://selenium-release.storage.googleapis.com/3.6/" \
"selenium-server-standalone-3.6.0.jar"
SELENIUM_VERSION = "3.6" # FIXME: unused, remove it
JUNIT_DOWNLOAD_LINK = "http://search.maven.org/remotecontent?filepath=junit/junit/" \
"{version}/junit-{version}.jar"
JUNIT_VERSION = "4.12"
JUNIT_MIRRORS_SOURCE = "http://search.maven.org/solrsearch/select?q=g%3A%22junit%22%20AND%20a%3A%22" \
"junit%22%20AND%20v%3A%22{version}%22&rows=20&wt=json".format(version=JUNIT_VERSION)
TESTNG_VERSION = "6.8.5"
TESTNG_DOWNLOAD_LINK = "http://search.maven.org/remotecontent?filepath=org/testng/testng/" \
"{version}/testng-{version}.jar".format(version=TESTNG_VERSION)
HAMCREST_DOWNLOAD_LINK = "http://search.maven.org/remotecontent?filepath=org/hamcrest/hamcrest-core" \
"/1.3/hamcrest-core-1.3.jar"
JSON_JAR_DOWNLOAD_LINK = "http://search.maven.org/remotecontent?filepath=org/json/json/20160810/json-20160810.jar"
class JavaTestRunner(SubprocessedExecutor, HavingInstallableTools):
"""
    Allows testing of java and jar files
:type script: str
"""
def __init__(self):
super(JavaTestRunner, self).__init__()
self.working_dir = os.getcwd()
self.target_java = "1.8"
self.props_file = None
self.base_class_path = []
def path_lambda(self, x):
return os.path.abspath(self.engine.find_file(x))
def install_required_tools(self):
self.hamcrest_path = self.path_lambda(self.settings.get("hamcrest-core",
"~/.bzt/selenium-taurus/tools/junit/hamcrest-core.jar"))
self.json_jar_path = self.path_lambda(
self.settings.get("json-jar", "~/.bzt/selenium-taurus/tools/junit/json.jar"))
self.selenium_server_jar_path = self.path_lambda(self.settings.get("selenium-server",
"~/.bzt/selenium-taurus/selenium-server.jar"))
def prepare(self):
"""
make jar.
"""
self.script = self.get_scenario().get(Scenario.SCRIPT,
TaurusConfigError("Script not passed to runner %s" % self))
self.script = self.engine.find_file(self.script)
self.install_required_tools()
self.working_dir = self.engine.create_artifact(self.settings.get("working-dir", "classes"), "")
self.target_java = str(self.settings.get("compile-target-java", self.target_java))
self.base_class_path.extend(self.settings.get("additional-classpath", []))
self.base_class_path.extend(self.get_scenario().get("additional-classpath", []))
self.base_class_path.extend([self.hamcrest_path, self.json_jar_path, self.selenium_server_jar_path])
self.props_file = self.engine.create_artifact("runner", ".properties")
if not os.path.exists(self.working_dir):
os.makedirs(self.working_dir)
self.reporting_setup(suffix=".ldjson")
def resource_files(self):
resources = super(JavaTestRunner, self).resource_files()
resources.extend(self.get_scenario().get("additional-classpath", []))
global_additional_classpath = self.settings.get("additional-classpath", [])
execution_files = self.execution.get('files', []) # later we need to fix path for sending into cloud
execution_files.extend(global_additional_classpath)
return resources
def _collect_script_files(self, extensions):
file_list = []
if self.script is not None and os.path.isdir(self.script):
for root, _, files in os.walk(self.script):
for test_file in files:
if os.path.splitext(test_file)[1].lower() in extensions:
path = get_full_path(join(root, test_file))
file_list.append(path)
else:
if os.path.splitext(self.script)[1].lower() in extensions:
file_list.append(get_full_path(self.script))
return file_list
def compile_scripts(self):
"""
Compile .java files
"""
self.log.debug("Compiling .java files started")
jar_path = join(self.engine.artifacts_dir, self.working_dir, self.settings.get("jar-name", "compiled.jar"))
if os.path.exists(jar_path):
self.log.debug(".java files are already compiled, skipping")
return
compile_cl = ["javac",
"-source", self.target_java,
"-target", self.target_java,
"-d", self.working_dir,
]
compile_cl.extend(["-cp", os.pathsep.join(self.base_class_path)])
compile_cl.extend(self._collect_script_files({".java"}))
with open(self.engine.create_artifact("javac", ".out"), 'ab') as javac_out:
with open(self.engine.create_artifact("javac", ".err"), 'ab') as javac_err:
self.log.debug("running javac: %s", compile_cl)
self.process = shell_exec(compile_cl, stdout=javac_out, stderr=javac_err)
ret_code = self.process.poll()
while ret_code is None:
self.log.debug("Compiling .java files...")
time.sleep(1)
ret_code = self.process.poll()
if ret_code != 0:
self.log.debug("javac exit code: %s", ret_code)
with open(javac_err.name) as err_file:
out = err_file.read()
raise ToolError("Javac exited with code: %s\n %s" % (ret_code, out.strip()))
self.log.info("Compiling .java files completed")
self.make_jar()
def make_jar(self):
"""
move all .class files to compiled.jar
"""
self.log.debug("Making .jar started")
with open(join(self.engine.artifacts_dir, "jar.out"), 'ab') as jar_out:
with open(join(self.engine.artifacts_dir, "jar.err"), 'ab') as jar_err:
class_files = [java_file for java_file in listdir(self.working_dir) if java_file.endswith(".class")]
jar_name = self.settings.get("jar-name", "compiled.jar")
if class_files:
compile_jar_cl = ["jar", "-cf", jar_name]
compile_jar_cl.extend(class_files)
else:
compile_jar_cl = ["jar", "-cf", jar_name, "."]
self.process = shell_exec(compile_jar_cl, cwd=self.working_dir, stdout=jar_out, stderr=jar_err)
ret_code = self.process.poll()
while ret_code is None:
self.log.debug("Making jar file...")
time.sleep(1)
ret_code = self.process.poll()
if ret_code != 0:
with open(jar_err.name) as err_file:
out = err_file.read()
raise ToolError("Jar exited with code %s\n%s" % (ret_code, out.strip()))
self.log.info("Making .jar file completed")
class JUnitTester(JavaTestRunner, HavingInstallableTools):
"""
    Allows testing of java and jar files
"""
def __init__(self):
super(JUnitTester, self).__init__()
self.junit_path = None
self.junit_listener_path = None
def prepare(self):
super(JUnitTester, self).prepare()
self.install_required_tools()
self.base_class_path += [self.junit_path, self.junit_listener_path]
self.base_class_path = [self.path_lambda(x) for x in self.base_class_path]
if any(self._collect_script_files({'.java'})):
self.compile_scripts()
def install_required_tools(self):
super(JUnitTester, self).install_required_tools()
self.junit_path = self.path_lambda(self.settings.get("path", "~/.bzt/selenium-taurus/tools/junit/junit.jar"))
self.junit_listener_path = join(get_full_path(__file__, step_up=2), "resources", "taurus-junit-1.0.jar")
tools = []
# only check javac if we need to compile. if we have JAR as script - we don't need javac
if self.script and any(self._collect_script_files({'.java'})):
tools.append(JavaC(self.log))
tools.append(TclLibrary(self.log))
tools.append(JavaVM(self.log))
link = SELENIUM_DOWNLOAD_LINK.format(version=SELENIUM_VERSION)
tools.append(SeleniumServerJar(self.selenium_server_jar_path, link, self.log))
tools.append(JUnitJar(self.junit_path, self.log, JUNIT_VERSION))
tools.append(HamcrestJar(self.hamcrest_path, HAMCREST_DOWNLOAD_LINK))
tools.append(JsonJar(self.json_jar_path, JSON_JAR_DOWNLOAD_LINK))
tools.append(JUnitListenerJar(self.junit_listener_path, ""))
self._check_tools(tools)
def startup(self):
# java -cp junit.jar:selenium-test-small.jar:
# selenium-2.46.0/selenium-java-2.46.0.jar:./../selenium-server.jar
# taurusjunit.CustomRunner runner.properties
jar_list = [join(self.working_dir, jar) for jar in listdir(self.working_dir) if jar.endswith(".jar")]
jar_list.extend(self._collect_script_files({".jar"}))
self.base_class_path.extend(jar_list)
with open(self.props_file, 'wt') as props:
props.write("report_file=%s\n" % self.report_file)
load = self.get_load()
if load.iterations:
props.write("iterations=%s\n" % load.iterations)
if load.hold:
props.write("hold_for=%s\n" % load.hold)
for index, item in enumerate(jar_list):
props.write("target_%s=%s\n" % (index, item.replace(os.path.sep, '/')))
class_path = os.pathsep.join(self.base_class_path)
junit_cmd_line = ["java", "-cp", class_path, "-Djna.nosys=true", "taurusjunit.CustomRunner", self.props_file]
self._start_subprocess(junit_cmd_line)
class TestNGTester(JavaTestRunner, HavingInstallableTools):
"""
    Allows testing of java and jar files with TestNG
"""
__test__ = False # Hello, nosetests discovery mechanism
def __init__(self):
super(TestNGTester, self).__init__()
self.testng_path = None
self.testng_plugin_path = None
def prepare(self):
super(TestNGTester, self).prepare()
self.install_required_tools()
self.base_class_path += [self.testng_path, self.testng_plugin_path]
if any(self._collect_script_files({'.java'})):
self.compile_scripts()
def detected_testng_xml(self):
script_path = self.get_script_path()
if script_path and self.settings.get("autodetect-xml", True):
script_dir = get_full_path(script_path, step_up=1)
testng_xml = os.path.join(script_dir, 'testng.xml')
if os.path.exists(testng_xml):
return testng_xml
return None
def resource_files(self):
resources = super(TestNGTester, self).resource_files()
testng_xml = self.execution.get('testng-xml', None)
if not testng_xml:
testng_xml = self.detected_testng_xml()
if testng_xml:
self.log.info("Detected testng.xml file at %s", testng_xml)
self.execution['testng-xml'] = testng_xml
if testng_xml:
resources.append(testng_xml)
return resources
def install_required_tools(self):
super(TestNGTester, self).install_required_tools()
self.testng_path = self.path_lambda(self.settings.get("path", "~/.bzt/selenium-taurus/tools/testng/testng.jar"))
self.testng_plugin_path = join(get_full_path(__file__, step_up=2), "resources", "taurus-testng-1.0.jar")
tools = []
if self.script and any(self._collect_script_files({'.java'})):
tools.append(JavaC(self.log))
tools.append(TclLibrary(self.log))
tools.append(JavaVM(self.log))
link = SELENIUM_DOWNLOAD_LINK.format(version=SELENIUM_VERSION)
tools.append(SeleniumServerJar(self.selenium_server_jar_path, link, self.log))
tools.append(TestNGJar(self.testng_path, TESTNG_DOWNLOAD_LINK))
tools.append(HamcrestJar(self.hamcrest_path, HAMCREST_DOWNLOAD_LINK))
tools.append(JsonJar(self.json_jar_path, JSON_JAR_DOWNLOAD_LINK))
tools.append(TestNGPluginJar(self.testng_plugin_path, ""))
self._check_tools(tools)
def startup(self):
# java -classpath
# testng.jar:selenium-server.jar:taurus-testng-1.0.jar:json.jar:compiled.jar
# taurustestng.TestNGRunner runner.properties
jar_list = [join(self.working_dir, jar) for jar in listdir(self.working_dir) if jar.endswith(".jar")]
jar_list.extend(self._collect_script_files({".jar"}))
self.base_class_path.extend(jar_list)
with open(self.props_file, 'wt') as props:
props.write("report_file=%s\n" % self.report_file)
load = self.get_load()
if load.iterations:
props.write("iterations=%s\n" % load.iterations)
if load.hold:
props.write("hold_for=%s\n" % load.hold)
for index, item in enumerate(jar_list):
props.write("target_%s=%s\n" % (index, item.replace(os.path.sep, '/')))
testng_xml = self.execution.get('testng-xml', None) or self.detected_testng_xml()
if testng_xml:
props.write('testng_config=%s\n' % testng_xml.replace(os.path.sep, '/'))
cmdline = ["java", "-cp", os.pathsep.join(self.base_class_path), "taurustestng.TestNGRunner", self.props_file]
self._start_subprocess(cmdline)
class TestNGJar(RequiredTool):
def __init__(self, tool_path, download_link):
super(TestNGJar, self).__init__("TestNG", tool_path, download_link)
class HamcrestJar(RequiredTool):
def __init__(self, tool_path, download_link):
super(HamcrestJar, self).__init__("HamcrestJar", tool_path, download_link)
class JsonJar(RequiredTool):
def __init__(self, tool_path, download_link):
super(JsonJar, self).__init__("JsonJar", tool_path, download_link)
class JavaC(RequiredTool):
def __init__(self, parent_logger, tool_path='javac', download_link=''):
super(JavaC, self).__init__("JavaC", tool_path, download_link)
self.log = parent_logger.getChild(self.__class__.__name__)
def check_if_installed(self):
try:
output = subprocess.check_output([self.tool_path, '-version'], stderr=subprocess.STDOUT)
self.log.debug("%s output: %s", self.tool_name, output)
return True
except (subprocess.CalledProcessError, OSError):
return False
def install(self):
raise ToolError("The %s is not operable or not available. Consider installing it" % self.tool_name)
class SeleniumServerJar(RequiredTool):
def __init__(self, tool_path, download_link, parent_logger):
super(SeleniumServerJar, self).__init__("Selenium server", tool_path, download_link)
self.log = parent_logger.getChild(self.__class__.__name__)
def check_if_installed(self):
self.log.debug("%s path: %s", self.tool_name, self.tool_path)
selenium_launch_command = ["java", "-jar", self.tool_path, "-help"]
selenium_subproc = shell_exec(selenium_launch_command, stderr=subprocess.STDOUT)
output = selenium_subproc.communicate()
self.log.debug("%s output: %s", self.tool_name, output)
if selenium_subproc.returncode == 0:
self.already_installed = True
return True
else:
return False
class JUnitJar(RequiredTool):
def __init__(self, tool_path, parent_logger, junit_version):
super(JUnitJar, self).__init__("JUnit", tool_path)
self.log = parent_logger.getChild(self.__class__.__name__)
self.version = junit_version
self.mirror_manager = JUnitMirrorsManager(self.log, self.version)
def install(self):
dest = get_full_path(self.tool_path, step_up=1)
self.log.info("Will install %s into %s", self.tool_name, dest)
junit_dist = self._download(suffix=".jar")
if not os.path.exists(dest):
os.makedirs(dest)
shutil.move(junit_dist, self.tool_path)
self.log.info("Installed JUnit successfully")
if not self.check_if_installed():
raise ToolError("Unable to run %s after installation!" % self.tool_name)
class JUnitListenerJar(RequiredTool):
def __init__(self, tool_path, download_link):
super(JUnitListenerJar, self).__init__("JUnitListener", tool_path, download_link)
def install(self):
raise ToolError("Automatic installation of JUnitListener isn't implemented")
class TestNGPluginJar(RequiredTool):
def __init__(self, tool_path, download_link):
super(TestNGPluginJar, self).__init__("TestNGPlugin", tool_path, download_link)
def install(self):
raise ToolError("TestNG plugin should be bundled with Taurus distribution")
class JUnitMirrorsManager(MirrorsManager):
def __init__(self, parent_logger, junit_version):
self.junit_version = junit_version
super(JUnitMirrorsManager, self).__init__(JUNIT_MIRRORS_SOURCE, parent_logger)
def _parse_mirrors(self):
links = []
if self.page_source is not None:
self.log.debug('Parsing mirrors...')
try:
resp = json.loads(self.page_source)
objects = resp.get("response", {}).get("docs", [])
if objects:
obj = objects[0]
group = obj.get("g")
artifact = obj.get("a")
version = obj.get("v")
ext = obj.get("p")
link_template = "http://search.maven.org/remotecontent?filepath={group}/{artifact}/" \
"{version}/{artifact}-{version}.{ext}"
link = link_template.format(group=group, artifact=artifact, version=version, ext=ext)
links.append(link)
except BaseException as exc:
self.log.error("Error while parsing mirrors %s", exc)
default_link = JUNIT_DOWNLOAD_LINK.format(version=self.junit_version)
if default_link not in links:
links.append(default_link)
self.log.debug('Total mirrors: %d', len(links))
return links
| 41.946121
| 121
| 0.640138
| 17,451
| 0.896624
| 0
| 0
| 0
| 0
| 0
| 0
| 4,120
| 0.211684
|
58c8432548a967e56cf908c27cbcc2cdbca067b8
| 1,434
|
py
|
Python
|
various_modules/interface_segregation_principle.py
|
Neykah/design_patterns_python
|
6f801fc4fc60f2d34002e4fe435feb6111a2cd23
|
[
"MIT"
] | null | null | null |
various_modules/interface_segregation_principle.py
|
Neykah/design_patterns_python
|
6f801fc4fc60f2d34002e4fe435feb6111a2cd23
|
[
"MIT"
] | null | null | null |
various_modules/interface_segregation_principle.py
|
Neykah/design_patterns_python
|
6f801fc4fc60f2d34002e4fe435feb6111a2cd23
|
[
"MIT"
] | null | null | null |
"""
Interface Segregation Principle example. Maybe not so relevant in Python due to the possibility of using multiple inheritance...
"""
from abc import ABC, abstractmethod
class CloudHostingProvider(ABC):
@abstractmethod
    def create_server(self, region):
...
@abstractmethod
    def list_servers(self, region):
...
class CDNProvider(ABC):
@abstractmethod
    def get_cdna_address(self):
...
class CloudStorageProvider(ABC):
@abstractmethod
    def store_file(self, name):
...
@abstractmethod
    def get_file(self, name):
...
class Amazon(CloudHostingProvider, CDNProvider, CloudStorageProvider):
def store_file(self, name: str):
print(f"Storing the file {name} in AWS...")
def get_file(self, name: str):
print(f"Getting the file {name} from AWS...")
def create_server(self, region: str):
print(f"Creating a new server in the following region: {region}...")
def list_servers(self, region: str):
print(f"List all servers available in {region}...")
def get_cdna_address(self):
print("AWS CDNA address: ...")
class Dropbox(CloudStorageProvider):
def store_file(self, name: str):
print(f"Storing the file {name} in Dropbox...")
def get_file(self, name: str):
print(f"Getting the file {name} from Dropbox...")
if __name__ == "__main__":
amazon = Amazon()
dropbox = Dropbox()
amazon.get_file("Baba")
dropbox.store_file("Baba")
| 22.40625
| 85
| 0.642259
| 1,154
| 0.804742
| 0
| 0
| 274
| 0.191074
| 0
| 0
| 399
| 0.278243
|
58c8441bd96dbfec3988f61c1477017eb7ba3344
| 3,536
|
py
|
Python
|
SCSCons/Variables/PackageVariable.py
|
Relintai/pandemonium_engine
|
3de05db75a396b497f145411f71eb363572b38ae
|
[
"MIT",
"Apache-2.0",
"CC-BY-4.0",
"Unlicense"
] | 1,403
|
2017-11-23T14:24:01.000Z
|
2022-03-30T20:59:39.000Z
|
nuitka/build/inline_copy/lib/scons-4.3.0/SCons/Variables/PackageVariable.py
|
lps1333/Nuitka
|
02e8d59a275cd7fe482cbc8100e753ff5abe39d7
|
[
"Apache-2.0"
] | 3,708
|
2017-11-27T13:47:12.000Z
|
2022-03-29T17:21:17.000Z
|
nuitka/build/inline_copy/lib/scons-4.3.0/SCons/Variables/PackageVariable.py
|
lps1333/Nuitka
|
02e8d59a275cd7fe482cbc8100e753ff5abe39d7
|
[
"Apache-2.0"
] | 281
|
2017-12-01T23:48:38.000Z
|
2022-03-31T15:25:44.000Z
|
# MIT License
#
# Copyright The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Variable type for package Variables.
To be used whenever a 'package' may be enabled/disabled and the
package path may be specified.
Given these options ::
x11=no (disables X11 support)
x11=yes (will search for the package installation dir)
x11=/usr/local/X11 (will check this path for existence)
Can be used as a replacement for autoconf's ``--with-xxx=yyy`` ::
opts = Variables()
opts.Add(
PackageVariable(
key='x11',
help='use X11 installed here (yes = search some places)',
default='yes'
)
)
...
if env['x11'] == True:
dir = ... # search X11 in some standard places ...
env['x11'] = dir
if env['x11']:
... # build with x11 ...
"""
from typing import Tuple, Callable
import SCons.Errors
__all__ = ['PackageVariable',]
ENABLE_STRINGS = ('1', 'yes', 'true', 'on', 'enable', 'search')
DISABLE_STRINGS = ('0', 'no', 'false', 'off', 'disable')
def _converter(val):
""" """
lval = val.lower()
if lval in ENABLE_STRINGS:
return True
if lval in DISABLE_STRINGS:
return False
return val
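# A quick illustration of the conversion above (not part of the SCons API, just
# the behaviour that follows from ENABLE_STRINGS/DISABLE_STRINGS):
#   _converter('yes')            -> True
#   _converter('no')             -> False
#   _converter('/usr/local/X11') -> '/usr/local/X11'   (kept as a path string)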
def _validator(key, val, env, searchfunc) -> None:
""" """
# NB: searchfunc is currently undocumented and unsupported
# TODO write validator, check for path
import os
if env[key] is True:
if searchfunc:
env[key] = searchfunc(key, val)
elif env[key] and not os.path.exists(val):
raise SCons.Errors.UserError(
'Path does not exist for option %s: %s' % (key, val))
def PackageVariable(key, help, default, searchfunc=None) -> Tuple[str, str, str, Callable, Callable]:
"""Return a tuple describing a package list SCons Variable.
The input parameters describe a 'package list' option. Returns
a tuple including the correct converter and validator appended.
The result is usable as input to :meth:`Add` .
A 'package list' option may either be 'all', 'none' or a pathname
string. This information is appended to *help*.
"""
# NB: searchfunc is currently undocumented and unsupported
help = '\n '.join(
(help, '( yes | no | /path/to/%s )' % key))
return (key, help, default,
lambda k, v, e: _validator(k, v, e, searchfunc),
_converter)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 32.740741
| 101
| 0.669683
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,643
| 0.747455
|
58c98a83a2ec15c194e9ea24edc51018cb47644a
| 8,351
|
py
|
Python
|
tests/test_protocol.py
|
kwikiel/edgedb
|
dac3f0c408bcf3fe9159b7f844d0186c54aaa3fd
|
[
"Apache-2.0"
] | null | null | null |
tests/test_protocol.py
|
kwikiel/edgedb
|
dac3f0c408bcf3fe9159b7f844d0186c54aaa3fd
|
[
"Apache-2.0"
] | null | null | null |
tests/test_protocol.py
|
kwikiel/edgedb
|
dac3f0c408bcf3fe9159b7f844d0186c54aaa3fd
|
[
"Apache-2.0"
] | null | null | null |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2020-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import edgedb
from edb.server import compiler
from edb import protocol
from edb.testbase.protocol.test import ProtocolTestCase
class TestProtocol(ProtocolTestCase):
async def test_proto_executescript_01(self):
# Test that ExecuteScript returns ErrorResponse immediately.
await self.con.connect()
await self.con.send(
protocol.ExecuteScript(
headers=[],
script='SELECT 1/0'
)
)
await self.con.recv_match(
protocol.ErrorResponse,
message='division by zero'
)
await self.con.recv_match(
protocol.ReadyForCommand,
transaction_state=protocol.TransactionState.NOT_IN_TRANSACTION,
)
# Test that the protocol has recovered.
await self.con.send(
protocol.ExecuteScript(
headers=[],
script='SELECT 1'
)
)
await self.con.recv_match(
protocol.CommandComplete,
status='SELECT'
)
await self.con.recv_match(
protocol.ReadyForCommand,
transaction_state=protocol.TransactionState.NOT_IN_TRANSACTION,
)
async def test_proto_executescript_02(self):
# Test ReadyForCommand.transaction_state
await self.con.connect()
await self.con.send(
protocol.ExecuteScript(
headers=[],
script='START TRANSACTION; SELECT 1/0'
)
)
await self.con.recv_match(
protocol.ErrorResponse,
message='division by zero'
)
await self.con.recv_match(
protocol.ReadyForCommand,
transaction_state=protocol.TransactionState.IN_FAILED_TRANSACTION,
)
# Test that the protocol is still in a failed transaction
await self.con.send(
protocol.ExecuteScript(
headers=[],
script='SELECT 1/0'
)
)
await self.con.recv_match(
protocol.ErrorResponse,
message='current transaction is aborted'
)
await self.con.recv_match(
protocol.ReadyForCommand,
transaction_state=protocol.TransactionState.IN_FAILED_TRANSACTION,
)
# Test recovery
await self.con.send(
protocol.ExecuteScript(
headers=[],
script='ROLLBACK'
)
)
await self.con.recv_match(
protocol.CommandComplete,
status='ROLLBACK'
)
await self.con.recv_match(
protocol.ReadyForCommand,
transaction_state=protocol.TransactionState.NOT_IN_TRANSACTION,
)
async def test_proto_flush_01(self):
await self.con.connect()
await self.con.send(
protocol.Prepare(
headers=[],
io_format=protocol.IOFormat.BINARY,
expected_cardinality=compiler.Cardinality.AT_MOST_ONE,
statement_name=b'',
command='SEL ECT 1',
)
)
# Should come through even without an explicit 'flush'
await self.con.recv_match(
protocol.ErrorResponse,
message="Unexpected 'SEL'"
)
# Recover the protocol state from the error
self.assertEqual(
await self.con.sync(),
protocol.TransactionState.NOT_IN_TRANSACTION)
# This Prepare should be handled alright
await self.con.send(
protocol.Prepare(
headers=[],
io_format=protocol.IOFormat.BINARY,
expected_cardinality=compiler.Cardinality.AT_MOST_ONE,
statement_name=b'',
command='SELECT 1',
),
protocol.Flush()
)
await self.con.recv_match(
protocol.PrepareComplete,
cardinality=compiler.Cardinality.AT_MOST_ONE,
)
# Test that Flush has completed successfully -- the
# command should be executed and no exception should
# be received.
# While at it, rogue ROLLBACK should be allowed.
await self.con.send(
protocol.ExecuteScript(
headers=[],
script='ROLLBACK'
)
)
await self.con.recv_match(
protocol.CommandComplete,
status='ROLLBACK'
)
await self.con.recv_match(
protocol.ReadyForCommand,
transaction_state=protocol.TransactionState.NOT_IN_TRANSACTION,
)
async def test_proto_connection_lost_cancel_query(self):
# This test is occasionally hanging - adding a timeout to find out why
await asyncio.wait_for(
self._test_proto_connection_lost_cancel_query(), 30
)
async def _test_proto_connection_lost_cancel_query(self):
# Prepare the test data
con2 = await edgedb.async_connect(**self.get_connect_args())
try:
await con2.execute(
'CREATE TYPE tclcq { CREATE PROPERTY p -> str }'
)
try:
await con2.execute("INSERT tclcq { p := 'initial' }")
# Ready the nested connection
await self.con.connect()
# Use an implicit transaction in the nested connection: lock
# the row with an UPDATE, and then hold the transaction for 10
# seconds, which is long enough for the upcoming cancellation
await self.con.send(
protocol.ExecuteScript(
headers=[],
script="""\
UPDATE tclcq SET { p := 'inner' };
SELECT sys::_sleep(10);
""",
)
)
# Sanity check - we shouldn't get anything here
with self.assertRaises(asyncio.TimeoutError):
await asyncio.wait_for(
self.con.recv_match(
protocol.CommandComplete,
status='UPDATE'
),
0.1,
)
# Close the nested connection without waiting for the result;
# the server is supposed to cancel the pending query.
await self.con.aclose()
# In the outer connection, let's wait until the lock is
# released by either an expected cancellation, or an unexpected
# commit after 10 seconds.
tx = con2.raw_transaction()
await tx.start()
try:
await tx.execute("UPDATE tclcq SET { p := 'lock' }")
except edgedb.TransactionSerializationError:
                    # In case the nested transaction succeeded, we'll hit a
                    # concurrent update error here, which can be safely ignored
pass
finally:
await tx.rollback()
# Let's check what's in the row - if the cancellation didn't
# happen, the test will fail with value "inner".
val = await con2.query_single('SELECT tclcq.p LIMIT 1')
self.assertEqual(val, 'initial')
finally:
# Clean up
await con2.execute(
"DROP TYPE tclcq"
)
finally:
await con2.aclose()
| 33.138889
| 79
| 0.554305
| 7,524
| 0.90097
| 0
| 0
| 0
| 0
| 7,457
| 0.892947
| 2,557
| 0.306191
|
58cc1d434d0ca910c890148d1eb3817d02e4f5af
| 278
|
py
|
Python
|
210125/homework_re_3.py
|
shadowsmain/pyton-adv
|
9562097b2d34c1b286c13cf0930fa06079532a67
|
[
"MIT"
] | null | null | null |
210125/homework_re_3.py
|
shadowsmain/pyton-adv
|
9562097b2d34c1b286c13cf0930fa06079532a67
|
[
"MIT"
] | null | null | null |
210125/homework_re_3.py
|
shadowsmain/pyton-adv
|
9562097b2d34c1b286c13cf0930fa06079532a67
|
[
"MIT"
] | null | null | null |
import re
RE_NUMBER_VALIDATOR = re.compile(r'^\d+[.,]\d+$')
def number_is_valid(number):
return RE_NUMBER_VALIDATOR.match(number)
assert number_is_valid('1.32')
assert number_is_valid('1,32')
assert not number_is_valid('asdasd1234')
assert not number_is_valid('22,a44')
| 21.384615
| 49
| 0.755396
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 47
| 0.169065
|
58cc767b16ca728bd586a1ff7e220380c8ce5e1a
| 3,529
|
py
|
Python
|
shp_code/prec_reformat.py
|
anahm/inferring-population-preferences
|
1eec9c6966e65c615f3cf5bd769ab121369b926d
|
[
"Unlicense"
] | 4
|
2016-10-29T12:10:48.000Z
|
2016-11-06T02:25:09.000Z
|
shp_code/prec_reformat.py
|
anahm/inferring-population-preferences
|
1eec9c6966e65c615f3cf5bd769ab121369b926d
|
[
"Unlicense"
] | null | null | null |
shp_code/prec_reformat.py
|
anahm/inferring-population-preferences
|
1eec9c6966e65c615f3cf5bd769ab121369b926d
|
[
"Unlicense"
] | null | null | null |
"""
prec_reformat.py
Takes state data and reformats it so that each line holds a precinct's voting
results and candidate cf-scores (rather than one line per candidate per precinct).
| prec_id | cf_score_0 | num_votes_0 | cf_score_1 | num_votes_1 |
"""
import math
import numpy as np
import pandas as pd
from prec_cd import prec_cd_main
from check_data import check_main
def convert_by_prec(old_df, state, year, dirname):
precs = []
years = []
cf_score_0 = []
num_votes_0 = []
cf_score_1 = []
num_votes_1 = []
# group by precinct (year assumed)
for key, group in old_df.groupby(['geoid']):
cf_iter = iter(group['cf_score'])
votes_iter = iter(group['num_votes'])
nxt_score = cf_iter.next()
if math.isnan(nxt_score):
nxt_score = 0
cf_0 = nxt_score
nv_0 = votes_iter.next()
try:
nxt_score = cf_iter.next()
if math.isnan(nxt_score):
nxt_score = 0
cf_1 = nxt_score
nv_1 = votes_iter.next()
# enforcing the idea that cfscore0 < cfscore1
precs.append(key)
if cf_1 < cf_0:
cf_score_0.append(cf_1)
num_votes_0.append(nv_1)
cf_score_1.append(cf_0)
num_votes_1.append(nv_0)
else:
cf_score_0.append(cf_0)
num_votes_0.append(nv_0)
cf_score_1.append(cf_1)
num_votes_1.append(nv_1)
except StopIteration:
# get rid of
pass
# use arrays to create dataframe
new_df = pd.DataFrame({
'cf_score_0': cf_score_0,
'num_votes_0': num_votes_0,
'cf_score_1': cf_score_1,
'num_votes_1': num_votes_1,
'geoid': precs},
index=None)
new_df['tot_votes'] = new_df['num_votes_0'] + new_df['num_votes_1']
new_df['midpoint'] = (new_df['cf_score_0'] + new_df['cf_score_1']) / 2.0
# write new dataframe out to csv
outfile = '%s/precline_%s_house_%s.csv' % (dirname, state, year)
new_df.to_csv(outfile)
return outfile
"""
data_clean()
Function to filter out rows that are not useful in our results
(precincts with zero votes and uncontested candidates).
# NOTE: overwrites the old file, since it is unnecessary
"""
def data_clean(precline_file):
df = pd.read_csv(precline_file, index_col = 0)
# remove all precincts with tot_votes == 0
df = df[df['tot_votes'] > 0]
# remove all uncontested candidates (cf_score_1 == 0)
df = df[df['cf_score_1'] != 0]
df.to_csv(precline_file, index=False)
"""
prec_reformat_main()
Function that does the bulk of the original main function and can be called
from the command line.
@param: state, year
@return: location of new precline file
"""
def prec_reformat_main(state, year):
prec_cd_main(state, year)
csv_dir = '../data/%s_data/%s_%s' % (state, state, year)
infile = '%s/%s_house_%s_final.csv' % (csv_dir, state, year)
outfile = '%s/precline_%s_house_%s.csv' % (csv_dir, state, year)
# read in file
old_df = pd.read_csv(infile)
convert_by_prec(old_df, state, year, csv_dir)
data_clean(outfile)
print 'Precinct data written to: %s' % outfile
rep_col = 't_USH_R_%s' % year
dem_col = 't_USH_D_%s' % year
check_main(outfile, state, year, rep_col, dem_col)
def main():
state = raw_input('State: ')
year = raw_input('Year: ')
prec_reformat_main(state, year)
if __name__ == "__main__":
main()
| 26.140741
| 83
| 0.616322
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,244
| 0.352508
|
58ce3480a9b43387f9f12525806c69631b6a2afa
| 1,668
|
py
|
Python
|
scripts/make_fasta.py
|
orionzhou/snk-rnaseq
|
5ead8aebf5ed00a2aec15363b8023c9b75b0ed4a
|
[
"MIT"
] | null | null | null |
scripts/make_fasta.py
|
orionzhou/snk-rnaseq
|
5ead8aebf5ed00a2aec15363b8023c9b75b0ed4a
|
[
"MIT"
] | null | null | null |
scripts/make_fasta.py
|
orionzhou/snk-rnaseq
|
5ead8aebf5ed00a2aec15363b8023c9b75b0ed4a
|
[
"MIT"
] | null | null | null |
from snakemake import shell
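# Note: `snakemake` below is not imported anywhere. Snakemake injects a global
# `snakemake` object (carrying input/output/params/threads/wildcards/config)
# when this file is executed through a rule's `script:` directive.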
input, output, params, threads, w, config = snakemake.input, snakemake.output, snakemake.params, snakemake.threads, snakemake.wildcards, snakemake.config
genome = w.genome
params.hybrid = config['x'][genome]['hybrid']
opt = params.opt
shell("""
rm -rf {output.fna}* {output.fai}*
rm -rf {output.chrom_bed} {output.chrom_size} {output.gap}
mkdir -p {params.wdir}/{params.odir}
cd {params.wdir}/{params.odir}
rm -rf raw.fna.* renamed* map* raw.sizes
""")
merge_tag = '--merge_short' if w.genome != 'Mt_R108' else ''
if params.hybrid:
shell("""
cat {input} > {params.wdir}/{params.odir}/renamed.fna
cd {params.wdir}/{params.odir}
fasta.py size renamed.fna > renamed.sizes
touch mapf.chain mapb.chain
""")
else:
params.gap = int(config['x'][genome]['gap'])
params.prefix = config['x'][genome]['prefix']
shell("""
cd {params.wdir}/{params.odir}
ln -sf ../download/raw.fna raw.fna
fasta.py size raw.fna > raw.sizes
fasta.py rename raw.fna renamed.fna mapf.bed mapb.bed \
--opt {params.opt} {merge_tag} \
--gap {params.gap} --prefix_chr {params.prefix}
fasta.py size renamed.fna > renamed.sizes
chain.py fromBed mapf.bed raw.sizes renamed.sizes > mapf.chain
chainSwap mapf.chain mapb.chain
""")
shell("""
cd {params.wdir}
ln -sf {params.odir}/renamed.fna 10_genome.fna
cd ..
samtools faidx {output.fna}
fasta.py size --bed {output.fna} > {output.chrom_bed}
cut -f1,3 {output.chrom_bed} > {output.chrom_size}
fasta.py gaps {output.fna} > {output.gap}
""")
| 32.076923
| 153
| 0.631894
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,233
| 0.739209
|
58ceaafc2d2819124d87eef37b783e73dcf0c336
| 2,358
|
py
|
Python
|
HanderCode/aidaiwangApp/aidaiwangApp/Login_aidaiwangApp.py
|
mocne/PycharmProjects
|
b009e530f4f01e5b1826bbe2364d86b65bcd66e3
|
[
"MIT"
] | null | null | null |
HanderCode/aidaiwangApp/aidaiwangApp/Login_aidaiwangApp.py
|
mocne/PycharmProjects
|
b009e530f4f01e5b1826bbe2364d86b65bcd66e3
|
[
"MIT"
] | null | null | null |
HanderCode/aidaiwangApp/aidaiwangApp/Login_aidaiwangApp.py
|
mocne/PycharmProjects
|
b009e530f4f01e5b1826bbe2364d86b65bcd66e3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import time
import xlrd
import Register_aidaiwangApp
import LogOut_aidiawangApp
def start_to_login(filename):
print(u'login')
driver = Register_aidaiwangApp.driver
driver.launch_app()
time.sleep(3)
try:
driver.find_element_by_id('cn.phaidai.loan:id/rb_mine')
print('id')
except:
try:
driver.find_element_by_android_uiautomator('new UiSelector().text(u"ๆ็")')
except:
return 'can not jump to mine'
else:
driver.find_element_by_android_uiautomator('new UiSelector().text(u"ๆ็")').click()
print('text')
else:
driver.find_element_by_id('cn.phaidai.loan:id/rb_mine').click()
try:
driver.find_element_by_id('cn.phaidai.loan:id/tv_click')
except:
try:
driver.find_element_by_id('cn.phaidai.loan:id/iv_avatar')
except:
return 'can not check status'
else:
driver.find_element_by_id('cn.phaidai.loan:id/iv_avatar').click()
else:
usernameLabel = driver.find_element_by_id('cn.phaidai.loan:id/tv_click')
loginfo = usernameLabel.text
while loginfo != u'็ซๅณ็ปๅฝ':
LogOut_aidiawangApp.start_to_logout()
usernameLabel.click()
currentAC = driver.current_activity
print(currentAC)
print(filename)
userData = xlrd.open_workbook(r'%s' % filename)
print('open user file success')
userSheet = userData.sheet_by_name('login')
loginName = str(userSheet.cell_value(1, 0))
loginPassword = str(userSheet.cell_value(1, 1))
print(loginName.split('.')[0], loginPassword)
try:
userNameLabel = driver.find_element_by_id('cn.phaidai.loan:id/et_login_name')
userNameLabel.clear()
userNameLabel.send_keys(loginName.split('.')[0])
except:
return 'can not input username : %s' % loginName.split('.')[0]
driver.find_element_by_id('cn.phaidai.loan:id/et_login_password').send_keys(loginPassword)
driver.find_element_by_id('cn.phaidai.loan:id/bt_login_into').click()
try:
driver.find_element_by_android_uiautomator('new UiSelector().text(u"้ฆ้กต")').click()
except:
driver.find_element_by_id('cn.phaidai.loan:id/rb_home').click()
| 33.685714
| 98
| 0.636556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 584
| 0.245585
|
58cf4798257428d881df3e8a39b178b627d7c8dd
| 389
|
py
|
Python
|
Python/Learning/Language/arg_ex.py
|
prynix/learning-programming
|
85aea40a61fb824a2b4e142331d9ac7971fef263
|
[
"MIT"
] | 2
|
2017-03-14T16:02:08.000Z
|
2017-05-02T13:48:18.000Z
|
Python/Learning/Language/arg_ex.py
|
CajetanP/learning-programming
|
85aea40a61fb824a2b4e142331d9ac7971fef263
|
[
"MIT"
] | 4
|
2021-05-20T21:10:13.000Z
|
2022-02-26T09:50:19.000Z
|
Python/Learning/Language/arg_ex.py
|
CajetanP/learning-programming
|
85aea40a61fb824a2b4e142331d9ac7971fef263
|
[
"MIT"
] | 1
|
2021-06-18T01:31:24.000Z
|
2021-06-18T01:31:24.000Z
|
from sys import argv
script, user_name = argv
prompt = '>'
print(user_name, script)
print("Do you like me " + user_name + "?")
likes = input(prompt)
print("Where do you live " + user_name + "?")
lives = input(prompt)
print("""
So you said {:s} about liking me.
You live in {:s}.
""".format(likes, lives))
print("Script: ", script)
age = int(input("Age? "))
print("Age*2: ", age*2)
| 16.208333
| 45
| 0.62982
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 131
| 0.336761
|
58cf698a07fcbc7df1f0d9ad44c0aa4f953a79ab
| 2,565
|
py
|
Python
|
tests/functional/test_uploads.py
|
jounile/nollanet
|
7bea20934d3f5e09658a9d31c3b05c15416398a0
|
[
"MIT"
] | 3
|
2019-10-13T08:37:13.000Z
|
2020-02-16T12:24:11.000Z
|
tests/functional/test_uploads.py
|
jounile/nollanet
|
7bea20934d3f5e09658a9d31c3b05c15416398a0
|
[
"MIT"
] | 5
|
2019-11-13T15:56:52.000Z
|
2021-04-30T20:58:19.000Z
|
tests/functional/test_uploads.py
|
jounile/nollanet
|
7bea20934d3f5e09658a9d31c3b05c15416398a0
|
[
"MIT"
] | 1
|
2020-04-08T21:09:52.000Z
|
2020-04-08T21:09:52.000Z
|
import io
import pytest
from requests import get
from urllib.parse import urljoin
def test_my_uploads_page(wait_for_api, login_user):
"""
GIVEN a user has logged in (login_user)
WHEN the '/my/uploads' page is navigated to (GET)
THEN check the response is valid and page title is correct
"""
request_session, api_url = wait_for_api
response = request_session.get(urljoin(api_url, '/my/uploads'))
assert response.status_code == 200
assert '<h1>My uploads</h1>' in response.text
def test_valid_new_upload_page(wait_for_api, login_user):
"""
GIVEN a user has logged in (login_user)
WHEN the '/media/newupload' page is navigated to (GET)
THEN check the response is valid and page title is correct
"""
request_session, api_url = wait_for_api
response = request_session.get(urljoin(api_url, '/media/newupload'))
assert response.status_code == 200
assert '<h1>New upload</h1>' in response.text
def test_invalid_new_upload_page(wait_for_api):
"""
GIVEN a user has not logged in
WHEN the '/media/newupload' page is navigated to (GET)
    THEN check the response is valid and the user is asked to log in first
"""
request_session, api_url = wait_for_api
response = request_session.get(urljoin(api_url, '/media/newupload'))
assert response.status_code == 200
assert '<div class="flash">Please login first</div>' in response.text
def test_new_upload(wait_for_api, login_user):
"""
GIVEN a user has logged in (login_user)
WHEN the '/media/newupload' page is posted an example image (POST)
THEN check the response is valid and the page title is correct
"""
example_file=open("./app/static/gfx/example.png","rb")
files = { 'file': example_file }
request_session, api_url = wait_for_api
response = request_session.post(urljoin(api_url, '/media/newupload'), files=files, allow_redirects=True)
assert response.status_code == 200
assert '<h1>My uploads</h1>' in response.text
#def test_remove_upload(wait_for_api, login_user):
# """
# GIVEN a user has logged in (login_user)
# WHEN the '/blob/delete' page is posted (POST)
# THEN check the response is valid and the user is logged in
# """
# valid_blob = dict(blob_path='images/*example.png', upload_id=2)
# request_session, api_url = wait_for_api
# response = request_session.post(urljoin(api_url, '/blob/delete'), data=valid_blob, allow_redirects=True)
# assert response.status_code == 200
# assert 'example.png was deleted successfully' in response.text
| 40.078125
| 109
| 0.71384
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,477
| 0.575828
|
58cfe77be2b1a529ec5b49496f3549cf64c84e22
| 1,107
|
py
|
Python
|
plugins/python/test/testCustomEntity.py
|
shotgunsoftware/cplusplus-api
|
576aab4ae266e37ba80da23f82fe9ed08b9894e4
|
[
"BSD-3-Clause"
] | 3
|
2015-04-04T03:08:52.000Z
|
2021-01-09T00:09:25.000Z
|
plugins/python/test/testCustomEntity.py
|
shotgunsoftware/cplusplus-api
|
576aab4ae266e37ba80da23f82fe9ed08b9894e4
|
[
"BSD-3-Clause"
] | null | null | null |
plugins/python/test/testCustomEntity.py
|
shotgunsoftware/cplusplus-api
|
576aab4ae266e37ba80da23f82fe9ed08b9894e4
|
[
"BSD-3-Clause"
] | 4
|
2015-04-04T03:08:57.000Z
|
2021-10-03T14:59:23.000Z
|
#!/usr/bin/env python
import sys
from shotgun import *
try:
if len(sys.argv) > 1:
sg = Shotgun(sys.argv[1])
else:
sg = Shotgun()
#################################################################
# Find CustomEntity01 entities
#################################################################
print "*" * 40, "findEntities - CustomEntity01", "*" * 40
for entity in sg.findEntities("CustomEntity01", FilterBy(), 5):
#print entity
#print "-" * 40
print "%s : %s" % (entity.sgProjectCode(), entity.getAttrValue("code"))
#################################################################
# Find CustomEntity02 entities
#################################################################
print "*" * 40, "findEntities - CustomEntity02", "*" * 40
for entity in sg.findEntities("CustomEntity02", FilterBy(), 5):
#print entity
#print "-" * 40
print "%s : %s" % (entity.sgProjectCode(), entity.getAttrValue("code"))
except SgError, e:
print "SgError:", e
except Exception, e:
print "Error:", e
| 31.628571
| 79
| 0.443541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 551
| 0.497742
|
58d09df8656249313c3aee99521da7185757d936
| 24,955
|
py
|
Python
|
word2vec_np/utils/data.py
|
mkserge/word2vec-np
|
6b53b7e3defd7c8333dcb9d9098e16502a9ce20f
|
[
"MIT"
] | 1
|
2021-11-22T11:30:10.000Z
|
2021-11-22T11:30:10.000Z
|
word2vec_np/utils/data.py
|
mkserge/word2vec-np
|
6b53b7e3defd7c8333dcb9d9098e16502a9ce20f
|
[
"MIT"
] | null | null | null |
word2vec_np/utils/data.py
|
mkserge/word2vec-np
|
6b53b7e3defd7c8333dcb9d9098e16502a9ce20f
|
[
"MIT"
] | null | null | null |
import word2vec_np.utils.checks as checks
import collections
import numpy as np
import math
import time
import logging
def get_sentences_from_file(train_file):
""" Returns a list of sentences from an input file.
Args:
train_file: A path to a file
Returns:
A list of sentences as they appear in the input.
"""
# Read the sentences from the input file (assumed to be a sentence per line)
sentences = [line.rstrip('\n') for line in open(train_file)]
return sentences
def get_words_from_file(train_file):
""" Returns a list of words from input sentences.
Args:
train_file: A path to a file
Returns:
A list of words as they appear in the input.
"""
words = []
sentences = get_sentences_from_file(train_file)
for sentence in sentences:
sentence_words = sentence.split()
words.extend(sentence_words)
return words
def get_words_from_sentences(sentences):
""" Returns a list of words from a list of sentences.
Args:
sentences: A list of sentences
Returns:
A list of words as they appear in the input.
"""
words = []
for sentence in sentences:
sentence_words = sentence.split()
words.extend(sentence_words)
return words
def get_indexed_sentences(sentences, dictionaries, downsample=True):
logger = logging.getLogger('main')
logger.info('Indexing input sentences...')
start_time = time.time()
num_words = 0
indexed_sentences = []
dictionary = dictionaries['dictionary']
keep_probs = dictionaries['keep_probs']
for sentence in sentences:
indexed_sentence = []
sentence_words = sentence.split()
for word in sentence_words:
word_ind = dictionary.get(word, 1)
# 'UNK' tokens are always removed as we don't train on them
if word_ind == 1:
continue
if downsample:
random_number = np.random.rand()
if keep_probs[word_ind - 2] < random_number:
continue
indexed_sentence.append(word_ind)
# Sentences consisting of a single word (or no words)
# are ignored since we cannot build training examples from them.
if len(indexed_sentence) > 1:
indexed_sentences.append(indexed_sentence)
num_words += len(indexed_sentence)
elapsed_time = time.time() - start_time
logger.info('Finished indexing input sentences in %d seconds' % elapsed_time)
return indexed_sentences, num_words
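# For example, with a (hypothetical) dictionary like {'PAD': 0, 'UNK': 1, 'the': 2, 'cat': 3},
# the sentence "the cat meowed" would typically be indexed as [2, 3]: 'meowed' maps to UNK and
# is dropped (frequent words may also be removed by downsampling), and sentences reduced to a
# single word are discarded because no (context, target) pair can be built from them.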
def save_word_counts(word_count, dict_file):
""" Saves the dictionary into a file.
The word_count and dictionary have the same ordering
except that dictionary has extra 'PAD' symbol at index 0
Args:
word_count: List of (word, count) tuples
dict_file: Path to the output file.
"""
dict_file = open(dict_file, 'w+')
for word, count in word_count:
dict_file.write(word + ' ' + str(count) + '\n')
dict_file.close()
def save_dictionary(word_count, dict_file):
"""Saves the dictionary into a file.
The word_count and dictionary have the same ordering
except that dictionary has extra 'PAD' symbol at index 0
Args:
word_count: List of (word, count) tuples
dict_file: Path to the output file.
"""
#
dict_file = open(dict_file, 'w+')
for word, _ in word_count:
dict_file.write(word + '\n')
dict_file.close()
def get_data(sentences, num_total_words, dictionaries, args):
""" Gets data ready for training.
Args:
sentences: list of training sentences
num_total_words: Total number of words in training corpus.
dictionaries: Dictionary of dictionary (urgh) and word counts.
args: Args passed to the script.
"""
logger = logging.getLogger('main')
logger.info('Building train data...')
# Get the relevant dictionaries
dictionary = dictionaries['dictionary']
word_count = dictionaries['word_count']
# If we want to use word2vec's dictionary swap here.
# This is for debugging only, to compare with embeddings
# generated from original word2vec.
if args.use_w2v_weights:
dictionary_w2v, word_count_w2v = get_w2v_dictionaries(num_total_words, args)
# Do some sanity checks
checks.check_word_counts(word_count, word_count_w2v)
checks.check_dictionaries(dictionary, dictionary_w2v)
# Swap the dictionaries
dictionary = dictionary_w2v
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
word_count = word_count_w2v
# See if we want to load pre-generated data instead of building it.
if args.load_data:
return np.load(args.x_file + '.npy'), np.load(args.y_file + '.npy'), np.load(args.yneg_file + '.npy')
# Get the probabilities of keeping the words during downsampling
keep_prob = get_keep_probs(word_count, num_total_words, args.ds_param)
# Dump the dictionary into a file.
save_word_counts(word_count, args.dict_file)
# Get the training data. This returns a list of ([context], target, [negative samples]) tuples.
train_data = get_train_data_with_sentence_downsampling(sentences, dictionaries, args)
# Break training data into arrays of context words, targets and negative samples.
x_train, y_train, y_neg = process_data(train_data, word_count, args)
logger.info('Finished building train data...')
# Dump the files to a file
np.save(args.x_file, x_train)
np.save(args.y_file, y_train)
np.save(args.yneg_file, y_neg)
return x_train, y_train, y_neg
def get_w2v_dictionaries(n_words, args):
# For comparison purposes, tracking w2v dictionary and word_counts here as well.
dictionary = {'PAD': 0, 'UNK': 1}
word_count = [('UNK', 0)]
n_known_words = 0
# Load the dictionary and word counts from word2vec run.
with open(args.w2v_dict_file) as vocab:
for line in vocab:
word, count = line.split()
dictionary[word] = len(dictionary)
word_count.append((word, int(count)))
n_known_words += int(count)
word_count[0] = ('UNK', n_words - n_known_words)
return dictionary, word_count
def get_dictionaries(words, args):
""" Returns a dictionary of dictionaries used in training.
Args:
words: A list of words from the training file.
args: The arguments passed on to the script.
Returns:
        A dictionary consisting of
        dictionary -- dictionary mapping words to indices.
        reversed_dictionary -- dictionary mapping indices to words.
        word_count -- list of (word, count) tuples ordered by frequency.
        keep_probs -- a list of probabilities of keeping each word during down-sampling.
        ns_probs -- a list of probabilities of each word being drawn during negative sampling.
"""
logger = logging.getLogger('main')
logger.info('Building dictionaries...')
start_time = time.time()
# List of (word, word_count) tuples
word_count = [('UNK', 0)]
# Total number of the words in the corpus
num_total_words = len(words)
# Sort the list of words by frequency and pick the top vocab_size ones
if args.vocab_size == 0:
# noinspection PyArgumentList
# vocab_size = 0 implies we take the entire vocabulary available from the corpus
word_count.extend(collections.Counter(words).most_common())
else:
# noinspection PyArgumentList
word_count.extend(collections.Counter(words).most_common(args.vocab_size - 1))
# Build the dictionary
dictionary = dict()
dictionary['PAD'] = 0
# num_vocab_words stores the number of words in the corpus that exist in our dictionary.
num_vocab_words = 0
for word, count in word_count:
num_vocab_words += count
dictionary[word] = len(dictionary)
# Update word count list
word_count[0] = ('UNK', num_total_words - num_vocab_words)
# Get the reversed dictionary.
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
# Get the negative sampling probabilities
ns_probs = get_ns_probs(word_count, args.ns_param)
# Get the probabilities of keeping the words during downsampling
keep_probs = get_keep_probs(word_count, num_total_words, args.ds_param)
dictionaries = {'dictionary': dictionary,
'reversed_dictionary': reversed_dictionary,
'word_count': word_count,
'ns_probs': ns_probs,
'keep_probs': keep_probs}
elapsed_time = time.time() - start_time
logger.info('Finished building dictionaries in %d seconds' % elapsed_time)
return dictionaries
def downsample_sentence(sentence_in, dictionaries):
""" Downsamples the training sentences exactly as in word2vec.
* Words not in the vocabulary are omitted.
* EOS symbols are also omitted.
Args:
sentence_in: The input sentence that will be downsampled
dictionaries: List of dictionaries
Returns:
The downsampled sentence
"""
dictionary = dictionaries['dictionary']
keep_probs = dictionaries['keep_probs']
sentence_out = []
sentence_words = sentence_in.split()
for ind, word in enumerate(sentence_words):
# Ignore the UNK words
if dictionary.get(word, 1) == 1:
continue
# Ignore the EOS word
if word == 'EOS':
continue
        # Sub-sample the frequent words. keep_probs is a list indexed by
        # (dictionary index - 2), as in get_keep_probs and get_indexed_sentences.
        random_number = np.random.rand()
        if keep_probs[dictionary.get(word) - 2] < random_number:
continue
sentence_out.append(word)
return ' '.join(sentence_out)
def get_train_data_with_sentence_downsampling(sentences, dictionaries, args):
""" This is the new implementation of get_train_data where the downsampling is done before building the context on
each sentence. The main differences with get_train_data_with_context_downsampling implementation are
* Downsampling is done before building context on each sentence.
* Context window size is downsized randomly for each sentence.
Args:
sentences: list of sentences in the training data
dictionaries: a list of dictionaries including
dictionary: dictionary of the vocabulary words mapping words to indices
reversed_dictionary: dictionary mapping indices to their corresponding words
word_count: a list of (word, word_count) tuples
ns_probs: dictionary of negative sampling probabilities
keep_prob: a dictionary mapping words to their probability of staying during downsampling
args: input args
Returns:
train_data: A list of (context, target, neg_samples) tuples
"""
logger = logging.getLogger('main')
train_data = []
# Get the required dictionaries
ns_probs = dictionaries['ns_probs']
dictionary = dictionaries['dictionary']
reversed_dictionary = dictionaries['reversed_dictionary']
num_processed_sentences = 0
num_total_sentences = len(sentences)
logger.info('Number of sentences: %d' % num_total_sentences)
for sentence in sentences:
# Note that the downsampled sentence will not contain 'UNK' or 'EOS' symbols.
sentence = downsample_sentence(sentence, dictionaries)
sentence_words = sentence.split()
num_processed_words = 0
num_total_words = len(sentence_words)
for ind, word in enumerate(sentence_words):
# Get the dictionary index for the given word. This is our target
# W2 matrix does not contain 'PAD' or 'UNK', so we shift the target index by two
target_ind = dictionary.get(word) - 2
# Build context for the current word in the sentence.
# Shrink context window by a random number
context_window = np.random.randint(1, args.context_window + 1)
context = []
for cont_ind in range(ind - context_window, ind + context_window + 1):
if cont_ind < 0:
continue
if cont_ind == ind:
continue
if cont_ind >= len(sentence_words):
continue
if dictionary.get(sentence_words[cont_ind], 1) == 1:
continue
context.append(dictionary.get(sentence_words[cont_ind]))
if len(context) != 0:
# If we are doing negative sampling, build a set of negative samples
neg_samples = []
if args.ns_param != 0:
# Pick neg_samples of negative samples.
while len(neg_samples) < args.num_neg_samples:
# Pick a random word from the dictionary (ignoring 'PAD', 'UNK' and 'EOS')
# according to probabilities stored in ns_prob table.
neg_ind = np.random.choice(np.arange(2, len(dictionary)), p=ns_probs)
# Ignore if the random pick is the EOS symbol, or the target index
if reversed_dictionary.get(neg_ind) == 'EOS' \
or neg_ind == target_ind \
or neg_ind in neg_samples:
continue
# W2 matrix does not contain 'PAD' or 'UNK', so we shift the dictionary by two
neg_samples.append(neg_ind - 2)
train_data.append((context, target_ind, neg_samples))
num_processed_words += 1
if num_processed_words % 1000 == 0:
logger.info('Processed words for sentence: %.3f%%' % (float(num_processed_words * 100) / num_total_words))
num_processed_sentences += 1
if num_processed_sentences % 1000 == 0:
logger.info('Processed sentences: %.3f%%' % (float(num_processed_sentences * 100) / num_total_sentences))
return train_data
def process_data(train_data, word_count, args):
# Find the size of the training examples
M = len(train_data)
# Get the dictionary size
V = len(word_count)
if args.num_neg_samples > 0:
# We are doing negative sampling
# x_train holds the entire training data, where each row represents context words for that training example.
x_train = np.zeros((M, 2 * args.context_window), dtype=np.int32)
# Each row in y_train represents the target label
y_train = np.zeros((M, 1), dtype=np.int32)
# each row in y_neg is a set of K negative examples for that training example.
y_neg = np.zeros((M, args.num_neg_samples), dtype=np.int32)
for index, (context, target, neg_samples) in enumerate(train_data):
# Fill the corresponding column of the x_train matrix
for cw_ind, cw in enumerate(context):
x_train[index, cw_ind] = cw
# Fill the corresponding column of the y_train matrix
y_train[index, 0] = target
# Fill the corresponding column of the y_neg matrix
for ind, neg_sample in enumerate(neg_samples):
y_neg[index, ind] = neg_sample
else:
# We are doing softmax
# x_train holds the entire training data, where each row represents context for one training example
x_train = np.zeros((M, V), dtype=np.float32)
# Each column in y_train represents the one-hot encoding of the target word
y_train = np.zeros((M, V), dtype=np.float32)
# each column in y_neg is a set of K negative examples for that training example.
y_neg = np.zeros((M, V), dtype=np.float32)
for index, (context, target, neg_samples) in enumerate(train_data):
# Fill the corresponding row of the x_train matrix
for cw in context:
x_train[index, cw] = 1
# Fill the corresponding row of the y_train matrix
y_train[index, target] = 1
# Fill the corresponding row of the y_neg matrix
for neg_sample in neg_samples:
y_neg[index, neg_sample] = 1
return x_train, y_train, y_neg
def get_ns_probs(word_count, ns_param):
""" Returns a list of the probabilities of picking each word as a negative sample.
List is ordered as word_count without the 'UNK' (this is not considered in any of these calculations).
:param word_count: The dictionary containing mappings from words to their count in the corpus.
:param ns_param: The negative sampling parameter used when building the probability distribution.
:return: A list of probabilities for each word.
"""
ns_probs = []
# Compute normalization constant so that probabilities add up to 1.
norm_const = 0
for word, count in word_count[1:]: # TODO: Think about this
norm_const += np.power(count, ns_param)
# Compute the probabilities for each word.
for word, count in word_count[1:]: # <- Skip 'UNK'
word_prob = np.power(count, ns_param) / norm_const
ns_probs.append(word_prob)
return ns_probs
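# Rough worked example for the distribution above (hypothetical counts, ns_param=0.75
# as in the original word2vec): with counts {'a': 81, 'b': 16} the unnormalised weights
# are 81**0.75 = 27 and 16**0.75 = 8, giving P(a) = 27/35 ~ 0.77 and P(b) = 8/35 ~ 0.23,
# flatter than the raw frequencies 0.84 / 0.16, so rare words are sampled a bit more often.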
def get_keep_probs(word_count, num_total_words, ds_param):
""" Returns a list of probabilities of keeping the corresponding words during downsampling
:param word_count: A list containing tuples of (word, word_count)
:param num_total_words: Total number of words in the corpus
:param ds_param: The downsampling parameter, used in the distribution
    :return: A list of keep probabilities, ordered like word_count without 'UNK'
"""
# Build the probabilities of keeping the words when downsampling
keep_prob = []
for word, count in word_count[1:]: # <- Ignore 'UNK'
# Compute the fraction of the words in the vocabulary that are the current word.
word_frac = float(count) / num_total_words
# Compute the probability of keeping the current word.
word_prob = (np.sqrt(word_frac / ds_param) + 1) * ds_param / word_frac
keep_prob.append(word_prob)
return keep_prob
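# Rough worked example for the keep probability above (hypothetical numbers, ds_param=1e-3
# as in word2vec's default "sample" setting): a word that makes up 1% of the corpus gets
# (sqrt(0.01/1e-3) + 1) * 1e-3 / 0.01 ~ 0.42, so roughly 58% of its occurrences are dropped,
# while a word at 0.01% of the corpus gets a value above 1 and is effectively always kept.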
def get_mini_batches(X, Y, YNEG, batch_size=64, shuffled=True):
"""Split the data into minibatches of batch_size
:param X: array containing the context words at each row
:param Y: array containing the target word at each row
:param YNEG: array containing the negative samples at each row
:param batch_size: size of the mini-batch
:param shuffled: If true, training examples will be shuffled before building mini-batches
:return: a list of mini-batches.
"""
logger = logging.getLogger('main')
logger.info('Processing into mini-batches...')
mini_batches = []
# Get the total number of training examples
n_training_examples = X.shape[0]
# If shuffled=True, shuffle X and Y
if shuffled:
permutation = list(np.random.permutation(n_training_examples))
X = X[permutation, :]
Y = Y[permutation, :]
YNEG = YNEG[permutation, :]
num_full_batches = int(math.floor(n_training_examples / batch_size))
for k in range(0, num_full_batches):
mini_batch_X = X[k * batch_size: (k + 1) * batch_size, :]
mini_batch_Y = Y[k * batch_size: (k + 1) * batch_size, :]
mini_batch_YNEG = YNEG[k * batch_size: (k + 1) * batch_size, :]
mini_batch = (mini_batch_X, mini_batch_Y, mini_batch_YNEG)
mini_batches.append(mini_batch)
if n_training_examples % batch_size != 0:
mini_batch_X = X[num_full_batches * batch_size:, :]
mini_batch_Y = Y[num_full_batches * batch_size:, :]
mini_batch_YNEG = YNEG[num_full_batches * batch_size:, :]
mini_batch = (mini_batch_X, mini_batch_Y, mini_batch_YNEG)
mini_batches.append(mini_batch)
logger.info('Finished processing mini-batches.')
return mini_batches
def get_negative_samples(target, num_neg_samples, dictionaries):
neg_samples = []
# Get the required dictionaries
dictionary = dictionaries['dictionary']
reversed_dictionary = dictionaries['reversed_dictionary']
ns_probs = dictionaries['ns_probs']
# Pick negative samples.
# * We do not want to pick 'PAD' or 'UNK' as negative samples from the dictionary.
# * W2 matrix does not contain 'PAD' or 'UNK' symbols (which is where we get our
# negative embeddings from), so our samples are shifted from the dictionary by two.
samples = np.arange(len(dictionary) - 2)
# Pick num_neg_samples of negative samples.
while len(neg_samples) < num_neg_samples:
# Pick a random word from the samples according
# to probabilities stored in ns_prob table.
neg_ind = np.random.choice(samples, p=ns_probs)
# Ignore if the random pick is the target index or has already been picked
# TODO: This is actually not strictly necessary.
if neg_ind == target or neg_ind in neg_samples:
continue
neg_samples.append(neg_ind)
# Alternatively, if we don't care about having target
# in negative samples we could do something like this:
# neg_samples = np.random.choice(samples, size=num_neg_samples, replace=False, p=ns_probs)
return neg_samples
sentence_index = 0
word_index = 0
def get_training_example(sentences, dictionaries, args):
""" Generates a single training example from the input sentences sequentially
(a.k.a. we keep track of positioning on the sentence and the target word)
:param sentences: A list of sentences, where each sentence is a list of word indices
:param dictionaries: The dictionaries built from corpus
:param args: Scripts arguments
:return: A tuple of ([context], target, [negative samples])
"""
logger = logging.getLogger('main')
global sentence_index
global word_index
current_sentence = sentences[sentence_index]
target = current_sentence[word_index] - 2
# Shrink context window by random amount
context_window = np.random.randint(1, args.context_window + 1)
context = []
low = max(word_index - context_window, 0)
high = min(word_index + context_window + 1, len(current_sentence))
for cont_ind in range(low, high):
# Target word cannot be part of context
if cont_ind == word_index:
continue
# Do not use 'UNK' words as context
# TODO: Remove this check if downsampling is applied
# if current_sentence[cont_ind] == 1:
# continue
context.append(current_sentence[cont_ind])
# Pad context with zeros
while len(context) < 2 * args.context_window:
context.append(0)
neg_samples = get_negative_samples(target, args.num_neg_samples, dictionaries)
# Advance the word_index to the next word
word_index += 1
# If we reached the end of the sentence, advance to next sentence and reset word index
if word_index >= len(current_sentence):
sentence_index += 1
word_index = 0
# If we reached the end of the sentences, reset sentence_index back to the first one
if sentence_index >= len(sentences):
sentence_index = 0
logger.info('Epoch completed.')
return context, target, neg_samples
def get_training_batch(sentences, dictionaries, args):
# Each row in x_train represent a context vector for a single training example
x_train = np.zeros(shape=(args.batch_size, 2 * args.context_window), dtype=np.int32)
# Each row in y_train represents the target label
y_train = np.zeros(shape=(args.batch_size, 1), dtype=np.int32)
# each row in y_neg is a set of K negative examples for that training example.
y_neg = np.zeros(shape=(args.batch_size, args.num_neg_samples), dtype=np.int32)
for i in range(args.batch_size):
context, target, neg_samples = get_training_example(sentences, dictionaries, args)
x_train[i, :] = context
y_train[i, :] = target
y_neg[i, :] = neg_samples
return x_train, y_train, y_neg
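# Sketch of how a training step might consume these batches; `args` is assumed to carry
# batch_size, context_window and num_neg_samples as elsewhere in this module, and
# `indexed_sentences`/`dictionaries` come from the helpers above:
#
#   x, y, y_neg = get_training_batch(indexed_sentences, dictionaries, args)
#   # x:     (batch_size, 2 * context_window) context indices, zero-padded
#   # y:     (batch_size, 1) target indices (already shifted by 2 for the W2 matrix)
#   # y_neg: (batch_size, num_neg_samples) negative-sample indices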
| 40.909836
| 123
| 0.640232
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11,317
| 0.453496
|
58d14d425be795bfa4409700edc4323d29494ae2
| 307
|
py
|
Python
|
nicegui/elements/row.py
|
florianwittkamp/nicegui
|
4c054f4e5b82e4ac56db93b73d5fb5ffcd480d06
|
[
"MIT"
] | 30
|
2021-06-16T15:46:45.000Z
|
2022-03-27T03:14:18.000Z
|
nicegui/elements/row.py
|
florianwittkamp/nicegui
|
4c054f4e5b82e4ac56db93b73d5fb5ffcd480d06
|
[
"MIT"
] | 11
|
2021-05-24T17:05:22.000Z
|
2022-02-19T07:13:18.000Z
|
nicegui/elements/row.py
|
florianwittkamp/nicegui
|
4c054f4e5b82e4ac56db93b73d5fb5ffcd480d06
|
[
"MIT"
] | 7
|
2021-07-22T05:51:04.000Z
|
2022-01-31T19:39:37.000Z
|
import justpy as jp
from .group import Group
class Row(Group):
def __init__(self):
'''Row Element
        Provides a container which arranges its children in a row.
'''
view = jp.QDiv(classes='row items-start', style='gap: 1em', delete_flag=False)
super().__init__(view)
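# Minimal usage sketch. This assumes the package exposes Row through the usual
# `ui.row()` factory and that Group provides `with`-style nesting; the label calls
# are illustrative only:
#
#   from nicegui import ui
#   with ui.row():
#       ui.label('left')
#       ui.label('right')
#   ui.run()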
| 23.615385
| 86
| 0.628664
| 260
| 0.846906
| 0
| 0
| 0
| 0
| 0
| 0
| 118
| 0.384365
|
58d1b1562239fddc199cba78a4c7fd5ac432e0af
| 102
|
py
|
Python
|
src/mtvs/__init__.py
|
digsim/mtvs
|
d89d12d4cd65eafe732226e588a54874123db7f4
|
[
"Apache-2.0"
] | 2
|
2017-11-19T05:51:31.000Z
|
2020-01-22T08:12:53.000Z
|
src/mtvs/__init__.py
|
digsim/mtvs
|
d89d12d4cd65eafe732226e588a54874123db7f4
|
[
"Apache-2.0"
] | 3
|
2015-12-03T00:34:46.000Z
|
2016-01-04T15:49:14.000Z
|
src/mtvs/__init__.py
|
digsim/missingTvShows
|
f17660dc965c7a6eef1b0cfad9577d62087cba56
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import pkg_resources
__version__ = pkg_resources.require("mtvs")[0].version
| 17
| 54
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 29
| 0.284314
|
58d37fec96977e11aa6010c2276ce5434c4fc6f8
| 452
|
py
|
Python
|
tests/guinea-pigs/unittest/expected_failure.py
|
Tirzono/teamcity-messages
|
e7f7334e2956a9e707222e4c83de9ffeb15b8ac0
|
[
"Apache-2.0"
] | 105
|
2015-06-24T15:40:41.000Z
|
2022-02-04T10:30:34.000Z
|
tests/guinea-pigs/unittest/expected_failure.py
|
Tirzono/teamcity-messages
|
e7f7334e2956a9e707222e4c83de9ffeb15b8ac0
|
[
"Apache-2.0"
] | 145
|
2015-06-24T15:26:28.000Z
|
2022-03-22T20:04:19.000Z
|
tests/guinea-pigs/unittest/expected_failure.py
|
Tirzono/teamcity-messages
|
e7f7334e2956a9e707222e4c83de9ffeb15b8ac0
|
[
"Apache-2.0"
] | 76
|
2015-07-20T08:18:21.000Z
|
2022-03-18T20:03:53.000Z
|
# coding=utf-8
import sys
from teamcity.unittestpy import TeamcityTestRunner
if sys.version_info < (2, 7):
from unittest2 import main, TestCase, expectedFailure
else:
from unittest import main, TestCase, expectedFailure
class TestSkip(TestCase):
def test_expected_failure(self):
self.fail("this should happen unfortunately")
test_expected_failure = expectedFailure(test_expected_failure)
main(testRunner=TeamcityTestRunner)
| 26.588235
| 66
| 0.783186
| 183
| 0.404867
| 0
| 0
| 0
| 0
| 0
| 0
| 48
| 0.106195
|
58d426717d52de8aec95bf782518c3dd7fa7dafe
| 877
|
py
|
Python
|
main.py
|
GabrielIFPB/wishlist-fastapi
|
1d39bf4c65dcc4491d0836487a218e67dbb07a80
|
[
"MIT"
] | null | null | null |
main.py
|
GabrielIFPB/wishlist-fastapi
|
1d39bf4c65dcc4491d0836487a218e67dbb07a80
|
[
"MIT"
] | null | null | null |
main.py
|
GabrielIFPB/wishlist-fastapi
|
1d39bf4c65dcc4491d0836487a218e67dbb07a80
|
[
"MIT"
] | null | null | null |
import uvicorn
from fastapi import FastAPI
from database import Base, engine
from routers.user import router as router_user
from routers.product import router as router_product
from routers.authentication import router as router_auth
app = FastAPI(
title="Wish List",
description="Permita que seus clientes acompanhem seus produtos favoritos, adicionando-os a uma lista de desejos.",
version="1.0.0",
)
Base.metadata.create_all(engine)
@app.get('/')
def index():
"""
"welcome": "Wish List",
"documentation": "127.0.0.1:8000/docs ou 127.0.0.1:8000/redoc"
"""
return {
"welcome": "Wish List",
"documentation": "127.0.0.1:8000/docs ou 127.0.0.1:8000/redoc"
}
app.include_router(router_auth)
app.include_router(router_product)
app.include_router(router_user)
if __name__ == "__main__":
uvicorn.run("main:app", host="127.0.0.1", port=8000, reload=True)
| 21.925
| 116
| 0.735462
| 0
| 0
| 0
| 0
| 233
| 0.265678
| 0
| 0
| 332
| 0.378563
|
58d441702771292f5be7e698cfa7a42a16e08886
| 1,605
|
py
|
Python
|
libs/data_layers/transform.py
|
lsc25846/Wildlife-Recognition-System
|
81d8afdf4a50bc94bf5e1952bfce1b5a9c4c6bd2
|
[
"MIT"
] | null | null | null |
libs/data_layers/transform.py
|
lsc25846/Wildlife-Recognition-System
|
81d8afdf4a50bc94bf5e1952bfce1b5a9c4c6bd2
|
[
"MIT"
] | null | null | null |
libs/data_layers/transform.py
|
lsc25846/Wildlife-Recognition-System
|
81d8afdf4a50bc94bf5e1952bfce1b5a9c4c6bd2
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
import torch
import cv2
import numpy as np
import pdb
def detection_collate(batch):
"""Custom collate fn for dealing with batches of images that have a different
number of associated object annotations (bounding boxes).
Arguments:
batch: (tuple) A tuple of tensor images and lists of annotations
Return:
A tuple containing:
1) (tensor) batch of images stacked on their 0 dim
2) (tensor) [batch, num_gt, 5]
batch of annotations stacked on their 0 dim
annotations for a given image are stacked on 1 dim
"""
targets = []
imgs = []
# numpy array
num_gts = [sample[1].shape[0] for sample in batch]
max_num_gt = max(num_gts)
for sample in batch:
imgs.append(sample[0])
size_gt = sample[1].shape
num_gt = size_gt[0]
aug_size = list(size_gt[:])
aug_size[0] = max_num_gt
aug_gt = np.zeros(aug_size, dtype=sample[1].dtype)
aug_gt[:num_gt] = sample[1]
targets.append(torch.FloatTensor(aug_gt))
return torch.stack(imgs, 0), torch.stack(targets, 0)
def base_transform(image, size, mean):
x = cv2.resize(image, (size, size)).astype(np.float32)
x -= mean
x = x.astype(np.float32)
return x
class BaseTransform:
"""
For evaluation and testing.
"""
def __init__(self, size, mean):
self.size = size
self.mean = np.array(mean, dtype=np.float32)
def __call__(self, image, boxes=None, labels=None):
return base_transform(image, self.size, self.mean), boxes, labels
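A short, hypothetical usage sketch for detection_collate, assuming the module above is in scope (for example imported from libs.data_layers.transform); the toy dataset exists only for illustration:

# Illustrative sketch: pad variable-length ground-truth boxes into one batch
# tensor via detection_collate (defined above) and a toy in-memory dataset.
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset

class ToyDetectionDataset(Dataset):
    def __init__(self):
        # two fake images with 1 and 3 ground-truth boxes respectively
        self.samples = [
            (torch.zeros(3, 32, 32), np.zeros((1, 5), dtype=np.float32)),
            (torch.zeros(3, 32, 32), np.zeros((3, 5), dtype=np.float32)),
        ]

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        return self.samples[idx]

loader = DataLoader(ToyDetectionDataset(), batch_size=2, collate_fn=detection_collate)
images, targets = next(iter(loader))
print(images.shape, targets.shape)  # torch.Size([2, 3, 32, 32]) torch.Size([2, 3, 5])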
| 29.181818
| 81
| 0.633022
| 313
| 0.195016
| 0
| 0
| 0
| 0
| 0
| 0
| 575
| 0.358255
|
58d6ba044f8814b989985c1b13e416f82125fe24
| 2,273
|
py
|
Python
|
rllab/torch/algos/base.py
|
NeurIPSPaperSubmission7934/code_submission
|
713fce673e8e3ba30b559d4eebe6d3e4891069ed
|
[
"Apache-2.0"
] | null | null | null |
rllab/torch/algos/base.py
|
NeurIPSPaperSubmission7934/code_submission
|
713fce673e8e3ba30b559d4eebe6d3e4891069ed
|
[
"Apache-2.0"
] | null | null | null |
rllab/torch/algos/base.py
|
NeurIPSPaperSubmission7934/code_submission
|
713fce673e8e3ba30b559d4eebe6d3e4891069ed
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018 Copyright holder of the paper Generative Adversarial Model Learning
# submitted to NeurIPS 2019 for review
# All rights reserved.
import numpy as np
import torch
class Optimizer(object):
def __init__(self, policy, use_gpu=False):
self.networks = self._init_networks(policy.input_dim, policy.output_dim)
networks = self.networks.copy()
networks['policy'] = policy
self.optimizers = self._init_optimizers(networks)
self.use_gpu = use_gpu
if self.use_gpu:
self.networks = {k: v.cuda() for k, v in self.networks.items()}
@classmethod
def _init_networks(cls, obs_dim, action_dim):
raise NotImplementedError
def process_batch(self, policy, batch, update_policy_args):
states, actions, rewards, masks = unpack_batch(batch)
if self.use_gpu:
states, actions, rewards, masks = map(
lambda x: x.cuda(), [states, actions, rewards, masks])
policy = self.update_networks(
policy, actions, masks, rewards, states,
batch["num_episodes"], *update_policy_args)
return policy
def update_networks(self, policy,
actions, masks, rewards, states, num_episodes,
*args, **step_kwargs):
raise NotImplementedError
@staticmethod
def _init_optimizers(networks, lr_rates=None):
return init_optimizers(networks, lr_rates=lr_rates)
def init_optimizers(networks, lr_rates=None):
args = {key: [network] for key, network in networks.items()}
if lr_rates is not None:
for key in args.keys():
args[key].append(lr_rates[key])
optimizers = {key: init_optimizer(*args[key])
for key in networks.keys()}
return optimizers
def unpack_batch(batch):
states = torch.from_numpy(np.array(batch["states"], dtype=np.float32))
rewards = torch.from_numpy(np.array(batch["rewards"], dtype=np.float32))
masks = torch.from_numpy(np.array(batch["masks"], dtype=np.float32))
actions = torch.from_numpy(np.array(batch["actions"]))
return states, actions, rewards, masks
def init_optimizer(network, lr_rate=0.01):
return torch.optim.Adam(network.parameters(), lr=lr_rate)
| 33.925373
| 88
| 0.661681
| 1,287
| 0.566212
| 0
| 0
| 220
| 0.096788
| 0
| 0
| 203
| 0.089309
|
58d6f86c63a774052533d60d46858c6d938085a4
| 915
|
py
|
Python
|
lab-4.py
|
PavelKovalets/python-reviewer-test
|
a4d489482f596570abd5d34677f7549e1b724c8e
|
[
"MIT"
] | null | null | null |
lab-4.py
|
PavelKovalets/python-reviewer-test
|
a4d489482f596570abd5d34677f7549e1b724c8e
|
[
"MIT"
] | null | null | null |
lab-4.py
|
PavelKovalets/python-reviewer-test
|
a4d489482f596570abd5d34677f7549e1b724c8e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import boto3
source_ddb = boto3.resource('dynamodb', 'us-east-1')
dest_ddb = boto3.client('dynamodb', 'us-west-2')
def sync(source_ddb, dest_ddb):
table = source_ddb.Table("CodeGuru-MusicCollection")
scan_kwargs = {
'ProjectionExpression': "Artist, SongTitle"
}
done = False
start_key = None
while not done:
if start_key:
scan_kwargs['ExclusiveStartKey'] = start_key
response = table.scan(**scan_kwargs)
for item in response['Items']:
newItem = { 'Artist': {}, 'SongTitle': {} }
newItem['Artist']['S'] = item['Artist']
newItem['SongTitle']['S'] = item['SongTitle']
dest_ddb.put_item(TableName="CodeGuru-MusicCollection", Item=newItem)
print(item)
start_key = response.get('LastEvaluatedKey', None)
done = start_key is None
sync(source_ddb, dest_ddb)
| 33.888889
| 81
| 0.619672
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 260
| 0.284153
|
58d7f0d0c28563ede738e4f0ad2b23de85fe0aac
| 2,492
|
py
|
Python
|
core/reports/views.py
|
jilbertozamorasaa/panda-bigmon-core
|
4e38411489e9ca538a000df43aed5280a72b4342
|
[
"Apache-2.0"
] | 3
|
2017-07-19T18:29:23.000Z
|
2021-11-18T04:57:18.000Z
|
core/reports/views.py
|
jilbertozamorasaa/panda-bigmon-core
|
4e38411489e9ca538a000df43aed5280a72b4342
|
[
"Apache-2.0"
] | 3
|
2021-09-01T15:20:21.000Z
|
2022-03-12T01:02:51.000Z
|
core/reports/views.py
|
jilbertozamorasaa/panda-bigmon-core
|
4e38411489e9ca538a000df43aed5280a72b4342
|
[
"Apache-2.0"
] | 9
|
2015-11-17T15:52:23.000Z
|
2021-09-07T12:23:16.000Z
|
"""
"""
import json
from django.views.decorators.cache import never_cache
from django.http import HttpResponse
from django.shortcuts import render_to_response
from core.views import initRequest, DateEncoder
from core.reports import MC16aCPReport, ObsoletedTasksReport, TitanProgressReport
@never_cache
def report(request):
initRequest(request)
step = 0
response = None
if 'requestParams' in request.session and 'campaign' in request.session['requestParams'] and request.session['requestParams']['campaign'].upper() == 'MC16':
reportGen = MC16aCPReport.MC16aCPReport()
response = reportGen.prepareReportJEDI(request)
return response
if 'requestParams' in request.session and 'campaign' in request.session['requestParams'] and request.session['requestParams']['campaign'].upper() == 'MC16C':
reportGen = MC16aCPReport.MC16aCPReport()
response = reportGen.prepareReportJEDIMC16c(request)
return response
if 'requestParams' in request.session and 'campaign' in request.session['requestParams'] and request.session['requestParams']['campaign'].upper() == 'MC16A' and 'type' in request.session['requestParams'] and request.session['requestParams']['type'].upper() == 'DCC':
reportGen = MC16aCPReport.MC16aCPReport()
resp = reportGen.getDKBEventsSummaryRequestedBreakDownHashTag(request)
dump = json.dumps(resp, cls=DateEncoder)
return HttpResponse(dump, content_type='application/json')
if 'requestParams' in request.session and 'obstasks' in request.session['requestParams']:
reportGen = ObsoletedTasksReport.ObsoletedTasksReport()
response = reportGen.prepareReport(request)
return response
if 'requestParams' in request.session and 'titanreport' in request.session['requestParams']:
reportGen = TitanProgressReport.TitanProgressReport()
response = reportGen.prepareReport(request)
return response
if 'requestParams' in request.session and 'step' in request.session['requestParams']:
step = int(request.session['requestParams']['step'])
if step == 0:
response = render_to_response('reportWizard.html', {'nevents': 0}, content_type='text/html')
else:
if 'reporttype' in request.session['requestParams'] and request.session['requestParams']['reporttype'] == 'rep0':
reportGen = MC16aCPReport.MC16aCPReport()
response = reportGen.prepareReport()
return response
| 43.719298
| 270
| 0.72191
| 0
| 0
| 0
| 0
| 2,197
| 0.881621
| 0
| 0
| 527
| 0.211477
|
58d8fe58ae3d14e3614960efa20628276cc29e39
| 4,594
|
py
|
Python
|
xain/fl/participant/participant.py
|
danieljanes/ox-msc-diss-code-freeze
|
20c6881cabdf1e3ed7a9ddb40bbdcc7a7fd22f78
|
[
"Apache-2.0"
] | 1
|
2020-05-30T20:34:19.000Z
|
2020-05-30T20:34:19.000Z
|
xain/fl/participant/participant.py
|
danieljanes/ox-msc-diss-code-freeze
|
20c6881cabdf1e3ed7a9ddb40bbdcc7a7fd22f78
|
[
"Apache-2.0"
] | null | null | null |
xain/fl/participant/participant.py
|
danieljanes/ox-msc-diss-code-freeze
|
20c6881cabdf1e3ed7a9ddb40bbdcc7a7fd22f78
|
[
"Apache-2.0"
] | null | null | null |
from typing import Dict, List, Tuple
import numpy as np
import tensorflow as tf
from absl import logging
from xain.datasets import prep
from xain.types import History, Metrics, Partition, Theta, VolumeByClass
from .model_provider import ModelProvider
class Participant:
# pylint: disable-msg=too-many-arguments
# pylint: disable=too-many-instance-attributes
def __init__(
self,
cid: int,
model_provider: ModelProvider,
xy_train: Partition,
xy_val: Partition,
num_classes: int,
batch_size: int,
use_lr_fn: bool = True,
) -> None:
assert xy_train[0].shape[0] == xy_train[1].shape[0]
assert xy_val[0].shape[0] == xy_val[1].shape[0]
self.cid = cid
self.model_provider = model_provider
self.num_classes: int = num_classes
self.batch_size: int = batch_size
self.use_lr_fn: bool = use_lr_fn
self.num_examples = xy_train[0].shape[0]
# Training set
self.xy_train = xy_train
self.steps_train: int = int(xy_train[0].shape[0] / batch_size)
# Validation set
self.xy_val = xy_val
self.steps_val: int = 1
def train_round(
self, theta: Theta, epochs: int, epoch_base: int
) -> Tuple[Tuple[Theta, int], History, Dict]:
logging.info(
f"Participant {self.cid}: train_round START (epoch_base: {epoch_base})"
)
model = self.model_provider.init_model(epoch_base=epoch_base) # type:ignore
model.set_weights(theta)
callbacks: List = []
if self.use_lr_fn:
lr_fn = self.model_provider.init_lr_fn(epoch_base=epoch_base) # type:ignore
callback_lr = tf.keras.callbacks.LearningRateScheduler(lr_fn)
callbacks = [callback_lr]
hist: History = self.fit(model, epochs, callbacks)
theta_prime = model.get_weights()
opt_config = model.optimizer.get_config()
opt_config = convert_numpy_types(opt_config)
logging.info("Participant {}: train_round FINISH".format(self.cid))
return (theta_prime, self.num_examples), hist, opt_config
def fit(self, model: tf.keras.Model, epochs: int, callbacks: List) -> History:
ds_train = prep.init_ds_train(self.xy_train, self.num_classes, self.batch_size)
ds_val = prep.init_ds_val(self.xy_val, self.num_classes)
callback_logging = LoggingCallback(str(self.cid), logging.info)
callbacks.append(callback_logging)
hist = model.fit(
ds_train,
epochs=epochs,
validation_data=ds_val,
callbacks=callbacks,
shuffle=False, # Shuffling is handled via tf.data.Dataset
steps_per_epoch=self.steps_train,
validation_steps=self.steps_val,
verbose=0,
)
return cast_to_float(hist.history)
def evaluate(self, theta: Theta, xy_test: Partition) -> Tuple[float, float]:
model = self.model_provider.init_model()
model.set_weights(theta)
ds_val = prep.init_ds_val(xy_test)
# Assume the validation `tf.data.Dataset` to yield exactly one batch containing
# all examples in the validation set
loss, accuracy = model.evaluate(ds_val, steps=1, verbose=0)
return loss, accuracy
def metrics(self) -> Metrics:
vol_by_class = xy_train_volume_by_class(self.num_classes, self.xy_train)
return (self.cid, vol_by_class)
def xy_train_volume_by_class(num_classes: int, xy_train) -> VolumeByClass:
counts = [0] * num_classes
_, y = xy_train
classes, counts_actual = np.unique(y, return_counts=True)
for i_ca, c in enumerate(classes):
        # Cast explicitly to int so it is JSON serializable later;
        # otherwise we would get a list of np.int64 objects
counts[c] = int(counts_actual[i_ca])
return counts
def cast_to_float(hist) -> History:
for key in hist:
for index, number in enumerate(hist[key]):
hist[key][index] = float(number)
return hist
def convert_numpy_types(opt_config: Dict) -> Dict:
for key in opt_config:
if isinstance(opt_config[key], np.float32):
opt_config[key] = opt_config[key].item()
return opt_config
class LoggingCallback(tf.keras.callbacks.Callback):
def __init__(self, cid: str, print_fn):
tf.keras.callbacks.Callback.__init__(self)
self.cid = cid
self.print_fn = print_fn
def on_epoch_end(self, epoch, logs=None):
self.print_fn(f"CID {self.cid} epoch {epoch}")
| 35.068702
| 88
| 0.653243
| 3,532
| 0.768829
| 0
| 0
| 0
| 0
| 0
| 0
| 549
| 0.119504
|
58db0a434e3091024b2614aa6f89111b6536e4cd
| 1,380
|
py
|
Python
|
client/animation/qr_code.py
|
Nurgak/IoT-RGB-LED-Matrix-Socket
|
e03ce4c2e2d77a6939662aad4ac92fbf9bdea77c
|
[
"MIT"
] | 1
|
2022-01-26T09:01:44.000Z
|
2022-01-26T09:01:44.000Z
|
client/animation/qr_code.py
|
Nurgak/IoT-RGB-LED-Matrix-Socket
|
e03ce4c2e2d77a6939662aad4ac92fbf9bdea77c
|
[
"MIT"
] | 3
|
2021-12-28T10:29:02.000Z
|
2022-01-06T03:01:08.000Z
|
client/animation/qr_code.py
|
Nurgak/IoT-RGB-LED-Matrix-Socket
|
e03ce4c2e2d77a6939662aad4ac92fbf9bdea77c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""! QR code generator animation script."""
import numpy as np
import qrcode
from src.animate import Animate
class QRCode(Animate):
"""! QR code generator animation class.
@image html qr_code.QRCode.png width=256px
Animation displaying a static QR-code-encoded-text.
"""
__CODE_COLOR = (0xFF, 0xFF, 0xFF)
def __init__(self, shape: tuple, *args: list, **kwargs: dict):
super().__init__(shape)
self.__text = kwargs["text"]
assert self.__text != "", "Text is empty."
def draw(self):
qr_code = qrcode.QRCode(
version=1,
# error_correction=qrcode.constants.ERROR_CORRECT_L,
box_size=1,
border=0,
)
qr_code.add_data(self.__text)
qr_code.make(fit=True)
qr_pil = qr_code.make_image(fill_color=self.__CODE_COLOR, back_color=(0, 0, 0))
qr_np = np.array(qr_pil)
assert (
qr_np.shape <= self._screen.shape
), f"[{self.__class__.__name__}] QR code too large."
# Center the code on the screen.
offset_y = (self._screen.shape[0] - qr_np.shape[0]) // 2
offset_x = (self._screen.shape[1] - qr_np.shape[1]) // 2
        self._screen[
            offset_y : qr_np.shape[0] + offset_y, offset_x : qr_np.shape[1] + offset_x
        ] = qr_np
yield self._screen
| 30
| 87
| 0.602899
| 1,245
| 0.902174
| 835
| 0.605072
| 0
| 0
| 0
| 0
| 372
| 0.269565
|
58db8c9e99f143cbab9455bc35570eeeb524d210
| 483
|
py
|
Python
|
tests/test_xiaochengtu.py
|
lamzuzuzu/yxf_yixue_py
|
90eb077f195b543f93a507f28b0a4c016cb0c92f
|
[
"MIT"
] | 20
|
2019-01-08T08:13:39.000Z
|
2021-12-23T09:04:14.000Z
|
tests/test_xiaochengtu.py
|
lamzuzuzu/yxf_yixue_py
|
90eb077f195b543f93a507f28b0a4c016cb0c92f
|
[
"MIT"
] | null | null | null |
tests/test_xiaochengtu.py
|
lamzuzuzu/yxf_yixue_py
|
90eb077f195b543f93a507f28b0a4c016cb0c92f
|
[
"MIT"
] | 13
|
2019-04-22T03:25:13.000Z
|
2022-01-04T05:43:48.000Z
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))  # i.e., the project root directory
sys.path.append(BASE_DIR)
import datetime
from yxf_yixue.xiaochengtu import XiaochengtuApi
if __name__ == '__main__':
string = '1996/02/29 23:16'
obj = datetime.datetime(2012, 3, 7, 17, 40)
a = XiaochengtuApi()
res1 = a.paipan(obj)
print(res1)
a.print_pan()
res2 = a.get_chuantongfenxi()
print(res2)
| 24.15
| 84
| 0.681159
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 101
| 0.200795
|
58dc98f64796c7f6f0664ca055829713dcb9192e
| 3,662
|
py
|
Python
|
_Dist/NeuralNetworks/b_TraditionalML/MultinomialNB.py
|
leoatchina/MachineLearning
|
071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3
|
[
"MIT"
] | 1,107
|
2016-09-21T02:18:36.000Z
|
2022-03-29T02:52:12.000Z
|
_Dist/NeuralNetworks/b_TraditionalML/MultinomialNB.py
|
leoatchina/MachineLearning
|
071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3
|
[
"MIT"
] | 18
|
2016-12-22T10:24:47.000Z
|
2022-03-11T23:18:43.000Z
|
_Dist/NeuralNetworks/b_TraditionalML/MultinomialNB.py
|
leoatchina/MachineLearning
|
071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3
|
[
"MIT"
] | 776
|
2016-12-21T12:08:08.000Z
|
2022-03-21T06:12:08.000Z
|
import numpy as np
from sklearn.preprocessing import OneHotEncoder
class MultinomialNB:
""" Naive Bayes algorithm with discrete inputs
Parameters
----------
alpha : float, optional (default=1.)
        Smoothing parameter used in Naive Bayes; default is 1, which corresponds to Laplace smoothing
Attributes
----------
enc : OneHotEncoder
One-Hot encoder used to transform (discrete) inputs
class_log_prior : np.ndarray of float
Log class prior used to calculate (linear) prediction
feature_log_prob : np.ndarray of float
Feature log probability used to calculate (linear) prediction
Examples
--------
>>> import numpy as np
>>> x = np.random.randint(0, 10, [1000, 10]) # Generate feature vectors
>>> y = np.random.randint(0, 5, 1000) # Generate labels
>>> nb = MultinomialNB().fit(x, y) # fit the model
>>> nb.predict(x) # (linear) prediction
>>> nb.predict_class(x) # predict labels
"""
def __init__(self, alpha=1.):
self.alpha = alpha
self.enc = self.class_log_prior = self.feature_log_prob = None
def fit(self, x, y, do_one_hot=True):
""" Fit the model with x & y
Parameters
----------
x : {list of float, np.ndarray of float}
Feature vectors used for training
Note: features are assumed to be discrete
y : {list of float, np.ndarray of float}
Labels used for training
do_one_hot : bool, optional (default=True)
Whether do one-hot encoding on x
Returns
-------
self : MultinomialNB
Returns self.
"""
if do_one_hot:
self.enc = OneHotEncoder(dtype=np.float32)
x = self.enc.fit_transform(x)
else:
self.enc = None
x = np.array(x, np.float32)
n = x.shape[0]
y = np.array(y, np.int8)
self.class_log_prior = np.log(np.bincount(y) / n)
masks = [y == i for i in range(len(self.class_log_prior))]
masked_xs = [x[mask] for mask in masks]
feature_counts = np.array([np.asarray(masked_x.sum(0))[0] for masked_x in masked_xs])
smoothed_fc = feature_counts + self.alpha
self.feature_log_prob = np.log(smoothed_fc / smoothed_fc.sum(1, keepdims=True))
return self
def _predict(self, x):
""" Internal method for calculating (linear) predictions
Parameters
----------
x : {np.ndarray of float, scipy.sparse.csr.csr_matrix of float}
One-Hot encoded feature vectors
Returns
-------
predictions : np.ndarray of float
Returns (linear) predictions.
"""
return x.dot(self.feature_log_prob.T) + self.class_log_prior
def predict(self, x):
""" API for calculating (linear) predictions
Parameters
----------
x : {list of float, np.ndarray of float}
Target feature vectors
Returns
-------
predictions : np.ndarray of float
Returns (linear) predictions.
"""
if self.enc is not None:
x = self.enc.transform(x)
return self._predict(x)
def predict_class(self, x):
""" API for predicting labels
Parameters
----------
x : {list of float, np.ndarray of float}
Target feature vectors
Returns
-------
labels : np.ndarray of int
Returns labels.
"""
return np.argmax(self.predict(x), 1)
| 29.296
| 96
| 0.560896
| 3,592
| 0.980885
| 0
| 0
| 0
| 0
| 0
| 0
| 2,368
| 0.646641
|
58df035c2ab9c1b7f4e6cbacccfa792d055318cf
| 9,362
|
py
|
Python
|
Reinforcement-Learning/Python-Model/venv/lib/python3.8/site-packages/tensorflow/core/protobuf/graph_debug_info_pb2.py
|
lawrence910426/ProgrammingII_FinalProject
|
493183dc2a674310e65bffe3a5e00395e8bebb4b
|
[
"MIT"
] | null | null | null |
Reinforcement-Learning/Python-Model/venv/lib/python3.8/site-packages/tensorflow/core/protobuf/graph_debug_info_pb2.py
|
lawrence910426/ProgrammingII_FinalProject
|
493183dc2a674310e65bffe3a5e00395e8bebb4b
|
[
"MIT"
] | null | null | null |
Reinforcement-Learning/Python-Model/venv/lib/python3.8/site-packages/tensorflow/core/protobuf/graph_debug_info_pb2.py
|
lawrence910426/ProgrammingII_FinalProject
|
493183dc2a674310e65bffe3a5e00395e8bebb4b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/protobuf/graph_debug_info.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/protobuf/graph_debug_info.proto',
package='tensorflow',
syntax='proto3',
serialized_options=_b('\n\030org.tensorflow.frameworkB\024GraphDebugInfoProtosP\001ZUgithub.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto\370\001\001'),
serialized_pb=_b('\n/tensorflow/core/protobuf/graph_debug_info.proto\x12\ntensorflow\"\xd5\x02\n\x0eGraphDebugInfo\x12\r\n\x05\x66iles\x18\x01 \x03(\t\x12\x36\n\x06traces\x18\x02 \x03(\x0b\x32&.tensorflow.GraphDebugInfo.TracesEntry\x1aX\n\x0b\x46ileLineCol\x12\x12\n\nfile_index\x18\x01 \x01(\x05\x12\x0c\n\x04line\x18\x02 \x01(\x05\x12\x0b\n\x03\x63ol\x18\x03 \x01(\x05\x12\x0c\n\x04\x66unc\x18\x04 \x01(\t\x12\x0c\n\x04\x63ode\x18\x05 \x01(\t\x1aL\n\nStackTrace\x12>\n\x0e\x66ile_line_cols\x18\x01 \x03(\x0b\x32&.tensorflow.GraphDebugInfo.FileLineCol\x1aT\n\x0bTracesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x34\n\x05value\x18\x02 \x01(\x0b\x32%.tensorflow.GraphDebugInfo.StackTrace:\x02\x38\x01\x42\x8c\x01\n\x18org.tensorflow.frameworkB\x14GraphDebugInfoProtosP\x01ZUgithub.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto\xf8\x01\x01\x62\x06proto3')
)
_GRAPHDEBUGINFO_FILELINECOL = _descriptor.Descriptor(
name='FileLineCol',
full_name='tensorflow.GraphDebugInfo.FileLineCol',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='file_index', full_name='tensorflow.GraphDebugInfo.FileLineCol.file_index', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='line', full_name='tensorflow.GraphDebugInfo.FileLineCol.line', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='col', full_name='tensorflow.GraphDebugInfo.FileLineCol.col', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='func', full_name='tensorflow.GraphDebugInfo.FileLineCol.func', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='code', full_name='tensorflow.GraphDebugInfo.FileLineCol.code', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=153,
serialized_end=241,
)
_GRAPHDEBUGINFO_STACKTRACE = _descriptor.Descriptor(
name='StackTrace',
full_name='tensorflow.GraphDebugInfo.StackTrace',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='file_line_cols', full_name='tensorflow.GraphDebugInfo.StackTrace.file_line_cols', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=243,
serialized_end=319,
)
_GRAPHDEBUGINFO_TRACESENTRY = _descriptor.Descriptor(
name='TracesEntry',
full_name='tensorflow.GraphDebugInfo.TracesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='tensorflow.GraphDebugInfo.TracesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.GraphDebugInfo.TracesEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=321,
serialized_end=405,
)
_GRAPHDEBUGINFO = _descriptor.Descriptor(
name='GraphDebugInfo',
full_name='tensorflow.GraphDebugInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='files', full_name='tensorflow.GraphDebugInfo.files', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='traces', full_name='tensorflow.GraphDebugInfo.traces', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_GRAPHDEBUGINFO_FILELINECOL, _GRAPHDEBUGINFO_STACKTRACE, _GRAPHDEBUGINFO_TRACESENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=64,
serialized_end=405,
)
_GRAPHDEBUGINFO_FILELINECOL.containing_type = _GRAPHDEBUGINFO
_GRAPHDEBUGINFO_STACKTRACE.fields_by_name['file_line_cols'].message_type = _GRAPHDEBUGINFO_FILELINECOL
_GRAPHDEBUGINFO_STACKTRACE.containing_type = _GRAPHDEBUGINFO
_GRAPHDEBUGINFO_TRACESENTRY.fields_by_name['value'].message_type = _GRAPHDEBUGINFO_STACKTRACE
_GRAPHDEBUGINFO_TRACESENTRY.containing_type = _GRAPHDEBUGINFO
_GRAPHDEBUGINFO.fields_by_name['traces'].message_type = _GRAPHDEBUGINFO_TRACESENTRY
DESCRIPTOR.message_types_by_name['GraphDebugInfo'] = _GRAPHDEBUGINFO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GraphDebugInfo = _reflection.GeneratedProtocolMessageType('GraphDebugInfo', (_message.Message,), {
'FileLineCol' : _reflection.GeneratedProtocolMessageType('FileLineCol', (_message.Message,), {
'DESCRIPTOR' : _GRAPHDEBUGINFO_FILELINECOL,
'__module__' : 'tensorflow.core.protobuf.graph_debug_info_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.GraphDebugInfo.FileLineCol)
})
,
'StackTrace' : _reflection.GeneratedProtocolMessageType('StackTrace', (_message.Message,), {
'DESCRIPTOR' : _GRAPHDEBUGINFO_STACKTRACE,
'__module__' : 'tensorflow.core.protobuf.graph_debug_info_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.GraphDebugInfo.StackTrace)
})
,
'TracesEntry' : _reflection.GeneratedProtocolMessageType('TracesEntry', (_message.Message,), {
'DESCRIPTOR' : _GRAPHDEBUGINFO_TRACESENTRY,
'__module__' : 'tensorflow.core.protobuf.graph_debug_info_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.GraphDebugInfo.TracesEntry)
})
,
'DESCRIPTOR' : _GRAPHDEBUGINFO,
'__module__' : 'tensorflow.core.protobuf.graph_debug_info_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.GraphDebugInfo)
})
_sym_db.RegisterMessage(GraphDebugInfo)
_sym_db.RegisterMessage(GraphDebugInfo.FileLineCol)
_sym_db.RegisterMessage(GraphDebugInfo.StackTrace)
_sym_db.RegisterMessage(GraphDebugInfo.TracesEntry)
DESCRIPTOR._options = None
_GRAPHDEBUGINFO_TRACESENTRY._options = None
# @@protoc_insertion_point(module_scope)
| 39.838298
| 888
| 0.760735
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,811
| 0.300256
|
58df52412971e5d196be467f42346c84563d779e
| 1,376
|
py
|
Python
|
tests/inferfaces_tests/test_people.py
|
jmolinski/traktpy
|
e6ff22acaf273b7b45070a4f8938c210fe4d63d7
|
[
"MIT"
] | null | null | null |
tests/inferfaces_tests/test_people.py
|
jmolinski/traktpy
|
e6ff22acaf273b7b45070a4f8938c210fe4d63d7
|
[
"MIT"
] | 1
|
2019-04-13T10:15:48.000Z
|
2019-04-13T10:15:48.000Z
|
tests/inferfaces_tests/test_people.py
|
jmolinski/traktpy
|
e6ff22acaf273b7b45070a4f8938c210fe4d63d7
|
[
"MIT"
] | null | null | null |
import pytest
from tests.test_data.lists import LIST
from tests.test_data.people import MOVIE_CREDITS, PERSON, SHOW_CREDITS
from tests.utils import mk_mock_client
from trakt.core.exceptions import ArgumentError
from trakt.core.json_parser import parse_tree
from trakt.core.models import Person
def test_get_person():
client = mk_mock_client({r".*people.*": [PERSON, 200]})
person = parse_tree(PERSON, Person)
with pytest.raises(ArgumentError):
client.people.get_person(person=0.5)
assert client.people.get_person(person=person.ids.trakt).name == PERSON["name"]
assert client.people.get_person(person=person).name == PERSON["name"]
def test_get_movie_credits():
client = mk_mock_client({r".*people.*": [MOVIE_CREDITS, 200]})
credits = client.people.get_movie_credits(person=123)
assert credits.cast[0].character == MOVIE_CREDITS["cast"][0]["character"]
def test_get_show_credits():
client = mk_mock_client({r".*people.*": [SHOW_CREDITS, 200]})
credits = client.people.get_show_credits(person=123)
expected = SHOW_CREDITS["crew"]["production"][0]["job"]
assert credits.crew.production[0].job == expected
def test_get_lists():
client = mk_mock_client({r".*people.*": [[LIST], 200]})
lists = list(client.people.get_lists(person=123))
assert len(lists) == 1
assert lists[0].name == LIST["name"]
| 32
| 83
| 0.721657
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 110
| 0.079942
|
58e00459697805d8f1e7adbc2795e9616fc70667
| 3,717
|
py
|
Python
|
batch_score.py
|
Lufedi/reaper
|
bdf56b499e5b704c27b9f6c053d798c2a10fa4cf
|
[
"Apache-2.0"
] | 106
|
2015-07-21T16:18:26.000Z
|
2022-03-31T06:45:34.000Z
|
batch_score.py
|
Lufedi/reaper
|
bdf56b499e5b704c27b9f6c053d798c2a10fa4cf
|
[
"Apache-2.0"
] | 21
|
2015-07-11T03:48:28.000Z
|
2022-01-18T12:57:30.000Z
|
batch_score.py
|
Lufedi/reaper
|
bdf56b499e5b704c27b9f6c053d798c2a10fa4cf
|
[
"Apache-2.0"
] | 26
|
2015-07-22T22:38:21.000Z
|
2022-03-14T10:11:56.000Z
|
#!/usr/bin/env python3
import argparse
import os
import sys
import traceback
from lib import core, utilities, run
from lib.attributes import Attributes
from lib.database import Database
def process_arguments():
"""
Uses the argparse module to parse commandline arguments.
Returns:
Dictionary of parsed commandline arguments.
"""
parser = argparse.ArgumentParser(
description='Calculate the scores of a set of repositories.'
)
parser.add_argument(
'--cleanup',
action='store_true',
dest='cleanup',
help='Delete cloned repositories from the disk when done.'
)
parser.add_argument(
'-c',
'--config',
type=argparse.FileType('r'),
default='config.json',
dest='config_file',
help='Path to the configuration file.'
)
parser.add_argument(
'-m',
'--manifest',
type=argparse.FileType('r'),
default='manifest.json',
dest='manifest_file',
help='Path to the manifest file.'
)
parser.add_argument(
'-r',
'--repositories-root',
dest='repositories_root',
help='Path to the root of downloaded repositories.'
)
parser.add_argument(
'-s',
'--repositories-sample',
type=argparse.FileType('r'),
dest='repositories_sample',
help='A file containing newline-separated GHTorrent project ids'
)
parser.add_argument(
'-k',
'--key-string',
type=str,
dest='key_string',
default=None,
required=False,
help='String of attribute initials. Uppercase to persist data'
)
parser.add_argument(
'-n',
'--num-processes',
type=int,
dest='num_processes',
default=1,
required=False,
help=(
'Number of processes to spawn when processing repositories'
' from the samples file.'
)
)
parser.add_argument(
'--goldenset',
action='store_true',
dest='goldenset',
help=(
'Indicate that the repositories sample file contains projects'
' from the Golden Set.'
)
)
if len(sys.argv) < 2:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def main():
"""
Main execution flow.
"""
try:
args = process_arguments()
config = utilities.read(args.config_file)
manifest = utilities.read(args.manifest_file)
# TODO: Refactor
core.config = config
utilities.TOKENIZER = core.Tokenizer()
database = Database(config['options']['datasource'])
globaloptions = {
'today': config['options']['today'],
'timeout': config['options']['timeout']
}
attributes = Attributes(
manifest['attributes'], database, args.cleanup, args.key_string,
**globaloptions
)
if not os.path.exists(args.repositories_root):
os.makedirs(args.repositories_root, exist_ok=True)
table = 'reaper_results'
if args.goldenset:
table = 'reaper_goldenset'
_run = run.Run(
args.repositories_root, attributes, database,
config['options']['threshold'], args.num_processes
)
_run.run([int(line) for line in args.repositories_sample], table)
except Exception as e:
extype, exvalue, extrace = sys.exc_info()
traceback.print_exception(extype, exvalue, extrace)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('\rCaught interrupt, killing all children...')
| 26.361702
| 76
| 0.584073
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,218
| 0.327684
|
58e423e71414f032f22c45a5bedf02c030da3667
| 8,423
|
py
|
Python
|
behave/reporter/summary.py
|
fluendo/behave
|
eeffde083456dcf1a0ea9b6139b32091970118c0
|
[
"BSD-2-Clause"
] | null | null | null |
behave/reporter/summary.py
|
fluendo/behave
|
eeffde083456dcf1a0ea9b6139b32091970118c0
|
[
"BSD-2-Clause"
] | 2
|
2020-03-21T22:37:54.000Z
|
2021-10-04T17:14:14.000Z
|
behave/reporter/summary.py
|
fluendo/behave
|
eeffde083456dcf1a0ea9b6139b32091970118c0
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: UTF-8 -*-
"""
Provides a summary after each test run.
"""
from __future__ import absolute_import, division, print_function
import sys
from time import time as time_now
from behave.model import Rule, ScenarioOutline # MAYBE: Scenario
from behave.model_core import Status
from behave.reporter.base import Reporter
from behave.formatter.base import StreamOpener
# ---------------------------------------------------------------------------
# CONSTANTS:
# ---------------------------------------------------------------------------
# -- DISABLED: OPTIONAL_STEPS = ('untested', 'undefined')
OPTIONAL_STEPS = (Status.untested,) # MAYBE: Status.undefined
STATUS_ORDER = (Status.passed, Status.failed, Status.skipped,
Status.undefined, Status.untested)
# ---------------------------------------------------------------------------
# UTILITY FUNCTIONS:
# ---------------------------------------------------------------------------
def pluralize(word, count=1, suffix="s"):
if count == 1:
return word
# -- OTHERWISE:
return "{0}{1}".format(word, suffix)
def compute_summary_sum(summary):
"""Compute sum of all summary counts (except: all)
:param summary: Summary counts (as dict).
:return: Sum of all counts (as integer).
"""
counts_sum = 0
for name, count in summary.items():
if name == "all":
continue # IGNORE IT.
counts_sum += count
return counts_sum
def format_summary0(statement_type, summary):
parts = []
for status in STATUS_ORDER:
if status.name not in summary:
continue
counts = summary[status.name]
if status in OPTIONAL_STEPS and counts == 0:
# -- SHOW-ONLY: For relevant counts, suppress: untested items, etc.
continue
if not parts:
# -- FIRST ITEM: Add statement_type to counter.
label = statement_type
if counts != 1:
label += 's'
part = u"%d %s %s" % (counts, label, status.name)
else:
part = u"%d %s" % (counts, status.name)
parts.append(part)
return ", ".join(parts) + "\n"
def format_summary(statement_type, summary):
parts = []
for status in STATUS_ORDER:
if status.name not in summary:
continue
counts = summary[status.name]
if status in OPTIONAL_STEPS and counts == 0:
# -- SHOW-ONLY: For relevant counts, suppress: untested items, etc.
continue
name = status.name
if status.name == "passed":
statement = pluralize(statement_type, counts)
name = u"%s passed" % statement
part = u"%d %s" % (counts, name)
parts.append(part)
return ", ".join(parts) + "\n"
# -- PREPARED:
def format_summary2(statement_type, summary, end="\n"):
"""Format the summary line for one statement type.
.. code-block::
6 scenarios (passed: 5, failed: 1, skipped: 0, untested: 0)
:param statement_type:
:param summary:
:return:
"""
parts = []
for status in STATUS_ORDER:
if status.name not in summary:
continue
counts = summary[status.name]
if status in OPTIONAL_STEPS and counts == 0:
# -- SHOW-ONLY: For relevant counts, suppress: untested items, etc.
continue
parts.append((status.name, counts))
counts_sum = summary["all"]
    statement = pluralize(statement_type, counts_sum)
parts_text = ", ".join(["{0}: {1}".format(name, value)
for name, value in parts])
return "{count:4} {statement:<9} ({parts}){end}".format(
count=counts_sum, statement=statement, parts=parts_text, end=end)
# ---------------------------------------------------------------------------
# REPORTERS:
# ---------------------------------------------------------------------------
class SummaryReporter(Reporter):
show_failed_scenarios = True
output_stream_name = "stdout"
def __init__(self, config):
super(SummaryReporter, self).__init__(config)
stream = getattr(sys, self.output_stream_name, sys.stderr)
self.stream = StreamOpener.ensure_stream_with_encoder(stream)
summary_zero_data = {
"all": 0,
Status.passed.name: 0,
Status.failed.name: 0,
Status.skipped.name: 0,
Status.untested.name: 0
}
self.feature_summary = summary_zero_data.copy()
self.rule_summary = summary_zero_data.copy()
self.scenario_summary = summary_zero_data.copy()
self.step_summary = {Status.undefined.name: 0}
self.step_summary.update(summary_zero_data)
self.duration = 0.0
self.run_starttime = 0
self.run_endtime = 0
self.failed_scenarios = []
self.show_rules = True
def testrun_started(self, timestamp=None):
if timestamp is None:
timestamp = time_now()
self.run_starttime = timestamp
def testrun_finished(self, timestamp=None):
if timestamp is None:
timestamp = time_now()
self.run_endtime = timestamp
def print_failing_scenarios(self, stream=None):
if stream is None:
stream = self.stream
stream.write("\nFailing scenarios:\n")
for scenario in self.failed_scenarios:
# add the list of tags matching ###-### so we will show the issue identifier with the error OPE-1234
stream.write(u" %s %s %s\n" % (', '.join(t for t in scenario.tags if len(t.split('-'))==2 ), scenario.location, scenario.name))
def compute_summary_sums(self):
"""(Re)Compute summary sum of all counts (except: all)."""
summaries = [
self.feature_summary,
self.rule_summary,
self.scenario_summary,
self.step_summary
]
for summary in summaries:
summary["all"] = compute_summary_sum(summary)
def print_summary(self, stream=None, with_duration=True):
if stream is None:
stream = self.stream
self.compute_summary_sums()
has_rules = (self.rule_summary["all"] > 0)
stream.write(format_summary("feature", self.feature_summary))
if self.show_rules and has_rules:
# -- HINT: Show only rules, if any exists.
self.stream.write(format_summary("rule", self.rule_summary))
stream.write(format_summary("scenario", self.scenario_summary))
stream.write(format_summary("step", self.step_summary))
# -- DURATION:
if with_duration:
timings = (int(self.duration / 60.0), self.duration % 60)
stream.write('Took %dm%02.3fs\n' % timings)
# -- REPORTER-API:
def feature(self, feature):
if self.run_starttime == 0:
# -- DISCOVER: TEST-RUN started.
self.testrun_started()
self.process_feature(feature)
def end(self):
self.testrun_finished()
# -- SHOW FAILED SCENARIOS (optional):
if self.show_failed_scenarios and self.failed_scenarios:
self.print_failing_scenarios()
self.stream.write("\n")
# -- SHOW SUMMARY COUNTS:
self.print_summary()
def process_run_items_for(self, parent):
for run_item in parent:
if isinstance(run_item, Rule):
self.process_rule(run_item)
elif isinstance(run_item, ScenarioOutline):
self.process_scenario_outline(run_item)
else:
# assert isinstance(run_item, Scenario)
self.process_scenario(run_item)
def process_feature(self, feature):
self.duration += feature.duration
self.feature_summary[feature.status.name] += 1
self.process_run_items_for(feature)
def process_rule(self, rule):
self.rule_summary[rule.status.name] += 1
self.process_run_items_for(rule)
def process_scenario(self, scenario):
if scenario.status == Status.failed:
self.failed_scenarios.append(scenario)
self.scenario_summary[scenario.status.name] += 1
for step in scenario:
self.step_summary[step.status.name] += 1
def process_scenario_outline(self, scenario_outline):
for scenario in scenario_outline.scenarios:
self.process_scenario(scenario)
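A short worked example of the formatting helpers above, assuming the module is importable (it needs behave installed so that Status and STATUS_ORDER resolve):

# Illustrative sketch: how compute_summary_sum and format_summary render a counts dict.
summary = {"all": 0, "passed": 5, "failed": 1, "skipped": 2, "untested": 0}
summary["all"] = compute_summary_sum(summary)       # 8
print(format_summary("scenario", summary), end="")  # 5 scenarios passed, 1 failed, 2 skipped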
| 34.239837
| 141
| 0.581859
| 4,502
| 0.534489
| 0
| 0
| 0
| 0
| 0
| 0
| 1,959
| 0.232577
|
58e63151e272298d99abe2311270c00ae4f753a6
| 2,109
|
py
|
Python
|
tests/common/bridgecrew/vulnerability_scanning/conftest.py
|
vangundy-jason-pfg/checkov
|
2fb50908f62390c98dda665f1fa94fe24806b654
|
[
"Apache-2.0"
] | 1
|
2021-02-13T15:24:42.000Z
|
2021-02-13T15:24:42.000Z
|
tests/common/bridgecrew/vulnerability_scanning/conftest.py
|
vangundy-jason-pfg/checkov
|
2fb50908f62390c98dda665f1fa94fe24806b654
|
[
"Apache-2.0"
] | 7
|
2021-04-12T06:54:07.000Z
|
2022-03-21T14:04:14.000Z
|
tests/common/bridgecrew/vulnerability_scanning/conftest.py
|
vangundy-jason-pfg/checkov
|
2fb50908f62390c98dda665f1fa94fe24806b654
|
[
"Apache-2.0"
] | 1
|
2021-12-16T03:09:55.000Z
|
2021-12-16T03:09:55.000Z
|
from typing import Dict, Any
import pytest
from checkov.common.bridgecrew.bc_source import SourceType
from checkov.common.bridgecrew.platform_integration import BcPlatformIntegration, bc_integration
@pytest.fixture()
def mock_bc_integration() -> BcPlatformIntegration:
bc_integration.bc_api_key = "abcd1234-abcd-1234-abcd-1234abcd1234"
bc_integration.setup_bridgecrew_credentials(
repo_id="bridgecrewio/checkov",
skip_fixes=True,
skip_suppressions=True,
skip_policy_download=True,
source=SourceType("Github", False),
source_version="1.0",
repo_branch="master",
)
return bc_integration
@pytest.fixture()
def scan_result() -> Dict[str, Any]:
return {
"repository": "/abs_path/to/app/requirements.txt",
"passed": True,
"packages": {"type": "python", "name": "django", "version": "1.2", "path": "/abs_path/to/app/requirements.txt"},
"complianceIssues": None,
"complianceDistribution": {"critical": 0, "high": 0, "medium": 0, "low": 0, "total": 0},
"vulnerabilities": [
{
"id": "CVE-2019-19844",
"status": "fixed in 3.0.1, 2.2.9, 1.11.27",
"cvss": 9.8,
"vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H",
"description": "Django before 1.11.27, 2.x before 2.2.9, and 3.x before 3.0.1 allows account takeover.",
"severity": "critical",
"packageName": "django",
"packageVersion": "1.2",
"link": "https://nvd.nist.gov/vuln/detail/CVE-2019-19844",
"riskFactors": ["Critical severity", "Has fix", "Attack complexity: low", "Attack vector: network"],
"impactedVersions": ["\u003c1.11.27"],
"publishedDate": "2019-12-18T20:15:00+01:00",
"discoveredDate": "2019-12-18T19:15:00Z",
"fixDate": "2019-12-18T20:15:00+01:00",
}
],
"vulnerabilityDistribution": {"critical": 1, "high": 0, "medium": 0, "low": 0, "total": 0},
}
| 40.557692
| 120
| 0.579896
| 0
| 0
| 0
| 0
| 1,902
| 0.901849
| 0
| 0
| 963
| 0.456615
|
58e6b8cbdb9f5deb8475e765553e3c1da2be8892
| 1,038
|
py
|
Python
|
image_matting/modules/trimap_generator/trimap_generator_application.py
|
image-matting/backend
|
bbf502539cf70822dadb5eded31529d5e66c6276
|
[
"Apache-2.0"
] | 1
|
2022-01-22T04:12:48.000Z
|
2022-01-22T04:12:48.000Z
|
image_matting/modules/trimap_generator/trimap_generator_application.py
|
image-matting/backend
|
bbf502539cf70822dadb5eded31529d5e66c6276
|
[
"Apache-2.0"
] | 4
|
2021-12-23T14:02:17.000Z
|
2022-01-26T18:44:06.000Z
|
image_matting/modules/trimap_generator/trimap_generator_application.py
|
image-matting/backend
|
bbf502539cf70822dadb5eded31529d5e66c6276
|
[
"Apache-2.0"
] | null | null | null |
import argparse
from pathlib import Path
from cv2 import cv2
from trimap import generate_trimap
from trimap_output_utils import save_trimap_output
def main():
args = parse_args()
image_path = args.image
output_directory_path = args.output
image_path = Path(image_path)
if not image_path.is_file():
raise RuntimeError(f'The provided image path "{image_path}" does not exist!')
image_filename = image_path.stem
saliency_image_path = image_path.as_posix()
trimap_image = generate_trimap(saliency_image_path, kernel_size=3, iterations=20)
save_trimap_output(trimap_image, image_filename, output_directory_path)
def parse_args():
parser = argparse.ArgumentParser(description='Trimap Generator Application')
parser.add_argument('-i', '--image', required=True, type=str, help='path to input image')
parser.add_argument('-o', '--output', required=False, default='.', type=str, help='path to output directory')
return parser.parse_args()
if __name__ == "__main__":
main()
| 29.657143
| 113
| 0.739884
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 174
| 0.16763
|
58e6e3920ecb9bb8ae3ccc039500c2bfac35935a
| 2,102
|
py
|
Python
|
tests/test_views.py
|
Siecje/graphene-django-sentry
|
b82188f91717211896cc1dbfc1f0e86de3729734
|
[
"MIT"
] | 20
|
2019-03-13T15:28:17.000Z
|
2022-03-23T09:52:26.000Z
|
tests/test_views.py
|
Siecje/graphene-django-sentry
|
b82188f91717211896cc1dbfc1f0e86de3729734
|
[
"MIT"
] | 5
|
2019-06-29T06:41:16.000Z
|
2021-06-10T21:05:25.000Z
|
tests/test_views.py
|
Siecje/graphene-django-sentry
|
b82188f91717211896cc1dbfc1f0e86de3729734
|
[
"MIT"
] | 2
|
2019-05-30T13:03:23.000Z
|
2019-06-17T16:08:39.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from http.client import OK
from unittest.mock import MagicMock, patch
from urllib.parse import urlencode
import graphene_django.views as views
from django.urls import reverse
from graphql import GraphQLError
from graphql.error import GraphQLLocatedError
class CustomException(Exception):
""" Boom! """
def get_query_string():
path = reverse('graphql')
query = urlencode({'query': 'query {test}'})
path = f'{path}?{query}'
return path
def test_view(client):
result = client.get(
get_query_string(),
HTTP_ACCEPT="application/json;q=0.8, text/html;q=0.9",
)
assert result.status_code == OK
@patch.object(views.GraphQLView, 'execute_graphql_request')
@patch('sentry_sdk.capture_exception')
def test_execute_graphql_request(
mocked_capture_exception,
mocked_method,
client,
):
error = CustomException('Boom')
errors = [GraphQLLocatedError([], error)]
mocked_return_value = MagicMock()
mocked_return_value.errors = errors
mocked_method.return_value = mocked_return_value
result = client.get(
get_query_string(),
HTTP_ACCEPT="application/json;q=0.8, text/html;q=0.9",
)
assert result.status_code == 400
assert result.json()['errors'][0]['message'] == 'Boom'
mocked_capture_exception.assert_called_with(error)
@patch.object(views.GraphQLView, 'execute_graphql_request')
@patch('sentry_sdk.capture_exception')
def test_execute_graphql_request_raises_raw_graphql_exceptions(
mocked_capture_exception,
mocked_method,
client,
):
error = GraphQLError(message='Syntax error in GraphQL query')
mocked_return_value = MagicMock()
mocked_return_value.errors = [error]
mocked_method.return_value = mocked_return_value
result = client.get(
reverse('graphql'),
{'query': '{__schema{types{name}}}'},
)
assert result.status_code == 400
assert result.json()['errors'][0]['message'] == (
'Syntax error in GraphQL query'
)
mocked_capture_exception.assert_called_with(error)
| 26.948718
| 65
| 0.706946
| 51
| 0.024263
| 0
| 0
| 1,407
| 0.669363
| 0
| 0
| 445
| 0.211703
|
58e7d15456033fa62d2766b6d09f022fb1eb2ace
| 3,137
|
py
|
Python
|
spacy/lang/nl/stop_words.py
|
cedar101/spaCy
|
66e22098a8bb77cbe527b1a4a3c69ec1cfb56f95
|
[
"MIT"
] | 12
|
2019-03-20T20:43:47.000Z
|
2020-04-13T11:10:52.000Z
|
spacy/lang/nl/stop_words.py
|
cedar101/spaCy
|
66e22098a8bb77cbe527b1a4a3c69ec1cfb56f95
|
[
"MIT"
] | 13
|
2018-06-05T11:54:40.000Z
|
2019-07-02T11:33:14.000Z
|
spacy/lang/nl/stop_words.py
|
cedar101/spaCy
|
66e22098a8bb77cbe527b1a4a3c69ec1cfb56f95
|
[
"MIT"
] | 1
|
2020-05-12T16:00:38.000Z
|
2020-05-12T16:00:38.000Z
|
# coding: utf8
from __future__ import unicode_literals
# The original stop words list (added in f46ffe3) was taken from
# http://www.damienvanholten.com/downloads/dutch-stop-words.txt
# and consisted of about 100 tokens.
# In order to achieve parity with some of the better-supported
# languages, e.g., English, French, and German, this original list has been
# extended with 200 additional tokens. The main source of inspiration was
# https://raw.githubusercontent.com/stopwords-iso/stopwords-nl/master/stopwords-nl.txt.
# However, quite a bit of manual editing has taken place as well.
# Tokens whose status as a stop word is not entirely clear were admitted or
# rejected by deferring to their counterparts in the stop words lists for English
# and French. Similarly, those lists were used to identify and fill in gaps so
# that -- in principle -- each token contained in the English stop words list
# should have a Dutch counterpart here.
STOP_WORDS = set("""
aan af al alle alles allebei alleen allen als altijd ander anders andere anderen aangaangde aangezien achter achterna
afgelopen aldus alhoewel anderzijds
ben bij bijna bijvoorbeeld behalve beide beiden beneden bent bepaald beter betere betreffende binnen binnenin boven
bovenal bovendien bovenstaand buiten
daar dan dat de der den deze die dit doch doen door dus daarheen daarin daarna daarnet daarom daarop des dezelfde dezen
dien dikwijls doet doorgaand doorgaans
een eens en er echter enige eerder eerst eerste eersten effe eigen elk elke enkel enkele enz erdoor etc even eveneens
evenwel
ff
ge geen geweest gauw gedurende gegeven gehad geheel gekund geleden gelijk gemogen geven geweest gewoon gewoonweg
geworden gij
haar had heb hebben heeft hem het hier hij hoe hun hadden hare hebt hele hen hierbeneden hierboven hierin hoewel hun
iemand iets ik in is idd ieder ikke ikzelf indien inmiddels inz inzake
ja je jou jouw jullie jezelf jij jijzelf jouwe juist
kan kon kunnen klaar konden krachtens kunnen kunt
lang later liet liever
maar me meer men met mij mijn moet mag mede meer meesten mezelf mijzelf min minder misschien mocht mochten moest moesten
moet moeten mogelijk mogen
na naar niet niets nog nu nabij nadat net nogal nooit nr nu
of om omdat ons ook op over omhoog omlaag omstreeks omtrent omver onder ondertussen ongeveer onszelf onze ooit opdat
opnieuw opzij over overigens
pas pp precies prof publ
reeds rond rondom
sedert sinds sindsdien slechts sommige spoedig steeds
’t 't te tegen toch toen tot tamelijk ten tenzij ter terwijl thans tijdens toe totdat tussen
u uit uw uitgezonderd uwe uwen
van veel voor vaak vanaf vandaan vanuit vanwege veeleer verder verre vervolgens vgl volgens vooraf vooral vooralsnog
voorbij voordat voordien voorheen voorop voort voorts vooruit vrij vroeg
want waren was wat we wel werd wezen wie wij wil worden waar waarom wanneer want weer weg wegens weinig weinige weldra
welk welke welken werd werden wiens wier wilde wordt
zal ze zei zelf zich zij zijn zo zonder zou zeer zeker zekere zelfde zelfs zichzelf zijnde zijne zo’n zoals zodra zouden
zoveel zowat zulk zulke zulks zullen zult
""".split())
| 42.391892
| 120
| 0.808734
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,057
| 0.973257
|
58e841d7116f44d86fb300ae823c11eb893353a1
| 1,858
|
py
|
Python
|
liv_covid19/web/artic/opentrons_thread.py
|
neilswainston/liv-covid19
|
4842fccdca626caca50bd7c545e3f673660503d4
|
[
"MIT"
] | 2
|
2020-03-31T12:59:13.000Z
|
2021-02-08T21:40:20.000Z
|
liv_covid19/web/artic/opentrons_thread.py
|
neilswainston/liv-covid19
|
4842fccdca626caca50bd7c545e3f673660503d4
|
[
"MIT"
] | null | null | null |
liv_covid19/web/artic/opentrons_thread.py
|
neilswainston/liv-covid19
|
4842fccdca626caca50bd7c545e3f673660503d4
|
[
"MIT"
] | 2
|
2020-06-23T16:49:20.000Z
|
2020-06-25T14:59:32.000Z
|
'''
(c) University of Liverpool 2020
Licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>..
@author: neilswainston
'''
# pylint: disable=broad-except
import os.path
import tempfile
from liv_covid19.web.artic import opentrons
from liv_covid19.web.job import JobThread, save_export
class OpentronsThread(JobThread):
'''Runs a Opentrons job.'''
def __init__(self, query, out_dir):
self.__filename, suffix = os.path.splitext(query['file_name'])
tmpfile = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
self.__in_filename = tmpfile.name
with open(self.__in_filename, 'w') as fle:
fle.write(query['file_content'])
self.__temp_deck = query['temp_deck']
self.__vol_scale = float(query['vol_scale'])
self.__out_dir = out_dir
JobThread.__init__(self, query, 1)
def run(self):
'''Run.'''
try:
parent_dir = tempfile.mkdtemp()
iteration = 0
self._fire_job_event('running', iteration, 'Running...')
opentrons.run(in_filename=self.__in_filename,
temp_deck=self.__temp_deck,
vol_scale=self.__vol_scale,
out_dir=parent_dir)
iteration += 1
if self._cancelled:
self._fire_job_event('cancelled', iteration,
message='Job cancelled')
else:
save_export(parent_dir, self.__out_dir, self._job_id)
self._result = self._job_id
self._fire_job_event('finished', iteration,
message='Job completed')
except Exception as err:
self._fire_job_event('error', iteration, message=str(err))
| 30.459016
| 77
| 0.595264
| 1,516
| 0.815931
| 0
| 0
| 0
| 0
| 0
| 0
| 372
| 0.200215
|
58e8cb21bea9ec496741309cc75c724289559dd8
| 838
|
py
|
Python
|
futuquant/common/ft_logger.py
|
hxhxhx88/futuquant
|
a1b4a875604f1de451ddde4bfa3e713452482b0a
|
[
"Apache-2.0"
] | null | null | null |
futuquant/common/ft_logger.py
|
hxhxhx88/futuquant
|
a1b4a875604f1de451ddde4bfa3e713452482b0a
|
[
"Apache-2.0"
] | null | null | null |
futuquant/common/ft_logger.py
|
hxhxhx88/futuquant
|
a1b4a875604f1de451ddde4bfa3e713452482b0a
|
[
"Apache-2.0"
] | null | null | null |
import logging
from datetime import datetime
import os
logger = logging.getLogger('FT')
log_level = logging.INFO
is_file_log = True
# set the logger's level
logger.setLevel(log_level)
# create a StreamHandler that writes log output to the console
hdr = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s [%(filename)s] %(funcName)s:%(lineno)d: %(message)s')
hdr.setFormatter(formatter)
# add the handler to the logger
logger.addHandler(hdr)
# add a file handler
if is_file_log:
filename = 'ft_' + datetime.now().strftime('%Y%m%d') + '.log'
tempPath = os.path.join(os.getcwd(), 'log')
if not os.path.exists(tempPath):
os.makedirs(tempPath)
filepath = os.path.join(tempPath, filename)
fileHandler = logging.FileHandler(filepath)
fileHandler.setLevel(log_level)
fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
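Typical usage of the configured logger, assuming the package layout implied by the record path (futuquant/common/ft_logger.py):

# Illustrative sketch: any module can reuse the shared 'FT' logger; messages go to
# the console and, when is_file_log is True, to log/ft_YYYYMMDD.log.
from futuquant.common.ft_logger import logger

logger.info("quote context connected")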
| 24.647059
| 70
| 0.731504
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 224
| 0.252252
|
58ecb5c52b1dbb5c09611ce11efbdfb06b5edf51
| 3,946
|
py
|
Python
|
image_classification/cifar10/cifar10/networking/cifar10_vgg16_model.py
|
poncos/deeplearning
|
0874b22dab05117bcf39ccb4895d513ab6f87861
|
[
"MIT"
] | null | null | null |
image_classification/cifar10/cifar10/networking/cifar10_vgg16_model.py
|
poncos/deeplearning
|
0874b22dab05117bcf39ccb4895d513ab6f87861
|
[
"MIT"
] | null | null | null |
image_classification/cifar10/cifar10/networking/cifar10_vgg16_model.py
|
poncos/deeplearning
|
0874b22dab05117bcf39ccb4895d513ab6f87861
|
[
"MIT"
] | null | null | null |
# Copyright 2018 Esteban Collado.
#
# Licensed under the MIT License
import tensorflow as tf
DEFAULT_VARIABLE_NAMES = ['conv1', 'conv2', 'conv3', 'conv4', 'fc1', 'fc2', 'softmax_linear']
BATCH_SIZE = 200
IMAGE_WIDTH = 32
IMAGE_HEIGHT = 32
IMAGE_DEPTH = 3
NUM_CLASSES = 10
INPUT_PLACEHOLDER = 'X_INPUT'
LABELS_PLACEHOLDER = 'Y_LABELS'
def create_placeholder():
x_placeholder = tf.placeholder(tf.float32, [None, IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_DEPTH], name=INPUT_PLACEHOLDER)
y_placeholder = tf.placeholder(tf.int32, [None], name=LABELS_PLACEHOLDER)
return x_placeholder, y_placeholder
def initialize_parameters():
parameters = {
"w1": tf.get_variable("w1", shape=[3, 3, 3, 64],
initializer=tf.truncated_normal_initializer(stddev=5e-2, dtype=tf.float32)),
"w2": tf.get_variable("w2", shape=[3, 3, 64, 64],
initializer=tf.truncated_normal_initializer(stddev=5e-2, dtype=tf.float32)),
"w3": tf.get_variable("w3", shape=[3, 3, 64, 64],
initializer=tf.truncated_normal_initializer(stddev=5e-2, dtype=tf.float32)),
"w4": tf.get_variable("w4", shape=[3, 3, 64, 64],
initializer=tf.truncated_normal_initializer(stddev=5e-2, dtype=tf.float32)),
"w5": tf.get_variable("w5", shape=[4096, 384],
initializer=tf.truncated_normal_initializer(stddev=5e-2, dtype=tf.float32)),
"w6": tf.get_variable("w6", shape=[384, 192],
initializer=tf.truncated_normal_initializer(stddev=5e-2, dtype=tf.float32)),
"w7": tf.get_variable("w7", shape=[192, 10],
initializer=tf.truncated_normal_initializer(stddev=5e-2, dtype=tf.float32)),
"b1": tf.get_variable('b1', [64], initializer=tf.constant_initializer(0.0), dtype=tf.float32),
"b2": tf.get_variable('b2', [64], initializer=tf.constant_initializer(0.0), dtype=tf.float32),
"b3": tf.get_variable('b3', [64], initializer=tf.constant_initializer(0.0), dtype=tf.float32),
"b4": tf.get_variable('b4', [64], initializer=tf.constant_initializer(0.0), dtype=tf.float32),
"b5": tf.get_variable('b5', [384], initializer=tf.constant_initializer(0.1), dtype=tf.float32),
"b6": tf.get_variable('b6', [192], initializer=tf.constant_initializer(0.1), dtype=tf.float32),
"b7": tf.get_variable('b7', [10], initializer=tf.constant_initializer(0.0), dtype=tf.float32)
}
return parameters
def create_conv2d_layer(inputs, name, weight, bias, strides=[1, 1, 1, 1], padding='SAME'):
with tf.variable_scope(name) as scope:
conv = tf.nn.conv2d(inputs, weight, strides, padding)
pre_activation = tf.nn.bias_add(conv, bias)
activation = tf.nn.relu(pre_activation, name=scope.name)
return activation
def forward_propagation(input, parameters):
conv1 = create_conv2d_layer(input, 'conv1', parameters['w1'], parameters['b1'])
conv2 = create_conv2d_layer(conv1, 'conv2', parameters['w2'], parameters['b2'])
pool1 = tf.nn.max_pool(conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
conv3 = create_conv2d_layer(pool1, 'conv3', parameters['w3'], parameters['b3'])
conv4 = create_conv2d_layer(conv3, 'conv4', parameters['w4'], parameters['b4'])
pool2 = tf.nn.max_pool(conv4, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool2')
flattened_conv = tf.reshape(pool2, shape=[-1, parameters['w5'].get_shape().as_list()[0]])
fc1 = tf.nn.relu(tf.matmul(flattened_conv, parameters['w5']) + parameters['b5'], name='fc1')
fc2 = tf.nn.relu(tf.matmul(fc1, parameters['w6']) + parameters['b6'], name='fc2')
softmax_linear = tf.add(tf.matmul(fc2, parameters['w7']), parameters['b7'], name='softmax')
return softmax_linear
| 43.362637
| 118
| 0.640902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 390
| 0.098834
|
58eeedb6cd1adb5de820dbc349b434e1a3735952
| 425
|
py
|
Python
|
wrappers/Python/sbmlsolver/__init__.py
|
gitter-badger/sbmlsolver
|
c92936832297ea1d2ad7f17223b68ada43c8f0b2
|
[
"Apache-2.0"
] | null | null | null |
wrappers/Python/sbmlsolver/__init__.py
|
gitter-badger/sbmlsolver
|
c92936832297ea1d2ad7f17223b68ada43c8f0b2
|
[
"Apache-2.0"
] | null | null | null |
wrappers/Python/sbmlsolver/__init__.py
|
gitter-badger/sbmlsolver
|
c92936832297ea1d2ad7f17223b68ada43c8f0b2
|
[
"Apache-2.0"
] | null | null | null |
"""
The LibRoadRunner SBML Simulation Engine, (c) 2009-2014 Andy Somogyi and Herbert Sauro
LibRoadRunner is an SBML JIT compiler and simulation engine with a variety of analysis
functions. LibRoadRunner is a self-contained library which is designed to be integrated
into existing simulation platforms or may be used as a stand-alone simulation and analysis
package.
"""
from sbmlsolver import *
__version__ = getVersionStr()
| 32.692308
| 87
| 0.807059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 367
| 0.863529
|
58f0ab77666277ac6d3ddc06e53dedb0c6d49f2b
| 1,573
|
py
|
Python
|
classification/tests/test_evidence_mixin.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | 5
|
2021-01-14T03:34:42.000Z
|
2022-03-07T15:34:18.000Z
|
classification/tests/test_evidence_mixin.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | 551
|
2020-10-19T00:02:38.000Z
|
2022-03-30T02:18:22.000Z
|
classification/tests/test_evidence_mixin.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | null | null | null |
from classification.models import EvidenceMixin
from classification.models.evidence_mixin import VCStore
class BasicEvidence(EvidenceMixin):
def __init__(self, evidence: VCStore):
self.evidence = evidence
@property
def _evidence(self) -> VCStore:
return self.evidence
# doesn't work without Transcripts loaded now
# class EvidenceMixinTest(TestCase):
#
# @override_settings(VARIANT_ANNOTATION_TRANSCRIPT_PREFERENCES=['refseq_transcript_accession'])
# def test_get_transcript(self):
# # if transcript version is in c.hgvs use it
# be = BasicEvidence({
# SpecialEKeys.C_HGVS: "NM_020975.5(RET):c.867+48A>G",
# SpecialEKeys.REFSEQ_TRANSCRIPT_ID: "NM_020975",
# SpecialEKeys.GENOME_BUILD: "GRCh37"
# })
# self.assertEqual(be.transcript, "NM_020975.5")
#
# # if transcript version is in c.hgvs but transcript doesn't match
# # value in transcript field, use the raw transcript value
# be = BasicEvidence({
# SpecialEKeys.C_HGVS: "NM_020975.5(RET):c.867+48A>G",
# SpecialEKeys.REFSEQ_TRANSCRIPT_ID: "NM_033333",
# SpecialEKeys.GENOME_BUILD: "GRCh37"
# })
# self.assertEqual(be.transcript, "NM_033333")
#
# # if there is no transcript field, use the contents of c.hgvs
# be = BasicEvidence({
# SpecialEKeys.C_HGVS: "NM_020975.5(RET):c.867+48A>G",
# SpecialEKeys.GENOME_BUILD: "GRCh37"
# })
# self.assertEqual(be.transcript, "NM_020975.5")
| 37.452381
| 99
| 0.650985
| 192
| 0.12206
| 0
| 0
| 74
| 0.047044
| 0
| 0
| 1,244
| 0.790846
|
58f0dabb24cb5744c956fc257b97c051c5d3142b
| 674
|
py
|
Python
|
scronsole/widgets/main_screen.py
|
bastianh/screeps_console_mod
|
e093cc1e071fae5bdf106674b97e71902fbbb6ff
|
[
"MIT"
] | 2
|
2017-10-08T19:39:27.000Z
|
2017-10-08T19:51:18.000Z
|
scronsole/widgets/main_screen.py
|
bastianh/screeps_console_mod
|
e093cc1e071fae5bdf106674b97e71902fbbb6ff
|
[
"MIT"
] | null | null | null |
scronsole/widgets/main_screen.py
|
bastianh/screeps_console_mod
|
e093cc1e071fae5bdf106674b97e71902fbbb6ff
|
[
"MIT"
] | null | null | null |
import urwid
from scronsole.config_manager import ConfigManager
from scronsole.plugin_manager import PluginManager
from scronsole.widgets.main_menu import MainMenu
from scronsole.widgets.server_screen import ServerScreen
class MainScreen(urwid.WidgetPlaceholder):
def __init__(self):
super().__init__(urwid.SolidFill(u'/'))
self.config = ConfigManager()
self.show_main_menu()
self.plugins = PluginManager(self)
self.plugins.load_plugins()
def show_server_screen(self, server_data):
self.original_widget = ServerScreen(self, server_data)
def show_main_menu(self):
self.original_widget = MainMenu(self)
| 30.636364
| 62
| 0.746291
| 449
| 0.666172
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 0.005935
|
58f1e5bfcc6007b51ace335dfbea68c9b539583f
| 436
|
py
|
Python
|
sql/language.py
|
skylarkgit/sql2java
|
befd55180969b0ec68e242991c3260272d755cc9
|
[
"MIT"
] | 2
|
2019-10-23T08:27:30.000Z
|
2019-10-23T09:58:45.000Z
|
sql/language.py
|
skylarkgit/sql2java
|
befd55180969b0ec68e242991c3260272d755cc9
|
[
"MIT"
] | null | null | null |
sql/language.py
|
skylarkgit/sql2java
|
befd55180969b0ec68e242991c3260272d755cc9
|
[
"MIT"
] | null | null | null |
import re
from csv import reader
def splitEscaped(str, by, escapeChar):
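# A minimal illustration (assumed usage): csv.reader treats `escapeChar` as its quote
# character, so next(splitEscaped("a,'b,c',d", ',', "'")) yields ['a', 'b,c', 'd'].
# Note the function returns the reader iterator itself, not a list of fields.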
infile = [str]
return reader(infile, delimiter=by, quotechar=escapeChar)
def removeComments(text):
p = r'/\*[^*]*\*+([^/*][^*]*\*+)*/|("(\\.|[^"\\])*"|\'(\\.|[^\'\\])*\'|.[^/"\'\\]*)'
return ''.join(m.group(2) for m in re.finditer(p, text, re.M|re.S) if m.group(2))
def escapeAnnotations(text):
return re.sub(r'(/\*@)(.*)(\*/)',r'@\2',text)
| 31.142857
| 88
| 0.53211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 106
| 0.243119
|
58f3c7c8febd7b51f53b623ee90e4c562e1d0bd1
| 659
|
py
|
Python
|
easy_ArrayAdditionI.py
|
GabrielGhe/CoderbyteChallenges
|
5601dbc24c95a65fed04896de2f534417c2e730d
|
[
"MIT"
] | 1
|
2020-11-04T15:30:18.000Z
|
2020-11-04T15:30:18.000Z
|
easy_ArrayAdditionI.py
|
GabrielGhe/CoderbyteChallenges
|
5601dbc24c95a65fed04896de2f534417c2e730d
|
[
"MIT"
] | null | null | null |
easy_ArrayAdditionI.py
|
GabrielGhe/CoderbyteChallenges
|
5601dbc24c95a65fed04896de2f534417c2e730d
|
[
"MIT"
] | null | null | null |
import itertools
#################################################
# This function will see if there is any #
# possible combination of the numbers in #
# the array that will give the largest number #
#################################################
def ArrayAdditionI(arr):
#sort, remove last element
result = "false"
arr.sort()
large = arr[-1]
arr = arr[:-1]
#go through every combination and see if sum = large
for x in range(2,len(arr) + 1):
for comb in itertools.combinations(arr,x):
if large == sum(comb):
result = "true"
break
return result
print ArrayAdditionI(raw_input())
| 26.36
| 54
| 0.53566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 336
| 0.509863
|
58f4750834ad708c962b1818098008a6819ef467
| 1,994
|
py
|
Python
|
SVM.py
|
JAMJU/KernelMethod
|
e52f5a0cfaefa87073facd88220c311709e513e8
|
[
"MIT"
] | null | null | null |
SVM.py
|
JAMJU/KernelMethod
|
e52f5a0cfaefa87073facd88220c311709e513e8
|
[
"MIT"
] | null | null | null |
SVM.py
|
JAMJU/KernelMethod
|
e52f5a0cfaefa87073facd88220c311709e513e8
|
[
"MIT"
] | null | null | null |
import numpy as np
import quadprog
def quadprog_solve_qp(P, q, G=None, h=None, A=None, b=None):
""" Solve a QP of the form min 1/2xTPx + qTx st Gx < h st Ax=b"""
#qp_G = .5 * (P + P.T) # make sure P is symmetric
qp_G = P
qp_a = -q
if A is not None:
qp_C = -np.vstack([A, G]).T
qp_b = -np.hstack([b, h])
meq = A.shape[0]
else: # no equality constraint
qp_C = -G.T
qp_b = -h
meq = 0
return quadprog.solve_qp(qp_G, qp_a, qp_C, qp_b, meq)[0]
def evaluate_alpha(alpha, K, label):
""" Return success percent """
result = K.dot(alpha)
success = [float(result[i,0]*label[i] > 0) for i in range(len(label))]
return np.mean(success)*100
def compute_G_h(label, lamb):
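# Build the inequality constraints G @ alpha <= h for the QP above: for each sample i,
# -y_i*alpha_i <= 0 and y_i*alpha_i <= 1/(2*lambda*n), i.e. the box constraint
# 0 <= y_i*alpha_i <= 1/(2*lambda*n) of the SVM dual in this parameterisation.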
nb_spl = len(label)
G = np.zeros([nb_spl*2, nb_spl])
h = np.zeros((nb_spl*2, ))
for i in range(nb_spl):
G[i,i] = - float(label[i])
G[nb_spl + i, i] = float(label[i])
h[nb_spl + i] = 1./(2.*lamb*float(nb_spl))
return G, h
def svm_compute_label(data_in_kernel, alpha):
""" Compute the label for the data given (in the form data[i,j] = K(x, xj) with x a new data, xj in the data set"""
result = data_in_kernel.dot(alpha)
return [int(result[i,0] > 0.) for i in range(data_in_kernel.shape[0])]
def SVM(K, label, lamb, K_test, label_test):
G, h = compute_G_h(label, lamb)
alpha = quadprog_solve_qp(P=K, q = - np.asarray(label).reshape((len(label),)), G = G, h = h)
alpha = alpha.reshape([alpha.shape[0], 1])
print("on train: ", evaluate_alpha(alpha, K, label))
print("on test: ", evaluate_alpha(alpha, K_test, label_test) )
return alpha
""" Just an example of how quadprog works :
M = np.array([[1., 2., 0.], [-8., 3., 2.], [0., 1., 1.]])
P = np.dot(M.T, M)
q = np.dot(np.array([3., 2., 3.]), M).reshape((3,))
G = np.array([[1., 2., 1.], [2., 0., 1.], [-1., 2., -1.]])
h = np.array([3., 2., -2.]).reshape((3,))
al = quadprog_solve_qp(P, q, G, h)
print(al)"""
| 33.79661
| 119
| 0.57322
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 629
| 0.315446
|
58f7245b5e8f50a694e2a4405f7daff21e842618
| 1,547
|
py
|
Python
|
util/MalShare.py
|
cclauss/ph0neutria
|
04b6a569d4e707c3de652ba7ad15c1b5223bebcb
|
[
"Apache-2.0"
] | null | null | null |
util/MalShare.py
|
cclauss/ph0neutria
|
04b6a569d4e707c3de652ba7ad15c1b5223bebcb
|
[
"Apache-2.0"
] | null | null | null |
util/MalShare.py
|
cclauss/ph0neutria
|
04b6a569d4e707c3de652ba7ad15c1b5223bebcb
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
from ConfigUtils import getBaseConfig
from LogUtils import getModuleLogger
from StringUtils import isValidUrl, randomString
from urlparse import urlparse
import json
import os
import requests
import sys
cDir = os.path.dirname(os.path.realpath(__file__))
rootDir = os.path.abspath(os.path.join(cDir, os.pardir))
baseConfig = getBaseConfig(rootDir)
logging = getModuleLogger(__name__)
def getMalShareList():
try:
payload = {'action': 'getsourcesraw', 'api_key': baseConfig.malShareApiKey }
userAgent = {'User-agent': baseConfig.userAgent}
logging.info('Fetching latest MalShare list.')
request = requests.get('http://malshare.com/api.php', params=payload, headers=userAgent)
if request.status_code == 200:
mal_list = []
for line in request.content.split('\n'):
url = line.strip()
if isValidUrl(url):
mal_list.append(url)
return mal_list
else:
logging.error('Problem connecting to MalShare. Status code:{0}. Please try again later.'.format(request.status_code))
except requests.exceptions.ConnectionError as e:
logging.warning('Problem connecting to Malshare. Error: {0}'.format(e))
except Exception as e:
logging.warning('Problem connecting to Malshare. Aborting task.')
logging.exception(sys.exc_info())
logging.exception(type(e))
logging.exception(e.args)
logging.exception(e)
return []
| 29.75
| 129
| 0.661926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 292
| 0.188752
|
58f785be1e5480e5359e098bb9e3ecdf8b2d4246
| 705
|
py
|
Python
|
attempt/runner.py
|
prstolpe/rrc_simulation
|
b430fe4e575641cdd64945cf57d0dd67a0eea17a
|
[
"BSD-3-Clause"
] | null | null | null |
attempt/runner.py
|
prstolpe/rrc_simulation
|
b430fe4e575641cdd64945cf57d0dd67a0eea17a
|
[
"BSD-3-Clause"
] | null | null | null |
attempt/runner.py
|
prstolpe/rrc_simulation
|
b430fe4e575641cdd64945cf57d0dd67a0eea17a
|
[
"BSD-3-Clause"
] | null | null | null |
from attempt.ddpg import HERDDPG, DDPG
import gym
import os
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
if __name__ == "__main__":
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
env = gym.make('FetchReach-v1')
agent = HERDDPG(env)
for epoch in range(2):
for cycle in tqdm(range(10)):
agent.gather_cycle()
# target_agent.train()
agent.test_env(10)
env.close()
plt.plot(np.vstack(agent.rewards))
plt.title('Rewards')
plt.show()
plt.plot(np.vstack(agent.policy_losses))
plt.title('Policy Losses')
plt.show()
plt.plot(np.vstack(agent.value_losses))
plt.title('Value Losses')
plt.show()
| 20.735294
| 44
| 0.64539
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 110
| 0.156028
|
58f82c89b0b711b196471a3d0d54cc05fadd6ef6
| 8,187
|
py
|
Python
|
src/shogun/base/class_list.cpp.py
|
srgnuclear/shogun
|
33c04f77a642416376521b0cd1eed29b3256ac13
|
[
"Ruby",
"MIT"
] | 1
|
2015-11-05T18:31:14.000Z
|
2015-11-05T18:31:14.000Z
|
src/shogun/base/class_list.cpp.py
|
waderly/shogun
|
9288b6fa38e001d63c32188f7f847dadea66e2ae
|
[
"Ruby",
"MIT"
] | null | null | null |
src/shogun/base/class_list.cpp.py
|
waderly/shogun
|
9288b6fa38e001d63c32188f7f847dadea66e2ae
|
[
"Ruby",
"MIT"
] | null | null | null |
#!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Written (W) 2008-2009 Soeren Sonnenburg
# Copyright (C) 2008-2009 Fraunhofer Institute FIRST and Max Planck Society
class_str='class'
types=["BOOL", "CHAR", "INT8", "UINT8", "INT16", "UINT16", "INT32", "UINT32",
"INT64", "UINT64", "FLOAT32", "FLOAT64", "FLOATMAX", "COMPLEX128"]
config_tests=["HAVE_HDF5", "HAVE_JSON", "HAVE_XML", "HAVE_LAPACK", "USE_CPLEX",
"USE_SVMLIGHT", "USE_GLPK", "USE_LZO", "USE_GZIP", "USE_BZIP2", "USE_LZMA",
"USE_MOSEK", "HAVE_EIGEN3", "HAVE_COLPACK", "HAVE_NLOPT", "HAVE_PROTOBUF",
"HAVE_VIENNACL"]
SHOGUN_TEMPLATE_CLASS = "SHOGUN_TEMPLATE_CLASS"
SHOGUN_BASIC_CLASS = "SHOGUN_BASIC_CLASS"
def check_class(line):
if not (line.find('public')==-1 and
line.find('private')==-1 and
line.find('protected')==-1):
return True
def check_abstract_class(line):
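# A C++ pure virtual declaration such as 'virtual void f() = 0;' ends in '= 0;',
# so once spaces and tabs are removed the line ends with '=0;'.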
line=line.replace(' ','').replace('\t','').strip()
return line.endswith('=0;')
def check_is_in_blacklist(c, lines, line_nr, blacklist):
ifdef_cnt=0
for i in range(line_nr,0,-1):
line=lines[i]
if line.find('#endif')!=-1:
ifdef_cnt-=1
if line.find('#ifdef')!=-1:
ifdef_cnt+=1
for b in blacklist.keys():
if line.find(b)!=-1 and ifdef_cnt>0:
return True
if line.find('#ifndef')!=-1:
ifdef_cnt+=1
return False
def extract_class_name(lines, line_nr, line, blacklist):
try:
if not line:
line=lines[line_nr]
c=line[line.index(class_str)+len(class_str):]
if not ':' in c:
return
if not check_class(line):
if not check_class(lines[line_nr+1]):
return
c=c.split()[0]
except:
return
c=c.strip(':').strip()
if not c.startswith('C'):
return
if c.endswith(';'):
return
if '>' in c:
return
if not (len(c)>2 and c[1].isupper()):
return
if check_is_in_blacklist(c[1:], lines, line_nr, blacklist):
return
return c[1:]
def get_includes(classes):
class_headers = []
for c,t in classes:
class_headers.append(c+".h")
import os
result = []
for root, dirs, files in os.walk("."):
for f in files:
if f in class_headers:
result.append(os.path.join(root, f))
includes=[]
for o in result:
includes.append('#include <shogun/%s>' % o.strip().lstrip('./'))
return includes
def get_definitions(classes):
definitions=[]
definitions.append("#define %s" % SHOGUN_TEMPLATE_CLASS)
definitions.append("#define %s" % SHOGUN_BASIC_CLASS)
for c,t in classes:
d="static %s CSGObject* __new_C%s(EPrimitiveType g) { return g == PT_NOT_GENERIC? new C%s(): NULL; }" % (SHOGUN_BASIC_CLASS,c,c)
definitions.append(d)
return definitions
def get_template_definitions(classes, supports_complex):
definitions=[]
for c,t in classes:
d=[]
d.append("static %s CSGObject* __new_C%s(EPrimitiveType g)\n{\n\tswitch (g)\n\t{\n" % (SHOGUN_TEMPLATE_CLASS,c))
for t in types:
if t in ('BOOL','CHAR'):
suffix=''
else:
suffix='_t'
if t=='COMPLEX128' and not supports_complex:
d.append("\t\tcase PT_COMPLEX128: return NULL;\n")
else:
d.append("\t\tcase PT_%s: return new C%s<%s%s>();\n" % (t,c,t.lower(),suffix))
d.append("\t\tcase PT_SGOBJECT:\n")
d.append("\t\tcase PT_UNDEFINED: return NULL;\n\t}\n\treturn NULL;\n}")
definitions.append(''.join(d))
return definitions
def get_struct(classes):
struct=[]
for c,template in classes:
prefix = SHOGUN_BASIC_CLASS
if template:
prefix = SHOGUN_TEMPLATE_CLASS
s='{"%s", %s __new_C%s},' % (c,prefix,c)
struct.append(s)
return struct
def extract_block(c, lines, start_line, stop_line, start_sym, stop_sym):
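# Scan lines[start_line:stop_line] and return the line range of the first balanced
# start_sym/stop_sym block (e.g. '{' ... '}'), using sym_cnt as a nesting counter;
# block_stop is the line just after the matching closing symbol.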
sym_cnt=0
block_start=-1;
block_stop=-1;
for line_nr in range(start_line, stop_line):
line=lines[line_nr]
if line.find(start_sym)!=-1:
sym_cnt+=1
if block_start==-1:
block_start=line_nr
if line.find(stop_sym)!=-1:
block_stop=line_nr+1
sym_cnt-=1
if sym_cnt==0 and block_start!=-1 and block_stop!=-1:
return block_start,block_stop
return block_start,block_stop
def check_complex_supported_class(line):
l=list(filter(lambda y:y if y!='' else None,\
line.strip().replace('\t',' ').split(' ')))
supported=len(l)==3 and l[0]=='typedef' and l[1]=='bool'\
and l[2]=='supports_complex128_t;'
return supported
def test_candidate(c, lines, line_nr, supports_complex):
start,stop=extract_block(c, lines, line_nr, len(lines), '{','}')
if stop<line_nr:
return False, line_nr+1
complex_supported=False
for line_nr in range(start, stop):
line=lines[line_nr]
if line.find('virtual')!=-1:
if check_abstract_class(line):
return False, stop
else:
vstart,vstop=extract_block(c, lines, line_nr, stop, '(',')')
for line_nr in range(vstart, vstop):
line=lines[line_nr]
if check_abstract_class(line):
return False, stop
if line.find('supports_complex128_t')!=-1:
if check_complex_supported_class(line):
complex_supported=True
if not supports_complex:
return False, stop
if supports_complex and not complex_supported:
return False, stop
return True, stop
def extract_classes(HEADERS, template, blacklist, supports_complex):
"""
Search in headers for non-template/non-abstract class-names starting
with `C'.
Does not support local or multiple classes, and
drops classes with pure virtual functions.
"""
classes=list()
for fname in HEADERS:
try:
lines=open(fname).readlines()
except: # python3 workaround
lines=open(fname, encoding='utf-8', errors='ignore').readlines()
line_nr=0
while line_nr<len(lines):
line=lines[line_nr]
if line.find('IGNORE_IN_CLASSLIST')!=-1:
line_nr+=1
continue
c=None
if template:
tp=line.find('template')
if tp!=-1:
line=line[tp:]
cp=line.find('>')
line=line[cp+1:]
cp=line.find(class_str)
if cp!=-1:
c=extract_class_name(lines, line_nr, line, blacklist)
else:
if line.find(class_str)!=-1:
c=extract_class_name(lines, line_nr, None, blacklist)
if c:
ok, line_nr=test_candidate(c, lines, line_nr, supports_complex)
if ok:
classes.append((c,template))
continue
line_nr+=1
return classes
def write_templated_file(fname, substitutes):
template=open(fname).readlines()
f=open(fname,'w')
for line in template:
l=line.strip()
if l.startswith('REPLACE') and l.endswith('THIS'):
l=line.split()[1]
if sys.version_info >= (3,):
for s in substitutes.keys():
if l==s:
f.write('\n'.join(substitutes[s]))
continue
else:
for s in substitutes.iterkeys():
if l==s:
f.write('\n'.join(substitutes[s]))
continue
else:
f.write(line)
def read_config():
config=dict()
for line in open('lib/config.h').readlines():
if line=='\n':
continue
l=[l.strip() for l in line.split()]
config[l[1]]=1
return config
def get_blacklist():
config=read_config()
blacklist=dict()
for cfg in config_tests:
if not cfg in config:
blacklist[cfg]=1
return blacklist
if __name__=='__main__':
import sys
TEMPL_FILE=sys.argv[1]
HEADERS=None
if (sys.argv[2] == "-in"):
# read header file list from file
with open(sys.argv[3]) as f:
content = f.readlines()
HEADERS = [x.strip() for x in content]
else:
HEADERS=sys.argv[2:]
blacklist = get_blacklist()
classes = extract_classes(HEADERS, False, blacklist, False)
template_classes = extract_classes(HEADERS, True, blacklist, False)
complex_template_classes = extract_classes(HEADERS, True, blacklist, True)
includes = get_includes(classes+template_classes+complex_template_classes)
definitions = get_definitions(classes)
template_definitions = get_template_definitions(template_classes, False)
complex_template_definitions = get_template_definitions(complex_template_classes, True)
struct = get_struct(classes+template_classes+complex_template_classes)
substitutes = {'includes': includes,
'definitions' :definitions,
'template_definitions' : template_definitions,
'complex_template_definitions' : complex_template_definitions,
'struct' : struct
}
write_templated_file(TEMPL_FILE, substitutes)
| 27.109272
| 130
| 0.693416
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,801
| 0.219983
|
58f8d01058e75992d07c8d9e6c624ed7a5775471
| 771
|
py
|
Python
|
script/solr_unauthorized_access.py
|
5up3rc/Vxscan
|
0d2cae446f6502b51596853be3514c7c4c62809c
|
[
"Apache-2.0"
] | 2
|
2019-12-05T01:58:22.000Z
|
2019-12-14T09:19:28.000Z
|
script/solr_unauthorized_access.py
|
5up3rc/Vxscan
|
0d2cae446f6502b51596853be3514c7c4c62809c
|
[
"Apache-2.0"
] | null | null | null |
script/solr_unauthorized_access.py
|
5up3rc/Vxscan
|
0d2cae446f6502b51596853be3514c7c4c62809c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# project = https://github.com/Xyntax/POC-T
# author = i@cdxy.me
"""
Apache Solr unauthenticated-access PoC
(example usage scenario for the iterate_path function)
Usage
python POC-T.py -s solr-unauth -iF target.txt
python POC-T.py -s solr-unauth -aZ "solr country:cn"
"""
from lib.verify import verify
from lib.random_header import get_ua
import requests
vuln = ['solr']
def check(ip, ports, apps):
if verify(vuln, ports, apps):
try:
url = 'http://' + ip
url = url + '/solr/'
g = requests.get(url, headers=get_ua(), timeout=5)
if g.status_code == 200 and 'Solr Admin' in g.content and 'Dashboard' in g.content:
return 'Apache Solr Admin leak'
except Exception:
pass
| 24.09375
| 95
| 0.608301
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 368
| 0.461731
|
58f91a9f5c9302c8e95efa47c83b819f09e32089
| 1,248
|
py
|
Python
|
conanfile.py
|
midurk/conan-rapidxml
|
df93616a87ba41edd9def914f765fd8eae0007c5
|
[
"MIT"
] | null | null | null |
conanfile.py
|
midurk/conan-rapidxml
|
df93616a87ba41edd9def914f765fd8eae0007c5
|
[
"MIT"
] | null | null | null |
conanfile.py
|
midurk/conan-rapidxml
|
df93616a87ba41edd9def914f765fd8eae0007c5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile, tools
import os
class RapiXMLConan(ConanFile):
name = "rapidxml"
version = "1.13"
description = "RapidXml is an attempt to create the fastest XML parser possible"
url = "https://github.com/bincrafters/conan-rapidxml"
homepage = "http://rapidxml.sourceforge.net"
author = "Bincrafters <bincrafters@gmail.com>"
license = ("BSL-1.0", "MIT")
exports = ["LICENSE.md"]
exports_sources = ["CMakeLists.txt", "name_lookup_changes_fix.patch"]
source_subfolder = "source_subfolder"
no_copy_source = True
def source(self):
source_url = "https://cfhcable.dl.sourceforge.net/project/rapidxml/rapidxml/rapidxml%20"
tools.get("{0}{1}/{2}-{3}.zip".format(source_url, self.version, self.name, self.version))
os.rename(self.name + "-" + self.version, self.source_subfolder)
tools.patch(base_path=self.source_subfolder, patch_file="name_lookup_changes_fix.patch")
def package(self):
self.copy(pattern="license.txt", dst="licenses", src=self.source_subfolder)
self.copy(pattern="*.hpp", dst="include", src=self.source_subfolder)
def package_id(self):
self.info.header_only()
| 37.818182
| 97
| 0.684295
| 1,152
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 502
| 0.402244
|
58f92d8b76c80f99fb368b4b75fdb05787830601
| 2,772
|
py
|
Python
|
src_2d/help/compute_dice.py
|
xzluo97/MvMM-RegNet
|
c08d5df14b4a9c4a98c66973ff4950aba7f416e4
|
[
"MIT"
] | 19
|
2020-07-14T02:23:58.000Z
|
2022-03-15T12:22:49.000Z
|
src_2d/help/compute_dice.py
|
xzluo97/MvMM-RegNet
|
c08d5df14b4a9c4a98c66973ff4950aba7f416e4
|
[
"MIT"
] | 4
|
2020-09-25T22:42:40.000Z
|
2021-08-25T15:03:29.000Z
|
src_2d/help/compute_dice.py
|
xzluo97/MvMM-RegNet
|
c08d5df14b4a9c4a98c66973ff4950aba7f416e4
|
[
"MIT"
] | 7
|
2020-08-29T15:46:13.000Z
|
2021-07-16T01:51:28.000Z
|
"""
Compute Dice between test ground truth and predictions from groupwise registration.
"""
import os
import nibabel as nib
import glob
import numpy as np
from core import utils_2d
from core.metrics_2d import OverlapMetrics
def one_hot_label(label, label_intensity):
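# Convert a label map whose voxels take the intensities in `label_intensity`
# (here 0, 200, 500, 600: background plus the three cardiac structures scored below)
# into a one-hot array; channel 0 holds the background complement.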
gt = np.around(label)
n_class = len(label_intensity)
label = np.zeros((np.hstack((gt.shape, n_class))), dtype=np.float32)
for k in range(1, n_class):
label[..., k] = (gt == label_intensity[k])
label[..., 0] = np.logical_not(np.sum(label[..., 1:], axis=-1))
return label
def load_nifty(name):
img = nib.load(name)
return np.asarray(img.get_fdata(), np.float32)
if __name__ == '__main__':
gt_path = '../../../../../../dataset/C0T2LGE/label_center_data/test/*label.nii.gz'
pred_path = '../../../../../../results/MSCMR/test_predictions_1.5mm_group3_fusion15/*label.nii.gz'
pred_names = utils_2d.strsort(glob.glob(pred_path))
gt_names = utils_2d.strsort([name for name in glob.glob(gt_path) if os.path.basename(name).split('_')[1] == 'DE'])
pred_gt_names = dict(zip(pred_names, gt_names))
print(pred_gt_names)
average_dice = []
myo_dice = []
LV_dice = []
RV_dice = []
for name in pred_names:
pred_label = load_nifty(name)
one_hot_pred = one_hot_label(pred_label, (0, 200, 500, 600))
gt_label = load_nifty(pred_gt_names[name])
gt_label = np.concatenate([gt for gt in np.dsplit(gt_label, gt_label.shape[-1])
if np.all([np.sum(gt==i) > 0 for i in [200, 500, 600]])], axis=-1)
one_hot_gt = one_hot_label(gt_label, (0, 200, 500, 600))
Dice = OverlapMetrics(n_class=4, mode='np')
dice = Dice.averaged_foreground_dice(one_hot_gt, one_hot_pred)
m_dice = Dice.class_specific_dice(one_hot_gt, one_hot_pred, i=1)
l_dice = Dice.class_specific_dice(one_hot_gt, one_hot_pred, i=2)
r_dice = Dice.class_specific_dice(one_hot_gt, one_hot_pred, i=3)
average_dice.append(dice)
myo_dice.append(m_dice)
LV_dice.append(l_dice)
RV_dice.append(r_dice)
print("Average foreground Dice for %s: %.4f" % (os.path.basename(name), dice))
print("Myocardium Dice for %s: %.4f" % (os.path.basename(name), m_dice))
print("LV Dice for %s: %.4f" % (os.path.basename(name), l_dice))
print("RV Dice for %s: %.4f" % (os.path.basename(name), r_dice))
print("Average prediction Dice: %.4f" % np.mean(average_dice))
print("Average myocardium Dice: %.4f" % np.mean(myo_dice))
print("Average LV Dice: %.4f" % np.mean(LV_dice))
print("Average RV Dice: %.4f" % np.mean(RV_dice))
| 35.538462
| 119
| 0.626623
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 496
| 0.178932
|
58f98f05dc6e23f4ee940bfbe966fafa9a03fa4c
| 14,359
|
py
|
Python
|
farmos_ext/farm.py
|
applecreekacres/farmos.py.ext
|
91db8a6d5532661650869d34c2ff94e1fed02f02
|
[
"MIT"
] | null | null | null |
farmos_ext/farm.py
|
applecreekacres/farmos.py.ext
|
91db8a6d5532661650869d34c2ff94e1fed02f02
|
[
"MIT"
] | 46
|
2021-02-21T21:05:07.000Z
|
2022-03-15T23:05:25.000Z
|
farmos_ext/farm.py
|
applecreekacres/farmos.py.ext
|
91db8a6d5532661650869d34c2ff94e1fed02f02
|
[
"MIT"
] | null | null | null |
"""Main farm access."""
from __future__ import annotations
import os
from datetime import datetime
from typing import Dict, Iterable, Iterator, List, Type, Union
from farmos_ext.area import Area
from farmos_ext.asset import Asset, Equipment, Planting
from farmos_ext.log import (Activity, Birth, Harvest, Input, Log, Maintenance,
Medical, Observation, Purchase, Sale, Seeding,
SoilTest, Transplanting)
from farmos_ext.others import Content, Quantity
from farmos_ext.term import Crop, CropFamily, Season, Term, Unit
from farmOS import farmOS # pylint: disable=wrong-import-order
from farmOS.client import BaseAPI # pylint: disable=wrong-import-order
class FarmTypeMissingError(Exception):
pass
def farm():
"""Access to farm with provided credentials."""
return Farm()
class FileAPI(BaseAPI):
def __init__(self, session):
# Define 'file' as the farmOS API entity endpoint
super().__init__(session=session, entity_type='file')
# pylint: disable=too-many-public-methods
class Farm(farmOS):
def __init__(self, local_resources="./resources"):
self._host = None
self._user = None
self._pass = None
self.local_resources = local_resources
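# farmos.cfg is expected to hold simple KEY=value lines; only HOST, USER and PASS
# are read below, e.g. (illustrative values only):
#   HOST=https://example.farmos.net
#   USER=myuser
#   PASS=mypassword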
if os.path.exists("farmos.cfg"):
with open('farmos.cfg') as cfg:
for line in cfg.readlines():
if line.startswith("HOST"):
self._host = line[line.index("=")+1:].strip()
if line.startswith("USER"):
self._user = line[line.index("=")+1:].strip()
if line.startswith("PASS"):
self._pass = line[line.index("=")+1:].strip()
if not self._host:
raise KeyError("HOST key is not defined in farmos.cfg")
if not self._user:
raise KeyError("USER key is not defined in farmos.cfg")
if not self._pass:
raise KeyError("PASS key is not defined in farmos.cfg")
super().__init__(self._host)
self._token = self.authorize(self._user, self._pass)
else:
raise Exception('farmos.cfg not found.')
self.file = FileAPI(self.session)
def assets(self,
filters: Union[Dict, List[Dict], int, str] = None,
asset_class: Type[Asset] = Asset) -> Iterator[Type[Asset]]:
if isinstance(filters, list):
for filt in filters:
for asset in self.asset.get(filt)['list']:
yield asset_class(self, keys=asset)
else:
for asset in self.asset.get(filters)['list']:
yield asset_class(self, keys=asset)
# def _get_assets(self, items: List[Dict], obj_class):
# retitems = []
# for item in items:
# rets = self.asset.get(item['id'])
# if 'list' in rets:
# self.extract(rets, obj_class)
# else:
# retitems.append(obj_class(self, rets))
# return retitems
def logs(self,
filters: Union[Dict, List[Dict], int, str] = None,
log_class: Type[Log] = Log) -> Iterator[Type[Log]]:
if isinstance(filters, list):
for filt in filters:
for log in self.log.get(filt):
yield log_class(self, keys=log)
elif isinstance(filters, int):
yield log_class(self, keys=self.log.get(filters))
else:
for log in self.log.get(filters):
yield log_class(self, keys=log)
def terms(self, filters: Union[str, List[Dict], Dict] = None,
term_class: Type[Term] = Term) -> Iterator[Type[Term]]:
if isinstance(filters, list):
for item in filters:
for term in self.term.get({"tid": item['id']})['list']:
yield term_class(self, keys=term)
else:
rets = self.term.get(filters)
yield term_class(self, keys=rets)
def areas(self, filters: Union[Dict, List[Dict], int, str] = None) -> Iterator[Area]:
if isinstance(filters, list):
for filt in filters:
for area in self.area.get(filt)['list']:
yield Area(self, keys=area)
else:
for area in self.area.get(filters)['list']:
yield Area(self, keys=area)
def _create_log(self, name: str, date: datetime, category: str, fields: Dict, done=False):
data = {
"name": name,
"timestamp": str(int(datetime.timestamp(date))),
"log_category": [{
"name": category
}],
"type": "farm_observation"
}
data.update(fields)
if 'done' not in data:
data['done'] = '1' if done else '0'
ret = self.log.send(data)
return ret
@property
def content(self) -> Content:
return Content(self, keys=self.info())
@property
def seasons(self) -> Iterator[Season]:
for season in self.term.get("farm_season")['list']:
yield Season(self, season)
@property
def crop_families(self) -> Iterator[CropFamily]:
for fam in self.term.get("farm_crop_families")['list']:
yield CropFamily(self, keys=fam)
@property
def crops(self) -> Iterator[Crop]:
for crop in self.term.get("farm_crops")['list']:
yield Crop(self, crop)
def equipment(self, filters: Dict = None) -> Iterable[Equipment]:
if not filters:
filters = {'type': 'equipment'}
else:
filters.update({'type': 'equipment'})
return self.assets(filters, Equipment)
def plantings(self, filters: Dict = None) -> Iterable[Planting]:
if not filters:
filters = {'type': 'planting'}
else:
filters.update({'type': 'planting'})
return self.assets(filters, Planting)
@property
def units(self) -> Iterable[Unit]:
for unit in self.term.get('farm_quantity_units')['list']:
yield Unit(self, unit)
def harvests(self, filters: Dict = None) -> Iterable[Harvest]:
if 'farm_harvests' in self.content.resources['log']:
if not filters:
filters = {'type': 'farm_harvest'}
else:
filters.update({'type': 'farm_harvest'})
return self.logs(filters, Harvest)
else:
raise FarmTypeMissingError("Harvest logs not supported.")
def seedings(self, filters: Dict = None) -> Iterable[Seeding]:
if 'farm_seedings' in self.content.resources['log']:
if not filters:
filters = {'type': 'farm_seeding'}
else:
filters.update({'type': 'farm_seeding'})
return self.logs(filters, Seeding)
else:
raise FarmTypeMissingError("Seeding logs not supported.")
def transplants(self, filters: Dict = None) -> Iterable[Transplanting]:
if 'farm_transplanting' in self.content.resources['log']:
if not filters:
filters = {'type': 'farm_transplanting'}
else:
filters.update({'type': 'farm_transplanting'})
return self.logs(filters, Transplanting)
else:
raise FarmTypeMissingError("Transplanting logs not supported.")
def observations(self, filters: Dict = None) -> Iterable[Observation]:
if 'farm_observation' in self.content.resources['log']:
if not filters:
filters = {'type': 'farm_observation'}
else:
filters.update({'type': 'farm_observation'})
return self.logs(filters, Observation)
else:
raise FarmTypeMissingError("Observation logs not supported.")
def maintenances(self, filters: Dict = None) -> Iterator[Maintenance]:
if 'farm_maintenance' in self.content.resources['log']:
if not filters:
filters = {'type': 'farm_maintenance'}
else:
filters.update({'type': 'farm_maintenance'})
return self.logs(filters, Maintenance)
else:
raise FarmTypeMissingError("Maintenance logs not supported.")
def purchases(self, filters: Dict = None) -> Iterator[Purchase]:
if 'farm_purchase' in self.content.resources['log']:
if not filters:
filters = {'type': 'farm_purchase'}
else:
filters.update({'type': 'farm_purchase'})
return self.logs(filters, Purchase)
else:
raise FarmTypeMissingError("Purchase logs not supported.")
def sales(self, filters: Dict = None) -> Iterator[Sale]:
if 'farm_sale' in self.content.resources['log']:
if not filters:
filters = {'type': 'farm_sale'}
else:
filters.update({'type': 'farm_sale'})
return self.logs(filters, Sale)
else:
raise FarmTypeMissingError("Sale logs not supported.")
def births(self, filters: Dict = None) -> Iterator[Birth]:
if 'farm_birth' in self.content.resources['log']:
if not filters:
filters = {'type': 'farm_birth'}
else:
filters.update({'type': 'farm_birth'})
return self.logs(filters, Birth)
else:
raise FarmTypeMissingError("Birth logs not supported.")
def inputs(self, filters: Dict = None) -> Iterator[Input]:
if 'farm_input' in self.content.resources['log']:
if not filters:
filters = {'type': 'farm_input'}
else:
filters.update({'type': 'farm_input'})
return self.logs(filters, Input)
else:
raise FarmTypeMissingError("Input logs not supported.")
def soil_tests(self, filters: Dict = None) -> Iterator[SoilTest]:
if 'farm_soil_test' in self.content.resources['log']:
if not filters:
filters = {'type': 'farm_soil_test'}
else:
filters.update({'type': 'farm_soil_test'})
return self.logs(filters, SoilTest)
else:
raise FarmTypeMissingError("Soil test logs not supported.")
def activities(self, filters: Dict = None) -> Iterator[Activity]:
if 'farm_activity' in self.content.resources['log']:
if not filters:
filters = {'type': 'farm_activity'}
else:
filters.update({'type': 'farm_activity'})
return self.logs(filters, Activity)
else:
raise FarmTypeMissingError("Activity logs not supported.")
def medicals(self, filters: Dict = None) -> Iterator[Medical]:
if 'farm_medical' in self.content.resources['log']:
if not filters:
filters = {'type': 'farm_medical'}
else:
filters.update({'type': 'farm_medical'})
return self.logs(filters, Medical)
else:
raise FarmTypeMissingError("Medical logs are not supported.")
def create_planting(self, crop: Crop, season: str, location: str) -> Planting:
ret = self.asset.send({
"name": "{} {} {}".format(season, location, crop.name),
"type": "planting",
"crop": [
{
"id": crop.tid
}
],
"season": [{"name": season}]
})
plant = Planting(self, keys=ret)
return plant
def create_seeding(self, planting: Planting, location: Area, crop: Crop,
date: datetime, seeds: int, source=None, done=False) -> Seeding:
name = "Seed {} {} {}".format(date.year, location.name, crop.name)
fields = {
"type": "farm_seeding",
"asset": [
{
"id": planting.id,
"resource": "taxonomy_term"
}
],
"seed_source": source,
"movement": {
"area": [
{
"id": location.tid,
"resource": "taxonomy_term"
}
]
},
"quantity": [
{
"measure": "count",
"value": str(seeds),
"unit": {
'name': 'Seeds',
"resource": "taxonomy_term"
}
}
]
}
ret = self._create_log(name, date, 'Plantings', fields, done=done)
return Seeding(self, keys=ret)
def create_transplant(self, planting: Planting, location: Area, date: datetime, fields=None, done=False):
name = "Transplant {}".format(planting.name)
data = {
"type": "farm_transplanting",
"movement": {
"area": [
{
"id": location.tid,
"resource": "taxonomy_term"
}
]
},
"asset": [
{
"id": planting.id,
"resource": "taxonomy_term"
}
]
}
if fields:
data.update(fields)
ret = self._create_log(name, date, 'Plantings', data, done=done)
return Transplanting(self, ret)
def create_harvest(self, planting: Planting, date: datetime, quantities: List[Quantity], done=False):
name = "Harvest {} {}".format(date.year, planting.crop[0]['name'])
data = {
"type": "farm_harvest",
"asset": [{
"id": planting.id,
"resource": "taxonomy_term"
}]
}
if quantities:
data["quantity"] = []
for quantity in quantities:
data["quantity"].append(quantity.to_dict())
ret = self._create_log(name, date, 'Plantings', data, done=done)
return Harvest(self, ret)
def create_log(self, name: str, date: datetime, category: str, fields: Dict, done=False):
return Log(self, self._create_log(name, date, category, fields, done))
| 37.393229
| 109
| 0.537503
| 13,514
| 0.941152
| 2,349
| 0.163591
| 701
| 0.04882
| 0
| 0
| 2,636
| 0.183578
|
58f9aedfba7b25435acbe41455b6f6873bd36f40
| 2,768
|
py
|
Python
|
tests/io/v3/base/test_csv_iterator.py
|
alpesh-te/pyTenable
|
4b5381a7757561f7ac1e79c2e2679356dd533540
|
[
"MIT"
] | null | null | null |
tests/io/v3/base/test_csv_iterator.py
|
alpesh-te/pyTenable
|
4b5381a7757561f7ac1e79c2e2679356dd533540
|
[
"MIT"
] | 25
|
2021-11-16T18:41:36.000Z
|
2022-03-25T05:43:31.000Z
|
tests/io/v3/base/test_csv_iterator.py
|
alpesh-te/pyTenable
|
4b5381a7757561f7ac1e79c2e2679356dd533540
|
[
"MIT"
] | 2
|
2022-03-02T12:24:40.000Z
|
2022-03-29T05:12:04.000Z
|
'''
Testing the CSV iterators
'''
import responses
from tenable.io.v3.base.iterators.explore_iterator import CSVChunkIterator
USERS_BASE_URL = r'https://cloud.tenable.com/api/v3/assets/search'
CSV_TEXT = (
'created,display_ipv4_address,first_observed,id,'
'ipv4_addresses,ipv6_addresses,is_deleted,is_licensed,'
'is_public,last_observed,name,network.id,network.name,'
'observation_sources,sources,types,updated\n'
'2021-11-24T13:43:56.709Z,192.12.13.7,2021-11-24T13:43:56.442Z,'
'"0142df77-dbc4-4706-8456-b756c06ee8a2",192.12.13.7,,false,'
'false,true,2021-11-24T13:43:56.442Z,192.12.13.7,'
'"00000000-0000-0000-0000-000000000000",Default,'
'"test_v3;2021-11-24T13:43:56.442Z;2021-11-24T13:43:56.442Z",'
'test_v3,host,2021-11-24T13:43:56.709Z\n'
)
CSV_TEXT_2 = (
'created,display_ipv4_address,first_observed,id,ipv4_addresses,'
'ipv6_addresses,is_deleted,is_licensed,is_public,last_observed,'
'name,network.id,network.name,observation_sources,sources,'
'types,updated\ncreated,display_ipv4_address,first_observed,id,'
'ipv4_addresses,ipv6_addresses,is_deleted,is_licensed,'
'is_public,last_observed,name,network.id,network.name,'
'observation_sources,sources,types,updated\n'
'2021-11-24T13:43:56.709Z,192.12.13.7,2021-11-24T13:43:56.442Z,'
'"0142df77-dbc4-4706-8456-b756c06ee8a2",192.12.13.7,,'
'false,false,true,2021-11-24T13:43:56.442Z,192.12.13.7,'
'"00000000-0000-0000-0000-000000000000",Default,'
'"test_v3;2021-11-24T13:43:56.442Z;2021-11-24T13:43:56.442Z",'
'test_v3,host,2021-11-24T13:43:56.709Z\n'
)
CSV_HEADERS = {
'Date': 'Wed, 08 Dec 2021 04:42:28 GMT',
'Content-Type': 'text/csv;charset=UTF-8',
'Content-Length': '508',
'Connection': 'keep-alive',
'Set-Cookie': 'nginx-cloud-site-id=qa-develop; path=/; '
'HttpOnly; SameSite=Strict; Secure',
'X-Request-Uuid': '4d43db5bac4decd79fc198e06a8113bd',
'X-Continuation-Token': 'fasd563456fghfgfFGHFGHRT',
'X-Content-Type-Options': 'nosniff',
'X-Frame-Options': 'DENY',
'X-Xss-Protection': '1; mode=block',
'Cache-Control': 'no-store',
'Strict-Transport-Security': 'max-age=63072000; includeSubDomains',
'X-Gateway-Site-ID': 'nginx-router-jm8uw-us-east-1-eng',
'Pragma': 'no-cache',
'Expect-CT': 'enforce, max-age=86400',
'X-Path-Handler': 'tenable-io',
}
@responses.activate
def test_csv_iterator(api):
responses.add(
method=responses.POST,
url=USERS_BASE_URL,
body=CSV_TEXT,
headers=CSV_HEADERS
)
csv_iterator = CSVChunkIterator(
api=api,
_path='api/v3/assets/search',
_payload={}
)
assert next(csv_iterator) == CSV_TEXT
assert next(csv_iterator) == CSV_TEXT_2
| 35.948052
| 74
| 0.695087
| 0
| 0
| 0
| 0
| 387
| 0.139812
| 0
| 0
| 1,985
| 0.717124
|
58fae9bfd3e0a20200a7b3dc48f407ee12665c55
| 246
|
py
|
Python
|
import_new_tournaments/process_hh_files/process/hands/extract/position_info/extract_stack_from_seat_line.py
|
michaelcukier/Poker-Hand-Tracker
|
9adae42fab9f640e6939ba06bd588ab1a2feb90f
|
[
"MIT"
] | 5
|
2021-02-28T18:33:02.000Z
|
2022-03-12T01:43:40.000Z
|
import_new_tournaments/process_hh_files/process/hands/extract/position_info/extract_stack_from_seat_line.py
|
michaelcukier/Poker-Hand-Tracker
|
9adae42fab9f640e6939ba06bd588ab1a2feb90f
|
[
"MIT"
] | null | null | null |
import_new_tournaments/process_hh_files/process/hands/extract/position_info/extract_stack_from_seat_line.py
|
michaelcukier/Poker-Hand-Tracker
|
9adae42fab9f640e6939ba06bd588ab1a2feb90f
|
[
"MIT"
] | 2
|
2021-03-01T03:08:04.000Z
|
2021-12-31T17:53:46.000Z
|
def extract_stack_from_seat_line(seat_line: str) -> float or None:
# Seat 3: PokerPete24 (40518.00)
if 'will be allowed to play after the button' in seat_line:
return None
return float(seat_line.split(' (')[1].split(')')[0])
| 35.142857
| 66
| 0.670732
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 81
| 0.329268
|
58fb3efb05d44bd1aa9c7179a47db0c343140765
| 3,716
|
py
|
Python
|
startt.py
|
OnyyMexicanCat/RinkoglionitoBot
|
12e48e679b46710bbeaa7e98f02b09a512609031
|
[
"MIT"
] | null | null | null |
startt.py
|
OnyyMexicanCat/RinkoglionitoBot
|
12e48e679b46710bbeaa7e98f02b09a512609031
|
[
"MIT"
] | null | null | null |
startt.py
|
OnyyMexicanCat/RinkoglionitoBot
|
12e48e679b46710bbeaa7e98f02b09a512609031
|
[
"MIT"
] | null | null | null |
from telegram.ext import *
from telegram import *
import time
def start(update, context):
context.bot.send_message(chat_id=update.effective_chat.id, text="Hi ๐ I'm Rinkoglionito and I'm here because @OnyyTheBest had nothing to do ;-;. \n that said do the /cmds command to see the available commands")
chat_user_id = update.message.from_user.id
more_lines = [str(chat_user_id)+"\n"]
if str(chat_user_id)+"\n" not in open("users.txt", 'r'):
with open('users.txt', 'a') as f:
f.writelines('\n'.join(more_lines))
f.close()
else:
return
def YAAA(update: Update, context: CallbackContext):
context.bot.send_message(chat_id=update.effective_chat.id, text="Why you don't say YAAA?")
context.bot.send_video(chat_id=update.effective_chat.id, video="https://onyymexicancat.github.io/RinkoglionitoBot/mediafile/video/meme/01.mp4")
def BASTARDI(update: Update, context: CallbackContext):
context.bot.send_message(chat_id=update.effective_chat.id, text="๐ฃ Bastardi, chiamo da ๐น Reggio Emilia ๐๐ธ, sono un ๐ assassino di ๐
ฑ meridionali. Vi ๐ฐ ammazzo tutti bastardi pezzi di ๐
ฑ merda ๐คฌ. Porcodio a tutti i ๐ฅ napoletani romani di ๐ merda ๐คฌ stronzi, siete ๐ฅ della gente ๐จโ๐ฉโ๐งโ๐ฆ che โ viene schiacciata come ๐ topi ๐ฐ maledetti stronzi figli di ๐
ฑ una ๐ cagna in calore. Io ๐
ฑ vi ๐ฐ sp โ .. io ๐
ฑ vi ๐ฐ spacco le ๐
ฐ fighe, le ๐
ฑ ovaie a tutte le ๐
ฑ vostre donne sporche. venite su ๐
ฑ, puttane, che โ vi ๐ฐ apro lo ๐ sterno e ๐น vi ๐ฐ mangio il ๐ cuore e ๐น poi ve lo ๐ cago nella figa, brutte merde che โ non โ siete ๐ฅ altro, sono un ๐ฆ๐ฒ๐ฝ๐ assassino di ๐
ฑ fkghe.")
context.bot.send_audio(chat_id=update.effective_chat.id, audio="https://onyymexicancat.github.io/RinkoglionitoBot/mediafile/audio/meme/01.mp3")
def CMDS(update: Update, context: CallbackContext):
context.bot.send_message(chat_id=update.effective_chat.id, text="comandi attualmente attivi nel bot sono i seguenti \n /Start (Avvia il Bot) \n /BASTARDI (Bastardi chiamo da reggio emilia) \n /YAAA (YAAA KID) \n /CHK (VIP Only CC Checker)\n /vip (pay me xD)\n")
def oldupdate(update: Update, context: CallbackContext):
context.bot.send_message(chat_id=update.effective_chat.id, text="""
-- โ๏ธ@OnyyTheBest --
""", parse_mode="html")
def update(update: Update, context: CallbackContext):
context.bot.send_message(chat_id=update.effective_chat.id, text="""
-- โ๏ธ@OnyyTheBest --
""", parse_mode="html")
def commandnotfount(update, context):
try:
bot_msg = context.bot.send_message(chat_id=update.message.chat_id, text="<b>COMANDO NON TROVATO!</b> usa il comando /cmds per trovare il comando che stai cercando", parse_mode="html")
time.sleep(10)
context.bot.delete_message(chat_id=update.message.chat_id, message_id=bot_msg.message_id)
except:
pass
def bcast(update: update,context: CallbackContext):
if update.effective_chat.id == 476263382:
if context.args == []:
context.bot.send_message(update.effective_chat.id, text="<b>Please enter the message you want to broadcast to Bot users!</b>", parse_mode="html")
else:
porco = ''
for char in context.args:
if char !="[" + "'" + "]":
porco += char
ciccio = open("users.txt", 'r')
for line in ciccio:
content = line
context.bot.send_message(chat_id=content, text=porco)
update.message.reply_text(text="<b>DONE!</b>", parse_mode="html")
else:
context.bot.send_message(update.effective_chat.id, text="<b>NO PERMS</b>", parse_mode="html")
| 62.983051
| 655
| 0.660388
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,630
| 0.421297
|
58fb480ae327d41355b8b8179c7dede058c21b5b
| 222
|
py
|
Python
|
server/config.py
|
nikitinivan/Crypton
|
90db77f4066d763e55f55c0fb540dc99aa2495e3
|
[
"MIT"
] | null | null | null |
server/config.py
|
nikitinivan/Crypton
|
90db77f4066d763e55f55c0fb540dc99aa2495e3
|
[
"MIT"
] | null | null | null |
server/config.py
|
nikitinivan/Crypton
|
90db77f4066d763e55f55c0fb540dc99aa2495e3
|
[
"MIT"
] | null | null | null |
import os
class Configuration:
APPLICATION_DIR = os.path.dirname(os.path.realpath(__file__))
DEBUG = True
SECRET_KEY = 'thisissecretkeyforcrypton' # change it in production !!!
MONGO_DBNAME = 'cryptondb'
| 24.666667
| 74
| 0.725225
| 209
| 0.941441
| 0
| 0
| 0
| 0
| 0
| 0
| 67
| 0.301802
|
58fc01c36853b26f8562e022eac13585ff61105f
| 69
|
py
|
Python
|
nbviewerbot/__main__.py
|
JohnPaton/nbviewerbot
|
a9564655ba041e53db9a6916fb424e9582704321
|
[
"MIT"
] | 7
|
2018-08-06T20:02:13.000Z
|
2021-04-12T06:04:46.000Z
|
nbviewerbot/__main__.py
|
JohnPaton/nbviewerbot
|
a9564655ba041e53db9a6916fb424e9582704321
|
[
"MIT"
] | 5
|
2018-09-13T20:53:32.000Z
|
2021-03-31T18:55:48.000Z
|
nbviewerbot/__main__.py
|
JohnPaton/nbviewerbot
|
a9564655ba041e53db9a6916fb424e9582704321
|
[
"MIT"
] | null | null | null |
import nbviewerbot
if __name__ == "__main__":
nbviewerbot.cli()
| 13.8
| 26
| 0.710145
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.144928
|
58fd05c379e33e35d0b95e61f85e13decd24ff2f
| 1,571
|
py
|
Python
|
fetch_data.py
|
ASabryMazroua/Arabic-Dialect-Classification
|
e0e778379a321022d4d05b54b067ab6541793434
|
[
"MIT"
] | 1
|
2022-03-19T04:40:27.000Z
|
2022-03-19T04:40:27.000Z
|
fetch_data.py
|
ASabryMazroua/Arabic-Dialect-Classification
|
e0e778379a321022d4d05b54b067ab6541793434
|
[
"MIT"
] | null | null | null |
fetch_data.py
|
ASabryMazroua/Arabic-Dialect-Classification
|
e0e778379a321022d4d05b54b067ab6541793434
|
[
"MIT"
] | null | null | null |
import json
import math
import requests
import pandas as pd
def fetch_data(ids):
'''
A function to fetch data from the API.
Parameters:
ids (list): A list of ids (integers) to fetch
Returns:
text (dict): A dictionary where keys are the ids and values are the text
'''
results = {}
# We'll loop over the ids to fetch the text data
# We'll split ids into batches of 1000 because of the API's request size limit
# Future work:
# we can handle if the connection times out or any other problem that would happen
# we can add some assertion to make sure that ids are valid
for i in range(math.ceil(len(ids)/1000)):
sub_ids = json.dumps(ids[i*1000:1000*(i+1)])
while True:
r = requests.post("https://recruitment.aimtechnologies.co/ai-tasks", sub_ids)
# print(r.status_code)
if r.status_code == 200:
results.update(json.loads(r.text))
break;
print(f"We managed to fetch {len(results)} samples of text.")
return results
if __name__ == '__main__':
#Read the ids' file, then fetch data, and write the file to a csv
source_data = pd.read_csv("files/dialect_dataset.csv")
text_dict = fetch_data(list(source_data.loc[:,"id"].astype(str)))
#We'll make sure that we managed to fetch all the ids
if len(source_data) == len(text_dict):
source_data.loc[:,"text"] = text_dict.values()
source_data.to_csv("data/full_dialect_dataset.csv",encoding='utf-8-sig')
| 38.317073
| 89
| 0.624443
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 857
| 0.545512
|
58fd528adc4ec458f11e0462bce2b5ed5cc03175
| 2,305
|
py
|
Python
|
tests/test.py
|
daniel-sasu/roadwatch-data-processor
|
10317998a7f336bd1a26dc95b54e7bb7785cfd22
|
[
"MIT"
] | null | null | null |
tests/test.py
|
daniel-sasu/roadwatch-data-processor
|
10317998a7f336bd1a26dc95b54e7bb7785cfd22
|
[
"MIT"
] | null | null | null |
tests/test.py
|
daniel-sasu/roadwatch-data-processor
|
10317998a7f336bd1a26dc95b54e7bb7785cfd22
|
[
"MIT"
] | null | null | null |
# tests/test.py
from rw_data_proc.core import *
import unittest
def _dummyModifierFunction(row):
row['adr'] = 'modified'
return row['adr']
class TableTest(unittest.TestCase):
def test_init(self):
tb = Table('tests/test_file.csv')
self.assertIsNotNone(tb)
def test_add_modifier(self):
tb = Table('tests/test_file.csv')
modifiers = len(tb.modifiers)
tb.addModifier('adr', _dummyModifierFunction)
self.assertEqual(len(tb.modifiers), modifiers + 1)
def test_col_rename(self):
tb = Table('tests/test_file.csv', col_rename=[])
self.assertTrue('adr' in tb.dataFrame.columns.values.tolist())
tb.col_rename([{'adr': 'ADR'}])
self.assertTrue('ADR' in tb.dataFrame.columns.values.tolist())
def test_clean(self):
tb = Table('tests/test_file.csv')
self.assertTrue('adr' in tb.dataFrame.columns.values.tolist())
self.assertTrue('gps' in tb.dataFrame.columns.values.tolist())
tb.clean(['gps', 'adr'])
self.assertFalse('adr' in tb.dataFrame.columns.values.tolist())
self.assertFalse('gps' in tb.dataFrame.columns.values.tolist())
def test_process(self):
tb = Table('tests/test_file.csv')
tb.addModifier('adr', _dummyModifierFunction)
tb.process()
self.assertTrue('modified' in tb.dataFrame['adr'].tolist())
def test_process_generic(self):
df = process_generic_file('tests/test_file.csv', index='num_acc',
encoding='latin-1', sep=',', dtype=None,
col_rename=[{'Num_Acc': 'num_acc'}, {'adr': 'ADR'}, {'gps': 'GPS'}],
cols_formatted=['ADR', 'GPS'], modifiers={'ADR': _dummyModifierFunction},
drop_cols=['int', 'lat', 'long'])
self.assertIsNotNone(df)
self.assertEqual(df.index.name, 'num_acc')
self.assertTrue('ADR' in df.columns.values.tolist())
self.assertTrue('GPS' in df.columns.values.tolist())
self.assertTrue('modified' in df['ADR'].tolist())
self.assertFalse('int' in df.columns.values.tolist())
self.assertFalse('lat' in df.columns.values.tolist())
self.assertFalse('long' in df.columns.values.tolist())
| 39.741379
| 107
| 0.610846
| 2,153
| 0.934056
| 0
| 0
| 0
| 0
| 0
| 0
| 376
| 0.163124
|
58fdec912e446a48a537a766eb98ce253951af60
| 1,496
|
py
|
Python
|
booking_spaces/booking/forms.py
|
pvlvnk/booking
|
701c8e1d8ceefde03090cd93bf954874d9fe349e
|
[
"MIT"
] | null | null | null |
booking_spaces/booking/forms.py
|
pvlvnk/booking
|
701c8e1d8ceefde03090cd93bf954874d9fe349e
|
[
"MIT"
] | null | null | null |
booking_spaces/booking/forms.py
|
pvlvnk/booking
|
701c8e1d8ceefde03090cd93bf954874d9fe349e
|
[
"MIT"
] | null | null | null |
from booking.models import Schedule, ParkingSpace
from datetime import datetime as dt
from django import forms
class ReservingForm(forms.ModelForm):
reserving_dates = forms.ModelMultipleChoiceField(
queryset=Schedule.objects.filter(reserving_date__gte=dt.today()),
widget=forms.CheckboxSelectMultiple(),
)
class Meta:
model = Schedule
fields = ('reserving_dates',)
class EditReservingForm(forms.ModelForm):
reserving_dates = forms.ModelMultipleChoiceField(
queryset=Schedule.objects.all(),
widget=forms.CheckboxSelectMultiple(),
)
class Meta:
model = Schedule
fields = ('reserving_dates',)
class CreationScheduleForm(forms.ModelForm):
class Meta:
model = Schedule
fields = ('space', 'reserving_date',)
class DeletionScheduleForm(forms.ModelForm):
deleting_dates = forms.ModelMultipleChoiceField(
queryset=Schedule.objects.all(),
widget=forms.CheckboxSelectMultiple(),
)
class Meta:
model = Schedule
fields = ('deleting_dates',)
class CreationSpaceForm(forms.ModelForm):
class Meta:
model = ParkingSpace
fields = ('title', 'slug',)
class DeletionSpaceForm(forms.ModelForm):
deleting_spaces = forms.ModelMultipleChoiceField(
queryset=ParkingSpace.objects.all(),
widget=forms.CheckboxSelectMultiple(),
)
class Meta:
model = ParkingSpace
fields = ('deleting_spaces',)
| 24.933333
| 73
| 0.68115
| 1,366
| 0.913102
| 0
| 0
| 0
| 0
| 0
| 0
| 103
| 0.06885
|
58ff16ed56b02ccd24c5cca15503e57704dd6fd0
| 20,793
|
py
|
Python
|
tests/unit/aiplatform/test_matching_engine_index_endpoint.py
|
kthytang/python-aiplatform
|
e82c1792293396045a1032df015a3700fc38609b
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/aiplatform/test_matching_engine_index_endpoint.py
|
kthytang/python-aiplatform
|
e82c1792293396045a1032df015a3700fc38609b
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/aiplatform/test_matching_engine_index_endpoint.py
|
kthytang/python-aiplatform
|
e82c1792293396045a1032df015a3700fc38609b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import uuid
from importlib import reload
from unittest import mock
from unittest.mock import patch
from google.api_core import operation
from google.cloud import aiplatform
from google.cloud.aiplatform import base
from google.cloud.aiplatform import initializer
from google.cloud.aiplatform.compat.types import (
matching_engine_deployed_index_ref as gca_matching_engine_deployed_index_ref,
index_endpoint as gca_index_endpoint,
index as gca_index,
)
from google.cloud.aiplatform.compat.services import (
index_endpoint_service_client,
index_service_client,
)
from google.protobuf import field_mask_pb2
import pytest
# project
_TEST_PROJECT = "test-project"
_TEST_LOCATION = "us-central1"
_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}"
# index
_TEST_INDEX_ID = "index_id"
_TEST_INDEX_NAME = f"{_TEST_PARENT}/indexes/{_TEST_INDEX_ID}"
_TEST_INDEX_DISPLAY_NAME = "index_display_name"
# index_endpoint
_TEST_INDEX_ENDPOINT_ID = "index_endpoint_id"
_TEST_INDEX_ENDPOINT_NAME = f"{_TEST_PARENT}/indexEndpoints/{_TEST_INDEX_ENDPOINT_ID}"
_TEST_INDEX_ENDPOINT_DISPLAY_NAME = "index_endpoint_display_name"
_TEST_INDEX_ENDPOINT_DESCRIPTION = "index_endpoint_description"
_TEST_INDEX_DESCRIPTION = "index_description"
_TEST_INDEX_ENDPOINT_VPC_NETWORK = "projects/{}/global/networks/{}".format(
"12345", "network"
)
_TEST_LABELS = {"my_key": "my_value"}
_TEST_DISPLAY_NAME_UPDATE = "my new display name"
_TEST_DESCRIPTION_UPDATE = "my description update"
_TEST_LABELS_UPDATE = {"my_key_update": "my_value_update"}
# deployment
_TEST_DEPLOYED_INDEX_ID = "deployed_index_id"
_TEST_DEPLOYED_INDEX_DISPLAY_NAME = "deployed_index_display_name"
_TEST_MIN_REPLICA_COUNT = 2
_TEST_MAX_REPLICA_COUNT = 2
_TEST_ENABLE_ACCESS_LOGGING = False
_TEST_RESERVED_IP_RANGES = ["vertex-ai-ip-range-1", "vertex-ai-ip-range-2"]
_TEST_DEPLOYMENT_GROUP = "prod"
_TEST_AUTH_CONFIG_AUDIENCES = ["a", "b"]
_TEST_AUTH_CONFIG_ALLOWED_ISSUERS = [
"service-account-name-1@project-id.iam.gserviceaccount.com",
"service-account-name-2@project-id.iam.gserviceaccount.com",
]
# deployment_updated
_TEST_MIN_REPLICA_COUNT_UPDATED = 4
_TEST_MAX_REPLICA_COUNT_UPDATED = 4
# request_metadata
_TEST_REQUEST_METADATA = ()
# Lists
_TEST_INDEX_ENDPOINT_LIST = [
gca_index_endpoint.IndexEndpoint(
name=_TEST_INDEX_ENDPOINT_NAME,
display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
),
gca_index_endpoint.IndexEndpoint(
name=_TEST_INDEX_ENDPOINT_NAME,
display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
),
gca_index_endpoint.IndexEndpoint(
name=_TEST_INDEX_ENDPOINT_NAME,
display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
),
]
# Match
_TEST_QUERIES = [
[
-0.11333,
0.48402,
0.090771,
-0.22439,
0.034206,
-0.55831,
0.041849,
-0.53573,
0.18809,
-0.58722,
0.015313,
-0.014555,
0.80842,
-0.038519,
0.75348,
0.70502,
-0.17863,
0.3222,
0.67575,
0.67198,
0.26044,
0.4187,
-0.34122,
0.2286,
-0.53529,
1.2582,
-0.091543,
0.19716,
-0.037454,
-0.3336,
0.31399,
0.36488,
0.71263,
0.1307,
-0.24654,
-0.52445,
-0.036091,
0.55068,
0.10017,
0.48095,
0.71104,
-0.053462,
0.22325,
0.30917,
-0.39926,
0.036634,
-0.35431,
-0.42795,
0.46444,
0.25586,
0.68257,
-0.20821,
0.38433,
0.055773,
-0.2539,
-0.20804,
0.52522,
-0.11399,
-0.3253,
-0.44104,
0.17528,
0.62255,
0.50237,
-0.7607,
-0.071786,
0.0080131,
-0.13286,
0.50097,
0.18824,
-0.54722,
-0.42664,
0.4292,
0.14877,
-0.0072514,
-0.16484,
-0.059798,
0.9895,
-0.61738,
0.054169,
0.48424,
-0.35084,
-0.27053,
0.37829,
0.11503,
-0.39613,
0.24266,
0.39147,
-0.075256,
0.65093,
-0.20822,
-0.17456,
0.53571,
-0.16537,
0.13582,
-0.56016,
0.016964,
0.1277,
0.94071,
-0.22608,
-0.021106,
]
]
_TEST_NUM_NEIGHBOURS = 1
def uuid_mock():
return uuid.UUID(int=1)
# All index mocks
@pytest.fixture
def get_index_mock():
with patch.object(
index_service_client.IndexServiceClient, "get_index"
) as get_index_mock:
index = gca_index.Index(
name=_TEST_INDEX_NAME,
display_name=_TEST_INDEX_DISPLAY_NAME,
description=_TEST_INDEX_DESCRIPTION,
)
index.deployed_indexes = [
gca_matching_engine_deployed_index_ref.DeployedIndexRef(
index_endpoint=index.name,
deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
)
]
get_index_mock.return_value = index
yield get_index_mock
# All index_endpoint mocks
@pytest.fixture
def get_index_endpoint_mock():
with patch.object(
index_endpoint_service_client.IndexEndpointServiceClient, "get_index_endpoint"
) as get_index_endpoint_mock:
index_endpoint = gca_index_endpoint.IndexEndpoint(
name=_TEST_INDEX_ENDPOINT_NAME,
display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
)
index_endpoint.deployed_indexes = [
gca_index_endpoint.DeployedIndex(
id=_TEST_DEPLOYED_INDEX_ID,
index=_TEST_INDEX_NAME,
display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
enable_access_logging=_TEST_ENABLE_ACCESS_LOGGING,
reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
deployment_group=_TEST_DEPLOYMENT_GROUP,
automatic_resources={
"min_replica_count": _TEST_MIN_REPLICA_COUNT,
"max_replica_count": _TEST_MAX_REPLICA_COUNT,
},
deployed_index_auth_config=gca_index_endpoint.DeployedIndexAuthConfig(
auth_provider=gca_index_endpoint.DeployedIndexAuthConfig.AuthProvider(
audiences=_TEST_AUTH_CONFIG_AUDIENCES,
allowed_issuers=_TEST_AUTH_CONFIG_ALLOWED_ISSUERS,
)
),
),
gca_index_endpoint.DeployedIndex(
id=f"{_TEST_DEPLOYED_INDEX_ID}_2",
index=f"{_TEST_INDEX_NAME}_2",
display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
enable_access_logging=_TEST_ENABLE_ACCESS_LOGGING,
reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
deployment_group=_TEST_DEPLOYMENT_GROUP,
automatic_resources={
"min_replica_count": _TEST_MIN_REPLICA_COUNT,
"max_replica_count": _TEST_MAX_REPLICA_COUNT,
},
deployed_index_auth_config=gca_index_endpoint.DeployedIndexAuthConfig(
auth_provider=gca_index_endpoint.DeployedIndexAuthConfig.AuthProvider(
audiences=_TEST_AUTH_CONFIG_AUDIENCES,
allowed_issuers=_TEST_AUTH_CONFIG_ALLOWED_ISSUERS,
)
),
),
]
get_index_endpoint_mock.return_value = index_endpoint
yield get_index_endpoint_mock
@pytest.fixture
def deploy_index_mock():
with patch.object(
index_endpoint_service_client.IndexEndpointServiceClient,
"deploy_index",
) as deploy_index_mock:
deploy_index_lro_mock = mock.Mock(operation.Operation)
deploy_index_mock.return_value = deploy_index_lro_mock
yield deploy_index_mock
@pytest.fixture
def undeploy_index_mock():
with patch.object(
index_endpoint_service_client.IndexEndpointServiceClient,
"undeploy_index",
) as undeploy_index_mock:
undeploy_index_lro_mock = mock.Mock(operation.Operation)
undeploy_index_mock.return_value = undeploy_index_lro_mock
yield undeploy_index_mock
@pytest.fixture
def update_index_endpoint_mock():
with patch.object(
index_endpoint_service_client.IndexEndpointServiceClient,
"update_index_endpoint",
) as index_endpoint_mock:
index_endpoint_mock.return_value = gca_index_endpoint.IndexEndpoint(
name=_TEST_INDEX_ENDPOINT_NAME,
display_name=_TEST_DISPLAY_NAME_UPDATE,
description=_TEST_DESCRIPTION_UPDATE,
labels=_TEST_LABELS_UPDATE,
)
yield index_endpoint_mock
@pytest.fixture
def mutate_deployed_index_mock():
with patch.object(
index_endpoint_service_client.IndexEndpointServiceClient,
"mutate_deployed_index",
) as mutate_deployed_index_mock:
mutate_deployed_index_lro_mock = mock.Mock(operation.Operation)
        mutate_deployed_index_mock.return_value = mutate_deployed_index_lro_mock
yield mutate_deployed_index_mock
@pytest.fixture
def list_index_endpoints_mock():
with patch.object(
index_endpoint_service_client.IndexEndpointServiceClient, "list_index_endpoints"
) as list_index_endpoints_mock:
list_index_endpoints_mock.return_value = _TEST_INDEX_ENDPOINT_LIST
yield list_index_endpoints_mock
@pytest.fixture
def delete_index_endpoint_mock():
with patch.object(
index_endpoint_service_client.IndexEndpointServiceClient,
"delete_index_endpoint",
) as delete_index_endpoint_mock:
delete_index_endpoint_lro_mock = mock.Mock(operation.Operation)
delete_index_endpoint_mock.return_value = delete_index_endpoint_lro_mock
yield delete_index_endpoint_mock
@pytest.fixture
def create_index_endpoint_mock():
with patch.object(
index_endpoint_service_client.IndexEndpointServiceClient,
"create_index_endpoint",
) as create_index_endpoint_mock:
create_index_endpoint_lro_mock = mock.Mock(operation.Operation)
create_index_endpoint_lro_mock.result.return_value = (
gca_index_endpoint.IndexEndpoint(
name=_TEST_INDEX_ENDPOINT_NAME,
display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
)
)
create_index_endpoint_mock.return_value = create_index_endpoint_lro_mock
yield create_index_endpoint_mock
@pytest.mark.usefixtures("google_auth_mock")
class TestMatchingEngineIndexEndpoint:
def setup_method(self):
reload(initializer)
reload(aiplatform)
def teardown_method(self):
initializer.global_pool.shutdown(wait=True)
@pytest.mark.parametrize(
"index_endpoint_name", [_TEST_INDEX_ENDPOINT_ID, _TEST_INDEX_ENDPOINT_NAME]
)
def test_init_index_endpoint(self, index_endpoint_name, get_index_endpoint_mock):
aiplatform.init(project=_TEST_PROJECT)
my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
index_endpoint_name=index_endpoint_name
)
get_index_endpoint_mock.assert_called_once_with(
name=my_index_endpoint.resource_name, retry=base._DEFAULT_RETRY
)
@pytest.mark.usefixtures("get_index_endpoint_mock")
def test_update_index_endpoint(self, update_index_endpoint_mock):
aiplatform.init(project=_TEST_PROJECT)
my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
)
updated_endpoint = my_index_endpoint.update(
display_name=_TEST_DISPLAY_NAME_UPDATE,
description=_TEST_DESCRIPTION_UPDATE,
labels=_TEST_LABELS_UPDATE,
request_metadata=_TEST_REQUEST_METADATA,
)
expected = gca_index_endpoint.IndexEndpoint(
name=_TEST_INDEX_ENDPOINT_NAME,
display_name=_TEST_DISPLAY_NAME_UPDATE,
description=_TEST_DESCRIPTION_UPDATE,
labels=_TEST_LABELS_UPDATE,
)
update_index_endpoint_mock.assert_called_once_with(
index_endpoint=expected,
update_mask=field_mask_pb2.FieldMask(
paths=["labels", "display_name", "description"]
),
metadata=_TEST_REQUEST_METADATA,
)
assert updated_endpoint.gca_resource == expected
def test_list_index_endpoints(self, list_index_endpoints_mock):
aiplatform.init(project=_TEST_PROJECT)
my_index_endpoints_list = aiplatform.MatchingEngineIndexEndpoint.list()
list_index_endpoints_mock.assert_called_once_with(
request={"parent": _TEST_PARENT, "filter": None}
)
assert len(my_index_endpoints_list) == len(_TEST_INDEX_ENDPOINT_LIST)
for my_index_endpoint in my_index_endpoints_list:
assert type(my_index_endpoint) == aiplatform.MatchingEngineIndexEndpoint
@pytest.mark.parametrize("sync", [True, False])
@pytest.mark.usefixtures("get_index_endpoint_mock")
def test_delete_index_endpoint(self, delete_index_endpoint_mock, sync):
aiplatform.init(project=_TEST_PROJECT)
my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
)
my_index_endpoint.delete(sync=sync)
if not sync:
my_index_endpoint.wait()
delete_index_endpoint_mock.assert_called_once_with(
name=my_index_endpoint.resource_name
)
@pytest.mark.usefixtures("get_index_endpoint_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_create_index_endpoint(self, create_index_endpoint_mock, sync):
aiplatform.init(project=_TEST_PROJECT)
my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint.create(
display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
network=_TEST_INDEX_ENDPOINT_VPC_NETWORK,
description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
labels=_TEST_LABELS,
)
if not sync:
my_index_endpoint.wait()
expected = gca_index_endpoint.IndexEndpoint(
display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
network=_TEST_INDEX_ENDPOINT_VPC_NETWORK,
description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
labels=_TEST_LABELS,
)
create_index_endpoint_mock.assert_called_once_with(
parent=_TEST_PARENT,
index_endpoint=expected,
metadata=_TEST_REQUEST_METADATA,
)
@pytest.mark.usefixtures("get_index_endpoint_mock", "get_index_mock")
def test_deploy_index(self, deploy_index_mock, undeploy_index_mock):
aiplatform.init(project=_TEST_PROJECT)
my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
)
# Get index
my_index = aiplatform.MatchingEngineIndex(index_name=_TEST_INDEX_NAME)
my_index_endpoint = my_index_endpoint.deploy_index(
index=my_index,
deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
min_replica_count=_TEST_MIN_REPLICA_COUNT,
max_replica_count=_TEST_MAX_REPLICA_COUNT,
enable_access_logging=_TEST_ENABLE_ACCESS_LOGGING,
reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
deployment_group=_TEST_DEPLOYMENT_GROUP,
auth_config_audiences=_TEST_AUTH_CONFIG_AUDIENCES,
auth_config_allowed_issuers=_TEST_AUTH_CONFIG_ALLOWED_ISSUERS,
request_metadata=_TEST_REQUEST_METADATA,
)
deploy_index_mock.assert_called_once_with(
index_endpoint=my_index_endpoint.resource_name,
deployed_index=gca_index_endpoint.DeployedIndex(
id=_TEST_DEPLOYED_INDEX_ID,
index=my_index.resource_name,
display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
enable_access_logging=_TEST_ENABLE_ACCESS_LOGGING,
reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
deployment_group=_TEST_DEPLOYMENT_GROUP,
automatic_resources={
"min_replica_count": _TEST_MIN_REPLICA_COUNT,
"max_replica_count": _TEST_MAX_REPLICA_COUNT,
},
deployed_index_auth_config=gca_index_endpoint.DeployedIndexAuthConfig(
auth_provider=gca_index_endpoint.DeployedIndexAuthConfig.AuthProvider(
audiences=_TEST_AUTH_CONFIG_AUDIENCES,
allowed_issuers=_TEST_AUTH_CONFIG_ALLOWED_ISSUERS,
)
),
),
metadata=_TEST_REQUEST_METADATA,
)
my_index_endpoint = my_index_endpoint.undeploy_index(
deployed_index_id=_TEST_DEPLOYED_INDEX_ID
)
undeploy_index_mock.assert_called_once_with(
index_endpoint=my_index_endpoint.resource_name,
deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
metadata=_TEST_REQUEST_METADATA,
)
@pytest.mark.usefixtures("get_index_endpoint_mock", "get_index_mock")
def test_mutate_deployed_index(self, mutate_deployed_index_mock):
aiplatform.init(project=_TEST_PROJECT)
my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
)
my_index_endpoint.mutate_deployed_index(
deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
min_replica_count=_TEST_MIN_REPLICA_COUNT_UPDATED,
max_replica_count=_TEST_MAX_REPLICA_COUNT_UPDATED,
request_metadata=_TEST_REQUEST_METADATA,
)
mutate_deployed_index_mock.assert_called_once_with(
index_endpoint=_TEST_INDEX_ENDPOINT_NAME,
deployed_index=gca_index_endpoint.DeployedIndex(
id=_TEST_DEPLOYED_INDEX_ID,
automatic_resources={
"min_replica_count": _TEST_MIN_REPLICA_COUNT_UPDATED,
"max_replica_count": _TEST_MAX_REPLICA_COUNT_UPDATED,
},
),
metadata=_TEST_REQUEST_METADATA,
)
@pytest.mark.usefixtures("get_index_endpoint_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_delete_index_endpoint_without_force(
self, undeploy_index_mock, delete_index_endpoint_mock, sync
):
my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
index_endpoint_name=_TEST_INDEX_ENDPOINT_NAME
)
my_index_endpoint.delete(sync=sync)
if not sync:
my_index_endpoint.wait()
# undeploy_index_mock should not be called unless force is set to True
undeploy_index_mock.assert_not_called()
delete_index_endpoint_mock.assert_called_once_with(
name=_TEST_INDEX_ENDPOINT_NAME
)
@pytest.mark.usefixtures("get_index_endpoint_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_delete_index_endpoint_with_force(
self, undeploy_index_mock, delete_index_endpoint_mock, sync
):
my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
index_endpoint_name=_TEST_INDEX_ENDPOINT_NAME
)
my_index_endpoint.delete(force=True, sync=sync)
if not sync:
my_index_endpoint.wait()
# undeploy_index_mock should be called if force is set to True
assert undeploy_index_mock.call_count == 2
delete_index_endpoint_mock.assert_called_once_with(
name=_TEST_INDEX_ENDPOINT_NAME
)
| 33.537097
| 90
| 0.67994
| 9,216
| 0.443226
| 5,980
| 0.287597
| 15,385
| 0.739912
| 0
| 0
| 2,238
| 0.107632
|
58ff85cb63e954368b68902aa3d1e1f0e1df7bef
| 1,440
|
py
|
Python
|
vrc_log_viewer.py
|
27Cobalter/vrc_log_viewer
|
00b5d106488fb95c605ef873a75fd26bd7d1d37f
|
[
"MIT"
] | 2
|
2019-08-29T05:58:22.000Z
|
2019-09-09T15:29:16.000Z
|
vrc_log_viewer.py
|
27Cobalter/vrc_log_viewer
|
00b5d106488fb95c605ef873a75fd26bd7d1d37f
|
[
"MIT"
] | 1
|
2022-03-20T08:11:00.000Z
|
2022-03-20T08:11:00.000Z
|
vrc_log_viewer.py
|
27Cobalter/vrc_log_viewer
|
00b5d106488fb95c605ef873a75fd26bd7d1d37f
|
[
"MIT"
] | 2
|
2020-02-04T03:19:57.000Z
|
2021-02-08T15:17:22.000Z
|
import glob
import os
import re
import sys
import time
import yaml
def tail(thefile, past):
if not past:
thefile.seek(0, 2)
while True:
line = thefile.readline()
if not line:
time.sleep(0.5)
continue
line = line.rstrip("\n").rstrip("\r")
if line != "":
yield repr(line)[1:-1]
if __name__ == "__main__":
with open("config.yml", "r") as config:
conf = yaml.load(config, Loader=yaml.SafeLoader)
print("load config")
reg = []
for pattern in conf["reg"]:
print(" " + pattern)
reg.append(re.compile(pattern))
vrcdir = os.environ["USERPROFILE"] + "\\AppData\\LocalLow\\VRChat\\VRChat\\"
logfile = vrcdir + conf["logfile"]
if len(sys.argv) > 1:
logfile = sys.argv[1]
if logfile == vrcdir:
logfiles = glob.glob(vrcdir + "output_log_*.txt")
logfiles.sort(key=os.path.getctime, reverse=True)
logfile = logfiles[0]
with open(logfile, "r", encoding="utf-8") as f:
print("open logfile : ", logfile)
loglines = tail(f, conf["past"])
for line in loglines:
for pattern in reg:
match = pattern.match(line)
if not match:
continue
message = ""
for group in match.groups():
message = message + group + " "
print(message)
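# --- Editor's note: example config.yml (an assumption, not shipped with this script) ---
# The __main__ block above expects a config.yml next to the script with the keys
# it reads: `past` (bool - read the whole log instead of only new lines),
# `logfile` (file name inside the VRChat log directory; leave empty to pick the
# newest output_log_*.txt), and `reg` (list of regex patterns whose groups are
# printed).  A minimal, hypothetical example:
#
#   past: false
#   logfile: ""
#   reg:
#     - ".*OnPlayerJoined (.+)"
#     - ".*OnPlayerLeft (.+)"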
| 27.169811
| 80
| 0.532639
| 0
| 0
| 292
| 0.202778
| 0
| 0
| 0
| 0
| 174
| 0.120833
|
450019e12b1cc5c40f3f6bff8bbb906c38cceb65
| 9,032
|
py
|
Python
|
src/intermediate_representation/sem2sql/infer_from_clause.py
|
brunnurs/proton
|
057889e2bcefd2e7e6bc3b0fcdf418a2123767a0
|
[
"Apache-2.0"
] | null | null | null |
src/intermediate_representation/sem2sql/infer_from_clause.py
|
brunnurs/proton
|
057889e2bcefd2e7e6bc3b0fcdf418a2123767a0
|
[
"Apache-2.0"
] | null | null | null |
src/intermediate_representation/sem2sql/infer_from_clause.py
|
brunnurs/proton
|
057889e2bcefd2e7e6bc3b0fcdf418a2123767a0
|
[
"Apache-2.0"
] | null | null | null |
def infer_from_clause(table_names, graph, columns):
tables = list(table_names.keys())
if len(tables) == 1: # no JOINS needed - just return the simple "FROM" clause.
return f"FROM {tables[0]} "
else: # we have to deal with multiple tables - and find the shortest path between them
join_clauses, cross_join_clauses = generate_path_by_graph(graph, table_names, tables)
if len(_tables_in_join_clauses(join_clauses)) >= 3:
join_clauses = _find_and_remove_star_table(columns, join_clauses)
stringified_join_clauses = []
for idx, (start, start_alias, end, end_alias, entry_column, exit_column) in enumerate(join_clauses):
# the first case is kind of an exception case, as we need to write two tables, for example: "A AS T1 JOIN B AS T2 ON ....".
# All the following joins will only be "... JOIN T2 ON ...."
if idx == 0:
stringified_join_clauses.append(
f"{start} JOIN {end} ON {start_alias}.{entry_column} = {end_alias}.{exit_column}")
else:
stringified_join_clauses.append(f"JOIN {end} ON {start_alias}.{entry_column} = {end_alias}.{exit_column}")
        # these are the cross-join exception cases. We have to add them for syntactical correctness, even though they will not result
# in a good query at execution.
for table, table_alias in cross_join_clauses:
if len(stringified_join_clauses) == 0:
stringified_join_clauses.append(f"{table}")
else:
stringified_join_clauses.append(f"JOIN {table}")
return f'FROM {" ".join(stringified_join_clauses)}'
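# Editor's sketch of the output shape (table and column names are made up):
# given tables {'concert': 'concert', 'singer_in_concert': 'singer_in_concert'}
# and a single graph edge between them on concert_id, infer_from_clause returns
# roughly
#   FROM concert JOIN singer_in_concert ON concert.concert_id = singer_in_concert.concert_id
# The first edge names both tables; every further edge only appends another
# "JOIN <table> ON <alias>.<column> = <alias>.<column>" fragment, and any
# unconnected tables are appended as plain cross joins at the end.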
def generate_path_by_graph(graph, table_names, tables):
join_clause = list()
cross_joins, tables_handled_by_cross_joins = _handle_standalone_tables(graph, table_names, tables)
tables_cleaned = [table for table in tables if table not in tables_handled_by_cross_joins]
idx = 0
edges = []
    # We always deal with two tables at a time and try to find the shortest path between them. This might be over-simplified
    # as there could be a more optimal path between all tables (see Steiner tree), but practically it doesn't matter so much.
while idx < len(tables_cleaned) - 1:
start_table = tables_cleaned[idx]
end_table = tables_cleaned[idx + 1]
edges_for_this_path = graph.dijkstra(start_table, end_table)
if edges_for_this_path:
edges.extend(edges_for_this_path)
else:
raise Exception(f"We could not find a path between table '${start_table}' and '${end_table}'. This query can"
f"not work. Make sure you allow only questions in a fully connected schema!")
idx += 1
# now there might be duplicates - as parts of the path from A to C might be the same as from A to B.
    # be aware that, as we only consider INNER JOINS, A <-> B is equal to B <-> A! So we also have to remove these edges.
edges_deduplicated = _deduplicate_edges(edges)
    # now for each edge we have to add both the start table and the end table to the join_clause (including the PK/FK-columns).
for edge in edges_deduplicated:
if edge.start not in table_names:
table_names[edge.start] = edge.start.replace(' ', '_')
if edge.end not in table_names:
table_names[edge.end] = edge.end.replace(' ', '_')
join_clause.append((edge.start,
table_names[edge.start],
edge.end,
table_names[edge.end],
edge.entry_column,
edge.exit_column))
return join_clause, cross_joins
def _handle_standalone_tables(graph, table_names, tables):
join_clause = []
tables_handled = []
    # there are a few rare cases of tables without connections to others - which will then obviously not be part of the graph.
    # as we can't properly handle these cases, we just have to do a stupid cross-join with them
for table in tables:
if table not in graph.vertices:
join_clause.append((table, table_names[table]))
tables_handled.append(table)
remaining_tables = [t for t in table_names if t not in tables_handled]
    # if there is only one table left after removing all the others, we can't use a graph anymore - so we need to use a cross join as well.
if len(remaining_tables) == 1:
join_clause.append((remaining_tables[0], table_names[remaining_tables[0]]))
tables_handled.append(remaining_tables[0])
return join_clause, tables_handled
def _get_max_alias(table_names):
max_key = 1
for t, k in table_names.items():
_k = int(k[1:])
if _k > max_key:
max_key = _k
return max_key + 10
def _find_and_remove_star_table(columns, join_clause):
"""
Starting from 3 tables we have to deal with the "star-table" effect - a join with a joining table where we only wanna know e.g. the count(*) of the third table.
In that case we don't need to join the third table - we just do a count over the join with the joining table.
In general, the additional join is not an issue - but is seen as incorrect by the spider-evaluation and therefore we have to remove it.
Example:
SELECT T2.concert_name , T2.theme , count(*) FROM singer_in_concert AS T1 JOIN concert AS T2 ON T1.concert_id = T2.concert_id GROUP BY T2.concert_id ---> GOOD
SELECT T1.concert_Name, T1.Theme, count(*) FROM concert AS T1 JOIN singer_in_concert AS T3 JOIN singer AS T2 GROUP BY T1.concert_ID -----> BAD, REMOVE "singer" join.
"""
    # unfortunately auto tuple unpacking doesn't work anymore in python 3, therefore this comment: a "column" contains the 3 elements "aggregator", "column name", "table".
star_tables = list(map(lambda column: column[2], filter(lambda column: column[1] == '*', columns)))
# remove duplicates
star_tables = list(set(star_tables))
assert len(star_tables) <= 1, "The case of having multiple star-joins is currently not supported (and not part of the spider-dataset)"
if len(star_tables) == 1:
star_table = star_tables[0]
        # we need to make sure the table we try to remove is not used at any other place - e.g. in the SELECT or in the WHERE clause.
        # only then can we safely remove it
        if len(list(filter(lambda column: column[1] != '*' and column[2] == star_table, columns))) == 0:
            # we only remove star-tables if they are the start or end table in the graph.
            # remember, a join_clause tuple looks like this: (start, start_alias, end, end_alias, entry_column, exit_column)
start_edge = join_clause[0]
start_edge_from, _, start_edge_to, _, _, _ = start_edge
end_edge = join_clause[len(join_clause) - 1]
end_edge_from, _, end_edge_to, _, _, _ = end_edge
if start_edge_from == star_table:
if second_table_in_edge_is_availabe_elswhere(start_edge_to, join_clause[1:]):
return join_clause[1:]
if end_edge_to == star_table:
if second_table_in_edge_is_availabe_elswhere(end_edge_from, join_clause[:-1]):
return join_clause[:-1]
return join_clause
def second_table_in_edge_is_availabe_elswhere(second_table, remaining_edges):
"""
    By removing an edge, we basically remove two tables. If the schema is a "normal" schema, where the edges are "A --> B", "B --> C",
    this is not an issue.
    If we however have a non-linear schema, like "A --> B", "A --> C", we can't just remove the first edge - we would lose B completely!
    To avoid this we make sure the second table in the edge we plan to remove is available in another edge.
    A schema where we have to deal with this issue is e.g. "flight_2", where two relations go from "flights" to "airports".
"""
for edge in remaining_edges:
start, _, end, _, _, _ = edge
if second_table == start or second_table == end:
return True
return False
def _deduplicate_edges(edges):
deduplicated = []
for e1 in edges:
found_match = False
for e2 in deduplicated:
# make sure two edges do not match - while paying no attention to the direction of the edge!
            # more complex schemas might make it necessary to also include the foreign key/primary key here, as you could theoretically have multiple relationships between two tables.
if (e1.start == e2.start and e1.end == e2.end) or (e1.start == e2.end and e1.end == e2.start):
found_match = True
if not found_match:
deduplicated.append(e1)
return deduplicated
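# Editor's sketch (assumes edge objects expose .start/.end as used above):
#   from collections import namedtuple
#   Edge = namedtuple('Edge', 'start end entry_column exit_column')
#   _deduplicate_edges([Edge('A', 'B', 'b_id', 'id'), Edge('B', 'A', 'id', 'b_id')])
# keeps only the first edge: with INNER JOINs only, A <-> B and B <-> A describe
# the same join, so the reversed duplicate is dropped.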
def _tables_in_join_clauses(join_clauses):
unique_tables = set()
for clause in join_clauses:
start_table, _, end_table, _, _, _ = clause
unique_tables.add(start_table)
unique_tables.add(end_table)
return list(unique_tables)
| 45.616162
| 175
| 0.665633
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,066
| 0.450177
|
450109704aaa9e57ec8952a08e13c1c362e0340c
| 21
|
py
|
Python
|
test.py
|
AlanFnz/profiles-rest-api
|
c606999f86235ed74fd98421bd02bc598d5a5463
|
[
"MIT"
] | null | null | null |
test.py
|
AlanFnz/profiles-rest-api
|
c606999f86235ed74fd98421bd02bc598d5a5463
|
[
"MIT"
] | null | null | null |
test.py
|
AlanFnz/profiles-rest-api
|
c606999f86235ed74fd98421bd02bc598d5a5463
|
[
"MIT"
] | null | null | null |
print('Test script')
| 10.5
| 20
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 13
| 0.619048
|
45022a2b63c33a9252e7e2198671dfbcf5309e06
| 2,208
|
py
|
Python
|
src/markdown_storage/folder.py
|
stephanpoetschner/markdown_storage
|
69005db4484010e0d2282bdeb0d0bcc30a316932
|
[
"MIT"
] | null | null | null |
src/markdown_storage/folder.py
|
stephanpoetschner/markdown_storage
|
69005db4484010e0d2282bdeb0d0bcc30a316932
|
[
"MIT"
] | null | null | null |
src/markdown_storage/folder.py
|
stephanpoetschner/markdown_storage
|
69005db4484010e0d2282bdeb0d0bcc30a316932
|
[
"MIT"
] | null | null | null |
import os
from .exceptions import MarkdownError, MetadataError
from .file import ContentFile
class ContentFolder(object):
def __init__(self, path):
path = os.path.abspath(path)
self._path = path
self._files = []
self._folders = []
for filename in os.listdir(self._path):
abspath = os.path.join(self._path, filename)
if os.path.isfile(abspath):
self._add_file(abspath)
elif os.path.isdir(abspath):
self._add_folder(abspath)
def __repr__(self):
return os.path.basename(self._path)
def _add_file(self, filepath):
if not ContentFile.is_valid(filepath):
return
dirname, filename = os.path.split(filepath)
name, ext = os.path.splitext(filename)
try:
d = ContentFile(filepath)
except (MetadataError, MarkdownError):
return
setattr(self, name, d)
self._files.append(d)
def _add_folder(self, dirpath):
_, basename = os.path.split(dirpath)
d = ContentFolder(dirpath)
setattr(self, basename, d)
self._folders.append(d)
@property
def folders(self):
for folder in self._folders:
yield folder
@property
def files(self):
for file in self._files:
yield file
def __iter__(self):
return iter(self._files)
# def __next__(self):
# import ipdb; ipdb.set_trace()
# for file in self._files:
# return file
# raise StopIteration
# def sort(self, key=None, reverse=False):
# return ContentIterator(self._files, key, reverse)
#
# class ContentIterator(object):
# def __init__(self, items, key=None, reverse=False):
# self.items = sorted(items, key=key, reverse=reverse)
# self.i = 0
#
# def __iter__(self):
# return self
#
# def next(self):
# if self.i >= len(self.items):
# raise StopIteration
#
# item = self.items[self.i]
# self.i += 1
#
# return item
#
# def sorted(self, sort_key):
# return ContentIterator(self, self.items, sort_key)
#
#
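# --- Editor's usage sketch (directory layout below is hypothetical) ---
# A ContentFolder mirrors a directory tree: every markdown file that parses
# cleanly becomes a ContentFile attribute named after the file (without its
# extension) and every sub-directory becomes a nested ContentFolder, so content
# can be reached with plain attribute access:
#
#   root = ContentFolder("content")       # assumes a ./content directory
#   for folder in root.folders:           # nested ContentFolder objects
#       print(folder)
#   for entry in root:                    # ContentFile objects in this folder
#       print(entry)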
| 24
| 62
| 0.579257
| 1,601
| 0.725091
| 152
| 0.068841
| 180
| 0.081522
| 0
| 0
| 717
| 0.324728
|
4506dc61f56a8eae8242703dae9838d15d5a49a2
| 2,327
|
py
|
Python
|
test/test_session.py
|
Sunmxt/UESTC-EAMS
|
760a7387a5d73967e45a0b9d211acb383bb50fe1
|
[
"Apache-2.0"
] | 1
|
2020-07-25T13:53:35.000Z
|
2020-07-25T13:53:35.000Z
|
test/test_session.py
|
Sunmxt/UESTC-EAMS
|
760a7387a5d73967e45a0b9d211acb383bb50fe1
|
[
"Apache-2.0"
] | null | null | null |
test/test_session.py
|
Sunmxt/UESTC-EAMS
|
760a7387a5d73967e45a0b9d211acb383bb50fe1
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/python
'''
Test for session module
'''
import unittest
import uestc_eams
from .mock_server import LoginMockServer
from .utils import HookedMethod, MakeResponse
mock_login = LoginMockServer()
class TestSession(unittest.TestCase):
@mock_login.Patch
def test_Session(self):
self.__session = uestc_eams.EAMSSession()
# Try login
print('--> Login test <--')
self.assertTrue(self.__session.Login('2015070804011', '104728'))
self.assertTrue(mock_login.Logined)
self.assertEqual(mock_login.GetIndexCount, 1)
print('passed.', end = '\n\n')
# Test expire session
print('--> test expired cookies <--')
test_url = 'http://eams.uestc.edu.cn/eams'
mock_login.ExpireTestTiggered = False
rep = self.__session.TryRequestGet(test_url)
self.assertTrue(mock_login.ExpireTestTiggered)
self.assertTrue(mock_login.Logined)
self.assertNotEqual(-1, rep.url.find(test_url))
print('passed.', end = '\n\n')
# Test expire session with no redirects following
print('--> test expired cookies (no redirects following) <--')
test_url = 'http://eams.uestc.edu.cn/eams/redirect_test'
mock_login.ExpireTestTiggered = False
rep = self.__session.TryRequestGet(test_url, allow_redirects = False)
self.assertTrue(mock_login.ExpireTestTiggered)
self.assertTrue(mock_login.Logined)
self.assertNotEqual(-1, rep.url.find(test_url))
self.assertEqual(rep.status_code, 302)
print('passed.', end = '\n\n')
# Test expire session with HTTP 200 redirects.
print('--> test expired cookies (200 redirect) <--')
test_url = 'http://eams.uestc.edu.cn/eams/200redirect'
mock_login.ExpireTestTiggered = False
mock_login._200RedirectTiggered = False
rep = self.__session.TryRequestGet(test_url)
self.assertEqual(mock_login.ExpireTestTiggered, True)
self.assertEqual(mock_login._200RedirectTiggered, True)
print('passed.', end = '\n\n')
        # Test logout.
print('--> test logout <--')
self.assertTrue(self.__session.Logout())
self.assertFalse(mock_login.Logined)
print('passed.', end = '\n\n')
| 34.731343
| 77
| 0.65578
| 2,107
| 0.905458
| 0
| 0
| 2,063
| 0.886549
| 0
| 0
| 618
| 0.265578
|
4507d40889bdeb2efc06f9fd94721d09e699f4c0
| 159
|
py
|
Python
|
Asignacion2/App/main.py
|
HarambeGeek/uip-iq-pc3
|
6e9a0678a90c4bfd7499dfb5c71c9a3ea9effe1e
|
[
"Apache-2.0"
] | null | null | null |
Asignacion2/App/main.py
|
HarambeGeek/uip-iq-pc3
|
6e9a0678a90c4bfd7499dfb5c71c9a3ea9effe1e
|
[
"Apache-2.0"
] | null | null | null |
Asignacion2/App/main.py
|
HarambeGeek/uip-iq-pc3
|
6e9a0678a90c4bfd7499dfb5c71c9a3ea9effe1e
|
[
"Apache-2.0"
] | null | null | null |
from App.numeros import numeros
if __name__ == "__main__":
x = int(input("Ingrese el nรบmero que desea evaluar: \n"))
pi = numeros(x)
pi.parImpar()
| 26.5
| 61
| 0.660377
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 52
| 0.325
|
45085ba62031c2ca173669eca6938ca7aaf578c8
| 6,950
|
py
|
Python
|
tensorflow/python/distribute/test_util.py
|
TL-Rubick/tensorflow
|
6cf1ccf6060a95aad3ccc84544d0aa166990ec72
|
[
"Apache-2.0"
] | 11
|
2018-01-03T15:11:09.000Z
|
2021-04-13T05:47:27.000Z
|
tensorflow/python/distribute/test_util.py
|
TL-Rubick/tensorflow
|
6cf1ccf6060a95aad3ccc84544d0aa166990ec72
|
[
"Apache-2.0"
] | 88
|
2020-11-24T08:18:10.000Z
|
2022-03-25T20:28:30.000Z
|
tensorflow/python/distribute/test_util.py
|
TL-Rubick/tensorflow
|
6cf1ccf6060a95aad3ccc84544d0aa166990ec72
|
[
"Apache-2.0"
] | 10
|
2018-07-31T10:56:21.000Z
|
2019-10-07T08:05:21.000Z
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
from absl import app
from tensorflow.python.compat import v2_compat
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import values
from tensorflow.python.eager import context
from tensorflow.python.framework import config
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.util import nest
def gather(strategy, value):
"""Gathers value from all workers.
This is intended for tests before we implement an official all-gather API.
Args:
strategy: a `tf.distribute.Strategy`.
value: a nested structure of n-dim `tf.distribute.DistributedValue` of
`tf.Tensor`, or of a `tf.Tensor` if the strategy only has one replica.
Cannot contain tf.sparse.SparseTensor.
Returns:
a (n+1)-dim `tf.Tensor`.
"""
return nest.map_structure(functools.partial(_gather, strategy), value)
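# Editor's sketch of the intended call pattern (the strategy construction below
# is an assumption for illustration, not part of this module):
#
#   strategy = collective_all_reduce_strategy.CollectiveAllReduceStrategy()
#   per_replica = strategy.run(lambda: array_ops.ones([2]))
#   stacked = gather(strategy, per_replica)   # roughly shape (num_replicas, 2)
#
# i.e. every replica contributes one slice along a new leading axis.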
def _gather(strategy, value):
"""Gathers a single value."""
# pylint: disable=protected-access
if not isinstance(value, values.DistributedValues):
value = values.PerReplica([ops.convert_to_tensor(value)])
if not isinstance(strategy.extended,
collective_all_reduce_strategy.CollectiveAllReduceExtended):
return array_ops.stack(value._values)
assert len(strategy.extended.worker_devices) == len(value._values)
inputs = [array_ops.expand_dims_v2(v, axis=0) for v in value._values]
return strategy.gather(values.PerReplica(inputs), axis=0)
# pylint: enable=protected-access
def set_logical_devices_to_at_least(device, num):
"""Create logical devices of at least a given number."""
if num < 1:
raise ValueError("`num` must be at least 1 not %r" % (num,))
physical_devices = config.list_physical_devices(device)
if not physical_devices:
raise RuntimeError("No {} found".format(device))
if len(physical_devices) >= num:
return
# By default each physical device corresponds to one logical device. We create
# multiple logical devices for the last physical device so that we have `num`
# logical devices.
num = num - len(physical_devices) + 1
logical_devices = []
for _ in range(num):
if device.upper() == "GPU":
logical_devices.append(
context.LogicalDeviceConfiguration(memory_limit=2048))
else:
logical_devices.append(context.LogicalDeviceConfiguration())
# Create logical devices from the last device since sometimes the first GPU
  # is the primary graphics card and may have less memory available.
config.set_logical_device_configuration(physical_devices[-1], logical_devices)
def _set_logical_devices():
if config.list_physical_devices("GPU"):
set_logical_devices_to_at_least("GPU", 2)
if config.list_physical_devices("CPU"):
set_logical_devices_to_at_least("CPU", 2)
def main(enable_v2_behavior=True, config_logical_devices=True):
"""All-in-one main function for tf.distribute tests."""
if config_logical_devices:
app.call_after_init(_set_logical_devices)
if enable_v2_behavior:
v2_compat.enable_v2_behavior()
else:
v2_compat.disable_v2_behavior()
# TODO(b/131360402): configure default logical devices.
multi_process_runner.test_main()
def _op_dependencies(op):
"""Returns the data and control dependencies of a tf.Operation combined."""
deps = []
for node in itertools.chain(op.inputs, op.control_inputs):
if isinstance(node, ops.Tensor):
node = node.op
assert isinstance(node, ops.Operation)
deps.append(node)
return deps
def topological_sort_operations(operations):
"""Topological sorts a list of operations.
This does a topological sort of the operations in a graph. The edges include
both data dependencies and control dependencies. Note that the edge goes from
an operation to its dependencies.
Args:
operations: a list of tf.Operation in the same graph.
Returns:
A map from a tf.Operation to its topological order.
"""
in_degrees = {}
for op in operations:
if op not in in_degrees:
in_degrees[op] = 0
for next_op in _op_dependencies(op):
in_degrees[next_op] = in_degrees.get(next_op, 0) + 1
nexts = []
for op, in_degree in in_degrees.items():
if in_degree == 0:
nexts.append(op)
order = {}
next_order = 0
while nexts:
op, nexts = nexts[0], nexts[1:]
order[op] = next_order
next_order += 1
for next_op in _op_dependencies(op):
in_degrees[next_op] -= 1
if in_degrees[next_op] == 0:
nexts.append(next_op)
assert len(order) == len(operations)
return order
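# Editor's worked example (generic, not tied to a particular graph): for three
# ops where c depends on b and b depends on a, _op_dependencies(c) == [b] and
# _op_dependencies(b) == [a]; only c has in-degree 0 (no consumers), so the
# Kahn-style loop above yields
#   order == {c: 0, b: 1, a: 2}
# i.e. an operation always gets a smaller order than its dependencies, which is
# exactly the property assert_sequential_execution relies on below.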
def _exists_dependency(start, end):
"""Returns whether there exists a dependency chain from start to end."""
nexts = [start]
while nexts:
op, nexts = nexts[0], nexts[1:]
for next_op in _op_dependencies(op):
if next_op == end:
return True
nexts.append(next_op)
return False
def assert_sequential_execution(order, operations):
"""Asserts there's a deterministic execution order between the operations.
Args:
order: a map from a tf.Operation to its topological order.
operations: a list of operations that should be executed sequentially. It
can be given in any order.
"""
# Topological ordering guarantees that, if there's a dependency from N_a to
  # N_b, then order[N_a] < order[N_b]. If there does exist a path of dependencies
  # among the operations, it always goes from an operation with a smaller
# topological order to one with a larger topological order. Therefore, we only
# need to sort the operations by their topological orders, and verify that
# there's a path of dependency between adjacent pairs.
operations = sorted(operations, key=lambda op: order[op])
for i in range(len(operations) - 1):
if not _exists_dependency(operations[i], operations[i + 1]):
print(operations[i].graph.as_graph_def())
raise AssertionError(
"No dependency between {} and {}. Graph is dumped to stdout.".format(
operations[i].name, operations[i + 1].name))
| 36.010363
| 80
| 0.726475
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,027
| 0.43554
|
4508cb0373bf214606e00078a7d58793158e28a1
| 3,928
|
py
|
Python
|
experiment_data_handler.py
|
grdddj/Diploma-Thesis---Inverse-Heat-Transfer
|
636182fa4c57913002675ed3afca8c1b3dc35e1c
|
[
"MIT"
] | 2
|
2019-09-09T18:49:14.000Z
|
2021-11-15T23:41:00.000Z
|
experiment_data_handler.py
|
grdddj/Diploma-Thesis---Inverse-Heat-Transfer
|
636182fa4c57913002675ed3afca8c1b3dc35e1c
|
[
"MIT"
] | null | null | null |
experiment_data_handler.py
|
grdddj/Diploma-Thesis---Inverse-Heat-Transfer
|
636182fa4c57913002675ed3afca8c1b3dc35e1c
|
[
"MIT"
] | null | null | null |
import csv
class ExperimentDataCannotBeParsedError(Exception):
"""
Defining custom exception type that will be thrown when something fails here
"""
def __init__(self, msg: str = "ERROR"):
self.message = msg
def __str__(self):
"""
Defines what to show when exception is printed - giving some useful info
"""
return """
Experiment data cannot be processed. Please take a look at them.
Column names should be:
- Time
- Temperature
- HeatFlux
- T_amb
Issue: {}
""".format(self.message)
class Material:
"""
Class responsible for initializing and storing material data
"""
def __init__(self, rho, cp, lmbd):
self.rho = rho # Mass density
self.cp = cp # Specific heat capacity
self.lmbd = lmbd # Heat conductivity
class ExperimentalData:
"""
Class responsible for initializing and storing experimental data
TODO: really agree on the parsing logic (indexes vs named columns)
"""
def __init__(self, csv_file_path="DATA.csv"):
# Preparing lists for storing all the data
self.t_data = [] # experimental data of time points
self.T_data = [] # experimental data of temperature
self.q_data = [] # experimental data of Measured HeatFluxes (might be missing)
self.T_amb_data = [] # experimental data of ambient temperature
# Defining the column names we are expecting
self.t_column_name = "Time"
self.T_column_name = "Temperature"
self.q_column_name = "HeatFlux"
self.T_amb_column_name = "T_amb"
# Trying to parse the file with experimental data, in case of any error
# relay it with our custom name
try:
with open(csv_file_path) as csv_file:
# NOTE: when using DictReader, skipping first row is not necessary,
# on the contrary, we would be losing one row of data by it
csv_reader = csv.DictReader(csv_file)
# Validating the correctness of CSV file
self.check_CSV_file_correctness(csv_reader.fieldnames)
# Filling the data row by row
for row in csv_reader:
self.t_data.append(float(row.get(self.t_column_name, 0)))
self.T_data.append(float(row.get(self.T_column_name, 0)))
self.q_data.append(float(row.get(self.q_column_name, 0)))
self.T_amb_data.append(float(row.get(self.T_amb_column_name, 0)))
except ExperimentDataCannotBeParsedError:
raise
except Exception as e:
raise ExperimentDataCannotBeParsedError(e)
def check_CSV_file_correctness(self, column_names: list) -> None:
"""
Making sure the CSV file contains the right columns.
        We need the time and ambient temperature columns to always be there,
        and then at least one of the temperature and flux columns.
In case of some problem throw our custom error.
Args:
column_names ... list of columns from the CSV file
"""
if self.t_column_name not in column_names:
msg = "Time data is empty, please use 'Time' column for this data"
raise ExperimentDataCannotBeParsedError(msg)
if self.T_amb_column_name not in column_names:
msg = "Ambient temperature data is empty, please use 'T_amb' column for this data"
raise ExperimentDataCannotBeParsedError(msg)
if self.T_column_name not in column_names and self.q_column_name not in column_names:
msg = "Temperature and flux data are empty, please use 'Temperature' and 'HeatFlux' columns for this data"
raise ExperimentDataCannotBeParsedError(msg)
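# --- Editor's usage sketch (the CSV name and numbers are made-up examples) ---
# Given a file like
#   Time,Temperature,HeatFlux,T_amb
#   0.0,25.0,0.0,24.5
#   1.0,26.2,1500.0,24.5
# the class is used simply as
#   data = ExperimentalData("DATA.csv")
#   print(data.t_data, data.T_data, data.q_data, data.T_amb_data)
# A missing 'Time' or 'T_amb' column, or both 'Temperature' and 'HeatFlux'
# missing, raises ExperimentDataCannotBeParsedError with a hint about the
# expected column names.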
| 38.509804
| 118
| 0.621436
| 3,908
| 0.994908
| 0
| 0
| 0
| 0
| 0
| 0
| 1,958
| 0.498473
|
450a6b5edd6e30d83bb61609d61f4702dee03bf9
| 23,457
|
py
|
Python
|
hybrideb/_bineb.py
|
beckermr/hybrideb
|
a72d712020943dbbed35cb244f9e7f13fc6b2d4d
|
[
"BSD-3-Clause"
] | null | null | null |
hybrideb/_bineb.py
|
beckermr/hybrideb
|
a72d712020943dbbed35cb244f9e7f13fc6b2d4d
|
[
"BSD-3-Clause"
] | null | null | null |
hybrideb/_bineb.py
|
beckermr/hybrideb
|
a72d712020943dbbed35cb244f9e7f13fc6b2d4d
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import numpy as np
import scipy.integrate
import scipy.special
from ._dblquad import dblquad
HAVE_PYGSL = False
try:
import pygsl.integrate
import pygsl.sf
HAVE_PYGSL = True
except ImportError:
pass
class BinEB(object):
def __init__(
self, tmin, tmax, Nb, windows=None, linear=False, useArcmin=True, fname=None
):
if fname is not None:
self.read_data(fname)
else:
# set basic params
if useArcmin:
am2r = np.pi / 180.0 / 60.0
else:
am2r = 1.0
self.Nb = Nb
self.L = tmin * am2r
self.H = tmax * am2r
if linear:
self.Lb = (self.H - self.L) / Nb * np.arange(Nb) + self.L
self.Hb = (self.H - self.L) / Nb * (np.arange(Nb) + 1.0) + self.L
else:
self.Lb = np.exp(np.log(self.H / self.L) / Nb * np.arange(Nb)) * self.L
self.Hb = (
np.exp(np.log(self.H / self.L) / Nb * (np.arange(Nb) + 1.0))
* self.L
)
self.have_ell_win = False
# make the bin window functions
if windows is None:
def _make_geomwin(L, H):
return lambda x: 2.0 * x / (H * H - L * L)
self.windows = []
for i in range(self.Nb):
self.windows.append(_make_geomwin(self.Lb[i], self.Hb[i]))
else:
def _make_normwin(winf, norm):
return lambda x: winf(x / am2r) / norm
self.windows = []
assert (
len(windows) == Nb
), "binEB requires as many windows as angular bins!"
for i in range(self.Nb):
twin = _make_normwin(windows[i], 1.0)
norm, err = scipy.integrate.quad(twin, self.Lb[i], self.Hb[i])
self.windows.append(_make_normwin(windows[i], norm))
# get fa and fb
self.fa = np.zeros(self.Nb)
self.fa[:] = 1.0
if HAVE_PYGSL:
limit = 10
epsabs = 1e-8
epsrel = 1e-8
w = pygsl.integrate.workspace(limit)
def fb_int(x, args):
win = args[0]
return win(x) * x * x
self.fb = np.zeros(self.Nb)
for i in range(self.Nb):
args = [self.windows[i]]
f = pygsl.integrate.gsl_function(fb_int, args)
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
self.fb[i] = val
else:
def fb_int(x, win):
return win(x) * x * x
self.fb = np.zeros(self.Nb)
for i in range(self.Nb):
val, err = scipy.integrate.quad(
fb_int, self.Lb[i], self.Hb[i], args=(self.windows[i],)
)
self.fb[i] = val
self.fa_on = self.fa / np.sqrt(np.sum(self.fa * self.fa))
self.fb_on = self.fb - self.fa * np.sum(self.fa * self.fb) / np.sum(
self.fa * self.fa
)
self.fb_on = self.fb_on / np.sqrt(np.sum(self.fb_on * self.fb_on))
# get Mplus matrix
if HAVE_PYGSL:
limit = 10
epsabs = 1e-8
epsrel = 1e-8
w = pygsl.integrate.workspace(limit)
def knorm_int(x, args):
win = args[0]
return win(x) * win(x) / x
knorm = np.zeros(self.Nb)
for i in range(self.Nb):
args = [self.windows[i]]
f = pygsl.integrate.gsl_function(knorm_int, args)
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
knorm[i] = val
self.invnorm = knorm
def inv2_int(x, args):
win = args[0]
return win(x) / x / x
inv2 = np.zeros(self.Nb)
for i in range(self.Nb):
args = [self.windows[i]]
f = pygsl.integrate.gsl_function(inv2_int, args)
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
inv2[i] = val
def inv4_int(x, args):
win = args[0]
return win(x) / x / x / x / x
inv4 = np.zeros(self.Nb)
for i in range(self.Nb):
args = [self.windows[i]]
f = pygsl.integrate.gsl_function(inv4_int, args)
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
inv4[i] = val
else:
def knorm_int(x, win):
return win(x) * win(x) / x
knorm = np.zeros(self.Nb)
for i in range(self.Nb):
val, err = scipy.integrate.quad(
knorm_int, self.Lb[i], self.Hb[i], args=(self.windows[i],)
)
knorm[i] = val
self.invnorm = knorm
def inv2_int(x, win):
return win(x) / x / x
inv2 = np.zeros(self.Nb)
for i in range(self.Nb):
val, err = scipy.integrate.quad(
inv2_int, self.Lb[i], self.Hb[i], args=(self.windows[i],)
)
inv2[i] = val
def inv4_int(x, win):
return win(x) / x / x / x / x
inv4 = np.zeros(self.Nb)
for i in range(self.Nb):
val, err = scipy.integrate.quad(
inv4_int, self.Lb[i], self.Hb[i], args=(self.windows[i],)
)
inv4[i] = val
if HAVE_PYGSL:
def _mp_int(p, args):
t = args[0]
k = args[1]
i = args[2]
if p > t:
val = (
(4.0 / p / p - 12.0 * t * t / p / p / p / p)
* self.windows[k](p)
* self.windows[i](t)
)
else:
val = 0.0
return val
else:
def _mp_int(p, t, k, i):
if p > t:
return (
(4.0 / p / p - 12.0 * t * t / p / p / p / p)
* self.windows[k](p)
* self.windows[i](t)
)
else:
return 0.0
self.mp = np.zeros((self.Nb, self.Nb))
for k in range(self.Nb):
# sys.stdout.write("|")
for i in range(self.Nb):
if windows is None:
if i < k:
self.mp[k, i] += (
2.0
/ (self.Hb[i] * self.Hb[i] - self.Lb[i] * self.Lb[i])
* (
2.0
* (
self.Hb[i] * self.Hb[i]
- self.Lb[i] * self.Lb[i]
)
* np.log(self.Hb[k] / self.Lb[k])
+ 3.0
/ 2.0
* (
np.power(self.Hb[i], 4.0)
- np.power(self.Lb[i], 4.0)
)
* (
1.0 / self.Hb[k] / self.Hb[k]
- 1.0 / self.Lb[k] / self.Lb[k]
)
)
)
if k == i:
self.mp[k, i] += 1.0
self.mp[k, i] += (
2.0
/ (self.Hb[i] * self.Hb[i] - self.Lb[i] * self.Lb[i])
* (
-0.5
* (
self.Hb[k] * self.Hb[k]
- self.Lb[k] * self.Lb[k]
)
- 2.0
* self.Lb[i]
* self.Lb[i]
* np.log(self.Hb[k] / self.Lb[k])
- 3.0
/ 2.0
* np.power(self.Lb[i], 4.0)
* (
1.0 / self.Hb[k] / self.Hb[k]
- 1.0 / self.Lb[k] / self.Lb[k]
)
)
)
else:
if k == i:
self.mp[k, i] += 1.0
val = dblquad(
_mp_int,
self.Lb[i],
self.Hb[i],
lambda x: self.Lb[k],
lambda x: self.Hb[k],
args=(k, i),
)
self.mp[k, i] += val / knorm[k]
if i < k:
self.mp[k, i] = (
4.0 * inv2[k] - 12.0 * inv4[k] * self.fb[i]
) / knorm[k]
# sys.stdout.write("\n")
if HAVE_PYGSL:
def _mm_int(p, args):
t = args[0]
k = args[1]
i = args[2]
if t > p:
val = (
(4.0 / t / t - 12.0 * p * p / t / t / t / t)
* self.windows[k](p)
* self.windows[i](t)
)
else:
val = 0.0
return val
else:
def _mm_int(p, t, k, i):
if t > p:
return (
(4.0 / t / t - 12.0 * p * p / t / t / t / t)
* self.windows[k](p)
* self.windows[i](t)
)
else:
return 0.0
self.mm = np.zeros((self.Nb, self.Nb))
for k in range(self.Nb):
# sys.stdout.write("|")
for i in range(self.Nb):
if windows is None:
if i > k:
self.mm[k, i] += (
2.0
/ (self.Hb[i] * self.Hb[i] - self.Lb[i] * self.Lb[i])
* (
2.0
* (
self.Hb[k] * self.Hb[k]
- self.Lb[k] * self.Lb[k]
)
* np.log(self.Hb[i] / self.Lb[i])
+ 3.0
/ 2.0
* (
np.power(self.Hb[k], 4.0)
- np.power(self.Lb[k], 4.0)
)
* (
1.0 / self.Hb[i] / self.Hb[i]
- 1.0 / self.Lb[i] / self.Lb[i]
)
)
)
if k == i:
self.mm[k, i] += 1.0
self.mm[k, i] += (
2.0
/ (self.Hb[i] * self.Hb[i] - self.Lb[i] * self.Lb[i])
* (
0.5
* (
-1.0 * self.Hb[k] * self.Hb[k]
+ self.Lb[k]
* self.Lb[k]
* (
4.0
- 3.0
* self.Lb[k]
* self.Lb[k]
/ self.Hb[i]
/ self.Hb[i]
- 4.0 * np.log(self.Hb[i] / self.Lb[k])
)
)
)
)
else:
if k == i:
self.mm[k, i] += 1.0
val = dblquad(
_mm_int,
self.Lb[i],
self.Hb[i],
lambda x: self.Lb[k],
lambda x: self.Hb[k],
args=(k, i),
)
self.mm[k, i] += val / knorm[k]
if i > k:
self.mm[k, i] = (
4.0 * inv2[i] - 12.0 * inv4[i] * self.fb[k]
) / knorm[k]
# sys.stdout.write("\n")
# compute the ell windows
self.comp_ell_windows()
def comp_ell_windows(self):
# get the windows in ell
self.have_ell_win = True
if HAVE_PYGSL:
def ellwin_int(theta, args):
ell = args[0]
win = args[1]
n = args[2]
return (pygsl.sf.bessel_Jn(n, ell * theta))[0] * win(theta)
else:
def ellwin_int(theta, ell, win, n):
return scipy.special.jn(n, ell * theta) * win(theta)
self.ellv = np.logspace(0.0, 5.5, 1500)
self.ellwindowsJ0 = np.zeros((self.Nb, len(self.ellv)))
self.ellwindowsJ4 = np.zeros((self.Nb, len(self.ellv)))
for i in range(self.Nb):
sys.stdout.write("|")
sys.stdout.flush()
if HAVE_PYGSL:
epsabs = 1e-6
epsrel = 1e-6
limit = 1000
w = pygsl.integrate.workspace(limit)
for j, ell in enumerate(self.ellv):
args = [ell, self.windows[i], 0]
f = pygsl.integrate.gsl_function(ellwin_int, args)
# code,val,err = pygsl.integrate.qag(
# f,self.Lb[i],self.Hb[i],epsabs,epsrel,
# limit,pygsl.integrate.GAUSS61,w
# )
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
self.ellwindowsJ0[i, j] = val
for j, ell in enumerate(self.ellv):
args = [ell, self.windows[i], 4]
f = pygsl.integrate.gsl_function(ellwin_int, args)
# code,val,err = pygsl.integrate.qag(
# f,self.Lb[i],self.Hb[i],epsabs,epsrel,limit,
# pygsl.integrate.GAUSS61,w
# )
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
self.ellwindowsJ4[i, j] = val
else:
win0 = np.array(
[
(
scipy.integrate.quad(
ellwin_int,
self.Lb[i],
self.Hb[i],
args=(ell, self.windows[i], 0),
)
)[0]
for ell in self.ellv
]
)
win4 = np.array(
[
(
scipy.integrate.quad(
ellwin_int,
self.Lb[i],
self.Hb[i],
args=(ell, self.windows[i], 4),
)
)[0]
for ell in self.ellv
]
)
self.ellwindowsJ0[i, :] = win0
self.ellwindowsJ4[i, :] = win4
sys.stdout.write("\n")
def write_data(self, fname):
"""
writes a simple text file with object info
# N L H
100 1.0 400.0
# Lb
1.0 1.2 ... 398.0
# Hb
1.2 1.4 ... 400.0
# fa
1.0 1.0 .... 1.0
# fb
blah blah ... blah
# fa_on
blah blah ... blah
# fb_on
blah blah ... blah
# invnorm
blah blah ... blah
# Mplus
blah blah ... blah
blah blah ... blah
.
.
.
blah blah ... blah
# Mminus
blah blah ... blah
blah blah ... blah
.
.
.
blah blah ... blah
# ellv
blah blah ... blah
# ellwinJ0
blah blah ... blah
blah blah ... blah
.
.
.
blah blah ... blah
# ellwinJ4
blah blah ... blah
blah blah ... blah
.
.
.
blah blah ... blah
"""
def write_vec(fp, vec):
for val in vec:
fp.write("%.20lg " % val)
fp.write("\n#\n")
def write_mat(fp, mat):
shape = mat.shape
for i in range(shape[0]):
for val in mat[i, :]:
fp.write("%.20lg " % val)
fp.write("\n")
fp.write("#\n")
fp = open(fname, "w")
fp.write("# N L H\n")
fp.write("%ld %.20lg %.20lg\n" % (self.Nb, self.L, self.H))
fp.write("# Lb\n")
write_vec(fp, self.Lb)
fp.write("# Hb\n")
write_vec(fp, self.Hb)
fp.write("# fa\n")
write_vec(fp, self.fa)
fp.write("# fb\n")
write_vec(fp, self.fb)
fp.write("# fa_on\n")
write_vec(fp, self.fa_on)
fp.write("# fb_on\n")
write_vec(fp, self.fb_on)
fp.write("# invnorm\n")
write_vec(fp, self.invnorm)
fp.write("# Mplus\n")
write_mat(fp, self.mp)
fp.write("# Mminus\n")
write_mat(fp, self.mm)
fp.write("# ellv\n")
write_vec(fp, self.ellv)
fp.write("# ellwinJ0\n")
write_mat(fp, self.ellwindowsJ0)
fp.write("# ellwinJ4\n")
write_mat(fp, self.ellwindowsJ4)
fp.close()
def read_data(self, fname):
def read_vec(fp):
line = fp.readline()
line = line.strip()
val = np.array([float(tag) for tag in line.split()])
line = fp.readline()
return val
def read_mat(fp):
mat = []
line = fp.readline()
while line[0] != "#":
line = line.strip()
mat.append([float(tag) for tag in line.split()])
line = fp.readline()
mat = np.array(mat)
return mat
fp = open(fname, "r")
line = fp.readline()
line = fp.readline()
line = line.strip()
line = line.split()
self.Nb = int(line[0])
self.L = float(line[1])
self.H = float(line[2])
line = fp.readline()
self.Lb = read_vec(fp)
line = fp.readline()
self.Hb = read_vec(fp)
line = fp.readline()
self.fa = read_vec(fp)
line = fp.readline()
self.fb = read_vec(fp)
line = fp.readline()
self.fa_on = read_vec(fp)
line = fp.readline()
self.fb_on = read_vec(fp)
line = fp.readline()
self.invnorm = read_vec(fp)
line = fp.readline()
self.mp = read_mat(fp)
line = fp.readline()
self.mm = read_mat(fp)
line = fp.readline()
self.ellv = read_vec(fp)
line = fp.readline()
self.ellwindowsJ0 = read_mat(fp)
line = fp.readline()
self.ellwindowsJ4 = read_mat(fp)
self.have_ell_win = True
fp.close()
def fplusminus(self, fptest):
fp = fptest - np.sum(fptest * self.fa_on) * self.fa_on
fp = fp - np.sum(fp * self.fb_on) * self.fb_on
fm = np.dot(self.mp, fp)
"""
code to test
fm = np.zeros(len(fp))
for i in range(len(fp)):
for j in range(len(fp)):
fm[i] += self.mp[i,j]*fp[j]
print fm-np.dot(self.mp,fp)
"""
return fp, fm
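    # Editor's note (sketch): fplusminus projects the trial weight vector onto
    # the subspace orthogonal to fa_on and fb_on and then maps the result with
    # Mplus to obtain the matching minus-side weights.  A quick sanity check,
    # assuming `eb` is an initialised BinEB instance:
    #   fp, fm = eb.fplusminus(np.random.standard_normal(eb.Nb))
    #   assert abs(np.sum(fp * eb.fa_on)) < 1e-10
    #   assert abs(np.sum(fp * eb.fb_on)) < 1e-10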
def wplus(self, fp, fm):
if not self.have_ell_win:
self.comp_ell_windows()
psum = np.array(
[np.sum(self.ellwindowsJ0[:, i] * fp) for i in range(len(self.ellv))]
)
msum = np.array(
[np.sum(self.ellwindowsJ4[:, i] * fm) for i in range(len(self.ellv))]
)
return self.ellv.copy(), (psum + msum) * 0.5
def wminus(self, fp, fm):
if not self.have_ell_win:
self.comp_ell_windows()
psum = np.array(
[np.sum(self.ellwindowsJ0[:, i] * fp) for i in range(len(self.ellv))]
)
msum = np.array(
[np.sum(self.ellwindowsJ4[:, i] * fm) for i in range(len(self.ellv))]
)
return self.ellv.copy(), (psum - msum) * 0.5
def wplusminus(self, fp, fm):
if not self.have_ell_win:
self.comp_ell_windows()
psum = np.array(
[np.sum(self.ellwindowsJ0[:, i] * fp) for i in range(len(self.ellv))]
)
msum = np.array(
[np.sum(self.ellwindowsJ4[:, i] * fm) for i in range(len(self.ellv))]
)
return self.ellv.copy(), (psum + msum) * 0.5, (psum - msum) * 0.5
| 34.394428
| 87
| 0.336488
| 23,225
| 0.99011
| 0
| 0
| 0
| 0
| 0
| 0
| 1,891
| 0.080616
|
450a8b0c8c6133dd03a986ca11b5d16bc7850c24
| 9,945
|
py
|
Python
|
test_fast_ndimage.py
|
grlee77/skimage_accel_demos
|
96606ca27c8c622733958c01620bc55e616319db
|
[
"BSD-3-Clause"
] | null | null | null |
test_fast_ndimage.py
|
grlee77/skimage_accel_demos
|
96606ca27c8c622733958c01620bc55e616319db
|
[
"BSD-3-Clause"
] | null | null | null |
test_fast_ndimage.py
|
grlee77/skimage_accel_demos
|
96606ca27c8c622733958c01620bc55e616319db
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from numpy.testing import assert_allclose, run_module_suite
from fast_ndimage import (
median_filter, sobel, convolve, correlate, gaussian_filter,
gaussian_filter1d, uniform_filter, uniform_filter1d)
def test_median_filter():
rtol = atol = 1e-7
shape = (63, 64)
rstate = np.random.RandomState(0)
x = rstate.standard_normal(shape).astype(np.float32)
for mode in ['reflect', ]:
kwargs = dict(mode=mode, size=3)
result_ndi = median_filter(x, backend='ndimage', **kwargs)
result_opencv = median_filter(x, backend='opencv', **kwargs)
assert_allclose(result_ndi, result_opencv, rtol=rtol, atol=atol)
def test_sobel_filter():
rtol = atol = 1e-7
shape = (63, 64)
rstate = np.random.RandomState(0)
x = rstate.standard_normal(shape)
# TODO: OpenCV 3.3 currently crashing for mode 'wrap':
# error: ~/miniconda3/conda-bld/opencv_1513818334462/work/opencv-3.3.0/modules/imgproc/src/filter.cpp:127: error: (-215) columnBorderType != BORDER_WRAP in function init
for axis in [0, 1]:
for mode in ['reflect', 'mirror', 'constant', 'nearest']:
kwargs = dict(mode=mode, axis=axis)
result_ndi = sobel(x, backend='ndimage', **kwargs)
result_opencv = sobel(x, backend='opencv', **kwargs)
assert_allclose(result_ndi, result_opencv, rtol=rtol, atol=atol)
axis = 0
mode = 'reflect'
for scale in [0.5, 1, 2, None]:
for delta in [0, 0.5, 2]:
kwargs = dict(mode=mode, axis=axis, scale=scale, delta=delta)
result_ndi = sobel(x[:, 0], backend='ndimage', **kwargs)
result_opencv = sobel(x[:, 0], backend='opencv', **kwargs)
assert_allclose(result_ndi, result_opencv, rtol=rtol, atol=atol)
def test_uniform_filter():
rtol = atol = 1e-7
shape = (63, 64)
rstate = np.random.RandomState(0)
x = rstate.standard_normal(shape)
# TODO: OpenCV 3.3 currently crashing for mode 'wrap':
# error: ~/miniconda3/conda-bld/opencv_1513818334462/work/opencv-3.3.0/modules/imgproc/src/filter.cpp:127: error: (-215) columnBorderType != BORDER_WRAP in function init
for mode in ['reflect', 'mirror', 'constant', 'nearest']:
kwargs = dict(mode=mode, size=(2, 3))
result_ndi = uniform_filter(x, backend='ndimage', **kwargs)
result_opencv = uniform_filter(x, backend='opencv', **kwargs)
assert_allclose(result_ndi, result_opencv, rtol=rtol, atol=atol)
for squared in [False, True]:
for normalize in [False, True]:
kwargs = dict(size=3, mode='reflect', normalize=normalize,
squared=squared)
result_ndi = uniform_filter(x, backend='ndimage', **kwargs)
result_opencv = uniform_filter(x, backend='opencv', **kwargs)
assert_allclose(result_ndi, result_opencv, rtol=rtol, atol=atol)
for size in [5, (5, 6), (6, 5), 6]:
for origin in [-2, -1, 0, 1, 2, (0, 0), (1, 1), (0, 1), (2, 1),
(-1, -2)]:
kwargs = dict(mode='reflect', size=size, origin=origin)
result_ndi = uniform_filter(x, backend='ndimage', **kwargs)
result_opencv = uniform_filter(x, backend='opencv', **kwargs)
assert_allclose(result_ndi, result_opencv, rtol=rtol, atol=atol)
def test_uniform_filter1d():
rtol = atol = 1e-7
shape = (63, 64)
rstate = np.random.RandomState(0)
x = rstate.standard_normal(shape)
size = 3
for axis in [0, 1, -1]:
for mode in ['reflect', 'mirror', 'constant', 'nearest']:
kwargs = dict(mode=mode)
result_ndi = uniform_filter1d(x, size, axis, backend='ndimage', **kwargs)
result_opencv = uniform_filter1d(x, size, axis, backend='opencv', **kwargs)
assert_allclose(result_ndi, result_opencv, rtol=rtol, atol=atol)
for squared in [False, True]:
for normalize in [False, True]:
kwargs = dict(mode='reflect', normalize=normalize,
squared=squared)
result_ndi = uniform_filter1d(x, size, axis, backend='ndimage', **kwargs)
result_opencv = uniform_filter1d(x, size, axis, backend='opencv', **kwargs)
assert_allclose(result_ndi, result_opencv, rtol=rtol, atol=atol)
for origin in [-1, 0, 1]:
kwargs = dict(mode='reflect', origin=origin)
result_ndi = uniform_filter1d(x, size, axis, backend='ndimage', **kwargs)
result_opencv = uniform_filter1d(x, size, axis, backend='opencv', **kwargs)
assert_allclose(result_ndi, result_opencv, rtol=rtol, atol=atol)
def test_gaussian_filter():
rtol = atol = 1e-12
shape = (63, 64)
sigma = (1.5, 3)
rstate = np.random.RandomState(0)
x = rstate.standard_normal(shape)
# TODO: OpenCV 3.3 currently crashing for mode 'wrap':
# error: ~/miniconda3/conda-bld/opencv_1513818334462/work/opencv-3.3.0/modules/imgproc/src/filter.cpp:127: error: (-215) columnBorderType != BORDER_WRAP in function init
for mode in ['reflect', 'mirror', 'constant', 'nearest']:
kwargs = dict(mode=mode)
result_ndi = gaussian_filter(x, sigma, backend='ndimage', **kwargs)
result_opencv = gaussian_filter(x, sigma, backend='opencv', **kwargs)
assert_allclose(result_ndi, result_opencv, rtol=rtol, atol=atol)
mode = 'reflect'
for truncate in [1, 1.1, 1.5, 2, 4, 5]:
kwargs = dict(mode=mode, truncate=truncate)
result_ndi = gaussian_filter(x, sigma, backend='ndimage', **kwargs)
result_opencv = gaussian_filter(x, sigma, backend='opencv', **kwargs)
assert_allclose(result_ndi, result_opencv, rtol=rtol, atol=atol)
def test_gaussian_filter1d():
rtol = atol = 1e-12
shape = (63, 64)
sigma = 2.5
rstate = np.random.RandomState(0)
x = rstate.standard_normal(shape)
# TODO: OpenCV 3.3 currently crashing for mode 'wrap':
# error: ~/miniconda3/conda-bld/opencv_1513818334462/work/opencv-3.3.0/modules/imgproc/src/filter.cpp:127: error: (-215) columnBorderType != BORDER_WRAP in function init
for axis in [0, 1, -1]:
for mode in ['reflect', 'mirror', 'constant', 'nearest']:
kwargs = dict(mode=mode)
result_ndi = gaussian_filter1d(x, sigma, axis, backend='ndimage',
**kwargs)
result_opencv = gaussian_filter1d(x, sigma, axis, backend='opencv',
**kwargs)
assert_allclose(result_ndi, result_opencv, rtol=rtol, atol=atol)
mode = 'reflect'
for truncate in [1, 2]:
kwargs = dict(mode=mode, truncate=truncate, axis=axis)
result_ndi = gaussian_filter1d(x, sigma, backend='ndimage',
**kwargs)
result_opencv = gaussian_filter1d(x, sigma, backend='opencv',
**kwargs)
assert_allclose(result_ndi, result_opencv, rtol=rtol, atol=atol)
def test_convolve():
rtol = atol = 1e-12
shape = (63, 64)
rstate = np.random.RandomState(0)
x = rstate.standard_normal(shape)
weights = rstate.standard_normal((3, 6))
func = convolve
# TODO: OpenCV 3.3 currently crashing for mode 'wrap':
# error: ~/miniconda3/conda-bld/opencv_1513818334462/work/opencv-3.3.0/modules/imgproc/src/filter.cpp:127: error: (-215) columnBorderType != BORDER_WRAP in function init
for mode in ['reflect', 'mirror', 'constant', 'nearest']:
kwargs = dict(mode=mode)
result_ndi = func(x, weights, backend='ndimage', **kwargs)
result_opencv = func(x, weights, backend='opencv', **kwargs)
assert_allclose(result_ndi, result_opencv, rtol=rtol, atol=atol)
for delta in [0, -0.5, 2]:
kwargs = dict(mode='reflect', delta=delta)
result_ndi = func(x, weights, backend='ndimage', **kwargs)
result_opencv = func(x, weights, backend='opencv', **kwargs)
assert_allclose(result_ndi, result_opencv, rtol=rtol, atol=atol)
for origin in [-1, 0, 1, (0, 0), (1, 1)]:
kwargs = dict(mode='reflect', origin=origin)
result_ndi = func(x, weights, backend='ndimage', **kwargs)
result_opencv = func(x, weights, backend='opencv', **kwargs)
assert_allclose(result_ndi, result_opencv, rtol=rtol, atol=atol)
# TODO: test threading
def test_correlate():
rtol = atol = 1e-12
shape = (63, 64)
rstate = np.random.RandomState(0)
x = rstate.standard_normal(shape)
weights = rstate.standard_normal((4, 4))
func = correlate
# TODO: OpenCV 3.3 currently crashing for mode 'wrap':
# error: ~/miniconda3/conda-bld/opencv_1513818334462/work/opencv-3.3.0/modules/imgproc/src/filter.cpp:127: error: (-215) columnBorderType != BORDER_WRAP in function init
for mode in ['reflect', 'mirror', 'constant', 'nearest']:
kwargs = dict(mode=mode)
result_ndi = func(x, weights, backend='ndimage', **kwargs)
result_opencv = func(x, weights, backend='opencv', **kwargs)
assert_allclose(result_ndi, result_opencv, rtol=rtol, atol=atol)
for delta in [0, -0.5, 2]:
kwargs = dict(mode='reflect', delta=delta)
result_ndi = func(x, weights, backend='ndimage', **kwargs)
result_opencv = func(x, weights, backend='opencv', **kwargs)
assert_allclose(result_ndi, result_opencv, rtol=rtol, atol=atol)
for origin in [-1, 0, 1, (0, 0), (1, 1)]:
kwargs = dict(mode='reflect', origin=origin)
result_ndi = func(x, weights, backend='ndimage', **kwargs)
result_opencv = func(x, weights, backend='opencv', **kwargs)
assert_allclose(result_ndi, result_opencv, rtol=rtol, atol=atol)
# TODO: assert_raises ValueError on origin=(-1, 1) etc.
if __name__ == "__main__":
run_module_suite()
| 44.2
| 173
| 0.627954
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,108
| 0.211966
|
450aba433942ebcf2d5698d6bec5bdbf826e634d
| 628
|
py
|
Python
|
RecamanSequence/recaman_sequence.py
|
urosjevremovic/Recamans-Sequence
|
ab6a90c363271dc842f26ccd1b69168a9764de9e
|
[
"MIT"
] | null | null | null |
RecamanSequence/recaman_sequence.py
|
urosjevremovic/Recamans-Sequence
|
ab6a90c363271dc842f26ccd1b69168a9764de9e
|
[
"MIT"
] | null | null | null |
RecamanSequence/recaman_sequence.py
|
urosjevremovic/Recamans-Sequence
|
ab6a90c363271dc842f26ccd1b69168a9764de9e
|
[
"MIT"
] | null | null | null |
import sys
from itertools import count, islice
def sequence():
"""Generate Recaman's sequence"""
seen = set()
a = 0
for n in count(1):
yield a
seen.add(a)
c = a - n
if c < 0 or c in seen:
c = a + n
a = c
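# Quick sanity check (added for illustration, not in the original file): the
# generator above produces Recaman's sequence (OEIS A005132), whose first terms
# are 0, 1, 3, 6, 2, 7, 13, 20, 12, 21, 11, so one would expect
#   list(islice(sequence(), 11)) == [0, 1, 3, 6, 2, 7, 13, 20, 12, 21, 11]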
def write_sequence(num):
"""Write Recaman's sequence to a text file"""
filename = "recaman.txt"
with open(filename, mode="wt", encoding="utf-8") as f:
f.writelines(f"{r}\n" for r in islice(sequence(), num))
def main():
write_sequence(num=int(sys.argv[1]))
if __name__ == '__main__':
write_sequence(num=int(sys.argv[1]))
| 20.258065
| 63
| 0.565287
| 0
| 0
| 224
| 0.356688
| 0
| 0
| 0
| 0
| 120
| 0.191083
|
450b1dc0c660308c26a032b98dc820700aea0504
| 533
|
py
|
Python
|
2018/aoc/d8/test.py
|
lukaselmer/adventofcode
|
b96ffc9040b63b338bca653830ba4ff7e90a8b2a
|
[
"MIT"
] | 1
|
2018-12-12T22:59:44.000Z
|
2018-12-12T22:59:44.000Z
|
2018/aoc/d8/test.py
|
lukaselmer/adventofcode
|
b96ffc9040b63b338bca653830ba4ff7e90a8b2a
|
[
"MIT"
] | null | null | null |
2018/aoc/d8/test.py
|
lukaselmer/adventofcode
|
b96ffc9040b63b338bca653830ba4ff7e90a8b2a
|
[
"MIT"
] | null | null | null |
import unittest
from unittest.mock import mock_open, patch
from aoc.d8.main import metadata_sum, supervalue
DATA = "2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2\n"
class TestCase(unittest.TestCase):
def test_metadata_sum(self):
with patch("builtins.open", mock_open(read_data=DATA)):
self.assertEqual(138, metadata_sum())
def test_supervalue(self):
with patch("builtins.open", mock_open(read_data=DATA)):
self.assertEqual(66, supervalue())
if __name__ == "__main__":
unittest.main()
| 25.380952
| 63
| 0.682927
| 324
| 0.60788
| 0
| 0
| 0
| 0
| 0
| 0
| 79
| 0.148218
|
450b884de08b19e1126451db4abb472fc660d42a
| 955
|
py
|
Python
|
tests/calculators/openbabel/test_obabel_calculators.py
|
stevenbennett96/stko
|
ee340af4fc549d5a2c3e9cba8360661335efe0fd
|
[
"MIT"
] | null | null | null |
tests/calculators/openbabel/test_obabel_calculators.py
|
stevenbennett96/stko
|
ee340af4fc549d5a2c3e9cba8360661335efe0fd
|
[
"MIT"
] | null | null | null |
tests/calculators/openbabel/test_obabel_calculators.py
|
stevenbennett96/stko
|
ee340af4fc549d5a2c3e9cba8360661335efe0fd
|
[
"MIT"
] | 2
|
2020-05-08T17:51:25.000Z
|
2020-05-11T09:03:24.000Z
|
import stko
import pytest
try:
from openbabel import openbabel
except ImportError:
openbabel = None
def test_open_babel_energy(unoptimized_mol):
if openbabel is None:
with pytest.raises(stko.WrapperNotInstalledException):
calculator = stko.OpenBabelEnergy('uff')
else:
calculator = stko.OpenBabelEnergy('uff')
test_energy = calculator.get_energy(unoptimized_mol)
assert test_energy == 141.44622279628743
calculator = stko.OpenBabelEnergy('gaff')
test_energy = calculator.get_energy(unoptimized_mol)
assert test_energy == 66.47095418890525
calculator = stko.OpenBabelEnergy('ghemical')
test_energy = calculator.get_energy(unoptimized_mol)
assert test_energy == 86.59956589041794
calculator = stko.OpenBabelEnergy('mmff94')
test_energy = calculator.get_energy(unoptimized_mol)
assert test_energy == 7.607999187460175
| 29.84375
| 62
| 0.709948
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 34
| 0.035602
|
450c6f3fc4e2b60e9dc2c7675ce23445e63cfa2b
| 1,773
|
py
|
Python
|
psono/restapi/views/membership_decline.py
|
dirigeant/psono-server
|
a18c5b3c4d8bbbe4ecf1615b210d99fb77752205
|
[
"Apache-2.0",
"CC0-1.0"
] | 48
|
2018-04-19T15:50:58.000Z
|
2022-01-23T15:58:11.000Z
|
psono/restapi/views/membership_decline.py
|
dirigeant/psono-server
|
a18c5b3c4d8bbbe4ecf1615b210d99fb77752205
|
[
"Apache-2.0",
"CC0-1.0"
] | 9
|
2018-09-13T14:56:18.000Z
|
2020-01-17T16:44:33.000Z
|
psono/restapi/views/membership_decline.py
|
dirigeant/psono-server
|
a18c5b3c4d8bbbe4ecf1615b210d99fb77752205
|
[
"Apache-2.0",
"CC0-1.0"
] | 11
|
2019-09-20T11:53:47.000Z
|
2021-07-18T22:41:31.000Z
|
from rest_framework import status
from rest_framework.response import Response
from rest_framework.generics import GenericAPIView
from ..permissions import IsAuthenticated
from django.core.cache import cache
from django.conf import settings
from ..authentication import TokenAuthentication
from ..app_settings import (
MembershipDeclineSerializer,
)
class MembershipDeclineView(GenericAPIView):
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated,)
allowed_methods = ('POST', 'OPTIONS', 'HEAD')
def get(self, *args, **kwargs):
return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED)
def put(self, *args, **kwargs):
return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED)
def post(self, request, *args, **kwargs):
"""
Marks a membership as declined. In addition deletes now unnecessary information.
:param request:
:param uuid: share_right_id
:param args:
:param kwargs:
:return: 200 / 403
"""
serializer = MembershipDeclineSerializer(data=request.data, context=self.get_serializer_context())
if not serializer.is_valid():
return Response(
serializer.errors, status=status.HTTP_400_BAD_REQUEST
)
membership_obj = serializer.validated_data.get('membership_obj')
membership_obj.accepted = False
membership_obj.save()
if settings.CACHE_ENABLE:
cache_key = 'psono_user_status_' + str(membership_obj.user.id)
cache.delete(cache_key)
return Response(status=status.HTTP_200_OK)
def delete(self, *args, **kwargs):
return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED)
| 30.568966
| 106
| 0.693739
| 1,414
| 0.797518
| 0
| 0
| 0
| 0
| 0
| 0
| 293
| 0.165257
|
450c825cc3c91a3ffe9479b87c3868422b01ed4b
| 5,350
|
py
|
Python
|
tests/data/samplers/bucket_batch_sampler_test.py
|
MSLars/allennlp
|
2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475
|
[
"Apache-2.0"
] | 11,433
|
2017-06-27T03:08:46.000Z
|
2022-03-31T18:14:33.000Z
|
tests/data/samplers/bucket_batch_sampler_test.py
|
MSLars/allennlp
|
2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475
|
[
"Apache-2.0"
] | 4,006
|
2017-06-26T21:45:43.000Z
|
2022-03-31T02:11:10.000Z
|
tests/data/samplers/bucket_batch_sampler_test.py
|
MSLars/allennlp
|
2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475
|
[
"Apache-2.0"
] | 2,560
|
2017-06-26T21:16:53.000Z
|
2022-03-30T07:55:46.000Z
|
from allennlp.common import Params
from allennlp.data import Instance, Token, Batch
from allennlp.data.fields import TextField
from allennlp.data.samplers import BucketBatchSampler
from allennlp.data.data_loaders import MultiProcessDataLoader
from .sampler_test import SamplerTest
class TestBucketSampler(SamplerTest):
def test_create_batches_groups_correctly(self):
sampler = BucketBatchSampler(batch_size=2, padding_noise=0, sorting_keys=["text"])
grouped_instances = []
for indices in sampler.get_batch_indices(self.instances):
grouped_instances.append([self.instances[idx] for idx in indices])
expected_groups = [
[self.instances[4], self.instances[2]],
[self.instances[0], self.instances[1]],
[self.instances[3]],
]
for group in grouped_instances:
assert group in expected_groups
expected_groups.remove(group)
assert expected_groups == []
def test_disable_shuffle(self):
sampler = BucketBatchSampler(batch_size=2, sorting_keys=["text"], shuffle=False)
grouped_instances = []
for indices in sampler.get_batch_indices(self.instances):
grouped_instances.append([self.instances[idx] for idx in indices])
expected_groups = [
[self.instances[4], self.instances[2]],
[self.instances[0], self.instances[1]],
[self.instances[3]],
]
for idx, group in enumerate(grouped_instances):
assert group == expected_groups[idx]
def test_guess_sorting_key_picks_the_longest_key(self):
sampler = BucketBatchSampler(batch_size=2, padding_noise=0)
instances = []
short_tokens = [Token(t) for t in ["what", "is", "this", "?"]]
long_tokens = [Token(t) for t in ["this", "is", "a", "not", "very", "long", "passage"]]
instances.append(
Instance(
{
"question": TextField(short_tokens, self.token_indexers),
"passage": TextField(long_tokens, self.token_indexers),
}
)
)
instances.append(
Instance(
{
"question": TextField(short_tokens, self.token_indexers),
"passage": TextField(long_tokens, self.token_indexers),
}
)
)
instances.append(
Instance(
{
"question": TextField(short_tokens, self.token_indexers),
"passage": TextField(long_tokens, self.token_indexers),
}
)
)
assert sampler.sorting_keys is None
sampler._guess_sorting_keys(instances)
assert sampler.sorting_keys == ["passage"]
def test_from_params(self):
params = Params({})
sorting_keys = ["s1", "s2"]
params["sorting_keys"] = sorting_keys
params["batch_size"] = 32
sampler = BucketBatchSampler.from_params(params=params)
assert sampler.sorting_keys == sorting_keys
assert sampler.padding_noise == 0.1
assert sampler.batch_size == 32
params = Params(
{
"sorting_keys": sorting_keys,
"padding_noise": 0.5,
"batch_size": 100,
"drop_last": True,
}
)
sampler = BucketBatchSampler.from_params(params=params)
assert sampler.sorting_keys == sorting_keys
assert sampler.padding_noise == 0.5
assert sampler.batch_size == 100
assert sampler.drop_last
def test_drop_last_works(self):
sampler = BucketBatchSampler(
batch_size=2,
padding_noise=0,
sorting_keys=["text"],
drop_last=True,
)
# We use a custom collate_fn for testing, which doesn't actually create tensors,
# just the allennlp Batches.
def collate_fn(x, **kwargs):
return Batch(x)
data_loader = MultiProcessDataLoader(
self.get_mock_reader(),
"fake_path",
batch_sampler=sampler,
)
data_loader.collate_fn = collate_fn
data_loader.index_with(self.vocab)
batches = [batch for batch in iter(data_loader)]
stats = self.get_batches_stats(batches)
# all batches have length batch_size
assert all(batch_len == 2 for batch_len in stats["batch_lengths"])
# we should have lost one instance by skipping the last batch
assert stats["total_instances"] == len(self.instances) - 1
def test_batch_count(self):
sampler = BucketBatchSampler(batch_size=2, padding_noise=0, sorting_keys=["text"])
data_loader = MultiProcessDataLoader(
self.get_mock_reader(), "fake_path", batch_sampler=sampler
)
data_loader.index_with(self.vocab)
assert len(data_loader) == 3
def test_batch_count_with_drop_last(self):
sampler = BucketBatchSampler(
batch_size=2,
padding_noise=0,
sorting_keys=["text"],
drop_last=True,
)
data_loader = MultiProcessDataLoader(
self.get_mock_reader(), "fake_path", batch_sampler=sampler
)
assert len(data_loader) == 2
| 35.90604
| 95
| 0.601121
| 5,065
| 0.946729
| 0
| 0
| 0
| 0
| 0
| 0
| 510
| 0.095327
|
45112f2abb035e911415cb428c007f107a543914
| 3,332
|
py
|
Python
|
tests/test_sphere.py
|
dkirkby/batoid
|
734dccc289eb7abab77a62cdc14563ed5981753b
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_sphere.py
|
dkirkby/batoid
|
734dccc289eb7abab77a62cdc14563ed5981753b
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_sphere.py
|
dkirkby/batoid
|
734dccc289eb7abab77a62cdc14563ed5981753b
|
[
"BSD-2-Clause"
] | null | null | null |
import batoid
import numpy as np
import math
from test_helpers import timer, do_pickle, all_obj_diff
@timer
def test_properties():
import random
random.seed(5)
for i in range(100):
R = random.gauss(0.7, 0.8)
sphere = batoid.Sphere(R)
assert sphere.R == R
do_pickle(sphere)
@timer
def test_sag():
import random
random.seed(57)
for i in range(100):
R = random.gauss(4.2, 0.3)
sphere = batoid.Sphere(R)
for j in range(10):
x = random.uniform(-0.7*R, 0.7*R)
y = random.uniform(-0.7*R, 0.7*R)
result = sphere.sag(x, y)
np.testing.assert_allclose(result, R*(1-math.sqrt(1.0-(x*x + y*y)/R/R)))
# Check that it returned a scalar float and not an array
assert isinstance(result, float)
# Check vectorization
x = np.random.uniform(-0.7*R, 0.7*R, size=(10, 10))
y = np.random.uniform(-0.7*R, 0.7*R, size=(10, 10))
np.testing.assert_allclose(sphere.sag(x, y), R*(1-np.sqrt(1.0-(x*x + y*y)/R/R)))
# Make sure non-unit stride arrays also work
np.testing.assert_allclose(
sphere.sag(x[::5,::2], y[::5,::2]),
(R*(1-np.sqrt(1.0-(x*x + y*y)/R/R)))[::5, ::2]
)
@timer
def test_intersect():
import random
random.seed(577)
for i in range(100):
R = random.gauss(10.0, 0.1)
sphere = batoid.Sphere(R)
for j in range(10):
x = random.gauss(0.0, 1.0)
y = random.gauss(0.0, 1.0)
# If we shoot rays straight up, then it's easy to predict the
# intersection points.
r0 = batoid.Ray(x, y, -1000, 0, 0, 1, 0)
r = sphere.intersect(r0)
np.testing.assert_allclose(r.r[0], x)
np.testing.assert_allclose(r.r[1], y)
np.testing.assert_allclose(r.r[2], sphere.sag(x, y), rtol=0, atol=1e-9)
    # Check normal for R=0 sphere (a plane)
sphere = batoid.Sphere(0.0)
np.testing.assert_array_equal(sphere.normal(0.1,0.1), [0,0,1])
@timer
def test_intersect_vectorized():
import random
random.seed(5772)
r0s = [batoid.Ray([random.gauss(0.0, 0.1),
random.gauss(0.0, 0.1),
random.gauss(10.0, 0.1)],
[random.gauss(0.0, 0.1),
random.gauss(0.0, 0.1),
random.gauss(-1.0, 0.1)],
random.gauss(0.0, 0.1))
for i in range(1000)]
r0s = batoid.RayVector(r0s)
for i in range(100):
R = random.gauss(0.05, 0.01)
sphere = batoid.Sphere(R)
r1s = sphere.intersect(r0s)
r2s = batoid.RayVector([sphere.intersect(r0) for r0 in r0s])
assert r1s == r2s
@timer
def test_ne():
objs = [
batoid.Sphere(1.0),
batoid.Sphere(2.0),
batoid.Plane()
]
all_obj_diff(objs)
@timer
def test_fail():
sphere = batoid.Sphere(1.0)
ray = batoid.Ray([0,0,-1], [0,0,-1])
ray = sphere.intersect(ray)
assert ray.failed
ray = batoid.Ray([0,0,-1], [0,0,-1])
sphere.intersectInPlace(ray)
assert ray.failed
if __name__ == '__main__':
test_properties()
test_sag()
test_intersect()
test_intersect_vectorized()
test_ne()
test_fail()
| 28.237288
| 88
| 0.545618
| 0
| 0
| 0
| 0
| 3,064
| 0.919568
| 0
| 0
| 257
| 0.077131
|
4511821928e83d509f748b6119d6ba8794c26a88
| 2,678
|
py
|
Python
|
site_stats/middlewares.py
|
ganlvtech/blueking-django-startup-project
|
042aa36b0757c0d3929d88bc23534f54963d333e
|
[
"MIT"
] | 1
|
2018-11-22T21:13:25.000Z
|
2018-11-22T21:13:25.000Z
|
site_stats/middlewares.py
|
ganlvtech/blueking-django-startup-project
|
042aa36b0757c0d3929d88bc23534f54963d333e
|
[
"MIT"
] | null | null | null |
site_stats/middlewares.py
|
ganlvtech/blueking-django-startup-project
|
042aa36b0757c0d3929d88bc23534f54963d333e
|
[
"MIT"
] | null | null | null |
from django.http.response import HttpResponseForbidden
from .models import Counter, VisitLog
from .utils import get_client_ip
class SiteStatistics(object):
visit_log = None
def process_request(self, request):
if request.path_info.startswith('/admin/'):
return
counter = Counter.objects.first()
counter.value += 1
counter.save()
try:
self.visit_log = VisitLog()
self.visit_log.user_id = request.session.get('openid', '')[:128]
user_info = ''
openkey = request.session.get('openkey', '')
nick_name = request.session.get('nick_name', '')
if openkey or nick_name:
user_info = nick_name + ' ' + openkey
self.visit_log.user_info = user_info[:255]
self.visit_log.path = request.path[:1024]
self.visit_log.method = request.method
self.visit_log.ip = get_client_ip(request)
self.visit_log.user_agent = request.META['HTTP_USER_AGENT'][:1024]
self.visit_log.query = request.META['QUERY_STRING'][:1024]
self.visit_log.body = request.body[:4096]
self.visit_log.response_length = -1
self.visit_log.save()
except Exception as e:
print(e)
def process_response(self, request, response):
try:
if self.visit_log:
self.visit_log.response_code = response.status_code
if hasattr(response, 'content'):
self.visit_log.response_length = len(response.content)
self.visit_log.response_body = response.content[:4096]
elif 'Content-Length' in response:
self.visit_log.response_length = response['Content-Length']
else:
self.visit_log.response_length = -2
self.visit_log.save()
except Exception as e:
print(e)
return response
class BanUser(object):
ban_openid_list = (
"144115212352913603",
)
ban_nick_name_list = (
"453413024",
)
ban_ip_list = (
"116.228.88.252",
)
def process_request(self, request):
ip = get_client_ip(request)
if ip in self.ban_ip_list:
return HttpResponseForbidden('Banned IP')
openid = request.session.get('openid')
if openid and openid in self.ban_openid_list:
return HttpResponseForbidden('Banned openid')
nick_name = request.session.get('nick_name')
if nick_name and nick_name in self.ban_nick_name_list:
return HttpResponseForbidden('Banned QQ')
| 34.779221
| 79
| 0.596341
| 2,545
| 0.950336
| 0
| 0
| 0
| 0
| 0
| 0
| 223
| 0.083271
|
4511a28651e9a1abc5a51540a0f550556e34f6c9
| 1,753
|
py
|
Python
|
gpu_bdb/bdb_tools/q24_utils.py
|
VibhuJawa/gpu-bdb
|
13987b4ef8b92db3b9d2905dec7bd2fd81f42ae9
|
[
"Apache-2.0"
] | 62
|
2020-05-14T13:33:02.000Z
|
2020-10-29T13:28:26.000Z
|
gpu_bdb/bdb_tools/q24_utils.py
|
VibhuJawa/gpu-bdb
|
13987b4ef8b92db3b9d2905dec7bd2fd81f42ae9
|
[
"Apache-2.0"
] | 104
|
2020-07-01T21:07:42.000Z
|
2020-11-13T16:36:04.000Z
|
gpu_bdb/bdb_tools/q24_utils.py
|
VibhuJawa/gpu-bdb
|
13987b4ef8b92db3b9d2905dec7bd2fd81f42ae9
|
[
"Apache-2.0"
] | 21
|
2020-05-14T14:44:40.000Z
|
2020-11-07T12:08:28.000Z
|
#
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.readers import build_reader
ws_cols = ["ws_item_sk", "ws_sold_date_sk", "ws_quantity"]
item_cols = ["i_item_sk", "i_current_price"]
imp_cols = [
"imp_item_sk",
"imp_competitor_price",
"imp_start_date",
"imp_end_date",
"imp_sk",
]
ss_cols = ["ss_item_sk", "ss_sold_date_sk", "ss_quantity"]
def read_tables(config, c=None):
table_reader = build_reader(
data_format=config["file_format"],
basepath=config["data_dir"],
split_row_groups=config["split_row_groups"],
backend=config["backend"],
)
### read tables
ws_df = table_reader.read("web_sales", relevant_cols=ws_cols)
item_df = table_reader.read("item", relevant_cols=item_cols)
imp_df = table_reader.read("item_marketprices", relevant_cols=imp_cols)
ss_df = table_reader.read("store_sales", relevant_cols=ss_cols)
if c:
c.create_table("web_sales", ws_df, persist=False)
c.create_table("item", item_df, persist=False)
c.create_table("item_marketprices", imp_df, persist=False)
c.create_table("store_sales", ss_df, persist=False)
return ws_df, item_df, imp_df, ss_df
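# Illustrative call (not part of the original file; the values below are
# placeholders, only the keys are taken from the config lookups above):
#   config = {"file_format": "parquet", "data_dir": "/path/to/tpcx-bb-data",
#             "split_row_groups": False, "backend": "cudf"}
#   ws_df, item_df, imp_df, ss_df = read_tables(config)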
| 34.372549
| 75
| 0.714775
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 930
| 0.530519
|
451235b4dc66c44ae6da7b46c7877673b9a0d562
| 8,175
|
py
|
Python
|
tests/test_compare.py
|
mys-lang/mys
|
070431fdedd7a6bf537f3a30583cd44f644cdbf4
|
[
"MIT"
] | 59
|
2021-01-06T14:21:40.000Z
|
2022-02-22T21:49:39.000Z
|
tests/test_compare.py
|
mys-lang/mys
|
070431fdedd7a6bf537f3a30583cd44f644cdbf4
|
[
"MIT"
] | 31
|
2021-01-05T00:32:36.000Z
|
2022-02-23T13:34:33.000Z
|
tests/test_compare.py
|
mys-lang/mys
|
070431fdedd7a6bf537f3a30583cd44f644cdbf4
|
[
"MIT"
] | 7
|
2021-01-03T11:53:03.000Z
|
2022-02-22T17:49:42.000Z
|
from .utils import TestCase
from .utils import build_and_test_module
from .utils import transpile_source
class Test(TestCase):
def test_compare(self):
with self.assertRaises(SystemExit):
build_and_test_module('compare')
def test_assert_between(self):
self.assert_transpile_raises(
'def foo():\n'
' a = 2\n'
' assert 1 <= a < 3\n',
' File "", line 3\n'
" assert 1 <= a < 3\n"
' ^\n'
"CompileError: can only compare two values\n")
def test_between(self):
self.assert_transpile_raises(
'def foo():\n'
' a = 2\n'
' print(1 <= a < 3)\n',
' File "", line 3\n'
" print(1 <= a < 3)\n"
' ^\n'
"CompileError: can only compare two values\n")
def test_i64_and_bool(self):
self.assert_transpile_raises(
'def foo() -> bool:\n'
' return 1 == True',
' File "", line 2\n'
' return 1 == True\n'
' ^\n'
"CompileError: cannot convert 'i64/i32/i16/i8/u64/u32/u16/u8' "
"to 'bool'\n")
def test_mix_of_literals_and_known_types_1(self):
source = transpile_source('def foo():\n'
' k: u64 = 1\n'
' v: i64 = 1\n'
' if 0xffffffffffffffff == k:\n'
' pass\n'
' print(v)\n')
self.assert_in('18446744073709551615ull', source)
def test_wrong_types_1(self):
self.assert_transpile_raises(
'def foo() -> bool:\n'
' return 1 == [""]\n',
' File "", line 2\n'
' return 1 == [""]\n'
' ^\n'
"CompileError: cannot convert 'i64/i32/i16/i8/u64/u32/u16/u8' to "
"'[string]'\n")
def test_wrong_types_2(self):
self.assert_transpile_raises(
'def foo() -> bool:\n'
' return [""] in 1\n',
' File "", line 2\n'
' return [""] in 1\n'
' ^\n'
"CompileError: not an iterable\n")
def test_wrong_types_3(self):
self.assert_transpile_raises(
'def foo() -> bool:\n'
' return [""] not in 1\n',
' File "", line 2\n'
' return [""] not in 1\n'
' ^\n'
"CompileError: not an iterable\n")
def test_wrong_types_4(self):
self.assert_transpile_raises(
'def foo() -> bool:\n'
' return 2.0 == 1\n',
' File "", line 2\n'
' return 2.0 == 1\n'
' ^\n'
"CompileError: cannot convert 'f64/f32' to "
"'i64/i32/i16/i8/u64/u32/u16/u8'\n")
def test_wrong_types_5(self):
self.assert_transpile_raises(
'def foo() -> bool:\n'
' return 1.0 == [""]\n',
' File "", line 2\n'
' return 1.0 == [""]\n'
' ^\n'
"CompileError: cannot convert 'f64/f32' to '[string]'\n")
def test_wrong_types_6(self):
self.assert_transpile_raises(
'def foo(a: i32) -> bool:\n'
' return a in [""]\n',
' File "", line 2\n'
' return a in [""]\n'
' ^\n'
"CompileError: types 'i32' and 'string' differs\n")
def test_wrong_types_7(self):
self.assert_transpile_raises(
'def foo(a: i32) -> bool:\n'
' return a in a\n',
' File "", line 2\n'
' return a in a\n'
' ^\n'
"CompileError: not an iterable\n")
def test_wrong_types_8(self):
self.assert_transpile_raises(
'def foo(a: i32) -> bool:\n'
' return 1 in a\n',
' File "", line 2\n'
' return 1 in a\n'
' ^\n'
"CompileError: not an iterable\n")
def test_wrong_types_9(self):
self.assert_transpile_raises(
'def foo(a: i32) -> bool:\n'
' return "" == a\n',
' File "", line 2\n'
' return "" == a\n'
' ^\n'
"CompileError: types 'string' and 'i32' differs\n")
def test_wrong_types_10(self):
self.assert_transpile_raises(
'def foo():\n'
' print(1 is None)\n',
' File "", line 2\n'
' print(1 is None)\n'
' ^\n'
"CompileError: 'i64' cannot be None\n")
def test_wrong_types_11(self):
self.assert_transpile_raises(
'def foo():\n'
' print(1.0 is None)\n',
' File "", line 2\n'
' print(1.0 is None)\n'
' ^\n'
"CompileError: 'f64' cannot be None\n")
def test_wrong_types_12(self):
self.assert_transpile_raises(
'def foo(a: i32):\n'
' print(a is None)\n',
' File "", line 2\n'
' print(a is None)\n'
' ^\n'
"CompileError: 'i32' cannot be None\n")
def test_wrong_types_13(self):
self.assert_transpile_raises(
'def foo(a: i32):\n'
' print(None is a)\n',
' File "", line 2\n'
' print(None is a)\n'
' ^\n'
"CompileError: 'i32' cannot be None\n")
def test_wrong_types_14(self):
self.assert_transpile_raises(
'def foo():\n'
' print(True is None)\n',
' File "", line 2\n'
' print(True is None)\n'
' ^\n'
"CompileError: 'bool' cannot be None\n")
def test_wrong_types_15(self):
self.assert_transpile_raises(
'def foo(a: bool):\n'
' print(None is a)\n',
' File "", line 2\n'
' print(None is a)\n'
' ^\n'
"CompileError: 'bool' cannot be None\n")
def test_wrong_types_16(self):
self.assert_transpile_raises(
'def foo(a: bool):\n'
' print(a is not 1)\n',
' File "", line 2\n'
' print(a is not 1)\n'
' ^\n'
"CompileError: cannot convert 'bool' to "
"'i64/i32/i16/i8/u64/u32/u16/u8'\n")
def test_wrong_types_17(self):
self.assert_transpile_raises(
'def foo():\n'
' print(None in [1, 5])\n',
' File "", line 2\n'
' print(None in [1, 5])\n'
' ^\n'
"CompileError: 'i64' cannot be None\n")
def test_wrong_types_18(self):
self.assert_transpile_raises(
'def foo():\n'
' print(None == "")\n',
' File "", line 2\n'
' print(None == "")\n'
' ^\n'
"CompileError: use 'is' and 'is not' to compare to None\n")
def test_wrong_types_20(self):
self.assert_transpile_raises(
'def foo():\n'
' if (1, ("", True)) == (1, ("", 1)):\n'
' pass\n',
# ToDo: Marker in wrong place.
' File "", line 2\n'
' if (1, ("", True)) == (1, ("", 1)):\n'
' ^\n'
"CompileError: cannot convert 'bool' to "
"'i64/i32/i16/i8/u64/u32/u16/u8'\n")
def test_bare_compare(self):
self.assert_transpile_raises(
'def foo():\n'
' 1 == 2\n',
' File "", line 2\n'
' 1 == 2\n'
' ^\n'
"CompileError: bare comparision\n")
| 34.493671
| 78
| 0.416147
| 8,067
| 0.986789
| 0
| 0
| 0
| 0
| 0
| 0
| 3,998
| 0.489052
|
4512ba1f9249887e49626300dacbdb0fac5b7fbe
| 170
|
py
|
Python
|
epidemioptim/environments/cost_functions/costs/__init__.py
|
goodhamgupta/EpidemiOptim
|
a4fe3fcfc2d82a10db16a168526982c03ca2c8d3
|
[
"MIT"
] | null | null | null |
epidemioptim/environments/cost_functions/costs/__init__.py
|
goodhamgupta/EpidemiOptim
|
a4fe3fcfc2d82a10db16a168526982c03ca2c8d3
|
[
"MIT"
] | null | null | null |
epidemioptim/environments/cost_functions/costs/__init__.py
|
goodhamgupta/EpidemiOptim
|
a4fe3fcfc2d82a10db16a168526982c03ca2c8d3
|
[
"MIT"
] | null | null | null |
from epidemioptim.environments.cost_functions.costs.death_toll_cost import DeathToll
from epidemioptim.environments.cost_functions.costs.gdp_recess_cost import GdpRecess
| 56.666667
| 84
| 0.905882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
451354227c3d203ff804c452ae15b439b4e8924c
| 1,587
|
py
|
Python
|
BFS/70.py
|
wilbertgeng/LintCode_exercise
|
e7a343b746e98ca3b4bc7b36655af7291f3150db
|
[
"MIT"
] | null | null | null |
BFS/70.py
|
wilbertgeng/LintCode_exercise
|
e7a343b746e98ca3b4bc7b36655af7291f3150db
|
[
"MIT"
] | null | null | null |
BFS/70.py
|
wilbertgeng/LintCode_exercise
|
e7a343b746e98ca3b4bc7b36655af7291f3150db
|
[
"MIT"
] | null | null | null |
"""70 ยท Binary Tree Level Order Traversal II"""
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
import collections
class Solution:
"""
@param root: A tree
@return: buttom-up level order a list of lists of integer
"""
def levelOrderBottom(self, root):
# write your code here
if not root:
return []
res = []
queue = collections.deque([root])
while queue:
temp = []
for _ in range(len(queue)):
node = queue.popleft()
temp.append(node.val)
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
res.append(temp)
return res[::-1]
        ### Alternative solution (unreachable after the return above; kept for reference)
if not root:
return []
queue = [[root]]
index = 0
res = [[root.val]]
while index < len(queue):
curr_level = queue[index]
index += 1
next_level = []
next_level_vals = []
for node in curr_level:
if node.left:
next_level.append(node.left)
next_level_vals.append(node.left.val)
if node.right:
next_level.append(node.right)
next_level_vals.append(node.right.val)
if next_level:
queue.append(next_level)
res.append(next_level_vals)
return res[::-1]
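# Worked example (added for illustration, not in the original file): for the
# tree 3 -> (9, 20) with 20 -> (15, 7), levelOrderBottom returns
# [[15, 7], [9, 20], [3]], i.e. the levels from bottom to top.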
| 24.415385
| 61
| 0.477001
| 1,394
| 0.877834
| 0
| 0
| 0
| 0
| 0
| 0
| 312
| 0.196474
|
451370b67d994c4274e51949234d8fd9c7be285d
| 2,554
|
py
|
Python
|
yuque_py/clients/client.py
|
kingJiaYouwen/yuque-py
|
451ec6b88860a984de9456d48c0af341676513a3
|
[
"MIT"
] | null | null | null |
yuque_py/clients/client.py
|
kingJiaYouwen/yuque-py
|
451ec6b88860a984de9456d48c0af341676513a3
|
[
"MIT"
] | null | null | null |
yuque_py/clients/client.py
|
kingJiaYouwen/yuque-py
|
451ec6b88860a984de9456d48c0af341676513a3
|
[
"MIT"
] | null | null | null |
import typing
import requests
from urllib.parse import urlencode
from .abstract_client import AbstractClient
from yuque_py.exceptions.request_error import RequestError
class Client(AbstractClient):
api_host: str
user_token: str
def __init__(self, api_host: str, user_token: str) -> None:
self.api_host = api_host
self.user_token = user_token
def request(
self,
api: str,
method: str,
requests_data: typing.Optional[typing.Dict[str, typing.Any]] = None,
user_agent: str = "@yuque/sdk",
) -> typing.Dict:
request_url = f"{self.api_host}/{api}"
request_header = {"User-Agent": user_agent, "X-Auth-Token": self.user_token}
if method == "GET":
func = self._get_request
elif method == "POST":
request_header["Content-Type"] = "application/json"
func = self._post_request
elif method == "PUT":
request_header["Content-Type"] = "application/json"
func = self._put_request
elif method == "DELETE":
func = self._delete_request
else:
raise ValueError
response = func(request_url, requests_data, request_header)
if response.status_code != 200:
raise RequestError(response.status_code, response.text)
return response.json()
@staticmethod
def _get_request(
request_url: str,
requests_data: typing.Dict[str, typing.Any],
request_header: typing.Dict[str, str],
) -> requests.Response:
if requests_data:
request_url = f"{request_url}&{urlencode(requests_data)}"
return requests.get(request_url, headers=request_header)
@staticmethod
def _post_request(
request_url: str,
requests_data: typing.Dict[str, typing.Any],
request_header: typing.Dict[str, str],
) -> requests.Response:
return requests.post(request_url, json=requests_data, headers=request_header)
@staticmethod
def _put_request(
request_url: str,
requests_data: typing.Dict[str, typing.Any],
request_header: typing.Dict[str, str],
) -> requests.Response:
return requests.put(request_url, json=requests_data, headers=request_header)
@staticmethod
def _delete_request(
request_url: str,
requests_data: typing.Dict[str, typing.Any],
request_header: typing.Dict[str, str],
) -> requests.Response:
return requests.delete(request_url, headers=request_header)
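# Minimal usage sketch (not part of the original file; the host and endpoint
# below are assumptions used only for illustration):
#   client = Client(api_host="https://www.yuque.com/api/v2", user_token="<token>")
#   data = client.request("user", "GET")  # parsed JSON dict on HTTP 200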
| 32.74359
| 85
| 0.643696
| 2,381
| 0.932263
| 0
| 0
| 1,161
| 0.454581
| 0
| 0
| 193
| 0.075568
|
45138db5ed51843c9a5afaaec91f905c3ac8de23
| 671
|
py
|
Python
|
results/views/sports.py
|
JukkaKarvonen/sal-kiti
|
3dcff71552ab323e3c97eccf502c0d72eb683967
|
[
"MIT"
] | 1
|
2021-06-12T08:46:32.000Z
|
2021-06-12T08:46:32.000Z
|
results/views/sports.py
|
JukkaKarvonen/sal-kiti
|
3dcff71552ab323e3c97eccf502c0d72eb683967
|
[
"MIT"
] | 8
|
2020-07-01T15:06:52.000Z
|
2022-02-20T09:11:23.000Z
|
results/views/sports.py
|
JukkaKarvonen/sal-kiti
|
3dcff71552ab323e3c97eccf502c0d72eb683967
|
[
"MIT"
] | 3
|
2020-03-01T17:02:24.000Z
|
2020-07-05T14:37:59.000Z
|
from dry_rest_permissions.generics import DRYPermissions
from rest_framework import viewsets
from results.models.sports import Sport
from results.serializers.sports import SportSerializer
class SportViewSet(viewsets.ModelViewSet):
"""API endpoint for sports.
list:
Returns a list of all the existing sports.
retrieve:
Returns the given sport.
create:
Creates a new sport instance.
update:
Updates a given sport.
partial_update:
Updates a given sport.
destroy:
Removes the given sport.
"""
permission_classes = (DRYPermissions,)
queryset = Sport.objects.all()
serializer_class = SportSerializer
| 20.96875
| 56
| 0.728763
| 479
| 0.71386
| 0
| 0
| 0
| 0
| 0
| 0
| 315
| 0.469449
|
45148079bc72efab4e9fdeacd43d659e9726c7f1
| 1,332
|
py
|
Python
|
tests/pvsystemprofiler/test_equation_of_time.py
|
slacgismo/pv-system-profiler
|
5ab663cd186511605bbb1e6aa387c8b897e47d83
|
[
"BSD-2-Clause"
] | 4
|
2020-08-18T14:28:44.000Z
|
2021-10-14T13:17:03.000Z
|
tests/pvsystemprofiler/test_equation_of_time.py
|
slacgismo/pv-system-profiler
|
5ab663cd186511605bbb1e6aa387c8b897e47d83
|
[
"BSD-2-Clause"
] | 10
|
2020-04-14T18:57:03.000Z
|
2021-09-14T15:26:24.000Z
|
tests/pvsystemprofiler/test_equation_of_time.py
|
slacgismo/pv-system-profiler
|
5ab663cd186511605bbb1e6aa387c8b897e47d83
|
[
"BSD-2-Clause"
] | null | null | null |
import unittest
import os
from pathlib import Path
import numpy as np
path = Path.cwd().parent.parent
os.chdir(path)
from pvsystemprofiler.utilities.equation_of_time import eot_da_rosa, eot_duffie
class TestEquationOfTime(unittest.TestCase):
# importing input for both eot tests
filepath = Path(__file__).parent.parent
input_data_file_path = filepath / 'fixtures' / 'longitude' / 'eot_input.csv'
with open(input_data_file_path) as file:
input_data = np.genfromtxt(file, delimiter=',')
def test_eot_duffie(self):
expected_data_file_path = self.filepath / 'fixtures' / 'longitude' / 'eot_duffie_output.csv'
with open(expected_data_file_path) as file:
expected_output = np.genfromtxt(file, delimiter=',')
actual_output = eot_duffie(self.input_data)
np.testing.assert_array_almost_equal(actual_output, expected_output)
def test_eot_da_rosa(self):
expected_data_file_path = self.filepath / 'fixtures' / 'longitude' / 'eot_da_rosa_output.csv'
with open(expected_data_file_path) as file:
expected_output = np.genfromtxt(file, delimiter=',')
actual_output = eot_da_rosa(self.input_data)
np.testing.assert_array_almost_equal(actual_output, expected_output)
if __name__ == '__main__':
unittest.main()
| 32.487805
| 101
| 0.722973
| 1,083
| 0.813063
| 0
| 0
| 0
| 0
| 0
| 0
| 180
| 0.135135
|
4514a6b0a130ee0e5e4417b6086e78904e058a13
| 1,942
|
py
|
Python
|
raiden/tests/unit/transfer/test_node.py
|
gasparmedina/raiden
|
649c43b7233b9e95f13831e61d5db187d583367a
|
[
"MIT"
] | null | null | null |
raiden/tests/unit/transfer/test_node.py
|
gasparmedina/raiden
|
649c43b7233b9e95f13831e61d5db187d583367a
|
[
"MIT"
] | null | null | null |
raiden/tests/unit/transfer/test_node.py
|
gasparmedina/raiden
|
649c43b7233b9e95f13831e61d5db187d583367a
|
[
"MIT"
] | 4
|
2019-01-24T14:45:06.000Z
|
2019-04-01T16:12:40.000Z
|
from raiden.constants import EMPTY_MERKLE_ROOT
from raiden.tests.utils.factories import HOP1, HOP2, UNIT_SECRETHASH, make_block_hash
from raiden.transfer.events import ContractSendChannelBatchUnlock
from raiden.transfer.node import is_transaction_effect_satisfied
from raiden.transfer.state_change import ContractReceiveChannelBatchUnlock
def test_is_transaction_effect_satisfied(
chain_state,
token_network_state,
token_network_id,
netting_channel_state,
):
transaction = ContractSendChannelBatchUnlock(
token_address=token_network_state.token_address,
token_network_identifier=token_network_id,
channel_identifier=netting_channel_state.identifier,
participant=HOP2,
triggered_by_block_hash=make_block_hash(),
)
state_change = ContractReceiveChannelBatchUnlock(
transaction_hash=UNIT_SECRETHASH,
token_network_identifier=token_network_id,
participant=HOP1,
partner=HOP2,
locksroot=EMPTY_MERKLE_ROOT,
unlocked_amount=0,
returned_tokens=0,
block_number=1,
block_hash=make_block_hash(),
)
# unlock for a channel in which this node is not a participant must return False
assert not is_transaction_effect_satisfied(chain_state, transaction, state_change)
# now call normally with us being the partner and not the participant
state_change.partner = netting_channel_state.our_state.address
state_change.participant = netting_channel_state.partner_state.address
assert not is_transaction_effect_satisfied(chain_state, transaction, state_change)
# finally call with us being the participant and not the partner which should check out
state_change.participant = netting_channel_state.our_state.address
state_change.partner = netting_channel_state.partner_state.address
assert is_transaction_effect_satisfied(chain_state, transaction, state_change)
| 45.162791
| 91
| 0.785273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 236
| 0.121524
|
451505779ddfa18b109340abfbe8b097a645a054
| 2,826
|
py
|
Python
|
tfx/utils/channel_test.py
|
HassanDayoub/tfx
|
dc9221abbb8dad991d1ae22fb91876da1290efae
|
[
"Apache-2.0"
] | 2
|
2019-07-08T20:56:13.000Z
|
2020-08-04T17:07:26.000Z
|
tfx/utils/channel_test.py
|
HassanDayoub/tfx
|
dc9221abbb8dad991d1ae22fb91876da1290efae
|
[
"Apache-2.0"
] | null | null | null |
tfx/utils/channel_test.py
|
HassanDayoub/tfx
|
dc9221abbb8dad991d1ae22fb91876da1290efae
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.utils.channel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# Standard Imports
import tensorflow as tf
from tfx.utils import channel
from tfx.utils import types
class ChannelTest(tf.test.TestCase):
def test_valid_channel(self):
instance_a = types.TfxArtifact('MyTypeName')
instance_b = types.TfxArtifact('MyTypeName')
chnl = channel.Channel(
'MyTypeName', static_artifact_collection=[instance_a, instance_b])
self.assertEqual(chnl.type_name, 'MyTypeName')
self.assertItemsEqual(chnl.get(), [instance_a, instance_b])
def test_invalid_channel_type(self):
instance_a = types.TfxArtifact('MyTypeName')
instance_b = types.TfxArtifact('MyTypeName')
with self.assertRaises(ValueError):
channel.Channel(
'AnotherTypeName',
static_artifact_collection=[instance_a, instance_b])
def test_artifact_collection_as_channel(self):
instance_a = types.TfxArtifact('MyTypeName')
instance_b = types.TfxArtifact('MyTypeName')
chnl = channel.as_channel([instance_a, instance_b])
self.assertEqual(chnl.type_name, 'MyTypeName')
self.assertItemsEqual(chnl.get(), [instance_a, instance_b])
def test_channel_as_channel_success(self):
instance_a = types.TfxArtifact('MyTypeName')
instance_b = types.TfxArtifact('MyTypeName')
chnl_original = channel.Channel(
'MyTypeName', static_artifact_collection=[instance_a, instance_b])
chnl_result = channel.as_channel(chnl_original)
self.assertEqual(chnl_original, chnl_result)
def test_empty_artifact_collection_as_channel_fail(self):
with self.assertRaises(ValueError):
channel.as_channel([])
def test_invalid_source_as_channel_fail(self):
with self.assertRaises(ValueError):
channel.as_channel(source='invalid source')
def test_type_check_success(self):
chnl = channel.Channel('MyTypeName')
chnl.type_check('MyTypeName')
def test_type_check_fail(self):
chnl = channel.Channel('MyTypeName')
with self.assertRaises(TypeError):
chnl.type_check('AnotherTypeName')
if __name__ == '__main__':
tf.test.main()
| 34.888889
| 74
| 0.754777
| 1,893
| 0.669851
| 0
| 0
| 0
| 0
| 0
| 0
| 875
| 0.309625
|
45154cfb764af63ed99ff0eaf6a51b8393aa6827
| 1,870
|
py
|
Python
|
lyrics.py
|
samiraafreen/lyrics-generator
|
37f894bdb8986c153985104af83e12ef8d6dac07
|
[
"MIT"
] | null | null | null |
lyrics.py
|
samiraafreen/lyrics-generator
|
37f894bdb8986c153985104af83e12ef8d6dac07
|
[
"MIT"
] | null | null | null |
lyrics.py
|
samiraafreen/lyrics-generator
|
37f894bdb8986c153985104af83e12ef8d6dac07
|
[
"MIT"
] | null | null | null |
import configparser
import requests
from bs4 import BeautifulSoup
def getAccessToken():
config = configparser.ConfigParser()
config.read('config.ini')
return config['Client_Access_Token']['token']
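# Expected config.ini layout, inferred from the keys read above (the token
# value itself is a placeholder):
#   [Client_Access_Token]
#   token = <your Genius API client access token>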
token = getAccessToken()
def searchMusicArtist(name):
api_url = "https://api.genius.com/search?q={}".format(name)
headers = {'authorization':token}
r = requests.get(api_url, headers=headers)
return r.json()
#searchMusicArtist("drake")
def getArtistID(name):
r = searchMusicArtist(name)
id = r['response']['hits'][0]['result']['primary_artist']['id']
return id
#print(getArtistID('drake'))
def getTopTenSongs(name):
id = getArtistID(name)
#api_url = "https://api.genius.com/artists/{}/songs?sort=popularity&per_page=10".format(id)
api_url = "https://api.genius.com/artists/{}/songs".format(id)
headers = {
'authorization':token
}
params={
'sort':'popularity',
'per_page':10
}
r = requests.get(api_url, headers=headers, params=params)
return r.json()
#print(getTopTenSongs('drake'))
def getSongURLs(name):
topTenSongs = getTopTenSongs(name)
songs = topTenSongs['response']['songs']
song_urls = []
for song in songs:
song_urls.append(song['url'])
return song_urls
def scrapeLyricText(name):
links = getSongURLs(name)
song_lyrics = []
for link in links:
page = requests.get(link)
soup = BeautifulSoup(page.content, 'html.parser')
lyrics_div = soup.find(class_='lyrics')
anchor_tags = lyrics_div.find_all('a')
current_lyrics = []
for anchor in anchor_tags:
text = anchor.text
if len(text) > 0 and text[0] != '[':
current_lyrics.append(text)
song_lyrics.append(current_lyrics)
return song_lyrics
#print(scrapeLyricText('drake'))
| 28.333333
| 95
| 0.648128
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 477
| 0.25508
|