Dataset schema (one row per source file; ⌀ marks nullable columns):

| Column | Type | Range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 – 2.06M |
| ext | string | 10 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 3 – 248 |
| max_stars_repo_name | string | length 5 – 125 |
| max_stars_repo_head_hexsha | string | length 40 – 78 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 ⌀ | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 3 – 248 |
| max_issues_repo_name | string | length 5 – 125 |
| max_issues_repo_head_hexsha | string | length 40 – 78 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 ⌀ | 1 – 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 3 – 248 |
| max_forks_repo_name | string | length 5 – 125 |
| max_forks_repo_head_hexsha | string | length 40 – 78 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 ⌀ | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 5 – 2.06M |
| avg_line_length | float64 | 1 – 1.02M |
| max_line_length | int64 | 3 – 1.03M |
| alphanum_fraction | float64 | 0 – 1 |
| count_classes | int64 | 0 – 1.6M |
| score_classes | float64 | 0 – 1 |
| count_generators | int64 | 0 – 651k |
| score_generators | float64 | 0 – 1 |
| count_decorators | int64 | 0 – 990k |
| score_decorators | float64 | 0 – 1 |
| count_async_functions | int64 | 0 – 235k |
| score_async_functions | float64 | 0 – 1 |
| count_documentation | int64 | 0 – 1.04M |
| score_documentation | float64 | 0 – 1 |

Each record below lists its metadata, then the file content, then the per-file statistics.
hexsha: 45156df577791ff159e318cbc3c7550a59d3c192 | size: 2,968 | ext: py | lang: Python
max_stars: src/slack/slack_json_factories/dialog_json/comment.py | yejingyu/jama-slack-integration @ 55d1366d6cde3962e788afebe0001747cbe11fe8 | ["MIT"] | count 1 | 2019-07-30T01:41:53.000Z – 2019-07-30T01:41:53.000Z
max_issues: src/slack/slack_json_factories/dialog_json/comment.py | yejingyu/jama-slack-integration @ 55d1366d6cde3962e788afebe0001747cbe11fe8 | ["MIT"] | count 4 | 2018-11-16T05:56:06.000Z – 2018-11-29T05:07:52.000Z
max_forks: src/slack/slack_json_factories/dialog_json/comment.py | yejingyu/jama-slack-integration @ 55d1366d6cde3962e788afebe0001747cbe11fe8 | ["MIT"] | count 6 | 2018-11-08T03:49:28.000Z – 2019-04-29T19:53:25.000Z
content:
def comment_dialog(data=None):
"""
    Takes in a Slack request payload (JSON object) and builds a dialog using
    the format described at https://api.slack.com/dialogs
    Returns the created JSON object, which is then sent back to Slack.
"""
text = ""
state = ""
project_holder = None
item_holder = None
if data is not None:
if data["type"] == "message_action":
text = data["message"]["text"] + "\n"
            # get attachment images from the message
if "attachments" in data["message"]:
text += "Attachments:\n"
for att in data["message"]["attachments"]:
text += att["title"] + ":\n"
if "image_url" in att:
text += att["image_url"] + "\n"
            # get files from the message
if "files" in data["message"]:
text += "Attach files:\n"
for file in data["message"]["files"]:
text += file["title"] + ":\n"
text += file["url_private"] + "\n"
if data["type"] == "interactive_message":
if data["callback_id"] == "bot_project":
label = data["original_message"]["attachments"][0]["fallback"]
project_holder = [
{
"label": label,
"value": data["actions"][0]["value"]
}
]
state = data["actions"][0]["value"]
elif data["callback_id"] == "bot_item":
label = data["original_message"]["attachments"][0]["fallback"]
item_holder = [
{
"label": label,
"value": data["actions"][0]["value"]
}
]
return {
"title": "JamaConnect - Comment",
"submit_label": "Submit",
"callback_id": "comment",
"elements": [
{
"label": "Search Projects:",
"type": "select",
"name": "project",
"optional": "true",
"data_source": "external",
"selected_options": project_holder
},
{
"label": "Project ID:",
"type": "select",
"name": "project_id",
"optional": "true",
"data_source": "external",
"selected_options": project_holder
},
{
"label": "Item ID or Name:",
"type": "select",
"name": "item",
"data_source": "external",
"min_query_length": 0,
"selected_options": item_holder
},
{
"type": "textarea",
"label": "Comment",
"name": "comment",
"value": text
}
],
"state": state
}
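# --- Added usage sketch (not part of the original module): a minimal, assumed
# message_action payload showing how comment_dialog() folds the message text
# into the dialog's "Comment" textarea. Real Slack payloads carry more fields.
if __name__ == "__main__":
    _sample_payload = {
        "type": "message_action",
        "message": {"text": "Please review this item"},
    }
    _dialog = comment_dialog(_sample_payload)
    assert _dialog["elements"][3]["value"] == "Please review this item\n"
    print(_dialog["title"])  # -> JamaConnect - Comment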
avg_line_length: 34.511628 | max_line_length: 78 | alphanum_fraction: 0.412062
counts/scores: classes 0 / 0 | generators 0 / 0 | decorators 0 / 0 | async_functions 0 / 0 | documentation 1,172 / 0.394879

hexsha: 45168a0a61e3273b57493bda1e9d073423e6c698 | size: 8,105 | ext: py | lang: Python
max_stars: tests/hahm/test_config_flow.py | Voxxie/custom_homematic @ d199f1fcc565febe42e686198a9eb33ef4d755f6 | ["MIT"] | count/dates: null
max_issues: tests/hahm/test_config_flow.py | Voxxie/custom_homematic @ d199f1fcc565febe42e686198a9eb33ef4d755f6 | ["MIT"] | count/dates: null
max_forks: tests/hahm/test_config_flow.py | Voxxie/custom_homematic @ d199f1fcc565febe42e686198a9eb33ef4d755f6 | ["MIT"] | count/dates: null
content:
"""Test the HaHomematic config flow."""
from typing import Any
from unittest.mock import patch
from homeassistant import config_entries
from homeassistant.components.hahm.config_flow import (
ATTR_BICDOS_RF_ENABLED,
ATTR_BICDOS_RF_PORT,
ATTR_HMIP_RF_ENABLED,
ATTR_HOST,
ATTR_HS485D_ENABLED,
ATTR_INSTANCE_NAME,
ATTR_PASSWORD,
ATTR_PORT,
ATTR_TLS,
ATTR_USERNAME,
ATTR_VIRTUAL_DEVICES_ENABLED,
IF_BIDCOS_RF_NAME,
IF_HMIP_RF_NAME,
IF_HS485D_NAME,
IF_VIRTUAL_DEVICES_NAME,
CannotConnect,
InvalidAuth,
)
from homeassistant.components.hahm.const import DOMAIN
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import RESULT_TYPE_CREATE_ENTRY, RESULT_TYPE_FORM
TEST_INSTANCE_NAME = "pytest"
TEST_HOST = "1.1.1.1"
TEST_USERNAME = "test-username"
TEST_PASSWORD = "test-password"
async def test_form(hass: HomeAssistant) -> None:
"""Test we get the form."""
interface = await async_check_form(hass, interface_data={})
if_hmip_rf = interface[IF_HMIP_RF_NAME]
assert if_hmip_rf[ATTR_PORT] == 2010
if_bidcos_rf = interface[IF_BIDCOS_RF_NAME]
assert if_bidcos_rf[ATTR_PORT] == 2001
if_virtual_devices = interface[IF_VIRTUAL_DEVICES_NAME]
assert if_virtual_devices[ATTR_PORT] == 9292
assert interface.get(IF_HS485D_NAME) is None
async def test_form_no_hmip_other_bidcos_port(hass: HomeAssistant) -> None:
"""Test we get the form."""
interface_data = {ATTR_HMIP_RF_ENABLED: False, ATTR_BICDOS_RF_PORT: 5555}
interface = await async_check_form(hass, interface_data=interface_data)
assert interface.get(IF_HMIP_RF_NAME) is None
if_bidcos_rf = interface[IF_BIDCOS_RF_NAME]
assert if_bidcos_rf[ATTR_PORT] == 5555
if_virtual_devices = interface[IF_VIRTUAL_DEVICES_NAME]
assert if_virtual_devices[ATTR_PORT] == 9292
assert interface.get(IF_HS485D_NAME) is None
async def test_form_only_hs485(hass: HomeAssistant) -> None:
"""Test we get the form."""
interface_data = {
ATTR_HMIP_RF_ENABLED: False,
ATTR_BICDOS_RF_ENABLED: False,
ATTR_VIRTUAL_DEVICES_ENABLED: False,
ATTR_HS485D_ENABLED: True,
}
interface = await async_check_form(hass, interface_data=interface_data)
assert interface.get(IF_HMIP_RF_NAME) is None
assert interface.get(IF_BIDCOS_RF_NAME) is None
assert interface.get(IF_VIRTUAL_DEVICES_NAME) is None
if_hs485d = interface[IF_HS485D_NAME]
assert if_hs485d[ATTR_PORT] == 2000
async def test_form_tls(hass: HomeAssistant) -> None:
"""Test we get the form with tls."""
interface = await async_check_form(hass, interface_data={}, tls=True)
if_hmip_rf = interface[IF_HMIP_RF_NAME]
assert if_hmip_rf[ATTR_PORT] == 42010
if_bidcos_rf = interface[IF_BIDCOS_RF_NAME]
assert if_bidcos_rf[ATTR_PORT] == 42001
if_virtual_devices = interface[IF_VIRTUAL_DEVICES_NAME]
assert if_virtual_devices[ATTR_PORT] == 49292
assert interface.get(IF_HS485D_NAME) is None
async def async_check_form(
hass: HomeAssistant, interface_data: dict[str, Any], tls: bool = False
) -> dict[str, Any]:
"""Test we get the form."""
if interface_data is None:
interface_data = {}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] is None
with patch(
"homeassistant.components.hahm.config_flow.validate_input",
return_value=True,
), patch(
"homeassistant.components.hahm.async_setup_entry",
return_value=True,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
ATTR_INSTANCE_NAME: TEST_INSTANCE_NAME,
ATTR_HOST: TEST_HOST,
ATTR_USERNAME: TEST_USERNAME,
ATTR_PASSWORD: TEST_PASSWORD,
ATTR_TLS: tls,
},
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_FORM
assert result2["handler"] == DOMAIN
assert result2["step_id"] == "interface"
flow = next(
flow
for flow in hass.config_entries.flow.async_progress()
if flow["flow_id"] == result["flow_id"]
)
assert flow["context"]["unique_id"] == "pytest"
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"],
interface_data,
)
await hass.async_block_till_done()
assert result3["type"] == RESULT_TYPE_CREATE_ENTRY
assert result3["handler"] == DOMAIN
assert result3["title"] == TEST_INSTANCE_NAME
data = result3["data"]
assert data[ATTR_INSTANCE_NAME] == TEST_INSTANCE_NAME
assert data[ATTR_HOST] == TEST_HOST
assert data[ATTR_USERNAME] == TEST_USERNAME
assert data[ATTR_PASSWORD] == TEST_PASSWORD
return data["interface"]
async def test_form_invalid_auth(hass: HomeAssistant) -> None:
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] is None
with patch(
"homeassistant.components.hahm.config_flow.validate_input",
side_effect=InvalidAuth,
), patch(
"homeassistant.components.hahm.async_setup_entry",
return_value=True,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
ATTR_INSTANCE_NAME: TEST_INSTANCE_NAME,
ATTR_HOST: TEST_HOST,
ATTR_USERNAME: TEST_USERNAME,
ATTR_PASSWORD: TEST_PASSWORD,
},
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_FORM
assert result2["handler"] == DOMAIN
assert result2["step_id"] == "interface"
flow = next(
flow
for flow in hass.config_entries.flow.async_progress()
if flow["flow_id"] == result["flow_id"]
)
assert flow["context"]["unique_id"] == "pytest"
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
await hass.async_block_till_done()
assert result3["type"] == RESULT_TYPE_FORM
assert result3["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass: HomeAssistant) -> None:
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] is None
with patch(
"homeassistant.components.hahm.config_flow.validate_input",
side_effect=CannotConnect,
), patch(
"homeassistant.components.hahm.async_setup_entry",
return_value=True,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
ATTR_INSTANCE_NAME: TEST_INSTANCE_NAME,
ATTR_HOST: TEST_HOST,
ATTR_USERNAME: TEST_USERNAME,
ATTR_PASSWORD: TEST_PASSWORD,
},
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_FORM
assert result2["handler"] == DOMAIN
assert result2["step_id"] == "interface"
flow = next(
flow
for flow in hass.config_entries.flow.async_progress()
if flow["flow_id"] == result["flow_id"]
)
assert flow["context"]["unique_id"] == "pytest"
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
await hass.async_block_till_done()
assert result3["type"] == RESULT_TYPE_FORM
assert result3["errors"] == {"base": "cannot_connect"}
avg_line_length: 33.217213 | max_line_length: 84 | alphanum_fraction: 0.664405
counts/scores: classes 0 / 0 | generators 0 / 0 | decorators 0 / 0 | async_functions 7,214 / 0.890068 | documentation 1,099 / 0.135595

hexsha: 451695e3856e2d5dd4a42abbf9ad2c012826eaed | size: 792 | ext: py | lang: Python
max_stars: komapy/decorators.py | bpptkg/komapy @ a33fce5f4fbfacf085fd1f8043a57564be192a8d | ["MIT"] | count/dates: null
max_issues: komapy/decorators.py | bpptkg/komapy @ a33fce5f4fbfacf085fd1f8043a57564be192a8d | ["MIT"] | count/dates: null
max_forks: komapy/decorators.py | bpptkg/komapy @ a33fce5f4fbfacf085fd1f8043a57564be192a8d | ["MIT"] | count/dates: null
content:
from functools import partial
class counter:
"""
A counter decorator to track how many times a function is called.
"""
def __init__(self, func):
self.func = func
self.count = 0
def __call__(self, *args, **kwargs):
self.count += 1
return self.func(*args, **kwargs)
def register_as_decorator(func):
"""
Register extensions, transforms, or addons function as decorator.
"""
def wrapper(*args, **kwargs):
# If argument length < 2, user just provides function name without its
# resolver. So return partial function. Otherwise, return original
# function.
if len(args) < 2:
return partial(func, *args, **kwargs)
return partial(func, *args, **kwargs)()
return wrapper
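# --- Added usage sketch (not part of the original module): the counter
# decorator wraps a callable and tracks how many times it has been invoked.
# greet() below is a hypothetical function used only for illustration.
if __name__ == "__main__":
    @counter
    def greet():
        return "hello"
    greet()
    greet()
    assert greet.count == 2  # two calls were recorded by the decorator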
avg_line_length: 26.4 | max_line_length: 78 | alphanum_fraction: 0.616162
counts/scores: classes 287 / 0.362374 | generators 0 / 0 | decorators 0 / 0 | async_functions 0 / 0 | documentation 309 / 0.390152

hexsha: 4516fa710b28e684423724f2bca16759c34404c0 | size: 5,883 | ext: py | lang: Python
max_stars: Applications/Examples/python/market_price_authentication.py | Refinitiv/websocket-api @ 15a5957510d2bb246cbbf65ed999ff0089b3a65d | ["Apache-2.0"] | count 36 | 2019-01-08T17:43:38.000Z – 2022-03-11T21:59:58.000Z
max_issues: Applications/Examples/python/market_price_authentication.py | thomsonreuters/websocket-api @ 52c940a01d40a6c073d35922d8214d927327caa4 | ["Apache-2.0"] | count 14 | 2019-12-27T15:58:12.000Z – 2021-11-03T21:39:27.000Z
max_forks: Applications/Examples/python/market_price_authentication.py | thomsonreuters/websocket-api @ 52c940a01d40a6c073d35922d8214d927327caa4 | ["Apache-2.0"] | count 28 | 2019-01-22T21:43:15.000Z – 2022-03-29T11:43:05.000Z
content:
#!/usr/bin/env python
#|-----------------------------------------------------------------------------
#| This source code is provided under the Apache 2.0 license --
#| and is provided AS IS with no warranty or guarantee of fit for purpose. --
#| See the project's LICENSE.md for details. --
#| Copyright (C) 2017-2020 Refinitiv. All rights reserved. --
#|-----------------------------------------------------------------------------
""" Simple example of outputting Market Price JSON data using Websockets with authentication """
import sys
import time
import getopt
import requests
import socket
import json
import websocket
import threading
from threading import Thread, Event
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# Global Default Variables
app_id = '555'
auth_hostname = '127.0.0.1'
auth_port = '8443'
hostname = '127.0.0.1'
password = ''
position = socket.gethostbyname(socket.gethostname())
token = ''
user = ''
port = '15000'
# Global Variables
web_socket_app = None
web_socket_open = False
def process_message(ws, message_json):
""" Parse at high level and output JSON of message """
message_type = message_json['Type']
if message_type == "Refresh":
if 'Domain' in message_json:
message_domain = message_json['Domain']
if message_domain == "Login":
process_login_response(ws, message_json)
elif message_type == "Ping":
pong_json = { 'Type':'Pong' }
ws.send(json.dumps(pong_json))
print("SENT:")
print(json.dumps(pong_json, sort_keys=True, indent=2, separators=(',', ':')))
def process_login_response(ws, message_json):
""" Send item request """
send_market_price_request(ws)
def send_market_price_request(ws):
""" Create and send simple Market Price request """
mp_req_json = {
'ID': 2,
'Key': {
'Name': 'TRI.N',
},
}
ws.send(json.dumps(mp_req_json))
print("SENT:")
print(json.dumps(mp_req_json, sort_keys=True, indent=2, separators=(',', ':')))
def on_message(ws, message):
""" Called when message received, parse message into JSON for processing """
print("RECEIVED: ")
message_json = json.loads(message)
print(json.dumps(message_json, sort_keys=True, indent=2, separators=(',', ':')))
for singleMsg in message_json:
process_message(ws, singleMsg)
def on_error(ws, error):
""" Called when websocket error has occurred """
print(error)
def on_close(ws, close_status_code, close_msg):
""" Called when websocket is closed """
global web_socket_open
web_socket_open = False
print("WebSocket Closed")
def on_open(ws):
""" Called when handshake is complete and websocket is open, send login """
print("WebSocket successfully connected!")
global web_socket_open
web_socket_open = True
if __name__ == "__main__":
# Get command line parameters
try:
opts, args = getopt.getopt(sys.argv[1:], "", ["help", "hostname=", "port=", "app_id=", "user=", "password=", "position=", "auth_hostname=", "auth_port="])
except getopt.GetoptError:
print('Usage: market_price_authentication.py [--hostname hostname] [--port port] [--app_id app_id] [--user user] [--password password] [--position position] [--auth_hostname auth_hostname] [--auth_port auth_port] [--help]')
sys.exit(2)
for opt, arg in opts:
if opt in ("--help"):
print('Usage: market_price_authentication.py [--hostname hostname] [--port port] [--app_id app_id] [--user user] [--password password] [--position position] [--auth_hostname auth_hostname] [--auth_port auth_port] [--help]')
sys.exit(0)
elif opt in ("--hostname"):
hostname = arg
elif opt in ("--port"):
port = arg
elif opt in ("--app_id"):
app_id = arg
elif opt in ("--user"):
user = arg
elif opt in ("--password"):
password = arg
elif opt in ("--position"):
position = arg
elif opt in ("--auth_hostname"):
auth_hostname = arg
elif opt in ("--auth_port"):
auth_port = arg
# Send login info for authentication token
print("Sending authentication request...")
r = requests.post('https://{}:{}/getToken'.format(auth_hostname, auth_port),
data={'username': user, 'password': password},
verify=True)
auth_json = r.json()
print("RECEIVED:")
print(json.dumps(auth_json, sort_keys=True, indent=2, separators=(',', ':')))
if auth_json['success'] is True:
token = r.cookies['AuthToken']
print('Authentication Succeeded. Received AuthToken: {}'.format(token))
cookie = "AuthToken={};AuthPosition={};applicationId={};".format(token, position, app_id)
# Start websocket handshake
ws_address = "ws://{}:{}/WebSocket".format(hostname, port)
print("Connecting to WebSocket " + ws_address + " ...")
web_socket_app = websocket.WebSocketApp(ws_address, on_message=on_message,
on_error=on_error,
on_close=on_close,
subprotocols=['tr_json2'],
cookie=cookie)
web_socket_app.on_open = on_open
# Event loop
wst = threading.Thread(target=web_socket_app.run_forever)
wst.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
web_socket_app.close()
else:
print('Authentication failed')
avg_line_length: 34.810651 | max_line_length: 235 | alphanum_fraction: 0.590345
counts/scores: classes 0 / 0 | generators 0 / 0 | decorators 0 / 0 | async_functions 0 / 0 | documentation 2,269 / 0.385688

hexsha: 4517ac136f86ccb5533a40509e2b215d308bd04d | size: 571 | ext: py | lang: Python
max_stars: cardDao.py | Blueredemption/Inventory @ 8d61671071f89b51b3e34c5eb673200fc8baffc0 | ["MIT"] | count/dates: null
max_issues: cardDao.py | Blueredemption/Inventory @ 8d61671071f89b51b3e34c5eb673200fc8baffc0 | ["MIT"] | count/dates: null
max_forks: cardDao.py | Blueredemption/Inventory @ 8d61671071f89b51b3e34c5eb673200fc8baffc0 | ["MIT"] | count/dates: null
content:
#!/usr/bin/python3
class CardDao():
def __init__(self): # constructor
super().__init__()
self.create()
        self.read()
self.update()
self.delete()
self.populate()
def create(self): # there will be create for cards
print('Create')
    def read(self):  # renamed from 'return', a reserved word that made the original a syntax error
        print('Read')
def update(self):
print('Update')
def delete(self):
print('Delete')
def populate(self):
print('Populate')
def main():
run = CardDao()
if __name__ == '__main__':
main()
avg_line_length: 16.794118 | max_line_length: 54 | alphanum_fraction: 0.528897
counts/scores: classes 473 / 0.828371 | generators 0 / 0 | decorators 0 / 0 | async_functions 0 / 0 | documentation 113 / 0.197898

hexsha: 451838fb8b3acc8747399824b9d60c1c29d67e5c | size: 3,416 | ext: py | lang: Python
max_stars: test_kmethods.py | quinlan-lab/kmertools @ 93e90919c26e2fc899a905b77748857404389e13 | ["MIT"] | count 1 | 2020-08-25T01:35:38.000Z – 2020-08-25T01:35:38.000Z
max_issues: test_kmethods.py | quinlan-lab/kmertools @ 93e90919c26e2fc899a905b77748857404389e13 | ["MIT"] | count/dates: null
max_forks: test_kmethods.py | quinlan-lab/kmertools @ 93e90919c26e2fc899a905b77748857404389e13 | ["MIT"] | count 1 | 2021-07-13T23:21:56.000Z – 2021-07-13T23:21:56.000Z
content:
from unittest import TestCase
from eskedit.kmethods import *
class Test(TestCase):
def test_generate_kmers(self):
print('Testing %s' % 'test_generate_kmers')
for i in range(1, 8):
self.assertEqual(len(generate_kmers(i)), 4 ** i)
def test_gen_random_sequence(self):
print('Testing %s' % 'test_gen_random_sequence')
self.assertTrue(len(gen_random_sequence(7)) == 7)
self.assertTrue(True)
def test_ref_genome_as_string(self):
print('Testing %s' % 'test_ref_genome_as_string')
self.assertTrue(True)
def test_complement(self):
print("Testing %s" % "test_complement")
self.assertTrue(True)
def test_get_complementary_sequence(self):
print("Testing %s" % "test_get_complementary_sequence")
self.assertTrue(True)
def test_is_quality_snv(self):
print("Testing %s" % "test_is_quality_snv")
self.assertTrue(True)
def test_is_quality_nonsingleton(self):
print("Testing %s" % "test_is_quality_nonsingleton")
self.assertTrue(True)
def test_is_quality_singleton(self):
print("Testing %s" % "test_is_quality_singleton")
self.assertTrue(True)
def test_is_singleton_snv(self):
print("Testing %s" % "test_is_singleton_snv")
self.assertTrue(True)
def test_complete_sequence(self):
print("Testing %s" % "test_complete_sequence")
self.assertTrue(True)
def test_kmer_search(self):
print("Testing %s" % "test_kmer_search")
self.assertTrue(True)
def test_get_vcf_info_fields(self):
print("Testing %s" % "test_get_vcf_info_fields")
self.assertTrue(True)
def test_get_kmer_count(self):
print("Testing %s" % "test_get_kmer_count")
self.assertTrue(True)
def test_merge_transitions_ddc(self):
print("Testing %s" % "test_merge_transitions_ddc")
self.assertTrue(True)
def test_merge_positions_dd(self):
print("Testing %s" % "test_merge_positions_dd")
self.assertTrue(True)
def test_combine_df_kmer_indices(self):
print("Testing %s" % "test_combine_df_kmer_indices")
self.assertTrue(True)
def test_clean_counts_df(self):
print("Testing %s" % "test_clean_counts_df")
self.assertTrue(True)
def test_file_len(self):
print("Testing %s" % "test_file_len")
self.assertTrue(True)
def test_get_counts_from_file(self):
print("Testing %s" % "test_get_counts_from_file")
self.assertTrue(True)
def test_get_counts_dict(self):
print("Testing %s" % "test_get_counts_dict")
self.assertTrue(True)
def test_count_regional_variants(self):
print("Testing %s" % "test_count_regional_variants")
self.assertTrue(True)
def test_count_regional_af(self):
print("Testing %s" % "test_count_regional_af")
self.assertTrue(True)
def test_query_bed_region(self):
print("Testing %s" % "test_query_bed_region")
self.assertTrue(True)
def test_check_bed_regions(self):
print("Testing %s" % "test_check_bed_regions")
self.assertTrue(True)
def test_is_dash(self):
print("Testing %s" % "test_is_dash")
self.assertTrue(True)
def test_check_clinvar(self):
print("Testing %s" % "test_check_clinvar")
self.assertTrue(True)
avg_line_length: 30.5 | max_line_length: 63 | alphanum_fraction: 0.660422
counts/scores: classes 3,352 / 0.981265 | generators 0 / 0 | decorators 0 / 0 | async_functions 0 / 0 | documentation 930 / 0.272248

hexsha: 45187c05fe7efccfb6cd2366904c5d7b0e9849c8 | size: 8,747 | ext: py | lang: Python
max_stars: distributed_social_network/users/views.py | leevtori/CMPUT404-project @ 52214288855ae4b3f05b8d17e67a2686debffb19 | ["Apache-2.0"] | count/dates: null
max_issues: distributed_social_network/users/views.py | leevtori/CMPUT404-project @ 52214288855ae4b3f05b8d17e67a2686debffb19 | ["Apache-2.0"] | count 51 | 2019-03-22T00:31:06.000Z – 2021-06-10T21:17:30.000Z
max_forks: distributed_social_network/users/views.py | leevtori/CMPUT404-project @ 52214288855ae4b3f05b8d17e67a2686debffb19 | ["Apache-2.0"] | count 1 | 2019-08-03T14:41:22.000Z – 2019-08-03T14:41:22.000Z
content:
from django.urls import reverse_lazy, reverse
from django.shortcuts import get_object_or_404, render, HttpResponse, HttpResponseRedirect
from requests.auth import HTTPBasicAuth
from .models import User, Node
from .forms import CustomUserCreationForm, UserCreationForm
from django.views.generic import ListView
from django.views.generic.edit import UpdateView
from django.views import View
from django.views import generic
import requests
from users.serializers import *
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib.auth.mixins import LoginRequiredMixin
import json
class UserList(LoginRequiredMixin, ListView):
"""Lists all users on the server."""
model = User
template_name = "user_list.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['login_user'] = self.request.user
context['friends'] = self.request.user.friends.all()
context['followers'] = self.request.user.followers.all()
context['following'] = self.request.user.following.all()
context['incomingFriendRequest'] = self.request.user.incomingRequests.all()
context['sendFriendRequest'] = self.request.user.outgoingRequests.all()
return context
def get_queryset(self):
qs = super().get_queryset()
qs = qs.filter(is_active=True).order_by("username")
n = Node.objects.all().values_list('user_auth', flat=True)
qs = qs.exclude(id__in=n)
return qs
class FriendList(LoginRequiredMixin, ListView):
"""This view lists all friends of logged in user."""
model = User
template_name = "friends_list.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
user = get_object_or_404(User, username=self.kwargs['username'])
context['following'] = user.following.all()
return context
def get_queryset(self):
qs = super().get_queryset()
user = get_object_or_404(User, username=self.kwargs['username'])
return user.friends.all()
class FollowerList(LoginRequiredMixin, ListView):
"""This view lists all the followers of logged in user. """
model = User
template_name = "followers_list.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
user = get_object_or_404(User, username=self.kwargs['username'])
context['friends'] = user.friends.all()
context['following'] = user.following.all()
return context
def get_queryset(self):
qs = super().get_queryset()
user = get_object_or_404(User, username=self.kwargs['username'])
return user.followers.all()
class FollowingList(LoginRequiredMixin, ListView):
"""This view lists all the followers of logged in user. """
model = User
template_name = "following_list.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
user = get_object_or_404(User, username=self.kwargs['username'])
context['friends'] = user.friends.all()
context['following'] = user.following.all()
return context
def get_queryset(self):
qs = super().get_queryset()
user = get_object_or_404(User, username=self.kwargs['username'])
return user.following.all()
class SendFriendRequest(LoginRequiredMixin, View):
def post(self, request):
body_unicode = self.request.body.decode('utf-8')
body = json.loads(body_unicode)
friend_id = body['id']
# print("friend_id ", friend_id)
friend = get_object_or_404(User, id=friend_id)
#friend is on our host
print(str(friend.host))
if(friend.host is None):
print('local')
friend.incomingRequests.add(self.request.user)
self.request.user.outgoingRequests.add(friend)
friend.followers.add(self.request.user)
self.request.user.following.add(friend)
return HttpResponse(200)
#friend is on another host
else:
friend_host = get_object_or_404(Node, hostname=friend.host.hostname)
link = str(friend_host)+'friendrequest'
print("LINK ", link)
validated_friend=FriendRequestUsers(friend)
validated_user=FriendRequestUsers(self.request.user)
friend.incomingRequests.add(self.request.user)
self.request.user.outgoingRequests.add(friend)
friend.followers.add(self.request.user)
self.request.user.following.add(friend)
returnDict = dict()
returnDict['query'] = 'friendrequest'
returnDict['author']=validated_user.data
returnDict['friend']=validated_friend.data
print(json.dumps(returnDict))
friend_request = requests.post(link,
auth=HTTPBasicAuth(friend_host.send_username,friend_host.send_password),
headers={"Content-type":"application/json"},
data=json.dumps(returnDict)
)
print("CODE", friend_request.status_code)
return HttpResponse(200)
class ConfirmFriendRequest(LoginRequiredMixin, View):
    def post(self, request):
body_unicode = self.request.body.decode('utf-8')
body = json.loads(body_unicode)
friend_id = body['id']
friend = get_object_or_404(User, id=friend_id)
if friend in self.request.user.incomingRequests.all():
self.request.user.friends.add(friend)
friend.followers.add(self.request.user)
self.request.user.following.add(friend)
friend.outgoingRequests.remove(self.request.user)
self.request.user.incomingRequests.remove(friend)
return HttpResponse(status=200)
return HttpResponse(status=404)
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = User
fields = ('first_name', 'last_name', 'email') + UserCreationForm.Meta.fields
class SignUp(generic.CreateView):
form_class = CustomUserCreationForm
success_url = reverse_lazy('login')
template_name = 'signup.html'
success_message = "Congratulations, you've successfully signed up! Wait to be approved."
class DeleteFriend(LoginRequiredMixin, View):
model = User
def delete(self, request):
body_unicode = self.request.body.decode('utf-8')
body = json.loads(body_unicode)
friend_id = body['id']
friend = get_object_or_404(User, id=friend_id)
if friend:
self.request.user.friends.remove(friend_id)
context = {'object_list': self.request.user.friends.all()}
return render(request, 'friends_list.html', context)
class AccountSettingsView(LoginRequiredMixin, UpdateView):
model = User
fields = ['display_name', 'github', 'bio', 'is_active']
template_name = 'account_settings.html'
def get_object(self):
return self.request.user
def get_success_url(self):
return reverse('profile', kwargs={'username': self.request.user.username})
class FriendRequests(LoginRequiredMixin, ListView):
"""This view lists all the pending friend requests. """
model = User
template_name = 'pending_friend_requests.html'
def get_queryset(self):
q = self.request.user.incomingRequests.all()
return q
class Unfollow(LoginRequiredMixin, View):
model = User
def post(self, request):
body_unicode = self.request.body.decode('utf-8')
body = json.loads(body_unicode)
friend_id = body['id']
friend = get_object_or_404(User, id=friend_id)
friend.followers.remove(self.request.user.id)
self.request.user.following.remove(friend)
context = {'friends_list': self.request.user.friends.all(),
'following_list': self.request.user.following.all()
}
return render(request, 'friends_list.html', context)
class Follow(LoginRequiredMixin, View):
model = User
def post(self, request):
body_unicode = self.request.body.decode('utf-8')
body = json.loads(body_unicode)
friend_id = body['id']
friend = get_object_or_404(User, id=friend_id)
friend.followers.add(self.request.user)
self.request.user.following.add(friend)
context = {'friend_list': self.request.user.friends.all(),
'following_list': self.request.user.following.all()
}
return render(request, 'friends_list.html', context)
avg_line_length: 36.598326 | max_line_length: 115 | alphanum_fraction: 0.658169
counts/scores: classes 8,106 / 0.926718 | generators 0 / 0 | decorators 0 / 0 | async_functions 0 / 0 | documentation 1,154 / 0.131931

hexsha: 4519b87ff604566c931c34b6c30b719a965b014c | size: 1,569 | ext: py | lang: Python
max_stars: util.py | reckenrode/ParanoiaCharGen @ 07aa5a5cf1699ae47edab95ee5490b2f1d48c501 | ["BSD-2-Clause"] | count 1 | 2021-12-20T00:03:34.000Z – 2021-12-20T00:03:34.000Z
max_issues: util.py | reckenrode/ParanoiaCharGen @ 07aa5a5cf1699ae47edab95ee5490b2f1d48c501 | ["BSD-2-Clause"] | count/dates: null
max_forks: util.py | reckenrode/ParanoiaCharGen @ 07aa5a5cf1699ae47edab95ee5490b2f1d48c501 | ["BSD-2-Clause"] | count/dates: null
content:
#from weakref import WeakValueDictionary
import random, operator, weakref
def format_service_group(group):
"""pretty prints the group"""
rstr = '%s [%s]'
if group.cover != None: # Spy for IntSec
return rstr % (group.cover, group.cover.firm)
elif group.spyon != None:
return rstr % (group.spyon, group.spyon.firm)
else:
return rstr % (group, group.firm)
def format_society(society):
rstr = '%s (degree: %s)'
if society.cover != None:
return rstr % (society.cover, society.cover.degree)
elif society.spyon != None:
return rstr % (society.spyon.name, society.spyon.degree)
else:
return rstr % (society.name, society.degree)
def format_power(char):
rstr = '%s'
if char.registered:
rstr += ' [registered]'
return rstr % char.power
def build_skill_table(skill):
"""makes an nx2 table of the skill's specs where n = len(skill.specs)"""
table = [[spec, skill[spec]] for spec in skill]
table.sort(lambda x, y: cmp(x[0], y[0]))
if 'Energy Weapons' not in skill:
table.append(['________________________', '__'])
table.append(['________________________', '__'])
table.append(['________________________', '__'])
table.append(['________________________', '__'])
return table
class tag(int): pass
class weightedchoice(object):
__slots__ = ['cache']
cache = {}
def __new__(cls, lst):
lid = id(lst)
try:
return random.choice(weightedchoice.cache[lid])
except KeyError:
weightedchoice.cache[lid] = reduce(operator.add, [[item for n in xrange(weight)] for weight, item in lst])
return random.choice(weightedchoice.cache[lid])
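# --- Added usage sketch (not part of the original module): judging from the
# reduce() call above, weightedchoice expects a list of (weight, item) pairs
# and returns an item with probability proportional to its weight. The list
# below is hypothetical; like the rest of this module it assumes Python 2
# (cmp, xrange and reduce are builtins there).
if __name__ == '__main__':
    example_weights = [(3, 'common result'), (1, 'rare result')]
    print(weightedchoice(example_weights))  # 'common result' about 75% of the time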
avg_line_length: 30.173077 | max_line_length: 109 | alphanum_fraction: 0.702358
counts/scores: classes 364 / 0.231995 | generators 0 / 0 | decorators 0 / 0 | async_functions 0 / 0 | documentation 345 / 0.219885

hexsha: 451a01d6bf880434d082fec4bb6d94642deb72ee | size: 2,195 | ext: py | lang: Python
max_stars: moex/tests/test_service.py | ChanTerelLy/broker-account-analist @ a723c83fe9a924905eb0754b4acb1231b31f9c87 | ["MIT"] | count/dates: null
max_issues: moex/tests/test_service.py | ChanTerelLy/broker-account-analist @ a723c83fe9a924905eb0754b4acb1231b31f9c87 | ["MIT"] | count 11 | 2021-02-21T19:39:41.000Z – 2021-06-13T16:29:47.000Z
max_forks: moex/tests/test_service.py | ChanTerelLy/broker-account-analist @ a723c83fe9a924905eb0754b4acb1231b31f9c87 | ["MIT"] | count 2 | 2021-11-16T16:31:37.000Z – 2022-02-11T02:55:37.000Z
content:
import asyncio
import unittest
from moex.service import Cbr, Moex
class CbrTest(unittest.TestCase):
def setUp(self) -> None:
self.cbr = Cbr('01.01.2021')
def test_usd(self):
self.assertEqual(self.cbr.USD, 73.88)
def test_euro(self):
self.assertEqual(self.cbr.EUR, 90.79)
class MoexTest(unittest.TestCase):
def setUp(self) -> None:
self.loop = asyncio.get_event_loop()
def test_not_contain_empty_list(self):
data = self.loop.run_until_complete(Moex().get_shares('etf', ['RU000A100JH0', 'RU0009029540', 'IE00BD3QFB18']))
for d in data:
self.assertTrue(d)
def test_bonds_tsqb(self):
data = self.loop.run_until_complete(Moex().get_shares('bonds', ['RU000A100JH0']))
self.assertEqual(data[0][0], 'RU000A100JH0')
self.assertEqual(data[0][1], 'RU000A100JH0')
def test_bonds_tqir(self):
data = self.loop.run_until_complete(Moex().get_shares('bonds', ['RU000A1015P6']))
self.assertEqual(data[0][0], 'RU000A1015P6')
self.assertEqual(data[0][1], 'RU000A1015P6')
def test_etf(self):
data = self.loop.run_until_complete(Moex().get_shares('etf', ['IE00BD3QFB18', 'US0231351067']))
self.assertEqual(data[0][0], 'IE00BD3QFB18')
self.assertEqual(data[0][1], 'FXCN')
def test_foreignshares(self):
data = self.loop.run_until_complete(Moex().get_shares('foreignshares', ['US0231351067']))
self.assertEqual(data[0][0], 'US0231351067')
self.assertEqual(data[0][1], 'AMZN-RM')
def test_shares(self):
data = self.loop.run_until_complete(Moex().get_shares('shares', ['RU0009029540']))
self.assertEqual(data[0][0], 'RU0009029540')
self.assertEqual(data[0][1], 'SBER')
def test_coupons_tsqb(self):
data = self.loop.run_until_complete(Moex().get_coupon_by_isins(['RU000A100JH0']))
self.assertEqual(data[0][0]['isin'], 'RU000A100JH0')
def test_coupons_tqir(self):
data = self.loop.run_until_complete(Moex().get_coupon_by_isins(['RU000A1015P6']))
self.assertEqual(data[0][0]['isin'], 'RU000A1015P6')
if __name__ == '__main__':
unittest.main()
avg_line_length: 34.296875 | max_line_length: 119 | alphanum_fraction: 0.655125
counts/scores: classes 2,075 / 0.94533 | generators 0 / 0 | decorators 0 / 0 | async_functions 0 / 0 | documentation 382 / 0.174032

hexsha: 451aa8ccde2d865dd652ad209fefdf68afe0ad46 | size: 2,820 | ext: py | lang: Python
max_stars: streamlit_app.py | guim4dev/education-cv @ ffd880090de28e36849b4d53c424c2009791aaf5 | ["MIT"] | count/dates: null
max_issues: streamlit_app.py | guim4dev/education-cv @ ffd880090de28e36849b4d53c424c2009791aaf5 | ["MIT"] | count/dates: null
max_forks: streamlit_app.py | guim4dev/education-cv @ ffd880090de28e36849b4d53c424c2009791aaf5 | ["MIT"] | count/dates: null
content:
import streamlit as st
import pandas as pd
import numpy as np
import plotly.express as px
st.title("Relatório de Aula")
df = pd.read_csv('data/emocoes.csv')
agg = pd.read_csv('data/agg.csv')
Engajado = df[df['emocao'] == 'Engajado']
Engajado_agg = Engajado.groupby(['emocao', 'pessoa']).size().reset_index(name='size')
Engajado_agg = Engajado_agg.sort_values(by=['size'], ascending=False)
emotions_count = df.value_counts('emocao').reset_index()
def is_authenticated(password):
return password == "182916f6-756d-40d6-95fc-3283ba5efdf8"
def generate_time_agg_graph():
fig = px.line(agg, x="tempo", y="size", labels= { 'tempo': 'tempo (s)',
'size': 'número de alunos' }, color='emocao', title='Emoções ao longo do tempo')
st.plotly_chart(fig, use_container_width=True)
def generate_top_students():
st.markdown('<br/>', unsafe_allow_html=True)
    st.markdown("<center style='font-size:2em'>Alunos Mais Engajados</center>", unsafe_allow_html=True)
top_three = Engajado_agg.head(3).to_numpy()
for row in top_three:
st.markdown(f"<center><span style='color:#00FF00;font-size:1.5em'>{row[1]}</span></center>", unsafe_allow_html=True)
st.markdown('<br/>', unsafe_allow_html=True)
def generate_bottom_students():
st.markdown("<center style='font-size:2em'>Alunos Menos Engajados</center>", unsafe_allow_html=True)
bottom_three = np.flip(Engajado_agg.tail(3).to_numpy(), 0)
for row in bottom_three:
st.write(f"<center><span style='color:red;font-size:1.5em'>{row[1]}</span></center>", unsafe_allow_html=True)
st.markdown('<br/> <br/>', unsafe_allow_html=True)
def generate_emotions_pizza():
fig = px.pie(emotions_count, values=emotions_count.index, names='emocao', title='Predominância de Emoções')
st.plotly_chart(fig, use_container_width=True)
def generate_login_block():
block1 = st.empty()
block2 = st.empty()
return block1, block2
def clean_blocks(blocks):
for block in blocks:
block.empty()
def graph_columns():
generate_time_agg_graph()
generate_top_students()
generate_bottom_students()
generate_emotions_pizza()
def login(blocks):
return blocks[1].text_input('ID da Aula')
login_blocks = generate_login_block()
password = login(login_blocks)
drive_block = st.empty()
google_drive = drive_block.text_input('Link da aula para processamento', '')
id_block = st.empty()
if google_drive != '':
drive_block.empty()
id_block.text("ID da Aula processada: 182916f6-756d-40d6-95fc-3283ba5efdf8")
if is_authenticated(password):
id_block.empty()
drive_block.empty()
clean_blocks(login_blocks)
st.balloons()
graph_columns()
elif password:
st.info("Aula não encontrada. Por favor, insira um ID válido.")
avg_line_length: 32.413793 | max_line_length: 134 | alphanum_fraction: 0.699291
counts/scores: classes 0 / 0 | generators 0 / 0 | decorators 0 / 0 | async_functions 0 / 0 | documentation 747 / 0.264051

hexsha: 451af08e7abf6ac7cd2e6c4f9832ea860419a281 | size: 4,410 | ext: py | lang: Python
max_stars: other_nominate/register_other_nominate.py | kondounagi/japanese_movies_dataset @ 349f217cd04e07fd44a401ecb2f2dcaea7bc2e5e | ["MIT"] | count 1 | 2019-08-05T21:43:09.000Z – 2019-08-05T21:43:09.000Z
max_issues: other_nominate/register_other_nominate.py | kondounagi/japanese_movies_dataset @ 349f217cd04e07fd44a401ecb2f2dcaea7bc2e5e | ["MIT"] | count 3 | 2020-03-31T05:53:37.000Z – 2021-12-13T20:07:39.000Z
max_forks: other_nominate/register_other_nominate.py | kondounagi/japanese_movies_dataset @ 349f217cd04e07fd44a401ecb2f2dcaea7bc2e5e | ["MIT"] | count/dates: null
content:
import argparse
import json
import os
from os import listdir
from os.path import isfile, join
class RegisterOtherNominate:
# Register the prize winners of each award to the formatting style.
def __init__(self):
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--directory",
default="movies_other_nominate",
help="path of the json directory",
type=str)
parser.add_argument("-j", "--jsonfile",
default="annual_other_nominate_data.json",
help="path of the other nominate json data",
type=str)
self.args = parser.parse_args()
self.key = 'other_nominate'
self.output = []
self.years = range(1977, 2020)
def __call__(self, *args, **kwargs):
self.files = self.create_files_list()
self.modify_index()
self.dump_data()
def create_files_list(self):
extension = '.json'
files = [int(f.rstrip(extension)) for f in listdir(self.args.directory)
if isfile(join(self.args.directory, f))]
files.sort()
return [self.args.directory + '/' + str(f) + extension for f in files]
def _filter_by_year(self, lst, year):
for elm in lst:
if elm['year'] == year:
yield elm
def modify_index(self):
with open(self.args.jsonfile, 'r') as jsonfile:
other_nominate = json.load(jsonfile)
# OPTIMIZE: this nests too deep ...
for year in self.years:
current = list(self._filter_by_year(other_nominate, year))
if not current:
continue
add_data = current[0]
movielist = '../{}_movie_clean'.format(year)
year_data = []
for prize in add_data['prize_winners']:
if os.path.exists(movielist):
with open(movielist) as f:
for movie in f:
index, title = movie.split('\t')[0:2]
index = int(index)
if title == prize['work']['title']:
add_prize = prize
add_prize['work']['index'] = index
year_data.append(add_prize)
break
else:
year_data.append(prize)
add_data['prize_winners'] = year_data
self.output.append(add_data)
with open(self.args.jsonfile, 'w') as jsonfile:
json.dump(self.output, jsonfile,
ensure_ascii=False,
indent=4,
separators=(',', ':'))
jsonfile.write('\n')
def dump_data(self):
for year in self.years:
movielist = '../{}_movie_clean'.format(year)
if os.path.exists(movielist):
with open(movielist) as f:
for movie in f:
nominates = []
index, title = movie.split('\t')[0:2]
index = int(index)
file_name = ('movies_other_nominate/{year}/{index}.json'
.format(year=year, index=index))
for award in self._filter_by_year(self.output, year):
for winner in award['prize_winners']:
result = {}
i = winner['work']['index']
if index == i:
nominates.append({
'nominate_name': winner['award'],
})
result['title'] = title
result['other_nominate'] = nominates
with open(file_name, 'w') as wf:
json.dump(result, wf,
ensure_ascii=False,
indent=4,
separators=(',', ':'))
wf.write('\n')
def main():
register_other_nominate = RegisterOtherNominate()
register_other_nominate()
if __name__ == '__main__':
main()
avg_line_length: 35.28 | max_line_length: 80 | alphanum_fraction: 0.460544
counts/scores: classes 4,175 / 0.946712 | generators 123 / 0.027891 | decorators 0 / 0 | async_functions 0 / 0 | documentation 546 / 0.12381

hexsha: 451b7210c711e56db4043ee4381d442e6b1a9d25 | size: 5,477 | ext: py | lang: Python
max_stars: PC6/table.py | ReneCapella/pythonTinkering @ 93a5080e8ef6f67fe8ca0b67eb1fb27370beb26a | ["MIT"] | count/dates: null
max_issues: PC6/table.py | ReneCapella/pythonTinkering @ 93a5080e8ef6f67fe8ca0b67eb1fb27370beb26a | ["MIT"] | count/dates: null
max_forks: PC6/table.py | ReneCapella/pythonTinkering @ 93a5080e8ef6f67fe8ca0b67eb1fb27370beb26a | ["MIT"] | count/dates: null
content:
class Table:
# Constructor
    # Defaults row and col to 0 if less than 0
def __init__(self, col_count, row_count, headers = [], border_size = 0):
self.col_count = col_count if col_count >= 0 else 0
self.row_count = row_count if row_count >= 0 else 0
self.border_size = border_size if border_size > 0 else 0
self.headers = headers
# Getters
def get_row_count(self):
return self.row_count
def get_border_size(self):
return self.border_size
def get_col_count(self):
return self.col_count
def get_headers(self):
return self.headers
# Setters
def set_row_count(self, count):
self.row_count = count
def set_border_size(self, new_border_size):
# Pre-Condition: must be between 0 and 5
        if new_border_size > 5 or new_border_size < 0:
raise Exception("Border size must be a number between 0 and 5 inclusively")
self.border_size = new_border_size
def set_headers(self, headers):
# Pre-condition: headers length must be equal to column count
if len(headers) != self.col_count:
raise Exception("Headers amount must be the same as column count")
self.headers = headers
# Mutators
def add_rows(self, count):
# Pre-Condition: count to add must be greater than 0
if count < 1:
raise Exception("Number of rows to add must be greater than 0")
self.row_count += count
def delete_rows(self, count):
# Pre-Condition: count to remove must be greater than 0
if count < 1:
raise Exception("Number of rows to delete must be greater than 0")
new_total = self.row_count - count
self.row_count = new_total if count < self.row_count else 0
def add_cols(self, col_amt_to_add, headers = []):
if len(headers) > 0:
if len(headers) != col_amt_to_add:
raise Exception("Headers amount must be the same as column count to add")
self.add_headers(headers)
else:
if len(self.headers) > 0:
raise Exception("Please send through desired header names for columns")
self.col_count += col_amt_to_add
def delete_cols(self, col_amt_to_delete, headers = []):
if len(headers) > 0:
if len(headers) != col_amt_to_delete:
raise Exception("Headers amount must be the same as column count to delete")
self.delete_headers(headers)
else:
if len(self.headers) > 0:
raise Exception("Please send through desired header names for columns removal")
self.col_count -= col_amt_to_delete
def add_headers(self, headers):
self.headers = self.headers + headers
# Must add the columns if adding Headers
self.col_count = len(self.headers)
def delete_headers(self, headers):
print(headers)
print(self.headers)
for header in headers:
if header in self.headers:
self.headers.remove(header)
# Must decrement the column count if removing headers
self.col_count = len(self.headers)
def make_table(self):
reasonable_border = self.border_size > 0
        added_border_element = ["<br>\n","<table border=\"" + str(self.border_size) +"\">\n"]
elements = added_border_element if reasonable_border else ["<br>\n","<table>\n"]
col_counter = 0
row_counter = 0
file = open("table.html", "a")
if len(self.headers) > 0:
elements.append("\t<tr>\n")
for n in self.headers:
elements.append("\t\t<th>" + n + "</th>\n")
elements.append("\t</tr>\n")
while row_counter < self.row_count:
elements.append("\t<tr>\n")
while col_counter < self.col_count:
elements.append("\t\t<td>test</td>\n")
col_counter += 1
elements.append("\t</tr>\n")
row_counter += 1
col_counter = 0
elements.append("</table>\n")
file.writelines(elements)
file.close()
col = 0
row = 0
header = ""
headers = []
border_size = -1
while col < 1 or col > 100:
col = input("How many columns do you want (1 to 100)? ")
col = int(col)
while row < 1 or row > 100:
row = input("How many rows do you want (1 to 100)? ")
row = int(row)
while header != "Y" and header != "N":
header = input("Do you want headers? Y/N ")
# If headers are wanted, give them names
if header == "Y":
header = True
for n in range(col):
headers.append(input("Header #" + str(n + 1) + ": "))
else:
header = False
while border_size < 0 or border_size > 5:
border_size = input("Enter a number for border size 1 to 5 ")
border_size = int(border_size)
# DEMOOOOOO
table = Table(col, row, headers, border_size)
table.make_table()
table.add_headers(["1", "2", "4"])
print("Here are your current headers: ")
print(table.get_headers())
print("Here is your current border size: ")
print(table.get_border_size())
table.make_table()
table.delete_cols(3, ["1", "2", "4"])
print("Here are your headers now: ")
print(table.get_headers())
print("Let's check your column count: ")
print(table.get_col_count())
# table.delete_cols(4) # should throw error
table.set_row_count(3)
table.add_rows(5)
print("Row count should be 8 because I just set it to 3 and added 5: ")
print(table.get_row_count())
avg_line_length: 33.601227 | max_line_length: 95 | alphanum_fraction: 0.617126
counts/scores: classes 4,134 / 0.754793 | generators 0 / 0 | decorators 0 / 0 | async_functions 0 / 0 | documentation 1,459 / 0.266387

hexsha: 451d32ddace64c14dc2a20c09b0af3249bd93791 | size: 676 | ext: py | lang: Python
max_stars: api/db/models/child_datum.py | peuan-testai/opentestdata-api @ 9e9b12e73abc30a2031eb49d51d5b9d5412ed6ba | ["MIT"] | count 15 | 2019-06-27T02:48:02.000Z – 2020-11-29T09:01:29.000Z
max_issues: api/db/models/child_datum.py | peuan-testai/opentestdata-api @ 9e9b12e73abc30a2031eb49d51d5b9d5412ed6ba | ["MIT"] | count 16 | 2019-07-26T19:51:55.000Z – 2022-03-12T00:00:24.000Z
max_forks: api/db/models/child_datum.py | peuan-testai/opentestdata-api @ 9e9b12e73abc30a2031eb49d51d5b9d5412ed6ba | ["MIT"] | count 7 | 2019-06-26T11:10:50.000Z – 2020-09-04T08:52:58.000Z
content:
from .. import db
from .base import BaseModel
class ChildDatum(BaseModel):
__tablename__ = 'child_data'
# fields
parent_id = db.Column(db.Integer, db.ForeignKey('data.id'), nullable=False)
datum_id = db.Column(db.Integer, db.ForeignKey('data.id'), nullable=False)
name = db.Column(db.String(length=100), nullable=False)
# relationships
parent = db.relationship('Datum', back_populates='children', foreign_keys=[parent_id])
datum = db.relationship('Datum', back_populates='included_in', foreign_keys=[datum_id])
def __repr__(self):
return (
"<ChildDatum '%s' of %s>" %
(self.name, self.parent)
)
avg_line_length: 30.727273 | max_line_length: 91 | alphanum_fraction: 0.659763
counts/scores: classes 627 / 0.927515 | generators 0 / 0 | decorators 0 / 0 | async_functions 0 / 0 | documentation 115 / 0.170118

hexsha: 451d5be5b8b8cc7a0af4de177c971df7cd94b93b | size: 13,474 | ext: py | lang: Python
max_stars: mate3/devices.py | kodonnell/mate3 @ 6c378cc7d5eee59e322075b7fcdc91c49b24265f | ["MIT"] | count/dates: null
max_issues: mate3/devices.py | kodonnell/mate3 @ 6c378cc7d5eee59e322075b7fcdc91c49b24265f | ["MIT"] | count/dates: null
max_forks: mate3/devices.py | kodonnell/mate3 @ 6c378cc7d5eee59e322075b7fcdc91c49b24265f | ["MIT"] | count/dates: null
content:
import dataclasses as dc
from typing import Any, Dict, Iterable, List, Optional
from loguru import logger
from mate3.field_values import FieldValue, ModelValues
from mate3.read import AllModelReads
from mate3.sunspec.fields import IntegerField
from mate3.sunspec.model_base import Model
from mate3.sunspec.models import (
ChargeControllerConfigurationModel,
ChargeControllerModel,
FLEXnetDCConfigurationModel,
FLEXnetDCRealTimeModel,
FXInverterConfigurationModel,
FXInverterRealTimeModel,
OutBackModel,
OutBackSystemControlModel,
RadianInverterConfigurationModel,
SinglePhaseRadianInverterRealTimeModel,
SplitPhaseRadianInverterRealTimeModel,
)
from mate3.sunspec.values import (
ChargeControllerConfigurationValues,
ChargeControllerValues,
FLEXnetDCConfigurationValues,
FLEXnetDCRealTimeValues,
FXInverterConfigurationValues,
FXInverterRealTimeValues,
OPTICSPacketStatisticsValues,
OutBackSystemControlValues,
OutBackValues,
RadianInverterConfigurationValues,
SinglePhaseRadianInverterRealTimeValues,
SplitPhaseRadianInverterRealTimeValues,
)
@dc.dataclass
class ChargeControllerDeviceValues(ChargeControllerValues):
"""
Simple wrapper to combine the value and config models.
"""
config: ChargeControllerConfigurationValues = dc.field(metadata={"field": False})
@dc.dataclass
class FNDCDeviceValues(FLEXnetDCRealTimeValues):
"""
Simple wrapper to combine the real-time and config models.
"""
config: FLEXnetDCConfigurationValues = dc.field(metadata={"field": False})
@dc.dataclass
class FXInverterDeviceValues(FXInverterRealTimeValues):
"""
Simple wrapper to combine the real-time and config models.
"""
config: FXInverterConfigurationValues = dc.field(metadata={"field": False})
@dc.dataclass
class SinglePhaseRadianInverterDeviceValues(SinglePhaseRadianInverterRealTimeValues):
"""
Simple wrapper to combine the real-time and config models.
"""
config: RadianInverterConfigurationValues = dc.field(metadata={"field": False})
@dc.dataclass
class SplitPhaseRadianInverterDeviceValues(SplitPhaseRadianInverterRealTimeValues):
"""
Simple wrapper to combine the real-time and config models.
"""
config: RadianInverterConfigurationValues = dc.field(metadata={"field": False})
@dc.dataclass
class Mate3DeviceValues(OutBackValues):
"""
Simple wrapper to combine the value and config models.
"""
config: OutBackSystemControlValues = dc.field(metadata={"field": False})
class DeviceValues:
"""
This is basically a way for storing state (i.e. current values) about all devices. It's the main interface for users
to access values etc.
"""
def __init__(self, client):
self._client = client
self.mate3s: Dict[None, Mate3DeviceValues] = {}
self.charge_controllers: Dict[int, ChargeControllerDeviceValues] = {}
self.fndcs: Dict[int, FNDCDeviceValues] = {}
self.fx_inverters: Dict[int, FXInverterDeviceValues] = {}
self.single_phase_radian_inverters: Dict[int, SinglePhaseRadianInverterDeviceValues] = {}
self.split_phase_radian_inverters: Dict[int, SplitPhaseRadianInverterDeviceValues] = {}
self.optics: Optional[OPTICSPacketStatisticsValues] = None
@property
def connected_devices(self) -> Iterable[ModelValues]:
# First ones with only a single device:
for d in ("mate3", "optics"):
device = getattr(self, d)
if device:
yield device
# Now those with device and config. (NB: we're explicit here as opposed to relying on hasattr(device, 'config')
# just in case a model actually had a 'config' field.)
for d in (
"charge_controllers",
"fndcs",
"fx_inverters",
"single_phase_radian_inverters",
"split_phase_radian_inverters",
):
for device in getattr(self, d).values():
yield device
yield device.config
def _get_single_device(self, name: str) -> ModelValues:
"""
Helper function so that e.g. if there's only one charge controller in self.charge_controllers, you can call
self.charge_controller to get it.
"""
devices = getattr(self, f"{name}s")
if len(devices) != 1:
raise RuntimeError(
(
f"Must be one, and only one, {name} device to be able to use `{name}` attribute - but there are "
f"{len(devices)}"
)
)
return list(devices.values())[0]
@property
def mate3(self) -> Mate3DeviceValues:
"""
Return the mate3.
"""
return self._get_single_device("mate3")
@property
def charge_controller(self) -> ChargeControllerDeviceValues:
"""
Return the charge controller if there's only one.
"""
return self._get_single_device("charge_controller")
@property
def fndc(self) -> FNDCDeviceValues:
"""
Return the FNDC if there's only one.
"""
return self._get_single_device("fndc")
@property
def fx_inverter(self) -> FXInverterDeviceValues:
"""
Return the FX inverter if there's only one.
"""
return self._get_single_device("fx_inverter")
@property
def single_phase_radian_inverter(self) -> SinglePhaseRadianInverterDeviceValues:
"""
Return the single phase radian inverter if there's only one.
"""
return self._get_single_device("single_phase_radian_inverter")
@property
def split_phase_radian_inverter(self) -> SplitPhaseRadianInverterDeviceValues:
"""
Return the split phase radian inverter if there's only one.
"""
return self._get_single_device("split_phase_radian_inverter")
def update(self, all_reads: AllModelReads) -> None:
"""
This is the key method, and is used to update the state of the devices with new values.
"""
# Update mate:
self._update_model_and_config(
all_reads=all_reads,
model_class=OutBackModel,
config_class=OutBackSystemControlModel,
config_values_class=OutBackSystemControlValues,
device_values=self.mate3s,
device_class=Mate3DeviceValues,
)
# Charge controller
self._update_model_and_config(
all_reads=all_reads,
model_class=ChargeControllerModel,
config_class=ChargeControllerConfigurationModel,
config_values_class=ChargeControllerConfigurationValues,
device_values=self.charge_controllers,
device_class=ChargeControllerDeviceValues,
)
# FNDCs
self._update_model_and_config(
all_reads=all_reads,
model_class=FLEXnetDCRealTimeModel,
config_class=FLEXnetDCConfigurationModel,
config_values_class=FLEXnetDCConfigurationValues,
device_values=self.fndcs,
device_class=FNDCDeviceValues,
)
# FX inverters
self._update_model_and_config(
all_reads=all_reads,
model_class=FXInverterRealTimeModel,
config_class=FXInverterConfigurationModel,
config_values_class=FXInverterConfigurationValues,
device_values=self.fx_inverters,
device_class=FXInverterDeviceValues,
)
# Single phase radian inverters
self._update_model_and_config(
all_reads=all_reads,
model_class=SinglePhaseRadianInverterRealTimeModel,
config_class=RadianInverterConfigurationModel,
config_values_class=RadianInverterConfigurationValues,
device_values=self.single_phase_radian_inverters,
device_class=SinglePhaseRadianInverterDeviceValues,
)
# Split phase radian inverters
self._update_model_and_config(
all_reads=all_reads,
model_class=SplitPhaseRadianInverterRealTimeModel,
config_class=RadianInverterConfigurationModel,
config_values_class=RadianInverterConfigurationValues,
device_values=self.split_phase_radian_inverters,
device_class=SplitPhaseRadianInverterDeviceValues,
)
def _update_model_and_config(
self,
all_reads: AllModelReads,
model_class: Model,
config_class: Model,
config_values_class: ModelValues,
device_values: Dict[int, ModelValues],
device_class: ModelValues,
) -> None:
model_field_reads_per_port = all_reads.get_reads_per_model_by_port(model_class)
config_field_reads_per_port = all_reads.get_reads_per_model_by_port(config_class)
# OK, there's a few options around whether the above variables contain anything.
# - Both present, then we're good - continue. All devices should have a configuration class.
# - Model isn't present - this means the device itself wasn't detected, so ignore. Note that usually this would
# imply the config class is null (since the config shouldn't be there if the device isn't) except in the case
# of Radian inverters, as the same config class is shared across both single and split phase devices (so that
# if only one type is present, the other will have empty model values and non-empty config).
# - Both are missing - this is covered by the above.
# So, the short summary is we only care about devices where the model field values are present, and in all other
# cases there *should* be config field values too.
if model_field_reads_per_port is None:
return
else:
if config_field_reads_per_port is None:
logger.warning(
(
f"Only model ({model_class}) field values and no config ({config_class}) fields were read. This"
f" is undefined behaviour, so ignoring {model_class}."
)
)
return
# Check model and config have the same ports:
if set(model_field_reads_per_port).symmetric_difference(set(config_field_reads_per_port)):
raise RuntimeError("Config and models have different ports!")
# Create/update any devices for the given ports:
for port in model_field_reads_per_port:
model_reads_this_port = model_field_reads_per_port[port]
config_reads_this_port = config_field_reads_per_port[port]
if port not in device_values:
# OK, it's new - create it:
config_values = self._create_new_model_values(
model=config_class,
values_class=config_values_class,
device_address=config_reads_this_port["did"].address,
)
device_values[port] = self._create_new_model_values(
model=model_class,
values_class=device_class,
device_address=model_reads_this_port["did"].address,
config=config_values,
)
# Either way, update the field values:
for reads, device_val in (
(model_reads_this_port, device_values[port]),
(config_reads_this_port, device_values[port].config),
):
for field_name, field_read in reads.items():
field_value = getattr(device_val, field_name)
field_value._raw_value = field_read.raw_value
field_value._implemented = field_read.implemented
field_value._last_read = field_read.time
# If there are any ports that were used for this device, but are no longer, remove them:
old_device_ports = set(list(device_values.keys())) - set(model_field_reads_per_port.keys())
for port in old_device_ports:
logger.warning(
f"Device(s) of model {model_class} on ports {old_device_ports} have disappeared. These will be ignored."
)
del device_values[port]
def _create_new_model_values(
self, model: Model, values_class: ModelValues, device_address: int, config: Optional[ModelValues] = None
):
# Create empty FieldValues
field_values = {}
scale_factors = {}
for field in model.fields():
address = device_address + field.start - 1
field_values[field.name] = FieldValue(
client=self._client,
field=field,
address=address,
scale_factor=None,
raw_value=None,
implemented=True,
read_time=None,
)
if isinstance(field, IntegerField) and field.scale_factor is not None:
scale_factors[field.name] = field.scale_factor.name
# Now assign scale factors:
for field, scale_factor in scale_factors.items():
field_values[field]._scale_factor = field_values[scale_factor]
kwargs = {"model": model, "address": device_address, **field_values}
return values_class(**kwargs) if config is None else values_class(config=config, **kwargs)
| 38.062147
| 120
| 0.656672
| 12,231
| 0.907748
| 733
| 0.054401
| 3,396
| 0.252041
| 0
| 0
| 3,425
| 0.254193
|
451de10c0477bdaf31e0d063879d50b5418e6b0b
| 490
|
py
|
Python
|
catkin_ws/src/ros_python/function_ws/srv_sub_pub/src/srv_server.py
|
min-chuir-Park/ROS_Tutorials
|
4c19e7673ec7098019c747833c45f0d32b85dab4
|
[
"MIT"
] | 1
|
2019-07-04T04:49:05.000Z
|
2019-07-04T04:49:05.000Z
|
catkin_ws/src/ros_python/function_ws/srv_sub_pub/src/srv_server.py
|
min-chuir-Park/ROS_Tutorials
|
4c19e7673ec7098019c747833c45f0d32b85dab4
|
[
"MIT"
] | null | null | null |
catkin_ws/src/ros_python/function_ws/srv_sub_pub/src/srv_server.py
|
min-chuir-Park/ROS_Tutorials
|
4c19e7673ec7098019c747833c45f0d32b85dab4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import rospy
from srv_sub_pub.srv import *
NAME = "add_two_ints_server"
def add_two_ints(req):
print("Returning [%s + %s = %s]" % (req.a, req.b, (req.a + req.b)))
return AddTwoIntsResponse(req.a + req.b)
def add_two_ints_server():
rospy.init_node(NAME)
s = rospy.Service('add_two_ints', AddTwoInts, add_two_ints)
# spin() keeps Python from exiting until node is shutdown
rospy.spin()
if __name__ == "__main__":
add_two_ints_server()
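# A minimal companion client sketch (an assumption, not part of this package): it
# calls the add_two_ints service defined above, assuming the srv_sub_pub package
# and its AddTwoInts service type are available on the ROS path.
import sys

import rospy
from srv_sub_pub.srv import AddTwoInts


def add_two_ints_client(x, y):
    # Block until the server above has advertised the service.
    rospy.wait_for_service('add_two_ints')
    add_two_ints = rospy.ServiceProxy('add_two_ints', AddTwoInts)
    return add_two_ints(x, y).sum


if __name__ == "__main__":
    a, b = int(sys.argv[1]), int(sys.argv[2])
    print("%d + %d = %d" % (a, b, add_two_ints_client(a, b)))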
| 22.272727
| 71
| 0.681633
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 149
| 0.304082
|
451f9b7ff4174b43f88b83397cc76cc631f10347
| 148
|
py
|
Python
|
app/captcha/handlers/verify.py
|
huioo/tornadoWeb
|
001efbae9815b30d8a0c0b4ba8819cc711b99dc4
|
[
"Apache-2.0"
] | null | null | null |
app/captcha/handlers/verify.py
|
huioo/tornadoWeb
|
001efbae9815b30d8a0c0b4ba8819cc711b99dc4
|
[
"Apache-2.0"
] | null | null | null |
app/captcha/handlers/verify.py
|
huioo/tornadoWeb
|
001efbae9815b30d8a0c0b4ba8819cc711b99dc4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tornado.web
class Handler(tornado.web.RequestHandler):
def initialize(self):
pass
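# A minimal wiring sketch (assumed, not from this repo) showing how a handler such
# as the one above is typically mounted in a tornado Application; the
# "/captcha/verify" route and port 8888 are illustrative placeholders.
import tornado.ioloop
import tornado.web


def make_app():
    return tornado.web.Application([
        (r"/captcha/verify", Handler),
    ])


if __name__ == "__main__":
    make_app().listen(8888)
    tornado.ioloop.IOLoop.current().start()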
| 18.5
| 42
| 0.662162
| 81
| 0.547297
| 0
| 0
| 0
| 0
| 0
| 0
| 44
| 0.297297
|
451fd926ec9ad4d32166fa0f1f6362133bc3d0df
| 1,113
|
py
|
Python
|
simplydomain/src/module_recursion.py
|
SimplySecurity/SimplyDomain-Old
|
101dd55b213009b449a96a1fa8b143d85dcdba88
|
[
"BSD-3-Clause"
] | 17
|
2018-08-08T11:51:26.000Z
|
2022-03-27T19:43:25.000Z
|
simplydomain/src/module_recursion.py
|
SimplySecurity/SimplyDomain-Old
|
101dd55b213009b449a96a1fa8b143d85dcdba88
|
[
"BSD-3-Clause"
] | 10
|
2018-06-14T21:33:49.000Z
|
2020-08-26T18:10:54.000Z
|
simplydomain/src/module_recursion.py
|
SimplySecurity/SimplyDomain-Old
|
101dd55b213009b449a96a1fa8b143d85dcdba88
|
[
"BSD-3-Clause"
] | 6
|
2018-07-20T17:52:03.000Z
|
2021-10-18T09:08:33.000Z
|
import multiprocessing as mp
class ModuleRecursion(object):
"""Class to handle recursion.
Simple class to handle tracking and storing prior
    sub-domains discovered.
"""
def __init__(self):
"""class init.
"""
self.recursion_queue = mp.Queue()
def add_subdomain(self, domain):
"""add subdomain to Q.
uses a non-blocking call to add to the Q
to prevent any errors with size.
Arguments:
domain {str} -- subdomain to add to Q
"""
self.recursion_queue.put(domain)
def get_subdomain_list(self, valid_only=True):
"""build subdomain list.
Using the JSON from the event consumer, we
can easily build a unique list of
subdomains for module use.
Keyword Arguments:
valid_only {bool} -- filter only valid subdomains (default: {True})
Returns:
list -- list of raw subdomains
"""
data = []
refill = []
while True:
try:
x = self.recursion_queue.get_nowait()
if valid_only and x.valid:
data.append(x.subdomain)
if not valid_only:
data.append(x.subdomain)
except Exception as e:
print(e)
break
return set(data)
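# Usage sketch (assumed, not part of this module). Although add_subdomain's
# docstring says str, get_subdomain_list reads .subdomain and .valid from each
# queued item, so result objects are expected; a namedtuple stands in here.
import time
from collections import namedtuple

SubdomainResult = namedtuple('SubdomainResult', ['subdomain', 'valid'])

tracker = ModuleRecursion()
tracker.add_subdomain(SubdomainResult('mail.example.com', True))
tracker.add_subdomain(SubdomainResult('stale.example.com', False))
time.sleep(0.1)  # mp.Queue delivery is asynchronous; give the feeder thread a moment
print(tracker.get_subdomain_list())  # typically {'mail.example.com'}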
| 21.403846
| 70
| 0.674753
| 1,082
| 0.972147
| 0
| 0
| 0
| 0
| 0
| 0
| 587
| 0.527403
|
451ff3d3aabbbe325d6f684b5fc8911f70524e81
| 1,691
|
py
|
Python
|
python/tests/spatial_operator/test_polygon_range.py
|
Maxar-Corp/GeoSpark
|
6248c6773dc88bf3354ea9b223f16ceb064e7627
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-10-19T07:57:29.000Z
|
2021-10-19T07:57:29.000Z
|
python/tests/spatial_operator/test_polygon_range.py
|
mayankkt9/GeoSpark
|
618da90413f7d86c59def92ba765fbd6d9d49761
|
[
"Apache-2.0",
"MIT"
] | 3
|
2020-03-24T18:20:35.000Z
|
2021-02-02T22:36:37.000Z
|
python/tests/spatial_operator/test_polygon_range.py
|
mayankkt9/GeoSpark
|
618da90413f7d86c59def92ba765fbd6d9d49761
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-09-26T15:51:22.000Z
|
2021-09-26T15:51:22.000Z
|
import os
from pyspark import StorageLevel
from geospark.core.SpatialRDD import PolygonRDD
from geospark.core.enums import IndexType, FileDataSplitter
from geospark.core.geom.envelope import Envelope
from geospark.core.spatialOperator import RangeQuery
from tests.test_base import TestBase
from tests.tools import tests_path
input_location = os.path.join(tests_path, "resources/primaryroads-polygon.csv")
splitter = FileDataSplitter.CSV
gridType = "rtree"
indexType = "rtree"
class TestPolygonRange(TestBase):
loop_times = 5
query_envelope = Envelope(-85.01, -60.01, 34.01, 50.01)
def test_spatial_range_query(self):
spatial_rdd = PolygonRDD(
self.sc, input_location, splitter, True, StorageLevel.MEMORY_ONLY
)
for i in range(self.loop_times):
result_size = RangeQuery.\
SpatialRangeQuery(spatial_rdd, self.query_envelope, False, False).count()
assert result_size == 704
assert RangeQuery.SpatialRangeQuery(
spatial_rdd, self.query_envelope, False, False).take(10)[0].getUserData() is not None
def test_spatial_range_query_using_index(self):
spatial_rdd = PolygonRDD(
self.sc, input_location, splitter, True, StorageLevel.MEMORY_ONLY
)
spatial_rdd.buildIndex(IndexType.RTREE, False)
for i in range(self.loop_times):
result_size = RangeQuery.\
SpatialRangeQuery(spatial_rdd, self.query_envelope, False, False).count()
assert result_size == 704
assert RangeQuery.SpatialRangeQuery(
spatial_rdd, self.query_envelope, False, False).take(10)[0].getUserData() is not None
| 36.76087
| 97
| 0.707865
| 1,209
| 0.714962
| 0
| 0
| 0
| 0
| 0
| 0
| 50
| 0.029568
|
45202479629fa2ae422e3a2c76ead8cf08a4c08c
| 2,004
|
py
|
Python
|
river/compose/renamer.py
|
online-ml/creme
|
60872844e6052b5ef20e4075aea30f9031377136
|
[
"BSD-3-Clause"
] | 1,105
|
2019-01-24T15:15:30.000Z
|
2020-11-10T18:27:00.000Z
|
river/compose/renamer.py
|
online-ml/creme
|
60872844e6052b5ef20e4075aea30f9031377136
|
[
"BSD-3-Clause"
] | 328
|
2019-01-25T13:48:43.000Z
|
2020-11-11T11:41:44.000Z
|
river/compose/renamer.py
|
online-ml/creme
|
60872844e6052b5ef20e4075aea30f9031377136
|
[
"BSD-3-Clause"
] | 150
|
2019-01-29T19:05:21.000Z
|
2020-11-11T11:50:14.000Z
|
from typing import Dict
from river import base
__all__ = ["Renamer", "Prefixer", "Suffixer"]
class Renamer(base.Transformer):
"""Renames features following substitution rules.
Parameters
----------
mapping
        Dictionary describing substitution rules. Keys in `mapping` that are not a feature's name are silently ignored.
Examples
--------
>>> from river import compose
>>> mapping = {'a': 'v', 'c': 'o'}
>>> x = {'a': 42, 'b': 12}
>>> compose.Renamer(mapping).transform_one(x)
{'b': 12, 'v': 42}
"""
def __init__(self, mapping: Dict[str, str]):
self.mapping = mapping
def transform_one(self, x):
for old_key, new_key in self.mapping.items():
try:
x[new_key] = x.pop(old_key)
except KeyError:
pass # Ignoring keys that are not a feature's name
return x
class Prefixer(base.Transformer):
"""Prepends a prefix on features names.
Parameters
----------
prefix
Examples
--------
>>> from river import compose
>>> x = {'a': 42, 'b': 12}
>>> compose.Prefixer('prefix_').transform_one(x)
{'prefix_a': 42, 'prefix_b': 12}
"""
def __init__(self, prefix: str):
self.prefix = prefix
def _rename(self, s: str) -> str:
return f"{self.prefix}{s}"
def transform_one(self, x):
return {self._rename(i): xi for i, xi in x.items()}
class Suffixer(base.Transformer):
"""Appends a suffix on features names.
Parameters
----------
suffix
Examples
--------
>>> from river import compose
>>> x = {'a': 42, 'b': 12}
>>> compose.Suffixer('_suffix').transform_one(x)
{'a_suffix': 42, 'b_suffix': 12}
"""
def __init__(self, suffix: str):
self.suffix = suffix
def _rename(self, s: str) -> str:
return f"{s}{self.suffix}"
def transform_one(self, x):
return {self._rename(i): xi for i, xi in x.items()}
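# Usage sketch (not part of this module): the transformers can be chained by hand,
# and Renamer silently ignores mapping keys that are not present, as documented.
x = {'a': 42, 'b': 12}
print(Renamer({'a': 'v', 'missing': 'ignored'}).transform_one(dict(x)))
# {'b': 12, 'v': 42}
print(Suffixer('_std').transform_one(Prefixer('sensor_').transform_one(dict(x))))
# {'sensor_a_std': 42, 'sensor_b_std': 12}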
| 21.094737
| 120
| 0.560878
| 1,900
| 0.948104
| 0
| 0
| 0
| 0
| 0
| 0
| 1,087
| 0.542415
|
452450d09a4bf187252d74d278741b2191dfc928
| 4,660
|
py
|
Python
|
open_publishing/catalog/catalog_types.py
|
open-publishing/open-publishing-api
|
0d1646bb2460c6f35cba610a355941d2e07bfefd
|
[
"BSD-3-Clause"
] | null | null | null |
open_publishing/catalog/catalog_types.py
|
open-publishing/open-publishing-api
|
0d1646bb2460c6f35cba610a355941d2e07bfefd
|
[
"BSD-3-Clause"
] | null | null | null |
open_publishing/catalog/catalog_types.py
|
open-publishing/open-publishing-api
|
0d1646bb2460c6f35cba610a355941d2e07bfefd
|
[
"BSD-3-Clause"
] | null | null | null |
from open_publishing.core import FieldGroup
from open_publishing.core import FieldDescriptor
from open_publishing.core.enums import CatalogType, VLBCategory, AcademicCategory
from open_publishing.core import SimpleField
from open_publishing.extendable_enum_field import ExtendableEnumField
from open_publishing.genre import GenresList
from open_publishing.bisac import BisacList
from .thema import ThemaList
from .subject import SubjectField
from .series import SeriesList
from .institution import InstitutionField
class CatalogTypeBase(FieldGroup):
_catalog_type = None
def __init__(self,
document):
super(CatalogTypeBase, self).__init__(document)
self._fields['series'] = SeriesList(document)
self._fields['thema'] = ThemaList(document=document)
series = FieldDescriptor('series')
thema = FieldDescriptor('thema')
@property
def catalog_type(self):
return self._catalog_type
class Academic(CatalogTypeBase):
_catalog_type = CatalogType.academic
def __init__(self,
document):
super(Academic, self).__init__(document)
self._fields['subject'] = SubjectField(document=document)
self._fields['category'] = SimpleField(database_object=document,
aspect='academic.*',
dtype=AcademicCategory,
field_locator='academic.category_id',
nullable=True,
serialized_null=0)
self._fields['publication_year'] = SimpleField(database_object=document,
dtype=str,
nullable=True,
aspect='academic.*',
field_locator='academic.year_of_text')
self._fields['institution'] = InstitutionField(document=document)
subject = FieldDescriptor('subject')
category = FieldDescriptor('category')
publication_year = FieldDescriptor('publication_year')
institution = FieldDescriptor('institution')
class NonAcademic(CatalogTypeBase):
_catalog_type = CatalogType.non_academic
def __init__(self,
document = None):
super(NonAcademic, self).__init__(document)
self._fields['publication_year'] = NullableIntField(database_object=document,
aspect='non_academic.*',
field_locator='non_academic.publication_year')
self._fields['copyright_year'] = NullableIntField(database_object=document,
aspect='non_academic.*',
field_locator='non_academic.copyright_year')
self._fields['vlb_category'] = ExtendableEnumField(database_object=document,
aspect='non_academic.*',
field_locator='non_academic.vlb_kat_id',
dtype=VLBCategory,
nullable=True)
self._fields['genres'] = GenresList(document)
self._fields['bisac'] = BisacList(document=document)
publication_year = FieldDescriptor('publication_year')
copyright_year = FieldDescriptor('copyright_year')
vlb_category = FieldDescriptor('vlb_category')
bisac = FieldDescriptor('bisac')
genres = FieldDescriptor('genres')
class NullableIntField(SimpleField):
def __init__(self,
database_object,
aspect,
field_locator):
super(NullableIntField, self).__init__(database_object,
aspect,
field_locator)
def _parse_value(self,
value):
if value == '':
return None
else :
return int(value)
def _value_validation(self,
value):
if value is None or isinstance(value, int):
return value
else:
raise ValueError('expected int or None, got {0}'.format(value))
def _serialize_value(self,
value):
return str(value) if value is not None else ''
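# Standalone mirror (illustration only, not part of this module) of the
# NullableIntField conversions above: the API uses '' as its "no value" sentinel,
# which maps to None on parse and back to '' on serialize.
def parse_nullable_int(value):
    return None if value == '' else int(value)


def serialize_nullable_int(value):
    return str(value) if value is not None else ''


assert parse_nullable_int('') is None
assert parse_nullable_int('2021') == 2021
assert serialize_nullable_int(None) == ''
assert serialize_nullable_int(2021) == '2021'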
| 41.981982
| 106
| 0.547425
| 4,108
| 0.881545
| 0
| 0
| 71
| 0.015236
| 0
| 0
| 493
| 0.105794
|
4524547bc1c556606b6ef59589378a69ffa68a6d
| 2,263
|
py
|
Python
|
app/request.py
|
aenshtyn/News-Update
|
2a09099cd6468d00e2e1972072a88db3e4b7cb78
|
[
"MIT"
] | null | null | null |
app/request.py
|
aenshtyn/News-Update
|
2a09099cd6468d00e2e1972072a88db3e4b7cb78
|
[
"MIT"
] | null | null | null |
app/request.py
|
aenshtyn/News-Update
|
2a09099cd6468d00e2e1972072a88db3e4b7cb78
|
[
"MIT"
] | null | null | null |
import urllib.request,json
from .models import Source,Article
# Getting Api Key
api_key = None
#Getting the base urls
source_base_url = None
article_base_url = None
def configure_request(app):
global api_key,source_base_url,article_base_url
api_key = app.config['SOURCE_API_KEY']
source_base_url = app.config['SOURCE_BASE_URL']
article_base_url = app.config['ARTICLE_BASE_URL']
def get_sources(category):
'''
Function that gets the json response to our url request
'''
get_sources_url = source_base_url.format(category,api_key)
with urllib.request.urlopen(get_sources_url) as url:
get_sources_data = url.read()
get_sources_response = json.loads(get_sources_data)
source_results = None
if get_sources_response['sources']:
source_results_list = get_sources_response['sources']
source_results = process_results(source_results_list)
return source_results
def process_results(source_list):
'''
    Function that processes the source results and transforms them into a list of Source objects
Args:
source_list: A list of dictionaries that contain source details
Returns :
source_results: A list of source objects
'''
source_results = []
for source_item in source_list:
id = source_item.get('id')
name = source_item.get('name')
description = source_item.get('description')
url = source_item.get('url')
category = source_item.get('category')
language = source_item.get('language')
country = source_item.get('country')
if url:
source_object = Source(id,name,description,url,category,language,country)
source_results.append(source_object)
return source_results
def get_articles(id):
get_article_url = article_base_url.format(id,api_key)
with urllib.request.urlopen(get_article_url) as url:
get_article_data = url.read()
get_article_response = json.loads(get_article_data)
        article_results = None
if get_article_response['articles']:
article_results_list = get_article_response['articles']
article_results = process_results(article_results_list)
return article_results
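# Usage sketch (assumed, not from this repo): configure_request expects a
# Flask-style app whose config holds the API key and URL templates. The class,
# endpoint URLs and key below are illustrative placeholders only.
class FakeApp:
    config = {
        'SOURCE_API_KEY': '<your-api-key>',
        'SOURCE_BASE_URL': 'https://example.invalid/v2/sources?category={}&apiKey={}',
        'ARTICLE_BASE_URL': 'https://example.invalid/v2/everything?sources={}&apiKey={}',
    }


configure_request(FakeApp())
# sources = get_sources('technology')  # performs a live HTTP request and returns Source objects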
| 30.173333
| 86
| 0.699956
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 501
| 0.221388
|
4524add159eab216540f8144d587795ca3f57c91
| 5,027
|
py
|
Python
|
corai_util/finance/src/param_iv.py
|
Code-Cornelius/python_libraries
|
71c388da60e2aeb94369c3813faca93bf6a18ebf
|
[
"MIT"
] | 1
|
2022-01-01T22:10:04.000Z
|
2022-01-01T22:10:04.000Z
|
corai_util/finance/src/param_iv.py
|
Code-Cornelius/python_libraries
|
71c388da60e2aeb94369c3813faca93bf6a18ebf
|
[
"MIT"
] | null | null | null |
corai_util/finance/src/param_iv.py
|
Code-Cornelius/python_libraries
|
71c388da60e2aeb94369c3813faca93bf6a18ebf
|
[
"MIT"
] | null | null | null |
# normal libraries
import math
import numpy as np
# priv_libraries
from corai_util.finance.src.financials import compute_price, compute_integral
from corai_util.finance.src.implied_vol import implied_volatility_newton, total_implied_vol_newton
phi_heston = lambda xx: (1 - (1 - np.exp(-xx)) / xx) / xx
phi_heston_lambda = lambda xx, lamb: phi_heston(xx * lamb)
phi_heston_curry = lambda lamb: lambda xx: phi_heston_lambda(xx, lamb)
phi_power_law = lambda eta, gamma: lambda theta: eta * theta ** (- gamma)
# section ######################################################################
# #############################################################################
# parametrisation
def g_jacquier(kk, w, w_dash, w_dash_dash, parametrisation):
# k vectorised
temp_w = w(parametrisation, kk)
temp_w_dash = w_dash(parametrisation, kk)
temp_w_dash_dash = w_dash_dash(parametrisation, kk)
temp = (1 - kk * temp_w_dash / (2 * temp_w))
return (temp * temp
- temp_w_dash * temp_w_dash / 4 * (1 / temp_w + 0.25)
+ temp_w_dash_dash / 2)
def SVIparam_natural2raw(parametrisation):
delta, mu, rho, omega, zeta = parametrisation
a = delta + omega / 2 * (1 - rho * rho)
b = omega * zeta / 2
m = mu - rho / zeta
sigma = math.sqrt(1 - rho * rho) / zeta
return a, b, rho, m, sigma
def total_implied_vol2density_litzenberg(kk, w, w_dash, w_dash_dash, parametrisation):
# time through parametrisation
temp_w = w(parametrisation, kk)
g_value = g_jacquier(kk, w, w_dash, w_dash_dash, parametrisation)
temp2 = np.sqrt(2 * math.pi * temp_w)
temp_d_ = -kk / np.sqrt(temp_w) - np.sqrt(temp_w) / 2
temp3 = np.exp(- temp_d_ * temp_d_ / 2)
return g_value * temp3 / temp2
def total_implied_vol_ssvi(parametrisation, kk):
# kk vectorised
a, b, rho, m, sigma = parametrisation
assert b >= 0
assert abs(rho) <= 1
assert sigma > 0
under_the_root = (kk - m) * (kk - m) + sigma * sigma
return a + b * (rho * (kk - m) + np.sqrt(under_the_root))
def total_implied_vol_ssvi_dash(parametrisation, kk):
# kk vectorised
a, b, rho, m, sigma = parametrisation
assert b >= 0
assert abs(rho) <= 1
assert sigma > 0
under_the_root = (kk - m) * (kk - m) + sigma * sigma
return (b * rho + b * (kk - m) / np.sqrt(under_the_root))
def total_implied_vol_ssvi_dash_dash(parametrisation, kk):
# kk vectorised
a, b, rho, m, sigma = parametrisation
assert b >= 0
assert abs(rho) <= 1
assert sigma > 0
under_the_root = (kk - m) * (kk - m) + sigma * sigma
return b * sigma * sigma * np.power(under_the_root, -3 / 2)
# section ######################################################################
# #############################################################################
# SSVI
def compute_total_implied_vol_SSVI(KK, theta, rho, phi, S0, log_price=True):
# computes for all [rhos,theta,phi(theta)] * K the SSVI
# K length nb of strikes
    # theta and rho have the same length
# all numpy arrays
if log_price:
k = KK
else:
k = np.log(KK / S0)
phi = phi(theta)
k = np.repeat(k[None, :], len(theta), axis=0)
theta = np.repeat(theta[:, None], len(KK), axis=1)
rho = np.repeat(rho[:, None], len(KK), axis=1)
phi = np.repeat(phi[:, None], len(KK), axis=1)
expression_in_root = (phi * k + rho)
return theta / 2 * (1 + rho * phi * k
+ np.sqrt(expression_in_root * expression_in_root + 1 - rho * rho)
)
def natural_SVIparam2density(xx_for_density, parameters):
# """ takes natural SVI parameters. """
"""
Semantics:
From
Args:
xx_for_density:
parameters:
Returns:
"""
w = total_implied_vol_ssvi
w_dash = total_implied_vol_ssvi_dash
w_dash_dash = total_implied_vol_ssvi_dash_dash
return total_implied_vol2density_litzenberg(xx_for_density, w, w_dash, w_dash_dash, parameters)
def natural_SVIparameters2price(log_asset_for_density, parameters, log_moneyness):
""" takes natural SVI parameters."""
values_density_of_SVI = natural_SVIparam2density(log_asset_for_density, parameters) * np.exp(-log_asset_for_density)
asset_for_density = np.exp(log_asset_for_density) # density of S_T
s0 = compute_integral(asset_for_density, values_density_of_SVI)
c_k = compute_price(asset_for_density, np.exp(log_moneyness), values_density_of_SVI)
return values_density_of_SVI, c_k, s0
def natural_SVIparameters2TIV(val_density, parameters, log_moneyness):
""" takes natural SVI parameters."""
values_density_of_SVI, c_k, s0 = natural_SVIparameters2price(val_density, parameters, log_moneyness)
sigma = implied_volatility_newton(True, s0, np.exp(log_moneyness), 1, 0, 0, c_k)
total_implied_vol = 1 * sigma * sigma
total_implied_vol = total_implied_vol_newton(True, s0, np.exp(log_moneyness), 0, 0, c_k)
return values_density_of_SVI, c_k, s0, total_implied_vol
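# Usage sketch (not part of this module): evaluate the SSVI total implied variance
# and the implied density on a log-moneyness grid. The parameter tuple is
# illustrative only and is unpacked as (a, b, rho, m, sigma) by the helpers above.
kk_grid = np.linspace(-1.0, 1.0, 201)
raw_params = (0.04, 0.4, -0.4, 0.0, 0.2)
w_grid = total_implied_vol_ssvi(raw_params, kk_grid)
density_grid = natural_SVIparam2density(kk_grid, raw_params)
print(w_grid[:3], density_grid[:3])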
| 33.966216
| 120
| 0.63139
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 831
| 0.165307
|
45266515995c4fa2eef2c47f14074dcb92d42fdb
| 687
|
py
|
Python
|
cracking_the_coding_interview_qs/8.7-8.8/get_all_permutations_of_string_test.py
|
angelusualle/algorithms
|
86286a49db2a755bc57330cb455bcbd8241ea6be
|
[
"Apache-2.0"
] | null | null | null |
cracking_the_coding_interview_qs/8.7-8.8/get_all_permutations_of_string_test.py
|
angelusualle/algorithms
|
86286a49db2a755bc57330cb455bcbd8241ea6be
|
[
"Apache-2.0"
] | null | null | null |
cracking_the_coding_interview_qs/8.7-8.8/get_all_permutations_of_string_test.py
|
angelusualle/algorithms
|
86286a49db2a755bc57330cb455bcbd8241ea6be
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from get_all_permutations_of_string import get_all_permutations_of_string, get_all_permutations_of_string_with_dups
class Test_Case_Get_All_Permutations_Of_String(unittest.TestCase):
def test_get_all_permutations_of_string(self):
self.assertListEqual(get_all_permutations_of_string("tea"), ['tea', 'eta', 'ate', 'tae', 'eat', 'aet'])
def test_get_all_permutations_of_string_with_dups(self):
self.assertListEqual(get_all_permutations_of_string_with_dups("aaa"), ['aaa'])
self.assertListEqual(get_all_permutations_of_string_with_dups("teat"), ['ttea', 'ttae', 'teta', 'teat', 'tate', 'taet', 'etta', 'etat', 'eatt', 'atte', 'atet', 'aett'])
| 76.333333
| 176
| 0.764192
| 554
| 0.806405
| 0
| 0
| 0
| 0
| 0
| 0
| 123
| 0.179039
|
4526f09b63533011d0dbd7fc3b49ed217cae0f86
| 8,171
|
py
|
Python
|
third-party/webscalesqlclient/mysql-5.6/xtrabackup/test/kewpie/percona_tests/xtrabackup_disabled/xb_partial_test.py
|
hkirsman/hhvm_centos7_builds
|
2a1fd6de0d2d289c1575f43f10018f3bec23bb13
|
[
"PHP-3.01",
"Zend-2.0"
] | 2
|
2018-03-07T08:31:29.000Z
|
2019-02-01T10:10:48.000Z
|
third-party/webscalesqlclient/mysql-5.6/xtrabackup/test/kewpie/percona_tests/xtrabackup_disabled/xb_partial_test.py
|
hkirsman/hhvm_centos7_builds
|
2a1fd6de0d2d289c1575f43f10018f3bec23bb13
|
[
"PHP-3.01",
"Zend-2.0"
] | 1
|
2021-02-23T14:52:22.000Z
|
2021-02-23T14:52:22.000Z
|
xtrabackup_main/xb_partial_test.py
|
isabella232/kewpie
|
47d67124fa755719eda3ca5a621a2abf0322d3f9
|
[
"Apache-2.0"
] | 1
|
2020-11-13T10:17:28.000Z
|
2020-11-13T10:17:28.000Z
|
#! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import shutil
from lib.util.mysqlBaseTestCase import mysqlBaseTestCase
server_requirements = [['--innodb-file-per-table']]
servers = []
server_manager = None
test_executor = None
# we explicitly use the --no-timestamp option
# here. We will be using a generic / vanilla backup dir
backup_path = None
class basicTest(mysqlBaseTestCase):
def setUp(self):
master_server = servers[0] # assumption that this is 'master'
backup_path = os.path.join(master_server.vardir, 'full_backup')
inc_backup_path = os.path.join(master_server.vardir, 'inc_backup')
# remove backup paths
for del_path in [backup_path, inc_backup_path]:
if os.path.exists(del_path):
shutil.rmtree(del_path)
def load_table(self, table_name, row_count, server):
queries = []
for i in range(row_count):
queries.append("INSERT INTO %s VALUES (%d, %d)" %(table_name,i, row_count))
retcode, result = self.execute_queries(queries, server)
self.assertEqual(retcode, 0, msg=result)
def test_xb_partial(self):
self.servers = servers
logging = test_executor.logging
innobackupex = test_executor.system_manager.innobackupex_path
xtrabackup = test_executor.system_manager.xtrabackup_path
master_server = servers[0] # assumption that this is 'master'
backup_path = os.path.join(master_server.vardir, 'full_backup')
inc_backup_path = os.path.join(master_server.vardir, 'inc_backup')
output_path = os.path.join(master_server.vardir, 'innobackupex.out')
exec_path = os.path.dirname(innobackupex)
table_name = "`test`"
# populate our server with a test bed
queries = ["DROP TABLE IF EXISTS %s" %(table_name)
,("CREATE TABLE %s "
"(`a` int(11) DEFAULT NULL, "
"`number` int(11) DEFAULT NULL) "
" ENGINE=InnoDB DEFAULT CHARSET=latin1 "
%(table_name)
)
]
retcode, result = self.execute_queries(queries, master_server)
self.assertEqual(retcode, 0, msg = result)
row_count = 100
self.load_table(table_name, row_count, master_server)
# Additional tables via randgen
test_cmd = "./gentest.pl --gendata=conf/percona/percona.zz"
retcode, output = self.execute_randgen(test_cmd, test_executor, master_server)
#self.assertEqual(retcode, 0, msg=output)
# take a backup
cmd = [ xtrabackup
, "--defaults-file=%s" %master_server.cnf_file
, "--datadir=%s" %master_server.datadir
, "--backup"
, '--tables="^test[.]test|DD"'
, "--target-dir=%s" %backup_path
]
cmd = " ".join(cmd)
retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
self.assertTrue(retcode==0,output)
# Get a checksum for our `test` table
query = "CHECKSUM TABLE %s" %table_name
retcode, orig_checksum1 = self.execute_query(query, master_server)
self.assertEqual(retcode, 0, msg=result)
logging.test_debug("Original checksum1: %s" %orig_checksum1)
# Get a checksum for our `DD` table
query = "CHECKSUM TABLE DD"
retcode, orig_checksum2 = self.execute_query(query, master_server)
self.assertEqual(retcode, 0, msg=result)
logging.test_debug("Original checksum2: %s" %orig_checksum2)
# Clear our table so we know the backup restored
for del_table in [table_name,'DD']:
query = "DELETE FROM %s" %del_table
retcode, result = self.execute_query(query,master_server)
self.assertEqual(retcode, 0, result)
# Remove old tables
for table in ['A','AA','B','BB','C','CC','D']:
query = "DROP TABLE %s" %table
retcode, result = self.execute_query(query,master_server)
self.assertEqual(retcode,0,result)
# shutdown our server
master_server.stop()
# do final prepare on main backup
cmd = [ xtrabackup
, "--prepare"
, "--datadir=%s" %master_server.datadir
, "--use-memory=500M"
, "--target-dir=%s" %backup_path
]
cmd = " ".join(cmd)
retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
self.assertTrue(retcode==0,output)
# copy our data files back
for root, dirs, files in os.walk(backup_path):
if files:
file_info = root.split(backup_path)[1]
for file_name in files:
# We do a quick check to make sure
# no names start with '/' as os.path
# throws a hissy when it sees such things
if file_info.startswith('/'):
file_info = file_info[1:]
if file_name.startswith('/'):
file_name = file_name[1:]
to_path = os.path.join(master_server.datadir
, file_info
, file_name)
new_dir = os.path.dirname(to_path)
try:
if not os.path.exists(new_dir):
os.makedirs(new_dir)
                    except OSError as e:
logging.error("Could not create directory: %s | %s" %(new_dir, e))
try:
shutil.copy(os.path.join(root,file_name),to_path)
                    except IOError as e:
logging.error( "ERROR: Could not copy file: %s | %s" %(file_name, e))
# restart server (and ensure it doesn't crash)
master_server.start()
self.assertTrue(master_server.status==1, 'Server failed restart from restored datadir...')
# Get a checksum for our test table
query = "CHECKSUM TABLE %s" %table_name
retcode, restored_checksum1 = self.execute_query(query, master_server)
self.assertEqual(retcode, 0, msg=result)
logging.test_debug("Restored checksum1: %s" %restored_checksum1)
self.assertEqual(orig_checksum1, restored_checksum1, msg = "Orig: %s | Restored: %s" %(orig_checksum1, restored_checksum1))
# Get a checksum for our DD table
query = "CHECKSUM TABLE DD"
retcode, restored_checksum2 = self.execute_query(query, master_server)
self.assertEqual(retcode, 0, msg=result)
logging.test_debug("Restored checksum1: %s" %restored_checksum2)
self.assertEqual(orig_checksum2, restored_checksum2, msg = "Orig: %s | Restored: %s" %(orig_checksum2, restored_checksum2))
| 45.648045
| 135
| 0.571778
| 6,993
| 0.855832
| 0
| 0
| 0
| 0
| 0
| 0
| 2,491
| 0.304859
|
45271fd81da1faed37c0972df122fea04a51747b
| 104
|
py
|
Python
|
filter.py
|
Gerrydh/MPP-Recursion
|
de81bb0dcd50f7f66971db9000e6262767168b8f
|
[
"Apache-2.0"
] | null | null | null |
filter.py
|
Gerrydh/MPP-Recursion
|
de81bb0dcd50f7f66971db9000e6262767168b8f
|
[
"Apache-2.0"
] | null | null | null |
filter.py
|
Gerrydh/MPP-Recursion
|
de81bb0dcd50f7f66971db9000e6262767168b8f
|
[
"Apache-2.0"
] | null | null | null |
print filter((lambda x: (x%2) ==0 ), [1,2,3,4,5,6])
print filter((lambda x: (x%2) !=0 ), [1,2,3,4,5,6])
| 52
| 52
| 0.519231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
45278aea9c424ae5e3cd32a1bd843d89d29dbea4
| 156
|
py
|
Python
|
project euler/q2.py
|
milkmeat/thomas
|
fbc72af34267488d931a4885d4e19fce22fea582
|
[
"MIT"
] | null | null | null |
project euler/q2.py
|
milkmeat/thomas
|
fbc72af34267488d931a4885d4e19fce22fea582
|
[
"MIT"
] | null | null | null |
project euler/q2.py
|
milkmeat/thomas
|
fbc72af34267488d931a4885d4e19fce22fea582
|
[
"MIT"
] | null | null | null |
l=[0]*100
l[0]=1
l[1]=2
for x in range (2,100):
l[x]=l[x-1]+l[x-2]
#print l
f=0
for c in l:
if c%2==0 and c<4000000:
f=f+c
print f
| 14.181818
| 29
| 0.474359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 0.057692
|
452797680ac9c44f15c014b0a008440ac1ea29cb
| 12,809
|
py
|
Python
|
recipes/LibriSpeech/ASR/CTC/train_with_wav2vec.py
|
mj-kh/speechbrain
|
9351f61cc057ddf3f8a0b7074a9c3c857dec84ed
|
[
"Apache-2.0"
] | 3,913
|
2021-03-14T13:54:52.000Z
|
2022-03-30T05:09:55.000Z
|
recipes/LibriSpeech/ASR/CTC/train_with_wav2vec.py
|
mj-kh/speechbrain
|
9351f61cc057ddf3f8a0b7074a9c3c857dec84ed
|
[
"Apache-2.0"
] | 667
|
2021-03-14T20:11:17.000Z
|
2022-03-31T04:07:17.000Z
|
recipes/LibriSpeech/ASR/CTC/train_with_wav2vec.py
|
mj-kh/speechbrain
|
9351f61cc057ddf3f8a0b7074a9c3c857dec84ed
|
[
"Apache-2.0"
] | 785
|
2021-03-14T13:20:57.000Z
|
2022-03-31T03:26:03.000Z
|
#!/usr/bin/env/python3
"""Recipe for training a wav2vec-based ctc ASR system with librispeech.
The system employs wav2vec as its encoder. Decoding is performed with
ctc greedy decoder.
To run this recipe, do the following:
> python train_with_wav2vec.py hparams/train_with_wav2vec.yaml
The neural network is trained on CTC likelihood target and character units
are used as basic recognition tokens. Training is performed on the full
LibriSpeech dataset (960 h).
Authors
* Sung-Lin Yeh 2021
* Titouan Parcollet 2021
* Ju-Chieh Chou 2020
* Mirco Ravanelli 2020
* Abdel Heba 2020
* Peter Plantinga 2020
* Samuele Cornell 2020
"""
import os
import sys
import torch
import logging
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main
from hyperpyyaml import load_hyperpyyaml
from pathlib import Path
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.Brain):
def compute_forward(self, batch, stage):
"""Forward computations from the waveform batches to the output probabilities."""
batch = batch.to(self.device)
wavs, wav_lens = batch.sig
tokens_bos, _ = batch.tokens_bos
wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
# Add augmentation if specified
if stage == sb.Stage.TRAIN:
if hasattr(self.modules, "env_corrupt"):
wavs_noise = self.modules.env_corrupt(wavs, wav_lens)
wavs = torch.cat([wavs, wavs_noise], dim=0)
wav_lens = torch.cat([wav_lens, wav_lens])
tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0)
if hasattr(self.hparams, "augmentation"):
wavs = self.hparams.augmentation(wavs, wav_lens)
# Forward pass
feats = self.modules.wav2vec2(wavs)
x = self.modules.enc(feats)
# Compute outputs
p_tokens = None
logits = self.modules.ctc_lin(x)
p_ctc = self.hparams.log_softmax(logits)
if stage != sb.Stage.TRAIN:
p_tokens = sb.decoders.ctc_greedy_decode(
p_ctc, wav_lens, blank_id=self.hparams.blank_index
)
return p_ctc, wav_lens, p_tokens
def compute_objectives(self, predictions, batch, stage):
"""Computes the loss (CTC+NLL) given predictions and targets."""
p_ctc, wav_lens, predicted_tokens = predictions
ids = batch.id
tokens_eos, tokens_eos_lens = batch.tokens_eos
tokens, tokens_lens = batch.tokens
if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0)
tokens_eos_lens = torch.cat(
[tokens_eos_lens, tokens_eos_lens], dim=0
)
tokens = torch.cat([tokens, tokens], dim=0)
tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0)
loss_ctc = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)
loss = loss_ctc
if stage != sb.Stage.TRAIN:
# Decode token terms to words
predicted_words = [
"".join(self.tokenizer.decode_ndim(utt_seq)).split(" ")
for utt_seq in predicted_tokens
]
target_words = [wrd.split(" ") for wrd in batch.wrd]
self.wer_metric.append(ids, predicted_words, target_words)
self.cer_metric.append(ids, predicted_words, target_words)
return loss
def fit_batch(self, batch):
"""Train the parameters given a single batch in input"""
predictions = self.compute_forward(batch, sb.Stage.TRAIN)
loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
loss.backward()
if self.check_gradients(loss):
self.wav2vec_optimizer.step()
self.model_optimizer.step()
self.wav2vec_optimizer.zero_grad()
self.model_optimizer.zero_grad()
return loss.detach()
def evaluate_batch(self, batch, stage):
"""Computations needed for validation/test batches"""
predictions = self.compute_forward(batch, stage=stage)
with torch.no_grad():
loss = self.compute_objectives(predictions, batch, stage=stage)
return loss.detach()
def on_stage_start(self, stage, epoch):
"""Gets called at the beginning of each epoch"""
if stage != sb.Stage.TRAIN:
self.cer_metric = self.hparams.cer_computer()
self.wer_metric = self.hparams.error_rate_computer()
def on_stage_end(self, stage, stage_loss, epoch):
"""Gets called at the end of an epoch."""
# Compute/store important stats
stage_stats = {"loss": stage_loss}
if stage == sb.Stage.TRAIN:
self.train_stats = stage_stats
else:
stage_stats["CER"] = self.cer_metric.summarize("error_rate")
stage_stats["WER"] = self.wer_metric.summarize("error_rate")
# Perform end-of-iteration things, like annealing, logging, etc.
if stage == sb.Stage.VALID:
old_lr_model, new_lr_model = self.hparams.lr_annealing_model(
stage_stats["loss"]
)
old_lr_wav2vec, new_lr_wav2vec = self.hparams.lr_annealing_wav2vec(
stage_stats["loss"]
)
sb.nnet.schedulers.update_learning_rate(
self.model_optimizer, new_lr_model
)
sb.nnet.schedulers.update_learning_rate(
self.wav2vec_optimizer, new_lr_wav2vec
)
self.hparams.train_logger.log_stats(
stats_meta={
"epoch": epoch,
"lr_model": old_lr_model,
"lr_wav2vec": old_lr_wav2vec,
},
train_stats=self.train_stats,
valid_stats=stage_stats,
)
self.checkpointer.save_and_keep_only(
meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
)
elif stage == sb.Stage.TEST:
self.hparams.train_logger.log_stats(
stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
test_stats=stage_stats,
)
with open(self.hparams.wer_file, "w") as w:
self.wer_metric.write_stats(w)
def init_optimizers(self):
"Initializes the wav2vec2 optimizer and model optimizer"
self.wav2vec_optimizer = self.hparams.wav2vec_opt_class(
self.modules.wav2vec2.parameters()
)
self.model_optimizer = self.hparams.model_opt_class(
self.hparams.model.parameters()
)
if self.checkpointer is not None:
self.checkpointer.add_recoverable(
"wav2vec_opt", self.wav2vec_optimizer
)
self.checkpointer.add_recoverable("modelopt", self.model_optimizer)
def dataio_prepare(hparams):
"""This function prepares the datasets to be used in the brain class.
It also defines the data processing pipeline through user-defined functions."""
data_folder = hparams["data_folder"]
train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
)
if hparams["sorting"] == "ascending":
# we sort training data to speed up training and get better results.
train_data = train_data.filtered_sorted(sort_key="duration")
        # when sorting, do not shuffle in the dataloader; otherwise the sorting is pointless
hparams["train_dataloader_opts"]["shuffle"] = False
elif hparams["sorting"] == "descending":
train_data = train_data.filtered_sorted(
sort_key="duration", reverse=True
)
        # when sorting, do not shuffle in the dataloader; otherwise the sorting is pointless
hparams["train_dataloader_opts"]["shuffle"] = False
elif hparams["sorting"] == "random":
pass
else:
raise NotImplementedError(
"sorting must be random, ascending or descending"
)
valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
)
valid_data = valid_data.filtered_sorted(sort_key="duration")
# test is separate
test_datasets = {}
for csv_file in hparams["test_csv"]:
name = Path(csv_file).stem
test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=csv_file, replacements={"data_root": data_folder}
)
test_datasets[name] = test_datasets[name].filtered_sorted(
sort_key="duration"
)
datasets = [train_data, valid_data] + [i for k, i in test_datasets.items()]
# 2. Define audio pipeline:
@sb.utils.data_pipeline.takes("wav")
@sb.utils.data_pipeline.provides("sig")
def audio_pipeline(wav):
sig = sb.dataio.dataio.read_audio(wav)
return sig
sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
label_encoder = sb.dataio.encoder.CTCTextEncoder()
# 3. Define text pipeline:
@sb.utils.data_pipeline.takes("wrd")
@sb.utils.data_pipeline.provides(
"wrd", "char_list", "tokens_list", "tokens_bos", "tokens_eos", "tokens"
)
def text_pipeline(wrd):
yield wrd
char_list = list(wrd)
yield char_list
tokens_list = label_encoder.encode_sequence(char_list)
yield tokens_list
tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
yield tokens_bos
tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
yield tokens_eos
tokens = torch.LongTensor(tokens_list)
yield tokens
sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
special_labels = {
"bos_label": hparams["bos_index"],
"eos_label": hparams["eos_index"],
"blank_label": hparams["blank_index"],
}
label_encoder.load_or_create(
path=lab_enc_file,
from_didatasets=[train_data],
output_key="char_list",
special_labels=special_labels,
sequence_input=True,
)
# 4. Set output:
sb.dataio.dataset.set_output_keys(
datasets,
["id", "sig", "wrd", "char_list", "tokens_bos", "tokens_eos", "tokens"],
)
return train_data, valid_data, test_datasets, label_encoder
if __name__ == "__main__":
# CLI:
hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
# If distributed_launch=True then
# create ddp_group with the right communication protocol
sb.utils.distributed.ddp_init_group(run_opts)
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin, overrides)
# Create experiment directory
sb.create_experiment_directory(
experiment_directory=hparams["output_folder"],
hyperparams_to_save=hparams_file,
overrides=overrides,
)
# Dataset prep (parsing Librispeech)
from librispeech_prepare import prepare_librispeech # noqa
# multi-gpu (ddp) save data preparation
run_on_main(
prepare_librispeech,
kwargs={
"data_folder": hparams["data_folder"],
"tr_splits": hparams["train_splits"],
"dev_splits": hparams["dev_splits"],
"te_splits": hparams["test_splits"],
"save_folder": hparams["output_folder"],
"merge_lst": hparams["train_splits"],
"merge_name": "train.csv",
"skip_prep": hparams["skip_prep"],
},
)
# here we create the datasets objects as well as tokenization and encoding
train_data, valid_data, test_datasets, label_encoder = dataio_prepare(
hparams
)
# Trainer initialization
asr_brain = ASR(
modules=hparams["modules"],
hparams=hparams,
run_opts=run_opts,
checkpointer=hparams["checkpointer"],
)
    # We dynamically add the tokenizer to our brain class.
# NB: This tokenizer corresponds to the one used for the LM!!
asr_brain.tokenizer = label_encoder
# Training
asr_brain.fit(
asr_brain.hparams.epoch_counter,
train_data,
valid_data,
train_loader_kwargs=hparams["train_dataloader_opts"],
valid_loader_kwargs=hparams["valid_dataloader_opts"],
)
# Testing
for k in test_datasets.keys(): # keys are test_clean, test_other etc
asr_brain.hparams.wer_file = os.path.join(
hparams["output_folder"], "wer_{}.txt".format(k)
)
asr_brain.evaluate(
test_datasets[k], test_loader_kwargs=hparams["test_dataloader_opts"]
)
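# Plain-Python sketch of what text_pipeline above yields for a single transcript.
# The toy character map and the bos/eos indices are illustrative stand-ins for the
# fitted CTCTextEncoder and the recipe's real hparams values.
import torch

toy_char2idx = {c: i for i, c in enumerate(" 'ABCDEFGHIJKLMNOPQRSTUVWXYZ", start=1)}
bos_index, eos_index = 0, 0  # placeholders, not the recipe's configured indices

wrd = "HELLO WORLD"
char_list = list(wrd)
tokens_list = [toy_char2idx[c] for c in char_list]
tokens_bos = torch.LongTensor([bos_index] + tokens_list)
tokens_eos = torch.LongTensor(tokens_list + [eos_index])
tokens = torch.LongTensor(tokens_list)
print(char_list[:5], tokens_list[:5])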
| 35.879552
| 89
| 0.640331
| 6,040
| 0.471543
| 3,584
| 0.279803
| 796
| 0.062144
| 0
| 0
| 3,243
| 0.253181
|
45284a1d25fe21c81004bcc320ecfac7a3fe05f4
| 907
|
py
|
Python
|
src/dask_awkward/tests/test_utils.py
|
douglasdavis/dask-awkward
|
e8829d32ed080d643c7e4242036ce64aee60eda6
|
[
"BSD-3-Clause"
] | 21
|
2021-09-09T19:32:30.000Z
|
2022-03-01T15:42:06.000Z
|
src/dask_awkward/tests/test_utils.py
|
douglasdavis/dask-awkward
|
e8829d32ed080d643c7e4242036ce64aee60eda6
|
[
"BSD-3-Clause"
] | 14
|
2021-09-23T16:54:10.000Z
|
2022-03-23T19:24:53.000Z
|
src/dask_awkward/tests/test_utils.py
|
douglasdavis/dask-awkward
|
e8829d32ed080d643c7e4242036ce64aee60eda6
|
[
"BSD-3-Clause"
] | 3
|
2021-09-09T19:32:32.000Z
|
2021-11-18T17:27:35.000Z
|
from __future__ import annotations
from ..utils import normalize_single_outer_inner_index
def test_normalize_single_outer_inner_index() -> None:
divisions = (0, 12, 14, 20, 23, 24)
indices = [0, 1, 2, 8, 12, 13, 14, 15, 17, 20, 21, 22]
results = [
(0, 0),
(0, 1),
(0, 2),
(0, 8),
(1, 0),
(1, 1),
(2, 0),
(2, 1),
(2, 3),
(3, 0),
(3, 1),
(3, 2),
]
for i, r in zip(indices, results):
res = normalize_single_outer_inner_index(divisions, i)
assert r == res
divisions = (0, 12) # type: ignore
indices = [0, 2, 3, 6, 8, 11]
results = [
(0, 0),
(0, 2),
(0, 3),
(0, 6),
(0, 8),
(0, 11),
]
for i, r in zip(indices, results):
res = normalize_single_outer_inner_index(divisions, i)
assert r == res
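# A minimal implementation consistent with the cases exercised above (the real
# dask_awkward.utils helper may differ): map a flat element index to a
# (partition, local index) pair given monotonically increasing divisions.
from bisect import bisect_right


def normalize_single_outer_inner_index_sketch(divisions, index):
    outer = bisect_right(divisions, index) - 1
    return outer, index - divisions[outer]


assert normalize_single_outer_inner_index_sketch((0, 12, 14, 20, 23, 24), 13) == (1, 1)
assert normalize_single_outer_inner_index_sketch((0, 12, 14, 20, 23, 24), 17) == (2, 3)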
| 22.675
| 62
| 0.46527
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 14
| 0.015436
|
45288cac480034ff3c670253791c5dd9e04dcb61
| 16,413
|
py
|
Python
|
core/client_socket.py
|
schalekamp/ibapipy
|
a9e02d604d9f4a2ad87e78089654b29305aa110d
|
[
"Apache-2.0"
] | 1
|
2020-08-13T05:45:48.000Z
|
2020-08-13T05:45:48.000Z
|
core/client_socket.py
|
schalekamp/ibapipy
|
a9e02d604d9f4a2ad87e78089654b29305aa110d
|
[
"Apache-2.0"
] | null | null | null |
core/client_socket.py
|
schalekamp/ibapipy
|
a9e02d604d9f4a2ad87e78089654b29305aa110d
|
[
"Apache-2.0"
] | null | null | null |
"""Implements the EClientSocket interface for the Interactive Brokers API."""
import threading
import ibapipy.config as config
from ibapipy.core.network_handler import NetworkHandler
class ClientSocket:
"""Provides methods for sending requests to TWS."""
def __init__(self):
"""Initialize a new instance of a ClientSocket."""
self.__listener_thread__ = None
self.__network_handler__ = NetworkHandler()
self.server_version = 0
self.tws_connection_time = ''
self.is_connected = False
def __send__(self, *args):
"""Hand off each element in args to the NetworkHandler for sending over
the network.
*args -- items to send
"""
for item in args:
self.__network_handler__.socket_out_queue.put(item, block=False)
def account_download_end(self, account_name):
pass
def cancel_calculate_implied_volatility(self, req_id):
raise NotImplementedError()
def calculate_option_price(self, req_id, contract, volatility,
under_price):
raise NotImplementedError()
def calculate_implied_volatility(self, req_id, contract, price,
under_price):
raise NotImplementedError()
def commission_report(self, report):
pass
def contract_details(self, req_id, contract):
pass
def contract_details_end(self, req_id):
pass
def cancel_calculate_option_price(self, req_id):
raise NotImplementedError()
def cancel_fundamental_data(self, req_id):
raise NotImplementedError()
def cancel_historical_data(self, req_id):
version = 1
self.__send__(config.CANCEL_HISTORICAL_DATA, version, req_id)
def cancel_mkt_data(self, req_id):
version = 1
self.__send__(config.CANCEL_MKT_DATA, version, req_id)
def cancel_mkt_depth(self, req_id):
raise NotImplementedError()
def cancel_news_bulletins(self):
raise NotImplementedError()
def cancel_order(self, req_id):
version = 1
self.__send__(config.CANCEL_ORDER, version, req_id)
def cancel_real_time_bars(self, req_id):
raise NotImplementedError()
def cancel_scanner_subscription(self, req_id):
raise NotImplementedError()
def connect(self, host=config.HOST, port=config.PORT,
client_id=config.CLIENT_ID):
"""Connect to the remote TWS.
Keyword arguments:
host -- host name or IP address of the TWS machine
port -- port number on the TWS machine
client_id -- number used to identify this client connection
"""
if self.is_connected:
return
# Connect
results = self.__network_handler__.connect(host, port, client_id)
self.server_version, self.tws_connection_time = results
self.is_connected = True
# Listen for incoming messages
self.__listener_thread__ = threading.Thread(
target=listen, args=(self, self.__network_handler__.message_queue))
self.__listener_thread__.start()
def disconnect(self):
"""Disconnect from the remote TWS."""
self.__network_handler__.disconnect()
self.is_connected = False
self.server_version = 0
self.tws_connection_time = ''
def error(self, req_id, code, message):
pass
def exercise_options(self, req_id, contract, action, quantity, account,
override):
raise NotImplementedError()
def exec_details(self, req_id, contract, execution):
pass
def exec_details_end(self, req_id):
pass
def historical_data(self, req_id, date, open, high, low, close, volume,
bar_count, wap, has_gaps):
pass
def managed_accounts(self, accounts):
pass
def next_valid_id(self, req_id):
pass
def open_order(self, req_id, contract, order):
pass
def open_order_end(self):
pass
def order_status(self, req_id, status, filled, remaining, avg_fill_price,
perm_id, parent_id, last_fill_price, client_id, why_held):
pass
def place_order(self, req_id, contract, order):
version = 35
# Intro and request ID
self.__send__(config.PLACE_ORDER, version, req_id)
# Contract fields
self.__send__(contract.con_id, contract.symbol, contract.sec_type,
contract.expiry, contract.strike, contract.right,
contract.multiplier, contract.exchange,
contract.primary_exch, contract.currency,
contract.local_symbol, contract.sec_id_type,
contract.sec_id)
# Main order fields
self.__send__(order.action, order.total_quantity, order.order_type,
order.lmt_price, order.aux_price)
# Extended order fields
self.__send__(order.tif, order.oca_group, order.account,
order.open_close, order.origin, order.order_ref,
order.transmit, order.parent_id, order.block_order,
order.sweep_to_fill, order.display_size,
order.trigger_method, order.outside_rth, order.hidden)
# Send combo legs for bag requests
if config.BAG_SEC_TYPE == contract.sec_type.upper():
raise NotImplementedError('Bag type not supported yet.')
self.__send__('') # deprecated shares_allocation field
# Everything else (broken into quasi-readble chunks)
self.__send__(order.discretionary_amt, order.good_after_time,
order.good_till_date, order.fa_group, order.fa_method,
order.fa_percentage, order.fa_profile,
order.short_sale_slot, order.designated_location)
self.__send__(order.exempt_code, order.oca_type, order.rule_80a,
order.settling_firm, order.all_or_none,
check(order.min_qty), check(order.percent_offset),
order.etrade_only, order.firm_quote_only,
check(order.nbbo_price_cap))
self.__send__(check(order.auction_strategy),
check(order.starting_price),
check(order.stock_ref_price), check(order.delta),
check(order.stock_range_lower),
check(order.stock_range_upper),
order.override_percentage_constraints,
check(order.volatility), check(order.volatility_type),
order.delta_neutral_order_type,
check(order.delta_neutral_aux_price))
if len(order.delta_neutral_order_type) > 0:
self.__send__(order.delta_neutral_con_id,
order.delta_neutral_settling_firm,
order.delta_neutral_clearing_account,
order.delta_neutral_clearing_intent)
self.__send__(order.continuous_update,
check(order.reference_price_type),
check(order.trail_stop_price),
check(order.scale_init_level_size),
check(order.scale_subs_level_size),
check(order.scale_price_increment), order.hedge_type)
if len(order.hedge_type) > 0:
self.__send__(order.hedge_param)
self.__send__(order.opt_out_smart_routing, order.clearing_account,
order.clearing_intent, order.not_held)
if contract.under_comp is not None:
raise NotImplementedError('Under comp not supported yet.')
else:
self.__send__(False)
self.__send__(order.algo_strategy)
if len(order.algo_strategy) > 0:
raise NotImplementedError('Algo strategy not supported yet.')
self.__send__(order.what_if)
def replace_fa(self, fa_data_type, xml):
raise NotImplementedError()
def req_account_updates(self, subscribe, acct_code):
version = 2
self.__send__(config.REQ_ACCOUNT_DATA, version, subscribe, acct_code)
def req_all_open_orders(self):
version = 1
self.__send__(config.REQ_ALL_OPEN_ORDERS, version)
def req_auto_open_orders(self, auto_bind):
version = 1
self.__send__(config.REQ_AUTO_OPEN_ORDERS, version, auto_bind)
def req_contract_details(self, req_id, contract):
version = 6
# Contract data message
self.__send__(config.REQ_CONTRACT_DATA, version, req_id)
# Contract fields
self.__send__(contract.con_id, contract.symbol, contract.sec_type,
contract.expiry, contract.strike, contract.right,
contract.multiplier, contract.exchange,
contract.currency, contract.local_symbol,
contract.include_expired, contract.sec_id_type,
contract.sec_id)
def req_current_time(self):
"""Returns the current system time on the server side via the
current_time() wrapper method.
"""
version = 1
self.__send__(config.REQ_CURRENT_TIME, version)
def req_executions(self, req_id, exec_filter):
version = 3
# Execution message
self.__send__(config.REQ_EXECUTIONS, version, req_id)
# Execution report filter
self.__send__(exec_filter.client_id, exec_filter.acct_code,
exec_filter.time, exec_filter.symbol,
exec_filter.sec_type, exec_filter.exchange,
exec_filter.side)
def req_fundamental_data(self, req_id, contract, report_type):
raise NotImplementedError()
def req_historical_data(self, req_id, contract, end_date_time,
duration_str, bar_size_setting, what_to_show,
use_rth, format_date):
version = 4
self.__send__(config.REQ_HISTORICAL_DATA, version, req_id)
# Contract fields
self.__send__(contract.symbol, contract.sec_type, contract.expiry,
contract.strike, contract.right, contract.multiplier,
contract.exchange, contract.primary_exch,
contract.currency, contract.local_symbol,
contract.include_expired)
# Other stuff
self.__send__(end_date_time, bar_size_setting, duration_str, use_rth,
what_to_show, format_date)
# Combo legs for bag requests
if config.BAG_SEC_TYPE == contract.sec_type.upper():
raise NotImplementedError('Bag type not supported yet.')
def req_ids(self, num_ids):
version = 1
self.__send__(config.REQ_IDS, version, num_ids)
def req_managed_accts(self):
version = 1
self.__send__(config.REQ_MANAGED_ACCTS, version)
def req_market_data_type(self, type):
raise NotImplementedError()
def req_mkt_data(self, req_id, contract, generic_ticklist='',
snapshot=False):
"""Return market data via the tick_price(), tick_size(),
tick_option_computation(), tick_generic(), tick_string() and
tick_EFP() wrapper methods.
Keyword arguments:
req_id -- unique request ID
contract -- ibapi.contract.Contract object
generic_ticklist -- comma delimited list of generic tick types
(default: '')
snapshot -- True to return a single snapshot of market data
and have the market data subscription cancel;
False, otherwise (default: False)
"""
version = 9
# Intro and request ID
self.__send__(config.REQ_MKT_DATA, version, req_id)
# Contract fields
self.__send__(contract.con_id, contract.symbol, contract.sec_type,
contract.expiry, contract.strike, contract.right,
contract.multiplier, contract.exchange,
contract.primary_exch, contract.currency,
contract.local_symbol)
if config.BAG_SEC_TYPE == contract.sec_type:
raise NotImplementedError('Bag type not supported yet.')
if contract.under_type is not None:
raise NotImplementedError('Under comp not supported yet.')
else:
self.__send__(False)
# Remaining parameters
self.__send__(generic_ticklist, snapshot)
def req_mkt_depth(self, req_id, contract, num_rows):
raise NotImplementedError()
def req_news_bulletins(self, all_msgs):
raise NotImplementedError()
def req_open_orders(self):
version = 1
self.__send__(config.REQ_OPEN_ORDERS, version)
def req_real_time_bars(self, req_id, contract, bar_size, what_to_show,
use_rth):
raise NotImplementedError()
def req_scanner_parameters(self):
raise NotImplementedError()
def req_scanner_subscription(self, req_id, subscription):
raise NotImplementedError()
def request_fa(self, fa_data_type):
raise NotImplementedError()
def set_server_log_level(self, log_level=2):
"""Set the logging level of the server.
Keyword arguments:
log_level -- level of log entry detail used by the server (TWS)
when processing API requests. Valid values include:
1 = SYSTEM; 2 = ERROR; 3 = WARNING; 4 = INFORMATION;
5 = DETAIL (default: 2)
"""
version = 1
self.__send__(config.SET_SERVER_LOGLEVEL, version, log_level)
def tick_price(self, req_id, tick_type, price, can_auto_execute):
pass
def tick_size(self, req_id, tick_type, size):
pass
def update_account_time(self, timestamp):
pass
def update_account_value(self, key, value, currency, account_name):
pass
def update_portfolio(self, contract, position, market_price, market_value,
average_cost, unrealized_pnl, realized_pnl,
account_name):
pass
def update_unknown(self, *args):
"""Callback for updated known data that does not match any existing
callbacks.
"""
pass
def check(value):
"""Check to see if the specified value is equal to JAVA_INT_MAX or
JAVA_DOUBLE_MAX and return None if such is the case; otherwise return
'value'.
Interactive Brokers will set certain integers and floats to be their
maximum possible value in Java.
    This is used as a sentinel value that should be replaced with an EOL when
transmitting. Here, we check the value and, if it is a max, return None
which the codec will interpret as an EOL.
Keyword arguments:
value -- integer or floating-point value to check
"""
if is_java_int_max(value) or is_java_double_max(value):
return None
else:
return value
def is_java_double_max(number):
"""Returns True if the specified number is equal to the maximum value of
a Double in Java; False, otherwise.
Keyword arguments:
number -- number to check
"""
return type(number) == float and number == config.JAVA_DOUBLE_MAX
def is_java_int_max(number):
"""Returns True if the specified number is equal to the maximum value of
an Integer in Java; False, otherwise.
Keyword arguments:
number -- number to check
"""
return type(number) == int and number == config.JAVA_INT_MAX
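# A tiny illustrative round-trip for check(), assuming config defines
# JAVA_INT_MAX and JAVA_DOUBLE_MAX as referenced above; ordinary values pass
# through unchanged while the Java sentinels collapse to None (encoded as EOL).
def _demo_check():
    assert check(config.JAVA_INT_MAX) is None
    assert check(config.JAVA_DOUBLE_MAX) is None
    assert check(42) == 42
    assert check(2.5) == 2.5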
def listen(client, in_queue):
"""Listen to messages in the specified incoming queue and call the
appropriate methods in the client.
Keyword arguments:
client -- client
in_queue -- incoming message queue
"""
# Loop until we receive a stop message in the incoming queue
while True:
method, parms = in_queue.get()
if method == 'stop':
return
elif method is None:
continue
elif hasattr(client, method):
getattr(client, method)(*parms)
else:
parms = list(parms)
parms.insert(0, method)
getattr(client, 'update_unknown')(*parms)
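# Minimal sketch of the dispatch loop above: a hypothetical client object with a
# tick_price() method, fed from a standard queue. Unknown method names fall through
# to update_unknown(), and a ('stop', ...) message ends the loop.
def _demo_listen():
    from queue import Queue

    class _EchoClient(object):
        def tick_price(self, req_id, tick_type, price, can_auto_execute):
            print('tick_price', req_id, tick_type, price, can_auto_execute)

        def update_unknown(self, *args):
            print('unknown message:', args)

    q = Queue()
    q.put(('tick_price', (1, 4, 101.25, False)))
    q.put(('no_such_method', ('payload',)))
    q.put(('stop', ()))
    listen(_EchoClient(), q)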
| 36.718121
| 79
| 0.628709
| 14,305
| 0.871565
| 0
| 0
| 0
| 0
| 0
| 0
| 3,616
| 0.220313
|
4529524b72ee8b6f655a486a5542d22fd69041be
| 2,234
|
py
|
Python
|
common.py
|
shawnau/DataScienceBowl2018
|
3c6f0f26dd86b71aad55fca52314e6432d0b3a82
|
[
"MIT"
] | null | null | null |
common.py
|
shawnau/DataScienceBowl2018
|
3c6f0f26dd86b71aad55fca52314e6432d0b3a82
|
[
"MIT"
] | null | null | null |
common.py
|
shawnau/DataScienceBowl2018
|
3c6f0f26dd86b71aad55fca52314e6432d0b3a82
|
[
"MIT"
] | null | null | null |
import os
from datetime import datetime
PROJECT_PATH = os.path.dirname(os.path.realpath(__file__))
IDENTIFIER = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
#numerical libs
import numpy as np
import random
import matplotlib
matplotlib.use('TkAgg')
import cv2
# torch libs
import torch
from torch.utils.data.sampler import *
import torchvision.transforms as transforms
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
from torch.utils.data.sampler import *
from torch.nn import init
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
from torch.nn.parallel.data_parallel import data_parallel
# std libs
import collections
import copy
import numbers
import math
import inspect
import shutil
from timeit import default_timer as timer
import csv
import pandas as pd
import pickle
import glob
import sys
from distutils.dir_util import copy_tree
import time
import matplotlib.pyplot as plt
import skimage
import skimage.color
import skimage.morphology
from scipy import ndimage
print('@%s: ' % os.path.basename(__file__))
if 1:
SEED = int(time.time())
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
print ('\tset random seed')
print ('\t\tSEED=%d'%SEED)
if 1:
# uses the inbuilt cudnn auto-tuner to find the fastest convolution algorithms
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
print('\tset cuda environment')
print('\t\ttorch.__version__ =', torch.__version__)
print('\t\ttorch.version.cuda =', torch.version.cuda)
print('\t\ttorch.backends.cudnn.version() =', torch.backends.cudnn.version())
try:
print('\t\tos[\'CUDA_VISIBLE_DEVICES\'] =',os.environ['CUDA_VISIBLE_DEVICES'])
NUM_CUDA_DEVICES = len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))
except Exception:
print('\t\tos[\'CUDA_VISIBLE_DEVICES\'] =','None')
NUM_CUDA_DEVICES = 1
print('\t\ttorch.cuda.device_count() =', torch.cuda.device_count())
print('\t\ttorch.cuda.current_device() =', torch.cuda.current_device())
print('')
| 26.282353
| 90
| 0.723814
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 530
| 0.237243
|
452af70b9e1700ac30a7f0af42ce0f50e0812342
| 1,460
|
py
|
Python
|
NumberExtractor.py
|
Dikshit15/SolveSudoku
|
7a84e64c9b708c730179f65c8cce8a360ff96d7f
|
[
"MIT"
] | 54
|
2019-01-03T20:05:26.000Z
|
2022-02-22T12:46:47.000Z
|
NumberExtractor.py
|
Dikshit15/SolveSudoku
|
7a84e64c9b708c730179f65c8cce8a360ff96d7f
|
[
"MIT"
] | 1
|
2021-05-18T07:05:28.000Z
|
2021-05-20T04:38:30.000Z
|
NumberExtractor.py
|
Dikshit15/SolveSudoku
|
7a84e64c9b708c730179f65c8cce8a360ff96d7f
|
[
"MIT"
] | 29
|
2019-02-28T13:54:45.000Z
|
2021-12-17T03:22:33.000Z
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
from keras.models import model_from_json
# Load the saved model
json_file = open('models/model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("models/model.h5")
print("Loaded saved model from disk.")
# evaluate loaded model on test data
def identify_number(image):
image_resize = cv2.resize(image, (28,28)) # For plt.imshow
image_resize_2 = image_resize.reshape(1,1,28,28) # For input to model.predict_classes
# cv2.imshow('number', image_test_1)
loaded_model_pred = loaded_model.predict_classes(image_resize_2 , verbose = 0)
# print('Prediction of loaded_model: {}'.format(loaded_model_pred[0]))
return loaded_model_pred[0]
def extract_number(sudoku):
sudoku = cv2.resize(sudoku, (450,450))
# cv2.imshow('sudoku', sudoku)
# split sudoku
grid = np.zeros([9,9])
for i in range(9):
for j in range(9):
# image = sudoku[i*50+3:(i+1)*50-3,j*50+3:(j+1)*50-3]
image = sudoku[i*50:(i+1)*50,j*50:(j+1)*50]
# filename = "images/sudoku/file_%d_%d.jpg"%(i, j)
# cv2.imwrite(filename, image)
if image.sum() > 25000:
grid[i][j] = identify_number(image)
else:
grid[i][j] = 0
return grid.astype(int)
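# Hypothetical usage sketch: 'images/sudoku.jpg' is a placeholder path, not part
# of this repository. extract_number() expects a single-channel (grayscale) image
# of the warped sudoku grid; it resizes it to 450x450, splits it into 81 cells of
# 50x50 pixels and classifies every cell whose pixel sum suggests it is non-empty.
if __name__ == "__main__":
    sudoku_img = cv2.imread('images/sudoku.jpg', cv2.IMREAD_GRAYSCALE)
    if sudoku_img is not None:
        print(extract_number(sudoku_img))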
| 31.73913
| 92
| 0.656164
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 534
| 0.365753
|
452b051b1e4ced509f2b30c049b1e85fd074fa94
| 38,889
|
py
|
Python
|
code/Rts.py
|
andreschristen/RTs
|
d3dceb7d2f518222cfaa940b4ecfc9c7f63a25a9
|
[
"CC0-1.0"
] | null | null | null |
code/Rts.py
|
andreschristen/RTs
|
d3dceb7d2f518222cfaa940b4ecfc9c7f63a25a9
|
[
"CC0-1.0"
] | null | null | null |
code/Rts.py
|
andreschristen/RTs
|
d3dceb7d2f518222cfaa940b4ecfc9c7f63a25a9
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 12 18:28:54 2020
@author: Dr J A Christen (CIMAT-CONACYT, Mexico) jac at cimat.mx
Instantaneous reproduction numbers calculations.
Rts_P, Implementation of Cori et al (2013)
Rts_AR, new filtering version using an autoregressive linear model of Capistrán, Capella and Christen (2020):
https://arxiv.org/abs/2012.02168, 05DIC2021
01FEB2021: Some buggs were corrected to avoid error when too low counts are used and for prediction when g=1.
Go directly to __main__ for examples.
"""
import os
from datetime import date, timedelta
from pickle import load, dump
from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones
from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt
from numpy import sum as np_sum
from scipy.stats import erlang, gamma, nbinom, uniform, beta
from scipy.stats import t as t_student
from matplotlib.pyplot import subplots, rcParams, close
from matplotlib.dates import drange
from pytwalk import pytwalk
from plotfrozen import PlotFrozenDist
def Rts_P( data, tau=7, n=30, IP_dist=erlang( a=3, scale=8/3),\
Rt_pr_a=5, Rt_pr_b=5/5, q=[10,25,50,75,90]):
"""Calculate Rt as in:
Anne Cori, Neil M. Ferguson, Christophe Fraser, Simon Cauchemez,
A New Framework and Software to Estimate Time-Varying Reproduction Numbers
During Epidemics, American Journal of Epidemiology,
Volume 178, Issue 9, 1 November 2013, Pages 1505–1512,
https://doi.org/10.1093/aje/kwt133
data: array with case incidence.
tau: Use a window tau (default 7) to calculate R_{t,\tau}'s.
n: calculate n R_{t,\tau}'s to the past n days (default 30).
IP_dist: 'frozen' infectiousness profile distribution,
default erlang( a=3, scale=8/3), chosen for covid19.
Only the cdf is needed, ie. IP_dist.cdf(i), to calculate w_s.
Rt_pr_a=5, Rt_pr_b=5/5, parameters for the gamma prior for R_t.
    q=[10,25,50,75,90], quantiles to calculate from the posterior dist. of R_t.
    If q is a single integer, return a simulation of the Rts of size q, for each Rt
Returns: a (len(q), n) array with quantiles of the R_{t,\tau}'s.
"""
if isinstance( q, list): ## Return a list of quantiles
q = array(q)/100
rt = zeros(( len(q), n))
simulate = False
    else: ## If q is a single integer, return a simulation of the Rts of size q, for each Rt
if q == 2: # return a and b of post gamma
rt = zeros(( q, n))
else:
rt = zeros(( q, n))
simulate = True
m = len(data)
w = diff(IP_dist.cdf( arange( 0, m+1)))
w /= sum(w)
w = flip(w)
for t in range(max(m-n,0), m):
S1 = 0.0
S2 = 0.0
if sum(data[:t]) <= 10:# Only for more than 10 counts
continue
for k in range(tau):
I = data[:(t-k)] ## window of reports
S2 += data[(t-k)]
S1 += sum(I * w[(m-(t-k)):]) #\Gamma_k
#print( (Rt_pr_a+S2) * (1/(S1 + 1/Rt_pr_b)), (Rt_pr_a+S2), 1/(S1 + 1/Rt_pr_b))
if simulate:
if q == 2: #Return Rt_pr_a+S2, scale=1/(S1 + 1/Rt_pr_b)
rt[:,t-(m-n)] = Rt_pr_a+S2, 1/(S1 + 1/Rt_pr_b)
else:
rt[:,t-(m-n)] = gamma.rvs( Rt_pr_a+S2, scale=1/(S1 + 1/Rt_pr_b), size=q)
else:
rt[:,t-(m-n)] = gamma.ppf( q, Rt_pr_a+S2, scale=1/(S1 + 1/Rt_pr_b))
return rt
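# Minimal usage sketch of Rts_P on synthetic incidence data (not part of the
# original analysis): the smoothly growing series below is an assumption purely
# for illustration, and the default covid19 profile erlang(a=3, scale=8/3) is kept.
def _example_Rts_P():
    cases = 10.0 * exp(0.05 * arange(60))   # 60 days of synthetic, growing incidence
    rts = Rts_P(cases, tau=7, n=30, q=[10, 25, 50, 75, 90])
    # rts has shape (5, 30): one row per requested quantile, one column per day.
    return rts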
def PlotRts_P( data_fnam, init_date, trim=0,\
tau=7, n=30, IP_dist=erlang( a=3, scale=8/3), Rt_pr_a=5, Rt_pr_b=5/5,\
q=[10,25,50,75,90], csv_fnam=None, color='blue', median_color='red', alpha=0.25, ax=None):
"""Makes a board with the Rt evolution for the past n days (n=30).
All parameters are passed to function Rts_P.
    csv_fnam is an optional file name to save the Rts info.
    ax is an Axes handle for the plot; if None, one is created and returned.
"""
if type(data_fnam) == str:
data = loadtxt(data_fnam)
else:
data = data_fnam.copy()
data_fnam = " "
if trim < 0:
data = data[:trim,:]
rts = Rts_P(data=data[:,1],\
tau=tau, n=n, IP_dist=IP_dist, q=q,\
Rt_pr_a=Rt_pr_a, Rt_pr_b=Rt_pr_b)
m = data.shape[0]
last_date = init_date + timedelta(m)
if ax == None:
fig, ax = subplots(figsize=( n/3, 3.5) )
for i in range(n):
h = rts[:,i]
ax.bar( x=i, bottom=h[0], height=h[4]-h[0], width=0.9, color=color, alpha=alpha)
ax.bar( x=i, bottom=h[1], height=h[3]-h[1], width=0.9, color=color, alpha=alpha)
ax.hlines( y=h[2], xmin=i-0.9/2, xmax=i+0.9/2, color=median_color )
ax.set_title(data_fnam + r", $R_t$, dist. posterior.")
ax.set_xlabel('')
ax.set_xticks(range(n))
ax.set_xticklabels([(last_date-timedelta(n-i)).strftime("%d.%m") for i in range(n)], ha='right')
ax.tick_params( which='major', axis='x', labelsize=10, labelrotation=30)
ax.axhline(y=1, color='green')
ax.axhline(y=2, color='red')
ax.axhline(y=3, color='darkred')
ax.set_ylim((0.5,3.5))
ax.set_yticks(arange( 0.4, 3.4, step=0.2))
ax.tick_params( which='major', axis='y', labelsize=10)
ax.grid(color='grey', linestyle='--', linewidth=0.5)
#fig.tight_layout()
if csv_fnam != None:
days = drange( last_date-timedelta(n), last_date, timedelta(days=1))
### To save all the data for the plot,
### columns: year, month, day, q_05, q_25, q_50, q_75, q_95
### 0 1 2 3 4 5 6 7
sv = -ones(( len(days), 3+len(q)))
for i,day in enumerate(days):
d = date.fromordinal(int(day))
sv[ i, 0] = d.year
sv[ i, 1] = d.month
sv[ i, 2] = d.day
sv[ i, 3:] = rts[:,i]
q_str = ', '.join(["q_%02d" % (qunt,) for qunt in q])
savetxt( csv_fnam, sv, delimiter=', ', fmt='%.1f', header="year, month, day, " + q_str, comments='')
return ax
"""
def loglikelihood_NB( x, mu, psi):
mu_psi = mu/psi
return -gammaln(x + 1) + gammaln(x + psi) - gammaln(psi)\
-(x + psi)*log(1 + mu_psi) + x*log(mu_psi)
"""
def loglikelihood_NB( x, mu, psi):
return beta.logcdf(x, mu*psi, (1-mu)*psi)
def Rts_NB( data, n=30, tau=7, psi=10, IP_dist=erlang( a=3, scale=8/3),\
Rt_pr_a=5, Rt_pr_b=5/5, q=[10,25,50,75,90]):
"""Calculate Rt Using a Negative Binomial instead of Poisson.
Here one needs to fix psi = 1/theta (= 10).
Extension of (not documented):
Anne Cori, Neil M. Ferguson, Christophe Fraser, Simon Cauchemez,
A New Framework and Software to Estimate Time-Varying Reproduction Numbers
During Epidemics, American Journal of Epidemiology,
Volume 178, Issue 9, 1 November 2013, Pages 1505–1512,
https://doi.org/10.1093/aje/kwt133
data: array with case incidence.
tau: Use a window tau (default 7) to calculate R_{t,\tau}'s.
n: calculate n R_{t,\tau}'s to the past n days (default 30).
IP_dist: 'frozen' infectiousness profile distribution,
default erlang( a=3, scale=8/3), chosen for covid19.
Only the cdf is needed, ie. IP_dist.cdf(i), to calculate w_s.
Rt_pr_a=5, Rt_pr_b=5/5, parameters for the gamma prior for R_t.
    q=[10,25,50,75,90], quantiles to calculate from the posterior dist. of R_t.
    If q is a single integer, return a simulation of the Rts, for each Rt
Returns: a (len(q), n) array with quantiles of the R_{t,\tau}'s.
"""
if isinstance( q, list): ## Return a list of quantiles
q = array(q)/100
quantiles = zeros(len(q))
rt = zeros(( len(q), n))
simulate = False
    else: ## If q is a single integer, return a simulation of the Rts of size q, for each Rt
rt = zeros(( q, n))
simulate = True
m = len(data)
w = diff(IP_dist.cdf( arange( 0, m+1)))
w /= sum(w)
w = flip(w)
R = linspace( 0.1, 3.0, num=100)
DeltaR = R[1]-R[0]
#omega = 1
#theta = THETA_MEAN #0.01
#psi = 1/theta
#fig, axs = subplots(nrows=5, ncols=1, figsize=( 5, 5))
for t in range(max(m-n,0), m):
#S1 = 0.0
log_likelihood_I = zeros(R.shape) ## Same size of array for values for R
if sum(data[:t]) <= 10:# Only for more than 10 counts
continue
for k in range(tau):
I = data[:(t-k)] ## window of reports
Gammak = I @ w[(m-(t-k)):] #\Gamma_k
#S1 += Gammak
I_k = data[(t-k)]
log_likelihood_I += loglikelihood_NB( I_k, R*Gammak, psi)
log_post = log_likelihood_I + gamma.logpdf( R, Rt_pr_a, scale=1/Rt_pr_b)
pdf = exp(log_post)
pdf /= sum(pdf)*DeltaR
cdf = cumsum(pdf)*DeltaR
if simulate:
u = uniform.rvs()
rt[:,t-(m-n)] = R[where(cdf < u)[0][-1]]
else:
for i,qua in enumerate(q):
quantiles[i] = R[where(cdf < qua)[0][-1]]
rt[:,t-(m-n)] = quantiles
return rt
def PlotRts_NB( data_fnam, init_date, psi, trim=0,\
tau=7, n=30, IP_dist=erlang( a=3, scale=8/3), Rt_pr_a=5, Rt_pr_b=5/5,\
q=[10,25,50,75,90], csv_fnam=None, color='blue', ax=None):
"""Makes a board with the Rt evolution for the past n days (n=30).
All parameters are passed to function Rts_NB.
    csv_fnam is an optional file name to save the Rts info.
    ax is an Axes handle for the plot; if None, one is created and returned.
"""
if type(data_fnam) == str:
data = loadtxt(data_fnam)
else:
data = data_fnam.copy()
data_fnam = " "
if trim < 0:
data = data[:trim,:]
rts = Rts_NB(data=data[:,1],\
tau=tau, psi=psi, n=n, IP_dist=IP_dist, q=q,\
Rt_pr_a=Rt_pr_a, Rt_pr_b=Rt_pr_b)
m = data.shape[0]
last_date = init_date + timedelta(m)
if ax == None:
fig, ax = subplots(figsize=( n/3, 3.5) )
for i in range(n):
h = rts[:,i]
ax.bar( x=i, bottom=h[0], height=h[4]-h[0], width=0.9, color=color, alpha=0.25)
ax.bar( x=i, bottom=h[1], height=h[3]-h[1], width=0.9, color=color, alpha=0.25)
ax.hlines( y=h[2], xmin=i-0.9/2, xmax=i+0.9/2, color='red' )
ax.set_title(data_fnam + r", $R_t$, dist. posterior.")
ax.set_xlabel('')
ax.set_xticks(range(n))
ax.set_xticklabels([(last_date-timedelta(n-i)).strftime("%d.%m") for i in range(n)], ha='right')
ax.tick_params( which='major', axis='x', labelsize=10, labelrotation=30)
ax.axhline(y=1, color='green')
ax.axhline(y=2, color='red')
ax.axhline(y=3, color='darkred')
ax.set_ylim((0.5,3.5))
ax.set_yticks(arange( 0.4, 3.4, step=0.2))
ax.tick_params( which='major', axis='y', labelsize=10)
ax.grid(color='grey', linestyle='--', linewidth=0.5)
#fig.tight_layout()
if csv_fnam != None:
days = drange( last_date-timedelta(n), last_date, timedelta(days=1))
### To save all the data for the plot,
### columns: year, month, day, q_05, q_25, q_50, q_75, q_95
### 0 1 2 3 4 5 6 7
sv = -ones(( len(days), 3+len(q)))
for i,day in enumerate(days):
d = date.fromordinal(int(day))
sv[ i, 0] = d.year
sv[ i, 1] = d.month
sv[ i, 2] = d.day
sv[ i, 3:] = rts[:,i]
q_str = ', '.join(["q_%02d" % (qunt,) for qunt in q])
savetxt( csv_fnam, sv, delimiter=', ', fmt='%.1f', header="year, month, day, " + q_str, comments='')
return ax
class Rts_NB_psi:
def __init__( self, data_fnam, init_date, trim=0, tau=7, n=30, IP_dist=erlang( a=3, scale=8/3),\
Rt_pr_a=5, Rt_pr_b=5/5, q=[10,25,50,75,90], workdir="./../"):
"""Calculate Rt Using a Negative Binomial with unknown psi = 1/theta.
Here one needs to run the MCMC first, RunMCMC.
See example below.
Extension of (not documented):
Anne Cori, Neil M. Ferguson, Christophe Fraser, Simon Cauchemez,
A New Framework and Software to Estimate Time-Varying Reproduction Numbers
During Epidemics, American Journal of Epidemiology,
Volume 178, Issue 9, 1 November 2013, Pages 1505–1512,
https://doi.org/10.1093/aje/kwt133
data: array with case incidence.
tau: Use a window tau (default 7) to calculate R_{t,\tau}'s.
n: calculate n R_{t,\tau}'s to the past n days (default 30).
IP_dist: 'frozen' infectiousness profile distribution,
default erlang( a=3, scale=8/3), chosen for covid19.
Only the cdf is needed, ie. IP_dist.cdf(i), to calculate w_s.
Rt_pr_a=5, Rt_pr_b=5/5, parameters for the gamma prior for R_t.
    q=[10,25,50,75,90], quantiles to calculate from the posterior dist. of R_t.
    If q is a single integer, return a simulation of the Rts of size q, for each Rt
"""
self.data_fnam = data_fnam
data = loadtxt(workdir + 'data/' + data_fnam + '.csv')
self.workdir = workdir
if trim < 0:
self.data = data[:trim,1]
else:
self.data = data[:,1]
#convolve
self.init_date = init_date
self.m = len(data)
self.IP_dist = IP_dist
self.w = diff(IP_dist.cdf( arange( 0, self.m+1)))
self.w /= sum(self.w)
self.w = flip(self.w)
self.n = min(self.m, n)
self.tau = tau
self.Rt_pr_a = Rt_pr_a
self.Rt_pr_b = Rt_pr_b
self.prior = gamma( self.Rt_pr_a, scale=1/self.Rt_pr_b)
#omega = 1
self.psi = 100
self.psi_prior = gamma( 3, scale=self.psi/3)
for t in range( self.m - self.n, self.m):
if sum(self.data[:t]) <= 10:# Rt calculated only for more than 10 counts
print("Not more than 10 counts for day %d" % (-t,))
self.n -= 1
self.Gammak = zeros(self.m) ##We calculate all gammas previously:
for s in range(self.m):
self.Gammak[s] = self.data[:s] @ self.w[(self.m-s):] #\Gamma_k
if os.path.isfile(workdir + 'output/' + self.data_fnam + '_rts.pkl'): # samples file exists
print("File with rts and psi samples exists, loading rts ...", end=' ')
self.rts = load(open(workdir + 'output/' + self.data_fnam + '_rts.pkl', 'rb'))
self.psi_samples = load(open(workdir + 'output/' + self.data_fnam + '_rts_psi.pkl', 'rb'))
else:
print("File with rts and psi samples does not exist, run RunMCMC first.")
def logpost( self, Rs, psi):
log_post = 0.0
for t in range( self.m - self.n, self.m):
log_post += self.prior.logpdf( Rs[t-(self.m - self.n)]) +\
                np_sum(loglikelihood_NB( self.data[(t-self.tau+1):t], Rs[t-(self.m - self.n)]*self.Gammak[(t-self.tau+1):t], psi))
#log_post += sum([loglikelihood_NB( self.data[s], Rs[t-(self.m - self.n)]*self.Gammak[s], psi) for s in range( t-self.tau+1, t)])
"""
for k in range(self.tau):
s = t-k
#I = self.data[:s] ## window of reports
#Gammak = self.data[:s] @ self.w[(self.m-s):] #\Gamma_k
#I_k = self.data[s]
log_post += loglikelihood_NB( self.data[s], Rs[t-(self.m - self.n)]*self.Gammak[s], psi)
log_post += self.prior.logpdf( Rs[t-(self.m - self.n)])
"""
return log_post
def sim_init(self):
"""Simulate initial values from the Rts_NB and the prior for psi."""
# Shake the Rts_NB simulation to avoid repeated values
#shake = Rts_NB( self.data*self.Z, tau=self.tau, n=self.n, IP_dist=self.IP_dist,\
# Rt_pr_a=self.Rt_pr_a, Rt_pr_b=self.Rt_pr_b, q=1) + 0.001*uniform.rvs(size=self.n)
shake = ones(self.n) + 0.001*uniform.rvs(size=self.n)
return append( shake, self.psi_prior.rvs(size=1))
        #Simulate initial values from the prior.
#return append(self.prior.rvs(size=self.n),self.psi_prior.rvs(size=1))
def support(self, x):
rt = all( (0.1 <= x[:-1]) * (x[:-1] <= 40) ) #Rt's
rt &= (x[-1] > 0.0)
return rt
def RunMCMC( self, T, burnin=5000, q=[10,25,50,75,90]):
"""Run twalk MCMC, T = number of iterations.
        burnin, thinning = IAT.
"""
#self.twalk = pytwalk(n = self.n+1, U=lambda x: -self.logpost( x[:-1], self.psi), Supp =self.support) #Ignore x[-1] = psi
self.twalk = pytwalk(n = self.n+1, U=lambda x: -self.logpost( x[:-1], x[-1]) - self.prior.logpdf(x[-1]), Supp =self.support)
self.twalk.Run( T=T, x0 = self.sim_init(), xp0 = self.sim_init())
self.burnin = burnin
self.Rts(q=q)
dump( self.rts, open(self.workdir + 'output/' + self.data_fnam + '_rts.pkl', 'wb'))
self.psi_samples = self.twalk.Output[self.burnin:, self.n]
dump( self.psi_samples, open(self.workdir + 'output/' + self.data_fnam + '_rts_psi.pkl', 'wb'))
def PlotPostPsi( self, ax=None):
if ax == None:
fig, ax = subplots(figsize=( 5,5) )
PlotFrozenDist(self.psi_prior, color='green', ax=ax)
ax.hist( self.psi_samples, density=True)
ax.set_xlabel(r'$\psi$')
def PlotPostRt( self, i, ax=None):
if ax == None:
fig, ax = subplots(figsize=( 5,5) )
#PlotFrozenDist(self.psi_prior, color='green', ax=ax)
ax.hist( self.twalk.Output[self.burnin:,i], density=True)
ax.set_xlabel(r'$R_%d$' % (i))
def Rts( self, q=[10,25,50,75,90]):
if isinstance( q, list): ## Return a list of quantiles
q = array(q)/100
rts = zeros(( len(q), self.n))
simulate = False
        else: ## If q is a single integer, return a simulation of the Rts of size q, for each Rt
rts = zeros(( q, self.n))
simulate = True
self.q = q
self.simulate = simulate
#fig, axs = subplots(nrows=5, ncols=1, figsize=( 5, 5))
for i in range(self.n):
if simulate:
#u = uniform.rvs()
rts[:,i] = self.twalk.Output[self.burnin+0,i]
else:
rts[:,i] = quantile( self.twalk.Output[self.burnin:,i], q=q)
self.rts = rts
return rts
def PlotRts( self, color='blue', median_color='red', csv_fnam=None, ax=None):
"""Makes a board with the Rt evolution.
csv_fnam is an optional file name to save the Rts info.
        ax is an Axes handle for the plot; if None, one is created and returned.
"""
#self.rts already been produced after running RunMCMC
last_date = self.init_date + timedelta(self.m)
if ax == None:
fig, ax = subplots(figsize=( self.n/3, 3.5) )
for i in range(self.n):
h = self.rts[:,i]
ax.bar( x=i, bottom=h[0], height=h[4]-h[0], width=0.9, color=color, alpha=0.25)
ax.bar( x=i, bottom=h[1], height=h[3]-h[1], width=0.9, color=color, alpha=0.25)
ax.hlines( y=h[2], xmin=i-0.9/2, xmax=i+0.9/2, color=median_color )
ax.set_title(self.data_fnam + r", $R_t$, dist. posterior.")
ax.set_xlabel('')
ax.set_xticks(range(self.n))
ax.set_xticklabels([(last_date-timedelta(self.n-i)).strftime("%d.%m") for i in range(self.n)], ha='right')
ax.tick_params( which='major', axis='x', labelsize=10, labelrotation=30)
ax.axhline(y=1, color='green')
ax.axhline(y=2, color='red')
ax.axhline(y=3, color='darkred')
ax.set_ylim((0.5,3.5))
ax.set_yticks(arange( 0.4, 3.4, step=0.2))
ax.tick_params( which='major', axis='y', labelsize=10)
ax.grid(color='grey', linestyle='--', linewidth=0.5)
#fig.tight_layout()
if csv_fnam != None:
days = drange( last_date-timedelta(self.n), last_date, timedelta(days=1))
### To save all the data for the plot,
### columns: year, month, day, q_05, q_25, q_50, q_75, q_95
### 0 1 2 3 4 5 6 7
sv = -ones(( len(days), 3+len(self.q)))
for i,day in enumerate(days):
d = date.fromordinal(int(day))
sv[ i, 0] = d.year
sv[ i, 1] = d.month
sv[ i, 2] = d.day
sv[ i, 3:] = self.rts[:,i]
q_str = ', '.join(["q_%02d" % (qunt,) for qunt in self.q])
savetxt( csv_fnam, sv, delimiter=', ', fmt='%.1f', header="year, month, day, " + q_str, comments='')
return ax
class Rts_AR:
def __init__( self, data_fnam, init_date, trim=0,\
IP_dist=erlang( a=3, scale=8/3), tau=7, m0=0, c_a_0=1, w_a_t=2/7, n0=2, s0=3,\
n=30, pred=0, workdir="./../"):
"""Calculate Rt Using a log autoregressive time series on the logs.
See: ...
See example below.
Parameters:
data_fnam: file name = workdir + 'data/' + data_fnam + '.csv'
or array with case incidence.
    init_date: initial date for the first datum, e.g. date(2020, 2, 27).
trim: (negative) cut trim days at the end of data.
    tau: number of days to learn from the past (default 7, see paper).
n: calculate n R_t's to the past n days (default 30).
IP_dist: 'frozen' infectiousness profile distribution,
default erlang( a=3, scale=8/3), chosen for covid19.
Only the cdf is needed, ie. IP_dist.cdf(i), to calculate w_s.
    m0=0, c_a_0=1, w_a_t=2/7, n0=2, s0=3: prior hyperparameters
    m_0, c_0^*, w_t^*, n_0, s_0 (see paper).
"""
self.data_fnam = data_fnam
data = loadtxt(workdir + 'data/' + data_fnam + '.csv')
self.workdir = workdir
if trim < 0:
self.data = data[:trim,1]
else:
self.data = data[:,1]
self.init_date = init_date
self.m = len(self.data) ##Data size
### Calculate the serial time distribution
self.IP_dist = IP_dist
self.w = diff(IP_dist.cdf( arange( 0, self.m+1)))
self.w /= sum(self.w)
self.w = flip(self.w)
### Calculation range
        self.shift = 5*tau #Number of days to start calculation before the first Rt.
self.n = min(self.m, n) #Number of Rt's to calculate, from the present into the past.
self.N = n+self.shift #Total range (into the past) for calculation
#If self.N is larger than the whole data set
if self.N > (self.m-1):
self.n -= self.N - (self.m-1)#Reduce self.n accordingly
            self.N = self.n+self.shift #Recompute the total range with the reduced self.n
if self.n < 0:
raise ValueError("ERROR: Not enough data to calculate Rts: 5*tau > %d (data size)" % (self.m,))
print("Not enough data to calculate Rts: 5*tau + n > %d (data size)" % (self.m,))
print("Reducing to n=%d" % (self.n,))
for t in range(self.n):
if self.data[self.m-(self.n - t)] >= 10:
break
else:
self.n -= 1 #Reduce n if the counts have not reached 10
print("Incidence below 10, reducing n to %d." % (self.n,))
self.N = self.n+self.shift
### Setting prior parameters
self.delta = 1-(1/tau)
self.tau = tau
self.pred = pred
self.g = 1 #exp(-2/tau)
self.m0 = m0
self.c_a_0 = c_a_0
self.w_a_t = w_a_t
self.n0 = n0
self.s0 = s0
"""
### Calculation range
for t in range( self.m - self.N, self.m):
if sum(self.data[:t]) <= 10:# Rt calculated only for more than 10 counts
print("Not more than 10 counts for day %d" % (-t,))
self.n -= 1
self.N = min(self.m, n+self.shift)
"""
### We calculate all gammas previously:
self.Gammak = zeros(self.m)
for s in range(self.m):
self.Gammak[s] = self.data[:s] @ self.w[(self.m-s):] #\Gamma_k
### Calculate the log data:
        ### We add 1e-6 for convenience, since very early data may be zero.
        ### This makes no difference at the end.
self.y = log(self.data + 1e-6) - log(self.Gammak + 1e-6)
def sim_data( self, R, I0):
pass
def CalculateRts( self, q=[10,25,50,75,90]):
"""Calculate the posterior distribution and the Rt's quantiles.
        q=[10,25,50,75,90], quantiles to calculate from the posterior dist. of R_t.
        If q is a single integer, return a simulation of the Rts of size q, for each Rt.
If q=2, save the mean and dispersion parameter of the posterior for Rt
"""
if isinstance( q, list): ## Return a list of quantiles
q = array(q)/100
self.rts = zeros(( len(q), self.n))
self.rts_pred = zeros((len(q), self.pred))
simulate = False
        else: ## If q is a single integer, return a simulation of the Rts of size q, for each Rt
self.rts = zeros(( q, self.n))
self.rts_pred = zeros(( q, self.pred))
simulate = True
self.q = q
self.simulate = simulate
        ### nt, at, rt, qt, st, mt, ct # hyperparameters
### 0 1 2 3 4 5 6
self.hiper = zeros(( self.N+1, 7))
        ### nt, at, rt, qt, st, mt, ct # hyperparameters
self.hiper[0,:] = self.n0, -1, -1, -1, self.s0, self.m0, self.s0*self.c_a_0
for t in range( self.N ):
r_a_t = self.g**2 * self.hiper[t,6] + self.w_a_t #r^*_t
At = r_a_t/(r_a_t + 1)
self.hiper[t+1,0] = self.delta*self.hiper[t,0] + 1 #nt
self.hiper[t+1,1] = self.g * self.hiper[t,5] #at
et = self.y[self.m-(self.N - t)] - self.hiper[t+1,1]
self.hiper[t+1,2] = self.hiper[t,4]*r_a_t #rt
self.hiper[t+1,3] = self.hiper[t,4]*(r_a_t + 1) #qt
# st:
self.hiper[t+1,4] = self.delta*(self.hiper[t,0]/self.hiper[t+1,0])*self.hiper[t,4] +\
self.hiper[t,4]/self.hiper[t+1,0] * (et**2/self.hiper[t+1,3])
self.hiper[t+1,5] = self.hiper[t+1,1] + At*et #mt
#ct
self.hiper[t+1,6] = (self.hiper[t+1,4]/self.hiper[t,4]) * (self.hiper[t+1,2]- self.hiper[t+1,3]*At**2)
if t >= self.shift:
if self.simulate:
self.rts[:,t-self.shift] = exp(t_student.rvs( size=self.q, df=self.hiper[t+1,0], loc=self.hiper[t+1,5], scale=sqrt(self.hiper[t+1,6]) ))
else:
self.rts[:,t-self.shift] = exp(t_student.ppf( q=self.q, df=self.hiper[t+1,0], loc=self.hiper[t+1,5], scale=sqrt(self.hiper[t+1,6]) ))
if self.pred>0:
t = self.N
self.pred_hiper = zeros(( self.pred, 2)) # a_t^k and r_t^k
for k in range(self.pred):
self.pred_hiper[k,0] = self.g**(k+1) * self.hiper[t,5] #a_t^k
if self.g == 1:
self.pred_hiper[k,1] = self.g**(2*(k+1)) * self.hiper[t,6] + self.w_a_t * (k+1) #r_t^k
else:
self.pred_hiper[k,1] = self.g**(2*(k+1)) * self.hiper[t,6] + self.w_a_t * ((1-self.g**(2*(k+1)))/(1-self.g**2)) #r_t^k
if self.simulate:
self.rts_pred[:,k] = exp(t_student.rvs( size=self.q, df=self.hiper[t,0], loc=self.pred_hiper[k,0], scale=sqrt(self.pred_hiper[k,1]) ))
else:
self.rts_pred[:,k] = exp(t_student.ppf( q=self.q, df=self.hiper[t,0], loc=self.pred_hiper[k,0], scale=sqrt(self.pred_hiper[k,1]) ))
def PlotPostRt( self, i, ax=None, color='black'):
"""Plot the i-th Rt posterior distribution."""
if ax == None:
fig, ax = subplots(figsize=( 5,5) )
t = i+self.tau
y = linspace( 0.01, 4, num=500)
### Transformed pdf using the Jacobian y^{-1}
pdf = (y**-1) * t_student.pdf( log(y), df=self.hiper[t+1,0], loc=self.hiper[t+1,5], scale=sqrt(self.hiper[t+1,6]) )
ax.plot( y, pdf, '-', color=color)
ax.set_ylabel("Density")
ax.set_xlabel(r'$R_{%d}$' % (i))
def PlotRts( self, color='blue', median_color='red', x_jump=1, plot_area=[0.4,2.2], alpha=0.25, csv_fnam=None, ax=None):
"""Makes a board with the Rt evolution.
csv_fnam: optional file name to save the Rts info: workdir/csv/csv_fnam.csv
        ax: Axes handle for the plot; if None, one is created and returned.
x_jump: put ticks every x_jump days.
plot_area: ([0.4,2.2]), interval with the y-axis (Rt values) plot area.
"""
#self.rts already been produced after running CalculateRts
last_date = self.init_date + timedelta(self.m)
if ax == None:
fig, ax = subplots(figsize=( self.n/3, 3.5) )
### Plot the Rt's posterior quantiles
for i in range(self.n):
h = self.rts[:,i]
ax.bar( x=i, bottom=h[0], height=h[4]-h[0], width=0.9, color=color, alpha=0.25)
ax.bar( x=i, bottom=h[1], height=h[3]-h[1], width=0.9, color=color, alpha=0.25)
ax.hlines( y=h[2], xmin=i-0.9/2, xmax=i+0.9/2, color=median_color )
### Plot the observed Rt's
ax.plot( exp(self.y[self.m-self.n:]), '-', color='grey')
### Plot the predictions
if self.pred >0:
for k in range(self.pred):
h = self.rts_pred[:,k]
i=self.n+k
ax.bar( x=i, bottom=h[0], height=h[4]-h[0], width=0.9, color='light'+color, alpha=alpha)
ax.bar( x=i, bottom=h[1], height=h[3]-h[1], width=0.9, color='light'+color, alpha=alpha)
ax.hlines( y=h[2], xmin=i-0.9/2, xmax=i+0.9/2, color=median_color )
ax.set_title(self.data_fnam + r", $R_t$, dist. posterior.")
ax.set_xlabel('')
ax.set_xticks(range(0,self.n,x_jump))
ax.set_xticklabels([(last_date-timedelta(self.n-i)).strftime("%d.%m") for i in range(0,self.n,x_jump)], ha='right')
ax.tick_params( which='major', axis='x', labelsize=10, labelrotation=30)
ax.axhline(y=1, color='green')
ax.axhline(y=2, color='red')
ax.axhline(y=3, color='darkred')
ax.set_ylim(plot_area)
ax.set_yticks(arange( plot_area[0], plot_area[1], step=0.2))
ax.tick_params( which='major', axis='y', labelsize=10)
ax.grid(color='grey', linestyle='--', linewidth=0.5)
#fig.tight_layout()
if csv_fnam != None:
days = drange( last_date-timedelta(self.n), last_date, timedelta(days=1))
### To save all the data for the plot,
### columns: year, month, day, q_05, q_25, q_50, q_75, q_95
### 0 1 2 3 4 5 6 7
sv = -ones(( len(days), 3+len(self.q)))
for i,day in enumerate(days):
d = date.fromordinal(int(day))
sv[ i, 0] = d.year
sv[ i, 1] = d.month
sv[ i, 2] = d.day
sv[ i, 3:] = self.rts[:,i]
q_str = ', '.join(["q_%02d" % (qunt,) for qunt in self.q])
savetxt( self.workdir + "csv/" + csv_fnam + ".csv", sv, delimiter=', ', fmt='%.1f', header="year, month, day, " + q_str, comments='')
return ax
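# Minimal, hypothetical end-to-end sketch of Rts_AR on synthetic data. Because the
# constructor reads workdir + 'data/' + data_fnam + '.csv', the sketch first writes
# a temporary two-column (deaths, incidence) file; the file name, directory layout
# and the smooth synthetic growth below are assumptions for illustration only.
def _example_Rts_AR(workdir="./../"):
    from numpy import column_stack
    cases = 10.0 * exp(0.05 * arange(120))       # synthetic incidence
    deaths = 0.02 * cases                        # dummy first column, unused here
    os.makedirs(workdir + 'data/', exist_ok=True)
    savetxt(workdir + 'data/synthetic_example.csv', column_stack((deaths, cases)))
    tst = Rts_AR('synthetic_example', init_date=date(2020, 2, 27), n=30, workdir=workdir)
    tst.CalculateRts()        # fills tst.rts with the posterior quantiles
    return tst.rts            # shape (5, 30) for the default q=[10,25,50,75,90]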
##### Dictionary with general information for the metro zone or region to be analyzed:
##### id Name not used Population init date
ZMs = { "9-01": ["Mexico city", 2, 21.942666e6, date(2020, 2, 27)],\
"15-02": ["Toluca", 1, 2.377828e6, date(2020, 3, 7)],\
"31-01": ["Mérida", 2, 1.237697e6, date(2020, 3, 7)],\
"17-02": ["Cuernavaca", 1, 1.059521e6, date(2020, 3, 2)],\
"12-01": ["Acapulco", 2, 0.919726e6, date(2020, 3, 11)],\
"25-01": ["Culiacán", 2, 0.962871e6, date(2020, 3, 1)],\
"23-01": ["Cancun", 2, 0.867768e6, date(2020, 3, 1)]}
### The corresponding data files have two columns separated by space, deaths and incidence.
### Each row is one day.
### The file for clave="9-01" (Mexico city) is: ../data/clave.csv etc.
if __name__=='__main__':
rcParams.update({'font.size': 14})
close('all')
#Plot the imputed serial time distribution for covid: erlang( a=3, scale=8/3 )
fig, ax = subplots( num=30, figsize=( 4.5, 3.5))
PlotFrozenDist( erlang( a=3, scale=8/3 ), ax=ax)
    ### Plot the erlang( a=5, scale=9/5 ) alternative
PlotFrozenDist( erlang( a=5, scale=9/5 ), color='grey', ax=ax)
ax.set_xlim((0,20))
ax.grid(color='grey', linestyle='--', linewidth=0.5)
ax.set_ylabel(r"Density")
ax.set_xlabel("days")
ax.set_title("")
fig.tight_layout()
fig.savefig("../figs/Covid19_SerialTimeDist.png")
    ### Plot the Rt's estimation. Only Mérida, '31-01', and Mexico city, '9-01', are in the paper
claves = ['15-02', '17-02', '23-01', '25-01', '12-01', "31-01", '9-01']
n=60 ## Number of days to calculate the Rt's
trim=0 ## Number of days to cut data from the end, negative, e.g. -10, cut 10 days
x_jump = 7 ## For ploting, put ticks every x_jump days.
for i,clave in enumerate(claves):
print(clave)
### Open an instance of the Rts_AR class:
tst = Rts_AR( clave, init_date=ZMs[clave][3]+timedelta(days=4), trim=trim, pred=5, n=n)
        tst.CalculateRts() # Must be called before plotting the Rt's
### Plot the Rts:
fig, ax = subplots( num=i+1, figsize=( 8, 3.5))
### Plot Cori et al (2013) Poisson model version:
PlotRts_P( '../data/%s.csv' % (clave,), init_date=ZMs[clave][3]+timedelta(days=4),\
n=tst.n, trim=trim, ax=ax, color='green', alpha=0.5, median_color='black')
### Plot ours:
tst.PlotRts( ax=ax, x_jump=x_jump, plot_area=[0.4,2.2], csv_fnam=clave)
ax.set_title("")
ax.set_ylabel(r"$R_t$")
ax.set_xlabel("")
ax.set_title(ZMs[clave][0] + ", Mexico")
fig.tight_layout()
fig.savefig("../figs/%s_Rts_AR.png" % (clave,))
if clave == '9-01':
m_max = tst.m
ax.set_xlabel("day.month, 2020")
fig.tight_layout()
fig.savefig("../figs/%s_Rts_AR.png" % (clave,))
### Figure with Cori et al (2013) posterior distributions of '31-01' and '9-01'
fig1, ax1 = subplots( num=20, nrows=1, ncols=2, figsize=( 10, 3.5))
color = [ "red", "black", "darkred"]
for i,clave in enumerate([ '31-01', '9-01']):
tst = Rts_AR( clave, init_date=ZMs[clave][3]+timedelta(days=4), trim=trim, pred=0, n=n)
a, b = Rts_P( tst.data, tau=7, n=30, q=2)
ax1[0].plot( arange(m_max-tst.m, m_max, 1), tst.data, '.-', color=color[i], label=ZMs[clave][0])
PlotFrozenDist( gamma( a[-1], scale=b[-1]), ax=ax1[1], color=color[i])
last_date = tst.init_date + timedelta(tst.m)
ax1[0].set_xlabel('')
ax1[0].set_xticks(range(0,tst.m,x_jump*2))
ax1[0].set_xticklabels([(last_date-timedelta(tst.m-i)).strftime("%d.%m") for i in range(0,tst.m,x_jump*2)], ha='right')
ax1[0].tick_params( which='major', axis='x', labelsize=10, labelrotation=30)
ax1[0].set_xlabel("day.month, 2020")
#ax1[0].set_ylim((0,1.1*max(tst.data[-n:])))
ax1[0].grid(color='grey', linestyle='--', linewidth=0.5)
ax1[0].set_ylabel(r"Incidence")
ax1[0].legend(loc=0, shadow = False)
### Add '31-01', with incidence multiplied by 10
clave = '31-01'
tst = Rts_AR( clave, init_date=ZMs[clave][3]+timedelta(days=4), trim=trim, pred=0, n=n)
a, b = Rts_P( tst.data*10, tau=7, n=30, q=2)
ax1[0].plot( arange(m_max-tst.m, m_max, 1), tst.data*10, '.-', color=color[2])
PlotFrozenDist( gamma( a[-1], scale=b[-1]), ax=ax1[1], color=color[2])
ax1[1].set_xticks(arange(0.8,1.4,0.2))
ax1[1].set_xlabel(r"$R_t$, " + (last_date-timedelta(1)).strftime("%d.%m.%Y"))
ax1[1].grid(color='grey', linestyle='--', linewidth=0.5)
fig1.tight_layout()
fig1.savefig("../figs/Rts_Compare.png")
### Comparison of results changing the serial time distribution
fig, ax = subplots( num=31, figsize=( 4.5, 3.5))
tst = Rts_AR( clave, init_date=ZMs[clave][3]+timedelta(days=4), trim=trim, pred=0, n=n)
tst.CalculateRts()
tst.PlotPostRt( i=n, ax=ax)
#### Here we change the serial time: Any other positive density could be used.
tst = Rts_AR( clave, IP_dist=erlang( a=5, scale=9/5), init_date=ZMs[clave][3]+timedelta(days=4), trim=trim, pred=0, n=n)
tst.CalculateRts()
tst.PlotPostRt( i=n, ax=ax, color='grey')
ax.set_xlim((0.5,2.5))
ax.set_xlabel(r"$R_t$, " + (last_date-timedelta(1)).strftime("%d.%m.%Y"))
ax.grid(color='grey', linestyle='--', linewidth=0.5)
ax.set_title("")
fig.tight_layout()
fig.savefig("../figs/%s_Rts_Compare.png" % (clave,))
"""
################# Example of use of Rts_NB_psi and Rts_NB (not documented)
T=100000
for clave in claves: #Instance of the object and run the MCMC
tst = Rts_NB_psi( clave, init_date=ZMs[clave][3], n=n)
if T > 0:
tst.RunMCMC(T=T)
### Plot the Rts
close(1)
fig, ax = subplots( num=1, figsize=( 10, 3.5) )
tst.PlotRts( ax=ax)
ax.set_title( ZMs[clave][0] + r", $R_t$ NB_psi.")
fig.savefig("../figs/%s_Rts_NB_psi.png" % (clave,))
### Plot the posterior distribution of \psi
close(3)
fig, ax = subplots( num=3, figsize=( 5,5) )
tst.PlotPostPsi(ax=ax)
ax.set_title(ZMs[clave][0])
fig.savefig("../figs/%s_Rts_NB_Post_psi.png" % clave)
    ### Fix \psi with the posterior expected value and use that for PlotRts_NB
close(2)
fig, ax = subplots( num=2, figsize=( 10, 3.5) )
psi = mean(tst.psi_samples) #Posterior mean of psi
PlotRts_NB( '../data/%s.csv' % (clave,), init_date=ZMs[clave][3],\
n=n, psi=psi, ax=ax)
ax.set_title( ZMs[clave][0] + r", $R_t$ NB, fixed $\psi$.")
fig.savefig("../figs/%s_Rts.png" % (clave,))
"""
| 44.906467
| 156
| 0.550618
| 20,219
| 0.519795
| 0
| 0
| 0
| 0
| 0
| 0
| 15,343
| 0.394442
|
452ce291eab1e58321278df273620d4a3c795783
| 678
|
py
|
Python
|
zombieclusters.py
|
tnkteja/notthisagain
|
85e2b2cbea1298a052986e9dfe5e73d022b537f3
|
[
"MIT"
] | null | null | null |
zombieclusters.py
|
tnkteja/notthisagain
|
85e2b2cbea1298a052986e9dfe5e73d022b537f3
|
[
"MIT"
] | null | null | null |
zombieclusters.py
|
tnkteja/notthisagain
|
85e2b2cbea1298a052986e9dfe5e73d022b537f3
|
[
"MIT"
] | null | null | null |
class cluster(object):
    def __init__(self, members=()):
        self.s = set(members)

    def merge(self, other):
        # Merge the membership sets in place and return the combined cluster.
        self.s |= other.s
        return self


class clusterManager(object):
    def __init__(self, clusters=None):
        self.c = clusters if clusters is not None else {}

    def merge(self, i, j):
        merged = self.c[i].merge(self.c[j])
        # Re-point every member of both clusters at the merged object so that
        # later merges (and count) see a single shared cluster.
        for k in merged.s:
            self.c[k] = merged

    def count(self):
        return len(set(self.c.values()))


def zombieCluster(zombies):
    cm = clusterManager(clusters={i: cluster(members=[i]) for i in range(len(zombies))})
    for i, row in enumerate(zombies):
        for j, column in enumerate(row):
            if column == '1':
                cm.merge(i, j)
    return cm.count()
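# Minimal usage sketch: a 3x3 symmetric adjacency matrix given as '0'/'1' strings,
# the input format zombieCluster() expects. Nodes 0 and 1 are linked and node 2 is
# isolated, so two clusters are reported.
if __name__ == "__main__":
    zombies = ["110",
               "110",
               "001"]
    print(zombieCluster(zombies))  # -> 2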
| 26.076923
| 86
| 0.59292
| 399
| 0.588496
| 0
| 0
| 0
| 0
| 0
| 0
| 3
| 0.004425
|
452dfafcf95365869f17107edc7e9285e32b7078
| 2,989
|
py
|
Python
|
CondTools/Ecal/python/EcalO2O_laser_online_cfg.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 6
|
2017-09-08T14:12:56.000Z
|
2022-03-09T23:57:01.000Z
|
CondTools/Ecal/python/EcalO2O_laser_online_cfg.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 545
|
2017-09-19T17:10:19.000Z
|
2022-03-07T16:55:27.000Z
|
CondTools/Ecal/python/EcalO2O_laser_online_cfg.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 14
|
2017-10-04T09:47:21.000Z
|
2019-10-23T18:04:45.000Z
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("ProcessOne")
process.load("CondCore.DBCommon.CondDBCommon_cfi")
process.CondDBCommon.DBParameters.authenticationPath = '/nfshome0/popcondev/conddb'
#
# Choose the output database
#
process.CondDBCommon.connect = 'oracle://cms_orcon_prod/CMS_COND_42X_ECAL_LASP'
#process.CondDBCommon.connect = 'sqlite_file:DB.db'
process.MessageLogger = cms.Service("MessageLogger",
debugModules = cms.untracked.vstring('*'),
destinations = cms.untracked.vstring('cout')
)
process.source = cms.Source("EmptyIOVSource",
firstValue = cms.uint64(1),
lastValue = cms.uint64(1),
timetype = cms.string('runnumber'),
interval = cms.uint64(1)
)
process.PoolDBESSource = cms.ESSource("PoolDBESSource",
process.CondDBCommon,
timetype = cms.untracked.string('timestamp'),
toGet = cms.VPSet(cms.PSet(
record = cms.string('EcalLaserAPDPNRatiosRcd'),
tag = cms.string('EcalLaserAPDPNRatios_last')
))
)
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
process.CondDBCommon,
logconnect = cms.untracked.string('sqlite_file:DBLog.db'),
timetype = cms.untracked.string('timestamp'),
toPut = cms.VPSet(cms.PSet(
record = cms.string('EcalLaserAPDPNRatiosRcd'),
tag = cms.string('EcalLaserAPDPNRatios_last')
))
)
#
# Be sure to comment the following line while testing
#
#process.PoolDBOutputService.logconnect = cms.untracked.string('oracle://cms_orcon_prod/CMS_COND_31X_POPCONLOG')
process.Test1 = cms.EDAnalyzer("ExTestEcalLaserAnalyzer",
SinceAppendMode = cms.bool(True),
record = cms.string('EcalLaserAPDPNRatiosRcd'),
loggingOn = cms.untracked.bool(True),
Source = cms.PSet(
# maxtime is mandatory
# it can be expressed either as an absolute time with format YYYY-MM-DD HH24:MI:SS
# or as a relative time w.r.t. now, using -N, where N is expressed in units
# of hours
# maxtime = cms.string("-40"),
maxtime = cms.string("2012-12-12 23:59:59"),
sequences = cms.string("16"),
OnlineDBUser = cms.string('CMS_ECAL_LASER_COND'),
# debug must be False for production
debug = cms.bool(False),
# if fake is True, no insertion in the db is performed
fake = cms.bool(True),
OnlineDBPassword = cms.string('0r4cms_3c4l_2011'),
OnlineDBSID = cms.string('CMS_OMDS_LB')
)
)
process.p = cms.Path(process.Test1)
| 41.513889
| 112
| 0.577451
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,047
| 0.350284
|
452e242fef5c444f6a84742a55e2adf53a8f64d3
| 9,907
|
py
|
Python
|
algofi/v1/staking.py
|
zhengxunWu3/algofi-py-sdk
|
8388d71d55eae583ac3579286b5f870aa3db2913
|
[
"MIT"
] | null | null | null |
algofi/v1/staking.py
|
zhengxunWu3/algofi-py-sdk
|
8388d71d55eae583ac3579286b5f870aa3db2913
|
[
"MIT"
] | null | null | null |
algofi/v1/staking.py
|
zhengxunWu3/algofi-py-sdk
|
8388d71d55eae583ac3579286b5f870aa3db2913
|
[
"MIT"
] | null | null | null |
from algosdk import logic
from algosdk.future.transaction import ApplicationOptInTxn, AssetOptInTxn, ApplicationNoOpTxn, PaymentTxn, AssetTransferTxn
from ..contract_strings import algofi_manager_strings as manager_strings
from .prepend import get_init_txns
from ..utils import TransactionGroup, Transactions, randint, int_to_bytes
OPT_IN_MIN_BALANCE=0.65
def prepare_staking_contract_optin_transactions(manager_app_id, market_app_id, sender, storage_address, suggested_params):
"""Returns a :class:`TransactionGroup` object representing a staking contract opt in
group transaction. The sender and storage account opt in to the staking application
and the storage account is rekeyed to the manager account address, rendering it
unable to be transacted against by the sender and therefore immutable.
:param manager_app_id: id of the manager application
:type manager_app_id: int
    :param market_app_id: id of the staking market application
    :type market_app_id: int
:param sender: account address for the sender
:type sender: string
:param storage_address: address of the storage account
:type storage_address: string
:param suggested_params: suggested transaction params
:type suggested_params: :class:`algosdk.future.transaction.SuggestedParams` object
    :return: :class:`TransactionGroup` object representing a staking contract opt in group transaction
:rtype: :class:`TransactionGroup`
"""
txn_payment = PaymentTxn(
sender=sender,
sp=suggested_params,
receiver=storage_address,
amt=int(OPT_IN_MIN_BALANCE*1e6)
)
txn_market = ApplicationOptInTxn(
sender=storage_address,
sp=suggested_params,
index=market_app_id
)
txn_user_opt_in_manager = ApplicationOptInTxn(
sender=sender,
sp=suggested_params,
index=manager_app_id
)
app_address = logic.get_application_address(manager_app_id)
txn_storage_opt_in_manager = ApplicationOptInTxn(
sender=storage_address,
sp=suggested_params,
index=manager_app_id,
rekey_to=app_address
)
txn_group = TransactionGroup([txn_payment, txn_market, txn_user_opt_in_manager, txn_storage_opt_in_manager])
return txn_group
def prepare_stake_transactions(sender, suggested_params, storage_account, amount, manager_app_id, market_app_id, market_address, oracle_app_id, asset_id=None):
"""Returns a :class:`TransactionGroup` object representing a stake
transaction against the algofi protocol. The sender sends assets to the
staking account and is credited with a stake.
:param sender: account address for the sender
:type sender: string
:param suggested_params: suggested transaction params
:type suggested_params: :class:`algosdk.future.transaction.SuggestedParams` object
:param storage_account: storage account address for sender
:type storage_account: string
:param amount: amount of asset to supply for minting collateral
:type amount: int
:param manager_app_id: id of the manager application
:type manager_app_id: int
:param market_app_id: id of the asset market application
:type market_app_id: int
:param market_address: account address for the market application
:type market_address: string
:param oracle_app_id: id of the asset market application
:type oracle_app_id: int
:param asset_id: asset id of the asset being supplied, defaults to None (algo)
:type asset_id: int, optional
:return: :class:`TransactionGroup` object representing a mint to collateral group transaction
:rtype: :class:`TransactionGroup`
"""
supported_oracle_app_ids = [oracle_app_id]
supported_market_app_ids = [market_app_id]
prefix_transactions = get_init_txns(
transaction_type=Transactions.MINT_TO_COLLATERAL,
sender=sender,
suggested_params=suggested_params,
manager_app_id=manager_app_id,
supported_market_app_ids=supported_market_app_ids,
supported_oracle_app_ids=supported_oracle_app_ids,
storage_account=storage_account
)
txn0 = ApplicationNoOpTxn(
sender=sender,
sp=suggested_params,
index=manager_app_id,
app_args=[manager_strings.mint_to_collateral.encode()],
)
txn1 = ApplicationNoOpTxn(
sender=sender,
sp=suggested_params,
index=market_app_id,
app_args=[manager_strings.mint_to_collateral.encode()],
foreign_apps=[manager_app_id],
accounts=[storage_account]
)
if asset_id:
txn2 = AssetTransferTxn(
sender=sender,
sp=suggested_params,
receiver=market_address,
amt=amount,
index=asset_id
)
else:
txn2 = PaymentTxn(
sender=sender,
sp=suggested_params,
receiver=market_address,
amt=amount
)
txn_group = TransactionGroup(prefix_transactions + [txn0, txn1, txn2])
return txn_group
def prepare_unstake_transactions(sender, suggested_params, storage_account, amount, manager_app_id, market_app_id, oracle_app_id, asset_id=None):
"""Returns a :class:`TransactionGroup` object representing a remove stake
group transaction against the algofi protocol. The sender requests to remove stake
    from a staking account and, if successful, the stake is removed.
:param sender: account address for the sender
:type sender: string
:param suggested_params: suggested transaction params
:type suggested_params: :class:`algosdk.future.transaction.SuggestedParams` object
:param storage_account: storage account address for sender
:type storage_account: string
:param amount: amount of collateral to remove from the market
:type amount: int
:param asset_id: asset id of the asset underlying the collateral
:type asset_id: int
:param manager_app_id: id of the manager application
:type manager_app_id: int
:param market_app_id: id of the market application of the collateral
:type market_app_id: int
:param oracle_app_id: id of the oracle application of the collateral
:type oracle_app_id: int
:return: :class:`TransactionGroup` object representing a remove collateral underlying group transaction
:rtype: :class:`TransactionGroup`
"""
supported_market_app_ids = [market_app_id]
supported_oracle_app_ids = [oracle_app_id]
prefix_transactions = get_init_txns(
transaction_type=Transactions.REMOVE_COLLATERAL_UNDERLYING,
sender=sender,
suggested_params=suggested_params,
manager_app_id=manager_app_id,
supported_market_app_ids=supported_market_app_ids,
supported_oracle_app_ids=supported_oracle_app_ids,
storage_account=storage_account
)
txn0 = ApplicationNoOpTxn(
sender=sender,
sp=suggested_params,
index=manager_app_id,
app_args=[manager_strings.remove_collateral_underlying.encode(), int_to_bytes(amount)]
)
if asset_id:
txn1 = ApplicationNoOpTxn(
sender=sender,
sp=suggested_params,
index=market_app_id,
app_args=[manager_strings.remove_collateral_underlying.encode()],
foreign_apps=[manager_app_id],
foreign_assets=[asset_id],
accounts=[storage_account]
)
else:
txn1 = ApplicationNoOpTxn(
sender=sender,
sp=suggested_params,
index=market_app_id,
app_args=[manager_strings.remove_collateral_underlying.encode()],
foreign_apps=[manager_app_id],
accounts=[storage_account]
)
txn_group = TransactionGroup(prefix_transactions + [txn0, txn1])
return txn_group
def prepare_claim_staking_rewards_transactions(sender, suggested_params, storage_account, manager_app_id, market_app_id, oracle_app_id, foreign_assets):
"""Returns a :class:`TransactionGroup` object representing a claim rewards
    group transaction against the algofi protocol. The sender requests to claim
    accrued rewards from the manager account on behalf of their storage account.
:param sender: account address for the sender
:type sender: string
:param suggested_params: suggested transaction params
:type suggested_params: :class:`algosdk.future.transaction.SuggestedParams` object
:param storage_account: storage account address for sender
:type storage_account: string
:param manager_app_id: id of the manager application
:type manager_app_id: int
:param market_app_id: id of the market application of the collateral
:type market_app_id: int
:param oracle_app_id: id of the oracle application
:type oracle_app_id: int
:param foreign_assets: list of rewards assets in the staking contract
:type foreign_assets: list
:return: :class:`TransactionGroup` object representing a claim rewards transaction
:rtype: :class:`TransactionGroup`
"""
supported_market_app_ids = [market_app_id]
supported_oracle_app_ids = [oracle_app_id]
prefix_transactions = get_init_txns(
transaction_type=Transactions.CLAIM_REWARDS,
sender=sender,
suggested_params=suggested_params,
manager_app_id=manager_app_id,
supported_market_app_ids=supported_market_app_ids,
supported_oracle_app_ids=supported_oracle_app_ids,
storage_account=storage_account
)
txn0 = ApplicationNoOpTxn(
sender=sender,
sp=suggested_params,
index=manager_app_id,
app_args=[manager_strings.claim_rewards.encode()],
accounts=[storage_account],
foreign_assets=foreign_assets
)
txn_group = TransactionGroup(prefix_transactions + [txn0])
return txn_group
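# Minimal, hypothetical sketch of how the helpers above might be combined. All ids,
# addresses and the algod endpoint are placeholders (assumptions), and signing plus
# submitting the returned TransactionGroup via the SDK utilities is omitted here.
def _example_prepare_stake(algod_token, algod_address, sender, storage_account,
                           manager_app_id, market_app_id, market_address,
                           oracle_app_id, amount, asset_id=None):
    from algosdk.v2client.algod import AlgodClient
    client = AlgodClient(algod_token, algod_address)
    params = client.suggested_params()
    group = prepare_stake_transactions(
        sender=sender,
        suggested_params=params,
        storage_account=storage_account,
        amount=amount,
        manager_app_id=manager_app_id,
        market_app_id=market_app_id,
        market_address=market_address,
        oracle_app_id=oracle_app_id,
        asset_id=asset_id,
    )
    return group  # sign and submit with the SDK's TransactionGroup utilities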
| 43.262009
| 159
| 0.731402
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,531
| 0.457353
|
452e279a6193abc88461babe810211a3f8d434ff
| 1,661
|
py
|
Python
|
main.py
|
kirantambe/koinex-status-ticker
|
487028a00605912e9fee97f4b29f260a2ab4f66f
|
[
"MIT"
] | null | null | null |
main.py
|
kirantambe/koinex-status-ticker
|
487028a00605912e9fee97f4b29f260a2ab4f66f
|
[
"MIT"
] | 1
|
2021-06-01T21:56:55.000Z
|
2021-06-01T21:56:55.000Z
|
main.py
|
kirantambe/koinex-status-ticker
|
487028a00605912e9fee97f4b29f260a2ab4f66f
|
[
"MIT"
] | 1
|
2018-01-16T03:51:09.000Z
|
2018-01-16T03:51:09.000Z
|
import rumps
import requests
import json
API_URL = 'https://koinex.in/api/ticker'
UPDATE_INTERVAL = 60
CURRENCIES = {
'Bitcoin': 'BTC',
'Ethereum': 'ETH',
'Ripple': 'XRP',
'Litecoin': 'LTC',
'Bitcoin Cash': 'BCH',
}
class KoinexStatusBarApp(rumps.App):
def __init__(self):
super(KoinexStatusBarApp, self).__init__("Koinex")
self.currencies = CURRENCIES.keys()
self.menu = CURRENCIES.keys()
self.enabled = ['Bitcoin', 'Ripple']
self.prices = {}
# Initialize click handlers
for item in self.menu:
rumps.clicked(item)(self.toggle_currency)
# Add check to menu items which are enabled
for item in self.enabled:
self.menu[item].state = 1
# Add separator
self.menu.add(None)
@rumps.timer(UPDATE_INTERVAL)
def update(self, sender):
response = requests.get(API_URL)
title = ''
if response.status_code == 200:
data = json.loads(response.content)
self.prices = data.get('prices', {})
for currency in self.enabled:
short = CURRENCIES.get(currency)
title += u'{} \u20B9 {} | '.format(short, self.prices.get(short))
self.title = title[:-3] # Last 3 characters will be ' | '
def toggle_currency(self, menuitem):
currency = menuitem.title
if currency in self.enabled:
self.enabled.remove(currency)
menuitem.state = 0
else:
self.enabled.append(currency)
menuitem.state = 1
self.update(None)
if __name__ == "__main__":
KoinexStatusBarApp().run()
| 26.790323
| 77
| 0.587598
| 1,360
| 0.818784
| 0
| 0
| 484
| 0.291391
| 0
| 0
| 287
| 0.172787
|
452f2babff6fef2a136326734c3cab066e39250a
| 900
|
py
|
Python
|
app.py
|
migueljunior/docker
|
09effa41a2207294ec9ab8bd34b166c862edea72
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
migueljunior/docker
|
09effa41a2207294ec9ab8bd34b166c862edea72
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
migueljunior/docker
|
09effa41a2207294ec9ab8bd34b166c862edea72
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask, jsonify
from time import strftime
from socket import gethostname
from socket import gethostbyname
app = Flask(__name__)
@app.route('/')
def welcome():
message = 'Please add /docker after the port 8888'
return message
@app.route('/docker')
def docker():
currentDate = strftime('%d/%m/%y')
currentTime = strftime('%H:%M:%S')
currentHostname = gethostname()
currentIPNumber = gethostbyname(gethostname())
date = f' The current date is: {currentDate}'
time = f' The current time is: {currentTime}'
hostname = f' The hostname is: {currentHostname}'
ipNumber = f' The IP number is: {currentIPNumber}'
docker = {
'Date' : date,
'Time' : time,
'Hostname' : hostname,
'IP Number' : ipNumber
}
return jsonify(docker)
if __name__ == '__main__':
app.run(debug=True , port=8888 , host='0.0.0.0')
| 31.034483
| 54
| 0.646667
| 0
| 0
| 0
| 0
| 669
| 0.743333
| 0
| 0
| 277
| 0.307778
|
452f9d740231a724ca7b77510cb8a67453b7e2aa
| 8,605
|
py
|
Python
|
knn_and_regression/src/free_response.py
|
WallabyLester/Machine_Learning_From_Scratch
|
6042cf421f5de2db61fb570b7c4de64dc03453f3
|
[
"MIT"
] | null | null | null |
knn_and_regression/src/free_response.py
|
WallabyLester/Machine_Learning_From_Scratch
|
6042cf421f5de2db61fb570b7c4de64dc03453f3
|
[
"MIT"
] | null | null | null |
knn_and_regression/src/free_response.py
|
WallabyLester/Machine_Learning_From_Scratch
|
6042cf421f5de2db61fb570b7c4de64dc03453f3
|
[
"MIT"
] | null | null | null |
import numpy as np
from numpy.core.fromnumeric import mean
from numpy.core.numeric import True_
from numpy.testing._private.utils import rand
from polynomial_regression import PolynomialRegression
from generate_regression_data import generate_regression_data
from metrics import mean_squared_error # mse
from math import log # use if scale too large to see error
from k_nearest_neighbor import KNearestNeighbor
try:
import matplotlib.pyplot as plt
except:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
if __name__ == '__main__':
# Number 7, split A
degree = 4
N = 100
x, y = generate_regression_data(degree, N, amount_of_noise=0.1)
rand_sampl = np.random.choice(N, N, replace=False) # do not reselect numbers
x_training, y_training = x[rand_sampl[:10]], y[rand_sampl[:10]]
x_test, y_test = x[rand_sampl[10:]], y[rand_sampl[10:]]
plots = []
mse_training = []
mse_test = []
# to 9 degrees
for i in range(9):
poly = PolynomialRegression(i)
poly.fit(x_training, y_training)
poly.visualize(x_training, y_training, path=f"../plots_N7_splitA/training_plot_degree_{i}",
title=f"Training Plot Degree {i}")
# test will be red
poly.visualize(x_test, y_test, path=f"../plots_N7_splitA/test_plot_degree_{i}",
title=f"Test Plot Degree {i}", color='r')
y_hat_training = poly.predict(x_training) # predicted value
mse_training.append(mean_squared_error(y_training, y_hat_training))
y_hat_test = poly.predict(x_test)
mse_test.append(mean_squared_error(y_test, y_hat_test))
plots.append(poly)
plt.clf() # clear figure
plt.figure()
# log was needed to scale
plt.plot(range(9), [log(mse_training[i]) for i in range(9)], label="training error")
plt.plot(range(9), [log(mse_test[i]) for i in range(9)], label="test error")
plt.title("Error as a Function of Degree")
plt.xlabel("degree")
plt.ylabel("error")
plt.legend()
plt.grid(True)
plt.savefig("../plots_N7_splitA/error_as_a_function_of_degree.png")
# get the two lowest errors
low_test_err_degree = mse_test.index(min(mse_test))
low_training_err_degree = mse_training.index(min(mse_training))
plt.clf() # clear figure
plt.figure()
plt.scatter(x_training, y_training)
plt.plot(np.sort(plots[low_training_err_degree].X_training), plots[low_training_err_degree].f, label=f"lowest training error curve with degree = {low_training_err_degree}")
plt.plot(np.sort(plots[low_test_err_degree].X_training), plots[low_test_err_degree].f, label=f"lowest test error curve with degree = {low_test_err_degree}")
plt.title("Lowest Training and Test Errors")
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
plt.grid(True)
plt.savefig("../plots_N7_splitA/lowest_training_and_test_error.png")
# Number 10, split A
    k = [1, 3, 5, 7, 9]  # list rather than set so iteration order is guaranteed and the values can be indexed
kplots = []
mse_training_k = []
mse_test_k = []
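    # NOTE: the 1-D inputs are packed into rows of two values because the custom
    # KNearestNeighbor class appears to expect 2-D feature/target arrays; this pairs
    # consecutive samples rather than treating each point as its own row.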
kx_training = np.reshape(x_training, (-1,2))
ky_training = np.reshape(y_training, (-1,2))
kx_test = np.reshape(x_test, (-1, 2))
ky_test = np.reshape(y_test, (-1,2))
#print(kx_training)
#print(kx_training.shape)
for i in k:
knn = KNearestNeighbor(i, distance_measure="euclidean", aggregator="mean")
knn.fit(kx_training, ky_training)
#print(f"x_training = {x_training.shape}")
k_training = knn.predict(kx_training)
mse_training_k.append(mean_squared_error(ky_training, k_training))
k_test = knn.predict(kx_test)
mse_test_k.append(mean_squared_error(ky_test, k_test))
kplots.append(knn)
plt.clf() # clear figure
plt.figure()
    plt.plot(k, mse_training_k, label="training error")
    plt.plot(k, mse_test_k, label="test error")
plt.title("Error as a Function of k")
plt.xlabel("k")
plt.ylabel("error")
plt.legend()
plt.grid(True)
plt.savefig("../plots_N10_splitA/error_as_a_function_of_k.png")
low_test_err_k = mse_test_k.index(min(mse_test_k))
plt.clf() # clear figure
plt.figure()
plt.scatter(x_training, y_training)
    best_knn = kplots[low_test_err_k]  # plot the model's predictions, not the estimator object itself
    order = np.argsort(kx_training.flatten())
    plt.plot(kx_training.flatten()[order], best_knn.predict(kx_training).flatten()[order], label=f"lowest test error curve with k = {k[low_test_err_k]}")
plt.title("Lowest Test Error")
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
plt.grid(True)
plt.savefig("../plots_N10_splitA/lowest_test_error.png")
# Number 9, split B
rand_sampl = np.random.choice(N, N, replace=False) # do not reselect numbers
x_training, y_training = x[rand_sampl[:50]], y[rand_sampl[:50]]
x_test, y_test = x[rand_sampl[50:]], y[rand_sampl[50:]]
plots = []
mse_training = []
mse_test = []
    # fit polynomial degrees 0 through 8
for i in range(9):
poly = PolynomialRegression(i)
poly.fit(x_training, y_training)
poly.visualize(x_training, y_training, path=f"../plots_N9_splitB/training_plot_degree_{i}",
title=f"Training Plot Degree {i}")
# test will be red
poly.visualize(x_test, y_test, path=f"../plots_N9_splitB/test_plot_degree_{i}",
title=f"Test Plot Degree {i}", color='r')
y_hat_training = poly.predict(x_training) # predicted value
mse_training.append(mean_squared_error(y_training, y_hat_training))
y_hat_test = poly.predict(x_test)
mse_test.append(mean_squared_error(y_test, y_hat_test))
plots.append(poly)
plt.clf() # clear figure
plt.figure()
# log was needed to scale
plt.plot(range(9), [log(mse_training[i]) for i in range(9)], label="training error")
plt.plot(range(9), [log(mse_test[i]) for i in range(9)], label="test error")
plt.title("Error as a Function of Degree")
plt.xlabel("degree")
plt.ylabel("error")
plt.legend()
plt.grid(True)
plt.savefig("../plots_N9_splitB/error_as_a_function_of_degree.png")
# get the two lowest errors
low_test_err_degree = mse_test.index(min(mse_test))
low_training_err_degree = mse_training.index(min(mse_training))
plt.clf() # clear figure
plt.figure()
plt.scatter(x_training, y_training)
plt.plot(np.sort(plots[low_training_err_degree].X_training), plots[low_training_err_degree].f, label=f"lowest training error curve with degree = {low_training_err_degree}")
plt.plot(np.sort(plots[low_test_err_degree].X_training), plots[low_test_err_degree].f, label=f"lowest test error curve with degree = {low_test_err_degree}")
plt.title("Lowest Training and Test Errors")
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
plt.grid(True)
plt.savefig("../plots_N9_splitB/lowest_training_and_test_error.png")
# Number 10, split B
    k = [1, 3, 5, 7, 9]  # list rather than set so iteration order is guaranteed and the values can be indexed
kplots = []
mse_training_k = []
mse_test_k = []
kx_training = np.reshape(x_training, (-1,2))
ky_training = np.reshape(y_training, (-1,2))
kx_test = np.reshape(x_test, (-1, 2))
ky_test = np.reshape(y_test, (-1,2))
#print(kx_training)
#print(kx_training.shape)
for i in k:
knn = KNearestNeighbor(i, distance_measure="euclidean", aggregator="mean")
knn.fit(kx_training, ky_training)
#print(f"x_training = {x_training.shape}")
k_training = knn.predict(kx_training)
mse_training_k.append(mean_squared_error(ky_training, k_training))
k_test = knn.predict(kx_test)
mse_test_k.append(mean_squared_error(ky_test, k_test))
        kplots.append(knn)  # append the fitted k-NN model (previously appended the leftover polynomial by mistake)
plt.clf() # clear figure
plt.figure()
    plt.plot(k, mse_training_k, label="training error")
    plt.plot(k, mse_test_k, label="test error")
plt.title("Error as a Function of k")
plt.xlabel("k")
plt.ylabel("error")
plt.legend()
plt.grid(True)
plt.savefig("../plots_N10_splitB/error_as_a_function_of_k.png")
low_test_err_k = mse_test_k.index(min(mse_test_k))
plt.clf() # clear figure
plt.figure()
plt.scatter(x_training, y_training)
    best_knn = kplots[low_test_err_k]  # a KNearestNeighbor has no X_training/f attributes; plot its predictions instead
    order = np.argsort(kx_training.flatten())
    plt.plot(kx_training.flatten()[order], best_knn.predict(kx_training).flatten()[order], label=f"lowest test error curve with k = {k[low_test_err_k]}")
plt.title("Lowest Test Error")
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
plt.grid(True)
plt.savefig("../plots_N10_splitB/lowest_test_error.png")
| 38.936652
| 176
| 0.664614
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,165
| 0.251598
|
45303096a42f87f1631edf145b0ae0b347d69c0b
| 753
|
py
|
Python
|
chapter10/exercises/EG10-20 Twinkle Twinkle classes.py
|
munnep/begin_to_code_with_python
|
3ef14d90785526b6b26d262a7627eee73791d7d0
|
[
"MIT"
] | null | null | null |
chapter10/exercises/EG10-20 Twinkle Twinkle classes.py
|
munnep/begin_to_code_with_python
|
3ef14d90785526b6b26d262a7627eee73791d7d0
|
[
"MIT"
] | null | null | null |
chapter10/exercises/EG10-20 Twinkle Twinkle classes.py
|
munnep/begin_to_code_with_python
|
3ef14d90785526b6b26d262a7627eee73791d7d0
|
[
"MIT"
] | null | null | null |
# EG10-20 Twinkle Twinkle classes
import time
import snaps
class Note:
def __init__(self, note, duration):
self.__note = note
self.__duration = duration
def play(self):
snaps.play_note(self.__note)
time.sleep(self.__duration)
tune = [Note(note=0, duration=0.4), Note(note=0, duration=0.4),
Note(note=7, duration=0.4), Note(note=7, duration=0.4),
Note(note=9, duration=0.4), Note(note=9, duration=0.4),
Note(note=7, duration=0.8), Note(note=5, duration=0.4),
Note(note=5, duration=0.4), Note(note=4, duration=0.4),
Note(note=4, duration=0.4), Note(note=2, duration=0.4),
Note(note=2, duration=0.4), Note(note=0, duration=0.8)]
for note in tune:
note.play()
| 30.12
| 63
| 0.622842
| 207
| 0.2749
| 0
| 0
| 0
| 0
| 0
| 0
| 33
| 0.043825
|
453161fcfe76bf1d62b11f9f68c0fa622f378ff1
| 3,523
|
py
|
Python
|
tests/python/gpu/test_forward.py
|
xudong-sun/mxnet
|
fe42d30d5885dd576cb871fd70594c53efce9b42
|
[
"Apache-2.0"
] | 31
|
2016-04-29T09:13:44.000Z
|
2021-02-16T21:27:00.000Z
|
tests/python/gpu/test_forward.py
|
greenpea0104/incubator-mxnet
|
fc9e70bf2d349ad4c6cb65ff3f0958e23a7410bf
|
[
"Apache-2.0"
] | 23
|
2018-06-11T20:03:54.000Z
|
2018-08-10T03:17:49.000Z
|
tests/python/gpu/test_forward.py
|
greenpea0104/incubator-mxnet
|
fc9e70bf2d349ad4c6cb65ff3f0958e23a7410bf
|
[
"Apache-2.0"
] | 47
|
2016-04-19T22:46:09.000Z
|
2020-09-30T08:09:16.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import os
import numpy as np
import mxnet as mx
from mxnet.test_utils import *
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import setup_module, with_seed
from mxnet.gluon import utils
def _get_model():
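    # download and unpack the pretrained Inception-v3 checkpoint if it is not already present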
if not os.path.exists('model/Inception-7-symbol.json'):
download('http://data.mxnet.io/models/imagenet/inception-v3.tar.gz', dirname='model')
os.system("cd model; tar -xf inception-v3.tar.gz --strip-components 1")
def _dump_images(shape):
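    # center-crop and resize the sample images, then save them as a single (N, C, H, W) float array shifted by -128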
import skimage.io
import skimage.transform
img_list = []
for img in sorted(os.listdir('data/test_images/')):
img = skimage.io.imread('data/test_images/'+img)
        short_edge = min(img.shape[:2])
        yy = int((img.shape[0] - short_edge) / 2)
        xx = int((img.shape[1] - short_edge) / 2)
        img = img[yy : yy + short_edge, xx : xx + short_edge]
img = skimage.transform.resize(img, shape)
img_list.append(img)
imgs = np.asarray(img_list, dtype=np.float32).transpose((0, 3, 1, 2)) - 128
np.save('data/test_images_%d_%d.npy'%shape, imgs)
def _get_data(shape):
hash_test_img = "355e15800642286e7fe607d87c38aeeab085b0cc"
hash_inception_v3 = "91807dfdbd336eb3b265dd62c2408882462752b9"
utils.download("http://data.mxnet.io/data/test_images_%d_%d.npy" % (shape),
path="data/test_images_%d_%d.npy" % (shape),
sha1_hash=hash_test_img)
utils.download("http://data.mxnet.io/data/inception-v3-dump.npz",
path='data/inception-v3-dump.npz',
sha1_hash=hash_inception_v3)
@with_seed()
def test_consistency(dump=False):
shape = (299, 299)
_get_model()
_get_data(shape)
if dump:
_dump_images(shape)
gt = None
else:
gt = {n: mx.nd.array(a) for n, a in np.load('data/inception-v3-dump.npz').items()}
data = np.load('data/test_images_%d_%d.npy'%shape)
sym, arg_params, aux_params = mx.model.load_checkpoint('model/Inception-7', 1)
arg_params['data'] = data
arg_params['softmax_label'] = np.random.randint(low=1, high=1000, size=(data.shape[0],))
ctx_list = [{'ctx': mx.gpu(0), 'data': data.shape, 'type_dict': {'data': data.dtype}},
{'ctx': mx.cpu(0), 'data': data.shape, 'type_dict': {'data': data.dtype}}]
gt = check_consistency(sym, ctx_list, arg_params=arg_params, aux_params=aux_params,
tol=1e-3, grad_req='null', raise_on_err=False, ground_truth=gt)
if dump:
np.savez('data/inception-v3-dump.npz', **{n: a.asnumpy() for n, a in gt.items()})
if __name__ == '__main__':
test_consistency(False)
| 43.493827
| 93
| 0.676412
| 0
| 0
| 0
| 0
| 992
| 0.281578
| 0
| 0
| 1,438
| 0.408175
|
4532f548d365251f68edc483eadcb7b23a21e639
| 7,706
|
py
|
Python
|
docs/src/conf.py
|
rbeucher/LavaVu
|
317a234d69ba3eb06a827a1f8658feb031fe358b
|
[
"CC-BY-4.0"
] | 23
|
2016-01-26T23:06:53.000Z
|
2019-06-11T08:31:32.000Z
|
docs/src/conf.py
|
rbeucher/LavaVu
|
317a234d69ba3eb06a827a1f8658feb031fe358b
|
[
"CC-BY-4.0"
] | 73
|
2016-03-16T03:02:35.000Z
|
2019-07-18T07:29:52.000Z
|
docs/src/conf.py
|
rbeucher/LavaVu
|
317a234d69ba3eb06a827a1f8658feb031fe358b
|
[
"CC-BY-4.0"
] | 6
|
2016-03-25T23:22:49.000Z
|
2018-01-16T14:38:09.000Z
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
# LavaVu conf based on conf.py from underworld2
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import os, sys
from os.path import dirname, join, abspath
sys.path.insert(0, abspath(join(dirname(__file__), '..')))
sys.path.insert(0, abspath(join(dirname(__file__), '..', '..')))
import setup as lsetup
# -- Project information -----------------------------------------------------
project = 'LavaVu'
copyright = '2020, Monash University'
author = 'Owen Kaluza, Monash University'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = lsetup.version
print('BUILDING LAVAVU DOCS FOR VERSION', release)
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
'sphinx.ext.autosummary',
'sphinx.ext.githubpages',
'sphinx_markdown_tables',
'myst_parser',
# 'nbsphinx',
]
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
napoleon_use_keyword = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
#source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
#html_theme = 'pyramid'
#import sphinx_rtd_theme
#html_theme = "sphinx_rtd_theme"
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
# Set the width of the content area. Defaults to '900px'
'sidebar_width': '300px',
'page_width': '90%',
#'fixed_sidebar': 'true', #Need to scroll for full table of contents
'font_family': 'sans',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
#html_sidebars = {}
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'LavaVudoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'LavaVu.tex', 'LavaVu Documentation',
'Owen Kaluza', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'lavavu', 'LavaVu Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'LavaVu', 'LavaVu Documentation',
author, 'LavaVu', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# setup mock classes so no building is required
# generate rst files
import os
import sys
# add current directory for `generate_api_documentation`
sys.path.append(os.path.dirname(__name__))
# add top project directory as well
sys.path.insert(0, os.path.join(os.path.dirname(__name__),'../../lavavu'))
try:
import lavavu
import convert
import points
import tracers
import control
except Exception:
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
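    # if the compiled lavavu modules cannot be imported, the heavy dependencies are replaced with mocks so autodoc can still run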
MOCK_MODULES = ['scipy', 'numpy', '_LavaVuPython']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
import generate_api_documentation
import subprocess
subprocess.call("./run-nb-to-rst.sh", shell=True)
| 30.101563
| 79
| 0.666623
| 114
| 0.014794
| 0
| 0
| 83
| 0.010771
| 0
| 0
| 5,549
| 0.720088
|
453357739358367ed9649135f97753882d4359cd
| 25,824
|
py
|
Python
|
experiments/pamogk_exp.py
|
tastanlab/pamogk
|
fdd1a5b3dcd43b91ce9aa9989c7815b71f13e710
|
[
"FTL"
] | 6
|
2020-06-18T14:37:01.000Z
|
2021-09-12T07:25:47.000Z
|
experiments/pamogk_exp.py
|
tastanlab/pamogk
|
fdd1a5b3dcd43b91ce9aa9989c7815b71f13e710
|
[
"FTL"
] | null | null | null |
experiments/pamogk_exp.py
|
tastanlab/pamogk
|
fdd1a5b3dcd43b91ce9aa9989c7815b71f13e710
|
[
"FTL"
] | 5
|
2020-01-02T09:08:36.000Z
|
2021-07-17T12:35:37.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import collections
import mkkm_mr
import networkx as nx
from sklearn.cluster import KMeans, SpectralClustering
from snf_simple import SNF
from pamogk import config
from pamogk import label_mapper
from pamogk.data_processor import rnaseq_processor as rp, synapse_rppa_processor as rpp
from pamogk.gene_mapper import uniprot_mapper
from pamogk.kernels.lmkkmeans_train import lmkkmeans_train
from pamogk.kernels.pamogk import kernel
from pamogk.lib.sutils import *
from pamogk.pathway_reader import cx_pathway_reader as cx_pw
# see https://www.mathworks.com/help/matlab/matlab_external/install-the-matlab-engine-for-python.html
from pamogk.result_processor.label_analysis import LabelAnalysis
# import sys
# sys.path.insert(0, '/Users/fma/dev/bilkent/research/snf')
# sys.path.insert(0, '/Users/fma/dev/bilkent/research/mkkm-mr')
parser = argparse.ArgumentParser(description='Run PAMOGK-mut algorithms on pathways')
parser.add_argument('--run-id', '-rid', metavar='run-id', dest='run_id', type=str, help='Unique Run ID')
parser.add_argument('--rs-patient-data', '-rs', metavar='file-path', dest='rnaseq_patient_data', type=str2path,
help='rnaseq pathway ID list',
default=config.DATA_DIR / 'kirc_data/unc.edu_KIRC_IlluminaHiSeq_RNASeqV2.geneExp.whitelist_tumor.txt')
parser.add_argument('--rp-patient-data', '-rp', metavar='file-path', dest='rppa_patient_data', type=str2path,
help='rppa pathway ID list', default=config.DATA_DIR / 'kirc_data/kirc_rppa_data')
parser.add_argument('--som-patient-data', '-s', metavar='file-path', dest='som_patient_data', type=str2path,
help='som mut pathway ID list',
default=config.DATA_DIR / 'kirc_data/kirc_somatic_mutation_data.csv')
parser.add_argument('--label', '-m', metavar='label', dest='label', type=str, default='th196',
help='Label value that will be smoothed')
# used values: [0, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
parser.add_argument('--smoothing-alpha', '-a', metavar='alpha', dest='smoothing_alpha', type=float, default=0.01,
help='Smoothing alpha in range of 0-1')
parser.add_argument('--drop-percent', '-p', metavar='drop-percent', dest='drop_percent', type=int, default=1,
help='Drop percentage in range of 0-100')
parser.add_argument('--threshold', '-t', metavar='threshold', dest='threshold', type=float, default=1.96,
help='Cut off threshold')
parser.add_argument('--continuous', '-c', metavar='bool', dest='continuous', type=str2bool, default=True,
help='Whether to produce continuous values for under/over expressed')
parser.add_argument('--normalize-kernels', '-nk', dest='kernel_normalization', type=str2bool, default=True,
help='Kernel Normalization')
args = {}
class Experiment1(object):
def __init__(self, args):
"""
Parameters
----------
args:
arguments
"""
self.args = args
self.label = args.label
self.smoothing_alpha = args.smoothing_alpha
self.kernel_normalization = args.kernel_normalization
self.drop_percent = args.drop_percent
self.threshold = args.threshold
self.log2_lambdas = list(range(-15, 16, 3))
# these are kernel related params
# each experiment may have different methods to build kernels
exp_subdir = f'{Path(__file__).stem}-{self.__class__.__name__}'
param_dir = f'label={self.label}-smoothing_alpha={self.smoothing_alpha}-kr_norm={self.kernel_normalization}'
run_suffix = ''
if self.args.run_id is not None:
run_suffix = f'-run={self.args.run_id}'
self.data_dir = config.DATA_DIR / 'pamogk_kirc' / exp_subdir / param_dir
self.result_dir = self.data_dir / ('results' + run_suffix)
self.kernel_dir = self.data_dir / 'kernels'
self.label_analyzer = None
# this will create with all roots
safe_create_dir(self.result_dir)
safe_create_dir(self.kernel_dir)
# change log and create log file
change_log_path(self.data_dir / 'run.log')
log('exp_data_dir:', self.data_dir)
self.get_rnaseq_pw_path = lambda \
pw_id: self.kernel_dir / f'rnaseq-over-under-expressed-pw_id={pw_id}.gpickle'
self.get_rppa_pw_path = lambda \
pw_id: self.kernel_dir / f'rppa-over-under-expressed-pw_id={pw_id}.gpickle'
self.get_som_pw_path = lambda \
pw_id: self.kernel_dir / f'pamogk-som-expressed-pw_id={pw_id}.gpickle'
@timeit
def read_rnaseq_data(self):
# Real Data #
# process RNA-seq expression data
gene_exp, gene_name_map = rp.process(self.args.rnaseq_patient_data, self.args.continuous, self.args.threshold)
# convert entrez gene id to uniprot id
pat_ids = gene_exp.columns.values # patient TCGA ids
ent_ids = gene_exp.index.values # gene entrez ids
return gene_exp.values, pat_ids, ent_ids
@timeit
def read_rppa_data(self):
# Real Data #
# process RNA-seq expression data
gene_exp = rpp.process(self.args.rppa_patient_data, self.args.continuous, self.args.threshold)
# convert entrez gene id to uniprot id
pat_ids = gene_exp.columns.values # patient TCGA ids
ent_ids = gene_exp.index.values # gene entrez ids
return gene_exp.values, pat_ids, ent_ids
@timeit
def read_som_data(self):
"""
Returns
-------
mapping of patient to mutations by entrez ids
"""
# Real Data #
# process RNA-seq expression data
patients = {}
with open(config.get_safe_data_file(self.args.som_patient_data)) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
pat_id = row['Patient ID']
ent_id = row['Entrez Gene ID']
if pat_id not in patients:
patients[pat_id] = {ent_id}
else:
patients[pat_id].add(ent_id)
return collections.OrderedDict(sorted(patients.items()))
@timeit
def find_intersection_patients(self, rs_GE, rs_pat, rp_GE, rp_pat, som_pat):
rs_pat_list = simplify_pat_ids(rs_pat)
rp_pat_list = simplify_pat_ids(rp_pat)
som_pat_list = simplify_pat_ids(som_pat.keys())
intersection_list = list(set(rs_pat_list).intersection(rp_pat_list, som_pat_list))
intersection_list.sort()
intersect_loc = self.data_dir / 'patients.csv'
save_csv(intersect_loc, [[pid] for pid in intersection_list])
def clean_patient_list_and_ge_data(patients, ge, whitelist):
pat_list = simplify_pat_ids(patients)
to_del = [idx for idx, value in enumerate(pat_list) if value not in whitelist]
return np.delete(patients, to_del), np.delete(ge, to_del, axis=1)
rs_pat, rs_GE = clean_patient_list_and_ge_data(rs_pat, rs_GE, intersection_list)
rp_pat, rp_GE = clean_patient_list_and_ge_data(rp_pat, rp_GE, intersection_list)
som_pat_deleted_list = [pid for pid in som_pat.keys() if pid not in intersection_list]
for item in som_pat_deleted_list:
som_pat.pop(item, None)
return rs_GE, rs_pat, rp_GE, rp_pat, som_pat
@timeit
def preprocess_seq_patient_data(self, GE, all_ent_ids):
# get the dictionary of gene id mappers
uni2ent, ent2uni = uniprot_mapper.json_to_dict()
found_ent_ids = [eid in ent2uni for eid in all_ent_ids]
ent_ids = np.array([eid for eid in all_ent_ids if eid in ent2uni])
uni_ids = np.array([ent2uni[eid] for eid in ent_ids], dtype=object)
log('uni_ids:', len(uni_ids))
log('miss_ent_ids:', len(all_ent_ids) - sum(found_ent_ids))
# prune genes whose uniprot id is not found
GE = GE[found_ent_ids]
return GE, uni_ids
@timeit
def preprocess_som_patient_data(self, patients):
# get the dictionary of gene id mappers
uni2ent, ent2uni = uniprot_mapper.json_to_dict()
res = []
num_empty = 0
for pat_id, ent_ids in patients.items():
# uni_ids = [uid for eid in ent_ids if eid in ent2uni for uid in ent2uni[eid]]
uni_ids = [uid for eid in ent_ids if eid in ent2uni for uid in ent2uni[eid]]
# if there are any matches map them
res.append({
'pat_id': pat_id,
'mutated_nodes': uni_ids,
})
log('removed patients:', num_empty)
return res
@timeit
def read_pathways(self):
# get all pathways
return cx_pw.read_pathways()
def rnaseq_pathways_save_valid(self, all_pw_map):
return np.all([self.get_rnaseq_pw_path(pw_id).exists() for pw_id in all_pw_map])
def rppa_pathways_save_valid(self, all_pw_map):
return np.all([self.get_rppa_pw_path(pw_id).exists() for pw_id in all_pw_map])
def som_pathways_save_valid(self, all_pw_map):
return np.all([self.get_som_pw_path(pw_id).exists() for pw_id in all_pw_map])
@timeit
def restore_rnaseq_pathways(self, all_pw_map):
num_pw = len(all_pw_map)
res_pw_map = collections.OrderedDict()
for ind, pw_id in enumerate(all_pw_map.keys()):
path = self.get_rnaseq_pw_path(pw_id)
logr(f'Loading over/under rnaseq expressed data {ind + 1:3}/{num_pw} pw_id={pw_id}')
res_pw_map[pw_id] = nx.read_gpickle(path)
log()
return res_pw_map
@timeit
def restore_rppa_pathways(self, all_pw_map):
num_pw = len(all_pw_map)
res_pw_map = collections.OrderedDict()
for ind, pw_id in enumerate(all_pw_map.keys()):
path = self.get_rppa_pw_path(pw_id)
logr(f'Loading over/under rppa expressed data {ind + 1:3}/{num_pw} pw_id={pw_id}')
res_pw_map[pw_id] = nx.read_gpickle(path)
log()
return res_pw_map
@timeit
def restore_som_pathways(self, all_pw_map):
num_pw = len(all_pw_map)
res_pw_map = collections.OrderedDict()
for ind, pw_id in enumerate(all_pw_map.keys()):
path = self.get_som_pw_path(pw_id)
logr(f'Loading somatic mutation data {ind + 1:3}/{num_pw} pw_id={pw_id}')
res_pw_map[pw_id] = nx.read_gpickle(path)
log()
return res_pw_map
@timeit
def save_rnaseq_pathways(self, all_pw_map):
num_pw = len(all_pw_map)
for ind, (pw_id, pw) in enumerate(all_pw_map.items()):
path = self.get_rnaseq_pw_path(pw_id)
logr(f'Saving over/under rnaseq expressed data {ind + 1:3}/{num_pw} pw_id={pw_id}')
nx.write_gpickle(pw, path)
log()
@timeit
def save_rppa_pathways(self, all_pw_map):
num_pw = len(all_pw_map)
for ind, (pw_id, pw) in enumerate(all_pw_map.items()):
path = self.get_rppa_pw_path(pw_id)
logr(f'Saving over/under rppa expressed data {ind + 1:3}/{num_pw} pw_id={pw_id}')
nx.write_gpickle(pw, path)
log()
@timeit
def save_som_pathways(self, all_pw_map):
num_pw = len(all_pw_map)
for ind, (pw_id, pw) in enumerate(all_pw_map.items()):
path = self.get_som_pw_path(pw_id)
logr(f'Saving somatic mutation data {ind + 1:3}/{num_pw} pw_id={pw_id}')
nx.write_gpickle(pw, path)
log()
@timeit
def label_rnaseq_patient_genes(self, all_pw_map, pat_ids, GE, uni_ids):
"""Labels all patients with matching level of expression
Parameters
----------
all_pw_map: :obj:`list` of :obj:`networkx.classes.graph.Graph`
a dictionary of all pathways we are using
pat_ids: :obj:`list` of :obj:`str`
list of patient ids
GE: :obj:`numpy.ndarray`
Gene expression data array in shape of genes by patients
uni_ids: :obj:`numpy.ndarray`
mapping from uniprot to gene
"""
# check if we already stored all over/under expression pathway data if so restore them
if self.rnaseq_pathways_save_valid(all_pw_map):
return self.restore_rnaseq_pathways(all_pw_map)
num_pat = pat_ids.shape[0]
# if there are missing ones calculate all of them
log('RNAseq Over and under expressed patient pathway labeling')
for ind, pid in enumerate(pat_ids):
if self.args.continuous:
                gene_vals = (GE[..., pat_ids == pid]).flatten()  # all expression values for this patient
logr(f'RNAseq Checking patient for over-expressed {ind + 1:4}/{num_pat} pid={pid}')
label_mapper.mark_cont_label_on_pathways('oe', pid, all_pw_map, uni_ids, gene_vals)
label_mapper.mark_extra_label_on_pathways(f'oe-{self.label}', pid, all_pw_map, 'oe', self.threshold)
logr(f'RNAseq Checking patient for under-expressed {ind + 1:4}/{num_pat} pid={pid}')
label_mapper.mark_cont_label_on_pathways('ue', pid, all_pw_map, uni_ids, gene_vals)
label_mapper.mark_extra_label_on_pathways(f'ue-{self.label}', pid, all_pw_map, 'ue', self.threshold)
else:
logr(f'RNAseq Checking patient for over-expressed {ind + 1:4}/{num_pat} pid={pid}')
gene_ind = (GE[..., pat_ids == pid] == 1).flatten() # over expressed genes
genes = uni_ids[gene_ind] # get uniprot gene ids from indices
label_mapper.mark_label_on_pathways('oe', pid, all_pw_map, genes, self.label)
logr(f'RNAseq Checking patient for under-expressed {ind + 1:4}/{num_pat} pid={pid}')
gene_ind = (GE[..., pat_ids == pid] == -1).flatten() # under expressed genes
genes = uni_ids[gene_ind] # get uniprot gene ids from indices
label_mapper.mark_label_on_pathways('ue', pid, all_pw_map, genes, self.label)
log()
self.save_rnaseq_pathways(all_pw_map)
return all_pw_map
@timeit
def label_rppa_patient_genes(self, all_pw_map, pat_ids, GE, uni_ids):
"""Labels all patients with matching level of expression
Parameters
----------
all_pw_map: :obj:`list` of :obj:`networkx.classes.graph.Graph`
a dictionary of all pathways we are using
pat_ids: :obj:`list` of :obj:`str`
list of patient ids
GE: :obj:`numpy.ndarray`
Gene expression data array in shape of genes by patients
uni_ids: :obj:`numpy.ndarray`
mapping from uniprot to gene
"""
# check if we already stored all over/under expression pathway data if so restore them
if self.rppa_pathways_save_valid(all_pw_map):
return self.restore_rppa_pathways(all_pw_map)
num_pat = pat_ids.shape[0]
# if there are missing ones calculate all of them
log('RPPA Over and under expressed patient pathway labeling')
for ind, pid in enumerate(pat_ids):
if self.args.continuous:
                gene_vals = (GE[..., pat_ids == pid]).flatten()  # all expression values for this patient
logr(f'RPPA Checking patient for over-expressed {ind + 1:4}/{num_pat} pid={pid}')
label_mapper.mark_cont_label_on_pathways('oe', pid, all_pw_map, uni_ids, gene_vals)
label_mapper.mark_extra_label_on_pathways(f'oe-{self.label}', pid, all_pw_map, 'oe', self.threshold)
logr(f'RPPA Checking patient for under-expressed {ind + 1:4}/{num_pat} pid={pid}')
label_mapper.mark_cont_label_on_pathways('ue', pid, all_pw_map, uni_ids, gene_vals)
label_mapper.mark_extra_label_on_pathways(f'ue-{self.label}', pid, all_pw_map, 'ue', self.threshold)
else:
logr(f'RPPA Checking patient for rppa over-expressed {ind + 1:4}/{num_pat} pid={pid}')
gene_ind = (GE[..., pat_ids == pid] == 1).flatten() # over expressed genes
genes = uni_ids[gene_ind] # get uniprot gene ids from indices
label_mapper.mark_label_on_pathways('oe', pid, all_pw_map, genes, self.label)
logr(f'RPPA Checking patient for rppa under-expressed {ind + 1:4}/{num_pat} pid={pid}')
gene_ind = (GE[..., pat_ids == pid] == -1).flatten() # under expressed genes
genes = uni_ids[gene_ind] # get uniprot gene ids from indices
label_mapper.mark_label_on_pathways('ue', pid, all_pw_map, genes, self.label)
log()
self.save_rppa_pathways(all_pw_map)
return all_pw_map
def label_som_patient_genes(self, all_pw_map, patients):
"""Labels all patients with matching level of expression
Parameters
----------
all_pw_map: :obj:`list` of :obj:`networkx.classes.graph.Graph`
a dictionary of all pathways we are using
patients: :obj:`list`
list of patients with mutation mappings
"""
# check if we already stored all over/under expression pathway data if so restore them
if self.som_pathways_save_valid(all_pw_map):
return self.restore_som_pathways(all_pw_map)
num_pat = len(patients)
# if there are missing ones calculate all of them
log('Somatic mutation patient pathway labeling')
for ind, patient in enumerate(patients):
pid = patient['pat_id']
genes = patient['mutated_nodes'] # get uniprot gene ids from indices
genes = np.array([genes])
logr(f'Checking patient for somatic mutation {ind + 1:4}/{num_pat} pid={pid}')
label_mapper.mark_label_on_pathways('som', pid, all_pw_map, genes, self.label)
log()
self.save_som_pathways(all_pw_map)
return all_pw_map
@timeit
def create_seq_kernels(self, all_pw_map, pat_ids, kms_file_name):
# experiment variables
num_pat = pat_ids.shape[0]
num_pw = len(all_pw_map)
kms_path = self.kernel_dir / f'{kms_file_name}.npz'
if kms_path.exists(): return np_load_data(kms_path, key='kms')
# calculate kernel matrices for over expressed genes
over_exp_kms = np.zeros((num_pw, num_pat, num_pat))
for ind, (pw_id, pw) in enumerate(all_pw_map.items()): # for each pathway
over_exp_kms[ind] = kernel(pat_ids, pw, label_key=f'label-oe-{self.label}', alpha=self.smoothing_alpha,
normalization=self.kernel_normalization)
logr(f'Calculating oe pathway kernel={kms_file_name} {ind + 1:4}/{num_pw} pw_id={pw_id}')
log()
# calculate kernel matrices for under expressed genes
under_exp_kms = np.zeros((num_pw, num_pat, num_pat))
for ind, (pw_id, pw) in enumerate(all_pw_map.items()): # for each pathway
under_exp_kms[ind] = kernel(pat_ids, pw, label_key=f'label-ue-{self.label}', alpha=self.smoothing_alpha,
normalization=self.kernel_normalization)
logr(f'Calculating ue pathway kernel={kms_file_name} {ind + 1:4}/{num_pw} pw_id={pw_id}')
log()
kms = np.vstack([over_exp_kms, under_exp_kms]) # stack all kernels
np.savez_compressed(kms_path, kms=kms) # save kernels
return kms
@timeit
def create_som_kernels(self, all_pw_map, patients):
# experiment variables
num_pat = len(patients)
num_pw = len(all_pw_map)
kms_path = self.kernel_dir / 'som-kms.npz'
if kms_path.exists(): return np_load_data(kms_path, key='kms')
# calculate kernel matrices for over expressed genes
kms = np.zeros((num_pw, num_pat, num_pat))
pat_ids = np.array([pat['pat_id'] for pat in patients])
for ind, (pw_id, pw) in enumerate(all_pw_map.items()): # for each pathway
kms[ind] = kernel(pat_ids, pw, label_key='label-som', alpha=self.smoothing_alpha,
normalization=self.kernel_normalization)
            logr(f'Calculating som mut pathway kernel {ind + 1:4}/{num_pw} pw_id={pw_id}')
log()
np.savez_compressed(kms_path, kms=kms) # save kernels
return kms
@staticmethod
def kmeans_cluster(U, n_clusters):
U_normalized = mkkm_mr.lib.normalize_unit_row(U)
return KMeans(n_clusters=n_clusters, max_iter=100, n_init=50).fit_predict(U_normalized)
def cluster_cont(self, kernels, n_clusters):
snf_K = 20 # number of neighbors, usually (10~30)
snf_t = 20 # number of iterations, usually (10~20)
# SNF
# W = snf_compute.snf(*kernels, K=snf_K, t=snf_t)
W = SNF(kernels, K=snf_K, t=snf_t)
# KMeans
labels = self.kmeans_cluster(W, n_clusters)
np_save_npz(self.result_dir / f'pamogk-snf-kmeans-k={n_clusters}', labels=labels)
# Spectral
labels = SpectralClustering(n_clusters, affinity='precomputed').fit_predict(W)
np_save_npz(self.result_dir / f'pamogk-snf-spectral-k={n_clusters}', labels=labels)
KH = mkkm_mr.lib.kernel_centralize(kernels)
KH = mkkm_mr.lib.kernel_normalize(KH)
num_ker = kernels.shape[0]
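        # uniform kernel weights: the combined average kernel serves as the plain kernel k-means baseline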
gamma0 = np.ones((num_ker, 1)) / num_ker
avgKer = mkkm_mr.lib.combine_kernels(KH, gamma0)
H = mkkm_mr.lib.kernel_kmeans_iter(avgKer, n_clusters)
labels = self.kmeans_cluster(H, n_clusters)
np_save_npz(self.result_dir / f'pamogk-kmeans-k={n_clusters}.csv', labels=labels)
# AAAI - 16 - MKKM-MR
M = mkkm_mr.lib.calM(KH)
lambdas = np.power(2., self.log2_lambdas)
for log2_lambda, lambda_ in zip(self.log2_lambdas, lambdas):
log(f'running for n_clusters={n_clusters} log2_lambda={log2_lambda}')
[H, weights, obj] = mkkm_mr.mkkm_mr(KH, M, n_clusters, lambda_)
labels = self.kmeans_cluster(H, n_clusters)
out_file = self.result_dir / f'pamogk-mkkm-k={n_clusters}-log2_lambda={log2_lambda}'
np_save_npz(out_file, labels=labels, weights=weights, obj=obj)
def cluster_discrete(self, kernels, n_clusters):
save_path = self.result_dir / f'labels_dropped={self.drop_percent}' / f'pamogk-all-lmkkmeans-k={n_clusters}'
if save_path.exists():
            with np.load(save_path) as data:
                return data['labels'], data['weights']
labels, weights = lmkkmeans_train(kernels, cluster_count=n_clusters, iteration_count=5)
ensure_file_dir(save_path)
np_save_npz(f'{save_path}-weights', labels=labels, weights=weights)
return labels, weights
@timeit
def cluster(self, kernels, n_clusters):
if self.args.continuous:
return self.cluster_cont(kernels, n_clusters)
else:
return self.cluster_discrete(kernels, n_clusters)
@timeit
def run(self):
# Patient part
# RnaSeq Data
rs_GE, rs_pat_ids, rs_ent_ids = self.read_rnaseq_data()
# Rppa Data
rp_GE, rp_pat_ids, rp_ent_ids = self.read_rppa_data()
# Somatic mutation data
som_patients = self.read_som_data()
# Find intersect
rs_GE, rs_pat_ids, rp_GE, rp_pat_ids, som_patients = self.find_intersection_patients(rs_GE, rs_pat_ids, rp_GE,
rp_pat_ids, som_patients)
# Kernel part
# RnaSeq Data
rs_GE, rs_uni_ids = self.preprocess_seq_patient_data(rs_GE, rs_ent_ids)
all_rs_pw_map = self.read_pathways()
labeled_all_rs_pw_map = self.label_rnaseq_patient_genes(all_rs_pw_map, rs_pat_ids, rs_GE, rs_uni_ids)
rs_kernels = self.create_seq_kernels(labeled_all_rs_pw_map, rs_pat_ids, 'rnaseq-kms')
# Rppa Data
rp_GE, rp_uni_ids = self.preprocess_seq_patient_data(rp_GE, rp_ent_ids)
all_rp_pw_map = self.read_pathways()
labeled_all_rp_pw_map = self.label_rppa_patient_genes(all_rp_pw_map, rp_pat_ids, rp_GE, rp_uni_ids)
rp_kernels = self.create_seq_kernels(labeled_all_rp_pw_map, rp_pat_ids, 'rppa-kms')
# Somatic mutation data
som_patients = self.preprocess_som_patient_data(som_patients)
all_som_pw_map = self.read_pathways()
labeled_all_som_pw_map = self.label_som_patient_genes(all_som_pw_map, som_patients)
som_kernels = self.create_som_kernels(labeled_all_som_pw_map, som_patients)
kernels = np.concatenate((rs_kernels, rp_kernels, som_kernels))
total = kernels.shape[1] * kernels.shape[2]
limit = (self.drop_percent * total) / 100.0
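        # discard near-empty kernels: keep only those whose non-zero entries cover at least drop_percent of the patient-by-patient matrix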
valid_kernels = kernels[np.count_nonzero(kernels, axis=(1, 2)) >= limit]
log(f'kernel_count={kernels.shape[0]} valid_kernel_count={valid_kernels.shape[0]}')
cluster_sizes = [2, 3, 4, 5]
for k in cluster_sizes:
log(f'Running clustering for k={k}')
self.cluster(valid_kernels, k)
self.label_analyzer = LabelAnalysis(results_dir=self.result_dir, methods=['mkkm', 'kmeans'],
cluster_sizes=cluster_sizes, log2_lambdas=self.log2_lambdas)
self.label_analyzer.run()
def create_experiment(*nargs):
global args
if __name__ == '__main__': # if running directly use command line arguments
args = parser.parse_args()
else: # otherwise use user given arguments
args = parser.parse_args(nargs)
print_args(args)
return Experiment1(args)
if __name__ == '__main__':
create_experiment().run()
| 44.755633
| 122
| 0.642852
| 22,542
| 0.872909
| 0
| 0
| 16,816
| 0.651177
| 0
| 0
| 7,655
| 0.29643
|
45346855166d8c198852fc2c2b74490101e9dbc6
| 1,703
|
py
|
Python
|
d3network/data/handschriftencensus_scrap.py
|
GusRiva/GusRiva
|
50d63e3bc84f007b10df6edadbab85e23cf15731
|
[
"MIT"
] | null | null | null |
d3network/data/handschriftencensus_scrap.py
|
GusRiva/GusRiva
|
50d63e3bc84f007b10df6edadbab85e23cf15731
|
[
"MIT"
] | null | null | null |
d3network/data/handschriftencensus_scrap.py
|
GusRiva/GusRiva
|
50d63e3bc84f007b10df6edadbab85e23cf15731
|
[
"MIT"
] | null | null | null |
import requests
from lxml import html
from bs4 import BeautifulSoup
import json
import codecs
import re
#In this variable I will store the information as a dictionary with this structure:
# {number : "Name"}
ms_dict = {}
links_dict = {"links" : []}
for index in range(1,27000):
print(index)
page = requests.get('http://www.handschriftencensus.de/'+ str(index))
c = page.content
soup = BeautifulSoup(c, "lxml")
ms_label = soup.find_all("th", class_="ort")
if len(ms_label) > 0:
ms_label = ms_label[0].text.rstrip()
ms_dict[ "h" + str(index)] = ms_label
inhalt = soup.find_all("a", class_="aw")
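        # each linked work (anchor with class "aw") becomes an edge from manuscript h<index> to work w<work_id>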
for el in inhalt:
            work_id = re.findall(r'/\d+$', el['href'])[0][1:]
links_dict['links'].append( { "source": "h" + str(index), "target": "w" + work_id } )
# In td id="inhalt" get the href, and only the number. Create the links at the same time
# work = work[0].text
# work = work.replace("'","")
# final_dict[index +1] = {"title":work}
#
# signaturen = soup.find_all("ol", class_="signaturen")
# if len(signaturen) > 0:
# final_dict[index+1]["manuscripts"] = []
# signaturen = signaturen[0]
# for elem in signaturen:
# if len(elem) > 1:
# manuscript = elem.find_all("a")[0]
#
# final_dict[index+1]["manuscripts"].append(manuscript.text)
#Save data as json
with codecs.open('manuscripts_ids.json', 'w', 'utf-8') as outfile:
json.dump(ms_dict,outfile, indent=2)
with codecs.open('links.json', 'w', 'utf-8') as outfile:
json.dump(links_dict,outfile, indent=2)
#To save the data as a csv
# table = pd.DataFrame.from_dict(final_dict, orient='index')
# table.to_csv("Handschriftencensus_full.csv", encoding="utf-8")
| 27.467742
| 90
| 0.658837
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 913
| 0.536113
|
4534bb68221abad8193f98fdfa1110b766c99aa2
| 2,590
|
py
|
Python
|
tests/emukit/quadrature/test_quadrature_acquisitions.py
|
alexgessner/emukit
|
355e26bb30edd772a81af2a1267c569d7f446d42
|
[
"Apache-2.0"
] | 6
|
2019-06-02T21:23:27.000Z
|
2020-02-17T09:46:30.000Z
|
tests/emukit/quadrature/test_quadrature_acquisitions.py
|
Tony-Chiong/emukit
|
a068c8d5e06b2ae8b038f67bf2e4f66c4d91651a
|
[
"Apache-2.0"
] | 4
|
2019-05-17T13:30:21.000Z
|
2019-06-21T13:49:19.000Z
|
tests/emukit/quadrature/test_quadrature_acquisitions.py
|
Tony-Chiong/emukit
|
a068c8d5e06b2ae8b038f67bf2e4f66c4d91651a
|
[
"Apache-2.0"
] | 1
|
2020-01-12T19:50:44.000Z
|
2020-01-12T19:50:44.000Z
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
import GPy
from math import isclose
from emukit.model_wrappers.gpy_quadrature_wrappers import QuadratureRBF, RBFGPy, BaseGaussianProcessGPy
from emukit.quadrature.methods import VanillaBayesianQuadrature
from emukit.quadrature.acquisitions import MutualInformation, IntegralVarianceReduction
REL_TOL = 1e-5
ABS_TOL = 1e-4
@pytest.fixture
def model():
rng = np.random.RandomState(42)
x_init = rng.rand(5, 2)
y_init = rng.rand(5, 1)
gpy_kernel = GPy.kern.RBF(input_dim=x_init.shape[1])
gpy_model = GPy.models.GPRegression(X=x_init, Y=y_init, kernel=gpy_kernel)
qrbf = QuadratureRBF(RBFGPy(gpy_kernel), integral_bounds=x_init.shape[1] * [(-3, 3)])
basegp = BaseGaussianProcessGPy(kern=qrbf, gpy_model=gpy_model)
model = VanillaBayesianQuadrature(base_gp=basegp)
return model
def test_mutual_information_shapes(model):
aq = MutualInformation(model)
x = np.array([[-1, 1], [0, 0], [-2, 0.1]])
# value
res = aq.evaluate(x)
assert res.shape == (3, 1)
# gradient
res = aq.evaluate_with_gradients(x)
assert res[0].shape == (3, 1)
assert res[1].shape == (3, 2)
def test_integral_variance_reduction_shapes(model):
aq = IntegralVarianceReduction(model)
x = np.array([[-1, 1], [0, 0], [-2, 0.1]])
# value
res = aq.evaluate(x)
assert res.shape == (3, 1)
# gradient
res = aq.evaluate_with_gradients(x)
assert res[0].shape == (3, 1)
assert res[1].shape == (3, 2)
def test_mutual_information_gradients(model):
aq = MutualInformation(model)
x = np.array([[-2.5, 1.5]])
_check_grad(aq, x)
def test_integral_variance_reduction_gradients(model):
aq = IntegralVarianceReduction(model)
x = np.array([[-2.5, 1.5]])
_check_grad(aq, x)
def _compute_numerical_gradient(aq, x, eps=1e-6):
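    # forward-difference approximation of the acquisition gradient, one input dimension at a time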
f, grad = aq.evaluate_with_gradients(x)
grad_num = np.zeros(grad.shape)
for d in range(x.shape[1]):
x_tmp = x.copy()
x_tmp[:, d] = x_tmp[:, d] + eps
f_tmp = aq.evaluate(x_tmp)
grad_num_d = (f_tmp - f) / eps
grad_num[:, d] = grad_num_d[:, 0]
return grad, grad_num
def _check_grad(aq, x):
grad, grad_num = _compute_numerical_gradient(aq, x)
isclose_all = 1 - np.array([isclose(grad[i, j], grad_num[i, j], rel_tol=REL_TOL, abs_tol=ABS_TOL)
for i in range(grad.shape[0]) for j in range(grad.shape[1])])
assert isclose_all.sum() == 0
| 28.777778
| 103
| 0.667568
| 0
| 0
| 0
| 0
| 486
| 0.187645
| 0
| 0
| 144
| 0.055598
|
4535c1a7513cb60d8687c9c277406f75c8762e19
| 2,039
|
py
|
Python
|
tests/test_ProtocolService/test_ProtocolService.py
|
danilocgsilva/awsinstances
|
c0ab6ae42b3bfbe94735f7ba4741b3facec271ce
|
[
"MIT"
] | null | null | null |
tests/test_ProtocolService/test_ProtocolService.py
|
danilocgsilva/awsinstances
|
c0ab6ae42b3bfbe94735f7ba4741b3facec271ce
|
[
"MIT"
] | null | null | null |
tests/test_ProtocolService/test_ProtocolService.py
|
danilocgsilva/awsinstances
|
c0ab6ae42b3bfbe94735f7ba4741b3facec271ce
|
[
"MIT"
] | null | null | null |
import unittest
import sys
sys.path.insert(2, "..")
from awsec2instances_includes.ProtocolService import ProtocolService
class test_ProtocolService(unittest.TestCase):
    def test_exception_wrong_argument(self):
wrong_argument = "some-invalid"
with self.assertRaises(Exception):
ProtocolService(wrong_argument)
def test_get_zero_element_string(self):
        protocolService = ProtocolService("")
        self.assertEqual(0, len(protocolService.get_ports()))
    def test_get_zero_element_none(self):
        protocolService = ProtocolService()
        self.assertEqual(0, len(protocolService.get_ports()))
def test_port_both_options(self):
protocolService = ProtocolService("with-ssh,with-http")
returned_ports = protocolService.get_ports()
self.assertEqual(22, returned_ports[0])
self.assertEqual(80, returned_ports[1])
def test_port_three_options(self):
protocolService = ProtocolService("with-ssh,with-http,with-database")
returned_ports = protocolService.get_ports()
self.assertEqual(22, returned_ports[0])
self.assertEqual(80, returned_ports[1])
self.assertEqual(3306, returned_ports[2])
def test_one_option_wrong(self):
one_option_wrong = "with-ssh,with-cassandra"
with self.assertRaises(Exception):
ProtocolService(one_option_wrong)
def test_three_options_one_wrong(self):
three_options = "with-ssh,with-http,with-cassandra"
with self.assertRaises(Exception):
ProtocolService(three_options)
def test_is_not_empty_false(self):
protocolService = ProtocolService()
self.assertFalse(protocolService.is_not_empty())
def test_is_not_empty_true(self):
protocolService = ProtocolService("with-ssh")
self.assertTrue(protocolService.is_not_empty())
def test_is_not_empty_true2(self):
protocolService = ProtocolService("with-ssh,with-http")
self.assertTrue(protocolService.is_not_empty())
| 36.410714
| 77
| 0.714076
| 1,915
| 0.939186
| 0
| 0
| 0
| 0
| 0
| 0
| 164
| 0.080432
|
4538624158b0321268253bb048733d15b3730192
| 873
|
py
|
Python
|
mltoolkit/mldp/utils/helpers/nlp/token_matching.py
|
mancunian1792/FewSum
|
c2f9ef0ae7445bdb188b6ceb28e998b3fd12b78e
|
[
"MIT"
] | 28
|
2020-10-12T19:05:22.000Z
|
2022-03-18T01:19:29.000Z
|
mltoolkit/mldp/utils/helpers/nlp/token_matching.py
|
mancunian1792/FewSum
|
c2f9ef0ae7445bdb188b6ceb28e998b3fd12b78e
|
[
"MIT"
] | 1
|
2022-01-30T01:52:59.000Z
|
2022-02-19T08:04:54.000Z
|
mltoolkit/mldp/utils/helpers/nlp/token_matching.py
|
mancunian1792/FewSum
|
c2f9ef0ae7445bdb188b6ceb28e998b3fd12b78e
|
[
"MIT"
] | 7
|
2020-10-29T14:01:04.000Z
|
2022-02-22T18:33:10.000Z
|
from .constants import SPECIAL_TOKENS
try:
import re2 as re
except ImportError:
import re
def twitter_sentiment_token_matching(token):
"""Special token matching function for twitter sentiment data."""
if 'URL_TOKEN' in SPECIAL_TOKENS and re.match(r'https?:\/\/[^\s]+', token):
return SPECIAL_TOKENS['URL_TOKEN']
if 'POS_EM_TOKEN' in SPECIAL_TOKENS and re.match(r':-?(\)|D|p)+', token):
return SPECIAL_TOKENS['POS_EM_TOKEN']
if 'NEG_EM_TOKEN' in SPECIAL_TOKENS and re.match(r':-?(\(|\\|/)+', token):
return SPECIAL_TOKENS['NEG_EM_TOKEN']
if 'USER_TOKEN' in SPECIAL_TOKENS and re.match(
r'(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z]+[A-Za-z0-9]+)', token):
return SPECIAL_TOKENS['USER_TOKEN']
if 'HEART_TOKEN' in SPECIAL_TOKENS and re.match(r'<3+', token):
return SPECIAL_TOKENS['HEART_TOKEN']
| 41.571429
| 79
| 0.651775
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 305
| 0.34937
|
45387e1e55f5181cc1ef4691f476f0481b601834
| 1,503
|
py
|
Python
|
setup.py
|
geirem/pyconfig
|
e99693b7bc0acb3fe6b82acd29e8724336f95c43
|
[
"CC0-1.0"
] | 1
|
2020-05-15T16:22:36.000Z
|
2020-05-15T16:22:36.000Z
|
setup.py
|
geirem/pyconfig
|
e99693b7bc0acb3fe6b82acd29e8724336f95c43
|
[
"CC0-1.0"
] | 9
|
2020-05-14T08:31:48.000Z
|
2021-04-22T12:35:15.000Z
|
setup.py
|
geirem/pyconfig
|
e99693b7bc0acb3fe6b82acd29e8724336f95c43
|
[
"CC0-1.0"
] | null | null | null |
"""A setuptools based setup module.
See:
https://packaging.python.org/guides/distributing-packages-using-setuptools/
"""
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='envyconfig',
version='1.2.1',
description='YAML reader with ENV interpolation.',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/geirem/envyconfig',
author='https://github.com/geirem',
author_email='geiremb@gmail.com',
classifiers=[
# https://pypi.org/classifiers/
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development',
'License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3 :: Only',
],
keywords='configtools development',
package_dir={
'': 'src',
},
packages=find_packages(where='src'),
python_requires='>=3.8',
extras_require={
'test': ['pytest'],
'googlesecrets': ["google-cloud-secret-manager"]
},
project_urls={ # Optional
'Bug Reports': 'https://github.com/geirem/envyconfig/issues',
'Funding': 'https://donate.pypi.org',
'Source': 'https://github.com/geirem/envyconfig/',
},
)
| 31.3125
| 75
| 0.641384
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 820
| 0.545576
|
45389f146c6eea595dda9a6c6445b4f79a204445
| 3,457
|
py
|
Python
|
pybamm/solvers/scipy_solver.py
|
danieljtait/PyBaMM
|
f9d6143770e4a01099f06e3574142424730f731a
|
[
"BSD-3-Clause"
] | null | null | null |
pybamm/solvers/scipy_solver.py
|
danieljtait/PyBaMM
|
f9d6143770e4a01099f06e3574142424730f731a
|
[
"BSD-3-Clause"
] | null | null | null |
pybamm/solvers/scipy_solver.py
|
danieljtait/PyBaMM
|
f9d6143770e4a01099f06e3574142424730f731a
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Solver class using Scipy's adaptive time stepper
#
import casadi
import pybamm
import scipy.integrate as it
import numpy as np
class ScipySolver(pybamm.BaseSolver):
"""Solve a discretised model, using scipy._integrate.solve_ivp.
Parameters
----------
method : str, optional
The method to use in solve_ivp (default is "BDF")
rtol : float, optional
The relative tolerance for the solver (default is 1e-6).
atol : float, optional
The absolute tolerance for the solver (default is 1e-6).
"""
def __init__(self, method="BDF", rtol=1e-6, atol=1e-6):
super().__init__(method, rtol, atol)
self.ode_solver = True
self.name = "Scipy solver ({})".format(method)
pybamm.citations.register("virtanen2020scipy")
def _integrate(self, model, t_eval, inputs=None):
"""
Solve a model defined by dydt with initial conditions y0.
Parameters
----------
model : :class:`pybamm.BaseModel`
The model whose solution to calculate.
t_eval : :class:`numpy.array`, size (k,)
The times at which to compute the solution
inputs : dict, optional
Any input parameters to pass to the model when solving
Returns
-------
object
An object containing the times and values of the solution, as well as
various diagnostic messages.
"""
if model.convert_to_format == "casadi":
inputs = casadi.vertcat(*[x for x in inputs.values()])
extra_options = {"rtol": self.rtol, "atol": self.atol}
# check for user-supplied Jacobian
implicit_methods = ["Radau", "BDF", "LSODA"]
if np.any([self.method in implicit_methods]):
if model.jacobian_eval:
extra_options.update(
{"jac": lambda t, y: model.jacobian_eval(t, y, inputs)}
)
# make events terminal so that the solver stops when they are reached
if model.terminate_events_eval:
def event_wrapper(event):
def event_fn(t, y):
return event(t, y, inputs)
event_fn.terminal = True
return event_fn
events = [event_wrapper(event) for event in model.terminate_events_eval]
extra_options.update({"events": events})
sol = it.solve_ivp(
lambda t, y: model.rhs_eval(t, y, inputs),
(t_eval[0], t_eval[-1]),
model.y0,
t_eval=t_eval,
method=self.method,
dense_output=True,
**extra_options
)
if sol.success:
# Set the reason for termination
if sol.message == "A termination event occurred.":
termination = "event"
t_event = []
for time in sol.t_events:
if time.size > 0:
t_event = np.append(t_event, np.max(time))
t_event = np.array([np.max(t_event)])
y_event = sol.sol(t_event)
elif sol.message.startswith("The solver successfully reached the end"):
termination = "final time"
t_event = None
y_event = np.array(None)
return pybamm.Solution(sol.t, sol.y, t_event, y_event, termination)
else:
raise pybamm.SolverError(sol.message)
| 33.563107
| 84
| 0.565519
| 3,322
| 0.960949
| 0
| 0
| 0
| 0
| 0
| 0
| 1,332
| 0.385305
|
4539375fe3de0d453832a057381afb182d19ced7
| 5,204
|
py
|
Python
|
crits/core/fields.py
|
dutrow/crits
|
6b357daa5c3060cf622d3a3b0c7b41a9ca69c049
|
[
"MIT"
] | 738
|
2015-01-02T12:39:55.000Z
|
2022-03-23T11:05:51.000Z
|
crits/core/fields.py
|
deadbits/crits
|
154097a1892e9d3960d6faaed4bd2e912a196a47
|
[
"MIT"
] | 605
|
2015-01-01T01:03:39.000Z
|
2021-11-17T18:51:07.000Z
|
crits/core/fields.py
|
deadbits/crits
|
154097a1892e9d3960d6faaed4bd2e912a196a47
|
[
"MIT"
] | 316
|
2015-01-07T12:35:01.000Z
|
2022-03-30T04:44:30.000Z
|
import datetime
from dateutil.parser import parse
from mongoengine import DateTimeField, FileField
from mongoengine.connection import DEFAULT_CONNECTION_NAME
#from mongoengine.python_support import str_types
from six import string_types as str_types
import io
from django.conf import settings
if settings.FILE_DB == settings.S3:
import crits.core.s3_tools as S3
class CritsDateTimeField(DateTimeField):
"""
Custom MongoEngine DateTimeField. Utilizes a transform such that if the
value passed in is a string we will convert it to a datetime.datetime
object, or if it is set to None we will use the current datetime (useful
when instantiating new objects and wanting the default dates to all be the
current datetime).
"""
def __set__(self, instance, value):
value = self.transform(value)
return super(CritsDateTimeField, self).__set__(instance, value)
def transform(self, value):
        if value and isinstance(value, str_types):  # basestring is Python 2 only; use the six alias imported above
return parse(value, fuzzy=True)
elif not value:
return datetime.datetime.now()
else:
return value
class S3Proxy(object):
"""
Custom proxy for MongoEngine which uses S3 to store binaries instead of
GridFS.
"""
def __init__(self, grid_id=None, key=None, instance=None,
db_alias=DEFAULT_CONNECTION_NAME, collection_name='fs'):
self.grid_id = grid_id # Store id for file
self.key = key
self.instance = instance
self.db_alias = db_alias
self.collection_name = collection_name
self.newfile = None # Used for partial writes
self.gridout = None
def __getattr__(self, name):
attrs = ('_fs', 'grid_id', 'key', 'instance', 'db_alias',
'collection_name', 'newfile', 'gridout')
if name in attrs:
return self.__getattribute__(name)
obj = self.get()
if name in dir(obj):
return getattr(obj, name)
raise AttributeError
def __get__(self, instance, value):
return self
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.grid_id)
def delete(self):
# Delete file from S3, FileField still remains
S3.delete_file_s3(self.grid_id,self.collection_name)
self.grid_id = None
self.gridout = None
self._mark_as_changed()
def get(self, id=None):
if id:
self.grid_id = id
if self.grid_id is None:
return None
try:
if self.gridout is None:
self.gridout = io.BytesIO(S3.get_file_s3(self.grid_id, self.collection_name))
return self.gridout
except:
return None
def put(self, file_obj, **kwargs):
if self.grid_id:
raise Exception('This document already has a file. Either delete '
'it or call replace to overwrite it')
self.grid_id = S3.put_file_s3(file_obj, self.collection_name)
self._mark_as_changed()
def read(self, size=-1):
gridout = self.get()
if gridout is None:
return None
else:
try:
return gridout.read(size)
except:
return ""
def _mark_as_changed(self):
"""Inform the instance that `self.key` has been changed"""
if self.instance:
self.instance._mark_as_changed(self.key)
class S3FileField(FileField):
"""
Custom FileField for MongoEngine which utilizes S3.
"""
def __init__(self, db_alias=DEFAULT_CONNECTION_NAME, collection_name="fs",
**kwargs):
super(S3FileField, self).__init__(db_alias, collection_name, **kwargs)
self.proxy_class = S3Proxy
def __set__(self, instance, value):
key = self.name
if ((hasattr(value, 'read') and not
isinstance(value, self.proxy_class)) or isinstance(value, str_types)):
# using "FileField() = file/string" notation
grid_file = instance._data.get(self.name)
# If a file already exists, delete it
if grid_file:
try:
grid_file.delete()
except:
pass
# Create a new file with the new data
grid_file.put(value)
else:
# Create a new proxy object as we don't already have one
instance._data[key] = self.proxy_class(key=key, instance=instance,
collection_name=self.collection_name)
instance._data[key].put(value)
else:
instance._data[key] = value
instance._mark_as_changed(key)
def getFileField(db_alias=DEFAULT_CONNECTION_NAME, collection_name="fs", **kwargs):
"""
Determine if the admin has configured CRITs to utilize GridFS or S3 for
binary storage.
"""
if settings.FILE_DB == settings.GRIDFS:
return FileField(db_alias, collection_name, **kwargs)
elif settings.FILE_DB == settings.S3:
return S3FileField(db_alias, collection_name, **kwargs)
| 33.574194
| 93
| 0.613951
| 4,419
| 0.849154
| 0
| 0
| 0
| 0
| 0
| 0
| 1,174
| 0.225596
|
453972bee5e4b38dcaee26d48c6dcec6950939dd
| 821
|
py
|
Python
|
custom_uss/custom_widgets/outlog.py
|
shuanet/dss
|
5daafeb89aac58e4614775f301bec920f4abfa24
|
[
"Apache-2.0"
] | 2
|
2022-02-13T19:13:16.000Z
|
2022-02-17T14:52:05.000Z
|
custom_uss/custom_widgets/outlog.py
|
shuanet/dss
|
5daafeb89aac58e4614775f301bec920f4abfa24
|
[
"Apache-2.0"
] | null | null | null |
custom_uss/custom_widgets/outlog.py
|
shuanet/dss
|
5daafeb89aac58e4614775f301bec920f4abfa24
|
[
"Apache-2.0"
] | 1
|
2022-02-16T20:17:38.000Z
|
2022-02-16T20:17:38.000Z
|
import sys
from PySide6 import QtGui
class OutLog:
def __init__(self, edit, out=None, color=None):
"""(edit, out=None, color=None) -> can write stdout, stderr to a
QTextEdit.
edit = QTextEdit
out = alternate stream ( can be the original sys.stdout )
color = alternate color (i.e. color stderr a different color)
"""
self.edit = edit
        self.out = out
self.color = color
def write(self, m):
if self.color:
tc = self.edit.textColor()
self.edit.setTextColor(self.color)
self.edit.moveCursor(QtGui.QTextCursor.End)
self.edit.insertPlainText( m )
if self.color:
self.edit.setTextColor(tc)
if self.out:
self.out.write(m)
def flush(self):
pass
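# Minimal usage sketch (assumes a QTextEdit instance named `edit` from
# PySide6.QtWidgets already exists; the names here are hypothetical):
#   sys.stdout = OutLog(edit, out=sys.__stdout__)
#   sys.stderr = OutLog(edit, out=sys.__stderr__, color=QtGui.QColor(255, 0, 0))
#   print("hello")  # shown in the QTextEdit and echoed to the original stdout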
| 25.65625
| 72
| 0.576127
| 782
| 0.952497
| 0
| 0
| 0
| 0
| 0
| 0
| 256
| 0.311815
|
453a307689bad4f488cdb3f14eea66f7d9566594
| 5,079
|
py
|
Python
|
2020/day07/day07.py
|
maxschalz/advent_of_code
|
537ff10b74fb0faaba4fb7dffcba4a5cf3a999ae
|
[
"BSD-3-Clause"
] | null | null | null |
2020/day07/day07.py
|
maxschalz/advent_of_code
|
537ff10b74fb0faaba4fb7dffcba4a5cf3a999ae
|
[
"BSD-3-Clause"
] | null | null | null |
2020/day07/day07.py
|
maxschalz/advent_of_code
|
537ff10b74fb0faaba4fb7dffcba4a5cf3a999ae
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import unittest
COLOR = "shiny gold"
FNAME = "input.txt"
N_ITER = 1e7
TEST_FNAME = "test_input.txt"
def main():
"""Main function."""
data = load_input(FNAME)
part1(data)
part2(data)
print("\nUnittests")
unittest.main()
def part1(data):
"""Solution to day 7, part 1."""
for rule in data:
Bag(rule)
n_bags = Bag.n_bags_containing_specific_bag(COLOR)
print(f"{n_bags} bags can contain at least one {COLOR} bag.")
return n_bags
def part2(data):
"""Solution to day 7, part 2."""
for rule in data:
Bag(rule)
n_bags = Bag.n_bags_inside(COLOR)
print(f"One {COLOR} bag contains {n_bags} other bags.")
return n_bags
def load_input(fname):
"""Read in the data, return as a list."""
with open(fname, "r") as f:
data = f.readlines()
data = [x.strip("\n") for x in data]
return data
class Bag:
all_bags = {}
def __init__(self, rule):
self.color, self.descendants = self.init_bag(rule)
self.no_descendants = not bool(self.descendants)
Bag.all_bags[self.color] = self
def init_bag(self, rule):
"""Get the color of the bag and its descendants.
Parameters
----------
rule : str
Contains the rule defining the bag, e.g.:
shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.
Returns
-------
color : str
The color of the bag, e.g., `dark olive`
descendants_dict : dict
A dictionary with the keys being the colors of the bags
contained in this bag and the values being the corresponding
amount of bags of the color.
"""
color, descendants = rule.split(" bags contain ")
descendants_dict = {}
for desc in descendants.split(","):
match = re.match(r"(\d+) ([a-z]+ [a-z]+) bags?",
desc.strip())
if match is None:
return color, None
else:
amount = int(match.group(1))
descendant_color = match.group(2)
descendants_dict[descendant_color] = amount
return color, descendants_dict
def bag_in_descendants(self, bag_color, n_iter):
"""Check if bag_color is in this bag or in its descendants.
This function recursively looks for the bag in question. There
surely are more efficient ways to do this but I think this is
quite intuitive and understandable.
"""
# Prevent an infinite loop.
if n_iter > N_ITER:
raise RuntimeError("Exceeded maximum number of iterations!")
if self.color==bag_color:
return True
if self.no_descendants:
return False
for descendant_bag_color in self.descendants.keys():
descendant_bag = Bag.all_bags[descendant_bag_color]
if descendant_bag.bag_in_descendants(bag_color, n_iter+1):
return True
return False
def n_bags_in_descendants(self, n_iter):
"""Return the number of bags in the descendants of this bag.
Note
----
This includes the bag itself, e.g., consider one red bag
containing four green bags. In that case, the function would
return 5 (and not 4).
"""
# Prevent an infinite loop.
if n_iter > N_ITER:
raise RuntimeError("Exceeded maximum number of iterations!")
if self.no_descendants:
return 0
n_iter += 1
bags_inside = 0
for descendant_color, descendant_num in self.descendants.items():
descendant_bag = Bag.all_bags[descendant_color]
if descendant_bag.no_descendants:
bags_inside += descendant_num
else:
bags_inside += (
descendant_num
* descendant_bag.n_bags_in_descendants(n_iter))
bags_inside += 1
return bags_inside
@classmethod
    def n_bags_containing_specific_bag(cls, bag_color):
        """Return the number of bags containing the bag `bag_color`."""
        n_bags = 0
        for bag in Bag.all_bags.values():
            if bag is Bag.all_bags[bag_color]:
                continue
            n_bags += int(bag.bag_in_descendants(bag_color, 0))
        return n_bags
    @classmethod
    def n_bags_inside(cls, bag_color):
        """Return the number of bags inside the bag `bag_color`."""
        n_bags = Bag.all_bags[bag_color].n_bags_in_descendants(0)
        n_bags -= 1  # Subtract the bag itself.
        return n_bags
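    # Worked sketch of the counting rules above (hypothetical rules, not the
    # puzzle input): suppose shiny gold bags contain 2 dark red bags, dark red
    # bags contain 2 dark orange bags, and dark orange bags contain no other
    # bags. n_bags_in_descendants counts each bag plus its contents, so
    # n_bags_inside("shiny gold") returns 2 + 2 * 2 = 6.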
class TestMethods(unittest.TestCase):
def setUp(self):
Bag.all_bags = {}
self.data = load_input(TEST_FNAME)
def test_part1(self):
counts = part1(self.data)
self.assertEqual(counts, 4)
def test_part2(self):
counts = part2(self.data)
self.assertEqual(counts, 32)
if __name__=="__main__":
main()
| 29.189655
| 74
| 0.590471
| 4,091
| 0.805474
| 0
| 0
| 617
| 0.121481
| 0
| 0
| 1,758
| 0.346131
|
453b632b266da30271e1e4710f1d5bea075bf4fb
| 1,937
|
py
|
Python
|
cluster/image/pro_seafile_7.1/scripts_7.1/start.py
|
chaosbunker/seafile-docker
|
560d982d8cd80a20508bf616abc0dc741d7b5d84
|
[
"Apache-2.0"
] | 503
|
2015-11-11T22:07:36.000Z
|
2022-03-28T21:29:30.000Z
|
cluster/image/pro_seafile_7.1/scripts_7.1/start.py
|
chaosbunker/seafile-docker
|
560d982d8cd80a20508bf616abc0dc741d7b5d84
|
[
"Apache-2.0"
] | 209
|
2015-07-13T04:49:38.000Z
|
2022-03-25T22:06:18.000Z
|
cluster/image/pro_seafile_7.1/scripts_7.1/start.py
|
chaosbunker/seafile-docker
|
560d982d8cd80a20508bf616abc0dc741d7b5d84
|
[
"Apache-2.0"
] | 195
|
2015-07-09T18:11:47.000Z
|
2022-03-25T11:56:53.000Z
|
#!/usr/bin/env python3
#coding: UTF-8
import os
import sys
import time
import json
import argparse
from os.path import join, exists, dirname
from upgrade import check_upgrade
from utils import call, get_conf, get_script, get_command_output, get_install_dir
installdir = get_install_dir()
topdir = dirname(installdir)
def watch_controller():
maxretry = 4
retry = 0
while retry < maxretry:
controller_pid = get_command_output('ps aux | grep seafile-controller | grep -v grep || true').strip()
garbage_collector_pid = get_command_output('ps aux | grep /scripts/gc.sh | grep -v grep || true').strip()
if not controller_pid and not garbage_collector_pid:
retry += 1
else:
retry = 0
time.sleep(5)
print('seafile controller exited unexpectedly.')
sys.exit(1)
def main(args):
call('/scripts/create_data_links.sh')
# check_upgrade()
os.chdir(installdir)
call('service nginx start &')
admin_pw = {
'email': get_conf('SEAFILE_ADMIN_EMAIL', 'me@example.com'),
'password': get_conf('SEAFILE_ADMIN_PASSWORD', 'asecret'),
}
password_file = join(topdir, 'conf', 'admin.txt')
with open(password_file, 'w+') as fp:
json.dump(admin_pw, fp)
try:
call('{} start'.format(get_script('seafile.sh')))
call('{} start'.format(get_script('seahub.sh')))
if args.mode == 'backend':
call('{} start'.format(get_script('seafile-background-tasks.sh')))
finally:
if exists(password_file):
os.unlink(password_file)
print('seafile server is running now.')
try:
watch_controller()
except KeyboardInterrupt:
print('Stopping seafile server.')
sys.exit(0)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Seafile cluster start script')
parser.add_argument('--mode')
main(parser.parse_args())
| 29.348485
| 113
| 0.653588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 563
| 0.290656
|
453bee7425b707c77a51058c3fca5c10f29a6d05
| 4,774
|
py
|
Python
|
codit/stats/de_duplication_stat.py
|
saikat107/OpenNMT-py
|
148b0d860e78120de704f7a6671e8eced251801b
|
[
"MIT"
] | null | null | null |
codit/stats/de_duplication_stat.py
|
saikat107/OpenNMT-py
|
148b0d860e78120de704f7a6671e8eced251801b
|
[
"MIT"
] | null | null | null |
codit/stats/de_duplication_stat.py
|
saikat107/OpenNMT-py
|
148b0d860e78120de704f7a6671e8eced251801b
|
[
"MIT"
] | null | null | null |
import sys, os
import nltk
import numpy as np
class Patch():
def __init__(self):
self.id = -1
self.parent_code = ''
self.child_code = ''
self.patches = []
self.verdict = False
self.distance = 0
self.verdict_token = False
pass
def __repr__(self):
return str(self.id) + '\n' + ' '.join(self.parent_code) + '\n' + ' '.join(self.child_code) \
+ '\n' + str(self.distance) + '\n' + str(self.verdict)
def read_patch(file_path, size):
num_line_per_patch = size * 2 + 9
patches_lines = []
with open(file_path) as f:
patch = []
for ln, line in enumerate(f):
line = line.strip()
if (ln % num_line_per_patch == 0) and (ln != 0):
patches_lines.append([l for l in patch])
patch = []
patch.append(line)
patches_lines.append(patch)
patches = []
for lines in patches_lines:
ex = Patch()
ex.id = int(lines[0])
ex.parent_code = [token.strip() for token in lines[1].split()]
ex.child_code = [token.strip() for token in lines[3].split()]
ex.patches = []
for gen_idx in range(size):
cidx = gen_idx * 2
didx = cidx + 1
ex.patches.append([lines[cidx + 7], int(lines[didx + 7])])
verdict = lines[-2].strip()
if verdict == 'True':
ex.verdict = True
else:
ex.verdict = False
# print(verdict)
ex.distance = nltk.edit_distance([token.strip() for token in ex.parent_code],
[token.strip() for token in ex.child_code])
patches.append(ex)
return np.asarray(patches)
def de_duplicate_patches(patches):
patch_map = {}
for pidx, patch in enumerate(patches):
key = ' '.join(patch.parent_code) + ' '.join(patch.child_code)
if key not in patch_map.keys():
patch_map[key] = []
patch_map[key].append([patch, pidx])
unique_indices = []
for key in patch_map:
ps = patch_map[key]
if len(ps) == 1:
unique_indices.append(ps[0][1])
else:
idx = -1
for pi, p in enumerate(ps):
if p[0].verdict:
idx = pi
unique_indices.append(ps[idx][1])
return unique_indices
if __name__ == '__main__':
result_base = '/home/sc2nf/codit-clone'
option = 'token' # 'token
size = 10
# if option == 'tree':
# file_name = 'codit-all-concrete_' + str(size) + '.2_' + str(2*size) + '_decode_res.txt'
# else:
# file_name = 'codit.all.token.top.' + str(size) + '_' + str(size) + '_decode_res.txt'
file_name_tree = 'codit-all-concrete_' + str(size) + '.2_' + str(2 * size) + '_decode_res.txt'
file_path_tree = result_base + '/' + file_name_tree
patches_tree = read_patch(file_path_tree, size)
unique_indices = de_duplicate_patches(patches_tree)
# unique_patches_tree = patches_tree[unique_indices]
# unique_count = len(unique_patches_tree)
file_name_token = 'codit.all.token.top.' + str(size) + '_' + str(size) + '_decode_res.txt'
file_path_token = result_base + '/' + file_name_token
patches_token = read_patch(file_path_token, size)
# unique_patches = patches_token[unique_indices]
unified_patches = []
for idx, (p_tree, p_token) in enumerate(zip(patches_tree, patches_token)):
if idx in unique_indices:
assert isinstance(p_tree, Patch) and isinstance(p_token, Patch)
p_tree.verdict_token = p_token.verdict
unified_patches.append(p_tree)
tree_count = np.sum([1 if p.verdict else 0 for p in unified_patches])
token_count = np.sum([1 if p.verdict_token else 0 for p in unified_patches])
tree_indices = set()
token_indices = set()
for i, p in enumerate(unified_patches):
if p.verdict:
tree_indices.add(i)
if p.verdict_token:
token_indices.add(i)
only_tree = tree_indices.difference(token_indices)
only_token = token_indices.difference(tree_indices)
common = tree_indices.intersection(token_indices)
print(tree_count, token_count, len(only_token), len(only_tree), len(common), len(unified_patches))
#
# total_success_tree = np.sum([1 if p.verdict else 0 for p in unique_patches])
# print(unique_patches, total_success_tree)
# tree_success_indices_in_unique = set()
# for idx, p in enumerate(unique_patches):
# if p.verdict:
# tree_success_indices_in_unique.add(idx)
#
#
#
# total_success_token = np.sum([1 if p.verdict else 0 for p in unique_patches])
# print(tree_count, total_success_token)
| 35.626866
| 102
| 0.59845
| 443
| 0.092794
| 0
| 0
| 0
| 0
| 0
| 0
| 942
| 0.197319
|
453c01d612c70ef3a56e01d8f48863230c296568
| 4,934
|
py
|
Python
|
pysnark/qaptools/runqapgen.py
|
Charterhouse/pysnark
|
4d8ae194a918c57a84c9f42f2d9809d66e90f006
|
[
"RSA-MD"
] | 65
|
2018-01-12T08:49:18.000Z
|
2022-03-16T07:35:40.000Z
|
pysnark/qaptools/runqapgen.py
|
Charterhouse/pysnark
|
4d8ae194a918c57a84c9f42f2d9809d66e90f006
|
[
"RSA-MD"
] | 9
|
2018-01-19T21:14:02.000Z
|
2019-10-15T09:48:01.000Z
|
pysnark/qaptools/runqapgen.py
|
Charterhouse/pysnark
|
4d8ae194a918c57a84c9f42f2d9809d66e90f006
|
[
"RSA-MD"
] | 13
|
2018-01-15T20:50:57.000Z
|
2022-03-25T05:39:36.000Z
|
# Copyright (c) 2016-2018 Koninklijke Philips N.V. All rights reserved. A
# copyright license for redistribution and use in source and binary forms,
# with or without modification, is hereby granted for non-commercial,
# experimental and research purposes, provided that the following conditions
# are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimers.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimers in the
# documentation and/or other materials provided with the distribution. If
# you wish to use this software commercially, kindly contact
# info.licensing@philips.com to obtain a commercial license.
#
# This license extends only to copyright and does not include or grant any
# patent license or other license whatsoever.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import subprocess
import sys
import pysnark.options
def run(eksize, pksize, genmk=False):
"""
Run the qapgen tool
:param eksize: Desired master evaluation key size
:param pksize: Desired master public key size
:param genmk: True if a new master secret key should be generated, False otherwise
:return: None
"""
mskfile = pysnark.options.get_mskey_file()
mkeyfile = pysnark.options.get_mkey_file()
mpkeyfile = pysnark.options.get_mpkey_file()
if not genmk and not os.path.isfile(mskfile):
        raise IOError("Could not enlarge master key material: master secret key missing")
print >> sys.stderr, "*** " + ("Generating" if genmk else "Enlarging") + " master key material"
if subprocess.call([pysnark.options.get_qaptool_exe("qapgen"), str(max(pksize,eksize,0)), str(max(pksize,0)),
mskfile, mkeyfile, mpkeyfile]) != 0:
sys.exit(2)
def get_mekey_size():
"""
Get the size (maximal exponent) of the current master evaluation key
:return: Size, or -1 if key does not exist
"""
try:
mekf = open(pysnark.options.get_mkey_file())
curmk = int(mekf.next().strip().split(" ")[2])
mekf.close()
return curmk
except IOError:
return -1
def get_mpkey_size():
"""
Get the size (maximal exponent) of the current master public key
:return: Size, or -1 if key does not exist
"""
try:
mpkf = open(pysnark.options.get_mpkey_file())
curmpk = int(mpkf.next().strip().split(" ")[2])
mpkf.close()
return curmpk
except IOError:
return -1
def ensure_mkey(eksize, pksize):
"""
Ensures that there are master evaluation and public keys of the given sizes.
If master evaluation/public keys exist but are to small, and there is no
master secret key, this raises an error.
If there is no key material at all, a fresh master secret key will be
generated.
:param eksize: Minimal evaluation key size (-1 if not needed)
:param pksize: Minimal public key size (-1 if not needed)
:return: Actual evaluation key, public key size after key generation
"""
curek = get_mekey_size()
curpk = get_mpkey_size()
havemsk = os.path.isfile(pysnark.options.get_mskey_file())
havekeys = os.path.isfile(pysnark.options.get_mpkey_file()) or os.path.isfile(pysnark.options.get_mkey_file())
if curek < eksize or curpk < pksize:
if havemsk:
run(max(curek, eksize), max(curpk, pksize), False)
return (max(curek, eksize), max(curpk, pksize))
elif havekeys:
raise IOError("Key material too small ("+str(curek)+","+str(curpk)+
")<("+str(eksize)+","+str(pksize)+") and missing master secret key")
else:
run(eksize, pksize, True)
return (eksize,pksize)
else:
return (curek,curpk)
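# Illustrative behaviour sketch (hypothetical sizes): with an existing master
# evaluation key of size 100 and master public key of size 50, a call to
# ensure_mkey(80, 80) re-runs qapgen with sizes (100, 80) provided the master
# secret key exists, while ensure_mkey(80, 40) simply returns (100, 50).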
if __name__ == "__main__":
if len(sys.argv)<3:
print >>sys.stderr, "*** Usage:", sys.argv[0], "<eksize>", "<pksize>"
sys.exit(2)
argeksize = int(sys.argv[1])
argpksize = int(sys.argv[2])
run(argeksize, argpksize, not os.path.isfile(pysnark.options.get_mskey_file()))
| 37.378788
| 114
| 0.688285
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,916
| 0.591001
|
453c20b8c1cf91ca7912ad336c0a4f1a000e5011
| 4,024
|
py
|
Python
|
tubee/utils/__init__.py
|
tomy0000000/Tubee
|
1bfbd3cde118cd8a31499b8255b311602fde85bc
|
[
"MIT"
] | 8
|
2020-12-09T13:01:41.000Z
|
2022-01-09T10:06:25.000Z
|
tubee/utils/__init__.py
|
tomy0000000/Tubee
|
1bfbd3cde118cd8a31499b8255b311602fde85bc
|
[
"MIT"
] | 141
|
2019-08-21T20:23:07.000Z
|
2022-03-29T14:02:27.000Z
|
tubee/utils/__init__.py
|
tomy0000000/Tubee
|
1bfbd3cde118cd8a31499b8255b311602fde85bc
|
[
"MIT"
] | 7
|
2020-07-28T08:52:06.000Z
|
2021-07-26T02:15:36.000Z
|
"""Helper Functions
Some Misc Functions used in this app
"""
import secrets
import string
from functools import wraps
from urllib.parse import urljoin, urlparse
from dateutil import parser
from flask import abort, current_app, request
from flask_login import current_user
from flask_migrate import upgrade
def setup_app():
# Migrate database to latest revision
upgrade()
current_app.logger.info("Database migrated")
from ..models.user import User
# Create an admin user if none exists
if not User.query.filter_by(admin=True).first():
# Create a random password
alphabet = string.ascii_letters + string.digits
password = "".join(secrets.choice(alphabet) for i in range(20))
User(username="admin", password=password, admin=True)
current_app.db.session.commit()
current_app.logger.info("Admin created automatically:")
current_app.logger.info("Username: admin")
current_app.logger.info(f"Password: {password}")
# Reschedule all tasks
from ..models import Channel
from ..tasks import remove_all_tasks, schedule_channel_renewal
remove_all_tasks()
current_app.logger.info("All tasks removed")
schedule_channel_renewal(Channel.query.all())
current_app.logger.info("Channel renewal scheduled")
# TODO: Update channels metadata
def try_parse_datetime(string):
try:
return parser.parse(string).replace(tzinfo=None)
except (ValueError, TypeError):
return None
def admin_required(*args, **kwargs):
if not current_user.admin:
abort(403)
def admin_required_decorator(func):
"""Restrict view function to admin-only
Arguments:
func {view function} -- The view function to be restricting
Returns:
view function -- The restricted function
"""
@wraps(func)
def decorated_view_function(*args, **kwargs):
admin_required()
return func(*args, **kwargs)
return decorated_view_function
def pushover_required(func):
"""Restrict view function to users who have configured Pushover account
Arguments:
func {view function} -- The view function to be restricting
Returns:
view function -- The restricted function
"""
@wraps(func)
def decorated_function(*args, **kwargs):
if not current_user.pushover:
abort(403)
return func(*args, **kwargs)
return decorated_function
def youtube_required(func):
"""Restrict view function to users who have configured YouTube account
Arguments:
func {view function} -- The view function to be restricting
Returns:
view function -- The restricted function
"""
@wraps(func)
def decorated_function(*args, **kwargs):
if not current_user.youtube:
abort(403)
return func(*args, **kwargs)
return decorated_function
def is_safe_url(target):
"""Helper used to check endpoint before redirecting user
Arguments:
target {url} -- a url with complete scheme and domain to be examine
Returns:
bool -- target is a safe url or not
"""
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ("http", "https") and ref_url.netloc == test_url.netloc
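# Hedged example (hypothetical request.host_url of "https://example.com/"):
#   is_safe_url("/dashboard")              -> True, same host after urljoin
#   is_safe_url("https://evil.test/login") -> False, different netloc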
def notify_admin(initiator, service, **kwargs):
"""Send Notification to all Admin
A Temporary function used to notify admin
Arguments:
initiator {str} -- Action or reason that trigger this notification
service {str or notification.Service} -- Service used to send notification
**kwargs {dict} -- optional arguments passed to notification
Returns:
dict -- Response from notification service
"""
from ..models.user import User
admins = User.query.filter_by(admin=True).all()
response = {}
for admin in admins:
response[admin.username] = admin.send_notification(initiator, service, **kwargs)
return response
| 27.006711
| 88
| 0.685885
| 0
| 0
| 0
| 0
| 433
| 0.107604
| 0
| 0
| 1,627
| 0.404324
|
453cbca8f170f8d57f86e5292c872a332ff4738e
| 1,094
|
py
|
Python
|
HourlyCrime/hour.py
|
pauljrodriguezcs/Chicago_Crime_Analysis
|
8f385fdfbb8b770631a458edf03f90836f33b674
|
[
"MIT"
] | 1
|
2020-02-12T16:25:23.000Z
|
2020-02-12T16:25:23.000Z
|
HourlyCrime/hour.py
|
pauljrodriguezcs/Chicago_Crime_Analysis
|
8f385fdfbb8b770631a458edf03f90836f33b674
|
[
"MIT"
] | null | null | null |
HourlyCrime/hour.py
|
pauljrodriguezcs/Chicago_Crime_Analysis
|
8f385fdfbb8b770631a458edf03f90836f33b674
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import scipy.linalg as la
print("loading time series... ")
plt.figure(figsize=(16,9))
timeSeries = np.loadtxt('TheftTS.txt',delimiter=',',dtype=float) # load data
hourly = timeSeries[:,0]
plt.plot(hourly,'r-', label='Theft')
timeSeries = np.loadtxt('BatteryTS.txt',delimiter=',',dtype=float) # load data
hourly = timeSeries[:,0]
plt.plot(hourly,'g-', label='Battery')
timeSeries = np.loadtxt('CriminalDamageTS.txt',delimiter=',',dtype=float) # load data
hourly = timeSeries[:,0]
plt.plot(hourly,'b-', label='Criminal_Damage')
timeSeries = np.loadtxt('TarcoticsTS.txt',delimiter=',',dtype=float) # load data
hourly = timeSeries[:,0]
plt.plot(hourly,'c-', label='Narcotics')
timeSeries = np.loadtxt('AssaultTS.txt',delimiter=',',dtype=float) # load data
hourly = timeSeries[:,0]
plt.plot(hourly,'m-', label='Assault')
plt.xticks(np.arange(0,24,step=1))
plt.grid(True)
plt.legend()
plt.xlabel('Hour')
plt.ylabel('Total Crimes')
plt.title('Crime per Hour')
# plt.show()
plt.savefig('CrimePerHour.png',format='png',dpi=600)
| 30.388889
| 85
| 0.71755
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 321
| 0.293419
|
453cc274659ff78328110cd29e7888f4f4d189f2
| 2,733
|
py
|
Python
|
scripts/annotation_csv.py
|
RulerOf/keras-yolo3
|
8d091cf42b2f126626ad8610adf31293225b7daa
|
[
"MIT"
] | 37
|
2018-10-20T15:50:18.000Z
|
2021-06-18T14:31:50.000Z
|
scripts/annotation_csv.py
|
RulerOf/keras-yolo3
|
8d091cf42b2f126626ad8610adf31293225b7daa
|
[
"MIT"
] | 34
|
2019-04-10T18:59:08.000Z
|
2021-03-24T11:08:36.000Z
|
scripts/annotation_csv.py
|
RulerOf/keras-yolo3
|
8d091cf42b2f126626ad8610adf31293225b7daa
|
[
"MIT"
] | 13
|
2019-08-29T08:19:05.000Z
|
2021-09-20T10:13:31.000Z
|
"""
Creating training file from own custom dataset
>> python annotation_csv.py \
--path_dataset ~/Data/PeopleDetections \
--path_output ../model_data
"""
import os
import sys
import glob
import argparse
import logging
import pandas as pd
import tqdm
sys.path += [os.path.abspath('.'), os.path.abspath('..')]
from keras_yolo3.utils import update_path
IMAGE_EXTENSIONS = ('.png', '.jpg')
ANNOT_COLUMNS = ('xmin', 'ymin', 'xmax', 'ymax', 'class')
def parse_arguments():
parser = argparse.ArgumentParser(description='Annotation Converter (custom CSV).')
parser.add_argument('--path_dataset', type=str, required=True,
help='Path to custom CSV dataset.')
parser.add_argument('--path_output', type=str, required=False, default='.',
help='Path to output folder.')
arg_params = vars(parser.parse_args())
for k in (k for k in arg_params if 'path' in k):
arg_params[k] = update_path(arg_params[k])
assert os.path.exists(arg_params[k]), 'missing (%s): %s' % (k, arg_params[k])
logging.info('PARAMETERS: \n%s', '\n'.join(['"%s": \t\t %r' % (k, arg_params[k])
for k in arg_params]))
return arg_params
def convert_annotation(path_csv, classes=None):
df = pd.read_csv(path_csv)
if 'class' in df.columns and classes:
df = df[df['class'].isin(classes)]
elif 'class' not in df.columns:
df['class'] = 0
records = []
for idx, row in df[list(ANNOT_COLUMNS)].iterrows():
records.append(','.join([str(v) for v in row]))
return records
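# Illustrative sketch (hypothetical rows): a CSV row with xmin=10, ymin=20,
# xmax=110, ymax=220, class=0 becomes the record "10,20,110,220,0", so one line
# of the generated dataset file looks like
#   /path/to/image.jpg 10,20,110,220,0 30,40,130,240,1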
def _main(path_dataset, path_output, classes=None):
name_dataset = os.path.basename(os.path.dirname(path_dataset))
list_csv = sorted(glob.glob(os.path.join(path_dataset, '*.csv')))
path_out_list = os.path.join(path_output, '%s_dataset.txt' % name_dataset)
logging.info('creating list file: %s', path_out_list)
with open(path_out_list, 'w') as list_file:
for path_csv in tqdm.tqdm(list_csv):
name = os.path.splitext(os.path.basename(path_csv))[0]
list_images = []
for ext in IMAGE_EXTENSIONS:
list_images += glob.glob(os.path.join(path_dataset, name + ext))
if not list_images:
logging.warning('missing image: %s', os.path.join(path_dataset, name))
continue
recs = convert_annotation(path_csv, classes)
path_img = sorted(list_images)[0]
list_file.write(path_img + ' ' + ' '.join(recs) + '\n')
logging.info('Done.')
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
arg_params = parse_arguments()
_main(**arg_params)
| 33.740741
| 86
| 0.626418
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 523
| 0.191365
|
453d3dd28ce2e196af7faf59e068a64e46af26e2
| 3,280
|
py
|
Python
|
Assignment_2/one_vs_one.py
|
hthuwal/mcs-ml-assignments
|
1d2850b82f49ccf31eec3c8f921f09d6260d13a4
|
[
"MIT"
] | 3
|
2018-11-23T10:36:36.000Z
|
2021-12-16T17:47:22.000Z
|
Assignment_2/one_vs_one.py
|
hthuwal/mcs-ml-assignments
|
1d2850b82f49ccf31eec3c8f921f09d6260d13a4
|
[
"MIT"
] | null | null | null |
Assignment_2/one_vs_one.py
|
hthuwal/mcs-ml-assignments
|
1d2850b82f49ccf31eec3c8f921f09d6260d13a4
|
[
"MIT"
] | 7
|
2018-11-14T18:14:12.000Z
|
2021-12-16T17:47:34.000Z
|
from pegasos import bgd_pegasos
import numpy as np
import pandas as pd
import pickle
import sys
def read_data(file):
x = pd.read_csv(file, header=None)
if x.shape[1] > 784:
y = np.array(x[[784]]).flatten()
x = x.drop(columns=[784])
else:
y = np.zeros(x.shape[0])
    x = x.values  # DataFrame.as_matrix() was removed in newer pandas releases
return x, y
def read_data_svm(file):
x = []
y = []
with open(file, "r") as f:
for line in f:
temp = [0 for i in range(784)]
line = line.strip().split(" ")
y.append(int(line[0].strip()))
line = line[1:]
for each in line:
each = each.split(":")
temp[int(each[0].strip()) - 1] = np.float64(each[1].strip())
x.append(temp)
# input()
x = np.array(x)
y = np.array(y)
# print(y.shape)
return x, y
retrain = False
wandbs = None
if retrain:
x_train, y_train = read_data("mnist/train.csv")
num_classes = len(set(y_train))
wandbs = [[() for j in range(num_classes)] for i in range(num_classes)]
count = 0
for i in range(num_classes):
for j in range(num_classes):
if(i < j):
count += 1
print("\nClassifier %d: %d vs %d\n" % (count, i, j))
xc, yc = [], []
for x, y in zip(x_train, y_train):
if (y == i):
xc.append(x)
yc.append(1)
elif(y == j):
xc.append(x)
yc.append(-1)
wandbs[i][j] = bgd_pegasos(xc, yc, 10e-4, c=1.0)
with open("models/pegasos.model", "wb") as f:
pickle.dump(wandbs, f)
else:
print("\nLoading Model")
with open("models/pegasos.model", "rb") as f:
wandbs = pickle.load(f)
def hypothesis(w, b, x):
if (w@x + b) <= 0:
return -1
return 1
def predict(model, x):
num_classes = len(model)
counts = [0 for i in range(num_classes)]
for i in range(num_classes):
for j in range(num_classes):
if(i < j):
if hypothesis(model[i][j][0], model[i][j][1], x) == 1:
counts[i] += 1
else:
counts[j] += 1
return np.argmax(counts)
def run(x_set, y_set, model, output_file):
correct = 0
with open(output_file, "w") as f:
for x, y in zip(x_set, y_set):
prediction = predict(model, x)
f.write("%d\n" % (prediction))
if prediction == y:
correct += 1
accuracy = correct / (x_set.shape[0])
print("Accuracy: %f\n" % (accuracy))
def run2(x_set, y_set, model, output_file):
with open(output_file, "w") as f:
length = len(x_set)
for i in range(length):
sys.stdout.write("\r\x1b[K" + "%d/%d : %0.2f percent" % (i + 1, length, (i + 1) * 100 / length))
sys.stdout.flush()
x, y = x_set[i], y_set[i]
prediction = predict(model, x)
f.write("%d\n" % (prediction))
print("\n")
input_file = sys.argv[1].strip()
output_file = sys.argv[2].strip()
x_set, y_set = read_data(input_file)
print("Predicting")
run2(x_set, y_set, wandbs, output_file)
| 27.107438
| 108
| 0.498171
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 232
| 0.070732
|
453dfae7aa03af853f997301ef1cbbd1ca05e43a
| 1,533
|
py
|
Python
|
tools/numpy-examples.py
|
martinahogg/machinelearning
|
03b473375e64a0398177194df2fe26a1a89feedf
|
[
"Apache-2.0"
] | 2
|
2017-08-17T14:38:14.000Z
|
2017-08-17T14:40:32.000Z
|
tools/numpy-examples.py
|
martinahogg/machinelearning
|
03b473375e64a0398177194df2fe26a1a89feedf
|
[
"Apache-2.0"
] | null | null | null |
tools/numpy-examples.py
|
martinahogg/machinelearning
|
03b473375e64a0398177194df2fe26a1a89feedf
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
# Inner (or dot) product
a = np.array([1,2])
b = np.array([3,4])
np.inner(a, b)
a.dot(b)
# Outer product
a = np.array([1,2])
b = np.array([3,4])
np.outer(a, b)
# Inverse
m = np.array([[1,2], [3,4]])
np.linalg.inv(m)
# Inner (or dot) product
m = np.array([[1,2], [3,4]])
minv = np.linalg.inv(m)
m.dot(minv)
# Diagonal
m = np.array([[1,2], [3,4]])
np.diag(m)
m = np.array([1,2])
np.diag(m)
# Determinant
m = np.array([[1,2], [3,4]])
np.linalg.det(m)
# Trace - sum of elements of the diagonal
m = np.array([[1,2], [3,4]])
np.diag(m)
np.diag(m).sum()
np.trace(m)
# Transpose
m = np.array([ [1,2], [3,4] ])
m.T
# Gaussian distribution
m = np.random.randn(2,3)
m
# Covariance
X = np.random.randn(100,3)
np.cov(X.T)
# Eigen vectors and values
# For symmetric matrix (m == m.T) and hermitian matrix (m = m.H) we use eigh.
m = np.array([
[ 0.89761228, 0.00538701, -0.03229084],
[ 0.00538701, 1.04860676, -0.25001666],
[-0.03229084, -0.25001666, 0.81116126]])
# np.linalg.eigh returns a pair of arrays: the first holds the three Eigen values,
# the second holds the Eigen vectors stored in columns.
np.linalg.eigh(m)
# Solving linear systems
# The admission fee at a small fair is $1.50 for children and $4.00 for adults.
# On a certain day 2,200 people enter the fair and $5,050 is collected.
# How many children and how many adults attended?
#
# Let X1 = number of children
# Let X2 = number of adults
# X1 + X2 = 2200
# 1.5X1 + 4X2 = 5050
a = np.array([ [1,1], [1.5,4] ])
b = np.array( [ 2200, 5050] )
np.linalg.solve(a, b)
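# Sanity check, derivable by hand: solve() returns [1500., 700.], i.e. 1500
# children and 700 adults, since 1500 + 700 = 2200 and
# 1.5 * 1500 + 4 * 700 = 2250 + 2800 = 5050.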
| 20.716216
| 79
| 0.629485
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 708
| 0.46184
|
453fdaffee2d4ec5ec8223f0fa753fce8c413273
| 14,337
|
py
|
Python
|
src/relstorage/tests/util.py
|
lungj/relstorage
|
e18394b0197f6b70708037f36defbd3fe3ee5137
|
[
"ZPL-2.1"
] | null | null | null |
src/relstorage/tests/util.py
|
lungj/relstorage
|
e18394b0197f6b70708037f36defbd3fe3ee5137
|
[
"ZPL-2.1"
] | null | null | null |
src/relstorage/tests/util.py
|
lungj/relstorage
|
e18394b0197f6b70708037f36defbd3fe3ee5137
|
[
"ZPL-2.1"
] | null | null | null |
import os
import platform
import unittest
# ZODB >= 3.9. The blob directory can be a private cache.
shared_blob_dir_choices = (False, True)
RUNNING_ON_TRAVIS = os.environ.get('TRAVIS')
RUNNING_ON_APPVEYOR = os.environ.get('APPVEYOR')
RUNNING_ON_CI = RUNNING_ON_TRAVIS or RUNNING_ON_APPVEYOR
def _do_not_skip(reason): # pylint:disable=unused-argument
def dec(f):
return f
return dec
if RUNNING_ON_CI:
skipOnCI = unittest.skip
else:
skipOnCI = _do_not_skip
if RUNNING_ON_APPVEYOR:
skipOnAppveyor = unittest.skip
else:
skipOnAppveyor = _do_not_skip
CACHE_SERVERS = None
CACHE_MODULE_NAME = None
if RUNNING_ON_TRAVIS:
# We expect to have access to a local memcache server
# on travis. Use it if we can import drivers.
# pylint:disable=unused-import
try:
import pylibmc
CACHE_SERVERS = ["localhost:11211"]
CACHE_MODULE_NAME = 'relstorage.pylibmc_wrapper'
except ImportError:
try:
import memcache
CACHE_SERVERS = ["localhost:11211"]
CACHE_MODULE_NAME = 'memcache'
except ImportError:
pass
USE_SMALL_BLOBS = ((RUNNING_ON_CI # slow here
or platform.system() == 'Darwin' # interactive testing
or os.environ.get("RS_SMALL_BLOB")) # define
and not os.environ.get('RS_LARGE_BLOB'))
# mysqlclient (aka MySQLdb) and possibly other things that
# use libmysqlclient.so will try to connect over the
# default Unix socket that was established when that
# library was compiled if no host is given. But that
# server may not be running, or may not be the one we want
# to use for testing, so explicitly ask it to use TCP
# socket by giving an IP address (using 'localhost' will
# still try to use the socket.) (The TCP port can be bound
# by non-root, but the default Unix socket often requires
# root permissions to open.)
STANDARD_DATABASE_SERVER_HOST = '127.0.0.1'
DEFAULT_DATABASE_SERVER_HOST = os.environ.get('RS_DB_HOST',
STANDARD_DATABASE_SERVER_HOST)
TEST_UNAVAILABLE_DRIVERS = not bool(os.environ.get('RS_SKIP_UNAVAILABLE_DRIVERS'))
if RUNNING_ON_CI:
TEST_UNAVAILABLE_DRIVERS = False
class MinimalTestLayer(object):
__bases__ = ()
__module__ = ''
def __init__(self, name):
self.__name__ = name
def setUp(self):
pass
def tearDown(self):
pass
def testSetUp(self):
pass
def testTearDown(self):
pass
class _Availability(object):
"""
Has a boolean value telling whether the driver or database is available,
and a string explaining why it is/is not.
"""
def __init__(self, factory, drivers, max_priority, use_adapter, db_name):
from relstorage.adapters.interfaces import DriverNotAvailableError
self.driver_name = factory.driver_name
self.escaped_driver_name = self.driver_name.replace(' ', '').replace('/', '_')
try:
self.driver = drivers.select_driver(self.driver_name)
except DriverNotAvailableError:
self.driver = None
self._available = self.driver is not None and self.driver.priority <= max_priority
if not self._available:
if self.driver is None:
msg = 'Driver %s is not installed' % (self.driver_name,)
else:
msg = 'Driver %s has test priority %d >= max %d' % (
self.driver_name, self.driver.priority, max_priority
)
else:
msg = 'Driver %s is installed' % (self.driver_name,)
self._msg = msg
if self.driver is not None:
type(self.driver).STRICT = True
if self._available:
# See if we can connect.
self.__check_db_access(use_adapter, db_name)
def __str__(self):
return self._msg
def __bool__(self):
return self._available
__nonzero__ = __bool__
def __check_db_access_cb(self, _conn, _cursor):
"Does nothing"
__check_db_access_cb.transaction_read_only = True
def __check_db_access(self, use_adapter, db_name):
# We need to get an adapter to get a connmanager to try to connect.
from relstorage.options import Options
options = Options(driver=self.driver_name)
adapter_maker = use_adapter()
adapter_maker.driver_name = self.driver_name
adapter = adapter_maker.make_adapter(options, db_name)
try:
adapter.connmanager.open_and_call(self.__check_db_access_cb)
except (TypeError, AttributeError):
raise
except Exception as e: # pylint:disable=broad-except
self._available = False
self._msg = "%s: Failed to connect: %r %s" % (self._msg, type(e), e)
class AbstractTestSuiteBuilder(object):
__name__ = None # PostgreSQL, MySQL, Oracle
# Drivers with a priority over this amount won't be part of the
# test run even if installed.
MAX_PRIORITY = int(os.environ.get('RS_MAX_TEST_PRIORITY', '100'))
# Ask the drivers to be in their strictest possible mode.
STRICT_DRIVER = True
def __init__(self, driver_options, use_adapter, extra_test_classes=()):
"""
:param driver_options: The ``IDBDriverOptions``
:param use_adapter: A mixin class implementing the abstract methods
defined by ``StorageCreatingMixin``.
"""
self.drivers = driver_options
self.extra_test_classes = extra_test_classes
self.base_dbname = os.environ.get('RELSTORAGETEST_DBNAME', 'relstoragetest')
self.db_names = {
'data': self.base_dbname,
'1': self.base_dbname,
'2': self.base_dbname + '2',
'dest': self.base_dbname + '2',
}
self.use_adapter = use_adapter
use_adapter.base_dbname = self.base_dbname
self.large_blob_size = self._compute_large_blob_size(USE_SMALL_BLOBS)
def _compute_large_blob_size(self, use_small_blobs):
raise NotImplementedError
def test_suite(self):
from .reltestbase import AbstractIDBDriverTest
from .reltestbase import AbstractIDBOptionsTest
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(type(
self.__name__ + 'DBOptionsTest',
(AbstractIDBOptionsTest,),
{'db_options': self.drivers}
)))
for factory in self.drivers.known_driver_factories():
available = _Availability(
factory, self.drivers, self.MAX_PRIORITY,
self.use_adapter,
self.db_names['data']
)
# On CI, we don't even add tests for unavailable drivers to the
# list of tests; this makes the output much shorter and easier to read,
# but it does make zope-testrunner's discovery options less useful.
if available or TEST_UNAVAILABLE_DRIVERS:
# Checking the driver is just a unit test, it doesn't connect or
# need a layer
suite.addTest(unittest.makeSuite(
self.__skipping_if_not_available(
type(
self.__name__ + 'DBDriverTest_' + available.escaped_driver_name,
(AbstractIDBDriverTest,),
{'driver': available.driver}
),
available)))
# We put the various drivers into a zope.testrunner layer
# for ease of selection by name, e.g.,
# zope-testrunner --layer PG8000Driver
driver_suite = unittest.TestSuite()
layer_name = '%s%s' % (
self.__name__,
available.escaped_driver_name,
)
driver_suite.layer = MinimalTestLayer(layer_name)
driver_suite.layer.__module__ = self.__module__
self._add_driver_to_suite(driver_suite, layer_name, available)
suite.addTest(driver_suite)
return suite
def _default_make_check_class(self, bases, name, klass_dict=None):
klass = type(
name,
(self.use_adapter,) + bases,
klass_dict or {}
)
return klass
def _make_check_classes(self):
# The classes that inherit from ZODB tests and use 'check' instead of 'test_'
# This class is sadly not super() cooperative, so we must
# try to explicitly put it last in the MRO.
from ZODB.tests.util import TestCase as ZODBTestCase
from .hftestbase import HistoryFreeFromFileStorage
from .hftestbase import HistoryFreeToFileStorage
from .hftestbase import HistoryFreeRelStorageTests
from .hptestbase import HistoryPreservingFromFileStorage
from .hptestbase import HistoryPreservingToFileStorage
from .hptestbase import HistoryPreservingRelStorageTests
classes = []
for _, bases in (
('HF', (HistoryFreeFromFileStorage,
HistoryFreeToFileStorage,
HistoryFreeRelStorageTests)),
('HP', (HistoryPreservingFromFileStorage,
HistoryPreservingToFileStorage,
HistoryPreservingRelStorageTests))
):
for base in bases:
name = self.__name__ + base.__name__
maker = getattr(self, '_make_check_class_' + base.__name__,
self._default_make_check_class)
__traceback_info__ = maker, base
klass = maker((base, ZODBTestCase), name)
klass.__module__ = self.__module__
klass.__name__ = name
classes.append(klass)
return classes
def _make_zodbconvert_classes(self):
from .reltestbase import AbstractRSDestZodbConvertTests
from .reltestbase import AbstractRSSrcZodbConvertTests
classes = []
for base in (AbstractRSSrcZodbConvertTests, AbstractRSDestZodbConvertTests):
klass = type(
self.__name__ + base.__name__[8:],
(self.use_adapter, base),
{}
)
klass.__module__ = self.__module__
classes.append(klass)
return classes
def __skipping_if_not_available(self, klass, availability):
klass.__module__ = self.__module__
klass = unittest.skipUnless(
availability,
str(availability))(klass)
return klass
def _new_class_for_driver(self, base, driver_available):
klass = type(
base.__name__ + '_' + driver_available.escaped_driver_name,
(base,),
{'driver_name': driver_available.driver_name}
)
return self.__skipping_if_not_available(klass, driver_available)
def _add_driver_to_suite(self, suite, layer_prefix, driver_available):
for klass in self._make_check_classes():
klass = self._new_class_for_driver(klass, driver_available)
suite.addTest(unittest.makeSuite(klass, "check"))
for klass in self._make_zodbconvert_classes():
suite.addTest(unittest.makeSuite(
self._new_class_for_driver(klass,
driver_available)))
for klass in self.extra_test_classes:
suite.addTest(unittest.makeSuite(
self._new_class_for_driver(klass,
driver_available)))
from relstorage.tests.blob.testblob import storage_reusable_suite
from relstorage.options import Options
from relstorage.storage import RelStorage
for shared_blob_dir in shared_blob_dir_choices:
for keep_history in (False, True):
# TODO: Make any of the tests that are needing this
# subclass StorageCreatingMixin so we unify where
# that's handled.
def create_storage(name, blob_dir,
shared_blob_dir=shared_blob_dir,
keep_history=keep_history, **kw):
if not driver_available:
raise unittest.SkipTest(str(driver_available))
assert 'driver' not in kw
kw['driver'] = driver_available.driver_name
db = self.db_names[name]
if not keep_history:
db += '_hf'
options = Options(
keep_history=keep_history,
shared_blob_dir=shared_blob_dir,
blob_dir=os.path.abspath(blob_dir),
**kw)
adapter_maker = self.use_adapter()
adapter_maker.driver_name = driver_available.driver_name
adapter = adapter_maker.make_adapter(options, db)
__traceback_info__ = adapter, options
storage = RelStorage(adapter, name=name, options=options)
storage.zap_all()
return storage
prefix = '%s_%s%s' % (
layer_prefix,
'Shared' if shared_blob_dir else 'Unshared',
'HistoryPreserving' if keep_history else 'HistoryFree',
)
# If the blob directory is a cache, don't test packing,
# since packing can not remove blobs from all caches.
test_packing = shared_blob_dir
suite.addTest(storage_reusable_suite(
prefix, create_storage,
keep_history=keep_history,
test_blob_storage_recovery=True,
test_packing=test_packing,
test_undo=keep_history,
test_blob_cache=(not shared_blob_dir),
# PostgreSQL blob chunks are max 2GB in size
large_blob_size=(not shared_blob_dir) and (self.large_blob_size) + 100,
storage_is_available=driver_available
))
return suite
| 37.046512
| 92
| 0.604032
| 12,093
| 0.843482
| 0
| 0
| 0
| 0
| 0
| 0
| 2,881
| 0.200949
|
45409f2dbf3fb01dea755f2b203a25f415411768
| 2,435
|
py
|
Python
|
sktime/datatypes/_panel/_examples.py
|
marcio55afr/sktime
|
25ba2f470f037366ca6b0e529137d3d0a6191e2e
|
[
"BSD-3-Clause"
] | 5,349
|
2019-03-21T14:56:50.000Z
|
2022-03-31T11:25:30.000Z
|
sktime/datatypes/_panel/_examples.py
|
marcio55afr/sktime
|
25ba2f470f037366ca6b0e529137d3d0a6191e2e
|
[
"BSD-3-Clause"
] | 1,803
|
2019-03-26T13:33:53.000Z
|
2022-03-31T23:58:10.000Z
|
sktime/datatypes/_panel/_examples.py
|
marcio55afr/sktime
|
25ba2f470f037366ca6b0e529137d3d0a6191e2e
|
[
"BSD-3-Clause"
] | 911
|
2019-03-25T01:21:30.000Z
|
2022-03-31T04:45:51.000Z
|
# -*- coding: utf-8 -*-
"""Example generation for testing.
Exports dict of examples, useful for testing as fixtures.
example_dict: dict indexed by triple
1st element = mtype - str
2nd element = considered as this scitype - str
3rd element = int - index of example
elements are data objects, considered examples for the mtype
all examples with same index are considered "same" on scitype content
if None, indicates that representation is not possible
example_lossy: dict of bool indexed by pairs of str
1st element = mtype - str
2nd element = considered as this scitype - str
3rd element = int - index of example
elements are bool, indicate whether representation has information removed
all examples with same index are considered "same" on scitype content
overall, conversions from non-lossy representations to any other ones
should yield the element exactly, identidally (given same index)
"""
import pandas as pd
import numpy as np
example_dict = dict()
example_dict_lossy = dict()
###
X = np.array(
[[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 55, 6]], [[1, 2, 3], [42, 5, 6]]],
dtype=np.int64,
)
example_dict[("numpy3D", "Panel", 0)] = X
example_dict_lossy[("numpy3D", "Panel", 0)] = False
cols = [f"var_{i}" for i in range(2)]
Xlist = [
pd.DataFrame([[1, 4], [2, 5], [3, 6]], columns=cols),
pd.DataFrame([[1, 4], [2, 55], [3, 6]], columns=cols),
pd.DataFrame([[1, 42], [2, 5], [3, 6]], columns=cols),
]
example_dict[("df-list", "Panel", 0)] = Xlist
example_dict_lossy[("df-list", "Panel", 0)] = False
cols = ["instances", "timepoints"] + [f"var_{i}" for i in range(2)]
Xlist = [
pd.DataFrame([[0, 0, 1, 4], [0, 1, 2, 5], [0, 2, 3, 6]], columns=cols),
pd.DataFrame([[1, 0, 1, 4], [1, 1, 2, 55], [1, 2, 3, 6]], columns=cols),
pd.DataFrame([[2, 0, 1, 42], [2, 1, 2, 5], [2, 2, 3, 6]], columns=cols),
]
X = pd.concat(Xlist)
X = X.set_index(["instances", "timepoints"])
example_dict[("pd-multiindex", "Panel", 0)] = X
example_dict_lossy[("pd-multiindex", "Panel", 0)] = False
cols = [f"var_{i}" for i in range(2)]
X = pd.DataFrame(columns=cols, index=[0, 1, 2])
X["var_0"] = pd.Series(
[pd.Series([1, 2, 3]), pd.Series([1, 2, 3]), pd.Series([1, 2, 3])]
)
X["var_1"] = pd.Series(
[pd.Series([4, 5, 6]), pd.Series([4, 55, 6]), pd.Series([42, 5, 6])]
)
example_dict[("nested_univ", "Panel", 0)] = X
example_dict_lossy[("nested_univ", "Panel", 0)] = False
| 31.623377
| 79
| 0.632444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,167
| 0.479261
|
4540a0cc0547d5162f147799d9341ebb9bb38b1a
| 2,951
|
py
|
Python
|
ipfnlite/get_los_diaggeom.py
|
guimarais/AUGlite
|
8b6f6fbf57d974eabd7eb4c04c8b18478a38c9de
|
[
"MIT"
] | null | null | null |
ipfnlite/get_los_diaggeom.py
|
guimarais/AUGlite
|
8b6f6fbf57d974eabd7eb4c04c8b18478a38c9de
|
[
"MIT"
] | null | null | null |
ipfnlite/get_los_diaggeom.py
|
guimarais/AUGlite
|
8b6f6fbf57d974eabd7eb4c04c8b18478a38c9de
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 9 15:34:36 2019
@author: eseliuni
"""
from __future__ import print_function
#from builtins import str
#from builtins import range
import os
def get_coordinate_from_line(coordinate, line):
"""
Returns a value of a coordinate from a line
"""
for word in line.split(","):
if str(coordinate)+"=" in word:
if coordinate == "phi":
return float(word[word.index("=")+1:])
else:
return float(word[word.index("=")+1:-1])
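# Illustrative sketch (hypothetical diaggeom line): for a line such as
# "From R=1.650m, z=0.300m, phi=12", get_coordinate_from_line("R", line)
# returns 1.65 (the trailing unit character is stripped), while asking for
# "phi" returns 12.0 (no unit to strip).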
def get_los(full_path):
"""
Reads the file *.coordinate from diaggeom with line of sight (LOS) of a
diagnostic. Returns a dictionary with keys:
name: short name of the diagnostic
description: full name of the diagnostic
signals: contains the name of each channel and its LOS
"""
# Split the text to the lines
with open(full_path, "r") as file:
lines = file.readlines()
lines = [line.strip() for line in lines]
los_diag = {"name": lines[0].split()[0],
"description": lines[0][
lines[0].index("(")+1:lines[0].index(")")
],
"signals":{}
}
# Combine lines to the blocks, corresponding specific channel
phrase = "(Line of sight)" # a phrase, that indicates the beginning of the block
signals_line_idx = [ii for ii in range(len(lines)) if phrase in lines[ii]]
signals_line_idx.append(len(lines))
signal_blocks_idx = [(signals_line_idx[ii], signals_line_idx[ii+1]) for ii in range(len(signals_line_idx)-1)[:-1]]
signal_blocks_idx.append((signals_line_idx[-2], signals_line_idx[-1]))
# obtain R, z and phi for each block
for (ii, jj) in signal_blocks_idx:
los = {}
phrase = "From"
block = lines[ii:jj]
line_idx = [ll for ll in range(len(block)) if phrase in block[ll]]
for idx in line_idx:
R = [get_coordinate_from_line("R", block[idx]), get_coordinate_from_line("R", block[idx+1])]
z = [get_coordinate_from_line("z", block[idx]), get_coordinate_from_line("z", block[idx+1])]
phi = [get_coordinate_from_line("phi", block[idx]), get_coordinate_from_line("phi", block[idx+1])]
if block[idx].split()[0] == phrase:
los.update({"0":{"R": R, "z":z, "phi":phi}})
else:
los.update({block[idx].split()[0]:{"R": R, "z":z, "phi":phi}})
los_diag["signals"].update({lines[ii][:lines[ii].index("(")-1]:los})
file.close()
return los_diag
if __name__ == "__main__":
working_dir = os.getcwd()
examples_dir = "../../files/"
path = os.path.join(working_dir, examples_dir)
file_name = 'diaggeom_TS.coords'
los_diag = get_los(os.path.join(path, file_name))
print(los_diag)
| 35.554217
| 118
| 0.583192
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 867
| 0.293799
|
4544050d8eb06081ede4910d4218a872c580f338
| 35
|
py
|
Python
|
src/mock/webrepl.py
|
hipek/esp32-heating-control
|
4ee511118eb9a208e92298bdcf9c9242368c1806
|
[
"MIT"
] | null | null | null |
src/mock/webrepl.py
|
hipek/esp32-heating-control
|
4ee511118eb9a208e92298bdcf9c9242368c1806
|
[
"MIT"
] | null | null | null |
src/mock/webrepl.py
|
hipek/esp32-heating-control
|
4ee511118eb9a208e92298bdcf9c9242368c1806
|
[
"MIT"
] | null | null | null |
def start(password=None):
pass
| 11.666667
| 25
| 0.685714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
454453eab91aa77a631ac0aa12025ca69dac8cdb
| 4,294
|
py
|
Python
|
app.py
|
univoid/a3x
|
46f48363f191344747fec5e643efe1b467fb04c3
|
[
"MIT"
] | null | null | null |
app.py
|
univoid/a3x
|
46f48363f191344747fec5e643efe1b467fb04c3
|
[
"MIT"
] | null | null | null |
app.py
|
univoid/a3x
|
46f48363f191344747fec5e643efe1b467fb04c3
|
[
"MIT"
] | null | null | null |
import os
import base64
import botocore
import boto3
import json
import urllib
from chalice import BadRequestError
from chalice import ChaliceViewError
from chalice import Chalice
app = Chalice(app_name='a3x')
app.debug = True
REGION = 'us-east-1'
BUCKET = 'freko-001'
S3 = boto3.resource('s3')
REKOGNITION = boto3.client('rekognition')
@app.route('/face', methods=['POST'], content_types=['application/json'], api_key_required=True)
def face():
req = app.current_request.json_body
# parse request to prepare file
file_basename, img_data = parse_request(req)
## create temp file
image_file = open_image_file(file_basename, img_data)
## create s3 bucket if not exists
create_s3_bucket_if_not_exists()
## upload file to s3 bucket
upload_file_s3_bucket(file_basename, image_file.name)
## delete temp file
close_image_file(image_file)
## detect faces
# res = detect_faces(file_basename)
# return {'respone': file_basename}
res = rec_c(file_basename)
return search_response(res)
def parse_request(req):
file_name = must_get_value(req, 'name')
img_data = decode_base64(must_get_value(req, 'base64'))
return file_name, img_data
def must_get_value(req, key):
try:
return req[key]
except KeyError:
raise BadRequestError(key + ' is not found')
def decode_base64(data):
    try:
        missing_padding = len(data) % 4
        if missing_padding != 0:
            # base64 input must have a length that is a multiple of 4; restore padding
            data += '=' * (4 - missing_padding)
        return base64.b64decode(data)
    except Exception:
        raise BadRequestError("base64 is not decodable")
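# Hedged example of the padding fix-up above: "aGVsbG8" (7 characters, so
# len % 4 == 3) gets one "=" appended and decodes to b"hello", the same result
# as decoding the fully padded "aGVsbG8=".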
def open_image_file(name, data):
try:
image_file = open('/tmp/' + name, 'wb+')
image_file.write(data)
return image_file
except Exception as ex:
        raise ChaliceViewError("file is not openable. error = " + str(ex))
def create_s3_bucket_if_not_exists():
exists = True
try:
S3.meta.client.head_bucket(Bucket=BUCKET)
except botocore.exceptions.ClientError as ex:
# If a client error is thrown, then check that it was a 404 error.
# If it was a 404 error, then the bucket does not exist.
error_code = int(ex.response['Error']['Code'])
if error_code == 404:
exists = False
if exists:
return
else:
try:
S3.create_bucket(Bucket=BUCKET, CreateBucketConfiguration={
'LocationConstraint': REGION})
except Exception as ex:
            raise ChaliceViewError("fail to create bucket s3. error = " + str(ex))
return
def upload_file_s3_bucket(obj_name, image_file_name):
try:
s3_object = S3.Object(BUCKET, obj_name)
s3_object.upload_file(image_file_name)
except Exception as ex:
        raise ChaliceViewError("fail to upload file s3. error = " + str(ex))
def close_image_file(image_file):
try:
image_file.close()
os.remove(image_file.name)
except Exception as ex:
        raise ChaliceViewError("file is not closable. error = " + str(ex))
def rec_c(name):
# try:
# return name
response = REKOGNITION.recognize_celebrities(
Image={
'S3Object': {
'Bucket': BUCKET,
'Name': name,
}
}
)
return response
# except Exception as ex:
# raise ChaliceViewError("fail to detect faces. error = " + str(type(ex)))
def search_response(res):
# read name of the person in the picture from JSON
ans = {}
json_dict = res
name = json_dict["CelebrityFaces"][0]["Name"]
ans['Name'] = name
# search with google custom search API
api_key = 'AIzaSyB8aEOTYwqX2UT6GE_2a1LDjVc4b_nymI0'
search_engine_ID = '013044299581177508447:irovpa3a1yo'
tbm = 'nws'
url = 'https://www.googleapis.com/customsearch/v1'
params = {
'key': api_key,
'cx': search_engine_ID,
'tbm': tbm,
'q': name.encode('utf-8')
}
data = urllib.parse.urlencode(params)
full_url = url + '?' + data
response = urllib.request.urlopen(full_url)
result = json.load(response)
ans['news'] = []
for item in result['items']:
ans['news'].append((item['title'], item['link']))
return ans
| 30.027972
| 96
| 0.642757
| 0
| 0
| 0
| 0
| 700
| 0.163018
| 0
| 0
| 1,095
| 0.255007
|
188445351d4fd03596d67479b0ce34074904480c
| 7,387
|
py
|
Python
|
nnutils/laplacian_loss.py
|
lolrudy/GPV_pose
|
f326a623b3e45e6edfc1963b068e8e7aaea2bfff
|
[
"MIT"
] | 10
|
2022-03-16T02:14:56.000Z
|
2022-03-31T19:01:34.000Z
|
nnutils/laplacian_loss.py
|
lolrudy/GPV_pose
|
f326a623b3e45e6edfc1963b068e8e7aaea2bfff
|
[
"MIT"
] | 1
|
2022-03-18T06:43:16.000Z
|
2022-03-18T06:56:35.000Z
|
nnutils/laplacian_loss.py
|
lolrudy/GPV_pose
|
f326a623b3e45e6edfc1963b068e8e7aaea2bfff
|
[
"MIT"
] | 2
|
2022-03-19T13:06:28.000Z
|
2022-03-19T16:08:18.000Z
|
# --------------------------------------------------------
# Written by Yufei Ye (https://github.com/JudyYe)
# --------------------------------------------------------
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# customize laplacian argument
import torch
def mesh_laplacian_smoothing(meshes, verts_packed=None, method: str = "uniform"):
r"""
Computes the laplacian smoothing objective for a batch of meshes.
This function supports three variants of Laplacian smoothing,
namely with uniform weights("uniform"), with cotangent weights ("cot"),
    and cotangent curvature ("cotcurv"). For more details read [1, 2].
Args:
meshes: Meshes object with a batch of meshes.
method: str specifying the method for the laplacian.
Returns:
loss: Average laplacian smoothing loss across the batch.
Returns 0 if meshes contains no meshes or all empty meshes.
Consider a mesh M = (V, F), with verts of shape Nx3 and faces of shape Mx3.
The Laplacian matrix L is a NxN tensor such that LV gives a tensor of vectors:
for a uniform Laplacian, LuV[i] points to the centroid of its neighboring
vertices, a cotangent Laplacian LcV[i] is known to be an approximation of
the surface normal, while the curvature variant LckV[i] scales the normals
by the discrete mean curvature. For vertex i, assume S[i] is the set of
neighboring vertices to i, a_ij and b_ij are the "outside" angles in the
two triangles connecting vertex v_i and its neighboring vertex v_j
for j in S[i], as seen in the diagram below.
.. code-block:: python
a_ij
/\
/ \
/ \
/ \
v_i /________\ v_j
\ /
\ /
\ /
\ /
\/
b_ij
The definition of the Laplacian is LV[i] = sum_j w_ij (v_j - v_i)
For the uniform variant, w_ij = 1 / |S[i]|
For the cotangent variant,
w_ij = (cot a_ij + cot b_ij) / (sum_k cot a_ik + cot b_ik)
For the cotangent curvature, w_ij = (cot a_ij + cot b_ij) / (4 A[i])
where A[i] is the sum of the areas of all triangles containing vertex v_i.
There is a nice trigonometry identity to compute cotangents. Consider a triangle
with side lengths A, B, C and angles a, b, c.
.. code-block:: python
c
/|\
/ | \
/ | \
B / H| \ A
/ | \
/ | \
/a_____|_____b\
C
    Then cot a = (B^2 + C^2 - A^2) / (4 * area)
We know that area = CH/2, and by the law of cosines we have
A^2 = B^2 + C^2 - 2BC cos a => B^2 + C^2 - A^2 = 2BC cos a
Putting these together, we get:
B^2 + C^2 - A^2 2BC cos a
_______________ = _________ = (B/H) cos a = cos a / sin a = cot a
4 * area 2CH
[1] Desbrun et al, "Implicit fairing of irregular meshes using diffusion
and curvature flow", SIGGRAPH 1999.
[2] Nealan et al, "Laplacian Mesh Optimization", Graphite 2006.
"""
if meshes.isempty():
return torch.tensor(
[0.0], dtype=torch.float32, device=meshes.device, requires_grad=True
)
N = len(meshes)
if verts_packed is None:
verts_packed = meshes.verts_packed() # (sum(V_n), 3)
num_verts_per_mesh = meshes.num_verts_per_mesh() # (N,)
verts_packed_idx = meshes.verts_packed_to_mesh_idx() # (sum(V_n),)
weights = num_verts_per_mesh.gather(0, verts_packed_idx) # (sum(V_n),)
weights = 1.0 / weights.float()
# We don't want to backprop through the computation of the Laplacian;
# just treat it as a magic constant matrix that is used to transform
# verts into normals
with torch.no_grad():
if method == "uniform":
L = meshes.laplacian_packed()
elif method in ["cot", "cotcurv"]:
L, inv_areas = laplacian_cot(meshes)
if method == "cot":
norm_w = torch.sparse.sum(L, dim=1).to_dense().view(-1, 1)
idx = norm_w > 0
norm_w[idx] = 1.0 / norm_w[idx]
else:
norm_w = 0.25 * inv_areas
else:
raise ValueError("Method should be one of {uniform, cot, cotcurv}")
if method == "uniform":
loss = L.mm(verts_packed)
elif method == "cot":
loss = L.mm(verts_packed) * norm_w - verts_packed
elif method == "cotcurv":
loss = (L.mm(verts_packed) - verts_packed) * norm_w
loss = loss.norm(dim=1)
loss = loss * weights
return loss.sum() / N
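# A small self-contained check (an illustrative sketch, not part of the
# original module) of the cotangent identity quoted in the docstring above,
# cot a = (B^2 + C^2 - A^2) / (4 * area), evaluated on a 3-4-5 right triangle
# where the angle opposite the side of length 3 has cot a = 4/3.
def _cot_identity_example():
    import math
    A, B, C = 3.0, 5.0, 4.0                       # side A is opposite angle a
    area = 6.0                                    # area of the 3-4-5 triangle
    a = math.atan2(3.0, 4.0)                      # tan a = 3/4
    lhs = math.cos(a) / math.sin(a)               # cot a = 4/3
    rhs = (B * B + C * C - A * A) / (4.0 * area)  # (25 + 16 - 9) / 24 = 4/3
    assert abs(lhs - rhs) < 1e-9
    return rhs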
def laplacian_cot(meshes):
"""
Returns the Laplacian matrix with cotangent weights and the inverse of the
face areas.
Args:
meshes: Meshes object with a batch of meshes.
Returns:
2-element tuple containing
- **L**: FloatTensor of shape (V,V) for the Laplacian matrix (V = sum(V_n))
Here, L[i, j] = cot a_ij + cot b_ij iff (i, j) is an edge in meshes.
See the description above for more clarity.
- **inv_areas**: FloatTensor of shape (V,) containing the inverse of sum of
face areas containing each vertex
"""
verts_packed = meshes.verts_packed() # (sum(V_n), 3)
faces_packed = meshes.faces_packed() # (sum(F_n), 3)
# V = sum(V_n), F = sum(F_n)
V, F = verts_packed.shape[0], faces_packed.shape[0]
face_verts = verts_packed[faces_packed]
v0, v1, v2 = face_verts[:, 0], face_verts[:, 1], face_verts[:, 2]
# Side lengths of each triangle, of shape (sum(F_n),)
    # A is the side opposite v0, B is opposite v1, and C is opposite v2
A = (v1 - v2).norm(dim=1)
B = (v0 - v2).norm(dim=1)
C = (v0 - v1).norm(dim=1)
# Area of each triangle (with Heron's formula); shape is (sum(F_n),)
s = 0.5 * (A + B + C)
    # note that the Heron product can be slightly negative (for near-degenerate
    # triangles), causing nans after sqrt(); we clamp it to a small positive value
area = (s * (s - A) * (s - B) * (s - C)).clamp_(min=1e-12).sqrt()
# Compute cotangents of angles, of shape (sum(F_n), 3)
A2, B2, C2 = A * A, B * B, C * C
cota = (B2 + C2 - A2) / area
cotb = (A2 + C2 - B2) / area
cotc = (A2 + B2 - C2) / area
cot = torch.stack([cota, cotb, cotc], dim=1)
cot /= 4.0
# Construct a sparse matrix by basically doing:
# L[v1, v2] = cota
# L[v2, v0] = cotb
# L[v0, v1] = cotc
ii = faces_packed[:, [1, 2, 0]]
jj = faces_packed[:, [2, 0, 1]]
idx = torch.stack([ii, jj], dim=0).view(2, F * 3)
L = torch.sparse.FloatTensor(idx, cot.view(-1), (V, V))
# Make it symmetric; this means we are also setting
# L[v2, v1] = cota
# L[v0, v2] = cotb
# L[v1, v0] = cotc
L += L.t()
# For each vertex, compute the sum of areas for triangles containing it.
idx = faces_packed.view(-1)
inv_areas = torch.zeros(V, dtype=torch.float32, device=meshes.device)
val = torch.stack([area] * 3, dim=1).view(-1)
inv_areas.scatter_add_(0, idx, val)
idx = inv_areas > 0
inv_areas[idx] = 1.0 / inv_areas[idx]
inv_areas = inv_areas.view(-1, 1)
return L, inv_areas
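# Illustrative usage sketch (not part of the original file): combining the
# uniform and curvature-weighted smoothing terms for a pytorch3d ``Meshes``
# batch supplied by the caller. The 0.1 weight is an arbitrary placeholder.
def _laplacian_usage_example(meshes):
    uniform_term = mesh_laplacian_smoothing(meshes, method="uniform")
    curvature_term = mesh_laplacian_smoothing(meshes, method="cotcurv")
    return uniform_term + 0.1 * curvature_term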
| 36.569307
| 84
| 0.578855
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,666
| 0.63165
|
18864b9d7449a28b5e8d9bd986a21846e666ecbc
| 3,294
|
py
|
Python
|
bioimageio/spec/commands.py
|
esgomezm/spec-bioimage-io
|
2bc3f8177d5346ac94bf8a771ed619e076c6e935
|
[
"MIT"
] | null | null | null |
bioimageio/spec/commands.py
|
esgomezm/spec-bioimage-io
|
2bc3f8177d5346ac94bf8a771ed619e076c6e935
|
[
"MIT"
] | null | null | null |
bioimageio/spec/commands.py
|
esgomezm/spec-bioimage-io
|
2bc3f8177d5346ac94bf8a771ed619e076c6e935
|
[
"MIT"
] | null | null | null |
import shutil
import traceback
from pathlib import Path
from pprint import pprint
from typing import List, Optional, Union
from marshmallow import ValidationError
from bioimageio.spec import export_resource_package, load_raw_resource_description
from bioimageio.spec.shared.raw_nodes import URI
from bioimageio.spec.shared.utils import resolve_uri
def package(
rdf_source: Union[Path, str, URI, dict],
path: Path = Path() / "{src_name}-package.zip",
update_format: bool = False,
weights_priority_order: Optional[List[str]] = None,
verbose: bool = False,
) -> int:
"""Package a BioImage.IO resource described by a BioImage.IO Resource Description File (RDF)."""
code = validate(rdf_source, update_format=update_format, update_format_inner=update_format, verbose=verbose)
source_name = rdf_source.get("name") if isinstance(rdf_source, dict) else rdf_source
if code:
print(f"Cannot export invalid BioImage.IO RDF {source_name}")
return code
try:
tmp_package_path = export_resource_package(
rdf_source, update_to_current_format=update_format, weights_priority_order=weights_priority_order
)
except Exception as e:
print(f"Failed to package {source_name} due to: {e}")
if verbose:
traceback.print_exc()
return 1
try:
rdf_local_source = resolve_uri(rdf_source)
path = path.with_name(path.name.format(src_name=rdf_local_source.stem))
shutil.move(tmp_package_path, path)
except Exception as e:
print(f"Failed to move package from {tmp_package_path} to {path} due to: {e}")
if verbose:
traceback.print_exc()
return 1
print(f"exported bioimageio package from {source_name} to {path}")
return 0
def validate(
rdf_source: Union[Path, str, URI, dict],
update_format: bool = False,
    update_format_inner: Optional[bool] = None,
verbose: bool = False,
) -> int:
"""Validate a BioImage.IO Resource Description File (RDF)."""
if update_format_inner is None:
update_format_inner = update_format
source_name = rdf_source.get("name") if isinstance(rdf_source, dict) else rdf_source
try:
raw_rd = load_raw_resource_description(rdf_source, update_to_current_format=update_format)
except ValidationError as e:
print(f"Invalid {source_name}:")
pprint(e.normalized_messages())
return 1
except Exception as e:
print(f"Could not validate {source_name}:")
pprint(e)
if verbose:
traceback.print_exc()
return 1
code = 0
if raw_rd.type == "collection":
for inner_category in ["application", "collection", "dataset", "model", "notebook"]:
for inner in getattr(raw_rd, inner_category) or []:
try:
inner_source = inner.source
except Exception as e:
pprint(e)
code += 1
else:
code += validate(inner_source, update_format_inner, update_format_inner, verbose)
if code:
print(f"Found invalid RDFs in collection {source_name}.")
if not code:
print(f"successfully verified {raw_rd.type} {source_name}")
return code
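# Illustrative usage sketch (not part of the original module). "rdf.yaml" and
# the output zip name below are placeholders; both helpers above return 0 on
# success and a non-zero code otherwise.
def _commands_usage_example():
    rdf = "rdf.yaml"
    if validate(rdf, verbose=True) != 0:
        return 1
    return package(rdf, path=Path("my-bioimageio-package.zip"))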
| 34.3125
| 112
| 0.663327
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 649
| 0.197025
|
18865f902dc6b7cb2f38ac721fff0266a60bf991
| 1,309
|
py
|
Python
|
pettingzoo/test/max_cycles_test.py
|
RedTachyon/PettingZoo
|
0c4be0ca0de5a11bf8eff3f7b87976edcacd093e
|
[
"Apache-2.0"
] | 846
|
2020-05-12T05:55:00.000Z
|
2021-10-08T19:38:40.000Z
|
pettingzoo/test/max_cycles_test.py
|
RedTachyon/PettingZoo
|
0c4be0ca0de5a11bf8eff3f7b87976edcacd093e
|
[
"Apache-2.0"
] | 237
|
2020-04-27T06:01:39.000Z
|
2021-10-13T02:55:54.000Z
|
pettingzoo/test/max_cycles_test.py
|
RedTachyon/PettingZoo
|
0c4be0ca0de5a11bf8eff3f7b87976edcacd093e
|
[
"Apache-2.0"
] | 126
|
2020-05-29T04:20:29.000Z
|
2021-10-13T05:31:12.000Z
|
import numpy as np
def max_cycles_test(mod):
max_cycles = 4
parallel_env = mod.parallel_env(max_cycles=max_cycles)
observations = parallel_env.reset()
dones = {agent: False for agent in parallel_env.agents}
test_cycles = max_cycles + 10 # allows environment to do more than max_cycles if it so wishes
for step in range(test_cycles):
actions = {agent: parallel_env.action_space(agent).sample() for agent in parallel_env.agents if not dones[agent]}
observations, rewards, dones, infos = parallel_env.step(actions)
if all(dones.values()):
break
pstep = step + 1
env = mod.env(max_cycles=max_cycles)
env.reset()
agent_counts = np.zeros(len(env.possible_agents))
for a in env.agent_iter():
# counts agent index
aidx = env.possible_agents.index(a)
agent_counts[aidx] += 1
action = env.action_space(a).sample() if not env.dones[a] else None
env.step(action)
assert max_cycles == pstep
# does not check the minimum value because some agents might be killed before
# all the steps are complete. However, most agents should still be alive
# given a short number of cycles
assert max_cycles == np.max(agent_counts) - 1
assert max_cycles == np.median(agent_counts) - 1
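# Illustrative usage sketch (not part of the original test): the helper above
# is run against an environment module that exposes both `env` and
# `parallel_env` constructors, for example (the exact module name and version
# suffix are assumptions about the installed PettingZoo release):
#
#   from pettingzoo.butterfly import pistonball_v6
#   max_cycles_test(pistonball_v6)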
| 36.361111
| 121
| 0.683728
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 264
| 0.201681
|
188756e6fbd2150cbbb2fc3f3acd8c382070ad00
| 3,460
|
py
|
Python
|
models/models.py
|
nv-tlabs/DIB-R-Single-Image-3D-Reconstruction
|
faa6364cc6ec464f81f960a9fa6b55bbf3443d5f
|
[
"Apache-2.0"
] | 8
|
2021-09-10T04:54:54.000Z
|
2022-03-26T02:34:54.000Z
|
models/models.py
|
nv-tlabs/DIB-R-Single-Image-3D-Reconstruction
|
faa6364cc6ec464f81f960a9fa6b55bbf3443d5f
|
[
"Apache-2.0"
] | 2
|
2021-11-12T17:10:26.000Z
|
2022-03-24T14:59:01.000Z
|
models/models.py
|
nv-tlabs/DIB-R-Single-Image-3D-Reconstruction
|
faa6364cc6ec464f81f960a9fa6b55bbf3443d5f
|
[
"Apache-2.0"
] | 2
|
2021-09-19T16:25:26.000Z
|
2021-12-27T16:01:31.000Z
|
'''
MIT License
Copyright (c) 2020 Autonomous Vision Group (AVG), Max Planck Institute for Intelligent Systems Tübingen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
# Copyright (c) 2020,21 NVIDIA CORPORATION & AFFILIATES.. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
The functions in file is mostly borrowed from
https://github.com/autonomousvision/differentiable_volumetric_rendering/blob/11542ed5ac4e7e4c19c5c74eba7929c1333f3896/im2mesh/dvr/models/__init__.py
with some modifications.
Codes released under MIT license
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from .decoder import Decoder
from .conv import Resnet18
import numpy as np
########################################################
class DVR(nn.Module):
''' DVR model class.
Args:
decoder (nn.Module): decoder network
encoder (nn.Module): encoder network
template (torch.FloatTensor): of shape (num_vertices, 3), template mesh
'''
def __init__(self, template):
super(DVR, self).__init__()
decoder = Decoder(dim=3,
c_dim=256,
leaky=True,
out_dim=6,
res0=True,
res0ini=torch.ones)
encoder = Resnet18(c_dim=256, normalize=True, use_linear=True)
self.decoder = decoder
self.encoder = encoder
self.template = nn.Parameter(template, requires_grad=False)
# learn the delta
residual_coef = torch.zeros(1)
self.residual_coef = nn.Parameter(residual_coef)
def forward(self, inputs_bx3xhxw):
# encode inputs
c_bxc = self.encoder(inputs_bx3xhxw)
pred_bxpxk = self.decoder(self.template, c=c_bxc)
rgb = pred_bxpxk[:, :, :3]
        rgb = torch.sigmoid(rgb)  # F.sigmoid is deprecated; torch.sigmoid is equivalent
delta = pred_bxpxk[:, :, 3:6]
p = self.template + self.residual_coef * delta
return p, delta, rgb
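# Illustrative sketch (not part of the original file): a forward pass with a
# random template and a small image batch. All shapes below are assumptions
# made purely for illustration; in practice the template comes from a sphere
# mesh and the images from the data loader.
def _dvr_usage_example():
    template = torch.rand(642, 3)           # placeholder template vertices
    model = DVR(template)
    images = torch.rand(2, 3, 224, 224)     # batch of 2 RGB images
    verts, delta, rgb = model(images)
    return verts.shape, delta.shape, rgb.shape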
| 35.306122
| 148
| 0.695954
| 1,219
| 0.35221
| 0
| 0
| 0
| 0
| 0
| 0
| 2,319
| 0.670038
|
18875eee96b4d3c67bcfb581481611caf6ee9b44
| 4,855
|
py
|
Python
|
slixmpp/plugins/xep_0223.py
|
marconfus/slixmpp
|
bcf186f42dc31d360e0a0af8a4b3aaf1e0b212aa
|
[
"BSD-3-Clause"
] | null | null | null |
slixmpp/plugins/xep_0223.py
|
marconfus/slixmpp
|
bcf186f42dc31d360e0a0af8a4b3aaf1e0b212aa
|
[
"BSD-3-Clause"
] | null | null | null |
slixmpp/plugins/xep_0223.py
|
marconfus/slixmpp
|
bcf186f42dc31d360e0a0af8a4b3aaf1e0b212aa
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Slixmpp: The Slick XMPP Library
Copyright (C) 2012 Nathanael C. Fritz, Lance J.T. Stout
This file is part of Slixmpp.
See the file LICENSE for copying permission.
"""
import logging
from slixmpp.xmlstream import register_stanza_plugin
from slixmpp.plugins.base import BasePlugin, register_plugin
log = logging.getLogger(__name__)
class XEP_0223(BasePlugin):
"""
XEP-0223: Persistent Storage of Private Data via PubSub
"""
name = 'xep_0223'
description = 'XEP-0223: Persistent Storage of Private Data via PubSub'
dependencies = {'xep_0163', 'xep_0060', 'xep_0004'}
profile = {'pubsub#persist_items': True,
'pubsub#access_model': 'whitelist'}
def configure(self, node, ifrom=None, callback=None, timeout=None):
"""
Update a node's configuration to match the public storage profile.
"""
# TODO: that cannot possibly work, why is this here?
config = self.xmpp['xep_0004'].Form()
config['type'] = 'submit'
for field, value in self.profile.items():
config.add_field(var=field, value=value)
return self.xmpp['xep_0060'].set_node_config(None, node, config,
ifrom=ifrom,
callback=callback,
timeout=timeout)
def store(self, stanza, node=None, id=None, ifrom=None, options=None,
callback=None, timeout=None, timeout_callback=None):
"""
Store private data via PEP.
This is just a (very) thin wrapper around the XEP-0060 publish()
method to set the defaults expected by PEP.
Arguments:
stanza -- The private content to store.
node -- The node to publish the content to. If not specified,
the stanza's namespace will be used.
id -- Optionally specify the ID of the item.
options -- Publish options to use, which will be modified to
fit the persistent storage option profile.
ifrom -- Specify the sender's JID.
timeout -- The length of time (in seconds) to wait for a response
before exiting the send call if blocking is used.
Defaults to slixmpp.xmlstream.RESPONSE_TIMEOUT
callback -- Optional reference to a stream handler function. Will
be executed when a reply stanza is received.
"""
if not options:
options = self.xmpp['xep_0004'].stanza.Form()
options['type'] = 'submit'
options.add_field(
var='FORM_TYPE',
ftype='hidden',
value='http://jabber.org/protocol/pubsub#publish-options')
fields = options['fields']
for field, value in self.profile.items():
if field not in fields:
options.add_field(var=field)
options.get_fields()[field]['value'] = value
return self.xmpp['xep_0163'].publish(stanza, node, options=options,
ifrom=ifrom, callback=callback,
timeout=timeout,
timeout_callback=timeout_callback)
def retrieve(self, node, id=None, item_ids=None, ifrom=None,
callback=None, timeout=None, timeout_callback=None):
"""
Retrieve private data via PEP.
        This is just a (very) thin wrapper around the XEP-0060 get_items()
method to set the defaults expected by PEP.
Arguments:
node -- The node to retrieve content from.
id -- Optionally specify the ID of the item.
item_ids -- Specify a group of IDs. If id is also specified, it
will be included in item_ids.
ifrom -- Specify the sender's JID.
timeout -- The length of time (in seconds) to wait for a response
before exiting the send call if blocking is used.
Defaults to slixmpp.xmlstream.RESPONSE_TIMEOUT
callback -- Optional reference to a stream handler function. Will
be executed when a reply stanza is received.
"""
if item_ids is None:
item_ids = []
if id is not None:
item_ids.append(id)
return self.xmpp['xep_0060'].get_items(None, node,
item_ids=item_ids, ifrom=ifrom,
callback=callback, timeout=timeout,
timeout_callback=timeout_callback)
register_plugin(XEP_0223)
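# Illustrative usage sketch (not part of the original plugin), kept as a
# comment because it assumes an already-connected Slixmpp client (`xmpp`), a
# stanza object (`my_stanza`) registered for the target node, and a result
# handler (`my_result_handler`) defined by the caller:
#
#   xmpp.register_plugin('xep_0223')
#   xmpp['xep_0223'].store(my_stanza, node='urn:example:private')
#   xmpp['xep_0223'].retrieve('urn:example:private',
#                             callback=my_result_handler)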
| 40.458333
| 82
| 0.557158
| 4,469
| 0.920494
| 0
| 0
| 0
| 0
| 0
| 0
| 2,602
| 0.535942
|
18890abc6529f42ce336f6f93049f9ebe7b6d9a1
| 3,578
|
py
|
Python
|
merge_pdf/merge.py
|
DariHernandez/merge_pdf
|
5aa0df950caee81d1a2c2709697f82472858b7ec
|
[
"MIT"
] | null | null | null |
merge_pdf/merge.py
|
DariHernandez/merge_pdf
|
5aa0df950caee81d1a2c2709697f82472858b7ec
|
[
"MIT"
] | null | null | null |
merge_pdf/merge.py
|
DariHernandez/merge_pdf
|
5aa0df950caee81d1a2c2709697f82472858b7ec
|
[
"MIT"
] | 1
|
2021-06-23T19:46:42.000Z
|
2021-06-23T19:46:42.000Z
|
#! python3
# Combines all the pdfs in the current working directory into a single pdf
import PyPDF2, os, sys, logging
class Merge ():
"""
Merge all pdfs in the current folder, or specific list of files,
by name, into a single pdf file
"""
def __init__ (self, file_output = "", replace = False, debug = False):
"""
        Constructor of class. Generates an empty list of files and gets the dir path and output file name
"""
# Debug configuration
logging.basicConfig( level=logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s' )
if not debug:
logging.disable()
self.pdfFiles = []
self.fileOutput = file_output
self.replace = replace
self.__verify_outputh_file()
def merge_file_list (self, file_list):
"""
Merge a specific list of pdf files inside the output file
"""
# verify attribs
if type (file_list) != list:
raise AttributeError (file_list)
self.pdfFiles = file_list
        # Sort files
self.pdfFiles.sort(key = str.lower)
self.__make_file()
def merge_folder (self, folder):
"""
Merge all files from a specific folder and save inside the output file
"""
        # Verify the folder exists
if not os.path.isdir (folder):
raise FileNotFoundError(folder)
# Get files
for filename in os.listdir(folder):
if filename.endswith('.pdf'):
self.pdfFiles.append(os.path.join(folder, filename))
# Order files
self.pdfFiles.sort(key = str.lower)
self.__make_file()
def __verify_outputh_file (self):
"""
        Verify the name of the output file and whether the file will be replaced or not
"""
# verify path and make file name
if os.path.isdir (self.fileOutput):
self.fileOutput = os.path.join(self.fileOutput, 'mergeFiles.pdf')
else:
if not self.fileOutput.endswith('.pdf'):
self.fileOutput += '.pdf'
        # Verify whether to replace the output file
if os.path.isfile(self.fileOutput):
if self.replace:
logging.debug ("Replacing file")
else:
                self.fileOutput = 'File "{}" already exists'.format (self.fileOutput)
raise ValueError(self.fileOutput)
def __make_file (self):
"""
Make pdf output file with each page of the file list
"""
pdfWriter = PyPDF2.PdfFileWriter()
# loop through all the pdf files
if self.pdfFiles:
for currentFile in self.pdfFiles:
pdfFileObj = open (currentFile, 'rb')
pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
                # loop through all the pages and add them
logging.debug ("Merging {}... ".format (currentFile))
if pdfReader.numPages:
for pageNum in range (0, pdfReader.numPages):
pageObj = pdfReader.getPage(pageNum)
pdfWriter.addPage (pageObj)
# Save the resulting pdf to a file
pdfOutput = open (self.fileOutput, 'wb')
pdfWriter.write(pdfOutput)
pdfOutput.close()
logging.debug ('Done. Pages are now in {} file'.format (os.path.basename(self.fileOutput)))
else:
logging.debug ("Dosent exist pdf files in this folder.")
| 31.385965
| 103
| 0.564561
| 3,453
| 0.965064
| 0
| 0
| 0
| 0
| 0
| 0
| 1,156
| 0.323086
|
1889e33c1df53b96578448ca9e90add8e038bfe9
| 3,941
|
py
|
Python
|
test/inprogress/mock/JobBrowserBFF_get_job_log_mock_test.py
|
eapearson/kbase-skd-module-job-browser-bff
|
426445f90569adac16632ef4921f174e51abd42f
|
[
"MIT"
] | null | null | null |
test/inprogress/mock/JobBrowserBFF_get_job_log_mock_test.py
|
eapearson/kbase-skd-module-job-browser-bff
|
426445f90569adac16632ef4921f174e51abd42f
|
[
"MIT"
] | 6
|
2020-05-26T17:40:07.000Z
|
2022-03-11T16:33:11.000Z
|
test/inprogress/mock/JobBrowserBFF_get_job_log_mock_test.py
|
eapearson/kbase-skd-module-job-browser-bff
|
426445f90569adac16632ef4921f174e51abd42f
|
[
"MIT"
] | 1
|
2020-05-26T17:12:59.000Z
|
2020-05-26T17:12:59.000Z
|
# -*- coding: utf-8 -*-
import traceback
from JobBrowserBFF.TestBase import TestBase
from biokbase.Errors import ServiceError
import unittest
import re
UPSTREAM_SERVICE = 'mock'
ENV = 'mock'
JOB_ID_WITH_LOGS = '59820c93e4b06f68bf751eeb' # non-admin
JOB_ID_NO_LOGS = '5cf1522aaa5a4d298c5dc2ff' # non-admin
JOB_ID_NOT_FOUND = '5cf1522aaa5a4d298c5dc2fe' # non-admin
JOB_ID_NO_PERMISSION = '57ec06aee4b0b05cf8996b89' # access it as non-admin user
TIMEOUT_MS = 10000
class JobBrowserBFFTest(TestBase):
def assert_job_log_result(self, ret):
self.assertIsInstance(ret, list)
result = ret[0]
self.assertIsInstance(result, dict)
self.assertIn('log', result)
job_log = result.get('log')
self.assertIsInstance(job_log, list)
total_count = result.get('total_count')
return job_log, total_count
# Uncomment to skip this test
# @unittest.skip("skipped test_get_job_log_happy")
def test_get_job_log_happy(self):
self.set_config('upstream-service', UPSTREAM_SERVICE)
try:
impl, context = self.impl_for(ENV, 'user')
ret = impl.get_job_log(context, {
'job_id': JOB_ID_WITH_LOGS,
'offset': 0,
'limit': 10,
'timeout': TIMEOUT_MS
})
job_log, total_count = self.assert_job_log_result(ret)
self.assertEqual(len(job_log), 10)
self.assertEqual(total_count, 215)
except Exception as ex:
self.assert_no_exception(ex)
# Uncomment to skip this test
# @unittest.skip("skipped test_get_job_log_happy")
def test_get_job_log_no_logs_happy(self):
self.set_config('upstream-service', UPSTREAM_SERVICE)
job_id = JOB_ID_NO_LOGS
try:
impl, context = self.impl_for(ENV, 'user')
ret = impl.get_job_log(context, {
'job_id': job_id,
'offset': 0,
'limit': 10,
'timeout': TIMEOUT_MS
})
job_log, total_count = self.assert_job_log_result(ret)
self.assertEqual(len(job_log), 0)
self.assertEqual(total_count, 0)
except Exception as ex:
self.assert_no_exception(ex)
# Uncomment to skip this test
# @unittest.skip("skipped test_get_job_log_happy")
def test_get_job_log_no_permission_sad(self):
self.set_config('upstream-service', UPSTREAM_SERVICE)
job_id = JOB_ID_NO_LOGS
try:
impl, context = self.impl_for(ENV, 'user')
ret = impl.get_job_log(context, {
'job_id': JOB_ID_NO_PERMISSION,
'offset': 0,
'limit': 10,
'timeout': TIMEOUT_MS
})
print('RET', ret)
self.assertTrue(False, 'Expected an exception')
except ServiceError as se:
self.assertEqual(
se.code, 40, 'Expected error code 40 (permission denied), but received {}'.format(se.code))
except Exception as ex:
self.assert_no_exception(ex)
# Uncomment to skip this test
# @unittest.skip("skipped test_get_job_log_happy")
def test_get_job_log_not_found_sad(self):
self.set_config('upstream-service', UPSTREAM_SERVICE)
job_id = JOB_ID_NO_LOGS
try:
impl, context = self.impl_for(ENV, 'user')
ret = impl.get_job_log(context, {
'job_id': JOB_ID_NOT_FOUND,
'offset': 0,
'limit': 10,
'timeout': TIMEOUT_MS
})
print('RET', ret)
self.assertTrue(False, 'Expected an exception')
except ServiceError as se:
self.assertEqual(
se.code, 10, 'Expected error code 10 (not found), but received {}'.format(se.code))
except Exception as ex:
self.assert_no_exception(ex)
| 36.831776
| 107
| 0.603146
| 3,471
| 0.880741
| 0
| 0
| 0
| 0
| 0
| 0
| 934
| 0.236996
|
188cb20c595f8931979892b300bbc3dc12968c1c
| 674
|
py
|
Python
|
migrations/versions/323f8d77567b_index_related_entity_names.py
|
yaelmi3/backslash
|
edf39caf97af2c926da01c340a83648f4874e97e
|
[
"BSD-3-Clause"
] | 17
|
2015-11-25T13:02:38.000Z
|
2021-12-14T20:18:36.000Z
|
migrations/versions/323f8d77567b_index_related_entity_names.py
|
yaelmi3/backslash
|
edf39caf97af2c926da01c340a83648f4874e97e
|
[
"BSD-3-Clause"
] | 533
|
2015-11-24T12:47:13.000Z
|
2022-02-12T07:59:08.000Z
|
migrations/versions/323f8d77567b_index_related_entity_names.py
|
parallelsystems/backslash
|
577cdd18d5f665a8b493c4b2e2a605b7e0f6e11b
|
[
"BSD-3-Clause"
] | 15
|
2015-11-22T13:25:54.000Z
|
2022-02-16T19:23:11.000Z
|
"""Index related entity names
Revision ID: 323f8d77567b
Revises: 82b34e2777a4
Create Date: 2016-11-16 13:00:25.782487
"""
# revision identifiers, used by Alembic.
revision = '323f8d77567b'
down_revision = '82b34e2777a4'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_index(op.f('ix_related_entity_name'), 'related_entity', ['name'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_related_entity_name'), table_name='related_entity')
### end Alembic commands ###
| 24.962963
| 93
| 0.71365
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 451
| 0.669139
|
1890545ab78e2e102de4a2155cb00d7f5cb2cdc7
| 772
|
py
|
Python
|
learn/02week/code/cc_dicegame.py
|
tmax818/nucamp_intro_python
|
6fac59f53054055ba4ab40559c44eba07b7f9fd6
|
[
"MIT"
] | null | null | null |
learn/02week/code/cc_dicegame.py
|
tmax818/nucamp_intro_python
|
6fac59f53054055ba4ab40559c44eba07b7f9fd6
|
[
"MIT"
] | null | null | null |
learn/02week/code/cc_dicegame.py
|
tmax818/nucamp_intro_python
|
6fac59f53054055ba4ab40559c44eba07b7f9fd6
|
[
"MIT"
] | null | null | null |
import random
high_score = 0
def dice_game():
global high_score
while True:
print("Current High Score: ", high_score)
print("1) Roll Dice")
print("2) Leave Game")
choice = input("Enter your choice: ")
if choice == "2":
print("Goodbye")
break
elif choice == "1":
die1 = random.randint(1, 6)
die2 = random.randint(1, 6)
total = die1 + die2
print("You roll a... ", die1)
print("You roll a... ", die2)
print("You have rolled a total of: ", total)
if total > high_score:
high_score = total
print("New high score!")
else:
continue
dice_game()
| 20.864865
| 56
| 0.477979
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 166
| 0.215026
|
189105da68157256feb66cf959f48a9d4b0c8a3a
| 51
|
py
|
Python
|
tests/development/destination/gcs/test_delete_bucket.py
|
denssk/backup
|
292d5f1b1a3765ce0ea8d3cab8bd1ae0c583f72e
|
[
"Apache-2.0"
] | 69
|
2016-06-29T16:13:55.000Z
|
2022-03-21T06:38:37.000Z
|
tests/development/destination/gcs/test_delete_bucket.py
|
denssk/backup
|
292d5f1b1a3765ce0ea8d3cab8bd1ae0c583f72e
|
[
"Apache-2.0"
] | 237
|
2016-09-28T02:12:34.000Z
|
2022-03-25T13:32:23.000Z
|
tests/development/destination/gcs/test_delete_bucket.py
|
denssk/backup
|
292d5f1b1a3765ce0ea8d3cab8bd1ae0c583f72e
|
[
"Apache-2.0"
] | 45
|
2017-01-04T21:20:27.000Z
|
2021-12-29T10:42:22.000Z
|
def test_delete_bucket(gs):
gs.delete_bucket()
| 17
| 27
| 0.745098
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
189184fcb0cca3093cef204f192b8979a5c7f238
| 29,762
|
py
|
Python
|
SVS/model/utils/utils.py
|
ftshijt/SVS_system
|
569d0a2f7ae89965bde132e5be538f6a84be471f
|
[
"Apache-2.0"
] | null | null | null |
SVS/model/utils/utils.py
|
ftshijt/SVS_system
|
569d0a2f7ae89965bde132e5be538f6a84be471f
|
[
"Apache-2.0"
] | null | null | null |
SVS/model/utils/utils.py
|
ftshijt/SVS_system
|
569d0a2f7ae89965bde132e5be538f6a84be471f
|
[
"Apache-2.0"
] | null | null | null |
"""Copyright [2020] [Jiatong Shi].
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# !/usr/bin/env python3
import copy
import librosa
from librosa.display import specshow
import matplotlib.pyplot as plt
import numpy as np
import os
from scipy import signal
import soundfile as sf
from SVS.model.layers.global_mvn import GlobalMVN
import SVS.utils.metrics as Metrics
import time
import torch
# from SVS.model.layers.utterance_mvn import UtteranceMVN
# from pathlib import Path
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def collect_stats(train_loader, args):
"""collect_stats."""
print("get in collect stats", flush=True)
count, sum, sum_square = 0, 0, 0
count_mel, sum_mel, sum_square_mel = 0, 0, 0
for (
step,
(
phone,
beat,
pitch,
spec,
real,
imag,
length,
chars,
char_len_list,
mel,
),
) in enumerate(train_loader, 1):
# print(f"spec.shape: {spec.shape},length.shape:
# {length.shape}, mel.shape: {mel.shape}")
for i, seq in enumerate(spec.cpu().numpy()):
# print(f"seq.shape: {seq.shape}")
seq_length = torch.max(length[i])
# print(seq_length)
seq = seq[:seq_length]
sum += seq.sum(0)
sum_square += (seq ** 2).sum(0)
count += len(seq)
for i, seq in enumerate(mel.cpu().numpy()):
seq_length = torch.max(length[i])
seq = seq[:seq_length]
sum_mel += seq.sum(0)
sum_square_mel += (seq ** 2).sum(0)
count_mel += len(seq)
assert count_mel == count
dirnames = [
os.path.dirname(args.stats_file),
os.path.dirname(args.stats_mel_file),
]
for name in dirnames:
if not os.path.exists(name):
os.makedirs(name)
np.savez(
args.stats_file,
count=count,
sum=sum,
sum_square=sum_square,
)
np.savez(
args.stats_mel_file,
count=count_mel,
sum=sum_mel,
sum_square=sum_square_mel,
)
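# Illustrative sketch (not part of the original module): turning the
# count / sum / sum_square accumulated above into the mean and standard
# deviation that a global mean-variance normalizer such as GlobalMVN applies.
# The stats_file argument is the .npz file written by collect_stats.
def _stats_to_mean_std_example(stats_file):
    stats = np.load(stats_file)
    mean = stats["sum"] / stats["count"]
    var = stats["sum_square"] / stats["count"] - mean ** 2
    return mean, np.sqrt(np.maximum(var, 1.0e-20))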
def train_one_epoch(
train_loader,
model,
device,
optimizer,
criterion,
perceptual_entropy,
epoch,
args,
):
"""train_one_epoch."""
losses = AverageMeter()
spec_losses = AverageMeter()
if args.perceptual_loss > 0:
pe_losses = AverageMeter()
if args.n_mels > 0:
mel_losses = AverageMeter()
# mcd_metric = AverageMeter()
# f0_distortion_metric, vuv_error_metric =
# AverageMeter(), AverageMeter()
if args.double_mel_loss:
double_mel_losses = AverageMeter()
model.train()
log_save_dir = os.path.join(
args.model_save_dir, "epoch{}/log_train_figure".format(epoch)
)
if not os.path.exists(log_save_dir):
os.makedirs(log_save_dir)
start = time.time()
# f0_ground_truth_all = np.reshape(np.array([]), (-1, 1))
# f0_synthesis_all = np.reshape(np.array([]), (-1, 1))
for (
step,
(
phone,
beat,
pitch,
spec,
real,
imag,
length,
chars,
char_len_list,
mel,
),
) in enumerate(train_loader, 1):
phone = phone.to(device)
beat = beat.to(device)
pitch = pitch.to(device).float()
spec = spec.to(device).float()
if mel is not None:
mel = mel.to(device).float()
real = real.to(device).float()
imag = imag.to(device).float()
length_mask = length.unsqueeze(2)
if mel is not None:
length_mel_mask = length_mask.repeat(1, 1, mel.shape[2]).float()
length_mel_mask = length_mel_mask.to(device)
length_mask = length_mask.repeat(1, 1, spec.shape[2]).float()
length_mask = length_mask.to(device)
length = length.to(device)
char_len_list = char_len_list.to(device)
if not args.use_asr_post:
chars = chars.to(device)
char_len_list = char_len_list.to(device)
else:
phone = phone.float()
# output = [batch size, num frames, feat_dim]
# output_mel = [batch size, num frames, n_mels dimension]
if args.model_type == "GLU_Transformer":
output, att, output_mel, output_mel2 = model(
chars,
phone,
pitch,
beat,
pos_char=char_len_list,
pos_spec=length,
)
elif args.model_type == "LSTM":
output, hidden, output_mel, output_mel2 = model(phone, pitch, beat)
att = None
elif args.model_type == "GRU_gs":
output, att, output_mel = model(spec, phone, pitch, beat, length, args)
att = None
elif args.model_type == "PureTransformer":
output, att, output_mel, output_mel2 = model(
chars,
phone,
pitch,
beat,
pos_char=char_len_list,
pos_spec=length,
)
elif args.model_type == "Conformer":
# print(f"chars: {np.shape(chars)}, phone:
# {np.shape(phone)}, length: {np.shape(length)}")
output, att, output_mel, output_mel2 = model(
chars,
phone,
pitch,
beat,
pos_char=char_len_list,
pos_spec=length,
)
elif args.model_type == "Comformer_full":
output, att, output_mel, output_mel2 = model(
chars,
phone,
pitch,
beat,
pos_char=char_len_list,
pos_spec=length,
)
elif args.model_type == "USTC_DAR":
output_mel = model(
phone, pitch, beat, length, args
) # mel loss written in spec loss
att = None
spec_origin = spec.clone()
mel_origin = mel.clone()
if args.normalize:
sepc_normalizer = GlobalMVN(args.stats_file)
mel_normalizer = GlobalMVN(args.stats_mel_file)
spec, _ = sepc_normalizer(spec, length)
mel, _ = mel_normalizer(mel, length)
if args.model_type == "USTC_DAR":
spec_loss = 0
else:
spec_loss = criterion(output, spec, length_mask)
if args.n_mels > 0:
mel_loss = criterion(output_mel, mel, length_mel_mask)
if args.double_mel_loss:
double_mel_loss = criterion(output_mel2, mel, length_mel_mask)
else:
double_mel_loss = 0
else:
mel_loss = 0
double_mel_loss = 0
train_loss = mel_loss + double_mel_loss + spec_loss
if args.perceptual_loss > 0:
pe_loss = perceptual_entropy(output, real, imag)
final_loss = (
args.perceptual_loss * pe_loss + (1 - args.perceptual_loss) * train_loss
)
else:
final_loss = train_loss
final_loss = final_loss / args.accumulation_steps
final_loss.backward()
if args.gradclip > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.gradclip)
if (epoch + 1) % args.accumulation_steps == 0:
if args.optimizer == "noam":
optimizer.step_and_update_lr()
else:
optimizer.step()
            # zero the gradients
optimizer.zero_grad()
losses.update(final_loss.item(), phone.size(0))
if args.model_type != "USTC_DAR":
spec_losses.update(spec_loss.item(), phone.size(0))
if args.perceptual_loss > 0:
pe_losses.update(pe_loss.item(), phone.size(0))
if args.n_mels > 0:
mel_losses.update(mel_loss.item(), phone.size(0))
if args.double_mel_loss:
double_mel_losses.update(double_mel_loss.item(), phone.size(0))
if step % args.train_step_log == 0:
end = time.time()
if args.model_type == "USTC_DAR":
                # the inverse normalization is only applied here for logging/inference,
                # since the logging step converts to wav and computes metrics such as MCD
if args.normalize and args.stats_file:
output_mel, _ = mel_normalizer.inverse(output_mel, length)
log_figure_mel(
step,
output_mel,
mel_origin,
att,
length,
log_save_dir,
args,
)
out_log = "step {}: train_loss {:.4f}; spec_loss {:.4f};".format(
step, losses.avg, spec_losses.avg
)
else:
                # the inverse normalization is only applied here for logging/inference,
                # since the logging step converts to wav and computes metrics such as MCD
if args.normalize and args.stats_file:
output, _ = sepc_normalizer.inverse(output, length)
log_figure(step, output, spec_origin, att, length, log_save_dir, args)
out_log = "step {}: train_loss {:.4f}; spec_loss {:.4f};".format(
step, losses.avg, spec_losses.avg
)
if args.perceptual_loss > 0:
out_log += "pe_loss {:.4f}; ".format(pe_losses.avg)
if args.n_mels > 0:
out_log += "mel_loss {:.4f}; ".format(mel_losses.avg)
if args.double_mel_loss:
out_log += "dmel_loss {:.4f}; ".format(double_mel_losses.avg)
print("{} -- sum_time: {:.2f}s".format(out_log, (end - start)))
info = {"loss": losses.avg, "spec_loss": spec_losses.avg}
if args.perceptual_loss > 0:
info["pe_loss"] = pe_losses.avg
if args.n_mels > 0:
info["mel_loss"] = mel_losses.avg
return info
def validate(dev_loader, model, device, criterion, perceptual_entropy, epoch, args):
"""validate."""
losses = AverageMeter()
spec_losses = AverageMeter()
if args.perceptual_loss > 0:
pe_losses = AverageMeter()
if args.n_mels > 0:
mel_losses = AverageMeter()
mcd_metric = AverageMeter()
if args.double_mel_loss:
double_mel_losses = AverageMeter()
model.eval()
log_save_dir = os.path.join(
args.model_save_dir, "epoch{}/log_val_figure".format(epoch)
)
if not os.path.exists(log_save_dir):
os.makedirs(log_save_dir)
start = time.time()
with torch.no_grad():
for (
step,
(
phone,
beat,
pitch,
spec,
real,
imag,
length,
chars,
char_len_list,
mel,
),
) in enumerate(dev_loader, 1):
phone = phone.to(device)
beat = beat.to(device)
pitch = pitch.to(device).float()
spec = spec.to(device).float()
if mel is not None:
mel = mel.to(device).float()
real = real.to(device).float()
imag = imag.to(device).float()
length_mask = length.unsqueeze(2)
if mel is not None:
length_mel_mask = length_mask.repeat(1, 1, mel.shape[2]).float()
length_mel_mask = length_mel_mask.to(device)
length_mask = length_mask.repeat(1, 1, spec.shape[2]).float()
length_mask = length_mask.to(device)
length = length.to(device)
char_len_list = char_len_list.to(device)
if not args.use_asr_post:
chars = chars.to(device)
char_len_list = char_len_list.to(device)
else:
phone = phone.float()
if args.model_type == "GLU_Transformer":
output, att, output_mel, output_mel2 = model(
chars,
phone,
pitch,
beat,
pos_char=char_len_list,
pos_spec=length,
)
elif args.model_type == "LSTM":
output, hidden, output_mel, output_mel2 = model(phone, pitch, beat)
att = None
elif args.model_type == "GRU_gs":
output, att, output_mel = model(spec, phone, pitch, beat, length, args)
att = None
elif args.model_type == "PureTransformer":
output, att, output_mel, output_mel2 = model(
chars,
phone,
pitch,
beat,
pos_char=char_len_list,
pos_spec=length,
)
elif args.model_type == "Conformer":
output, att, output_mel, output_mel2 = model(
chars,
phone,
pitch,
beat,
pos_char=char_len_list,
pos_spec=length,
)
elif args.model_type == "Comformer_full":
output, att, output_mel, output_mel2 = model(
chars,
phone,
pitch,
beat,
pos_char=char_len_list,
pos_spec=length,
)
elif args.model_type == "USTC_DAR":
output_mel = model(phone, pitch, beat, length, args)
att = None
spec_origin = spec.clone()
mel_origin = mel.clone()
if args.normalize:
sepc_normalizer = GlobalMVN(args.stats_file)
mel_normalizer = GlobalMVN(args.stats_mel_file)
spec, _ = sepc_normalizer(spec, length)
mel, _ = mel_normalizer(mel, length)
if args.model_type == "USTC_DAR":
spec_loss = 0
else:
spec_loss = criterion(output, spec, length_mask)
if args.n_mels > 0:
mel_loss = criterion(output_mel, mel, length_mel_mask)
if args.double_mel_loss:
double_mel_loss = criterion(output_mel2, mel, length_mel_mask)
else:
double_mel_loss = 0
else:
mel_loss = 0
double_mel_loss = 0
dev_loss = mel_loss + double_mel_loss + spec_loss
if args.perceptual_loss > 0:
pe_loss = perceptual_entropy(output, real, imag)
final_loss = (
args.perceptual_loss * pe_loss
+ (1 - args.perceptual_loss) * dev_loss
)
else:
final_loss = dev_loss
losses.update(final_loss.item(), phone.size(0))
if args.model_type != "USTC_DAR":
spec_losses.update(spec_loss.item(), phone.size(0))
if args.perceptual_loss > 0:
# pe_loss = perceptual_entropy(output, real, imag)
pe_losses.update(pe_loss.item(), phone.size(0))
if args.n_mels > 0:
mel_losses.update(mel_loss.item(), phone.size(0))
if args.double_mel_loss:
double_mel_losses.update(double_mel_loss.item(), phone.size(0))
if args.model_type == "USTC_DAR":
# normalize inverse stage
if args.normalize and args.stats_file:
output_mel, _ = mel_normalizer.inverse(output_mel, length)
mcd_value, length_sum = (
0,
1,
) # FIX ME! Calculate_melcd_fromMelSpectrum
else:
# normalize inverse stage
if args.normalize and args.stats_file:
output, _ = sepc_normalizer.inverse(output, length)
(mcd_value, length_sum,) = Metrics.Calculate_melcd_fromLinearSpectrum(
output, spec_origin, length, args
)
mcd_metric.update(mcd_value, length_sum)
if step % args.dev_step_log == 0:
if args.model_type == "USTC_DAR":
log_figure_mel(
step,
output_mel,
mel_origin,
att,
length,
log_save_dir,
args,
)
else:
log_figure(
step,
output,
spec_origin,
att,
length,
log_save_dir,
args,
)
out_log = (
"step {}: train_loss {:.4f}; "
"spec_loss {:.4f}; mcd_value {:.4f};".format(
step, losses.avg, spec_losses.avg, mcd_metric.avg
)
)
if args.perceptual_loss > 0:
out_log += "pe_loss {:.4f}; ".format(pe_losses.avg)
if args.n_mels > 0:
out_log += "mel_loss {:.4f}; ".format(mel_losses.avg)
if args.double_mel_loss:
out_log += "dmel_loss {:.4f}; ".format(double_mel_losses.avg)
end = time.time()
print("{} -- sum_time: {}s".format(out_log, (end - start)))
info = {
"loss": losses.avg,
"spec_loss": spec_losses.avg,
"mcd_value": mcd_metric.avg,
}
if args.perceptual_loss > 0:
info["pe_loss"] = pe_losses.avg
if args.n_mels > 0:
info["mel_loss"] = mel_losses.avg
return info
class AverageMeter(object):
"""Computes and stores the average and current value."""
def __init__(self):
"""init."""
self.reset()
def reset(self):
"""reset."""
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
"""update."""
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def save_checkpoint(state, model_filename):
"""save_checkpoint."""
torch.save(state, model_filename)
return 0
def save_model(
args,
epoch,
model,
optimizer,
train_info,
dev_info,
logger,
save_loss_select,
):
"""save_model."""
if args.optimizer == "noam":
save_checkpoint(
{
"epoch": epoch,
"state_dict": model.state_dict(),
"optimizer": optimizer._optimizer.state_dict(),
},
"{}/epoch_{}_{}.pth.tar".format(
args.model_save_dir, save_loss_select, epoch
),
)
else:
save_checkpoint(
{
"epoch": epoch,
"state_dict": model.state_dict(),
},
"{}/epoch_{}_{}.pth.tar".format(
args.model_save_dir, save_loss_select, epoch
),
)
# record training and validation information
if args.use_tfboard:
record_info(train_info, dev_info, epoch, logger)
def record_info(train_info, dev_info, epoch, logger):
"""record_info."""
loss_info = {
"train_loss": train_info["loss"],
"dev_loss": dev_info["loss"],
}
logger.add_scalars("losses", loss_info, epoch)
return 0
def invert_spectrogram(spectrogram, win_length, hop_length):
"""Invert_spectrogram.
applies inverse fft.
Args:
spectrogram: [1+n_fft//2, t]
"""
return librosa.istft(spectrogram, hop_length, win_length=win_length, window="hann")
def griffin_lim(spectrogram, iter_vocoder, n_fft, hop_length, win_length):
"""griffin_lim."""
X_best = copy.deepcopy(spectrogram)
for i in range(iter_vocoder):
X_t = invert_spectrogram(X_best, win_length, hop_length)
est = librosa.stft(X_t, n_fft, hop_length, win_length=win_length)
phase = est / np.maximum(1e-8, np.abs(est))
X_best = spectrogram * phase
X_t = invert_spectrogram(X_best, win_length, hop_length)
y = np.real(X_t)
return y
def spectrogram2wav(
mag, max_db, ref_db, preemphasis, power, sr, hop_length, win_length, n_fft
):
"""Generate wave file from linear magnitude spectrogram.
Args:
mag: A numpy array of (T, 1+n_fft//2)
Returns:
wav: A 1-D numpy array.
"""
hop_length = int(hop_length * sr)
win_length = int(win_length * sr)
n_fft = n_fft
# transpose
mag = mag.T
    # de-normalize
mag = (np.clip(mag, 0, 1) * max_db) - max_db + ref_db
# to amplitude
mag = np.power(10.0, mag * 0.05)
# wav reconstruction
wav = griffin_lim(mag ** power, 100, n_fft, hop_length, win_length)
# de-preemphasis
wav = signal.lfilter([1], [1, -preemphasis], wav)
# trim
wav, _ = librosa.effects.trim(wav)
return wav.astype(np.float32)
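# Illustrative sketch (not part of the original module): reconstructing audio
# from a normalized magnitude spectrogram with spectrogram2wav above. Every
# hyper-parameter value below is an assumption chosen only for illustration;
# hop_length / win_length are given in seconds, as the function multiplies
# them by the sampling rate internally.
def _spectrogram2wav_example(mag):
    return spectrogram2wav(
        mag,                 # (T, 1 + n_fft // 2), values normalized to [0, 1]
        max_db=100,
        ref_db=20,
        preemphasis=0.97,
        power=1.5,
        sr=22050,
        hop_length=0.0125,   # seconds -> 275 samples at 22.05 kHz
        win_length=0.05,     # seconds -> 1102 samples at 22.05 kHz
        n_fft=2048,
    )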
def log_figure_mel(step, output, spec, att, length, save_dir, args):
"""log_figure_mel."""
# only get one sample from a batch
# save wav and plot spectrogram
output = output.cpu().detach().numpy()[0]
out_spec = spec.cpu().detach().numpy()[0]
length = np.max(length.cpu().detach().numpy()[0])
output = output[:length]
out_spec = out_spec[:length]
# FIX ME! Need WaveRNN to produce wav from mel-spec
# wav = spectrogram2wav(output, args.max_db, args.ref_db,
# args.preemphasis, args.power, args.sampling_rate,
# args.frame_shift, args.frame_length, args.nfft)
# wav_true = spectrogram2wav(out_spec, args.max_db,
# args.ref_db, args.preemphasis, args.power, args.sampling_rate,
# args.frame_shift, args.frame_length, args.nfft)
# if librosa.__version__ < '0.8.0':
# librosa.output.write_wav(os.path.join(save_dir,
# '{}.wav'.format(step)), wav, args.sampling_rate)
# librosa.output.write_wav(os.path.join(save_dir,
# '{}_true.wav'.format(step)), wav_true, args.sampling_rate)
# else:
# # librosa > 0.8 remove librosa.output.write_wav module
# sf.write(os.path.join(save_dir, '{}.wav'.format(step)),
# wav, args.sampling_rate,format='wav', subtype='PCM_24')
# sf.write(os.path.join(save_dir, '{}_true.wav'.format(step)),
# wav, args.sampling_rate,format='wav', subtype='PCM_24')
plt.subplot(1, 2, 1)
specshow(output.T)
plt.title("prediction")
plt.subplot(1, 2, 2)
specshow(out_spec.T)
plt.title("ground_truth")
plt.savefig(os.path.join(save_dir, "{}.png".format(step)))
if att is not None:
att = att.cpu().detach().numpy()[0]
att = att[:, :length, :length]
plt.subplot(1, 4, 1)
specshow(att[0])
plt.subplot(1, 4, 2)
specshow(att[1])
plt.subplot(1, 4, 3)
specshow(att[2])
plt.subplot(1, 4, 4)
specshow(att[3])
plt.savefig(os.path.join(save_dir, "{}_att.png".format(step)))
def log_figure(step, output, spec, att, length, save_dir, args):
"""log_figure."""
# only get one sample from a batch
# save wav and plot spectrogram
output = output.cpu().detach().numpy()[0]
out_spec = spec.cpu().detach().numpy()[0]
length = np.max(length.cpu().detach().numpy()[0])
output = output[:length]
out_spec = out_spec[:length]
wav = spectrogram2wav(
output,
args.max_db,
args.ref_db,
args.preemphasis,
args.power,
args.sampling_rate,
args.frame_shift,
args.frame_length,
args.nfft,
)
wav_true = spectrogram2wav(
out_spec,
args.max_db,
args.ref_db,
args.preemphasis,
args.power,
args.sampling_rate,
args.frame_shift,
args.frame_length,
args.nfft,
)
if librosa.__version__ < "0.8.0":
librosa.output.write_wav(
os.path.join(save_dir, "{}.wav".format(step)),
wav,
args.sampling_rate,
)
librosa.output.write_wav(
os.path.join(save_dir, "{}_true.wav".format(step)),
wav_true,
args.sampling_rate,
)
else:
# librosa > 0.8 remove librosa.output.write_wav module
sf.write(
os.path.join(save_dir, "{}.wav".format(step)),
wav,
args.sampling_rate,
format="wav",
subtype="PCM_24",
)
sf.write(
os.path.join(save_dir, "{}_true.wav".format(step)),
wav_true,
args.sampling_rate,
format="wav",
subtype="PCM_24",
)
plt.subplot(1, 2, 1)
specshow(output.T)
plt.title("prediction")
plt.subplot(1, 2, 2)
specshow(out_spec.T)
plt.title("ground_truth")
plt.savefig(os.path.join(save_dir, "{}.png".format(step)))
if att is not None:
att = att.cpu().detach().numpy()[0]
att = att[:, :length, :length]
plt.subplot(1, 4, 1)
specshow(att[0])
plt.subplot(1, 4, 2)
specshow(att[1])
plt.subplot(1, 4, 3)
specshow(att[2])
plt.subplot(1, 4, 4)
specshow(att[3])
plt.savefig(os.path.join(save_dir, "{}_att.png".format(step)))
def log_mel(step, output_mel, spec, att, length, save_dir, args, voc_model):
"""log_mel."""
# only get one sample from a batch
# save wav and plot spectrogram
output_mel = output_mel.cpu().detach().numpy()[0]
out_spec = spec.cpu().detach().numpy()[0]
length = np.max(length.cpu().detach().numpy()[0])
output_mel = output_mel[:length]
out_spec = out_spec[:length]
wav = voc_model.generate(output_mel)
wav_true = spectrogram2wav(
out_spec,
args.max_db,
args.ref_db,
args.preemphasis,
args.power,
args.sampling_rate,
args.frame_shift,
args.frame_length,
args.nfft,
)
if librosa.__version__ < "0.8.0":
librosa.output.write_wav(
os.path.join(save_dir, "{}.wav".format(step)), wav, args.sampling_rate
)
librosa.output.write_wav(
os.path.join(save_dir, "{}_true.wav".format(step)),
wav_true,
args.sampling_rate,
)
else:
# librosa > 0.8 remove librosa.output.write_wav module
sf.write(
os.path.join(save_dir, "{}.wav".format(step)),
wav,
args.sampling_rate,
format="wav",
subtype="PCM_24",
)
sf.write(
os.path.join(save_dir, "{}_true.wav".format(step)),
wav_true,
args.sampling_rate,
format="wav",
subtype="PCM_24",
)
plt.subplot(1, 2, 1)
specshow(output_mel.T)
plt.title("prediction")
plt.subplot(1, 2, 2)
specshow(out_spec.T)
plt.title("ground_truth")
plt.savefig(os.path.join(save_dir, "{}.png".format(step)))
if att is not None:
att = att.cpu().detach().numpy()[0]
att = att[:, :length, :length]
plt.subplot(1, 4, 1)
specshow(att[0])
plt.subplot(1, 4, 2)
specshow(att[1])
plt.subplot(1, 4, 3)
specshow(att[2])
plt.subplot(1, 4, 4)
specshow(att[3])
plt.savefig(os.path.join(save_dir, "{}_att.png".format(step)))
def Calculate_time(elapsed_time):
"""Calculate_time."""
elapsed_hours = int(elapsed_time / 3600)
elapsed_mins = int((elapsed_time - (elapsed_hours * 3600)) / 60)
elapsed_secs = int(elapsed_time - (elapsed_hours * 3600) - (elapsed_mins * 60))
return elapsed_hours, elapsed_mins, elapsed_secs
def Calculate_time_path(path):
"""Calculate_time_path."""
num_list = os.listdir(path)
total_time = 0
for number in num_list:
# print(number)
number_path = os.path.join(path, number)
# print(number_path)
wav_name_list = os.listdir(number_path)
for wav_name in wav_name_list:
wav_path = os.path.join(number_path, wav_name)
print(wav_path)
time = librosa.get_duration(filename=wav_path)
print(time)
total_time += time
return total_time
def Calculate_dataset_duration(dataset_path):
"""Calculate_dataset_duration."""
train_path = os.path.join(dataset_path, "train")
dev_path = os.path.join(dataset_path, "dev")
test_path = os.path.join(dataset_path, "test")
total_time = (
Calculate_time_path(train_path)
+ Calculate_time_path(dev_path)
+ Calculate_time_path(test_path)
)
hours, mins, secs = Calculate_time(total_time)
print(f"Time: {hours}h {mins}m {secs}s'")
if __name__ == "__main__":
# path = "/data5/jiatong/SVS_system/SVS/data/
# public_dataset/kiritan_data/wav_info"
path = "/data5/jiatong/SVS_system/SVS/data/public_dataset/hts_data/wav_info"
Calculate_dataset_duration(path)
| 32.140389
| 88
| 0.540152
| 454
| 0.015205
| 0
| 0
| 0
| 0
| 0
| 0
| 5,023
| 0.16823
|
18918e38b1911d887e6cd9f7014807483471a02f
| 10,115
|
py
|
Python
|
run_classifier.py
|
to-aoki/my-pytorch-bert
|
8e412ae6331f5f19fee55b430be389de2f5c49a6
|
[
"Apache-2.0"
] | 21
|
2019-03-04T03:43:19.000Z
|
2022-02-14T15:50:41.000Z
|
run_classifier.py
|
to-aoki/my-pytorch-bert
|
8e412ae6331f5f19fee55b430be389de2f5c49a6
|
[
"Apache-2.0"
] | 1
|
2019-10-07T17:49:21.000Z
|
2019-12-14T11:50:10.000Z
|
run_classifier.py
|
to-aoki/my-pytorch-bert
|
8e412ae6331f5f19fee55b430be389de2f5c49a6
|
[
"Apache-2.0"
] | 5
|
2019-07-19T07:04:55.000Z
|
2020-07-01T13:24:14.000Z
|
# Author Toshihiko Aoki
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BertClassifier."""
from mptb import BertClassifier
def classification(
config_path='config/bert_base.json',
train_dataset_path='tests/sample_text_class.txt',
eval_dataset_path='tests/sample_text_class.txt',
pretrain_path='pretrain/bert.pt',
tf_pretrain_path=None,
model_path=None,
vocab_path='tests/sample_text.vocab',
sp_model_path='tests/sample_text.model',
save_dir='classifier/',
log_dir=None,
batch_size=2,
max_pos=128,
lr=5e-5,
warmup_proportion=0.1, # warmup_steps = len(dataset) / batch_size * epoch * warmup_proportion
epochs=5,
per_save_epochs=1,
mode='train',
label_num=-1,
balance_weight=False,
balance_sample=False,
under_sampling=False,
under_sampling_cycle=False,
tokenizer_name='google',
read_head=False,
fp16=False,
task='class',
device=None,
quantize=False,
model_name='bert',
optimizer='bert',
encoder_json_path=None,
vocab_bpe_path=None,
sw_log_dir='runs'
):
if under_sampling_cycle:
under_sampling = True
if mode == 'train':
estimator = BertClassifier(
config_path=config_path,
max_pos=max_pos,
vocab_path=vocab_path,
sp_model_path=sp_model_path,
pretrain_path=pretrain_path,
tf_pretrain_path=tf_pretrain_path,
dataset_path=train_dataset_path,
header_skip=not read_head,
label_num=label_num,
tokenizer_name=tokenizer_name,
under_sampling=under_sampling,
fp16=fp16,
task=task,
device=device,
quantize=quantize,
model_name=model_name,
encoder_json_path=encoder_json_path,
vocab_bpe_path=vocab_bpe_path,
sw_log_dir=sw_log_dir
)
estimator.train(
traing_model_path=model_path,
batch_size=batch_size,
epochs=epochs,
lr=lr, warmup_proportion=warmup_proportion,
balance_weight=balance_weight,
balance_sample=balance_sample,
under_sampling_cycle=under_sampling_cycle,
save_dir=save_dir,
per_save_epochs=per_save_epochs,
optimizer_name=optimizer
)
if eval_dataset_path is None:
return
eval_data_set = estimator.get_dataset(
dataset_path=eval_dataset_path, header_skip=not read_head)
score = estimator.evaluate(dataset=eval_data_set, batch_size=batch_size, log_dir=log_dir)
print(score)
else:
estimator = BertClassifier(
config_path=config_path,
max_pos=max_pos,
vocab_path=vocab_path,
sp_model_path=sp_model_path,
model_path=model_path,
dataset_path=eval_dataset_path,
header_skip=not read_head,
label_num=label_num,
tokenizer_name=tokenizer_name,
under_sampling=under_sampling,
fp16=fp16,
device=device,
quantize=quantize,
model_name=model_name,
encoder_json_path=encoder_json_path,
vocab_bpe_path=vocab_bpe_path,
)
score = estimator.evaluate(batch_size=batch_size, log_dir=log_dir)
print(score)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='BERT classification.', usage='%(prog)s [options]')
parser.add_argument('--config_path', help='JSON file path for defines networks.', nargs='?',
type=str, default='config/bert_base.json')
parser.add_argument('--train_dataset_path', help='Training Dataset file (TSV file) path for classification.',
nargs='?', type=str, default=None)
parser.add_argument('--eval_dataset_path', help='Evaluate Dataset file (TSV file) path for classification.',
nargs='?', type=str, default=None)
parser.add_argument('--pretrain_path', help='Pre-training PyTorch model path.', nargs='?',
type=str, default=None)
parser.add_argument('--tf_pretrain_path', help='Pre-training TensorFlow(Google) model path.', nargs='?',
type=str, default=None)
parser.add_argument('--model_path', help='Classifier PyTorch model path.', nargs='?',
type=str, default=None)
parser.add_argument('--vocab_path', help='Vocabulary file path for BERT to pre-training.', nargs='?', required=True,
type=str)
parser.add_argument('--sp_model_path', help='Trained SentencePiece model path.', nargs='?',
type=str, default=None)
parser.add_argument('--save_dir', help='Classification model saving directory path.', nargs='?',
type=str, default='classifier/')
parser.add_argument('--log_dir', help='Logging file path.', nargs='?',
type=str, default=None)
parser.add_argument('--batch_size', help='Batch size', nargs='?',
type=int, default=4)
parser.add_argument('--max_pos', help='The maximum sequence length for BERT (slow as big).', nargs='?',
type=int, default=512)
parser.add_argument('--lr', help='Learning rate', nargs='?',
type=float, default=2e-5)
    parser.add_argument('--warmup_steps', help='Warm-up proportion (fraction of total training steps used for warm-up).', nargs='?',
type=float, default=0.1)
parser.add_argument('--epochs', help='Epochs', nargs='?',
type=int, default=10)
parser.add_argument('--per_save_epochs', help=
                        'Save the training model every this many epochs', nargs='?',
type=int, default=1)
parser.add_argument('--mode', help='train or eval', nargs='?',
type=str, default='train')
parser.add_argument('--label_num', help='labels number', nargs='?',
type=int, default=-1)
parser.add_argument('--balance_weight', action='store_true',
help='Use automatically adjust weights')
parser.add_argument('--balance_sample', action='store_true',
help='Use automatically adjust samples(random)')
parser.add_argument('--under_sampling', action='store_true',
help='Use automatically adjust under samples')
parser.add_argument('--under_sampling_cycle', action='store_true',
help='Use automatically adjust under samples cycle peer')
parser.add_argument('--tokenizer', nargs='?', type=str, default='google',
help=
                        'Select the tokenizer. Tokenizers that use only vocabulary files: mecab, juman.'
)
parser.add_argument('--read_head', action='store_true',
                        help='The TSV file has no header row (read the first line as data)')
parser.add_argument('--fp16', action='store_true',
help='Use nVidia fp16 (require apex module)')
parser.add_argument('--task', nargs='?', type=str, default='class', help='Target Task (class or choice)')
    parser.add_argument('--device', nargs='?', type=str, default=None, help='Target running device name.')
parser.add_argument('--quantize', action='store_true',
                        help='Use quantized BERT (experimental)')
parser.add_argument('--model_name', nargs='?', type=str, default='bert',
help=
                        'Select the model architecture (bert, proj, albert).'
)
parser.add_argument('--optimizer', nargs='?', type=str, default='bert',
help=
                        'Select the optimizer (bert, adamw, lamb).'
)
parser.add_argument('--encoder_json_path', help='GPT2 encoder JSON file path.', nargs='?', type=str)
parser.add_argument('--vocab_bpe_path', help='GPT2 encoder bpe file path.', nargs='?', type=str)
    parser.add_argument('--sw_log_dir', help='TensorBoard log_dir path.', nargs='?', type=str, default='runs')
args = parser.parse_args()
classification(
config_path=args.config_path,
train_dataset_path=args.train_dataset_path,
eval_dataset_path=args.eval_dataset_path,
        pretrain_path=args.pretrain_path,
tf_pretrain_path=args.tf_pretrain_path,
model_path=args.model_path,
vocab_path=args.vocab_path,
sp_model_path=args.sp_model_path,
save_dir=args.save_dir,
log_dir=args.log_dir,
batch_size=args.batch_size,
max_pos=args.max_pos,
lr=args.lr,
warmup_proportion=args.warmup_steps,
epochs=args.epochs,
per_save_epochs=args.per_save_epochs,
mode=args.mode,
label_num=args.label_num,
balance_weight=args.balance_weight,
balance_sample=args.balance_sample,
under_sampling=args.under_sampling,
under_sampling_cycle=args.under_sampling_cycle,
tokenizer_name=args.tokenizer,
read_head=args.read_head,
fp16=args.fp16,
task=args.task,
device=args.device,
quantize=args.quantize,
model_name=args.model_name,
optimizer=args.optimizer,
encoder_json_path=args.encoder_json_path,
vocab_bpe_path=args.vocab_bpe_path,
sw_log_dir=args.sw_log_dir
)
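# ---------------------------------------------------------------------------
# Minimal programmatic usage sketch (kept as comments; all paths below are
# hypothetical, not part of this repository). `classification` can also be
# called directly from Python instead of through argparse:
#
#   classification(
#       config_path='config/bert_base.json',   # example config, as in the CLI default
#       train_dataset_path='data/train.tsv',   # hypothetical TSV dataset
#       eval_dataset_path=None,
#       vocab_path='data/wiki-ja.vocab',       # hypothetical vocabulary file
#       sp_model_path='data/wiki-ja.model',    # hypothetical SentencePiece model
#       save_dir='classifier/',
#       batch_size=4,
#       epochs=3,
#       mode='train',
#   )
# ---------------------------------------------------------------------------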
| 43.787879
| 120
| 0.625309
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,909
| 0.287593
|
1891a7b479098620bf760b07575489ea73d1fccf
| 279
|
py
|
Python
|
dev/urls.py
|
ledgku/utilscombine
|
aaf13ca2208bcf522f005c64769c34bc8e8ee9f4
|
[
"MIT"
] | 2
|
2018-07-18T10:10:01.000Z
|
2018-07-18T10:10:17.000Z
|
dev/urls.py
|
ledgku/utilscombine
|
aaf13ca2208bcf522f005c64769c34bc8e8ee9f4
|
[
"MIT"
] | 5
|
2018-09-19T11:33:54.000Z
|
2021-06-10T20:43:32.000Z
|
dev/urls.py
|
ledgku/utilscombine
|
aaf13ca2208bcf522f005c64769c34bc8e8ee9f4
|
[
"MIT"
] | null | null | null |
from django.urls import path
from dev.views import FindMyIp, FindMyGps
app_name = 'dev'
urlpatterns = [
# path('', Main.as_view(), name = 'index'),
    path('findmyip', FindMyIp.as_view(), name='findmyip'),
    path('findmygps', FindMyGps.as_view(), name='findmygps'),
]
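# Usage sketch: because app_name = 'dev', these routes can be reversed through the
# namespace (assuming this module is included in the project URLconf):
#
#   from django.urls import reverse
#   reverse('dev:findmyip')   # -> the URL mapped to FindMyIp
#   reverse('dev:findmygps')  # -> the URL mapped to FindMyGps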
| 27.9
| 63
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 90
| 0.322581
|
1891f24339199dafe363a5f1dfae29b9615437e5
| 406
|
py
|
Python
|
cookbook/c02/p17_html_xml.py
|
itpubs/python3-cookbook
|
140f5e4cc0416b9674edca7f4c901b1f58fc1415
|
[
"Apache-2.0"
] | 3
|
2018-09-19T06:44:13.000Z
|
2019-03-24T10:07:07.000Z
|
cookbook/c02/p17_html_xml.py
|
itpubs/python3-cookbook
|
140f5e4cc0416b9674edca7f4c901b1f58fc1415
|
[
"Apache-2.0"
] | 2
|
2020-09-19T17:10:23.000Z
|
2020-10-17T16:43:52.000Z
|
cookbook/c02/p17_html_xml.py
|
itpubs/python3-cookbook
|
140f5e4cc0416b9674edca7f4c901b1f58fc1415
|
[
"Apache-2.0"
] | 1
|
2020-07-20T22:10:31.000Z
|
2020-07-20T22:10:31.000Z
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Topic: Handling HTML and XML text
Desc :
"""
import html
def html_xml():
s = 'Elements are written as "<tag>text</tag>".'
print(s)
print(html.escape(s))
# Disable escaping of quotes
print(html.escape(s, quote=False))
s = 'Spicy Jalapeño'
print(s.encode('ascii', errors='xmlcharrefreplace'))
if __name__ == '__main__':
html_xml()
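# Companion sketch (standard library only, nothing project-specific assumed):
# html.unescape() reverses html.escape() for the entities shown above.
#
#   import html
#   escaped = html.escape('Elements are written as "<tag>text</tag>".')
#   assert html.unescape(escaped) == 'Elements are written as "<tag>text</tag>".'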
| 16.916667
| 56
| 0.618227
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 216
| 0.517986
|
1892414a440ce9963c67565a93d5515f1867c2ed
| 53
|
py
|
Python
|
utils/__init__.py
|
Rfam/rfam-production
|
36f3963380da2a08e9cf73c951691c4e95738ac4
|
[
"Apache-2.0"
] | 7
|
2016-06-17T09:21:11.000Z
|
2021-10-13T20:25:06.000Z
|
utils/__init__.py
|
mb1069/rfam-production
|
10c76e249dc22d30862b3a873fd54f390e859ad8
|
[
"Apache-2.0"
] | 82
|
2016-04-08T10:51:32.000Z
|
2022-03-11T13:49:18.000Z
|
utils/__init__.py
|
mb1069/rfam-production
|
10c76e249dc22d30862b3a873fd54f390e859ad8
|
[
"Apache-2.0"
] | 3
|
2019-09-01T09:46:35.000Z
|
2021-11-29T08:01:58.000Z
|
__all__ = ['db_utils', 'RfamDB', 'parse_taxbrowser']
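# Note: with this __all__, `from utils import *` exposes only db_utils, RfamDB and
# parse_taxbrowser (assuming those are importable submodules/attributes of the package).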
| 26.5
| 52
| 0.698113
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 36
| 0.679245
|
1892cc18e4d651a8551d9ce9d603987daef5b912
| 625
|
py
|
Python
|
jina/drivers/craft.py
|
slettner/jina
|
4140961c62359e3acd540a6d88931665c6313824
|
[
"Apache-2.0"
] | null | null | null |
jina/drivers/craft.py
|
slettner/jina
|
4140961c62359e3acd540a6d88931665c6313824
|
[
"Apache-2.0"
] | null | null | null |
jina/drivers/craft.py
|
slettner/jina
|
4140961c62359e3acd540a6d88931665c6313824
|
[
"Apache-2.0"
] | null | null | null |
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Optional
from . import FlatRecursiveMixin, BaseExecutableDriver, DocsExtractUpdateMixin
class CraftDriver(DocsExtractUpdateMixin, FlatRecursiveMixin, BaseExecutableDriver):
"""Drivers inherited from this Driver will bind :meth:`craft` by default """
def __init__(
self, executor: Optional[str] = None, method: str = 'craft', *args, **kwargs
):
super().__init__(executor, method, *args, **kwargs)
@property
def _stack_document_content(self):
return False
| 31.25
| 84
| 0.72
| 411
| 0.6576
| 0
| 0
| 69
| 0.1104
| 0
| 0
| 153
| 0.2448
|
18931f6a4553b81704db5d7e58f8609781b151d9
| 5,543
|
py
|
Python
|
solutionbox/structured_data/mltoolbox/_structured_data/preprocess/local_preprocess.py
|
freyrsae/pydatalab
|
9aba1ac6bbe8e1384e7a4b07c5042af84348797d
|
[
"Apache-2.0"
] | 198
|
2016-07-14T19:47:52.000Z
|
2022-03-15T08:45:21.000Z
|
solutionbox/structured_data/mltoolbox/_structured_data/preprocess/local_preprocess.py
|
freyrsae/pydatalab
|
9aba1ac6bbe8e1384e7a4b07c5042af84348797d
|
[
"Apache-2.0"
] | 534
|
2016-07-15T19:12:43.000Z
|
2022-03-11T23:11:39.000Z
|
solutionbox/structured_data/mltoolbox/_structured_data/preprocess/local_preprocess.py
|
freyrsae/pydatalab
|
9aba1ac6bbe8e1384e7a4b07c5042af84348797d
|
[
"Apache-2.0"
] | 86
|
2016-07-13T17:39:05.000Z
|
2021-11-03T03:39:41.000Z
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import collections
import json
import os
import six
import sys
from tensorflow.python.lib.io import file_io
SCHEMA_FILE = 'schema.json'
NUMERICAL_ANALYSIS_FILE = 'stats.json'
CATEGORICAL_ANALYSIS_FILE = 'vocab_%s.csv'
def parse_arguments(argv):
"""Parse command line arguments.
Args:
    argv: list of command line arguments, including program name.
Returns:
An argparse Namespace object.
"""
parser = argparse.ArgumentParser(
description='Runs Preprocessing on structured CSV data.')
parser.add_argument('--input-file-pattern',
type=str,
required=True,
help='Input CSV file names. May contain a file pattern')
parser.add_argument('--output-dir',
type=str,
required=True,
                      help='Google Cloud Storage path or local directory in which to place outputs.')
parser.add_argument('--schema-file',
type=str,
required=True,
help=('BigQuery json schema file'))
args = parser.parse_args(args=argv[1:])
# Make sure the output folder exists if local folder.
file_io.recursive_create_dir(args.output_dir)
return args
def run_numerical_categorical_analysis(args, schema_list):
"""Makes the numerical and categorical analysis files.
Args:
args: the command line args
schema_list: python object of the schema json file.
Raises:
ValueError: if schema contains unknown column types.
"""
header = [column['name'] for column in schema_list]
input_files = file_io.get_matching_files(args.input_file_pattern)
# Check the schema is valid
for col_schema in schema_list:
col_type = col_schema['type'].lower()
if col_type != 'string' and col_type != 'integer' and col_type != 'float':
raise ValueError('Schema contains an unsupported type %s.' % col_type)
# initialize the results
def _init_numerical_results():
return {'min': float('inf'),
'max': float('-inf'),
'count': 0,
'sum': 0.0}
numerical_results = collections.defaultdict(_init_numerical_results)
categorical_results = collections.defaultdict(set)
# for each file, update the numerical stats from that file, and update the set
# of unique labels.
for input_file in input_files:
with file_io.FileIO(input_file, 'r') as f:
for line in f:
parsed_line = dict(zip(header, line.strip().split(',')))
for col_schema in schema_list:
col_name = col_schema['name']
col_type = col_schema['type']
if col_type.lower() == 'string':
categorical_results[col_name].update([parsed_line[col_name]])
else:
# numerical column.
# if empty, skip
if not parsed_line[col_name].strip():
continue
numerical_results[col_name]['min'] = (
min(numerical_results[col_name]['min'],
float(parsed_line[col_name])))
numerical_results[col_name]['max'] = (
max(numerical_results[col_name]['max'],
float(parsed_line[col_name])))
numerical_results[col_name]['count'] += 1
numerical_results[col_name]['sum'] += float(parsed_line[col_name])
  # Update numerical_results to just have min/max/mean
for col_schema in schema_list:
if col_schema['type'].lower() != 'string':
col_name = col_schema['name']
mean = numerical_results[col_name]['sum'] / numerical_results[col_name]['count']
del numerical_results[col_name]['sum']
del numerical_results[col_name]['count']
numerical_results[col_name]['mean'] = mean
# Write the numerical_results to a json file.
file_io.write_string_to_file(
os.path.join(args.output_dir, NUMERICAL_ANALYSIS_FILE),
json.dumps(numerical_results, indent=2, separators=(',', ': ')))
# Write the vocab files. Each label is on its own line.
for name, unique_labels in six.iteritems(categorical_results):
labels = '\n'.join(list(unique_labels))
file_io.write_string_to_file(
os.path.join(args.output_dir, CATEGORICAL_ANALYSIS_FILE % name),
labels)
def run_analysis(args):
"""Builds an analysis files for training."""
# Read the schema and input feature types
schema_list = json.loads(
file_io.read_file_to_string(args.schema_file))
run_numerical_categorical_analysis(args, schema_list)
# Also save a copy of the schema in the output folder.
file_io.copy(args.schema_file,
os.path.join(args.output_dir, SCHEMA_FILE),
overwrite=True)
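# Example invocation (sketch only; the bucket and file names below are hypothetical):
#
#   python local_preprocess.py \
#     --input-file-pattern gs://my-bucket/data/train-*.csv \
#     --output-dir gs://my-bucket/analysis \
#     --schema-file schema.json
#
# This writes stats.json, one vocab_<column>.csv per string column, and a copy of
# the schema into --output-dir.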
def main(argv=None):
args = parse_arguments(sys.argv if argv is None else argv)
run_analysis(args)
if __name__ == '__main__':
main()
| 32.798817
| 86
| 0.673642
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,993
| 0.359553
|
189920c0b5a16b9f087f121b4d4d90e4791b2184
| 5,876
|
py
|
Python
|
habitrac/habits/api/resolvers.py
|
IgnisDa/habitrac
|
0b5f6f1f4a6659c4cce49aacae54cdb0e74af67a
|
[
"Apache-2.0"
] | null | null | null |
habitrac/habits/api/resolvers.py
|
IgnisDa/habitrac
|
0b5f6f1f4a6659c4cce49aacae54cdb0e74af67a
|
[
"Apache-2.0"
] | null | null | null |
habitrac/habits/api/resolvers.py
|
IgnisDa/habitrac
|
0b5f6f1f4a6659c4cce49aacae54cdb0e74af67a
|
[
"Apache-2.0"
] | null | null | null |
import datetime
import json
from ariadne import MutationType, QueryType, convert_kwargs_to_snake_case
from ariadne_token_auth.decorators import login_required
from django.contrib.auth import get_user_model
from habits import models as habit_models
from utils.general import get_user
from utils.handlers.errors import ErrorContainer
CUSTOM_USER_MODEL = get_user_model()
query = QueryType()
mutation = MutationType()
@mutation.field("createDailyHabit")
@convert_kwargs_to_snake_case
@login_required
def create_daily_habit(_, info, data):
error_container = ErrorContainer("date_from", "date_to", "name")
status = False
habit = None
user = get_user(info)
name = data.get("name")
description = data.get("description")
date_from = data.get("date_from")
date_to = data.get("date_to")
vault = data.get("vault")
today = datetime.date.today()
if date_from < today:
error_container.update_with_error(
"date_from",
f"The starting date must be greater than {today.strftime('%m/%d/%Y')}",
)
if date_to - date_from < datetime.timedelta(days=0):
        error_container.update_with_error(
            "date_to", "The ending date cannot be before the starting date"
        )
if habit_models.DailyHabit.objects.filter(name__iexact=name, user=user).exists():
error_container.update_with_error(
"name", "A habit with this name already exists"
)
if not error_container:
habit = habit_models.DailyHabit.objects.create(
user=user,
name=name,
date_from=date_from,
date_to=date_to,
description=description,
vault=vault,
)
status = True
return {"status": status, "errors": error_container.get_all_errors(), "habit": habit}
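# Rough shape of the corresponding GraphQL call. The mutation and argument names come
# from the resolver above; the camelCase input keys and the selected payload fields are
# assumptions about the schema, shown only to illustrate how this resolver is reached:
#
#   mutation {
#     createDailyHabit(data: {
#       name: "Read", description: "Read 20 pages",
#       dateFrom: "2022-05-01", dateTo: "2022-05-31", vault: false
#     }) {
#       status
#       errors
#     }
#   }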
@mutation.field("updateDailyHabit")
@convert_kwargs_to_snake_case
@login_required
def update_daily_habit(_, info, data, name_slug):
error_container = ErrorContainer("duration", "name", "name_slug")
status = False
habit = None
name = data.get("name")
user = get_user(info)
description = data.get("description")
date_to = data.get("date_to")
vault = data.get('vault')
habits = habit_models.DailyHabit.objects.filter(name_slug=name_slug, user=user)
if len(habits) > 1:
error_container.update_with_error(
"name_slug", "There was an error processing your request"
)
if not habits.exists():
error_container.update_with_error(
"name_slug", "The habit you requested for does not exist"
)
if not error_container:
habit = habits[0]
habit.name = name
habit.description = description
habit.date_to = date_to
habit.vault = vault
habit.save()
status = True
return {"status": status, "errors": error_container.get_all_errors(), "habit": habit}
@query.field("getAllHabits")
@convert_kwargs_to_snake_case
@login_required
def get_all_habits(_, info, username_slug, **kwargs):
qs = CUSTOM_USER_MODEL.objects.get(
username_slug=username_slug
).dailyhabit_set.filter(vault=False)
ret_value = []
for obj in qs:
vars(obj)["is_completed"] = obj.is_completed
vars(obj)["is_done"] = obj.is_done
obj.progress = json.dumps(obj.progress)
ret_value.append(vars(obj))
return ret_value
@query.field("getHabitDetails")
@convert_kwargs_to_snake_case
@login_required
def get_habit_details(_, info, name_slug, **kwargs):
error = None
user = get_user(info)
ret_value = None
try:
habit = habit_models.DailyHabit.objects.get(name_slug=name_slug, user=user)
vars(habit)["is_completed"] = habit.is_completed
        vars(habit)["is_done"] = habit.is_done
habit.progress = json.dumps(habit.progress)
ret_value = vars(habit)
except habit_models.DailyHabit.DoesNotExist:
error = "The habit you requested for does not exist"
return {"habit": ret_value, "error": error}
@mutation.field("toggleTagCycle")
@convert_kwargs_to_snake_case
@login_required
def toggle_tag_cycle(_, info, data, **kwargs):
status = False
error = None
user = get_user(info)
habit = habit_models.DailyHabit.objects.get(name_slug=data["name_slug"], user=user)
try:
today = data.get("date").strftime("%Y-%m-%d")
habit.toggle_day(today)
habit.save()
status = True
except KeyError:
error = "The day you're trying to mark does not fall in this habit's duration."
return {"status": status, "error": error}
@query.field("getHabitReport")
@convert_kwargs_to_snake_case
@login_required
def get_habit_report(_, info, name_slug, **kwargs):
user = get_user(info)
habit = habit_models.DailyHabit.objects.get(name_slug=name_slug, user=user)
return habit.generate_report()
@mutation.field("deleteHabit")
@convert_kwargs_to_snake_case
@login_required
def delete_habit(_, info, name_slug, *args, **kwargs):
user = get_user(info)
status = False
error = None
try:
habit_models.DailyHabit.objects.get(name_slug=name_slug, user=user).delete()
status = True
except habit_models.DailyHabit.DoesNotExist:
error = "The requested habit does not exist in the database."
return {"status": status, "error": error}
@query.field("getVaultHabits")
@convert_kwargs_to_snake_case
@login_required
def get_vault_habits(_, info, username_slug, **kwargs):
qs = CUSTOM_USER_MODEL.objects.get(
username_slug=username_slug
).dailyhabit_set.filter(vault=True)
ret_value = []
for obj in qs:
vars(obj)["is_completed"] = obj.is_completed
vars(obj)["is_done"] = obj.is_done
obj.progress = json.dumps(obj.progress)
ret_value.append(vars(obj))
return ret_value
| 32.826816
| 89
| 0.681246
| 0
| 0
| 0
| 0
| 5,435
| 0.924949
| 0
| 0
| 912
| 0.155208
|
189970d0714654ace0194ba8650e7bc2d279578b
| 2,542
|
py
|
Python
|
src/target_matrix.py
|
smusali/rightwhalerecognition
|
0def80bc7e19864093008112455ae08869b40501
|
[
"MIT"
] | null | null | null |
src/target_matrix.py
|
smusali/rightwhalerecognition
|
0def80bc7e19864093008112455ae08869b40501
|
[
"MIT"
] | null | null | null |
src/target_matrix.py
|
smusali/rightwhalerecognition
|
0def80bc7e19864093008112455ae08869b40501
|
[
"MIT"
] | null | null | null |
import csv, pylab as pl, re
DB = dict();
BD = dict();
whales_ = [];
classes = [];
line_num = 0;
with open('data/train.csv', 'rb') as train_class_data:
data = csv.reader(train_class_data, delimiter=',');
for line in data:
if (line_num == 0):
line_num += 1;
continue;
keys = DB.keys();
syek = BD.keys();
pic_name = line[0];
class_name = line[1];
whales_.append(int(re.sub('w_','',re.sub('.jpg','',pic_name))));
if (class_name not in keys):
DB[class_name] = [pic_name];
classes.append(int(re.sub('whale_','',class_name)));
else:
DB[class_name].append(pic_name);
BD[pic_name] = class_name;
keys = DB.keys();
N = len(keys);
frequency_table = [0 for i in xrange(N)];
for i in xrange(N):
frequency_table[i] = len(DB[keys[i]]);
pl.plot(frequency_table);
M = len(whales_);
match_table = [[0 for j in xrange(N+1)] for i in xrange(M+1)];
for j in xrange(N):
match_table[0][j+1] = classes[j];
for i in xrange(M):
match_table[i+1][0] = whales_[i];
for i in xrange(N):
for j in xrange(M):
strWhale = 'w_'+str(whales_[j])+'.jpg';
num_zero = 0;
if (classes[i] < 10):
num_zero += 4;
elif (classes[i] < 100):
num_zero += 3;
elif (classes[i] < 1000):
num_zero += 2;
elif (classes[i] < 10000):
num_zero += 1;
zeros = num_zero*'0';
strClass = 'whale_'+zeros+str(classes[i]);
if (strWhale in DB[strClass]):
match_table[j+1][i+1] = 1;
match_table = pl.array(match_table);
pl.savetxt('data/match_table.csv', match_table, delimiter=',');
target_matrix = pl.array([[0 for j in xrange(M)] for j in xrange(M)]);
i = 0;
for pic_name_i in whales_:
j = 0;
for pic_name_j in whales_:
class_of_i = BD['w_'+str(pic_name_i)+'.jpg'];
class_of_j = BD['w_'+str(pic_name_j)+'.jpg'];
if (class_of_i == class_of_j):
target_matrix[i,j] = 1;
j += 1;
target_matrix[i,i] = 1;
i += 1;
new_train_numerical = pl.array([[0 for it1 in xrange(2)] for it2 in xrange(M)]);
for i in xrange(M):
whale = whales_[i];
new_train_numerical[i,0] = whale;
class_ = class_of_i = BD['w_'+str(whale)+'.jpg'];
new_train_numerical[i,1] = int(re.sub('whale_','',class_));
pl.savetxt('data/target_matrix.csv', target_matrix, delimiter=',');
pl.savetxt('data/train_numer.csv', new_train_numerical, delimiter=',');
| 30.261905
| 80
| 0.562549
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 185
| 0.072777
|
1899c8f523a33646e893cd33cd3682188e0ca8e5
| 5,990
|
py
|
Python
|
src/fsec/yammler.py
|
HiggsHydra/permian-frac-exchange
|
1dd1e409e5389373590c7d3780a54cd9f12d1166
|
[
"MIT"
] | null | null | null |
src/fsec/yammler.py
|
HiggsHydra/permian-frac-exchange
|
1dd1e409e5389373590c7d3780a54cd9f12d1166
|
[
"MIT"
] | null | null | null |
src/fsec/yammler.py
|
HiggsHydra/permian-frac-exchange
|
1dd1e409e5389373590c7d3780a54cd9f12d1166
|
[
"MIT"
] | null | null | null |
from typing import Union
from datetime import datetime
import os
import tempfile
from contextlib import contextmanager
import logging
from collections import Counter
import yaml
logger = logging.getLogger(__name__)
class Yammler(dict):
_no_dump = ["changed"]
_metavars = ["fspath", "updated_at"]
def __init__(self, fspath: str, auto_dump: bool = False, data: dict = None):
self.fspath = fspath
self.auto_dump = auto_dump
self.changed = False
self.updated_at = self.stamp()
_yml = None
with open(fspath) as f:
_yml = yaml.safe_load(f) or {}
if isinstance(_yml, dict):
_yml_data = _yml.pop("data", {})
_yml_data.update(data or {})
self._set_data(_yml_data)
self.meta = _yml
@property
def meta(self) -> dict:
meta = {}
for mv in self._metavars:
try:
meta[mv] = getattr(self, mv)
except Exception as e:
logger.debug(e)
meta[mv] = None
return meta
@meta.setter
def meta(self, data: dict) -> None:
data = data.pop("meta", data) # reduce if possible
[setattr(self, key, value) for key, value in data.items()]
def _set_data(self, data: dict) -> None:
super().update(data or {})
def dump(self, force=False):
if self.changed or force:
with self.durable(self.fspath, "w") as f:
d = {}
d.update({"data": dict(self), "meta": self.meta})
[d["data"].pop(k, None) for k in self._no_dump]
yaml.safe_dump(d, f, default_flow_style=False)
self.changed = False
def updated(self):
self.updated_at = datetime.utcnow()
if self.auto_dump:
self.dump(force=True)
else:
self.changed = True
def __setitem__(self, key, value):
super().__setitem__(key, value)
self.updated()
def __delitem__(self, key):
super().__delitem__(key)
self.updated()
def update(self, kwargs):
super().update(kwargs)
self.updated()
def overwrite(self, data: dict):
return Yammler(self.fspath, self.auto_dump, data)
@staticmethod
def stamp():
return datetime.utcnow()
@classmethod
@contextmanager
def context(cls, fspath):
obj = cls(fspath)
try:
yield obj
finally:
obj.dump(force=True)
@classmethod
@contextmanager
def durable(cls, fspath: str, mode: str = "w+b"):
""" Safely write to file """
_fspath = fspath
_mode = mode
_file = tempfile.NamedTemporaryFile(_mode, delete=False)
try:
yield _file
except Exception as e: # noqa
os.unlink(_file.name)
raise e
else:
_file.close()
os.rename(_file.name, _fspath)
class DownloadLog(Yammler):
_special = "_known_files"
def __init__(self, fspath: str):
super().__init__(fspath)
def __repr__(self):
return f"DownloadLog: {len(self.known_files)} tracked files"
def __iter__(self):
for f in self.known_files:
yield f
def __len__(self):
return len(self.__getitem__(self._special))
def __missing__(self, key):
if key == self._special:
value = dict()
self[key] = value
return value
else:
raise KeyError
@property
def known_files(self) -> dict:
return self.__getitem__(self._special)
def add(self, paths: Union[str, list]) -> Counter:
if not isinstance(paths, list):
paths = [paths]
c: Counter = Counter()
for path in paths:
try:
self.known_files[path] = datetime.now().isoformat()
c["ok"] += 1
except ValueError as ve:
c["failed"] += 1
logger.info(f"Failed to add entry to {self.__class__.__name__}: {path}")
raise ve
return c
def remove(self, paths: Union[str, list]) -> Counter:
if not isinstance(paths, list):
paths = [paths]
c: Counter = Counter()
for path in paths:
try:
self.known_files.pop(path)
c["ok"] += 1
except ValueError:
c["failed"] += 1
logger.debug(f"{path} not in {self.__class__.__name__}")
logger.debug(c)
return c
# def cycle(self, items: Union[str, list], max_age: int = 42):
# """ Purge log older than max_age (days)"""
# self.remove(items)
# self.purged_at = datetime.now()
# return self
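# Usage sketch (assumes an existing YAML file at the given, hypothetical path):
#
#   with Yammler.context("config/example.yaml") as cfg:
#       cfg["last_run"] = "ok"   # __setitem__ marks the instance as changed
#   # leaving the context dumps {"data": ..., "meta": ...} back to disk atomically
#   # via Yammler.durable (write to a temp file, then os.rename into place).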
if __name__ == "__main__":
fspath = "./config/download_log.yaml"
import loggers
from yammler import Yammler
loggers.standard_config()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
with DownloadLog.context(fspath) as f:
# print(f)
# f.known_files = "test"
# print(f.known_files)
f.add("test1")
print(f)
y = Yammler("./config/operators.yaml")
    # NOTE: `s` is assumed to be a list of operator dicts defined elsewhere; as written, this line raises NameError.
    s2 = [{x.pop("operator"): x} for x in s]
from stringprocessor import StringProcessor as sp
for s in s2:
for k, v in s.items():
x = s.pop(k)
x["alias"] = sp.normalize(x.pop("alias"), lower=True)
x["method"] = sp.normalize(x.pop("method"), lower=True)
s[sp.normalize(k, lower=True)] = x
for x in s2:
for key, value in x.items():
try:
value["created"] = value["created"].isoformat()
value["updated"] = value["updated"].isoformat()
            except Exception:
pass
finally:
y[key] = value
f = DownloadLog(fspath)
# f.known_files
# f.add("test1")
# f.dump()
# f.remove("test1")
| 26.504425
| 88
| 0.543573
| 4,568
| 0.762604
| 621
| 0.103673
| 1,224
| 0.204341
| 0
| 0
| 763
| 0.127379
|
189b834780427e5805e8ddb0880c32074d93411d
| 879
|
py
|
Python
|
pricecalc/pricecalc/apps/calc/migrations/0024_ldstp_alter_furnitureincalc_options.py
|
oocemb/Calculation
|
91d202d1b5a2dde6376487147517310682294278
|
[
"Apache-2.0"
] | null | null | null |
pricecalc/pricecalc/apps/calc/migrations/0024_ldstp_alter_furnitureincalc_options.py
|
oocemb/Calculation
|
91d202d1b5a2dde6376487147517310682294278
|
[
"Apache-2.0"
] | null | null | null |
pricecalc/pricecalc/apps/calc/migrations/0024_ldstp_alter_furnitureincalc_options.py
|
oocemb/Calculation
|
91d202d1b5a2dde6376487147517310682294278
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 4.0.2 on 2022-03-25 11:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('calc', '0023_delete_handle_alter_calctag_options_and_more'),
]
operations = [
migrations.CreateModel(
name='Ldstp',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=128)),
('price', models.DecimalField(decimal_places=2, max_digits=8)),
('availability', models.CharField(max_length=32)),
],
),
migrations.AlterModelOptions(
name='furnitureincalc',
options={'verbose_name': 'Фурнитура в расчёте', 'verbose_name_plural': 'Фурнитура в расчёте'},
),
]
| 32.555556
| 117
| 0.602958
| 820
| 0.898138
| 0
| 0
| 0
| 0
| 0
| 0
| 275
| 0.301205
|
189c6b3a4cd4803a7422b2fd630d54013aa0aa1e
| 14,356
|
py
|
Python
|
aiida_siesta/calculations/stm.py
|
mailhexu/aiida_siesta_plugin
|
313ef4b3532b54d8d0c81788b683c53cb4701965
|
[
"MIT"
] | null | null | null |
aiida_siesta/calculations/stm.py
|
mailhexu/aiida_siesta_plugin
|
313ef4b3532b54d8d0c81788b683c53cb4701965
|
[
"MIT"
] | 2
|
2019-05-12T22:11:46.000Z
|
2019-05-13T11:46:16.000Z
|
aiida_siesta/calculations/stm.py
|
mailhexu/aiida_siesta_plugin
|
313ef4b3532b54d8d0c81788b683c53cb4701965
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
# Module with fdf-aware dictionary
from tkdict import FDFDict
from aiida.orm.calculation.job import JobCalculation
from aiida.common.exceptions import InputValidationError
from aiida.common.datastructures import CalcInfo
from aiida.common.utils import classproperty
from aiida.common.datastructures import CodeInfo
from aiida.orm.data.parameter import ParameterData
from aiida.orm.data.remote import RemoteData
__copyright__ = u"Copyright (c), 2015, ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE (Theory and Simulation of Materials (THEOS) and National Centre for Computational Design and Discovery of Novel Materials (NCCR MARVEL)), Switzerland and ROBERT BOSCH LLC, USA. All rights reserved."
__license__ = "MIT license, see LICENSE.txt file"
__version__ = "0.9.10"
__contributors__ = "Victor M. Garcia-Suarez, Alberto Garcia"
class STMCalculation(JobCalculation):
"""
Plugin for the "plstm" program in the Siesta distribution, which
takes and .LDOS or .RHO file and generates a plot file to simulate
an STM image.
"""
_stm_plugin_version = 'aiida-0.12.0--stm-0.9.10'
def _init_internal_params(self):
super(STMCalculation, self)._init_internal_params()
# Default Siesta output parser provided by AiiDA
self._default_parser = "siesta.stm"
# Keywords that cannot be set
# We need to canonicalize this!
self._aiida_blocked_keywords = ['mode','system-label','extension']
# Default input and output files
self._DEFAULT_INPUT_FILE = 'stm.in'
self._DEFAULT_OUTPUT_FILE = 'stm.out'
self._DEFAULT_PLOT_FILE = 'aiida.CH.STM'
self._OUTPUT_SUBFOLDER = './'
self._PREFIX = 'aiida'
self._INPUT_FILE_NAME = 'stm.in'
self._OUTPUT_FILE_NAME = 'stm.out'
self._PLOT_FILE_NAME = 'aiida.CH.STM'
# in restarts, it will copy from the parent the following
self._restart_copy_from = os.path.join(self._OUTPUT_SUBFOLDER, '*.LDOS')
# in restarts, it will copy the previous folder in the following one
self._restart_copy_to = self._OUTPUT_SUBFOLDER
@classproperty
def _use_methods(cls):
"""
Extend the parent _use_methods with further keys.
"""
retdict = JobCalculation._use_methods
retdict["settings"] = {
'valid_types': ParameterData,
'additional_parameter': None,
'linkname': 'settings',
'docstring': "Use an additional node for special settings",
}
retdict["parameters"] = {
'valid_types': ParameterData,
'additional_parameter': None,
'linkname': 'parameters',
'docstring': ("Use a node that specifies the input parameters "
"for the namelists"),
}
retdict["parent_folder"] = {
'valid_types': RemoteData,
'additional_parameter': None,
'linkname': 'parent_calc_folder',
            'docstring': ("Use a remote folder as parent folder (for "
                          "restarts and similar)"),
}
return retdict
    def _prepare_for_submission(self, tempfolder, inputdict):
"""
This is the routine to be called when you want to create
the input files and related stuff with a plugin.
:param tempfolder: a aiida.common.folders.Folder subclass where
the plugin should put all its files.
:param inputdict: a dictionary with the input nodes, as they would
be returned by get_inputdata_dict (without the Code!)
"""
local_copy_list = []
remote_copy_list = []
# Process the settings dictionary first
# Settings can be undefined, and defaults to an empty dictionary
settings = inputdict.pop(self.get_linkname('settings'),None)
if settings is None:
settings_dict = {}
else:
if not isinstance(settings, ParameterData):
raise InputValidationError("settings, if specified, must be of "
"type ParameterData")
# Settings converted to UPPERCASE
# Presumably to standardize the usage and avoid
# ambiguities
settings_dict = _uppercase_dict(settings.get_dict(),
dict_name='settings')
try:
parameters = inputdict.pop(self.get_linkname('parameters'))
except KeyError:
raise InputValidationError("No parameters specified for this "
"calculation")
if not isinstance(parameters, ParameterData):
raise InputValidationError("parameters is not of type "
"ParameterData")
try:
parent_calc_folder = inputdict.pop(self.get_linkname('parent_folder'))
except KeyError:
raise InputValidationError("No parent_calc_folder specified for this "
"calculation")
if not isinstance(parent_calc_folder, RemoteData):
raise InputValidationError("parent_calc_folder, if specified,"
"must be of type RemoteData")
#
# Important note: This program should NOT be run with MPI.
# Scripts using this plugin should use:
#
# calc.set_withmpi(False)
#
        # We do it right here, and hope that it will not be overridden
#
# self.set_withmpi(False)
#
try:
code = inputdict.pop(self.get_linkname('code'))
except KeyError:
raise InputValidationError("No code specified for this calculation")
# Here, there should be no more parameters...
if inputdict:
raise InputValidationError("The following input data nodes are "
"unrecognized: {}".format(inputdict.keys()))
# END OF INITIAL INPUT CHECK #
#
# There should be a warning for duplicated (canonicalized) keys
# in the original dictionary in the script
input_params = FDFDict(parameters.get_dict())
# Look for blocked keywords and
# add the proper values to the dictionary
for blocked_key in self._aiida_blocked_keywords:
canonical_blocked = FDFDict.translate_key(blocked_key)
for key in input_params:
if key == canonical_blocked:
raise InputValidationError(
"You cannot specify explicitly the '{}' flag in the "
"input parameters".format(input_params.get_last_key(key)))
input_params.update({'system-label': self._PREFIX})
input_params.update({'mode': 'constant-height'})
input_params.update({'extension': 'ldos'})
# Maybe check that the 'z' coordinate makes sense...
input_filename = tempfolder.get_abs_path(self._INPUT_FILE_NAME)
with open(input_filename,'w') as infile:
infile.write("aiida\n")
infile.write("ldos\n")
infile.write("constant-height\n")
# Convert height to bohr...
infile.write("{}\n".format(input_params['z']/0.529177))
infile.write("unformatted\n")
# ------------------------------------- END of input file creation
# The presence of a 'parent_calc_folder' is mandatory, to get the LDOS file
# as indicated in the self._restart_copy_from attribute.
# (this is not technically a restart, though)
# It will be copied to the current calculation's working folder.
if parent_calc_folder is not None:
remote_copy_list.append(
(parent_calc_folder.get_computer().uuid,
os.path.join(parent_calc_folder.get_remote_path(),
self._restart_copy_from),
self._restart_copy_to
))
calcinfo = CalcInfo()
calcinfo.uuid = self.uuid
#
# Empty command line by default
# Why use 'pop' ?
cmdline_params = settings_dict.pop('CMDLINE', [])
if cmdline_params:
calcinfo.cmdline_params = list(cmdline_params)
calcinfo.local_copy_list = local_copy_list
calcinfo.remote_copy_list = remote_copy_list
calcinfo.stdin_name = self._INPUT_FILE_NAME
calcinfo.stdout_name = self._OUTPUT_FILE_NAME
#
# Code information object
#
codeinfo = CodeInfo()
codeinfo.cmdline_params = list(cmdline_params)
codeinfo.stdin_name = self._INPUT_FILE_NAME
codeinfo.stdout_name = self._OUTPUT_FILE_NAME
codeinfo.code_uuid = code.uuid
calcinfo.codes_info = [codeinfo]
# Retrieve by default: the output file and the plot file
calcinfo.retrieve_list = []
calcinfo.retrieve_list.append(self._OUTPUT_FILE_NAME)
calcinfo.retrieve_list.append(self._PLOT_FILE_NAME)
# Any other files specified in the settings dictionary
settings_retrieve_list = settings_dict.pop('ADDITIONAL_RETRIEVE_LIST',[])
calcinfo.retrieve_list += settings_retrieve_list
return calcinfo
def _set_parent_remotedata(self,remotedata):
"""
Used to set a parent remotefolder that holds the LDOS file
from a previous Siesta calculation
"""
from aiida.common.exceptions import ValidationError
if not isinstance(remotedata,RemoteData):
raise ValueError('remotedata must be a RemoteData')
# complain if another remotedata is already found
input_remote = self.get_inputs(node_type=RemoteData)
if input_remote:
raise ValidationError("Cannot set several parent calculation to a "
"{} calculation".format(self.__class__.__name__))
self.use_parent_folder(remotedata)
def get_input_data_text(key, val, mapping=None):
"""
Given a key and a value, return a string (possibly multiline for arrays)
with the text to be added to the input file.
:param key: the flag name
:param val: the flag value. If it is an array, a line for each element
is produced, with variable indexing starting from 1.
Each value is formatted using the conv_to_fortran function.
:param mapping: Optional parameter, must be provided if val is a dictionary.
It maps each key of the 'val' dictionary to the corresponding
list index. For instance, if ``key='magn'``,
``val = {'Fe': 0.1, 'O': 0.2}`` and ``mapping = {'Fe': 2, 'O': 1}``,
this function will return the two lines ``magn(1) = 0.2`` and
``magn(2) = 0.1``. This parameter is ignored if 'val'
is not a dictionary.
"""
from aiida.common.utils import conv_to_fortran
# I check first the dictionary, because it would also match
# hasattr(__iter__)
if isinstance(val, dict):
if mapping is None:
raise ValueError("If 'val' is a dictionary, you must provide also "
"the 'mapping' parameter")
list_of_strings = []
for elemk, itemval in val.iteritems():
try:
idx = mapping[elemk]
except KeyError:
raise ValueError("Unable to find the key '{}' in the mapping "
"dictionary".format(elemk))
list_of_strings.append((idx," {0}({2}) = {1}\n".format(
key, conv_to_fortran(itemval), idx)))
# I first have to resort, then to remove the index from the first
# column, finally to join the strings
list_of_strings = zip(*sorted(list_of_strings))[1]
return "".join(list_of_strings)
elif hasattr(val,'__iter__'):
# a list/array/tuple of values
list_of_strings = [
"{0}({2}) {1}\n".format(key, conv_to_fortran(itemval), idx+1)
for idx, itemval in enumerate(val)]
return "".join(list_of_strings)
else:
# single value
if key[:6] == '%block':
bname = key.split()[1]
b1 = "{0} {1}".format(key, my_conv_to_fortran(val))
return b1 + "\n%endblock " + bname + "\n"
else:
return "{0} {1}\n".format(key, my_conv_to_fortran(val))
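# Worked example (kept as comments; conv_to_fortran controls the exact numeric
# formatting, so the dictionary output below is shown schematically):
#
#   get_input_data_text('magn', {'Fe': 0.1, 'O': 0.2}, mapping={'Fe': 2, 'O': 1})
#   # -> " magn(1) = <0.2 in Fortran form>\n magn(2) = <0.1 in Fortran form>\n"
#
#   get_input_data_text('some-flag', 300)   # hypothetical single-value flag
#   # -> "some-flag 300\n"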
def my_conv_to_fortran(val):
"""
Special version to avoid surrounding strings with extra ' '. Otherwise the
fdf tokenizer will not split values and units, for example.
:param val: the value to be read and converted to a Fortran-friendly string.
"""
# Note that bool should come before integer, because a boolean matches also
# isinstance(...,int)
if (isinstance(val, bool)):
if val:
val_str = '.true.'
else:
val_str = '.false.'
elif (isinstance(val, (int, long))):
val_str = "{:d}".format(val)
elif (isinstance(val, float)):
val_str = ("{:18.10e}".format(val)).replace('e', 'd')
elif (isinstance(val, basestring)):
val_str = "{!s}".format(val)
else:
raise ValueError("Invalid value passed, accepts only bools, ints, "
"floats and strings")
return val_str
def _uppercase_dict(d, dict_name):
from collections import Counter
if isinstance(d,dict):
new_dict = dict((str(k).upper(), v) for k, v in d.iteritems())
if len(new_dict) != len(d):
num_items = Counter(str(k).upper() for k in d.keys())
            double_keys = ",".join([k for k, v in num_items.items() if v > 1])
raise InputValidationError(
"Inside the dictionary '{}' there are the following keys that "
"are repeated more than once when compared case-insensitively: "
"{}."
"This is not allowed.".format(dict_name, double_keys))
return new_dict
else:
        raise TypeError("_uppercase_dict accepts only dictionaries as argument")
| 39.657459
| 278
| 0.60156
| 9,335
| 0.650251
| 0
| 0
| 1,026
| 0.071468
| 0
| 0
| 6,061
| 0.422193
|
189dad7d4fc31c11984202f5abd8d52b7d7034ce
| 5,974
|
py
|
Python
|
backend/api/fixtures/test/functional_test/load_ft_data.py
|
amichard/tfrs
|
ed3973016cc5c2ae48999d550a23b41a5ddad807
|
[
"Apache-2.0"
] | 18
|
2017-05-10T21:55:11.000Z
|
2021-03-01T16:41:32.000Z
|
backend/api/fixtures/test/functional_test/load_ft_data.py
|
amichard/tfrs
|
ed3973016cc5c2ae48999d550a23b41a5ddad807
|
[
"Apache-2.0"
] | 1,167
|
2017-03-04T00:18:43.000Z
|
2022-03-03T22:31:51.000Z
|
backend/api/fixtures/test/functional_test/load_ft_data.py
|
amichard/tfrs
|
ed3973016cc5c2ae48999d550a23b41a5ddad807
|
[
"Apache-2.0"
] | 48
|
2017-03-09T17:19:39.000Z
|
2022-02-24T16:38:17.000Z
|
import uuid
import os
from datetime import datetime
from django.db import transaction
from api.management.data_script import OperationalDataScript
from api.models.CompliancePeriod import CompliancePeriod
from api.models.Organization import Organization
from api.models.OrganizationActionsType import OrganizationActionsType
from api.models.OrganizationBalance import OrganizationBalance
from api.models.OrganizationStatus import OrganizationStatus
from api.models.OrganizationType import OrganizationType
from api.models.Role import Role
from api.models.User import User
from api.models.UserRole import UserRole
class LoadFTData(OperationalDataScript):
comment = 'Load BDD functional test users'
is_revertable = False
_usernames = ['bdd-fuelsupplier1',
'bdd-fuelsupplier2',
'bdd-analyst',
'bdd-director',
'bdd-fuelsupplier1admin',
'bdd-admin']
_orgs = ['TFRS Fantastic Fuels', 'TFRS IMBeing Green']
def check_run_preconditions(self):
for username in self._usernames:
if User.objects.filter(username=username).exists():
print('Found an existing user {}'.format(username))
return False
for org in self._orgs:
if Organization.objects.filter(name=org).exists():
print('Found an existing organization {}'.format(org))
return False
return True
@transaction.atomic
def run(self):
Organization(name=self._orgs[0],
actions_type=OrganizationActionsType.objects.get_by_natural_key("Buy And Sell"),
type=OrganizationType.objects.get_by_natural_key("Part3FuelSupplier"),
status=OrganizationStatus.objects.get_by_natural_key('Active'), id=2).save()
Organization(name=self._orgs[1],
actions_type=OrganizationActionsType.objects.get_by_natural_key("Buy And Sell"),
type=OrganizationType.objects.get_by_natural_key("Part3FuelSupplier"),
status=OrganizationStatus.objects.get_by_natural_key('Active'), id=3).save()
OrganizationBalance(organization=Organization.objects.get_by_natural_key(self._orgs[0]), credit_trade=None,
validated_credits=1000, effective_date=datetime.today().strftime('%Y-%m-%d')).save()
OrganizationBalance(organization=Organization.objects.get_by_natural_key(self._orgs[1]), credit_trade=None,
validated_credits=1000, effective_date=datetime.today().strftime('%Y-%m-%d')).save()
User( is_superuser='f', username='bdd-fuelsupplier1',
email='bdd-fuelsupplier1@test.com', first_name='fuelsupplier1', last_name='bdd',
is_staff='f', is_active='t', display_name='bdd-fuelsupplier1',
organization=Organization.objects.get_by_natural_key(self._orgs[0])).save()
User( is_superuser='f', username='bdd-fuelsupplier2',
email='bdd-fuelsupplier2@test.com', first_name='fuelsupplier2', last_name='bdd',
is_staff='f', is_active='t', display_name='bdd-fuelsupplier2',
organization=Organization.objects.get_by_natural_key(self._orgs[1])).save()
User( is_superuser='f', username='bdd-analyst',
email='bdd-analyst@test.com', first_name='analyst', last_name='bdd',
is_staff='f', is_active='t', display_name='bdd-analyst',
organization=Organization.objects.get_by_natural_key("Government of British Columbia")).save()
User( is_superuser='f', username='bdd-director',
email='bdd-director@test.com', first_name='director', last_name='bdd',
is_staff='f', is_active='t', display_name='bdd-director',
organization=Organization.objects.get_by_natural_key("Government of British Columbia")).save()
User( is_superuser='f', username='bdd-fuelsupplier1admin',
email='bdd-fuelsupplier1admin@test.com', first_name='fuelsupplier1admin', last_name='bdd',
is_staff='f', is_active='t', display_name='bdd-fuelsupplier1admin',
organization=Organization.objects.get_by_natural_key(self._orgs[0])).save()
User( is_superuser='f', username='bdd-admin',
email='bdd-admin@test.com', first_name='admin', last_name='bdd',
is_staff='f', is_active='t', display_name='bdd-admin',
organization=Organization.objects.get_by_natural_key("Government of British Columbia")).save()
UserRole(user=User.objects.get(username='bdd-fuelsupplier1'), role=Role.objects.get_by_natural_key('FSManager')).save()
UserRole(user=User.objects.get(username='bdd-fuelsupplier2'), role=Role.objects.get_by_natural_key('FSManager')).save()
UserRole(user=User.objects.get(username='bdd-analyst'), role=Role.objects.get_by_natural_key('Admin')).save()
UserRole(user=User.objects.get(username='bdd-analyst'), role=Role.objects.get_by_natural_key('GovUser')).save()
UserRole(user=User.objects.get(username='bdd-director'), role=Role.objects.get_by_natural_key('GovDirector')).save()
UserRole(user=User.objects.get(username='bdd-fuelsupplier1admin'), role=Role.objects.get_by_natural_key('FSUser')).save()
UserRole(user=User.objects.get(username='bdd-fuelsupplier1admin'), role=Role.objects.get_by_natural_key('FSManager')).save()
UserRole(user=User.objects.get(username='bdd-fuelsupplier1admin'), role=Role.objects.get_by_natural_key('FSAdmin')).save()
UserRole(user=User.objects.get(username='bdd-admin'), role=Role.objects.get_by_natural_key('Admin')).save()
UserRole(user=User.objects.get(username='bdd-admin'), role=Role.objects.get_by_natural_key('GovUser')).save()
UserRole(user=User.objects.get(username='bdd-admin'), role=Role.objects.get_by_natural_key('GovDirector')).save()
script_class = LoadFTData
| 55.831776
| 132
| 0.68999
| 5,329
| 0.892032
| 0
| 0
| 4,470
| 0.748242
| 0
| 0
| 1,239
| 0.207399
|
189e049c8ff1d8fc6680e58e527827763bd3d33c
| 3,456
|
py
|
Python
|
challenge/agoda_cancellation_prediction.py
|
ZebraForce9/IML.HUJI
|
a263a621331c7c7d51c90c8325f76aa0797d424e
|
[
"MIT"
] | null | null | null |
challenge/agoda_cancellation_prediction.py
|
ZebraForce9/IML.HUJI
|
a263a621331c7c7d51c90c8325f76aa0797d424e
|
[
"MIT"
] | null | null | null |
challenge/agoda_cancellation_prediction.py
|
ZebraForce9/IML.HUJI
|
a263a621331c7c7d51c90c8325f76aa0797d424e
|
[
"MIT"
] | null | null | null |
from challenge.agoda_cancellation_estimator import AgodaCancellationEstimator
from IMLearn.utils import split_train_test
from IMLearn.base import BaseEstimator
import numpy as np
import pandas as pd
def load_data(filename: str):
"""
Load Agoda booking cancellation dataset
Parameters
----------
filename: str
        Path to the Agoda booking cancellation dataset
Returns
-------
Design matrix and response vector in either of the following formats:
1) Single dataframe with last column representing the response
2) Tuple of pandas.DataFrame and Series
3) Tuple of ndarray of shape (n_samples, n_features) and ndarray of shape (n_samples,)
"""
full_data = pd.read_csv(filename).drop_duplicates()
full_data["cancellation_datetime"] = full_data["cancellation_datetime"].fillna(0)
full_data = full_data.dropna()
# full_data = full_data.fillna("Unknown")
full_data[["booking_datetime", "checkin_date", "checkout_date", "hotel_live_date", "cancellation_datetime"]] = \
full_data[
["booking_datetime", "checkin_date", "checkout_date", "hotel_live_date", "cancellation_datetime"]].apply(
pd.to_datetime)
full_data["cancellation_datetime"] = full_data["cancellation_datetime"].apply(lambda x: x.value // 10**9)
full_data["booking_date"], full_data["booking_time"] = full_data["booking_datetime"].dt.date, full_data[
"booking_datetime"].dt.time
features_to_dummify = ["hotel_id", "hotel_country_code", "accommadation_type_name", "charge_option",
"customer_nationality", "guest_nationality_country_name", "origin_country_code", "language",
"original_payment_method", "original_payment_currency", "hotel_area_code", "hotel_city_code"]
for feature in features_to_dummify:
feature_dummies = pd.get_dummies(full_data[feature]).add_prefix(f"{feature}")
full_data = full_data.join(feature_dummies)
full_data = full_data.drop(["h_booking_id", "h_customer_id", "booking_datetime"] + features_to_dummify, axis=1)
labels = full_data.pop("cancellation_datetime")
return full_data, labels
def evaluate_and_export(estimator: BaseEstimator, X: np.ndarray, filename: str):
"""
Export to specified file the prediction results of given estimator on given test set.
File saved is in csv format with a single column named 'predicted_values' and n_samples rows containing
predicted values.
Parameters
----------
estimator: BaseEstimator or any object implementing predict() method as in BaseEstimator (for example sklearn)
Fitted estimator to use for prediction
X: ndarray of shape (n_samples, n_features)
Test design matrix to predict its responses
filename:
path to store file at
"""
pd.DataFrame(estimator.predict(X), columns=["predicted_values"]).to_csv(filename, index=False)
if __name__ == '__main__':
np.random.seed(0)
# Load data
df, cancellation_labels = load_data("agoda_cancellation_train.csv")
train_X, train_y, test_X, test_y = split_train_test(df, cancellation_labels)
# Fit model over data
from time import time
start = time()
estimator = AgodaCancellationEstimator().fit(train_X, train_y)
pred = estimator.predict(test_X)
print(time() - start)
# Store model predictions over test set
evaluate_and_export(estimator, test_X, "id1_id2_id3.csv")
| 37.565217
| 120
| 0.713252
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,853
| 0.536169
|
189ea5728ae22b441ea875f1bd0c5faac3a76ced
| 294
|
py
|
Python
|
example2.py
|
presidento/scripthelper
|
71b9e69f2967fb8d352376213c046263d5c31849
|
[
"MIT"
] | null | null | null |
example2.py
|
presidento/scripthelper
|
71b9e69f2967fb8d352376213c046263d5c31849
|
[
"MIT"
] | 3
|
2020-04-28T13:14:31.000Z
|
2021-01-15T09:41:56.000Z
|
example2.py
|
presidento/scripthelper
|
71b9e69f2967fb8d352376213c046263d5c31849
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import scripthelper
scripthelper.add_argument("-n", "--name", help="Name to greet")
logger, args = scripthelper.bootstrap_args()
if args.name:
logger.debug("Name was provided")
logger.info(f"Hello {args.name}")
else:
logger.warning("Name was not provided")
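# Example invocation (sketch; assumes scripthelper is installed):
#
#   python3 example2.py --name World
#   # -> logs "Hello World" at INFO level; the DEBUG message is emitted only if
#   #    the configured log level allows it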
| 24.5
| 63
| 0.710884
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 111
| 0.377551
|
18a1a66ddfc12bbc493302b88cd1fbc01b59d040
| 71
|
py
|
Python
|
Chapter 01/Chap01_Example1.40.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
Chapter 01/Chap01_Example1.40.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
Chapter 01/Chap01_Example1.40.py
|
Anancha/Programming-Techniques-using-Python
|
e80c329d2a27383909d358741a5cab03cb22fd8b
|
[
"MIT"
] | null | null | null |
#backslash and new line ignored
print("one\
two\
three")
| 14.2
| 31
| 0.591549
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 62
| 0.873239
|
18a1ae9b017d856fff834435791dc30cf0986f3f
| 15,575
|
py
|
Python
|
turbo_seti/find_event/find_event_pipeline.py
|
savinshynu/turbo_seti
|
7d756f130af5a323403affcdcb9f9bfa62325836
|
[
"MIT"
] | null | null | null |
turbo_seti/find_event/find_event_pipeline.py
|
savinshynu/turbo_seti
|
7d756f130af5a323403affcdcb9f9bfa62325836
|
[
"MIT"
] | null | null | null |
turbo_seti/find_event/find_event_pipeline.py
|
savinshynu/turbo_seti
|
7d756f130af5a323403affcdcb9f9bfa62325836
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
r"""
Front-facing script to find drifting, narrowband events in a set of generalized
cadences of ON-OFF radio SETI observations.
The main function contained in this file, :func:`find_event_pipeline`, calls
find_events from find_event.py to read a list of turboSETI .dat files.
It then finds events within this group of files.
"""
#required packages and programs
import os
from operator import attrgetter
import logging
logger_name = 'find_event_pipeline'
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
import pandas as pd
import numpy as np
from blimpy import Waterfall
from blimpy.utils import change_the_ext
from turbo_seti.find_event.find_event import find_events
RTOL_DIFF = 0.01 # 1%
class PathRecord:
r''' Definition of a DAT record '''
def __init__(self, path_dat, tstart, source_name, fch1, foff, nchans):
self.path_dat = path_dat
self.tstart = tstart
self.source_name = source_name
self.fch1 = fch1
self.foff = foff
self.nchans = nchans
def __repr__(self):
return repr((self.path_dat, self.tstart, self.source_name))
def get_file_header(filepath_h5):
r'''
    Extract and return the Waterfall header from an HDF5 file.
    Parameters
    ----------
    filepath_h5 : str
        Full or relative path name of the HDF5 file
Returns
-------
header : Waterfall header object
'''
wf = Waterfall(filepath_h5, load_data=False)
return wf.container.header
def close_enough(x, y):
r"""Make sure that x and y are close enough to be considered roughly equal."""
if np.isclose(float(x), float(y), rtol=RTOL_DIFF):
return True
return False
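# For example, with RTOL_DIFF = 0.01 (a 1% relative tolerance):
#   close_enough(1000.0, 1000.5)  # True  (about 0.05% apart)
#   close_enough(1.0, 1.5)        # False (50% apart)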
def find_event_pipeline(dat_file_list_str, h5_file_list_str=None, SNR_cut=10, check_zero_drift=False, filter_threshold=3,
on_off_first='ON', number_in_cadence=6, on_source_complex_cadence=False,
saving=True, csv_name=None, user_validation=False,
sortby_tstart=True):
"""
Find event pipeline.
Parameters
----------
dat_file_list_str : str
The string name of a plaintext file ending in .lst
that contains the filenames of .dat files, each on a
new line, that were created with seti_event.py. The
.lst should contain a set of cadences (ON observations
alternating with OFF observations). The cadence can be
of any length, given that the ON source is every other
file. This includes Breakthrough Listen standard ABACAD
as well as OFF first cadences like BACADA. Minimum
cadence length is 2, maximum cadence length is
unspecified (currently tested up to 6).
Example: ABACAD|ABACAD|ABACAD
h5_file_list_str : str | None
The string name of a plaintext file ending in .lst
that contains the filenames of .h5 files, each on a
new line, that were created with seti_event.py. The
.lst should contain a set of cadences (ON observations
alternating with OFF observations). The cadence can be
of any length, given that the ON source is every other
file. This includes Breakthrough Listen standard ABACAD
as well as OFF first cadences like BACADA. Minimum
cadence length is 2, maximum cadence length is
unspecified (currently tested up to 6).
SNR_cut : int
The threshold SNR below which hits in the ON source
will be disregarded. For the least strict thresholding,
set this parameter equal to the minimum-searched SNR
that you used to create the .dat files from
seti_event.py. Recommendation (and default) is 10.
check_zero_drift : bool
A True/False flag that tells the program whether to
include hits that have a drift rate of 0 Hz/s. Earth-
based RFI tends to have no drift rate, while signals
from the sky are expected to have non-zero drift rates.
filter_threshold : int
Specification for how strict the hit filtering will be.
There are 3 different levels of filtering, specified by
the integers 1, 2, and 3. Filter_threshold = 1
returns hits above an SNR cut, taking into account the
check_zero_drift parameter, but without an ON-OFF check.
Filter_threshold = 2 returns hits that passed level 1
AND that are in at least one ON but no OFFs.
Filter_threshold = 3 returns events that passed level 2
AND that are present in *ALL* ONs.
on_off_first : str {'ON', 'OFF'}
Tells the code whether the .dat sequence starts with
the ON or the OFF observation. Valid entries are 'ON'
and 'OFF' only. Default is 'ON'.
number_in_cadence : int
The number of files in a single ON-OFF cadence.
Default is 6 for ABACAD.
on_source_complex_cadence : bool
If using a complex cadence (i.e. ons and offs not
alternating), this variable should be the string
target name used in the .dat filenames. The code will
then determine which files in your dat_file_list_str
cadence are ons and which are offs.
saving : bool
A True/False flag that tells the program whether to
save the output array as a .csv.
user_validation : bool
A True/False flag that, when set to True, asks if the
user wishes to continue with their input parameters
(and requires a 'y' or 'n' typed as confirmation)
before beginning to run the program. Recommended when
first learning the program, not recommended for
automated scripts.
sortby_tstart : bool
If True, the input file list is sorted by header.tstart.
Returns
-------
Either:
* a Pandas dataframe with all the events that were found.
* None, if no events were found.
Notes
-----
The HDF5 file is ASSUMED(!!) to have the same name as .dat files.
Examples
--------
>>> import find_event_pipeline;
>>> find_event_pipeline.find_event_pipeline(dat_file_list_str,
... SNR_cut=10,
... check_zero_drift=False,
... filter_threshold=3,
... on_off_first='ON',
... number_in_cadence=6,
... on_source_complex_cadence=False,
... saving=True,
... user_validation=False)
"""
print()
print("************ BEGINNING FIND_EVENT PIPELINE **************")
print()
if on_source_complex_cadence:
print("Assuming a complex cadence for the following on source: {}"
.format(on_source_complex_cadence))
else: # not on_source_complex_cadence:
print("Assuming the first observation is an " + on_off_first)
complex_cadence = on_source_complex_cadence
    # Get source names and build the path_record list.
source_name_list = []
path_record = []
# Get a list of the DAT/h5 files.
def list_of_files(dat_file_list_str):
dat_file_list = open(dat_file_list_str).readlines()
dat_file_list = [files.replace('\n','') for files in dat_file_list]
dat_file_list = [files.replace(',','') for files in dat_file_list]
n_files = len(dat_file_list)
return n_files, dat_file_list
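    # For reference, each .lst file is expected to contain one path per line,
    # in cadence order (paths below are purely illustrative):
    #     /datadir/ON_source_0000.dat
    #     /datadir/OFF_sourceB_0001.dat
    #     ...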
n_files, dat_file_list = list_of_files(dat_file_list_str)
if h5_file_list_str is None:
h5_file_list = dat_file_list
for hf in h5_file_list:
header = get_file_header(change_the_ext(hf, 'dat', 'h5'))
source_name = header["source_name"]
tstart = header["tstart"]
path_record.append(PathRecord(hf, tstart, source_name, header["fch1"],
header["foff"], header["nchans"]))
source_name_list.append(source_name)
else:
hn_files, h5_file_list = list_of_files(h5_file_list_str)
for hf in h5_file_list:
header = get_file_header(hf)
for dat in dat_file_list: # O(n^2) TODO: create tests in pytest
if os.path.basename(dat).replace('.dat','.h5')==os.path.basename(hf):
source_name = header["source_name"]
tstart = header["tstart"]
path_record.append(PathRecord(dat, tstart, source_name, header["fch1"],
header["foff"], header["nchans"]))
source_name_list.append(source_name)
# If sorting by header.tstart, then rewrite the dat_file_list in header.tstart order.
if sortby_tstart:
path_record = sorted(path_record, key=attrgetter('tstart'))
dat_file_list = []
for obj in path_record:
dat_file_list.append(obj.path_dat)
# Set up the frequency range matcher record.
# If a complex cadence, the source name is used to select the matcher;
# Otherwise, just use the first record.
if on_source_complex_cadence:
flag_terminate = True
        for obj in path_record:  # Look for the 1st occurrence of source_name.
if obj.source_name == on_source_complex_cadence:
matcher = obj
flag_terminate = False
break
if flag_terminate:
logger.error("find_event_pipeline: Source '{}' is not in this complex cadence!"
.format(on_source_complex_cadence))
for obj in path_record:
logger.info("find_event_pipeline: file={}, tstart={}, source_name={}, fch1={}, foff={}, nchans={}"
.format(os.path.basename(obj.path_dat), obj.tstart, obj.source_name,
obj.fch1, obj.foff, obj.nchans))
return None
else:
matcher = path_record[0]
# Display path_record rows.
flag_terminate = False
for obj in path_record:
logger.info("find_event_pipeline: file={}, tstart={}, source_name={}, fch1={}, foff={}, nchans={}"
.format(os.path.basename(obj.path_dat), obj.tstart, obj.source_name,
obj.fch1, obj.foff, obj.nchans))
if on_source_complex_cadence: # Complex cadence?
# If not a part of the complex cadence, then skip it.
if on_source_complex_cadence != obj.source_name:
continue
# Part of the cadence, complex or not.
# Make sure that the frequency range makes sense.
if not close_enough(obj.fch1, matcher.fch1) \
or not close_enough(obj.foff, matcher.foff) \
or obj.nchans != matcher.nchans:
logger.error("find_event_pipeline: Inconsistent frequency range! This does not look like a cadence of related files.")
flag_terminate = True
if flag_terminate:
return None
# If this is a complex cadence,
# * construct a complex_cadence list of 1s and 0s.
# * compute count_cadence = number of matches on on_source_complex_cadence.
if on_source_complex_cadence:
complex_cadence = []
count_cadence = 0
for i in range(0, len(source_name_list)):
source = source_name_list[i]
if source == on_source_complex_cadence:
complex_cadence.append(1)
count_cadence += 1
else:
complex_cadence.append(0)
if count_cadence > 0:
print("The derived complex cadence is: " + str(complex_cadence))
else:
print("\n*** find_event_pipeline [complex cadence]: Sorry, no potential candidates with your given on_source_complex_cadence={} :("
.format(on_source_complex_cadence))
return None
num_of_sets = int(n_files / number_in_cadence)
print("There are " + str(len(dat_file_list)) + " total files in the filelist "
+ dat_file_list_str)
print("therefore, looking for events in " + str(num_of_sets) + " on-off set(s)")
print("with a minimum SNR of " + str(SNR_cut))
if filter_threshold == 1:
print("Present in an ON source only, above SNR_cut")
if filter_threshold == 2:
print("Present in at least one ON source with RFI rejection from the OFF sources")
if filter_threshold == 3:
print("Present in all ON sources with RFI rejection from the OFF sources")
if not check_zero_drift:
print("not including signals with zero drift")
else:
print("including signals with zero drift")
if not saving:
print("not saving the output files")
else:
print("saving the output files")
if user_validation:
question = "Do you wish to proceed with these settings?"
while "the answer is invalid":
reply = str(input(question+' (y/n): ')).lower().strip()
if reply == '':
return None
if reply[0] == 'y':
break
if reply[0] == 'n':
return None
    # Loop over number_in_cadence chunks.
candidate_list = []
for ii in range(num_of_sets):
sublist_low = number_in_cadence * ii
sublist_high = sublist_low + number_in_cadence
file_sublist = dat_file_list[sublist_low : sublist_high]
if not complex_cadence:
if on_off_first == 'ON':
filename = os.path.basename(file_sublist[0])
else: # on_off_first == 'OFF'
filename = os.path.basename(file_sublist[1])
else: # complex_cadence
filename = os.path.basename(file_sublist[complex_cadence.index(1)])
print()
print("*** First DAT file in set: " + filename + " ***")
print()
cand = find_events(file_sublist,
SNR_cut=SNR_cut,
check_zero_drift=check_zero_drift,
filter_threshold=filter_threshold,
on_off_first=on_off_first,
complex_cadence=complex_cadence)
        if cand is not None:
            candidate_list.append(cand)
if len(candidate_list) > 0:
find_event_output_dataframe = pd.concat(candidate_list)
else:
print("\n*** find_event_pipeline: Sorry, no potential candidates with your given parameters :(")
return None
print("*** find_event_output_dataframe is complete ***")
if saving:
if csv_name is None:
prefix = os.path.dirname(dat_file_list[0]) + '/' + source_name_list[0]
if check_zero_drift:
filestring = prefix + '_f' + str(filter_threshold) + '_snr' \
+ str(SNR_cut) + '_zero' + '.csv'
else:
filestring = prefix + '_f' + str(filter_threshold) + '_snr' \
+ str(SNR_cut) + '.csv'
else:
filestring = csv_name
if not isinstance(find_event_output_dataframe, list):
find_event_output_dataframe.to_csv(filestring)
print("find_event_pipeline: Saved CSV file to {}".format(filestring))
else:
print("\n*** find_event_pipeline: Sorry, no events to save :(")
return None
return find_event_output_dataframe
| 41.094987
| 144
| 0.616116
| 404
| 0.025939
| 0
| 0
| 0
| 0
| 0
| 0
| 7,899
| 0.507159
|
18a1b4659b986cda93994b346c85aae4f37fb1a4
| 1,558
|
py
|
Python
|
scripts/plot_snaps.py
|
wordsworthgroup/libode
|
c3e9dbfe3e09c49ed666f10ae8fb964b37ecb479
|
[
"MIT"
] | 11
|
2020-02-27T22:32:04.000Z
|
2021-05-06T17:51:50.000Z
|
scripts/plot_snaps.py
|
markmbaum/libode
|
c3e9dbfe3e09c49ed666f10ae8fb964b37ecb479
|
[
"MIT"
] | null | null | null |
scripts/plot_snaps.py
|
markmbaum/libode
|
c3e9dbfe3e09c49ed666f10ae8fb964b37ecb479
|
[
"MIT"
] | 2
|
2021-09-26T07:36:55.000Z
|
2021-11-29T23:45:32.000Z
|
import numpy as np
import matplotlib.pyplot as plt
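# This script reads solver snapshots written to ../out/<name>_snap_t and
# ../out/<name>_snap_<i> and compares the numerical solution against the
# exact solution selected by the lambdas below, plotting the solutions
# together with their absolute and relative errors.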
#Dahlquist test
#sol1ex = lambda t: np.exp(-t)
#sol2ex = lambda t: np.exp(-2*t)
#oscillator 1
sol1ex = lambda t: np.cos(t**2/2)
sol2ex = lambda t: np.sin(t**2/2)
#oscillator 2
#sol1ex = lambda t: np.exp(np.sin(t**2))
#sol2ex = lambda t: np.exp(np.cos(t**2))
name = 'Osc1'
t = np.fromfile('../out/%s_snap_t' % name)
nsnap = len(t)
sol1 = np.zeros((nsnap,))
sol2 = sol1.copy()
for i in range(nsnap):
s = np.fromfile('../out/%s_snap_%d' % (name,i))
sol1[i] = s[0]
sol2[i] = s[1]
fig, axs = plt.subplots(2, 3, figsize=(10,5))
axs = [item for sublist in axs for item in sublist]
tdense = np.linspace(min(t), max(t), 2500)
axs[0].plot(tdense, sol1ex(tdense), 'k', linewidth=0.5, label='$y_1$ exact')
axs[0].plot(t, sol1, 'C0.', label='$y_1$ numerical')
axs[0].set_title('Solutions')
axs[0].set_ylabel('$y_1$')
axs[0].legend()
axs[3].plot(tdense, sol2ex(tdense), 'k', linewidth=0.5, label='$y_2$ exact')
axs[3].plot(t, sol2, 'C1.', label='$y_2$ numerical')
axs[3].set_ylabel('$y_2$')
axs[3].legend()
axs[1].semilogy(t, np.abs(sol1 - sol1ex(t)), 'C0.', label='$y_1$ abs err')
axs[4].semilogy(t, np.abs(sol2 - sol2ex(t)), 'C1.', label='$y_2$ abs err')
axs[1].set_title('Absolute Error')
axs[2].semilogy(t, np.abs((sol1 - sol1ex(t))/sol1ex(t)), 'C0.', label='$y_1$ rel err')
axs[5].semilogy(t, np.abs((sol2 - sol2ex(t))/sol2ex(t)), 'C1.', label='$y_2$ rel err')
axs[2].set_title('Relative Error')
axs[3].set_xlabel('t')
axs[4].set_xlabel('t')
axs[5].set_xlabel('t')
plt.tight_layout()
plt.show()
| 29.396226
| 86
| 0.632863
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 448
| 0.287548
|
18a22f9ecd12b8cd2ba070dcb05f2e55ef3f8d64
| 86
|
py
|
Python
|
mne/datasets/kiloword/__init__.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | 1,953
|
2015-01-17T20:33:46.000Z
|
2022-03-30T04:36:34.000Z
|
mne/datasets/kiloword/__init__.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | 8,490
|
2015-01-01T13:04:18.000Z
|
2022-03-31T23:02:08.000Z
|
mne/datasets/kiloword/__init__.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | 1,130
|
2015-01-08T22:39:27.000Z
|
2022-03-30T21:44:26.000Z
|
"""MNE visual_92_categories dataset."""
from .kiloword import data_path, get_version
| 21.5
| 44
| 0.790698
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 39
| 0.453488
|
18a3288be8f39dff9c36e526ba62428f9babcc0d
| 1,688
|
py
|
Python
|
app/framework/tagger_framework/tagger/pos/evaluation.py
|
kislerdm/nlp_pos_demo
|
cea5a0432e3fc0a626f090d40a28e084e3243efc
|
[
"MIT"
] | null | null | null |
app/framework/tagger_framework/tagger/pos/evaluation.py
|
kislerdm/nlp_pos_demo
|
cea5a0432e3fc0a626f090d40a28e084e3243efc
|
[
"MIT"
] | null | null | null |
app/framework/tagger_framework/tagger/pos/evaluation.py
|
kislerdm/nlp_pos_demo
|
cea5a0432e3fc0a626f090d40a28e084e3243efc
|
[
"MIT"
] | null | null | null |
# Dmitry Kisler © 2020-present
# www.dkisler.com
from typing import List, Dict
from sklearn.metrics import f1_score, accuracy_score
def model_performance(y_true: List[List[str]],
y_pred: List[List[str]]) -> Dict[str, float]:
"""Accuracy calculation function
Args:
        y_true: List of true label sequences for the tokenized sentences.
        y_pred: List of predicted label sequences for the tokenized sentences.
Returns:
Dict of metrics:
{
"accuracy": float,
"f1_micro": float,
"f1_macro": float,
"f1_weighted": float,
}
Raises:
        ValueError: Raised when the input lists' lengths don't match.
"""
if len(y_true) == 0:
return None
if len(y_true) != len(y_pred):
raise ValueError("Lengths of input lists don't match.")
def _list_flattener(inpt: List[List[str]]) -> List[str]:
"""Flattener for list of lists into a single list."""
output = []
for i in inpt:
output.extend(i)
return output
y_true = _list_flattener(y_true)
y_pred = _list_flattener(y_pred)
if len(y_true) != len(y_pred):
raise ValueError("Numper of tokens don't match between y_true and y_pred.")
try:
metrics = {
"accuracy": accuracy_score(y_true, y_pred),
"f1_micro": f1_score(y_true, y_pred, average='micro'),
"f1_macro": f1_score(y_true, y_pred, average='macro'),
"f1_weighted": f1_score(y_true, y_pred, average='weighted'),
}
except Exception as ex:
raise Exception(f"Metrics calculation error: {ex}")
return metrics
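

if __name__ == "__main__":
    # Minimal usage sketch (hypothetical POS tag sequences):
    # two of the three tokens match, so accuracy comes out to 2/3.
    demo_true = [["DET", "NOUN", "VERB"]]
    demo_pred = [["DET", "NOUN", "NOUN"]]
    print(model_performance(demo_true, demo_pred))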
| 29.103448
| 83
| 0.598934
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 757
| 0.448194
|
18a5fa9fa6c228a2b9d1020387a728db780df2f0
| 3,451
|
py
|
Python
|
tools/interpret.py
|
Notgnoshi/generative
|
d9702c18b59553541f0cce706089f9fad501cd33
|
[
"MIT"
] | 5
|
2021-02-11T07:55:51.000Z
|
2022-02-10T01:11:02.000Z
|
tools/interpret.py
|
Notgnoshi/generative
|
d9702c18b59553541f0cce706089f9fad501cd33
|
[
"MIT"
] | 67
|
2020-12-31T18:02:05.000Z
|
2022-02-21T14:57:52.000Z
|
tools/interpret.py
|
Notgnoshi/generative
|
d9702c18b59553541f0cce706089f9fad501cd33
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Interpret an L-String as a set of 3D Turtle commands and record the turtle's path.
Multiple lines of input will be treated as a continuation of a single L-String.
Default commandset:
F,G - Step forward while drawing
f,g - Step forward without drawing
-,+ - Yaw around the normal axis
v,^ - Pitch around the transverse axis
<,> - Roll around the longitudinal axis
| - Flip orientation 180 degrees
d,D - Turn drawing on, off
[,] - Push, pop position and orientation onto a stack
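
Example invocation (illustrative; default commandset, 90-degree turns):
    echo 'F+F+F+F' | python3 tools/interpret.py --angle 90
This should trace a unit square and write it to stdout as WKT.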
"""
import argparse
import logging
import pathlib
import sys
root = pathlib.Path(__file__).resolve().parent.parent
sys.path.insert(0, str(root))
from generative.lsystem.interpreter import LSystemInterpeter
from generative.wkio import serialize_geometries
LOG_LEVELS = {
"CRITICAL": logging.CRITICAL,
"ERROR": logging.ERROR,
"WARNING": logging.WARNING,
"INFO": logging.INFO,
"DEBUG": logging.DEBUG,
}
DEFAULT_LEVEL = "WARNING"
def parse_args():
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
"--input",
"-i",
type=argparse.FileType("r"),
default=sys.stdin,
help="A file containing the L-String to interpret. Defaults to stdin.",
)
parser.add_argument(
"--output",
"-o",
# TODO: I seem to not be able to open stdout in binary mode.
# See: https://github.com/python/cpython/pull/13165
# Potential workaround: open in 'wb' mode, and default to sys.stdout.buffer.
type=argparse.FileType("w"),
default=sys.stdout,
help="A file to output the expanded axiom to. Defaults to stdout.",
)
parser.add_argument(
"--commandset",
"-c",
type=str,
default="default",
choices=LSystemInterpeter.commandsets,
help="The commandset to use to interpret the given L-String. Defaults to 'default'.",
)
parser.add_argument(
"--stepsize",
"-s",
type=float,
default=1.0,
help="The step size for the turtle's forward motion. Defaults to 1.0.",
)
parser.add_argument(
"--angle",
"-a",
type=float,
default=45.0,
help="The angle in degrees used for the turtle's orientation modifications. Defaults to 45.",
)
parser.add_argument(
"--output-format",
"-O",
type=str,
default="wkt",
choices=["wkt", "wkb", "flat"],
help="The output format for the turtle path. Defaults to WKT.",
)
parser.add_argument(
"-l",
"--log-level",
type=str,
default=DEFAULT_LEVEL,
choices=LOG_LEVELS.keys(),
help=f"Set the logging output level. Defaults to {DEFAULT_LEVEL}.",
)
return parser.parse_args()
def main(args):
interpreter = LSystemInterpeter(args.commandset, args.stepsize, args.angle)
tokens = interpreter.tokenize(args.input)
geometries = interpreter.interpret(tokens)
serialize_geometries(geometries, args.output, args.output_format)
if __name__ == "__main__":
args = parse_args()
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=LOG_LEVELS.get(args.log_level),
stream=sys.stderr,
)
logger = logging.getLogger(name=__file__)
main(args)
| 29
| 101
| 0.634888
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,464
| 0.424225
|
18a6ffdb28982da58249e4d719411ed0e1af6ac5
| 699
|
py
|
Python
|
PointsToRobot.py
|
chuong/robot-arm-manipulation
|
a5ad277f86c278ccf8fe99abe337d0c64f8a407e
|
[
"MIT"
] | null | null | null |
PointsToRobot.py
|
chuong/robot-arm-manipulation
|
a5ad277f86c278ccf8fe99abe337d0c64f8a407e
|
[
"MIT"
] | null | null | null |
PointsToRobot.py
|
chuong/robot-arm-manipulation
|
a5ad277f86c278ccf8fe99abe337d0c64f8a407e
|
[
"MIT"
] | null | null | null |
"""
@author: yuboya
"""
### pins position to be sent to robot
## from TransformationCalculation:
import numpy as np
import math
def PointsToRobot(alpha, deltax,deltay,deltaz,xyzc):
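    # Transform note (inferred from the code below): each camera-frame point
    # in xyzc is rotated about the z axis via R.T (i.e. by -alpha) and then
    # translated by (deltax, deltay, deltaz) to express it in the robot frame.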
sina = math.sin(alpha)
cosa = math.cos(alpha)
pointrs = []
for pointc in xyzc:
# METHOD 2: matrix calculation
pc = pointc.reshape(3,1)
R = np.array([cosa, -sina, 0, sina, cosa, 0, 0,0,1])
R = R.reshape(3,3)
T= np.array([deltax,deltay,deltaz])
T = T.reshape(3,1)
pr = np.dot(np.transpose(R),pc)+T
pointr = pr.reshape(1,3)
pointrs.append(pointr)
return pointrs
| 18.891892
| 61
| 0.542203
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 131
| 0.187411
|
18a73b665ef0eeab4028398fb264c011541365f0
| 2,418
|
py
|
Python
|
plugins/module_utils/github_api.py
|
zp4rker/ansible-github-api
|
8b4d154915a5d92ec6f379d50cfb2c66a07fb16c
|
[
"Apache-2.0"
] | null | null | null |
plugins/module_utils/github_api.py
|
zp4rker/ansible-github-api
|
8b4d154915a5d92ec6f379d50cfb2c66a07fb16c
|
[
"Apache-2.0"
] | null | null | null |
plugins/module_utils/github_api.py
|
zp4rker/ansible-github-api
|
8b4d154915a5d92ec6f379d50cfb2c66a07fb16c
|
[
"Apache-2.0"
] | null | null | null |
import requests
import json
from json import JSONDecodeError
base_uri = "https://api.github.com/"
licenses = ['afl-3.0', 'apache-2.0', 'artistic-2.0', 'bsl-1.0', 'bsd-2-clause', 'bsd-3-clause', 'bsd-3-clause-clear', 'cc', 'cc0-1.0', 'cc-by-4.0', 'cc-by-sa-4.0', 'wtfpl', 'ecl-2.0', 'epl-1.0', 'epl-2.0', 'eupl-1.1', 'agpl-3.0', 'gpl', 'gpl-2.0', 'gpl-3.0', 'lgpl', 'lgpl-2.1', 'lgpl-3.0', 'isc', 'lppl-1.3c', 'ms-pl', 'mit', 'mpl-2.0', 'osl-3.0', 'postgresql', 'ofl-1.1', 'ncsa', 'unlicense', 'zlib']
def make_request(request):
error = None
if not request['api_key']:
error = dict(msg='Github API Key was not provided! Please either use api_key or use an ENV variable named GITHUB_API_KEY')
return dict(error=error, payload=None, raw=None)
# Remove unnecessary slashes
if request['endpoint'][0:1] == '/':
request['endpoint'] = request['endpoint'][1:]
headers = {
'Authorization': f'token {request["api_key"]}',
'Accept': 'application/vnd.github.v3+json'
}
if 'accept' in request:
headers['Accept'] = request['accept']
uri = '{}{}'.format(base_uri, request['endpoint'])
if 'data' in request.keys() and request['data']:
response = requests.request(request['method'], uri, data=json.dumps(request['data']), headers=headers)
else:
response = requests.request(request['method'], uri, headers=headers)
try:
payload = json.loads(response.text)
except JSONDecodeError:
payload = response.text
if response.reason == 'Unauthorized' and payload['message'] == 'Bad credentials':
error = dict(msg='Failed to authorise due to invalid credentials.')
elif not response.ok:
error = dict(msg=f'Request failed with reason: {response.reason}', payload=payload, raw=response)
return dict(error=error, payload=payload, raw=response)
def get_login(api_key):
request = dict(
api_key=api_key,
method='GET',
endpoint='user'
)
response = make_request(request)
if response['error']:
return None
else:
        return response['payload']['login']
def repo_exists(api_key, owner, name):
request = dict(
api_key=api_key,
method='GET',
endpoint=f'repos/{owner}/{name}'
)
response = make_request(request)
    # The repository exists if the request succeeded; a 404 ("Not Found") error means it does not.
    return response['error'] is None
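

# Minimal usage sketch (token, owner and repository names are hypothetical):
#
#     result = make_request(dict(
#         api_key="ghp_exampletoken",
#         method="GET",
#         endpoint="repos/octocat/hello-world",
#     ))
#     if result["error"] is None:
#         print(result["payload"].get("full_name"))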
| 34.056338
| 413
| 0.619934
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 915
| 0.378412
|
18a994a759d85007cf88e43e5353bf80d7ac9a5c
| 3,055
|
py
|
Python
|
src/onegov/core/datamanager.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/core/datamanager.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/core/datamanager.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
import os
import tempfile
import transaction
from onegov.core import log
from onegov.core.utils import safe_move
class MailDataManager(object):
""" Takes a postman and an envelope and sends it when the transaction
    is committed.
Since we can't really know if a mail can be sent until it happens, we
simply log an exception if the sending failed.
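
    Example (illustrative; assumes an already-configured postman/envelope pair)::

        MailDataManager.send_email(postman, envelope)
        # the message is handed to the postman only when the transaction commits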
"""
transaction_manager = transaction.manager
def __init__(self, postman, envelope):
self.postman = postman
self.envelope = envelope
@classmethod
def send_email(cls, postman, envelope):
transaction.get().join(cls(postman, envelope))
def sortKey(self):
return 'mails'
def bind_connection(self, transaction, connection):
assert 'mail_connection' not in transaction.extension
def after_commit_hook(*args):
connection.quit()
transaction.addAfterCommitHook(after_commit_hook)
transaction.extension['mail_connection'] = connection
def open_connection(self):
connection = self.postman.transport(
self.postman.host,
self.postman.port,
**self.postman.options
)
connection.ehlo()
for item in self.postman.middlewares:
item(connection)
return connection
def commit(self, transaction):
if 'mail_connection' not in transaction.extension:
self.bind_connection(transaction, self.open_connection())
try:
self.postman.deliver(
transaction.extension['mail_connection'],
self.envelope
)
except Exception:
log.exception("Failed to send e-mail")
def abort(self, transaction):
pass
def tpc_vote(self, transaction):
pass
def tpc_abort(self, transaction):
pass
def tpc_begin(self, transaction):
pass
def tpc_finish(self, transaction):
pass
class FileDataManager(object):
""" Writes a file when the transaction is commited. """
transaction_manager = transaction.manager
def __init__(self, data, path):
self.data = data
self.path = path
@classmethod
def write_file(cls, data, path):
transaction.get().join(cls(data, path))
def sortKey(self):
return 'files'
def commit(self, transaction):
with tempfile.NamedTemporaryFile(delete=False) as temp:
self.tempfn = temp.name
temp.write(self.data)
def abort(self, transaction):
pass
def tpc_vote(self, transaction):
if not os.path.exists(self.tempfn):
            raise ValueError("%s doesn't exist" % self.tempfn)
if os.path.exists(self.path):
raise ValueError('file already exists')
def tpc_abort(self, transaction):
try:
os.remove(self.tempfn)
except OSError:
pass
def tpc_begin(self, transaction):
pass
def tpc_finish(self, transaction):
safe_move(self.tempfn, self.path)
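

# Minimal usage sketch for FileDataManager (path and payload are illustrative):
#
#     FileDataManager.write_file(b"hello world", "/tmp/example.txt")
#     transaction.commit()  # the temp file is moved into place only on commit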
| 24.637097
| 73
| 0.629133
| 2,935
| 0.96072
| 0
| 0
| 208
| 0.068085
| 0
| 0
| 419
| 0.137152
|
18aaa916c943bdb538fc41fcf2673ef26fba2444
| 3,603
|
py
|
Python
|
djangocms_modules/models.py
|
crydotsnake/djangocms-modules
|
ab5b75ee1076e6fccab1a26b8dbe1c754c4de8d7
|
[
"BSD-3-Clause"
] | 8
|
2019-01-29T15:11:30.000Z
|
2020-06-07T19:27:50.000Z
|
djangocms_modules/models.py
|
crydotsnake/djangocms-modules
|
ab5b75ee1076e6fccab1a26b8dbe1c754c4de8d7
|
[
"BSD-3-Clause"
] | 11
|
2018-12-14T14:01:06.000Z
|
2020-09-02T09:02:49.000Z
|
djangocms_modules/models.py
|
divio/djangocms-modules
|
8328f130cddd4cf5f90beca170d1303b95158cda
|
[
"BSD-3-Clause"
] | 3
|
2021-04-16T12:26:27.000Z
|
2021-06-25T14:53:47.000Z
|
from django.conf import settings
from django.db import models
from django.dispatch import receiver
from django.urls import Resolver404, resolve
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from cms import operations
from cms.models import CMSPlugin, Placeholder
from cms.models.fields import PlaceholderField
from cms.signals import pre_placeholder_operation
from cms.utils.plugins import get_bound_plugins
def _get_placeholder_slot(category):
return 'module-category-{}'.format(category.pk)
@receiver(pre_placeholder_operation)
def sync_module_plugin(sender, **kwargs):
"""
Updates the created placeholder operation record,
based on the configured post operation handlers.
"""
operation_type = kwargs.pop('operation')
affected_operations = (operations.MOVE_PLUGIN, operations.PASTE_PLUGIN)
if operation_type not in affected_operations:
return
try:
match = resolve(kwargs['origin'])
except Resolver404:
match = None
is_in_modules = match and match.url_name == 'cms_modules_list'
if not is_in_modules:
return
plugin = kwargs['plugin']
placeholder = kwargs.get('target_placeholder')
    needs_sync = (
        plugin.plugin_type == 'Module'
        and placeholder.pk != plugin.module_category.modules_id
    )
if needs_sync:
# User has moved module to another category placeholder
# or pasted a copied module plugin.
new_category = Category.objects.get(modules=placeholder)
(ModulePlugin
.objects
.filter(path__startswith=plugin.path, depth__gte=plugin.depth)
.update(module_category=new_category))
class Category(models.Model):
name = models.CharField(
verbose_name=_('Name'),
max_length=120,
unique=True,
)
modules = PlaceholderField(slotname=_get_placeholder_slot)
class Meta:
verbose_name = _('Category')
verbose_name_plural = _('Categories')
def __str__(self):
return self.name
@cached_property
def modules_placeholder(self):
return ModulesPlaceholder.objects.get(pk=self.modules_id)
def get_non_empty_modules(self):
unbound_plugins = (
self
.modules
.get_plugins(language=settings.LANGUAGE_CODE)
.filter(parent__isnull=True, numchild__gte=1)
)
return get_bound_plugins(unbound_plugins)
class ModulesPlaceholder(Placeholder):
class Meta:
proxy = True
def _get_attached_model(self):
return Category
def _get_attached_models(self):
return self._get_attached_model()
def _get_attached_objects(self):
return self._get_attached_model().objects.filter(modules=self.pk)
@cached_property
def category(self):
return self._get_attached_model().objects.get(modules=self.pk)
def get_label(self):
return self.category.name
class ModulePlugin(CMSPlugin):
module_name = models.CharField(
verbose_name=_('Name'),
max_length=120,
)
module_category = models.ForeignKey(
to=Category,
verbose_name=_('Category'),
on_delete=models.CASCADE,
)
def __str__(self):
return self.module_name
def update(self, refresh=False, **fields):
ModulePlugin.objects.filter(pk=self.pk).update(**fields)
if refresh:
return self.reload()
return
def get_unbound_plugins(self):
return CMSPlugin.get_tree(self).order_by('path')
| 27.090226
| 75
| 0.686095
| 1,856
| 0.515126
| 0
| 0
| 1,402
| 0.38912
| 0
| 0
| 351
| 0.097419
|
18ad2e27ac7dd39dc407f19da04c84adb7ca9a06
| 553
|
py
|
Python
|
do_tasks.py
|
youqingkui/zhihufav
|
97c465d1bf825a6621d221c39a3677887cbd9261
|
[
"MIT"
] | null | null | null |
do_tasks.py
|
youqingkui/zhihufav
|
97c465d1bf825a6621d221c39a3677887cbd9261
|
[
"MIT"
] | null | null | null |
do_tasks.py
|
youqingkui/zhihufav
|
97c465d1bf825a6621d221c39a3677887cbd9261
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#coding=utf-8
import json
from lib.sqs import zhihufav_sqs
from lib.tasks import add_note
def get_sqs_queue():
sqs_info = zhihufav_sqs.get_messages(10)
for sqs in sqs_info:
sqs_body = sqs.get_body()
receipt_handle = sqs.receipt_handle
sqs_json = json.loads(sqs_body)
api_url = sqs_json.get('api_url')
parent_note = sqs_json.get('parent_note')
add_note.delay(api_url, parent_note, receipt_handle)
if __name__=="__main__":
for i in range(5):
get_sqs_queue()
| 22.12
| 60
| 0.679928
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 66
| 0.119349
|