hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d97ce5b1583caa862f15cea2cc1f385f8676bdfd | 207 | py | Python | server/api/admin.py | haliciyazilim/beste-yarismasi | 34acc5067caa547c12c81c8ffa9e524c288945aa | [
"MIT"
] | null | null | null | server/api/admin.py | haliciyazilim/beste-yarismasi | 34acc5067caa547c12c81c8ffa9e524c288945aa | [
"MIT"
] | null | null | null | server/api/admin.py | haliciyazilim/beste-yarismasi | 34acc5067caa547c12c81c8ffa9e524c288945aa | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Composition, Contest, Vote, Content
admin.site.register(Content)
admin.site.register(Composition)
admin.site.register(Contest)
admin.site.register(Vote)
| 25.875 | 55 | 0.821256 | from django.contrib import admin
from .models import Composition, Contest, Vote, Content
admin.site.register(Content)
admin.site.register(Composition)
admin.site.register(Contest)
admin.site.register(Vote)
| 0 | 0 | 0 |
f36c54205fb7bfc423d5970e687b7dc0ee76796c | 2,495 | py | Python | connectivity/src/stations/comstation.py | nakata5321/sensors-connectivity | 307c61196791a62365eda5516cdd161999a1a7f2 | [
"BSD-3-Clause"
] | null | null | null | connectivity/src/stations/comstation.py | nakata5321/sensors-connectivity | 307c61196791a62365eda5516cdd161999a1a7f2 | [
"BSD-3-Clause"
] | null | null | null | connectivity/src/stations/comstation.py | nakata5321/sensors-connectivity | 307c61196791a62365eda5516cdd161999a1a7f2 | [
"BSD-3-Clause"
] | null | null | null | import threading
import typing
import nacl.signing
import time
import typing as tp
import logging.config
from .istation import IStation, StationData, STATION_VERSION, Measurement
from ..drivers.sds011 import SDS011_MODEL, SDS011
from collections import deque
from connectivity.config.logging import LOGGING_CONFIG
logging.config.dictConfig(LOGGING_CONFIG)
logger = logging.getLogger("sensors-connectivity")
class COMStation(IStation):
"""
Reads data from a serial port
"""
| 31.582278 | 87 | 0.598397 | import threading
import typing
import nacl.signing
import time
import typing as tp
import logging.config
from .istation import IStation, StationData, STATION_VERSION, Measurement
from ..drivers.sds011 import SDS011_MODEL, SDS011
from collections import deque
from connectivity.config.logging import LOGGING_CONFIG
logging.config.dictConfig(LOGGING_CONFIG)
logger = logging.getLogger("sensors-connectivity")
def _read_data_thread(sensor: SDS011, q: deque, timeout: int) -> None:
while True:
meas = sensor.query()
timestamp = int(time.time())
q.append((meas, timestamp))
time.sleep(timeout)
class COMStation(IStation):
"""
Reads data from a serial port
"""
def __init__(self, config: dict) -> None:
super().__init__(config)
self.version: str = f"airalab-com-{STATION_VERSION}"
self.sensor: SDS011 = SDS011(config["comstation"]["port"])
work_period: int = int(config["comstation"]["work_period"])
self.sensor.set_work_period(work_time=int(work_period / 60))
self.geo: tp.List[float, float] = [0, 0]
if config["comstation"]["geo"]:
self.geo = config["comstation"]["geo"].split(",")
if "public_key" in config["comstation"] and config["comstation"]["public_key"]:
self.public = config["comstation"]["public_key"]
else:
signing_key = nacl.signing.SigningKey.generate()
verify_key = signing_key.verify_key
self.public = bytes(verify_key).hex()
logger.info(f"COMStation public key: {self.public}")
self.meas_data = {"pm25": 0, "pm10": 0, "timestamp": 0}
self.q = deque(maxlen=1)
threading.Thread(
target=_read_data_thread, args=(self.sensor, self.q, work_period)
).start()
def get_data(self) -> tp.List[StationData]:
meas = Measurement(self.public, SDS011_MODEL, 0, 0, self.meas_data)
if self.q:
values = self.q[0]
pm = values[0]
self.meas_data.update(
{"pm25": pm[0], "pm10": pm[1], "timestamp": values[1]}
)
meas = Measurement(
self.public,
SDS011_MODEL,
float(self.geo[0]),
float(self.geo[1]),
self.meas_data,
)
return [
StationData(
self.version, self.mac_address, time.time() - self.start_time, meas
)
]
| 1,928 | 0 | 77 |
bbc03689b792220d2c67aec673e094e5a211d4d8 | 3,188 | py | Python | app/apimodels/ApiModel.py | Voltra/prodLogPy | df8e069342262beefd4210edce8cb13b2a9b936a | [
"MIT"
] | null | null | null | app/apimodels/ApiModel.py | Voltra/prodLogPy | df8e069342262beefd4210edce8cb13b2a9b936a | [
"MIT"
] | null | null | null | app/apimodels/ApiModel.py | Voltra/prodLogPy | df8e069342262beefd4210edce8cb13b2a9b936a | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from app.models.Model import Model
from flask_restful import abort
import sqlite3
"""
A class that factorizes the behavior of models used for the API
"""
# | 32.530612 | 151 | 0.608218 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from app.models.Model import Model
from flask_restful import abort
import sqlite3
"""
A class that factorizes the behavior of models used for the API
"""
class ApiModel(Model):
LIMIT = 20
"""
Construct an ApiModel from meta data of a DB table
@:param idCol being the column that serves as an ID
@:param tableName being the name of the table bound to this ApiModel
@:param connection being an SQLite3 DB connection
"""
def __init__(self, idCol, tableName, connection):
super().__init__(tableName, connection)
self.id = idCol
#
"""
Determines whether or not there's an item that has the given id in th DB tabe
@:param _id being the ID to test
@:returns A response tuple (Boolean, Status), True if it exists, False otherwise ; a status 200 if everything went well, 400 if there were an error
"""
def exists(self, _id):
query = "SELECT * FROM `%s` WHERE `%s` like ? LIMIT 1" % (self.table, self.id)
try:
self.cursor.execute(query, (_id,))
except sqlite3.Error as e:
abort(400, message=e.args[0])
except sqlite3.Warning as e:
abort(400, message=e.args[0])
length = len(self.cursor.fetchall())
return length != 0, 200
#
"""
Retrieve the data associated to the given ID in the bound table
@:param _id being the ID of the tuple to retrieve
@:returns a response tuple (Json, Status), the data associated to the tuple ; 200 if there were no error, 400 if there were any
"""
def get(self, _id, limit=LIMIT, skip=0):
if not self.exists(_id):
abort(404, message="No resource matching the given id")
if limit <= 0 or limit > ApiModel.LIMIT:
limit = ApiModel.LIMIT
if skip < 0:
skip = 0
query = "SELECT * FROM `%s` WHERE `%s` like ? LIMIT ? OFFSET ?" % (self.table, self.id)
try:
self.cursor.execute(query, (_id, limit, skip))
except sqlite3.Error as e:
abort(400, message=e.args[0])
except sqlite3.Warning as e:
abort(400, message=e.args[0])
return self.cursor.fetchall(), 200
#
"""
Retrieve "all" the data from the bound table (limited by a limit amount)
@:param limit [defaulted to ApiModel.LIMIT] being the maximum amount of tuple to get
@:param skip [defaulted to 0] being the offset
@:returns a response tuple (Json, Status), the data associated to the tuple ; 200 if there were no error, 400 if there were any
"""
def getAll(self, limit=LIMIT, skip=0):
if limit <= 0 or limit > ApiModel.LIMIT:
limit = ApiModel.LIMIT
if skip < 0:
skip = 0
print(limit, skip, sep=" ")
query = "SELECT * FROM %s LIMIT ? OFFSET ?" % (self.table) #, limit, skip
try:
self.cursor.execute(query, (limit, skip))
except sqlite3.DatabaseError as e:
abort(400, message=e.args[0])
except sqlite3.Warning as e:
abort(400, message=e.args[0])
return self.cursor.fetchall(), 200
#
# | 1,640 | 1,325 | 22 |
e2f9721e2bc57b9639d847af890205856469becc | 810 | py | Python | mission_control/devices/lock.py | aborger/RockSatX2020-KauIda | 4ee505c7dfac1a9f14a86f17e273fbdaa2af8319 | [
"MIT"
] | null | null | null | mission_control/devices/lock.py | aborger/RockSatX2020-KauIda | 4ee505c7dfac1a9f14a86f17e273fbdaa2af8319 | [
"MIT"
] | 1 | 2021-04-02T03:53:32.000Z | 2021-04-02T03:53:32.000Z | mission_control/devices/lock.py | aborger/RockSatX2020-KauIda | 4ee505c7dfac1a9f14a86f17e273fbdaa2af8319 | [
"MIT"
] | null | null | null | """
* The Lock class controls multiple servos to latch the door shut before the rocket spins up.
* Author: Aaron Borger <aborger@nnu.edu (307)534-6265>
"""
from devices.device import Device
import RPi.GPIO as GPIO
SERVO_PIN = 14
ZERO = 2.5
NINETY = 7.5
ONE_EIGHTY = 12.5
| 23.823529 | 92 | 0.688889 | """
* The Lock class controls multiple servos to latch the door shut before the rocket spins up.
* Author: Aaron Borger <aborger@nnu.edu (307)534-6265>
"""
from devices.device import Device
import RPi.GPIO as GPIO
SERVO_PIN = 14
ZERO = 2.5
NINETY = 7.5
ONE_EIGHTY = 12.5
class Lock(Device):
def __init__(self):
#GPIO.setup(SERVO_PIN, GPIO.OUT)
self.servo = GPIO.PWM(SERVO_PIN, 50) # Sets servo to use PWM on servo_pin at 50 Hz
self.servo.start(ZERO)
self.servo.ChangeDutyCycle(ZERO) # Sets servo to starting position
def activate(self):
self.servo.ChangeDutyCycle(NINETY) # Rotates servo to 90 degrees position
def deactivate(self):
self.servo.ChangeDutyCycle(ZERO) # Sets servo to starting position
def shutdown(self):
return
| 404 | -2 | 131 |
eafe6d5fd00caff37beb5f3a4362830c727ed60f | 1,028 | py | Python | scripts/sundry/generate_features.py | huydhn/certstream-analytics | ed4dced6fcc399ef02be2c03754e49018623785b | [
"MIT"
] | 10 | 2019-04-27T17:24:14.000Z | 2021-01-21T01:30:39.000Z | scripts/sundry/generate_features.py | huydhn/certstream-analytics | ed4dced6fcc399ef02be2c03754e49018623785b | [
"MIT"
] | null | null | null | scripts/sundry/generate_features.py | huydhn/certstream-analytics | ed4dced6fcc399ef02be2c03754e49018623785b | [
"MIT"
] | 1 | 2019-09-16T13:07:12.000Z | 2019-09-16T13:07:12.000Z | '''
Generate features for outlier detection.
'''
import json
import sys
from certstream_analytics.analysers import WordSegmentation
from certstream_analytics.analysers import IDNADecoder
from certstream_analytics.analysers import FeaturesGenerator
def main(max_count=None):
'''
The record is assumed to be stored in a JSON file passed in as the first
parameter of the script.
'''
segmenter = WordSegmentation()
decoder = IDNADecoder()
generator = FeaturesGenerator()
with open(sys.argv[1]) as fhandle:
count = 0
for line in fhandle:
try:
record = json.loads(line.strip())
except json.decoder.JSONDecodeError:
continue
record = decoder.run(record)
record = segmenter.run(record)
record = generator.run(record)
print(json.dumps(record))
count += 1
if max_count and count > max_count:
break
if __name__ == '__main__':
main()
| 23.906977 | 76 | 0.628405 | '''
Generate features for outlier detection.
'''
import json
import sys
from certstream_analytics.analysers import WordSegmentation
from certstream_analytics.analysers import IDNADecoder
from certstream_analytics.analysers import FeaturesGenerator
def main(max_count=None):
'''
The record is assumed to be stored in a JSON file passed in as the first
parameter of the script.
'''
segmenter = WordSegmentation()
decoder = IDNADecoder()
generator = FeaturesGenerator()
with open(sys.argv[1]) as fhandle:
count = 0
for line in fhandle:
try:
record = json.loads(line.strip())
except json.decoder.JSONDecodeError:
continue
record = decoder.run(record)
record = segmenter.run(record)
record = generator.run(record)
print(json.dumps(record))
count += 1
if max_count and count > max_count:
break
if __name__ == '__main__':
main()
| 0 | 0 | 0 |
76058567d78556566acc67bff6c4ae064b8fd719 | 2,190 | py | Python | stage1/lib/fitTool.py | YU-Zhiyang/WEVI | 0282dc6de58722fc3ed3829a004800b035685b3a | [
"MIT"
] | 14 | 2021-08-10T06:58:07.000Z | 2022-02-25T23:03:10.000Z | stage1/lib/fitTool.py | YU-Zhiyang/WEVI | 0282dc6de58722fc3ed3829a004800b035685b3a | [
"MIT"
] | 4 | 2021-10-30T13:01:52.000Z | 2022-03-22T04:59:46.000Z | stage2/lib/fitTool.py | YU-Zhiyang/WEVI | 0282dc6de58722fc3ed3829a004800b035685b3a | [
"MIT"
] | null | null | null | import torch
import numpy as np
from torch.nn import functional as F
import torch.nn as nn
from torch.autograd import Variable
| 26.071429 | 76 | 0.56758 | import torch
import numpy as np
from torch.nn import functional as F
import torch.nn as nn
from torch.autograd import Variable
def backWarp(img: torch.Tensor, flow: torch.Tensor):
device = img.device
N, C, H, W = img.size()
u = flow[:, 0, :, :]
v = flow[:, 1, :, :]
gridX, gridY = np.meshgrid(np.arange(W), np.arange(H))
gridX = torch.tensor(gridX, requires_grad=False).to(device)
gridY = torch.tensor(gridY, requires_grad=False).to(device)
x = gridX.unsqueeze(0).expand_as(u).float() + u
y = gridY.unsqueeze(0).expand_as(v).float() + v
# range -1 to 1
x = 2 * x / (W - 1.0) - 1.0
y = 2 * y / (H - 1.0) - 1.0
# stacking X and Y
grid = torch.stack((x, y), dim=3)
# Sample pixels using bilinear interpolation.
imgOut = F.grid_sample(img, grid, mode='bilinear', padding_mode='zeros')
# mask = torch.ones_like(img, requires_grad=False)
# mask = F.grid_sample(mask, grid)
#
# mask[mask < 0.9999] = 0
# mask[mask > 0] = 1
# return imgOut * (mask.detach())
return imgOut
class IdCoRe(object):
def __init__(self, intime, device, target='idx'):
self.device = device
if isinstance(intime, int):
intime = torch.tensor(intime).to(self.device)
if target == 'idx':
self.coord = intime + 1 # if the index in frameT is 0
elif target == 'coord':
self.coord = intime # then the related time in dct coord is 1
elif target == 'time': # and the real time is 0.125s
self.coord = intime * 8.0
@property
def idx(self):
return (self.coord - 1).int()
@idx.setter
def idx(self, x):
x = torch.tensor(x)
self.coord = x + 1
@property
def time(self):
return self.coord.float() / 8.0
@time.setter
def time(self, x):
x = torch.tensor(x).to(self.device)
self.coord = x * 8.0
def getAccFlow(a0, b0, a1, b1, t, device):
F0t = a0 * (t ** 2) + b0 * t
F1t = a1 * ((1 - t) ** 2) + b1 * (1 - t)
return F0t.to(device), F1t.to(device)
def getAccParam(F0_1, F01):
a = (F01 + F0_1) / 2.0
b = (F01 - F0_1) / 2.0
return a, b
| 1,772 | 195 | 92 |
fb4fb8e645fc7e3b4d149216cfec4b327f930503 | 564 | py | Python | stamps/stest/designmatrix.py | stemlab689/stamps | 5494d4e86ad005082c677d9a07f71e1606338ba0 | [
"MIT"
] | null | null | null | stamps/stest/designmatrix.py | stemlab689/stamps | 5494d4e86ad005082c677d9a07f71e1606338ba0 | [
"MIT"
] | null | null | null | stamps/stest/designmatrix.py | stemlab689/stamps | 5494d4e86ad005082c677d9a07f71e1606338ba0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy
#only accept order = numpy.nan or 0
| 22.56 | 81 | 0.576241 | # -*- coding: utf-8 -*-
import numpy
#only accept order = numpy.nan or 0
def designmatrix( c, order ):
if numpy.isnan(order):
return numpy.array([],ndmin=2).reshape(c.shape[0],0), numpy.array([],ndmin = 2)
else:
n, nd = c.shape
X = numpy.ones((n, 1))
index = numpy.zeros((2, 1))
if nd == 1:
return X, index[0,:]
else:
return X, index
# order = numpy.array([ [order, order] ])
# if ~numpy.isnan( order[0][0] ) or ~numpy.isnan( order[0][1] ):
# X = numpy.ones((n, 1))
# index = numpy.zeros((2, 1))
# if ~numpy.isnan( order[0][0] ):
| 466 | 0 | 22 |
fb25ec637dd722abb244d9b33c2f496b443fee74 | 36 | py | Python | utils/db/__init__.py | NikolaySimakov/Shop-bot | c13d5a2b91d9524af156948ff0014ff5357c376c | [
"MIT"
] | 50 | 2020-09-27T13:27:02.000Z | 2022-03-28T13:11:33.000Z | utils/db/__init__.py | NikolaySimakov/Shop-bot | c13d5a2b91d9524af156948ff0014ff5357c376c | [
"MIT"
] | null | null | null | utils/db/__init__.py | NikolaySimakov/Shop-bot | c13d5a2b91d9524af156948ff0014ff5357c376c | [
"MIT"
] | 18 | 2021-02-06T16:54:50.000Z | 2022-03-25T07:49:37.000Z | from .storage import DatabaseManager | 36 | 36 | 0.888889 | from .storage import DatabaseManager | 0 | 0 | 0 |
daf47cb37e81ee6379fc73e1083c92143c9ef311 | 377 | py | Python | profile/compute.py | emilleishida/School2019 | d5141df58d3240ceb0037e8f084f60fc2ef9b4c1 | [
"MIT"
] | 14 | 2019-02-02T08:33:10.000Z | 2021-05-04T17:38:26.000Z | profile/compute.py | emilleishida/School2019 | d5141df58d3240ceb0037e8f084f60fc2ef9b4c1 | [
"MIT"
] | 10 | 2019-04-01T11:39:40.000Z | 2019-04-09T12:53:33.000Z | profile/compute.py | emilleishida/School2019 | d5141df58d3240ceb0037e8f084f60fc2ef9b4c1 | [
"MIT"
] | 23 | 2019-03-25T18:37:26.000Z | 2021-08-19T16:41:45.000Z |
if __name__ == "__main__":
main()
| 17.136364 | 40 | 0.633952 | def generate_data(size):
return [i ** 2 for i in range(size)]
def compute_result(data):
total = 0
for _ in range(10):
total += sum(data)
return total
def main():
data = generate_data(size=100_000)
result = compute_result(data)
data = generate_data(size=200_000)
result = compute_result(data)
if __name__ == "__main__":
main()
| 267 | 0 | 68 |
c087cb6cc61b4a2b2df255ea5c8e9c35642e6d36 | 824 | py | Python | locust_swarm/config.py | ryankanno/locust-swarm | bd652b7fb6fc2c0cb23ef2b6a7f0c5abd80973d7 | [
"MIT"
] | 26 | 2015-03-02T23:09:12.000Z | 2022-02-22T13:21:55.000Z | locust_swarm/config.py | ryankanno/locust-swarm | bd652b7fb6fc2c0cb23ef2b6a7f0c5abd80973d7 | [
"MIT"
] | 3 | 2015-02-09T12:41:38.000Z | 2015-09-03T15:59:11.000Z | locust_swarm/config.py | ryankanno/locust-swarm | bd652b7fb6fc2c0cb23ef2b6a7f0c5abd80973d7 | [
"MIT"
] | 15 | 2015-02-09T20:16:27.000Z | 2021-04-08T07:17:18.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import ConfigParser
from helpers import get_abs_path
DEFAULT_CFG_FILEPATH = 'locust-swarm.cfg'
DEFAULT_MASTER_ROLE_NAME = 'locust-master'
DEFAULT_SLAVE_ROLE_NAME = 'locust-slave'
DEFAULT_MASTER_BOOTSTRAP_DIR = './bootstrap-master'
DEFAULT_SLAVE_BOOTSTRAP_DIR = './bootstrap-slave'
DEFAULT_NUM_SLAVES = 5
DEFAULT_CUSTOM_TAG_NAME = 'MachineRole'
get_config = _parse
# vim: filetype=python
| 26.580645 | 78 | 0.723301 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import ConfigParser
from helpers import get_abs_path
DEFAULT_CFG_FILEPATH = 'locust-swarm.cfg'
DEFAULT_MASTER_ROLE_NAME = 'locust-master'
DEFAULT_SLAVE_ROLE_NAME = 'locust-slave'
DEFAULT_MASTER_BOOTSTRAP_DIR = './bootstrap-master'
DEFAULT_SLAVE_BOOTSTRAP_DIR = './bootstrap-slave'
DEFAULT_NUM_SLAVES = 5
DEFAULT_CUSTOM_TAG_NAME = 'MachineRole'
def _parse(path_to_config=DEFAULT_CFG_FILEPATH):
config = ConfigParser.SafeConfigParser()
config_path = get_abs_path(path_to_config)
try:
with open(config_path, 'r') as f:
config.readfp(f)
except IOError:
raise Exception("Unable to open locust-swarm configuration file @ {0}"
.format(config_path))
return config
get_config = _parse
# vim: filetype=python
| 362 | 0 | 23 |
b443bb6a343b457998597961b1e456c536f15615 | 358 | py | Python | Codes.python/P12/P12.py | hanzenglong/robot | fc686f751fc224b331bae2f1ee7b26b603c04634 | [
"MIT"
] | null | null | null | Codes.python/P12/P12.py | hanzenglong/robot | fc686f751fc224b331bae2f1ee7b26b603c04634 | [
"MIT"
] | null | null | null | Codes.python/P12/P12.py | hanzenglong/robot | fc686f751fc224b331bae2f1ee7b26b603c04634 | [
"MIT"
] | null | null | null | #-------by HYH -------#
import numpy as np
pCan=0.001
pNon=0.999
pPosCan=0.8
pPosNon=0.1
z='positive'
if 'positive'==z:
p=[pPosCan*pCan,pPosNon*pNon]
else:
p=[(1-pPosCan)*pCan,(1-pPosNon)*pNon]
p=p/np.sum(p)
print('The probability of having cancer given the %s test:\n'% z,'\n',p[0])
print('The probability of cancer free given the %s test:\n'%z,'\n',p[1]) | 25.571429 | 75 | 0.653631 | #-------by HYH -------#
import numpy as np
pCan=0.001
pNon=0.999
pPosCan=0.8
pPosNon=0.1
z='positive'
if 'positive'==z:
p=[pPosCan*pCan,pPosNon*pNon]
else:
p=[(1-pPosCan)*pCan,(1-pPosNon)*pNon]
p=p/np.sum(p)
print('The probability of having cancer given the %s test:\n'% z,'\n',p[0])
print('The probability of cancer free given the %s test:\n'%z,'\n',p[1]) | 0 | 0 | 0 |
74b5b86bbc8e2112482a8c3593af3294d721f966 | 1,104 | py | Python | nit/__main__.py | udasitharani/name-initials-tile-generator | 31fb8722cf1084e0827061f47baadf3ec034184d | [
"MIT"
] | null | null | null | nit/__main__.py | udasitharani/name-initials-tile-generator | 31fb8722cf1084e0827061f47baadf3ec034184d | [
"MIT"
] | null | null | null | nit/__main__.py | udasitharani/name-initials-tile-generator | 31fb8722cf1084e0827061f47baadf3ec034184d | [
"MIT"
] | null | null | null | import argparse, os, sys
from nit import generate_tile, generate_tile_from_initials, generate_initials_from_string
my_parser = argparse.ArgumentParser(prog="name initials tile generator",
usage="$(prog)s [options] name save_path",
description="Generate a name initials tile icon given name")
my_parser.add_argument("Name", metavar="name", type=str, help="Name to generate initials.")
my_parser.add_argument("Save_Path", metavar="save_path", type=str, help="Path where the generated tile should be saved.")
my_parser.add_argument("-bg", "--bg_color", type=str, help="Background color to be used in tile.")
my_parser.add_argument("-fg", "--fg_color", type=str, help="Color of the text to be used in tile.")
args = my_parser.parse_args()
if not os.path.isdir(os.path.split(args.Save_Path)[0]):
print("The path does not exist.")
sys.exit()
kwargs = dict(text=args.Name, save_path=args.Save_Path, bgColor=args.bg_color, fgColor=args.fg_color)
generate_tile_from_initials(**{k: v for k, v in kwargs.items() if v is not None})
| 55.2 | 121 | 0.707428 | import argparse, os, sys
from nit import generate_tile, generate_tile_from_initials, generate_initials_from_string
my_parser = argparse.ArgumentParser(prog="name initials tile generator",
usage="$(prog)s [options] name save_path",
description="Generate a name initials tile icon given name")
my_parser.add_argument("Name", metavar="name", type=str, help="Name to generate initials.")
my_parser.add_argument("Save_Path", metavar="save_path", type=str, help="Path where the generated tile should be saved.")
my_parser.add_argument("-bg", "--bg_color", type=str, help="Background color to be used in tile.")
my_parser.add_argument("-fg", "--fg_color", type=str, help="Color of the text to be used in tile.")
args = my_parser.parse_args()
if not os.path.isdir(os.path.split(args.Save_Path)[0]):
print("The path does not exist.")
sys.exit()
kwargs = dict(text=args.Name, save_path=args.Save_Path, bgColor=args.bg_color, fgColor=args.fg_color)
generate_tile_from_initials(**{k: v for k, v in kwargs.items() if v is not None})
| 0 | 0 | 0 |
f72c69bd895eea56254b314d4418757ffc5e1cbe | 1,266 | py | Python | Scripts/Legacy/line1prep.py | rhong3/CPTAC-UCEC | ec83fbee234b5ad3df6524cdd960b5f0f3da9ea9 | [
"MIT"
] | 4 | 2019-01-04T21:11:03.000Z | 2020-12-11T16:56:15.000Z | Scripts/Legacy/line1prep.py | rhong3/CPTAC-UCEC | ec83fbee234b5ad3df6524cdd960b5f0f3da9ea9 | [
"MIT"
] | null | null | null | Scripts/Legacy/line1prep.py | rhong3/CPTAC-UCEC | ec83fbee234b5ad3df6524cdd960b5f0f3da9ea9 | [
"MIT"
] | null | null | null | import pandas as pd
labels = pd.read_csv('../Fusion_dummy_His_MUT_joined.csv', header=0)
# line = pd.read_csv('../../Line1.csv', header=0)
line = pd.read_csv('../EC_cyclin_expression.csv', header=0)
# line['name'] = line['Proteomics_Participant_ID']
# line = line.drop(['Proteomics_Participant_ID', 'Histologic_type', 'Genomics_subtype', 'TP53_TP53'], axis=1)
# labels = labels.join(line.set_index('name'), on='name')
# labels['LINE1_ORF1p'] = (labels['LINE1_ORF1p'].dropna() > 0).astype(int)
# labels['RAD50-S635'] = (labels['RAD50-S635'].dropna() > 0).astype(int)
# labels['NBN-S343'] = (labels['NBN-S343'].dropna() > 0).astype(int)
# labels['ATR-T1989'] = (labels['ATR-T1989'].dropna() > 0).astype(int)
# labels['ATM-S1981'] = (labels['ATM-S1981'].dropna() > 0).astype(int)
line['name'] = line['Sample_ID'].str.slice(start=0, stop=9)
line = line.drop(['Sample_ID', 'Genomic_subtype'], axis=1)
labels = labels.join(line.set_index('name'), on='name')
labels['CCND1'] = (labels['CCND1'].dropna() > 0).astype(int)
labels['CCNE1'] = (labels['CCNE1'].dropna() > 0).astype(int)
labels['CCNA2'] = (labels['CCNA2'].dropna() > 0).astype(int)
labels['CCNB1'] = (labels['CCNB1'].dropna() > 0).astype(int)
labels.to_csv('../Fusion_dummy_His_MUT_joined.csv', index=False)
| 48.692308 | 109 | 0.671406 | import pandas as pd
labels = pd.read_csv('../Fusion_dummy_His_MUT_joined.csv', header=0)
# line = pd.read_csv('../../Line1.csv', header=0)
line = pd.read_csv('../EC_cyclin_expression.csv', header=0)
# line['name'] = line['Proteomics_Participant_ID']
# line = line.drop(['Proteomics_Participant_ID', 'Histologic_type', 'Genomics_subtype', 'TP53_TP53'], axis=1)
# labels = labels.join(line.set_index('name'), on='name')
# labels['LINE1_ORF1p'] = (labels['LINE1_ORF1p'].dropna() > 0).astype(int)
# labels['RAD50-S635'] = (labels['RAD50-S635'].dropna() > 0).astype(int)
# labels['NBN-S343'] = (labels['NBN-S343'].dropna() > 0).astype(int)
# labels['ATR-T1989'] = (labels['ATR-T1989'].dropna() > 0).astype(int)
# labels['ATM-S1981'] = (labels['ATM-S1981'].dropna() > 0).astype(int)
line['name'] = line['Sample_ID'].str.slice(start=0, stop=9)
line = line.drop(['Sample_ID', 'Genomic_subtype'], axis=1)
labels = labels.join(line.set_index('name'), on='name')
labels['CCND1'] = (labels['CCND1'].dropna() > 0).astype(int)
labels['CCNE1'] = (labels['CCNE1'].dropna() > 0).astype(int)
labels['CCNA2'] = (labels['CCNA2'].dropna() > 0).astype(int)
labels['CCNB1'] = (labels['CCNB1'].dropna() > 0).astype(int)
labels.to_csv('../Fusion_dummy_His_MUT_joined.csv', index=False)
| 0 | 0 | 0 |
d3859c10913045fb875d56e28ad9df02ba42ab36 | 1,043 | py | Python | CursoEmVideo/Python/Mundo 2/ex039.py | GabriellyBailon/Cursos | 0fe82881638a48dabbfd5963db39d2a0b7d7e4c3 | [
"MIT"
] | null | null | null | CursoEmVideo/Python/Mundo 2/ex039.py | GabriellyBailon/Cursos | 0fe82881638a48dabbfd5963db39d2a0b7d7e4c3 | [
"MIT"
] | null | null | null | CursoEmVideo/Python/Mundo 2/ex039.py | GabriellyBailon/Cursos | 0fe82881638a48dabbfd5963db39d2a0b7d7e4c3 | [
"MIT"
] | null | null | null | #Ler o ano de nascimento de um jovem e verificar se ele está na hora de alistar, se está muito cedo
#para isso ou já passou do momento certo
from datetime import date
nascimento = int(input("Digite o ano do seu nascimento: "));
sexo = str(input("Você é homem ou mulher? Digite H para homem e M se for mulher: ")).upper().strip();
atual = date.today().year;
idade = (atual - nascimento);
if sexo == "H":
if idade < 18:
print(f"Você ainda tem \033[1:38m{idade}\033[m anos, ainda não está na hora de se alistar. Faltam \033[1:39m{18-idade}\033[m anos.");
print(f"Você deve se alistar em {nascimento + 18}")
elif idade == 18:
print(f"Você já tem \033[1:35m{idade}\033[m anos, chegou a hora! Aliste-se!");
elif idade > 18:
print(f"Você tem \033[1:34m{idade}\033[m anos, seu alistamento foi em \033[1:31m{nascimento + 18}\033[m. Verifique sua situação e caso necessário, regularize-a o mais rápido possível.");
else:
print("Como você é uma mulher, não precisa se alistar.");
| 47.409091 | 195 | 0.662512 | #Ler o ano de nascimento de um jovem e verificar se ele está na hora de alistar, se está muito cedo
#para isso ou já passou do momento certo
from datetime import date
nascimento = int(input("Digite o ano do seu nascimento: "));
sexo = str(input("Você é homem ou mulher? Digite H para homem e M se for mulher: ")).upper().strip();
atual = date.today().year;
idade = (atual - nascimento);
if sexo == "H":
if idade < 18:
print(f"Você ainda tem \033[1:38m{idade}\033[m anos, ainda não está na hora de se alistar. Faltam \033[1:39m{18-idade}\033[m anos.");
print(f"Você deve se alistar em {nascimento + 18}")
elif idade == 18:
print(f"Você já tem \033[1:35m{idade}\033[m anos, chegou a hora! Aliste-se!");
elif idade > 18:
print(f"Você tem \033[1:34m{idade}\033[m anos, seu alistamento foi em \033[1:31m{nascimento + 18}\033[m. Verifique sua situação e caso necessário, regularize-a o mais rápido possível.");
else:
print("Como você é uma mulher, não precisa se alistar.");
| 0 | 0 | 0 |
b6fdcee777e9b0d18d33cf3d235811c1799c4efa | 4,549 | py | Python | tests/utils/test_sal_on_coco_dets.py | schencej/xaitk-saliency | d51dcc32e15118839133f7023e4f8e8cd65824af | [
"BSD-3-Clause"
] | null | null | null | tests/utils/test_sal_on_coco_dets.py | schencej/xaitk-saliency | d51dcc32e15118839133f7023e4f8e8cd65824af | [
"BSD-3-Clause"
] | null | null | null | tests/utils/test_sal_on_coco_dets.py | schencej/xaitk-saliency | d51dcc32e15118839133f7023e4f8e8cd65824af | [
"BSD-3-Clause"
] | null | null | null | from click.testing import CliRunner
import os
import py
import pytest
import builtins
import sys
from typing import Any
from tests import DATA_DIR
from xaitk_saliency.utils.bin.sal_on_coco_dets import sal_on_coco_dets
from importlib.util import find_spec
deps = ['kwcoco']
specs = [find_spec(dep) for dep in deps]
is_usable = all([spec is not None for spec in specs])
dets_file = os.path.join(DATA_DIR, 'test_dets.json')
config_file = os.path.join(DATA_DIR, 'config.json')
class TestSalOnCocoDetsNotUsable:
"""
These tests make use of the `tmpdir` fixture from `pytest`. Find more
information here: https://docs.pytest.org/en/6.2.x/tmpdir.html
"""
def test_warning(self, tmpdir: py.path.local) -> None:
"""
Test that proper warning is displayed when required dependencies are
not installed.
"""
output_dir = tmpdir.join('out')
runner = CliRunner()
if is_usable:
real_import = builtins.__import__
# mock import function that acts as if kwcoco is not installed
# monkeypatch import function
builtins.__import__ = mock_import
del sys.modules['xaitk_saliency.utils.bin.sal_on_coco_dets']
from xaitk_saliency.utils.bin.sal_on_coco_dets import sal_on_coco_dets as fail_sal_on_coco_dets
result = runner.invoke(fail_sal_on_coco_dets, [str(dets_file), str(output_dir), str(config_file)])
else:
result = runner.invoke(sal_on_coco_dets, [str(dets_file), str(output_dir), str(config_file)])
assert result.output == "This tool requires additional dependencies, please install 'xaitk-saliency[tools]'\n"
assert not output_dir.check(dir=1)
@pytest.mark.skipif(not is_usable, reason="Extra 'xaitk-saliency[tools]' not installed.")
class TestSalOnCocoDets:
"""
These tests make use of the `tmpdir` fixture from `pytest`. Find more
information here: https://docs.pytest.org/en/6.2.x/tmpdir.html
"""
def test_coco_sal_gen(self, tmpdir: py.path.local) -> None:
"""
Test saliency map generation with RandomDetector, RISEGrid, and
DRISEScoring.
"""
output_dir = tmpdir.join('out')
runner = CliRunner()
runner.invoke(sal_on_coco_dets, [str(dets_file), str(output_dir), str(config_file), "-v"])
# expected created directories for image saliency maps
img_dirs = [output_dir.join(d) for d in ["test_image1", "test_image2"]]
# detection ids that belong to each image
img_dets = [[1, 2, 3], [4, 5]]
assert sorted(output_dir.listdir()) == sorted(img_dirs)
for img_dir, det_ids in zip(img_dirs, img_dets):
map_files = [img_dir.join(f"det_{det_id}.jpeg") for det_id in det_ids]
assert sorted(img_dir.listdir()) == sorted(map_files)
def test_coco_sal_gen_img_overlay(self, tmpdir: py.path.local) -> None:
"""
Test saliency map generation with RandomDetector, RISEGrid, and
DRISEScoring with the overlay image option.
"""
output_dir = tmpdir.join('out')
runner = CliRunner()
runner.invoke(sal_on_coco_dets, [str(dets_file), str(output_dir), str(config_file), "--overlay-image"])
# expected created directories for image saliency maps
img_dirs = [output_dir.join(d) for d in ["test_image1", "test_image2"]]
# detection ids that belong to each image
img_dets = [[1, 2, 3], [4, 5]]
assert sorted(output_dir.listdir()) == sorted(img_dirs)
for img_dir, det_ids in zip(img_dirs, img_dets):
map_files = [img_dir.join(f"det_{det_id}.jpeg") for det_id in det_ids]
assert sorted(img_dir.listdir()) == sorted(map_files)
def test_config_gen(self, tmpdir: py.path.local) -> None:
"""
Test the generate configuration file option.
"""
output_dir = tmpdir.join('out')
output_config = tmpdir.join('gen_conf.json')
runner = CliRunner()
runner.invoke(sal_on_coco_dets, [str(dets_file), str(output_dir), str(config_file), "-g", str(output_config)])
# check that config file was created
assert output_config.check(file=1)
# check that no output was generated
assert not output_dir.check(dir=1)
| 35.818898 | 118 | 0.65377 | from click.testing import CliRunner
import os
import py
import pytest
import builtins
import sys
from typing import Any
from tests import DATA_DIR
from xaitk_saliency.utils.bin.sal_on_coco_dets import sal_on_coco_dets
from importlib.util import find_spec
deps = ['kwcoco']
specs = [find_spec(dep) for dep in deps]
is_usable = all([spec is not None for spec in specs])
dets_file = os.path.join(DATA_DIR, 'test_dets.json')
config_file = os.path.join(DATA_DIR, 'config.json')
class TestSalOnCocoDetsNotUsable:
"""
These tests make use of the `tmpdir` fixture from `pytest`. Find more
information here: https://docs.pytest.org/en/6.2.x/tmpdir.html
"""
def test_warning(self, tmpdir: py.path.local) -> None:
"""
Test that proper warning is displayed when required dependencies are
not installed.
"""
output_dir = tmpdir.join('out')
runner = CliRunner()
if is_usable:
real_import = builtins.__import__
# mock import function that acts as if kwcoco is not installed
def mock_import(name: str, *args: Any, **kw: Any) -> None:
if name == 'kwcoco':
raise ModuleNotFoundError
return real_import(name, *args, **kw)
# monkeypatch import function
builtins.__import__ = mock_import
del sys.modules['xaitk_saliency.utils.bin.sal_on_coco_dets']
from xaitk_saliency.utils.bin.sal_on_coco_dets import sal_on_coco_dets as fail_sal_on_coco_dets
result = runner.invoke(fail_sal_on_coco_dets, [str(dets_file), str(output_dir), str(config_file)])
else:
result = runner.invoke(sal_on_coco_dets, [str(dets_file), str(output_dir), str(config_file)])
assert result.output == "This tool requires additional dependencies, please install 'xaitk-saliency[tools]'\n"
assert not output_dir.check(dir=1)
@pytest.mark.skipif(not is_usable, reason="Extra 'xaitk-saliency[tools]' not installed.")
class TestSalOnCocoDets:
"""
These tests make use of the `tmpdir` fixture from `pytest`. Find more
information here: https://docs.pytest.org/en/6.2.x/tmpdir.html
"""
def test_coco_sal_gen(self, tmpdir: py.path.local) -> None:
"""
Test saliency map generation with RandomDetector, RISEGrid, and
DRISEScoring.
"""
output_dir = tmpdir.join('out')
runner = CliRunner()
runner.invoke(sal_on_coco_dets, [str(dets_file), str(output_dir), str(config_file), "-v"])
# expected created directories for image saliency maps
img_dirs = [output_dir.join(d) for d in ["test_image1", "test_image2"]]
# detection ids that belong to each image
img_dets = [[1, 2, 3], [4, 5]]
assert sorted(output_dir.listdir()) == sorted(img_dirs)
for img_dir, det_ids in zip(img_dirs, img_dets):
map_files = [img_dir.join(f"det_{det_id}.jpeg") for det_id in det_ids]
assert sorted(img_dir.listdir()) == sorted(map_files)
def test_coco_sal_gen_img_overlay(self, tmpdir: py.path.local) -> None:
"""
Test saliency map generation with RandomDetector, RISEGrid, and
DRISEScoring with the overlay image option.
"""
output_dir = tmpdir.join('out')
runner = CliRunner()
runner.invoke(sal_on_coco_dets, [str(dets_file), str(output_dir), str(config_file), "--overlay-image"])
# expected created directories for image saliency maps
img_dirs = [output_dir.join(d) for d in ["test_image1", "test_image2"]]
# detection ids that belong to each image
img_dets = [[1, 2, 3], [4, 5]]
assert sorted(output_dir.listdir()) == sorted(img_dirs)
for img_dir, det_ids in zip(img_dirs, img_dets):
map_files = [img_dir.join(f"det_{det_id}.jpeg") for det_id in det_ids]
assert sorted(img_dir.listdir()) == sorted(map_files)
def test_config_gen(self, tmpdir: py.path.local) -> None:
"""
Test the generate configuration file option.
"""
output_dir = tmpdir.join('out')
output_config = tmpdir.join('gen_conf.json')
runner = CliRunner()
runner.invoke(sal_on_coco_dets, [str(dets_file), str(output_dir), str(config_file), "-g", str(output_config)])
# check that config file was created
assert output_config.check(file=1)
# check that no output was generated
assert not output_dir.check(dir=1)
| 174 | 0 | 34 |
c36de79193b1d457ea4dabbbff54cc86fe2825ee | 1,584 | py | Python | setup.py | JohnGoertz/Gumbi | 7a7df9bf97bf10cdf5dc8af36026dba578e161c9 | [
"Apache-2.0"
] | 34 | 2021-11-29T11:40:52.000Z | 2022-03-10T09:08:59.000Z | setup.py | JohnGoertz/Gumbi | 7a7df9bf97bf10cdf5dc8af36026dba578e161c9 | [
"Apache-2.0"
] | 13 | 2021-12-30T17:07:34.000Z | 2022-02-18T18:46:37.000Z | setup.py | JohnGoertz/Gumbi | 7a7df9bf97bf10cdf5dc8af36026dba578e161c9 | [
"Apache-2.0"
] | null | null | null | from setuptools import find_packages, setup
import pathlib as pl
DISTNAME = "gumbi"
DESCRIPTION = "Gaussian Process Model Building Interface"
AUTHOR = "John Goertz"
AUTHOR_EMAIL = ""
URL = "https://github.com/JohnGoertz/Gumbi"
LICENSE = "Apache 2.0"
PROJECT_ROOT = pl.Path(__file__).resolve().parent
REQUIREMENTS = PROJECT_ROOT / "requirements.txt"
README = PROJECT_ROOT / "README.md"
VERSION = PROJECT_ROOT / "VERSION"
with open(REQUIREMENTS) as f:
install_reqs = f.read().splitlines()
with open(README, 'r') as fh:
long_description = fh.read()
with open(VERSION, encoding="utf-8") as f:
version = f.read().strip()
classifiers = [
"Development Status :: 4 - Beta",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Mathematics",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
]
setup(
name=DISTNAME,
version=version,
author="John Goertz",
author_email="",
description=DESCRIPTION,
long_description_content_type="text/markdown",
long_description=long_description,
url=URL,
license=LICENSE,
python_requires='>=3.7',
packages=find_packages(),
include_package_data=True,
install_requires=install_reqs,
classifiers=classifiers,
#keywords=['python'],
)
| 28.8 | 58 | 0.668561 | from setuptools import find_packages, setup
import pathlib as pl
DISTNAME = "gumbi"
DESCRIPTION = "Gaussian Process Model Building Interface"
AUTHOR = "John Goertz"
AUTHOR_EMAIL = ""
URL = "https://github.com/JohnGoertz/Gumbi"
LICENSE = "Apache 2.0"
PROJECT_ROOT = pl.Path(__file__).resolve().parent
REQUIREMENTS = PROJECT_ROOT / "requirements.txt"
README = PROJECT_ROOT / "README.md"
VERSION = PROJECT_ROOT / "VERSION"
with open(REQUIREMENTS) as f:
install_reqs = f.read().splitlines()
with open(README, 'r') as fh:
long_description = fh.read()
with open(VERSION, encoding="utf-8") as f:
version = f.read().strip()
classifiers = [
"Development Status :: 4 - Beta",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Mathematics",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
]
setup(
name=DISTNAME,
version=version,
author="John Goertz",
author_email="",
description=DESCRIPTION,
long_description_content_type="text/markdown",
long_description=long_description,
url=URL,
license=LICENSE,
python_requires='>=3.7',
packages=find_packages(),
include_package_data=True,
install_requires=install_reqs,
classifiers=classifiers,
#keywords=['python'],
)
| 0 | 0 | 0 |
ba00ea527fb88c2eeb702875e541d67aadb66a24 | 14,056 | py | Python | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_mpls_te_datatypes.py | bopopescu/ACI | dd717bc74739eeed4747b3ea9e36b239580df5e1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_mpls_te_datatypes.py | bopopescu/ACI | dd717bc74739eeed4747b3ea9e36b239580df5e1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_mpls_te_datatypes.py | bopopescu/ACI | dd717bc74739eeed4747b3ea9e36b239580df5e1 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-07-22T04:04:44.000Z | 2020-07-22T04:04:44.000Z | """ Cisco_IOS_XR_mpls_te_datatypes
This module contains a collection of generally useful
derived YANG data types.
Copyright (c) 2013\-2017 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class BfdReversePath(Enum):
"""
BfdReversePath (Enum Class)
Bfd reverse path
.. data:: bfd_reverse_path_binding_label = 1
BindingLabel
"""
bfd_reverse_path_binding_label = Enum.YLeaf(1, "bfd-reverse-path-binding-label")
class Ctype(Enum):
"""
Ctype (Enum Class)
Ctype
.. data:: ctype_null = 0
CTYPE NULL
.. data:: ctype_ipv4 = 1
CTYPE IPV4
.. data:: ctype_ipv4_p2p_tunnel = 7
CTYPE IPV4 P2P TUNNEL
.. data:: ctype_ipv6_p2p_tunnel = 8
CTYPE IPV6 P2P TUNNEL
.. data:: ctype_ipv4_uni = 9
CTYPE IPV4 UNI
.. data:: ctype_ipv4_p2mp_tunnel = 13
CTYPE IPV4 P2MP TUNNEL
.. data:: ctype_ipv6_p2mp_tunnel = 14
CTYPE IPV6 P2MP TUNNEL
"""
ctype_null = Enum.YLeaf(0, "ctype-null")
ctype_ipv4 = Enum.YLeaf(1, "ctype-ipv4")
ctype_ipv4_p2p_tunnel = Enum.YLeaf(7, "ctype-ipv4-p2p-tunnel")
ctype_ipv6_p2p_tunnel = Enum.YLeaf(8, "ctype-ipv6-p2p-tunnel")
ctype_ipv4_uni = Enum.YLeaf(9, "ctype-ipv4-uni")
ctype_ipv4_p2mp_tunnel = Enum.YLeaf(13, "ctype-ipv4-p2mp-tunnel")
ctype_ipv6_p2mp_tunnel = Enum.YLeaf(14, "ctype-ipv6-p2mp-tunnel")
class MplsTeAffinityValue(Enum):
"""
MplsTeAffinityValue (Enum Class)
Mpls te affinity value
.. data:: hex_value = 1
Affinity value in Hex number
.. data:: bit_position = 2
Affinity value by Bit-Position
"""
hex_value = Enum.YLeaf(1, "hex-value")
bit_position = Enum.YLeaf(2, "bit-position")
class MplsTeAttrSet(Enum):
"""
MplsTeAttrSet (Enum Class)
Mpls te attr set
.. data:: not_used = 0
Not used
.. data:: static = 1
Static
.. data:: lsp = 2
LSP
.. data:: unassigned = 3
Unassigned
.. data:: auto_backup = 4
Auto backup
.. data:: auto_mesh = 5
Auto mesh
.. data:: xro = 6
XRO
.. data:: p2mp_te = 7
P2MP TE
.. data:: otn_pp = 8
OTN Path Protection
.. data:: p2p_te = 9
P2P TE
"""
not_used = Enum.YLeaf(0, "not-used")
static = Enum.YLeaf(1, "static")
lsp = Enum.YLeaf(2, "lsp")
unassigned = Enum.YLeaf(3, "unassigned")
auto_backup = Enum.YLeaf(4, "auto-backup")
auto_mesh = Enum.YLeaf(5, "auto-mesh")
xro = Enum.YLeaf(6, "xro")
p2mp_te = Enum.YLeaf(7, "p2mp-te")
otn_pp = Enum.YLeaf(8, "otn-pp")
p2p_te = Enum.YLeaf(9, "p2p-te")
class MplsTeAutorouteMetric(Enum):
"""
MplsTeAutorouteMetric (Enum Class)
Mpls te autoroute metric
.. data:: relative = 1
Relative
.. data:: absolute = 2
Absolute
.. data:: constant = 3
Constant
"""
relative = Enum.YLeaf(1, "relative")
absolute = Enum.YLeaf(2, "absolute")
constant = Enum.YLeaf(3, "constant")
class MplsTeBackupBandwidthClass(Enum):
"""
MplsTeBackupBandwidthClass (Enum Class)
Mpls te backup bandwidth class
.. data:: class0 = 0
Class 0
.. data:: class1 = 1
Class 1
.. data:: any_class = 9
Any Class
"""
class0 = Enum.YLeaf(0, "class0")
class1 = Enum.YLeaf(1, "class1")
any_class = Enum.YLeaf(9, "any-class")
class MplsTeBackupBandwidthPool(Enum):
"""
MplsTeBackupBandwidthPool (Enum Class)
Mpls te backup bandwidth pool
.. data:: any_pool = 1
Any Pool
.. data:: global_pool = 2
Global Pool
.. data:: sub_pool = 4
Sub Pool
"""
any_pool = Enum.YLeaf(1, "any-pool")
global_pool = Enum.YLeaf(2, "global-pool")
sub_pool = Enum.YLeaf(4, "sub-pool")
class MplsTeBandwidthDste(Enum):
"""
MplsTeBandwidthDste (Enum Class)
Mpls te bandwidth dste
.. data:: standard_dste = 0
IETF-Standard DSTE
.. data:: pre_standard_dste = 1
Pre-Standard DSTE
"""
standard_dste = Enum.YLeaf(0, "standard-dste")
pre_standard_dste = Enum.YLeaf(1, "pre-standard-dste")
class MplsTeBandwidthLimit(Enum):
"""
MplsTeBandwidthLimit (Enum Class)
Mpls te bandwidth limit
.. data:: unlimited = 64
Unlimited
.. data:: limited = 128
Limited
"""
unlimited = Enum.YLeaf(64, "unlimited")
limited = Enum.YLeaf(128, "limited")
class MplsTeBandwidthPool(Enum):
"""
MplsTeBandwidthPool (Enum Class)
Mpls te bandwidth pool
.. data:: any_pool = 0
Any Pool
.. data:: sub_pool = 1
Sub Pool
"""
any_pool = Enum.YLeaf(0, "any-pool")
sub_pool = Enum.YLeaf(1, "sub-pool")
class MplsTeBfdSessionDownAction(Enum):
"""
MplsTeBfdSessionDownAction (Enum Class)
Mpls te bfd session down action
.. data:: re_setup = 1
Tear down and resetup
"""
re_setup = Enum.YLeaf(1, "re-setup")
class MplsTeIgpProtocol(Enum):
"""
MplsTeIgpProtocol (Enum Class)
Mpls te igp protocol
.. data:: none = 0
Not set
.. data:: isis = 1
IS IS
.. data:: ospf = 2
OSPF
"""
none = Enum.YLeaf(0, "none")
isis = Enum.YLeaf(1, "isis")
ospf = Enum.YLeaf(2, "ospf")
class MplsTeLogFrrProtection(Enum):
"""
MplsTeLogFrrProtection (Enum Class)
Mpls te log frr protection
.. data:: frr_active_primary = 1
Track only FRR active on primary LSP
.. data:: backup = 256
backup tunnel
.. data:: frr_ready_primary = 512
Track only FRR ready on primary LSP
.. data:: primary = 513
primary LSP
.. data:: all = 769
all
"""
frr_active_primary = Enum.YLeaf(1, "frr-active-primary")
backup = Enum.YLeaf(256, "backup")
frr_ready_primary = Enum.YLeaf(512, "frr-ready-primary")
primary = Enum.YLeaf(513, "primary")
all = Enum.YLeaf(769, "all")
class MplsTeOtnApsProtection(Enum):
"""
MplsTeOtnApsProtection (Enum Class)
Mpls te otn aps protection
.. data:: Y_1plus1_unidir_no_aps = 4
1PLUS1 UNIDIR NO APS
.. data:: Y_1plus1_unidir_aps = 8
1PLUS1 UNIDIR APS
.. data:: Y_1plus1_bdir_aps = 16
1PLUS1 BIDIR APS
"""
Y_1plus1_unidir_no_aps = Enum.YLeaf(4, "1plus1-unidir-no-aps")
Y_1plus1_unidir_aps = Enum.YLeaf(8, "1plus1-unidir-aps")
Y_1plus1_bdir_aps = Enum.YLeaf(16, "1plus1-bdir-aps")
class MplsTeOtnApsProtectionMode(Enum):
"""
MplsTeOtnApsProtectionMode (Enum Class)
Mpls te otn aps protection mode
.. data:: revertive = 1
Revertive
.. data:: non_revertive = 2
Non Revertive
"""
revertive = Enum.YLeaf(1, "revertive")
non_revertive = Enum.YLeaf(2, "non-revertive")
class MplsTeOtnApsRestorationStyle(Enum):
"""
MplsTeOtnApsRestorationStyle (Enum Class)
Mpls te otn aps restoration style
.. data:: keep_failed_lsp = 1
Keep Failed Lsp
.. data:: delete_failed_lsp = 2
Delete Failed Lsp
"""
keep_failed_lsp = Enum.YLeaf(1, "keep-failed-lsp")
delete_failed_lsp = Enum.YLeaf(2, "delete-failed-lsp")
class MplsTeOtnSncMode(Enum):
"""
MplsTeOtnSncMode (Enum Class)
Mpls te otn snc mode
.. data:: snc_n = 1
SNC N
.. data:: snc_i = 2
SNC I
.. data:: snc_s = 3
SNC S
"""
snc_n = Enum.YLeaf(1, "snc-n")
snc_i = Enum.YLeaf(2, "snc-i")
snc_s = Enum.YLeaf(3, "snc-s")
class MplsTePathDiversityConformance(Enum):
"""
MplsTePathDiversityConformance (Enum Class)
Mpls te path diversity conformance
.. data:: strict = 0
Strict
.. data:: best_effort = 1
Best effort
"""
strict = Enum.YLeaf(0, "strict")
best_effort = Enum.YLeaf(1, "best-effort")
class MplsTePathOption(Enum):
"""
MplsTePathOption (Enum Class)
Mpls te path option
.. data:: not_set = 0
Not Set
.. data:: dynamic = 1
Dynamic
.. data:: explicit_name = 3
Explicit, identified by name
.. data:: explicit_number = 4
Explicit, identified by number
.. data:: no_ero = 5
No ERO
.. data:: sr = 6
Segment routing
"""
not_set = Enum.YLeaf(0, "not-set")
dynamic = Enum.YLeaf(1, "dynamic")
explicit_name = Enum.YLeaf(3, "explicit-name")
explicit_number = Enum.YLeaf(4, "explicit-number")
no_ero = Enum.YLeaf(5, "no-ero")
sr = Enum.YLeaf(6, "sr")
class MplsTePathOptionProperty(Enum):
"""
MplsTePathOptionProperty (Enum Class)
Mpls te path option property
.. data:: none = 0
No property
.. data:: lockdown = 1
Path is not a canditate forreoptimization
.. data:: verbatim = 4
Explicit path does not require topology
database
.. data:: pce = 8
Dynamic path found by PCE server
.. data:: segment_routing = 16
Segment Routing path
"""
none = Enum.YLeaf(0, "none")
lockdown = Enum.YLeaf(1, "lockdown")
verbatim = Enum.YLeaf(4, "verbatim")
pce = Enum.YLeaf(8, "pce")
segment_routing = Enum.YLeaf(16, "segment-routing")
class MplsTePathOptionProtection(Enum):
"""
MplsTePathOptionProtection (Enum Class)
Mpls te path option protection
.. data:: active = 0
Active path
.. data:: protecting = 1
Protecting Path
"""
active = Enum.YLeaf(0, "active")
protecting = Enum.YLeaf(1, "protecting")
class MplsTePathSelectionInvalidationTimerExpire(Enum):
"""
MplsTePathSelectionInvalidationTimerExpire (Enum Class)
Mpls te path selection invalidation timer expire
.. data:: tunnel_action_tear = 1
Tear down tunnel.
.. data:: tunnel_action_drop = 2
Drop tunnel traffic.
"""
tunnel_action_tear = Enum.YLeaf(1, "tunnel-action-tear")
tunnel_action_drop = Enum.YLeaf(2, "tunnel-action-drop")
class MplsTePathSelectionMetric(Enum):
"""
MplsTePathSelectionMetric (Enum Class)
Mpls te path selection metric
.. data:: igp = 1
IGP Metric
.. data:: te = 2
TE Metric
.. data:: delay = 4
DELAY Metric
"""
igp = Enum.YLeaf(1, "igp")
te = Enum.YLeaf(2, "te")
delay = Enum.YLeaf(4, "delay")
class MplsTePathSelectionSegmentRoutingAdjacencyProtection(Enum):
"""
MplsTePathSelectionSegmentRoutingAdjacencyProtection (Enum Class)
Mpls te path selection segment routing adjacency
protection
.. data:: not_set = 0
Any segment can be used in a path.
.. data:: adj_unprotected = 1
Only unprotected adjacency segments can be used
in a path.
.. data:: adj_protected = 2
Only protected adjacency segments can be used
in a path.
"""
not_set = Enum.YLeaf(0, "not-set")
adj_unprotected = Enum.YLeaf(1, "adj-unprotected")
adj_protected = Enum.YLeaf(2, "adj-protected")
class MplsTePathSelectionTiebreaker(Enum):
"""
MplsTePathSelectionTiebreaker (Enum Class)
Mpls te path selection tiebreaker
.. data:: min_fill = 1
Prefer the path with the least-utilized links
.. data:: max_fill = 2
Prefer the path with the most-utilized links
.. data:: random = 3
Prefer a path with links utilized randomly
"""
min_fill = Enum.YLeaf(1, "min-fill")
max_fill = Enum.YLeaf(2, "max-fill")
random = Enum.YLeaf(3, "random")
class MplsTeSigNameOption(Enum):
"""
MplsTeSigNameOption (Enum Class)
Mpls te sig name option
.. data:: none = 0
None
.. data:: address = 1
Address
.. data:: name = 2
Name
"""
none = Enum.YLeaf(0, "none")
address = Enum.YLeaf(1, "address")
name = Enum.YLeaf(2, "name")
class MplsTeSwitchingCap(Enum):
"""
MplsTeSwitchingCap (Enum Class)
Mpls te switching cap
.. data:: psc1 = 1
PSC1
.. data:: lsc = 150
LSC
.. data:: fsc = 200
FSC
"""
psc1 = Enum.YLeaf(1, "psc1")
lsc = Enum.YLeaf(150, "lsc")
fsc = Enum.YLeaf(200, "fsc")
class MplsTeTunnelAffinity(Enum):
"""
MplsTeTunnelAffinity (Enum Class)
Mpls te tunnel affinity
.. data:: include = 1
Include Affinity
.. data:: include_strict = 2
Strictly Include Affinity
.. data:: exclude = 3
Exclude Affinity
.. data:: exclude_all = 4
Exclude All Affinities
.. data:: ignore = 5
Ignore Affinity
"""
include = Enum.YLeaf(1, "include")
include_strict = Enum.YLeaf(2, "include-strict")
exclude = Enum.YLeaf(3, "exclude")
exclude_all = Enum.YLeaf(4, "exclude-all")
ignore = Enum.YLeaf(5, "ignore")
class MplsTesrlgExclude(Enum):
"""
MplsTesrlgExclude (Enum Class)
Mpls tesrlg exclude
.. data:: mandatory = 1
SRLG Mandatory Exclude
.. data:: preferred = 2
SRLG Preferred Exclude
.. data:: weighted = 3
SRLG Weighted Exclude
"""
mandatory = Enum.YLeaf(1, "mandatory")
preferred = Enum.YLeaf(2, "preferred")
weighted = Enum.YLeaf(3, "weighted")
class PathInvalidationAction(Enum):
"""
PathInvalidationAction (Enum Class)
Path invalidation action
.. data:: tear = 1
Tear
.. data:: drop = 2
Drop
"""
tear = Enum.YLeaf(1, "tear")
drop = Enum.YLeaf(2, "drop")
class SrPrepend(Enum):
"""
SrPrepend (Enum Class)
Sr prepend
.. data:: none_type = 0
NoneType
.. data:: next_label = 1
Next Label
.. data:: bgp_n_hop = 2
BGP NHOP
"""
none_type = Enum.YLeaf(0, "none-type")
next_label = Enum.YLeaf(1, "next-label")
bgp_n_hop = Enum.YLeaf(2, "bgp-n-hop")
| 15.617778 | 126 | 0.611483 | """ Cisco_IOS_XR_mpls_te_datatypes
This module contains a collection of generally useful
derived YANG data types.
Copyright (c) 2013\-2017 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class BfdReversePath(Enum):
"""
BfdReversePath (Enum Class)
Bfd reverse path
.. data:: bfd_reverse_path_binding_label = 1
BindingLabel
"""
bfd_reverse_path_binding_label = Enum.YLeaf(1, "bfd-reverse-path-binding-label")
class Ctype(Enum):
"""
Ctype (Enum Class)
Ctype
.. data:: ctype_null = 0
CTYPE NULL
.. data:: ctype_ipv4 = 1
CTYPE IPV4
.. data:: ctype_ipv4_p2p_tunnel = 7
CTYPE IPV4 P2P TUNNEL
.. data:: ctype_ipv6_p2p_tunnel = 8
CTYPE IPV6 P2P TUNNEL
.. data:: ctype_ipv4_uni = 9
CTYPE IPV4 UNI
.. data:: ctype_ipv4_p2mp_tunnel = 13
CTYPE IPV4 P2MP TUNNEL
.. data:: ctype_ipv6_p2mp_tunnel = 14
CTYPE IPV6 P2MP TUNNEL
"""
ctype_null = Enum.YLeaf(0, "ctype-null")
ctype_ipv4 = Enum.YLeaf(1, "ctype-ipv4")
ctype_ipv4_p2p_tunnel = Enum.YLeaf(7, "ctype-ipv4-p2p-tunnel")
ctype_ipv6_p2p_tunnel = Enum.YLeaf(8, "ctype-ipv6-p2p-tunnel")
ctype_ipv4_uni = Enum.YLeaf(9, "ctype-ipv4-uni")
ctype_ipv4_p2mp_tunnel = Enum.YLeaf(13, "ctype-ipv4-p2mp-tunnel")
ctype_ipv6_p2mp_tunnel = Enum.YLeaf(14, "ctype-ipv6-p2mp-tunnel")
class MplsTeAffinityValue(Enum):
"""
MplsTeAffinityValue (Enum Class)
Mpls te affinity value
.. data:: hex_value = 1
Affinity value in Hex number
.. data:: bit_position = 2
Affinity value by Bit-Position
"""
hex_value = Enum.YLeaf(1, "hex-value")
bit_position = Enum.YLeaf(2, "bit-position")
class MplsTeAttrSet(Enum):
"""
MplsTeAttrSet (Enum Class)
Mpls te attr set
.. data:: not_used = 0
Not used
.. data:: static = 1
Static
.. data:: lsp = 2
LSP
.. data:: unassigned = 3
Unassigned
.. data:: auto_backup = 4
Auto backup
.. data:: auto_mesh = 5
Auto mesh
.. data:: xro = 6
XRO
.. data:: p2mp_te = 7
P2MP TE
.. data:: otn_pp = 8
OTN Path Protection
.. data:: p2p_te = 9
P2P TE
"""
not_used = Enum.YLeaf(0, "not-used")
static = Enum.YLeaf(1, "static")
lsp = Enum.YLeaf(2, "lsp")
unassigned = Enum.YLeaf(3, "unassigned")
auto_backup = Enum.YLeaf(4, "auto-backup")
auto_mesh = Enum.YLeaf(5, "auto-mesh")
xro = Enum.YLeaf(6, "xro")
p2mp_te = Enum.YLeaf(7, "p2mp-te")
otn_pp = Enum.YLeaf(8, "otn-pp")
p2p_te = Enum.YLeaf(9, "p2p-te")
class MplsTeAutorouteMetric(Enum):
"""
MplsTeAutorouteMetric (Enum Class)
Mpls te autoroute metric
.. data:: relative = 1
Relative
.. data:: absolute = 2
Absolute
.. data:: constant = 3
Constant
"""
relative = Enum.YLeaf(1, "relative")
absolute = Enum.YLeaf(2, "absolute")
constant = Enum.YLeaf(3, "constant")
class MplsTeBackupBandwidthClass(Enum):
"""
MplsTeBackupBandwidthClass (Enum Class)
Mpls te backup bandwidth class
.. data:: class0 = 0
Class 0
.. data:: class1 = 1
Class 1
.. data:: any_class = 9
Any Class
"""
class0 = Enum.YLeaf(0, "class0")
class1 = Enum.YLeaf(1, "class1")
any_class = Enum.YLeaf(9, "any-class")
class MplsTeBackupBandwidthPool(Enum):
"""
MplsTeBackupBandwidthPool (Enum Class)
Mpls te backup bandwidth pool
.. data:: any_pool = 1
Any Pool
.. data:: global_pool = 2
Global Pool
.. data:: sub_pool = 4
Sub Pool
"""
any_pool = Enum.YLeaf(1, "any-pool")
global_pool = Enum.YLeaf(2, "global-pool")
sub_pool = Enum.YLeaf(4, "sub-pool")
class MplsTeBandwidthDste(Enum):
"""
MplsTeBandwidthDste (Enum Class)
Mpls te bandwidth dste
.. data:: standard_dste = 0
IETF-Standard DSTE
.. data:: pre_standard_dste = 1
Pre-Standard DSTE
"""
standard_dste = Enum.YLeaf(0, "standard-dste")
pre_standard_dste = Enum.YLeaf(1, "pre-standard-dste")
class MplsTeBandwidthLimit(Enum):
"""
MplsTeBandwidthLimit (Enum Class)
Mpls te bandwidth limit
.. data:: unlimited = 64
Unlimited
.. data:: limited = 128
Limited
"""
unlimited = Enum.YLeaf(64, "unlimited")
limited = Enum.YLeaf(128, "limited")
class MplsTeBandwidthPool(Enum):
"""
MplsTeBandwidthPool (Enum Class)
Mpls te bandwidth pool
.. data:: any_pool = 0
Any Pool
.. data:: sub_pool = 1
Sub Pool
"""
any_pool = Enum.YLeaf(0, "any-pool")
sub_pool = Enum.YLeaf(1, "sub-pool")
class MplsTeBfdSessionDownAction(Enum):
"""
MplsTeBfdSessionDownAction (Enum Class)
Mpls te bfd session down action
.. data:: re_setup = 1
Tear down and resetup
"""
re_setup = Enum.YLeaf(1, "re-setup")
class MplsTeIgpProtocol(Enum):
"""
MplsTeIgpProtocol (Enum Class)
Mpls te igp protocol
.. data:: none = 0
Not set
.. data:: isis = 1
IS IS
.. data:: ospf = 2
OSPF
"""
none = Enum.YLeaf(0, "none")
isis = Enum.YLeaf(1, "isis")
ospf = Enum.YLeaf(2, "ospf")
class MplsTeLogFrrProtection(Enum):
"""
MplsTeLogFrrProtection (Enum Class)
Mpls te log frr protection
.. data:: frr_active_primary = 1
Track only FRR active on primary LSP
.. data:: backup = 256
backup tunnel
.. data:: frr_ready_primary = 512
Track only FRR ready on primary LSP
.. data:: primary = 513
primary LSP
.. data:: all = 769
all
"""
frr_active_primary = Enum.YLeaf(1, "frr-active-primary")
backup = Enum.YLeaf(256, "backup")
frr_ready_primary = Enum.YLeaf(512, "frr-ready-primary")
primary = Enum.YLeaf(513, "primary")
all = Enum.YLeaf(769, "all")
class MplsTeOtnApsProtection(Enum):
"""
MplsTeOtnApsProtection (Enum Class)
Mpls te otn aps protection
.. data:: Y_1plus1_unidir_no_aps = 4
1PLUS1 UNIDIR NO APS
.. data:: Y_1plus1_unidir_aps = 8
1PLUS1 UNIDIR APS
.. data:: Y_1plus1_bdir_aps = 16
1PLUS1 BIDIR APS
"""
Y_1plus1_unidir_no_aps = Enum.YLeaf(4, "1plus1-unidir-no-aps")
Y_1plus1_unidir_aps = Enum.YLeaf(8, "1plus1-unidir-aps")
Y_1plus1_bdir_aps = Enum.YLeaf(16, "1plus1-bdir-aps")
class MplsTeOtnApsProtectionMode(Enum):
"""
MplsTeOtnApsProtectionMode (Enum Class)
Mpls te otn aps protection mode
.. data:: revertive = 1
Revertive
.. data:: non_revertive = 2
Non Revertive
"""
revertive = Enum.YLeaf(1, "revertive")
non_revertive = Enum.YLeaf(2, "non-revertive")
class MplsTeOtnApsRestorationStyle(Enum):
"""
MplsTeOtnApsRestorationStyle (Enum Class)
Mpls te otn aps restoration style
.. data:: keep_failed_lsp = 1
Keep Failed Lsp
.. data:: delete_failed_lsp = 2
Delete Failed Lsp
"""
keep_failed_lsp = Enum.YLeaf(1, "keep-failed-lsp")
delete_failed_lsp = Enum.YLeaf(2, "delete-failed-lsp")
class MplsTeOtnSncMode(Enum):
"""
MplsTeOtnSncMode (Enum Class)
Mpls te otn snc mode
.. data:: snc_n = 1
SNC N
.. data:: snc_i = 2
SNC I
.. data:: snc_s = 3
SNC S
"""
snc_n = Enum.YLeaf(1, "snc-n")
snc_i = Enum.YLeaf(2, "snc-i")
snc_s = Enum.YLeaf(3, "snc-s")
class MplsTePathDiversityConformance(Enum):
"""
MplsTePathDiversityConformance (Enum Class)
Mpls te path diversity conformance
.. data:: strict = 0
Strict
.. data:: best_effort = 1
Best effort
"""
strict = Enum.YLeaf(0, "strict")
best_effort = Enum.YLeaf(1, "best-effort")
class MplsTePathOption(Enum):
"""
MplsTePathOption (Enum Class)
Mpls te path option
.. data:: not_set = 0
Not Set
.. data:: dynamic = 1
Dynamic
.. data:: explicit_name = 3
Explicit, identified by name
.. data:: explicit_number = 4
Explicit, identified by number
.. data:: no_ero = 5
No ERO
.. data:: sr = 6
Segment routing
"""
not_set = Enum.YLeaf(0, "not-set")
dynamic = Enum.YLeaf(1, "dynamic")
explicit_name = Enum.YLeaf(3, "explicit-name")
explicit_number = Enum.YLeaf(4, "explicit-number")
no_ero = Enum.YLeaf(5, "no-ero")
sr = Enum.YLeaf(6, "sr")
class MplsTePathOptionProperty(Enum):
"""
MplsTePathOptionProperty (Enum Class)
Mpls te path option property
.. data:: none = 0
No property
.. data:: lockdown = 1
Path is not a canditate forreoptimization
.. data:: verbatim = 4
Explicit path does not require topology
database
.. data:: pce = 8
Dynamic path found by PCE server
.. data:: segment_routing = 16
Segment Routing path
"""
none = Enum.YLeaf(0, "none")
lockdown = Enum.YLeaf(1, "lockdown")
verbatim = Enum.YLeaf(4, "verbatim")
pce = Enum.YLeaf(8, "pce")
segment_routing = Enum.YLeaf(16, "segment-routing")
class MplsTePathOptionProtection(Enum):
"""
MplsTePathOptionProtection (Enum Class)
Mpls te path option protection
.. data:: active = 0
Active path
.. data:: protecting = 1
Protecting Path
"""
active = Enum.YLeaf(0, "active")
protecting = Enum.YLeaf(1, "protecting")
class MplsTePathSelectionInvalidationTimerExpire(Enum):
"""
MplsTePathSelectionInvalidationTimerExpire (Enum Class)
Mpls te path selection invalidation timer expire
.. data:: tunnel_action_tear = 1
Tear down tunnel.
.. data:: tunnel_action_drop = 2
Drop tunnel traffic.
"""
tunnel_action_tear = Enum.YLeaf(1, "tunnel-action-tear")
tunnel_action_drop = Enum.YLeaf(2, "tunnel-action-drop")
class MplsTePathSelectionMetric(Enum):
"""
MplsTePathSelectionMetric (Enum Class)
Mpls te path selection metric
.. data:: igp = 1
IGP Metric
.. data:: te = 2
TE Metric
.. data:: delay = 4
DELAY Metric
"""
igp = Enum.YLeaf(1, "igp")
te = Enum.YLeaf(2, "te")
delay = Enum.YLeaf(4, "delay")
class MplsTePathSelectionSegmentRoutingAdjacencyProtection(Enum):
"""
MplsTePathSelectionSegmentRoutingAdjacencyProtection (Enum Class)
Mpls te path selection segment routing adjacency
protection
.. data:: not_set = 0
Any segment can be used in a path.
.. data:: adj_unprotected = 1
Only unprotected adjacency segments can be used
in a path.
.. data:: adj_protected = 2
Only protected adjacency segments can be used
in a path.
"""
not_set = Enum.YLeaf(0, "not-set")
adj_unprotected = Enum.YLeaf(1, "adj-unprotected")
adj_protected = Enum.YLeaf(2, "adj-protected")
class MplsTePathSelectionTiebreaker(Enum):
"""
MplsTePathSelectionTiebreaker (Enum Class)
Mpls te path selection tiebreaker
.. data:: min_fill = 1
Prefer the path with the least-utilized links
.. data:: max_fill = 2
Prefer the path with the most-utilized links
.. data:: random = 3
Prefer a path with links utilized randomly
"""
min_fill = Enum.YLeaf(1, "min-fill")
max_fill = Enum.YLeaf(2, "max-fill")
random = Enum.YLeaf(3, "random")
class MplsTeSigNameOption(Enum):
"""
MplsTeSigNameOption (Enum Class)
Mpls te sig name option
.. data:: none = 0
None
.. data:: address = 1
Address
.. data:: name = 2
Name
"""
none = Enum.YLeaf(0, "none")
address = Enum.YLeaf(1, "address")
name = Enum.YLeaf(2, "name")
class MplsTeSwitchingCap(Enum):
"""
MplsTeSwitchingCap (Enum Class)
Mpls te switching cap
.. data:: psc1 = 1
PSC1
.. data:: lsc = 150
LSC
.. data:: fsc = 200
FSC
"""
psc1 = Enum.YLeaf(1, "psc1")
lsc = Enum.YLeaf(150, "lsc")
fsc = Enum.YLeaf(200, "fsc")
class MplsTeTunnelAffinity(Enum):
"""
MplsTeTunnelAffinity (Enum Class)
Mpls te tunnel affinity
.. data:: include = 1
Include Affinity
.. data:: include_strict = 2
Strictly Include Affinity
.. data:: exclude = 3
Exclude Affinity
.. data:: exclude_all = 4
Exclude All Affinities
.. data:: ignore = 5
Ignore Affinity
"""
include = Enum.YLeaf(1, "include")
include_strict = Enum.YLeaf(2, "include-strict")
exclude = Enum.YLeaf(3, "exclude")
exclude_all = Enum.YLeaf(4, "exclude-all")
ignore = Enum.YLeaf(5, "ignore")
class MplsTesrlgExclude(Enum):
    """
    MplsTesrlgExclude (Enum Class)

    Mpls tesrlg exclude

    .. data:: mandatory = 1

        SRLG Mandatory Exclude

    .. data:: preferred = 2

        SRLG Preferred Exclude

    .. data:: weighted = 3

        SRLG Weighted Exclude
    """

    # Enum.YLeaf(value, name) pairs; generated model code.
    mandatory = Enum.YLeaf(1, "mandatory")

    preferred = Enum.YLeaf(2, "preferred")

    weighted = Enum.YLeaf(3, "weighted")
class PathInvalidationAction(Enum):
    """
    PathInvalidationAction (Enum Class)

    Path invalidation action

    .. data:: tear = 1

        Tear

    .. data:: drop = 2

        Drop
    """

    # Enum.YLeaf(value, name) pairs; generated model code.
    tear = Enum.YLeaf(1, "tear")

    drop = Enum.YLeaf(2, "drop")
class SrPrepend(Enum):
    """
    SrPrepend (Enum Class)

    Sr prepend

    .. data:: none_type = 0

        NoneType

    .. data:: next_label = 1

        Next Label

    .. data:: bgp_n_hop = 2

        BGP NHOP
    """

    # Enum.YLeaf(value, name) pairs; generated model code.
    none_type = Enum.YLeaf(0, "none-type")

    next_label = Enum.YLeaf(1, "next-label")

    bgp_n_hop = Enum.YLeaf(2, "bgp-n-hop")
| 0 | 0 | 0 |
9ea88e402074b31fd58c53eea583cd8b32a5a4bc | 454 | py | Python | backend/api/migrations/0010_auto_20180505_0100.py | genehsun/HwakimBlog | 714b4ab43675f4a21cf356282238d03b9585e5c4 | [
"MIT"
] | 6 | 2018-03-30T09:45:14.000Z | 2022-02-25T07:10:37.000Z | backend/api/migrations/0010_auto_20180505_0100.py | genehsun/HwakimBlog | 714b4ab43675f4a21cf356282238d03b9585e5c4 | [
"MIT"
] | 2 | 2019-02-25T18:36:26.000Z | 2019-02-25T18:37:37.000Z | backend/api/migrations/0010_auto_20180505_0100.py | genehsun/HwakimBlog | 714b4ab43675f4a21cf356282238d03b9585e5c4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-05 01:00
from __future__ import unicode_literals
from django.db import migrations
| 19.73913 | 46 | 0.581498 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-05 01:00
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Removes the Daily model entirely.  The 'owner' field (presumably a
    # relation — TODO confirm against 0009) is dropped first so the model
    # can then be deleted cleanly.

    dependencies = [
        ('api', '0009_auto_20180503_0708'),
    ]

    operations = [
        # Drop the field before deleting the model it belongs to.
        migrations.RemoveField(
            model_name='daily',
            name='owner',
        ),
        migrations.DeleteModel(
            name='Daily',
        ),
    ]
| 0 | 285 | 23 |
fd53637a318ac81a6cb7c7c4d4895aec1be44fc3 | 948 | py | Python | 2021/03/solution_one.py | adtok/advent-of-code | df1f61759bd8f3bfd7995b7e2a124d7f6e97ba01 | [
"MIT"
] | null | null | null | 2021/03/solution_one.py | adtok/advent-of-code | df1f61759bd8f3bfd7995b7e2a124d7f6e97ba01 | [
"MIT"
] | null | null | null | 2021/03/solution_one.py | adtok/advent-of-code | df1f61759bd8f3bfd7995b7e2a124d7f6e97ba01 | [
"MIT"
] | null | null | null | """
Advent of Code: Day 03 Part 1
tldr: most prevalent bit
"""
from collections import defaultdict
if __name__ == "__main__":
main()
| 22.046512 | 86 | 0.617089 | """
Advent of Code: Day 03 Part 1
tldr: most prevalent bit
"""
from collections import defaultdict
def bitchar(condition: bool) -> str:
    """Render a truth value as a single bit character: "1" or "0"."""
    return str(int(bool(condition)))
def ZERO() -> int:
    """Default factory for ``defaultdict``: missing keys start at zero."""
    return int()
def solve(input_file):
    """Solve AoC 2021 day 3 part 1: the product of gamma and epsilon rates.

    Counts '1' bits per column over all lines of *input_file*.  Gamma takes
    each column's majority bit, epsilon the minority bit; both are read as
    binary numbers.

    :param input_file: path to a file of equal-length binary strings
    :return: gamma * epsilon (also printed)
    """
    tracker = defaultdict(ZERO)  # column index -> count of '1' bits seen
    with open(input_file, "r") as file:
        for count, line in enumerate(file):
            for i, bit_value in enumerate(map(int, line.strip())):
                # FIX: map(int, ...) already yielded ints; the original
                # redundantly re-converted with int(bit_value).
                tracker[i] += bit_value
    # Majority threshold: half the line count (count is the last 0-based index).
    thresh = (count + 1) // 2
    gam = int("".join(bitchar(tracker[i] > thresh) for i in range(len(tracker))), 2)
    eps = int("".join(bitchar(tracker[i] < thresh) for i in range(len(tracker))), 2)
    result = gam * eps
    print(f"The answer for {input_file!r} is {result}!")
    return result
def main():
    """Validate the sample input against the known answer, then run for real."""
    sample_answer = 198
    assert solve("input.test") == sample_answer
    solve("input.solution")
if __name__ == "__main__":
main()
| 711 | 0 | 92 |
979869f37ae6498474b7c34a957d81033e23233c | 5,598 | py | Python | scrape_crs_website.py | JoshData/crs-reports-website | b72d09307511973a892a77f7ea0643cab28926a0 | [
"CC0-1.0"
] | 41 | 2016-09-17T13:12:41.000Z | 2022-01-04T08:32:26.000Z | scrape_crs_website.py | JoshData/crs-reports-website | b72d09307511973a892a77f7ea0643cab28926a0 | [
"CC0-1.0"
] | 13 | 2016-10-19T18:53:02.000Z | 2018-08-19T16:11:03.000Z | scrape_crs_website.py | JoshData/crs-reports-website | b72d09307511973a892a77f7ea0643cab28926a0 | [
"CC0-1.0"
] | 6 | 2016-10-19T18:44:53.000Z | 2021-01-07T02:52:11.000Z | #!/usr/bin/env python3
#
# Scrape reports from https://crsreports.congress.gov.
#
# This site provides many of the same reports that
# are available through our own archive, but only as
# PDFs and only with versions as of the site launch
# date and going forward.
from collections import OrderedDict
import datetime
import hashlib
import json
import os
import re
import subprocess
import scrapelib
BASE_PATH = "incoming/crsreports.congress.gov"
# Create a scraper that automatically throttles our requests
# so that we don't overload the CRS server.
scraper = scrapelib.Scraper(
requests_per_minute=35,
retry_attempts=2,
retry_wait_seconds=10)
ProdTypeDisplayName = {
"R": "CRS Report",
"RS": "CRS Report",
"RL": "CRS Report",
"IN": "CRS Insight",
"IF": "CRS In Focus",
}
if __name__ == "__main__":
# Make the directories for the output files.
os.makedirs(BASE_PATH + "/documents", exist_ok=True)
os.makedirs(BASE_PATH + "/files", exist_ok=True)
scrape_report_listing()
| 34.134146 | 111 | 0.65577 | #!/usr/bin/env python3
#
# Scrape reports from https://crsreports.congress.gov.
#
# This site provides many of the same reports that
# are available through our own archive, but only as
# PDFs and only with versions as of the site launch
# date and going forward.
from collections import OrderedDict
import datetime
import hashlib
import json
import os
import re
import subprocess
import scrapelib
BASE_PATH = "incoming/crsreports.congress.gov"
# Create a scraper that automatically throttles our requests
# so that we don't overload the CRS server.
scraper = scrapelib.Scraper(
requests_per_minute=35,
retry_attempts=2,
retry_wait_seconds=10)
ProdTypeDisplayName = {
"R": "CRS Report",
"RS": "CRS Report",
"RL": "CRS Report",
"IN": "CRS Insight",
"IF": "CRS In Focus",
}
def scrape_report_listing():
    """Walk the paged CRS search results (newest first) and fetch every
    version of every listed report via fetch_report_version().

    Stops when a whole page yields nothing new, or when all records the
    server reports have been seen.
    """
    page_number = 1
    fetched_reports = 0
    total_reports = ""
    last_report_date = ""
    while True:
        # Get the next page of reports...
        url = "https://crsreports.congress.gov/search/results?orderBy=Date&pageNumber={}".format(page_number)
        print("{}... [{}/{}/{}]".format(url, fetched_reports, total_reports, last_report_date))
        body = scraper.get(url).content
        body = json.loads(body.decode("utf8"))
        total_reports = body["TotalRecCount"]
        last_report_date = body["SearchResults"][-1]["CoverDate"].split("T")[0]
        did_fetch = False
        # For each report...
        for report in body["SearchResults"]:
            fetched_reports += 1
            # Skip this --- it doesn't follow the same URL structure for PDFs.
            if report["Title"] == "Appropriations Status Table" \
                or "appropriations" in report["ProductNumber"].lower():
                continue
            # Get the report versions.
            report_versions = []
            if "PreviousVersions" in report:
                # All of the versions (including the current one) are listed
                # in the PreviousVersions field when it is present.
                for prev_version in report["PreviousVersions"].split("|"):
                    seq, cover_date, seq_type, seq_code = prev_version.split(";")  # "1;13-AUG-20;NEW;Auth"
                    seq = int(seq)
                    cover_date = datetime.datetime.strptime(cover_date, "%d-%b-%y").date()
                    report_versions.append((seq, cover_date))
            else:
                # There is just the current version.
                seq = int(report["CurrentSeqNumber"])
                cover_date = datetime.datetime.strptime(report["CoverDate"], "%Y-%m-%dT%H:%M:%S").date()
                report_versions.append((seq, cover_date))
            # Fetch each report version.
            # BUG FIX: the original wrote
            #   did_fetch = did_fetch or fetch_report_version(...)
            # which short-circuits once did_fetch is True, silently skipping
            # the download of every remaining version on the page.  Call the
            # fetch unconditionally, then accumulate the flag.
            for seq, cover_date in report_versions:
                fetched = fetch_report_version(report, seq, cover_date)
                did_fetch = did_fetch or fetched
        # If we didn't find anything new on this page, stop here rather than
        # going through all 500+ pages of results.
        if not did_fetch:
            return
        if fetched_reports == body["TotalRecCount"]:
            return
        page_number += 1
def fetch_report_version(doc, seq, cover_date):
    """Ensure one report version's PDF and JSON metadata exist on disk.

    doc        -- one search-result record from the CRS API
    seq        -- the version's sequence number (int)
    cover_date -- the version's cover date (a date object)

    Returns True if the PDF was actually downloaded on this call, False if
    it was already cached locally.
    """
    did_fetch = False

    report_version_id = doc["ProductNumber"] + "_" + str(seq) + "_" + cover_date.isoformat()

    pdf_file = None

    json_fn = BASE_PATH +"/documents/" + report_version_id + ".json"
    if os.path.exists(json_fn):
        # If we've already fetched this report version, there is no need to get
        # the PDF again --- we assume that once published, report versions do not
        # change. We could also skip writting the JSON file, but we may want to
        # update the JSON format.
        with open(json_fn) as f:
            rec = json.load(f)
            assert rec["formats"][0]["format"] == "PDF"
            pdf_file = rec["formats"][0]["filename"]
            pdf_url = rec["formats"][0]["url"]
            pdf_content_hash = rec["formats"][0]["sha1"]

    # NOTE: `rec` is only defined when the JSON existed; the short-circuit on
    # `not pdf_file` keeps this safe when it did not.
    if not pdf_file or not os.path.exists(BASE_PATH + "/" + rec["formats"][0]["filename"]):
        # Download the PDF.
        pdf_url = "https://crsreports.congress.gov/product/pdf/{}/{}/{}".format(
            doc["ProductTypeCode"], doc["ProductNumber"], str(seq))
        print(pdf_url)
        pdf_content = scraper.get(pdf_url).content
        did_fetch = True

        # Get the SHA1 hash of the content, construct a path to save the PDF to,
        # and save it.  The hash in the filename makes cached files content-addressed.
        h = hashlib.sha1()
        h.update(pdf_content)
        pdf_content_hash = h.hexdigest()

        pdf_file = "files/" + cover_date.isoformat() + "_" + doc["ProductNumber"] + "_" + pdf_content_hash + ".pdf"

        with open(BASE_PATH + "/" + pdf_file, "wb") as f:
            f.write(pdf_content)

    # Construct metadata record.
    rec = OrderedDict([
        ("source", "CRSReports.Congress.gov"),
        ("sourceLink", "https://crsreports.congress.gov/product/details?prodcode=" + doc["ProductNumber"]),
        ("id", report_version_id),
        ('date', cover_date.isoformat()),
        ('retrieved', datetime.datetime.now().isoformat()),
        ("title", doc["Title"]),
        ("summary", None),
        ("type", ProdTypeDisplayName.get(doc['ProductTypeCode'], "CRS Report Type " + doc['ProductTypeCode'])),
        ("typeId", doc['ProductTypeCode']),
        ("active", doc["StatusFlag"] == "Active"), # "Active" or "Archived", not sure if it's meaningful
        ("formats", [
            OrderedDict([
                ("format", "PDF"),
                ("url", pdf_url),
                ("sha1", pdf_content_hash), # the SHA-1 hash of the file content
                ("filename", pdf_file),
            ]),
        ])
    ])

    # Write out the metadata for this report version (rewritten every call so
    # the JSON format can be updated without re-downloading PDFs).
    with open(json_fn, "w") as f:
        f.write(json.dumps(rec, indent=2))

    return did_fetch
if __name__ == "__main__":
    # Entry point: ensure the output tree exists, then crawl the listing.
    # Make the directories for the output files.
    os.makedirs(BASE_PATH + "/documents", exist_ok=True)
    os.makedirs(BASE_PATH + "/files", exist_ok=True)

    scrape_report_listing()
| 4,551 | 0 | 46 |
56ae42b088337f9464441adc110055e63c597818 | 188 | py | Python | locale/pot/api/core/_autosummary/pyvista-Light-specular_color-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 4 | 2020-08-07T08:19:19.000Z | 2020-12-04T09:51:11.000Z | locale/pot/api/core/_autosummary/pyvista-Light-specular_color-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 19 | 2020-08-06T00:24:30.000Z | 2022-03-30T19:22:24.000Z | locale/pot/api/core/_autosummary/pyvista-Light-specular_color-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 1 | 2021-03-09T07:50:40.000Z | 2021-03-09T07:50:40.000Z | # Create a light and set its specular color to bright green.
#
import pyvista as pv
light = pv.Light()
light.specular_color = '#00FF00'
light.specular_color
# Expected:
## (0.0, 1.0, 0.0)
| 20.888889 | 60 | 0.712766 | # Create a light and set its specular color to bright green.
#
import pyvista as pv
light = pv.Light()
light.specular_color = '#00FF00'
light.specular_color
# Expected:
## (0.0, 1.0, 0.0)
| 0 | 0 | 0 |
d31dfbf24fe2af161e356b5834c663977684c417 | 6,407 | py | Python | opennem/db/views/__init__.py | paulculmsee/opennem | 9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1 | [
"MIT"
] | 22 | 2020-06-30T05:27:21.000Z | 2022-02-21T12:13:51.000Z | opennem/db/views/__init__.py | paulculmsee/opennem | 9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1 | [
"MIT"
] | 71 | 2020-08-07T13:06:30.000Z | 2022-03-15T06:44:49.000Z | opennem/db/views/__init__.py | paulculmsee/opennem | 9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1 | [
"MIT"
] | 13 | 2020-06-30T03:28:32.000Z | 2021-12-30T08:17:16.000Z | import logging
from operator import attrgetter
from pathlib import Path
from typing import List
from opennem.db import get_database_engine
from opennem.db.views.queries import (
get_all_views_query,
get_query_drop_view,
get_view_unique_index_query,
)
from .continuous_aggregates import (
create_continuous_aggregation_query,
remove_continuous_aggregation_query,
)
from .schema import ContinuousAggregationPolicy, ViewDefinition
logger = logging.getLogger("opennem.db.views")
VIEW_PATH = Path(__file__).parent.parent / "fixtures" / "views"
AggregationPolicy30Minutes = ContinuousAggregationPolicy(
interval="30 minutes", start_interval="2 hours"
)
AggregationPolicy2Hours = ContinuousAggregationPolicy(
interval="2 hours", start_interval="6 hours", end_interval="2 hours"
)
AggregationPolicy6Hours = ContinuousAggregationPolicy(
interval="6 hours", start_interval="12 hours", end_interval="2 hours"
)
_VIEW_MAP = [
ViewDefinition(
priority=11,
name="mv_facility_all",
materialized=True,
filepath="mv_facility_all.sql",
primary_key=["trading_interval", "network_id", "code"],
indexes=[],
),
ViewDefinition(
priority=11,
name="mv_network_fueltech_days",
materialized=True,
filepath="mv_network_fueltech_days.sql",
primary_key=["trading_day", "network_id", "code"],
),
ViewDefinition(
priority=15,
name="mv_facility_45d",
materialized=True,
filepath="mv_facility_45d.sql",
primary_key=["trading_interval", "network_id", "code"],
),
ViewDefinition(
priority=20,
name="mv_region_emissions",
materialized=True,
filepath="mv_region_emissions.sql",
primary_key=["trading_interval", "network_id", "network_region"],
),
ViewDefinition(
priority=30,
name="mv_interchange_energy_nem_region",
materialized=True,
filepath="mv_interchange_energy_nem_region.sql",
primary_key=["trading_interval", "network_id", "network_region"],
),
ViewDefinition(
priority=40,
name="vw_region_flow_emissions",
materialized=False,
filepath="vw_region_flow_emissions.sql",
),
]
POSTGIS_VIEWS = ["geography_columns", "geometry_columns", "raster_columns", "raster_overviews"]
def purge_views() -> None:
"""Remove views that aren't in the view table"""
engine = get_database_engine()
all_views_query = get_all_views_query()
all_views = []
with engine.connect() as c:
result = list(c.execute(all_views_query))
# Dont drop postgis or mapped views
all_views = [i[0] for i in result if i[0] not in POSTGIS_VIEWS + [i.name for i in _VIEW_MAP]]
for view_name in all_views:
with engine.connect() as c:
c.execution_options(isolation_level="AUTOCOMMIT")
query = "drop materialized view if exists {} cascade;".format(view_name)
logger.info("Dropping view {}".format(view_name))
logger.debug(query)
try:
c.execute(query)
except Exception as e:
logger.error("Error dropping view: {}".format(e))
def init_database_views() -> None:
""" Initialize all the database view """
engine = get_database_engine()
views_sorted_by_priority = list(sorted(_VIEW_MAP, key=attrgetter("priority")))
for view in views_sorted_by_priority:
logger.info("Initializing view {}".format(view.name))
with engine.connect() as c:
c.execution_options(isolation_level="AUTOCOMMIT")
# drop
drop_query = get_query_drop_view(view)
logger.debug(drop_query)
try:
c.execute(drop_query)
except Exception as e:
logger.warn("Could not drop view {}".format(view.name))
# create
create_query = get_view_content(view)
logger.debug(create_query)
c.execute(create_query)
# index
index_create_query = get_view_unique_index_query(view)
if index_create_query:
logger.debug(index_create_query)
try:
c.execute(index_create_query)
except Exception as e:
logger.error("Error creating index: {}".format(e))
return None
def init_aggregation_policies() -> None:
""" Initializes the continuous aggregation policies """
# @TODO check what exists with query
engine = get_database_engine()
for view in _VIEW_MAP:
if not view.aggregation_policy:
logging.debug("Skipping {}".format(view.name))
continue
with engine.connect() as c:
drop_query = remove_continuous_aggregation_query(view)
try:
logger.debug(drop_query)
c.execute(drop_query)
except Exception:
logger.warn("Could not drop continuous aggregation query: {}".format(view.name))
pass
create_query = create_continuous_aggregation_query(view)
logger.debug(create_query)
try:
c.execute(create_query)
except Exception as e:
logger.warn("Could not create continuous aggregation query: {}".format(e))
def get_materialized_view_names() -> List[str]:
""" Returns a list of material view names in priority order """
return list(
v.name
for v in filter(
lambda x: x.materialized is True and x.aggregation_policy is None, _VIEW_MAP
)
)
def get_timescale_view_names() -> List[str]:
""" Returns a list of timescale view names in priority order """
return list(
v.name
for v in filter(lambda x: x.materialized is True and x.aggregation_policy, _VIEW_MAP)
)
| 28.602679 | 97 | 0.64133 | import logging
from operator import attrgetter
from pathlib import Path
from typing import List
from opennem.db import get_database_engine
from opennem.db.views.queries import (
get_all_views_query,
get_query_drop_view,
get_view_unique_index_query,
)
from .continuous_aggregates import (
create_continuous_aggregation_query,
remove_continuous_aggregation_query,
)
from .schema import ContinuousAggregationPolicy, ViewDefinition
logger = logging.getLogger("opennem.db.views")
VIEW_PATH = Path(__file__).parent.parent / "fixtures" / "views"
AggregationPolicy30Minutes = ContinuousAggregationPolicy(
interval="30 minutes", start_interval="2 hours"
)
AggregationPolicy2Hours = ContinuousAggregationPolicy(
interval="2 hours", start_interval="6 hours", end_interval="2 hours"
)
AggregationPolicy6Hours = ContinuousAggregationPolicy(
interval="6 hours", start_interval="12 hours", end_interval="2 hours"
)
_VIEW_MAP = [
ViewDefinition(
priority=11,
name="mv_facility_all",
materialized=True,
filepath="mv_facility_all.sql",
primary_key=["trading_interval", "network_id", "code"],
indexes=[],
),
ViewDefinition(
priority=11,
name="mv_network_fueltech_days",
materialized=True,
filepath="mv_network_fueltech_days.sql",
primary_key=["trading_day", "network_id", "code"],
),
ViewDefinition(
priority=15,
name="mv_facility_45d",
materialized=True,
filepath="mv_facility_45d.sql",
primary_key=["trading_interval", "network_id", "code"],
),
ViewDefinition(
priority=20,
name="mv_region_emissions",
materialized=True,
filepath="mv_region_emissions.sql",
primary_key=["trading_interval", "network_id", "network_region"],
),
ViewDefinition(
priority=30,
name="mv_interchange_energy_nem_region",
materialized=True,
filepath="mv_interchange_energy_nem_region.sql",
primary_key=["trading_interval", "network_id", "network_region"],
),
ViewDefinition(
priority=40,
name="vw_region_flow_emissions",
materialized=False,
filepath="vw_region_flow_emissions.sql",
),
]
def get_view_content(viewdef: ViewDefinition) -> str:
    """Load and return the SQL text backing *viewdef* from the views folder.

    Raises Exception when the fixtures directory or the view's file is missing.
    """
    if not VIEW_PATH.is_dir():
        raise Exception("View directory: {} does not exist".format(VIEW_PATH))

    sql_file = VIEW_PATH / Path(viewdef.filepath)

    if not sql_file.is_file():
        raise Exception("View {} not found in view path:".format(sql_file))

    with sql_file.open() as source:
        return source.read()
POSTGIS_VIEWS = ["geography_columns", "geometry_columns", "raster_columns", "raster_overviews"]
def purge_views() -> None:
    """Remove views that aren't in the view table"""
    engine = get_database_engine()

    all_views_query = get_all_views_query()

    all_views = []

    with engine.connect() as c:
        result = list(c.execute(all_views_query))
        # Dont drop postgis or mapped views
        all_views = [i[0] for i in result if i[0] not in POSTGIS_VIEWS + [i.name for i in _VIEW_MAP]]

    # Drop each orphaned view in its own autocommit connection so one
    # failure does not abort the rest.
    for view_name in all_views:
        with engine.connect() as c:
            c.execution_options(isolation_level="AUTOCOMMIT")
            query = "drop materialized view if exists {} cascade;".format(view_name)

            logger.info("Dropping view {}".format(view_name))
            logger.debug(query)

            try:
                c.execute(query)
            except Exception as e:
                logger.error("Error dropping view: {}".format(e))
def init_database_views() -> None:
    """ Initialize all the database view """
    engine = get_database_engine()

    # Views are (re)created in ascending priority order so that views which
    # depend on earlier ones are built after their dependencies.
    views_sorted_by_priority = list(sorted(_VIEW_MAP, key=attrgetter("priority")))

    for view in views_sorted_by_priority:
        logger.info("Initializing view {}".format(view.name))

        with engine.connect() as c:
            c.execution_options(isolation_level="AUTOCOMMIT")

            # drop (best-effort: the view may not exist yet)
            drop_query = get_query_drop_view(view)
            logger.debug(drop_query)

            try:
                c.execute(drop_query)
            except Exception as e:
                logger.warn("Could not drop view {}".format(view.name))

            # create from the SQL fixture file
            create_query = get_view_content(view)
            logger.debug(create_query)
            c.execute(create_query)

            # index (only views with a primary key produce a query)
            index_create_query = get_view_unique_index_query(view)

            if index_create_query:
                logger.debug(index_create_query)

                try:
                    c.execute(index_create_query)
                except Exception as e:
                    logger.error("Error creating index: {}".format(e))

    return None
def init_aggregation_policies() -> None:
    """ Initializes the continuous aggregation policies """
    # @TODO check what exists with query
    engine = get_database_engine()

    for view in _VIEW_MAP:
        # Only views configured with an aggregation policy are touched.
        if not view.aggregation_policy:
            logging.debug("Skipping {}".format(view.name))
            continue

        with engine.connect() as c:
            # Best-effort removal of any existing policy before re-adding it.
            drop_query = remove_continuous_aggregation_query(view)

            try:
                logger.debug(drop_query)
                c.execute(drop_query)
            except Exception:
                logger.warn("Could not drop continuous aggregation query: {}".format(view.name))
                pass

            create_query = create_continuous_aggregation_query(view)

            logger.debug(create_query)

            try:
                c.execute(create_query)
            except Exception as e:
                logger.warn("Could not create continuous aggregation query: {}".format(e))
def get_materialized_view_names() -> List[str]:
    """ Returns a list of material view names in priority order """
    return [
        view.name
        for view in _VIEW_MAP
        if view.materialized is True and view.aggregation_policy is None
    ]
def get_timescale_view_names() -> List[str]:
    """ Returns a list of timescale view names in priority order """
    return [
        view.name
        for view in _VIEW_MAP
        if view.materialized is True and view.aggregation_policy
    ]
| 444 | 0 | 23 |
f853927ccd43fef77f91ed6e39a184b8c8436f43 | 4,264 | py | Python | callback_script_template.py | Phedorabot/phedorabot-python-sdk | 9e5e4a72a573cf85d41dfd765f96ec725f844ea6 | [
"Apache-2.0"
] | null | null | null | callback_script_template.py | Phedorabot/phedorabot-python-sdk | 9e5e4a72a573cf85d41dfd765f96ec725f844ea6 | [
"Apache-2.0"
] | null | null | null | callback_script_template.py | Phedorabot/phedorabot-python-sdk | 9e5e4a72a573cf85d41dfd765f96ec725f844ea6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2017 Phedorabot
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, with_statement
# This is a script that shows how to handle the instant task execution post
# request from the Phedorabot server, you must ensure that this is done only
# with a post request
# import the webhook and the webhook excdeption class
from phedorabot.webhook import PhedorabotWebHookEngine
from phedorabot.webhook import PhedorabotWebHookException
# Wrap everything in a try/except block so we can deal with errors rightly
try:
# First your server should be able to read the headers sent in as dictionary
# and the raw body sent in
# Initialize the webhook engine
engine = PhedorabotWebHookEngine()
# set the headers as a raw dictionary from your server
engine.set_raw_header(server.request.headers)
# set the raw boy as string type this will be parsed by the engine
engine.set_raw_body(server.request.body)
# Next we need to ensure that we received this instant task execution payload
# from Phedorabot, before we can trust the payload enough to use it for
# any meaningful task execution
if engine.is_valid_task_execution():
# Ok this looks good we have a valid task execution otherwise the webhook
# will raise an exception for us
# At this point we have a valid task execution payload we need to get
# the public api key that is associated with this callback request data
# so that you can provide the corresponding api secret for verifying the
# integrity of the task payload
api_key = engine.get_api_key()
# Query for the corresponding api secret on your server, database or
# configuration storage using this api key
# after which set the below api secret to the corresponding secret
api_secret = ''
engine.set_api_secret(api_secret)
# Next verify the integrity of the task execution payload
if engine.verify_task_execution_payload():
# Getting this far means that the tash execution payload is valid
# and can be trusted.
# get the headers incase you passed customer headers when creating
# the task
headers = engine.get_headers()
# get the payload
payload = engine.get_payload()
# Now you can execute the task you want to execute here using the
# contents of the payload as well as the headers after that if you
# want to set customer status of the task execution you can call the
# engine.add_result() method, this expects a key and a value
# it will be registered on your Phedorabot task execution log so
# you can review it later
# e.g engine.add_result('status', 'Executed Successfully')
# TODO: task executtion here, after this part you are all done
# Note that Phedorabot server will give your server a 30 seconds
# window to get feed back from this callback scripts otherwise it
# will consider it a failure
except (Exception, PhedorabotWebHookException) as ex:
# if this is a Phedorabot Webhook exception we need to capture it
if hasattr(ex, 'what'):
engine.set_error(ex.get_what())
engine.set_error_description(ex.get_reason())
else:
engine.set_error('webhook_error')
engine.set_error_description(str(ex))
finally:
# send back response to Phedorabot so that you can see a log of how your
# callback script is executing
response = engine.get_response()
# Print this depending on your server type
print response
| 45.361702 | 81 | 0.705441 | #!/usr/bin/env python
#
# Copyright 2017 Phedorabot
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, with_statement
# This is a script that shows how to handle the instant task execution post
# request from the Phedorabot server, you must ensure that this is done only
# with a post request
# import the webhook and the webhook excdeption class
from phedorabot.webhook import PhedorabotWebHookEngine
from phedorabot.webhook import PhedorabotWebHookException
# Wrap everything in a try/except block so we can deal with errors rightly
try:
    # First your server should be able to read the headers sent in as a
    # dictionary and the raw body sent in.

    # Initialize the webhook engine
    engine = PhedorabotWebHookEngine()

    # set the headers as a raw dictionary from your server
    engine.set_raw_header(server.request.headers)

    # set the raw body as string type; this will be parsed by the engine
    engine.set_raw_body(server.request.body)

    # Ensure that we received this instant task execution payload from
    # Phedorabot before trusting it for any meaningful task execution.
    if engine.is_valid_task_execution():
        # This is a valid task execution, otherwise the webhook raises an
        # exception for us.

        # Get the public api key associated with this callback request so
        # the corresponding api secret can be used to verify the payload.
        api_key = engine.get_api_key()

        # Look up the corresponding api secret on your server, database or
        # configuration storage using this api key, then assign it below.
        api_secret = ''

        engine.set_api_secret(api_secret)

        # Next verify the integrity of the task execution payload
        if engine.verify_task_execution_payload():
            # The task execution payload is valid and can be trusted.

            # Custom headers passed when the task was created:
            headers = engine.get_headers()

            # get the payload
            payload = engine.get_payload()

            # Execute your task here using the payload and the headers.  To
            # record a custom status on your Phedorabot task execution log,
            # call engine.add_result(key, value),
            # e.g. engine.add_result('status', 'Executed Successfully')

            # TODO: task execution here; after this part you are all done.
            # Note that the Phedorabot server gives this callback a
            # 30-second window to respond, otherwise it considers the run
            # a failure.

except (Exception, PhedorabotWebHookException) as ex:
    # if this is a Phedorabot Webhook exception we need to capture it
    if hasattr(ex, 'what'):
        engine.set_error(ex.get_what())
        engine.set_error_description(ex.get_reason())
    else:
        engine.set_error('webhook_error')
        engine.set_error_description(str(ex))
finally:
    # send back response to Phedorabot so that you can see a log of how your
    # callback script is executing
    response = engine.get_response()
    # Print this depending on your server type.
    # BUG FIX: this module does `from __future__ import print_function`, so
    # the Python 2 statement form `print response` is a SyntaxError; the
    # function-call form works on both Python 2 (with the import) and 3.
    print(response)
| 0 | 0 | 0 |
cf19db66604da08c8957990aad01534c1093b2d9 | 5,031 | py | Python | tests/test_wsClaims.py | pedromtorres/TigerShark | 2790a7c03905a094b126b48387c7919c09cce238 | [
"BSD-3-Clause"
] | 24 | 2015-03-18T10:15:20.000Z | 2022-03-18T13:38:34.000Z | tests/test_wsClaims.py | tspannhw/TigerShark | 5081641f1b189a43e9eab4813256598cc0a79f6f | [
"BSD-3-Clause"
] | 6 | 2015-03-27T12:36:57.000Z | 2021-04-13T15:01:24.000Z | tests/test_wsClaims.py | tspannhw/TigerShark | 5081641f1b189a43e9eab4813256598cc0a79f6f | [
"BSD-3-Clause"
] | 21 | 2015-11-21T09:19:47.000Z | 2020-09-17T16:52:50.000Z | #!/usr/bin/env python2.6
"""
Unit test of web.claims application as a complete Django WSGI web service.
"""
from __future__ import print_function
import unittest
import httplib
import urllib2, urllib
import logging, sys
import os.path
import datetime
import base64
import subprocess, time
import json
logger= logging.getLogger( __file__ )
class TestWS( unittest.TestCase ):
"""Exercise load and fetch operations.
The tests must be run in order to force the expected behavior.
"""
def setUpModule():
    """Spawn the test server process.

    This should build a test database, load fixtures, and then provide
    the Django-based services.
    """
    global the_proc, the_log, the_err
    # Django `testserver` command with the example 837 fixture, pinned to
    # port 18000 (the port the test client connects to).
    command= ["/Library/Frameworks/Python.framework/Versions/2.6/bin/python2.6", "-m", "web.manage", "testserver",
        '--addrport=18000', '--settings=web.settings',
        '--noinput', '--verbosity=1',
        'example837.json',
    ]
    log_file= 'testserver.log'
    err_file= 'testserver.err'
    logger.info( '{0} >{1} 2>{2}'.format( ' '.join( command ), log_file, err_file ) )
    # Buffering 0 => unbuffered (Python 2 open()), so server output appears
    # in the log files immediately.
    the_log= open( log_file, 'w', 0 )
    the_err= open( err_file, 'w', 0 )
    the_proc = subprocess.Popen(command, shell=False, stdout=the_log, stderr=the_err)
    time.sleep(6) # Wait for fixtures to load
    status= the_proc.poll()  # None means the server is still running
    logger.info( 'PID %d, status %r', the_proc.pid, status )
    logger.info( datetime.datetime.now() )
def tearDownModule():
"""Kill the server process."""
global the_proc
logger.info( "Stopping server" )
the_proc.kill()
logger.debug( "Waiting for %d to finally exit", the_proc.pid )
the_proc.wait()
logger.info( "PID %d, status %r", the_proc.pid, the_proc.returncode )
the_log.close()
the_err.close()
for f, p in (the_log, 'log>'), (the_err, 'err>'):
print()
with open( f.name, 'r' ) as source:
for line in source:
print( p, line, end='' )
print()
if __name__ == "__main__":
logging.basicConfig(
stream=sys.stderr,
level=logging.DEBUG,
)
if sys.version_info[:2] <= ( 2, 6 ):
#Python2.6 work-around
setUpModule()
tests= unittest.defaultTestLoader.loadTestsFromModule(__import__('__main__'))
result= unittest.TextTestRunner().run( tests )
tearDownModule()
sys.exit(not result.wasSuccessful())
#Python2.7
unittest.main()
| 35.429577 | 114 | 0.602465 | #!/usr/bin/env python2.6
"""
Unit test of web.claims application as a complete Django WSGI web service.
"""
from __future__ import print_function
import unittest
import httplib
import urllib2, urllib
import logging, sys
import os.path
import datetime
import base64
import subprocess, time
import json
logger= logging.getLogger( __file__ )
class TestWS( unittest.TestCase ):
"""Exercise load and fetch operations.
The tests must be run in order to force the expected behavior.
"""
def setUp( self ):
self.claimDict= {
'GWID':'06E266185200',
'CLAIM-ID':'22559311',
'BENEFIT':'CHRC',
'TYPE-OF-SERVICE':'CI',
'LOCATION':'ALB',
'TYPE':'P',
'SECONDARY':'M',
'GENDER':'U',
'AGE-FROM':'0',
'AGE-TO':'125',
'CLAIM-FILE': os.path.join("test","837-example.txt"),
}
with open( self.claimDict['CLAIM-FILE'],"rU" ) as claims:
self.claimText= "".join(x.strip() for x in claims)
self.headers={'Authorization':'BASIC '+base64.encodestring("admin:admin")[:-1]}
self.client= httplib.HTTPConnection( 'localhost', 18000 )
def test_01_Load( self ):
# Build Properties dict
row= self.claimDict
propCols= ( "BENEFIT", "TYPE-OF-SERVICE", "LOCATION", "TYPE", "SECONDARY" )
properties= dict( [ (k,row[k]) for k in propCols ] )
prop_json= json.dumps( properties )
# Build Constraints dict
consCols= ( "GENDER", "AGE-FROM", "AGE-TO" )
constraints= dict( [ (k,row[k]) for k in consCols ] )
cons_json= json.dumps( constraints )
# load claims
claimId= row["CLAIM-ID"]
params= urllib.urlencode({'claim':self.claimText, 'claim_id':claimId,
'properties':prop_json, 'constraints':cons_json})
self.client.request( 'POST', "/claim/load/", body=params, headers=self.headers)
response= self.client.getresponse()
result= response.read()
#print( result )
self.client.close()
self.assertEquals( "CREATED", response.reason )
object= json.loads(result)
self.assertEquals( claimId, object['claim_id'] )
def test_02_Fetch( self ):
claimId= self.claimDict["CLAIM-ID"]
self.client.request( 'GET', "/claim/{0}/".format(claimId), headers=self.headers )
response= self.client.getresponse()
result= response.read()
self.client.close()
object= json.loads(result)
self.assertEquals( "OK", response.reason )
self.assertEquals( self.claimText, object['claim'] )
def test_03_Fetch( self ):
claimId= '837_example'
self.client.request( 'GET', "/claim_837/{0}/".format(claimId), headers=self.headers )
response= self.client.getresponse()
result= response.read()
self.client.close()
object= json.loads(result)
#print( result )
self.assertEquals( "OK", response.reason )
print( object['claim'] )
def setUpModule():
"""Spawn the test server process.
This should build a test database, load fixtures, and then provide
the Django-based services.
"""
global the_proc, the_log, the_err
command= ["/Library/Frameworks/Python.framework/Versions/2.6/bin/python2.6", "-m", "web.manage", "testserver",
'--addrport=18000', '--settings=web.settings',
'--noinput', '--verbosity=1',
'example837.json',
]
log_file= 'testserver.log'
err_file= 'testserver.err'
logger.info( '{0} >{1} 2>{2}'.format( ' '.join( command ), log_file, err_file ) )
the_log= open( log_file, 'w', 0 )
the_err= open( err_file, 'w', 0 )
the_proc = subprocess.Popen(command, shell=False, stdout=the_log, stderr=the_err)
time.sleep(6) # Wait for fixtures to load
status= the_proc.poll()
logger.info( 'PID %d, status %r', the_proc.pid, status )
logger.info( datetime.datetime.now() )
def tearDownModule():
"""Kill the server process."""
global the_proc
logger.info( "Stopping server" )
the_proc.kill()
logger.debug( "Waiting for %d to finally exit", the_proc.pid )
the_proc.wait()
logger.info( "PID %d, status %r", the_proc.pid, the_proc.returncode )
the_log.close()
the_err.close()
for f, p in (the_log, 'log>'), (the_err, 'err>'):
print()
with open( f.name, 'r' ) as source:
for line in source:
print( p, line, end='' )
print()
if __name__ == "__main__":
logging.basicConfig(
stream=sys.stderr,
level=logging.DEBUG,
)
if sys.version_info[:2] <= ( 2, 6 ):
#Python2.6 work-around
setUpModule()
tests= unittest.defaultTestLoader.loadTestsFromModule(__import__('__main__'))
result= unittest.TextTestRunner().run( tests )
tearDownModule()
sys.exit(not result.wasSuccessful())
#Python2.7
unittest.main()
| 2,465 | 0 | 104 |
ed83f26f4982188dfda254ff2bee2fbae7eae451 | 507 | py | Python | libs/core/cms/api/migrations/0015_auto_20190321_1017.py | myog-io/WebDjangular | 73d3c40aa449eec5acc59d4493ee94059bddabbd | [
"MIT"
] | 1 | 2018-09-14T15:17:19.000Z | 2018-09-14T15:17:19.000Z | libs/core/cms/api/migrations/0015_auto_20190321_1017.py | MyOwnGamesLLC/WebDjangular | 73d3c40aa449eec5acc59d4493ee94059bddabbd | [
"MIT"
] | 41 | 2018-12-16T16:58:54.000Z | 2019-02-22T20:08:58.000Z | libs/core/cms/api/migrations/0015_auto_20190321_1017.py | myog-io/WebDjangular | 73d3c40aa449eec5acc59d4493ee94059bddabbd | [
"MIT"
] | 1 | 2019-12-10T09:32:49.000Z | 2019-12-10T09:32:49.000Z | # Generated by Django 2.1.7 on 2019-03-21 13:17
from django.db import migrations, models
| 22.043478 | 74 | 0.571992 | # Generated by Django 2.1.7 on 2019-03-21 13:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cms', '0014_auto_20190316_0855'),
]
operations = [
migrations.RemoveField(
model_name='menuitem',
name='alt',
),
migrations.AlterField(
model_name='menuitem',
name='url',
field=models.CharField(blank=True, max_length=256, null=True),
),
]
| 0 | 393 | 23 |
b635f0b214c294a4e5c938f85314753a4f657db5 | 760 | py | Python | astro/units.py | cristobal-sifon/astro | e3ca00bebc5dbdd33e2df8df30191cc54c17f722 | [
"MIT"
] | null | null | null | astro/units.py | cristobal-sifon/astro | e3ca00bebc5dbdd33e2df8df30191cc54c17f722 | [
"MIT"
] | 3 | 2018-01-28T18:27:35.000Z | 2019-10-11T13:29:11.000Z | astro/units.py | cristobal-sifon/astro | e3ca00bebc5dbdd33e2df8df30191cc54c17f722 | [
"MIT"
] | 1 | 2021-12-06T14:29:13.000Z | 2021-12-06T14:29:13.000Z | # -*- coding: utf-8 -*-
"""
Conversion factors and units, in cgs. To convert a given value, in cgs, to the desired units,
divide by that unit.
Example:
The speed of light in km·s⁻¹ would be
c_km = c / km
"""
# length
cm = 1.
m = 1e2
km = 1e5
AU = 1.4959787066e13
ly = 9.460730472e17
pc = 3.0856776e18
kpc = 1e3 * pc
Mpc = 1e6 * pc
Gpc = 1e9 * pc
mm = 1e-1
micron = 1e-4
um = micron
nm = 1e-7
angstrom = 1e-8
# mass
Msun = 1.9891e33
g = 1.
kg = 1e3
mg = 1e-3
# time
s = 1.
hr = 3600.
yr_Sidereal = 3.1558145e7
yr_Tropical = 3.155692519e7
yr_Gregorian = 3.1556952e7
yr_Julian = 3.15576e7
yr = yr_Julian
Myr = 1e6 * yr
Gyr = 1e9 * yr
# energy
eV = 1.6021765e-12 # one electron-volt, in erg
keV = 1e3 * eV
J = 1e-7 # one Joule, in erg | 16.888889 | 93 | 0.628947 | # -*- coding: utf-8 -*-
"""
Conversion factors and units, in cgs. To convert a given value, in cgs, to the desired units,
divide by that unit.
Example:
The speed of light in km·s⁻¹ would be
c_km = c / km
"""
# length
cm = 1.
m = 1e2
km = 1e5
AU = 1.4959787066e13
ly = 9.460730472e17
pc = 3.0856776e18
kpc = 1e3 * pc
Mpc = 1e6 * pc
Gpc = 1e9 * pc
mm = 1e-1
micron = 1e-4
um = micron
nm = 1e-7
angstrom = 1e-8
# mass
Msun = 1.9891e33
g = 1.
kg = 1e3
mg = 1e-3
# time
s = 1.
hr = 3600.
yr_Sidereal = 3.1558145e7
yr_Tropical = 3.155692519e7
yr_Gregorian = 3.1556952e7
yr_Julian = 3.15576e7
yr = yr_Julian
Myr = 1e6 * yr
Gyr = 1e9 * yr
# energy
eV = 1.6021765e-12 # one electron-volt, in erg
keV = 1e3 * eV
J = 1e-7 # one Joule, in erg | 0 | 0 | 0 |
7154d3b8131cd6b3d365f8e65bab08b014365ee7 | 645 | py | Python | exp/noisy_hd.py | dataiku-research/paper_ial_2021 | f860b6eb2d8471bc23e44d282e50c4deaf0813d9 | [
"Apache-2.0"
] | 1 | 2021-09-06T11:06:07.000Z | 2021-09-06T11:06:07.000Z | exp/noisy_ld.py | dataiku-research/paper_ial_2021 | f860b6eb2d8471bc23e44d282e50c4deaf0813d9 | [
"Apache-2.0"
] | null | null | null | exp/noisy_ld.py | dataiku-research/paper_ial_2021 | f860b6eb2d8471bc23e44d282e50c4deaf0813d9 | [
"Apache-2.0"
] | null | null | null | import openml
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
| 19.545455 | 85 | 0.631008 | import openml
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
def get_config():
return {
'start_size': 20,
'batches': [10] * 19,
'n_iter': 20,
'stop_size': 220,
#'oracle_error': .1
}
def get_dataset():
X = np.load('X.npy')
y = np.load('y.npy')
return {
'X': X,
'y': y
}
def get_clf():
return RandomForestClassifier(n_estimators=100, max_depth=10, min_samples_leaf=1)
def fit_clf(clf, tx, ty):
return clf.fit(tx, ty)
| 355 | 0 | 92 |
bd1439993c7b2de1eaa84498533416500afc6a36 | 3,276 | py | Python | bids/config.py | DimitriPapadopoulos/pybids | 9449fdc319c4bdff4ed9aa1b299964352f394d56 | [
"MIT"
] | 101 | 2018-10-11T07:49:03.000Z | 2022-03-13T10:24:29.000Z | bids/config.py | DimitriPapadopoulos/pybids | 9449fdc319c4bdff4ed9aa1b299964352f394d56 | [
"MIT"
] | 572 | 2018-09-26T20:13:24.000Z | 2022-03-30T00:03:15.000Z | bids/config.py | Remi-Gau/pybids | 0f460a8f31eb2ea829c17c85720a1effcfaaf791 | [
"MIT"
] | 61 | 2018-09-26T21:12:13.000Z | 2022-02-15T00:51:45.000Z | ''' Utilities for manipulating package-level settings. '''
import json
from pathlib import Path
import os
from io import open
import warnings
from .utils import listify
__all__ = ['set_option', 'set_options', 'get_option']
_config_name = 'pybids_config.json'
conf_path = str(Path(__file__).absolute().parent.joinpath('layout', 'config', '{}.json'))
_default_settings = {
'config_paths': {
name: conf_path.format(name) for name in ['bids', 'derivatives']},
# XXX 0.16: Remove
'extension_initial_dot': True,
}
def set_option(key, value):
""" Set a package-wide option.
Args:
key (str): The name of the option to set.
value (object): The new value of the option.
"""
if key not in _settings:
raise ValueError("Invalid pybids setting: '%s'" % key)
# XXX 0.16: Remove
elif key == "extension_initial_dot":
if value is not True:
raise ValueError(f"Cannot set {key!r} to {value!r} as of pybids 0.14. "
"This setting is always True, and will be removed "
"entirely in 0.16.")
warnings.warn("Setting 'extension_initial_dot' will be removed in pybids 0.16.",
FutureWarning)
_settings[key] = value
def set_options(**kwargs):
""" Set multiple package-wide options.
Args:
kwargs: Keyword arguments to pass onto set_option().
"""
for k, v in kwargs.items():
set_option(k, v)
def get_option(key):
""" Retrieve the current value of a package-wide option.
Args:
key (str): The name of the option to retrieve.
"""
if key not in _settings:
raise ValueError("Invalid pybids setting: '%s'" % key)
return _settings[key]
def from_file(filenames, error_on_missing=True):
""" Load package-wide settings from specified file(s).
Args:
filenames (str, list): Filename or list of filenames containing JSON
dictionary of settings.
error_on_missing (bool): If True, raises an error if a file doesn't
exist.
"""
filenames = listify(filenames)
for f in filenames:
if Path(f).exists():
settings = json.loads(Path(f).read_text(encoding='utf-8'))
_settings.update(settings)
elif error_on_missing:
raise ValueError("Config file '%s' does not exist." % f)
def reset_options(update_from_file=False):
""" Reset all options to the package defaults.
Args:
update_from_file (bool): If True, re-applies any config files found in
standard locations.
"""
global _settings
_settings = _default_settings.copy()
if update_from_file:
_update_from_standard_locations()
def _update_from_standard_locations():
""" Check standard locations for config files and update settings if found.
Order is user's home dir, environment variable ($PYBIDS_CONFIG), and then
current directory--with later files taking precedence over earlier ones.
"""
locs = [
Path.home() / _config_name,
Path('.') / _config_name
]
if 'PYBIDS_CONFIG' in os.environ:
locs.insert(1, os.environ['PYBIDS_CONFIG'])
from_file(locs, False)
_settings = {}
reset_options(True)
| 28.99115 | 89 | 0.637363 | ''' Utilities for manipulating package-level settings. '''
import json
from pathlib import Path
import os
from io import open
import warnings
from .utils import listify
__all__ = ['set_option', 'set_options', 'get_option']
_config_name = 'pybids_config.json'
conf_path = str(Path(__file__).absolute().parent.joinpath('layout', 'config', '{}.json'))
_default_settings = {
'config_paths': {
name: conf_path.format(name) for name in ['bids', 'derivatives']},
# XXX 0.16: Remove
'extension_initial_dot': True,
}
def set_option(key, value):
""" Set a package-wide option.
Args:
key (str): The name of the option to set.
value (object): The new value of the option.
"""
if key not in _settings:
raise ValueError("Invalid pybids setting: '%s'" % key)
# XXX 0.16: Remove
elif key == "extension_initial_dot":
if value is not True:
raise ValueError(f"Cannot set {key!r} to {value!r} as of pybids 0.14. "
"This setting is always True, and will be removed "
"entirely in 0.16.")
warnings.warn("Setting 'extension_initial_dot' will be removed in pybids 0.16.",
FutureWarning)
_settings[key] = value
def set_options(**kwargs):
""" Set multiple package-wide options.
Args:
kwargs: Keyword arguments to pass onto set_option().
"""
for k, v in kwargs.items():
set_option(k, v)
def get_option(key):
""" Retrieve the current value of a package-wide option.
Args:
key (str): The name of the option to retrieve.
"""
if key not in _settings:
raise ValueError("Invalid pybids setting: '%s'" % key)
return _settings[key]
def from_file(filenames, error_on_missing=True):
""" Load package-wide settings from specified file(s).
Args:
filenames (str, list): Filename or list of filenames containing JSON
dictionary of settings.
error_on_missing (bool): If True, raises an error if a file doesn't
exist.
"""
filenames = listify(filenames)
for f in filenames:
if Path(f).exists():
settings = json.loads(Path(f).read_text(encoding='utf-8'))
_settings.update(settings)
elif error_on_missing:
raise ValueError("Config file '%s' does not exist." % f)
def reset_options(update_from_file=False):
""" Reset all options to the package defaults.
Args:
update_from_file (bool): If True, re-applies any config files found in
standard locations.
"""
global _settings
_settings = _default_settings.copy()
if update_from_file:
_update_from_standard_locations()
def _update_from_standard_locations():
""" Check standard locations for config files and update settings if found.
Order is user's home dir, environment variable ($PYBIDS_CONFIG), and then
current directory--with later files taking precedence over earlier ones.
"""
locs = [
Path.home() / _config_name,
Path('.') / _config_name
]
if 'PYBIDS_CONFIG' in os.environ:
locs.insert(1, os.environ['PYBIDS_CONFIG'])
from_file(locs, False)
_settings = {}
reset_options(True)
| 0 | 0 | 0 |
82b4c9eb61538b0dde49e019fd485fb5be6c2b4a | 221 | py | Python | broca/tokenize/keyword/__init__.py | ftzeng/broca | 7236dcf54edc0a4a54a55eb93be30800910667e7 | [
"MIT"
] | 50 | 2015-07-25T01:15:50.000Z | 2015-11-29T11:19:25.000Z | broca/tokenize/keyword/__init__.py | publicscience/broca | 7236dcf54edc0a4a54a55eb93be30800910667e7 | [
"MIT"
] | 2 | 2016-06-22T16:34:45.000Z | 2017-03-20T16:29:56.000Z | broca/tokenize/keyword/__init__.py | publicscience/broca | 7236dcf54edc0a4a54a55eb93be30800910667e7 | [
"MIT"
] | 7 | 2016-01-22T13:33:27.000Z | 2021-05-21T07:58:35.000Z | """
Keyword extraction methods.
These accept lists of strings as arguments.
"""
from .pos import POSTokenizer
from .rake import RAKETokenizer
from .apriori import AprioriTokenizer
from .overkill import OverkillTokenizer
| 22.1 | 43 | 0.81448 | """
Keyword extraction methods.
These accept lists of strings as arguments.
"""
from .pos import POSTokenizer
from .rake import RAKETokenizer
from .apriori import AprioriTokenizer
from .overkill import OverkillTokenizer
| 0 | 0 | 0 |
6fcdd38766391ed20a91e9da9df553aa6d10c9ce | 16,251 | py | Python | pm4pyws/handlers/xes/xes.py | ehbasouri/pm4py-ws | 9bf5f88848a4aa2873bae86af95d37f64ae1dde1 | [
"Apache-2.0"
] | null | null | null | pm4pyws/handlers/xes/xes.py | ehbasouri/pm4py-ws | 9bf5f88848a4aa2873bae86af95d37f64ae1dde1 | [
"Apache-2.0"
] | null | null | null | pm4pyws/handlers/xes/xes.py | ehbasouri/pm4py-ws | 9bf5f88848a4aa2873bae86af95d37f64ae1dde1 | [
"Apache-2.0"
] | null | null | null | from pm4py.algo.filtering.log.attributes import attributes_filter
from pm4py.algo.filtering.log.end_activities import end_activities_filter
from pm4py.algo.filtering.log.start_activities import start_activities_filter
from pm4py.algo.filtering.log.variants import variants_filter
from pm4py.objects.conversion.log import factory as conversion_factory
from pm4py.objects.log.exporter.xes.versions.etree_xes_exp import export_log_as_string
from pm4py.objects.log.importer.xes import factory as xes_importer
from pm4py.objects.log.util import insert_classifier
from pm4py.objects.log.util import xes
from pm4py.statistics.traces.log import case_statistics
from pm4py.util import constants
from pm4pyws.handlers.xes.alignments import get_align
from pm4pyws.handlers.xes.cases import variants
from pm4pyws.handlers.xes.ctmc import transient
from pm4pyws.handlers.xes.filtering import factory as filtering_factory
from pm4pyws.handlers.xes.process_schema import factory as process_schema_factory
from pm4pyws.handlers.xes.sna import get_sna as sna_obtainer
from pm4pyws.handlers.xes.statistics import events_per_time, case_duration
from pm4pyws.util import casestats
| 33.233129 | 116 | 0.6239 | from pm4py.algo.filtering.log.attributes import attributes_filter
from pm4py.algo.filtering.log.end_activities import end_activities_filter
from pm4py.algo.filtering.log.start_activities import start_activities_filter
from pm4py.algo.filtering.log.variants import variants_filter
from pm4py.objects.conversion.log import factory as conversion_factory
from pm4py.objects.log.exporter.xes.versions.etree_xes_exp import export_log_as_string
from pm4py.objects.log.importer.xes import factory as xes_importer
from pm4py.objects.log.util import insert_classifier
from pm4py.objects.log.util import xes
from pm4py.statistics.traces.log import case_statistics
from pm4py.util import constants
from pm4pyws.handlers.xes.alignments import get_align
from pm4pyws.handlers.xes.cases import variants
from pm4pyws.handlers.xes.ctmc import transient
from pm4pyws.handlers.xes.filtering import factory as filtering_factory
from pm4pyws.handlers.xes.process_schema import factory as process_schema_factory
from pm4pyws.handlers.xes.sna import get_sna as sna_obtainer
from pm4pyws.handlers.xes.statistics import events_per_time, case_duration
from pm4pyws.util import casestats
class XesHandler(object):
def __init__(self):
"""
Constructor (set all variables to None)
"""
# sets the current log to None
self.log = None
# sets the first ancestor (in the filtering chain) to None
self.first_ancestor = self
# sets the last ancestor (in the filtering chain) to None
self.last_ancestor = self
# sets the filter chain
self.filters_chain = []
# classifier
self.activity_key = None
# variants
self.variants = None
# number of variants
self.variants_number = 0
# number of cases
self.cases_number = 0
# number of events
self.events_number = 0
def get_filters_chain_repr(self):
"""
Gets the representation of the current filters chain
Returns
-----------
stri
Representation of the current filters chain
"""
return str(self.filters_chain)
def copy_from_ancestor(self, ancestor):
"""
Copies from ancestor
Parameters
-------------
ancestor
Ancestor
"""
self.first_ancestor = ancestor.first_ancestor
self.last_ancestor = ancestor
#self.filters_chain = ancestor.filters_chain
self.log = ancestor.log
self.activity_key = ancestor.activity_key
def remove_filter(self, filter, all_filters):
"""
Removes a filter from the current handler
Parameters
-----------
filter
Filter to remove
all_filters
All the filters that are still there
Returns
------------
new_handler
New handler
"""
new_handler = XesHandler()
new_handler.copy_from_ancestor(self.first_ancestor)
for filter in all_filters:
new_handler.add_filter0(filter)
new_handler.build_variants()
new_handler.calculate_events_number()
new_handler.calculate_cases_number()
new_handler.calculate_variants_number()
return new_handler
def add_filter(self, filter, all_filters):
"""
Adds a filter to the current handler
Parameters
-----------
filter
Filter to add
all_filters
All the filters that were added
Returns
------------
new_handler
New handler
"""
new_handler = XesHandler()
new_handler.copy_from_ancestor(self.first_ancestor)
for filter in all_filters:
new_handler.add_filter0(filter)
new_handler.build_variants()
new_handler.calculate_events_number()
new_handler.calculate_cases_number()
new_handler.calculate_variants_number()
return new_handler
def add_filter0(self, filter):
"""
Technical, void, method to add a filter
Parameters
------------
filter
Filter to add
"""
parameters = {}
parameters[constants.PARAMETER_CONSTANT_ACTIVITY_KEY] = self.activity_key
parameters[constants.PARAMETER_CONSTANT_ATTRIBUTE_KEY] = self.activity_key
#parameters["variants"] = self.variants
self.log = filtering_factory.apply(self.log, filter, parameters=parameters)
self.filters_chain.append(filter)
def build_from_path(self, path, parameters=None):
"""
Builds the handler from the specified path to XES file
Parameters
-------------
path
Path to the log file
parameters
Parameters of the algorithm
"""
if parameters is None:
parameters = {}
try:
# try faster non standard importer
self.log = xes_importer.apply(path, variant="nonstandard")
if len(self.log) == 0:
# non standard imported failed
self.log = xes_importer.apply(path)
except:
# revert to classic importer
self.log = xes_importer.apply(path)
self.log, classifier_key = insert_classifier.search_act_class_attr(self.log,
force_activity_transition_insertion=True)
self.activity_key = xes.DEFAULT_NAME_KEY
if classifier_key is not None:
self.activity_key = classifier_key
self.build_variants()
self.calculate_variants_number()
self.calculate_cases_number()
self.calculate_events_number()
def build_variants(self, parameters=None):
"""
Build the variants of the event log
Parameters
------------
parameters
Possible parameters of the method
"""
if parameters is None:
parameters = {}
parameters[constants.PARAMETER_CONSTANT_ACTIVITY_KEY] = self.activity_key
parameters[constants.PARAMETER_CONSTANT_ATTRIBUTE_KEY] = self.activity_key
self.variants = variants_filter.get_variants(self.log, parameters=parameters)
def calculate_variants_number(self):
"""
Calculate the number of variants in this log
"""
self.variants_number = len(self.variants.keys())
def calculate_cases_number(self):
"""
Calculate the number of cases in this log
"""
self.cases_number = len(self.log)
def calculate_events_number(self):
"""
Calculate the number of events in this log
"""
self.events_number = sum([len(case) for case in self.log])
def get_schema(self, variant=process_schema_factory.DFG_FREQ, parameters=None):
"""
Gets the process schema in the specified variant and with the specified parameters
Parameters
-------------
variant
Variant of the algorithm
parameters
Parameters of the algorithm
Returns
------------
schema
Process schema (in base64)
model
Model file possibly describing the process schema
format
Format of the process schema (e.g. PNML)
"""
if parameters is None:
parameters = {}
parameters[constants.PARAMETER_CONSTANT_ACTIVITY_KEY] = self.activity_key
parameters[constants.PARAMETER_CONSTANT_ATTRIBUTE_KEY] = self.activity_key
return process_schema_factory.apply(self.log, variant=variant, parameters=parameters)
def get_case_duration_svg(self, parameters=None):
"""
Gets the SVG of the case duration
Parameters
------------
parameters
Parameters of the algorithm
Returns
-----------
graph
Case duration graph (expressed in Base 64)
"""
if parameters is None:
parameters = {}
parameters[constants.PARAMETER_CONSTANT_ACTIVITY_KEY] = self.activity_key
parameters[constants.PARAMETER_CONSTANT_ATTRIBUTE_KEY] = self.activity_key
return case_duration.get_case_duration_svg(self.log, parameters=parameters)
def get_events_per_time_svg(self, parameters=None):
"""
Gets the SVG of the events per time
Parameters
-------------
parameters
Parameters of the algorithm
Returns
-------------
graph
Events per time graph (expressed in Base 64)
"""
if parameters is None:
parameters = {}
parameters[constants.PARAMETER_CONSTANT_ACTIVITY_KEY] = self.activity_key
parameters[constants.PARAMETER_CONSTANT_ATTRIBUTE_KEY] = self.activity_key
return events_per_time.get_events_per_time_svg(self.log, parameters=parameters)
def get_variant_statistics(self, parameters=None):
"""
Gets the variants of the given log
Parameters
--------------
parameters
Parameters of the algorithm
Returns
--------------
variants
Variants of the log
"""
if parameters is None:
parameters = {}
parameters[constants.PARAMETER_CONSTANT_ACTIVITY_KEY] = self.activity_key
parameters[constants.PARAMETER_CONSTANT_ATTRIBUTE_KEY] = self.activity_key
parameters["variants"] = self.variants
return variants.get_statistics(self.log, parameters=parameters)
def get_sna(self, variant="handover", parameters=None):
"""
Gets a Social Network representation from a given log
Parameters
-------------
variant
Variant of the algorithm (metric to use)
parameters
Parameters of the algorithm (e.g. arc threshold)
Returns
------------
sna
SNA representation
"""
if parameters is None:
parameters = {}
parameters[constants.PARAMETER_CONSTANT_ACTIVITY_KEY] = self.activity_key
parameters[constants.PARAMETER_CONSTANT_ATTRIBUTE_KEY] = self.activity_key
return sna_obtainer.apply(self.log, variant=variant, parameters=parameters)
def get_transient(self, delay, parameters=None):
"""
Perform CTMC simulation on a log
Parameters
-------------
delay
Delay
parameters
Possible parameters of the algorithm
Returns
-------------
graph
Case duration graph
"""
if parameters is None:
parameters = {}
parameters[constants.PARAMETER_CONSTANT_ACTIVITY_KEY] = self.activity_key
parameters[constants.PARAMETER_CONSTANT_ATTRIBUTE_KEY] = self.activity_key
return transient.apply(self.log, delay, parameters=parameters)
def get_case_statistics(self, parameters=None):
"""
Gets the statistics on cases
Parameters
-------------
parameters
Possible parameters of the algorithm
Returns
-------------
list_cases
List of cases
"""
if parameters is None:
parameters = {}
parameters[constants.PARAMETER_CONSTANT_ACTIVITY_KEY] = self.activity_key
parameters[constants.PARAMETER_CONSTANT_ATTRIBUTE_KEY] = self.activity_key
parameters["max_ret_cases"] = 500
parameters["sort_by_index"] = parameters["sort_by_index"] if "sort_by_index" in parameters else 0
parameters["sort_ascending"] = parameters["sort_ascending"] if "sort_ascending" in parameters else False
parameters["variants"] = self.variants
if "variant" in parameters:
filtered_log = variants_filter.apply(self.log, [parameters["variant"]], parameters=parameters)
return casestats.include_key_in_value_list(
case_statistics.get_cases_description(filtered_log, parameters=parameters))
else:
return casestats.include_key_in_value_list(
case_statistics.get_cases_description(self.log, parameters=parameters))
def get_events(self, caseid, parameters=None):
"""
Gets the events of a case
Parameters
-------------
caseid
Case ID
parameters
Parameters of the algorithm
Returns
------------
list_events
Events belonging to the case
"""
if parameters is None:
parameters = {}
parameters[constants.PARAMETER_CONSTANT_ACTIVITY_KEY] = self.activity_key
parameters[constants.PARAMETER_CONSTANT_ATTRIBUTE_KEY] = self.activity_key
return case_statistics.get_events(self.log, caseid, parameters=parameters)
def get_alignments(self, petri_string, parameters=None):
"""
Gets the alignments from a string
Parameters
-------------
petri_string
Petri string
parameters
Parameters of the algorithm
Returns
-------------
petri
SVG of the decorated Petri
table
SVG of the decorated table
"""
if parameters is None:
parameters = {}
parameters[constants.PARAMETER_CONSTANT_ACTIVITY_KEY] = self.activity_key
parameters[constants.PARAMETER_CONSTANT_ATTRIBUTE_KEY] = self.activity_key
return get_align.perform_alignments(self.log, petri_string, parameters=parameters)
def download_xes_log(self):
"""
Downloads the XES log as string
"""
log_string = export_log_as_string(self.log)
return log_string
def download_csv_log(self):
"""
Downloads the CSV log as string
"""
dataframe = conversion_factory.apply(self.log, variant=conversion_factory.TO_DATAFRAME)
log_string = dataframe.to_string()
return log_string
def get_start_activities(self, parameters=None):
"""
Gets the start activities from the log
Returns
------------
start_activities_dict
Dictionary of start activities
"""
if parameters is None:
parameters = {}
parameters[constants.PARAMETER_CONSTANT_ACTIVITY_KEY] = self.activity_key
parameters[constants.PARAMETER_CONSTANT_ATTRIBUTE_KEY] = self.activity_key
return start_activities_filter.get_start_activities(self.log, parameters=parameters)
def get_end_activities(self, parameters=None):
"""
Gets the end activities from the log
Returns
-------------
end_activities_dict
Dictionary of end activities
"""
if parameters is None:
parameters = {}
parameters[constants.PARAMETER_CONSTANT_ACTIVITY_KEY] = self.activity_key
parameters[constants.PARAMETER_CONSTANT_ATTRIBUTE_KEY] = self.activity_key
return end_activities_filter.get_end_activities(self.log, parameters=parameters)
def get_attributes_list(self, parameters=None):
"""
Gets the attributes list from the log
Returns
-------------
attributes_list
List of attributes
"""
return attributes_filter.get_all_event_attributes_from_log(self.log)
def get_attribute_values(self, attribute_key, parameters=None):
"""
Gets the attribute values from the log
Returns
-------------
attribute_values
List of values
"""
if parameters is None:
parameters = {}
parameters[constants.PARAMETER_CONSTANT_ACTIVITY_KEY] = self.activity_key
parameters[constants.PARAMETER_CONSTANT_ATTRIBUTE_KEY] = attribute_key
initial_dict = attributes_filter.get_attribute_values(self.log, attribute_key, parameters=parameters)
return_dict = {}
for key in initial_dict:
return_dict[str(key)] = int(initial_dict[key])
return return_dict
| 0 | 15,065 | 23 |
01ec0b3d1d4096d3b37ba06c1d0bd436c4d3b83d | 303 | py | Python | 01_strategy/duck/lib/fly_behaviors.py | denzow/practice-design-pattern | 141d59c51375e36769a73b6ff135a8afae64b664 | [
"MIT"
] | 1 | 2018-08-15T08:07:58.000Z | 2018-08-15T08:07:58.000Z | 01_strategy/duck/lib/fly_behaviors.py | denzow/practice-design-pattern | 141d59c51375e36769a73b6ff135a8afae64b664 | [
"MIT"
] | null | null | null | 01_strategy/duck/lib/fly_behaviors.py | denzow/practice-design-pattern | 141d59c51375e36769a73b6ff135a8afae64b664 | [
"MIT"
] | null | null | null | # coding: utf-8
from abc import ABCMeta, abstractmethod
| 13.173913 | 39 | 0.650165 | # coding: utf-8
from abc import ABCMeta, abstractmethod
class FlyBehavior(metaclass=ABCMeta):
@abstractmethod
def fly(self):
pass
class FlyWithWings(FlyBehavior):
def fly(self):
print('ばっさばっさ')
class FlyNoWay(FlyBehavior):
    """Concrete strategy: this duck cannot fly at all."""

    def fly(self):
        print('飛べない豚')
| 61 | 81 | 123 |
2b51b3261d36e5e14448058fed17cc77ecbb1371 | 1,384 | py | Python | emailcrawler/spiders/emailspider.py | seth2000/scrapyExample | 8eb47195d0d4dd4a174d10b358c2bd7f1fa67496 | [
"MIT"
] | null | null | null | emailcrawler/spiders/emailspider.py | seth2000/scrapyExample | 8eb47195d0d4dd4a174d10b358c2bd7f1fa67496 | [
"MIT"
] | null | null | null | emailcrawler/spiders/emailspider.py | seth2000/scrapyExample | 8eb47195d0d4dd4a174d10b358c2bd7f1fa67496 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import re
import scrapy
from scrapy.crawler import CrawlerProcess
from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
| 33.756098 | 75 | 0.632948 | # -*- coding: utf-8 -*-
import re
import scrapy
from scrapy.crawler import CrawlerProcess
from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
class EmailspiderSpider(scrapy.Spider):
    """Crawl outward from the start URL and scrape e-mail addresses and phone numbers."""
    name = 'emailspider'
    allowed_domains = ['']
    start_urls = ['https://www.asx.com.au/contact/#/']

    def start_requests(self):
        """Seed the crawl with one request per configured start URL."""
        for start_url in self.start_urls:
            yield scrapy.Request(start_url)

    def parse(self, response):
        """Queue every outgoing link (plus the page itself) for scraping."""
        targets = [str(link.url) for link in LxmlLinkExtractor(allow=()).extract_links(response)]
        targets.append(str(response.url))
        for target in targets:
            yield scrapy.Request(url=target, callback=self.parse_email, dont_filter=True)

    def parse_email(self, response):
        """Emit the contact details found on a single page."""
        page_source = response.text
        yield {
            "url": response.url,
            "emails": self.extract_email(page_source),
            "phone numbers": self.extract_phone_number(page_source),
        }

    def extract_email(self, html_as_str):
        """Return every e-mail-like substring in the raw page source."""
        return re.findall(r'[\w\.-]+@[\w\.-]+', html_as_str)

    def extract_phone_number(self, html_as_str):
        """Return every phone-number-like substring (+NN country code form)."""
        return re.findall(r'\+\d{2}\s?0?\d{10}', html_as_str)
| 950 | 260 | 23 |
e6cd6c80307fa4e80b5fc81d74610ff4a1853c06 | 7,324 | py | Python | wildfireassessment/test.py | aalten77/wildfireassessment | 0d4f1639b443350b50068e154e845f1abafcb49f | [
"MIT"
] | null | null | null | wildfireassessment/test.py | aalten77/wildfireassessment | 0d4f1639b443350b50068e154e845f1abafcb49f | [
"MIT"
] | null | null | null | wildfireassessment/test.py | aalten77/wildfireassessment | 0d4f1639b443350b50068e154e845f1abafcb49f | [
"MIT"
] | null | null | null | from wildfireassessment.ops import * #my package
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from skimage import morphology
from skimage.transform import resize
import pandas as pd
import geopandas as gpd
import pickle
from sklearn.impute import SimpleImputer
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB, BernoulliNB
from sklearn import linear_model
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn import svm
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import confusion_matrix, recall_score, precision_score, accuracy_score, f1_score
from sklearn.externals import joblib
from rasterstats import zonal_stats
import fiona
from joblib import Parallel, delayed
import multiprocessing
import time
"""
def writeRasters():
#read in filepaths for data
print("Reading filepaths...")
filepath_post = Path("./data/Paradise/post")
filepath_pre = Path("./data/Paradise/pre")
#WorldView Post/Pre
fps_wv_post = sorted(list(filepath_post.glob("2*_clip.tif")))
fps_wv_pre = sorted(list(filepath_pre.glob("2*_clip.tif")))
#WorldView Post/Pre
fps_sent2_post = sorted(list((filepath_post / "clippedB08s").glob("B08_*.tif")))
fps_sent2_pre = sorted(list((filepath_pre / "clippedB08s").glob("B08_*.tif")))
print("Loading Model")
#LOAD model
rf_model = joblib.load(open("models/rf_grid_bin_precision.pkl", 'rb'))
print("Start reading images")
for i in range(len(fps_wv_post)):
print("Reading RGB...")
raster_src_post, rgb_post = readRGBImg(fps_wv_post[i])
raster_src_pre, rgb_pre = readRGBImg(fps_wv_pre[i])
print("Reading S2 B8...")
raster_src_post_b08, b08_post = readOneImg(fps_sent2_post[i])
raster_src_pre_b08, b08_pre = readOneImg(fps_sent2_pre[i])
print("Resizing B8 images")
b08_upscaled_post = resize(b08_post, raster_src_post.shape, anti_aliasing=True)
b08_upscaled_post = b08_upscaled_post * 255
b08_upscaled_post = b08_upscaled_post.astype(rasterio.uint8)
b08_upscaled_pre = resize(b08_pre, raster_src_pre.shape, anti_aliasing=True)
b08_upscaled_pre = b08_upscaled_pre * 255
b08_upscaled_pre = b08_upscaled_pre.astype(rasterio.uint8)
print("unravel rgb, b08")
#unravel
rgb_rav_post = {0 : rgb_post[:,:,0].ravel().astype(float),
1 : rgb_post[:,:,1].ravel().astype(float),
2 : rgb_post[:,:,2].ravel().astype(float)}
rgb_rav_pre = {0 : rgb_pre[:,:,0].ravel().astype(float),
1 : rgb_pre[:,:,1].ravel().astype(float),
2 : rgb_pre[:,:,2].ravel().astype(float)}
b08_rav_post = b08_upscaled_post.ravel().astype(float)
b08_rav_pre = b08_upscaled_pre.ravel().astype(float)
#release mem
b08_upscaled_post = None
b08_upscaled_pre = None
b08_post = None
b08_pre = None
rgb_pre = None
rgb_post = None
print("starting predictions with model")
def processInParallel(i):
X_chunk = makeChunkX(rgb_rav_post[2][i:i+100], rgb_rav_post[1][i:i+100], rgb_rav_post[0][i:i+100], b08_rav_post[i:i+100],
rgb_rav_pre[2][i:i+100], rgb_rav_pre[1][i:i+100], rgb_rav_pre[0][i:i+100], b08_rav_pre[i:i+100])
#impute by mean for missing values
imp = SimpleImputer(missing_values=np.nan, strategy='mean')
imp.fit(X_chunk)
X_chunk_imp = imp.transform(X_chunk)
return rf_model.predict(X_chunk_imp)
start_time = time.time()
num_cores = multiprocessing.cpu_count()
pred_y = Parallel(n_jobs=num_cores, backend="multiprocessing")(delayed(processInParallel)(i) for i in range(0, len(b08_rav_post), 100))
print("--- %s seconds ---" % (time.time() - start_time))
print("Create mask")
#create mask
pred_y_rf = np.hstack(pred_y).reshape(raster_src_post.shape)
#clean mask
pred_y_rf_clean = morphology.remove_small_holes(pred_y_rf==1, 500)
pred_y_rf_clean = morphology.remove_small_objects(pred_y_rf_clean, 500)
fileNameMask = "../results/predict_mask_rf_" + fps_wv_post[i].name.split('_')[0] + ".tif"
print("Writing image mask to path:", fileNameMask)
metadata = {
'driver': 'GTiff',
'dtype': 'uint8',
'width': raster_src_post.meta['width'],
'height': raster_src_post.meta['height'],
'count': 1,
'crs': raster_src_post.meta['crs'],
'transform': raster_src_post.meta['transform']
}
with rasterio.open(fileNameMask, 'w', **metadata) as dst:
dst.write(pred_y_rf_clean.astype(np.uint8), 1)
def computeSI(b1, b2):
return (b1-b2)/(b1+b2)
def changedSI(SI_pre, SI_post):
return SI_pre - SI_post
def makeChunkX(b, g, r, n, b_p, g_p, r_p, n_p):
SI_gb = (computeSI(g, b), computeSI(g_p, b_p)) #(post, pre)
SI_rb = (computeSI(r, b), computeSI(r_p, b_p))
SI_rg = (computeSI(r, g), computeSI(r_p, g_p))
SI_nb = (computeSI(n, b), computeSI(n_p, b_p))
SI_ng = (computeSI(n, g), computeSI(n_p, g_p))
SI_nr = (computeSI(n, r), computeSI(n_p, r_p))
dSI_gb = changedSI(SI_gb[1], SI_gb[0])
dSI_rb = changedSI(SI_rb[1], SI_rb[0])
dSI_rg = changedSI(SI_rg[1], SI_rg[0])
dSI_nb = changedSI(SI_nb[1], SI_nb[0])
dSI_ng = changedSI(SI_ng[1], SI_ng[0])
dSI_nr = changedSI(SI_nr[1], SI_nr[0])
return np.dstack((b, b_p, g, g_p, r, r_p, n, n_p,
SI_gb[0], SI_rb[0], SI_rg[0], SI_nb[0], SI_ng[0], SI_nr[0],
SI_gb[1], SI_rb[1], SI_rg[1], SI_nb[1], SI_ng[1], SI_nr[1],
dSI_nb, dSI_rg, dSI_rb, dSI_gb, dSI_nr, dSI_ng))[0]
"""
| 39.165775 | 155 | 0.63722 | from wildfireassessment.ops import * #my package
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from skimage import morphology
from skimage.transform import resize
import pandas as pd
import geopandas as gpd
import pickle
from sklearn.impute import SimpleImputer
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB, BernoulliNB
from sklearn import linear_model
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn import svm
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import confusion_matrix, recall_score, precision_score, accuracy_score, f1_score
from sklearn.externals import joblib
from rasterstats import zonal_stats
import fiona
from joblib import Parallel, delayed
import multiprocessing
import time
def labelPburns():
    """For every prediction mask, write burn-labelled copies of its matching shapefiles.

    Each polygon gets a ``pred_burn`` column holding the zonal majority of the
    binary mask; the labelled layer is saved next to the source shapefile with
    a ``_pburn`` suffix.

    NOTE(review): the loop deliberately skips the last two masks
    (original ``range(len(...) - 2)``) -- confirm this is intended.
    """
    print("Reading filepaths...")
    mask_paths = sorted(Path("./results").glob("predict*.tif"))
    print("Going through all shapefiles...")
    shape_dir = Path("./data/shapefiles/trainingData")
    for mask_path in mask_paths[:len(mask_paths) - 2]:
        scene_id = mask_path.name.split('_')[3].replace('.tif', '')
        shape_paths = sorted(shape_dir.glob("*" + scene_id + "*.shp"))
        print("reading raster")
        mask_src, mask_arr = readOneImg(mask_path)
        print("reading shapefiles for", scene_id)
        for shape_path in shape_paths:
            frame = gpd.read_file(shape_path)
            original_crs = frame.crs
            majorities = zonal_stats(frame['geometry'], mask_arr,
                                     affine=mask_src.meta['transform'],
                                     stats='majority', nodata=-999, all_touched=True)
            # Majority value per polygon becomes the predicted-burn label.
            frame['pred_burn'] = pd.DataFrame(majorities).to_numpy()
            frame = gpd.GeoDataFrame(frame.astype({"pred_burn": int}), crs=original_crs)
            out_path = shape_path.parent / Path(shape_path.name.split('.shp')[0] + "_pburn.shp")
            print("writing to path...", out_path)
            frame.to_file(out_path)
"""
def writeRasters():
#read in filepaths for data
print("Reading filepaths...")
filepath_post = Path("./data/Paradise/post")
filepath_pre = Path("./data/Paradise/pre")
#WorldView Post/Pre
fps_wv_post = sorted(list(filepath_post.glob("2*_clip.tif")))
fps_wv_pre = sorted(list(filepath_pre.glob("2*_clip.tif")))
#WorldView Post/Pre
fps_sent2_post = sorted(list((filepath_post / "clippedB08s").glob("B08_*.tif")))
fps_sent2_pre = sorted(list((filepath_pre / "clippedB08s").glob("B08_*.tif")))
print("Loading Model")
#LOAD model
rf_model = joblib.load(open("models/rf_grid_bin_precision.pkl", 'rb'))
print("Start reading images")
for i in range(len(fps_wv_post)):
print("Reading RGB...")
raster_src_post, rgb_post = readRGBImg(fps_wv_post[i])
raster_src_pre, rgb_pre = readRGBImg(fps_wv_pre[i])
print("Reading S2 B8...")
raster_src_post_b08, b08_post = readOneImg(fps_sent2_post[i])
raster_src_pre_b08, b08_pre = readOneImg(fps_sent2_pre[i])
print("Resizing B8 images")
b08_upscaled_post = resize(b08_post, raster_src_post.shape, anti_aliasing=True)
b08_upscaled_post = b08_upscaled_post * 255
b08_upscaled_post = b08_upscaled_post.astype(rasterio.uint8)
b08_upscaled_pre = resize(b08_pre, raster_src_pre.shape, anti_aliasing=True)
b08_upscaled_pre = b08_upscaled_pre * 255
b08_upscaled_pre = b08_upscaled_pre.astype(rasterio.uint8)
print("unravel rgb, b08")
#unravel
rgb_rav_post = {0 : rgb_post[:,:,0].ravel().astype(float),
1 : rgb_post[:,:,1].ravel().astype(float),
2 : rgb_post[:,:,2].ravel().astype(float)}
rgb_rav_pre = {0 : rgb_pre[:,:,0].ravel().astype(float),
1 : rgb_pre[:,:,1].ravel().astype(float),
2 : rgb_pre[:,:,2].ravel().astype(float)}
b08_rav_post = b08_upscaled_post.ravel().astype(float)
b08_rav_pre = b08_upscaled_pre.ravel().astype(float)
#release mem
b08_upscaled_post = None
b08_upscaled_pre = None
b08_post = None
b08_pre = None
rgb_pre = None
rgb_post = None
print("starting predictions with model")
def processInParallel(i):
X_chunk = makeChunkX(rgb_rav_post[2][i:i+100], rgb_rav_post[1][i:i+100], rgb_rav_post[0][i:i+100], b08_rav_post[i:i+100],
rgb_rav_pre[2][i:i+100], rgb_rav_pre[1][i:i+100], rgb_rav_pre[0][i:i+100], b08_rav_pre[i:i+100])
#impute by mean for missing values
imp = SimpleImputer(missing_values=np.nan, strategy='mean')
imp.fit(X_chunk)
X_chunk_imp = imp.transform(X_chunk)
return rf_model.predict(X_chunk_imp)
start_time = time.time()
num_cores = multiprocessing.cpu_count()
pred_y = Parallel(n_jobs=num_cores, backend="multiprocessing")(delayed(processInParallel)(i) for i in range(0, len(b08_rav_post), 100))
print("--- %s seconds ---" % (time.time() - start_time))
print("Create mask")
#create mask
pred_y_rf = np.hstack(pred_y).reshape(raster_src_post.shape)
#clean mask
pred_y_rf_clean = morphology.remove_small_holes(pred_y_rf==1, 500)
pred_y_rf_clean = morphology.remove_small_objects(pred_y_rf_clean, 500)
fileNameMask = "../results/predict_mask_rf_" + fps_wv_post[i].name.split('_')[0] + ".tif"
print("Writing image mask to path:", fileNameMask)
metadata = {
'driver': 'GTiff',
'dtype': 'uint8',
'width': raster_src_post.meta['width'],
'height': raster_src_post.meta['height'],
'count': 1,
'crs': raster_src_post.meta['crs'],
'transform': raster_src_post.meta['transform']
}
with rasterio.open(fileNameMask, 'w', **metadata) as dst:
dst.write(pred_y_rf_clean.astype(np.uint8), 1)
def computeSI(b1, b2):
return (b1-b2)/(b1+b2)
def changedSI(SI_pre, SI_post):
return SI_pre - SI_post
def makeChunkX(b, g, r, n, b_p, g_p, r_p, n_p):
SI_gb = (computeSI(g, b), computeSI(g_p, b_p)) #(post, pre)
SI_rb = (computeSI(r, b), computeSI(r_p, b_p))
SI_rg = (computeSI(r, g), computeSI(r_p, g_p))
SI_nb = (computeSI(n, b), computeSI(n_p, b_p))
SI_ng = (computeSI(n, g), computeSI(n_p, g_p))
SI_nr = (computeSI(n, r), computeSI(n_p, r_p))
dSI_gb = changedSI(SI_gb[1], SI_gb[0])
dSI_rb = changedSI(SI_rb[1], SI_rb[0])
dSI_rg = changedSI(SI_rg[1], SI_rg[0])
dSI_nb = changedSI(SI_nb[1], SI_nb[0])
dSI_ng = changedSI(SI_ng[1], SI_ng[0])
dSI_nr = changedSI(SI_nr[1], SI_nr[0])
return np.dstack((b, b_p, g, g_p, r, r_p, n, n_p,
SI_gb[0], SI_rb[0], SI_rg[0], SI_nb[0], SI_ng[0], SI_nr[0],
SI_gb[1], SI_rb[1], SI_rg[1], SI_nb[1], SI_ng[1], SI_nr[1],
dSI_nb, dSI_rg, dSI_rb, dSI_gb, dSI_nr, dSI_ng))[0]
"""
| 1,224 | 0 | 23 |
ca77a34e6b30cd8393feee1458421f616d04bc53 | 186 | py | Python | PyAirwave/__init__.py | zhuyuehui1993/PyAirwave | 690a290a3e6ef8126c1a16d6d5ac37907cdc3072 | [
"MIT"
] | 1 | 2021-11-08T02:26:17.000Z | 2021-11-08T02:26:17.000Z | PyAirwave/__init__.py | zhuyuehui1993/PyAirwave | 690a290a3e6ef8126c1a16d6d5ac37907cdc3072 | [
"MIT"
] | null | null | null | PyAirwave/__init__.py | zhuyuehui1993/PyAirwave | 690a290a3e6ef8126c1a16d6d5ac37907cdc3072 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# !/usr/bin/env python3
"""
@author: zhuyuehui
@contact: zhuyuehui02@meituan.com
@time: 2021/11/6 12:57 下午
"""
name = "PyAirwave"
from .PyAirwave import AirWave
| 16.909091 | 33 | 0.672043 | # -*- coding: utf-8 -*-
# !/usr/bin/env python3
"""
@author: zhuyuehui
@contact: zhuyuehui02@meituan.com
@time: 2021/11/6 12:57 下午
"""
name = "PyAirwave"
from .PyAirwave import AirWave
| 0 | 0 | 0 |
f3cb6bd49a8549eee2ca13a7669b29bcdaf92856 | 590 | py | Python | setup.py | nichdu/Adafruit_Python_CharLCD | 82f08d5f8d991666070e622e42816f5bf70c1965 | [
"MIT"
] | 2 | 2015-12-14T03:17:03.000Z | 2018-03-24T23:17:24.000Z | setup.py | nichdu/Adafruit_Python_CharLCD | 82f08d5f8d991666070e622e42816f5bf70c1965 | [
"MIT"
] | 1 | 2017-10-13T00:25:50.000Z | 2017-11-10T01:53:14.000Z | setup.py | nichdu/Adafruit_Python_CharLCD | 82f08d5f8d991666070e622e42816f5bf70c1965 | [
"MIT"
] | 1 | 2022-03-28T08:28:04.000Z | 2022-03-28T08:28:04.000Z | from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(name = 'Adafruit_CharLCD',
version = '1.0.0',
author = 'Tony DiCola',
author_email = 'tdicola@adafruit.com',
description = 'Library to drive character LCD display and plate.',
license = 'MIT',
url = 'https://github.com/adafruit/Adafruit_Python_CharLCD/',
dependency_links = ['https://github.com/adafruit/Adafruit_Python_GPIO/tarball/master#egg=Adafruit-GPIO-0.4.0'],
install_requires = ['Adafruit-GPIO>=0.4.0'],
packages = find_packages())
| 39.333333 | 114 | 0.705085 | from ez_setup import use_setuptools
# Bootstrap setuptools if it is not already installed (legacy ez_setup pattern).
use_setuptools()
from setuptools import setup, find_packages
# Distribution metadata for the Adafruit_CharLCD package (character-LCD driver
# for Raspberry Pi / BeagleBone); the GPIO dependency is pulled from GitHub.
setup(name = 'Adafruit_CharLCD',
      version = '1.0.0',
      author = 'Tony DiCola',
      author_email = 'tdicola@adafruit.com',
      description = 'Library to drive character LCD display and plate.',
      license = 'MIT',
      url = 'https://github.com/adafruit/Adafruit_Python_CharLCD/',
      dependency_links = ['https://github.com/adafruit/Adafruit_Python_GPIO/tarball/master#egg=Adafruit-GPIO-0.4.0'],
      install_requires = ['Adafruit-GPIO>=0.4.0'],
      packages = find_packages())
| 0 | 0 | 0 |
1e1f613430421f25217068f4b3fd9a6953bcc0d5 | 1,177 | py | Python | projects/keypad/keypad-12.py | romilly/explorer-hat-examples | e92157d647c3ba4bc719e9255e52279e7eaa9d74 | [
"MIT"
] | 3 | 2020-09-02T14:17:13.000Z | 2022-03-18T04:18:53.000Z | projects/keypad/keypad-12.py | romilly/explorer-hat-examples | e92157d647c3ba4bc719e9255e52279e7eaa9d74 | [
"MIT"
] | null | null | null | projects/keypad/keypad-12.py | romilly/explorer-hat-examples | e92157d647c3ba4bc719e9255e52279e7eaa9d74 | [
"MIT"
] | null | null | null | import explorerhat as eh
from time import sleep
CHAR_TABLE = [['1','2','3'],['4','5','6'],['7','8','9'],['*','0','#']]
while True:
ch = decode_key(key_pressed(), CHAR_TABLE)
print('%s pressed' % ch)
wait_for_release()
sleep(0.1)
| 21.4 | 70 | 0.542906 | import explorerhat as eh
from time import sleep
CHAR_TABLE = [['1','2','3'],['4','5','6'],['7','8','9'],['*','0','#']]
def prepare_column(column):
    """Select one keypad column: drive it to 5 V and pull the other two to 0 V."""
    for candidate in range(3):
        pin = eh.output[candidate]
        if candidate == column:
            pin.off()  # off() leaves the selected column at 5 V
        else:
            pin.on()   # on() pulls the column to 0 V
def all_columns_on():
    """Pull all three keypad columns low (0 V) -- the idle state."""
    for column in range(3):
        eh.output[column].on()
def check_for_key():
    """Scan the 3x4 matrix once and report (pressed, row, col).

    Returns (True, row, col) for the first key found, otherwise
    (False, 0, 0). Columns are always restored to the idle state.
    """
    for column in range(3):
        prepare_column(column)
        for row in range(4):
            sleep(0.01)  # let the line settle before sampling
            if eh.input[row].read():
                all_columns_on()
                return True, row, column
    all_columns_on()
    return False, 0, 0
def key_pressed():
    """Block until some key is pressed; return its (pressed, row, col) tuple."""
    result = check_for_key()
    while not result[0]:
        result = check_for_key()
    return result
def wait_for_release():
    """Block until no key is held down, polling every 0.1 s."""
    pressed, _, _ = check_for_key()
    while pressed:
        sleep(0.1)
        pressed, _, _ = check_for_key()
def decode_key(prc, table):
    """Map a (pressed, row, col) scan result to its character in *table*."""
    _, row_idx, col_idx = prc
    return table[row_idx][col_idx]
while True:
ch = decode_key(key_pressed(), CHAR_TABLE)
print('%s pressed' % ch)
wait_for_release()
sleep(0.1)
| 785 | 0 | 138 |
df07f31c61d1cff722dac100df336a6fb4a46aea | 479 | py | Python | data/convertCSV2JSONreizigerskilometers.py | LinseyUvA/Programmeerproject | aee51d2a77559fd5f7754fc9aad020f9949b1a68 | [
"Apache-2.0"
] | null | null | null | data/convertCSV2JSONreizigerskilometers.py | LinseyUvA/Programmeerproject | aee51d2a77559fd5f7754fc9aad020f9949b1a68 | [
"Apache-2.0"
] | null | null | null | data/convertCSV2JSONreizigerskilometers.py | LinseyUvA/Programmeerproject | aee51d2a77559fd5f7754fc9aad020f9949b1a68 | [
"Apache-2.0"
] | null | null | null | # Name: Linsey Schaap
# Student number: 11036109
"""
This script convert a csv file into a JSON format.
"""
import csv
import json
csvbestand = open("reizigerskilometers.csv", "r")
jsonbestand = open("reizigerskilometers.json", "w")
namen = ("Vervoerswijze", "Periode", "Provincie", "Afstand")
bestand = csv.DictReader(csvbestand, namen)
# Parse the CSV into JSON
out = json.dumps( [ regel for regel in bestand ] )
# Save the JSON
jsonbestand.write('{"data": ' + out + '}')
| 22.809524 | 60 | 0.699374 | # Name: Linsey Schaap
# Student number: 11036109
"""
This script convert a csv file into a JSON format.
"""
import csv
import json

# Column names used to key each CSV row (the file has no header row).
namen = ("Vervoerswijze", "Periode", "Provincie", "Afstand")

# Use context managers so both file handles are closed (and the output is
# flushed to disk) even on error; the original handles were never closed.
with open("reizigerskilometers.csv", "r") as csvbestand:
    bestand = csv.DictReader(csvbestand, namen)
    # Parse the CSV into JSON
    out = json.dumps( [ regel for regel in bestand ] )

# Save the JSON
with open("reizigerskilometers.json", "w") as jsonbestand:
    jsonbestand.write('{"data": ' + out + '}')
| 0 | 0 | 0 |
22f6324caacb20f0fb373f5a7cde45e49be2eba4 | 88 | py | Python | src/tests/__init__.py | Arseny-Tokmancev/channels-watchbot | 102edc07c9d8c306f47b6a5b8318fa0ba56534f0 | [
"MIT"
] | 1 | 2020-11-10T22:50:14.000Z | 2020-11-10T22:50:14.000Z | src/tests/__init__.py | Arseny-Tokmancev/channels-watchbot | 102edc07c9d8c306f47b6a5b8318fa0ba56534f0 | [
"MIT"
] | null | null | null | src/tests/__init__.py | Arseny-Tokmancev/channels-watchbot | 102edc07c9d8c306f47b6a5b8318fa0ba56534f0 | [
"MIT"
] | 1 | 2022-01-31T19:23:03.000Z | 2022-01-31T19:23:03.000Z | from . import data, update_handlers | 17.6 | 35 | 0.681818 | from . import data, update_handlers
def run():
data.run()
update_handlers.run() | 30 | 0 | 23 |
1a829d5a0111c4cb59be4b92c77947f0fff833c0 | 892 | py | Python | tests/support/test_variant.py | Jacobs4/pyatv | 52956adf3b79198be52cc03649f3ddeee19f9e6c | [
"MIT"
] | 532 | 2017-02-01T19:23:28.000Z | 2022-03-29T09:57:39.000Z | tests/support/test_variant.py | Jacobs4/pyatv | 52956adf3b79198be52cc03649f3ddeee19f9e6c | [
"MIT"
] | 1,639 | 2017-02-01T19:22:04.000Z | 2022-03-31T17:26:40.000Z | tests/support/test_variant.py | bdraco/pyatv | 9541d21e6101c60866d832626be97bf962774cd5 | [
"MIT"
] | 102 | 2017-02-02T01:42:13.000Z | 2022-02-26T08:49:34.000Z | """Unit tests for pyatv.protocols.mrp.variant."""
import pytest
from pyatv.support.variant import read_variant, write_variant
| 24.777778 | 61 | 0.700673 | """Unit tests for pyatv.protocols.mrp.variant."""
import pytest
from pyatv.support.variant import read_variant, write_variant
def test_read_single_byte():
    """Bytes without the continuation bit decode to their own value."""
    for raw, expected in ((b"\x00", 0x00), (b"\x35", 0x35)):
        assert read_variant(raw)[0] == expected
def test_read_multiple_bytes():
    """Multi-byte varints decode 7 bits per byte, least-significant first."""
    for raw, expected in ((b"\xb5\x44", 8757), (b"\xc5\x92\x01", 18757)):
        assert read_variant(raw)[0] == expected
def test_read_and_return_remaining_data():
    """Decoding also hands back whatever bytes follow the varint."""
    decoded, tail = read_variant(b"\xb5\x44\xca\xfe")
    assert (decoded, tail) == (8757, b"\xca\xfe")
def test_read_invalid_variant():
    """A truncated varint (continuation bit set, nothing after) must raise."""
    with pytest.raises(Exception):
        read_variant(b"\x80")
def test_write_single_byte():
    """Values below 0x80 encode as a single byte."""
    for value, expected in ((0x00, b"\x00"), (0x35, b"\x35")):
        assert write_variant(value) == expected
def test_write_multiple_bytes():
    """Larger values spill into continuation bytes."""
    for value, expected in ((8757, b"\xb5\x44"), (18757, b"\xc5\x92\x01")):
        assert write_variant(value) == expected
| 621 | 0 | 138 |
63aad93e1350667c6ea3104ec3e6a0686257c3aa | 11,263 | py | Python | pet_mk_viii/pet_potentiometer_node.py | Pet-Series/Pet-Mk-VII | 4f14d8af46d6a3f4d9838028a6ac0d3c37695ab2 | [
"MIT"
] | null | null | null | pet_mk_viii/pet_potentiometer_node.py | Pet-Series/Pet-Mk-VII | 4f14d8af46d6a3f4d9838028a6ac0d3c37695ab2 | [
"MIT"
] | 1 | 2022-03-30T20:40:19.000Z | 2022-03-30T20:40:19.000Z | pet_mk_viii/pet_potentiometer_node.py | Pet-Series/Pet-Mk-VIII | 4f14d8af46d6a3f4d9838028a6ac0d3c37695ab2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3'
# coding = utf-8
########################################################################################
##
## Maintainer: stefan.kull@gmail.com
## Inspired by: https://github.com/somervda/ourbotmanager_ros.git
##
## Input: Analog potentiometer 1 + 2 + 3 (+4 )
## Output: micro-ROS node (ROS2) that publish topic /cmd_vel with msg.type twist_stamped
## Angular = X-axis = Pull stick Left/Right
## Linear = Y-axis = Pull stick Up/Down
## Twist = Z-axis = Turn/Twist stick (Not used right now)
##
## Behaviour:
## 1) Once: Read/Set all the parameters
## 2) Repeatedly: Read analog joystick via ADC
## 3) Repeatedly: Transform indata to a +/-100% values
## 4) Repeatedly: Map where the stick are => Depending om location, then adjust behivaiur.
## 5) Repeatedly: Publish ros-topic
##
## Prerequisite:
## $ sudo apt install i2c-tools
## $ sudo apt install python3-pip
## $ sudo pip3 install smbus2
## $ sudo pip3 install adafruit-ads1x15
## $ sudo i2cdetect -y 1
## $ sudo chmod a+rw /dev/i2c-1
##
## Hardware: KY-053 Analog Digital Converter (ADS1115, 16-bit) via default I2C adr.=0x48
## Hardware: Joystick with analog 10K resistors for X, Y and Z
## Host: Raspberry Pi 4(Ubuntu) via I2C
##
## Launch sequence:
## 1) $ ros2 run pet_mk_viii_joystick pet_potentiometer_node.py
##
# TODO: Get rid of time.sleep() with something more real time/concurrent and ROS2 friendly way of wait...
# Import the ROS2-stuff
import rclpy # TODO: IS this line neccesary. Due to the two following lines that importing "Node" and "Parameter"
from rclpy.node import Node
from rclpy.parameter import Parameter
from rcl_interfaces.msg import ParameterDescriptor
from std_msgs.msg import Int32
# Import the Ubuntu/Linux-hardware stuff
from smbus2 import SMBus
import Adafruit_ADS1x15
#from gpiozero import LED
# Import the common Ubuntu/Linux stuff
import sys
import time
import signal
class PotentiometerPublisher(Node):
'''
Analog potentiometer class
Read analog input -> Publish on ROS-topic
'''
# Keep track of last joystick values. Used due to reducing communication of equal values.
last_value_p0 = 0
last_value_p1 = 0
last_value_p2 = 0
last_value_p3 = 0
if __name__ == "__main__":
main()
| 46.159836 | 206 | 0.648584 | #!/usr/bin/env python3'
# coding = utf-8
########################################################################################
##
## Maintainer: stefan.kull@gmail.com
## Inspired by: https://github.com/somervda/ourbotmanager_ros.git
##
## Input: Analog potentiometer 1 + 2 + 3 (+4 )
## Output: micro-ROS node (ROS2) that publish topic /cmd_vel with msg.type twist_stamped
## Angular = X-axis = Pull stick Left/Right
## Linear = Y-axis = Pull stick Up/Down
## Twist = Z-axis = Turn/Twist stick (Not used right now)
##
## Behaviour:
## 1) Once: Read/Set all the parameters
## 2) Repeatedly: Read analog joystick via ADC
## 3) Repeatedly: Transform indata to a +/-100% values
## 4) Repeatedly: Map where the stick are => Depending om location, then adjust behivaiur.
## 5) Repeatedly: Publish ros-topic
##
## Prerequisite:
## $ sudo apt install i2c-tools
## $ sudo apt install python3-pip
## $ sudo pip3 install smbus2
## $ sudo pip3 install adafruit-ads1x15
## $ sudo i2cdetect -y 1
## $ sudo chmod a+rw /dev/i2c-1
##
## Hardware: KY-053 Analog Digital Converter (ADS1115, 16-bit) via default I2C adr.=0x48
## Hardware: Joystick with analog 10K resistors for X, Y and Z
## Host: Raspberry Pi 4(Ubuntu) via I2C
##
## Launch sequence:
## 1) $ ros2 run pet_mk_viii_joystick pet_potentiometer_node.py
##
# TODO: Get rid of time.sleep() with something more real time/concurrent and ROS2 friendly way of wait...
# Import the ROS2-stuff
import rclpy # TODO: IS this line neccesary. Due to the two following lines that importing "Node" and "Parameter"
from rclpy.node import Node
from rclpy.parameter import Parameter
from rcl_interfaces.msg import ParameterDescriptor
from std_msgs.msg import Int32
# Import the Ubuntu/Linux-hardware stuff
from smbus2 import SMBus
import Adafruit_ADS1x15
#from gpiozero import LED
# Import the common Ubuntu/Linux stuff
import sys
import time
import signal
class PotentiometerPublisher(Node):
'''
Analog potentiometer class
Read analog input -> Publish on ROS-topic
'''
# Keep track of last joystick values. Used due to reducing communication of equal values.
last_value_p0 = 0
last_value_p1 = 0
last_value_p2 = 0
last_value_p3 = 0
def __init__(self):
super().__init__("PotentiometerPublisher_node")
# Set default topic-name for publishing. Accessed via ROS Parameters...
self.declare_parameter( 'ros_topic_p0', 'potentiometer_p0', ParameterDescriptor(description='ROS-topc name. Publish posotion of potentiometer [default "potentiometer_p0"]') )
self.ROS_TOPIC_P0 = self.get_parameter('ros_topic_p0').get_parameter_value().string_value
self.declare_parameter( 'ros_topic_p1', 'potentiometer_p1', ParameterDescriptor(description='ROS-topc name. Publish posotion of potentiometer [default "potentiometer_p1"]') )
self.ROS_TOPIC_P1 = self.get_parameter('ros_topic_p1').get_parameter_value().string_value
self.declare_parameter( 'ros_topic_p2', 'potentiometer_p2', ParameterDescriptor(description='ROS-topc name. Publish posotion of potentiometer [default "potentiometer_p2"]') )
self.ROS_TOPIC_P2 = self.get_parameter('ros_topic_p2').get_parameter_value().string_value
self.declare_parameter( 'ros_topic_p3', 'potentiometer_p3', ParameterDescriptor(description='ROS-topc name. Publish posotion of potentiometer [default "potentiometer_p3"]') )
self.ROS_TOPIC_P3 = self.get_parameter('ros_topic_p3').get_parameter_value().string_value
# Set default ADC-I2C address. Accessed via ROS Parameters...
self.declare_parameter( 'adc_i2c_address', "0x49", ParameterDescriptor(description='ADC I2C address [default "0x49"]') )
self.ADC_I2C_ADDRESS = self.get_parameter('adc_i2c_address').get_parameter_value().string_value
# Scale factor for each potentiometer. Accessed via ROS Parameters...
self.declare_parameter( 'scale_p0', 1, ParameterDescriptor(description='Scale factor 0->1000 value [default=1]') )
self.SCALE_P0 = self.get_parameter( 'scale_p0' ).value
self.declare_parameter( 'scale_p1', 1, ParameterDescriptor(description='Scale factor 0->1000 value [default=1]') )
self.SCALE_P1 = self.get_parameter( 'scale_p1' ).value
self.declare_parameter( 'scale_p2', 1, ParameterDescriptor(description='Scale factor 0->1000 value [default=1]') )
self.SCALE_P2 = self.get_parameter( 'scale_p2' ).value
self.declare_parameter( 'scale_p3', 1, ParameterDescriptor(description='Scale factor 0->1000 value [default=1]') )
self.SCALE_P3 = self.get_parameter( 'scale_p3' ).value
# Granularity is the values stepsize between. Accessed via ROS Parameters...
self.declare_parameter( 'granularity', 5, ParameterDescriptor(description='Joystick value step-size [default 5].') )
self.GRANULARITY = self.get_parameter( 'granularity' ).value
# Cycle time - How often to read joystick position via A/D converter
self.declare_parameter( 'cycle_timer', 0.1, ParameterDescriptor(description='Cycle time how often to read joystick position [default 0.1 sec(10Hz)]') )
self.CYCLE_TIMER = self.get_parameter( 'cycle_timer' ).value
# Republish every n number of cycles to make sure base got last value
self.declare_parameter( 'cycles_publish', 50, ParameterDescriptor(description='Number of cycles/timer before publish [default 50]') )
self.CYCLES_COUNTER = self.get_parameter( 'cycles_publish' ).value
self.republish_counter = self.CYCLES_COUNTER
exit = False
# Check we can open the analog/digitala converter, ads1115, via I2C-interface.
try:
self.adc = Adafruit_ADS1x15.ADS1115( busnum=1, address=int(self.ADC_I2C_ADDRESS,0) ) # "0" works for both Dec and Hex strings...
# Create topic publisher
self.pub_p0 = self.create_publisher(Int32, self.ROS_TOPIC_P0 ,10)
self.pub_p1 = self.create_publisher(Int32, self.ROS_TOPIC_P1 ,10)
self.pub_p2 = self.create_publisher(Int32, self.ROS_TOPIC_P2 ,10)
self.pub_p3 = self.create_publisher(Int32, self.ROS_TOPIC_P3 ,10)
# Set cycle time (Hz) how often to read the joystick position.
self.joy_timer = self.create_timer(self.CYCLE_TIMER , self.process_potentiometers)
# TODO: self.rosRunLED.on()
# Some basic information on the console
self.get_logger().info("PotentiometerPublisher_node has started")
self.get_logger().info("- A/D: " + self.ADC_I2C_ADDRESS + ", P0-chn=" + self.ROS_TOPIC_P0 + ", P1-chn=" + self.ROS_TOPIC_P1 + ", P2-chn=" + self.ROS_TOPIC_P2 + ", P3-chn=" + self.ROS_TOPIC_P3)
self.get_logger().info("- A/D sampling: " + str(100 *self.CYCLE_TIMER) +"Hz")
except:
# Note: a permission error can be fixed with a "sudo chmod a+rw /dev/i2c-1"
self.get_logger().error("During PotentiometerPublisher_node initialization😒" + str(sys.exc_info()[1]) )
self.exit = True
#########################################################################################################
# 1) Initiate & assign start values for the ros-msg.
# 2) Publish a first time
self.msg_p0 = Int32()
self.msg_p0.data = 0
self.msg_p1 = Int32()
self.msg_p1.data = 0
self.msg_p2 = Int32()
self.msg_p2.data = 0
self.msg_p3 = Int32()
self.msg_p3.data = 0
def process_potentiometers(self):
# Read values from potetinometers P0, P1, P2 and P3
# With gain=1 the "raw"-value(1...26400) and middle 13200
# With gain=2/3 the "raw"-value(1...17600) and middle 8900
# My ads1115 needed a little sleep between measuerments to settle on a good value
p0_raw= self.adc.read_adc(0, gain=1, data_rate=128) # ADS1115 channel 0.
time.sleep(0.01)
p1_raw= self.adc.read_adc(1, gain=1, data_rate=128) # ADS1115 channel 1
time.sleep(0.01)
p2_raw= self.adc.read_adc(2, gain=1, data_rate=128) # ADS1115 channel 2
time.sleep(0.01)
p3_raw= self.adc.read_adc(3, gain=1, data_rate=128) # ADS1115 channel 3
time.sleep(0.01)
# self.get_logger().info("Raw: P0=" + str(p0_raw) + " P1=" + str(p1_raw) + " P2=" + str(p2_raw))
# Convert to a value bettween -100 and +100 and number of distict values set by the granularity
value_p0 = ( round((round(p0_raw/26.410) * self.SCALE_P0 ) / self.GRANULARITY)) * self.GRANULARITY
value_p1 = ( round((round(p1_raw/26.410) * self.SCALE_P1 ) / self.GRANULARITY)) * self.GRANULARITY
value_p2 = ( round((round(p2_raw/26.410) * self.SCALE_P2 ) / self.GRANULARITY)) * self.GRANULARITY
value_p3 = ( round((round(p3_raw/26.410) * self.SCALE_P3 ) / self.GRANULARITY)) * self.GRANULARITY
# Only output a twist message when the joystick values change,
# If nothing published for N loops/iterations - Then send the last value again.
doPublish = False
self.republish_counter -= 1
if (self.last_value_p0 != value_p0):
# Change in P0 value
self.get_logger().info("-----Publish P0!")
self.msg_p0.data = value_p0
doPublish = True
if (self.last_value_p1 != value_p1):
# Change in P1 value
self.get_logger().info("-----Publish P1!")
self.msg_p1.data = value_p1
doPublish = True
if (self.last_value_p2 != value_p2):
# Change in P2 value
self.get_logger().info("-----Publish P2!")
self.msg_p2.data = value_p2
doPublish = True
if (self.last_value_p3 != value_p3):
# Change in P3 value
self.get_logger().info("-----Publish P3!")
self.msg_p3.data = value_p3
doPublish = True
if doPublish or self.republish_counter == 0:
self.get_logger().info("Raw : P0=" + str(p0_raw) + " P1=" + str(p1_raw) + " P2=" + str(p2_raw) + " P3=" + str(p3_raw))
self.get_logger().info("Value: P0=" + str(value_p0) + " P1=" + str(value_p1) + " P2=" + str(value_p2) + " P3=" + str(value_p3) )
self.pub_p0.publish(self.msg_p0)
self.pub_p1.publish(self.msg_p1)
self.pub_p2.publish(self.msg_p2)
self.pub_p3.publish(self.msg_p3)
self.republish_counter = self.CYCLES_COUNTER # Restart the counter
# Save current value, so we can see if the stick have been moved during next lap.
self.last_value_p0 = value_p0
self.last_value_p1 = value_p1
self.last_value_p2 = value_p2
self.last_value_p3 = value_p3
def main(args=None):
    """Entry point: initialize rclpy, spin the publisher node, clean up.

    Parameters
    ----------
    args : list or None
        Command-line arguments forwarded to ``rclpy.init()``.
    """
    rclpy.init(args=args)
    node = PotentiometerPublisher()
    try:
        rclpy.spin(node)
    except KeyboardInterrupt:
        print("**** * 💀 Ctrl-C detected...")
    finally:
        # Bug fix: the old message said "joystick_node" although this file
        # runs the potentiometer publisher node.
        print("**** 🪦 potentiometer_node ending... " + str(sys.exc_info()[1]) )
        # Time to clean up stuff!
        node.destroy_node()  # release publishers/timers before shutdown
        rclpy.shutdown()
# Script entry point (e.g. "ros2 run" or direct "python3" invocation).
if __name__ == "__main__":
    main()
| 8,909 | 0 | 99 |
e294e0268e0b169ee1d20bb1135f8707589d4ce5 | 171 | py | Python | examples/timed_set/config.py | fredstro/mrq | eec5dfb425c765afa1ab5b41ca1e6f76869a6726 | [
"MIT"
] | 745 | 2015-01-02T06:54:37.000Z | 2022-03-27T13:23:33.000Z | examples/timed_set/config.py | fredstro/mrq | eec5dfb425c765afa1ab5b41ca1e6f76869a6726 | [
"MIT"
] | 175 | 2015-01-01T20:46:08.000Z | 2022-01-24T09:40:55.000Z | examples/timed_set/config.py | fredstro/mrq | eec5dfb425c765afa1ab5b41ca1e6f76869a6726 | [
"MIT"
] | 143 | 2015-01-06T06:55:26.000Z | 2021-09-13T19:47:12.000Z | RAW_QUEUES = {
"example_timed_set": {
"job_factory": lambda rawparam: {
"path": "example.Print",
"params": {
"test": rawparam
}
}
}
} | 17.1 | 37 | 0.502924 | RAW_QUEUES = {
"example_timed_set": {
"job_factory": lambda rawparam: {
"path": "example.Print",
"params": {
"test": rawparam
}
}
}
} | 0 | 0 | 0 |
32b8c3e29d61e4ef123b1d590e376a697da3489d | 24,380 | py | Python | dmriprep/data.py | akeshavan/dmriprep | c2829fff51e499ea68e5416f336a0bc53bae10e0 | [
"BSD-3-Clause"
] | null | null | null | dmriprep/data.py | akeshavan/dmriprep | c2829fff51e499ea68e5416f336a0bc53bae10e0 | [
"BSD-3-Clause"
] | 1 | 2018-12-07T13:45:33.000Z | 2018-12-07T13:45:33.000Z | dmriprep/data.py | akeshavan/dmriprep | c2829fff51e499ea68e5416f336a0bc53bae10e0 | [
"BSD-3-Clause"
] | null | null | null | """
Functions to download example data from public repositories.
"""
import copy
import json
import os
import os.path as op
import re
from pathlib import Path

from .base import InputFiles, InputFilesWithSession
def get_s3_register(subject_id, site, raw_keys, deriv_keys):
    """Collect one subject's S3 keys into an InputFiles record.

    Parameters
    ----------
    subject_id : string
        Subject ID on which to filter the s3 keys
    site : string
        Site ID from which to collect raw data
    raw_keys : sequence
        Sequence of raw data s3 keys to filter
    deriv_keys : sequence
        Sequence of derivative data s3 keys to filter

    Returns
    -------
    InputFiles namedtuple
        ``valid=True`` with per-modality keys when every required file type
        was found for the subject; otherwise ``valid=False`` / ``files=None``.
    """
    # Restrict the key lists to this subject.
    dwi_keys = [k for k in raw_keys if subject_id in k and '/dwi/' in k]
    fmap_keys = [k for k in raw_keys if subject_id in k and '/fmap/' in k]
    subject_derivs = [k for k in deriv_keys if subject_id in k]
    # Sort the subject's keys into the file types the pipeline needs.
    # 'TRACEW' dwi series and fMRI fieldmaps are deliberately excluded.
    candidates = dict(
        dwi=[k for k in dwi_keys
             if k.endswith('.nii.gz') and 'TRACEW' not in k],
        bvec=[k for k in dwi_keys if k.endswith('.bvec')],
        bval=[k for k in dwi_keys if k.endswith('.bval')],
        epi_nii=[k for k in fmap_keys
                 if k.endswith('epi.nii.gz') and 'fMRI' not in k],
        epi_json=[k for k in fmap_keys
                  if k.endswith('epi.json') and 'fMRI' not in k],
        freesurfer=[k for k in subject_derivs if '/freesurfer/' in k],
        t1w=[k for k in subject_derivs if k.endswith('/T1w.nii.gz')],
    )
    # The subject is usable only if every required file type is present.
    # TODO: If some of the files are missing, look farther up in the directory
    # TODO: structure to see if there are files we should inherit
    if all(candidates.values()):
        return InputFiles(subject=subject_id, site=site, valid=True,
                          files=candidates, file_type='s3')
    return InputFiles(subject=subject_id, site=site, valid=False,
                      files=None, file_type='s3')
def get_s3_keys(prefix, s3_client, bucket='fcp-indi'):
    """List every key under ``prefix`` in an S3 bucket, following pagination.

    Parameters
    ----------
    prefix : string
        S3 prefix designating the S3 "directory" in which to search.
        Do not include the site ID in the prefix.
    s3_client : boto3 client object
        from the get_s3_client() function
    bucket : string
        AWS S3 bucket in which to search

    Returns
    -------
    list
        All the keys matching the prefix in the S3 bucket

    Raises
    ------
    ValueError
        If the first page of results contains no keys.
    """
    # Avoid duplicate trailing slash in prefix
    prefix = prefix.rstrip('/')
    response = s3_client.list_objects_v2(Bucket=bucket, Prefix=prefix)
    contents = response.get('Contents')
    if contents is None:
        # No 'Contents' field means S3 matched nothing under this prefix.
        raise ValueError(
            'There are no subject files in the S3 bucket with prefix '
            '{pfix:s}'.format(pfix=prefix)
        )
    keys = [obj['Key'] for obj in contents]
    # list_objects_v2 caps each page at 1000 keys; keep following the
    # continuation token until the listing is exhausted.
    while response['IsTruncated']:
        response = s3_client.list_objects_v2(
            Bucket=bucket,
            Prefix=prefix,
            ContinuationToken=response['NextContinuationToken'],
        )
        keys += [obj['Key'] for obj in response.get('Contents')]
    return keys
def keys_to_subject_register(keys, prefix, site):
    """Filter S3 keys based on data availability and return
    Parameters
    ----------
    keys : sequence
        sequence of S3 keys
    prefix : string
        S3 prefix designating the S3 "directory" in which to search.
        Do not include the site ID in the prefix.
    site : string
        Site ID from which to collect raw data
    Returns
    -------
    list
        List of `InputFiles` namedtuples for each valid subject
    """
    def get_subject_id(key):
        # Bug fix: this nested helper was missing, so every call below
        # raised NameError. Extract the "sub-XXXX" path component from an
        # S3 key, or return None when the key has none.
        match = re.search('/sub-[0-9a-zA-Z]*/', key)
        if match is not None:
            return match.group().strip('/')
        else:
            return None
    deriv_keys = [
        k for k in keys
        if k.startswith(prefix + '/' + site + '/derivatives/sub-')
    ]
    raw_keys = [
        k for k in keys
        if k.startswith(prefix + '/' + site + '/sub-')
    ]
    subs_with_dwi = {
        get_subject_id(k) for k in raw_keys
        if '/dwi/' in k
    }
    subs_with_epi_nii = {
        get_subject_id(k) for k in raw_keys
        if (
            k.endswith('epi.nii.gz')
            and '/fmap/' in k
            and 'fMRI' not in k
        )
    }
    subs_with_epi_json = {
        get_subject_id(k) for k in raw_keys
        if (
            k.endswith('epi.json')
            and '/fmap/' in k
            and 'fMRI' not in k
        )
    }
    subs_with_freesurfer = {
        get_subject_id(k) for k in deriv_keys
        if '/freesurfer/' in k
    }
    subs_with_t1w = {
        get_subject_id(k) for k in deriv_keys
        if k.endswith('T1w.nii.gz')
    }
    # A subject is valid only when every required file type is present.
    valid_subjects = (
        subs_with_dwi
        & subs_with_epi_nii
        & subs_with_epi_json
        & subs_with_freesurfer
        & subs_with_t1w
    )
    s3_registers = [
        get_s3_register(subject_id=s, site=site, raw_keys=raw_keys,
                        deriv_keys=deriv_keys)
        for s in valid_subjects
    ]
    # get_s3_register re-validates completeness; keep only valid subjects.
    s3_registers = list(filter(
        lambda sub: sub.valid,
        s3_registers
    ))
    return s3_registers
def download_register(subject_keys, s3_client,
                      bucket='fcp-indi', directory='./input',
                      overwrite=False):
    """
    Parameters
    ----------
    subject_keys : InputFiles namedtuple
        Input s3 keys stored in namedtuple. Must have the fields
        'subject': subjectID,
        'site': siteID,
        'files': dictionary of S3 keys
    bucket : string
        S3 bucket from which to extract files
    directory : string
        Local directory to which to save files
    overwrite : bool
        Flag to overwrite existing files
    Returns
    -------
    files : InputFiles namedtuple
        Input file paths stored in namedtuple. Has the fields
        'subject': subjectID,
        'site' : siteID,
        'valid' : True,
        'files' : local file paths,
        'file_type' : 'local',
    """
    subject = subject_keys.subject
    site = subject_keys.site
    # Map every S3 key onto an absolute local path that mirrors the key's
    # layout under <directory>/<site>/.
    input_files = InputFiles(
        subject=subject,
        site=site,
        valid=True,
        files={
            k: [op.abspath(op.join(
                directory, site, p.split('/' + site + '/')[-1]
            )) for p in v] for k, v in subject_keys.files.items()
        },
        file_type='local'
    )
    def download_from_s3(fname_, bucket_, key_):
        # Bug fix: this nested helper was missing, so every call below
        # raised NameError. Fetch one key; the touch() with
        # exist_ok=overwrite raises FileExistsError (and skips the
        # download) when the file exists and overwrite is False.
        # Create the directory and file if necessary
        Path(op.dirname(fname_)).mkdir(parents=True, exist_ok=True)
        try:
            Path(fname_).touch(exist_ok=overwrite)
            # Download the file
            s3_client.download_file(Bucket=bucket_, Key=key_, Filename=fname_)
        except FileExistsError:
            pass
            # TODO: add back logging
            # mod_logger.info('File {fname:s} already exists. Continuing...')
    s3keys = subject_keys.files
    files = input_files.files
    # Each register entry may hold a single key or a list of keys.
    for ftype in s3keys.keys():
        if isinstance(s3keys[ftype], str):
            download_from_s3(fname_=files[ftype],
                             bucket_=bucket,
                             key_=s3keys[ftype])
        elif all(isinstance(x, str) for x in s3keys[ftype]):
            for key, fname in zip(s3keys[ftype], files[ftype]):
                download_from_s3(fname_=fname, bucket_=bucket, key_=key)
        else:
            raise TypeError(
                'This subject {sub:s} has {ftype:s} S3 keys that are neither '
                'strings nor a sequence of strings. The S3 keys are {keys!s}'
                ''.format(sub=subject, ftype=ftype, keys=s3keys[ftype])
            )
    return input_files
def determine_directions(input_files,
                         input_type='s3',
                         bucket=None,
                         metadata_source='json',
                         json_key='PhaseEncodingDirection',
                         ap_value='j-', pa_value='j'):
    """Determine direction ['AP', 'PA'] of single subject's EPI nifty files
    Use either metadata in associated json file or filename
    Parameters
    ----------
    input_files : InputFiles namedtuple
        The local input files for the subject
    input_type : "s3" or "local", default="s3"
        The location of the input files, local or on S3
    bucket : string or None, default=None
        S3 Bucket where the input files are located.
        If input_type == 's3', then bucket must not be None
    metadata_source : "json" or "filename", default="json"
        If "filename," look for the direction in the filename,
        otherwise, use the json file and the other parameters
    json_key : string, default="PhaseEncodingDirection"
        The key that stores the direction information
    ap_value : string, default="j-"
        Metadata value to associate with dir-AP
    pa_value : string, default="j"
        Metadata value to associate with dir-PA
    Returns
    -------
    InputFiles namedtuple
        An InputFiles namedtuple where all fields match the `input_files`
        namedtuple except that in the `files` field, the "epi_nii" and
        "epi_json" keys have been replaced with "epi_ap" and "epi_pa."
    """
    if metadata_source not in ['filename', 'json']:
        raise ValueError('metadata_source must be "filename" or "json".')
    if input_type not in ['s3', 'local']:
        raise ValueError('input_type must be "local" or "s3".')
    if input_type == 's3' and bucket is None:
        raise ValueError('If input_type is "s3," you must supply a bucket.')
    epi_files = input_files.files['epi_nii']
    json_files = input_files.files['epi_json']
    if metadata_source == 'filename':
        ap_files = [f for f in epi_files if 'dir-AP' in f]
        pa_files = [f for f in epi_files if 'dir-PA' in f]
    else:
        # Confirm that each nifty file has a corresponding json file.
        required_json = set([f.replace('.nii.gz', '.json') for f in epi_files])
        if set(json_files) != required_json:
            raise ValueError(
                'There are nifty files without corresponding json files. We '
                'failed to find the following expected files: {files!s}'
                ''.format(files=required_json - set(json_files))
            )
        def get_json(json_file):
            # Bug fix: this nested helper was missing, so the loop below
            # raised NameError. Load the sidecar JSON from disk or S3.
            # NOTE(review): `s3_client` is a module-level name here, not a
            # parameter — confirm it is initialized before the s3 branch runs.
            if input_type == 'local':
                with open(json_file, 'r') as fp:
                    meta = json.load(fp)
            else:
                response = s3_client.get_object(
                    Bucket=bucket,
                    Key=json_file,
                )
                meta = json.loads(response.get('Body').read())
            return meta
        ap_files = []
        pa_files = []
        # Classify each sidecar by its phase-encoding direction, falling
        # back on the filename when metadata is missing or unexpected.
        for jfile in json_files:
            metadata = get_json(jfile)
            direction = metadata.get(json_key)
            if direction == ap_value:
                if 'dir-PA' in jfile:
                    mod_logger.warning(
                        'The key {key:s}={val:s} does not match the direction '
                        'suggested by the filename {fn:s}'.format(
                            key=json_key, val=direction, fn=jfile
                        )
                    )
                ap_files.append(jfile.replace('.json', '.nii.gz'))
            elif direction == pa_value:
                if 'dir-AP' in jfile:
                    mod_logger.warning(
                        'The key {key:s}={val:s} does not match the direction '
                        'suggested by the filename {fn:s}'.format(
                            key=json_key, val=direction, fn=jfile
                        )
                    )
                pa_files.append(jfile.replace('.json', '.nii.gz'))
            elif direction is None:
                mod_logger.warning(
                    'The key {key:s} does not exist in file {jfile:s}. '
                    'Falling back on filename to determine directionality.'
                    '\n\n'.format(key=json_key, jfile=jfile)
                )
                if 'dir-AP' in jfile:
                    ap_files.append(jfile.replace('.json', '.nii.gz'))
                elif 'dir-PA' in jfile:
                    pa_files.append(jfile.replace('.json', '.nii.gz'))
                else:
                    raise ValueError(
                        'The key {key:s} does not exist in file {jfile:s} and '
                        'the directionality could not be inferred from the '
                        'file name.'.format(key=json_key, jfile=jfile)
                    )
            else:
                mod_logger.warning(
                    'The metadata in file {jfile:s} does not match the dir-PA '
                    'or dir-AP values that you provided. {key:s} = {val:s}. '
                    'Falling back on filename to determine directionality.\n\n'
                    ''.format(jfile=jfile, key=json_key, val=direction)
                )
                if 'dir-AP' in jfile:
                    ap_files.append(jfile.replace('.json', '.nii.gz'))
                elif 'dir-PA' in jfile:
                    pa_files.append(jfile.replace('.json', '.nii.gz'))
                else:
                    raise ValueError(
                        'The metadata for key {key:s} in file {jfile:s} does '
                        'not match the dir-PA or dir-AP values that you '
                        'provided. {key:s} = {val:s}. And the directionality '
                        'could not be inferred from the file name.'.format(
                            key=json_key,
                            jfile=jfile,
                            val=direction,
                        ))
    # Swap the epi_nii/epi_json entries for the resolved AP/PA lists.
    files = copy.deepcopy(input_files.files)
    del files['epi_nii']
    del files['epi_json']
    files['epi_ap'] = ap_files
    files['epi_pa'] = pa_files
    return InputFiles(
        subject=input_files.subject,
        site=input_files.site,
        valid=input_files.valid,
        files=files,
        file_type=input_files.file_type
    )
def separate_sessions(input_files, multiples_policy='sessions',
                      assign_empty_sessions=True):
    """Separate input file register into different sessions
    Parameters
    ----------
    input_files : InputFiles namedtuple
    multiples_policy : "sessions" or "concatenate"
        Flag that dictates how to handle multiple files in a session.
        If "sessions," treat multiples as different sessions and assign
        to new session IDs. If "concatenate," concatenate multiples into
        a single session
    assign_empty_sessions : bool
        If True, assign session IDs to files without a session ID in
        their path
    Returns
    -------
    list of InputFilesWithSession namedtuples
        One record per session ID; records with files=None flag
        inconsistent input.
    """
    if multiples_policy not in ['sessions', 'concatenate']:
        raise ValueError('`multiples_policy` must be either "sessions" or '
                         '"concatenate"')
    # Take only the first of the T1W nifty files
    if len(input_files.files['t1w']) > 1:
        mod_logger.warning(
            'Found more than one T1W file for subject {sub:s} at site {site:s}'
            '. Discarding the others.\n\n'.format(sub=input_files.subject,
                                                  site=input_files.site)
        )
    t1w = input_files.files['t1w']
    # Take only the first freesurfer directory
    freesurfer_dirs = {
        f.split('/freesurfer/')[0] for f in input_files.files['freesurfer']
    }
    if len(freesurfer_dirs) > 1:
        mod_logger.warning(
            'Found more than one freesurfer dir for subject {sub:s} at site '
            '{site:s}. Discarding the others.\n\n'.format(
                sub=input_files.subject, site=input_files.site
            )
        )
    freesurfer_dir = freesurfer_dirs.pop()
    freesurfer = [f for f in input_files.files['freesurfer']
                  if f.startswith(freesurfer_dir)]
    # Organize the files by session ID
    def get_sess_id(filename, fallback='null'):
        # Bug fix: this nested helper was missing, so every call below
        # raised NameError. Retrieve the session ID from a filename, or
        # `fallback` when the path has no "sess-" component.
        match = re.search('/sess-[0-9a-zA-Z]*/', filename)
        if match is not None:
            return match.group().strip('/')
        else:
            return fallback
    ftypes = ['dwi', 'bvec', 'bval', 'epi_ap', 'epi_pa']
    sess_ids = {ft: {get_sess_id(fn) for fn in input_files.files[ft]}
                for ft in ftypes}
    if not all([s == list(sess_ids.values())[0] for s in sess_ids.values()]):
        mod_logger.warning(
            'Session numbers are inconsistent for subject {sub:s} at site '
            '{site:s}. Sess-IDs: {sess_ids!s}.\nFiles: {files!s}\n\n'.format(
                sub=input_files.subject,
                site=input_files.site,
                sess_ids=sess_ids,
                files={k: (v) for k, v in input_files.files.items()
                       if k in ['dwi', 'bvec', 'bval', 'epi_ap', 'epi_pa']},
            )
        )
        return [InputFilesWithSession(
            subject=input_files.subject,
            site=input_files.site,
            session=None,
            files=None,
            file_type=None,
        )]
    # We just confirmed that all of the session ID sets are equal so we can
    # pop one set of session IDs off of `sess_ids` and use it from now on
    sess_ids = sess_ids[ftypes[0]]
    # Collect files by session ID and then file type
    files_by_session = {
        sess: {
            ft: [
                f for f in input_files.files[ft] if get_sess_id(f) == sess
            ]
            for ft in ftypes
        }
        for sess in sess_ids
    }
    output_files = []
    # Loop over each session ID
    for session, files in files_by_session.items():
        # Confirm that the subject has an equal number of each type of file
        n_files = {k: len(v) for k, v in files.items()
                   if k in ['dwi', 'bvec', 'bval', 'epi_ap', 'epi_pa']}
        # Bug fix: the old chain tested len(set(...)) != 1 / == 1 / else,
        # which made the "multiple copies" branch unreachable. Distinguish
        # "inconsistent counts" from "exactly one of each" from "several of
        # each" explicitly.
        file_counts = set(n_files.values())
        if len(file_counts) != 1:
            mod_logger.warning(
                'The number of files is inconsistent for subject {sub:s} at '
                'site {site:s}. The file numbers are {n_files!s}\n\n'.format(
                    sub=input_files.subject,
                    site=input_files.site,
                    n_files=n_files
                )
            )
            output_files.append(InputFilesWithSession(
                subject=input_files.subject,
                site=input_files.site,
                session=None,
                files=None,
                file_type=None,
            ))
        elif file_counts == {1}:
            # There is only one set of files in this session. Append to output.
            if session == 'null':
                output_session = 'sess-01' if assign_empty_sessions else None
            else:
                output_session = session
            # Bug fix: use the per-session lists (`files`) computed above,
            # not the full input_files.files, so each session record only
            # carries its own files.
            output_files.append(InputFilesWithSession(
                subject=input_files.subject,
                site=input_files.site,
                session=output_session,
                files=dict(
                    dwi=files['dwi'],
                    bvec=files['bvec'],
                    bval=files['bval'],
                    epi_ap=files['epi_ap'],
                    epi_pa=files['epi_pa'],
                    t1w=t1w,
                    freesurfer=freesurfer,
                ),
                file_type=input_files.file_type,
            ))
        else:
            # There are multiple copies of files for this one session ID.
            if multiples_policy == 'concatenate':
                # The multiple copies represent one session and should be
                # concatenated
                raise NotImplementedError('Concatenation of multiples not yet '
                                          'implemented.')
            else:
                # The multiple copies represent multiple sessions and
                # should be further subdivided into sessions
                raise NotImplementedError('Session subdivision not yet '
                                          'implemented.')
    return output_files
def get_all_s3_registers(prefix, sites, bucket='fcp-indi'):
    """
    Parameters
    ----------
    prefix : string
        S3 prefix designating the S3 "directory" in which to search.
        Do not include the site ID in the prefix.
    sites : sequence of strings
        Site IDs from which to collect raw data
    bucket : string
        AWS S3 bucket in which to search
    Returns
    -------
    dict
        dict where the keys are site IDs and the values are
        list of `InputFiles` namedtuples for each valid subject
        at that site
    """
    subjects = {}
    # One anonymous client serves every listing (hoisted out of the loop).
    s3_client = get_s3_client()
    for site in sites:
        # Get all S3 keys for this site.
        # Bug fix: the old call passed a nonexistent `site` keyword to
        # get_s3_keys(), omitted the required s3_client argument and
        # hard-coded the bucket; scope the listing via the prefix instead.
        keys = get_s3_keys(prefix=prefix + '/' + site,
                           s3_client=s3_client,
                           bucket=bucket)
        # Get all registers (without the AP/PA directions)
        regs = keys_to_subject_register(keys=keys, prefix=prefix, site=site)
        # Assign the fmap files to either AP/PA
        regs_pa_ap = [
            determine_directions(input_files=reg,
                                 input_type='s3',
                                 bucket=bucket,
                                 metadata_source='json',
                                 json_key='PhaseEncodingDirection',
                                 ap_value='j-', pa_value='j')
            for reg in regs
        ]
        # Separate each subject register into different sessions
        regs_nested = [
            separate_sessions(reg,
                              multiples_policy='sessions',
                              assign_empty_sessions=True)
            for reg in regs_pa_ap
        ]
        # But `separate_sessions` returns a list of namedtuples
        # so `regs_nested` is nested and needs to be flattened
        regs_flat = [item for sublist in regs_nested for item in sublist]
        subjects[site] = [reg for reg in regs_flat if reg.files is not None]
    return subjects
| 34.241573 | 118 | 0.555701 | """
Functions to download example data from public repositories.
"""
from .base import InputFiles, InputFilesWithSession
import os
import os.path as op
from pathlib import Path
def get_dataset(output_dir, source='HBN'):
    """Download an example dataset and return the downloaded subject path.

    Parameters
    ----------
    output_dir : string
        Local directory into which the data are downloaded.
    source : string
        Dataset to fetch; only 'HBN' is currently supported.

    Returns
    -------
    string
        Path of the downloaded subject directory.

    Raises
    ------
    ValueError
        If `source` is not a recognized dataset name.
    """
    if source in ['HBN']:
        # Bug fix: propagate the subject path instead of discarding it.
        return get_hbn_data(output_dir)
    raise ValueError('Invalid dataset source')
def get_hbn_data(output_dir):
    """Download one example HBN subject (raw + derivatives) from S3.

    Parameters
    ----------
    output_dir : string
        Local directory to download into.

    Returns
    -------
    string
        Path of the downloaded subject directory.

    Raises
    ------
    ValueError
        If the subject's register is incomplete (missing file types).
    """
    subject = 'sub-NDARBA507GCT'
    site = 'SI'
    s3_client = get_s3_client()
    raw_keys = get_s3_keys(prefix='data/Projects/HBN/MRI/Site-{site}/{subject}'.format(site=site, subject=subject),
                           s3_client=s3_client)
    deriv_keys = get_s3_keys(prefix='data/Projects/HBN/MRI/Site-{site}/derivatives/{subject}'.format(site=site,
                                                                                                     subject=subject),
                             s3_client=s3_client)
    register = get_s3_register(subject_id=subject,
                               site='Site-{}'.format(site),
                               raw_keys=raw_keys,
                               deriv_keys=deriv_keys)
    # Robustness: an invalid register carries files=None, which would crash
    # inside download_register with an opaque AttributeError; fail loudly.
    if not register.valid:
        raise ValueError(
            'Incomplete input data for subject {sub:s}'.format(sub=subject))
    download_register(register, s3_client=s3_client, directory=output_dir)
    # TODO: return a dict of subject ids and folder locations.
    return os.path.join(output_dir, subject)
def get_s3_client():
    """Return an anonymous (unsigned) boto3 S3 client."""
    import boto3
    from botocore import UNSIGNED
    from botocore.client import Config
    # Anonymous config: requests are unsigned so no AWS credentials are
    # needed for the public bucket.
    return boto3.client('s3', config=Config(signature_version=UNSIGNED))
def get_s3_register(subject_id, site, raw_keys, deriv_keys):
    """Collect one subject's S3 keys into an InputFiles record.

    Parameters
    ----------
    subject_id : string
        Subject ID on which to filter the s3 keys
    site : string
        Site ID from which to collect raw data
    raw_keys : sequence
        Sequence of raw data s3 keys to filter
    deriv_keys : sequence
        Sequence of derivative data s3 keys to filter

    Returns
    -------
    InputFiles namedtuple
        ``valid=True`` with per-modality keys when every required file type
        was found for the subject; otherwise ``valid=False`` / ``files=None``.
    """
    # Restrict the key lists to this subject.
    dwi_keys = [k for k in raw_keys if subject_id in k and '/dwi/' in k]
    fmap_keys = [k for k in raw_keys if subject_id in k and '/fmap/' in k]
    subject_derivs = [k for k in deriv_keys if subject_id in k]
    # Sort the subject's keys into the file types the pipeline needs.
    # 'TRACEW' dwi series and fMRI fieldmaps are deliberately excluded.
    candidates = dict(
        dwi=[k for k in dwi_keys
             if k.endswith('.nii.gz') and 'TRACEW' not in k],
        bvec=[k for k in dwi_keys if k.endswith('.bvec')],
        bval=[k for k in dwi_keys if k.endswith('.bval')],
        epi_nii=[k for k in fmap_keys
                 if k.endswith('epi.nii.gz') and 'fMRI' not in k],
        epi_json=[k for k in fmap_keys
                  if k.endswith('epi.json') and 'fMRI' not in k],
        freesurfer=[k for k in subject_derivs if '/freesurfer/' in k],
        t1w=[k for k in subject_derivs if k.endswith('/T1w.nii.gz')],
    )
    # The subject is usable only if every required file type is present.
    # TODO: If some of the files are missing, look farther up in the directory
    # TODO: structure to see if there are files we should inherit
    if all(candidates.values()):
        return InputFiles(subject=subject_id, site=site, valid=True,
                          files=candidates, file_type='s3')
    return InputFiles(subject=subject_id, site=site, valid=False,
                      files=None, file_type='s3')
def get_s3_keys(prefix, s3_client, bucket='fcp-indi'):
    """List every key under ``prefix`` in an S3 bucket, following pagination.

    Parameters
    ----------
    prefix : string
        S3 prefix designating the S3 "directory" in which to search.
        Do not include the site ID in the prefix.
    s3_client : boto3 client object
        from the get_s3_client() function
    bucket : string
        AWS S3 bucket in which to search

    Returns
    -------
    list
        All the keys matching the prefix in the S3 bucket

    Raises
    ------
    ValueError
        If the first page of results contains no keys.
    """
    # Avoid duplicate trailing slash in prefix
    prefix = prefix.rstrip('/')
    response = s3_client.list_objects_v2(Bucket=bucket, Prefix=prefix)
    contents = response.get('Contents')
    if contents is None:
        # No 'Contents' field means S3 matched nothing under this prefix.
        raise ValueError(
            'There are no subject files in the S3 bucket with prefix '
            '{pfix:s}'.format(pfix=prefix)
        )
    keys = [obj['Key'] for obj in contents]
    # list_objects_v2 caps each page at 1000 keys; keep following the
    # continuation token until the listing is exhausted.
    while response['IsTruncated']:
        response = s3_client.list_objects_v2(
            Bucket=bucket,
            Prefix=prefix,
            ContinuationToken=response['NextContinuationToken'],
        )
        keys += [obj['Key'] for obj in response.get('Contents')]
    return keys
def keys_to_subject_register(keys, prefix, site):
    """Build InputFiles records for every subject with complete data.

    Parameters
    ----------
    keys : sequence
        sequence of S3 keys
    prefix : string
        S3 prefix designating the S3 "directory" in which to search.
        Do not include the site ID in the prefix.
    site : string
        Site ID from which to collect raw data

    Returns
    -------
    list
        List of `InputFiles` namedtuples for each valid subject
    """
    def subject_of(key):
        # Extract the "sub-XXXX" path component, or None when absent.
        hit = re.search('/sub-[0-9a-zA-Z]*/', key)
        return hit.group().strip('/') if hit is not None else None

    deriv_prefix = prefix + '/' + site + '/derivatives/sub-'
    raw_prefix = prefix + '/' + site + '/sub-'
    deriv_keys = [k for k in keys if k.startswith(deriv_prefix)]
    raw_keys = [k for k in keys if k.startswith(raw_prefix)]

    def subjects_where(source, predicate):
        # Set of subject IDs whose keys satisfy `predicate`.
        return {subject_of(k) for k in source if predicate(k)}

    # A subject is valid only when it has every required file type.
    valid_subjects = (
        subjects_where(raw_keys, lambda k: '/dwi/' in k)
        & subjects_where(raw_keys,
                         lambda k: k.endswith('epi.nii.gz')
                         and '/fmap/' in k and 'fMRI' not in k)
        & subjects_where(raw_keys,
                         lambda k: k.endswith('epi.json')
                         and '/fmap/' in k and 'fMRI' not in k)
        & subjects_where(deriv_keys, lambda k: '/freesurfer/' in k)
        & subjects_where(deriv_keys, lambda k: k.endswith('T1w.nii.gz'))
    )

    registers = [
        get_s3_register(subject_id=sub, site=site,
                        raw_keys=raw_keys, deriv_keys=deriv_keys)
        for sub in valid_subjects
    ]
    # get_s3_register re-checks completeness; drop anything it flagged.
    return [reg for reg in registers if reg.valid]
def download_register(subject_keys, s3_client,
                      bucket='fcp-indi', directory='./input',
                      overwrite=False):
    """
    Parameters
    ----------
    subject_keys : InputFiles namedtuple
        Input s3 keys stored in namedtuple. Must have the fields
        'subject': subjectID,
        'site': siteID,
        'files': dictionary of S3 keys
    bucket : string
        S3 bucket from which to extract files
    directory : string
        Local directory to which to save files
    overwrite : bool
        Flag to overwrite existing files
    Returns
    -------
    files : InputFiles namedtuple
        Input file paths stored in namedtuple. Has the fields
        'subject': subjectID,
        'site' : siteID,
        'valid' : True,
        'files' : local file paths,
        'file_type' : 'local',
    """
    subject = subject_keys.subject
    site = subject_keys.site
    # Map every S3 key onto an absolute local path that mirrors the key's
    # layout under <directory>/<site>/.
    input_files = InputFiles(
        subject=subject,
        site=site,
        valid=True,
        files={
            k: [op.abspath(op.join(
                directory, site, p.split('/' + site + '/')[-1]
            )) for p in v] for k, v in subject_keys.files.items()
        },
        file_type='local'
    )
    # Helper: fetch one key. touch(exist_ok=overwrite) raises
    # FileExistsError — and therefore skips the download — when the target
    # exists and overwrite is False.
    def download_from_s3(fname_, bucket_, key_):
        # Create the directory and file if necessary
        Path(op.dirname(fname_)).mkdir(parents=True, exist_ok=True)
        try:
            Path(fname_).touch(exist_ok=overwrite)
            # Download the file
            s3_client.download_file(Bucket=bucket_, Key=key_, Filename=fname_)
        except FileExistsError:
            pass
            # TODO: add back logging
            # mod_logger.info('File {fname:s} already exists. Continuing...')
    s3keys = subject_keys.files
    files = input_files.files
    # Each register entry may hold a single key or a list of keys.
    for ftype in s3keys.keys():
        if isinstance(s3keys[ftype], str):
            download_from_s3(fname_=files[ftype],
                             bucket_=bucket,
                             key_=s3keys[ftype])
        elif all(isinstance(x, str) for x in s3keys[ftype]):
            for key, fname in zip(s3keys[ftype], files[ftype]):
                download_from_s3(fname_=fname, bucket_=bucket, key_=key)
        else:
            raise TypeError(
                'This subject {sub:s} has {ftype:s} S3 keys that are neither '
                'strings nor a sequence of strings. The S3 keys are {keys!s}'
                ''.format(sub=subject, ftype=ftype, keys=s3keys[ftype])
            )
    return input_files
def determine_directions(input_files,
                         input_type='s3',
                         bucket=None,
                         metadata_source='json',
                         json_key='PhaseEncodingDirection',
                         ap_value='j-', pa_value='j'):
    """Determine direction ['AP', 'PA'] of single subject's EPI nifty files
    Use either metadata in associated json file or filename
    Parameters
    ----------
    input_files : InputFiles namedtuple
        The local input files for the subject
    input_type : "s3" or "local", default="s3"
        The location of the input files, local or on S3
    bucket : string or None, default=None
        S3 Bucket where the input files are located.
        If input_type == 's3', then bucket must not be None
    metadata_source : "json" or "filename", default="json"
        If "filename," look for the direction in the filename,
        otherwise, use the json file and the other parameters
    json_key : string, default="PhaseEncodingDirection"
        The key that stores the direction information
    ap_value : string, default="j-"
        Metadata value to associate with dir-AP
    pa_value : string, default="j"
        Metadata value to associate with dir-PA
    Returns
    -------
    InputFiles namedtuple
        An InputFiles namedtuple where all fields match the `input_files`
        namedtuple except that in the `files` field, the "epi_nii" and
        "epi_json" keys have been replaced with "epi_ap" and "epi_pa."
    """
    if metadata_source not in ['filename', 'json']:
        raise ValueError('metadata_source must be "filename" or "json".')
    if input_type not in ['s3', 'local']:
        raise ValueError('input_type must be "local" or "s3".')
    if input_type == 's3' and bucket is None:
        raise ValueError('If input_type is "s3," you must supply a bucket.')
    epi_files = input_files.files['epi_nii']
    json_files = input_files.files['epi_json']
    if metadata_source == 'filename':
        ap_files = [f for f in epi_files if 'dir-AP' in f]
        pa_files = [f for f in epi_files if 'dir-PA' in f]
    else:
        # Confirm that each nifty file has a corresponding json file.
        required_json = set([f.replace('.nii.gz', '.json') for f in epi_files])
        if set(json_files) != required_json:
            raise ValueError(
                'There are nifty files without corresponding json files. We '
                'failed to find the following expected files: {files!s}'
                ''.format(files=required_json - set(json_files))
            )
        # Helper: load the sidecar JSON from local disk or from S3.
        # NOTE(review): `s3_client` is a module-level name, not a parameter —
        # confirm it is initialized before the s3 branch runs.
        def get_json(json_file):
            if input_type == 'local':
                with open(json_file, 'r') as fp:
                    meta = json.load(fp)
            else:
                response = s3_client.get_object(
                    Bucket=bucket,
                    Key=json_file,
                )
                meta = json.loads(response.get('Body').read())
            return meta
        ap_files = []
        pa_files = []
        # Classify each sidecar by its phase-encoding direction, falling
        # back on the filename when metadata is missing or unexpected.
        for jfile in json_files:
            metadata = get_json(jfile)
            direction = metadata.get(json_key)
            if direction == ap_value:
                if 'dir-PA' in jfile:
                    mod_logger.warning(
                        'The key {key:s}={val:s} does not match the direction '
                        'suggested by the filename {fn:s}'.format(
                            key=json_key, val=direction, fn=jfile
                        )
                    )
                ap_files.append(jfile.replace('.json', '.nii.gz'))
            elif direction == pa_value:
                if 'dir-AP' in jfile:
                    mod_logger.warning(
                        'The key {key:s}={val:s} does not match the direction '
                        'suggested by the filename {fn:s}'.format(
                            key=json_key, val=direction, fn=jfile
                        )
                    )
                pa_files.append(jfile.replace('.json', '.nii.gz'))
            elif direction is None:
                mod_logger.warning(
                    'The key {key:s} does not exist in file {jfile:s}. '
                    'Falling back on filename to determine directionality.'
                    '\n\n'.format(key=json_key, jfile=jfile)
                )
                if 'dir-AP' in jfile:
                    ap_files.append(jfile.replace('.json', '.nii.gz'))
                elif 'dir-PA' in jfile:
                    pa_files.append(jfile.replace('.json', '.nii.gz'))
                else:
                    raise ValueError(
                        'The key {key:s} does not exist in file {jfile:s} and '
                        'the directionality could not be inferred from the '
                        'file name.'.format(key=json_key, jfile=jfile)
                    )
            else:
                mod_logger.warning(
                    'The metadata in file {jfile:s} does not match the dir-PA '
                    'or dir-AP values that you provided. {key:s} = {val:s}. '
                    'Falling back on filename to determine directionality.\n\n'
                    ''.format(jfile=jfile, key=json_key, val=direction)
                )
                if 'dir-AP' in jfile:
                    ap_files.append(jfile.replace('.json', '.nii.gz'))
                elif 'dir-PA' in jfile:
                    pa_files.append(jfile.replace('.json', '.nii.gz'))
                else:
                    raise ValueError(
                        'The metadata for key {key:s} in file {jfile:s} does '
                        'not match the dir-PA or dir-AP values that you '
                        'provided. {key:s} = {val:s}. And the directionality '
                        'could not be inferred from the file name.'.format(
                            key=json_key,
                            jfile=jfile,
                            val=direction,
                        ))
    # Swap the epi_nii/epi_json entries for the resolved AP/PA lists.
    files = copy.deepcopy(input_files.files)
    del files['epi_nii']
    del files['epi_json']
    files['epi_ap'] = ap_files
    files['epi_pa'] = pa_files
    return InputFiles(
        subject=input_files.subject,
        site=input_files.site,
        valid=input_files.valid,
        files=files,
        file_type=input_files.file_type
    )
def separate_sessions(input_files, multiples_policy='sessions',
                      assign_empty_sessions=True):
    """Separate an input file register into per-session registers.

    Parameters
    ----------
    input_files : InputFiles namedtuple
        Register of files for one subject at one site.

    multiples_policy : "sessions" or "concatenate"
        Flag that dictates how to handle multiple files in a session.
        If "sessions," treat multiples as different sessions and assign
        to new session IDs. If "concatenate," concatenate multiples into
        a single session.

    assign_empty_sessions : bool
        If True, assign session IDs to files without a session ID in
        their path.

    Returns
    -------
    list of InputFilesWithSession namedtuples
        One namedtuple per session ID. Subjects whose files are
        inconsistent are returned with ``files=None`` so that callers
        can filter them out.
    """
    if multiples_policy not in ['sessions', 'concatenate']:
        raise ValueError('`multiples_policy` must be either "sessions" or '
                         '"concatenate"')

    # Take only the first of the T1W nifty files
    if len(input_files.files['t1w']) > 1:
        mod_logger.warning(
            'Found more than one T1W file for subject {sub:s} at site {site:s}'
            '. Discarding the others.\n\n'.format(sub=input_files.subject,
                                                  site=input_files.site)
        )
    # Bug fix: the warning above promised to discard the extra T1W files,
    # but the original kept all of them. Slicing keeps the result a list
    # and is safe when the list is empty.
    t1w = input_files.files['t1w'][:1]

    # Take only the first freesurfer directory
    freesurfer_dirs = {
        f.split('/freesurfer/')[0] for f in input_files.files['freesurfer']
    }
    if len(freesurfer_dirs) > 1:
        mod_logger.warning(
            'Found more than one freesurfer dir for subject {sub:s} at site '
            '{site:s}. Discarding the others.\n\n'.format(
                sub=input_files.subject, site=input_files.site
            )
        )
    if freesurfer_dirs:
        freesurfer_dir = freesurfer_dirs.pop()
        freesurfer = [f for f in input_files.files['freesurfer']
                      if f.startswith(freesurfer_dir)]
    else:
        # Bug fix: `set().pop()` raised KeyError when a subject had no
        # freesurfer files at all.
        freesurfer = []

    # Organize the files by session ID
    def get_sess_id(filename, fallback='null'):
        # Retrieve the session ID (e.g. "sess-01") from a filename,
        # returning `fallback` when the path has no session component.
        match = re.search('/sess-[0-9a-zA-Z]*/', filename)
        if match is not None:
            return match.group().strip('/')
        else:
            return fallback

    ftypes = ['dwi', 'bvec', 'bval', 'epi_ap', 'epi_pa']
    sess_ids = {ft: {get_sess_id(fn) for fn in input_files.files[ft]}
                for ft in ftypes}

    # Every file type must report the same set of session IDs; otherwise
    # mark the subject invalid (files=None) and let the caller filter it.
    if not all([s == list(sess_ids.values())[0] for s in sess_ids.values()]):
        mod_logger.warning(
            'Session numbers are inconsistent for subject {sub:s} at site '
            '{site:s}. Sess-IDs: {sess_ids!s}.\nFiles: {files!s}\n\n'.format(
                sub=input_files.subject,
                site=input_files.site,
                sess_ids=sess_ids,
                files={k: (v) for k, v in input_files.files.items()
                       if k in ['dwi', 'bvec', 'bval', 'epi_ap', 'epi_pa']},
            )
        )
        return [InputFilesWithSession(
            subject=input_files.subject,
            site=input_files.site,
            session=None,
            files=None,
            file_type=None,
        )]

    # We just confirmed that all of the session ID sets are equal so we can
    # pop one set of session IDs off of `sess_ids` and use it from now on
    sess_ids = sess_ids[ftypes[0]]

    # Collect files by session ID and then file type
    files_by_session = {
        sess: {
            ft: [
                f for f in input_files.files[ft] if get_sess_id(f) == sess
            ]
            for ft in ftypes
        }
        for sess in sess_ids
    }

    output_files = []

    # Loop over each session ID
    for session, files in files_by_session.items():
        # Confirm that the subject has an equal number of each type of file
        n_files = {k: len(v) for k, v in files.items()
                   if k in ['dwi', 'bvec', 'bval', 'epi_ap', 'epi_pa']}
        file_counts = set(n_files.values())
        if len(file_counts) != 1:
            mod_logger.warning(
                'The number of files is inconsistent for subject {sub:s} at '
                'site {site:s}. The file numbers are {n_files!s}\n\n'.format(
                    sub=input_files.subject,
                    site=input_files.site,
                    n_files=n_files
                )
            )
            output_files.append(InputFilesWithSession(
                subject=input_files.subject,
                site=input_files.site,
                session=None,
                files=None,
                file_type=None,
            ))
        elif file_counts == {1}:
            # There is only one set of files in this session. Append to output.
            if session == 'null':
                output_session = 'sess-01' if assign_empty_sessions else None
            else:
                output_session = session
            # Bug fix: the original read the *unseparated* lists from
            # `input_files.files` here, so every session received every
            # file. Use the per-session `files` dict instead.
            output_files.append(InputFilesWithSession(
                subject=input_files.subject,
                site=input_files.site,
                session=output_session,
                files=dict(
                    dwi=files['dwi'],
                    bvec=files['bvec'],
                    bval=files['bval'],
                    epi_ap=files['epi_ap'],
                    epi_pa=files['epi_pa'],
                    t1w=t1w,
                    freesurfer=freesurfer,
                ),
                file_type=input_files.file_type,
            ))
        else:
            # Every file type has the same count but it is greater than one.
            # Bug fix: this branch was unreachable in the original because
            # the preceding elif condition (len == 1) was the exact
            # complement of the if condition (len != 1).
            if multiples_policy == 'concatenate':
                # The multiple copies represent one session and should be
                # concatenated
                raise NotImplementedError('Concatenation of multiples not yet '
                                          'implemented.')
            else:
                # The multiple copies represent multiple sessions and
                # should be further subdivided into sessions
                raise NotImplementedError('Session subdivision not yet '
                                          'implemented.')

    return output_files
def get_all_s3_registers(prefix, sites, bucket='fcp-indi'):
    """Collect per-session input file registers for each site on S3.

    Parameters
    ----------
    prefix : string
        S3 prefix designating the S3 "directory" in which to search.
        Do not include the site ID in the prefix.

    sites : sequence of strings
        Site IDs from which to collect raw data

    bucket : string
        AWS S3 bucket in which to search

    Returns
    -------
    dict
        dict where the keys are site IDs and the values are
        list of `InputFiles` namedtuples for each valid subject
        at that site
    """
    subjects = {}
    for site in sites:
        # Get all S3 keys. Bug fix: the original hard-coded
        # bucket='fcp-indi' here, silently ignoring the `bucket` argument.
        keys = get_s3_keys(prefix=prefix, site=site, bucket=bucket)
        # Get all registers (without the AP/PA directions)
        regs = keys_to_subject_register(keys=keys, prefix=prefix, site=site)
        # Assign the fmap files to either AP/PA
        regs_pa_ap = [
            determine_directions(input_files=reg,
                                 input_type='s3',
                                 bucket=bucket,
                                 metadata_source='json',
                                 json_key='PhaseEncodingDirection',
                                 ap_value='j-', pa_value='j')
            for reg in regs
        ]
        # Separate each subject register into different sessions
        regs_nested = [
            separate_sessions(reg,
                              multiples_policy='sessions',
                              assign_empty_sessions=True)
            for reg in regs_pa_ap
        ]
        # `separate_sessions` returns a list per subject, so `regs_nested`
        # is nested and needs to be flattened
        regs_flat = [item for sublist in regs_nested for item in sublist]
        # Keep only valid subjects (invalid ones carry files=None)
        subjects[site] = [reg for reg in regs_flat if reg.files is not None]
    return subjects
| 2,587 | 0 | 179 |
e033f9fd73d722ddff5f01e01c531a7ab26d7f5f | 483 | py | Python | ec2_lambda/lambda_function.py | thiagop-o/python-automacao_lambda_aws | d5db1f37b898a422a500dff392e0da0bbeef1fc6 | [
"MIT"
] | null | null | null | ec2_lambda/lambda_function.py | thiagop-o/python-automacao_lambda_aws | d5db1f37b898a422a500dff392e0da0bbeef1fc6 | [
"MIT"
] | null | null | null | ec2_lambda/lambda_function.py | thiagop-o/python-automacao_lambda_aws | d5db1f37b898a422a500dff392e0da0bbeef1fc6 | [
"MIT"
] | null | null | null | import os
import boto3
AMI = os.environ['AMI']
INSTANCE_TYPE = os.environ['INSTANCE_TYPE']
KEY_NAME = os.environ['KEY_NAME']
SUBNET_ID = os.environ['SUBNET_ID']
ec2 = boto3.resource('ec2')
| 21 | 51 | 0.652174 | import os
import boto3
AMI = os.environ['AMI']
INSTANCE_TYPE = os.environ['INSTANCE_TYPE']
KEY_NAME = os.environ['KEY_NAME']
SUBNET_ID = os.environ['SUBNET_ID']
ec2 = boto3.resource('ec2')
def lambda_handler(event, context):
    """AWS Lambda entry point: launch exactly one EC2 instance.

    The AMI, instance type, key pair and subnet are taken from the
    module-level constants read out of the Lambda environment variables.
    The `event` and `context` arguments are required by the Lambda calling
    convention but are not used.
    """
    launch_params = dict(
        ImageId=AMI,
        InstanceType=INSTANCE_TYPE,
        KeyName=KEY_NAME,
        SubnetId=SUBNET_ID,
        MaxCount=1,
        MinCount=1,
    )
    created = ec2.create_instances(**launch_params)
    print('New Instance Created: ', created[0].id)
d5a445a9060d6cd20ce9d7173af9483d4cbd7c1e | 8,837 | py | Python | tests/unit_tests/logic/camera/test_cameraAscom.py | mworion/MountWizzard4 | 4e06b29ec2ef70be40e114b911b7bdf2f858a4b1 | [
"Apache-2.0"
] | 16 | 2020-01-11T22:32:26.000Z | 2022-03-31T15:18:14.000Z | tests/unit_tests/logic/camera/test_cameraAscom.py | mworion/MountWizzard4 | 4e06b29ec2ef70be40e114b911b7bdf2f858a4b1 | [
"Apache-2.0"
] | 196 | 2020-01-16T13:56:01.000Z | 2022-03-29T02:06:51.000Z | tests/unit_tests/logic/camera/test_cameraAscom.py | mworion/MountWizzard4 | 4e06b29ec2ef70be40e114b911b7bdf2f858a4b1 | [
"Apache-2.0"
] | 6 | 2019-12-01T19:39:33.000Z | 2021-05-27T13:14:20.000Z | ############################################################
# -*- coding: utf-8 -*-
#
# # # # # # #
# ## ## # ## # #
# # # # # # # # # # #
# # ## # ## ## ######
# # # # # # #
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
#
# written in python3, (c) 2019-2021 by mworion
#
# Licence APL2.0
#
###########################################################
# standard libraries
import pytest
import unittest.mock as mock
import platform
if not platform.system() == 'Windows':
pytest.skip("skipping windows-only tests", allow_module_level=True)
# external packages
from astropy.io import fits
from PyQt5.QtCore import QThreadPool, QObject, pyqtSignal
from skyfield.api import Angle, wgs84
import ctypes
# local import
from mountcontrol.mount import Mount
from logic.environment.skymeter import Skymeter
from logic.camera.cameraAscom import CameraAscom
from base.driverDataClass import Signals
from base.ascomClass import AscomClass
from base.loggerMW import setupLogging
setupLogging()
@pytest.fixture(autouse=True, scope='function')
| 27.024465 | 115 | 0.602806 | ############################################################
# -*- coding: utf-8 -*-
#
# # # # # # #
# ## ## # ## # #
# # # # # # # # # # #
# # ## # ## ## ######
# # # # # # #
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
#
# written in python3, (c) 2019-2021 by mworion
#
# Licence APL2.0
#
###########################################################
# standard libraries
import pytest
import unittest.mock as mock
import platform
if not platform.system() == 'Windows':
pytest.skip("skipping windows-only tests", allow_module_level=True)
# external packages
from astropy.io import fits
from PyQt5.QtCore import QThreadPool, QObject, pyqtSignal
from skyfield.api import Angle, wgs84
import ctypes
# local import
from mountcontrol.mount import Mount
from logic.environment.skymeter import Skymeter
from logic.camera.cameraAscom import CameraAscom
from base.driverDataClass import Signals
from base.ascomClass import AscomClass
from base.loggerMW import setupLogging
setupLogging()
@pytest.fixture(autouse=True, scope='function')
def module_setup_teardown():
    """Build a fresh CameraAscom instance for every test.

    The instance is published as the module-level global `app`, wired to
    lightweight stand-ins for the ASCOM driver (Test1), the surrounding
    application (Test) and the thread pool host (TestApp).
    """
    # Stand-in for the ASCOM camera driver: fixed chip geometry,
    # capability flags and canned exposure behaviour.
    class Test1:
        CameraXSize = 1000
        CameraYSize = 500
        CanAbortExposure = True
        CanFastReadout = True
        CanGetCoolerPower = True
        CanSetCCDTemperature = True
        FastReadout = True
        PixelSizeX = 4
        PixelSizeY = 4
        MaxBinX = 3
        MaxBinY = 3
        BinX = 1
        BinY = 1
        StartX = 0
        StartY = 0
        CameraState = 0
        CCDTemperature = 10
        CoolerOn = True
        CoolerPower = 100
        Name = 'test'
        DriverVersion = '1'
        DriverInfo = 'test1'
        ImageReady = True
        image = [1, 1, 1]
        # Expose the pixel data as a ctypes int array, mimicking the COM
        # driver's ImageArray property.
        ImageArray = (ctypes.c_int * len(image))(*image)
        # see
        # https://stackoverflow.com/questions/4145775/how-do-i-convert-a-python-list-into-a-c-array-by-using-ctypes
        @staticmethod
        def StartExposure(time, light=True):
            return True
        @staticmethod
        def StopExposure():
            return True
    # Minimal host object carrying only a thread pool.
    class TestApp:
        threadPool = QThreadPool()
    # Stand-in for the main application: signals, a mount at (0, 0) with a
    # fixed pointing position, and a skymeter.
    class Test(QObject):
        threadPool = QThreadPool()
        message = pyqtSignal(str, int)
        mount = Mount(host='localhost', MAC='00:00:00:00:00:00', verbose=False,
                      pathToData='tests/workDir/data')
        mount.obsSite.location = wgs84.latlon(latitude_degrees=0,
                                              longitude_degrees=0,
                                              elevation_m=0)
        mount.obsSite.raJNow = Angle(hours=12)
        mount.obsSite.decJNow = Angle(degrees=45)
        deviceStat = {'mount': True}
        skymeter = Skymeter(app=TestApp())
    global app
    app = CameraAscom(app=Test(), signals=Signals(), data={})
    app.client = Test1()
    app.clientProps = []
    yield
# workerGetInitialConfig delegates to the AscomClass base implementation,
# which is mocked out here, so the wrapper just has to report success.
def test_workerGetInitialConfig_1():
    with mock.patch.object(AscomClass,
                           'workerGetInitialConfig',
                           return_value=True):
        suc = app.workerGetInitialConfig()
        assert suc
# Polling the Test1 stub succeeds when fast readout, settable CCD
# temperature and readable cooler power are all flagged as available.
def test_workerPollData_1():
    app.data['CAN_FAST'] = True
    app.data['CAN_SET_CCD_TEMPERATURE'] = True
    app.data['CAN_GET_COOLER_POWER'] = True
    suc = app.workerPollData()
    assert suc
# sendDownloadMode succeeds in normal mode when CAN_FAST is available ...
def test_sendDownloadMode_1():
    app.data['CAN_FAST'] = True
    suc = app.sendDownloadMode()
    assert suc
# ... and in fast readout mode ...
def test_sendDownloadMode_2():
    app.data['CAN_FAST'] = True
    suc = app.sendDownloadMode(fastReadout=True)
    assert suc
# ... but fails when the camera cannot switch readout speed.
def test_sendDownloadMode_3():
    app.data['CAN_FAST'] = False
    suc = app.sendDownloadMode()
    assert not suc
# workerExpose with the default exposure time: getAscomProperty is forced
# to False (image never reported ready) while abortExpose is set, so the
# worker presumably leaves via the abort path and still reports success.
def test_workerExpose_1():
    def mockGetAscomProperty(a):
        return False
    app.data['CAN_FAST'] = False
    app.data['CCD_INFO.CCD_PIXEL_SIZE_X'] = 1000
    app.data['CCD_INFO.CCD_PIXEL_SIZE_Y'] = 1000
    app.imagePath = ''
    app.app.deviceStat['mount'] = True
    app.abortExpose = True
    tmp = app.getAscomProperty
    app.getAscomProperty = mockGetAscomProperty
    with mock.patch.object(fits.PrimaryHDU,
                           'writeto'):
        with mock.patch.object(app.client,
                               'StartExposure'):
            suc = app.workerExpose()
            assert suc
    app.getAscomProperty = tmp
# Same scenario with an explicit zero exposure time.
def test_workerExpose_2():
    def mockGetAscomProperty(a):
        return False
    app.data['CAN_FAST'] = False
    app.data['CCD_INFO.CCD_PIXEL_SIZE_X'] = 1000
    app.data['CCD_INFO.CCD_PIXEL_SIZE_Y'] = 1000
    app.imagePath = ''
    app.app.deviceStat['mount'] = True
    app.abortExpose = True
    tmp = app.getAscomProperty
    app.getAscomProperty = mockGetAscomProperty
    with mock.patch.object(fits.PrimaryHDU,
                           'writeto'):
        with mock.patch.object(app.client,
                               'StartExposure'):
            suc = app.workerExpose(expTime=0)
            assert suc
    app.getAscomProperty = tmp
# getAscomProperty forced to True: the image is reported ready, so the
# exposure with expTime=0 completes normally.
def test_workerExpose_3():
    def mockGetAscomProperty(a):
        return True
    app.data['CAN_FAST'] = False
    app.data['CCD_INFO.CCD_PIXEL_SIZE_X'] = 1000
    app.data['CCD_INFO.CCD_PIXEL_SIZE_Y'] = 1000
    app.imagePath = ''
    app.app.deviceStat['mount'] = True
    tmp = app.getAscomProperty
    app.getAscomProperty = mockGetAscomProperty
    with mock.patch.object(fits.PrimaryHDU,
                           'writeto'):
        with mock.patch.object(app.client,
                               'StartExposure'):
            suc = app.workerExpose(expTime=0)
            assert suc
    app.getAscomProperty = tmp
# As above, additionally passing focalLength=0.
def test_workerExpose_4():
    def mockGetAscomProperty(a):
        return True
    app.data['CAN_FAST'] = False
    app.data['CCD_INFO.CCD_PIXEL_SIZE_X'] = 1000
    app.data['CCD_INFO.CCD_PIXEL_SIZE_Y'] = 1000
    app.imagePath = ''
    app.app.deviceStat['mount'] = True
    tmp = app.getAscomProperty
    app.getAscomProperty = mockGetAscomProperty
    with mock.patch.object(fits.PrimaryHDU,
                           'writeto'):
        with mock.patch.object(app.client,
                               'StartExposure'):
            suc = app.workerExpose(expTime=0, focalLength=0)
            assert suc
    app.getAscomProperty = tmp
# As above, with the client's ImageArray attribute additionally patched.
def test_workerExpose_5():
    def mockGetAscomProperty(a):
        return True
    app.data['CAN_FAST'] = False
    app.data['CCD_INFO.CCD_PIXEL_SIZE_X'] = 1000
    app.data['CCD_INFO.CCD_PIXEL_SIZE_Y'] = 1000
    app.imagePath = ''
    app.app.deviceStat['mount'] = True
    tmp = app.getAscomProperty
    app.getAscomProperty = mockGetAscomProperty
    with mock.patch.object(fits.PrimaryHDU,
                           'writeto'):
        with mock.patch.object(app.client,
                               'StartExposure'):
            with mock.patch.object(app.client,
                                   'ImageArray',
                                   return_value=None):
                suc = app.workerExpose(expTime=0, focalLength=0)
                assert suc
    app.getAscomProperty = tmp
# expose() only queues the worker, so with callMethodThreaded mocked it
# simply reports success.
def test_expose_1():
    with mock.patch.object(app,
                           'callMethodThreaded'):
        suc = app.expose()
        assert suc
# abort is refused while the device is disconnected ...
def test_abort_1():
    app.deviceConnected = False
    suc = app.abort()
    assert not suc
# ... and while the camera cannot abort ...
def test_abort_2():
    app.deviceConnected = True
    app.data['CAN_ABORT'] = False
    suc = app.abort()
    assert not suc
# ... but succeeds when connected and abort-capable.
def test_abort_3():
    app.deviceConnected = True
    app.data['CAN_ABORT'] = True
    with mock.patch.object(app,
                           'callMethodThreaded'):
        suc = app.abort()
        assert suc
# Cooler switching requires a connected device.
def test_sendCoolerSwitch_1():
    app.deviceConnected = False
    suc = app.sendCoolerSwitch()
    assert not suc
def test_sendCoolerSwitch_2():
    app.deviceConnected = True
    suc = app.sendCoolerSwitch(coolerOn=True)
    assert suc
# Cooler temperature requires a connected device and the
# CAN_SET_CCD_TEMPERATURE capability.
def test_sendCoolerTemp_1():
    app.deviceConnected = False
    suc = app.sendCoolerTemp()
    assert not suc
def test_sendCoolerTemp_2():
    app.deviceConnected = True
    app.data['CAN_SET_CCD_TEMPERATURE'] = False
    suc = app.sendCoolerTemp(temperature=-10)
    assert not suc
def test_sendCoolerTemp_3():
    app.deviceConnected = True
    app.data['CAN_SET_CCD_TEMPERATURE'] = True
    suc = app.sendCoolerTemp(temperature=-10)
    assert suc
# Offset and gain setters only require a connected device.
def test_sendOffset_1():
    app.deviceConnected = False
    suc = app.sendOffset()
    assert not suc
def test_sendOffset_2():
    app.deviceConnected = True
    suc = app.sendOffset(offset=50)
    assert suc
def test_sendGain_1():
    app.deviceConnected = False
    suc = app.sendGain()
    assert not suc
def test_sendGain_2():
    app.deviceConnected = True
    suc = app.sendGain(gain=50)
    assert suc
| 7,118 | 0 | 551 |
c9fdf87a145b69062ebfc4b957e7c6b7fad9824a | 475 | py | Python | renderers/linkRenderer.py | bgporter/wastebook | 79885a8d503452e1fbeb8ff445cedd2daafff2a0 | [
"MIT"
] | null | null | null | renderers/linkRenderer.py | bgporter/wastebook | 79885a8d503452e1fbeb8ff445cedd2daafff2a0 | [
"MIT"
] | null | null | null | renderers/linkRenderer.py | bgporter/wastebook | 79885a8d503452e1fbeb8ff445cedd2daafff2a0 | [
"MIT"
] | null | null | null |
import re
import renderer
LINK_PATTERN = re.compile(r'(^|\s)(http(?:s)?://[^ \<]+\w)')
class LinkRenderer(renderer.RenderBase):
'''
Converts standalone http or https links in text into the Markdown
equivalent, so
lorem ipsum http://www.example.com, etc
becomes
lorem ipsum [http://www.example.com](http://www.example.com), etc
'''
| 17.592593 | 71 | 0.612632 |
import re
import renderer
LINK_PATTERN = re.compile(r'(^|\s)(http(?:s)?://[^ \<]+\w)')
class LinkRenderer(renderer.RenderBase):
'''
Converts standalone http or https links in text into the Markdown
equivalent, so
lorem ipsum http://www.example.com, etc
becomes
lorem ipsum [http://www.example.com](http://www.example.com), etc
'''
def RenderText(self):
self.text = LINK_PATTERN.sub('\g<1>[\g<2>](\g<2>)', self.text) | 69 | 0 | 26 |
19a1c00cdc6dcf513c84d6a0bc85980dea271cba | 4,913 | py | Python | py/feasability.py | andrewmkeller/niDelayGraph | 41aa0a9e17b6e3c59ef9e7103f8333047e7745e0 | [
"MIT"
] | null | null | null | py/feasability.py | andrewmkeller/niDelayGraph | 41aa0a9e17b6e3c59ef9e7103f8333047e7745e0 | [
"MIT"
] | null | null | null | py/feasability.py | andrewmkeller/niDelayGraph | 41aa0a9e17b6e3c59ef9e7103f8333047e7745e0 | [
"MIT"
] | null | null | null | import niGraphParser
import niGraph
import sys
import glob
import os
import random
import time
import re
from matplotlib import pyplot
import networkx as nx
import networkx.algorithms.shortest_paths.dense as dense
import xml.etree.ElementTree as ET
import networkx.algorithms.simple_paths as nxPaths
graphsDir = "..\DataSets"
# print("Topological sort took " + str(durationTime) + " seconds")
# pyplot.loglog(sizes, times, "o")
# pyplot.xscale("log")
# pyplot.yscale("log")
# pyplot.show()
if __name__ == "__main__":
main()
| 28.398844 | 94 | 0.607572 | import niGraphParser
import niGraph
import sys
import glob
import os
import random
import time
import re
from matplotlib import pyplot
import networkx as nx
import networkx.algorithms.shortest_paths.dense as dense
import xml.etree.ElementTree as ET
import networkx.algorithms.simple_paths as nxPaths
graphsDir = "..\DataSets"
def visit(node, sortedNodes, temp, perm, unmarked):
    """Depth-first visit used by topologicalSort (DFS-based topo sort).

    Appends `node` to `sortedNodes` *after* all of its non-feedback
    successors, so `sortedNodes` ends up in reverse topological order
    (sinks first). Raises Exception when a non-feedback cycle is found
    (node re-entered while temporarily marked).
    """
    if node in perm:
        # Bug fix: the original never consulted the permanent marks, so a
        # node reachable along several paths was appended to sortedNodes
        # once per path and its whole subtree was re-walked each time
        # (duplicate output and potentially exponential running time).
        return
    if node in temp:
        raise Exception
    if node in unmarked:
        unmarked.remove(node)
    temp.add(node)
    for outEdge in (e for e in node.outEdges if not e.isFeedback):
        visit(outEdge.target, sortedNodes, temp, perm, unmarked)
    temp.remove(node)
    perm.add(node)
    sortedNodes.append(node)
def topologicalSort(graph):
    """Return the graph's vertices ordered by a DFS post-order.

    Feedback edges are ignored inside `visit`, so the result is a reverse
    topological order of the remaining DAG: every vertex appears after
    all of its non-feedback successors (sinks first, sources last).
    """
    ordering = []
    temporary, permanent = set(), set()
    pending = set(graph.getVertices())
    while pending:
        start = next(iter(pending))
        visit(start, ordering, temporary, permanent, pending)
    return ordering
def longestPathFromTopolSort(sortedNodes):
    """Return the edge count of the longest non-feedback path.

    `sortedNodes` must be in reverse topological order (sinks first), as
    produced by topologicalSort, so each node's own path length is final
    before its predecessors are relaxed.
    """
    length = {node: 0 for node in sortedNodes}
    for node in sortedNodes:
        for inEdge in node.inEdges:
            if inEdge.isFeedback:
                continue
            candidate = length[node] + 1
            if candidate > length[inEdge.source]:
                length[inEdge.source] = candidate
    return max(length.values())
def createDOT(graph, filePath):
    """Write *graph* to *filePath* in Graphviz DOT format.

    Each edge becomes one "n<sourceId> -> n<targetId>" line inside a
    "strict digraph" block. The 'with' statement guarantees the handle is
    flushed and closed even on error (the original leaked the file object).
    """
    with open(filePath, "w") as fp:
        fp.write("strict digraph {\n")
        for edge in graph.getEdges():
            fp.write("n" + str(edge.source.vertexId) + " -> n" + str(edge.target.vertexId) + "\n")
        fp.write("}\n")
def calculateDAGPolarPaths(graph, limit=None):
    """Count the distinct source-to-sink paths of the (non-feedback) DAG.

    Parameters
    ----------
    graph : delay graph exposing ``getVertices()``; vertices expose
        ``inEdges``/``outEdges`` and edges expose ``isFeedback``/``target``.
    limit : int or None
        When given, abort and return -1 as soon as the running total
        exceeds this value.

    Returns
    -------
    int
        Number of forward paths from source vertices (no in-edges) to
        sink vertices (no out-edges), or -1 when `limit` was exceeded.
    """
    # Vertices arrive in reverse topological order (sinks first), so every
    # successor's count is final before its predecessors are processed.
    sortedVertices = topologicalSort(graph)
    possiblePaths = {}
    totalPaths = 0
    for node in sortedVertices:
        if not node.outEdges:
            # A sink terminates exactly one path.
            pathCount = 1
        else:
            pathCount = 0
            for edge in node.outEdges:
                if not edge.isFeedback:
                    pathCount += possiblePaths[edge.target]
        possiblePaths[node] = pathCount
        if not node.inEdges:
            # Bug fix: the original added the loop-local accumulator
            # (named `sum`, shadowing the builtin) here, which was still 0
            # for an isolated vertex (source AND sink), so such vertices
            # contributed no path. Use the vertex's own count instead.
            totalPaths += pathCount
            if limit and totalPaths > limit:
                return -1
    return totalPaths
def main():
    """Scan every DelayGraph_*.graphml in graphsDir and append one CSV
    line per analyzable graph to feasability.csv (path, vertex count,
    edge count, size, timing, polar-path count, simple-cycle count).
    Graphs with more than 1000 paths or more than 1000 simple cycles
    are skipped."""
    # visit() recurses once per DAG level; raise the limit for deep graphs
    sys.setrecursionlimit(10000)
    # return
    graphs = glob.glob(os.path.join(graphsDir, "*.graphml"))
    # Sort numerically by the integer index embedded in each file name
    graphs.sort(key = lambda x: int(re.match(".*DelayGraph_(\d+)\.graphml", x).group(1)))
    #graphs = graphs[0:500]
    graph = niGraphParser.parseGraphMlFile(graphs[0])
    createDOT(graph, "graph0.dot")
    sizes = []
    times = []
    # Opened in append mode, so repeated runs accumulate results
    fp = open("feasability.csv","a")
    for graphPath in graphs:
        graph = niGraphParser.parseGraphMlFile(graphPath)
        # Each DelayGraph_<n>.graphml has a sibling OriginalGoals_<n>.xml
        pathMatch = re.match("(.*)DelayGraph(_\d+)\.graphml", graphPath)
        originalTargetPath = pathMatch.group(1) + "OriginalGoals" + \
                             pathMatch.group(2) + ".xml"
        print(originalTargetPath)
        tree = ET.parse(originalTargetPath)
        root = tree.getroot()
        # NOTE(review): targetPeriod is parsed but never used below
        targetPeriod = int(root.find('TargetClockPeriodInPicoSeconds').text)
        # Mirror the delay graph into networkx (negated delays as weights)
        D=nx.DiGraph()
        for edge in graph.getEdges():
            D.add_edge(edge.source.vertexId,edge.target.vertexId, weight=-edge.delay)
        startTime = time.time()
        # Count source-to-sink paths; -1 means the 1000-path limit was hit
        totalPaths = calculateDAGPolarPaths(graph, 1000)
        if totalPaths < 0:
            print ("Too many paths!")
            continue
        durationTime = time.time() - startTime
        cycleCount = 0
        cyclesLimit = 1000;
        limitReached = False;
        # Enumerate simple cycles, bailing out once the limit is exceeded
        for cycle in nx.simple_cycles(D):
            cycleCount = cycleCount + 1;
            if cyclesLimit < cycleCount:
                limitReached = True
                break
        if limitReached:
            print ("Too many simple_cycles!")
            continue
        sizes.append(len(graph.getVertices()) + len(graph.getEdges()))
        times.append(durationTime)
        #longestPath = longestPathFromTopolSort(sortedVertices)
        # CSV columns: path, #vertices, #edges, size, seconds, paths, cycles
        fp.write(graphPath + ",")
        fp.write(str(len(graph.vertices))+",")
        fp.write(str(len(graph.edges))+",")
        fp.write(str(len(graph.getVertices()) + len(graph.getEdges())) + ",")
        fp.write(str(durationTime) + ",")
        fp.write(str(totalPaths) + ",")
        fp.write(str(cycleCount))
        fp.write("\n")
        print (graphPath, len(graph.vertices), len(graph.edges), totalPaths)
        #break
    fp.close()
# print("Topological sort took " + str(durationTime) + " seconds")
# pyplot.loglog(sizes, times, "o")
# pyplot.xscale("log")
# pyplot.yscale("log")
# pyplot.show()
if __name__ == "__main__":
main()
| 4,216 | 0 | 138 |
e6b2b01c8bd75ea94536fbb79633083633929b0b | 1,452 | py | Python | utils/draw_utils.py | takurooo/Python-YOLOv3_in_OpneCV | 8f722cdd86e360d557a539431c84636d369549f5 | [
"MIT"
] | null | null | null | utils/draw_utils.py | takurooo/Python-YOLOv3_in_OpneCV | 8f722cdd86e360d557a539431c84636d369549f5 | [
"MIT"
] | 1 | 2018-09-16T05:54:13.000Z | 2018-09-16T05:54:13.000Z | utils/draw_utils.py | takurooo/Python-YOLOv3_in_OpneCV | 8f722cdd86e360d557a539431c84636d369549f5 | [
"MIT"
] | null | null | null | #------------------------------------------------------
# import
#------------------------------------------------------
import os
import cv2
#------------------------------------------------------
# global
#------------------------------------------------------
LINE_COLOR = (0, 0, 255)
LINE_THICKNESS = 10
FONT_STYLE = cv2.FONT_HERSHEY_SIMPLEX
FONT_SCALE = 1.0
FONT_THICKNESS = 2
FONT_COLOR = (0, 0, 0) #BGR
FONT_BACKGROUND_COLOR = (0, 0, 255)
#------------------------------------------------------
# function
#------------------------------------------------------
| 32.266667 | 74 | 0.495868 | #------------------------------------------------------
# import
#------------------------------------------------------
import os
import cv2
#------------------------------------------------------
# global
#------------------------------------------------------
LINE_COLOR = (0, 0, 255)
LINE_THICKNESS = 10
FONT_STYLE = cv2.FONT_HERSHEY_SIMPLEX
FONT_SCALE = 1.0
FONT_THICKNESS = 2
FONT_COLOR = (0, 0, 0) #BGR
FONT_BACKGROUND_COLOR = (0, 0, 255)
#------------------------------------------------------
# function
#------------------------------------------------------
def draw_results(img, classnames, scores, boxes):
    """Draw every (classname, score, box) detection triple onto `img`."""
    for detection in zip(classnames, scores, boxes):
        draw_result(img, *detection)
def draw_result(img, classname, score, box):
    """Draw one detection onto `img`: a bounding box plus a filled label
    banner showing "<classname>:<score>" anchored at the box's top-left
    corner. `box` is a (left, top, right, bottom) pixel tuple."""
    left, top, right, bottom = box
    # Bounding box around the detection
    cv2.rectangle(img, (left, top), (right, bottom),
                  LINE_COLOR, LINE_THICKNESS)
    caption = '{}:{:.2f}'.format(classname, score)
    (caption_w, caption_h), _baseline = cv2.getTextSize(
        caption, FONT_STYLE, FONT_SCALE, FONT_THICKNESS)
    # Filled banner sized to the caption, then the caption text on top
    cv2.rectangle(img, (left, top), (left + caption_w, top + caption_h),
                  FONT_BACKGROUND_COLOR, thickness=-1)
    cv2.putText(img, caption, (left, top + caption_h),
                FONT_STYLE, FONT_SCALE, FONT_COLOR, FONT_THICKNESS,
                lineType=cv2.LINE_AA,
                bottomLeftOrigin=False)
| 834 | 0 | 45 |
78f957f8fa50603fc1de5fed7674136da6e5f033 | 1,131 | py | Python | my_classes/Context.py | steven-phun/Tutoring-App | df0774f18ede09d4476ea63b9f95f7f1b0763f84 | [
"MIT"
] | null | null | null | my_classes/Context.py | steven-phun/Tutoring-App | df0774f18ede09d4476ea63b9f95f7f1b0763f84 | [
"MIT"
] | null | null | null | my_classes/Context.py | steven-phun/Tutoring-App | df0774f18ede09d4476ea63b9f95f7f1b0763f84 | [
"MIT"
] | 1 | 2021-04-20T02:38:54.000Z | 2021-04-20T02:38:54.000Z | import os
| 29 | 102 | 0.525199 | import os
class Context:
    """Thin convenience wrapper around a discord.py command context.

    Resolves the invoking user against the guild configured through the
    GUILD_SERVER_ID environment variable.
    """

    def __init__(self, ctx):
        self.ctx = ctx

    def discord_id(self):
        """:return: an int that represents the member's discord id"""
        return self.ctx.author.id

    def member(self):
        """:return: a class of discord.member.Member"""
        guild_id = int(os.getenv("GUILD_SERVER_ID"))
        guild = self.ctx.bot.get_guild(guild_id)
        return guild.get_member(self.discord_id())

    def voice(self):
        """:return: the member's discord.member.VoiceState (the mute /
        deaf / stream flags plus the VoiceChannel the member occupies,
        if any)."""
        return self.member().voice

    def mention(self):
        """:return: a str that allows given member to be mentioned."""
        return self.member().mention
| 26 | 1,071 | 23 |
2c75071e74d6ac4f4af2e29dd45f88f7b19f3318 | 2,194 | py | Python | cleaner.py | Riviere123/csv_cleaner | 944ec7337232fcc246c276cad7588fda824306d3 | [
"MIT"
] | null | null | null | cleaner.py | Riviere123/csv_cleaner | 944ec7337232fcc246c276cad7588fda824306d3 | [
"MIT"
] | null | null | null | cleaner.py | Riviere123/csv_cleaner | 944ec7337232fcc246c276cad7588fda824306d3 | [
"MIT"
] | null | null | null | import csv
import argparse
parser = argparse.ArgumentParser(description='parameters for cleaning a csv file')
parser.add_argument(
'--columns',
type=int,
nargs="+",
default=[],
help="The columns to remove. Usage: --rows 0 1 10 25",
)
parser.add_argument(
'--file',
type=str,
default=None,
help="The csv file path/name.",
)
parser.add_argument(
'--rows',
type=int,
nargs="+",
default=[],
help="Rows to remove."
)
parser.add_argument(
'--strip',
action="store_true",
help="Remove all leading and trailing white spaces."
)
args = parser.parse_args()
if __name__ == "__main__":
cleaner = Cleaner(args.file, args.columns, args.rows, args.strip) | 25.511628 | 82 | 0.573838 | import csv
import argparse
class Cleaner:
    """Read a CSV file, drop the requested rows/columns (optionally
    stripping whitespace from every cell) and write the result to
    cleaned_data.csv in the working directory."""

    def __init__(self, file, columns, rows, strip) -> None:
        """
        :param file: path of the CSV file to clean
        :param columns: iterable of 0-based column indices to drop
        :param rows: iterable of 0-based row indices to drop
        :param strip: when True, strip leading/trailing whitespace
        """
        self.file = open(f"{file}")
        self.columns = columns
        self.rows = rows
        self.strip = strip
        try:
            # Materialize the rows so the input handle can be closed
            # (the original leaked the open file object).
            self.data = list(csv.reader(self.file))
        finally:
            self.file.close()
        self.cleaned_data = self.data
        self.clean()

    def clean(self):
        """Run the full pipeline: drop rows, drop columns, write output."""
        self.remove_rows()
        self.remove_columns()
        self.write_file()

    def write_file(self):
        """Join the cleaned rows with newlines and write cleaned_data.csv."""
        self.cleaned_data = "\n".join(self.cleaned_data)
        # 'with' guarantees the output handle is flushed and closed.
        with open("cleaned_data.csv", "w") as f:
            f.write(self.cleaned_data)

    def remove_rows(self):
        """Drop every row whose index appears in self.rows."""
        self.cleaned_data = [
            row for index, row in enumerate(self.cleaned_data)
            if index not in self.rows
        ]

    def remove_columns(self):
        """Drop every column whose index appears in self.columns and
        re-serialize each row as a comma-joined string."""
        cleaned_column_data = []
        for row in self.cleaned_data:
            kept = [
                column.strip() if self.strip else column
                for index, column in enumerate(row)
                if index not in self.columns
            ]
            cleaned_column_data.append(",".join(kept))
        self.cleaned_data = cleaned_column_data
# Command-line interface for running the cleaner as a script.
parser = argparse.ArgumentParser(description='parameters for cleaning a csv file')
parser.add_argument(
    '--columns',
    type=int,
    nargs="+",
    default=[],
    # Bug fix: the usage example previously showed "--rows" for this option.
    help="The columns to remove. Usage: --columns 0 1 10 25",
)
parser.add_argument(
    '--file',
    type=str,
    default=None,
    help="The csv file path/name.",
)
parser.add_argument(
    '--rows',
    type=int,
    nargs="+",
    default=[],
    help="Rows to remove."
)
parser.add_argument(
    '--strip',
    action="store_true",
    help="Remove all leading and trailing white spaces."
)
args = parser.parse_args()
if __name__ == "__main__":
cleaner = Cleaner(args.file, args.columns, args.rows, args.strip) | 1,318 | -7 | 165 |
b6f38ce362b5ee22034139f490ad8c25fc8886c6 | 3,370 | py | Python | Scripts/Face2.py | LiuSeeker/Robotica-projeto-1 | 425795d51232470ac840faf9dc7d97863d801554 | [
"CECILL-B"
] | null | null | null | Scripts/Face2.py | LiuSeeker/Robotica-projeto-1 | 425795d51232470ac840faf9dc7d97863d801554 | [
"CECILL-B"
] | null | null | null | Scripts/Face2.py | LiuSeeker/Robotica-projeto-1 | 425795d51232470ac840faf9dc7d97863d801554 | [
"CECILL-B"
] | null | null | null | #! /usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = ["Rachel P. B. Moraes", "Igor Montagner", "Fabio Miranda"]
import rospy
import numpy as np
import tf
import math
import cv2
import time
from geometry_msgs.msg import Twist, Vector3, Pose
from nav_msgs.msg import Odometry
from sensor_msgs.msg import Image, CompressedImage
from cv_bridge import CvBridge, CvBridgeError
import smach
import smach_ros
face_cascade = cv2.CascadeClassifier('haarcascade_frontalcatface.xml')
bridge = CvBridge()
global cv_image
global dif_x
global media
global centro
global area1, area2
global p
cv_image = None
dif_x = None
area1, area2 = 0,0
atraso = 1.5E9
delay_miranda = 0.05
# Variáveis para permitir que o roda_todo_frame troque dados com a máquina de estados
media = 0
centro = 0
p = False
## Classes - estados
# main
if __name__ == '__main__':
main()
| 21.883117 | 132 | 0.692285 | #! /usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = ["Rachel P. B. Moraes", "Igor Montagner", "Fabio Miranda"]
import rospy
import numpy as np
import tf
import math
import cv2
import time
from geometry_msgs.msg import Twist, Vector3, Pose
from nav_msgs.msg import Odometry
from sensor_msgs.msg import Image, CompressedImage
from cv_bridge import CvBridge, CvBridgeError
import smach
import smach_ros
face_cascade = cv2.CascadeClassifier('haarcascade_frontalcatface.xml')
bridge = CvBridge()
global cv_image
global dif_x
global media
global centro
global area1, area2
global p
cv_image = None
dif_x = None
area1, area2 = 0,0
atraso = 1.5E9
delay_miranda = 0.05
# Variáveis para permitir que o roda_todo_frame troque dados com a máquina de estados
media = 0
centro = 0
p = False
def roda_todo_frame(imagem):
    """Image callback: detect faces in the incoming compressed frame and
    update the module-level tracking state.

    Side effects (module globals):
      - cv_image: the decoded BGR frame
      - media / centro: face x-position and the reference value it is
        compared against
      - dif_x: media - centro, or None when no face has been seen yet
      - area1 / area2: first seen / latest face bounding-box area
    """
    print("frame")
    global cv_image
    global media
    global centro
    global dif_x
    global p
    global area1, area2
    now = rospy.get_rostime()
    imgtime = imagem.header.stamp
    lag = now-imgtime
    delay = lag.nsecs
    # Bug fix: `check_delay` was referenced but never defined anywhere in
    # this module, so every lagging frame raised NameError. Honor a module
    # level override when present and default to skipping stale frames.
    check_delay = globals().get('check_delay', True)
    if delay > atraso and check_delay==True:
        print("delay: {}".format(delay/1.0E9))
        return
    try:
        cv_image = bridge.compressed_imgmsg_to_cv2(imagem, "bgr8")
        gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for(x,y,z,w) in faces:
            cv2.rectangle(cv_image, (x,y), (x+z, y+w), (255,0,0), 2)
            roi_gray = gray[y:y+w, x:x+z]
            roi_color = cv_image[y:y+w, x:x+z]
            # Horizontal center of the detected face
            media = x+z/2
            # NOTE(review): shape[0] is the image *height*; comparing a
            # horizontal coordinate against a height-derived value looks
            # wrong - confirm whether shape[1] (width) was intended.
            centro = cv_image.shape[0]//1.5
            if p == False:
                area1 = z*w
                area2 = 0
                p = True
            elif p ==True:
                area2 = z*w
        print(area1,area2)
        if media != 0 :
            dif_x = media-centro
        else:
            dif_x = None
        cv2.imshow("Camera", cv_image)
        cv2.waitKey(1)
    except CvBridgeError as e:
        print("except", e)
## Classes - estados
class Segue(smach.State):
    """smach state that keeps the robot oriented towards (and approaching)
    the detected face. It is the only state of the machine and always
    transitions back to itself ('segue')."""
    def __init__(self):
        smach.State.__init__(self, outcomes=['segue'])
    def execute(self, userdata):
        """Publish one velocity command based on the global tracking state.

        dif_x is the horizontal offset of the face from the reference
        center (None while nothing was detected yet); area1/area2 compare
        the first and latest face sizes to choose the forward speed.

        NOTE(review): the numeric comparisons on dif_x run before the
        `dif_x == None` guard; that only works under Python 2, where None
        orders below every number. Under Python 3 this raises TypeError -
        confirm the target interpreter.
        """
        global velocidade_saida
        tolerancia = 20
        if dif_x > tolerancia:
            # Face to the right of center: rotate with negative angular z
            velocidade = Twist(Vector3(0,0,0), Vector3(0,0,-0.2))
            velocidade_saida.publish(velocidade); rospy.sleep(delay_miranda)
            return 'segue'
        elif dif_x < -tolerancia and dif_x != None:
            # Face to the left of center: rotate with positive angular z
            velocidade = Twist(Vector3(0,0,0), Vector3(0,0,0.2))
            velocidade_saida.publish(velocidade); rospy.sleep(delay_miranda)
            return 'segue'
        elif dif_x > -tolerancia and dif_x < tolerancia:
            # Face centered: drive forward, slower when the face area has
            # grown since the first detection (area2 > area1)
            if area2 > area1:
                velocidade = Twist(Vector3(0.1,0,0), Vector3(0,0,0))
            elif area2 <= area1:
                velocidade = Twist(Vector3(0.4,0,0), Vector3(0,0,0))
            velocidade_saida.publish(velocidade); rospy.sleep(delay_miranda)
            return 'segue'
        elif dif_x == None:
            # No face seen yet: spin in place to search for one
            velocidade = Twist(Vector3(0,0,0), Vector3(0,0,0.5))
            velocidade_saida.publish(velocidade); rospy.sleep(delay_miranda)
            return 'segue'
        else:
            return 'segue'
# main
def main():
    """Wire up the camera subscriber, velocity publisher, and state machine."""
    global velocidade_saida
    rospy.init_node('cor_estados')
    # Camera frames drive roda_todo_frame; large buff_size avoids stale frames.
    recebedor = rospy.Subscriber("/raspicam_node/image/compressed", CompressedImage, roda_todo_frame, queue_size=10, buff_size = 2**24)
    velocidade_saida = rospy.Publisher("/cmd_vel", Twist, queue_size = 1)
    # Single-state machine: SEGUE loops back into itself forever.
    sm = smach.StateMachine(outcomes=['terminei'])
    with sm:
        smach.StateMachine.add('SEGUE', Segue(),
                                transitions={'segue': 'SEGUE'})
    outcome = sm.execute()
if __name__ == '__main__':
main()
| 2,379 | 4 | 121 |
c6a49fd8eae1a073336f9014b2bd4874e8b1295d | 1,277 | py | Python | PyCode/Modules/ServoModule.py | busing/RaspberryPi_SmartCarV1 | 1d7761c7f1e6bc385392c6178d9ba1266c28dc2a | [
"Apache-2.0"
] | 49 | 2018-05-20T11:43:42.000Z | 2022-02-22T01:15:01.000Z | PyCode/Modules/ServoModule.py | qq38061481/RaspberryPi_SmartCarV1 | 1d7761c7f1e6bc385392c6178d9ba1266c28dc2a | [
"Apache-2.0"
] | 1 | 2018-05-16T09:38:28.000Z | 2018-05-16T09:38:47.000Z | PyCode/Modules/ServoModule.py | qq38061481/RaspberryPi_SmartCarV1 | 1d7761c7f1e6bc385392c6178d9ba1266c28dc2a | [
"Apache-2.0"
] | 22 | 2018-05-16T09:33:04.000Z | 2021-07-05T07:02:43.000Z | ###################################################
# 智能小车1.0 -- 舵机模块
#
# @author chenph
# @date 2018/5/15
###################################################
import RPi.GPIO as GPIO
import time
# 初始模块
# 舵机左转
# 舵机右转
if __name__ == "__main__":
try:
# 19,21,23
m = ServoModule(19)
m.turnLeft()
time.sleep(5)
m.turnRight()
except KeyboardInterrupt:
pass
GPIO.cleanup() | 23.218182 | 56 | 0.497259 | ###################################################
# 智能小车1.0 -- 舵机模块
#
# @author chenph
# @date 2018/5/15
###################################################
import RPi.GPIO as GPIO
import time
class ServoModule:
    """Smart Car v1.0 servo driver (50 Hz software PWM via RPi.GPIO).

    Duty-cycle mapping used by this hardware: 2.5% = rightmost,
    5.5% ~= centre, 12.5% = leftmost.
    """

    def __init__(self, PIN):
        """Configure board-numbered pin *PIN* and centre the servo."""
        print('Servo Module In Progress')
        GPIO.setmode(GPIO.BOARD)
        self.PIN = PIN
        GPIO.setup(self.PIN, GPIO.OUT, initial=GPIO.LOW)
        # Centre the servo with a 5.5% duty pulse held for 0.2 s; no reset
        # pulse afterwards (matches original initialisation behaviour).
        self._pulse(5.5, 0.2, reset=False)

    def _pulse(self, duty, hold, reset=True):
        """Emit one PWM pulse train at *duty* percent for *hold* seconds.

        When *reset* is true the duty cycle is dropped to 0 before stopping,
        releasing the servo instead of leaving it energised.
        """
        self.pwm = GPIO.PWM(self.PIN, 50)
        self.pwm.start(0)
        self.pwm.ChangeDutyCycle(duty)
        time.sleep(hold)
        if reset:
            self.pwm.ChangeDutyCycle(0)
        self.pwm.stop()

    def turnLeft(self):
        """Rotate the servo to its leftmost position."""
        self._pulse(12.5, 0.02)

    def turnRight(self):
        """Rotate the servo to its rightmost position."""
        self._pulse(2.5, 0.02)
# Manual hardware test: sweep the servo on pin 19 left, wait, then right.
if __name__ == "__main__":
    try:
        # Servo pins available on this board: 19, 21, 23.
        m = ServoModule(19)
        m.turnLeft()
        time.sleep(5)
        m.turnRight()
    except KeyboardInterrupt:
        pass
    # Always release GPIO resources, even after Ctrl+C.
    GPIO.cleanup()
e8d8d764446c6ce93834ee77f987452347e579e1 | 810 | py | Python | tests/test_coverage.py | UC-Mind-Lab/Darjeeling | 61dc4fd3c62f43bb7fcfafce6f964f82144e293e | [
"Apache-2.0"
] | null | null | null | tests/test_coverage.py | UC-Mind-Lab/Darjeeling | 61dc4fd3c62f43bb7fcfafce6f964f82144e293e | [
"Apache-2.0"
] | 1 | 2021-04-16T16:53:05.000Z | 2021-07-23T15:21:52.000Z | tests/test_coverage.py | UC-Mind-Lab/Darjeeling | 61dc4fd3c62f43bb7fcfafce6f964f82144e293e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
from darjeeling.core import (TestOutcome,
TestCoverage,
FileLine,
FileLineSet)
@pytest.fixture
| 24.545455 | 56 | 0.512346 | # -*- coding: utf-8 -*-
import pytest
from darjeeling.core import (TestOutcome,
TestCoverage,
FileLine,
FileLineSet)
def ln(num: int) -> FileLine:
    """Shorthand for a FileLine at line *num* of the fixture file 'file.c'."""
    fixture_filename = 'file.c'
    return FileLine(fixture_filename, num)
@pytest.fixture
def coverage() -> TestCoverage:
    """TestCoverage fixture: a passing test 'foo' covering file.c lines 1-3."""
    outcome = TestOutcome(name="foo",
                          successful=True,
                          time_taken=0.35,
                          output="42")
    lines = FileLineSet.from_list([ln(1), ln(2), ln(3)])
    return TestCoverage(test='foo',
                        outcome=outcome,
                        lines=lines)
def test_length(coverage):
    """len() of a TestCoverage reports the number of covered lines."""
    assert len(coverage) == 3
def test_contains(coverage):
    """Membership testing against the set of covered FileLines."""
    assert ln(1) in coverage
    assert ln(999) not in coverage
| 493 | 0 | 91 |
5d973a361af88b11163f87ccf03a9de8c21ac4fc | 20,012 | py | Python | pretrain_bert.py | initc/gpt-lm | 941f2816d7a749ea3a3e0c574b35fc3fc67e94e3 | [
"Apache-2.0"
] | 2 | 2019-10-17T04:03:06.000Z | 2020-05-22T13:27:50.000Z | pretrain_bert.py | initc/gpt-lm | 941f2816d7a749ea3a3e0c574b35fc3fc67e94e3 | [
"Apache-2.0"
] | 1 | 2020-05-22T13:36:27.000Z | 2020-05-22T13:36:27.000Z | pretrain_bert.py | initc/gpt-lm | 941f2816d7a749ea3a3e0c574b35fc3fc67e94e3 | [
"Apache-2.0"
] | 1 | 2019-10-17T04:03:08.000Z | 2019-10-17T04:03:08.000Z | # coding=utf-8
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pretrain BERT"""
import os
import random
import math
import numpy as np
import torch
from arguments import get_args
from configure_data import configure_data
from fp16 import FP16_Module
from fp16 import FP16_Optimizer
from learning_rates import AnnealingLR
from model import BertModel
from model import get_params_for_weight_decay_optimization
from model import DistributedDataParallel as DDP
from optim import Adam
from utils import Timers, save_checkpoint, load_checkpoint, check_checkpoint, move_to_cuda
import pdb
def get_model(tokenizer, args):
    """Build the BERT model, move it to the current GPU, and wrap it.

    Depending on ``args`` the model is wrapped in FP16_Module (with selected
    submodules forced back to fp32) and/or DistributedDataParallel.
    """
    print('building BERT model ...')
    model = BertModel(tokenizer, args)
    print(' > number of parameters: {}'.format(
        sum([p.nelement() for p in model.parameters()])), flush=True)
    # GPU allocation.
    model.cuda(torch.cuda.current_device())
    # Fp16 conversion.
    if args.fp16:
        print("fp16 mode")
        model = FP16_Module(model)
        # Selected submodules are kept in fp32 for numerical stability.
        if args.fp32_embedding:
            model.module.model.bert.embeddings.word_embeddings.float()
            model.module.model.bert.embeddings.position_embeddings.float()
            model.module.model.bert.embeddings.token_type_embeddings.float()
        if args.fp32_tokentypes:
            model.module.model.bert.embeddings.token_type_embeddings.float()
        if args.fp32_layernorm:
            for name, _module in model.named_modules():
                if 'LayerNorm' in name:
                    _module.float()
    # Wrap model for distributed training.
    if args.world_size > 1:
        model = DDP(model)
    return model
def get_optimizer(model, args):
    """Build an Adam optimizer over weight-decay/no-decay parameter groups.

    Wraps the optimizer in FP16_Optimizer (static or dynamic loss scaling)
    when ``args.fp16`` is set.
    """
    # Build parameter groups (weight decay and non-decay).
    # Unwrap DDP/FP16 wrappers to reach the underlying BertModel.
    while isinstance(model, (DDP, FP16_Module)):
        model = model.module
    layers = model.model.bert.encoder.layer
    pooler = model.model.bert.pooler
    lmheads = model.model.cls.predictions
    nspheads = model.model.cls.seq_relationship
    embeddings = model.model.bert.embeddings
    param_groups = []
    param_groups += list(get_params_for_weight_decay_optimization(layers))
    param_groups += list(get_params_for_weight_decay_optimization(pooler))
    param_groups += list(get_params_for_weight_decay_optimization(nspheads))
    param_groups += list(get_params_for_weight_decay_optimization(embeddings))
    param_groups += list(get_params_for_weight_decay_optimization(
        lmheads.transform))
    # NOTE(review): assumes param_groups[1] is the no-weight-decay group of
    # the first call above -- confirm against
    # get_params_for_weight_decay_optimization's return order.
    param_groups[1]['params'].append(lmheads.bias)
    # Use Adam.
    optimizer = Adam(param_groups,
                     lr=args.lr, weight_decay=args.weight_decay)
    # Wrap into fp16 optimizer.
    if args.fp16:
        optimizer = FP16_Optimizer(optimizer,
                                   static_loss_scale=args.loss_scale,
                                   dynamic_loss_scale=args.dynamic_loss_scale,
                                   dynamic_loss_args={
                                       'scale_window': args.loss_scale_window,
                                       'min_scale': args.min_scale,
                                       'delayed_shift': args.hysteresis})
    return optimizer
def get_learning_rate_scheduler(optimizer, args):
    """Build the AnnealingLR schedule with linear warmup.

    Decays over ``args.lr_decay_iters`` if given, otherwise over the full
    ``train_iters * epochs`` span.
    """
    # Add linear learning rate scheduler.
    if args.lr_decay_iters is not None:
        num_iters = args.lr_decay_iters
    else:
        num_iters = args.train_iters * args.epochs
    # -1 means "start from the beginning" (no resume offset here).
    init_step = -1
    warmup_iter = args.warmup * num_iters
    lr_scheduler = AnnealingLR(optimizer,
                               start_lr=args.lr,
                               warmup_iter=warmup_iter,
                               num_iters=num_iters,
                               decay_style=args.lr_decay_style,
                               last_iter=init_step)
    return lr_scheduler
def setup_model_and_optimizer(args, tokenizer):
    """Build model, optimizer, LR scheduler, and loss; resume if requested.

    Returns ``(model, optimizer, lr_scheduler, criterion)``. When resuming
    from ``args.load``, also stores the resume position on ``args``
    (``epoch``, ``mid_epoch_iters``, ``total_iters``).
    """
    model = get_model(tokenizer, args)
    optimizer = get_optimizer(model, args)
    lr_scheduler = get_learning_rate_scheduler(optimizer, args)
    # Summed (not averaged) cross entropy; -1 labels are ignored (padding).
    criterion = torch.nn.CrossEntropyLoss(reduction='sum', ignore_index=-1)
    args.continue_train = False
    # May flip args.continue_train if an in-progress checkpoint exists.
    check_checkpoint(model, optimizer, lr_scheduler, args)
    if args.load is not None and not args.continue_train:
        print("| Resume checkpoints from {}".format(args.load))
        epoch, i, total_iters = load_checkpoint(model, optimizer,
                                                lr_scheduler, args)
        if args.resume_dataloader:
            args.epoch = epoch
            args.mid_epoch_iters = i
            args.total_iters = total_iters
    return model, optimizer, lr_scheduler, criterion
def forward_step(data, model, tokenizer, criterion, args):
    """Run one forward pass and compute summed LM and NSP losses.

    Returns ``(lm_loss_sum, nsp_loss_sum, nsentences, ntokens)`` where the
    losses are sums (criterion uses reduction='sum') to be normalised by the
    caller using the sentence/token counts.
    """
    sample = move_to_cuda(data, torch.cuda.current_device())
    output, nsp, past = model(**sample["net_input"])
    nsp_labels = sample["nsp_labels"]
    target = sample["target"]
    # 3-way sentence-relationship classification head.
    nsp_loss = criterion(nsp.view(-1, 3).contiguous().float(),
                         nsp_labels.view(-1).contiguous())
    losses = criterion(output.view(-1, tokenizer.num_tokens).contiguous().float(),
                       target.contiguous().view(-1).contiguous())
    # pdb.set_trace()
    return losses, nsp_loss, sample["nsentences"], sample["ntokens"]
def backward_step(optimizer, model, lm_loss, nsp_loss, batch_size, batch_tokens, args):
    """Backward pass: normalise, backprop, all-reduce, and clip gradients.

    Returns the (all-reduced, summed across workers) lm/nsp losses and the
    global batch size / token count.
    """
    # Total loss: per-token LM loss plus per-sentence NSP loss.
    loss = lm_loss / batch_tokens + nsp_loss / batch_size
    # Backward pass.
    optimizer.zero_grad()
    if args.fp16:
        # Master-grad update is deferred until after the all-reduce below.
        optimizer.backward(loss, update_master_grads=False)
    else:
        loss.backward()
    # Reduce across processes.
    lm_loss_reduced = lm_loss
    nsp_loss_reduced = nsp_loss
    if args.world_size > 1:
        # Pack losses and counts into one tensor for a single all_reduce.
        batch_size = torch.Tensor([batch_size]).to(lm_loss.device)
        batch_tokens = torch.Tensor([batch_tokens]).to(lm_loss.device)
        reduced_losses = torch.cat((lm_loss.view(1), nsp_loss.view(1), batch_size, batch_tokens))
        torch.distributed.all_reduce(reduced_losses.data)
        # reduced_losses.data = reduced_losses.data / args.world_size
        model.allreduce_params(reduce_after=False,
                               fp32_allreduce=args.fp32_allreduce)
        lm_loss_reduced = reduced_losses[0]
        nsp_loss_reduced = reduced_losses[1]
        batch_size = reduced_losses[2].item()
        batch_tokens = reduced_losses[3].item()
    # Update master gradients.
    if args.fp16:
        optimizer.update_master_grads()
    # Clipping gradients helps prevent the exploding gradient.
    if args.clip_grad > 0:
        if not args.fp16:
            torch.nn.utils.clip_grad_norm(model.parameters(), args.clip_grad)
        else:
            optimizer.clip_master_grads(args.clip_grad)
    return lm_loss_reduced, nsp_loss_reduced, batch_size, batch_tokens
def train_step(input_data, model, tokenizer, criterion, optimizer, lr_scheduler, args):
    """Single training step: forward, backward, optimizer + LR update.

    Returns ``(lm_loss, nsp_loss, skipped_iter, batch_size, batch_tokens)``;
    ``skipped_iter`` is 1 when an fp16 overflow skipped the LR step.
    """
    # Forward model for one step.
    lm_loss, nsp_loss, batch_size, batch_tokens = forward_step(input_data, model, tokenizer, criterion, args)
    # Calculate gradients, reduce across processes, and clip.
    lm_loss_reduced, nsp_loss_reduced, batch_size, batch_tokens = backward_step(optimizer, model, lm_loss,
                                                     nsp_loss, batch_size, batch_tokens,
                                                     args)
    # Update parameters.
    optimizer.step()
    # Update learning rate.
    skipped_iter = 0
    # fp16 overflow invalidates this step's gradients; don't advance the LR.
    if not (args.fp16 and optimizer.overflow):
        lr_scheduler.step()
    else:
        skipped_iter = 1
    return lm_loss_reduced, nsp_loss_reduced, skipped_iter, batch_size, batch_tokens
def train_epoch(epoch, model, tokenizer, optimizer, train_data, val_data,
                lr_scheduler, criterion, timers, args):
    """Train one full epoch, with periodic logging, validation and saving.

    Returns ``(iteration, skipped_iters)`` -- the number of iterations run
    this epoch and how many were skipped due to fp16 overflow.
    """
    # Turn on training mode which enables dropout.
    model.train()
    # Tracking loss.
    total_lm_loss = 0.0
    total_nsp_loss = 0.0
    # Iterations.
    max_iters = len(train_data)
    iteration = 0
    update_num = 0
    total_tokens = 0
    total_batch = 0
    skipped_iters = 0
    data_iterator = iter(train_data)

    def _skip_batches(times):
        """Advance the data iterator past *times* already-trained batches.

        BUGFIX: this nested helper was missing in this copy, so resuming a
        mid-epoch checkpoint raised NameError on the call below.
        """
        for _ in range(times):
            next(data_iterator)

    if args.resume_dataloader:
        # Resume mid-epoch: skip consumed batches and fast-forward the LR.
        iteration = args.mid_epoch_iters
        _skip_batches(iteration)
        args.resume_dataloader = False
        lr_scheduler.step(max_iters * (epoch-1) + iteration)
    # Data iterator.
    timers('interval time').start()
    while iteration < max_iters:
        lm_loss, nsp_loss, skipped_iter, batch_size, batch_tokens = train_step(next(data_iterator), model, tokenizer, criterion,optimizer, lr_scheduler, args)
        update_num += 1
        skipped_iters += skipped_iter
        iteration += 1
        args.cur_iteration = iteration
        # Update losses.
        total_lm_loss += lm_loss.data.detach().float().item()
        total_nsp_loss += nsp_loss.data.detach().float().item()
        if nsp_loss != 0.0:
            total_batch += batch_size
        total_tokens += batch_tokens
        # Guard against division by zero in the averages below.
        if total_batch < 1:
            total_batch = 1
        # Logging.
        if iteration % args.log_interval == 0:
            learning_rate = optimizer.param_groups[0]['lr']
            avg_nsp_loss = total_nsp_loss / total_batch
            avg_lm_loss = total_lm_loss / total_tokens
            elapsed_time = timers('interval time').elapsed()
            log_string = ' epoch{:2d} |'.format(epoch)
            log_string += ' iteration {:8d}/{:8d} |'.format(iteration,
                                                            max_iters)
            log_string += ' lm loss {:.3f} |'.format(avg_lm_loss)
            log_string += ' lm ppl {:.3f} |'.format(math.exp(avg_lm_loss))
            log_string += ' nsp loss {:.3f} |'.format(avg_nsp_loss)
            log_string += ' batch size {} |'.format(batch_size)
            log_string += ' learning rate {:.7f} |'.format(learning_rate)
            log_string += ' tpi (ms): {:.2f} |'.format(
                elapsed_time * 1000.0 / args.log_interval)
            if args.fp16:
                log_string += ' loss scale {:.3f} |'.format(
                    optimizer.loss_scale)
            print(log_string, flush=True)
        # Periodic validation + checkpointing.
        if iteration % args.valid_interval == 0:
            lm_loss, nsp_loss = evaluate(val_data, model, tokenizer, criterion, args)
            val_loss = lm_loss + nsp_loss
            print('-' * 100)
            print('| end of epoch {:3d} | valid loss {:.3f} | '
                  'valid LM Loss {:.3f} | valid LM PPL {:.3f} | valid NSP Loss {:.3f}'.format(
                epoch, val_loss, lm_loss, math.exp(lm_loss), nsp_loss))
            print('-' * 100)
            if args.save:
                checkpoints_path = "checkpoints_{}_{}.pt".format(epoch, iteration)
                save_checkpoint(checkpoints_path, epoch, iteration, model,
                                optimizer, lr_scheduler, args)
                checkpoints_path = "checkpoints-last.pt"
                save_checkpoint(checkpoints_path, epoch, iteration, model,
                                optimizer, lr_scheduler, args)
            if val_loss < evaluate.best_val_loss:
                evaluate.best_val_loss = val_loss
                if args.save:
                    best_path = 'checkpoints-best.pt'
                    print('saving best model to:',
                          os.path.join(args.save, best_path))
                    save_checkpoint(best_path, epoch, iteration, model,
                                    optimizer, lr_scheduler, args)
    # End of epoch: save, run a final validation, and track the best model.
    if args.save:
        final_path = 'checkpoints_{}.pt'.format(epoch)
        print('saving final epoch model to:', os.path.join(args.save, final_path))
        save_checkpoint(final_path, epoch + 1, 0, model, optimizer, lr_scheduler, args)
        cur_path = 'checkpoints-last.pt'
        save_checkpoint(cur_path, epoch + 1, 0, model, optimizer, lr_scheduler, args)
    lm_loss, nsp_loss = evaluate(val_data, model, tokenizer, criterion, args)
    val_loss = lm_loss + nsp_loss
    if val_loss < evaluate.best_val_loss:
        evaluate.best_val_loss = val_loss
        if args.save:
            best_path = 'checkpoints-best.pt'
            print('saving best model to:',
                  os.path.join(args.save, best_path))
            save_checkpoint(best_path, epoch+1, 0, model,
                            optimizer, lr_scheduler, args)
    return iteration, skipped_iters
def evaluate(data_source, model, tokenizer, criterion, args):
    """Evaluate the model over an iterable of validation data loaders.

    Returns ``(avg_lm_loss_per_token, avg_nsp_loss_per_sentence)`` aggregated
    over all loaders (and all workers when running under DDP). Temporarily
    switches the model to eval mode; restores train mode before returning.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_lm_loss = 0
    total_nsp_loss = 0
    total_batch_size = 0
    total_batch_tokens = 0
    for data_loader in data_source:
        local_lm_loss = 0
        local_batch_tokens = 0
        max_iters = len(data_loader)
        with torch.no_grad():
            data_iterator = iter(data_loader)
            iteration = 0
            while iteration < max_iters:
                # Forward evaluation.
                lm_loss, nsp_loss, batch_size, batch_tokens = forward_step(next(data_iterator), model, tokenizer,criterion, args)
                # Reduce across processes.
                if isinstance(model, DDP):
                    batch_size = torch.Tensor([batch_size]).to(lm_loss.device)
                    batch_tokens = torch.Tensor([batch_tokens]).to(lm_loss.device)
                    reduced_losses = torch.cat((lm_loss.view(1), nsp_loss.view(1), batch_size, batch_tokens))
                    torch.distributed.all_reduce(reduced_losses.data)
                    # reduced_losses.data = reduced_losses.data / args.world_size
                    lm_loss = reduced_losses[0]
                    nsp_loss = reduced_losses[1]
                    batch_size = reduced_losses[2].item()
                    batch_tokens = reduced_losses[3].item()
                # A zero LM loss marks an empty batch: exclude it from the
                # NSP denominator as well.
                if lm_loss == 0.0:
                    batch_size = 0
                total_lm_loss += lm_loss.data.detach().float().item()
                total_nsp_loss += nsp_loss.data.detach().float().item()
                local_lm_loss += lm_loss.data.detach().float().item()
                local_batch_tokens += batch_tokens
                total_batch_size += batch_size
                total_batch_tokens += batch_tokens
                iteration += 1
        # NOTE(review): divides by local_batch_tokens with no zero guard --
        # an empty data_loader would raise ZeroDivisionError.
        local_lm_loss /= local_batch_tokens
        print('| LOCAL valid LM Loss {:.3f} | valid LM PPL {:.3f}'.format(local_lm_loss, math.exp(local_lm_loss)))
    # Move model back to the train mode.
    model.train()
    total_lm_loss /= total_batch_tokens
    total_nsp_loss /= total_batch_size
    return total_lm_loss, total_nsp_loss
def initialize_distributed(args):
    """Pick this process's GPU and initialise torch.distributed (if needed).

    Also installs the rank-gated ``print`` override so only rank 0 logs.
    """
    # Manually set the device ids.
    device = args.rank % torch.cuda.device_count()
    if args.local_rank is not None:
        # Launcher-provided local rank takes precedence.
        device = args.local_rank
    torch.cuda.set_device(device)
    # Call the init process
    if args.world_size > 1:
        # Rendezvous address comes from MASTER_ADDR/MASTER_PORT env vars.
        init_method = 'tcp://'
        master_ip = os.getenv('MASTER_ADDR', 'localhost')
        master_port = os.getenv('MASTER_PORT', '6000')
        init_method += master_ip + ':' + master_port
        torch.distributed.init_process_group(
            backend=args.distributed_backend,
            world_size=args.world_size, rank=args.rank,
            init_method=init_method)
    suppress_output(args.rank == 0)
def suppress_output(is_master):
    """Suppress printing on non-master ranks. Force printing with `force=True`.

    BUGFIX: the original body assigned the builtin ``print`` back to itself
    (``__builtin__.print = print`` with no wrapper defined), so output was
    never suppressed and the documented ``force=True`` keyword did not exist.
    Restore the wrapper that only emits when this process is the master or
    the caller passes ``force=True``.
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        # Pop our extra keyword so the real print never sees it.
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)

    __builtin__.print = print
def set_random_seed(seed):
    """Seed Python, NumPy and torch (CPU + CUDA) RNGs for reproducibility.

    A ``None`` or non-positive seed leaves every generator untouched.
    """
    if seed is None or seed <= 0:
        return
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
def main():
    """Main training program.

    Builds data/model/optimizer, trains for ``args.epochs`` epochs, tracks
    the best validation loss on ``evaluate.best_val_loss``, and saves a
    last-known-good checkpoint on Ctrl+C.
    """
    print('Pretrain BERT model')
    # Disable CuDNN.
    torch.backends.cudnn.enabled = False
    # Arguments.
    args = get_args()
    # Pytorch distributed.
    initialize_distributed(args)
    set_random_seed(args.seed)
    print(args)
    # Data stuff.
    data_config = configure_data()
    data_config.set_defaults(data_set_type='BERT', transpose=False)
    (train_data, val_data), tokenizer = data_config.apply(args)
    args.train_iters = len(train_data)
    # Best validation loss is tracked as an attribute on evaluate().
    evaluate.best_val_loss = float("inf")
    # Model, optimizer, and learning rate.
    model, optimizer, lr_scheduler, criterion = setup_model_and_optimizer(
        args, tokenizer)
    # evaluate(val_data, model, tokenizer, criterion, args)
    # At any point you can hit Ctrl + C to break out of training early.
    try:
        total_iters = 0
        skipped_iters = 0
        start_epoch = 1
        # NOTE(review): this local is never read -- the tracked best loss is
        # evaluate.best_val_loss above.
        best_val_loss = float('inf')
        # Resume data loader if necessary.
        if args.resume_dataloader:
            start_epoch = args.epoch
            total_iters = args.total_iters
        # For all epochs.
        for epoch in range(start_epoch, args.epochs + 1):
            timers = Timers()
            # if args.shuffle:
            #     train_data.batch_sampler.sampler.set_epoch(epoch + args.seed)
            timers('epoch time').start()
            iteration, skipped = train_epoch(epoch, model, tokenizer, optimizer,
                                             train_data, val_data, lr_scheduler,
                                             criterion, timers, args)
            elapsed_time = timers('epoch time').elapsed()
            total_iters += iteration
            skipped_iters += skipped
            lm_loss, nsp_loss = evaluate(val_data, model, tokenizer, criterion, args)
            val_loss = lm_loss + nsp_loss
            print('-' * 100)
            print('| end of epoch {:3d} | time: {:.3f}s | valid loss {:.3f} | '
                  'valid LM Loss {:.3f} | valid LM PPL {:.3f} | valid NSP Loss {:.3f}'.format(
                epoch, elapsed_time, val_loss, lm_loss, math.exp(lm_loss), nsp_loss))
            print('-' * 100)
            if val_loss < evaluate.best_val_loss:
                evaluate.best_val_loss = val_loss
                if args.save:
                    best_path = 'checkpoints-best.pt'
                    print('saving best model to:',
                          os.path.join(args.save, best_path))
                    save_checkpoint(best_path, epoch + 1, 0, model, optimizer, lr_scheduler, args)
    except KeyboardInterrupt:
        print('-' * 100)
        print('Exiting from training early')
        if args.save:
            cur_path = 'checkpoints-last.pt'
            print('saving current model to:',
                  os.path.join(args.save, cur_path))
            save_checkpoint(cur_path, epoch, args.cur_iteration, model, optimizer, lr_scheduler, args)
        exit()
if __name__ == "__main__":
main()
| 38.70793 | 158 | 0.613382 | # coding=utf-8
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pretrain BERT"""
import os
import random
import math
import numpy as np
import torch
from arguments import get_args
from configure_data import configure_data
from fp16 import FP16_Module
from fp16 import FP16_Optimizer
from learning_rates import AnnealingLR
from model import BertModel
from model import get_params_for_weight_decay_optimization
from model import DistributedDataParallel as DDP
from optim import Adam
from utils import Timers, save_checkpoint, load_checkpoint, check_checkpoint, move_to_cuda
import pdb
def get_model(tokenizer, args):
"""Build the model."""
print('building BERT model ...')
model = BertModel(tokenizer, args)
print(' > number of parameters: {}'.format(
sum([p.nelement() for p in model.parameters()])), flush=True)
# GPU allocation.
model.cuda(torch.cuda.current_device())
# Fp16 conversion.
if args.fp16:
print("fp16 mode")
model = FP16_Module(model)
if args.fp32_embedding:
model.module.model.bert.embeddings.word_embeddings.float()
model.module.model.bert.embeddings.position_embeddings.float()
model.module.model.bert.embeddings.token_type_embeddings.float()
if args.fp32_tokentypes:
model.module.model.bert.embeddings.token_type_embeddings.float()
if args.fp32_layernorm:
for name, _module in model.named_modules():
if 'LayerNorm' in name:
_module.float()
# Wrap model for distributed training.
if args.world_size > 1:
model = DDP(model)
return model
def get_optimizer(model, args):
"""Set up the optimizer."""
# Build parameter groups (weight decay and non-decay).
while isinstance(model, (DDP, FP16_Module)):
model = model.module
layers = model.model.bert.encoder.layer
pooler = model.model.bert.pooler
lmheads = model.model.cls.predictions
nspheads = model.model.cls.seq_relationship
embeddings = model.model.bert.embeddings
param_groups = []
param_groups += list(get_params_for_weight_decay_optimization(layers))
param_groups += list(get_params_for_weight_decay_optimization(pooler))
param_groups += list(get_params_for_weight_decay_optimization(nspheads))
param_groups += list(get_params_for_weight_decay_optimization(embeddings))
param_groups += list(get_params_for_weight_decay_optimization(
lmheads.transform))
param_groups[1]['params'].append(lmheads.bias)
# Use Adam.
optimizer = Adam(param_groups,
lr=args.lr, weight_decay=args.weight_decay)
# Wrap into fp16 optimizer.
if args.fp16:
optimizer = FP16_Optimizer(optimizer,
static_loss_scale=args.loss_scale,
dynamic_loss_scale=args.dynamic_loss_scale,
dynamic_loss_args={
'scale_window': args.loss_scale_window,
'min_scale': args.min_scale,
'delayed_shift': args.hysteresis})
return optimizer
def get_learning_rate_scheduler(optimizer, args):
"""Build the learning rate scheduler."""
# Add linear learning rate scheduler.
if args.lr_decay_iters is not None:
num_iters = args.lr_decay_iters
else:
num_iters = args.train_iters * args.epochs
init_step = -1
warmup_iter = args.warmup * num_iters
lr_scheduler = AnnealingLR(optimizer,
start_lr=args.lr,
warmup_iter=warmup_iter,
num_iters=num_iters,
decay_style=args.lr_decay_style,
last_iter=init_step)
return lr_scheduler
def setup_model_and_optimizer(args, tokenizer):
"""Setup model and optimizer."""
model = get_model(tokenizer, args)
optimizer = get_optimizer(model, args)
lr_scheduler = get_learning_rate_scheduler(optimizer, args)
criterion = torch.nn.CrossEntropyLoss(reduction='sum', ignore_index=-1)
args.continue_train = False
check_checkpoint(model, optimizer, lr_scheduler, args)
if args.load is not None and not args.continue_train:
print("| Resume checkpoints from {}".format(args.load))
epoch, i, total_iters = load_checkpoint(model, optimizer,
lr_scheduler, args)
if args.resume_dataloader:
args.epoch = epoch
args.mid_epoch_iters = i
args.total_iters = total_iters
return model, optimizer, lr_scheduler, criterion
def forward_step(data, model, tokenizer, criterion, args):
"""Forward step."""
sample = move_to_cuda(data, torch.cuda.current_device())
output, nsp, past = model(**sample["net_input"])
nsp_labels = sample["nsp_labels"]
target = sample["target"]
nsp_loss = criterion(nsp.view(-1, 3).contiguous().float(),
nsp_labels.view(-1).contiguous())
losses = criterion(output.view(-1, tokenizer.num_tokens).contiguous().float(),
target.contiguous().view(-1).contiguous())
# pdb.set_trace()
return losses, nsp_loss, sample["nsentences"], sample["ntokens"]
def backward_step(optimizer, model, lm_loss, nsp_loss, batch_size, batch_tokens, args):
"""Backward step."""
# Total loss.
loss = lm_loss / batch_tokens + nsp_loss / batch_size
# Backward pass.
optimizer.zero_grad()
if args.fp16:
optimizer.backward(loss, update_master_grads=False)
else:
loss.backward()
# Reduce across processes.
lm_loss_reduced = lm_loss
nsp_loss_reduced = nsp_loss
if args.world_size > 1:
batch_size = torch.Tensor([batch_size]).to(lm_loss.device)
batch_tokens = torch.Tensor([batch_tokens]).to(lm_loss.device)
reduced_losses = torch.cat((lm_loss.view(1), nsp_loss.view(1), batch_size, batch_tokens))
torch.distributed.all_reduce(reduced_losses.data)
# reduced_losses.data = reduced_losses.data / args.world_size
model.allreduce_params(reduce_after=False,
fp32_allreduce=args.fp32_allreduce)
lm_loss_reduced = reduced_losses[0]
nsp_loss_reduced = reduced_losses[1]
batch_size = reduced_losses[2].item()
batch_tokens = reduced_losses[3].item()
# Update master gradients.
if args.fp16:
optimizer.update_master_grads()
# Clipping gradients helps prevent the exploding gradient.
if args.clip_grad > 0:
if not args.fp16:
torch.nn.utils.clip_grad_norm(model.parameters(), args.clip_grad)
else:
optimizer.clip_master_grads(args.clip_grad)
return lm_loss_reduced, nsp_loss_reduced, batch_size, batch_tokens
def train_step(input_data, model, tokenizer, criterion, optimizer, lr_scheduler, args):
"""Single training step."""
# Forward model for one step.
lm_loss, nsp_loss, batch_size, batch_tokens = forward_step(input_data, model, tokenizer, criterion, args)
# Calculate gradients, reduce across processes, and clip.
lm_loss_reduced, nsp_loss_reduced, batch_size, batch_tokens = backward_step(optimizer, model, lm_loss,
nsp_loss, batch_size, batch_tokens,
args)
# Update parameters.
optimizer.step()
# Update learning rate.
skipped_iter = 0
if not (args.fp16 and optimizer.overflow):
lr_scheduler.step()
else:
skipped_iter = 1
return lm_loss_reduced, nsp_loss_reduced, skipped_iter, batch_size, batch_tokens
def train_epoch(epoch, model, tokenizer, optimizer, train_data, val_data,
lr_scheduler, criterion, timers, args):
"""Train one full epoch."""
# Turn on training mode which enables dropout.
model.train()
# Tracking loss.
total_lm_loss = 0.0
total_nsp_loss = 0.0
# Iterations.
max_iters = len(train_data)
iteration = 0
update_num = 0
total_tokens = 0
total_batch = 0
skipped_iters = 0
data_iterator = iter(train_data)
def comsume_data(times):
for i in range(times):
next(data_iterator)
if args.resume_dataloader:
iteration = args.mid_epoch_iters
comsume_data(iteration)
args.resume_dataloader = False
lr_scheduler.step(max_iters * (epoch-1) + iteration)
# Data iterator.
timers('interval time').start()
while iteration < max_iters:
lm_loss, nsp_loss, skipped_iter, batch_size, batch_tokens = train_step(next(data_iterator), model, tokenizer, criterion,optimizer, lr_scheduler, args)
update_num += 1
skipped_iters += skipped_iter
iteration += 1
args.cur_iteration = iteration
# Update losses.
total_lm_loss += lm_loss.data.detach().float().item()
total_nsp_loss += nsp_loss.data.detach().float().item()
if nsp_loss != 0.0:
total_batch += batch_size
total_tokens += batch_tokens
if total_batch < 1:
total_batch = 1
# Logging.
if iteration % args.log_interval == 0:
learning_rate = optimizer.param_groups[0]['lr']
avg_nsp_loss = total_nsp_loss / total_batch
avg_lm_loss = total_lm_loss / total_tokens
elapsed_time = timers('interval time').elapsed()
log_string = ' epoch{:2d} |'.format(epoch)
log_string += ' iteration {:8d}/{:8d} |'.format(iteration,
max_iters)
log_string += ' lm loss {:.3f} |'.format(avg_lm_loss)
log_string += ' lm ppl {:.3f} |'.format(math.exp(avg_lm_loss))
log_string += ' nsp loss {:.3f} |'.format(avg_nsp_loss)
log_string += ' batch size {} |'.format(batch_size)
log_string += ' learning rate {:.7f} |'.format(learning_rate)
log_string += ' tpi (ms): {:.2f} |'.format(
elapsed_time * 1000.0 / args.log_interval)
if args.fp16:
log_string += ' loss scale {:.3f} |'.format(
optimizer.loss_scale)
print(log_string, flush=True)
if iteration % args.valid_interval == 0:
lm_loss, nsp_loss = evaluate(val_data, model, tokenizer, criterion, args)
val_loss = lm_loss + nsp_loss
print('-' * 100)
print('| end of epoch {:3d} | valid loss {:.3f} | '
'valid LM Loss {:.3f} | valid LM PPL {:.3f} | valid NSP Loss {:.3f}'.format(
epoch, val_loss, lm_loss, math.exp(lm_loss), nsp_loss))
print('-' * 100)
if args.save:
checkpoints_path = "checkpoints_{}_{}.pt".format(epoch, iteration)
save_checkpoint(checkpoints_path, epoch, iteration, model,
optimizer, lr_scheduler, args)
checkpoints_path = "checkpoints-last.pt"
save_checkpoint(checkpoints_path, epoch, iteration, model,
optimizer, lr_scheduler, args)
if val_loss < evaluate.best_val_loss:
evaluate.best_val_loss = val_loss
if args.save:
best_path = 'checkpoints-best.pt'
print('saving best model to:',
os.path.join(args.save, best_path))
save_checkpoint(best_path, epoch, iteration, model,
optimizer, lr_scheduler, args)
if args.save:
final_path = 'checkpoints_{}.pt'.format(epoch)
print('saving final epoch model to:', os.path.join(args.save, final_path))
save_checkpoint(final_path, epoch + 1, 0, model, optimizer, lr_scheduler, args)
cur_path = 'checkpoints-last.pt'
save_checkpoint(cur_path, epoch + 1, 0, model, optimizer, lr_scheduler, args)
lm_loss, nsp_loss = evaluate(val_data, model, tokenizer, criterion, args)
val_loss = lm_loss + nsp_loss
if val_loss < evaluate.best_val_loss:
evaluate.best_val_loss = val_loss
if args.save:
best_path = 'checkpoints-best.pt'
print('saving best model to:',
os.path.join(args.save, best_path))
save_checkpoint(best_path, epoch+1, 0, model,
optimizer, lr_scheduler, args)
return iteration, skipped_iters
def evaluate(data_source, model, tokenizer, criterion, args):
    """Run validation over every data loader in ``data_source``.

    Args:
        data_source: iterable of validation DataLoaders.
        model: model to evaluate; may be wrapped in DDP, in which case the
            per-batch losses and counts are all-reduced across ranks.
        tokenizer: passed straight through to ``forward_step``.
        criterion: loss criterion passed through to ``forward_step``.
        args: parsed command-line arguments.

    Returns:
        (lm_loss, nsp_loss): LM loss averaged per token and NSP loss
        averaged per sample, summed over all loaders.

    Note: callers store the best validation loss on the function attribute
    ``evaluate.best_val_loss``.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_lm_loss = 0
    total_nsp_loss = 0
    total_batch_size = 0
    total_batch_tokens = 0
    for data_loader in data_source:
        # Per-loader accumulators, used only for the LOCAL log line below.
        local_lm_loss = 0
        local_batch_tokens = 0
        max_iters = len(data_loader)
        with torch.no_grad():
            data_iterator = iter(data_loader)
            iteration = 0
            while iteration < max_iters:
                # Forward evaluation.
                lm_loss, nsp_loss, batch_size, batch_tokens = forward_step(next(data_iterator), model, tokenizer,criterion, args)
                # Reduce across processes.
                if isinstance(model, DDP):
                    # Pack (lm, nsp, batch_size, batch_tokens) into one
                    # tensor so a single all_reduce sums all four values.
                    batch_size = torch.Tensor([batch_size]).to(lm_loss.device)
                    batch_tokens = torch.Tensor([batch_tokens]).to(lm_loss.device)
                    reduced_losses = torch.cat((lm_loss.view(1), nsp_loss.view(1), batch_size, batch_tokens))
                    torch.distributed.all_reduce(reduced_losses.data)
                    # reduced_losses.data = reduced_losses.data / args.world_size
                    lm_loss = reduced_losses[0]
                    nsp_loss = reduced_losses[1]
                    batch_size = reduced_losses[2].item()
                    batch_tokens = reduced_losses[3].item()
                # NOTE(review): a zero LM loss is treated as an empty batch
                # for the NSP average — confirm this matches forward_step's
                # convention for skipped batches.
                if lm_loss == 0.0:
                    batch_size = 0
                total_lm_loss += lm_loss.data.detach().float().item()
                total_nsp_loss += nsp_loss.data.detach().float().item()
                local_lm_loss += lm_loss.data.detach().float().item()
                local_batch_tokens += batch_tokens
                total_batch_size += batch_size
                total_batch_tokens += batch_tokens
                iteration += 1
        # NOTE(review): raises ZeroDivisionError if a loader yields no tokens.
        local_lm_loss /= local_batch_tokens
        print('| LOCAL valid LM Loss {:.3f} | valid LM PPL {:.3f}'.format(local_lm_loss, math.exp(local_lm_loss)))
    # Move model back to the train mode.
    model.train()
    # LM loss is normalized per token, NSP loss per sample.
    total_lm_loss /= total_batch_tokens
    total_nsp_loss /= total_batch_size
    return total_lm_loss, total_nsp_loss
def initialize_distributed(args):
    """Initialize torch.distributed via TCP and silence non-master output.

    Uses MASTER_ADDR/MASTER_PORT environment variables (defaulting to
    localhost:6000) for the rendezvous. After this call, only rank 0
    prints normally; other ranks must use print(..., force=True).
    """
    # Manually set the device ids.
    # NOTE(review): computed even when local_rank overrides it below, and
    # raises ZeroDivisionError on hosts with no visible CUDA device.
    device = args.rank % torch.cuda.device_count()
    if args.local_rank is not None:
        device = args.local_rank
    torch.cuda.set_device(device)
    # Call the init process
    if args.world_size > 1:
        init_method = 'tcp://'
        master_ip = os.getenv('MASTER_ADDR', 'localhost')
        master_port = os.getenv('MASTER_PORT', '6000')
        init_method += master_ip + ':' + master_port
        torch.distributed.init_process_group(
            backend=args.distributed_backend,
            world_size=args.world_size, rank=args.rank,
            init_method=init_method)
    # Monkey-patch print so only the master rank is verbose.
    suppress_output(args.rank == 0)
def suppress_output(is_master):
    """Silence ``print`` on non-master ranks.

    Replaces the ``print`` builtin with a filtering wrapper. Suppressed
    ranks can still force a message through with ``print(..., force=True)``.
    """
    import builtins
    original_print = builtins.print

    def filtered_print(*args, **kwargs):
        # Pop the custom keyword first so it never reaches the real print.
        allowed = kwargs.pop('force', False) or is_master
        if allowed:
            original_print(*args, **kwargs)

    builtins.print = filtered_print
def set_random_seed(seed):
    """Seed the python, numpy and torch RNGs for reproducibility.

    A ``None`` or non-positive seed leaves every generator untouched.
    """
    if seed is None or seed <= 0:
        return
    for seed_fn in (random.seed, np.random.seed,
                    torch.manual_seed, torch.cuda.manual_seed):
        seed_fn(seed)
def main():
    """Main training program.

    Sets up distributed state, data, and the model/optimizer, then runs
    the epoch loop with per-epoch validation and best/last checkpointing.
    Ctrl+C saves a 'checkpoints-last.pt' snapshot before exiting.
    """
    print('Pretrain BERT model')
    # Disable CuDNN.
    torch.backends.cudnn.enabled = False
    # Arguments.
    args = get_args()
    # Pytorch distributed.
    initialize_distributed(args)
    set_random_seed(args.seed)
    print(args)
    # Data stuff.
    data_config = configure_data()
    data_config.set_defaults(data_set_type='BERT', transpose=False)
    (train_data, val_data), tokenizer = data_config.apply(args)
    args.train_iters = len(train_data)
    # Best validation loss is tracked as a function attribute on evaluate.
    evaluate.best_val_loss = float("inf")
    # Model, optimizer, and learning rate.
    model, optimizer, lr_scheduler, criterion = setup_model_and_optimizer(
        args, tokenizer)
    # evaluate(val_data, model, tokenizer, criterion, args)
    # At any point you can hit Ctrl + C to break out of training early.
    try:
        total_iters = 0
        skipped_iters = 0
        start_epoch = 1
        # NOTE(review): this local is never read; the best loss lives on
        # evaluate.best_val_loss instead.
        best_val_loss = float('inf')
        # Resume data loader if necessary.
        if args.resume_dataloader:
            start_epoch = args.epoch
            total_iters = args.total_iters
        # For all epochs.
        for epoch in range(start_epoch, args.epochs + 1):
            timers = Timers()
            # if args.shuffle:
            #     train_data.batch_sampler.sampler.set_epoch(epoch + args.seed)
            timers('epoch time').start()
            iteration, skipped = train_epoch(epoch, model, tokenizer, optimizer,
                                             train_data, val_data, lr_scheduler,
                                             criterion, timers, args)
            elapsed_time = timers('epoch time').elapsed()
            total_iters += iteration
            skipped_iters += skipped
            # End-of-epoch validation pass.
            lm_loss, nsp_loss = evaluate(val_data, model, tokenizer, criterion, args)
            val_loss = lm_loss + nsp_loss
            print('-' * 100)
            print('| end of epoch {:3d} | time: {:.3f}s | valid loss {:.3f} | '
                  'valid LM Loss {:.3f} | valid LM PPL {:.3f} | valid NSP Loss {:.3f}'.format(
                epoch, elapsed_time, val_loss, lm_loss, math.exp(lm_loss), nsp_loss))
            print('-' * 100)
            if val_loss < evaluate.best_val_loss:
                evaluate.best_val_loss = val_loss
                if args.save:
                    best_path = 'checkpoints-best.pt'
                    print('saving best model to:',
                          os.path.join(args.save, best_path))
                    save_checkpoint(best_path, epoch + 1, 0, model, optimizer, lr_scheduler, args)
    except KeyboardInterrupt:
        print('-' * 100)
        print('Exiting from training early')
        if args.save:
            cur_path = 'checkpoints-last.pt'
            print('saving current model to:',
                  os.path.join(args.save, cur_path))
            # NOTE(review): relies on args.cur_iteration being set during
            # training (presumably by train_epoch), and on `epoch` being
            # bound — an interrupt before the loop starts would raise here.
            save_checkpoint(cur_path, epoch, args.cur_iteration, model, optimizer, lr_scheduler, args)
        exit()
# Script entry point: run pretraining only when invoked directly.
if __name__ == "__main__":
    main()
| 189 | 0 | 54 |
3ed529d99d20a681fda72fb1b468f10bdf3deffc | 1,907 | py | Python | molsys/fileIO/freq.py | MOFplus/molsys_rel | ff8b181fefc0ba03c5dd14fe2dde613298155203 | [
"MIT"
] | null | null | null | molsys/fileIO/freq.py | MOFplus/molsys_rel | ff8b181fefc0ba03c5dd14fe2dde613298155203 | [
"MIT"
] | null | null | null | molsys/fileIO/freq.py | MOFplus/molsys_rel | ff8b181fefc0ba03c5dd14fe2dde613298155203 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
@author: julian
Read (and write?) molden .freq files
"""
import numpy
import string
from molsys.util.constants import angstrom, kcalmol
| 30.269841 | 92 | 0.521762 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
@author: julian
Read (and write?) molden .freq files
"""
import numpy
import string
from molsys.util.constants import angstrom, kcalmol
def read(mol, f):
    """Read a molden-style .freq file and populate ``mol`` in place.

    Parses the [Atoms], [FREQ], [FR-COORD] and per-"vibration" sections,
    filling mol.xyz, mol.elems, mol.atypes, mol.frequencies and
    mol.normalmodes (array of shape natoms x nmodes x 3).

    Args:
        mol: molsys mol object to fill (modified in place).
        f: an open, iterable file-like object.

    Raises:
        IOError: if ``f`` is not a readable file-like object.
    """
    try:
        f.readline ### attribute probe only (not called): duck-types f as file-like
    except AttributeError:
        raise IOError("%s is not readable" % f)
    xyz,elems,eigval,eigvec = [],[],[],[]
    stage,nm_idx = 'Null', -1
    # Line-driven state machine: section headers switch `stage`; other
    # lines are interpreted according to the current stage.
    for i,c in enumerate(f):
        if c == '': continue
        if c.count('Atoms') != 0:
            stage = 'Atoms';continue
        elif c.count('FREQ') != 0:
            stage ='Freqs';continue
        elif c.count('FR-COORD') != 0:
            stage = 'Setup_nmarray';continue
        elif c.count('vibration') != 0:
            # last token of the "vibration" line is the 1-based mode index
            stage = [i for i in c.split() if i != ''][-1];continue
            ### stage contains then the normal mode index
        else:
            pass
        ###
        if stage == 'Null': continue
        ###atoms
        if stage == 'Atoms':
            sline = [i for i in c.split() if i != '']
            # NOTE(review): assumes columns are
            # "element index atomic_number x y z" — confirm against molden spec.
            elems.append(sline[0])
            xyz.append([float(i) for i in sline[3:]])
        elif stage == 'Freqs':
            eigval.append(float(c.replace(' ','')))
        elif stage == 'Setup_nmarray':
            # FR-COORD lines are only used to (re)allocate the mode array;
            # by now both len(elems) and len(eigval) are known.
            eigvec = numpy.zeros((len(elems),len(eigval),3),dtype='float')
        else: # in any case this is now an eigenvector!
            if nm_idx != int(stage) -1:
                nm_idx = int(stage)-1
                nmcount = 0  # reset per-mode atom counter for a new mode
            nm_idx = int(stage)-1
            eigvec[nmcount,nm_idx,:] = numpy.array([float(i) for i in c.split() if c != ''])
            nmcount += 1
    mol.natoms = len(elems)
    # NOTE(review): division by the `angstrom` constant suggests a unit
    # conversion of the [Atoms] coordinates — confirm units with molsys.
    mol.xyz = numpy.array(xyz)/angstrom
    mol.elems = elems
    mol.atypes = elems
    mol.frequencies = eigval
    mol.normalmodes = eigvec
    mol.set_empty_conn()
    mol.set_nofrags()
    return
| 1,694 | 0 | 23 |
7b9ece5aa05ac3e689b66154aa1aba97c512c468 | 1,049 | py | Python | week9-10/week10/hw41/gas_prices.py | fac33/py_intro_exercise | e2c3f3044537b5cc8980bf7aa7651fd16c5fd34b | [
"MIT"
] | null | null | null | week9-10/week10/hw41/gas_prices.py | fac33/py_intro_exercise | e2c3f3044537b5cc8980bf7aa7651fd16c5fd34b | [
"MIT"
] | null | null | null | week9-10/week10/hw41/gas_prices.py | fac33/py_intro_exercise | e2c3f3044537b5cc8980bf7aa7651fd16c5fd34b | [
"MIT"
] | null | null | null | ################################################################################
# Author: Fanyang Cheng
# Date: 04/07/2021
# Description: This program read the weekly gas average price txt file as input
# and draw a graph for to show the data.
################################################################################
import matplotlib.pyplot as plt
#read file
if __name__ == '__main__':
main()
plt.show()
| 34.966667 | 88 | 0.550048 | ################################################################################
# Author: Fanyang Cheng
# Date: 04/07/2021
# Description: This program read the weekly gas average price txt file as input
# and draw a graph for to show the data.
################################################################################
import matplotlib.pyplot as plt
#read file
def get_data():
    """Read the weekly gas price file and return its values as floats."""
    with open('2008_Weekly_Gas_Averages.txt', 'r') as source:
        # One price per line; float() tolerates the trailing newline removal.
        return [float(entry.rstrip()) for entry in source]
def main():
    """Plot the 2008 weekly average gas prices as a labelled line graph."""
    prices = get_data()
    weeks = range(1, len(prices) + 1)  # weeks are numbered from 1
    figure, axes = plt.subplots()
    axes.plot(weeks, prices)
    axes.grid()  # grid on, per the assignment spec
    axes.set_title('2008 Weekly Gas Prices')
    axes.set_xlabel('Weeks (by number)')
    axes.set_ylabel('Average Price (dollars/gallon)')
    axes.set_xlim(1, len(prices))
    axes.set_ylim(1.5, 4.25)
# Run the plotting script only when executed directly (not on import).
if __name__ == '__main__':
    main()
    plt.show()
| 582 | 0 | 44 |
941c88f15910a8e3d82889e5756ebb4771e44f69 | 18,566 | py | Python | brainspell/json_api.py | akeshavan/brainspell-neo | 05259770a9928d01adaaa028e9d2115750c4fe03 | [
"MIT"
] | null | null | null | brainspell/json_api.py | akeshavan/brainspell-neo | 05259770a9928d01adaaa028e9d2115750c4fe03 | [
"MIT"
] | null | null | null | brainspell/json_api.py | akeshavan/brainspell-neo | 05259770a9928d01adaaa028e9d2115750c4fe03 | [
"MIT"
] | null | null | null | # JSON API classes
import brainspell
from article_helpers import *
from base_handler import *
from search_helpers import *
from user_account_helpers import *
# For GitHub OAuth
import requests
import urllib.parse
import os
import hashlib
REQ_DESC = "The fields to search through. 'x' is experiments, 'p' is PMID, 'r' is reference, and 't' is title + authors + abstract."
START_DESC = "The offset of the articles to show; e.g., start = 10 would return results 11 - 20."
assert "github_frontend_client_id" in os.environ \
and "github_frontend_client_secret" in os.environ, \
"You need to set the 'github_frontend_client_id' and 'github_frontend_client_secret' environment variables."
assert "github_frontend_dev_client_id" in os.environ \
and "github_frontend_dev_client_secret" in os.environ, \
"You need to set the 'github_frontend_dev_client_id' and 'github_frontend_dev_client_secret' environment variables."
class ListEndpointsEndpointHandler(BaseHandler):
""" Return a list of all JSON API endpoints.
Do not include /help pages, or aliases. """
parameters = {}
endpoint_type = Endpoint.PULL_API
# BEGIN: Authentication endpoints
class GithubOauthProductionEndpointHandler(BaseHandler):
""" GitHub login authentication. Return the GitHub token and
Brainspell API key. """
parameters = {
"code": {
"type": str,
"description": "The code returned after GitHub OAuth."
}
}
endpoint_type = Endpoint.PULL_API
client_id_key = "github_frontend_client_id"
client_secret_key = "github_frontend_client_secret"
class GithubOauthDevelopmentEndpointHandler(
GithubOauthProductionEndpointHandler):
""" Endpoint for development OAuth. """
client_id_key = "github_frontend_dev_client_id"
client_secret_key = "github_frontend_dev_client_secret"
# BEGIN: search API endpoints
class QueryEndpointHandler(BaseHandler):
""" Endpoint to handle search queries. Return 10 results at a time. """
parameters = {
"q": {
"type": str,
"default": "",
"description": "The query to search for."
},
"start": {
"type": int,
"default": 0,
"description": START_DESC
},
"req": {
"type": str,
"default": "t",
"description": REQ_DESC
}
}
endpoint_type = Endpoint.PULL_API
class CoordinatesEndpointHandler(BaseHandler):
"""
API endpoint to fetch coordinates from all articles that match a query.
Return 200 sets of coordinates at a time.
"""
parameters = {
"q": {
"type": str,
"default": "",
"description": "The search query to return the coordinates for."
},
"start": {
"type": int,
"default": 0,
"description": START_DESC
},
"req": {
"type": str,
"default": "t",
"description": REQ_DESC
}
}
endpoint_type = Endpoint.PULL_API
class RandomQueryEndpointHandler(BaseHandler):
""" Return five random articles (for use on Brainspell's front page) """
parameters = {}
endpoint_type = Endpoint.PULL_API
class AddArticleFromPmidEndpointHandler(BaseHandler):
""" Add an article to our database via PMID (for use on the search page) """
parameters = {
"new_pmid": {
"type": str,
"description": PMID_DESC
}
}
endpoint_type = Endpoint.PUSH_API
# BEGIN: article API endpoints
class ArticleEndpointHandler(BaseHandler):
"""
Return the contents of an article, given a PMID.
Called by the view-article page.
"""
parameters = {
"pmid": {
"type": str
}
}
endpoint_type = Endpoint.PULL_API
class BulkAddEndpointHandler(BaseHandler):
"""
Add a large number of articles to our database at once,
by parsing a file that is sent to us in a JSON format.
"""
parameters = {}
endpoint_type = Endpoint.PUSH_API
class SetArticleAuthorsEndpointHandler(BaseHandler):
""" Edit the authors of an article. """
parameters = {
"pmid": {
"type": str
},
"authors": {
"type": str,
"description": "The string to set as the 'authors' for this article."
}
}
endpoint_type = Endpoint.PUSH_API
class ToggleStereotaxicSpaceVoteEndpointHandler(BaseHandler):
""" Toggle a user's vote for the stereotaxic space of an article. """
parameters = {
"pmid": {
"type": str
},
"space": {
"type": str,
"description": "Must be 'mni' or 'talairach' without quotes."
}
}
endpoint_type = Endpoint.PUSH_API
class NumberOfSubjectsVoteEndpointHandler(BaseHandler):
""" Place a vote for the number of subjects for an article. """
parameters = {
"pmid": {
"type": str
},
"subjects": {
"type": int,
"description": "The number of subjects that should be set for this article."
}
}
endpoint_type = Endpoint.PUSH_API
class AddExperimentsTableViaTextEndpointHandler(BaseHandler):
"""
Add a table of experiment coordinates via text.
Used on the view-article page.
"""
parameters = {
"values": {
"type": str,
"description": "Takes a CSV formatted string of coordinates; i.e., x, y, z separated by commas, and each coordinate separated by a newline."
},
"pmid": {
"type": str
}
}
endpoint_type = Endpoint.PUSH_API
class ToggleUserVoteEndpointHandler(BaseHandler):
""" Endpoint for a user to vote on an article tag. """
parameters = {
"topic": {
"type": str,
"description": "The name of the tag to place a vote for."
},
"pmid": {
"type": str
},
"direction": {
"type": str,
"description": "The direction that the user clicked in. Will toggle; i.e., if the user votes up on an article they've already upvoted, then it will clear the vote."
}
}
endpoint_type = Endpoint.PUSH_API
# BEGIN: table API endpoints
class ToggleUserTagOnArticleEndpointHandler(BaseHandler):
""" Toggle a user tag on an article in our database. """
parameters = {
"pmid": {
"type": str
},
"tag_name": {
"type": str,
"description": "The name of the tag to add."
}
}
endpoint_type = Endpoint.PUSH_API
class UpdateTableVoteEndpointHandler(BaseHandler):
""" Update the vote on a tag for an experiment table. """
parameters = {
"tag_name": {
"type": str
},
"direction": {
"type": str
},
"experiment": {
"type": int
},
"pmid": {
"type": str
},
"column": {
"type": str,
"description": "The column to place the vote under. Options are 'T' for tasks, 'B' for behavioral, and 'C' for cognitive."
}
}
endpoint_type = Endpoint.PUSH_API
class FlagTableEndpointHandler(BaseHandler):
""" Flag a table as inaccurate. """
parameters = {
"pmid": {
"type": str
},
"experiment": {
"type": int
}
}
endpoint_type = Endpoint.PUSH_API
class EditTableTitleCaptionEndpointHandler(BaseHandler):
""" Edit the title and caption for an experiment table. """
parameters = {
"pmid": {
"type": str
},
"experiment": {
"type": int
},
"title": {
"type": str
},
"caption": {
"type": str,
"default": ""
}
}
endpoint_type = Endpoint.PUSH_API
class DeleteRowEndpointHandler(BaseHandler):
""" Delete a row of coordinates from an experiment table. """
parameters = {
"pmid": {
"type": str
},
"experiment": {
"type": int
},
"row_number": {
"type": int
}
}
endpoint_type = Endpoint.PUSH_API
class SplitTableEndpointHandler(BaseHandler):
"""
Split a table of coordinates for an experiment into two
separate tables.
"""
parameters = {
"pmid": {
"type": str
},
"experiment": {
"type": int
},
"row_number": {
"type": int
}
}
endpoint_type = Endpoint.PUSH_API
class UpdateRowEndpointHandler(BaseHandler):
""" Update a row of coordinates in an experiment table. """
parameters = {
"pmid": {
"type": str
},
"experiment": {
"type": int
},
"coordinates": {
"type": json.loads,
"description": "Takes a JSON array of three or four coordinates. (The fourth is z-effective.)"
},
"row_number": {
"type": int
}
}
endpoint_type = Endpoint.PUSH_API
class AddRowEndpointHandler(BaseHandler):
""" Add a single row of coordinates to an experiment table. """
parameters = {
"pmid": {
"type": str
},
"experiment": {
"type": int
},
"coordinates": {
"type": json.loads,
"description": "Takes a JSON array of three or four coordinates. (The fourth is z-effective.)"
},
"row_number": {
"type": int,
"default": -1,
"description": "The index that this row should be located at in the table. Defaults to the end of the table."
}
}
endpoint_type = Endpoint.PUSH_API
| 28.345038 | 176 | 0.556932 | # JSON API classes
import brainspell
from article_helpers import *
from base_handler import *
from search_helpers import *
from user_account_helpers import *
# For GitHub OAuth
import requests
import urllib.parse
import os
import hashlib
REQ_DESC = "The fields to search through. 'x' is experiments, 'p' is PMID, 'r' is reference, and 't' is title + authors + abstract."
START_DESC = "The offset of the articles to show; e.g., start = 10 would return results 11 - 20."
assert "github_frontend_client_id" in os.environ \
and "github_frontend_client_secret" in os.environ, \
"You need to set the 'github_frontend_client_id' and 'github_frontend_client_secret' environment variables."
assert "github_frontend_dev_client_id" in os.environ \
and "github_frontend_dev_client_secret" in os.environ, \
"You need to set the 'github_frontend_dev_client_id' and 'github_frontend_dev_client_secret' environment variables."
class ListEndpointsEndpointHandler(BaseHandler):
    """ Return a list of all JSON API endpoints.
    Do not include /help pages, or aliases. """
    parameters = {}
    endpoint_type = Endpoint.PULL_API

    def process(self, response, args):
        """List registered endpoint routes, skipping aliases and help pages."""
        visible = []
        for route, handler_class in brainspell.getJSONEndpoints():
            # Routes ending in "/" are aliases; routes ending in "help"
            # are the auto-generated documentation pages.
            if not route.endswith("/") and not route.endswith("help"):
                visible.append(route)
        response["endpoints"] = visible
        return response
# BEGIN: Authentication endpoints
class GithubOauthProductionEndpointHandler(BaseHandler):
    """ GitHub login authentication. Return the GitHub token and
    Brainspell API key. """
    parameters = {
        "code": {
            "type": str,
            "description": "The code returned after GitHub OAuth."
        }
    }
    endpoint_type = Endpoint.PULL_API
    client_id_key = "github_frontend_client_id"
    client_secret_key = "github_frontend_client_secret"
    def process(self, response, args):
        """Exchange the OAuth code for a token, register the user, and
        derive their Brainspell API key."""
        code = args["code"]
        data = {
            "client_id": os.environ[self.client_id_key],
            "client_secret": os.environ[self.client_secret_key],
            "code": code
        }
        # TODO: Make asynchronous, since this is blocking.
        result = requests.post(
            "https://github.com:443/login/oauth/access_token",
            data
        )
        # GitHub answers with form-encoded text, e.g. access_token=...&...
        params = urllib.parse.parse_qs(result.text)
        try:
            response["github_token"] = params["access_token"][0]
            user_data = requests.get(
                "https://api.github.com/user",
                headers={
                    "Authorization": "token " +
                    params["access_token"][0]})
            user = user_data.json()
            # idempotent operation to make sure GitHub user is in our
            # database
            register_github_user(user)
            # NOTE(review): the API key is SHA-1 of the *public* GitHub
            # user id, which anyone can recompute — consider an HMAC with
            # a server-side secret instead.
            hasher = hashlib.sha1()
            hasher.update(str(user["id"]).encode('utf-8'))
            api_key = hasher.hexdigest()
            response["api_key"] = api_key
        except BaseException:
            # Missing access_token key, network error, or bad JSON.
            response["success"] = 0
            response["description"] = "Authentication failed."
        return response
class GithubOauthDevelopmentEndpointHandler(
        GithubOauthProductionEndpointHandler):
    """ Endpoint for development OAuth. """
    # Same flow as the production handler; only the OAuth app credentials
    # (environment variable names) differ.
    client_id_key = "github_frontend_dev_client_id"
    client_secret_key = "github_frontend_dev_client_secret"
# BEGIN: search API endpoints
class QueryEndpointHandler(BaseHandler):
    """ Endpoint to handle search queries. Return 10 results at a time. """
    parameters = {
        "q": {
            "type": str,
            "default": "",
            "description": "The query to search for."
        },
        "start": {
            "type": int,
            "default": 0,
            "description": START_DESC
        },
        "req": {
            "type": str,
            "default": "t",
            "description": REQ_DESC
        }
    }
    endpoint_type = Endpoint.PULL_API

    def process(self, response, args):
        """Run the search and return (id, title, authors) for each hit.

        Sets response["start_index"] to the requested offset, or -1 when
        there are no results.
        """
        # FIX: removed the unused local `database_dict` that was never
        # read or written after initialization.
        results = formatted_search(args["q"], args["start"], args["req"])
        output_list = []
        for article in results:
            try:
                article_dict = {}
                article_dict["id"] = article.pmid
                article_dict["title"] = article.title
                article_dict["authors"] = article.authors
                output_list.append(article_dict)
            except BaseException:
                # Best-effort: skip malformed rows instead of failing the query.
                pass
        response["articles"] = output_list
        if len(results) == 0:
            response["start_index"] = -1
            # returns -1 if there are no results;
            # UI can always calculate (start, end) with (start_index + 1, start_index + 1 + len(articles))
            # TODO: consider returning the start/end indices for the range of
            # articles returned instead
        else:
            response["start_index"] = args["start"]
        return response
class CoordinatesEndpointHandler(BaseHandler):
    """
    API endpoint to fetch coordinates from all articles that match a query.
    Return 200 sets of coordinates at a time.
    """
    parameters = {
        "q": {
            "type": str,
            "default": "",
            "description": "The search query to return the coordinates for."
        },
        "start": {
            "type": int,
            "default": 0,
            "description": START_DESC
        },
        "req": {
            "type": str,
            "default": "t",
            "description": REQ_DESC
        }
    }
    endpoint_type = Endpoint.PULL_API

    def process(self, response, args):
        """Collect the coordinate lists of every experiment in the matches.

        FIX: removed the unused locals `database_dict` and `article_dict`
        that were initialized but never used.
        """
        results = formatted_search(args["q"], args["start"], args["req"], True)
        output_list = []
        for article in results:
            try:
                experiments = json.loads(article.experiments)
                for experiment in experiments:
                    # each experiment stores its coordinates under "locations"
                    output_list.extend(experiment["locations"])
            except BaseException:
                # Best-effort: skip articles with missing/malformed JSON.
                pass
        response["coordinates"] = output_list
        return response
class RandomQueryEndpointHandler(BaseHandler):
    """ Return five random articles (for use on Brainspell's front page) """
    parameters = {}
    endpoint_type = Endpoint.PULL_API

    def process(self, response, args):
        """Return id/title/authors for a handful of random articles.

        FIX: removed the unused local `database_dict`.
        """
        output_list = []
        for article in random_search():
            try:
                output_list.append({
                    "id": article.pmid,
                    "title": article.title,
                    "authors": article.authors,
                })
            except BaseException:
                # Skip malformed rows rather than failing the whole request.
                pass
        response["articles"] = output_list
        return response
class AddArticleFromPmidEndpointHandler(BaseHandler):
    """ Add an article to our database via PMID (for use on the search page) """
    parameters = {
        "new_pmid": {
            "type": str,
            "description": PMID_DESC
        }
    }
    endpoint_type = Endpoint.PUSH_API

    def process(self, response, args):
        """Fetch the article for the given PMID and insert it into the database."""
        add_pmid_article_to_database(args["new_pmid"])
        return response
# BEGIN: article API endpoints
class ArticleEndpointHandler(BaseHandler):
    """
    Return the contents of an article, given a PMID.
    Called by the view-article page.
    """
    parameters = {
        "pmid": {
            "type": str
        }
    }
    endpoint_type = Endpoint.PULL_API

    def process(self, response, args):
        """Copy every stored field of the matching article into the response."""
        # (response_key, article_attribute), in the order they are exposed.
        field_map = (
            ("timestamp", "timestamp"),
            ("abstract", "abstract"),
            ("authors", "authors"),
            ("doi", "doi"),
            ("experiments", "experiments"),
            ("metadata", "metadata"),
            ("neurosynthid", "neurosynthid"),
            ("pmid", "pmid"),
            ("reference", "reference"),
            ("title", "title"),
            ("id", "uniqueid"),
        )
        try:
            article = next(get_article_object(args["pmid"]))
            for response_key, attribute in field_map:
                response[response_key] = getattr(article, attribute)
        except BaseException:
            # Unknown PMID (or malformed row): report failure, don't raise.
            response["success"] = 0
        return response
class BulkAddEndpointHandler(BaseHandler):
    """
    Add a large number of articles to our database at once,
    by parsing a file that is sent to us in a JSON format.
    """
    parameters = {}
    endpoint_type = Endpoint.PUSH_API
    def process(self, response, args):
        """Parse a multipart upload named 'articlesFile' (a JSON array of
        article objects) and bulk-insert its contents."""
        # TODO: add better file parsing function
        try:
            file_body = self.request.files['articlesFile'][0]['body'].decode(
                'utf-8')
            contents = json.loads(file_body)
            if isinstance(contents, list):
                clean_articles = clean_bulk_add(contents)
                add_bulk(clean_articles)
                response["success"] = 1
            else:
                # data is malformed
                response["success"] = 0
        except BaseException:
            # Missing file, bad encoding, or invalid JSON all land here.
            response["success"] = 0
            response["description"] = "You must POST a file with the parameter name 'articlesFile' to this endpoint."
        return response
class SetArticleAuthorsEndpointHandler(BaseHandler):
    """ Edit the authors of an article. """
    parameters = {
        "pmid": {
            "type": str
        },
        "authors": {
            "type": str,
            "description": "The string to set as the 'authors' for this article."
        }
    }
    endpoint_type = Endpoint.PUSH_API

    def process(self, response, args):
        """Overwrite the stored author string for the given article."""
        update_authors(args["pmid"], args["authors"])
        return response
class ToggleStereotaxicSpaceVoteEndpointHandler(BaseHandler):
    """ Toggle a user's vote for the stereotaxic space of an article. """
    parameters = {
        "pmid": {
            "type": str
        },
        "space": {
            "type": str,
            "description": "Must be 'mni' or 'talairach' without quotes."
        }
    }
    endpoint_type = Endpoint.PUSH_API

    def process(self, response, args):
        """Validate the space name and toggle the caller's vote for it."""
        space = args["space"].lower()
        if space in ("mni", "talairach"):
            # BUG FIX: previously the raw args["space"] was stored even
            # though validation is case-insensitive, so "MNI" and "mni"
            # were recorded as different values; store the normalized form.
            vote_stereotaxic_space(
                args["pmid"],
                space,
                get_github_username_from_api_key(args["key"]))
        else:
            response["success"] = 0
            response["description"] = "Invalid value for 'space' parameter."
        return response
class NumberOfSubjectsVoteEndpointHandler(BaseHandler):
    """ Place a vote for the number of subjects for an article. """
    parameters = {
        "pmid": {
            "type": str
        },
        "subjects": {
            "type": int,
            "description": "The number of subjects that should be set for this article."
        }
    }
    endpoint_type = Endpoint.PUSH_API

    def process(self, response, args):
        """Record the caller's vote for the article's subject count."""
        voter = get_github_username_from_api_key(args["key"])
        vote_number_of_subjects(args["pmid"], args["subjects"], voter)
        return response
class AddExperimentsTableViaTextEndpointHandler(BaseHandler):
    """
    Add a table of experiment coordinates via text.
    Used on the view-article page.
    """
    parameters = {
        "values": {
            "type": str,
            "description": "Takes a CSV formatted string of coordinates; i.e., x, y, z separated by commas, and each coordinate separated by a newline."
        },
        "pmid": {
            "type": str
        }
    }
    endpoint_type = Endpoint.PUSH_API

    def process(self, response, args):
        """Parse the CSV coordinate text and append it as a new table."""
        add_table_through_text_box(args["pmid"], args["values"])
        return response
class ToggleUserVoteEndpointHandler(BaseHandler):
    """ Endpoint for a user to vote on an article tag. """
    parameters = {
        "topic": {
            "type": str,
            "description": "The name of the tag to place a vote for."
        },
        "pmid": {
            "type": str
        },
        "direction": {
            "type": str,
            "description": "The direction that the user clicked in. Will toggle; i.e., if the user votes up on an article they've already upvoted, then it will clear the vote."
        }
    }
    endpoint_type = Endpoint.PUSH_API

    def process(self, response, args):
        """Toggle the caller's up/down vote on the given article tag."""
        voter = get_github_username_from_api_key(args["key"])
        toggle_vote(args["pmid"], args["topic"], voter, args["direction"])
        return response
# BEGIN: table API endpoints
class ToggleUserTagOnArticleEndpointHandler(BaseHandler):
    """ Toggle a user tag on an article in our database. """
    parameters = {
        "pmid": {
            "type": str
        },
        "tag_name": {
            "type": str,
            "description": "The name of the tag to add."
        }
    }
    endpoint_type = Endpoint.PUSH_API

    def process(self, response, args):
        """Toggle a user-defined tag on the article for the calling user."""
        voter = get_github_username_from_api_key(args["key"])
        toggle_user_tag(args["tag_name"], args["pmid"], voter)
        return response
class UpdateTableVoteEndpointHandler(BaseHandler):
    """ Update the vote on a tag for an experiment table. """
    parameters = {
        "tag_name": {
            "type": str
        },
        "direction": {
            "type": str
        },
        "experiment": {
            "type": int
        },
        "pmid": {
            "type": str
        },
        "column": {
            "type": str,
            "description": "The column to place the vote under. Options are 'T' for tasks, 'B' for behavioral, and 'C' for cognitive."
        }
    }
    endpoint_type = Endpoint.PUSH_API

    def process(self, response, args):
        """Validate the column code and record the user's vote on the table tag."""
        username = get_github_username_from_api_key(args["key"])
        column = args["column"]
        if column not in ("T", "B", "C"):
            response["success"] = 0
            response["description"] = "That is not a valid option for the column parameter."
        else:
            # BUG FIX: the old code read args["table_num"], which is not a
            # declared parameter (KeyError on every valid request); the
            # declared name is "experiment".
            update_table_vote(
                args["tag_name"],
                args["direction"],
                args["experiment"],
                args["pmid"],
                column,
                username)
        return response
class FlagTableEndpointHandler(BaseHandler):
    """ Flag a table as inaccurate. """
    parameters = {
        "pmid": {
            "type": str
        },
        "experiment": {
            "type": int
        }
    }
    endpoint_type = Endpoint.PUSH_API

    def process(self, response, args):
        """Mark the given experiment table as flagged (inaccurate)."""
        flag_table(args["pmid"], args["experiment"])
        return response
class EditTableTitleCaptionEndpointHandler(BaseHandler):
    """ Edit the title and caption for an experiment table. """
    parameters = {
        "pmid": {
            "type": str
        },
        "experiment": {
            "type": int
        },
        "title": {
            "type": str
        },
        "caption": {
            "type": str,
            "default": ""
        }
    }
    endpoint_type = Endpoint.PUSH_API

    def process(self, response, args):
        """Store the new title and caption for the experiment table."""
        edit_table_title_caption(
            args["pmid"], args["experiment"], args["title"], args["caption"])
        return response
class DeleteRowEndpointHandler(BaseHandler):
    """ Delete a row of coordinates from an experiment table. """
    parameters = {
        "pmid": {
            "type": str
        },
        "experiment": {
            "type": int
        },
        "row_number": {
            "type": int
        }
    }
    endpoint_type = Endpoint.PUSH_API

    def process(self, response, args):
        """Delete the requested coordinate row from the experiment table."""
        # BUG FIX: the old code read args["row"], but the declared parameter
        # is "row_number" — every request raised KeyError.
        delete_row(args["pmid"], args["experiment"], args["row_number"])
        return response
class SplitTableEndpointHandler(BaseHandler):
    """
    Split a table of coordinates for an experiment into two
    separate tables.
    """
    parameters = {
        "pmid": {
            "type": str
        },
        "experiment": {
            "type": int
        },
        "row_number": {
            "type": int
        }
    }
    endpoint_type = Endpoint.PUSH_API

    def process(self, response, args):
        """Split the experiment table in two at the given row."""
        # BUG FIX: the old code read args["row"], but the declared parameter
        # is "row_number" — every request raised KeyError.
        split_table(args["pmid"], args["experiment"], args["row_number"])
        return response
class UpdateRowEndpointHandler(BaseHandler):
    """ Update a row of coordinates in an experiment table. """
    parameters = {
        "pmid": {
            "type": str
        },
        "experiment": {
            "type": int
        },
        "coordinates": {
            "type": json.loads,
            "description": "Takes a JSON array of three or four coordinates. (The fourth is z-effective.)"
        },
        "row_number": {
            "type": int
        }
    }
    endpoint_type = Endpoint.PUSH_API

    def process(self, response, args):
        """Overwrite one coordinate row, rejecting malformed coordinate lists."""
        coordinate_list = args["coordinates"]
        if len(coordinate_list) not in (3, 4):
            # x, y, z (plus optional z-effective) are the only legal shapes.
            response["success"] = 0
            response["description"] = "Wrong number of coordinates."
            return response
        update_coordinate_row(
            args["pmid"],
            args["experiment"],
            coordinate_list,
            args["row_number"])
        return response
class AddRowEndpointHandler(BaseHandler):
    """ Add a single row of coordinates to an experiment table. """
    parameters = {
        "pmid": {
            "type": str
        },
        "experiment": {
            "type": int
        },
        "coordinates": {
            "type": json.loads,
            "description": "Takes a JSON array of three or four coordinates. (The fourth is z-effective.)"
        },
        "row_number": {
            "type": int,
            "default": -1,
            "description": "The index that this row should be located at in the table. Defaults to the end of the table."
        }
    }
    endpoint_type = Endpoint.PUSH_API

    def process(self, response, args):
        """Insert one coordinate row, rejecting malformed coordinate lists."""
        coordinate_list = args["coordinates"]
        if len(coordinate_list) not in (3, 4):
            # x, y, z (plus optional z-effective) are the only legal shapes.
            response["success"] = 0
            response["description"] = "Wrong number of coordinates."
            return response
        add_coordinate_row(
            args["pmid"],
            args["experiment"],
            coordinate_list,
            args["row_number"])
        return response
| 8,076 | 0 | 567 |
75a1b42150f0c5930166b72169d17038f84dd12f | 8,419 | py | Python | train/tasks/semantic/modules/user.py | Crowbar97/SalsaNext | 789968bf702367b7f004ccc058d3cdce53ee385c | [
"MIT"
] | null | null | null | train/tasks/semantic/modules/user.py | Crowbar97/SalsaNext | 789968bf702367b7f004ccc058d3cdce53ee385c | [
"MIT"
] | null | null | null | train/tasks/semantic/modules/user.py | Crowbar97/SalsaNext | 789968bf702367b7f004ccc058d3cdce53ee385c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# This file is covered by the LICENSE file in the root of this project.
import imp
import os
import time
import numpy as np
from matplotlib import pyplot as plt
import torch
import torch.backends.cudnn as cudnn
from torch import nn
import __init__ as booger
from tasks.semantic.modules.segmentator import *
from tasks.semantic.postproc.KNN import KNN
| 39.712264 | 156 | 0.503742 | #!/usr/bin/env python3
# This file is covered by the LICENSE file in the root of this project.
import imp
import os
import time
import numpy as np
from matplotlib import pyplot as plt
import torch
import torch.backends.cudnn as cudnn
from torch import nn
import __init__ as booger
from tasks.semantic.modules.segmentator import *
from tasks.semantic.postproc.KNN import KNN
def get_sync_time():
    """Return a high-resolution timestamp suitable for timing GPU work.

    When CUDA is available, all queued device work is synchronized first so
    the timestamp reflects completed kernels rather than async launches.
    """
    if not torch.cuda.is_available():
        return time.perf_counter()
    torch.cuda.synchronize()
    return time.perf_counter()
class User():
    """Runs inference with a trained semantic-segmentation model (SalsaNet /
    SalsaNext or a generic Segmentator) over a laser-scan dataset, timing each
    scan and writing per-point predictions to disk."""

    def __init__(self, ARCH, DATA, datadir, logdir, modeldir, modelname, split):
        """Load the dataset parser and the trained model; move to GPU if available.

        Args:
            ARCH: architecture config dict (dataset/sensor/post-processing keys).
            DATA: dataset config dict (splits, labels, color/learning maps).
            datadir: root directory of the dataset.
            logdir: directory where predictions are written.
            modeldir: directory holding the trained weights.
            modelname: 'salsanet' / 'salsanext' selects the SalsaNet loader,
                anything else falls back to the generic Segmentator.
            split: which split to infer ('train'/'valid'/'test' or None for all).
        """
        # parameters
        self.ARCH = ARCH
        self.DATA = DATA
        self.datadir = datadir
        self.logdir = logdir
        self.modeldir = modeldir
        self.modelname = modelname
        self.split = split
        # get the data: the dataset-specific parser module is loaded
        # dynamically from the dataset's own directory.
        parserModule = imp.load_source('parserModule',
                                       booger.TRAIN_PATH + '/tasks/semantic/dataset/' + self.DATA['name'] + '/parser.py')
        self.parser = parserModule.Parser(root=self.datadir,
                                          train_sequences=self.DATA['split']['train'],
                                          valid_sequences=self.DATA['split']['valid'],
                                          test_sequences=self.DATA['split']['test'],
                                          labels=self.DATA['labels'],
                                          color_map=self.DATA['color_map'],
                                          learning_map=self.DATA['learning_map'],
                                          learning_map_inv=self.DATA['learning_map_inv'],
                                          sensor=self.ARCH['dataset']['sensor'],
                                          max_points=self.ARCH['dataset']['max_points'],
                                          batch_size=1,
                                          # workers=1,
                                          # important for time measurement
                                          workers=0,
                                          gt=True,
                                          shuffle_train=False)
        # concatenate the encoder and the head
        if self.modelname in ('salsanet', 'salsanext'):
            with torch.no_grad():
                print('modeldir: %s' % self.modeldir)
                model_path = os.path.join(self.modeldir, 'SalsaNet')
                print('model_path: %s' % model_path)
                self.model = SalsaNet(self.ARCH,
                                      self.parser.get_n_classes(),
                                      model_path)
                self.model = nn.DataParallel(self.model)
                torch.nn.Module.dump_patches = True
                # map_location keeps tensors on CPU during load regardless of
                # the device they were saved from.
                w_dict = torch.load(model_path, map_location=lambda storage, loc: storage)
                print(w_dict['state_dict'].keys())
                self.model.module.load_state_dict(w_dict['state_dict'], strict=True)
        else:
            with torch.no_grad():
                self.model = Segmentator(self.ARCH,
                                         self.parser.get_n_classes(),
                                         self.modeldir)
        # use knn post processing?
        self.post = None
        if self.ARCH['post']['KNN']['use']:
            self.post = KNN(self.ARCH['post']['KNN']['params'], self.parser.get_n_classes())
        # GPU?
        self.gpu = False
        self.model_single = self.model
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print('Infering in device: ', self.device)
        if torch.cuda.is_available() and torch.cuda.device_count() > 0:
            cudnn.benchmark = True
            cudnn.fastest = True
            self.gpu = True
            self.model.cuda()

    def infer(self):
        """Run inference on the split chosen at construction time.

        split == None runs all three splits; unknown split names fall
        through to the test set.
        """
        if self.split == None:
            # do train set
            self.infer_subset(loader=self.parser.get_train_set(),
                              to_orig_fn=self.parser.to_original)
            # do valid set
            self.infer_subset(loader=self.parser.get_valid_set(),
                              to_orig_fn=self.parser.to_original)
            # do test set
            self.infer_subset(loader=self.parser.get_test_set(),
                              to_orig_fn=self.parser.to_original)
        elif self.split == 'valid':
            self.infer_subset(loader=self.parser.get_valid_set(),
                              to_orig_fn=self.parser.to_original)
        elif self.split == 'train':
            self.infer_subset(loader=self.parser.get_train_set(),
                              to_orig_fn=self.parser.to_original)
        else:
            self.infer_subset(loader=self.parser.get_test_set(),
                              to_orig_fn=self.parser.to_original)
        print('Finished Infering')
        return

    def infer_subset(self, loader, to_orig_fn):
        """Infer every scan produced by `loader`, writing predictions to
        ``logdir/sequences/<seq>/predictions/<name>`` and printing timing
        statistics (the first scan is excluded from the stats as warm-up).

        Args:
            loader: dataset iterator; assumes the parser's 16-field batch
                tuple layout -- TODO confirm against parser.py.
            to_orig_fn: maps learning labels back to original label ids.
        """
        # switch to evaluate mode
        self.model.eval()
        # empty the cache to infer in high res
        if self.gpu:
            torch.cuda.empty_cache()
        with torch.no_grad():
            # infer time segments
            infer_times = []
            # projection time segments
            proj_times = []
            for i, (proj_in, proj_mask, _, _, path_seq, path_name, p_x, p_y, proj_range, unproj_range, _, _, _, _, npoints, proj_time) in enumerate(loader):
                proj_times.append(proj_time.data.cpu().numpy()[0])
                # first cut to rela size (batch size one allows it)
                p_x = p_x[0, :npoints]
                p_y = p_y[0, :npoints]
                proj_range = proj_range[0, :npoints]
                unproj_range = unproj_range[0, :npoints]
                path_seq = path_seq[0]
                path_name = path_name[0]
                # loading data on GPU
                # depends on GPU, so we dont include this in the inference time
                if self.gpu:
                    proj_in = proj_in.cuda()
                    p_x = p_x.cuda()
                    p_y = p_y.cuda()
                    if self.post:
                        proj_range = proj_range.cuda()
                        unproj_range = unproj_range.cuda()
                # INFER TIME START
                infer_time_start = get_sync_time()
                # compute output
                proj_output = self.model(proj_in)
                proj_argmax = proj_output[0].argmax(dim=0)
                if self.post:
                    # knn postproc
                    unproj_argmax = self.post(proj_range, unproj_range, proj_argmax, p_x, p_y)
                else:
                    # put in original pointcloud using indexes
                    unproj_argmax = proj_argmax[p_y, p_x]
                # INFER TIME END
                infer_time_end = get_sync_time()
                infer_times.append(infer_time_end - infer_time_start)
                print('Infered sequence: %s' % path_seq)
                print('Scan: %s' % path_name)
                print('Proj time: %s sec' % proj_times[-1])
                print('Infer time: %s sec' % infer_times[-1])
                print('Total time: %s sec' % (proj_times[-1] + infer_times[-1]))
                # save scan
                # get the first scan in batch and project scan
                pred_np = unproj_argmax.cpu().numpy()
                pred_np = pred_np.reshape((-1)).astype(np.int32)
                # map to original label
                pred_np = to_orig_fn(pred_np)
                # save scan
                path = os.path.join(self.logdir, 'sequences', path_seq, 'predictions', path_name)
                pred_np.tofile(path)
            # Timing statistics skip index 0 (warm-up iteration).
            print('*' * 30)
            print('INFER TIME STATISTICS')
            print('MEAN: %s' % np.mean(infer_times[1:]))
            print('STD: %s' % np.std(infer_times[1:]))
            print('COUNT: %s' % len(infer_times[1:]))
            # plt.plot(infer_times[1:])
            # plt.savefig('infer_time.png')
            print('-' * 15)
            print('PROJ TIME STATISTICS')
            print('MEAN: %s' % np.mean(proj_times[1:]))
            print('STD: %s' % np.std(proj_times[1:]))
            print('COUNT: %s' % len(proj_times[1:]))
            # plt.plot(proj_times[1:])
            # plt.savefig('proj_time.png')

    def predict(self):
        """Placeholder; not implemented."""
        pass
| 7,887 | -8 | 153 |
b28113f9144fb0be437e13f608eef1bfa90e636b | 6,220 | py | Python | main.py | farhannysf/vitruvina | f4d9512c80c928001543a0852332a512ab602591 | [
"MIT"
] | 5 | 2018-11-04T08:46:38.000Z | 2021-03-09T21:50:54.000Z | main.py | farhannysf/vitruvina | f4d9512c80c928001543a0852332a512ab602591 | [
"MIT"
] | null | null | null | main.py | farhannysf/vitruvina | f4d9512c80c928001543a0852332a512ab602591 | [
"MIT"
] | null | null | null | import random
import settings
import finance_utils
import asyncio
import aiohttp
from time import strftime
from datetime import date
from sanic import Sanic, response
from sanic.response import json
app = Sanic()
@app.route('/vitruvina', methods=['POST'])
if __name__ == '__main__':
app.run(host='0.0.0.0', port=80) | 46.41791 | 281 | 0.671704 | import random
import settings
import finance_utils
import asyncio
import aiohttp
from time import strftime
from datetime import date
from sanic import Sanic, response
from sanic.response import json
app = Sanic()
async def aioGet(*args, **kwargs):
    """HTTP GET helper.

    The payload is eagerly read into ``response.body`` before the session
    closes, so callers may still inspect the response afterwards.
    """
    async with aiohttp.ClientSession() as session, session.get(*args, **kwargs) as resp:
        resp.body = await resp.read()
        return resp
async def aioPost(*args, **kwargs):
    """HTTP POST helper.

    The payload is eagerly read into ``response.body`` before the session
    closes, so callers may still inspect the response afterwards.
    """
    async with aiohttp.ClientSession() as session, session.post(*args, **kwargs) as resp:
        resp.body = await resp.read()
        return resp
async def nlp(message, userId):
    """Send `message` to the Dialogflow (api.ai) query endpoint for this
    session and return the parsed intent JSON."""
    resp = await aioPost(
        url='https://api.api.ai/v1/query',
        headers={
            'Content-Type': 'application/json; charset=utf-8',
            'Authorization': settings.dialogflowToken,
        },
        json={
            'v': strftime('%Y%m%d'),
            'query': message,
            'lang': 'en',
            'sessionId': userId,
        },
    )
    return await resp.json()
async def nlpContext(userId):
    """Fetch the active Dialogflow conversation contexts for `userId`'s
    session and return the parsed JSON."""
    resp = await aioGet(
        url='https://api.api.ai/v1/contexts',
        headers={
            'Content-Type': 'application/json; charset=utf-8',
            'Authorization': settings.dialogflowToken,
        },
        params={'v': strftime('%Y%m%d'), 'sessionId': userId},
    )
    return await resp.json()
async def cleverbot(textClean):
    """Get a small-talk reply for `textClean` from the Cleverbot API."""
    query = {'key': settings.cleverbotToken, 'input': textClean}
    resp = await aioGet("https://www.cleverbot.com/getreply", params=query)
    payload = await resp.json()
    return payload['output']
async def reply(message):
    """Post `message` to the webhook URL configured in ``settings.slackUrl``."""
    await aioPost(url=settings.slackUrl, json={'text': message})
async def logicUnit(request):
    """Handle one Slack mention event end to end.

    Classifies the message with Dialogflow, then either chats back via
    Cleverbot or answers a finance question (balance sheet, income
    statement, profitability, liquidity) built from Intrinio data, and
    finally posts the answer to Slack.
    """
    userId = request.json['event']['user']
    # Strip the bot's own @-mention before classifying the text.
    # NOTE(review): the mention id '<@UDGQJD9FT>' is hard-coded to one
    # workspace's bot user -- confirm this is intentional.
    textClean = request.json['event']['text'].replace('<@UDGQJD9FT>', '').strip()
    nlpResult = await nlp(textClean, userId)
    # Missing intent metadata falls back to Cleverbot small talk.
    intent = nlpResult['result']['metadata'].get('intentName', 'cleverbot')
    if intent == 'cleverbot':
        cleverbotOutput = await cleverbot(textClean)
        await reply(cleverbotOutput)
        return
    if intent == 'finance':
        # Fresh finance question: entities come from this NLP result.
        # companyQuery keeps only alphanumeric characters of the company name.
        queries = {'companyQuery':''.join(e for e in nlpResult['result']['parameters'].get('companies') if e.isalnum()),
                   'account':[nlpResult['result']['parameters'].get('accounts')],
                   'fiscalPeriod':nlpResult['result']['parameters'].get('fiscalPeriod'),
                   'fiscalYear':nlpResult['result']['parameters'].get('number-integer')}
    if intent == 'finance-followup':
        # Follow-up question: entities are recovered from the stored context.
        context = await nlpContext(userId)
        queries = {'companyQuery':''.join(e for e in context[0]['parameters'].get('companies') if e.isalnum()),
                   'account':[context[0]['parameters'].get('accounts')],
                   'fiscalPeriod':context[0]['parameters'].get('fiscalPeriod'),
                   'fiscalYear':context[0]['parameters'].get('number-integer')}
    # NOTE(review): if intent is none of the handled values, `queries` (and
    # possibly `message`) is unbound here and this raises NameError -- verify
    # Dialogflow can only return the intents handled above.
    tickerData = await finance_utils.resolveTicker(queries["companyQuery"])
    if len(tickerData) == 0:
        message = 'Sorry, I can\'t found the company.'
    else:
        companyData = {'companyTicker':tickerData[0]['ticker'], 'companyName':tickerData[0]['security_name']}
        # Normalize e.g. "balance sheet?" -> "balance_sheet".
        account = queries['account'][0].replace(' ', '_').replace('?', '')
        financeData = await finance_utils.intrinioData(queries, companyData['companyTicker'], account)
        if account == 'balance_sheet':
            balanceSheet = await finance_utils.generateStatement(financeData, finance_utils.balanceSheet_dict)
            message = f'*{queries["fiscalPeriod"]} {queries["fiscalYear"]} Balance Sheet for {companyData["companyName"]} ({companyData["companyTicker"]})*\n```{balanceSheet}```'
        if account == 'income_statement':
            incomeStatement = await finance_utils.generateStatement(financeData, finance_utils.incomeStatement_dict)
            message = f'*{queries["fiscalPeriod"]} {queries["fiscalYear"]} Income Statement for {companyData["companyName"]} ({companyData["companyTicker"]})*\n```{incomeStatement}```'
        if account == 'profitability':
            # Profitability needs both statements; note `account` is rebound
            # to a list here, so the 'liquidity' branch below cannot match.
            account = ['income_statement', 'balance_sheet']
            incomeStatement_data = await finance_utils.intrinioData(queries, companyData['companyTicker'], account[0])
            balanceSheet_data = await finance_utils.intrinioData(queries, companyData['companyTicker'], account[1])
            incomeStatement = await finance_utils.generate_statementData(incomeStatement_data)
            balanceSheet = await finance_utils.generate_statementData(balanceSheet_data)
            profitability = await finance_utils.generateProfitability(incomeStatement, balanceSheet)
            message = f'Here is some key financial metrics to help you understand the profitability of {companyData["companyName"]}\n*{queries["fiscalPeriod"]} {queries["fiscalYear"]} Profitability of {companyData["companyName"]} ({companyData["companyTicker"]})*\n{profitability}'
        if account == 'liquidity':
            account = 'balance_sheet'
            balanceSheet_data = await finance_utils.intrinioData(queries, companyData['companyTicker'], account)
            balanceSheet = await finance_utils.generate_statementData(balanceSheet_data)
            liquidity = await finance_utils.generateLiquidity(balanceSheet)
            message = f'Here is some key financial metrics to help you understand the liquidity of {companyData["companyName"]}\n*{queries["fiscalPeriod"]} {queries["fiscalYear"]} Liquidity of {companyData["companyName"]} ({companyData["companyTicker"]})*\n{liquidity}'
    await reply(message)
    return
@app.route('/vitruvina', methods=['POST'])
async def mention(request):
    """Slack events webhook.

    Answers the URL-verification handshake, otherwise schedules event
    processing in the background and acknowledges immediately.
    """
    # Slack URL verification: echo the challenge back verbatim.
    verifyCheck = request.json.get('challenge')
    if verifyCheck:
        # `json` here is sanic.response.json (imported at module top).
        return json({'challenge': verifyCheck})
    # Fire-and-forget so Slack gets its acknowledgement within its timeout.
    loop = asyncio.get_event_loop()
    loop.create_task(logicUnit(request))
    return response.json(
        {'message': 'Success'},
        headers={'X-Slack-No-Retry': 1},
        # NOTE(review): 1490 is not a standard HTTP status code -- presumably
        # deliberate (to suppress Slack retries?); confirm before changing.
        status=1490
    )
if __name__ == '__main__':
app.run(host='0.0.0.0', port=80) | 5,706 | 0 | 183 |
94eced0ae6d8038942ea21908a77c26af1240b78 | 1,769 | py | Python | okta/models/application_accessibility.py | corylevine/okta-sdk-python | c86b8fdc4525e84199143c27213c0aebc6b2af8f | [
"Apache-2.0"
] | 145 | 2017-06-13T21:54:04.000Z | 2022-02-25T05:44:34.000Z | okta/models/application_accessibility.py | corylevine/okta-sdk-python | c86b8fdc4525e84199143c27213c0aebc6b2af8f | [
"Apache-2.0"
] | 146 | 2017-06-02T17:46:12.000Z | 2022-03-29T15:52:15.000Z | okta/models/application_accessibility.py | corylevine/okta-sdk-python | c86b8fdc4525e84199143c27213c0aebc6b2af8f | [
"Apache-2.0"
] | 98 | 2017-06-27T03:44:51.000Z | 2022-03-23T04:58:18.000Z | # flake8: noqa
"""
Copyright 2020 - Present Okta, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# AUTO-GENERATED! DO NOT EDIT FILE DIRECTLY
# SEE CONTRIBUTOR DOCUMENTATION
from okta.okta_object import OktaObject
class ApplicationAccessibility(
OktaObject
):
"""
A class for ApplicationAccessibility objects.
"""
| 32.759259 | 72 | 0.687394 | # flake8: noqa
"""
Copyright 2020 - Present Okta, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# AUTO-GENERATED! DO NOT EDIT FILE DIRECTLY
# SEE CONTRIBUTOR DOCUMENTATION
from okta.okta_object import OktaObject
class ApplicationAccessibility(OktaObject):
    """
    A class for ApplicationAccessibility objects.

    Holds the accessibility settings of an Okta application
    (error/login redirect URLs and the self-service flag).
    """

    def __init__(self, config=None):
        """Initialize from an optional API response dict.

        Args:
            config: dict from the Okta API; missing keys (or a falsy
                config) leave the corresponding attribute as None.
        """
        super().__init__(config)
        if config:
            # dict.get returns None for absent keys, matching the previous
            # explicit `key in config` checks with a single lookup.
            self.error_redirect_url = config.get("errorRedirectUrl")
            self.login_redirect_url = config.get("loginRedirectUrl")
            self.self_service = config.get("selfService")
        else:
            self.error_redirect_url = None
            self.login_redirect_url = None
            self.self_service = None

    def request_format(self):
        """Return the camelCase dict representation used in API requests,
        merged over the parent object's format."""
        parent_req_format = super().request_format()
        current_obj_format = {
            "errorRedirectUrl": self.error_redirect_url,
            "loginRedirectUrl": self.login_redirect_url,
            "selfService": self.self_service
        }
        parent_req_format.update(current_obj_format)
        return parent_req_format
| 899 | 0 | 54 |
495e8adc4d26507ba4b761349caac57cd7210956 | 1,391 | py | Python | joels_scripts/constants.py | DelparteLabs/WCWAVE_ClimateInterp | dce7ddf4f6025ae96b864767c198af78f4ee3c0a | [
"MIT"
] | null | null | null | joels_scripts/constants.py | DelparteLabs/WCWAVE_ClimateInterp | dce7ddf4f6025ae96b864767c198af78f4ee3c0a | [
"MIT"
] | null | null | null | joels_scripts/constants.py | DelparteLabs/WCWAVE_ClimateInterp | dce7ddf4f6025ae96b864767c198af78f4ee3c0a | [
"MIT"
] | null | null | null | #Import necessary modules
import arcpy
from arcpy.sa import *
import numpy
#Check-out necessary extensions
arcpy.CheckOutExtension('Spatial')
#Set input parameters
elevation_raster = arcpy.GetParameterAsText(0)
conRL = arcpy.GetParameter(1)
conRL_ouRaster = arcpy.GetParameterAsText(2)
conH2OSat = arcpy.GetParameter(3)
conH2OSat_outRaster = arcpy.GetParameterAsText(4)
#Set up workspace
scratchWS = arcpy.env.scratchWorkspace
scratchGDB = arcpy.env.scratchGDB
#output cell size and processing extent should be the same as elevation raster
arcpy.env.cellSize = elevation_raster
output_cell_size = arcpy.env.cellSize
arcpy.env.extent = elevation_raster
extent = arcpy.env.extent
arcpy.env.overwriteOutput = True
arcpy.env.parallelProcessingFactor = "75%"
arcpy.Delete_management("in_memory")
#Get coordinate system information
desc = arcpy.Describe(elevation_raster)
coordSystem = desc.spatialReference
arcpy.AddMessage("Creating constant roughness length raster")
rlConstant = CreateConstantRaster(conRL, "FLOAT", output_cell_size, extent)
arcpy.DefineProjection_management(rlConstant, coordSystem)
rlConstant.save(conRL_ouRaster)
arcpy.AddMessage("Creating constant liquid water saturation raster")
waterConstant = CreateConstantRaster(conH2OSat, "FLOAT", output_cell_size, extent)
arcpy.DefineProjection_management(waterConstant, coordSystem)
waterConstant.save(conH2OSat_outRaster) | 33.926829 | 82 | 0.833932 | #Import necessary modules
import arcpy
from arcpy.sa import *
import numpy
#Check-out necessary extensions
arcpy.CheckOutExtension('Spatial')
#Set input parameters
elevation_raster = arcpy.GetParameterAsText(0)
conRL = arcpy.GetParameter(1)
conRL_ouRaster = arcpy.GetParameterAsText(2)
conH2OSat = arcpy.GetParameter(3)
conH2OSat_outRaster = arcpy.GetParameterAsText(4)
#Set up workspace
scratchWS = arcpy.env.scratchWorkspace
scratchGDB = arcpy.env.scratchGDB
#output cell size and processing extent should be the same as elevation raster
arcpy.env.cellSize = elevation_raster
output_cell_size = arcpy.env.cellSize
arcpy.env.extent = elevation_raster
extent = arcpy.env.extent
arcpy.env.overwriteOutput = True
arcpy.env.parallelProcessingFactor = "75%"
arcpy.Delete_management("in_memory")
#Get coordinate system information
desc = arcpy.Describe(elevation_raster)
coordSystem = desc.spatialReference
arcpy.AddMessage("Creating constant roughness length raster")
rlConstant = CreateConstantRaster(conRL, "FLOAT", output_cell_size, extent)
arcpy.DefineProjection_management(rlConstant, coordSystem)
rlConstant.save(conRL_ouRaster)
arcpy.AddMessage("Creating constant liquid water saturation raster")
waterConstant = CreateConstantRaster(conH2OSat, "FLOAT", output_cell_size, extent)
arcpy.DefineProjection_management(waterConstant, coordSystem)
waterConstant.save(conH2OSat_outRaster) | 0 | 0 | 0 |
9a76e6b847a5af542c69c4a2ca4ade2eecfda813 | 2,920 | py | Python | caffe2/python/operator_test/pack_rnn_sequence_op_test.py | jsun94/nimble | e5c899a69677818b1becc58100577441e15ede13 | [
"BSD-3-Clause"
] | 206 | 2020-11-28T22:56:38.000Z | 2022-03-27T02:33:04.000Z | caffe2/python/operator_test/pack_rnn_sequence_op_test.py | jsun94/nimble | e5c899a69677818b1becc58100577441e15ede13 | [
"BSD-3-Clause"
] | 19 | 2020-12-09T23:13:14.000Z | 2022-01-24T23:24:08.000Z | caffe2/python/operator_test/pack_rnn_sequence_op_test.py | jsun94/nimble | e5c899a69677818b1becc58100577441e15ede13 | [
"BSD-3-Clause"
] | 28 | 2020-11-29T15:25:12.000Z | 2022-01-20T02:16:27.000Z |
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
if __name__ == "__main__":
import unittest
unittest.main()
| 31.73913 | 75 | 0.560616 |
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
class TestPackRNNSequenceOperator(serial.SerializedTestCase):
@serial.given(n=st.integers(0, 10), k=st.integers(1, 5),
dim=st.integers(1, 5), **hu.gcs_cpu_only)
def test_pack_rnn_seqence(self, n, k, dim, gc, dc):
lengths = np.random.randint(k, size=n).astype(np.int32) + 1
values = np.random.rand(sum(lengths), dim).astype(np.float32)
def pack_op(values, lengths):
T = max(lengths) if any(lengths) else 0
N = lengths.size
output = np.zeros((T, N) + values.shape[1:]).astype(np.float32)
offset = 0
for c in range(N):
for r in range(lengths[c]):
output[r][c] = values[offset + r]
offset += lengths[c]
return [output]
op = core.CreateOperator(
'PackRNNSequence',
['values', 'lengths'],
'out'
)
# Check against numpy reference
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[values, lengths],
reference=pack_op,
)
# Check over multiple devices
self.assertDeviceChecks(dc, op, [values, lengths], [0])
# Gradient check
self.assertGradientChecks(gc, op, [values, lengths], 0, [0])
@serial.given(n=st.integers(0, 10), k=st.integers(2, 5),
dim=st.integers(1, 5), **hu.gcs_cpu_only)
def test_unpack_rnn_seqence(self, n, k, dim, gc, dc):
lengths = np.random.randint(k, size=n).astype(np.int32) + 1
T = max(lengths) if any(lengths) else 0
N = lengths.size
values = np.random.rand(T, N, dim).astype(np.float32)
def unpack_op(values, lengths):
M = sum(lengths)
output = np.zeros((M,) + values.shape[2:]).astype(np.float32)
N = lengths.size
offset = 0
for c in range(N):
for r in range(lengths[c]):
output[offset + r] = values[r][c]
offset += lengths[c]
return [output]
op = core.CreateOperator(
'UnpackRNNSequence',
['values', 'lengths'],
'out'
)
# Check against numpy reference
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[values, lengths],
reference=unpack_op,
)
# Check over multiple devices
self.assertDeviceChecks(dc, op, [values, lengths], [0])
# Gradient check
self.assertGradientChecks(gc, op, [values, lengths], 0, [0])
if __name__ == "__main__":
import unittest
unittest.main()
| 2,270 | 322 | 23 |
7cd8c313e001a8189aed96f414569f874d893c5a | 1,682 | py | Python | lib/spack/spack/cmd/common/__init__.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2020-09-10T22:50:08.000Z | 2021-01-12T22:18:54.000Z | lib/spack/spack/cmd/common/__init__.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 17 | 2019-03-21T15:54:00.000Z | 2022-03-29T19:34:28.000Z | lib/spack/spack/cmd/common/__init__.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2021-04-07T18:27:09.000Z | 2022-03-31T22:52:38.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import llnl.util.tty as tty
import llnl.util.tty.color as color
import spack.paths
def shell_init_instructions(cmd, equivalent):
    """Print out instructions for users to initialize shell support.

    Arguments:
        cmd (str): the command the user tried to run that requires
            shell support in order to work
        equivalent (str): a command they can run instead, without
            enabling shell support
    """
    per_shell = "{sh_arg}" in equivalent

    lines = [
        "`%s` requires spack's shell support." % cmd,
        "",
        "To set up shell support, run the command below for your shell.",
        "",
        color.colorize("@*c{For bash/zsh/sh:}"),
        " . %s/setup-env.sh" % spack.paths.share_path,
        "",
        color.colorize("@*c{For csh/tcsh:}"),
        " source %s/setup-env.csh" % spack.paths.share_path,
        "",
        color.colorize("@*c{For fish:}"),
        " source %s/setup-env.fish" % spack.paths.share_path,
        "",
        "Or, if you do not want to use shell support, run " + (
            "one of these" if per_shell else "this") + " instead:",
        "",
    ]

    if per_shell:
        tail = [
            equivalent.format(sh_arg="--sh ") + " # bash/zsh/sh",
            equivalent.format(sh_arg="--csh ") + " # csh/tcsh",
            equivalent.format(sh_arg="--fish") + " # fish",
        ]
    else:
        tail = [" " + equivalent]

    tty.error(*(lines + tail + ['']))
| 31.148148 | 73 | 0.577289 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import llnl.util.tty as tty
import llnl.util.tty.color as color
import spack.paths
def shell_init_instructions(cmd, equivalent):
"""Print out instructions for users to initialize shell support.
Arguments:
cmd (str): the command the user tried to run that requires
shell support in order to work
equivalent (str): a command they can run instead, without
enabling shell support
"""
shell_specific = "{sh_arg}" in equivalent
msg = [
"`%s` requires spack's shell support." % cmd,
"",
"To set up shell support, run the command below for your shell.",
"",
color.colorize("@*c{For bash/zsh/sh:}"),
" . %s/setup-env.sh" % spack.paths.share_path,
"",
color.colorize("@*c{For csh/tcsh:}"),
" source %s/setup-env.csh" % spack.paths.share_path,
"",
color.colorize("@*c{For fish:}"),
" source %s/setup-env.fish" % spack.paths.share_path,
"",
"Or, if you do not want to use shell support, run " + (
"one of these" if shell_specific else "this") + " instead:",
"",
]
if shell_specific:
msg += [
equivalent.format(sh_arg="--sh ") + " # bash/zsh/sh",
equivalent.format(sh_arg="--csh ") + " # csh/tcsh",
equivalent.format(sh_arg="--fish") + " # fish",
]
else:
msg += [" " + equivalent]
msg += ['']
tty.error(*msg)
| 0 | 0 | 0 |
3d91f9e590b81b179ff1c3782708adf0d1a80c4b | 8,815 | py | Python | src/parse_utils.py | dockimbel/RGB | 3dbb999cecc1fc60cd0f9a064723cc5d967c0688 | [
"MIT"
] | null | null | null | src/parse_utils.py | dockimbel/RGB | 3dbb999cecc1fc60cd0f9a064723cc5d967c0688 | [
"MIT"
] | null | null | null | src/parse_utils.py | dockimbel/RGB | 3dbb999cecc1fc60cd0f9a064723cc5d967c0688 | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2018 Samuel Wilder
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Contains several utilities for parsing with the Pyparsing library as well as
all of the parsers for all the compilers.
"""
from pyparsing import *
from red_utils import fix_hex_num
def ReplaceWith(replace):
    """Return a pyparsing parse action that replaces matched tokens with
    ``replace``.

    Usage:
        >>> op_exponent = Literal('**').setParseAction(ReplaceWith('^'))
    """
    # A def instead of an assigned lambda (PEP 8 E731); same call signature
    # expected by pyparsing: action(string, loc, tokens) -> replacement.
    return lambda string, loc, tokens: replace
def Replace(parser, string):
    """Attach a replace-with-``string`` parse action to ``parser``.

    Convenience wrapper that shortens the syntax needed to use
    ``ReplaceWith``.

    Usage A vs. B:
        A. op_exponent = Replace(Literal('**'), '^')   # <- Way shorter
        B. op_exponent = Literal('**').setParseAction(ReplaceWith('^'))
    """
    # A def instead of an assigned lambda (PEP 8 E731); behavior unchanged.
    return parser.setParseAction(ReplaceWith(string))
'''
Kills the storage specifiers of C integers because they cannot be compiled in
Red/System.
'''
# NOTE: pyparsing tries `|` alternatives left to right, so longer suffixes
# ('ui64', 'ull', 'll') must precede their prefixes ('u', 'l').
IntegerSuffix = (
    CaselessLiteral('ui8').suppress()
    | CaselessLiteral('ui16').suppress()
    | CaselessLiteral('ui32').suppress()
    | CaselessLiteral('ui64').suppress()
    | CaselessLiteral('ull').suppress()
    | CaselessLiteral('ul').suppress()
    | CaselessLiteral('u').suppress()
    | CaselessLiteral('ll').suppress()
    | CaselessLiteral('l').suppress()
    | CaselessLiteral('i8').suppress()
    | CaselessLiteral('i16').suppress()
    | CaselessLiteral('i32').suppress()
    | CaselessLiteral('i64').suppress()
)
'''
Kills the storage specifiers of C floats because they cannot be compiled in
Red/System.
'''
FloatSuffix = (
    CaselessLiteral('f8').suppress()
    | CaselessLiteral('f16').suppress()
    | CaselessLiteral('f32').suppress()
    | CaselessLiteral('f64').suppress()
    | CaselessLiteral('f').suppress()
)
'''
Parses a hex literal and automatically changes it to the Red/System equivalent.
'''
HexNumber = Combine(
    Literal('0x') +
    Word(nums + 'abcdefABCDEF') +
    Optional(IntegerSuffix)
    # Make sure that the replacement of the '0x' to 'h' happens here
).setParseAction(lambda s, l, tokens: fix_hex_num(tokens[0]))('HexNumber')
'''
Parses a C integer.
'''
# The exponent form ('1e+5') is tried before a plain digit run.
Integer = Combine(
    Optional(Literal('-')) +
    (
        Word(nums) + CaselessLiteral('e') + (
            Literal('+') | Literal('-')
        ) +
        Word(nums) + Optional(IntegerSuffix)
        | Word(nums) + Optional(IntegerSuffix)
    )
)('Integer')
'''
Parses a C floating point decimal.
'''
FloatNumber = Combine(
    Optional(Literal('-')) + (
        Optional(Word(nums)) + Literal('.') + Word(nums) + CaselessLiteral('e') + (Literal('+') | Literal('-')) + Word(nums) + Optional(FloatSuffix | IntegerSuffix)
        | Optional(Word(nums)) + Literal('.') + Word(nums) + Optional(FloatSuffix | IntegerSuffix)
    )
)('FloatNumber')
'''
Parses any type of C integer literal.
'''
# FloatNumber is tried first so '1.5' is not split into '1' and '.5'.
Number = FloatNumber | HexNumber | Integer
'''
Identifier:
age
_123
__abc123
th1s1samaz3box
'''
Identifier = Word(alphas + '_', bodyChars=alphanums + '_')
'''
Parses a C pound define.
'''
# Matches `#define NAME <anything reasonable>`; the replacement text is
# optional (bare feature-flag defines have none).
PoundDefine = (
    Keyword('#define') +
    Identifier +
    Optional(
        OneOrMore(
            Number
            | quotedString
            | Identifier
            | Keyword('()')
            | Keyword('( )')
            | Literal('(')
            | Literal(')')
            | Literal(',')
            | Keyword('...')
            | Word('!@#$%^&*-=+|.')
        )
    )
)
'''
Parses a C macro.
'''
# Function-like define: `#define NAME(a, b, ...)`; only the name and the
# grouped parameter list are kept, punctuation is suppressed.
Macro = (
    Keyword('#define').suppress() +
    Identifier +
    Literal('(').suppress() +
    Group(
        ZeroOrMore(
            Identifier
            | Literal(',')
            | Keyword('...')
        )
    ) +
    Literal(')').suppress()
)
'''
Parses a C prefix such as a function return type. Replaces any occurance of a
specific storage type with a single type so it can be ingested by RGB.
'''
# Alternation order matters: multi-word storage types must be matched (and
# normalized) before the bare Identifier fallback can swallow 'long'/'int'.
Prefix = OneOrMore(
    Keyword('__declspec(dllimport)').suppress()
    | Keyword('__declspec(dllexport)').suppress()
    | Keyword('__declspec(noreturn)').suppress()
    | Keyword('__stdcall').suppress()
    | Keyword('__cdecl').suppress()
    | Keyword('unsigned').suppress()
    | Keyword('signed').suppress()
    | Keyword('long long unsigned int').setParseAction(ReplaceWith('int'))
    | Keyword('long long signed int').setParseAction(ReplaceWith('int'))
    | Keyword('long long int').setParseAction(ReplaceWith('int'))
    | Keyword('long long').setParseAction(ReplaceWith('long'))
    # FIX: was 'long unsinged int' (typo), so 'long unsigned int' never
    # matched this branch and leaked through as identifiers 'long'+'int'
    # (compare the correctly spelled entry in `Types` below).
    | Keyword('long unsigned int').setParseAction(ReplaceWith('int'))
    | Keyword('long signed int').setParseAction(ReplaceWith('int'))
    | Keyword('long int').setParseAction(ReplaceWith('int'))
    | Keyword('long double').setParseAction(ReplaceWith('double'))
    | Keyword('short int').setParseAction(ReplaceWith('int'))
    | Keyword('const').suppress()
    | Identifier
)
'''
Parses a C function pointer.
'''
# `typedef void (*name)(args);` -- only the name and grouped argument
# types survive; all punctuation and calling conventions are suppressed.
FunctionPtr = (
    Keyword('typedef void').suppress() +
    Literal('(').suppress() +
    Optional(Keyword('__stdcall').suppress() | Keyword('__cdecl').suppress()) +
    Literal('*').suppress() +
    Identifier +
    Literal(')').suppress() +
    Literal('(').suppress() +
    Group(
        ZeroOrMore(
            (Prefix | Literal('*')) +
            Optional(Literal(','))
        )
    ) +
    Literal(')').suppress() +
    Literal(';').suppress()
)
'''
Any C type. Filters out simple unacceptable occurances.
'''
# Same normalization table as `Prefix`, expressed with the Replace helper.
Types = OneOrMore(
    Keyword('unsigned').suppress()
    | Keyword('signed').suppress()
    | Replace(Keyword('long long unsigned int'), 'int')
    | Replace(Keyword('long long signed int'), 'int')
    | Replace(Keyword('long long int'), 'int')
    | Replace(Keyword('long long'), 'long')
    | Replace(Keyword('long unsigned int'), 'int')
    | Replace(Keyword('long signed int'), 'int')
    | Replace(Keyword('long int'), 'int')
    | Replace(Keyword('long double'), 'double')
    | Replace(Keyword('short int'), 'int')
    | Keyword('const').suppress()
    | Identifier
)
'''
Parses a C typedef.
'''
Typedef = (
    Keyword('typedef').suppress() +
    OneOrMore(Types)
)
'''
Parses a C function.
'''
# First group: return type (with optional pointer stars); second group:
# the parameter list.
Function = (
    Group(OneOrMore(Prefix) + Optional(OneOrMore(Literal('*'))) + Optional(Prefix)) +
    Literal('(').suppress() +
    Group(ZeroOrMore(
        Prefix
        | Literal('*')
        | Literal(',')
        | Keyword('...')
    )) +
    Literal(')').suppress() +
    Literal(';').suppress()
)
'''
Parses a C global variable.
'''
# `extern <type> <name>;` -- the extern keyword and semicolon are dropped.
GlobalVar = (
    Keyword('extern').suppress() +
    OneOrMore(Prefix | Literal('*')) +
    Literal(';').suppress()
)
'''
Parses a C struct prefix.
'''
# Like `Prefix` but without the trailing Identifier fallback; multi-word
# storage types are normalized before anything shorter can match.
StructPrefix = OneOrMore(
    Keyword('__declspec(dllimport)').suppress()
    | Keyword('__declspec(dllexport)').suppress()
    | Keyword('__declspec(noreturn)').suppress()
    | Keyword('__stdcall').suppress()
    | Keyword('__cdecl').suppress()
    | Keyword('unsigned').suppress()
    | Keyword('signed').suppress()
    | Keyword('long long unsigned int').setParseAction(ReplaceWith('int'))
    | Keyword('long long signed int').setParseAction(ReplaceWith('int'))
    | Keyword('long long int').setParseAction(ReplaceWith('int'))
    | Keyword('long long').setParseAction(ReplaceWith('long'))
    # FIX: was 'long unsinged int' (typo) -- 'long unsigned int' could
    # never be normalized (compare the spelling used in `Types`).
    | Keyword('long unsigned int').setParseAction(ReplaceWith('int'))
    | Keyword('long signed int').setParseAction(ReplaceWith('int'))
    | Keyword('long int').setParseAction(ReplaceWith('int'))
    | Keyword('long double').setParseAction(ReplaceWith('double'))
    | Keyword('short int').setParseAction(ReplaceWith('int'))
    | Keyword('const').suppress()
)
'''
Parses a variable declaration within a struct.
'''
Decl = OneOrMore(
    StructPrefix
    | Identifier
    | Literal('*')
) + Literal(';').suppress()
'''
Parses the start of a C struct.
'''
# Handles both `struct Name` and `typedef struct Name`.
StructStart = (
    Optional(Keyword('typedef').suppress()) +
    Keyword('struct').suppress() +
    Identifier
)
'''
Parses the end of a C struct.
'''
# `} alias1, alias2;` -- collects the (possibly empty) alias list.
StructEnd = (
    Literal('}').suppress() +
    Group(
        ZeroOrMore(Identifier +
                   Optional(Literal(','))) +
        Literal(';').suppress()
    ))
'''
Parses a C enum.
'''
# Members may carry explicit values; '=' is rewritten to ': ' for the
# Red/System output.
Enum = (
    Keyword('enum').suppress() +
    Identifier +
    Literal('{').suppress() +
    Group(OneOrMore(
        Identifier + Replace(Literal('='), ': ') + Word(alphanums + '_-.\'"') + Literal(',')
        | Identifier + Replace(Literal('='), ': ') + Word(alphanums + '_-.\'"')
        | Identifier + Literal(',')
        | Identifier
    )) +
    Literal('}').suppress() +
    Optional(Identifier) +
    Literal(';').suppress()
)
| 26.15727 | 158 | 0.680091 | """
MIT License
Copyright (c) 2018 Samuel Wilder
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Contains several utilities for parsing with the Pyparsing library as well as
all of the parsers for all the compilers.
"""
from pyparsing import *
from red_utils import fix_hex_num
def ReplaceWith(replace):
    """Return a pyparsing parse action that replaces matched tokens with
    the fixed string *replace*.

    The returned callable has the standard parse-action signature
    ``(string, loc, tokens)``; the first two arguments are ignored.

    Usage:
        >>> op_exponent = Literal('**').setParseAction(ReplaceWith('^'))
    """
    def _action(string, loc, tokens):
        return replace
    return _action


def Replace(parser, string):
    """Attach a ``ReplaceWith(string)`` parse action to *parser*.

    Convenience shorthand; returns whatever ``setParseAction`` returns
    (the parser itself in pyparsing).

    Usage A vs. B:
        A. op_exponent = Replace(Literal('**'), '^')  # <- Way shorter
        B. op_exponent = Literal('**').setParseAction(ReplaceWith('^'))
    """
    return parser.setParseAction(ReplaceWith(string))
'''
Kills the storage specifiers of C integers because they cannot be compiled in
Red/System.
'''
# NOTE: alternatives are tried first-match (pyparsing MatchFirst), so longer
# suffixes ('ull', 'll', 'ui64', ...) must precede their prefixes
# ('ul', 'u', 'l') or the shorter form would steal the match.
IntegerSuffix = (
    CaselessLiteral('ui8').suppress()
    | CaselessLiteral('ui16').suppress()
    | CaselessLiteral('ui32').suppress()
    | CaselessLiteral('ui64').suppress()
    | CaselessLiteral('ull').suppress()
    | CaselessLiteral('ul').suppress()
    | CaselessLiteral('u').suppress()
    | CaselessLiteral('ll').suppress()
    | CaselessLiteral('l').suppress()
    | CaselessLiteral('i8').suppress()
    | CaselessLiteral('i16').suppress()
    | CaselessLiteral('i32').suppress()
    | CaselessLiteral('i64').suppress()
)
'''
Kills the storage specifiers of C floats because they cannot be compiled in
Red/System.
'''
# Same first-match ordering rule: 'f8'..'f64' before plain 'f'.
FloatSuffix = (
    CaselessLiteral('f8').suppress()
    | CaselessLiteral('f16').suppress()
    | CaselessLiteral('f32').suppress()
    | CaselessLiteral('f64').suppress()
    | CaselessLiteral('f').suppress()
)
'''
Parses a hex literal and automatically changes it to the Red/System equivalent.
'''
HexNumber = Combine(
Literal('0x') +
Word(nums + 'abcdefABCDEF') +
Optional(IntegerSuffix)
# Make sure that the replacement of the '0x' to 'h' happens here
).setParseAction(lambda s, l, tokens: fix_hex_num(tokens[0]))('HexNumber')
'''
Parses a C integer.
'''
Integer = Combine(
Optional(Literal('-')) +
(
Word(nums) + CaselessLiteral('e') + (
Literal('+') | Literal('-')
) +
Word(nums) + Optional(IntegerSuffix)
| Word(nums) + Optional(IntegerSuffix)
)
)('Integer')
'''
Parses a C floating point decimal.
'''
FloatNumber = Combine(
Optional(Literal('-')) + (
Optional(Word(nums)) + Literal('.') + Word(nums) + CaselessLiteral('e') + (Literal('+') | Literal('-')) + Word(nums) + Optional(FloatSuffix | IntegerSuffix)
| Optional(Word(nums)) + Literal('.') + Word(nums) + Optional(FloatSuffix | IntegerSuffix)
)
)('FloatNumber')
'''
Parses any type of C integer literal.
'''
Number = FloatNumber | HexNumber | Integer
'''
Identifier:
age
_123
__abc123
th1s1samaz3box
'''
Identifier = Word(alphas + '_', bodyChars=alphanums + '_')
'''
Parses a C pound define.
'''
# Matches '#define NAME <anything reasonable>'.  The replacement text is a
# loose soup of numbers, strings, identifiers, parens, commas, ellipses and
# operator characters; the '#define' keyword itself is kept (not suppressed,
# unlike Macro below).
PoundDefine = (
    Keyword('#define') +
    Identifier +
    Optional(
        OneOrMore(
            Number
            | quotedString
            | Identifier
            | Keyword('()')
            | Keyword('( )')
            | Literal('(')
            | Literal(')')
            | Literal(',')
            | Keyword('...')
            | Word('!@#$%^&*-=+|.')
        )
    )
)
'''
Parses a C macro.
'''
# Function-like macro header: '#define NAME(a, b, ...)'.  Only the macro
# name and the grouped parameter tokens survive; punctuation is suppressed.
Macro = (
    Keyword('#define').suppress() +
    Identifier +
    Literal('(').suppress() +
    Group(
        ZeroOrMore(
            Identifier
            | Literal(',')
            | Keyword('...')
        )
    ) +
    Literal(')').suppress()
)
'''
Parses a C prefix such as a function return type. Replaces any occurance of a
specific storage type with a single type so it can be ingested by RGB.
'''
# Alternatives are tried first-match (pyparsing MatchFirst), so the
# multi-word storage specifiers must come before their shorter prefixes
# (e.g. 'long long int' before 'long long' before 'long int').
Prefix = OneOrMore(
    Keyword('__declspec(dllimport)').suppress()
    | Keyword('__declspec(dllexport)').suppress()
    | Keyword('__declspec(noreturn)').suppress()
    | Keyword('__stdcall').suppress()
    | Keyword('__cdecl').suppress()
    | Keyword('unsigned').suppress()
    | Keyword('signed').suppress()
    | Keyword('long long unsigned int').setParseAction(ReplaceWith('int'))
    | Keyword('long long signed int').setParseAction(ReplaceWith('int'))
    | Keyword('long long int').setParseAction(ReplaceWith('int'))
    | Keyword('long long').setParseAction(ReplaceWith('long'))
    # Fixed typo: was 'long unsinged int', which could never match real C
    # (compare the correctly spelled entry in `Types`).
    | Keyword('long unsigned int').setParseAction(ReplaceWith('int'))
    | Keyword('long signed int').setParseAction(ReplaceWith('int'))
    | Keyword('long int').setParseAction(ReplaceWith('int'))
    | Keyword('long double').setParseAction(ReplaceWith('double'))
    | Keyword('short int').setParseAction(ReplaceWith('int'))
    | Keyword('const').suppress()
    | Identifier
)
'''
Parses a C function pointer.
'''
# Matches 'typedef void (__stdcall *Name)(args);' -- only the pointer name
# and the grouped argument-type tokens are kept; all punctuation and the
# calling convention are suppressed.
FunctionPtr = (
    Keyword('typedef void').suppress() +
    Literal('(').suppress() +
    Optional(Keyword('__stdcall').suppress() | Keyword('__cdecl').suppress()) +
    Literal('*').suppress() +
    Identifier +
    Literal(')').suppress() +
    Literal('(').suppress() +
    Group(
        ZeroOrMore(
            (Prefix | Literal('*')) +
            Optional(Literal(','))
        )
    ) +
    Literal(')').suppress() +
    Literal(';').suppress()
) 
'''
Any C type. Filters out simple unacceptable occurances.
'''
# Same normalization table as Prefix, written with the Replace() helper;
# multi-word specifiers again come before their shorter prefixes.
Types = OneOrMore(
    Keyword('unsigned').suppress()
    | Keyword('signed').suppress()
    | Replace(Keyword('long long unsigned int'), 'int')
    | Replace(Keyword('long long signed int'), 'int')
    | Replace(Keyword('long long int'), 'int')
    | Replace(Keyword('long long'), 'long')
    | Replace(Keyword('long unsigned int'), 'int')
    | Replace(Keyword('long signed int'), 'int')
    | Replace(Keyword('long int'), 'int')
    | Replace(Keyword('long double'), 'double')
    | Replace(Keyword('short int'), 'int')
    | Keyword('const').suppress()
    | Identifier
)
'''
Parses a C typedef.
'''
# 'typedef <types...>' -- keyword suppressed, normalized type tokens kept.
Typedef = (
    Keyword('typedef').suppress() +
    OneOrMore(Types)
)
'''
Parses a C function.
'''
# Function declaration: grouped return type (with optional pointer stars
# and trailing name), then the grouped parameter-list tokens.  Parens and
# the terminating ';' are suppressed.
Function = (
    Group(OneOrMore(Prefix) + Optional(OneOrMore(Literal('*'))) + Optional(Prefix)) +
    Literal('(').suppress() +
    Group(ZeroOrMore(
        Prefix
        | Literal('*')
        | Literal(',')
        | Keyword('...')
    )) +
    Literal(')').suppress() +
    Literal(';').suppress()
)
'''
Parses a C global variable.
'''
# 'extern <type tokens and stars> name;' -- 'extern' and ';' suppressed.
GlobalVar = (
    Keyword('extern').suppress() +
    OneOrMore(Prefix | Literal('*')) +
    Literal(';').suppress()
)
'''
Parses a C struct prefix.
'''
# Same storage-specifier normalization as Prefix, but without the trailing
# Identifier alternative.  Alternatives are tried first-match, so
# multi-word specifiers must precede their shorter prefixes.
StructPrefix = OneOrMore(
    Keyword('__declspec(dllimport)').suppress()
    | Keyword('__declspec(dllexport)').suppress()
    | Keyword('__declspec(noreturn)').suppress()
    | Keyword('__stdcall').suppress()
    | Keyword('__cdecl').suppress()
    | Keyword('unsigned').suppress()
    | Keyword('signed').suppress()
    | Keyword('long long unsigned int').setParseAction(ReplaceWith('int'))
    | Keyword('long long signed int').setParseAction(ReplaceWith('int'))
    | Keyword('long long int').setParseAction(ReplaceWith('int'))
    | Keyword('long long').setParseAction(ReplaceWith('long'))
    # Fixed typo: was 'long unsinged int', which could never match real C
    # (compare the correctly spelled entry in `Types`).
    | Keyword('long unsigned int').setParseAction(ReplaceWith('int'))
    | Keyword('long signed int').setParseAction(ReplaceWith('int'))
    | Keyword('long int').setParseAction(ReplaceWith('int'))
    | Keyword('long double').setParseAction(ReplaceWith('double'))
    | Keyword('short int').setParseAction(ReplaceWith('int'))
    | Keyword('const').suppress()
)
'''
Parses a variable declaration within a struct.
'''
# One struct member: optional storage prefix, identifiers / pointer stars,
# terminated by a (suppressed) ';'.
Decl = OneOrMore(
    StructPrefix
    | Identifier
    | Literal('*')
) + Literal(';').suppress()
'''
Parses the start of a C struct.
'''
# Matches `typedef struct Name` / `struct Name`; only the name is kept.
StructStart = (
    Optional(Keyword('typedef').suppress()) +
    Keyword('struct').suppress() +
    Identifier
)
'''
Parses the end of a C struct.
'''
# Matches `} name1, name2 ;` -- the optional comma-separated instance names
# after the closing brace are grouped; braces and ';' are suppressed.
StructEnd = (
    Literal('}').suppress() +
    Group(
        ZeroOrMore(Identifier +
        Optional(Literal(','))) +
        Literal(';').suppress()
    ))
'''
Parses a C enum.
'''
# Matches `enum Name { A = 1, B, ... } inst;`.  Replace() rewrites each
# '=' as ': ' in the output (presumably the Red/System enum separator --
# TODO confirm).  Longer alternatives come first so '=' forms win.
Enum = (
    Keyword('enum').suppress() +
    Identifier +
    Literal('{').suppress() +
    Group(OneOrMore(
        Identifier + Replace(Literal('='), ': ') + Word(alphanums + '_-.\'"') + Literal(',')
        | Identifier + Replace(Literal('='), ': ') + Word(alphanums + '_-.\'"')
        | Identifier + Literal(',')
        | Identifier
    )) +
    Literal('}').suppress() +
    Optional(Identifier) +
    Literal(';').suppress()
)
| 0 | 0 | 0 |
6737b1610023ce55da9d1f5659e9fb729bfa0f76 | 2,150 | py | Python | code/plots5_airlines_cereal.py | msinkinson/CommonOwnerReplication | 59a4b275464521623c6bfc22738959c27a534504 | [
"MIT"
] | null | null | null | code/plots5_airlines_cereal.py | msinkinson/CommonOwnerReplication | 59a4b275464521623c6bfc22738959c27a534504 | [
"MIT"
] | null | null | null | code/plots5_airlines_cereal.py | msinkinson/CommonOwnerReplication | 59a4b275464521623c6bfc22738959c27a534504 | [
"MIT"
] | null | null | null | # %%
# %%
import pandas as pd
import numpy as np
import pathlib
import matplotlib
import matplotlib.pyplot as plt
from our_plot_config import derived_dir, fig_dir, raw_dir, setplotstyle
from kappas import do_one_period
setplotstyle()
# %%
# Input files
# NOTE(review): raw_dir / derived_dir / fig_dir support the '/' operator,
# so they are presumably pathlib.Path objects from our_plot_config -- confirm.
f_cereal = raw_dir / 'cereal.parquet'
f_airlines = raw_dir / 'airlines.parquet'
f_firm_info = derived_dir / 'firm-info.parquet'
f_kappas = derived_dir / 'official-kappas.parquet'
# Figure outputs
fig_both = fig_dir / 'figure16_airlines_cereal_banks.pdf'
# %%
# ### Read in the (Cleaned) Parquet File of Beta's
# - Read in stata file
# - Create the "quarter" variable
# - Apply the $\kappa$ calculations period by period
# - Save the output to a new parquet file
# - Write a Stata file.
# %%
# read in, create quarter and drop kappa_ff
# NOTE(review): process_df is not defined in this file section -- it is
# expected to be defined above (reads parquet, computes per-quarter kappas).
df_cereal = process_df(f_cereal)
# Clean up airlines a bit more
df_airlines = process_df(f_airlines)
df_airlines = df_airlines[df_airlines.kappa < 4].copy()
df_firms = pd.read_parquet(f_firm_info)
# Banks: commercial banks are SIC code 6021.
df_firms2 = df_firms.loc[df_firms['siccd'] ==
                         6021, ['permno', 'quarter', 'comnam']].copy()
df_k = pd.read_parquet(f_kappas)
# Keep only off-diagonal pairs and restrict both 'from' and 'to' to banks.
df_banks = pd.merge(pd.merge(df_k[df_k['from'] != df_k['to']], df_firms2, left_on=['quarter', 'from'], right_on=['quarter', 'permno']),
                    df_firms2, left_on=['quarter', 'to'], right_on=['quarter', 'permno'])
# %%
# Median pairwise kappa per quarter for each industry, side by side.
df_tot = pd.concat([df_cereal.groupby(['quarter'])['kappa'].median(), df_airlines.groupby(
    ['quarter'])['kappa'].median(), df_banks.groupby(['quarter'])['kappa'].median()], axis=1)
# %%
# Plot post-1999 medians and save Figure 16.
df_tot[df_tot.index >
       '1999-01-01'].plot(figsize=(20, 10), color=['navy', 'maroon', 'darkgreen'])
plt.legend(['RTE Cereal', 'Airlines', 'Banks'])
plt.ylabel(r"Median Pairwise Profit Weights $(\kappa)$")
plt.xlabel("")
plt.ylim(0, 1)
plt.savefig(fig_both, bbox_inches='tight')
| 30.28169 | 135 | 0.686047 | # %%
# %%
import pandas as pd
import numpy as np
import pathlib
import matplotlib
import matplotlib.pyplot as plt
from our_plot_config import derived_dir, fig_dir, raw_dir, setplotstyle
from kappas import do_one_period
setplotstyle()
# %%
# Input files
# NOTE(review): raw_dir / derived_dir / fig_dir support the '/' operator,
# so they are presumably pathlib.Path objects from our_plot_config -- confirm.
f_cereal = raw_dir / 'cereal.parquet'
f_airlines = raw_dir / 'airlines.parquet'
f_firm_info = derived_dir / 'firm-info.parquet'
f_kappas = derived_dir / 'official-kappas.parquet'
# Figure outputs
fig_both = fig_dir / 'figure16_airlines_cereal_banks.pdf'
# %%
# ### Read in the (Cleaned) Parquet File of Beta's
# - Read in stata file
# - Create the "quarter" variable
# - Apply the $\kappa$ calculations period by period
# - Save the output to a new parquet file
# - Write a Stata file.
# %%
# read in, create quarter and drop kappa_ff
def process_df(fn):
    """Load a holdings parquet file and return per-quarter pairwise kappas.

    Tags each row with a 'quarter' timestamp parsed from ``rdate``
    (YYYYMMDD), drops rows with beta >= 0.5, applies ``do_one_period``
    per quarter, removes self-pairs (from == to), and reindexes.
    """
    holdings = pd.read_parquet(fn)
    holdings['quarter'] = pd.to_datetime(holdings.rdate, format='%Y%m%d')
    kappas = holdings[holdings.beta < 0.5].groupby(['quarter']).apply(do_one_period)
    kappas = kappas[kappas['from'] != kappas['to']]
    return kappas.reset_index(drop=True)
df_cereal = process_df(f_cereal)
# Clean up airlines a bit more
df_airlines = process_df(f_airlines)
df_airlines = df_airlines[df_airlines.kappa < 4].copy()
df_firms = pd.read_parquet(f_firm_info)
# Banks: commercial banks are SIC code 6021.
df_firms2 = df_firms.loc[df_firms['siccd'] ==
                         6021, ['permno', 'quarter', 'comnam']].copy()
df_k = pd.read_parquet(f_kappas)
# Keep only off-diagonal pairs and restrict both 'from' and 'to' to banks.
df_banks = pd.merge(pd.merge(df_k[df_k['from'] != df_k['to']], df_firms2, left_on=['quarter', 'from'], right_on=['quarter', 'permno']),
                    df_firms2, left_on=['quarter', 'to'], right_on=['quarter', 'permno'])
# %%
# Median pairwise kappa per quarter for each industry, side by side.
df_tot = pd.concat([df_cereal.groupby(['quarter'])['kappa'].median(), df_airlines.groupby(
    ['quarter'])['kappa'].median(), df_banks.groupby(['quarter'])['kappa'].median()], axis=1)
# %%
# Plot post-1999 medians and save Figure 16.
df_tot[df_tot.index >
       '1999-01-01'].plot(figsize=(20, 10), color=['navy', 'maroon', 'darkgreen'])
plt.legend(['RTE Cereal', 'Airlines', 'Banks'])
plt.ylabel(r"Median Pairwise Profit Weights $(\kappa)$")
plt.xlabel("")
plt.ylim(0, 1)
plt.savefig(fig_both, bbox_inches='tight')
| 273 | 0 | 23 |
fd53c1c1df9cfa4e457e6510b0e6228f6d83ad69 | 722 | py | Python | python/popart.ir/python_files/module.py | gglin001/popart | 3225214343f6d98550b6620e809a3544e8bcbfc6 | [
"MIT"
] | 61 | 2020-07-06T17:11:46.000Z | 2022-03-12T14:42:51.000Z | python/popart.ir/python_files/module.py | gglin001/popart | 3225214343f6d98550b6620e809a3544e8bcbfc6 | [
"MIT"
] | 1 | 2021-02-25T01:30:29.000Z | 2021-11-09T11:13:14.000Z | python/popart.ir/python_files/module.py | gglin001/popart | 3225214343f6d98550b6620e809a3544e8bcbfc6 | [
"MIT"
] | 6 | 2020-07-15T12:33:13.000Z | 2021-11-07T06:55:00.000Z | # Copyright (c) 2021 Graphcore Ltd. All rights reserved.
class Module:
    """
    Callable class from which user-defined layers can inherit.
    The #build method should be overridden and should build the subgraph.
    The benefit of inheriting from this class rather than passing a function is
    that you can save input tensors as fields on `self`, then later when you call
    the subgraph, you can pass a mapping from the input tensor ids to the
    corresponding parent tensor you wish to pass.
    """
    # NOTE(review): no methods appear in this copy of the class -- the
    # __call__/build implementations are presumably defined elsewhere.
| 36.1 | 79 | 0.709141 | # Copyright (c) 2021 Graphcore Ltd. All rights reserved.
class Module:
    """
    Callable base class from which user-defined layers can inherit.

    Subclasses override ``build`` to construct their subgraph.  Compared
    with passing a plain function, inheriting lets you stash input
    tensors as attributes on ``self`` and later, when calling the
    subgraph, supply a mapping from those input tensor ids to the parent
    tensors you wish to pass.
    """

    def __call__(self, *args, **kwargs):
        # Invoking the module simply delegates to the subclass's build().
        result = self.build(*args, **kwargs)
        return result

    def build(self, *args, **kwargs):
        # Must be overridden; the base implementation always rejects.
        raise NotImplementedError(
            "Your popart.ir.Module must override `build` method")
| 171 | 0 | 54 |
d101b444025d87e58fca07c5c91baf0669f48075 | 4,736 | py | Python | benchmarks/oxuva/examples/track.py | willtwr/iSiam-TF | c9c1d0f49cc80f7a5549155fb220b6dce6b6516b | [
"MIT"
] | 2 | 2020-03-28T20:48:34.000Z | 2021-09-22T02:54:12.000Z | benchmarks/oxuva/examples/track.py | willtwr/iSiam-TF | c9c1d0f49cc80f7a5549155fb220b6dce6b6516b | [
"MIT"
] | 1 | 2020-03-28T20:59:30.000Z | 2020-04-03T20:09:53.000Z | benchmarks/oxuva/examples/track.py | willtwr/iSiam-TF | c9c1d0f49cc80f7a5549155fb220b6dce6b6516b | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import cv2
import os
import time
import oxuva
from scripts import *
# Script entry point.  NOTE(review): `main` is not defined in this section;
# presumably it is provided by the `from scripts import *` above -- confirm
# that the scripts package exports it.
if __name__ == '__main__':
    main()
| 34.318841 | 90 | 0.636824 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import cv2
import os
import time
import oxuva
from scripts import *
def main():
    """Run the chosen tracker over every task in an OxUvA split and write
    one prediction CSV per (video, object) into predictions_dir.

    NOTE: this file uses Python 2 syntax (octal 0755, print statements).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('data_dir')
    parser.add_argument('predictions_dir')
    parser.add_argument('--data', default='dev')
    parser.add_argument('--verbose', '-v', action='store_true')
    parser.add_argument('--tracker', default='TLD')
    # Parsed options are stored in a module-level global so that the
    # Tracker class below can read args.verbose.
    global args
    args = parser.parse_args()
    tracker_id = 'cv' + args.tracker
    tracker_preds_dir = os.path.join(args.predictions_dir, args.data, tracker_id)
    if not os.path.exists(tracker_preds_dir):
        os.makedirs(tracker_preds_dir, 0755)
    tasks_file = os.path.join(args.data_dir, 'tasks', args.data + '.csv')
    with open(tasks_file, 'r') as fp:
        tasks = oxuva.load_dataset_tasks_csv(fp)
    # tracks_file = os.path.join(args.data_dir, 'annotations', args.data + '.csv')
    # with open(tracks_file, 'r') as fp:
    #     tracks = oxuva.load_annotations_csv(fp)
    # tasks = {key: oxuva.make_task_from_track(track) for key, track in tracks.items()}
    # Builds the path of frame t of video vid (frames are 6-digit jpegs).
    imfile = lambda vid, t: os.path.join(
        args.data_dir, 'images', args.data, vid, '{:06d}.jpeg'.format(t))
    tracker = Tracker(tracker_type=args.tracker)
    for key, task in tasks.items():
        vid, obj = key
        if args.verbose:
            print vid, obj
        preds_file = os.path.join(tracker_preds_dir, '{}_{}.csv'.format(vid, obj))
        # Skip tasks that already have predictions (allows resuming).
        if os.path.exists(preds_file):
            continue
        tracker.init(imfile(vid, task.init_time), task.init_rect)
        preds = oxuva.SparseTimeSeries()
        start = time.time()
        for t in range(task.init_time + 1, task.last_time + 1):
            preds[t] = tracker.next(imfile(vid, t))
        dur_sec = time.time() - start
        if args.verbose:
            fps = (task.last_time - task.init_time + 1) / dur_sec
            print('fps {:.3g}'.format(fps))
        # Write to a temp file then rename, so readers never see a
        # half-written prediction CSV.
        tmp_preds_file = os.path.join(tracker_preds_dir, '{}_{}.csv.tmp'.format(vid, obj))
        with open(tmp_preds_file, 'w') as fp:
            oxuva.dump_predictions_csv(vid, obj, preds, fp)
        os.rename(tmp_preds_file, preds_file)
class Tracker:
    """Wraps an OpenCV-style tracker, converting between the relative
    (0..1) rectangle dicts used by oxuva and absolute pixel tuples."""

    def __init__(self, tracker_type):
        # Underlying tracker instance, built by create_tracker().
        self._tracker = create_tracker(tracker_type)

    def init(self, imfile, rect):
        """Initialize tracking on the first frame with a relative rect."""
        frame = cv2.imread(imfile, cv2.IMREAD_COLOR)
        height, width, _ = frame.shape
        if args.verbose:
            print('image size', '{}x{}'.format(width, height))
        cvrect = rect_to_opencv(rect, imsize_hw=(height, width))
        ok = self._tracker.init(frame, cvrect)
        assert ok

    def next(self, imfile):
        """Advance to the next frame and return an oxuva prediction."""
        frame = cv2.imread(imfile, cv2.IMREAD_COLOR)
        height, width, _ = frame.shape
        ok, cvrect = self._tracker.update(frame)
        if not ok:
            # Tracker lost the target: report absent with zero confidence.
            return oxuva.make_prediction(present=False, score=0.0)
        rect = rect_from_opencv(cvrect, imsize_hw=(height, width))
        return oxuva.make_prediction(present=True, score=1.0, **rect)
def create_tracker(tracker_type):
    """Construct a tracker object for the given type name.

    For OpenCV builds with minor version < 3 this delegates to the generic
    cv2.Tracker_create factory.  For newer OpenCV only 'iSiam' is wired up
    here (the legacy cv2 trackers were disabled below); any other type now
    raises ValueError instead of crashing with UnboundLocalError at the
    final return -- note the script's default '--tracker TLD' falls in
    this unsupported set.
    """
    # https://www.learnopencv.com/object-tracking-using-opencv-cpp-python/
    major_ver, minor_ver, subminor_ver = cv2.__version__.split('.')
    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'iSiam':
            tracker = iSiam_tracker()
        else:
            # Disabled legacy OpenCV trackers that used to be selectable:
            #   cv2.TrackerBoosting_create(), cv2.TrackerMIL_create(),
            #   cv2.TrackerKCF_create(), cv2.TrackerTLD_create(),
            #   cv2.TrackerMedianFlow_create(), cv2.TrackerGOTURN_create()
            raise ValueError('unsupported tracker type: {}'.format(tracker_type))
    return tracker
def rect_to_opencv(rect, imsize_hw):
    """Convert a relative {xmin, ymin, xmax, ymax} rect to the absolute
    (x, y, width, height) tuple expected by OpenCV trackers."""
    height, width = imsize_hw
    left = rect['xmin'] * width
    top = rect['ymin'] * height
    right = rect['xmax'] * width
    bottom = rect['ymax'] * height
    return (left, top, right - left, bottom - top)
def rect_from_opencv(rect, imsize_hw):
    """Convert an absolute OpenCV (x, y, width, height) tuple back into a
    relative {xmin, ymin, xmax, ymax} dict."""
    height, width = imsize_hw
    left, top, box_w, box_h = rect
    return {
        'xmin': left / width,
        'ymin': top / height,
        'xmax': (left + box_w) / width,
        'ymax': (top + box_h) / height,
    }
# Run the tracker over all tasks when invoked as a script.
if __name__ == '__main__':
    main()
| 4,308 | -7 | 195 |
47ab914c592973fc14facd627639938d32460452 | 908 | py | Python | test.py | Gibbsdavidl/naive_tree | 02f69b83b97832a33e5008f85cae50d9286415cc | [
"MIT"
] | null | null | null | test.py | Gibbsdavidl/naive_tree | 02f69b83b97832a33e5008f85cae50d9286415cc | [
"MIT"
] | 1 | 2021-08-10T22:19:22.000Z | 2021-08-10T22:19:22.000Z | test.py | Gibbsdavidl/naive_tree | 02f69b83b97832a33e5008f85cae50d9286415cc | [
"MIT"
] | null | null | null | # Import classes from your brand new package
from naive_tree import GeneTree
from naive_tree import GeneNetwork
# Build a GeneTree and exercise its network-construction API.
myTree = GeneTree()
myTree.build_network()
myTree.print_gene_network_summary()
myTree.gene_network.print_test_edge(4140)
# then get network between source and list-of-leaves.
vs = myTree.get_subcomponent(source='CXCL10', target=['PTGDR2','PTGDR'])
print('subcomponent length')
print(len(vs))
# and we can prune the tree to just nodes
# reachable from the source
myTree.prune_tree(vs)
# doing a search starting from the root to make sure we can
# reach all nodes
# NOTE(review): pruned_graph looks like an igraph Graph (bfs/vs.find) --
# confirm against the naive_tree package.
vs = myTree.pruned_graph.bfs(
    myTree.pruned_graph.vs.find(name='CXCL10').index,
    mode='out')
print('len vs: ' + str(len(vs[0])))
# and we can create a spanning tree based on
# edge weights.
myTree.get_spanning_tree()
myTree.compute_conditionals() | 28.375 | 72 | 0.748899 | # Import classes from your brand new package
from naive_tree import GeneTree
from naive_tree import GeneNetwork
# Build a GeneTree and exercise its network-construction API.
myTree = GeneTree()
myTree.build_network()
myTree.print_gene_network_summary()
myTree.gene_network.print_test_edge(4140)
# then get network between source and list-of-leaves.
vs = myTree.get_subcomponent(source='CXCL10', target=['PTGDR2','PTGDR'])
print('subcomponent length')
print(len(vs))
# and we can prune the tree to just nodes
# reachable from the source
myTree.prune_tree(vs)
# doing a search starting from the root to make sure we can
# reach all nodes
# NOTE(review): pruned_graph looks like an igraph Graph (bfs/vs.find) --
# confirm against the naive_tree package.
vs = myTree.pruned_graph.bfs(
    myTree.pruned_graph.vs.find(name='CXCL10').index,
    mode='out')
print('len vs: ' + str(len(vs[0])))
# and we can create a spanning tree based on
# edge weights.
myTree.get_spanning_tree()
myTree.compute_conditionals() | 0 | 0 | 0 |
cba575fe3c4d860e5b0b8717de6d7e6e18647762 | 7,080 | py | Python | codes/dgmpm_stability/CFL_comparison_random.py | adRenaud/research | 2f0062a1800d7a17577bbfc2393b084253d567f4 | [
"MIT"
] | 1 | 2021-06-18T14:52:03.000Z | 2021-06-18T14:52:03.000Z | codes/dgmpm_stability/CFL_comparison_random.py | adRenaud/research | 2f0062a1800d7a17577bbfc2393b084253d567f4 | [
"MIT"
] | 1 | 2019-01-07T13:11:11.000Z | 2019-01-07T13:11:11.000Z | codes/dgmpm_stability/CFL_comparison_random.py | adRenaud/research | 2f0062a1800d7a17577bbfc2393b084253d567f4 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import numpy as np
from scipy import optimize
from sympy import *
import matplotlib.pyplot as plt
import pdb
import random
import os
# Symbolic function to evaluate shape functions
# NOTE(review): the helpers used below (Rand, RandPosition, residual,
# gridSearch, export2DTeXFile) are not defined in this section and are
# expected to be defined above.  This is Python 2 code (print statements).
shape_functions=lambda x: np.matrix([(1-x)/DX,x/DX])
xn = np.array([0.,1.])
DX = 1.
## required for plotting residual
CFL=np.linspace(0.,1.,100.)
# Draw `samples` random material-point configurations (1-4 points per
# cell) for both the previous and the current cell.
samples=1000
number_prev = Rand(1, 4, samples)
position_prev = RandPosition(number_prev)
number_curr = Rand(1, 4, samples)
position_curr = RandPosition(number_curr)
# Computing the critical CFL is expensive, so results are cached to .npy
# files; rerun after deleting eulerRandom.npy to recompute.
if not os.path.exists('eulerRandom.npy'):
    eulerSolution=[]
    rk2Solution=[]
    eulerSolution_id=[]
    rk2Solution_id=[]
    for i in range(samples):
        print "Computing critical CFL for sample ",i,": ",number_curr[i]," particles"
        shapes_prev=shape_functions(position_prev[i])
        shapes_curr=shape_functions(position_curr[i])
        solution_euler=[]
        solution_rk2=[]
        solution_euler_id=[]
        solution_rk2_id=[]
        # Per material point: critical CFL for Euler (order 1) and RK2
        # (order 2), with the previous cell either different ("non-
        # periodic") or identical to the current cell ("_id", periodic).
        for k in range(number_curr[i]):
            # if number_curr[i]<number_prev[i] :
            # print "Attention ca va merder !!!!!!"
            # else:
            # print "Ca va le faire..."
            res=residual(k,position_curr[i],position_prev[i],1)
            solution_euler.append(gridSearch(res))
            res=residual(k,position_curr[i],position_curr[i],1)
            solution_euler_id.append(gridSearch(res))
            res=residual(k,position_curr[i],position_prev[i],2)
            solution_rk2.append(gridSearch(res))
            res=residual(k,position_curr[i],position_curr[i],2)
            solution_rk2_id.append(gridSearch(res))
        # The cell's critical CFL is the most restrictive point's value.
        eulerSolution.append(min(solution_euler))
        rk2Solution.append(min(solution_rk2))
        eulerSolution_id.append(min(solution_euler_id))
        rk2Solution_id.append(min(solution_rk2_id))
    np.save('eulerRandom.npy',eulerSolution)
    np.save('rk2Random.npy',rk2Solution)
    np.save('eulerRandom_id.npy',eulerSolution_id)
    np.save('rk2Random_id.npy',rk2Solution_id)
else :
    eulerSolution=np.load('eulerRandom.npy')
    rk2Solution=np.load('rk2Random.npy')
    eulerSolution_id=np.load('eulerRandom_id.npy')
    rk2Solution_id=np.load('rk2Random_id.npy')
import statistics
print "Mean CFL for euler periodic: ", statistics.mean(eulerSolution_id)
print "Mean CFL for euler non-periodic: ", statistics.mean(eulerSolution)
print "Mean CFL for rk2 periodic: ", statistics.mean(rk2Solution_id)
print "Mean CFL for rk2 non-periodic: ", statistics.mean(rk2Solution)
print " "
print "Median CFL for euler periodic: ", statistics.median(eulerSolution_id)
print "Median CFL for euler non-periodic: ", statistics.median(eulerSolution)
print "Median CFL for rk2 periodic: ", statistics.median(rk2Solution_id)
print "Median CFL for rk2 non-periodic: ", statistics.median(rk2Solution)
pdb.set_trace()
# Histogram the per-sample critical CFLs (10 bins on [0,1]) and export
# normalized densities as pgfplots TeX figures.
barsEuler=np.histogram(eulerSolution,bins=np.linspace(0.,1.,11))
barsRk2=np.histogram(rk2Solution,bins=np.linspace(0.,1.,11))
export2DTeXFile('cflStatistics.tex',barsEuler[1],np.array([barsEuler[0]/float(samples),barsRk2[0]/float(samples)]),['Euler','RK2'])
barsEuler2=np.histogram(eulerSolution_id,bins=np.linspace(0.,1.,11))
barsRk22=np.histogram(rk2Solution_id,bins=np.linspace(0.,1.,11))
pdb.set_trace()
export2DTeXFile('cflStatistics_id.tex',barsEuler2[1],np.array([barsEuler2[0]/float(samples),barsRk22[0]/float(samples)]),['Euler','RK2'])
| 36.307692 | 232 | 0.642655 | #!/usr/bin/python
import numpy as np
from scipy import optimize
from sympy import *
import matplotlib.pyplot as plt
import pdb
import random
import os
def export2DTeXFile(fileName, bins, fields, *kwargs):
    """Write a pgfplots ybar-interval histogram figure to *fileName*.

    Parameters
    ----------
    fileName : str    -- output .tex path
    bins     : array  -- histogram bin edges (x coordinates)
    fields   : 2-D array, one row per curve -- bar heights per bin
    *kwargs  : the first extra positional argument is the list of legend
               labels, one per row of ``fields``.

    Fixes: the output file is now closed even on exception (with-block),
    and the unused locals (n_labels, marker, size, line) -- n_labels also
    crashed when no labels were passed -- were removed.  Output bytes are
    unchanged.
    """
    n_fields = np.shape(fields)[0]
    # Define Paul Tol's colors (purple to red); extra entries allow more
    # curves than currently used.
    color = ['Blue', 'Red', 'Green', 'Red', 'black', 'black', 'black']
    labels = kwargs[0]
    with open(fileName, 'w') as TeXFile:
        TeXFile.write(r'\begin{tikzpicture}')
        TeXFile.write('\n')
        TeXFile.write(r'\begin{axis}[xlabel=CFL,x label style={at={(axis description cs:0.5,-0.25)}},ylabel=Density,legend pos = north west,ybar interval=0.7,xticklabel interval boundaries,x tick label style = {rotate=90,anchor=east}]')
        TeXFile.write('\n')
        TeXFile.write('%%%%%%%%%%% EULER SOLUTION')
        TeXFile.write('\n')
        for i in range(n_fields):
            TeXFile.write(r'\addplot[' + str(color[i]) + ',fill=' + str(color[i]) + '] coordinates {')
            for j in range(len(fields[i, :])):
                TeXFile.write('(' + str(bins[j]) + ',' + str(fields[i, j]) + ') ')
            # Close the last ybar interval with a zero-height point at CFL=1.
            TeXFile.write('(1.,0.) ')
            TeXFile.write('};\n')
            TeXFile.write(r'\addlegendentry{' + str(labels[i]) + '}; \n')
            if i == 0:
                TeXFile.write('%%%%%%%%%%% RK2 SOLUTION')
                TeXFile.write('\n')
        TeXFile.write(r'\end{axis}')
        TeXFile.write('\n')
        TeXFile.write('\end{tikzpicture}')
        TeXFile.write('\n')
        TeXFile.write('%%% Local Variables:')
        TeXFile.write('\n')
        TeXFile.write('%%% mode: latex')
        TeXFile.write('\n')
        TeXFile.write('%%% TeX-master: "../manuscript"')
        TeXFile.write('\n')
        TeXFile.write('%%% End:')
        TeXFile.write('\n')
def residual(point,shapes,shapes_prev,t_order):
    """Build the DGMPM amplification-factor residual for one material point.

    Sums |D_mu| contributions over the material points of the current cell
    (`shapes`) and of the previous cell (`shapes_prev`), symbolically in
    the CFL number, and returns a numeric function CFL -> (Res - 1).
    The stability limit is then the largest CFL with Res - 1 <= 0.

    point   -- index of the material point of interest in the current cell
    shapes  -- per-point values in [0,1]; S1 = value, S2 = 1 - value
               (NOTE(review): callers pass raw particle positions, which
               with the linear shape functions and DX=1 coincide with the
               second shape-function value -- confirm.)
    t_order -- 1 for forward Euler, 2 adds the RK2 (CFL**2) terms
    """
    CFL = symbols('CFL')
    Res=0.
    Nmp=len(shapes)
    Nmpp=len(shapes_prev)
    # Shape values and their sums for current (S) and previous (Sp) cells.
    S1=shapes ; Sum1=np.sum(S1)
    S2=1.-shapes; Sum2=np.sum(S2)
    Sp1=shapes_prev ; Sump1=np.sum(Sp1)
    Sp2=1.-shapes_prev ; Sump2=np.sum(Sp2)
    # Sum over material points in curent cell
    for k in range(Nmp):
        ## First order contributions
        D_mu = S1[k]*S1[point]/Sum1 + S2[k]*S2[point]/Sum2 + CFL*( S2[point]/Sum2 - S1[point]/Sum1 -Nmp*S2[k]*S2[point]/(Sum2**2) )
        ## Second order contributions
        if t_order==2:
            D_mu += 0.5*Nmp*(CFL**2)*((S2[k]/Sum2)*(S1[point]/Sum1-S2[point]/Sum2) + (S2[point]/Sum2)*(Nmp*S2[k]/Sum2-1.)/Sum2)
        Res = Res +np.abs(D_mu)
    # Sum over material points in previous cell
    for k in range(Nmpp):
        ## First order contributions
        D_mu = CFL*Nmp*Sp2[k]*S1[point]/(Sum1*Sump2)
        ## Second order contributions
        if t_order==2:
            D_mu +=0.5*Nmp*(CFL**2)*( S1[point]/(Sum1*Sump2)*(1-(Nmpp)*Sp2[k]/Sump2) -(Sp2[k]/Sump2)*(S1[point]/Sum1-S2[point]/Sum2) )
        Res=Res + np.abs(D_mu)
    # Compile the symbolic expression to a fast numeric callable of CFL.
    Residual = lambdify((CFL),Res-1.)
    return Residual
def gridSearch(function, tol=1.e-7):
    """Scan CFL values from 1 down to 0 on a uniform 100000-point grid and
    return the largest value at which ``function`` drops below *tol*.

    Returns 0. when the residual never falls below the tolerance.
    """
    samples = 100000
    grid = np.linspace(0., 1., samples)
    for value in grid[::-1]:
        if function(value) < tol:
            return value
    return 0.
def Rand(start, end, num):
    """Return *num* random integers drawn uniformly from [start, end]
    (both bounds inclusive, via random.randint) as a numpy array.

    Uses the global ``random`` module state, so results are reproducible
    after ``random.seed(...)``.
    """
    # Comprehension instead of the former append loop; same randint
    # call sequence, so seeded results are unchanged.
    return np.asarray([random.randint(start, end) for _ in range(num)])
def RandPosition(numberOfPoints):
    """For each count n in *numberOfPoints*, return a length-n numpy array
    of uniform random positions drawn from [0, 1] with random.uniform.

    Uses the global ``random`` module state; returns a list of float64
    arrays, one per entry of *numberOfPoints*.
    """
    # Comprehensions instead of zeros-then-fill; same uniform() call
    # sequence, so seeded results are unchanged.
    return [np.array([random.uniform(0., 1.) for _ in range(nPoints)])
            for nPoints in numberOfPoints]
# Symbolic function to evaluate shape functions
# NOTE: this is Python 2 code (print statements below).
shape_functions=lambda x: np.matrix([(1-x)/DX,x/DX])
xn = np.array([0.,1.])
DX = 1.
## required for plotting residual
CFL=np.linspace(0.,1.,100.)
# Draw `samples` random material-point configurations (1-4 points per
# cell) for both the previous and the current cell.
samples=1000
number_prev = Rand(1, 4, samples)
position_prev = RandPosition(number_prev)
number_curr = Rand(1, 4, samples)
position_curr = RandPosition(number_curr)
# Computing the critical CFL is expensive, so results are cached to .npy
# files; rerun after deleting eulerRandom.npy to recompute.
if not os.path.exists('eulerRandom.npy'):
    eulerSolution=[]
    rk2Solution=[]
    eulerSolution_id=[]
    rk2Solution_id=[]
    for i in range(samples):
        print "Computing critical CFL for sample ",i,": ",number_curr[i]," particles"
        shapes_prev=shape_functions(position_prev[i])
        shapes_curr=shape_functions(position_curr[i])
        solution_euler=[]
        solution_rk2=[]
        solution_euler_id=[]
        solution_rk2_id=[]
        # Per material point: critical CFL for Euler (order 1) and RK2
        # (order 2), with the previous cell either different ("non-
        # periodic") or identical to the current cell ("_id", periodic).
        for k in range(number_curr[i]):
            # if number_curr[i]<number_prev[i] :
            # print "Attention ca va merder !!!!!!"
            # else:
            # print "Ca va le faire..."
            res=residual(k,position_curr[i],position_prev[i],1)
            solution_euler.append(gridSearch(res))
            res=residual(k,position_curr[i],position_curr[i],1)
            solution_euler_id.append(gridSearch(res))
            res=residual(k,position_curr[i],position_prev[i],2)
            solution_rk2.append(gridSearch(res))
            res=residual(k,position_curr[i],position_curr[i],2)
            solution_rk2_id.append(gridSearch(res))
        # The cell's critical CFL is the most restrictive point's value.
        eulerSolution.append(min(solution_euler))
        rk2Solution.append(min(solution_rk2))
        eulerSolution_id.append(min(solution_euler_id))
        rk2Solution_id.append(min(solution_rk2_id))
    np.save('eulerRandom.npy',eulerSolution)
    np.save('rk2Random.npy',rk2Solution)
    np.save('eulerRandom_id.npy',eulerSolution_id)
    np.save('rk2Random_id.npy',rk2Solution_id)
else :
    eulerSolution=np.load('eulerRandom.npy')
    rk2Solution=np.load('rk2Random.npy')
    eulerSolution_id=np.load('eulerRandom_id.npy')
    rk2Solution_id=np.load('rk2Random_id.npy')
import statistics
print "Mean CFL for euler periodic: ", statistics.mean(eulerSolution_id)
print "Mean CFL for euler non-periodic: ", statistics.mean(eulerSolution)
print "Mean CFL for rk2 periodic: ", statistics.mean(rk2Solution_id)
print "Mean CFL for rk2 non-periodic: ", statistics.mean(rk2Solution)
print " "
print "Median CFL for euler periodic: ", statistics.median(eulerSolution_id)
print "Median CFL for euler non-periodic: ", statistics.median(eulerSolution)
print "Median CFL for rk2 periodic: ", statistics.median(rk2Solution_id)
print "Median CFL for rk2 non-periodic: ", statistics.median(rk2Solution)
pdb.set_trace()
# Histogram the per-sample critical CFLs (10 bins on [0,1]) and export
# normalized densities as pgfplots TeX figures.
barsEuler=np.histogram(eulerSolution,bins=np.linspace(0.,1.,11))
barsRk2=np.histogram(rk2Solution,bins=np.linspace(0.,1.,11))
export2DTeXFile('cflStatistics.tex',barsEuler[1],np.array([barsEuler[0]/float(samples),barsRk2[0]/float(samples)]),['Euler','RK2'])
barsEuler2=np.histogram(eulerSolution_id,bins=np.linspace(0.,1.,11))
barsRk22=np.histogram(rk2Solution_id,bins=np.linspace(0.,1.,11))
pdb.set_trace()
export2DTeXFile('cflStatistics_id.tex',barsEuler2[1],np.array([barsEuler2[0]/float(samples),barsRk22[0]/float(samples)]),['Euler','RK2'])
| 3,547 | 0 | 115 |
b40a82670ea346149aaf3640009134ba1053fa90 | 37,795 | py | Python | ag/orbit/node/db.py | AlphaGriffin/orbit-node | 6e330a2734a6a5dfbb52d984fe0b2f8dff4755cd | [
"MIT"
] | null | null | null | ag/orbit/node/db.py | AlphaGriffin/orbit-node | 6e330a2734a6a5dfbb52d984fe0b2f8dff4755cd | [
"MIT"
] | null | null | null | ag/orbit/node/db.py | AlphaGriffin/orbit-node | 6e330a2734a6a5dfbb52d984fe0b2f8dff4755cd | [
"MIT"
] | null | null | null | # Copyright (C) 2018 Alpha Griffin
# @%@~LICENSE~@%@
from . import config, TokenError
from bitcash.format import public_key_to_address
from os import path
import sqlite3
from hashlib import sha256
| 41.854928 | 126 | 0.485673 | # Copyright (C) 2018 Alpha Griffin
# @%@~LICENSE~@%@
from . import config, TokenError
from bitcash.format import public_key_to_address
from os import path
import sqlite3
from hashlib import sha256
class TokenDB:
    def __init__(self, auto_commit=True):
        """Open (creating on first use) the token database at <config.dir>/tokens.db.

        auto_commit -- True: sqlite3 default isolation (writes commit as they
        go); False: EXCLUSIVE isolation, callers must commit() explicitly.
        """
        isolation = 'EXCLUSIVE' if not auto_commit else None
        conn = sqlite3.connect(path.join(config.dir, 'tokens.db'), isolation_level=isolation)
        # simple key/value bookkeeping store (currently only the scan 'height')
        conn.execute('''CREATE TABLE IF NOT EXISTS status (
                key TEXT NOT NULL PRIMARY KEY,
                value TEXT
            )''')
        #
        # Block and tx data
        #
        conn.execute('''CREATE TABLE IF NOT EXISTS block (
                hash TEXT NOT NULL PRIMARY KEY,
                height INTEGER NOT NULL,
                orbit BLOB
            )''')
        conn.execute('''CREATE INDEX IF NOT EXISTS idx_block_height ON block (height)''')
        conn.execute('''CREATE TABLE IF NOT EXISTS tx (
                hash TEXT NOT NULL PRIMARY KEY,
                block INTEGER NOT NULL,
                confirmations INTEGER NOT NULL
            )''')
        conn.execute('''CREATE INDEX IF NOT EXISTS idx_tx_block ON tx (block)''')
        conn.execute('''CREATE TABLE IF NOT EXISTS txin (
                hash TEXT NOT NULL PRIMARY KEY,
                tx INTEGER NOT NULL,
                asmhex TEXT NOT NULL
            )''')
        conn.execute('''CREATE INDEX IF NOT EXISTS idx_txin_tx ON txin (tx)''')
        conn.execute('''CREATE TABLE IF NOT EXISTS txout (
                tx INTEGER NOT NULL,
                value INTEGER NOT NULL,
                type TEXT NOT NULL,
                addresses TEXT,
                asmhex TEXT NOT NULL
            )''')
        conn.execute('''CREATE INDEX IF NOT EXISTS idx_txout_tx ON txout (tx)''')
        #
        # Tokens and balances
        #
        conn.execute('''CREATE TABLE IF NOT EXISTS token (
                address TEXT NOT NULL PRIMARY KEY,
                tx INTEGER NOT NULL,
                created INTEGER NOT NULL,
                updated INTEGER NOT NULL,
                supply INTEGER NOT NULL,
                decimals INTEGER NOT NULL,
                symbol TEXT NOT NULL,
                name TEXT,
                main_uri TEXT,
                image_uri TEXT
            )''')
        conn.execute('''CREATE INDEX IF NOT EXISTS idx_token_symbol ON token (symbol)''')
        conn.execute('''CREATE INDEX IF NOT EXISTS idx_token_updated ON token (updated)''')
        conn.execute('''CREATE TABLE IF NOT EXISTS balance (
                address TEXT NOT NULL,
                token INTEGER NOT NULL,
                updated INTEGER NOT NULL,
                units INTEGER NOT NULL,
                available INTEGER NOT NULL,
                PRIMARY KEY (address, token)
            )''')
        conn.execute('''CREATE INDEX IF NOT EXISTS idx_balance_updated ON balance (updated)''')
        #
        # Events
        # TODO: will need to remove primary key from tx if we ever support multiple operations in one transaction
        #
        # NOTE(review): transfer rows carry no token reference; the token is
        # only recoverable through the tx -- confirm this is intended.
        conn.execute('''CREATE TABLE IF NOT EXISTS transfer (
                tx INTEGER NOT NULL PRIMARY KEY,
                created INTEGER NOT NULL,
                addr_from TEXT NOT NULL,
                addr_to TEXT NOT NULL,
                units INTEGER
            )''')
        conn.execute('''CREATE INDEX IF NOT EXISTS idx_transfer_created ON transfer (created)''')
        conn.execute('''CREATE TABLE IF NOT EXISTS advertisement (
                tx INTEGER NOT NULL PRIMARY KEY,
                token INTEGER NOT NULL,
                created INTEGER NOT NULL,
                updated INTEGER NOT NULL,
                finished INTEGER,
                begins INTEGER NOT NULL,
                ends INTEGER,
                delivers INTEGER NOT NULL,
                available INTEGER NOT NULL,
                claimed INTEGER NOT NULL,
                rate INTEGER,
                minimum INTEGER NOT NULL,
                maximum INTEGER NOT NULL,
                preregister TEXT NULL
            )''')
        conn.execute('''CREATE TABLE IF NOT EXISTS registration (
                tx INTEGER NOT NULL PRIMARY KEY,
                address TEXT NOT NULL,
                advertisement INTEGER NOT NULL,
                created INTEGER NOT NULL,
                updated INTEGER NOT NULL,
                finished INTEGER,
                maximum INTEGER NOT NULL,
                payments INTEGER NOT NULL,
                claimed INTEGER NOT NULL
            )''')
        # seed required status keys on first run
        keys = conn.execute('''SELECT key FROM status''').fetchall()
        self._init_status(conn, keys, 'height')
        conn.commit()
        self.conn = conn
    @classmethod
    def _init_status(self, conn, keys, key):
        # Seed a status row for `key` unless one of the pre-fetched `keys`
        # rows already matches.  NOTE(review): declared @classmethod but the
        # first parameter is named `self`; it receives the class object.
        for k in keys:
            if k[0] == key:
                return
        conn.execute('''INSERT INTO status (key) VALUES (?)''', (key,))
    def commit(self):
        # Flush pending writes on the shared connection.
        self.conn.commit()
    def close(self):
        # Close the underlying sqlite connection.
        self.conn.close()
    def _set_status(self, key, value):
        # Store a bookkeeping value; the row must already exist (see _init_status).
        self.conn.execute('''UPDATE status SET value = ? WHERE key = ?''', (value, key))
    def _get_status(self, key):
        # Read a bookkeeping value; raises TypeError if the key row is missing.
        return self.conn.execute('''SELECT value FROM status WHERE key = ?''', (key,)).fetchone()[0]
    def get_last_block(self):
        """Return the last scanned block height as an int, or None if never set."""
        height = self._get_status('height')
        if height is None:
            return None
        return int(height)
    def set_last_block(self, height):
        """Record the last scanned block height."""
        self._set_status('height', height)
def save_block(self, blockhash, height):
cursor = self.conn.cursor()
cursor.execute('''INSERT INTO block
(hash, height)
VALUES (?, ?)''',
(blockhash, height))
return cursor.lastrowid
    def save_tx(self, txhash, block, confirmations):
        """Record a transaction (block is the block table rowid); returns its rowid."""
        cursor = self.conn.cursor()
        cursor.execute('''INSERT INTO tx
                (hash, block, confirmations)
                VALUES (?, ?, ?)''',
                (txhash, block, confirmations))
        return cursor.lastrowid
    def save_txin(self, txhash, tx, asmhex):
        """Record a transaction input (asmhex is the hex-encoded script); returns its rowid."""
        cursor = self.conn.cursor()
        cursor.execute('''INSERT INTO txin
                (hash, tx, asmhex)
                VALUES (?, ?, ?)''',
                (txhash, tx, asmhex))
        return cursor.lastrowid
    def save_txout(self, tx, value, stype, addresses, asmhex):
        """Record a transaction output; returns its rowid."""
        cursor = self.conn.cursor()
        cursor.execute('''INSERT INTO txout
                (tx, value, type, addresses, asmhex)
                VALUES (?, ?, ?, ?, ?)''',
                (tx, value, stype, addresses, asmhex))
        return cursor.lastrowid
    def get_signer_address(self, txrow):
        """Return the single address whose key signed every input of tx `txrow`.

        Returns None when the tx has no inputs; raises ValueError when the
        inputs are signed by more than one key.
        """
        txins = self.conn.execute('''SELECT asmhex FROM txin WHERE tx = ?''', (txrow,)).fetchall()
        address = None
        for txin in txins:
            asmhex = txin[0]
            asm = bytes.fromhex(asmhex)
            # Parses the input script as <push><signature><push><pubkey>;
            # assumes a P2PKH-style scriptSig layout -- TODO confirm.
            sig_size = int.from_bytes(asm[0:1], 'little')
            pubkey_size = int.from_bytes(asm[sig_size+1:sig_size+2], 'little')
            pubkey = asm[sig_size + 2 : sig_size + pubkey_size + 2]
            pubkey_address = public_key_to_address(pubkey)
            if not address:
                address = pubkey_address
            elif address != pubkey_address:
                raise ValueError("Multiple signer keys are present in the transaction inputs")
        return address
    def token_create(self, address, tx, block, supply, decimals, symbol, name=None, main_uri=None, image_uri=None):
        """Define a new token at `address` and credit its full supply to that address.

        Returns the new token rowid; raises TokenError if a token is already
        defined at the address.
        """
        cursor = self.conn.cursor()
        try:
            cursor.execute('''INSERT INTO token
                    (address, tx, created, updated, supply, decimals, symbol, name, main_uri, image_uri)
                    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''',
                    (address, tx, block, block, supply, decimals, symbol, name, main_uri, image_uri))
            tokenrow = cursor.lastrowid
        except sqlite3.IntegrityError as e:
            raise TokenError("A token is already defined at this address: {}".format(e))
        # no try/except here... it's a critical error to be able to insert a token yet already have a balance for it
        cursor.execute('''INSERT INTO balance
                (address, token, updated, units, available)
                VALUES (?, ?, ?, ?, ?)''',
                (address, tokenrow, block, supply, supply))
        return tokenrow
def _get_tokenrow(self, cursor, address):
token = cursor.execute('''SELECT rowid FROM token WHERE address = ?''', (address,)).fetchone()
if token is None:
raise TokenError("No token defined at the specified address")
return token[0]
def _get_balance(self, cursor, tokenrow, address, total=False):
balance = cursor.execute('''SELECT units, available FROM balance
WHERE token = ? AND address = ?''',
(tokenrow, address)).fetchone()
if not balance:
return None
return balance[0] if total else balance[1]
    def token_transfer(self, address, txrow, blockrow, from_address, to_address, units):
        """Move `units` of the token at `address` from one holder to another.

        Validates the sender's *available* balance, adjusts both balance
        rows (creating the recipient's if needed), records a transfer event
        and returns its rowid.  Raises TokenError on validation failure.
        """
        cursor = self.conn.cursor()
        if from_address == to_address:
            raise TokenError("Transfer to address must be different than transfer from address")
        tokenrow = self._get_tokenrow(cursor, address)
        # validate source balance
        balance = self._get_balance(cursor, tokenrow, from_address)
        if balance is None:
            raise TokenError("No balance for this token")
        if balance < units:
            raise TokenError("Insufficient available balance for this transfer")
        # update source balance
        cursor.execute('''UPDATE balance
                SET updated = ?, units = units - ?, available = available - ?
                WHERE token = ? AND address = ?''',
                (blockrow, units, units, tokenrow, from_address))
        # update destination balance
        balance = self._get_balance(cursor, tokenrow, to_address)
        if balance is None:
            cursor.execute('''INSERT INTO balance
                    (address, token, updated, units, available)
                    VALUES (?, ?, ?, ?, ?)''',
                    (to_address, tokenrow, blockrow, units, units))
        else:
            cursor.execute('''UPDATE balance
                    SET updated = ?, units = units + ?, available = available + ?
                    WHERE token = ? AND address = ?''',
                    (blockrow, units, units, tokenrow, to_address))
        # save transfer event
        cursor.execute('''INSERT INTO transfer
                (tx, created, addr_from, addr_to, units)
                VALUES (?, ?, ?, ?, ?)''',
                (txrow, blockrow, from_address, to_address, units))
        return cursor.lastrowid
def token_advertise(self, address, txrow, blockrow, exchange_rate=None, units_avail=None, units_min=None, units_max=None,
block_begin=None, block_end=None, block_deliver=None, preregister=False):
cursor = self.conn.cursor()
tokenrow = self._get_tokenrow(cursor, address)
height = cursor.execute('''SELECT height FROM block WHERE rowid = ?''', (blockrow,)).fetchone()[0]
# block validation
if block_begin:
if block_begin <= height:
raise TokenError("Beginning block must occur after the advertisement block")
else:
block_begin = height + 1
if block_end:
if block_end < block_begin:
raise TokenError("Ending block must be on or after the beginning block")
if block_deliver:
if block_deliver < block_begin:
raise TokenError("Delivery block must be on or after the beginning block")
else:
block_deliver = block_begin
# existing advertisement checks
advertisement = cursor.execute('''SELECT 1 FROM advertisement
WHERE token = ? AND finished IS NULL AND begins <= ?
AND (ends IS NULL OR ends >= ?)
LIMIT 1''',
(tokenrow, block_begin, block_begin)).fetchone()
if advertisement:
raise TokenError("An existing advertisement is currently open")
advertisement = cursor.execute('''SELECT begins FROM advertisement
WHERE token = ? AND finished IS NULL AND begins > ?
ORDER BY begins LIMIT 1''',
(tokenrow, block_begins)).fetchone()
if advertisement:
if block_end:
if block_end >= advertisement[0]:
raise TokenError("An existing advertisement exists that begins before this one ends")
else:
raise TokenError("An existing advertisement begins in the future but this one has no ending block")
advertisement = cursor.execute("""SELECT 1 FROM advertisement
WHERE token = ? AND finished IS NULL AND begins > ? AND preregister = 'Y'""",
(tokenrow, block_begins)).fetchone()
if advertisement:
raise TokenError("An existing future advertisement allows preregistration")
# available balance validation and update
balance = self._get_balance(cursor, address)
if units_avail:
if balance < units_avail:
raise TokenError("Insufficient available balance to make available")
else:
units_avail = balance
if units_min:
if units_min > units_avail:
raise TokenError("Insufficient available balance for the specified minimum units")
else:
units_min = 1
if units_max:
# note that it's not an error if units_max > units_avail... this allows a per-user maximum to be
# set when units_avail might not be specified
pass
else:
units_max = units_avail
cursor.execute('''UPDATE balance
SET updated = ?, available = available - ?
WHERE token = ? AND address = ?''',
(blockrow, units_avail, tokenrow, address))
# save advertise event
cursor.execute('''INSERT INTO advertisement
(tx, token, created, updated, begins, ends, delivers, available, dispensed,
rate, minimum, maximum, preregister)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''',
(txrow, tokenrow, blockrow, blockrow, block_begin, block_end, block_deliver,
units_avail, 0, units_min, units_max,
'Y' if preregister else None))
return cursor.lastrowid
def token_advertise_cancel(self, address, txrow, blockrow, txhash):
cursor = self.conn.cursor()
tokenrow = self._get_tokenrow(cursor, address)
# validate advertisement
advertisement = cursor.execute('''SELECT a.rowid, a.token, a.finished, a.available, a.claimed
FROM tx
LEFT JOIN advertisement a ON a.tx = tx.rowid
WHERE tx.hash = ?''',
(txhash,)).fetchone()
if not advertisement:
raise TokenError("No advertisement exists for the given tx hash")
if tokenrow != advertisement[1]:
raise TokenError("Advertisement at the specified tx hash does not match the token indicated")
if advertisement[2] is not None:
raise TokenError("The advertisement has already finished")
# validate registrations
registrations = cursor.execute('''SELECT 1 FROM registration
WHERE advertisement = ?
LIMIT 1''',
(advertisement[0],))
if registrations:
#FIXME: just check that 'claimed' == 0 instead?
raise TokenError("There have already been registrations for this advertisement; it cannot be cancelled")
if advertisement[4] != 0:
raise ValueError("This advertisement indicates claims but no registrations were found")
# close advertisement and make balance available again
cursor.execute('''UPDATE advertisement
SET updated = ?, finished = ?
WHERE rowid = ?''',
(blockrow, blockrow, advertisement[0]))
cursor.execute('''UPDATE balance
SET updated = ?, available = available + ?
WHERE token = ? AND address = ?''',
(blockrow, advertisement[3], tokenrow, address))
return advertisement[0]
    def get_eligible_advertisement_row(self, cursor, tokenrow, height):
        """Return the rowid of the one advertisement a user may register for.

        Eligible means: currently active at `height`, or a future one that
        allows preregistration.  Raises TokenError if none exists and
        ValueError if the database holds conflicting advertisements.
        """
        advertisement = None
        advertisements = cursor.execute('''SELECT rowid FROM advertisement
                WHERE token = ? AND finished IS NULL AND begins <= ?
                    AND (ends IS NULL OR ends >= ?)''',
                (tokenrow, height, height)).fetchall()
        if advertisements:
            if len(advertisements) > 1:
                raise ValueError("There are multiple active advertisements")
            advertisement = advertisements[0]
        advertisements = cursor.execute("""SELECT rowid FROM advertisement
                WHERE token = ? AND finished IS NULL AND begins > ? AND preregister = 'Y'""",
                (tokenrow, height)).fetchall()
        if advertisements:
            if advertisement:
                raise ValueError("There is an active advertisement but also a future advertisement allowing preregistration")
            if len(advertisements) > 1:
                raise ValueError("There are multiple future advertisements allowing preregistration")
            advertisement = advertisements[0]
        if not advertisement:
            raise TokenError("There is no active advertisement or future advertisement allowing preregistration")
        return advertisement[0]
def token_register(self, address, txrow, blockrow, user_address, units_max=None):
cursor = self.conn.cursor()
tokenrow = self._get_tokenrow(cursor, address)
height = cursor.execute('''SELECT height FROM block WHERE rowid = ?''', (blockrow,)).fetchone()[0]
advertisement = self.get_eligible_advertisement_row(cursor, tokenrow, height)
advertisement = cursor.execute('''SELECT rowid, minimum, maximum, rate, available, claimed, delivers
FROM advertisement
WHERE rowid = ?''',
(advertisement,)).fetchone()
if units_max < advertisement[1]:
raise TokenError('Specified maximum is less than the advertisement user-minimum required')
registrations = cursor.execute('''SELECT SUM(maximum)
FROM registration
WHERE address = ? and advertisement = ?''',
(user_address, advertisement[0])).fetchone()
max_remains = advertisement[2]
if registrations:
max_remains -= registrations[0]
if max_remains < 1:
raise TokenError('Maximum per-user units has already been registered')
unclaimed = advertisement[4] - advertisement[5]
if unclaimed < max_remains:
max_remains = unclaimed
if units_max > max_remains:
units_max = max_remains
if not advertisement[3]: # free faucet
units = units_max
available = (height > advertisement[6])
# note that if height == delivers then process_advertisements() will make the units available
# update source balance
cursor.execute('''UPDATE balance
SET updated = ?, units = units - ?
WHERE token = ? AND address = ?''',
(blockrow, units, tokenrow, address))
# update destination balance
balance = self._get_balance(cursor, tokenrow, user_address)
if balance is None:
cursor.execute('''INSERT INTO balance
(address, token, updated, units, available)
VALUES (?, ?, ?, ?, ?)''',
(user_address, tokenrow, blockrow, units, units if available else 0))
else:
cursor.execute('''UPDATE balance
SET updated = ?, units = units + ?, available = available + ?
WHERE token = ? AND address = ?''',
(blockrow, units, units if available else 0, tokenrow, user_address))
cursor.execute('''UPDATE advertisement
SET updated = ?, claimed = claimed + ?
WHERE rowid = ?''',
(blockrow, units, advertisement[0]))
# save transfer event
cursor.execute('''INSERT INTO transfer
(tx, created, addr_from, addr_to, units)
VALUES (?, ?, ?, ?, ?)''',
(txrow, blockrow, address, user_address, units))
else:
units = 0
cursor.execute('''INSERT INTO registration
(tx, address, advertisement, created, updated, finished, maximum, payments, claimed)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)''',
(txrow, user_address, advertisement[0], blockrow, blockrow,
blockrow if advertisement[3] else None, units_max, 0, units))
return cursor.lastrowid
def token_unregister(self, address, txrow, blockrow, user_address):
cursor = self.conn.cursor()
tokenrow = self._get_tokenrow(cursor, address)
height = cursor.execute('''SELECT height FROM block WHERE rowid = ?''', (blockrow,)).fetchone()[0]
advertisement = self.get_eligible_advertisement_row(cursor, tokenrow, height)
registrations = cursor.execute('''SELECT rowid, token FROM registration
WHERE address = ? AND advertisement = ? AND finished IS NULL''',
(user_address, advertisement)).fetchall()
if not registrations:
raise TokenError("No active registration was found")
if len(registrations) > 1:
raise ValueError("Multiple active registrations found")
registration = registrations[0]
if registration[1] != tokenrow:
raise ValueError("This registration token does not match the advertisement token")
cursor.execute('''UPDATE registration
SET updated = ?, finished = ?
WHERE rowid = ?''',
(blockrow, blockrow, registration[0]))
return registration[0]
def get_active_registrations_map(self, blockrow):
cursor = self.conn.cursor()
height = cursor.execute('''SELECT height FROM block WHERE rowid = ?''', (blockrow,)).fetchone()[0]
registrations = cursor.execute('''SELECT t.address, r.address, r.rowid
FROM registration r
LEFT JOIN advertisement a ON a.rowid = r.advertisement
LEFT JOIN token t ON t.rowid = a.token
WHERE r.finished IS NULL AND a.finished IS NULL AND a.begins <= ?''',
(height,)).fetchall()
reg_map = {}
if registrations:
for registration in registrations:
try:
records = reg_map[registration[0]]
except AttributeError:
records = {}
reg_map[registration[0]] = records
try:
rowid = records[registration[1]]
raise ValueError('Already have an active registration for this user and token')
except AttributeError:
records[registration[1]] = registration[2]
return reg_map
def registration_payment(self, txrow, blockrow, rowid, value):
cursor = self.conn.cursor()
height = cursor.execute('''SELECT height FROM block WHERE rowid = ?''', (blockrow,)).fetchone()[0]
details = cursor.execute('''SELECT r.address, r.maximum, r.payments, r.claimed,
a.rowid, a.delivers, a.available, a.claimed, a.rate, a.minimum, a.maximum,
t.rowid, t.address
FROM registration r
LEFT JOIN advertisement a ON a.rowid = r.advertisement
LEFT JOIN token t ON t.rowid = a.token
WHERE r.rowid = ?''',
(rowid,)).fetchone()
claimed = cursor.execute('''SELECT SUM(claimed)
FROM registration
WHERE address = ? AND advertisement = ? AND rowid <> ?''',
(details[0], details[4], rowid)).fetchone()[0]
ad_remaining = details[6] - details[7]
user_remaining = details[10] - claimed - details[3]
if ad_remaining < user_remaining:
user_remaining = ad_remaining
if details[1] < user_remaining:
user_remaining = details[1]
payments = details[2] + value
rate = details[8]
if rate:
if rate < 0:
units = payments // (-1 * rate)
else:
units = payments * rate
if units < details[9]:
units = 0
else:
units -= details[3]
if units > user_remaining:
units = user_remaining
else:
units = user_remaining
if units > 0:
available = (height > details[5])
# note that if height == delivers then process_advertisements() will make the units available
# update source balance
cursor.execute('''UPDATE balance
SET updated = ?, units = units - ?
WHERE token = ? AND address = ?''',
(blockrow, units, details[11], details[12]))
# update destination balance
balance = self._get_balance(cursor, details[11], details[0])
if balance is None:
cursor.execute('''INSERT INTO balance
(address, token, updated, units, available)
VALUES (?, ?, ?, ?, ?)''',
(details[0], details[11], blockrow, units, units if available else 0))
else:
cursor.execute('''UPDATE balance
SET updated = ?, units = units + ?, available = available + ?
WHERE token = ? AND address = ?''',
(blockrow, units, units if available else 0, details[11], details[0]))
# save transfer event
cursor.execute('''INSERT INTO transfer
(tx, created, addr_from, addr_to, units)
VALUES (?, ?, ?, ?, ?)''',
(txrow, blockrow, details[12], details[0], units))
finished = (units == (details[1] - details[3]))
cursor.execute('''UPDATE registration
SET updated = ?, finished = ?, payments = ?, claimed = claimed + ?
WHERE rowid = ?''',
(blockrow, blockrow if finished else None, payments, units, rowid))
cursor.execute('''UPDATE advertisement
SET updated = ?, claimed = claimed + ?
WHERE rowid = ?''',
(blockrow, units, details[4]))
def process_advertisements(self, blockrow):
cursor = self.conn.cursor()
height = cursor.execute('''SELECT height FROM block WHERE rowid = ?''', (blockrow,)).fetchone()[0]
deliveries = cursor.execute('''SELECT rowid, token
FROM advertisement
WHERE delivers = ?''',
(blockrow,)).fetchall()
if deliveries:
for delivery in deliveries:
registrations = cursor.execute('''SELECT rowid, address, claimed
FROM registration
WHERE advertisement = ?
ORDER BY address''',
(delivery[0],)).fetchall()
if registrations:
last_address = None
user_claimed = 0
for registration in registrations:
if last_address is None:
last_address = registration[1]
if last_address == registration[1]:
user_claimed += registration[2]
else:
cursor.execute('''UPDATE balance
SET updated = ?, available = available + ?
WHERE token = ? AND address = ?''',
(blockrow, user_claimed, delivery[1], last_address))
last_address = registration[1]
user_claimed = registration[2]
cursor.execute('''UPDATE balance
SET updated = ?, available = available + ?
WHERE token = ? AND address = ?''',
(blockrow, user_claimed, delivery[1], last_address))
ads_to_close = cursor.execute('''SELECT a.rowid, a.available - a.claimed,
t.rowid, t.address
FROM advertisement a
LEFT JOIN token t ON t.rowid = a.token
WHERE a.finished IS NULL AND a.claimed = a.available OR a.ends = ?''',
(height,)).fetchall()
if ads_to_close:
for advertisement in ads_to_close:
cursor.execute('''UPDATE registration
SET updated = ?, finished = ?
WHERE advertisement = ? AND finished IS NULL''',
(blockrow, blockrow, advertisement[0]))
cursor.execute('''UPDATE advertisement
SET updated = ?, finished = ?
WHERE rowid = ?''',
(blockrow, blockrow, advertisement[0]))
make_available = advertisement[1]
if make_available:
cursor.execute('''UPDATE balance
SET updated = ?, available = available + ?
WHERE token = ? AND address = ?''',
(blockrow, make_available, advertisement[2], advertisement[3]))
def get_user_tokens(self, address):
return [{
"address": row[0],
"symbol": row[1],
"decimals": row[2],
"name": row[3],
"units": row[4],
"available": row[5]
} for row in self.conn.execute('''
SELECT t.address, t.symbol, t.decimals, t.name, b.units, b.available
FROM balance b
LEFT JOIN token t ON t.rowid = b.token
WHERE b.address = ?''',
(address,)).fetchall()]
    def hash(self, blockrow):
        """Compute and store the chained 'orbit' hash for the single pending block.

        Folds the block row plus every token/balance/event row touched by it
        into a double-SHA256 digest chained onto the previous block's orbit.
        Returns the 32-byte digest, or None when no block is pending.
        NOTE(review): the `blockrow` parameter is unused -- it is overwritten
        from the pending-block query below.
        """
        cursor = self.conn.cursor()
        blocks = cursor.execute('''SELECT height, rowid, hash FROM block
                WHERE orbit IS NULL
                ORDER BY height''').fetchall()
        if not blocks:
            return None
        if len(blocks) > 1:
            raise ValueError('Multiple unhashed orbits detected... hash() must be called concurrently as blocks are inserted')
        block = blocks[0]
        height = block[0]
        blockrow = block[1]
        block_prev = cursor.execute('''SELECT orbit FROM block
                WHERE height = ?''',
                (height - 1,)).fetchone()
        # hash the block and append to previous block hash
        if block_prev:
            data = block_prev[0]
        else:
            # no predecessor orbit: either this is the launch block, or the
            # chain is missing a block
            not_launch = cursor.execute('''SELECT 1 FROM block
                    WHERE height < ? LIMIT 1''',
                    (height,)).fetchone()
            if not_launch:
                raise ValueError('Missing block: {}'.format(height - 1))
            data = b'\x42\x81' # special sequence to indicate launch
        data += self._hash_cols(block)
        # tokens and balances
        data += self._hash_rows(cursor.execute('''SELECT * FROM token
                WHERE updated = ?
                ORDER BY rowid''',
                (blockrow,)))
        data += self._hash_rows(cursor.execute('''SELECT * FROM balance
                WHERE updated = ?
                ORDER BY rowid''',
                (blockrow,)))
        # events
        data += self._hash_rows(cursor.execute('''SELECT * FROM transfer
                WHERE created = ?
                ORDER BY rowid''',
                (blockrow,)))
        data += self._hash_rows(cursor.execute('''SELECT * FROM advertisement
                WHERE updated = ?
                ORDER BY rowid''',
                (blockrow,)))
        data += self._hash_rows(cursor.execute('''SELECT * FROM registration
                WHERE updated = ?
                ORDER BY rowid''',
                (blockrow,)))
        # final hash and save
        orbit = self._hash(data)
        cursor.execute('''UPDATE block
                SET orbit = ?
                WHERE rowid = ?''',
                (sqlite3.Binary(orbit), blockrow))
        return orbit
    def _hash_rows(self, rows):
        # Fold an iterable of rows into a single double-SHA256 digest, with
        # b'\x01'/b'\xFF' framing.  NOTE(review): callers pass sqlite3
        # cursors, which are always truthy, so the b'\x00' empty marker is
        # only produced for falsy inputs such as [] -- a cursor with zero
        # rows hashes the empty framing instead.
        if not rows:
            return b'\x00'
        data = b'\x01'
        for row in rows:
            data += self._hash_cols(row)
        data += b'\xFF'
        return self._hash(data)
    def _hash_cols(self, cols):
        # Serialize one row as '[v1|v2|...]' and double-SHA256 it.
        # NOTE(review): falsy column values (None, 0, '') all serialize to
        # nothing before the '|' separator and are therefore
        # indistinguishable in the hash -- confirm this is acceptable.
        data = '['
        for col in cols:
            if col:
                data += '{}'.format(col)
            data += '|'
        data += ']'
        return self._hash(data.encode('utf-8'))
def _hash(self, data):
return sha256(sha256(data).digest()).digest()
| 36,750 | 820 | 23 |
aa883e3496e21cb0f2daf6fff05903f54faa73b3 | 3,177 | py | Python | create_bags/bag_creator.py | RockefellerArchiveCenter/dart_digitization | f4ee85f3cbcbbe92c9a5b03b76bcdd72233722ec | [
"MIT"
] | null | null | null | create_bags/bag_creator.py | RockefellerArchiveCenter/dart_digitization | f4ee85f3cbcbbe92c9a5b03b76bcdd72233722ec | [
"MIT"
] | 11 | 2021-08-05T19:39:27.000Z | 2021-12-15T14:52:14.000Z | create_bags/bag_creator.py | RockefellerArchiveCenter/dart_digitization | f4ee85f3cbcbbe92c9a5b03b76bcdd72233722ec | [
"MIT"
] | null | null | null | import json
from configparser import ConfigParser
from subprocess import PIPE, Popen
from .archivesspace import ArchivesSpaceClient
from .helpers import create_tag, format_aspace_date, get_closest_dates
| 41.802632 | 79 | 0.564054 | import json
from configparser import ConfigParser
from subprocess import PIPE, Popen
from .archivesspace import ArchivesSpaceClient
from .helpers import create_tag, format_aspace_date, get_closest_dates
class BagCreator:
    """Builds a DART bag for a digitized archival object.

    Reads DART and ArchivesSpace settings from local_settings.cfg, enriches
    the bag-info tags with ArchivesSpace data, and hands the job to the DART
    runner over stdin.
    """
    def __init__(self):
        # Configuration is re-read for every instance from the working directory.
        self.config = ConfigParser()
        self.config.read("local_settings.cfg")
        self.dart_command = self.config["DART"]["dart"]
        self.workflow = self.config["DART"]["workflow"]
    def run(self, refid, rights_ids, files):
        """
        Create and submit a DART bagging job for one archival object.
        Args:
            refid (str): ArchivesSpace refid of the archival object
            rights_ids (array): rights IDs to record as bag-info tags
            files (array): full filepaths to include in the bag
        Returns:
            str: the refid, on success
        """
        # directory_to_bag = "some directory"
        self.as_client = ArchivesSpaceClient(baseurl=self.config.get(
            "ArchivesSpace", "baseurl"), username=self.config.get(
            "ArchivesSpace", "username"), password=self.config.get(
            "ArchivesSpace", "password"))
        self.refid = refid
        self.ao_uri = self.as_client.get_uri_from_refid(self.refid)
        ao_data = self.as_client.get_ao_data(self.ao_uri)
        begin_date, end_date = format_aspace_date(get_closest_dates(ao_data))
        self.job_params = self.construct_job_params(
            rights_ids, files, begin_date, end_date)
        self.create_dart_job()
        return self.refid
    def construct_job_params(self, rights_ids, files, begin_date, end_date):
        """Formats information for DART job parameters
        Args:
            rights_ids (array): list of rights ids
            files (array): list of full filepaths
            begin_date (str): formatted begin date from ArchivesSpace
            end_date (str): formatted end date from ArchivesSpace
        Returns a dictionary"""
        job_params = {"workflowName": self.workflow,
                      "packageName": "{}.tar".format(self.refid),
                      "files": files,
                      "tags": [{"tagFile": "bag-info.txt",
                                "tagName": "ArchivesSpace-URI",
                                "userValue": self.ao_uri},
                               {"tagFile": "bag-info.txt",
                                "tagName": "Start-Date",
                                "userValue": begin_date},
                               {"tagFile": "bag-info.txt",
                                "tagName": "End-Date",
                                "userValue": end_date},
                               {"tagFile": "bag-info.txt",
                                "tagName": "Origin",
                                "userValue": "digitization"}]}
        # one Rights-ID tag per rights id
        for rights_id in rights_ids:
            job_params['tags'].append(create_tag("Rights-ID", str(rights_id)))
        return job_params
    def create_dart_job(self):
        """Runs a DART job by piping the job parameters to the DART CLI as JSON."""
        json_input = (json.dumps(self.job_params) + "\n").encode()
        # NOTE(review): shell=True with a command string taken from
        # local_settings.cfg -- ensure that file is trusted.
        cmd = "{} -- --stdin".format(self.dart_command)
        child = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True)
        stdout_data, stderr_data = child.communicate(json_input)
        if child.returncode != 0:
            stdout_message = stdout_data.decode('utf-8') if stdout_data else ""
            stderr_message = stderr_data.decode('utf-8') if stderr_data else ""
            raise Exception(stdout_message, stderr_message)
| 194 | 2,755 | 23 |
81003cc2b9becec46691667317615c5a7f7d2cda | 2,105 | py | Python | test_status_endpoints/test_status_tweet.py | RSMuthu/Twitter_API-Pytest | b272d8ead3d51bbd0d5a4720f67905dccee27d7c | [
"MIT"
] | null | null | null | test_status_endpoints/test_status_tweet.py | RSMuthu/Twitter_API-Pytest | b272d8ead3d51bbd0d5a4720f67905dccee27d7c | [
"MIT"
] | null | null | null | test_status_endpoints/test_status_tweet.py | RSMuthu/Twitter_API-Pytest | b272d8ead3d51bbd0d5a4720f67905dccee27d7c | [
"MIT"
] | null | null | null | import pytest
from conftest import twitter_session, BASE_URL
from utils import get_home_tweets
# status list to tweet
status_list = {"We welcome you to MSD family :)", "Hello World !!"}
@pytest.mark.run(order=1) ## ordering test cases -- make the tweet first (presumably requires the pytest-ordering plugin -- confirm)
@pytest.mark.parametrize("status_text", status_list) ## one test case per entry in status_list
def test_make_tweet(twitter_session, status_text):
    '''
    Test Case for the creation of a tweet.
    Args:
        twitter_session - the OAuth1Session from the pytest fixture.
        status_text - the text which will be dumped in the tweet created for testing.
    '''
    # making API call to post the tweet with the status_text provide
    resp = twitter_session.post(f"{BASE_URL}/statuses/update.json", params={'status': status_text})
    print (f"\nTweet Response - {resp.text}") ## response shall be captured from std
    # Assert to confirm if the tweet is made successfully
    assert resp.status_code == 200
    # Assert to Confirm if the tweet made is having correct data
    assert resp.json()['text'] == status_text
@pytest.mark.run(order=4) ## ordering test cases -- delete the tweet after all the test cases are done
def test_delete_tweet(twitter_session):
    '''
    Test Case for the deletion of a tweet.
    This test case is executed post creation.
    We will be searching for the tweet from the home timeline and deleting it.
    Args:
        twitter_session - the OAuth1Session from the pytest fixture.
    '''
    # loop through the tweets made as part of test case
    # (fetch only as many home-timeline tweets as were created above)
    for tweet in get_home_tweets(twitter_session, tweet_count=len(status_list)):
        # verifing if its the same tweet we had made, before deleting
        if tweet['text'] in status_list:
            # API call to delete the tweet
            resp = twitter_session.post(f"{BASE_URL}/statuses/destroy/{tweet['id']}.json")
            print (f"\nDelete tweet Response - {resp.text}") ## response shall be captured from std
            # Assert to confirm if the request made successfully
            assert resp.status_code == 200
| 46.777778 | 110 | 0.709264 | import pytest
from conftest import twitter_session, BASE_URL
from utils import get_home_tweets
# status list to tweet
status_list = {"We welcome you to MSD family :)", "Hello World !!"}
@pytest.mark.run(order=1) ## ording test cases -- make tweet first as first test case
@pytest.mark.parametrize("status_text", status_list) ## making it parametrized with the iterable "status text"
def test_make_tweet(twitter_session, status_text):
'''
Test Case for the creation of a tweet.
Args:
twitter_session - the OAuth1Session from the pytest fixture.
status_text - the text which will be dumped in the tweet created for testing.
'''
# making API call to post the tweet with the status_text provide
resp = twitter_session.post(f"{BASE_URL}/statuses/update.json", params={'status': status_text})
print (f"\nTweet Response - {resp.text}") ## response shall be captured from std
# Assert to confirm if the tweet is made successfully
assert resp.status_code == 200
# Assert to Confirm if the tweet made is having correct data
assert resp.json()['text'] == status_text
@pytest.mark.run(order=4) ## ordering test cases -- delete the tweet after all the test cases are done
def test_delete_tweet(twitter_session):
'''
Test Case for the deletion of a tweet.
This test case is executed post creation.
We will be searching for the tweet from the home timeline and deleting it.
Args:
twitter_session - the OAuth1Session from the pytest fixture.
'''
# loop through the tweets made as part of test case
for tweet in get_home_tweets(twitter_session, tweet_count=len(status_list)):
# verifing if its the same tweet we had made, before deleting
if tweet['text'] in status_list:
# API call to delete the tweet
resp = twitter_session.post(f"{BASE_URL}/statuses/destroy/{tweet['id']}.json")
print (f"\nDelete tweet Response - {resp.text}") ## response shall be captured from std
# Assert to confirm if the request made successfully
assert resp.status_code == 200
| 0 | 0 | 0 |
5d39942b02ac6f8ea852e08d75c444f2f2c627d5 | 1,776 | py | Python | compil_to_csv.py | bliiben/sha256SquaredPrecompilCSV | 752aa981299f1f06abf92ad76228926aa9267934 | [
"MIT"
] | null | null | null | compil_to_csv.py | bliiben/sha256SquaredPrecompilCSV | 752aa981299f1f06abf92ad76228926aa9267934 | [
"MIT"
] | null | null | null | compil_to_csv.py | bliiben/sha256SquaredPrecompilCSV | 752aa981299f1f06abf92ad76228926aa9267934 | [
"MIT"
] | null | null | null | import pickle
import operator
import re
prog = re.compile( r"^([\^\~\&\|])\((\w+),?(\w+)?\)$" )
with open("sha_decoded.pp", "r") as file:
_h, _REFERENCES_, _DEPENDENCY_ ,_INV_DEP_, _COST_ = pickle.load( file )
inv_refs = {v: k for k, v in _REFERENCES_.iteritems()}
#sorted_COST = sorted(_COST_.items(), key=operator.itemgetter(1))
opsByDiv = {}
# CONSTRAINTS = {}
# for i in range(2):
# for j in _h[i]:
# CONSTRAINTS[ _h[i][j] ] = 0
for i in _COST_.items():
if( i[1] not in opsByDiv ):
opsByDiv[i[1]]=[]
opsByDiv[i[1]].append(i[0])
csv = open("sha2562hash.csv","w")
csv.write("\n"*10)
opsResultCell = {}
for n in opsByDiv:
line = []
c = 0
for opRef in (opsByDiv[n]):
opsResultCell[opRef] = toCell(n+10,c)
op = inv_refs[opRef]
a,b = getElements( op )
if(a[0]=='r'):
cella = opsResultCell[a]
elif( a[0] == 't'):
cella = '1'
elif( a[0] == 'f' ):
cella = '0'
elif( a[0]=='b'):
cella = toCell(1,int(a[1:]))
else:
cella = a
if( b!= None and b[0]=='r' ):
cellb = opsResultCell[b]
elif( b!= None and b[0] == 't'):
cellb = '1'
elif( b!= None and b[0] == 'f' ):
cellb = '0'
elif( b!= None and b[0]=='b'):
cellb = toCell(1,int(b[1:]))
else:
cellb = b
if(op[0]=='^'):
line.append("=XOR("+cella+";"+cellb+")")
elif(op[0]=='|'):
line.append("=OR("+cella+";"+cellb+")")
elif(op[0]=='&'):
line.append("=AND("+cella+";"+cellb+")")
elif(op[0]=='~'):
line.append("=NOT("+cella+")")
c+=1
csv.write('\t'.join(line)+"\n")
csv.close() | 21.39759 | 72 | 0.552365 | import pickle
import operator
import re
prog = re.compile( r"^([\^\~\&\|])\((\w+),?(\w+)?\)$" )
def getElements(op):
    """Return the (first, second) operand references parsed from *op*.

    *op* is an operation string such as ``^(a,b)`` or ``~(a)``; the
    module-level regex ``prog`` captures the operator plus up to two
    operands.  The second element is ``None`` for unary operations.
    """
    match = prog.search(op)
    return match.group(2), match.group(3)
with open("sha_decoded.pp", "r") as file:
_h, _REFERENCES_, _DEPENDENCY_ ,_INV_DEP_, _COST_ = pickle.load( file )
inv_refs = {v: k for k, v in _REFERENCES_.iteritems()}
#sorted_COST = sorted(_COST_.items(), key=operator.itemgetter(1))
opsByDiv = {}
# CONSTRAINTS = {}
# for i in range(2):
# for j in _h[i]:
# CONSTRAINTS[ _h[i][j] ] = 0
def toCell(n, c):
    """Return the spreadsheet cell name for row *n* and 0-based column *c*.

    Columns use bijective base-26 letters ("A".."Z", "AA".."ZZ", "AAA", ...),
    so ``toCell(1, 0) == "A1"`` and ``toCell(5, 26) == "AA5"``.

    GENERALIZED: the original two-branch implementation only handled the
    first 702 columns (up to "ZZ") and raised IndexError beyond that; this
    loop handles arbitrarily large column indices while producing identical
    results for the previously supported range.
    """
    crs = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    letters = ""
    col = c
    while True:
        letters = crs[col % 26] + letters
        # Bijective base-26: shift down one "digit"; -1 because there is
        # no zero digit ("A" follows "Z" as "AA", not "A0").
        col = col // 26 - 1
        if col < 0:
            break
    return letters + str(n)
# Group operation references by their cost/depth value so that every
# operation with the same cost lands on the same spreadsheet row.
for i in _COST_.items():
    if( i[1] not in opsByDiv ):
        opsByDiv[i[1]]=[]
    opsByDiv[i[1]].append(i[0])
# Emit one spreadsheet row per cost bucket, one formula per column.
csv = open("sha2562hash.csv","w")
csv.write("\n"*10)  # ten blank rows reserved above the formula area
opsResultCell = {}  # operation reference -> cell name holding its result
for n in opsByDiv:
    line = []
    c = 0
    for opRef in (opsByDiv[n]):
        # Formulas for bucket n start at row n+10 (below the blank header rows).
        opsResultCell[opRef] = toCell(n+10,c)
        op = inv_refs[opRef]
        a,b = getElements( op )
        # Resolve operand `a` to a cell reference or literal:
        #   'r...' -> cell of a previously computed operation result
        #   't'    -> constant true (1); 'f' -> constant false (0)
        #   'b...' -> input bit cell in row 1
        #   anything else is used verbatim -- NOTE(review): confirm intended
        if(a[0]=='r'):
            cella = opsResultCell[a]
        elif( a[0] == 't'):
            cella = '1'
        elif( a[0] == 'f' ):
            cella = '0'
        elif( a[0]=='b'):
            cella = toCell(1,int(a[1:]))
        else:
            cella = a
        # Same resolution for the optional second operand (None for unary ops).
        if( b!= None and b[0]=='r' ):
            cellb = opsResultCell[b]
        elif( b!= None and b[0] == 't'):
            cellb = '1'
        elif( b!= None and b[0] == 'f' ):
            cellb = '0'
        elif( b!= None and b[0]=='b'):
            cellb = toCell(1,int(b[1:]))
        else:
            cellb = b
        # Map the operator symbol to a spreadsheet boolean formula
        # (';' is the argument separator for this spreadsheet locale).
        if(op[0]=='^'):
            line.append("=XOR("+cella+";"+cellb+")")
        elif(op[0]=='|'):
            line.append("=OR("+cella+";"+cellb+")")
        elif(op[0]=='&'):
            line.append("=AND("+cella+";"+cellb+")")
        elif(op[0]=='~'):
            line.append("=NOT("+cella+")")
        c+=1
    csv.write('\t'.join(line)+"\n")
csv.close() | 218 | 0 | 45 |
1f46c1bbb19eae64d53cea40d5922ff06d116ac8 | 474 | py | Python | notifs/asgi.py | gaybro8777/django-notifs | f349fdf2dc87f2b579055c4e2abd95832ad9df5b | [
"MIT"
] | 145 | 2017-06-22T20:37:14.000Z | 2022-02-03T21:18:28.000Z | notifs/asgi.py | gaybro8777/django-notifs | f349fdf2dc87f2b579055c4e2abd95832ad9df5b | [
"MIT"
] | 67 | 2017-06-23T06:53:32.000Z | 2021-11-13T04:00:27.000Z | notifs/asgi.py | gaybro8777/django-notifs | f349fdf2dc87f2b579055c4e2abd95832ad9df5b | [
"MIT"
] | 24 | 2017-06-22T20:37:17.000Z | 2022-02-17T19:52:35.000Z | import os
from channels.routing import ChannelNameRouter, ProtocolTypeRouter
from django.core.asgi import get_asgi_application
from notifications import consumers
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'notifs.settings')
application = ProtocolTypeRouter(
{
'http': get_asgi_application(),
'channel': ChannelNameRouter(
{
'django_notifs': consumers.DjangoNotifsConsumer.as_asgi(),
}
),
}
)
| 23.7 | 74 | 0.685654 | import os
from channels.routing import ChannelNameRouter, ProtocolTypeRouter
from django.core.asgi import get_asgi_application
from notifications import consumers
# Must be set before get_asgi_application() is called below.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'notifs.settings')
# ASGI entry point: plain HTTP requests go to the regular Django app;
# messages sent to the "django_notifs" channel are handled by the
# notifications consumer (a Channels background worker).
application = ProtocolTypeRouter(
    {
        'http': get_asgi_application(),
        'channel': ChannelNameRouter(
            {
                'django_notifs': consumers.DjangoNotifsConsumer.as_asgi(),
            }
        ),
    }
)
| 0 | 0 | 0 |
1524f6da34655de1d60bbc96022b5edd80ee87e4 | 615 | py | Python | demo/python/multi-threading/demo4.py | dark-w/sokoban | 98cd76dd94ae0c7d1beaaca47f0d3598998bb43b | [
"Apache-2.0"
] | null | null | null | demo/python/multi-threading/demo4.py | dark-w/sokoban | 98cd76dd94ae0c7d1beaaca47f0d3598998bb43b | [
"Apache-2.0"
] | 9 | 2021-01-24T05:26:31.000Z | 2021-04-07T07:03:02.000Z | demo/python/multi-threading/demo4.py | dark-w/sokoban | 98cd76dd94ae0c7d1beaaca47f0d3598998bb43b | [
"Apache-2.0"
] | null | null | null | import threading
# pls thinking about if there not use mutex...
# you must try it, comment the line 12 and the line 14...
mutex = threading.Lock()
num = 0
if __name__ == "__main__":
main()
| 20.5 | 59 | 0.560976 | import threading
# pls thinking about if there not use mutex...
# you must try it, comment the line 12 and the line 14...
mutex = threading.Lock()
num = 0
def fun1(n):
    """Increment the shared counter *n* times, one locked step at a time.

    Each increment is guarded by the module-level ``mutex`` so that the
    read-modify-write on ``num`` cannot interleave with the other thread.
    Prints the counter value observed when this worker finishes.
    """
    global num
    for _ in range(n):
        with mutex:  # Lock.__enter__/__exit__ == acquire()/release()
            num += 1
    print("num result: {}".format(num))
def main():
    """Run two incrementer threads concurrently and print the final count."""
    workers = [
        threading.Thread(target=fun1, args=(1000000,)) for _ in range(2)
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    print("num result: {}".format(num))
if __name__ == "__main__":
main()
| 349 | 0 | 62 |
7ed241bdc8a02c16e6ea477e0032b989c2862ec7 | 14,833 | py | Python | decofre/infer.py | LoicGrobol/decofre | 68e12c8da4a6c032bb5ea3edff9e8484344e94e2 | [
"MIT"
] | 9 | 2021-01-15T10:34:02.000Z | 2021-12-24T13:58:36.000Z | decofre/infer.py | LoicGrobol/decofre | 68e12c8da4a6c032bb5ea3edff9e8484344e94e2 | [
"MIT"
] | 8 | 2020-03-13T10:52:48.000Z | 2022-02-06T22:15:28.000Z | decofre/infer.py | LoicGrobol/decofre | 68e12c8da4a6c032bb5ea3edff9e8484344e94e2 | [
"MIT"
] | null | null | null | import contextlib
import pathlib
import shutil
import sys
import tempfile
import typing as ty
import click
import click_pathlib
import jsonlines
import numpy as np
import spacy
import ujson as json
from typing import Any, Dict, List, Literal, Optional, TextIO
from typing_extensions import TypedDict
from decofre.formats import formats
from decofre import detmentions, score, clusterize
spacy.tokens.Doc.set_extension("clusters", default=None)
spacy.tokens.Span.set_extension("cluster", default=None)
spacy.tokens.Span.set_extension("singleton", default=True)
@contextlib.contextmanager
def smart_open(
filename: str, mode: str = "r", *args, **kwargs
) -> ty.Generator[ty.IO, None, None]:
"""Open files and i/o streams transparently."""
if filename == "-":
if "r" in mode:
stream = sys.stdin
else:
stream = sys.stdout
if "b" in mode:
fh = stream.buffer # type: ty.IO
else:
fh = stream
close = False
else:
fh = open(filename, mode, *args, **kwargs)
close = True
try:
yield fh
finally:
if close:
try:
fh.close()
except AttributeError:
pass
@contextlib.contextmanager
def dir_manager(
path: ty.Optional[ty.Union[pathlib.Path, str]] = None, cleanup=None
) -> ty.Generator[pathlib.Path, None, None]:
"""A context manager to deal with a directory, default to a self-destruct temp one."""
if path is None:
d_path = pathlib.Path(tempfile.mkdtemp())
if cleanup is None:
cleanup = True
else:
d_path = pathlib.Path(path).resolve()
d_path.mkdir(parents=True, exist_ok=True)
if cleanup is None:
cleanup = False
elif cleanup:
if d_path.glob("*"):
raise ValueError(f"{d_path} is not empty.")
try:
yield d_path
finally:
if cleanup:
shutil.rmtree(d_path)
def antecedents_from_mentions(
mentions: ty.Iterable[ty.Dict[str, ty.Any]],
max_candidates: int = 128,
distance_buckets: ty.Sequence[int] = (1, 2, 3, 4, 5, 7, 15, 32, 63),
) -> ty.Dict[str, ty.Dict[str, AntecedentFeaturesDict]]:
"""Extract an antecedent dataset from a list of detected mentions."""
sorted_mentions = sorted(mentions, key=lambda m: (m["start"], m["end"]))
if len(sorted_mentions) < 2:
return dict()
# The first mention in a document has no antecedent candidates
res = dict()
for i, mention in enumerate(sorted_mentions[1:], start=1):
mention_content_set = set(mention["content"])
antecedent_candidates = sorted_mentions[max(0, i - max_candidates) : i]
antecedents: ty.Dict[str, AntecedentFeaturesDict] = dict()
for j, candidate in enumerate(antecedent_candidates):
candidate_content_set = set(candidate["content"])
w_distance = int(
np.digitize(
mention["start"] - candidate["end"],
bins=distance_buckets,
right=True,
)
)
u_distance = int(
np.digitize(
mention["sentence"] - candidate["sentence"],
bins=distance_buckets,
)
)
m_distance: int = int(
np.digitize(
len(antecedent_candidates) - j,
bins=distance_buckets,
right=True,
)
)
spk_agreement = mention.get("speaker") == candidate.get("speaker")
intersect = len(mention_content_set.intersection(candidate_content_set))
token_incl_ratio = int(
10
* intersect
/ min(len(mention_content_set), len(candidate_content_set))
)
token_com_ratio = int(
10 * intersect / len(mention_content_set.union(candidate_content_set))
)
overlap = mention["start"] < candidate["end"]
antecedents[candidate["span_id"]] = {
"w_distance": w_distance,
"u_distance": u_distance,
"m_distance": m_distance,
"spk_agreement": spk_agreement,
"overlap": overlap,
"token_incl": token_incl_ratio,
"token_com": token_com_ratio,
}
res[mention["span_id"]] = antecedents
return res
@click.command(help="End-to-end coreference resolution")
@click.argument(
"detect-model",
type=click_pathlib.Path(exists=True, dir_okay=False),
)
@click.argument(
"coref-model",
type=click_pathlib.Path(exists=True, dir_okay=False),
)
@click.argument(
"input_file",
type=click.File("r"),
)
@click.argument(
"output_file",
type=click.File("w", atomic=True),
default="-",
)
@click.option(
"--from",
"input_format",
type=click.Choice(formats.keys()),
default="raw_text",
help="The input format",
show_default=True,
)
@click.option(
"--intermediary-dir",
"intermediary_dir_path",
type=click_pathlib.Path(resolve_path=True, file_okay=False),
help="A path to a directory to use for intermediary files, defaults to a self-destructing temp dir",
)
@click.option(
"--lang",
default="fr_core_news_lg",
help="A spaCy model handle for the document.",
show_default=True,
)
@click.option(
"--to",
"output_format",
type=click.Choice(["latex", "prodigy", "sacr", "text"]),
default="text",
help="Output formats (experimental)",
)
if __name__ == "__main__":
main_entry_point()
| 32.6 | 104 | 0.569945 | import contextlib
import pathlib
import shutil
import sys
import tempfile
import typing as ty
import click
import click_pathlib
import jsonlines
import numpy as np
import spacy
import ujson as json
from typing import Any, Dict, List, Literal, Optional, TextIO
from typing_extensions import TypedDict
from decofre.formats import formats
from decofre import detmentions, score, clusterize
spacy.tokens.Doc.set_extension("clusters", default=None)
spacy.tokens.Span.set_extension("cluster", default=None)
spacy.tokens.Span.set_extension("singleton", default=True)
@contextlib.contextmanager
def smart_open(
    filename: str, mode: str = "r", *args, **kwargs
) -> ty.Generator[ty.IO, None, None]:
    """Open files and i/o streams transparently.

    A filename of ``"-"`` maps to stdin (read modes) or stdout (other
    modes) — or their binary ``.buffer`` for ``"b"`` modes — and is never
    closed.  Any other name is opened with :func:`open` and closed on exit.
    """
    must_close = filename != "-"
    if must_close:
        handle: ty.IO = open(filename, mode, *args, **kwargs)
    else:
        std_stream = sys.stdin if "r" in mode else sys.stdout
        handle = std_stream.buffer if "b" in mode else std_stream
    try:
        yield handle
    finally:
        if must_close:
            try:
                handle.close()
            except AttributeError:
                pass
@contextlib.contextmanager
def dir_manager(
    path: ty.Optional[ty.Union[pathlib.Path, str]] = None, cleanup=None
) -> ty.Generator[pathlib.Path, None, None]:
    """A context manager to deal with a directory, default to a self-destruct temp one.

    Args:
        path: directory to use (created if missing).  If None, a fresh
            temporary directory is created instead.
        cleanup: whether to delete the directory on exit.  Defaults to
            True for the implicit temporary directory and False for an
            explicit *path*.

    Yields:
        The directory as a resolved `pathlib.Path`.

    Raises:
        ValueError: if cleanup is requested for an explicit *path* that
            already contains entries.
    """
    if path is None:
        d_path = pathlib.Path(tempfile.mkdtemp())
        if cleanup is None:
            cleanup = True
    else:
        d_path = pathlib.Path(path).resolve()
        d_path.mkdir(parents=True, exist_ok=True)
        if cleanup is None:
            cleanup = False
        elif cleanup:
            # BUG FIX: the original tested `if d_path.glob("*"):` — glob()
            # returns a generator, which is *always* truthy, so requesting
            # cleanup on an existing directory raised ValueError even when
            # the directory was empty.  Check for actual entries instead.
            if any(d_path.iterdir()):
                raise ValueError(f"{d_path} is not empty.")
    try:
        yield d_path
    finally:
        if cleanup:
            shutil.rmtree(d_path)
class AntecedentFeaturesDict(TypedDict):
    """Pairwise features for one (mention, antecedent-candidate) pair.

    The ``*_distance`` fields are bucket indices produced by
    ``np.digitize`` over ``distance_buckets`` in
    ``antecedents_from_mentions`` — not raw distances.  The token ratios
    are scaled by 10 and truncated to int (range 0-10).
    """

    w_distance: int  # bucketed word distance: mention start - candidate end
    u_distance: int  # bucketed sentence/utterance distance
    m_distance: int  # bucketed count of intervening candidate mentions
    spk_agreement: bool  # both mentions have the same speaker
    overlap: bool  # mention starts before the candidate ends
    token_incl: int  # 10 * |intersection| / min(|mention|, |candidate|), truncated
    token_com: int  # 10 * |intersection| / |union| (Jaccard * 10), truncated
def antecedents_from_mentions(
    mentions: ty.Iterable[ty.Dict[str, ty.Any]],
    max_candidates: int = 128,
    distance_buckets: ty.Sequence[int] = (1, 2, 3, 4, 5, 7, 15, 32, 63),
) -> ty.Dict[str, ty.Dict[str, AntecedentFeaturesDict]]:
    """Extract an antecedent dataset from a list of detected mentions.

    For every mention (in document order) except the first, computes an
    ``AntecedentFeaturesDict`` against each of the up-to-*max_candidates*
    preceding mentions.  Distances are bucketed with ``np.digitize`` over
    *distance_buckets*.

    Returns:
        Mapping of mention span_id -> {candidate span_id -> features}.
        Empty when fewer than two mentions are given.
    """
    # Document order: by start position, ties broken by end position.
    sorted_mentions = sorted(mentions, key=lambda m: (m["start"], m["end"]))
    if len(sorted_mentions) < 2:
        return dict()
    # The first mention in a document has no antecedent candidates
    res = dict()
    for i, mention in enumerate(sorted_mentions[1:], start=1):
        mention_content_set = set(mention["content"])
        # Only the max_candidates mentions immediately preceding this one.
        antecedent_candidates = sorted_mentions[max(0, i - max_candidates) : i]
        antecedents: ty.Dict[str, AntecedentFeaturesDict] = dict()
        for j, candidate in enumerate(antecedent_candidates):
            candidate_content_set = set(candidate["content"])
            # Token (word index) distance between the two mentions.
            w_distance = int(
                np.digitize(
                    mention["start"] - candidate["end"],
                    bins=distance_buckets,
                    right=True,
                )
            )
            # Sentence/utterance distance (note: left-closed bins here,
            # unlike the other two digitize calls).
            u_distance = int(
                np.digitize(
                    mention["sentence"] - candidate["sentence"],
                    bins=distance_buckets,
                )
            )
            # How many candidate mentions lie between candidate and mention.
            m_distance: int = int(
                np.digitize(
                    len(antecedent_candidates) - j,
                    bins=distance_buckets,
                    right=True,
                )
            )
            spk_agreement = mention.get("speaker") == candidate.get("speaker")
            intersect = len(mention_content_set.intersection(candidate_content_set))
            # Token overlap scaled to 0-10 integers: inclusion ratio ...
            token_incl_ratio = int(
                10
                * intersect
                / min(len(mention_content_set), len(candidate_content_set))
            )
            # ... and Jaccard ("common") ratio.
            token_com_ratio = int(
                10 * intersect / len(mention_content_set.union(candidate_content_set))
            )
            overlap = mention["start"] < candidate["end"]
            antecedents[candidate["span_id"]] = {
                "w_distance": w_distance,
                "u_distance": u_distance,
                "m_distance": m_distance,
                "spk_agreement": spk_agreement,
                "overlap": overlap,
                "token_incl": token_incl_ratio,
                "token_com": token_com_ratio,
            }
        res[mention["span_id"]] = antecedents
    return res
def text_out(doc: spacy.tokens.Doc, latex: bool = False) -> str:
    """Render the document text with coreference mentions marked inline.

    Mentions come from ``doc._.clusters``.  In plain-text mode a mention is
    wrapped in ``[...]`` and, unless it is a singleton, followed by its
    ``[cluster-id]``; in LaTeX mode it becomes ``\\mention{...}`` or
    ``\\mention[cluster-id]{...}``.  Spans are sorted outermost-first
    (by start, longest first) so nested mentions open and close correctly.
    """
    mentions_spans = sorted(
        (m for i, c in doc._.clusters.items() for m in c),
        key=lambda m: (m.start_char, -m.end_char),
    )
    text = doc.text
    res = []  # output fragments, joined at the end
    open_spans: ty.List[spacy.tokens.Span] = []  # stack of not-yet-closed mentions
    current_char = 0  # next character of `text` not yet emitted
    for m in mentions_spans:
        # Close every open span that ends before this mention starts.
        while open_spans and open_spans[-1].end_char <= m.start_char:
            span_to_close = open_spans.pop()
            res.append(text[current_char : span_to_close.end_char])
            if span_to_close._.singleton:
                if latex:
                    res.append("}")
                else:
                    res.append("]")
            else:
                if latex:
                    res.append("}")
                else:
                    res.append(f"][{span_to_close._.cluster}]")
            current_char = span_to_close.end_char
        # Emit plain text between the last closed span and this mention.
        if current_char < m.start_char:
            res.append(text[current_char : m.start_char])
            current_char = m.start_char
        # Open the new mention marker.
        if latex:
            if m._.singleton:
                res.append(r"\mention{")
            else:
                res.append(f"\\mention[{m._.cluster}]{{")
        else:
            res.append("[")
        open_spans.append(m)
    # Close any spans still open at end of document.
    while open_spans:
        span_to_close = open_spans.pop()
        res.append(text[current_char : span_to_close.end_char])
        if span_to_close._.singleton:
            if latex:
                res.append("}")
            else:
                res.append("]")
        else:
            if latex:
                res.append("}")
            else:
                res.append(f"][{span_to_close._.cluster}]")
        current_char = span_to_close.end_char
    res.append(text[current_char:])  # trailing text after the last mention
    return "".join(res)
def mention_to_json(mention: spacy.tokens.Span) -> Dict[str, Any]:
    """Serialize a mention span into the Prodigy span-annotation shape."""
    return dict(
        text=mention.text,
        start=mention.start_char,
        token_start=mention.start,
        token_end=mention.end,
        end=mention.end_char,
        type="pattern",
        label="mention",
    )
def token_to_json(token: spacy.tokens.Token) -> Dict[str, Any]:
    """Serialize a single token into the Prodigy token shape."""
    return dict(
        text=token.text,
        start=token.idx,
        end=token.idx + len(token),
        id=token.i,
        ws=bool(token.whitespace_),
        disabled=False,
    )
def prodigy_out(doc: spacy.tokens.Doc) -> Dict[str, Any]:
    """Convert a document into a Prodigy-style dict (text, tokens, spans, relations).

    Prodigy does not allow nested/overlapping spans, so any mention that
    overlaps one already emitted is dropped.  Within each cluster the kept
    mentions (sorted by end, then start) are chained pairwise with "COREF"
    relations.
    """
    res = {
        "text": doc.text,
        "tokens": [token_to_json(t) for t in doc],
        "spans": [],
        "relations": [],
    }
    processed: List[spacy.tokens.Span] = []  # spans already emitted, for overlap checks
    for c in doc._.clusters.values():
        antecedent: Optional[spacy.tokens.Span] = None  # previous kept mention in this cluster
        for m in sorted(c, key=lambda m: (m.end, m.start)):
            # This because prodigy doesn't allow nested spans
            if any(
                o.start <= m.start <= o.end or o.start <= m.end <= o.end
                for o in processed
            ):
                continue
            res["spans"].append(mention_to_json(m))
            # Link this mention back to the previous one kept in the cluster.
            if antecedent is not None:
                res["relations"].append(
                    {
                        "head": m.start,
                        "child": antecedent.start,
                        "head_span": mention_to_json(m),
                        "child_span": mention_to_json(antecedent),
                        "label": "COREF",
                    }
                )
            antecedent = m
            processed.append(m)
    return res
def sacr_out(doc: spacy.tokens.Doc) -> str:
    """Serialize the document in SACR format.

    Each sentence/utterance is emitted with optional ``#speaker:`` and
    ``#uid:`` headers, and every mention fully contained in the sentence is
    wrapped as ``{cluster-id ...}``.  Sentences are separated by blank
    lines; empty sentences are dropped.
    """
    res = []  # one serialized string per sentence
    open_spans: ty.List[spacy.tokens.Span]
    # Prefer explicit utterance spans when present, else spaCy sentences.
    sents = doc.spans.get("utterances", doc.sents)
    for sentence in sents:
        sentence_res = []
        # FIXME: this relies on having imported avp, which sets these extensions in the global space
        # we need a better mechanism
        if sentence._.speaker is not None:
            sentence_res.append(f"#speaker: {sentence._.speaker}\n\n")
        if sentence._.uid is not None:
            sentence_res.append(f"#uid: {sentence._.uid}\n\n")
        # Mentions fully inside this sentence, outermost-first.
        mentions_spans = sorted(
            (
                m
                for i, c in doc._.clusters.items()
                for m in c
                if sentence.start_char <= m.start_char < m.end_char <= sentence.end_char
            ),
            key=lambda m: (m.start_char, -m.end_char),
        )
        text = sentence.text
        current_char = 0  # sentence-relative index of the next unemitted char
        open_spans: ty.List[spacy.tokens.Span] = []
        for m in mentions_spans:
            # TODO: stop fiddling with char indices ffs
            # Close spans that end before this mention starts.
            while open_spans and open_spans[-1].end_char <= m.start_char:
                span_to_close = open_spans.pop()
                sentence_res.append(
                    text[current_char : span_to_close.end_char - sentence.start_char]
                )
                sentence_res.append("}")
                current_char = span_to_close.end_char - sentence.start_char
            # Plain text between the previous span and this mention.
            # NOTE(review): current_char is sentence-relative while
            # m.start_char is document-absolute — confirm intended.
            if current_char < m.start_char:
                sentence_res.append(
                    text[current_char : m.start_char - sentence.start_char]
                )
                current_char = m.start_char - sentence.start_char
            sentence_res.append(f"{{{m._.cluster} ")
            open_spans.append(m)
        # Close any spans still open at sentence end.
        while open_spans:
            span_to_close = open_spans.pop()
            sentence_res.append(
                text[current_char : span_to_close.end_char - sentence.start_char]
            )
            sentence_res.append("}")
            current_char = span_to_close.end_char - sentence.start_char
        sentence_res.append(text[current_char:])
        res.append("".join(sentence_res).strip())
    return "\n\n".join((s for s in res if s and not s.isspace()))
@click.command(help="End-to-end coreference resolution")
@click.argument(
"detect-model",
type=click_pathlib.Path(exists=True, dir_okay=False),
)
@click.argument(
"coref-model",
type=click_pathlib.Path(exists=True, dir_okay=False),
)
@click.argument(
"input_file",
type=click.File("r"),
)
@click.argument(
"output_file",
type=click.File("w", atomic=True),
default="-",
)
@click.option(
"--from",
"input_format",
type=click.Choice(formats.keys()),
default="raw_text",
help="The input format",
show_default=True,
)
@click.option(
"--intermediary-dir",
"intermediary_dir_path",
type=click_pathlib.Path(resolve_path=True, file_okay=False),
help="A path to a directory to use for intermediary files, defaults to a self-destructing temp dir",
)
@click.option(
"--lang",
default="fr_core_news_lg",
help="A spaCy model handle for the document.",
show_default=True,
)
@click.option(
"--to",
"output_format",
type=click.Choice(["latex", "prodigy", "sacr", "text"]),
default="text",
help="Output formats (experimental)",
)
def main_entry_point(
    coref_model: pathlib.Path,
    detect_model: pathlib.Path,
    input_format: str,
    input_file: TextIO,
    intermediary_dir_path: Optional[pathlib.Path],
    lang: str,
    output_file: TextIO,
    output_format: Literal["latex", "prodigy", "sacr", "text"],
):
    """Run the full coreference pipeline and write the annotated document.

    Pipeline (intermediate JSON artifacts land in *intermediary_dir_path*,
    or a self-destructing temp dir when it is None):
    parse input -> detect mentions -> build antecedent features ->
    score antecedents -> clusterize -> attach clusters to the spaCy doc ->
    serialize in *output_format*.
    """
    with dir_manager(intermediary_dir_path) as intermediary_dir:
        # Parse the input into a spaCy doc plus candidate spans.
        doc, spans = formats[input_format].get_doc_and_spans(input_file, lang)
        initial_doc_path = intermediary_dir / "initial_doc.spacy.json"
        with open(initial_doc_path, "w") as out_stream:
            json.dump(doc.to_json(), out_stream, ensure_ascii=False)
        spans_path = intermediary_dir / "spans.json"
        with open(spans_path, "w") as out_stream:
            json.dump(spans, out_stream, ensure_ascii=False)
        # Mention detection (CLI-style invocation of the detmentions tool).
        mentions_path = intermediary_dir / "mentions.json"
        detmentions.main_entry_point(
            [
                "--mentions",
                "--no-overlap",
                str(detect_model),
                str(spans_path),
                str(mentions_path),
            ]
        )
        with open(mentions_path, "r") as in_stream:
            mentions_lst = json.load(in_stream)
        # Pairwise antecedent features for the scorer.
        antecedents = antecedents_from_mentions(mentions_lst)
        mention_dict = {m["span_id"]: m for m in mentions_lst}
        antecedents_path = intermediary_dir / "antecedents.json"
        with open(antecedents_path, "w") as out_stream:
            json.dump(
                {"mentions": mention_dict, "antecedents": antecedents},
                out_stream,
                ensure_ascii=False,
            )
        # Score antecedent candidates, then greedily clusterize.
        coref_scores_path = intermediary_dir / "coref_scores.json"
        score.main_entry_point(
            [str(coref_model), str(antecedents_path), str(coref_scores_path)]
        )
        clusters_path = intermediary_dir / "clusters.json"
        clusterize.main_entry_point([str(coref_scores_path), str(clusters_path)])
        with open(clusters_path, "r") as in_stream:
            clusters = json.load(in_stream)["clusters"]
        # Attach the clusters back onto the doc as Span extensions.
        doc._.clusters = dict()
        for i, c in clusters.items():
            doc._.clusters[i] = []
            for m_id in c:
                mention = mention_dict[m_id]
                # end is inclusive in the mentions file, hence the +1.
                mention_span = doc[mention["start"] : mention["end"] + 1]
                mention_span._.cluster = i
                if len(c) > 1:
                    mention_span._.singleton = False
                doc._.clusters[i].append(mention_span)
        augmented_doc_path = intermediary_dir / "coref_doc.spacy.json"
        with open(augmented_doc_path, "w") as out_stream:
            json.dump(doc.to_json(), out_stream, ensure_ascii=False)
    # Serialize in the requested output format ("text" is the fallback).
    if output_format == "latex":
        output_file.write(text_out(doc, latex=True))
        output_file.write("\n")
    elif output_format == "prodigy":
        output_dict = prodigy_out(doc)
        writer = jsonlines.Writer(output_file)
        writer.write(output_dict)
        writer.close()
    elif output_format == "sacr":
        output_file.write(sacr_out(doc))
    else:
        output_file.write(text_out(doc))
        output_file.write("\n")
main_entry_point()
| 8,795 | 160 | 160 |
82b98750a07e2cf2bdd895c8151af601bd05d953 | 1,233 | py | Python | lib/galaxy/model/migrate/versions/0124_job_state_history.py | lawrence14701/galaxy | 7eb2fcb708e7b63e17800c87613ddfa5497c0654 | [
"CC-BY-3.0"
] | 2 | 2017-03-28T12:11:41.000Z | 2017-04-22T02:58:25.000Z | lib/galaxy/model/migrate/versions/0124_job_state_history.py | lawrence14701/galaxy | 7eb2fcb708e7b63e17800c87613ddfa5497c0654 | [
"CC-BY-3.0"
] | 12 | 2020-07-24T23:55:19.000Z | 2021-12-19T11:40:06.000Z | lib/galaxy/model/migrate/versions/0124_job_state_history.py | lawrence14701/galaxy | 7eb2fcb708e7b63e17800c87613ddfa5497c0654 | [
"CC-BY-3.0"
] | 1 | 2018-05-30T07:38:54.000Z | 2018-05-30T07:38:54.000Z | """
Migration script for the job state history table
"""
from __future__ import print_function
import datetime
import logging
from sqlalchemy import Column, DateTime, ForeignKey, Integer, MetaData, String, Table
from galaxy.model.custom_types import TrimmedString
from galaxy.model.migrate.versions.util import create_table, drop_table
now = datetime.datetime.utcnow
log = logging.getLogger(__name__)
metadata = MetaData()
JobStateHistory_table = Table("job_state_history", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("job_id", Integer, ForeignKey("job.id"), index=True),
Column("state", String(64), index=True),
Column("info", TrimmedString(255)))
| 30.825 | 90 | 0.660989 | """
Migration script for the job state history table
"""
from __future__ import print_function
import datetime
import logging
from sqlalchemy import Column, DateTime, ForeignKey, Integer, MetaData, String, Table
from galaxy.model.custom_types import TrimmedString
from galaxy.model.migrate.versions.util import create_table, drop_table
now = datetime.datetime.utcnow
log = logging.getLogger(__name__)
metadata = MetaData()
JobStateHistory_table = Table("job_state_history", metadata,
Column("id", Integer, primary_key=True),
Column("create_time", DateTime, default=now),
Column("update_time", DateTime, default=now, onupdate=now),
Column("job_id", Integer, ForeignKey("job.id"), index=True),
Column("state", String(64), index=True),
Column("info", TrimmedString(255)))
def upgrade(migrate_engine):
    """Create the job_state_history table on the given engine."""
    print(__doc__)
    metadata.bind = migrate_engine
    metadata.reflect()  # load the existing schema so FKs (job.id) resolve
    create_table(JobStateHistory_table)
def downgrade(migrate_engine):
    """Drop the job_state_history table (reverses upgrade)."""
    metadata.bind = migrate_engine
    metadata.reflect()
    drop_table(JobStateHistory_table)
| 231 | 0 | 46 |
d38193783eb4aa254cd51a4fcd972a92d001a8ae | 3,595 | py | Python | deploy_dst.py | CiscoDevNet/dst-automation | dffcde76f1bd7dc4dd4350c7a224f8ad9679ad4a | [
"BSD-3-Clause"
] | 4 | 2020-04-28T16:38:18.000Z | 2021-06-09T08:45:24.000Z | deploy_dst.py | CiscoDevNet/dst-automation | dffcde76f1bd7dc4dd4350c7a224f8ad9679ad4a | [
"BSD-3-Clause"
] | 6 | 2020-11-04T16:35:42.000Z | 2021-04-25T13:38:56.000Z | deploy_dst.py | CiscoDevNet/dst-automation | dffcde76f1bd7dc4dd4350c7a224f8ad9679ad4a | [
"BSD-3-Clause"
] | 3 | 2020-05-13T22:43:50.000Z | 2021-05-01T22:30:33.000Z | #!/usr/bin/env python3
"""
Deploy DST configuration using Ansible.
Copyright (c) 2020, Copyright (c) 2020, Cisco Systems, Inc. or its affiliates
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import print_function
from builtins import input
from dst_topology import DSTTopology
import argparse
import sys
import subprocess
from dst_utils import *
import time
import tempfile
import os
import re
from yaml import load, dump
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
if __name__ == "__main__":
main()
| 32.387387 | 122 | 0.708484 | #!/usr/bin/env python3
"""
Deploy DST configuration using Ansible.
Copyright (c) 2020, Copyright (c) 2020, Cisco Systems, Inc. or its affiliates
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import print_function
from builtins import input
from dst_topology import DSTTopology
import argparse
import sys
import subprocess
from dst_utils import *
import time
import tempfile
import os
import re
from yaml import load, dump
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
def main():
    """CLI entry point: read the YAML config and deploy DST via Ansible.

    Exits with status 1 when the config file is missing, the Ansible run
    fails, or post-run cleanup fails.
    """
    conf = None
    args = None
    msg = None
    parser = argparse.ArgumentParser(prog=sys.argv[0], description="Deploy Dynamic Split Tunneling to a set of firewalls")
    parser.add_argument(
        "--config",
        "-c",
        metavar="<CONFIG FILE>",
        help="Path to the configuration file; default: config.yaml in the current directory",
        default="config.yaml",
    )
    args = parser.parse_args()
    if not os.path.exists(args.config):
        print("ERROR: Config file {} does not exist!".format(args.config))
        sys.exit(1)
    with open(args.config, "r") as fd:
        conf = load(fd, Loader=Loader)
    # Validate the "production" section of the config before touching devices.
    check_sections("production", conf)
    check_vars("production", conf)
    # Build the Ansible inventory and variable set from the config.
    inv = build_ansible_inventory(config=conf)
    avars = build_ansible_vars(conf, "production")
    msg = "Running Ansible to deploy DST config to production..."
    os.environ["ANSIBLE_CONFIG"] = os.getcwd() + "/ansible/dst.ansible.cfg"
    os.environ["ANSIBLE_HOST_KEY_CHECKING"] = "False"
    with Spinner(msg):
        try:
            run_ansible_command("dst-playbook.yaml", inv, avars, skip_tags="test")
        except Exception as e:
            print("")
            print("ERROR: {}".format(e))
            # Best-effort cleanup (presumably removes the generated
            # inventory/vars artifacts); the deployment error is what matters.
            try:
                cleanup(inv=inv, avars=avars)
            except:
                pass
            sys.exit(1)
    done(msg)
    try:
        cleanup(inv=inv, avars=avars)
    except Exception as e:
        print("")
        print("WARNING: Failed to cleanup after deployment: {}".format(e))
        sys.exit(1)
if __name__ == "__main__":
main()
| 1,547 | 0 | 23 |
d80b042bdf0e393b2fc4472e860104feb09f75f7 | 647 | py | Python | list_class.py | jmb462/rst-to-mediawiki | 2d32f1659593bf6a49668a471f091450526201b9 | [
"MIT"
] | null | null | null | list_class.py | jmb462/rst-to-mediawiki | 2d32f1659593bf6a49668a471f091450526201b9 | [
"MIT"
] | null | null | null | list_class.py | jmb462/rst-to-mediawiki | 2d32f1659593bf6a49668a471f091450526201b9 | [
"MIT"
] | null | null | null | #######################################################################
# Godot RST File to MediaWiki converter #
#######################################################################
import re
import pandas as pd
import sys
source=sys.argv[1]
with open(source) as file:
file_contents = file.read()
class_name=file_contents.splitlines()[8]
print("<tr><td><a target=_blank href='http://godotestarrive.ovh/index.php?title="+class_name+"_GD&action=edit'>Wiki "+class_name+"</a></td>")
print("<td><a target=_new href='mw/"+class_name+".mw'>"+class_name+" MW File</a></td></tr>")
| 34.052632 | 146 | 0.48068 | #######################################################################
# Godot RST File to MediaWiki converter #
#######################################################################
import re  # NOTE(review): unused in this script
import pandas as pd  # NOTE(review): unused import; candidate for removal
import sys
# First CLI argument: path to a Godot RST class file.
source=sys.argv[1]
with open(source) as file:
    file_contents = file.read()
# Line 9 (index 8) of the RST file holds the class name -- TODO confirm this
# holds for every generated class file.
class_name=file_contents.splitlines()[8]
# Emit one HTML table row: a link to the wiki edit page and to the .mw file.
print("<tr><td><a target=_blank href='http://godotestarrive.ovh/index.php?title="+class_name+"_GD&action=edit'>Wiki "+class_name+"</a></td>")
print("<td><a target=_new href='mw/"+class_name+".mw'>"+class_name+" MW File</a></td></tr>")
| 0 | 0 | 0 |
e6443582a8702368766317174ba425fb2305a214 | 2,261 | py | Python | code/lab2_code/two_collide.py | YuYanzy/Lab_ECE_5725 | 1eb0ebbd3e1aa00a850f6d5ad7f3676386e2fb1b | [
"Apache-2.0"
] | null | null | null | code/lab2_code/two_collide.py | YuYanzy/Lab_ECE_5725 | 1eb0ebbd3e1aa00a850f6d5ad7f3676386e2fb1b | [
"Apache-2.0"
] | null | null | null | code/lab2_code/two_collide.py | YuYanzy/Lab_ECE_5725 | 1eb0ebbd3e1aa00a850f6d5ad7f3676386e2fb1b | [
"Apache-2.0"
] | null | null | null | # Yu Zhang yz2729
# Lab 2 Date: 09/23/21
import RPi.GPIO as GPIO
import pygame # Import pygame graphics library
import time
import os # for OS calls
CODERUN = True
GPIO.setmode(GPIO.BCM)
GPIO.setup(17,GPIO.IN,pull_up_down = GPIO.PUD_UP)
def GPIO17_callback(channel):
    """Falling-edge handler for GPIO 17 (piTFT button).

    Clears the module-level ``CODERUN`` flag so the main animation loop
    exits cleanly; ``channel`` is supplied by RPi.GPIO and unused.
    """
    # BUGFIX: this callback was referenced below but never defined, so the
    # script raised NameError at startup; restored from the original source.
    global CODERUN
    CODERUN = False

GPIO.add_event_detect(17, GPIO.FALLING, callback=GPIO17_callback, bouncetime=300)
# Environment Setting
os.putenv('SDL_VIDEODRIVER', 'fbcon') # Display on piTFT
os.putenv('SDL_FBDEV', '/dev/fb0')
pygame.init()
# Screen Setting
size = (width, height) = (320, 240)
# size = (width, height) = (800, 800)
screen = pygame.display.set_mode(size)
black = 0, 0, 0
FPS = 40
clock = pygame.time.Clock()
# Big Ball
speed_big = [1,1]
ball_big = pygame.image.load("magic_ball.png")
ballrect_big = ball_big.get_rect()
ballrect_big.left = 192
ballrect_big.bottom = 128
# Small Ball
speed_small = [-2,-2]
ball_small = pygame.image.load("soccer-ball.png")
ballrect_small = ball_small.get_rect()
ballrect_small.right = 50
ballrect_small.bottom = 240
start_time = time.time()
while (time.time() - start_time <= 360) and CODERUN:
# time.sleep(0.02)
clock.tick(FPS)
ballrect_big = ballrect_big.move(speed_big)
if ballrect_big.left < 0 or ballrect_big.right > width:
speed_big[0] = -speed_big[0]
if ballrect_big.top < 0 or ballrect_big.bottom > height:
speed_big[1] = -speed_big[1]
ballrect_small= ballrect_small.move(speed_small)
if ballrect_small.left < 0 or ballrect_small.right > width:
speed_small[0] = -speed_small[0]
if ballrect_small.top < 0 or ballrect_small.bottom > height:
speed_small[1] = -speed_small[1]
if ballrect_big.colliderect(ballrect_small):
# tmp = speed_big
speed_big[0] = - speed_big[0]
speed_big[1] = - speed_big[1]
speed_small[0] = - speed_small[0]
speed_small[1] = - speed_small[1]
screen.fill(black) # Erase the Work space
screen.blit(ball_big, ballrect_big) # Combine Ball surface with workspace surface
screen.blit(ball_small, ballrect_small)
pygame.display.flip() # display workspace on screen
GPIO.cleanup() | 30.972603 | 87 | 0.673596 | # Yu Zhang yz2729
# Lab 2 Date: 09/23/21
import RPi.GPIO as GPIO
import pygame # Import pygame graphics library
import time
import os # for OS calls
CODERUN = True
GPIO.setmode(GPIO.BCM)
GPIO.setup(17,GPIO.IN,pull_up_down = GPIO.PUD_UP)
def GPIO17_callback(channel):
    """Falling-edge handler for GPIO 17 (piTFT button).

    Clears the module-level ``CODERUN`` flag so the main animation loop
    terminates; ``channel`` is supplied by RPi.GPIO and unused here.
    """
    global CODERUN
    CODERUN = False
# Environment Setting
os.putenv('SDL_VIDEODRIVER', 'fbcon') # Display on piTFT
os.putenv('SDL_FBDEV', '/dev/fb0')
pygame.init()
# Screen Setting
size = (width, height) = (320, 240)
# size = (width, height) = (800, 800)
screen = pygame.display.set_mode(size)
black = 0, 0, 0
FPS = 40
clock = pygame.time.Clock()
# Big Ball
speed_big = [1,1]
ball_big = pygame.image.load("magic_ball.png")
ballrect_big = ball_big.get_rect()
ballrect_big.left = 192
ballrect_big.bottom = 128
# Small Ball
speed_small = [-2,-2]
ball_small = pygame.image.load("soccer-ball.png")
ballrect_small = ball_small.get_rect()
ballrect_small.right = 50
ballrect_small.bottom = 240
start_time = time.time()
while (time.time() - start_time <= 360) and CODERUN:
# time.sleep(0.02)
clock.tick(FPS)
ballrect_big = ballrect_big.move(speed_big)
if ballrect_big.left < 0 or ballrect_big.right > width:
speed_big[0] = -speed_big[0]
if ballrect_big.top < 0 or ballrect_big.bottom > height:
speed_big[1] = -speed_big[1]
ballrect_small= ballrect_small.move(speed_small)
if ballrect_small.left < 0 or ballrect_small.right > width:
speed_small[0] = -speed_small[0]
if ballrect_small.top < 0 or ballrect_small.bottom > height:
speed_small[1] = -speed_small[1]
if ballrect_big.colliderect(ballrect_small):
# tmp = speed_big
speed_big[0] = - speed_big[0]
speed_big[1] = - speed_big[1]
speed_small[0] = - speed_small[0]
speed_small[1] = - speed_small[1]
screen.fill(black) # Erase the Work space
screen.blit(ball_big, ballrect_big) # Combine Ball surface with workspace surface
screen.blit(ball_small, ballrect_small)
pygame.display.flip() # display workspace on screen
GPIO.cleanup() | 49 | 0 | 22 |
881a2e37024566fd7ac36f53a1f9885400a09b73 | 9,151 | py | Python | uplift/tree/_tree.py | Antiguru11/uplift | ffaa5bcd20d6aa264fa3b2d191327c86384e4f7c | [
"Apache-2.0"
] | null | null | null | uplift/tree/_tree.py | Antiguru11/uplift | ffaa5bcd20d6aa264fa3b2d191327c86384e4f7c | [
"Apache-2.0"
] | null | null | null | uplift/tree/_tree.py | Antiguru11/uplift | ffaa5bcd20d6aa264fa3b2d191327c86384e4f7c | [
"Apache-2.0"
] | null | null | null | from abc import ABCMeta, abstractmethod
import numpy as np
_epsilon = np.finfo('double').eps
| 32.335689 | 82 | 0.460059 | from abc import ABCMeta, abstractmethod
import numpy as np
_epsilon = np.finfo('double').eps
class Tree():
    """Binary uplift tree stored as a flat list of node records.

    Each entry of ``self.nodes`` is a list laid out as::

        [node_id, parent_id, left_child_id, right_child_id,
         value, gain, feature, threshold,
         n_treatments, n_control, uplift]

    Leaf ids are additionally collected in ``self.leaf_ids`` so that
    prediction can walk each leaf-to-root path.
    """
    def __init__(self, n_groups):
        # Number of treatment groups: width of the uplift matrix
        # returned by ``apply``.
        self.n_groups = n_groups
        self.reset()
    def reset(self):
        """Discard all nodes, leaving an empty tree."""
        self.nodes = list()
        self.leaf_ids = list()
    def add_node(self,
                 parent, is_left, is_leaf,
                 value, gain,
                 split, stats) -> int:
        """Append a node record and wire it to its parent.

        ``split`` is ``(feature, threshold)`` and ``stats`` is
        ``(n_treatments, n_control, uplift)``.  Returns the new node id.
        """
        node_id = len(self.nodes)
        # Child-id slots (indices 2 and 3) are filled in later, when the
        # children themselves are added.
        self.nodes.append([node_id, parent, None, None,
                           value, gain,
                           split[0], split[1],
                           stats[0], stats[1], stats[2]])
        if parent is not None:
            if is_left:
                self.nodes[parent][2] = node_id
            else:
                self.nodes[parent][3] = node_id
        if is_leaf:
            self.leaf_ids.append(node_id)
        return node_id
    def apply(self, X) -> np.ndarray:
        """Route every row of ``X`` to a leaf; return per-row uplift.

        Output shape is ``(n_samples, n_groups)``.  Rows matched by no
        leaf (which should not occur for a complete tree) remain NaN.
        """
        n_samples, _ = X.shape
        uplift = np.full((n_samples, self.n_groups), np.nan)
        for leaf_id in self.leaf_ids:
            # Build the leaf's sample mask by AND-ing the routing decision
            # of every ancestor on the path from the leaf up to the root.
            mask = np.full(X.shape[0], True, dtype=bool)
            parent_id = self.nodes[leaf_id][1]
            child_id = leaf_id
            while parent_id is not None:
                mask &= self._apply_node(X, parent_id, child_id)
                parent_id, child_id = self.nodes[parent_id][1], parent_id
            uplift[mask, :] = np.array(self.nodes[leaf_id][-1])
        return uplift
    def _apply_node(self, X, parent_id, child_id):
        """Mask of rows that node ``parent_id`` sends towards ``child_id``.

        A NaN threshold encodes a split on missingness itself (left side
        gets the missing values).  For an ordinary numeric split, rows
        with a missing feature value are routed to the child whose node
        value is larger; exact ties go to the left child.
        """
        is_left = self.nodes[parent_id][2] == child_id
        feature, threshold = self.nodes[parent_id][6:8]
        Xi = X[:, feature]
        if np.isnan(threshold):
            mask = np.isnan(Xi) if is_left else ~np.isnan(Xi)
        else:
            mask = Xi <= threshold if is_left else Xi > threshold
            if np.isnan(Xi).any():
                other_child_id = self.nodes[parent_id][2 if is_left else 3]
                a_value = self.nodes[child_id][4]
                b_value = self.nodes[other_child_id][4]
                if (a_value > b_value
                        or (np.abs(a_value - b_value) <= _epsilon and is_left)):
                    mask |= np.isnan(Xi)
        return mask
class TreeBuilder(metaclass=ABCMeta):
    """Abstract base for uplift-tree construction strategies.

    Stores the shared stopping-rule parameters; concrete subclasses
    implement ``build`` with a particular node-expansion order.
    """
    @abstractmethod
    def __init__(self,
                 splitter,
                 max_depth: int,
                 min_samples_split: int,
                 min_samples_leaf: int,
                 min_samples_leaf_treated: int,
                 min_samples_leaf_control: int,
                 max_leaf_nodes: int, ):
        # Abstract so the base class cannot be instantiated directly;
        # subclasses still invoke it via super().__init__().
        self.splitter = splitter
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_samples_leaf_treated = min_samples_leaf_treated
        self.min_samples_leaf_control = min_samples_leaf_control
        self.max_leaf_nodes = max_leaf_nodes
    @abstractmethod
    def build(self, tree, X, y, w, groups):
        """Populate ``tree`` from training data; implemented by subclasses."""
        pass
class DepthFirstTreeBuilder(TreeBuilder):
    """Grow an uplift tree depth-first.

    Nodes are expanded from a LIFO stack until every branch hits a
    stopping rule (max depth, minimum sample counts, or non-positive
    split gain).  There is no leaf-count limit, so the base class's
    ``max_leaf_nodes`` slot is set to ``None``.
    """
    def __init__(self,
                 splitter,
                 max_depth,
                 min_samples_split,
                 min_samples_leaf,
                 min_samples_leaf_treated,
                 min_samples_leaf_control):
        super().__init__(splitter,
                         max_depth,
                         min_samples_split,
                         min_samples_leaf,
                         min_samples_leaf_treated,
                         min_samples_leaf_control,
                         None,)
    def build(self, tree, X, y, w, groups):
        """Populate ``tree`` from training data ``X, y, w, groups``.

        The tree is reset first, so any previous structure is discarded.
        """
        tree.reset()
        self.splitter.initialize(X, y, w, groups)
        stack = list()
        # Each stack item: (sample mask, depth, parent id, is-left flag,
        # node value, (n_treatments, n_control, uplift)).
        stack.append((np.full_like(y, True, dtype=bool),
                      0,
                      None,
                      True,
                      None,
                      (None, None, None),))
        is_first = True
        while len(stack) != 0:
            item = stack.pop()
            gain = None
            feature, threshold = None, None
            (idx, depth, parent, is_left,
             value, stats,) = item
            (n_treatments,
             n_control,
             uplift, ) = stats
            if is_first:
                # Root statistics are unknown; compute them once.
                # BUGFIX: this line used to read ``is_first == False`` --
                # a no-op comparison -- so statistics were recomputed for
                # every node instead of reusing the values pushed on the
                # stack by the parent's split.  Assumes the splitter's
                # split() returns child value/stats consistent with
                # node_value() -- confirm against the splitter contract.
                is_first = False
                (value,
                 n_treatments,
                 n_control,
                 uplift, ) = self.splitter.node_value(idx)
            # Pre-split stopping rules.
            is_leaf = (depth >= self.max_depth
                       or (sum(n_treatments) + n_control) < self.min_samples_split
                       or (sum(n_treatments) + n_control) < self.min_samples_leaf
                       or min(n_treatments) < self.min_samples_leaf_treated
                       or n_control < self.min_samples_leaf_control)
            if not is_leaf:
                gain, split = self.splitter.split(idx, value)
                ((feature, threshold),
                 (idx_left,
                  value_left,
                  stats_left,),
                 (idx_right,
                  value_right,
                  stats_right,)) = split
                # A split that does not improve the criterion makes a leaf.
                is_leaf = is_leaf or gain <= _epsilon
            node_id = tree.add_node(parent, is_left, is_leaf,
                                    value, gain, (feature, threshold),
                                    (n_treatments, n_control, uplift))
            if is_leaf:
                continue
            stack.append((idx_left,
                          depth + 1,
                          node_id,
                          True,
                          value_left,
                          stats_left,))
            stack.append((idx_right,
                          depth + 1,
                          node_id,
                          False,
                          value_right,
                          stats_right,))
class BestFirstTreeBuilder(TreeBuilder):
    """Grow an uplift tree best-first.

    At every step the pending node with the highest value is expanded,
    and at most ``max_leaf_nodes - 1`` internal (split) nodes are
    created, so the finished tree has at most ``max_leaf_nodes`` leaves.
    """
    def __init__(self,
                 splitter,
                 max_depth,
                 min_samples_split,
                 min_samples_leaf,
                 min_samples_leaf_treated,
                 min_samples_leaf_control,
                 max_leaf_nodes, ):
        super().__init__(splitter,
                         max_depth,
                         min_samples_split,
                         min_samples_leaf,
                         min_samples_leaf_treated,
                         min_samples_leaf_control,
                         max_leaf_nodes,)
    def build(self, tree, X, y, w, groups):
        """Populate ``tree`` from training data ``X, y, w, groups``."""
        tree.reset()
        self.splitter.initialize(X, y, w, groups)
        stack = list()
        # Each pending item: (sample mask, depth, parent id, is-left flag,
        # node value, (n_treatments, n_control, uplift)).
        stack.append((np.full_like(y, True, dtype=bool),
                      0,
                      None,
                      True,
                      None,
                      (None, None, None),))
        is_first = True
        max_split_nodes = self.max_leaf_nodes - 1
        while len(stack) != 0 and max_split_nodes >= 0:
            # Best-first order: expand the pending node with the largest
            # value next.
            next_id = np.array([si[4] for si in stack]).argmax()
            item = stack.pop(next_id)
            gain = None
            feature, threshold = None, None
            (idx, depth, parent, is_left,
             value, stats,) = item
            (n_treatments,
             n_control,
             uplift, ) = stats
            if is_first:
                # Root statistics are unknown; compute them once.
                # BUGFIX: this line used to read ``is_first == False`` --
                # a no-op comparison -- so statistics were recomputed for
                # every node instead of reusing the values pushed on the
                # stack by the parent's split.  Assumes the splitter's
                # split() returns child value/stats consistent with
                # node_value() -- confirm against the splitter contract.
                is_first = False
                (value,
                 n_treatments,
                 n_control,
                 uplift, ) = self.splitter.node_value(idx)
            # Pre-split stopping rules (including the leaf budget).
            is_leaf = (depth >= self.max_depth
                       or max_split_nodes <= 0
                       or (sum(n_treatments) + n_control) < self.min_samples_split
                       or (sum(n_treatments) + n_control) < self.min_samples_leaf
                       or min(n_treatments) < self.min_samples_leaf_treated
                       or n_control < self.min_samples_leaf_control)
            if not is_leaf:
                gain, split = self.splitter.split(idx, value)
                ((feature, threshold),
                 (idx_left,
                  value_left,
                  stats_left,),
                 (idx_right,
                  value_right,
                  stats_right,)) = split
                # A split that does not improve the criterion makes a leaf.
                is_leaf = is_leaf or gain <= _epsilon
            node_id = tree.add_node(parent, is_left, is_leaf,
                                    value, gain, (feature, threshold),
                                    (n_treatments, n_control, uplift))
            if is_leaf:
                continue
            stack.append((idx_left,
                          depth + 1,
                          node_id,
                          True,
                          value_left,
                          stats_left,))
            stack.append((idx_right,
                          depth + 1,
                          node_id,
                          False,
                          value_right,
                          stats_right,))
            max_split_nodes -= 1
| 8,579 | 140 | 332 |
791006d4cf920590caf78ae89fb7b395283728f7 | 1,778 | py | Python | mysql_autoxtrabackup/utils/mysql_cli.py | icy1900/MySQL-AutoXtraBackup | dfdf86ba4d1fe15a35cececa4934cb7f247e448f | [
"MIT"
] | 134 | 2015-04-17T15:05:13.000Z | 2022-01-06T20:51:37.000Z | mysql_autoxtrabackup/utils/mysql_cli.py | icy1900/MySQL-AutoXtraBackup | dfdf86ba4d1fe15a35cececa4934cb7f247e448f | [
"MIT"
] | 316 | 2015-04-22T07:40:46.000Z | 2021-11-08T12:09:02.000Z | mysql_autoxtrabackup/utils/mysql_cli.py | icy1900/MySQL-AutoXtraBackup | dfdf86ba4d1fe15a35cececa4934cb7f247e448f | [
"MIT"
] | 80 | 2015-04-30T19:25:24.000Z | 2021-11-09T10:32:54.000Z | # This file will consist of some wrapper for using MySQL
# It is mainly used for preparing and calling mysql cli
import logging
from mysql_autoxtrabackup.general_conf import path_config
from mysql_autoxtrabackup.general_conf.generalops import GeneralClass
from mysql_autoxtrabackup.process_runner.process_runner import ProcessRunner
logger = logging.getLogger(__name__)
| 40.409091 | 79 | 0.669291 | # This file will consist of some wrapper for using MySQL
# It is mainly used for preparing and calling mysql cli
import logging
from mysql_autoxtrabackup.general_conf import path_config
from mysql_autoxtrabackup.general_conf.generalops import GeneralClass
from mysql_autoxtrabackup.process_runner.process_runner import ProcessRunner
logger = logging.getLogger(__name__)
class MySQLClientHelper:
    """Thin wrapper that builds and runs ``mysql`` CLI invocations.

    Connection options are read (by composition, via GeneralClass) from
    the given config file.
    """
    def __init__(self, config: str = path_config.config_path_file):
        self.conf = config
        # Composition instead of inheritance: delegate option parsing.
        options_obj = GeneralClass(config=self.conf)
        self.mysql_options = options_obj.mysql_options

    def create_mysql_client_command(self, statement: str) -> str:
        """Assemble the full mysql command line executing *statement*.

        Uses a UNIX socket when ``mysql_socket`` is configured, otherwise
        falls back to host/port.
        """
        opts = self.mysql_options
        base = "{} --defaults-file={} -u{} --password={}".format(
            opts.get("mysql"),
            opts.get("mycnf"),
            opts.get("mysql_user"),
            opts.get("mysql_password"),
        )
        socket = opts.get("mysql_socket")
        if socket:
            base += " --socket={}"
            connection = base.format(socket)
        else:
            base += " --host={} --port={}"
            connection = base.format(opts.get("mysql_host"),
                                     opts.get("mysql_port"))
        command_execute = ' -e "{}"'
        full_command = connection + command_execute
        return full_command.format(statement)

    def mysql_run_command(self, statement: str) -> bool:
        """Run *statement* through the mysql client; return its success flag."""
        command = self.create_mysql_client_command(statement=statement)
        return ProcessRunner.run_command(command)
| 1,299 | 3 | 103 |
98e6f77a0bdec0402472f82795c8851852f675eb | 817 | py | Python | ExerciciosPython/Analisador de Lista.py | NathanSoares25/ExerciciosPython | 718a47ad26690355807b4ec1412e386b02acd098 | [
"MIT"
] | 2 | 2021-03-19T00:27:55.000Z | 2021-03-24T01:24:08.000Z | ExerciciosPython/Analisador de Lista.py | NathanSoares25/ExerciciosPython | 718a47ad26690355807b4ec1412e386b02acd098 | [
"MIT"
] | null | null | null | ExerciciosPython/Analisador de Lista.py | NathanSoares25/ExerciciosPython | 718a47ad26690355807b4ec1412e386b02acd098 | [
"MIT"
] | null | null | null | # Faça um programa que leia 5 valores e guarde-os em uma lista, no final mostre qual é o maior e o menor valor e qual
# and their positions in the list
valores = list() # create the list
for c in range(0, 5):
    valores.append(int(input(f'Digite um valor para a posição {c}: '))) # read the numbers into the list
print(f'Os valores digitadores foram {valores}') # echo the numbers that were entered
print(f'O maior valor digitado foi {max(valores)} na posição: ', end='') # largest value and its position(s)
for posição in range(0, 5):
    if valores[posição] == max(valores):
        print(posição, end=' ')
print(f'\nO menor valor digitado foi {min(valores)} na posição: ', end='') # smallest value and its position(s)
for posição in range(0, 5):
    if valores[posição] == min(valores):
        print(posição, end=' ')
| 40.85 | 117 | 0.684211 | # Faça um programa que leia 5 valores e guarde-os em uma lista, no final mostre qual é o maior e o menor valor e qual
# A sua posição na lista
valores = list() # cria uma lista
for c in range(0, 5):
valores.append(int(input(f'Digite um valor para a posição {c}: '))) # inserindo os números dentro da lista
print(f'Os valores digitadores foram {valores}') # lista com os números inseridos
print(f'O maior valor digitado foi {max(valores)} na posição: ', end='') # maior valor e a posição do mesmo
for posição in range(0, 5):
if valores[posição] == max(valores):
print(posição, end=' ')
print(f'\nO menor valor digitado foi {min(valores)} na posição: ', end='') # menor valor e a posição do mesmo
for posição in range(0, 5):
if valores[posição] == min(valores):
print(posição, end=' ')
| 0 | 0 | 0 |
ab724ee8ffa9e2aff66290fce867acf596434d97 | 2,430 | py | Python | scripts/sources/S_EllipsoidTestSVI.py | dpopadic/arpmRes | ddcc4de713b46e3e9dcb77cc08c502ce4df54f76 | [
"MIT"
] | 6 | 2021-04-10T13:24:30.000Z | 2022-03-26T08:20:42.000Z | scripts/sources/S_EllipsoidTestSVI.py | dpopadic/arpmRes | ddcc4de713b46e3e9dcb77cc08c502ce4df54f76 | [
"MIT"
] | null | null | null | scripts/sources/S_EllipsoidTestSVI.py | dpopadic/arpmRes | ddcc4de713b46e3e9dcb77cc08c502ce4df54f76 | [
"MIT"
] | 6 | 2019-08-13T22:02:17.000Z | 2022-02-09T17:49:12.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_EllipsoidTestSVI [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_EllipsoidTestSVI&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ExerSVIiid).
# ## Prepare the environment
# +
import os
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
from numpy import diff
from scipy.io import loadmat
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
plt.style.use('seaborn')
from CONFIG import GLOBAL_DB, TEMPORARY_DB
from ARPM_utils import save_plot
from autocorrelation import autocorrelation
from InvarianceTestEllipsoid import InvarianceTestEllipsoid
# -
# ## Load the database generated by script S_FitSVI
# +
try:
db = loadmat(os.path.join(GLOBAL_DB, 'db_FitSVI'), squeeze_me=True)
except FileNotFoundError:
db = loadmat(os.path.join(TEMPORARY_DB, 'db_FitSVI'), squeeze_me=True)
theta = db['theta']
# -
# ## Compute increments and autocorrelations
# +
lag_ = 10
# preallocating variables
delta_theta = {}
acf_delta_theta = {}
for k in range(6):
delta_theta[k] = diff(theta[[k],:]) # increments
acf_delta_theta[k] = autocorrelation(delta_theta[k], lag_) # autocorrelations
# -
# ## IID test for SVI parameters
# +
lag = 10 # lag to be printed
ell_scale = 2 # ellipsoid radius coefficient
fit = 0 # fitting
pos = [] # use default settings for plot positions
# names of figures
name = {}
name[0]=r'Invariance test(increments of $\theta_1$)'
name[1]=r'Invariance test(increments of $\theta_2$)'
name[2]=r'Invariance test(increments of $\theta_3$)'
name[3]=r'Invariance test(increments of $\theta_4$)'
name[4]=r'Invariance test(increments of $\theta_5$)'
name[5]=r'Invariance test(increments of $\theta_6$)'
for k in range(6):
f = figure(figsize=(12,6))
InvarianceTestEllipsoid(delta_theta[k], acf_delta_theta[k][0,1:], lag, fit, ell_scale, pos, name[k]);
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
| 27.613636 | 207 | 0.70535 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_EllipsoidTestSVI [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_EllipsoidTestSVI&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ExerSVIiid).
# ## Prepare the environment
# +
import os
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
from numpy import diff
from scipy.io import loadmat
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
plt.style.use('seaborn')
from CONFIG import GLOBAL_DB, TEMPORARY_DB
from ARPM_utils import save_plot
from autocorrelation import autocorrelation
from InvarianceTestEllipsoid import InvarianceTestEllipsoid
# -
# ## Load the database generated by script S_FitSVI
# +
try:
db = loadmat(os.path.join(GLOBAL_DB, 'db_FitSVI'), squeeze_me=True)
except FileNotFoundError:
db = loadmat(os.path.join(TEMPORARY_DB, 'db_FitSVI'), squeeze_me=True)
theta = db['theta']
# -
# ## Compute increments and autocorrelations
# +
lag_ = 10
# preallocating variables
delta_theta = {}
acf_delta_theta = {}
for k in range(6):
delta_theta[k] = diff(theta[[k],:]) # increments
acf_delta_theta[k] = autocorrelation(delta_theta[k], lag_) # autocorrelations
# -
# ## IID test for SVI parameters
# +
lag = 10 # lag to be printed
ell_scale = 2 # ellipsoid radius coefficient
fit = 0 # fitting
pos = [] # use default settings for plot positions
# names of figures
name = {}
name[0]=r'Invariance test(increments of $\theta_1$)'
name[1]=r'Invariance test(increments of $\theta_2$)'
name[2]=r'Invariance test(increments of $\theta_3$)'
name[3]=r'Invariance test(increments of $\theta_4$)'
name[4]=r'Invariance test(increments of $\theta_5$)'
name[5]=r'Invariance test(increments of $\theta_6$)'
for k in range(6):
f = figure(figsize=(12,6))
InvarianceTestEllipsoid(delta_theta[k], acf_delta_theta[k][0,1:], lag, fit, ell_scale, pos, name[k]);
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
| 0 | 0 | 0 |
05a5f6749f1319f6551e4027aec3c982e8778867 | 823 | py | Python | example/search-author.py | dollodart/gssa | 078ff1e8c2d103d871f6f0e4c50f961384127a3c | [
"MIT"
] | null | null | null | example/search-author.py | dollodart/gssa | 078ff1e8c2d103d871f6f0e4c50f961384127a3c | [
"MIT"
] | null | null | null | example/search-author.py | dollodart/gssa | 078ff1e8c2d103d871f6f0e4c50f961384127a3c | [
"MIT"
] | null | null | null | from gssa.core import search
from gssa.graph_search import breadth_first_search
from gssa.secretenv import author_first, author_last, vancouver_author
author = author_first + ' ' + author_last
publist = search(author, nres=100, overwrite=False)
def has_author(pub):
    """Loose filter: keep *pub* if it plausibly involves the target author.

    Accepts the publication when `author_last` appears anywhere in the
    lower-cased author summary; otherwise accepts it whenever its
    citation lists any authors at all (a permissive fallback).
    """
    # BUGFIX: these filter functions were referenced below but missing,
    # so the call raised NameError; restored from the original source.
    if author_last in ''.join(x.lower() for x in pub.authors_summary):
        return True
    cite = pub.get_cite()  # note this will run a query if not had
    return cite.authors is not None

def has_specific_author(pub, vancouver_author):
    """Strict filter: keep *pub* only for the given Vancouver-style author."""
    if author_last in ''.join(x.lower() for x in pub.authors_summary):
        return True
    cite = pub.get_cite()
    return vancouver_author in cite.authors

def special(pub):
    """Filter bound to the configured Vancouver-style author name."""
    return has_specific_author(pub, vancouver_author)

breadth_first_search(publist[:100], levels=2, filters=(has_author, special))
| 30.481481 | 76 | 0.746051 | from gssa.core import search
from gssa.graph_search import breadth_first_search
from gssa.secretenv import author_first, author_last, vancouver_author
author = author_first + ' ' + author_last
publist = search(author, nres=100, overwrite=False)
def has_author(pub):
    """Loose filter: keep *pub* if it plausibly involves the target author.

    Accepts the publication when `author_last` appears anywhere in the
    lower-cased author summary; otherwise accepts it whenever its
    citation lists any authors at all (a very permissive fallback).
    """
    if author_last in ''.join(x.lower() for x in pub.authors_summary):
        return True
    cite = pub.get_cite()  # note this will run a query if not had
    return cite.authors is not None
def has_specific_author(pub, vancouver_author):
    """Strict filter: keep *pub* only for the given Vancouver-style author.

    Same author-summary shortcut as `has_author`; otherwise requires the
    exact `vancouver_author` string among the citation's authors.
    """
    if author_last in ''.join(x.lower() for x in pub.authors_summary):
        return True
    cite = pub.get_cite()
    return vancouver_author in cite.authors
# Partial application of has_specific_author to the configured author.
def special(pub): return has_specific_author(pub, vancouver_author)
breadth_first_search(publist[:100], levels=2, filters=(has_author, special))
| 426 | 0 | 69 |
d8eb1bf097980e7748dab7e27729f783c74750f9 | 893 | py | Python | PyParadise/__init__.py | brandherd/PyParadise | 1c65bf634e17931f165fd88b9938f604b9371e2e | [
"MIT"
] | 1 | 2021-06-01T13:07:54.000Z | 2021-06-01T13:07:54.000Z | PyParadise/__init__.py | brandherd/PyParadise | 1c65bf634e17931f165fd88b9938f604b9371e2e | [
"MIT"
] | 3 | 2021-11-03T02:07:38.000Z | 2022-03-14T20:35:04.000Z | PyParadise/__init__.py | brandherd/PyParadise | 1c65bf634e17931f165fd88b9938f604b9371e2e | [
"MIT"
] | null | null | null | from . import data
from . import cube
from . import rss
from . import spectrum1d
from . import ssplibrary
from . import parameters
from . import fit_profile
from . import header
import copyreg as copy_reg
from types import *
copy_reg.pickle(MethodType, _pickle_method, _unpickle_method)
| 24.135135 | 67 | 0.671892 | from . import data
from . import cube
from . import rss
from . import spectrum1d
from . import ssplibrary
from . import parameters
from . import fit_profile
from . import header
import copyreg as copy_reg
from types import *
def _pickle_method(method):
func_name = method.__func__.__name__
obj = method.__self__
cls = method.__self__.__class__
if func_name.startswith('__') and not func_name.endswith('__'):
cls_name = cls.__name__.lstrip('_')
if cls_name:
func_name = '_' + cls_name + func_name
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
copy_reg.pickle(MethodType, _pickle_method, _unpickle_method)
| 555 | 0 | 46 |
2ac0b229d581190aaa35cb79f0a0b8e7c605d501 | 131 | py | Python | tenpy/utility/angleConvert.py | TensileTensor/Tenpy | 8ff7150b71f2fed4c6808d89bdfffd1c9063bb66 | [
"MIT"
] | null | null | null | tenpy/utility/angleConvert.py | TensileTensor/Tenpy | 8ff7150b71f2fed4c6808d89bdfffd1c9063bb66 | [
"MIT"
] | null | null | null | tenpy/utility/angleConvert.py | TensileTensor/Tenpy | 8ff7150b71f2fed4c6808d89bdfffd1c9063bb66 | [
"MIT"
] | null | null | null | import math
| 18.714286 | 35 | 0.641221 | import math
def toRadian(angle):
    """Convert an angle from degrees to radians."""
    # math.radians is the stdlib equivalent of angle * (math.pi / 180.0).
    return math.radians(angle)
def toDegree(angle):
    """Convert an angle from radians to degrees."""
    # math.degrees is the stdlib equivalent of angle * (180.0 / math.pi).
    return math.degrees(angle)
d90a4713ede40d909bddd90975cd6377a1d93ec8 | 3,789 | py | Python | NEAT/neat/attributes.py | ShangtongZhang/DataMining | f72a17694ca971dffc80ee6251ab47327c87492c | [
"Apache-2.0"
] | 1 | 2018-02-22T15:40:22.000Z | 2018-02-22T15:40:22.000Z | NEAT/neat/attributes.py | ShangtongZhang/DataMining | f72a17694ca971dffc80ee6251ab47327c87492c | [
"Apache-2.0"
] | null | null | null | NEAT/neat/attributes.py | ShangtongZhang/DataMining | f72a17694ca971dffc80ee6251ab47327c87492c | [
"Apache-2.0"
] | 3 | 2017-03-31T03:38:36.000Z | 2019-04-27T15:58:26.000Z | from random import choice, gauss, random
from neat.config import ConfigParameter
# TODO: There is probably a lot of room for simplification of these classes using metaprogramming.
| 31.31405 | 98 | 0.61441 | from random import choice, gauss, random
from neat.config import ConfigParameter
# TODO: There is probably a lot of room for simplification of these classes using metaprogramming.
class BaseAttribute(object):
    """Common machinery for gene attributes.

    Subclasses declare ``__config_items__``; the constructor then exposes,
    for every item ``foo``, an attribute ``foo_name`` holding the fully
    qualified config key ``"<attribute name>_foo"``.
    """
    def __init__(self, name):
        self.name = name
        for item, full_name in zip(self.__config_items__,
                                   self.config_item_names()):
            setattr(self, "{0}_name".format(item), full_name)
    def config_item_names(self):
        """Config keys for this attribute, one per ``__config_items__`` entry."""
        prefix = self.name
        return ["{0}_{1}".format(prefix, item) for item in self.__config_items__]
class FloatAttribute(BaseAttribute):
    """Gene attribute holding a float value.

    Configured by seven per-attribute parameters: initial distribution
    (mean/stdev), replacement and mutation rates, mutation power, and
    hard min/max bounds.
    """
    __config_items__ = ["init_mean", "init_stdev", "replace_rate",
                        "mutate_rate", "mutate_power", "max_value",
                        "min_value"]

    def get_config_params(self):
        """One float ConfigParameter per configuration item."""
        return [ConfigParameter(item, float) for item in self.config_item_names()]

    def clamp(self, value, config):
        """Restrict *value* to the configured [min_value, max_value] range."""
        lo = getattr(config, self.min_value_name)
        hi = getattr(config, self.max_value_name)
        return max(min(value, hi), lo)

    def init_value(self, config):
        """Draw a fresh value from the configured normal distribution, clamped."""
        mu = getattr(config, self.init_mean_name)
        sigma = getattr(config, self.init_stdev_name)
        return self.clamp(gauss(mu, sigma), config)

    def mutate_value(self, value, config):
        """Possibly replace, perturb, or simply clamp *value*.

        A single uniform draw decides between full replacement
        (probability replace_rate), additive Gaussian perturbation
        (probability mutate_rate, not clamped here), or returning the
        value unchanged except for clamping.
        """
        replace_rate = getattr(config, self.replace_rate_name)
        roll = random()
        if roll < replace_rate:
            return self.init_value(config)
        mutate_rate = getattr(config, self.mutate_rate_name)
        if roll < replace_rate + mutate_rate:
            power = getattr(config, self.mutate_power_name)
            return value + gauss(0.0, power)
        return self.clamp(value, config)

    def validate(self, config):
        """No extra validation for float attributes."""
        pass
class BoolAttribute(BaseAttribute):
    """Gene attribute holding a boolean value."""
    __config_items__ = ["default",
                        "mutate_rate"]

    def get_config_params(self):
        """ConfigParameters: a bool default plus a float mutation rate."""
        default_name, rate_name = self.config_item_names()
        return [ConfigParameter(default_name, bool), ConfigParameter(rate_name, float)]

    def init_value(self, config):
        """Configured default, or a fair coin flip when no default is set."""
        preset = getattr(config, self.default_name)
        if preset is not None:
            return preset
        return random() < 0.5

    def mutate_value(self, value, config):
        """With probability mutate_rate, redraw the value at random."""
        rate = getattr(config, self.mutate_rate_name)
        if random() < rate:
            # A random redraw (rather than a forced flip) keeps the
            # mutate_rate semantics aligned with the other attribute
            # types: mutation *may* change the value but need not.
            return random() < 0.5
        return value

    def validate(self, config):
        """No extra validation for boolean attributes."""
        pass
class StringAttribute(BaseAttribute):
    """Gene attribute holding one string out of a configured option list."""
    __config_items__ = ["default",
                        "options",
                        "mutate_rate"]

    def get_config_params(self):
        """ConfigParameters: default (str), options (list), mutate_rate (float)."""
        default_name, opt_name, rate_name = self.config_item_names()
        return [ConfigParameter(default_name, str),
                ConfigParameter(opt_name, list),
                ConfigParameter(rate_name, float)]

    def init_value(self, config):
        """Configured default, or a uniform pick from the options list."""
        preset = getattr(config, self.default_name)
        if preset is not None:
            return preset
        return choice(getattr(config, self.options_name))

    def mutate_value(self, value, config):
        """With probability mutate_rate, redraw uniformly from the options."""
        rate = getattr(config, self.mutate_rate_name)
        if random() < rate:
            return choice(getattr(config, self.options_name))
        return value

    def validate(self, config):
        """No extra validation for string attributes."""
        pass
7dffe231c31a4be0d60223846b10ded3b3382b58 | 1,938 | py | Python | test.py | hidevn/sphere-face-py | 5b85431b038ac0e196b53ed7e19e2540133132d3 | [
"MIT"
] | null | null | null | test.py | hidevn/sphere-face-py | 5b85431b038ac0e196b53ed7e19e2540133132d3 | [
"MIT"
] | null | null | null | test.py | hidevn/sphere-face-py | 5b85431b038ac0e196b53ed7e19e2540133132d3 | [
"MIT"
] | null | null | null | import numpy as np
import cv2
import os
import caffe
from scipy.spatial.distance import cosine
image_folder = './images'
output_folder = './features'
model = './train/code/sphereface_deploy.prototxt'
weights = './train/result/sphereface_model.caffemodel'
net = caffe.Net(model, weights, caffe.TEST)
if __name__ == '__main__':
#save_feature_vectors()
print(detect_from_img('./Aaron_Peirsol_0003.jpg'))
#img_feature = extract_deep_feature('./Aaron_Peirsol_0003.jpg', net)
| 34.607143 | 110 | 0.691434 | import numpy as np
import cv2
import os
import caffe
from scipy.spatial.distance import cosine
image_folder = './images'
output_folder = './features'
model = './train/code/sphereface_deploy.prototxt'
weights = './train/result/sphereface_model.caffemodel'
net = caffe.Net(model, weights, caffe.TEST)
def extract_deep_feature(filename, net):
    """Run *filename* through the SphereFace caffe net; return its feature.

    Returns None when the image cannot be read.  The image and a flipped
    copy are batched together and their 'fc5' embeddings concatenated
    into a single 1-D feature vector.
    """
    img = cv2.imread(filename)
    if img is None:
        return None
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # SphereFace preprocessing: scale pixel values to roughly [-1, 1).
    img = (img - 127.5)/128
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    img = transformer.preprocess('data', img)
    img = np.transpose(img, [2, 0, 1])
    # NOTE(review): np.flip(img, axis=0) flips along the first axis of
    # whatever layout preprocess/transpose produced; if that axis is the
    # channel axis this swaps channel order rather than mirroring the
    # image horizontally as SphereFace usually does -- verify the axis.
    data = np.concatenate([np.expand_dims(img, axis=0), np.expand_dims(np.flip(img, axis=0), axis=0)], axis=0)
    # Input geometry fixed by the deploy prototxt: batch of 2, 3x112x96.
    net.blobs['data'].reshape(2, 3, 112, 96)
    net.blobs['data'].data[...] = data
    res = net.forward()['fc5']
    feature = np.concatenate([res[0], res[1]])
    return feature
def save_feature_vectors():
    """Extract a deep feature for every image in `image_folder` and save it
    (one text file per image, named after the image stem) to `output_folder`."""
    os.makedirs(output_folder, exist_ok=True)
    for name in os.listdir(image_folder):
        stem = name.split('.')[0]
        vector = extract_deep_feature(os.path.join(image_folder, name), net)
        np.savetxt(os.path.join(output_folder, stem), vector)
def detect(feature):
    """Return the gallery entry most cosine-similar to *feature*.

    The gallery is the set of feature files previously saved under
    `output_folder`; the winning file's name is returned.
    """
    gallery = os.listdir(output_folder)
    similarities = [
        1 - cosine(feature, np.loadtxt(os.path.join(output_folder, name)))
        for name in gallery
    ]
    best = int(np.argmax(np.array(similarities)))
    return gallery[best]
def detect_from_img(img_path):
    """Convenience wrapper: feature-extract *img_path*, then match it."""
    feature = extract_deep_feature(img_path, net)
    return detect(feature)
if __name__ == '__main__':
#save_feature_vectors()
print(detect_from_img('./Aaron_Peirsol_0003.jpg'))
#img_feature = extract_deep_feature('./Aaron_Peirsol_0003.jpg', net)
| 1,358 | 0 | 92 |
d86e121283842502e8b415f5c5bb7b8769d03bc1 | 1,806 | py | Python | beast/physicsmodel/helpers/gridhelpers.py | marthaboyer/beast | 1ca71fb64ab60827e4e4e1937b64f319a98166c3 | [
"BSD-3-Clause"
] | null | null | null | beast/physicsmodel/helpers/gridhelpers.py | marthaboyer/beast | 1ca71fb64ab60827e4e4e1937b64f319a98166c3 | [
"BSD-3-Clause"
] | null | null | null | beast/physicsmodel/helpers/gridhelpers.py | marthaboyer/beast | 1ca71fb64ab60827e4e4e1937b64f319a98166c3 | [
"BSD-3-Clause"
] | null | null | null | """ Common helpers in the grid package """
__all__ = ['isNestedInstance', 'pretty_size_print']
def isNestedInstance(obj, cl):
    """ Test for sub-classes types
    I could not find a universal test

    keywords
    --------
    obj: object instance
        object to test

    cl: Class
        top level class to test

    returns
    -------
    r: bool
        True if obj is indeed an instance or subclass instance of cl
    """
    # Collect cl itself plus two generations of subclasses, then test
    # obj's class against the whole family in one issubclass call.
    candidates = [cl]
    candidates.extend(cl.__subclasses__())
    for child in cl.__subclasses__():
        candidates.extend(child.__subclasses__())
    return issubclass(obj.__class__, tuple(candidates))
def pretty_size_print(num_bytes):
    """
    Output number of bytes in a human readable format

    keywords
    --------
    num_bytes: int
        number of bytes to convert

    returns
    -------
    output: str
        string representation of the size with appropriate unit scale
        (None when num_bytes is None)
    """
    if num_bytes is None:
        return
    KiB = 1024
    units = ['KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
    # Walk from the largest unit down.  BUGFIX: uses >= so that exact
    # powers of 1024 (e.g. 1024 bytes) report as "1 KB" instead of
    # falling through to the raw byte count ("1.02e+03 Bytes") as the
    # previous strict `>` comparisons did.
    scale = KiB ** len(units)
    for unit in reversed(units):
        if num_bytes >= scale:
            return '%.3g %s' % (num_bytes / scale, unit)
        scale //= KiB
    return '%.3g Bytes' % (num_bytes)
| 24.08 | 72 | 0.54485 | """ Common helpers in the grid package """
__all__ = ['isNestedInstance', 'pretty_size_print']
def isNestedInstance(obj, cl):
    """ Test for sub-classes types
    I could not find a universal test
    keywords
    --------
    obj: object instance
        object to test
    cl: Class
        top level class to test
    returns
    -------
    r: bool
        True if obj is indeed an instance or subclass instance of cl
    """
    # Collect cl's direct subclasses and their subclasses.  Only two
    # levels are walked explicitly, but deeper descendants still match
    # because issubclass() below tests against cl itself as well.
    tree = []
    for k in cl.__subclasses__():
        tree += k.__subclasses__()
    tree += cl.__subclasses__() + [ cl ]
    return issubclass(obj.__class__, tuple(tree))
def pretty_size_print(num_bytes):
    """
    Output number of bytes in a human readable format
    keywords
    --------
    num_bytes: int
        number of bytes to convert
    returns
    -------
    output: str
        string representation of the size with appropriate unit scale
    """
    # Nothing to format; implicitly returns None.
    if num_bytes is None:
        return
    # Binary (base-1024) unit scales.
    KiB = 1024
    MiB = KiB * KiB
    GiB = KiB * MiB
    TiB = KiB * GiB
    PiB = KiB * TiB
    EiB = KiB * PiB
    ZiB = KiB * EiB
    YiB = KiB * ZiB
    # Walk from the largest scale down and format against the first
    # scale the value strictly exceeds.
    if num_bytes > YiB:
        output = '%.3g YB' % (num_bytes / YiB)
    elif num_bytes > ZiB:
        output = '%.3g ZB' % (num_bytes / ZiB)
    elif num_bytes > EiB:
        output = '%.3g EB' % (num_bytes / EiB)
    elif num_bytes > PiB:
        output = '%.3g PB' % (num_bytes / PiB)
    elif num_bytes > TiB:
        output = '%.3g TB' % (num_bytes / TiB)
    elif num_bytes > GiB:
        output = '%.3g GB' % (num_bytes / GiB)
    elif num_bytes > MiB:
        output = '%.3g MB' % (num_bytes / MiB)
    elif num_bytes > KiB:
        output = '%.3g KB' % (num_bytes / KiB)
    else:
        output = '%.3g Bytes' % (num_bytes)
    return output
output = '%.3g Bytes' % (num_bytes)
return output
| 0 | 0 | 0 |
4484d154e370a6956803f8544b3ec654031a14f2 | 317 | py | Python | tests/test_showers.py | deepjets/deepjets | fc9c610d4fd80975d8d25eb0d7cd41d7dd318c75 | [
"BSD-3-Clause"
] | 23 | 2016-11-13T02:48:32.000Z | 2021-11-13T01:02:20.000Z | tests/test_showers.py | deepjets/deepjets | fc9c610d4fd80975d8d25eb0d7cd41d7dd318c75 | [
"BSD-3-Clause"
] | null | null | null | tests/test_showers.py | deepjets/deepjets | fc9c610d4fd80975d8d25eb0d7cd41d7dd318c75 | [
"BSD-3-Clause"
] | 7 | 2016-12-02T16:58:33.000Z | 2021-09-02T12:36:46.000Z | from deepjets.generate import generate_events
# Shower one event with Vincia; the output only goes to the HepMC file,
# so the loop body is empty.
for event in generate_events('w_vincia.config', 1, write_to='vincia.hepmc', shower='vincia', random_state=1, verbosity=0):
    pass
# Shower one event with Dire and report its weight.  print() (instead of
# the Python-2-only `print weight` statement) behaves identically for a
# single argument and also runs under Python 3.
for event, weight in generate_events('w.config', 1, write_to='dire.hepmc', shower='dire', random_state=1, verbosity=0):
    print(weight)
| 39.625 | 122 | 0.753943 | from deepjets.generate import generate_events
for event in generate_events('w_vincia.config', 1, write_to='vincia.hepmc', shower='vincia', random_state=1, verbosity=0):
pass
for event, weight in generate_events('w.config', 1, write_to='dire.hepmc', shower='dire', random_state=1, verbosity=0):
print weight
| 0 | 0 | 0 |
431546551b4d409bb50114ac36a7a15e82717c34 | 2,518 | py | Python | test/python/test_loss.py | slin004/incubator-singa | 09c8a2e65927d6405262bbb969e6dab96809df07 | [
"Apache-2.0"
] | 1 | 2019-11-15T12:46:10.000Z | 2019-11-15T12:46:10.000Z | test/python/test_loss.py | slin004/incubator-singa | 09c8a2e65927d6405262bbb969e6dab96809df07 | [
"Apache-2.0"
] | 8 | 2020-01-16T06:56:23.000Z | 2020-01-18T03:46:04.000Z | test/python/test_loss.py | slin004/incubator-singa | 09c8a2e65927d6405262bbb969e6dab96809df07 | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division
import unittest
import numpy as np
from singa import loss
from singa import tensor
if __name__ == '__main__':
unittest.main()
| 34.027027 | 74 | 0.602462 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division
import unittest
import numpy as np
from singa import loss
from singa import tensor
class TestLoss(unittest.TestCase):
    """Check singa loss functions against NumPy reference computations.

    Each test follows the stateful singa sequence forward -> backward ->
    evaluate on the same loss object, so statement order matters.
    """
    def setUp(self):
        # Fixed 3x3 float32 prediction matrix and binary target matrix
        # shared by all test cases, kept both as NumPy arrays (for the
        # reference math) and as singa tensors (for the API under test).
        self.x_np = np.asarray([[0.9, 0.2, 0.1],
                                [0.1, 0.4, 0.5],
                                [0.2, 0.4, 0.4]],
                               dtype=np.float32)
        self.y_np = np.asarray([[1, 0, 1],
                                [0, 1, 1],
                                [1, 0, 0]],
                               dtype=np.float32)
        self.x = tensor.from_numpy(self.x_np)
        self.y = tensor.from_numpy(self.y_np)
    def test_sigmoid_cross_entropy(self):
        # Reference: p = sigmoid(x); l = -(y*log(p) + (1-y)*log(1-p)).
        sig = loss.SigmoidCrossEntropy()
        l1 = sig.forward(True, self.x, self.y)
        sig.backward()
        l2 = sig.evaluate(True, self.x, self.y)
        p = 1.0 / (1 + np.exp(-self.x_np))
        l = - (self.y_np * np.log(p) + (1 - self.y_np) * np.log(1 - p))
        self.assertAlmostEqual(l1.l1(), l2)
        self.assertAlmostEqual(l1.l1(), np.average(l))
    def test_squared_error(self):
        # Reference: l = 0.5 * (y - x)^2, averaged element-wise.
        sqe = loss.SquaredError()
        l1 = sqe.forward(True, self.x, self.y)
        sqe.backward()
        l2 = sqe.evaluate(True, self.x, self.y)
        l = 0.5 * (self.y_np - self.x_np) ** 2
        self.assertAlmostEqual(l1.l1(), tensor.to_numpy(l2).flatten()[0])
        self.assertAlmostEqual(l1.l1(), np.average(l))
    def test_softmax_cross_entropy(self):
        # Only checks that forward and evaluate agree; no NumPy
        # reference is computed for this loss.
        sce = loss.SoftmaxCrossEntropy()
        l1 = sce.forward(True, self.x, self.y)
        sce.backward()
        l2 = sce.evaluate(True, self.x, self.y)
        self.assertAlmostEqual(l1.l1(), l2)
if __name__ == '__main__':
unittest.main()
| 1,412 | 13 | 138 |
41866b837dfda3d810db4eccfdd0ab68717e6db9 | 1,492 | py | Python | src/SortingSim/test.py | berkayaslan/Sorting_Simulation | 16cfcd404063b060191dab244025012271edacd8 | [
"MIT"
] | 2 | 2020-01-26T09:42:03.000Z | 2020-05-26T13:57:02.000Z | src/SortingSim/test.py | berkayaslan/Sorting_Simulation | 16cfcd404063b060191dab244025012271edacd8 | [
"MIT"
] | null | null | null | src/SortingSim/test.py | berkayaslan/Sorting_Simulation | 16cfcd404063b060191dab244025012271edacd8 | [
"MIT"
] | null | null | null | import Sticks
import stick
import time
import pygame
pygame.init()
board = pygame.display.set_mode((400, 400))

# Event loop: report TAB presses.  Also handle QUIT so the window can be
# closed normally -- the original `while 1` loop ignored it, leaving the
# process killable only from outside.
running = True
while running:
    for e in pygame.event.get():
        if e.type == pygame.QUIT:
            running = False
        elif e.type == pygame.KEYDOWN and e.key == pygame.K_TAB:
            print("pressed tab")
pygame.quit()
"""
# ///---=== FIND STICK TEST ===---
ex1 = Sticks.Sticks()
ex1.new_sticks(10)
a = [i.length for i in ex1.sticks]
print(a)
print([ex1.find_stick(i, 0).location for i in a])
print([ex1.find_stick(i, 0).location for i in a])
# ///---=== FIND STICK TEST ===---
"""
"""
# ///---=== SWAP STICK TEST ===---
s1 = stick.Stick(20, 130)
s2 = stick.Stick(13, 1)
s3 = stick.Stick(103, 58)
print("s1 nesnesinin id'si: %d konumu: %d" % (s1.o_id, s1.location))
print("s2 nesnesinin id'si: %d konumu: %d" % (s2.o_id, s2.location))
print("s2 nesnesinin id'si: %d konumu: %d" % (s3.o_id, s3.location))
Sticks.Sticks().swap_stick_locations(s1, s2)
print("\nKonumlar değiştirildi (s1, s2)!!\n")
print("s1 nesnesinin id'si: %d konumu: %d" % (s1.o_id, s1.location))
print("s2 nesnesinin id'si: %d konumu: %d" % (s2.o_id, s2.location))
print("s2 nesnesinin id'si: %d konumu: %d" % (s3.o_id, s3.location))
Sticks.Sticks().swap_stick_locations(s1, s3)
print("\nKonumlar değiştirildi (s1, s3)!!\n")
print("s1 nesnesinin id'si: %d konumu: %d" % (s1.o_id, s1.location))
print("s2 nesnesinin id'si: %d konumu: %d" % (s2.o_id, s2.location))
print("s2 nesnesinin id'si: %d konumu: %d" % (s3.o_id, s3.location))
# ///---=== SWAP STICK TEST ===---
"""
| 27.62963 | 68 | 0.626676 | import Sticks
import stick
import time
import pygame
pygame.init()
board = pygame.display.set_mode((400, 400))
while 1:
for e in pygame.event.get():
if e.type == pygame.KEYDOWN and e.key == pygame.K_TAB:
print("pressed tab")
"""
# ///---=== FIND STICK TEST ===---
ex1 = Sticks.Sticks()
ex1.new_sticks(10)
a = [i.length for i in ex1.sticks]
print(a)
print([ex1.find_stick(i, 0).location for i in a])
print([ex1.find_stick(i, 0).location for i in a])
# ///---=== FIND STICK TEST ===---
"""
"""
# ///---=== SWAP STICK TEST ===---
s1 = stick.Stick(20, 130)
s2 = stick.Stick(13, 1)
s3 = stick.Stick(103, 58)
print("s1 nesnesinin id'si: %d konumu: %d" % (s1.o_id, s1.location))
print("s2 nesnesinin id'si: %d konumu: %d" % (s2.o_id, s2.location))
print("s2 nesnesinin id'si: %d konumu: %d" % (s3.o_id, s3.location))
Sticks.Sticks().swap_stick_locations(s1, s2)
print("\nKonumlar değiştirildi (s1, s2)!!\n")
print("s1 nesnesinin id'si: %d konumu: %d" % (s1.o_id, s1.location))
print("s2 nesnesinin id'si: %d konumu: %d" % (s2.o_id, s2.location))
print("s2 nesnesinin id'si: %d konumu: %d" % (s3.o_id, s3.location))
Sticks.Sticks().swap_stick_locations(s1, s3)
print("\nKonumlar değiştirildi (s1, s3)!!\n")
print("s1 nesnesinin id'si: %d konumu: %d" % (s1.o_id, s1.location))
print("s2 nesnesinin id'si: %d konumu: %d" % (s2.o_id, s2.location))
print("s2 nesnesinin id'si: %d konumu: %d" % (s3.o_id, s3.location))
# ///---=== SWAP STICK TEST ===---
"""
| 0 | 0 | 0 |
279fc711c62428d8edd7bb25202187627d94fc62 | 581 | py | Python | website/volunteers/migrations/0010_auto_20201129_2152.py | tibet5/website | 937e1941aaadbf7cd0a404a2655858451c01dd54 | [
"MIT"
] | null | null | null | website/volunteers/migrations/0010_auto_20201129_2152.py | tibet5/website | 937e1941aaadbf7cd0a404a2655858451c01dd54 | [
"MIT"
] | null | null | null | website/volunteers/migrations/0010_auto_20201129_2152.py | tibet5/website | 937e1941aaadbf7cd0a404a2655858451c01dd54 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2020-11-30 02:52
from django.db import migrations, models
| 24.208333 | 56 | 0.598967 | # Generated by Django 3.1.2 on 2020-11-30 02:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add anonymized_latitude/anonymized_longitude to Volunteer.

    Auto-generated by Django makemigrations.  The float defaults
    (43.65107, -79.347015) appear to be a fixed city-centre fallback
    coordinate -- presumably Toronto; confirm against project docs.
    """
    dependencies = [
        ('volunteers', '0009_auto_20201128_2348'),
    ]
    operations = [
        migrations.AddField(
            model_name='volunteer',
            name='anonymized_latitude',
            field=models.FloatField(default=43.65107),
        ),
        migrations.AddField(
            model_name='volunteer',
            name='anonymized_longitude',
            field=models.FloatField(default=-79.347015),
        ),
    ]
| 0 | 467 | 23 |
1ceb29a6e7813b27c5a606e336c81ea0d60d0dfc | 4,088 | py | Python | examples/external_isolation/plot_both_pol_sweep_results.py | JBHilton/covid-19-in-households-public | 42fb31a3c581a3d5b6b6959078a5f6bf4f25212e | [
"Apache-2.0"
] | 4 | 2020-04-17T13:19:43.000Z | 2021-12-02T19:56:27.000Z | examples/external_isolation/plot_both_pol_sweep_results.py | JBHilton/covid-19-in-households-public | 42fb31a3c581a3d5b6b6959078a5f6bf4f25212e | [
"Apache-2.0"
] | 6 | 2020-06-16T17:06:52.000Z | 2021-02-08T18:32:39.000Z | examples/external_isolation/plot_both_pol_sweep_results.py | JBHilton/covid-19-in-households-public | 42fb31a3c581a3d5b6b6959078a5f6bf4f25212e | [
"Apache-2.0"
] | 3 | 2020-05-12T12:09:48.000Z | 2021-06-07T09:16:09.000Z | '''This plots the results of the parameter sweep for the OOHI
example.
'''
from os import mkdir
from os.path import isdir
from pickle import load
from numpy import arange, array, atleast_2d, hstack, sum, where, zeros
from matplotlib.pyplot import close, colorbar, imshow, set_cmap, subplots
from mpl_toolkits.axes_grid1 import make_axes_locatable
from seaborn import heatmap
if not isdir('plots/oohi'):
    mkdir('plots/oohi')

# Load the parameter-sweep results produced by the OOHI example run.
with open('outputs/oohi/results.pkl', 'rb') as f:
    (vuln_peaks,
     vuln_end,
     iso_peaks,
     cum_iso,
     iso_method_range,
     iso_rate_range,
     iso_prob_range) = load(f)


def _plot_policy_sweep(results, cbar_label, filename):
    """Draw two side-by-side heatmaps of one sweep quantity and save.

    Parameters
    ----------
    results : array indexed as [isolation method, rate, probability];
        the two methods share one colour scale so panels are comparable.
    cbar_label : str, label for the shared colour bar.
    filename : str, output file name inside plots/oohi/.
    """
    vmin = results.min()
    vmax = results.max()
    fig, (ax1, ax2) = subplots(1, 2, sharex=True)
    axim = ax1.imshow(results[0, :, :],
                      origin='lower',
                      extent=(iso_rate_range[0], iso_rate_range[1], 0, 1),
                      vmin=vmin,
                      vmax=vmax)
    ax1.set_xlabel('Detection rate')
    ax1.set_ylabel('Adherence probability')
    ax2.imshow(results[1, :, :],
               origin='lower',
               extent=(iso_rate_range[0], iso_rate_range[1], 0, 1),
               vmin=vmin,
               vmax=vmax)
    ax2.set_xlabel('Detection rate')
    ax2.get_yaxis().set_ticks([])
    # Attach the shared colour bar to the right-hand panel.
    divider = make_axes_locatable(ax2)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    colorbar(axim, label=cbar_label, cax=cax)
    fig.savefig('plots/oohi/' + filename,
                bbox_inches='tight',
                dpi=300)
    close()


# One figure per sweep quantity; labels and file names unchanged.
_plot_policy_sweep(vuln_peaks,
                   "Peak % prevalence in vulnerable population",
                   'vuln_peaks.png')
_plot_policy_sweep(vuln_end,
                   "Cumulative % infected in vulnerable population",
                   'cum_vuln_cases.png')
_plot_policy_sweep(iso_peaks,
                   "Peak % population isolating",
                   'iso_peak.png')
_plot_policy_sweep(cum_iso,
                   "Cumulative % isolating",
                   'cum_iso.png')
| 29.410072 | 73 | 0.630871 | '''This plots the results of the parameter sweep for the OOHI
example.
'''
from os import mkdir
from os.path import isdir
from pickle import load
from numpy import arange, array, atleast_2d, hstack, sum, where, zeros
from matplotlib.pyplot import close, colorbar, imshow, set_cmap, subplots
from mpl_toolkits.axes_grid1 import make_axes_locatable
from seaborn import heatmap
if isdir('plots/oohi') is False:
mkdir('plots/oohi')
with open('outputs/oohi/results.pkl','rb') as f:
(vuln_peaks,
vuln_end,
iso_peaks,
cum_iso,
iso_method_range,
iso_rate_range,
iso_prob_range) = load(f)
vp_min = vuln_peaks.min()
vp_max = vuln_peaks.max()
ve_min = vuln_end.min()
ve_max = vuln_end.max()
ip_min = iso_peaks.min()
ip_max = iso_peaks.max()
ci_min = cum_iso.min()
ci_max = cum_iso.max()
fig, (ax1, ax2) = subplots(1,2,sharex=True)
axim=ax1.imshow(vuln_peaks[0,:,:],
origin='lower',
extent=(iso_rate_range[0],iso_rate_range[1],0,1),
vmin=vp_min,
vmax=vp_max)
ax1.set_xlabel('Detection rate')
ax1.set_ylabel('Adherence probability')
ax2.imshow(vuln_peaks[1,:,:],
origin='lower',
extent=(iso_rate_range[0],iso_rate_range[1],0,1),
vmin=vp_min,
vmax=vp_max)
ax2.set_xlabel('Detection rate')
ax2.get_yaxis().set_ticks([])
divider = make_axes_locatable(ax2)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = colorbar(axim,
label="Peak % prevalence in vulnerable population",
cax=cax)
fig.savefig('plots/oohi/vuln_peaks.png',
bbox_inches='tight',
dpi=300)
close()
fig, (ax1, ax2) = subplots(1,2,sharex=True)
axim=ax1.imshow(vuln_end[0,:,:],
origin='lower',
extent=(iso_rate_range[0],iso_rate_range[1],0,1),
vmin=ve_min,
vmax=ve_max)
ax1.set_xlabel('Detection rate')
ax1.set_ylabel('Adherence probability')
ax2.imshow(vuln_end[1,:,:],
origin='lower',
extent=(iso_rate_range[0],iso_rate_range[1],0,1),
vmin=ve_min,
vmax=ve_max)
ax2.set_xlabel('Detection rate')
ax2.get_yaxis().set_ticks([])
divider = make_axes_locatable(ax2)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = colorbar(axim,
label="Cumulative % infected in vulnerable population",
cax=cax)
fig.savefig('plots/oohi/cum_vuln_cases.png',
bbox_inches='tight',
dpi=300)
close()
fig, (ax1, ax2) = subplots(1,2,sharex=True)
axim=ax1.imshow(iso_peaks[0,:,:],
origin='lower',
extent=(iso_rate_range[0],iso_rate_range[1],0,1),
vmin=ip_min,
vmax=ip_max)
ax1.set_xlabel('Detection rate')
ax1.set_ylabel('Adherence probability')
ax2.imshow(iso_peaks[1,:,:],
origin='lower',
extent=(iso_rate_range[0],iso_rate_range[1],0,1),
vmin=ip_min,
vmax=ip_max)
ax2.set_xlabel('Detection rate')
ax2.get_yaxis().set_ticks([])
divider = make_axes_locatable(ax2)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = colorbar(axim,
label="Peak % population isolating",
cax=cax)
fig.savefig('plots/oohi/iso_peak.png',
bbox_inches='tight',
dpi=300)
close()
fig, (ax1, ax2) = subplots(1,2,sharex=True)
axim=ax1.imshow(cum_iso[0,:,:],
origin='lower',
extent=(iso_rate_range[0],iso_rate_range[1],0,1),
vmin=ci_min,
vmax=ci_max)
ax1.set_xlabel('Detection rate')
ax1.set_ylabel('Adherence probability')
ax2.imshow(cum_iso[1,:,:],
origin='lower',
extent=(iso_rate_range[0],iso_rate_range[1],0,1),
vmin=ci_min,
vmax=ci_max)
ax2.set_xlabel('Detection rate')
ax2.get_yaxis().set_ticks([])
divider = make_axes_locatable(ax2)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = colorbar(axim,
label="Cumulative % isolating",
cax=cax)
fig.savefig('plots/oohi/cum_iso.png',
bbox_inches='tight',
dpi=300)
close()
| 0 | 0 | 0 |
d9c3702c2b8b268d50c3dceb6e958e4b9a10cc86 | 1,515 | py | Python | on Raspberry/DHTPub.py | NourS-d/Smart-GreenHouse-Programming_For_IoT | 18632759c80c87b6343e70aac20b1dbdd3fb6503 | [
"MIT"
] | null | null | null | on Raspberry/DHTPub.py | NourS-d/Smart-GreenHouse-Programming_For_IoT | 18632759c80c87b6343e70aac20b1dbdd3fb6503 | [
"MIT"
] | null | null | null | on Raspberry/DHTPub.py | NourS-d/Smart-GreenHouse-Programming_For_IoT | 18632759c80c87b6343e70aac20b1dbdd3fb6503 | [
"MIT"
] | null | null | null | from DHT import DHT_Read
import paho.mqtt.client as mqtt
import json
import requests
import time
try:
file=open("config.json", "r")
json_str = file.read()
file.close()
except:
raise KeyError("Error opening config file. Please check.")
config_json = json.loads(json_str)
url = config_json["catalog"]["url"]
ID = config_json["zoneID"]
response = requests.get(url+"/broker")
brokerData=response.json()
broker = brokerData["IP"]
port = brokerData["port"]
del brokerData
del response
updateTime = 1
sensor = DHT_Read(17)
publisher = DHT_Pub("DHT11", broker, port)
publisher.start()
while True:
val=sensor.read()
if val is not None:
jsonDic=json.loads(val)
print jsonDic
#Publish Temperature
temp='{"temperature": ' + str(jsonDic["temperature"])+', "time": '+str(jsonDic["time"])+'}'
publisher.publish("/"+ID+"/temperature",temp)
#Publish Humidity
hum='{"humidity": ' + str(jsonDic["humidity"])+', "time": '+str(jsonDic["time"])+'}'
publisher.publish("/"+ID+"/humidity",hum)
else:
print "Error reading from sensor"
time.sleep(updateTime)
| 19.423077 | 93 | 0.673927 | from DHT import DHT_Read
import paho.mqtt.client as mqtt
import json
import requests
import time
class DHT_Pub:
    """Thin wrapper around a paho MQTT client used for publishing."""
    def __init__(self, clientID, broker, port = 1883):
        self.clientID = clientID
        self.port = port
        # BUG FIX: the original passed `port` as the second positional
        # argument of mqtt.Client, which paho interprets as
        # clean_session, not a port.
        self.pub = mqtt.Client(self.clientID)
        self.broker = broker
    def start(self):
        # BUG FIX: connect to the broker/port stored on this instance;
        # the original read a module-level `broker` global and always
        # connected on paho's default port, ignoring self.port.
        self.pub.connect(self.broker, self.port)
        self.pub.loop_start()
    def stop(self):
        self.pub.loop_stop()
        self.pub.disconnect()
    def publish(self, topic, message):
        self.pub.publish(topic, message)
# Load local configuration (catalog URL and zone identifier).  The
# `with` block closes the file even if read() fails; the narrowed
# except no longer swallows KeyboardInterrupt and friends, and the
# builtin name `file` is no longer shadowed.
try:
    with open("config.json", "r") as config_file:
        json_str = config_file.read()
except IOError:
    # KeyError kept for backward compatibility with the original
    # script's error type, although an IOError would be more apt.
    raise KeyError("Error opening config file. Please check.")
config_json = json.loads(json_str)
url = config_json["catalog"]["url"]
ID = config_json["zoneID"]
# Ask the catalog service where the MQTT broker lives.
response = requests.get(url+"/broker")
brokerData = response.json()
broker = brokerData["IP"]
port = brokerData["port"]
del brokerData
del response
updateTime = 1
sensor = DHT_Read(17)
publisher = DHT_Pub("DHT11", broker, port)
publisher.start()
while True:
    val = sensor.read()
    if val is not None:
        jsonDic = json.loads(val)
        # print() instead of the Python-2-only print statement; output
        # is identical for a single argument.
        print(jsonDic)
        # Publish temperature reading for this zone.
        temp = '{"temperature": ' + str(jsonDic["temperature"]) + ', "time": ' + str(jsonDic["time"]) + '}'
        publisher.publish("/" + ID + "/temperature", temp)
        # Publish humidity reading for this zone.
        hum = '{"humidity": ' + str(jsonDic["humidity"]) + ', "time": ' + str(jsonDic["time"]) + '}'
        publisher.publish("/" + ID + "/humidity", hum)
    else:
        print("Error reading from sensor")
    time.sleep(updateTime)
| 287 | -7 | 122 |
614756d04575afdd61b6db9c917c997966809450 | 1,303 | py | Python | apps/views.py | Jayson7/django-weather-app | 91931cb59de07213739a95019918f7d91f42e2b2 | [
"MIT"
] | null | null | null | apps/views.py | Jayson7/django-weather-app | 91931cb59de07213739a95019918f7d91f42e2b2 | [
"MIT"
] | null | null | null | apps/views.py | Jayson7/django-weather-app | 91931cb59de07213739a95019918f7d91f42e2b2 | [
"MIT"
] | null | null | null | from django.shortcuts import render
import json
from urllib.error import HTTPError
import urllib
# Create your views here.
| 34.289474 | 165 | 0.521105 | from django.shortcuts import render
import json
from urllib.error import HTTPError
import urllib
# Create your views here.
def homepage(request):
    """Render the weather page.

    On POST, fetch current weather for the submitted city from the
    OpenWeatherMap API and pass the parsed fields to the template.
    On GET, or if the API request fails, render with an empty context.
    """
    data = {}
    if request.method == 'POST':
        # Local imports guarantee the submodules are loaded regardless
        # of what the top-level `import urllib` happened to pull in.
        from urllib.parse import quote
        from urllib.request import urlopen
        city = request.POST['city']
        try:
            # BUG FIX: percent-encode the city before splicing it into
            # the URL -- names containing spaces (e.g. "New York")
            # previously produced an invalid URL and raised.
            # NOTE(review): the API key is hard-coded; move it to
            # settings/environment configuration.
            url = urlopen(
                'https://api.openweathermap.org/data/2.5/weather?q='
                + quote(city)
                + '&units=metric&appid=979c31c159d536fafdc155c66bd467eb').read()
            list_data = json.loads(url)
            data = {
                "country_code": str(list_data['sys']['country']),
                "main_weather": str(list_data['weather'][0]['main']),
                "coordinate": str(list_data['coord']['lon']) + ', ' + str(list_data['coord']['lat']),
                "temp": str(list_data['main']['temp']) + ' °C',
                "pressure": str(list_data['main']['pressure']),
                "humidity": str(list_data['main']['humidity']),
                "description": str(list_data['weather'][0]['description']),
                "icon": str(list_data['weather'][0]['icon']),
                "name": str(list_data["name"])}
        except HTTPError:
            data = {}
            print("none")
    return render(request, 'index.html', data)
86a5fc8774ef271374e84a2219191c1c7929b948 | 1,959 | py | Python | uta_tools/data_sources/mane_transcript_mappings.py | cancervariants/uta_tools | daa3fcbaabcc8aaeef8f2b0c7080bacd928d97d6 | [
"MIT"
] | 1 | 2022-01-19T18:17:56.000Z | 2022-01-19T18:17:56.000Z | uta_tools/data_sources/mane_transcript_mappings.py | cancervariants/uta-tools | 9d102ea96e5f7b07a862113ee07eca2ce3412ad4 | [
"MIT"
] | 25 | 2021-10-13T19:57:39.000Z | 2022-03-29T17:56:32.000Z | uta_tools/data_sources/mane_transcript_mappings.py | cancervariants/uta-tools | 9d102ea96e5f7b07a862113ee07eca2ce3412ad4 | [
"MIT"
] | null | null | null | """The module for loading MANE Transcript mappings to genes."""
from typing import Dict, Optional, List
import pandas as pd
from uta_tools import MANE_SUMMARY_PATH, logger
class MANETranscriptMappings:
    """Loader and lookup helper for RefSeq MANE transcript mappings."""

    def __init__(self, mane_data_path: str = MANE_SUMMARY_PATH) -> None:
        """Initialize the MANE Transcript mappings class.
        :param str mane_data_path: Path to RefSeq MANE summary data
        """
        self.mane_data_path = mane_data_path
        self.df = self._load_mane_transcript_data()

    def _load_mane_transcript_data(self) -> pd.core.frame.DataFrame:
        """Load RefSeq MANE data file into DataFrame.
        :return: DataFrame containing RefSeq MANE Transcript data
        """
        return pd.read_csv(self.mane_data_path, delimiter="\t")

    def get_gene_mane_data(self, gene_symbol: str) -> Optional[List[Dict]]:
        """Return MANE Transcript data for a gene.
        :param str gene_symbol: HGNC Gene Symbol
        :return: MANE Transcript data (Transcript accessions,
            gene, and location information)
        """
        matches = self.df[self.df["symbol"] == gene_symbol.upper()]
        if matches.empty:
            logger.warning(f"Unable to get MANE Transcript data for gene: "
                           f"{gene_symbol}")
            return None
        # Sort so MANE Plus Clinical rows (if any) precede MANE Select.
        return matches.sort_values("MANE_status").to_dict("records")

    def get_mane_from_transcripts(self, transcripts: List[str]) -> List[Dict]:
        """Get mane transcripts from a list of transcripts
        :param List[str] transcripts: RefSeq transcripts on c. coordinate
        :return: MANE data
        """
        hits = self.df[self.df["RefSeq_nuc"].isin(transcripts)]
        return [] if hits.empty else hits.to_dict("records")
| 36.962264 | 78 | 0.646759 | """The module for loading MANE Transcript mappings to genes."""
from typing import Dict, Optional, List
import pandas as pd
from uta_tools import MANE_SUMMARY_PATH, logger
class MANETranscriptMappings:
"""The MANE Transcript mappings class."""
def __init__(self, mane_data_path: str = MANE_SUMMARY_PATH) -> None:
"""Initialize the MANE Transcript mappings class.
:param str mane_data_path: Path to RefSeq MANE summary data
"""
self.mane_data_path = mane_data_path
self.df = self._load_mane_transcript_data()
def _load_mane_transcript_data(self) -> pd.core.frame.DataFrame:
"""Load RefSeq MANE data file into DataFrame.
:return: DataFrame containing RefSeq MANE Transcript data
"""
return pd.read_csv(self.mane_data_path, delimiter="\t")
def get_gene_mane_data(self, gene_symbol: str) -> Optional[List[Dict]]:
"""Return MANE Transcript data for a gene.
:param str gene_symbol: HGNC Gene Symbol
:return: MANE Transcript data (Transcript accessions,
gene, and location information)
"""
data = self.df.loc[self.df["symbol"] == gene_symbol.upper()]
if len(data) == 0:
logger.warning(f"Unable to get MANE Transcript data for gene: "
f"{gene_symbol}")
return None
# Ordering: MANE Plus Clinical (If it exists), MANE Select
data = data.sort_values("MANE_status")
return data.to_dict("records")
def get_mane_from_transcripts(self, transcripts: List[str]) -> List[Dict]:
"""Get mane transcripts from a list of transcripts
:param List[str] transcripts: RefSeq transcripts on c. coordinate
:return: MANE data
"""
mane_rows = self.df["RefSeq_nuc"].isin(transcripts)
result = self.df[mane_rows]
if len(result) == 0:
return []
return result.to_dict("records")
| 0 | 0 | 0 |
fda805b8612fa20791d43e89fb3298b7f7ab2ab3 | 1,253 | py | Python | lib/datasets/__init__.py | hudmgy/HRNet-Facial-Landmark-Detection | fe95d4b19e92fe267201d38648635b9beffba77a | [
"MIT"
] | 1 | 2021-06-22T07:58:24.000Z | 2021-06-22T07:58:24.000Z | lib/datasets/__init__.py | hudmgy/HRNet-Facial-Landmark-Detection | fe95d4b19e92fe267201d38648635b9beffba77a | [
"MIT"
] | null | null | null | lib/datasets/__init__.py | hudmgy/HRNet-Facial-Landmark-Detection | fe95d4b19e92fe267201d38648635b9beffba77a | [
"MIT"
] | null | null | null | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Tianheng Cheng(tianhengcheng@gmail.com)
# ------------------------------------------------------------------------------
from .aflw import AFLW
from .cofw import COFW
from .cofwsd import COFWSD
from .face300w import Face300W
from .face300wsd import Face300WSD
from .wflw import WFLW
from .wflwsd import WFLWSD
from .wflwe70 import WFLWE70
from .free import FreeData
__all__ = ['AFLW', 'COFW', 'Face300W', 'WFLW', 'get_dataset']
| 29.139535 | 80 | 0.592977 | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Tianheng Cheng(tianhengcheng@gmail.com)
# ------------------------------------------------------------------------------
from .aflw import AFLW
from .cofw import COFW
from .cofwsd import COFWSD
from .face300w import Face300W
from .face300wsd import Face300WSD
from .wflw import WFLW
from .wflwsd import WFLWSD
from .wflwe70 import WFLWE70
from .free import FreeData
__all__ = ['AFLW', 'COFW', 'Face300W', 'WFLW', 'get_dataset']
def get_dataset(config):
    """Return the dataset class selected by ``config.DATASET.DATASET``.

    :param config: experiment configuration with a DATASET.DATASET name
    :return: the matching dataset class (not an instance)
    :raises NotImplementedError: if the dataset name is unknown
    """
    datasets = {
        'AFLW': AFLW,
        'COFW': COFW,
        'COFWSD': COFWSD,
        '300W': Face300W,
        '300WSD': Face300WSD,
        'WFLW': WFLW,
        'WFLWSD': WFLWSD,
        'WFLWE70': WFLWE70,
        'FreeData': FreeData,
    }
    try:
        return datasets[config.DATASET.DATASET]
    except KeyError:
        # BUG FIX: the original `raise NotImplemented()` raised a
        # TypeError, because NotImplemented is a constant, not an
        # exception class; NotImplementedError is the intended signal.
        raise NotImplementedError()
| 644 | 0 | 23 |
5f54ec91d90a237450a6edc030c7b90fef6ca813 | 524 | py | Python | merlin/optimizer.py | ethereon/merlin | 0babfed51e65197086d74479a1ca9150259b4f7f | [
"BSD-3-Clause"
] | 1 | 2019-08-15T16:22:20.000Z | 2019-08-15T16:22:20.000Z | merlin/optimizer.py | ethereon/merlin | 0babfed51e65197086d74479a1ca9150259b4f7f | [
"BSD-3-Clause"
] | null | null | null | merlin/optimizer.py | ethereon/merlin | 0babfed51e65197086d74479a1ca9150259b4f7f | [
"BSD-3-Clause"
] | null | null | null | import tensorflow as tf
from merlin.spec import Spec, Default
| 26.2 | 70 | 0.685115 | import tensorflow as tf
from merlin.spec import Spec, Default
class Optimizer:
class Config(Spec):
# An optimizer name from the tf.optimizers module
name: str = 'RMSprop'
# Keyword arguments passed to the optimizer on construction
params: dict = Default(dict(lr=0.001))
def __new__(cls, config: Config):
return cls.get_optimizer_by_name(config.name)(**config.params)
@classmethod
def get_optimizer_by_name(cls, name):
return getattr(tf.optimizers, name)
| 143 | 294 | 23 |
cd5f236ac6ba3563921c41b0487c9111fcce775b | 3,306 | py | Python | tests/test_ntree.py | awoods/ocfl-py | ef4ff9d6b9a950088ff5373c4f1dfeec339f034d | [
"MIT"
] | 14 | 2018-09-10T20:08:04.000Z | 2022-03-29T18:10:43.000Z | tests/test_ntree.py | awoods/ocfl-py | ef4ff9d6b9a950088ff5373c4f1dfeec339f034d | [
"MIT"
] | 73 | 2019-02-13T20:35:09.000Z | 2022-03-24T15:21:34.000Z | tests/test_ntree.py | awoods/ocfl-py | ef4ff9d6b9a950088ff5373c4f1dfeec339f034d | [
"MIT"
] | 3 | 2019-02-13T18:39:50.000Z | 2021-05-04T15:39:04.000Z | """Digest tests."""
import unittest
from ocfl.ntree import Ntree
class TestAll(unittest.TestCase):
    """TestAll class to run tests."""

    def test01_encode(self):
        """Test encode."""
        nt = Ntree()
        for raw, encoded in (('', ''), ('a', 'a'), ('a/b:?', 'a=b+^3f')):
            self.assertEqual(nt.encode(raw), encoded)

    def test02_decode(self):
        """Test decode."""
        nt = Ntree()
        for encoded, raw in (('', ''), ('a', 'a'), ('a=b+^3f', 'a/b:?')):
            self.assertEqual(nt.decode(encoded), raw)

    def test03_identifier_to_path(self):
        """Test path creation."""
        # (n, encapsulate, [(identifier, expected path), ...])
        cases = [
            (2, False, [('', ''), ('a', 'a'), ('ab', 'ab'),
                        ('abc', 'ab/c'), ('abcde', 'ab/cd/e')]),
            (3, False, [('abcdefg', 'abc/def/g'),
                        ('abcdefgh', 'abc/def/gh'),
                        ('abcdefghi', 'abc/def/ghi')]),
            (2, True, [('', ''), ('a', 'a/a'), ('ab', 'ab/ab'),
                       ('abc', 'ab/c/abc'), ('abcde', 'ab/cd/e/abcde')]),
            (3, True, [('abcdefg', 'abc/def/g/abcdefg'),
                       ('abcdefgh', 'abc/def/gh/abcdefgh'),
                       ('abcdefghi', 'abc/def/ghi/abcdefghi')]),
        ]
        for n, encapsulate, pairs in cases:
            nt = Ntree(n=n, encapsulate=encapsulate)
            for identifier, path in pairs:
                self.assertEqual(nt.identifier_to_path(identifier), path)

    def test03_path_to_identifier(self):
        """Test path interpretation."""
        # (n, encapsulate, [(path, expected identifier), ...])
        cases = [
            (2, False, [('', ''), ('a', 'a'), ('ab', 'ab'),
                        ('ab/c', 'abc'), ('ab/cd/e', 'abcde')]),
            (3, False, [('abc/def/g', 'abcdefg'),
                        ('abc/def/gh', 'abcdefgh'),
                        ('abc/def/ghi', 'abcdefghi')]),
            (2, True, [('', ''), ('a/a', 'a'), ('ab/ab', 'ab'),
                       ('ab/c/abc', 'abc'), ('ab/cd/e/abcde', 'abcde')]),
            (3, True, [('abc/def/g/abcdefg', 'abcdefg'),
                       ('abc/def/gh/abcdefgh', 'abcdefgh'),
                       ('abc/def/ghi/abcdefghi', 'abcdefghi')]),
        ]
        for n, encapsulate, pairs in cases:
            nt = Ntree(n=n, encapsulate=encapsulate)
            for path, identifier in pairs:
                self.assertEqual(nt.path_to_identifier(path), identifier)
        # Bad ones: encapsulation directory mismatching the identifier.
        nt = Ntree(n=3)
        self.assertRaises(Exception, nt.path_to_identifier, 'abc/def/g/a-diff-g')
import unittest
from ocfl.ntree import Ntree
class TestAll(unittest.TestCase):
    """Test suite for the Ntree pairtree-style layout helper."""

    def test01_encode(self):
        """Check escaping of identifier characters for filesystem use."""
        tree = Ntree()
        self.assertEqual(tree.encode(''), '')
        self.assertEqual(tree.encode('a'), 'a')
        # '/', ':' and '?' are rewritten to safe escape sequences.
        self.assertEqual(tree.encode('a/b:?'), 'a=b+^3f')

    def test02_decode(self):
        """Check that decoding restores the escaped characters."""
        tree = Ntree()
        self.assertEqual(tree.decode(''), '')
        self.assertEqual(tree.decode('a'), 'a')
        self.assertEqual(tree.decode('a=b+^3f'), 'a/b:?')

    def test03_identifier_to_path(self):
        """Check conversion of identifiers into segmented paths."""
        # Two-character segments, no encapsulation directory.
        flat_pairs = Ntree(n=2, encapsulate=False)
        self.assertEqual(flat_pairs.identifier_to_path(''), '')
        self.assertEqual(flat_pairs.identifier_to_path('a'), 'a')
        self.assertEqual(flat_pairs.identifier_to_path('ab'), 'ab')
        self.assertEqual(flat_pairs.identifier_to_path('abc'), 'ab/c')
        self.assertEqual(flat_pairs.identifier_to_path('abcde'), 'ab/cd/e')
        # Three-character segments, no encapsulation directory.
        flat_triples = Ntree(n=3, encapsulate=False)
        self.assertEqual(flat_triples.identifier_to_path('abcdefg'), 'abc/def/g')
        self.assertEqual(flat_triples.identifier_to_path('abcdefgh'), 'abc/def/gh')
        self.assertEqual(flat_triples.identifier_to_path('abcdefghi'), 'abc/def/ghi')
        # Default behavior appends the whole identifier as a final directory.
        boxed_pairs = Ntree(n=2)
        self.assertEqual(boxed_pairs.identifier_to_path(''), '')
        self.assertEqual(boxed_pairs.identifier_to_path('a'), 'a/a')
        self.assertEqual(boxed_pairs.identifier_to_path('ab'), 'ab/ab')
        self.assertEqual(boxed_pairs.identifier_to_path('abc'), 'ab/c/abc')
        self.assertEqual(boxed_pairs.identifier_to_path('abcde'), 'ab/cd/e/abcde')
        boxed_triples = Ntree(n=3)
        self.assertEqual(boxed_triples.identifier_to_path('abcdefg'), 'abc/def/g/abcdefg')
        self.assertEqual(boxed_triples.identifier_to_path('abcdefgh'), 'abc/def/gh/abcdefgh')
        self.assertEqual(boxed_triples.identifier_to_path('abcdefghi'), 'abc/def/ghi/abcdefghi')

    def test03_path_to_identifier(self):
        """Check reconstruction of identifiers from segmented paths."""
        # Two-character segments, no encapsulation directory.
        flat_pairs = Ntree(n=2, encapsulate=False)
        self.assertEqual(flat_pairs.path_to_identifier(''), '')
        self.assertEqual(flat_pairs.path_to_identifier('a'), 'a')
        self.assertEqual(flat_pairs.path_to_identifier('ab'), 'ab')
        self.assertEqual(flat_pairs.path_to_identifier('ab/c'), 'abc')
        self.assertEqual(flat_pairs.path_to_identifier('ab/cd/e'), 'abcde')
        # Three-character segments, no encapsulation directory.
        flat_triples = Ntree(n=3, encapsulate=False)
        self.assertEqual(flat_triples.path_to_identifier('abc/def/g'), 'abcdefg')
        self.assertEqual(flat_triples.path_to_identifier('abc/def/gh'), 'abcdefgh')
        self.assertEqual(flat_triples.path_to_identifier('abc/def/ghi'), 'abcdefghi')
        # Paths that include the encapsulation directory.
        boxed_pairs = Ntree(n=2)
        self.assertEqual(boxed_pairs.path_to_identifier(''), '')
        self.assertEqual(boxed_pairs.path_to_identifier('a/a'), 'a')
        self.assertEqual(boxed_pairs.path_to_identifier('ab/ab'), 'ab')
        self.assertEqual(boxed_pairs.path_to_identifier('ab/c/abc'), 'abc')
        self.assertEqual(boxed_pairs.path_to_identifier('ab/cd/e/abcde'), 'abcde')
        boxed_triples = Ntree(n=3)
        self.assertEqual(boxed_triples.path_to_identifier('abc/def/g/abcdefg'), 'abcdefg')
        self.assertEqual(boxed_triples.path_to_identifier('abc/def/gh/abcdefgh'), 'abcdefgh')
        self.assertEqual(boxed_triples.path_to_identifier('abc/def/ghi/abcdefghi'), 'abcdefghi')
        # An encapsulation directory that disagrees with the segments must raise.
        self.assertRaises(Exception, boxed_triples.path_to_identifier, 'abc/def/g/a-diff-g')
| 0 | 0 | 0 |