| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
softgroup/data/__init__.py
|
thangvubk/SoftGroup
| 75
|
12785151
|
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from .s3dis import S3DISDataset
from .scannetv2 import ScanNetDataset
__all__ = ['S3DISDataset', 'ScanNetDataset', 'build_dataset']
def build_dataset(data_cfg, logger):
assert 'type' in data_cfg
_data_cfg = data_cfg.copy()
_data_cfg['logger'] = logger
data_type = _data_cfg.pop('type')
if data_type == 's3dis':
return S3DISDataset(**_data_cfg)
elif data_type == 'scannetv2':
return ScanNetDataset(**_data_cfg)
else:
raise ValueError(f'Unknown {data_type}')
def build_dataloader(dataset, batch_size=1, num_workers=1, training=True, dist=False):
shuffle = training
sampler = DistributedSampler(dataset, shuffle=shuffle) if dist else None
if sampler is not None:
shuffle = False
if training:
return DataLoader(
dataset,
batch_size=batch_size,
num_workers=num_workers,
collate_fn=dataset.collate_fn,
shuffle=shuffle,
sampler=sampler,
drop_last=True,
pin_memory=True)
else:
assert batch_size == 1
return DataLoader(
dataset,
batch_size=batch_size,
num_workers=num_workers,
collate_fn=dataset.collate_fn,
shuffle=False,
sampler=sampler,
drop_last=False,
pin_memory=True)
| 2.28125
| 2
|
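A minimal sanity check of the build_dataset factory above, exercising only what the snippet itself defines (the unknown-type error path); the dataset-specific constructor kwargs are not shown because their signatures live outside this file.

```python
import logging

logger = logging.getLogger('demo')
try:
    build_dataset({'type': 'kitti'}, logger)  # 'kitti' is not handled by the factory above
except ValueError as err:
    print(err)  # -> "Unknown kitti"
```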
test/test_tcalib.py
|
askpatrickw/micropython-stubber
| 0
|
12785152
|
<gh_stars>0
#Lobo
from tpcalib import Calibrate
x = Calibrate()
x.drawCrossHair(10,20,False)
x.readCoordinates()
| 1.617188
| 2
|
src/timematic/_utils.py
|
MicaelJarniac/timematic
| 1
|
12785153
|
from datetime import time, timedelta
from .constants import S_IN_H, S_IN_MIN, US_IN_S
from .units import Microseconds
def time_in_microseconds(time: time) -> Microseconds:
return Microseconds(
(time.hour * S_IN_H + time.minute * S_IN_MIN + time.second) * US_IN_S
+ time.microsecond
)
def subtract_times(first: time, second: time) -> timedelta:
# Subtract two times
assert first.tzinfo == second.tzinfo, "Timezones must match"
return timedelta(
microseconds=time_in_microseconds(first) - time_in_microseconds(second)
)
| 3.3125
| 3
|
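A short usage sketch of the timematic helpers above, assuming the imported constants carry their usual values (S_IN_H=3600, S_IN_MIN=60, US_IN_S=1_000_000) and that Microseconds compares like a plain integer.

```python
from datetime import time, timedelta

assert time_in_microseconds(time(1, 0)) == 3_600_000_000            # 1 hour in microseconds
assert subtract_times(time(1, 0), time(0, 30)) == timedelta(minutes=30)
# Mixing naive and aware times trips the tzinfo assertion in subtract_times.
```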
challenges/slippy/slippy.py
|
nickbjohnson4224/greyhat-crypto-ctf-2014
| 4
|
12785154
|
<reponame>nickbjohnson4224/greyhat-crypto-ctf-2014
import os
import hashlib
import struct
class SlippyCipher(object):
def __init__(self, key):
pass
def weak_block_encrypt(self, block, subkey):
"""
This is a pretty terrible block cipher. Thankfully, we can make it
much stronger through iteration.
"""
# N.B. 2xy+x+y is an invertible operation over numbers mod a power of 2
x = (2 * block * subkey + block + subkey) % 2**64
# left rotate by 1
x = ((x << 1) | (x >> 63)) % 2**64
# just some salt
x = x ^ 0x3CAB470ADB580357
return x
def weak_block_decrypt(self, block, subkey):
pass
def strong_block_encrypt(self, block, key):
"""
If I had an iteration of this cipher for every fluid ounce of coke
in a vending machine bottle of coke, this construction would be
obviously secure.
"""
for i in xrange(4096):
block = self.weak_block_encrypt(block, key)
return block
def encrypt(self, message):
"""
Encrypt using CBC mode with the generated table as the block cipher.
"""
# convert any Unicode characters to UTF-8
message = message.encode('utf-8')
# pad to multiple of block size
message += chr(8 - len(message) % 8) * (8 - len(message) % 8)
# cut message into words as 16-bit numbers
message = [struct.unpack('<Q', message[i:i+8])[0]
for i in xrange(0, len(message), 8)]
ciphertext = []
# generate IV
iv = struct.unpack('<H', os.urandom(2))[0]
ciphertext.append(iv)
# encrypt using CBC mode
last = iv
for mblk in message:
cblk = self._table[mblk ^ last]
ciphertext.append(cblk)
last = cblk
return ''.join(struct.pack('<H', w) for w in ciphertext)
def decrypt(self, ciphertext):
pass
| 3.40625
| 3
|
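The comment in weak_block_encrypt notes that 2xy+x+y is invertible mod a power of two; since 2xy+x+y = x(2y+1)+y and 2y+1 is odd, inverting it is a modular division. Below is a sketch of the matching decryption (not necessarily the challenge's intended solution), written for Python 3.8+ where three-argument pow gives the modular inverse; the original file is Python 2, which would need an extended-Euclid helper instead.

```python
def weak_block_decrypt(block, subkey):
    x = block ^ 0x3CAB470ADB580357            # undo the XOR "salt"
    x = ((x >> 1) | (x << 63)) % 2**64        # undo the left rotate by 1
    inv = pow(2 * subkey + 1, -1, 2**64)      # 2*subkey+1 is odd, hence invertible mod 2**64
    return ((x - subkey) * inv) % 2**64       # undo block*(2*subkey+1) + subkey

# strong_block_encrypt applies the weak round 4096 times with the same key,
# so its inverse is simply 4096 applications of weak_block_decrypt.
```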
scripts/move_ptu_touch.py
|
westpoint-robotics/virtual_ptu
| 2
|
12785155
|
<filename>scripts/move_ptu_touch.py<gh_stars>1-10
#!/usr/bin/env python
PKG = 'virtual_ptu'
import roslib; roslib.load_manifest(PKG)
import time
from math import pi
from threading import Thread
import rospy
from sensor_msgs.msg import Joy
from std_msgs.msg import Float64
from dynamixel_controllers.srv import *
from dynamixel_msgs.msg import JointState as JointState
from geometry_msgs.msg import Twist
class MovePTU():
def __init__(self):
self.is_running = True
self.step_size = rospy.get_param('/camera_limits/speed')
self.touch_data = None
self.teleopTime = time.time()
rospy.init_node('move_ptu_touch', anonymous=True)
self.pan_limit = rospy.get_param('/camera_limits/pan')
self.tilt_limit = rospy.get_param('/camera_limits/tilt')
rospy.Subscriber('/usma_remote/right', Twist, self.read_touch_data, queue_size=10)
self.servo_position_pan = rospy.Publisher('/pan_controller/command', Float64, queue_size=10)
self.servo_position_tilt = rospy.Publisher('/tilt_controller/command', Float64, queue_size=10)
self.pan_joint = 0.0
self.tilt_joint = 0.0
self.update_freq = rospy.get_param('updateFreq')
def read_touch_data(self, data):
self.touch_data = data
self.teleopTime = time.time()
def update_ptu_position(self):
rate = rospy.Rate(self.update_freq)
while self.is_running:
if self.touch_data is not None and (time.time() - self.teleopTime) < 0.2:
self.pan_joint += 1 * self.touch_data.angular.z * self.step_size
self.tilt_joint += 1 * self.touch_data.linear.x * self.step_size
if self.pan_joint < self.pan_limit['lower']:
self.pan_joint = self.pan_limit['lower']
elif self.pan_joint > self.pan_limit['upper']:
self.pan_joint = self.pan_limit['upper']
if self.tilt_joint < self.tilt_limit['lower']:
self.tilt_joint = self.tilt_limit['lower']
elif self.tilt_joint > self.tilt_limit['upper']:
self.tilt_joint = self.tilt_limit['upper']
self.servo_position_pan.publish(self.pan_joint)
self.servo_position_tilt.publish(self.tilt_joint)
rate.sleep()
if __name__ == '__main__':
move_ptu = MovePTU()
t = Thread(target=move_ptu.update_ptu_position)
t.start()
rospy.spin()
move_ptu.is_running = False  # the update loop checks is_running, not alive
t.join()
| 2.21875
| 2
|
data_importer/shapefiles.py
|
aaronfraint/data-importer
| 0
|
12785156
|
<reponame>aaronfraint/data-importer
from pathlib import Path
from postgis_helpers import PostgreSQL
def import_shapefiles(folder: Path, db: PostgreSQL):
""" Import all shapefiles within a folder into SQL.
"""
endings = [".shp", ".SHP"]
for ending in endings:
for shp_path in folder.rglob(f"*{ending}"):
print(shp_path)
idx = len(ending) * -1
pg_name = shp_path.name[:idx].replace(" ", "_").lower()
db.import_geodata(pg_name, shp_path, if_exists="replace")
| 2.921875
| 3
|
main.py
|
mr-glt/NCAEP-2
| 0
|
12785157
|
import time
import csv
from tentacle_pi.TSL2561 import TSL2561
from Adafruit_BME280 import *
from ctypes import *
from picamera import PiCamera
from time import sleep
from datetime import datetime
sensor = BME280(mode=BME280_OSAMPLE_8)
tsl = TSL2561(0x39,"/dev/i2c-1")
tsl.enable_autogain()
tsl.set_time(0x00)
path = "/home/pi/NCAEP-2/lib/liblsm9ds1cwrapper.so"
lib = cdll.LoadLibrary(path)
lib.lsm9ds1_create.argtypes = []
lib.lsm9ds1_create.restype = c_void_p
lib.lsm9ds1_begin.argtypes = [c_void_p]
lib.lsm9ds1_begin.restype = None
lib.lsm9ds1_calibrate.argtypes = [c_void_p]
lib.lsm9ds1_calibrate.restype = None
lib.lsm9ds1_gyroAvailable.argtypes = [c_void_p]
lib.lsm9ds1_gyroAvailable.restype = c_int
lib.lsm9ds1_accelAvailable.argtypes = [c_void_p]
lib.lsm9ds1_accelAvailable.restype = c_int
lib.lsm9ds1_magAvailable.argtypes = [c_void_p]
lib.lsm9ds1_magAvailable.restype = c_int
lib.lsm9ds1_readGyro.argtypes = [c_void_p]
lib.lsm9ds1_readGyro.restype = c_int
lib.lsm9ds1_readAccel.argtypes = [c_void_p]
lib.lsm9ds1_readAccel.restype = c_int
lib.lsm9ds1_readMag.argtypes = [c_void_p]
lib.lsm9ds1_readMag.restype = c_int
lib.lsm9ds1_getGyroX.argtypes = [c_void_p]
lib.lsm9ds1_getGyroX.restype = c_float
lib.lsm9ds1_getGyroY.argtypes = [c_void_p]
lib.lsm9ds1_getGyroY.restype = c_float
lib.lsm9ds1_getGyroZ.argtypes = [c_void_p]
lib.lsm9ds1_getGyroZ.restype = c_float
lib.lsm9ds1_getAccelX.argtypes = [c_void_p]
lib.lsm9ds1_getAccelX.restype = c_float
lib.lsm9ds1_getAccelY.argtypes = [c_void_p]
lib.lsm9ds1_getAccelY.restype = c_float
lib.lsm9ds1_getAccelZ.argtypes = [c_void_p]
lib.lsm9ds1_getAccelZ.restype = c_float
lib.lsm9ds1_getMagX.argtypes = [c_void_p]
lib.lsm9ds1_getMagX.restype = c_float
lib.lsm9ds1_getMagY.argtypes = [c_void_p]
lib.lsm9ds1_getMagY.restype = c_float
lib.lsm9ds1_getMagZ.argtypes = [c_void_p]
lib.lsm9ds1_getMagZ.restype = c_float
lib.lsm9ds1_calcGyro.argtypes = [c_void_p, c_float]
lib.lsm9ds1_calcGyro.restype = c_float
lib.lsm9ds1_calcAccel.argtypes = [c_void_p, c_float]
lib.lsm9ds1_calcAccel.restype = c_float
lib.lsm9ds1_calcMag.argtypes = [c_void_p, c_float]
lib.lsm9ds1_calcMag.restype = c_float
#This is bad....I'm sorry
def pres2alt(pressure):
alt = 44331.5 - 4946.62 * (pressure*100) ** (0.190263)
return alt
if __name__ == "__main__":
imu = lib.lsm9ds1_create()
lib.lsm9ds1_begin(imu)
if lib.lsm9ds1_begin(imu) == 0:
print("Failed to communicate with 9DOF. Check I2C.")
quit()
lib.lsm9ds1_calibrate(imu)
camera = PiCamera()
camera.resolution = (1920, 1080)
camera.framerate = 30
lastTime = datetime.now()
camera.start_recording('/home/pi/NCAEP-2/NCAEP' + time.strftime("_%H_%M_%S") + '.h264')
while True:
while lib.lsm9ds1_gyroAvailable(imu) == 0:
pass
lib.lsm9ds1_readGyro(imu)
while lib.lsm9ds1_accelAvailable(imu) == 0:
pass
lib.lsm9ds1_readAccel(imu)
while lib.lsm9ds1_magAvailable(imu) == 0:
pass
lib.lsm9ds1_readMag(imu)
gx = lib.lsm9ds1_getGyroX(imu)
gy = lib.lsm9ds1_getGyroY(imu)
gz = lib.lsm9ds1_getGyroZ(imu)
ax = lib.lsm9ds1_getAccelX(imu)
ay = lib.lsm9ds1_getAccelY(imu)
az = lib.lsm9ds1_getAccelZ(imu)
mx = lib.lsm9ds1_getMagX(imu)
my = lib.lsm9ds1_getMagY(imu)
mz = lib.lsm9ds1_getMagZ(imu)
cax = lib.lsm9ds1_calcGyro(imu, ax)
cay = lib.lsm9ds1_calcGyro(imu, ay)
caz = lib.lsm9ds1_calcGyro(imu, az)
cgx = lib.lsm9ds1_calcAccel(imu, gx)
cgy = lib.lsm9ds1_calcAccel(imu, gy)
cgz = lib.lsm9ds1_calcAccel(imu, gz)
cmx = lib.lsm9ds1_calcMag(imu, mx)
cmy = lib.lsm9ds1_calcMag(imu, my)
cmz = lib.lsm9ds1_calcMag(imu, mz)
degrees = sensor.read_temperature()
pascals = sensor.read_pressure()
hectopascals = pascals / 100
humidity = sensor.read_humidity()
currentTime = datetime.now()
timeDiff = currentTime - lastTime
if timeDiff.seconds > 300:
print "Restarting Camera"
camera.stop_recording()
sleep(1)
camera.start_recording('/home/pi/NCAEP-2/NCAEP' + time.strftime("_%H_%M_%S") + '.h264')
lastTime = datetime.now()
print("Gyro: %f, %f, %f [deg/s]" % (cgx, cgy, cgz))
print("Accel: %f, %f, %f [Gs]" % (cax/125, cay/125, caz/125))
print("Mag: %f, %f, %f [gauss]" % (cmx, cmy, cmz))
print 'Timestamp = {0:0.3f}'.format(sensor.t_fine)
print 'Temp = {0:0.3f} deg C'.format(degrees)
print 'Pressure = {0:0.2f} hPa'.format(hectopascals)
print 'Humidity = {0:0.2f} %'.format(humidity)
print "%s lux" % tsl.lux()
print "Computed Altitude %s m" % pres2alt(hectopascals)
print "Camera is Recording"
print "________________________"
with open('datamain.csv', 'a') as csvfile:
fieldnames = ['timestamp', 'unixtimestamp', 'gX', 'gY', 'gZ', 'aX', 'aY', 'aZ', 'mX', 'mY', 'mZ', 'tempC', 'hPa', 'humidity', 'lux', 'alt']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
#writer.writeheader()
writer.writerow({'timestamp': time.strftime("%d/%m/%Y") +" "+ time.strftime("%H:%M:%S"), 'unixtimestamp': time.time(), 'gX': cgx, 'gY': cgy, 'gZ': cgz, 'aX': cax/125, 'aY': cay/125, 'aZ': caz/125, 'mX': cmx, 'mY': cmy, 'mZ': cmz, 'tempC': degrees, 'hPa': hectopascals, 'humidity': humidity, 'lux': tsl.lux(), 'alt': pres2alt(hectopascals)})
time.sleep(0.5)
| 1.976563
| 2
|
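A quick sanity check of pres2alt above (a standard-atmosphere inversion taking pressure in hPa); the expected outputs are approximate.

```python
print(pres2alt(1013.25))  # standard sea-level pressure -> roughly 0 m
print(pres2alt(850))      # the 850 hPa level -> roughly 1.4-1.5 km
```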
spyder/utils/introspection/tests/test_plugin_client.py
|
aglotero/spyder
| 1
|
12785158
|
<filename>spyder/utils/introspection/tests/test_plugin_client.py
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
#
"""Tests for plugin_client.py."""
# Test library imports
import pytest
# Local imports
from spyder.utils.introspection.plugin_client import PluginClient
from spyder.utils.introspection.manager import PLUGINS
@pytest.mark.parametrize("plugin_name", PLUGINS)
def test_plugin_client(qtbot, plugin_name):
"""Test creation of the diferent plugin clients."""
plugin = PluginClient(plugin_name=plugin_name)
assert plugin
if __name__ == "__main__":
pytest.main()
| 2.25
| 2
|
orders/admin.py
|
mnemchinov/joint-expenses
| 0
|
12785159
|
<filename>orders/admin.py
from django.contrib import admin
from .models import *
class TabularOrderPartner(admin.TabularInline):
extra = 0
show_change_link = True
model = Order.partners.through
@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
list_display = ('__str__', 'status', 'opening_balance',
'amount', 'final_balance')
inlines = (TabularOrderPartner,)
list_filter = ('date', 'is_deleted', 'status', 'partners')
search_fields = ('content',)
fieldsets = (
('Основное', {
'fields': ('date', 'status', 'is_deleted',)
}),
('Параметры заказа', {
'fields': ('previous_order', 'opening_balance', 'amount', 'debit',
'final_balance'),
}),
('Содержимое', {
'fields': ('content',)
}),
)
def get_changeform_initial_data(self, request):
initial_data = {
'previous_order': None,
'opening_balance': 0,
}
previous_order = Order.objects.filter(
status=OrderStatuses.DELIVERED,
is_deleted=False
).first()
if previous_order is not None:
initial_data['previous_order'] = previous_order
initial_data['opening_balance'] = previous_order.final_balance
return initial_data
| 1.921875
| 2
|
src/encoder.py
|
Tyred/BigData
| 2
|
12785160
|
import keras
from keras.layers import Activation
from keras.models import load_model
from keras import backend as K
from keras.utils.generic_utils import get_custom_objects
import tensorflow as tf
import numpy as np
import pandas as pd
import timeit
import sys
import argparse
# Constants
#window_size = 1024
def windowNoOverlay(data, window_size): # split the series into windows without overlap
windowed_data = []
i = 0
while(i + window_size-1 < len(data)):
windowed_data.append(data[i:(i+window_size)])
i += window_size
if (i != len(data)):
i = len(data) - window_size
windowed_data.append(data[i:len(data)]) # add the rest
return windowed_data
def parser_args(cmd_args):
parser = argparse.ArgumentParser(sys.argv[0], description="", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-e", "--exp", type=str, action="store", default="pairwise_distances", help="Experiment")
parser.add_argument("-d", "--dataset", type=str, action="store", default="PigArtPressure", help="Dataset name")
return parser.parse_args(cmd_args)
# obtaining arguments from command line
args = parser_args(sys.argv[1:])
dataset = args.dataset
exp = args.exp
def swish(x, beta = 1):
return (x * K.sigmoid(beta * x))
get_custom_objects().update({'Swish': Activation(swish)})
# Swish Activation
#class Swish(Activation):
# def __init__(self, activation, **kwargs):
# super(Swish, self).__init__(activation, **kwargs)
# self.__name__ = 'swish'
#def swish(x):
# return (K.sigmoid(x) * x)
#get_custom_objects().update({'swish': Swish(swish)})
encoder = load_model('../models/' + exp + '/new_train/' + 'encoder_' + dataset + ".h5", compile = False)
if (exp == "pairwise_distances"):
data = np.genfromtxt('../data/' + exp + '/' + dataset + '.txt', delimiter=' ',)
print("Data shape:", data.shape)
elif (exp == "similarity_search"):
data = np.genfromtxt('../data/' + exp + '/' + dataset + '/' + 'Data.txt', delimiter=' ',)
print("Data shape:", data.shape)
print("Encoding the queries as well")
for i in range(1, 6):
query = np.genfromtxt('../data/' + exp + '/' + dataset + '/' + 'Query' + str(i) + '.txt', delimiter=' ',)
query.shape = 1, query.shape[0], 1
query = encoder.predict(query)
query.shape = query.shape[1]
np.savetxt('../data/' + exp + '/' + dataset + '/coded_data/Query' + str (i) + '.txt', query)
del query
else:
data = np.genfromtxt('../data/' + exp + '/' + dataset + '/' + dataset + '_test.txt', delimiter=' ',)
print("Data shape:", data.shape)
# Getting rid of the NaNs and infs with interpolation
if (len(data.shape) == 1):
data = np.array(pd.Series(data).interpolate())
serie_length = 1024
# 'Windowing'
data = np.array(windowNoOverlay(data, serie_length))
print("Window Data shape:", data.shape)
else:
serie_length = data.shape[1]
print("Serie length:", serie_length)
data.shape = data.shape[0], serie_length, 1
# Workaround to load the libraries so it doesn't count in the timer,
# in production these libraries would be already loaded
coded_data = encoder.predict(data)
start = timeit.default_timer()
coded_data = encoder.predict(data)
print("Coded Data shape:", coded_data.shape)
stop = timeit.default_timer()
print("Time to code the serie:", stop - start)
coded_data.shape = coded_data.shape[0], coded_data.shape[1]
if (exp == "similarity_search"):
np.savetxt('../data/' + exp + '/' + dataset + '/coded_data/' + 'Data.txt', coded_data)
elif(exp == "pairwise_distances"):
np.savetxt('../data/' + exp + '/coded_data/' + dataset + '_coded.txt', coded_data)
else:
np.savetxt('../data/' + exp + '/' + dataset + '/' + dataset + '_coded.txt', coded_data)
| 2.34375
| 2
|
component/parameter/file_params.py
|
ingdanielguerrero/SMFM_biota
| 0
|
12785161
|
<reponame>ingdanielguerrero/SMFM_biota<gh_stars>0
from pathlib import Path
# Parameter file
parameter_file = Path(__file__).parents[2]/'biota/cfg/McNicol2018.csv'
| 1.3125
| 1
|
dateflix_api/views/me.py
|
pythrick/dateflix-api
| 8
|
12785162
|
from rest_framework import mixins, response, viewsets
from dateflix_api.models import User
from dateflix_api.serializers import ProfileSerializer
class MeViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
"""
API endpoint that allows current profile to be viewed.
"""
serializer_class = ProfileSerializer
queryset = User.objects.all()
def list(self, request, *args, **kwargs):
# assumes the user is authenticated; handle this according to your needs
return response.Response(self.serializer_class(request.user).data)
| 2.265625
| 2
|
d/driver/servo/servo.py
|
desireevl/astro-pointer
| 2
|
12785163
|
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.OUT)
p = GPIO.PWM(18, 50)
p.start(2.5)
while True:
p.ChangeDutyCycle(2.5) # 0 degree
time.sleep(1)
p.ChangeDutyCycle(6.75)
time.sleep(1)
p.ChangeDutyCycle(10.5)
time.sleep(1)
| 3.015625
| 3
|
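The duty cycles in the servo loop above (2.5, 6.75, 10.5 at 50 Hz) presumably sweep a standard hobby servo from about 0 to 180 degrees; a hedged helper under that assumption (the endpoint values come from the script, the linear mapping is an assumption).

```python
def angle_to_duty(angle, min_duty=2.5, max_duty=10.5):
    # Assumes min_duty ~ 0 degrees and max_duty ~ 180 degrees, linearly interpolated.
    return min_duty + (max_duty - min_duty) * angle / 180.0

p.ChangeDutyCycle(angle_to_duty(90))  # ~6.5, close to the 6.75 used above
time.sleep(1)
```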
__scraping__/investing.com - request, BS/main.py
|
whitmans-max/python-examples
| 140
|
12785164
|
# date: 2020.09.11
# author: Bartłomiej "furas" Burek (https://blog.furas.pl)
# https://stackoverflow.com/questions/63840415/how-to-scrape-website-tables-where-the-value-can-be-different-as-we-chose-but-th
import requests
from bs4 import BeautifulSoup
import csv
url = 'https://id.investing.com/instruments/HistoricalDataAjax'
payload = {
"curr_id": "8830",
"smlID": "300004",
"header": "Data+Historis+Emas+Berjangka",
"st_date": "01/30/2020",
"end_date": "12/31/2020",
"interval_sec": "Daily",
"sort_col": "date",
"sort_ord": "DESC",
"action":"historical_data"
}
headers = {
#"Referer": "https://id.investing.com/commodities/gold-historical-data",
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:80.0) Gecko/20100101 Firefox/80.0",
"X-Requested-With": "XMLHttpRequest"
}
fh = open('output.csv', 'w')
csv_writer = csv.writer(fh)
for year in range(2010, 2021):
print('year:', year)
payload["st_date"] = f"01/01/{year}"
payload["end_date"] = f"12/31/{year}"
r = requests.post(url, data=payload, headers=headers)
#print(r.text)
soup = BeautifulSoup(r.text, 'lxml')
table = soup.find('table')
for row in table.find_all('tr')[1:]: # [1:] to skip header
row_data = [item.text for item in row.find_all('td')]
print(row_data)
csv_writer.writerow(row_data)
fh.close()
| 3.1875
| 3
|
src/skge/test.py
|
comjoueur/cesi
| 100
|
12785165
|
<reponame>comjoueur/cesi<filename>src/skge/test.py
import sys
import numpy as np
from numpy import sqrt, squeeze, zeros_like
from numpy.random import randn, uniform
import pdb
def init_unif(sz):
bnd = 1 / sqrt(sz[0])
p = uniform(low=-bnd, high=bnd, size=sz)
return squeeze(p)
sz = [10,10,5]
a = init_unif(sz)
pdb.set_trace()
| 2.375
| 2
|
test/test_missing_feature_extraction.py
|
cbrewitt/GRIT-OpenDrive
| 0
|
12785166
|
import numpy as np
from igp2 import AgentState, plot_map
from igp2.data import ScenarioConfig, InDScenario
from igp2.opendrive.map import Map
import matplotlib.pyplot as plt
from shapely.ops import unary_union
from grit.core.data_processing import get_episode_frames
from grit.core.feature_extraction import FeatureExtractor
from grit.occlusion_detection.occlusion_detection_geometry import OcclusionDetector2D
from grit.core.base import get_base_dir
def get_feature_extractor(episode_idx=1, scenario_name="bendplatz"):
scenario_map = Map.parse_from_opendrive(get_base_dir() + f"/scenarios/maps/{scenario_name}.xodr")
return FeatureExtractor(scenario_map, scenario_name, episode_idx)
def plot_occlusion(frame_id=153, episode_idx=1, *frame, plot_occlusions=True, all_vehicles=False,
scenario_name="bendplatz"):
feature_extractor = get_feature_extractor(episode_idx=episode_idx, scenario_name=scenario_name)
occlusions = feature_extractor.occlusions[frame_id]
scenario_config = ScenarioConfig.load(get_base_dir() + f"/scenarios/configs/{scenario_name}.json")
scenario = InDScenario(scenario_config)
episode = scenario.load_episode(feature_extractor.episode_idx)
# Take a step every 25 recorded frames (1s)
# episode_frames contain for each second the list of frames for all vehicles alive that moment
episode_frames = get_episode_frames(episode, exclude_parked_cars=False, exclude_bicycles=True, step=25)
ego_id = list(occlusions.keys())[0]
ego_occlusions = occlusions[ego_id]
ego = episode_frames[frame_id][ego_id]
plot_map(feature_extractor.scenario_map, scenario_config=scenario_config, plot_buildings=True)
if plot_occlusions:
lane_occlusions_all = []
for road_occlusions in ego_occlusions:
for lane_occlusions in ego_occlusions[road_occlusions]:
lane_occlusion = ego_occlusions[road_occlusions][lane_occlusions]
if lane_occlusion is not None:
lane_occlusions_all.append(lane_occlusion)
OcclusionDetector2D.plot_area_from_list(lane_occlusions_all, color="r", alpha=0.5)
if all_vehicles:
for aid, state in episode_frames[frame_id].items():
plt.text(*state.position, aid)
plt.plot(*list(zip(*OcclusionDetector2D.get_box(state).boundary)), color="black")
if frame:
for aid, state in frame[0].items():
plt.text(*state.position, aid)
plt.plot(*list(zip(*OcclusionDetector2D.get_box(state).boundary)))
plt.plot(*list(zip(*OcclusionDetector2D.get_box(ego).boundary)))
def find_lane_at(point, scenario_name="bendplatz"):
scenario_map = Map.parse_from_opendrive(get_base_dir() + f"/scenarios/maps/{scenario_name}.xodr")
lanes = scenario_map.lanes_at(point)
for lane in lanes:
plot_map(scenario_map)
lane = scenario_map.get_lane(lane.parent_road.id, lane.id)
plt.plot(*list(zip(*[x for x in lane.midline.coords])))
plt.show()
def get_occlusions_and_ego(frame=153, episode_idx=1):
feature_extractor = get_feature_extractor(episode_idx)
occlusions = feature_extractor.occlusions[frame]
ego_id = list(occlusions.keys())[0]
ego_occlusions = occlusions[ego_id]
occlusions = []
for road_occlusions in ego_occlusions:
for lane_occlusions in ego_occlusions[road_occlusions]:
lane_occlusion = ego_occlusions[road_occlusions][lane_occlusions]
if lane_occlusion is not None:
occlusions.append(lane_occlusion)
occlusions = unary_union(occlusions)
return ego_id, occlusions
def test_occluded_area_no_vehicle_in_oncoming_lanes():
mfe = get_feature_extractor()
lane_path = [mfe.scenario_map.get_lane(8, -1, 0)]
ego_id, occlusions = get_occlusions_and_ego()
state0 = AgentState(time=0,
position=np.array((45.67, -46.72)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=lane_path[0].get_heading_at(45.67, -46.72)
)
state1 = AgentState(time=0,
position=np.array((62.88, -20.96)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(-120)
)
state_ego = AgentState(time=0,
position=np.array((43.88, -44.25)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(45)
)
frame = {ego_id: state_ego, 0: state0, 1: state1}
# plot_occlusion(153, 1, frame)
oncoming_vehicle_id, oncoming_vehicle_dist = mfe.oncoming_vehicle(0, lane_path, frame)
missing = mfe.is_oncoming_vehicle_missing(oncoming_vehicle_dist, lane_path, occlusions)
plt.show()
assert missing
def set_up_frame_ep3_frame100(third_agent_position, third_agent_heading):
"""
The third agent is the possible oncoming vehicle.
State 1 is the target vehicle.
"""
episode_idx = 3
frame_id = 100
mfe = get_feature_extractor(episode_idx=episode_idx)
lane_path = [mfe.scenario_map.get_lane(1, 1, 0),
mfe.scenario_map.get_lane(9, -1, 0)]
ego_id, occlusions = get_occlusions_and_ego(frame=frame_id, episode_idx=episode_idx)
state0 = AgentState(time=0,
position=np.array((45.67, -46.72)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=lane_path[0].get_heading_at(45.67, -46.72)
)
state1 = AgentState(time=0,
position=np.array(third_agent_position),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(third_agent_heading)
)
state_ego = AgentState(time=0,
position=np.array((43.88, -44.25)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(45)
)
target_id = 0
frame = {target_id: state0, 1: state1, ego_id: state_ego}
# plot_occlusion(frame_id, episode_idx, frame)
oncoming_vehicle_id, oncoming_vehicle_dist = mfe.oncoming_vehicle(target_id, lane_path, frame)
missing = mfe.is_oncoming_vehicle_missing(oncoming_vehicle_dist, lane_path, occlusions)
plt.show()
return missing
def test_occluded_area_vehicle_in_oncoming_lanes():
missing = set_up_frame_ep3_frame100((62.88, -20.96), -110)
assert missing
def test_occluded_area_vehicle_in_oncoming_lanes_2():
missing = set_up_frame_ep3_frame100((60.12, -33.10), 140)
assert missing
def test_occluded_area_vehicle_in_oncoming_lanes_3():
missing = set_up_frame_ep3_frame100((49.12, -30.13), -45)
assert missing
def test_occluded_area_vehicle_in_oncoming_lanes_4():
missing = set_up_frame_ep3_frame100((53.81, -38.10), 170)
assert not missing
def test_occluded_area_vehicle_in_oncoming_lanes_5():
missing = set_up_frame_ep3_frame100((56.46, -38.11), -45)
assert missing
def test_occluded_area_vehicle_in_oncoming_lanes_6():
missing = set_up_frame_ep3_frame100((55.75, -37.73), 180)
assert not missing
# Tests for missing vehicle ahead.
def test_the_vehicle_in_front_is_hidden():
"""
State1 is the possible vehicle in front.
"""
episode_idx = 6
frame_id = 50
mfe = get_feature_extractor(episode_idx=episode_idx)
lane_path = [mfe.scenario_map.get_lane(1, 1, 0)]
ego_id, occlusions = get_occlusions_and_ego(frame=frame_id, episode_idx=episode_idx)
state_target = AgentState(time=0,
position=np.array((34.58, -56.93)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=lane_path[0].get_heading_at(45.67, -46.72)
)
state1 = AgentState(time=0,
position=np.array((39.90, -52.22)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(45)
)
state_ego = AgentState(time=0,
position=np.array((34.62, -11.01)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(-45)
)
target_id = 0
frame = {target_id: state_target, 1: state1, ego_id: state_ego}
# plot_occlusion(frame_id, episode_idx, frame)
vehicle_in_front_id, vehicle_in_front_dist = mfe.vehicle_in_front(target_id, lane_path, frame)
missing = mfe.is_vehicle_in_front_missing(vehicle_in_front_dist, target_id, lane_path, frame, occlusions)
plt.show()
assert missing
def test_vehicle_is_behind():
"""
State1 is the possible vehicle in front.
"""
episode_idx = 6
frame_id = 50
mfe = get_feature_extractor(episode_idx=episode_idx)
lane_path = [mfe.scenario_map.get_lane(3, -1, 0)]
ego_id, occlusions = get_occlusions_and_ego(frame=frame_id, episode_idx=episode_idx)
state_target = AgentState(time=0,
position=np.array((76.54, -11.56)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=lane_path[0].get_heading_at(76.54, -11.56)
)
state1 = AgentState(time=0,
position=np.array((68.24, -20.61)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(45)
)
state_ego = AgentState(time=0,
position=np.array((34.62, -11.01)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(-45)
)
target_id = 0
frame = {target_id: state_target, 1: state1, ego_id: state_ego}
# plot_occlusion(frame_id, episode_idx, frame)
vehicle_in_front_id, vehicle_in_front_dist = mfe.vehicle_in_front(target_id, lane_path, frame)
missing = mfe.is_vehicle_in_front_missing(vehicle_in_front_dist, target_id, lane_path, frame, occlusions)
plt.show()
assert missing
def test_no_vehicle_in_front_2():
"""
State1 is the possible vehicle in front.
"""
episode_idx = 6
frame_id = 50
mfe = get_feature_extractor(episode_idx=episode_idx)
lane_path = [mfe.scenario_map.get_lane(3, -1, 0)]
ego_id, occlusions = get_occlusions_and_ego(frame=frame_id, episode_idx=episode_idx)
state_target = AgentState(time=0,
position=np.array((72.77, -9.44)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=lane_path[0].get_heading_at(72.77, -9.44)
)
state1 = AgentState(time=0,
position=np.array((66.29, -16.77)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(45)
)
state_ego = AgentState(time=0,
position=np.array((34.62, -11.01)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(-45)
)
target_id = 0
frame = {target_id: state_target, 1: state1, ego_id: state_ego}
# plot_occlusion(frame_id, episode_idx, frame)
vehicle_in_front_id, vehicle_in_front_dist = mfe.vehicle_in_front(target_id, lane_path, frame)
missing = mfe.is_vehicle_in_front_missing(vehicle_in_front_dist, target_id, lane_path, frame, occlusions)
plt.show()
assert not missing
def test_occlusion_far_away():
episode_idx = 7
frame_id = 200
mfe = get_feature_extractor(episode_idx=episode_idx)
lane_path = [mfe.scenario_map.get_lane(2, 2, 0),
mfe.scenario_map.get_lane(10, -1, 0)]
ego_id, occlusions = get_occlusions_and_ego(frame=frame_id, episode_idx=episode_idx)
state_target = AgentState(time=0,
position=np.array((84.70, -60.43)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=lane_path[0].get_heading_at(84.70, -60.43)
)
state_ego = AgentState(time=0,
position=np.array((73.39, -56.32)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(-45)
)
target_id = 0
frame = {target_id: state_target, ego_id: state_ego}
# plot_occlusion(frame_id, episode_idx, frame)
vehicle_in_front_id, vehicle_in_front_dist = mfe.vehicle_in_front(target_id, lane_path, frame)
missing = mfe.is_vehicle_in_front_missing(vehicle_in_front_dist, target_id, lane_path, frame, occlusions)
plt.show()
assert not missing
def test_occlusion_close_enough():
episode_idx = 7
frame_id = 200
mfe = get_feature_extractor(episode_idx=episode_idx)
lane_path = [mfe.scenario_map.get_lane(10, -1, 0)]
ego_id, occlusions = get_occlusions_and_ego(frame=frame_id, episode_idx=episode_idx)
state_target = AgentState(time=0,
position=np.array((61.59, -34.41)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=lane_path[0].get_heading_at(61.59, -34.41)
)
state_ego = AgentState(time=0,
position=np.array((73.39, -56.32)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(-45)
)
target_id = 0
frame = {target_id: state_target, ego_id: state_ego}
# plot_occlusion(frame_id, episode_idx, frame)
vehicle_in_front_id, vehicle_in_front_dist = mfe.vehicle_in_front(target_id, lane_path, frame)
missing = mfe.is_vehicle_in_front_missing(vehicle_in_front_dist, target_id, lane_path, frame, occlusions)
plt.show()
assert missing
def test_occlusion_between_vehicle_in_front():
"""
State1 is the possible vehicle in front.
"""
episode_idx = 6
frame_id = 42
mfe = get_feature_extractor(episode_idx=episode_idx)
lane_path = [mfe.scenario_map.get_lane(1, 1, 0),
mfe.scenario_map.get_lane(7, -1, 0)]
ego_id, occlusions = get_occlusions_and_ego(frame=frame_id, episode_idx=episode_idx)
state_target = AgentState(time=0,
position=np.array((33.07, -58.33)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=lane_path[0].get_heading_at(33.07, -58.33)
)
state1 = AgentState(time=0,
position=np.array((43.62, -48.47)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(45)
)
state_ego = AgentState(time=0,
position=np.array((73.39, -56.32)),
velocity=np.array((0, 0)),
acceleration=np.array((0, 0)),
heading=np.deg2rad(-45)
)
target_id = 0
frame = {target_id: state_target, ego_id: state_ego, 1: state1}
# plot_occlusion(frame_id, episode_idx, frame)
vehicle_in_front_id, vehicle_in_front_dist = mfe.vehicle_in_front(target_id, lane_path, frame)
missing = mfe.is_vehicle_in_front_missing(vehicle_in_front_dist, target_id, lane_path, frame, occlusions)
plt.show()
assert missing
# find_lane_at((32.7, -59.4))
# plot_occlusion(42, 5, scenario_name="bendplatz")
# plt.show()
| 2.328125
| 2
|
predictFromGPR.py
|
KarlRong/Eye-controlled-e-reader
| 1
|
12785167
|
import matlab.engine
import time
class GPR(object):
def __init__(self):
future = matlab.engine.start_matlab(async=True)
self.eng = future.result()
# self.eng = matlab.engine.start_matlab()
def predictGPR(self, data):
x = self.eng.predictFromGPR_X(data)
y = self.eng.predictFromGPR_Y(data)
return [x, y]
def quit(self):
self.eng.quit()
def main():
gpr = GPR()
i = 0
while i < 100:
gpr.predictGPR()
time.sleep(0.01)
print('Count: ' + str(i))
i = i + 1
gpr.quit()
if __name__ == '__main__':
main()
| 2.65625
| 3
|
setup.py
|
yanggangthu/FVS_cython
| 1
|
12785168
|
<reponame>yanggangthu/FVS_cython
from distutils.core import setup
from Cython.Build import cythonize
setup(
ext_modules = cythonize("FVS_localsearch_10_cython.pyx")
)
'''
setup(
ext_modules = cythonize("FVS_localsearch_8_cython.pyx",
#sources=[""], #add additional source file
language = "c++"))
'''
| 1.367188
| 1
|
model/__init__.py
|
reacher1130/AIGCN
| 0
|
12785169
|
<reponame>reacher1130/AIGCN
from . import util, AIGCN
| 1.039063
| 1
|
katja_thermo.py
|
KatjaT/Thermodynamics
| 0
|
12785170
|
<filename>katja_thermo.py
# -*- coding: utf-8 -*-
"""
calculate thermodynamics for Katja
"""
from component_contribution.kegg_reaction import KeggReaction
from component_contribution.kegg_model import KeggModel
from component_contribution.component_contribution import ComponentContribution
from component_contribution.thermodynamic_constants import R, default_T
import csv
import numpy as np
import uncertainties.unumpy as unumpy
def reaction2dG0(reaction_list):
'''
Calculates the standard Gibbs free energy change (dG0) for a list of reactions.
Uses the component-contribution package (Noor et al) to estimate
the standard Gibbs Free Energy of reactions based on
component contribution approach and measured values (NIST and Alberty)
Arguments:
List of reaction strings
Returns:
Array of dG0 values and standard deviation of estimates
'''
cc = ComponentContribution.init()
Kmodel = KeggModel.from_formulas(reaction_list)
Kmodel.add_thermo(cc)
dG0_prime, dG0_std = Kmodel.get_transformed_dG0(pH=7.5, I=0.2, T=298.15)
dG0_prime = np.array(map(lambda x: x[0,0], dG0_prime))
dG0_prime = unumpy.uarray(dG0_prime, np.diag(dG0_std))
return dG0_prime
def reaction2Keq(reaction_list):
'''
Calculates the equilibrium constants of a reaction, using dG0.
Arguments:
List of reaction strings
Returns:
Array of K-equilibrium values
'''
dG0_prime = reaction2dG0(reaction_list)
Keq = unumpy.exp( -dG0_prime / (R*default_T) )
return Keq
def reaction2RI(reaction_list, fixed_conc=0.1):
'''
Calculates the reversibility index (RI) of a reaction.
The RI represent the change in concentrations of metabolites
(from equal reaction reactants) that will make the reaction reversible.
That is, the higher RI is, the more irreversible the reaction.
A convenient threshold is RI >= 1000, i.e. a change of
1000% in metabolite concentrations would be required in order to flip the
reaction direction.
Arguments:
List of reaction strings
Returns:
Array of RI values
'''
keq = reaction2Keq(reaction_list)
sparse = map(lambda x: KeggReaction.parse_formula(x).sparse, reaction_list)
N_P = np.zeros(len(sparse))
N_S = np.zeros(len(sparse))
for i,s in enumerate(sparse):
N_P[i] = sum([v for v in s.itervalues() if v>0])
N_S[i] = -sum([v for v in s.itervalues() if v<0])
N = N_P + N_S
Q_2prime = fixed_conc**(N_P-N_S)
RI = ( keq*Q_2prime )**( 2.0/N )
return RI
if __name__ == "__main__":
reactions = csv.reader(open('CCMtbRxnsKEGG.txt', 'r'))
names = []
reaction_list = []
for row in reactions:
row = row[0].split(" ")
names.append(row[0].replace("'", ''))
reaction_list.append(row[1])
dG0 = reaction2dG0(reaction_list)
Keq = reaction2Keq(reaction_list)
RI = reaction2RI(reaction_list)
reversibility_index = dict(zip(names, RI))
f = open('reversibility_index.csv','w')
w = csv.writer(f)
for k,v in reversibility_index.iteritems():
w.writerow([k, v])
f.close()
| 2.75
| 3
|
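A worked example of the reversibility-index formula used in reaction2RI, RI = (K'eq * C**(N_P - N_S))**(2/N) with C the fixed concentration and N = N_S + N_P, for a hypothetical reaction A + B <=> C with an assumed K'eq of 10.

```python
Keq, fixed_conc = 10.0, 0.1          # assumed equilibrium constant; default 0.1 M concentration
N_S, N_P = 2.0, 1.0                  # two substrates, one product
N = N_S + N_P
Q_2prime = fixed_conc ** (N_P - N_S)     # 0.1**-1 = 10
RI = (Keq * Q_2prime) ** (2.0 / N)       # (10 * 10)**(2/3) ~= 21.5
print(RI)
```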
misc/python/materialize/checks/roles.py
|
guswynn/materialize
| 0
|
12785171
|
<reponame>guswynn/materialize
# Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.
from textwrap import dedent
from typing import List
from materialize.checks.actions import Testdrive
from materialize.checks.checks import Check
class CreateRole(Check):
def initialize(self) -> Testdrive:
return Testdrive("")
def manipulate(self) -> List[Testdrive]:
return [
Testdrive(dedent(s))
for s in [
"""
> CREATE ROLE create_role1 SUPERUSER LOGIN;
""",
"""
> CREATE ROLE create_role2 SUPERUSER LOGIN;
""",
]
]
def validate(self) -> Testdrive:
return Testdrive(
dedent(
"""
> SELECT name FROM mz_roles WHERE name LIKE 'create_role%';
create_role1
create_role2
"""
)
)
class DropRole(Check):
def initialize(self) -> Testdrive:
return Testdrive(
dedent(
"""
> CREATE ROLE drop_role1 SUPERUSER LOGIN;
"""
)
)
def manipulate(self) -> List[Testdrive]:
return [
Testdrive(dedent(s))
for s in [
"""
> DROP ROLE drop_role1;
> CREATE ROLE drop_role2 SUPERUSER LOGIN;
""",
"""
> DROP ROLE drop_role2;
""",
]
]
def validate(self) -> Testdrive:
return Testdrive(
dedent(
"""
> SELECT COUNT(*) FROM mz_roles WHERE name LIKE 'drop_role%';
0
"""
)
)
| 2.203125
| 2
|
export/dataset_summary.py
|
AndreasMadsen/nlp-roar-interpretability
| 17
|
12785172
|
import glob
import json
import argparse
import os
import os.path as path
from functools import partial
from tqdm import tqdm
import pandas as pd
import numpy as np
import scipy
import plotnine as p9
from scipy.stats import bootstrap
from nlproar.dataset import SNLIDataset, SSTDataset, IMDBDataset, BabiDataset, MimicDataset
def ratio_confint(partial_df):
"""Implementes a ratio-confidence interval
The idea is to project to logits space, then assume a normal distribution,
and then project back to the inital space.
Method proposed here: https://stats.stackexchange.com/questions/263516
"""
column_name = partial_df.loc[:, 'test_metric'].iat[0]
x = partial_df.loc[:, column_name].to_numpy()
mean = np.mean(x)
if np.all(x[0] == x):
lower = mean
upper = mean
else:
res = bootstrap((x, ), np.mean, confidence_level=0.95, random_state=np.random.default_rng(0))
lower = res.confidence_interval.low
upper = res.confidence_interval.high
return pd.Series({
'lower': lower,
'mean': mean,
'upper': upper,
'format': f'${mean:.0%}^{{+{upper-mean:.1%}}}_{{-{mean-lower:.1%}}}$'.replace('%', '\\%'),
'n': len(x)
})
def dataset_stats(Loader, cachedir):
dataset = Loader(cachedir=cachedir, model_type='rnn', num_workers=0)
dataset.prepare_data()
dataset.setup('fit')
dataset.setup('test')
summaries = {}
dataloaders = [
('train', dataset.train_dataloader()),
('val', dataset.val_dataloader()),
('test', dataset.test_dataloader())
]
for split_name, split_iter in dataloaders:
lengths = []
for batch in tqdm(split_iter, desc=f'Summarizing {split_name} split', leave=False):
lengths += batch.length.tolist()
summaries[split_name] = {
'length': np.mean(lengths),
'count': len(lengths),
}
return pd.Series({
'dataset': dataset.name,
'vocab_size': len(dataset.vocabulary),
'train_size': summaries['train']['count'],
'valid_size': summaries['val']['count'],
'test_size': summaries['test']['count'],
'avg_length': np.average(
[summary['length'] for summary in summaries.values()],
weights=[summary['count'] for summary in summaries.values()]
)
})
thisdir = path.dirname(path.realpath(__file__))
parser = argparse.ArgumentParser()
parser.add_argument('--persistent-dir',
action='store',
default=path.realpath(path.join(thisdir, '..')),
type=str,
help='Directory where all persistent data will be stored')
parser.add_argument('--stage',
action='store',
default='both',
type=str,
choices=['preprocess', 'plot', 'both'],
help='Which export stage should be performed. Mostly just useful for debugging.')
if __name__ == "__main__":
pd.set_option('display.max_rows', None)
args, unknown = parser.parse_known_args()
dataset_mapping = pd.DataFrame([
{'dataset': 'sst', 'dataset_pretty': 'SST', 'test_metric': 'f1_test', 'reference': '$81\\%$'},
{'dataset': 'snli', 'dataset_pretty': 'SNLI', 'test_metric': 'f1_test', 'reference': '$88\\%$'},
{'dataset': 'imdb', 'dataset_pretty': 'IMDB', 'test_metric': 'f1_test', 'reference': '$78\\%$'},
{'dataset': 'mimic-a', 'dataset_pretty': 'Anemia', 'test_metric': 'f1_test', 'reference': '$92\\%$'},
{'dataset': 'mimic-d', 'dataset_pretty': 'Diabetes', 'test_metric': 'f1_test', 'reference': '$79\\%$'},
{'dataset': 'babi-1', 'dataset_pretty': 'bAbI-1', 'test_metric': 'acc_test', 'reference': '$100\\%$'},
{'dataset': 'babi-2', 'dataset_pretty': 'bAbI-2', 'test_metric': 'acc_test', 'reference': '$48\\%$'},
{'dataset': 'babi-3', 'dataset_pretty': 'bAbI-3', 'test_metric': 'acc_test', 'reference': '$62\\%$'}
])
model_mapping = pd.DataFrame([
{'model_type': 'rnn', 'model_type_pretty': 'BiLSTM-Attention'},
{'model_type': 'roberta', 'model_type_pretty': 'RoBERTa'}
])
datasets = {
'sst': SSTDataset,
'snli': SNLIDataset,
'imdb': IMDBDataset,
'babi-1': partial(BabiDataset, task=1),
'babi-2': partial(BabiDataset, task=2),
'babi-3': partial(BabiDataset, task=3),
'mimic-d': partial(MimicDataset, subset='diabetes', mimicdir=f'{args.persistent_dir}/mimic'),
'mimic-a': partial(MimicDataset, subset='anemia', mimicdir=f'{args.persistent_dir}/mimic'),
}
if args.stage in ['both', 'preprocess']:
# Read JSON files into dataframe
results = []
for file in tqdm(glob.glob(f'{args.persistent_dir}/results/roar/*_s-[0-9].json'),
desc='Loading .json files'):
with open(file, 'r') as fp:
try:
results.append(json.load(fp))
except json.decoder.JSONDecodeError:
print(f'{file} has a format error')
results_df = pd.DataFrame(results)
# Summarize each dataset
summaries = []
for dataset_loader in tqdm(datasets.values(), desc='Summarizing datasets'):
summaries.append(dataset_stats(dataset_loader, cachedir=args.persistent_dir + '/cache'))
summaries_df = pd.DataFrame(summaries)
df = (results_df
.merge(dataset_mapping, on='dataset')
.groupby(['dataset', 'dataset_pretty', 'reference', 'model_type'])
.apply(ratio_confint)
.reset_index()
.merge(summaries_df, on='dataset')
.merge(model_mapping, on='model_type')
.drop(['lower', 'upper', 'n', 'mean', 'dataset', 'model_type'], axis=1)
)
if args.stage in ['preprocess']:
os.makedirs(f'{args.persistent_dir}/pandas', exist_ok=True)
df.to_pickle(f'{args.persistent_dir}/pandas/dataset.pd.pkl.xz')
if args.stage in ['plot']:
df = pd.read_pickle(f'{args.persistent_dir}/pandas/dataset.pd.pkl.xz')
if args.stage in ['both', 'plot']:
print(df)
print(df
.reset_index()
.rename(columns={
'dataset_pretty': 'Dataset',
'format': 'Faithfulness'
})
.pivot(
index=['Dataset'],
columns='model_type_pretty',
values='Faithfulness'
)
.style.to_latex()
)
print(df
.reset_index()
.rename(columns={
'dataset_pretty': 'Dataset',
'format': 'Faithfulness'
})
.pivot(
index=['Dataset', 'train_size', 'valid_size', 'test_size', 'reference'],
columns='model_type_pretty',
values='Faithfulness'
)
.style.to_latex()
)
| 2.40625
| 2
|
nr.refreshable/src/nr/refreshable/__init__.py
|
NiklasRosenstein/nr-python
| 3
|
12785173
|
<reponame>NiklasRosenstein/nr-python
import logging
import typing as t
import threading
__author__ = '<NAME> <<EMAIL>>'
__version__ = '0.0.2'
T = t.TypeVar('T')
R = t.TypeVar('R')
logger = logging.getLogger(__name__)
Subscriber = t.Callable[['Refreshable[T]'], None]
class Refreshable(t.Generic[T]):
def __init__(self, initial: T) -> None:
self._lock = threading.Lock()
self._current = initial
self._subscribers: t.List[Subscriber] = []
def get(self) -> T:
with self._lock:
return self._current
def update(self, value: T) -> None:
with self._lock:
self._current = value
subscribers = self._subscribers[:]
for subscriber in subscribers:
try:
subscriber(self)
except:
logger.exception('Error in subscriber')
def subscribe(self, subscriber: Subscriber) -> None:
with self._lock:
self._subscribers.append(subscriber)
try:
subscriber(self)
except:
logger.exception('Error in subscriber')
def map(self, mapper: t.Callable[[T], R]) -> 'Refreshable[R]':
"""
Map the value of this refreshable to a new refreshable that is automatically updated when the
parent is updated. Use this method sparingly: it registers a new subscriber on this
refreshable that is never removed.
"""
child = Refreshable(mapper(self.get()))
def refresh(_parent):
child.update(mapper(self.get()))
self.subscribe(refresh)
return child
| 2.875
| 3
|
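A usage sketch for the Refreshable class above (assuming it is importable, e.g. from nr.refreshable import Refreshable): a value derived via map() is refreshed automatically when its parent is updated.

```python
celsius = Refreshable(0.0)
fahrenheit = celsius.map(lambda c: c * 9 / 5 + 32)   # derived view, kept in sync

fahrenheit.subscribe(lambda _r: print('fahrenheit changed'))  # also called once on subscribe
celsius.update(100.0)                                          # propagates through map()
assert fahrenheit.get() == 212.0
```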
Back/generalchatroom/urls.py
|
sadeghjafari5528/404-
| 0
|
12785174
|
from django.urls import path
from . import views
urlpatterns = [
path('generalchatroom/', views.index, name='index'),
path('generalchatroom/<str:room_name>/', views.room, name='room'),
path('show_Message/', views.show_Message, name='show_Message'),
]
| 1.757813
| 2
|
lte/gateway/python/integ_tests/s1aptests/test_send_error_ind_for_dl_nas_with_auth_req.py
|
Aitend/magma
| 849
|
12785175
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import s1ap_types
import s1ap_wrapper
class TestSendErrorIndForDlNasWithAuthReq(unittest.TestCase):
"""Test sending of error indication for DL NAS message
carrying authentication request
"""
def setUp(self):
"""Initialize"""
self._s1ap_wrapper = s1ap_wrapper.TestWrapper()
def tearDown(self):
"""Cleanup"""
self._s1ap_wrapper.cleanup()
def test_send_error_ind_for_dl_nas_with_auth_req(self):
"""Send error indication after receiving authentication request"""
self._s1ap_wrapper.configIpBlock()
self._s1ap_wrapper.configUEDevice(1)
req = self._s1ap_wrapper.ue_req
attach_req = s1ap_types.ueAttachRequest_t()
attach_req.ue_Id = req.ue_id
sec_ctxt = s1ap_types.TFW_CREATE_NEW_SECURITY_CONTEXT
id_type = s1ap_types.TFW_MID_TYPE_IMSI
eps_type = s1ap_types.TFW_EPS_ATTACH_TYPE_EPS_ATTACH
attach_req.mIdType = id_type
attach_req.epsAttachType = eps_type
attach_req.useOldSecCtxt = sec_ctxt
self._s1ap_wrapper._s1_util.issue_cmd(
s1ap_types.tfwCmd.UE_ATTACH_REQUEST, attach_req,
)
print("************************* Sent attach request")
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_AUTH_REQ_IND.value,
)
print("************************* Received authentication request")
# Send error indication
error_ind = s1ap_types.fwNbErrIndMsg_t()
# isUeAssoc flag to include optional MME_UE_S1AP_ID and eNB_UE_S1AP_ID
error_ind.isUeAssoc = True
error_ind.ue_Id = req.ue_id
error_ind.cause.pres = True
# Radio network causeType = 0
error_ind.cause.causeType = 0
# causeVal - Unknown-pair-ue-s1ap-id
error_ind.cause.causeVal = 15
print("*** Sending error indication ***")
self._s1ap_wrapper._s1_util.issue_cmd(
s1ap_types.tfwCmd.ENB_ERR_IND_MSG, error_ind,
)
# Context release
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_CTX_REL_IND.value,
)
print("************************* Received UE_CTX_REL_IND")
if __name__ == "__main__":
unittest.main()
| 1.742188
| 2
|
gerapy/pipelines/mongodb.py
|
hantmac/Gerapy
| 2,888
|
12785176
|
<filename>gerapy/pipelines/mongodb.py<gh_stars>1000+
import pymongo
from twisted.internet.threads import deferToThread
class MongoDBPipeline(object):
def __init__(self, mongodb_uri, mongodb_database):
self.mongodb_uri = mongodb_uri
self.mongodb_database = mongodb_database
@classmethod
def from_crawler(cls, crawler):
return cls(
mongodb_uri=crawler.settings.get('MONGODB_URI'),
mongodb_database=crawler.settings.get('MONGODB_DATABASE')
)
def open_spider(self, spider):
self.client = pymongo.MongoClient(self.mongodb_uri)
self.database = self.client[self.mongodb_database]
def _process_item(self, item, spider):
allowed_spiders = item.mongodb_spiders
allowed_collections = item.mongodb_collections
if allowed_spiders and spider.name in allowed_spiders:
for allowed_collection in allowed_collections:
self.database[allowed_collection].insert(dict(item))
return item
def close_spider(self, spider):
self.client.close()
def process_item(self, item, spider):
return deferToThread(self._process_item, item, spider)
| 2.40625
| 2
|
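A sketch of how this pipeline is typically wired into a Scrapy project's settings. The MONGODB_URI / MONGODB_DATABASE keys come from from_crawler above; the pipeline priority and connection values are illustrative, and items are expected to carry the mongodb_spiders and mongodb_collections attributes read in _process_item.

```python
# settings.py (illustrative values)
ITEM_PIPELINES = {
    'gerapy.pipelines.mongodb.MongoDBPipeline': 300,
}
MONGODB_URI = 'mongodb://localhost:27017'
MONGODB_DATABASE = 'gerapy'
```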
bookorbooks/account/models/parent_profile_model.py
|
talhakoylu/SummerInternshipBackend
| 1
|
12785177
|
from django.core.exceptions import ValidationError
from constants.account_strings import AccountStrings
from django.db import models
from django.conf import settings
from country.models import City
from django.db.models.signals import post_delete
from django.dispatch import receiver
class ParentProfile(models.Model):
user = models.OneToOneField(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
primary_key=True,
verbose_name=AccountStrings.ParentProfileStrings.user_verbose_name,
related_name="user_parent")
city = models.ForeignKey(
"country.City",
on_delete=models.SET_NULL,
null=True,
blank=True,
verbose_name=AccountStrings.ParentProfileStrings.city_verbose_name,
related_name="city_parent_profiles")
profession = models.CharField(max_length=500,
null=True,
blank=True,
verbose_name=AccountStrings.
ParentProfileStrings.profession_verbose_name)
class Meta:
verbose_name = AccountStrings.ParentProfileStrings.meta_verbose_name
verbose_name_plural = AccountStrings.ParentProfileStrings.meta_verbose_name_plural
@property
def get_full_name(self):
return f"{self.user.first_name} {self.user.last_name}"
def __str__(self):
return self.get_full_name
def clean(self) -> None:
"""
This method will check if the user type is a parent during creation.
"""
if self.user.user_type != 3:
raise ValidationError(AccountStrings.ParentProfileStrings.user_type_error)
| 2.25
| 2
|
setup.py
|
dignitas123/algo_trader
| 4
|
12785178
|
import setuptools
from os.path import dirname, join
here = dirname(__file__)
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="algo-trader",
version="2.0.4",
author="<NAME>",
author_email="<EMAIL>",
description="Trade execution engine to process API data and transmit"
" orders to Bitmex and other brokers.",
long_description=long_description,
long_description_content_type='text/markdown',
url="https://github.com/dignitas123/algo_trader",
install_requires=['bitmex'],
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
entry_points={
'console_scripts': [
'algotrader=algo_trader.startbot:run',
],
}
)
| 1.617188
| 2
|
test/schema/fields/test_types.py
|
mcptr/pjxxs
| 0
|
12785179
|
<filename>test/schema/fields/test_types.py
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_raises
from pjxxs import fields
types = {
fields.String : (str, ""),
fields.Object : (dict, {}),
fields.Array : (list, []),
fields.Int : (int, 0),
fields.Double : (float, 0),
fields.Bool : (bool, False),
fields.Null : (None, None),
}
def test_ctors():
name = "name"
for t in types:
o = t(name)
assert_equal(o.get_ident(), name)
assert_equal(o.get_base_type(), types[t][0])
assert_equal(o.get_content(), types[t][1])
def test_nullable():
name = "name"
for t in types:
o = t(name, nullable=True)
assert_equal(o.get_content(), None)
| 2.5
| 2
|
tests/test_basic.py
|
aigarius/photoriver2
| 0
|
12785180
|
<gh_stars>0
"""Basic test environment tests"""
def test_math():
assert 40 + 2 == 42
| 1.304688
| 1
|
photonpy/__init__.py
|
qnano/photonpy
| 5
|
12785181
|
<gh_stars>1-10
from .cpp.context import *
from .cpp.estimator import *
from .cpp.gaussian import Gauss3D_Calibration, GaussianPSFMethods
from .cpp.cspline import *
from .cpp.postprocess import *
from .smlm.dataset import Dataset
from .cpp.spotdetect import *
| 0.972656
| 1
|
tests/test_config_parser.py
|
serend1p1ty/core-pytorch-utils
| 65
|
12785182
|
<reponame>serend1p1ty/core-pytorch-utils<filename>tests/test_config_parser.py
import os
import tempfile
import yaml
from cpu import ConfigArgumentParser
def test_config_parser():
with tempfile.TemporaryDirectory() as dir:
parser = ConfigArgumentParser()
parser.add_argument("-x", "--arg-x", action="store_true")
parser.add_argument("-y", "--arg-y", dest="y1", type=int, default=1)
parser.add_argument("--arg-z", action="append", type=float, default=[2.0])
parser.add_argument("-k", type=float)
args = parser.parse_args(["--arg-x", "-y", "3", "--arg-z", "10.0"])
assert args.arg_x is True
assert args.y1 == 3
assert args.arg_z == [2.0, 10.0]
assert args.k is None
data = {"arg_x": True, "arg_z": [2.0, 10.0], "k": 3.0}
config_file = os.path.join(dir, "config.yaml")
with open(config_file, "w") as f:
yaml.dump(data, f)
args = parser.parse_args(["-c", config_file])
assert args.arg_x is True
assert args.y1 == 1
assert args.arg_z == [2.0, 10.0]
assert args.k == 3.0
args = parser.parse_args(["-c", config_file, "-y", "5", "--arg-z", "18", "-k", "8"])
assert args.arg_x is True
assert args.y1 == 5
assert args.arg_z == [2.0, 10.0, 18.0]
assert args.k == 8.0
| 2.53125
| 3
|
examples/mp/bzn_mp2.py
|
seunghoonlee89/pyscf-ecCC-TCC
| 2
|
12785183
|
<gh_stars>1-10
#!/usr/bin/env python
#
# Author: <NAME> <<EMAIL>>
#
'''
A simple example to run MP2 calculation.
'''
import pyscf
from pyscf import gto, scf, lo, tools, symm
mol = gto.M(atom = 'C 0.0000 1.396792 0.0000;\
C 0.0000 -1.396792 0.0000;\
C 1.209657 0.698396 0.0000;\
C -1.209657 -0.698396 0.0000;\
C -1.209657 0.698396 0.0000;\
C 1.209657 -0.698396 0.0000;\
H 0.0000 2.484212 0.0000;\
H 2.151390 1.242106 0.0000;\
H -2.151390 -1.242106 0.0000;\
H -2.151390 1.242106 0.0000;\
H 2.151390 -1.242106 0.0000;\
H 0.0000 -2.484212 0.0000',
basis = "ccpvdz",
verbose=5, symmetry="Cs", spin=0) # When orbitals are localized the symmetry goes down from D6h to Cs. (Cs since sigma and pi do not mix)
mf = mol.RHF().run()
mp2 = mf.MP2()
#mp2 = mf.MP2(EN=True)
mp2.run()
| 2.625
| 3
|
examples/pybullet/examples/frictionCone.py
|
stolk/bullet3
| 158
|
12785184
|
import pybullet as p
import time
import math
p.connect(p.GUI)
useMaximalCoordinates = False
p.setGravity(0, 0, -10)
plane = p.loadURDF("plane.urdf", [0, 0, -1], useMaximalCoordinates=useMaximalCoordinates)
p.setRealTimeSimulation(0)
velocity = 1
num = 40
p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1) #disable this to make it faster
p.configureDebugVisualizer(p.COV_ENABLE_TINY_RENDERER, 0)
p.setPhysicsEngineParameter(enableConeFriction=1)
for i in range(num):
print("progress:", i, num)
x = velocity * math.sin(2. * 3.1415 * float(i) / num)
y = velocity * math.cos(2. * 3.1415 * float(i) / num)
print("velocity=", x, y)
sphere = p.loadURDF("sphere_small_zeroinertia.urdf",
flags=p.URDF_USE_INERTIA_FROM_FILE,
useMaximalCoordinates=useMaximalCoordinates)
p.changeDynamics(sphere, -1, lateralFriction=0.02)
#p.changeDynamics(sphere,-1,rollingFriction=10)
p.changeDynamics(sphere, -1, linearDamping=0)
p.changeDynamics(sphere, -1, angularDamping=0)
p.resetBaseVelocity(sphere, linearVelocity=[x, y, 0])
prevPos = [0, 0, 0]
for i in range(2048):
p.stepSimulation()
pos = p.getBasePositionAndOrientation(sphere)[0]
if (i & 64):
p.addUserDebugLine(prevPos, pos, [1, 0, 0], 1)
prevPos = pos
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)
while (1):
time.sleep(0.01)
| 2.515625
| 3
|
setup/remove_nexus5_bloat.py
|
hacker131/dotfiles
| 71
|
12785185
|
<gh_stars>10-100
# Run this script using Monkeyrunner.
import commands
import time
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice
_PACKAGES_TO_BE_DISABLED = (
'com.google.android.apps.magazines',
'com.google.android.apps.plus',
'com.google.android.keep',
'com.google.android.play.games',
'com.google.android.videos',
'com.google.earth',
)
_PACKAGE_INFO_COMMAND = ('adb shell am start -a '
'android.settings.APPLICATION_DETAILS_SETTINGS '
'-n com.android.settings/.applications.InstalledAppDetails '
'-d %s')
_SLEEP_TIME_BETWEEN_INTERACTION = 1 # In seconds.
def isEnabled(package_name):
  # List enabled packages and check whether the given package is among them.
  status, output = commands.getstatusoutput('adb shell pm list packages -e')
  if ('package:%s' % package_name) in output:
    return True
  return False
def click(device, x, y):
device.touch(x, y, MonkeyDevice.DOWN_AND_UP)
def disableApp(device, package_name):
assert package_name
if not isEnabled(package_name):
print 'Package %s is already disabled.' % package_name
return
else:
print 'Disabling package %s' % package_name
# Coordinates of a point on disable button.
status, output = commands.getstatusoutput(
_PACKAGE_INFO_COMMAND % package_name)
assert status == 0
  _POINT1 = {'X': 800, 'Y': 500}
click(device, _POINT1['X'], _POINT1['Y'])
time.sleep(_SLEEP_TIME_BETWEEN_INTERACTION)
# Coordinates of a point on the OK button which appears in a modal dialog
# shown as a result of clicking "Disable".
_POINT2 = {'X': 700, 'Y': 1100}
click(device, _POINT2['X'], _POINT2['Y'])
# Only needed in some cases, when disabling a package requires data to be
# cleared.
time.sleep(_SLEEP_TIME_BETWEEN_INTERACTION)
_POINT3 = {'X': 700, 'Y': 1100}
click(device, _POINT3['X'], _POINT3['Y'])
def main():
device = MonkeyRunner.waitForConnection()
for package in _PACKAGES_TO_BE_DISABLED:
disableApp(device, package)
if __name__ == '__main__':
main()
| 2.53125
| 3
|
server/orchestrator/models/__init__.py
|
bsantaus/qiskit-dell-runtime
| 17
|
12785186
|
<gh_stars>10-100
from .db_service import DBService
from .runtime_program_model import RuntimeProgram
from .message_model import Message
from .job_model import Job
from .user_model import User
| 1.023438
| 1
|
happytransformer/cuda_detect.py
|
swcrazyfan/happy-transformer
| 0
|
12785187
|
import torch
import torch_xla
import torch_xla.core.xla_model as xm
def detect_cuda_device_number():
return torch.cuda.current_device() if torch.cuda.is_available() else -1
def detect_tpu_device_number():
return xm.xla_device().index if xm.xla_device() else -1
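# Hedged usage sketch (the helper below is hypothetical, built only on the detector above):
def pick_torch_device():
    idx = detect_cuda_device_number()
    # -1 means no CUDA device was found, so fall back to CPU.
    return torch.device("cuda:%d" % idx) if idx >= 0 else torch.device("cpu")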
| 2.3125
| 2
|
prometheus_client/mmap_dict.py
|
postmates/prometheus_client_python
| 1
|
12785188
|
<gh_stars>1-10
import json
import mmap
import os
import struct
_INITIAL_MMAP_SIZE = 1 << 20
_pack_integer_func = struct.Struct(b'i').pack
_value_timestamp = struct.Struct(b'dd')
_unpack_integer = struct.Struct(b'i').unpack_from
# struct.pack_into has atomicity issues because it will temporarily write 0 into
# the mmap, resulting in false reads to 0 when experiencing a lot of writes.
# Using direct assignment solves this issue.
def _pack_value_timestamp(data, pos, value, timestamp):
data[pos:pos + _value_timestamp.size] = _value_timestamp.pack(value, timestamp)
def _pack_integer(data, pos, value):
data[pos:pos + 4] = _pack_integer_func(value)
class MmapedDict(object):
"""A dict of doubles, backed by an mmapped file.
The file starts with a 4 byte int, indicating how much of it is used.
Then 4 bytes of padding.
There's then a number of entries, consisting of a 4 byte int which is the
size of the next field, a utf-8 encoded string key, padding to a 8 byte
alignment, a 8 byte float which is the value and then an 8 byte timestamp (seconds).
Not thread safe.
"""
def __init__(self, filename, read_mode=False):
self._f = open(filename, 'rb' if read_mode else 'a+b')
self._fname = filename
if os.fstat(self._f.fileno()).st_size == 0:
self._f.truncate(_INITIAL_MMAP_SIZE)
self._capacity = os.fstat(self._f.fileno()).st_size
self._m = mmap.mmap(self._f.fileno(), self._capacity,
access=mmap.ACCESS_READ if read_mode else mmap.ACCESS_WRITE)
self._positions = {}
self._used = _unpack_integer(self._m, 0)[0]
if self._used == 0:
self._used = 8
_pack_integer(self._m, 0, self._used)
else:
if not read_mode:
for key, _, _, pos in self._read_all_values():
self._positions[key] = pos
def _init_value(self, key):
"""Initialize a value. Lock must be held by caller."""
encoded = key.encode('utf-8')
# Pad to be 8-byte aligned.
padded = encoded + (b' ' * (8 - (len(encoded) + 4) % 8))
value = struct.pack('i{0}sdd'.format(len(padded)).encode(), len(encoded), padded, 0.0, 0.0)
while self._used + len(value) > self._capacity:
self._capacity *= 2
self._f.truncate(self._capacity)
self._m = mmap.mmap(self._f.fileno(), self._capacity)
self._m[self._used:self._used + len(value)] = value
# Update how much space we've used.
self._used += len(value)
_pack_integer(self._m, 0, self._used)
self._positions[key] = self._used - _value_timestamp.size
def _read_all_values(self):
"""Yield (key, value, timestamp, pos). No locking is performed."""
pos = 8
# cache variables to local ones and prevent attributes lookup
# on every loop iteration
used = self._used
data = self._m
unpack_from = struct.unpack_from
while pos < used:
encoded_len = _unpack_integer(data, pos)[0]
# check we are not reading beyond bounds
if encoded_len + pos > used:
msg = 'Read beyond file size detected, %s is corrupted.'
raise RuntimeError(msg % self._fname)
pos += 4
encoded = unpack_from(('%ss' % encoded_len).encode(), data, pos)[0]
padded_len = encoded_len + (8 - (encoded_len + 4) % 8)
pos += padded_len
value, timestamp = _value_timestamp.unpack_from(data, pos)
yield encoded.decode('utf-8'), value, _from_timestamp_float(timestamp), pos
pos += _value_timestamp.size
def read_all_values(self):
"""Yield (key, value, pos). No locking is performed."""
for k, v, ts, _ in self._read_all_values():
yield k, v, ts
def read_value_timestamp(self, key):
if key not in self._positions:
self._init_value(key)
pos = self._positions[key]
# We assume that reading from an 8 byte aligned value is atomic
val, ts = _value_timestamp.unpack_from(self._m, pos)
return val, _from_timestamp_float(ts)
def read_value(self, key):
return self.read_value_timestamp(key)[0]
def write_value(self, key, value, timestamp=None):
if key not in self._positions:
self._init_value(key)
pos = self._positions[key]
# We assume that writing to an 8 byte aligned value is atomic
_pack_value_timestamp(self._m, pos, value, _to_timestamp_float(timestamp))
def close(self):
if self._f:
self._m.close()
self._m = None
self._f.close()
self._f = None
def mmap_key(metric_name, name, labelnames, labelvalues):
"""Format a key for use in the mmap file."""
# ensure labels are in consistent order for identity
labels = dict(zip(labelnames, labelvalues))
return json.dumps([metric_name, name, labels], sort_keys=True)
def _from_timestamp_float(timestamp):
"""Convert timestamp from a pure floating point value
inf is decoded as None
"""
if timestamp == float('inf'):
return None
else:
return timestamp
def _to_timestamp_float(timestamp):
"""Convert timestamp to a pure floating point value
None is encoded as inf
"""
if timestamp is None:
return float('inf')
else:
return float(timestamp)
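# Hedged usage sketch (not part of the library module): round-tripping one sample through
# MmapedDict in a throwaway file, using only the helpers defined above.
if __name__ == '__main__':
    import tempfile
    path = os.path.join(tempfile.mkdtemp(), 'example.db')
    d = MmapedDict(path)
    key = mmap_key('http_requests', 'http_requests_total', ('method',), ('GET',))
    d.write_value(key, 1.0)  # no timestamp -> stored as inf, read back as None
    for k, value, timestamp in d.read_all_values():
        print(k, value, timestamp)
    d.close()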
| 2.546875
| 3
|
fasta2OneHotEncoding.py
|
lincshunter/RNACellularLocalization
| 0
|
12785189
|
<reponame>lincshunter/RNACellularLocalization<filename>fasta2OneHotEncoding.py<gh_stars>0
import os
import sys
from numpy import array
from numpy import argmax
from keras.utils import to_categorical
import numpy as np
import string
#import diShuffle
def oneHot2Sequence(oneHot):
#print(oneHot)
seq=np.zeros(shape=(oneHot.shape[0],),dtype=np.str)
for i in range(0,oneHot.shape[0]):
print(np.array_str(oneHot[i,:]))
if(np.array_str(oneHot[i,:]) =='[1 0 0 0]'):
seq[i]='A'
elif(np.array_str(oneHot[i,:]) =='[0 1 0 0]'):
seq[i]='T'
elif(np.array_str(oneHot[i,:]) =='[0 0 1 0]'):
seq[i] ='G'
elif(np.array_str(oneHot[i,:]) =='[0 0 0 1]'):
seq[i] ='C'
else:
seq[i]='N'
s=np.array_str(seq)
s=s.replace('[','')
s=s.replace(']','')
s=s.replace(' ','')
s=s.replace("'","")
return s
def complement(seq):
complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
bases = list(seq)
bases = [complement[base] for base in bases]
return ''.join(bases)
def reverse_complement(s):
return complement(s[::-1])
def fasta2OneHot(fastafile,classLabel):
#fastafile=sys.argv[1]
f=open(fastafile)
lines=f.read().splitlines()
print(len(lines))
i=0
#x = np.zeros(shape=(120000,500,4),dtype=np.int)
x= np.zeros(shape=(120000,len(lines),4),dtype=np.int)
y = np.zeros(shape=(120000,),dtype=np.int)
c=0
MAX_LENGTH=0
while i < len(lines):
#print(i)
id = lines[i]
i=i+1
seq = lines[i]
seq= seq.upper()
if(len(seq) > MAX_LENGTH):
MAX_LENGTH=len(seq)
s = seq
#rev_seq = reverse_complement(seq)
seq = seq.replace('A','0')
seq = seq.replace('a','0')
seq = seq.replace('T','1')
seq = seq.replace('t','1')
seq = seq.replace('u','1')
seq = seq.replace('U','1')
seq = seq.replace('G','2')
seq = seq.replace('g','2')
seq = seq.replace('C','3')
seq = seq.replace('c','3')
seq = str(seq)
data = list(seq)
#print(len(data))
if ('-' not in data ):
data = [int(j) for j in data]
data = array(data)
d = to_categorical(array(data),num_classes=4)
if(len(data) >= (len(lines)+1)):#501
mid = round(len(data)/2 + 0.5)
start = mid - round((len(lines)+1)/2)
end = mid + round((len(lines)+1)/2)
d = d[start:end,]
else:
pad = len(lines) - len(seq)
b=np.zeros(shape=(pad,4))
d=np.concatenate((d, b), axis=0)
x[c,:,:]=d
y[c] = classLabel
c=c+1
i=i+1
x=x[0:c,:,:]
y=y[0:c]
#print(x.shape)
#print(y.shape)
return(x,y)
#files
pos_train_fa = sys.argv[1]
neg_train_fa = sys.argv[2]
'''
pos_train_fa = "Nuclear_lncRNAs.fa"
neg_train_fa = "Cytosol_lncRNAs.fa"
'''
(x1,y1) = fasta2OneHot(pos_train_fa,1)
print(x1.shape)
print(y1.shape)
(x2,y2)=fasta2OneHot(neg_train_fa,0)
print(x2.shape)
print(y2.shape)
blank = np.zeros([4269,132,4])
x1 = np.hstack([x1,blank])
x_train = np.vstack((x1,x2))
y_train = np.concatenate((y1,y2))
print(x_train.shape)
print(y_train.shape)
'''
#pos_test_fa = sys.argv[3]
#neg_test_fa = sys.argv[4]
#neg_test_fa = sys.argv[1]
#neg_test_fa = sys.argv[1]
(x1,y1) = fasta2OneHot(pos_test_fa,1)
(x2,y2) = fasta2OneHot(neg_test_fa,0)
x_test = np.hstack((x1,x2))
y_test = np.hstack((y1,y2))
'''
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, test_size=0.3, random_state=13)
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
import h5py
hf = h5py.File(sys.argv[3]+".h5", 'w')
hf.create_dataset('x_train', data=x_train)
hf.create_dataset('y_train', data=y_train)
hf.create_dataset('x_test', data=x_test)
hf.create_dataset('y_test', data=y_test)
hf.close()
| 2.359375
| 2
|
kedro/pipeline/modular_pipeline.py
|
alxxjohn/kedro
| 0
|
12785190
|
<gh_stars>0
# Copyright 2021 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper to integrate modular pipelines into a master pipeline."""
import copy
from typing import AbstractSet, Dict, List, Set, Union
from kedro.pipeline.node import Node
from kedro.pipeline.pipeline import (
TRANSCODING_SEPARATOR,
Pipeline,
_strip_transcoding,
_transcode_split,
)
class ModularPipelineError(Exception):
"""Raised when a modular pipeline is not adapted and integrated
appropriately using the helper.
"""
pass
def _is_all_parameters(name: str) -> bool:
return name == "parameters"
def _is_single_parameter(name: str) -> bool:
return name.startswith("params:")
def _is_parameter(name: str) -> bool:
return _is_single_parameter(name) or _is_all_parameters(name)
def _validate_inputs_outputs(
inputs: AbstractSet[str], outputs: AbstractSet[str], pipe: Pipeline
) -> None:
"""Safeguards to ensure that:
- parameters are not specified under inputs
- inputs are only free inputs
- outputs do not contain free inputs
"""
inputs = {_strip_transcoding(k) for k in inputs}
outputs = {_strip_transcoding(k) for k in outputs}
if any(_is_parameter(i) for i in inputs):
raise ModularPipelineError(
"Parameters should be specified in the `parameters` argument"
)
free_inputs = {_strip_transcoding(i) for i in pipe.inputs()}
if not inputs <= free_inputs:
raise ModularPipelineError("Inputs should be free inputs to the pipeline")
if outputs & free_inputs:
raise ModularPipelineError("Outputs can't contain free inputs to the pipeline")
def _validate_datasets_exist(
inputs: AbstractSet[str],
outputs: AbstractSet[str],
parameters: AbstractSet[str],
pipe: Pipeline,
) -> None:
inputs = {_strip_transcoding(k) for k in inputs}
outputs = {_strip_transcoding(k) for k in outputs}
existing = {_strip_transcoding(ds) for ds in pipe.data_sets()}
non_existent = (inputs | outputs | parameters) - existing
if non_existent:
raise ModularPipelineError(
"Failed to map datasets and/or parameters: {}".format(
", ".join(sorted(non_existent))
)
)
def _get_dataset_names_mapping(
names: Union[str, Set[str], Dict[str, str]] = None
) -> Dict[str, str]:
"""Take a name or a collection of dataset names
and turn it into a mapping from the old dataset names to the provided ones if necessary.
Args:
names: A dataset name or collection of dataset names.
When str or Set[str] is provided, the listed names will stay
the same as they are named in the provided pipeline.
When Dict[str, str] is provided, current names will be
mapped to new names in the resultant pipeline.
Returns:
A dictionary that maps the old dataset names to the provided ones.
Examples:
>>> _get_dataset_names_mapping("dataset_name")
{"dataset_name": "dataset_name"} # a str name will stay the same
>>> _get_dataset_names_mapping(set(["ds_1", "ds_2"]))
{"ds_1": "ds_1", "ds_2": "ds_2"} # a Set[str] of names will stay the same
>>> _get_dataset_names_mapping({"ds_1": "new_ds_1_name"})
{"ds_1": "new_ds_1_name"} # a Dict[str, str] of names will map key to value
"""
if names is None:
return {}
if isinstance(names, str):
return {names: names}
if isinstance(names, dict):
return copy.deepcopy(names)
return {item: item for item in names}
def _normalize_param_name(name: str) -> str:
"""Make sure that a param name has a `params:` prefix before passing to the node"""
return name if name.startswith("params:") else f"params:{name}"
def _get_param_names_mapping(
names: Union[str, Set[str], Dict[str, str]] = None
) -> Dict[str, str]:
"""Take a parameter or a collection of parameter names
and turn it into a mapping from existing parameter names to new ones if necessary.
It follows the same rule as `_get_dataset_names_mapping` and
prefixes the keys on the resultant dictionary with `params:` to comply with node's syntax.
Args:
names: A parameter name or collection of parameter names.
When str or Set[str] is provided, the listed names will stay
the same as they are named in the provided pipeline.
When Dict[str, str] is provided, current names will be
mapped to new names in the resultant pipeline.
Returns:
A dictionary that maps the old parameter names to the provided ones.
Examples:
>>> _get_param_names_mapping("param_name")
{"params:param_name": "params:param_name"} # a str name will stay the same
>>> _get_param_names_mapping(set(["param_1", "param_2"]))
# a Set[str] of names will stay the same
{"params:param_1": "params:param_1", "params:param_2": "params:param_2"}
>>> _get_param_names_mapping({"param_1": "new_name_for_param_1"})
        # a Dict[str, str] of names will map key to value
{"params:param_1": "params:new_name_for_param_1"}
"""
params = {}
for name, new_name in _get_dataset_names_mapping(names).items():
if _is_all_parameters(name):
params[name] = name # don't map parameters into params:parameters
else:
param_name = _normalize_param_name(name)
param_new_name = _normalize_param_name(new_name)
params[param_name] = param_new_name
return params
def pipeline(
pipe: Pipeline,
*,
inputs: Union[str, Set[str], Dict[str, str]] = None,
outputs: Union[str, Set[str], Dict[str, str]] = None,
parameters: Union[str, Set[str], Dict[str, str]] = None,
namespace: str = None,
) -> Pipeline:
"""Create a copy of the pipeline and its nodes,
with some dataset names, parameter names and node names modified.
Args:
pipe: Original modular pipeline to integrate
inputs: A name or collection of input names to be exposed as connection points
to other pipelines upstream.
When str or Set[str] is provided, the listed input names will stay
the same as they are named in the provided pipeline.
When Dict[str, str] is provided, current input names will be
mapped to new names.
Must only refer to the pipeline's free inputs.
outputs: A name or collection of names to be exposed as connection points
to other pipelines downstream.
When str or Set[str] is provided, the listed output names will stay
the same as they are named in the provided pipeline.
When Dict[str, str] is provided, current output names will be
mapped to new names.
Can refer to both the pipeline's free outputs, as well as
intermediate results that need to be exposed.
parameters: A name or collection of parameters to namespace.
When str or Set[str] are provided, the listed parameter names will stay
the same as they are named in the provided pipeline.
When Dict[str, str] is provided, current parameter names will be
mapped to new names.
The parameters can be specified without the `params:` prefix.
namespace: A prefix to give to all dataset names,
except those explicitly named with the `inputs`/`outputs`
arguments, and parameter references (`params:` and `parameters`).
Raises:
ModularPipelineError: When inputs, outputs or parameters are incorrectly
specified, or they do not exist on the original pipeline.
ValueError: When underlying pipeline nodes inputs/outputs are not
any of the expected types (str, dict, list, or None).
Returns:
A new ``Pipeline`` object with the new nodes, modified as requested.
"""
# pylint: disable=protected-access
inputs = _get_dataset_names_mapping(inputs)
outputs = _get_dataset_names_mapping(outputs)
parameters = _get_param_names_mapping(parameters)
_validate_datasets_exist(inputs.keys(), outputs.keys(), parameters.keys(), pipe)
_validate_inputs_outputs(inputs.keys(), outputs.keys(), pipe)
mapping = {**inputs, **outputs, **parameters}
def _prefix_dataset(name: str) -> str:
return f"{namespace}.{name}"
def _prefix_param(name: str) -> str:
_, param_name = name.split("params:")
return f"params:{namespace}.{param_name}"
def _is_transcode_base_in_mapping(name: str) -> bool:
base_name, _ = _transcode_split(name)
return base_name in mapping
def _map_transcode_base(name: str):
base_name, transcode_suffix = _transcode_split(name)
return TRANSCODING_SEPARATOR.join((mapping[base_name], transcode_suffix))
def _rename(name: str):
rules = [
# if name mapped to new name, update with new name
(lambda n: n in mapping, lambda n: mapping[n]),
# if name refers to the set of all "parameters", leave as is
(_is_all_parameters, lambda n: n),
# if transcode base is mapped to a new name, update with new base
(_is_transcode_base_in_mapping, _map_transcode_base),
# if name refers to a single parameter and a namespace is given, apply prefix
(lambda n: bool(namespace) and _is_single_parameter(n), _prefix_param),
# if namespace given for a dataset, prefix name using that namespace
(lambda n: bool(namespace), _prefix_dataset),
]
for predicate, processor in rules:
if predicate(name):
return processor(name)
# leave name as is
return name
def _process_dataset_names(
datasets: Union[None, str, List[str], Dict[str, str]]
) -> Union[None, str, List[str], Dict[str, str]]:
if datasets is None:
return None
if isinstance(datasets, str):
return _rename(datasets)
if isinstance(datasets, list):
return [_rename(name) for name in datasets]
if isinstance(datasets, dict):
return {key: _rename(value) for key, value in datasets.items()}
raise ValueError( # pragma: no cover
f"Unexpected input {datasets} of type {type(datasets)}"
)
def _copy_node(node: Node) -> Node:
new_namespace = node.namespace
if namespace:
new_namespace = (
f"{namespace}.{node.namespace}" if node.namespace else namespace
)
return node._copy(
inputs=_process_dataset_names(node._inputs),
outputs=_process_dataset_names(node._outputs),
namespace=new_namespace,
)
new_nodes = [_copy_node(n) for n in pipe.nodes]
return Pipeline(new_nodes)
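def _example_usage():  # hedged sketch, not part of the kedro source
    """Illustrate the renaming rules documented above with a one-node pipeline."""
    from kedro.pipeline.node import node  # `node` factory from the module imported above
    base = Pipeline([node(lambda df: df, inputs="raw", outputs="model_input", name="prep")])
    namespaced = pipeline(base, inputs={"raw": "raw_2021"}, namespace="data_science")
    # "raw" keeps its mapped name, everything else is prefixed with the namespace.
    assert sorted(namespaced.data_sets()) == ["data_science.model_input", "raw_2021"]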
| 1.570313
| 2
|
src/evaluation/evaluate_character_predictor.py
|
Zhenye-Na/crnn-pytorch
| 39
|
12785191
|
"""Run validation test for CharacterPredictor."""
import os
from pathlib import Path
from time import time
import unittest
from text_recognizer.datasets import EmnistDataset
from text_recognizer.character_predictor import CharacterPredictor
os.environ["CUDA_VISIBLE_DEVICES"] = ""
SUPPORT_DIRNAME = Path(__file__).parents[0].resolve() / 'support' / 'emnist'
class TestEvaluateCharacterPredictor(unittest.TestCase):
def test_evaluate(self):
predictor = CharacterPredictor()
dataset = EmnistDataset()
dataset.load_or_generate_data()
t = time()
metric = predictor.evaluate(dataset)
time_taken = time() - t
print(f'acc: {metric}, time_taken: {time_taken}')
self.assertGreater(metric, 0.6)
self.assertLess(time_taken, 10)
| 2.6875
| 3
|
tools/third_party/pywebsocket3/example/cgi-bin/hi.py
|
meyerweb/wpt
| 2,479
|
12785192
|
<gh_stars>1000+
#!/usr/bin/env python
print('Content-Type: text/plain')
print('')
print('Hi from hi.py')
| 1.46875
| 1
|
neatsociety/math_util.py
|
machinebrains/neat-society
| 2
|
12785193
|
<reponame>machinebrains/neat-society<filename>neatsociety/math_util.py
'''Commonly used functions not available in the Python2 standard library.'''
from math import sqrt
def mean(values):
return sum(map(float, values)) / len(values)
def variance(values):
m = mean(values)
return sum((v - m) ** 2 for v in values) / len(values)
def stdev(values):
return sqrt(variance(values))
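# Hedged usage sketch: a quick population-statistics check with the helpers above.
if __name__ == '__main__':
    values = [2, 4, 4, 4, 5, 5, 7, 9]
    assert mean(values) == 5.0
    assert variance(values) == 4.0
    assert stdev(values) == 2.0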
| 3.03125
| 3
|
server/services/slack_service.py
|
nithinkashyapn/ooo-slack-bot
| 0
|
12785194
|
from slack import WebClient
import server.config as config
slack_service = WebClient(token=config.SLACK_TOKEN)
| 1.539063
| 2
|
mergeMetadata/modules/utils/exceptions.py
|
isabella232/ALM-SF-DX-Python-Tools
| 5
|
12785195
|
<filename>mergeMetadata/modules/utils/exceptions.py<gh_stars>1-10
class NotCreatedDescribeLog( Exception ):
    '''Exception raised when describe.log does not exist in the specified folder'''
ERROR_CODE = 117
def __init__( self, pathDescribe ):
        super().__init__( f'Describe log does not exist, please place it in {pathDescribe}' )
class NoFullNameError( Exception ):
def __init__( self, tagName ):
super().__init__( f'No tag found for {tagName}' )
| 2.15625
| 2
|
notebooks/tracking/test_relational_unkown.py
|
jeffhernandez1995/jeffhernandez1995.github.io
| 0
|
12785196
|
<filename>notebooks/tracking/test_relational_unkown.py
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader
from lapsolver import solve_dense
from losses import AssociationLoss
from models import InteractionNet
def encode_onehot(labels):
classes = set(labels)
classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
enumerate(classes)}
labels_onehot = np.array(list(map(classes_dict.get, labels)),
dtype=np.int32)
return labels_onehot
digits = np.array([0, 2, 4, 6, 8])
X_train = np.load('datasets/X_train.npy')
y_train = np.load('datasets/y_train.npy')
X_test = np.load('datasets/X_test.npy')
y_test = np.load('datasets/y_test.npy')
X_train = torch.from_numpy(X_train).float()
y_train = torch.from_numpy(y_train).float()
X_test = torch.from_numpy(X_test).float()
y_test = torch.from_numpy(y_test).float()
train_dataset = TensorDataset(X_train, y_train)
test_dataset = TensorDataset(X_test, y_test)
cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if cuda else "cpu")
batch_size = 128
kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
# Self connected graph
off_diag = np.triu(np.ones((digits.shape[0], digits.shape[0])))
rel_rec = np.array(encode_onehot(np.where(off_diag)[0]), dtype=np.float32)
rel_send = np.array(encode_onehot(np.where(off_diag)[1]), dtype=np.float32)
n_iter = rel_rec.shape[0]
rel_rec = torch.FloatTensor(rel_rec)
rel_send = torch.FloatTensor(rel_send)
if device:
rel_rec = rel_rec.to(device)
rel_send = rel_send.to(device)
model = InteractionNet(28*28, 256*2, digits.shape[0], n_iter, 0.25)
if device:
model.to(device)
model.load_state_dict(torch.load('models/InteractionNet__11_loss=0.2297398.pth'))
weights = [1/3]*3
model.eval()
criterion = AssociationLoss(weights)
softmax = nn.Softmax(dim=1)
n_digits = 3
with torch.no_grad():
for i, (inp, target) in enumerate(test_loader):
if device:
inp = inp.to(device)
target = target.to(device)
# inp[0, 4, 0, :] = torch.zeros(256, device=device) # (5, 256)
y_pred = model(inp, rel_rec, rel_send)
loss = criterion(y_pred, target)
y_pred_ = softmax(y_pred[0])
matrix = y_pred_.to('cpu').detach().numpy()
y_pred_ = (y_pred_ * 10**n_digits).round() / (10**n_digits)
rids, cids = solve_dense(-matrix)
matched_indices = np.array([rids, cids]).T
fig, ax = plt.subplots(nrows=5, ncols=2)
for i, row in enumerate(ax):
for j, col in enumerate(row):
r = matched_indices[i, j]
img = inp[0, r, j, :, :].to('cpu').detach().numpy()
col.imshow(img, cmap='gray', aspect='auto')
col.set_axis_off()
col.set_xticks([])
col.set_yticks([])
col.get_xaxis().set_ticklabels([])
col.get_yaxis().set_ticklabels([])
col.get_xaxis().set_visible(False)
col.get_yaxis().set_visible(False)
plt.show()
plt.close()
rids, cids = solve_dense(-target[0].to('cpu').detach().numpy())
matched_indices = np.array([rids, cids]).T
fig, ax = plt.subplots(nrows=5, ncols=2)
for i, row in enumerate(ax):
for j, col in enumerate(row):
r = matched_indices[i, j]
img = inp[0, r, j, :, :].to('cpu').detach().numpy()
col.imshow(img, cmap='gray', aspect='auto')
col.set_axis_off()
col.set_xticks([])
col.set_yticks([])
col.get_xaxis().set_ticklabels([])
col.get_yaxis().set_ticklabels([])
col.get_xaxis().set_visible(False)
col.get_yaxis().set_visible(False)
plt.show()
plt.close()
assert 2 == 1
| 2.328125
| 2
|
rkqc/tools/viewer.py
|
ah744/ScaffCC_RKQC
| 1
|
12785197
|
#!/home/adam/Documents/revkit-1.3/python
#!/usr/bin/python
# RevKit: A Toolkit for Reversible Circuit Design (www.revkit.org)
# Copyright (C) 2009-2011 The RevKit Developers <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, sys
sys.path.append(os.path.dirname(sys.path[0]))
from revkit import *
from revkitui import *
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class InfoDialog( QDialog ):
def __init__( self, circ, parent ):
QDialog.__init__( self, parent, Qt.Dialog )
self.setWindowTitle( 'Circuit details' )
self.resize( 20, 20 )
self.setLayout( QVBoxLayout() )
self.layout().setMargin( 0 )
widget = QWidget()
layout = QFormLayout()
widget.setLayout( layout )
layout.addRow( QLabel( 'Gate count:', widget ), QLabel( str( costs( circ, gate_costs() ) ), widget ) )
layout.addRow( QLabel( 'Line count:', widget ), QLabel( str( costs( circ, line_costs() ) ), widget ) )
layout.addRow( QLabel( 'Quantum cost:', widget ), QLabel( str( costs( circ, quantum_costs() ) ), widget ) )
layout.addRow( QLabel( 'Transistor cost:', widget ), QLabel( str( costs( circ, transistor_costs() ) ), widget ) )
widget2 = QWidget()
widget2.setLayout( QHBoxLayout() )
widget2.layout().addStretch()
button = QPushButton( 'Close' )
button.setAutoDefault( True )
widget2.layout().addWidget( button )
self.connect( button, SIGNAL( 'clicked()' ), self.close )
self.layout().addWidget( widget )
self.layout().addWidget( widget2 )
class AboutDialog( QDialog ):
def __init__( self, parent ):
QDialog.__init__( self, parent, Qt.Dialog )
self.setWindowTitle( 'About RevKit Viewer' )
self.resize( 20, 20 )
self.setLayout( QVBoxLayout() )
self.layout().setMargin( 0 )
widget2 = QWidget()
widget2.setLayout( QHBoxLayout() )
widget2.layout().addStretch()
button = QPushButton( 'Close' )
button.setAutoDefault( True )
widget2.layout().addWidget( button )
self.connect( button, SIGNAL( 'clicked()' ), self.close )
self.layout().addWidget( QLabel( '(c) 2009-2011 by the RevKit Developers' ) )
self.layout().addWidget( widget2 )
class SmallerTreeView( QTreeView ):
def __init__( self, parent = None ):
QTreeView.__init__( self, parent )
def sizeHint( self ):
return QSize( 200, 200 )
class Viewer( QMainWindow ):
def __init__( self ):
QMainWindow.__init__( self )
self.setWindowTitle( "RevKit Viewer" )
self.setupDockWidgets()
self.setupActions()
self.setupMenuBar()
self.setupToolBar()
# CircuitView
self.setCentralWidget( QStackedWidget( self ) )
self.connect( self.centralWidget(), SIGNAL( 'currentChanged(int)' ), self.updateStatusBar )
self.setupStatusBar()
self.resize( 600, 300 )
def setupDockWidgets( self ):
self.hierarchyDock = QDockWidget( "Hierarchy", self )
self.hierarchyView = SmallerTreeView( self )
self.hierarchyView.setExpandsOnDoubleClick( False )
self.hierarchyDock.setWidget( self.hierarchyView )
self.hierarchyDock.setVisible( False )
self.hierarchyDock.setFeatures( QDockWidget.DockWidgetClosable )
self.addDockWidget( Qt.LeftDockWidgetArea, self.hierarchyDock )
# Actions
self.connect( self.hierarchyView, SIGNAL( 'doubleClicked(QModelIndex)' ), self.loadCircuitFromHierarchy )
def setupActions( self ):
path = os.path.realpath( os.path.abspath( __file__ ) )
path = path.replace( os.path.basename( __file__ ), 'icons/' )
self.openAction = QAction( QIcon( path + 'document-open.png' ), '&Open...', self )
self.openAction.setStatusTip( 'Open a circuit realization in RevLib format' )
self.imageAction = QAction( QIcon( path + 'image-x-generic.png' ), 'Save as &Image...', self )
self.imageAction.setStatusTip( 'Saves the circuit as an image file (PNG or JPG)' )
self.latexAction = QAction( QIcon( path + 'text-x-tex.png' ), 'Save as &LaTeX...', self )
self.latexAction.setStatusTip( 'Saves the LaTeX code to generate this circuit' )
self.exitAction = QAction( QIcon( path + 'application-exit.png' ), '&Quit', self )
self.exitAction.setStatusTip( 'Quits the program' )
self.infoAction = QAction( QIcon( path + 'dialog-information.png' ), '&Circuit details', self )
self.infoAction.setStatusTip( 'Opens a dialog with circuit information' )
self.specAction = QAction( QIcon( path + 'view-form-table.png' ), '&View truth table', self )
self.specAction.setStatusTip( 'Displays the full truth table of the circuit, obtained by simulation' )
self.partialAction = QAction( QIcon( path + 'view-form-table.png' ), '&View partial truth table', self )
self.partialAction.setStatusTip( 'Displays a truth table only for non-constant and non-garbage signals' )
self.aboutAction = QAction( QIcon( path + 'help-about.png' ), '&About', self )
self.aboutAction.setStatusTip( 'Displays information about the RevKit Viewer' )
# Dock Widgets
self.hierarchyDock.toggleViewAction().setIcon( QIcon( path + 'view-sidetree.png' ) )
self.connect( self.openAction, SIGNAL( 'triggered()' ), self.open )
self.connect( self.imageAction, SIGNAL( 'triggered()' ), self.saveImage )
self.connect( self.latexAction, SIGNAL( 'triggered()' ), self.saveLatex )
self.connect( self.exitAction, SIGNAL( 'triggered()' ), SLOT( 'close()' ) )
self.connect( self.infoAction, SIGNAL( 'triggered()' ), self.info )
self.connect( self.specAction, SIGNAL( 'triggered()' ), self.truthTable )
self.connect( self.partialAction, SIGNAL( 'triggered()' ), self.partialTable )
self.connect( self.aboutAction, SIGNAL( 'triggered()' ), self.about )
def setupMenuBar( self ):
menubar = self.menuBar()
file = menubar.addMenu( '&File' )
file.addAction( self.openAction )
file.addAction( self.imageAction )
file.addAction( self.latexAction )
file.addSeparator()
file.addAction( self.exitAction )
view = menubar.addMenu( '&View' )
view.addAction( self.infoAction )
view.addSeparator()
view.addAction( self.specAction )
view.addAction( self.partialAction )
help = menubar.addMenu( '&Help' )
help.addAction( self.aboutAction )
def setupToolBar( self ):
toolbar = self.addToolBar( 'Main' )
toolbar.setIconSize( QSize( 32, 32 ) )
toolbar.addAction( self.openAction )
toolbar.addAction( self.imageAction )
toolbar.addSeparator()
toolbar.addAction( self.infoAction )
toolbar.addAction( self.partialAction )
toolbarDock = QToolBar( self )
self.addToolBar( Qt.LeftToolBarArea, toolbarDock )
toolbarDock.setOrientation( Qt.Vertical )
toolbarDock.setMovable( False )
toolbarDock.addAction( self.hierarchyDock.toggleViewAction() )
def setupStatusBar( self ):
self.statusBar()
self.updateStatusBar()
zoom_widget = None # Pointer to the current zoom widget
def updateStatusBar( self ):
if self.zoom_widget is not None:
self.statusBar().removeWidget( self.zoom_widget )
self.zoom_widget = None
if self.centralWidget().currentWidget():
self.zoom_widget = self.centralWidget().currentWidget().zoomWidget()
self.statusBar().addPermanentWidget( self.zoom_widget )
self.zoom_widget.show()
def open( self ):
filename = str( QFileDialog.getOpenFileName( self, 'Open Realization', '', 'RevLib Realization (*.real)' ) )
if len( filename ):
self.openCircuitFromFilename( filename )
def openCircuitFromFilename( self, filename, load_hierarchy = True ):
circ = circuit()
read_realization( circ, filename )
self.openCircuit( circ )
def openCircuit( self, circ ):
# remove all views TODO make more efficient (also memory)
while self.centralWidget().count():
self.centralWidget().removeWidget( self.centralWidget().widget( 0 ) )
# Save this, since all the other circuits are references
self.circ = circ
# hierarchy
tree = hierarchy_tree()
circuit_hierarchy( circ, tree )
self.hierarchyView.setModel( HierarchyModel( tree ) )
self.hierarchyView.setColumnWidth( 0, 150 )
self.hierarchyView.resizeColumnToContents( 1 )
self.hierarchyCurrentIndex = self.hierarchyView.model().index( 0, 0 )
self.circuits = [ tree.node_circuit( i ) for i in range( tree.size() ) ]
for c in self.circuits:
view = CircuitView( c, self )
view.gateDoubleClicked.connect( self.slotGateDoubleClicked )
self.centralWidget().addWidget( view )
def saveImage( self ):
filename = QFileDialog.getSaveFileName( self, 'Save as Image', '', 'PNG image (*.png);;JPG image (*.jpg)' )
if not filename.isEmpty():
scene = self.centralWidget().currentWidget().scene()
pixmap = QPixmap( scene.width(), scene.height() )
painter = QPainter( pixmap )
scene.render( painter )
pixmap.save( filename )
painter.end()
def saveLatex( self ):
filename = QFileDialog.getSaveFileName( self, 'Save as LaTeX', '', 'LaTeX file (*.tex)' )
if not filename.isEmpty():
with open( str( filename ), 'w' ) as f:
f.write( create_image( self.circ ) )
def info( self ):
dialog = InfoDialog( self.circ, self )
dialog.exec_()
def truthTable( self ):
dialog = QDialog( self, Qt.Dialog )
dialog.setWindowTitle( 'Truth Table' )
spec = binary_truth_table()
flattened = circuit()
flatten_circuit( self.circ, flattened )
circuit_to_truth_table( flattened, spec )
n = self.circ.lines
table = QTableWidget( 2 ** n, 2 * n, dialog )
table.setHorizontalHeaderLabels( self.circ.inputs + self.circ.outputs )
table.setVerticalHeaderLabels( map( str, range( 0, 2 ** n ) ) )
table.setAlternatingRowColors( True )
table.setShowGrid( False )
row = 0
for entry in spec.entries:
valid = True
for c in range( 0, n ):
if not self.circ.constants[c] is None and entry[0][c] != self.circ.constants[c]:
valid = False
break
for col in range( 0, 2 * n ):
item = QTableWidgetItem( '1' if ( entry[0] + entry[1] )[col] else '0' )
flags = Qt.ItemIsSelectable
if valid and not ( col >= n and self.circ.garbage[col % n] ) and not ( col < n and not self.circ.constants[col] is None ):
flags |= Qt.ItemIsEnabled
item.setFlags( flags )
if col >= n and not self.circ.garbage[col % n]:
item.setBackground( Qt.lightGray )
table.setItem( row, col, item )
row += 1
table.resizeColumnsToContents()
dialog.setLayout( QVBoxLayout() )
dialog.layout().setMargin( 0 )
dialog.layout().addWidget( table )
dialog.exec_()
def partialTable( self ):
dialog = QDialog( self, Qt.Dialog )
dialog.setWindowTitle( 'Partial Truth Table' )
spec = binary_truth_table()
settings = properties()
settings.set_bool( "partial", True )
flattened = circuit()
flatten_circuit( self.circ, flattened )
circuit_to_truth_table( flattened, spec, py_partial_simulation_func( settings ) )
n = len( filter( lambda x: x is None, self.circ.constants ) )
m = len( filter( lambda x: not x, self.circ.garbage ) )
table = QTableWidget( 2 ** n, n + m, dialog )
input_labels = map( lambda x: x[0], filter( lambda x: x[1] is None, map( lambda x, y: [x,y], self.circ.inputs, self.circ.constants ) ) )
output_labels = map( lambda x: x[0], filter( lambda x: not x[1], map( lambda x, y: [x,y], self.circ.outputs, self.circ.garbage ) ) )
table.setHorizontalHeaderLabels( input_labels + output_labels )
table.setVerticalHeaderLabels( map( lambda x: "", range( 0, 2 ** n ) ) )
table.setAlternatingRowColors( True )
table.setShowGrid( False )
row = 0
for entry in spec.entries:
for col in range( 0, n + m ):
item = QTableWidgetItem( '1' if ( entry[0] + entry[1] )[col] else '0' )
item.setFlags( Qt.ItemIsSelectable | Qt.ItemIsEnabled )
if col >= n:
item.setBackground( Qt.lightGray )
table.setItem( row, col, item )
row += 1
table.resizeColumnsToContents()
dialog.setLayout( QVBoxLayout() )
dialog.layout().setMargin( 0 )
dialog.layout().addWidget( table )
dialog.exec_()
def about( self ):
dialog = AboutDialog( self )
dialog.exec_()
def loadCircuitFromHierarchy( self, index ):
self.hierarchyCurrentIndex = index
self.centralWidget().setCurrentIndex( index.internalId() )
def slotGateDoubleClicked( self, gate ):
if gate.type == gate_type.module:
rows = self.hierarchyView.model().rowCount( self.hierarchyCurrentIndex )
for r in range( rows ):
if str( self.hierarchyCurrentIndex.child( r, 0 ).data().toString() ) == gate.module_name:
self.centralWidget().setCurrentIndex( self.hierarchyCurrentIndex.child( r, 0 ).internalId() )
self.hierarchyCurrentIndex = self.hierarchyCurrentIndex.child( r, 0 )
return
if __name__ == '__main__':
a = QApplication([])
w = Viewer()
w.show()
if len( sys.argv ) == 2:
w.openCircuitFromFilename( sys.argv[1] )
sys.exit( a.exec_() )
| 2.59375
| 3
|
src/jobs/admin.py
|
HitLuca/predict-python
| 12
|
12785198
|
from django.contrib import admin
from src.jobs.models import Job
admin.site.register(Job)
| 1.257813
| 1
|
mmf/datasets/builders/csi/dataset.py
|
alisonreboud/mmf
| 0
|
12785199
|
<filename>mmf/datasets/builders/csi/dataset.py
import copy
import json
import os
import numpy as np
import omegaconf
import torch
from mmf.common.sample import Sample
from mmf.datasets.mmf_dataset import MMFDataset
from mmf.utils.general import get_mmf_root
from mmf.utils.visualize import visualize_images
from PIL import Image
from torchvision import transforms
class CSIFeaturesDataset(MMFDataset):
def __init__(self, config, *args, dataset_name="csi", **kwargs):
super().__init__(dataset_name, config, *args, **kwargs)
assert (
self._use_features
), "config's 'use_images' must be true to use image dataset"
#self.is_multilabel = self.config.get("is_multilabel", False)
def preprocess_sample_info(self, sample_info):
video_id = sample_info["video_id"]
# Add feature_path key for feature_database access
sample_info["feature_path"] = f"{video_id}.npy"
return sample_info
def __getitem__(self, idx):
sample_info = self.annotation_db[idx]
sample_info = self.preprocess_sample_info(sample_info)
current_sample = Sample()
processed_text = self.text_processor({"text": sample_info["caption"]})
current_sample.text = processed_text["text"]
if "input_ids" in processed_text:
current_sample.update(processed_text)
# Instead of using idx directly here, use sample_info to fetch
# the features as feature_path has been dynamically added
features = self.features_db.get(sample_info)
#print(features)
if hasattr(self, "transformer_bbox_processor"):
features["image_info_0"] = self.transformer_bbox_processor(
features["image_info_0"]
)
current_sample.update(features)
current_sample.targets = torch.tensor(
sample_info["label"], dtype=torch.long)
return current_sample
| 2.1875
| 2
|
tree.py
|
StanfordAI4HI/Automatic_Curriculum_ZPDES_Memory
| 0
|
12785200
|
import numpy as np
import json
from graphviz import Digraph
import pickle
import compare_functions
def remove_item(item_list, item):
if item in item_list:
item_list.remove(item)
return list(item_list)
def create_ngrams(trace, n):
#A function that returns a list of n-grams of a trace
return [trace[i:i+n] for i in range(len(trace)-n+1)]
class Build_Tree_Data(object):
def __init__(self, all_concepts, concept_problems, all_basic_components, problem_components, n = 1, data = {'traces': {},'traces_ngrams': {}}):
self.all_concepts = all_concepts
self.concept_problems = concept_problems
self.all_basic_components = all_basic_components
self.problem_components = problem_components
self.data = data
self.data['traces']['Root'] = ''
self.data['traces_ngrams']['Root'] = []
self.n = n
class Base_Tree(object):
def return_parents(self):
# a method to return a dictionary where the keys are node names and the values are a list of the names of the immediate parents of the node
pass
def return_all_ancestors(self):
#a method to return a dictionary where the keys are node names and the values are a list of the names of all the ancestors of the node
pass
def return_children(self):
        #a method to return a dictionary where the keys are node names and the values are a list of the names of the immediate children of the node
pass
def return_all_descendants(self):
        #a method to return a dictionary where the keys are node names and the values are a list of the names of all the descendants of the node
pass
def return_all_concepts(self):
#a method to return a list of all the possible concepts
pass
def return_all_basic_components(self):
#a method to return a list of all possible components
pass
def return_concept_problems(self):
#a method to return a dictionary where the keys are concept names and the values are a list of all problems corresponding to that concept
pass
def return_problem_components(self):
#a method to return a dictionary where the keys are problems and the values are a list of all components corresponding to that problem
pass
def save_tree(self):
#a method for saving the tree to file
pass
class Static_Tree(Base_Tree): #can either load in using a json string representation or rebuild from a dictionary of children
def __init__(self, tree_filename = None, children = None, all_concepts = None, concept_problems = None,
all_basic_components = None, problem_components = None):
if tree_filename is not None:
with open (tree_filename, "r") as text_file:
tree_json_str = text_file.readlines()[0]
self.children, self.all_descendants, self.parents, self.all_ancestors, \
self.all_concepts, self.concept_problems, self.all_basic_components, self.problem_components = json.loads(tree_json_str)
else:
self.children = children #dict - keys: concept, values: list of immediate children of the concept
self.all_concepts = all_concepts #list: list of all concepts, each item in the list must be hashable
self.concept_problems = concept_problems #dict keys: concept, values: list of problems corresponding to the concept
self.all_basic_components = all_basic_components #list: All basic components that make up each problem (if no shared components between problems, they can be the same as the list of all problems)
            self.problem_components = problem_components #dict: keys: problem, values: list of basic components the problem consists of
self.all_descendants = {}
self.parents = {}
self.all_ancestors = {}
self._calculate_all_descendants('Root')
self._calculate_parents()
for concept in self.all_concepts:
if len(self.children[concept]) == 0:
self._calculate_all_ancestors(concept)
def _calculate_all_descendants(self, concept):
if concept not in self.all_descendants:
all_descendants = set()
for child_concept in self.children[concept]:
all_descendants.update(self._calculate_all_descendants(child_concept))
all_descendants.add(child_concept)
self.all_descendants[concept] = list(all_descendants)
return self.all_descendants[concept]
def _calculate_parents(self):
for concept in self.all_concepts:
self.parents[concept] = []
for concept in self.all_concepts:
for child_concept in self.children[concept]:
if concept not in self.parents[child_concept]:
self.parents[child_concept].append(concept)
def _calculate_all_ancestors(self, concept):
if concept not in self.all_ancestors:
all_ancestors = set()
for parent_concept in self.parents[concept]:
all_ancestors.update(self._calculate_all_ancestors(parent_concept))
all_ancestors.add(parent_concept)
self.all_ancestors[concept] = list(all_ancestors)
return self.all_ancestors[concept]
def string_tree(self):
return json.dumps((
self.children,
self.all_descendants,
self.parents,
self.all_ancestors,
self.all_concepts,
self.concept_problems,
self.all_basic_components,
self.problem_components
))
def save_tree(self, tree_filename):
with open(tree_filename, "w") as text_file:
text_file.write(self.string_tree())
def return_parents(self): #return the parents dict (a dictionary where the keys are node names and the values are a list of the names of the immediate parents of the node)
return self.parents
def return_all_ancestors(self): #return the all_ancestors dict (a dictionary where the keys are node names and the values are a list of the names of all the ancestors of the node)
return self.all_ancestors
    def return_children(self): #return the children_names dict (a dictionary where the keys are node names and the values are a list of the names of the immediate children of the node)
return self.children
    def return_all_descendants(self): #return the all_descendants_names dict (a dictionary where the keys are node names and the values are a list of the names of all the descendants of the node)
return self.all_descendants
def return_all_concepts(self):
return self.all_concepts
def return_all_basic_components(self):
#a method to return a list of all possible components
return self.all_basic_components
def return_concept_problems(self):
#a method to return a dictionary where the keys are concept names and the values are a list of all problems corresponding to that concept
return self.concept_problems
def return_problem_components(self):
#a method to return a dictionary where the keys are problems and the values are a list of all components corresponding to that problem
return self.problem_components
def add_edges_to_progression(self, progression_graph):
        #Add directed edges between parents and children to a graphviz graph for visualization purposes
for node_name, node_children in self.children.items():
for child_name in node_children:
                progression_graph.edge(node_name, child_name, constraint='true')
#Tree object for sorting concepts that adds items recursively
class Build_Tree(Base_Tree):
#a tree node, each node is the head of a subtree of its descendants
def __init__(self, tree_filename = None, name = None, data = None, comp_func = None, children = None, children_names = None, all_descendants_names = None, parent=None, verbose = False):
if tree_filename is not None:
alternate_tree = pickle.load(open(tree_filename, "rb" ))
self.name = alternate_tree.name
self.data = alternate_tree.data
self.parent = alternate_tree.parent
            self.children = alternate_tree.children
self.children_names = alternate_tree.children_names
self.all_descendants_names = alternate_tree.all_descendants_names
self.parents = alternate_tree.parents
self.all_ancestors = alternate_tree.all_ancestors
self.comp_func = alternate_tree.comp_func
self.verbose = alternate_tree.verbose
del alternate_tree
else:
self.name = name #String - name of the node
self.data = data #Build_Tree_Data object
self.parent = parent #Tree object - immediate parent node object
self.children = children #Dictionary - keys are the node names and the values are an array of node objects that are the immediate children of the key node
self.children_names = children_names #Dictionary - keys are the node names and values are an array of names of the immediate children of the key node
self.all_descendants_names = all_descendants_names #Dictionary - keys are the node names and values are an array of names of all the descendants of the key node
self.parents = None #Dictionary - the keys are the node names and values are an array of names of the immediate parents of the key node
self.all_ancestors = None #Dictionary - keys are the node names and values are an array of names of all the ancestors of the key node
self.comp_func = comp_func #Function - function for comparing the data of two concepts and determine which one is harder
#comp_func(A, B) Returns:
#1 if B is harder than A
#0 if neither is harder than the other
#-1 if A is harder than B
self.verbose = verbose #Boolean: Whether or not to print
if children == None:
self.children = {}
self.children_names = {}
self.all_descendants_names = {}
self.children['Root'] = []
self.children_names['Root'] = []
self.all_descendants_names['Root'] = set()
for concept_name in data.all_concepts:
self.children[concept_name] = []
self.children_names[concept_name] = []
self.all_descendants_names[concept_name] = set()
def _add_child(self, node):
#inserting a child into the subtree
if self.verbose:
print("entering add child")
if not(node.name in self.all_descendants_names[self.name]) and node.name != self.name: #check it is not already a descendant of the subtree it is being inserted into
if self.verbose:
print('add child - self_name: ' + self.name + ' child_name: '+ node.name)
self.children[self.name].append(node)
self.children_names[self.name].append(node.name)
self.all_descendants_names[self.name].add(node.name)
def _remove_child(self, node):
#remove a child from the subtree
if self.verbose:
print('remove child - child_name: ' + node.name + ' self_name: ' + self.name)
for index, child in enumerate(self.children[self.name]):
if child.name == node.name:
del self.children[self.name][index]
del self.children_names[self.name][index]
break
def _check_tree(self, node):
#check your sibling's children to see if they are also your children, if they are then add them to the list of your children too
for child in self.children[self.name]:
node.insert_node(child.name)
child._check_tree(node)
def insert_node(self, node_name):
concept_name = node_name
concept_trace = concept_name
if concept_name not in self.data.data['traces']:
self.data.data['traces'][concept_name] = concept_trace
prim_traces = create_ngrams(concept_trace, self.data.n)
self.data.data['traces_ngrams'][concept_name] = prim_traces
#insert a new node into your subtree recursively
if self.name != node_name:
difficulty = self.comp_func(self.name, node_name, self.data.data)
if self.verbose:
print('node_name: ' + node_name + ' self_name: ' + self.name + " difficulty: " + str(difficulty))
if difficulty == 1: #If the node is harder than you then it belongs somewhere in your subtree
if len(self.children[self.name]) == 0:
#If you have no children, then the child is your child
if self.verbose:
print('no children and harder so insert')
node = Build_Tree(name = node_name, data = self.data, children = self.children, children_names = self.children_names, all_descendants_names = self.all_descendants_names, parent = self, comp_func = self.comp_func, verbose = self.verbose)
self._add_child(node)
return 1 #return 1 for inserted
else:
#If you have children, check if the node is your children's child and try to insert it into your children's subtrees
temp_children = list(self.children[self.name])
total_harder = 0
for child in temp_children:
total_harder = total_harder + child.insert_node(node_name)
if total_harder == 0: # if child was not inserted, then it is your child
if self.verbose:
print('not inserted, so insert')
node = Build_Tree(name = node_name, data = self.data, children = self.children, children_names = self.children_names, all_descendants_names = self.all_descendants_names, parent = self, comp_func = self.comp_func, verbose = self.verbose)
for child in temp_children:
child._check_tree(node)
self._add_child(node)
self.all_descendants_names[self.name].add(node_name)
return 1 #return 1 for inserted
elif difficulty == 0: #Cannot say one is more difficult than the other
return 0 #return 0 for not inserted
else: #difficulty == -1, means you are harder than the node so it is your parent
if self.verbose:
print('child is harder so add as parent')
node = Build_Tree(name = node_name, data = self.data, children = self.children, children_names = self.children_names, all_descendants_names = self.all_descendants_names, parent = self.parent, comp_func = self.comp_func, verbose = self.verbose)
#remove yourself from your parent
self.parent._remove_child(self)
#add the new node under your parent
for child in self.children[self.parent.name]:
child._check_tree(node)
self.parent._add_child(node)
self.parent = node
#reinsert yourself starting from your new parent
node.insert_node(self.name)
return 1 #return 1 for inserted
else:
return 1 #1 because the node was already inserted
def _add_parents(self, parents, all_ancestors):
        #Add parents and ancestors into the parents/all_ancestors dictionaries, recursing down through the children
if self.parent != None:
parents[self.name].add(self.parent.name)
all_ancestors[self.name].update(all_ancestors[self.parent.name])
all_ancestors[self.name].add(self.parent.name)
for child in self.children[self.name]:
child.parents = parents
child.all_ancestors = all_ancestors
child._add_parents(parents, all_ancestors)
def add_edges_to_progression(self, progression_graph):
        #Add directed edges between parents and children to a graphviz graph for visualization purposes
for child_name, child_children in self.children.items():
for child in child_children:
                progression_graph.edge(child_name, child.name, constraint='true')
def calculate_parents(self):
#calculate the parents of the nodes
if self.parents == None:
parents = {}
all_ancestors = {}
self.parents = parents
self.all_ancestors = all_ancestors
parents[self.name] = set()
all_ancestors[self.name] = set()
for child in self.all_descendants_names[self.name]:
parents[child] = set()
all_ancestors[child] = set()
self._add_parents(parents, all_ancestors)
def return_parents(self): #return the parents dict (a dictionary where the keys are node names and the values are a list of the names of the immediate parents of the node)
if self.parents == None:
self.calculate_parents()
return {key:remove_item(items_list, 'Root') for key, items_list in self.parents.items() if key != 'Root'}
def return_all_ancestors(self): #return the all_ancestors dict (a dictionary where the keys are node names and the values are a list of the names of all the ancestors of the node)
if self.parents == None:
self.calculate_parents()
return {key:remove_item(items_list, 'Root') for key, items_list in self.all_ancestors.items() if key != 'Root'}
    def return_children(self): #return the children_names dict (a dictionary where the keys are node names and the values are a list of the names of the immediate children of the node)
return self.children_names
    def return_all_descendants(self): #return the all_descendants_names dict (a dictionary where the keys are node names and the values are a list of the names of all the descendants of the node)
        return {key:remove_item(items_list, 'Root') for key, items_list in self.all_descendants_names.items() if key != 'Root'}
def print_tree(self, prepend_string=""):
print(prepend_string + self.name)
prepend_string=prepend_string+" "
for child in self.children[self.name]:
child.print_tree(prepend_string = prepend_string)
return
def return_all_concepts(self):
return self.data.all_concepts
def return_all_basic_components(self):
#a method to return a list of all possible components
return self.data.all_basic_components
def return_concept_problems(self):
#a method to return a dictionary where the keys are concept names and the values are a list of all problems corresponding to that concept
return self.data.concept_problems
def return_problem_components(self):
#a method to return a dictionary where the keys are problems and the values are a list of all components corresponding to that problem
return self.data.problem_components
def save_tree(self, tree_filename):
pickle.dump(self, open(tree_filename, "wb" ))
| 2.96875
| 3
|
causal_networkx/discovery/tests/test_fcialg.py
|
adam2392/causal-networkx
| 0
|
12785201
|
<reponame>adam2392/causal-networkx<filename>causal_networkx/discovery/tests/test_fcialg.py
import networkx as nx
import numpy as np
import pandas as pd
import pytest
from causal_networkx.algorithms import d_separated, possibly_d_sep_sets
from causal_networkx.cgm import ADMG, PAG
from causal_networkx.ci import Oracle
from causal_networkx.discovery import FCI
from causal_networkx.scm import StructuralCausalModel
class Test_FCI:
def setup_method(self):
seed = 12345
rng = np.random.RandomState(seed=seed)
# construct a causal graph that will result in
# x -> y <- z
func_uz = lambda: rng.negative_binomial(n=1, p=0.25)
func_uxy = lambda: rng.binomial(n=1, p=0.4)
func_x = lambda u_xy: 2 * u_xy
func_y = lambda x, u_xy, z: x * u_xy + z
func_z = lambda u_z: u_z
# construct the SCM and the corresponding causal graph
scm = StructuralCausalModel(
exogenous={
"u_xy": func_uxy,
"u_z": func_uz,
},
endogenous={"x": func_x, "y": func_y, "z": func_z},
)
G = scm.get_causal_graph()
oracle = Oracle(G)
self.scm = scm
self.G = G
self.ci_estimator = oracle.ci_test
fci = FCI(ci_estimator=self.ci_estimator)
self.alg = fci
def test_fci_skel_graph(self):
sample = self.scm.sample(n=1, include_latents=False)
skel_graph, _ = self.alg.learn_skeleton(sample)
assert list(skel_graph.edges) == [("x", "y"), ("z", "y")]
def test_fci_basic_collider(self):
sample = self.scm.sample(n=1, include_latents=False)
skel_graph, sep_set = self.alg.learn_skeleton(sample)
graph = PAG(incoming_uncertain_data=skel_graph)
self.alg._orient_colliders(graph, sep_set)
# the PAG learned
expected_graph = PAG()
expected_graph.add_edges_from([("x", "y"), ("z", "y")])
expected_graph.add_circle_edges_from([("y", "x"), ("y", "z")])
assert set(expected_graph.edges) == set(graph.edges)
assert set(expected_graph.circle_edges) == set(graph.circle_edges)
def test_fci_rule1(self):
# If A *-> u o-* C, A and C are not adjacent,
# then we can orient the triple as A *-> u -> C.
# First test:
# A -> u o-o C
G = PAG()
G.add_edge("A", "u")
G.add_circle_edge("u", "C", bidirected=True)
G_copy = G.copy()
self.alg._apply_rule1(G, "u", "A", "C")
assert G.has_edge("u", "C")
assert not G.has_circle_edge("C", "u")
assert not G.has_edge("C", "u")
assert not G.has_edge("u", "A")
# orient u o-o C now as u o-> C
# Second test:
# A -> u o-> C
G = G_copy.copy()
G.orient_circle_edge("u", "C", "arrow")
self.alg._apply_rule1(G, "u", "A", "C")
assert G.has_edge("u", "C")
assert not G.has_circle_edge("C", "u")
assert not G.has_edge("C", "u")
assert not G.has_edge("u", "A")
# now orient A -> u as A <-> u
# Third test:
# A <-> u o-o C
G = G_copy.copy()
G.remove_edge("A", "u")
G.add_bidirected_edge("u", "A")
self.alg._apply_rule1(G, "u", "A", "C")
assert G.has_edge("u", "C")
assert not G.has_circle_edge("C", "u")
assert not G.has_edge("C", "u")
assert G.has_bidirected_edge("u", "A")
# now orient A -> u as A <-> u
# Fourth test:
# A o-> u o-o C
G = G_copy.copy()
G.add_circle_edge("u", "A")
G.orient_circle_edge("u", "C", "arrow")
self.alg._apply_rule1(G, "u", "A", "C")
assert G.has_edge("u", "C")
assert not G.has_circle_edge("C", "u")
assert not G.has_edge("C", "u")
assert G.has_circle_edge("u", "A")
def test_fci_rule2(self):
# If A -> u *-> C, or A *-> u -> C, and A *-o C, then
# orient A *-> C.
# 1. Do A -> u <-> C with A o-o C
G = PAG()
G.add_edge("A", "u")
G.add_bidirected_edge("u", "C")
G.add_circle_edge("A", "C", bidirected=True)
G_copy = G.copy()
self.alg._apply_rule2(G, "u", "A", "C")
assert G.has_edge("A", "C")
assert G.has_circle_edge("C", "A")
# if A o-> u, then it should not work
G = G_copy.copy()
G.add_circle_edge("u", "A")
added_arrows = self.alg._apply_rule2(G, "u", "A", "C")
assert not added_arrows
assert G.has_circle_edge("A", "C")
assert G.has_circle_edge("C", "A")
# 2. Test not-added case
# first test that can't be A <-> u <-> C
G = G_copy.copy()
G.remove_edge("A", "u")
G.add_bidirected_edge("u", "A")
added_arrows = self.alg._apply_rule2(G, "u", "A", "C")
assert G.has_circle_edge("A", "C")
assert not added_arrows
# 3. then test that A <-> u -> C with A o-o C
G.remove_bidirected_edge("C", "u")
G.add_edge("u", "C")
added_arrows = self.alg._apply_rule2(G, "u", "A", "C")
assert G.has_edge("A", "C")
assert G.has_circle_edge("C", "A")
assert added_arrows
def test_fci_rule3(self):
# If A *-> u <-* C, A *-o v o-* C, A/C are not adjacent,
# and v *-o u, then orient v *-> u.
G = PAG()
# start by considering all stars to be empty for A, C, u
G.add_edge("A", "u")
G.add_edge("C", "u")
# then consider all circles as bidirected
G.add_circle_edge("A", "v", bidirected=True)
G.add_circle_edge("C", "v", bidirected=True)
G.add_circle_edge("v", "u", bidirected=True)
G_copy = G.copy()
self.alg._apply_rule3(G, "u", "A", "C")
for edge in G_copy.edges:
assert G.has_edge(*edge)
for edge in G_copy.circle_edges:
if edge != ("v", "u"):
assert G.has_circle_edge(*edge)
else:
assert not G.has_circle_edge(*edge)
assert G.has_edge("v", "u")
# if A -> u is A <-> u, then it should still work
G = G_copy.copy()
G.remove_edge("A", "u")
G.add_bidirected_edge("A", "u")
added_arrows = self.alg._apply_rule3(G, "u", "A", "C")
assert added_arrows
# adding a circle edge should make it not work
G = G_copy.copy()
G.add_circle_edge("A", "C", bidirected=True)
added_arrows = self.alg._apply_rule3(G, "u", "A", "C")
assert not added_arrows
def test_fci_rule4_without_sepset(self):
"""Test orienting a discriminating path without separating set.
A discriminating path, p, between X and Y is one where:
- p has at least 3 edges
- u is non-endpoint and u is adjacent to c
- v is not adjacent to c
- every vertex between v and u is a collider on p and parent of c
<v,..., w, u, c>
"""
G = PAG()
# setup graph with a <-> u o-o c
G.add_circle_edge("u", "c", bidirected=True)
G.add_bidirected_edge("a", "u")
sep_set = set()
# initial test should not add any arrows, since there are only 2 edges
added_arrows, explored_nodes = self.alg._apply_rule4(G, "u", "a", "c", sep_set)
assert not added_arrows
assert explored_nodes == dict()
# now add another variable, but since a is not a parent of c
# this is still not a discriminating path
# setup graph with b <-> a <-> u o-o c
G.add_bidirected_edge("b", "a")
added_arrows, explored_nodes = self.alg._apply_rule4(G, "u", "a", "c", sep_set)
assert not added_arrows
assert explored_nodes == dict()
# add the arrow from a -> c
G.add_edge("a", "c")
G_copy = G.copy()
added_arrows, explored_nodes = self.alg._apply_rule4(G, "u", "a", "c", sep_set)
assert added_arrows
assert list(explored_nodes.keys()) == ["c", "u", "a", "b"]
# since separating set is empty
assert not G.has_circle_edge("c", "u")
assert G.has_bidirected_edge("c", "u")
# change 'u' o-o 'c' to 'u' o-> 'c', which should now orient
# the same way
G = G_copy.copy()
G.orient_circle_edge("u", "c", "arrow")
added_arrows, explored_nodes = self.alg._apply_rule4(G, "u", "a", "c", sep_set)
assert added_arrows
assert list(explored_nodes.keys()) == ["c", "u", "a", "b"]
assert not G.has_circle_edge("c", "u")
assert G.has_bidirected_edge("c", "u")
def test_fci_rule4_early_exit(self):
G = PAG()
G.add_circle_edge("u", "c", bidirected=True)
G.add_bidirected_edge("a", "u")
sep_set = set()
# now add another variable, but since a is not a parent of c
# this is still not a discriminating path
G.add_bidirected_edge("b", "a")
G.add_edge("a", "c")
G.add_edge("b", "c")
G.add_edge("d", "b")
# test error case
new_fci = FCI(ci_estimator=self.ci_estimator, max_path_length=1)
with pytest.warns(UserWarning, match="Did not finish checking"):
new_fci._apply_rule4(G, "u", "a", "c", sep_set)
    def test_fci_rule4_with_sepset(self):
"""Test orienting a discriminating path with a separating set.
A discriminating path, p, between X and Y is one where:
- p has at least 3 edges
- u is non-endpoint and u is adjacent to c
- v is not adjacent to c
- every vertex between v and u is a collider on p and parent of c
<v,..., w, u, c>
"""
G = PAG()
G.add_circle_edge("u", "c", bidirected=True)
G.add_bidirected_edge("a", "u")
sep_set = {"b": {"c": set("u")}}
# initial test should not add any arrows, since there are only 2 edges
added_arrows, explored_nodes = self.alg._apply_rule4(G, "u", "a", "c", sep_set)
assert not added_arrows
assert explored_nodes == dict()
# now add another variable, but since a is not a parent of c
# this is still not a discriminating path
G.add_bidirected_edge("b", "a")
added_arrows, explored_nodes = self.alg._apply_rule4(G, "u", "a", "c", sep_set)
assert not added_arrows
assert explored_nodes == dict()
# add the arrow from a -> c
G.add_edge("a", "c")
G_copy = G.copy()
added_arrows, explored_nodes = self.alg._apply_rule4(G, "u", "a", "c", sep_set)
assert added_arrows
assert list(explored_nodes.keys()) == ["c", "u", "a", "b"]
assert not G.has_circle_edge("c", "u")
assert not G.has_edge("c", "u")
assert G.has_edge("u", "c")
# change 'u' o-o 'c' to 'u' o-> 'c', which should now orient
# the same way
G = G_copy.copy()
G.orient_circle_edge("u", "c", "arrow")
added_arrows, explored_nodes = self.alg._apply_rule4(G, "u", "a", "c", sep_set)
assert added_arrows
assert list(explored_nodes.keys()) == ["c", "u", "a", "b"]
assert not G.has_circle_edge("c", "u")
assert not G.has_edge("c", "u")
assert G.has_edge("u", "c")
def test_fci_rule8_without_selection_bias(self):
# If A -> u -> C and A o-> C
# orient A o-> C as A -> C
G = PAG()
# create a chain for A, u, C
G.add_chain(["A", "u", "C"])
G.add_edge("A", "C")
G.add_circle_edge("C", "A")
self.alg._apply_rule8(G, "u", "A", "C")
assert G.has_edge("A", "C")
assert not G.has_circle_edge("C", "A")
def test_fci_rule9(self):
# If A o-> C and there is an undirected pd path
# from A to C through u, where u and C are not adjacent
# then orient A o-> C as A -> C
G = PAG()
# create an uncovered pd path from A to C through u
G.add_edge("A", "C")
G.add_circle_edge("C", "A")
G.add_chain(["A", "u", "x", "y", "z", "C"])
G.add_circle_edge("y", "x")
# create a pd path from A to C through v
G.add_chain(["A", "v", "x", "y", "z", "C"])
# with the bidirected edge, v,x,y is a shielded triple
G.add_bidirected_edge("v", "y")
G_copy = G.copy()
# get the uncovered pd paths
added_arrows, uncov_pd_path = self.alg._apply_rule9(G, "u", "A", "C")
assert added_arrows
assert uncov_pd_path == ["A", "u", "x", "y", "z", "C"]
assert not G.has_circle_edge("C", "A")
# the shielded triple should not result in an uncovered pd path
G = G_copy.copy()
added_arrows, uncov_pd_path = self.alg._apply_rule9(G, "v", "A", "C")
assert not added_arrows
assert uncov_pd_path == []
assert G.has_circle_edge("C", "A")
# when there is a circle edge it should still work
G = G_copy.copy()
G.add_circle_edge("C", "z")
added_arrows, uncov_pd_path = self.alg._apply_rule9(G, "u", "A", "C")
assert added_arrows
assert uncov_pd_path == ["A", "u", "x", "y", "z", "C"]
assert not G.has_circle_edge("C", "A")
def test_fci_rule10(self):
# If A o-> C and u -> C <- v and:
# - there is an uncovered pd path from A to u, p1
        # - there is an uncovered pd path from A to v, p2
        # if mu adjacent to A on p1 is distinct from w adjacent to A on p2
        # and mu is not adjacent to w, then orient A o-> C as A -> C
G = PAG()
# make A o-> C
G.add_edge("A", "C")
G.add_circle_edge("C", "A")
# create an uncovered pd path from A to u that ends at C
G.add_chain(["A", "x", "y", "z", "u", "C"])
G.add_circle_edge("y", "x")
# create an uncovered pd path from A to v so now C is a collider for <u, C, v>
G.add_edge("z", "v")
G.add_edge("v", "C")
G_copy = G.copy()
# 'x' and 'x' are not distinct, so won't orient
added_arrows, a_to_u_path, a_to_v_path = self.alg._apply_rule10(G, "u", "A", "C")
assert not added_arrows
assert a_to_u_path == []
assert a_to_v_path == []
assert G.has_circle_edge("C", "A")
# if we create an edge from A -> y, there is now a distinction
G = G_copy.copy()
G.add_chain(["A", "xy", "y"])
added_arrows, a_to_u_path, a_to_v_path = self.alg._apply_rule10(G, "u", "A", "C")
assert added_arrows
assert a_to_u_path == ["A", "x", "y", "z", "u"]
assert a_to_v_path == ["A", "xy", "y", "z", "v"]
# by making one edge not potentially directed, we break R10
G.remove_edge("z", "u")
G.add_edge("u", "z")
added_arrows, a_to_u_path, a_to_v_path = self.alg._apply_rule10(G, "u", "A", "C")
assert not added_arrows
assert a_to_u_path == []
assert a_to_v_path == []
G.add_circle_edge("z", "u")
added_arrows, a_to_u_path, a_to_v_path = self.alg._apply_rule10(G, "u", "A", "C")
assert not added_arrows
assert a_to_u_path == []
assert a_to_v_path == []
def test_fci_unobserved_confounder(self):
# x4 -> x2 <- x1 <- x3
# x1 <--> x2
# x4 | x1,
edge_list = [
("x4", "x2"),
("x3", "x1"),
("x1", "x2"),
]
latent_edge_list = [("x1", "x2")]
G = ADMG(edge_list, latent_edge_list)
sample = np.random.normal(size=(len(G.nodes), 5)).T
sample = pd.DataFrame(sample)
sample.columns = list(G.nodes)
oracle = Oracle(G)
ci_estimator = oracle.ci_test
fci = FCI(ci_estimator=ci_estimator)
fci.fit(sample)
pag = fci.graph_
print(fci.skel_graph.edges)
expected_pag = PAG()
expected_pag.add_edges_from(
[
("x4", "x2"),
("x1", "x2"),
("x3", "x2"),
]
)
expected_pag.add_circle_edges_from(
[("x2", "x4"), ("x2", "x3"), ("x2", "x1"), ("x1", "x3"), ("x3", "x1")]
)
assert set(pag.edges) == set(expected_pag.edges)
assert set(pag.bidirected_edges) == set(expected_pag.bidirected_edges)
assert set(pag.circle_edges) == set(expected_pag.circle_edges)
expected_pag_digraph = expected_pag.compute_full_graph(to_networkx=True)
pag_digraph = pag.compute_full_graph(to_networkx=True)
assert nx.is_isomorphic(pag_digraph, expected_pag_digraph)
def test_fci_spirtes_example(self):
"""Test example in book.
See book Figure 16
See: https://www.cs.cmu.edu/afs/cs.cmu.edu/project/learn-43/lib/photoz/.g/web/.g/scottd/fullbook.pdf
"""
# reconstruct the PAG the way FCI would
edge_list = [("D", "A"), ("B", "E"), ("F", "B"), ("C", "F"), ("C", "H"), ("H", "D")]
latent_edge_list = [("A", "B"), ("D", "E")]
graph = ADMG(edge_list, latent_edge_list)
alg = FCI(ci_estimator=Oracle(graph).ci_test)
sample = graph.dummy_sample()
alg.fit(sample)
pag = alg.graph_
# generate the expected PAG
edge_list = [
("D", "A"),
("B", "E"),
("H", "D"),
("F", "B"),
]
latent_edge_list = [("A", "B"), ("D", "E")]
uncertain_edge_list = [
("B", "F"),
("F", "C"),
("C", "F"),
("C", "H"),
("H", "C"),
("D", "H"),
]
expected_pag = PAG(edge_list, latent_edge_list, uncertain_edge_list)
assert set(expected_pag.bidirected_edges) == set(pag.bidirected_edges)
assert set(expected_pag.edges) == set(pag.edges)
assert set(expected_pag.circle_edges) == set(pag.circle_edges)
def test_fci_complex(self):
"""
Test FCI algorithm with more complex graph.
Use Figure 2 from :footcite:`Colombo2012`.
References
----------
.. footbibliography::
"""
edge_list = [
("x4", "x1"),
("x2", "x5"),
("x3", "x2"),
("x3", "x4"),
("x2", "x6"),
("x3", "x6"),
("x4", "x6"),
("x5", "x6"),
]
latent_edge_list = [("x1", "x2"), ("x4", "x5")]
G = ADMG(edge_list, latent_edge_list)
sample = np.random.normal(size=(len(G.nodes), 5)).T
sample = pd.DataFrame(sample)
sample.columns = list(G.nodes)
oracle = Oracle(G)
ci_estimator = oracle.ci_test
fci = FCI(ci_estimator=ci_estimator, max_iter=np.inf)
fci.fit(sample)
pag = fci.graph_
assert d_separated(G, "x1", "x3", "x4")
pdsep = possibly_d_sep_sets(pag, "x1", "x3")
assert "x2" in pdsep
expected_pag = PAG()
expected_pag.add_circle_edges_from([("x6", "x5"), ("x2", "x3"), ("x4", "x3"), ("x6", "x4")])
expected_pag.add_edges_from(
[
("x4", "x1"),
("x2", "x5"),
("x3", "x2"),
("x3", "x4"),
("x2", "x6"),
("x3", "x6"),
("x4", "x6"),
("x5", "x6"),
]
)
expected_pag.add_bidirected_edge("x1", "x2")
expected_pag.add_bidirected_edge("x4", "x5")
assert set(pag.bidirected_edges) == set(expected_pag.bidirected_edges)
assert set(pag.edges) == set(expected_pag.edges)
assert set(pag.circle_edges) == set(expected_pag.circle_edges)
| 2.296875
| 2
|
extensions/aria_extension_tosca/simple_v1_0/presentation/field_getters.py
|
enricorusso/incubator-ariatosca
| 1
|
12785202
|
<gh_stars>1-10
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from aria.utils.formatting import safe_repr
from aria.parser.exceptions import InvalidValueError
def data_type_class_getter(cls):
"""
Wraps the field value in a specialized data type class.
Can be used with the :func:`field_getter` decorator.
"""
def getter(field, presentation, context=None):
raw = field.default_get(presentation, context)
if raw is not None:
try:
return cls(None, None, raw, None)
except ValueError as e:
raise InvalidValueError(
'%s is not a valid "%s" in "%s": %s'
% (field.full_name, field.full_cls_name, presentation._name, safe_repr(raw)),
cause=e, locator=field.get_locator(raw))
return getter
| 1.992188
| 2
|
migrations/versions/2d549589ee65_initial_migration.py
|
Julia-Agasaro/Books
| 0
|
12785203
|
"""Initial Migration
Revision ID: 2d549589ee65
Revises:
Create Date: 2019-10-02 16:59:16.744510
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2d549589ee65'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=255), nullable=True),
sa.Column('email', sa.String(length=255), nullable=True),
sa.Column('bio', sa.String(length=255), nullable=True),
sa.Column('profile_pic_path', sa.String(), nullable=True),
sa.Column('tel', sa.Integer(), nullable=True),
sa.Column('password_secure', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=False)
op.create_table('book',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(), nullable=True),
sa.Column('summary', sa.String(), nullable=True),
sa.Column('category', sa.String(length=255), nullable=False),
sa.Column('location', sa.String(), nullable=True),
sa.Column('poster', sa.String(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_book_summary'), 'book', ['summary'], unique=False)
op.create_table('comments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('book_id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.ForeignKeyConstraint(['book_id'], ['book.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('upvotes',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('upvote', sa.Integer(), nullable=True),
sa.Column('book_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['book_id'], ['book.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('upvotes')
op.drop_table('comments')
op.drop_index(op.f('ix_book_summary'), table_name='book')
op.drop_table('book')
op.drop_index(op.f('ix_users_username'), table_name='users')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_table('users')
# ### end Alembic commands ###
| 1.9375
| 2
|
examples/MgO__buck__lammps_neb/lammps_neb/lmps_script_2_python_str.py
|
eragasa/pypospack
| 4
|
12785204
|
def convert_textfile_to_python_string(filename_in,filename_out):
assert isinstance(filename_in,str)
assert isinstance(filename_out,str)
lines_in = None
with open(filename_in,'r') as f:
lines_in = f.readlines()
lines_out = [line.strip() for line in lines_in]
for i,line in enumerate(lines_out):
        line=line.replace("\"","\\\"") #correct escape sequence for double quote characters
        line=line.replace("'","\'") #correct escape sequence for single quote characters
        line=line.replace("{","{{") #correct escape sequence for { so that we can use format command for a string
        line=line.replace("}","}}") #correct escape sequence for } so that we can use format command for a string
lines_out[i] = line
lines_out = ["\"{}\\n\"".format(line.strip()) for line in lines_out]
_str_out = "\n".join(lines_out)
with open(filename_out,'w') as f:
f.write(_str_out)
if __name__ == "__main__":
import sys
filename_in = sys.argv[1]
filename_out = sys.argv[2]
convert_textfile_to_python_string(filename_in,filename_out)
| 3.921875
| 4
|
server/rest_api/serializers.py
|
GOKUPIKA/ivanc
| 14
|
12785205
|
from rest_framework import serializers
from rest_api.models import App, Repo, Article
class AppSerializer(serializers.ModelSerializer):
class Meta:
model = App
fields = ('id', 'title', 'url', 'store_url', 'start_date', 'end_date', 'image',
'color', 'platform',)
read_only_fields = ('image',)
class AppSerializerNested(AppSerializer):
"""
Extension of AppSerializer that enables one level of nested objects.
"""
class Meta(AppSerializer.Meta):
# Extends Meta of AppSerializer, hence will use same model and fields
depth = 1
class RepoSerializer(serializers.ModelSerializer):
class Meta:
model = Repo
fields = ('id', 'title', 'subtitle', 'url', 'start_date', 'end_date', 'image', 'color',
'platform',)
read_only_fields = ('image',)
class RepoSerializerNested(RepoSerializer):
"""
Extension of RepoSerializer that enables one level of nested objects.
"""
class Meta(RepoSerializer.Meta):
        # Extends Meta of RepoSerializer, hence will use same model and fields
depth = 1
class ArticleSerializer(serializers.ModelSerializer):
class Meta:
model = Article
fields = ('id', 'title', 'url', 'publish_date', 'image', 'color',
'platform',)
read_only_fields = ('image',)
class ArticleSerializerNested(ArticleSerializer):
"""
Extension of ArticleSerializer that enables one level of nested objects.
"""
class Meta(ArticleSerializer.Meta):
        # Extends Meta of ArticleSerializer, hence will use same model and fields
depth = 1
| 2.703125
| 3
|
packages/syft/src/syft/core/node/common/node_service/heritage_update/heritage_update_messages.py
|
callezenwaka/PySyft
| 1
|
12785206
|
# stdlib
from typing import Optional
# third party
from google.protobuf.reflection import GeneratedProtocolMessageType
# relative
from ...... import serialize
from ......proto.core.node.common.service.heritage_update_service_pb2 import (
HeritageUpdateMessage as HeritageUpdateMessage_PB,
)
from .....common.message import ImmediateSyftMessageWithoutReply
from .....common.serde.deserialize import _deserialize
from .....common.serde.serializable import bind_protobuf
from .....common.uid import UID
from .....io.address import Address
@bind_protobuf
class HeritageUpdateMessage(ImmediateSyftMessageWithoutReply):
def __init__(
self,
new_ancestry_address: Address,
address: Address,
msg_id: Optional[UID] = None,
):
super().__init__(address=address, msg_id=msg_id)
self.new_ancestry_address = new_ancestry_address
def _object2proto(self) -> HeritageUpdateMessage_PB:
return HeritageUpdateMessage_PB(
new_ancestry_address=serialize(self.new_ancestry_address),
address=serialize(self.address),
msg_id=serialize(self.id),
)
@staticmethod
def _proto2object(proto: HeritageUpdateMessage_PB) -> "HeritageUpdateMessage":
return HeritageUpdateMessage(
new_ancestry_address=_deserialize(blob=proto.new_ancestry_address),
address=_deserialize(blob=proto.address),
msg_id=_deserialize(blob=proto.msg_id),
)
@staticmethod
def get_protobuf_schema() -> GeneratedProtocolMessageType:
return HeritageUpdateMessage_PB
| 1.664063
| 2
|
src/WriteConstantDirectoryFiles/WriteThermophysicalProperties.py
|
darrinl2t/OpenFOAMCaseGenerator
| 3
|
12785207
|
<gh_stars>1-10
class ThermophysicalProperties:
def __init__(self, properties, file_manager):
self.properties = properties
self.file_manager = file_manager
def write_input_file(self):
mu = str(self.properties['flow_properties']['dimensional_properties']['mu'])
file_id = self.file_manager.create_file('constant', 'thermophysicalProperties')
self.file_manager.write_header(file_id, 'dictionary', 'constant', 'thermophysicalProperties')
self.file_manager.write(file_id, '\n')
self.file_manager.write(file_id, 'thermoType\n')
self.file_manager.write(file_id, '{\n')
self.file_manager.write(file_id, ' type hePsiThermo;\n')
self.file_manager.write(file_id, ' mixture pureMixture;\n')
if self.properties['flow_properties']['const_viscosity']:
self.file_manager.write(file_id, ' transport const;\n')
else:
self.file_manager.write(file_id, ' transport sutherland;\n')
self.file_manager.write(file_id, ' thermo hConst;\n')
self.file_manager.write(file_id, ' equationOfState perfectGas;\n')
self.file_manager.write(file_id, ' specie specie;\n')
self.file_manager.write(file_id, ' energy sensibleInternalEnergy;\n')
self.file_manager.write(file_id, '}\n')
self.file_manager.write(file_id, '\n')
self.file_manager.write(file_id, 'mixture\n')
self.file_manager.write(file_id, '{\n')
self.file_manager.write(file_id, ' specie\n')
self.file_manager.write(file_id, ' {\n')
self.file_manager.write(file_id, ' molWeight 28.9;\n')
self.file_manager.write(file_id, ' }\n')
self.file_manager.write(file_id, ' thermodynamics\n')
self.file_manager.write(file_id, ' {\n')
self.file_manager.write(file_id, ' Cp 1005;\n')
self.file_manager.write(file_id, ' Hf 0;\n')
self.file_manager.write(file_id, ' }\n')
self.file_manager.write(file_id, ' transport\n')
self.file_manager.write(file_id, ' {\n')
if self.properties['flow_properties']['const_viscosity']:
self.file_manager.write(file_id, ' mu ' + mu + ';\n')
self.file_manager.write(file_id, ' Pr 0.71;\n')
else:
self.file_manager.write(file_id, ' As 1.4792e-06;\n')
self.file_manager.write(file_id, ' Ts 116;\n')
self.file_manager.write(file_id, ' }\n')
self.file_manager.write(file_id, '}\n')
self.file_manager.write(file_id, '\n')
self.file_manager.write(file_id,
'// ************************************************************************* //\n')
self.file_manager.close_file(file_id)
| 2.640625
| 3
|
envs/classification.py
|
simula-vias/tetraband
| 4
|
12785208
|
<reponame>simula-vias/tetraband<gh_stars>1-10
import numpy as np
import torch
from gym import spaces
from torch.utils.data import TensorDataset, DataLoader
from torchvision import datasets, transforms
import networks
from envs.base import BaseEnv
class ImageClassificationEnv(BaseEnv):
def __init__(self, scenario, evaluation='difference', dataset='cifar10', random_images=True):
super(ImageClassificationEnv, self).__init__(scenario, evaluation, random_images)
network_architecture = "resnet34" if dataset == 'cifar10' else "resnet50"
self.model, self.input_size = networks.get_model(network_architecture, dataset)
self.model.eval()
self.model.to(networks.get_device())
# TODO We could even use an unlabelled dataset and compare original and modified output
if dataset == 'cifar10':
self.dataset = datasets.CIFAR10(root='cifar10',
train=False,
download=True)
self.pre_transformation = transforms.Compose([])
obs_size = 32
elif dataset == 'imagenet':
self.dataset = datasets.ImageNet(root='imagenet',
split='val',
download=True)
self.pre_transformation = transforms.Compose([])
obs_size = 224
self.num_distinct_images = len(self.dataset)
self.model_transformation = transforms.Compose([
transforms.Resize(self.input_size),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
self.observation_space = spaces.Box(low=0,
high=255,
shape=(obs_size, obs_size, 3),
dtype=np.uint8)
def run_all_actions(self, batch_size=8):
""" For baseline purposes """
original_image, label = self._get_image(self.cur_image_idx)
original_input = self.model_transformation(original_image)
mod_inputs = []
action_ids = []
for action_idx in range(len(self.actions)):
if self.is_hierarchical_action(action_idx):
for param_idx in range(len(self.actions[action_idx][1])):
modified_image = self.get_action(action_idx, param_idx)(image=original_image)
modified_input = self.model_transformation(modified_image)
mod_inputs.append(modified_input)
action_ids.append((action_idx, param_idx))
else:
modified_image = self.get_action(action_idx)(image=original_image)
modified_input = self.model_transformation(modified_image)
mod_inputs.append(modified_input)
action_ids.append((action_idx, None))
input = TensorDataset(torch.stack([original_input] + mod_inputs))
loader = DataLoader(input, batch_size=batch_size)
outputs = []
for batch in loader:
batch = batch[0].to(networks.get_device())
output = self.model(batch)
_, prediction = output.max(1)
outputs.extend(prediction.cpu().tolist())
outputs = np.array(outputs)
pred_original = outputs[0]
pred_modified = outputs[1:]
original_correct = pred_original == label
results = []
for pred, (act_idx, param_idx) in zip(pred_modified, action_ids):
evaluation_result = pred == pred_original
r = self._reward(pred, pred_original, act_idx, param_idx)
act_name, param_name = self.get_action_name(act_idx, param_idx)
info = {
'action': act_name,
'parameter': param_name,
'action_reward': r[0],
'parameter_reward': r[1],
'original': pred_original,
'prediction': pred,
'label': label,
'success': bool(evaluation_result),
'original_score': bool(original_correct),
'modified_score': bool(pred == label)
}
results.append(info)
return results
def step(self, action):
action_idx, parameter_idx = action
# Apply transformation to current image
original_image, label = self._get_image(self.cur_image_idx)
modified_image = self.get_action(action_idx, parameter_idx)(image=original_image)
# Input image into SUT
original_input = self.model_transformation(original_image)
modified_input = self.model_transformation(modified_image)
input = torch.stack((modified_input, original_input))
input = input.to(networks.get_device())
output = self.model(input)
_, prediction = output.max(1)
pred_modified, pred_original = prediction.cpu().tolist()
original_correct = pred_original == label
modified_correct = pred_modified == label
# Check result
# The learning signal needs to be whether the transformation had an impact on the outcome
# Whether the test failed is to be decided outside the environment.
# In case the original output is already wrong, the expectation on the modified output might be different.
# 0: No transformation impact
# self.actions[action][2]: Transformation changed outcomes, action-dependent
reward = self._reward(pred_modified, pred_original, action_idx, parameter_idx)
observation = modified_image
done = True
info = {
'original': pred_original,
'prediction': pred_modified,
'label': label,
'original_score': original_correct,
'modified_score': modified_correct
}
return observation, reward, done, info
def _reward(self, pred_modified, pred_original, action_idx, parameter_idx=None):
if pred_modified == pred_original:
action_reward = 0
parameter_reward = 0
else:
action_reward = self.actions[action_idx][2]
if self.is_hierarchical_action(action_idx):
parameter_reward = self.actions[action_idx][1][parameter_idx][2]
else:
parameter_reward = 0
return (action_reward, parameter_reward)
def _get_image(self, idx):
image, label = self.dataset[idx]
return image, label
| 2.390625
| 2
|
plotUSA_GDP_and_GNI_final.py
|
cjekel/USA_GDP_per_capita_inflation_adjust
| 0
|
12785209
|
import numpy as np
import matplotlib.pyplot as plt
# close all figures
plt.close('all')
years = np.array([1960,1961,1962,1963,1964,1965,1966,1967,1968,1969,1970,1971,1972,1973,1974,1975,1976,1977,1978,1979,1980,1981,1982,1983,1984,1985,1986,1987,1988,1989,1990,1991,1992,1993,1994,1995,1996,1997,1998,1999,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014])
usaGDP = np.array([543300000000.,563300000000.,605100000000.,638600000000.,685800000000.,743700000000.,815000000000.,861700000000.,942500000000.,1019900000000.,1075884000000.,1167770000000.,1282449000000.,1428549000000.,1548825000000.,1688923000000.,1877587000000.,2085951000000.,2356571000000.,2632143000000.,2862505000000.,3210956000000.,3344991000000.,3638137000000.,4040693000000.,4346734000000.,4590155000000.,4870217000000.,5252629000000.,5657693000000.,5979589000000.,6174043000000.,6539299000000.,6878718000000.,7308755000000.,7664060000000.,8100201000000.,8608515000000.,9089168000000.,9660624000000.,10284779000000.,10621824000000.,10977514000000.,11510670000000.,12274928000000.,13093726000000.,13855888000000.,14477635000000.,14718582000000.,14418739000000.,14964372000000.,15517926000000.,16163158000000.,16768053000000.,17419000000000.])
# GDP data from the worldbank http://data.worldbank.org/indicator/NY.GDP.MKTP.CD/countries/US?display=graph
# CPI data from bureau of labor statistics http://data.bls.gov/pdq/SurveyOutputServlet
usaCPI = np.array([29.6, 29.9, 30.2, 30.6, 31.0, 31.5, 32.4, 33.4, 34.8, 36.7, 38.8, 40.5, 41.8, 44.4, 49.3, 53.8, 56.9, 60.6, 65.2, 72.6, 82.4, 90.9, 96.5, 99.6, 103.9, 107.6, 109.6, 113.6, 118.3, 124.0, 130.7, 136.2, 140.3, 144.5, 148.2, 152.4, 156.9, 160.5, 163.0, 166.6, 172.2, 177.1, 179.9, 184.0, 188.9, 195.3, 201.6, 207.342, 215.303, 214.537, 218.056, 224.939, 229.594, 232.957, 236.736])
plt.figure()
plt.plot(years, usaGDP)
plt.xlabel('Year')
plt.ylabel('GDP in Current USD')
plt.grid(True)
plt.show()
# Adjust GDP for 1960 USD
usaGDP1960 = usaGDP / (usaCPI / usaCPI[0])
plt.figure()
plt.plot(years, usaGDP1960)
plt.xlabel('Year')
plt.ylabel('GDP adjusted for inflation in 1960 USD')
plt.grid(True)
plt.show()
# Adjust GDP for 2014 USD
usaGDP2014 = usaGDP / (usaCPI / usaCPI[-1])
plt.figure()
plt.plot(years, usaGDP2014)
plt.xlabel('Year')
plt.ylabel('GDP adjusted for inflation in 2014 USD')
plt.grid(True)
plt.show()
# population from world bank
usaPop = np.array([180671000,183691000,186538000,189242000,191889000,194303000,196560000,198712000,200706000,202677000,205052000,207661000,209896000,211909000,213854000,215973000,218035000,220239000,222585000,225055000,227225000,229466000,231664000,233792000,235825000,237924000,240133000,242289000,244499000,246819000,249623000,252981000,256514000,259919000,263126000,266278000,269394000,272657000,275854000,279040000,282162411,284968955,287625193,290107933,292805298,295516599,298379912,301231207,304093966,306771529,309347057,311721632,314112078,316497531,318857056])
usaGDPpercapita = usaGDP / usaPop
plt.figure()
plt.plot(years, usaGDPpercapita)
plt.xlabel('Year')
plt.ylabel('GDP per capita in Current USD')
plt.grid(True)
plt.show()
# adjust GDP per Capita to 1960s numbers
usaGDPpercapita1960 = usaGDPpercapita / (usaCPI / usaCPI[0])
plt.figure()
plt.plot(years, usaGDPpercapita1960)
plt.xlabel('Year')
plt.ylabel('GDP per capita adjusted for inflation in 1960 USD')
plt.grid(True)
plt.show()
# adjust GDP per Capita to 2014s numbers
usaGDPpercapita2014 = usaGDPpercapita / (usaCPI / usaCPI[-1])
plt.figure()
plt.plot(years, usaGDPpercapita2014)
plt.xlabel('Year')
plt.ylabel('GDP per capita adjusted for inflation to 2014 USD')
plt.grid(True)
plt.show()
# define a function to adjust the CPI based on an over or under estimation of
# the inflation rate, where rate is the percent increase or decrease change
# where a precentage overesimate of 5% would be inputted as 1.05
def adjustCPI(cpi, rate):
    demo = []
    for i, j in enumerate(cpi):
        demo.append(j * (rate**i))
    return np.array(demo)  # return an array so the later element-wise divisions work
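# A quick worked example of the compounding behaviour (illustrative numbers, not CPI data):
# adjustCPI([100.0, 100.0, 100.0], 1.01) multiplies year i by 1.01**i,
# giving roughly [100.0, 101.0, 102.01].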
# what if we underestimated inflation?
cpiOverFive = adjustCPI(usaCPI, 1.005)
# what if we overestimated inflation?
cpiUnderFive = adjustCPI(usaCPI, 0.995)
# adjust GDP per Capita to 2014s numbers
usaGDPpercapita2014OverFive = usaGDPpercapita / (cpiOverFive / cpiOverFive[-1])
usaGDPpercapita2014UnderFive = usaGDPpercapita / (cpiUnderFive / cpiUnderFive[-1])
plt.figure()
plt.plot(years, usaGDPpercapita2014, label='normal')
plt.plot(years, usaGDPpercapita2014OverFive, label='under')
plt.plot(years, usaGDPpercapita2014UnderFive, label='over')
plt.legend()
plt.xlabel('Year')
plt.ylabel('GDP per capita adjusted for inflation to 2014 USD')
plt.grid(True)
plt.show()
years2 = np.array([1962,1963,1964,1965,1966,1967,1968,1969,1970,1971,1972,1973,1974,1975,1976,1977,1978,1979,1980,1981,1982,1983,1984,1985,1986,1987,1988,1989,1990,1991,1992,1993,1994,1995,1996,1997,1998,1999,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014])
usaGNI = np.array([612178550047.646,646233886826.65,692328219512.945,753294530375.941,824183577234.192,868295290971.962,952033980993.251,1027990251284.03,1098553055567.61,1183038457083.86,1320921418184.74,1548458249174.67,1711839855738.22,1842214711486.27,1958767403397.59,2117456144199.84,2401109359261.26,2751769589536.9,3048093901726.34,3303883972259.98,3297652203866.24,3411202239818.87,3828479505092.12,4164905103485.73,4601500378186.56,5200354088055.45,5765196251790.1,5888830786924.1,6029529322891.06,6164277951121.71,6612706041742.15,6883086506452.91,7302781827892.38,7760854970064.45,8184808773787.28,8558708987900.82,8869581532268.98,9425292191447.05,10178500697503.7,10498594829042.2,10776200783181,11589035965657.3,12790914724399.8,13693955258225.3,14345564947204.5,14651211130474,15002428215985,14740580035992.9,15143137264678.1,15727290871234.6,16501015978642.4,17001290051112.6,17611490812741.3])
# GNI data atlas method from the worldbank http://databank.worldbank.org/data/reports.aspx?source=2&country=USA&series=&period=#
# CPI data from bureau of labor statistics http://data.bls.gov/pdq/SurveyOutputServlet
usaCPI2 = np.array([30.2, 30.6, 31.0, 31.5, 32.4, 33.4, 34.8, 36.7, 38.8, 40.5, 41.8, 44.4, 49.3, 53.8, 56.9, 60.6, 65.2, 72.6, 82.4, 90.9, 96.5, 99.6, 103.9, 107.6, 109.6, 113.6, 118.3, 124.0, 130.7, 136.2, 140.3, 144.5, 148.2, 152.4, 156.9, 160.5, 163.0, 166.6, 172.2, 177.1, 179.9, 184.0, 188.9, 195.3, 201.6, 207.342, 215.303, 214.537, 218.056, 224.939, 229.594, 232.957, 236.736])
plt.figure()
plt.plot(years2, usaGNI)
plt.xlabel('Year')
plt.ylabel('GNI in Current USD')
plt.grid(True)
plt.show()
# Adjust GNI for 1962 USD
usaGNI1962 = usaGNI / (usaCPI2 / usaCPI2[0])
plt.figure()
plt.plot(years2, usaGNI1962)
plt.xlabel('Year')
plt.ylabel('GNI adjusted for inflation to 1962 USD')
plt.grid(True)
plt.show()
# Adjust GNI for 2014 USD
usaGNI2014 = usaGNI / (usaCPI2 / usaCPI2[-1])
plt.figure()
plt.plot(years2, usaGNI2014)
plt.xlabel('Year')
plt.ylabel('GNI adjusted for inflation to 2014 USD')
plt.grid(True)
plt.show()
# population from world bank
usaPop = np.array([186538000,189242000,191889000,194303000,196560000,198712000,200706000,202677000,205052000,207661000,209896000,211909000,213854000,215973000,218035000,220239000,222585000,225055000,227225000,229466000,231664000,233792000,235825000,237924000,240133000,242289000,244499000,246819000,249623000,252981000,256514000,259919000,263126000,266278000,269394000,272657000,275854000,279040000,282162411,284968955,287625193,290107933,292805298,295516599,298379912,301231207,304093966,306771529,309347057,311721632,314112078,316497531,318857056])
usaGNIpercapita = usaGNI / usaPop
plt.figure()
plt.plot(years2, usaGNIpercapita)
plt.xlabel('Year')
plt.ylabel('GNI per capita in Current USD')
plt.grid(True)
plt.show()
# adjust GNI per Capita to 1962s numbers
usaGNIpercapita1962 = usaGNIpercapita / (usaCPI2 / usaCPI2[0])
plt.figure()
plt.plot(years2, usaGNIpercapita1962)
plt.xlabel('Year')
plt.ylabel('GNI per capita adjusted for inflation to 1962 USD')
plt.grid(True)
plt.show()
# adjust GNI per Capita to 2014s numbers
usaGNIpercapita2014 = usaGNIpercapita / (usaCPI2 / usaCPI2[-1])
plt.figure()
plt.plot(years2, usaGNIpercapita2014)
plt.xlabel('Year')
plt.ylabel('GNI per capita adjusted for inflation to 2014 USD')
plt.grid(True)
plt.show()
# close all figs
plt.close('all')
# save the final plots
# plot of the GDP and GNI in current USD
plt.figure()
plt.plot(years, usaGDP / 1.e12, '-k', label='GDP')
plt.plot(years2, usaGNI / 1.e12, '--b', label='GNI')
plt.xlabel('Year')
plt.ylabel('Trillion USD')
plt.legend(loc=4)
plt.grid(True)
plt.show()
plt.savefig('images/usaGDPandGNI.png')
# plot of GDP and GNI per capita in current USD
plt.figure()
plt.plot(years, usaGDPpercapita, '-k', label='GDP')
plt.plot(years2, usaGNIpercapita, '--b', label='GNI')
plt.xlabel('Year')
plt.ylabel('USD per capita')
plt.legend(loc=4)
plt.grid(True)
plt.show()
plt.savefig('images/usaGDPandGNI_perCapita.png')
# plot of GDP and GNI per capita in 2014 USD
plt.figure()
plt.plot(years, usaGDPpercapita2014, '-k', label='GDP')
plt.plot(years2, usaGNIpercapita2014, '--b', label='GNI')
plt.xlabel('Year')
plt.ylabel('USD per capita adjusted for inflation to 2014 levels')
plt.legend(loc=4)
plt.grid(True)
plt.show()
plt.savefig('images/usaGDPandGNI_perCapita_2014.png')
# plot of GDP at 0.5, 1, and 2 perecent estimations
# what if CPI has underestimated inflation?
cpiUnderHalf = adjustCPI(usaCPI, 1.005)
cpiUnderOne = adjustCPI(usaCPI, 1.01)
cpiUnderTwo = adjustCPI(usaCPI, 1.02)
# what if CPI has overestimated inflation?
cpiOverHalf = adjustCPI(usaCPI, 0.995)
cpiOverOne = adjustCPI(usaCPI, 0.99)
cpiOverTwo = adjustCPI(usaCPI, 0.98)
# recalculate GDP based on the adjusted CPI values
usaGDPpercapita2014UnderHalf = usaGDPpercapita / (cpiUnderHalf / cpiUnderHalf[-1])
usaGDPpercapita2014UnderOne = usaGDPpercapita / (cpiUnderOne / cpiUnderOne[-1])
usaGDPpercapita2014UnderTwo = usaGDPpercapita / (cpiUnderTwo / cpiUnderTwo[-1])
usaGDPpercapita2014OverHalf = usaGDPpercapita / (cpiOverHalf / cpiOverHalf[-1])
usaGDPpercapita2014OverOne = usaGDPpercapita / (cpiOverOne / cpiOverOne[-1])
usaGDPpercapita2014OverTwo = usaGDPpercapita / (cpiOverTwo / cpiOverTwo[-1])
plt.figure()
plt.plot(years, usaGDPpercapita2014, '-k', label='Adjusted to 2014 CPI')
plt.plot(years, usaGDPpercapita2014UnderHalf, '--k', label='CPI each year adjusted +0.5%')
plt.plot(years, usaGDPpercapita2014OverHalf, '-.k', label='CPI each year adjusted -0.5%')
plt.xlabel('Year')
plt.ylabel('GDP per capita adjusted for inflation (USD)')
plt.legend(loc=4)
plt.grid(True)
plt.show()
plt.savefig('images/usaGDPandGNI_perCapita_2014_half.png')
plt.figure()
plt.plot(years, usaGDPpercapita2014, '-k', label='Adjusted to 2014 CPI')
plt.plot(years, usaGDPpercapita2014UnderOne, '--k', label='CPI each year adjusted +1.0%')
plt.plot(years, usaGDPpercapita2014OverOne, '-.k', label='CPI each year adjusted -1.0%')
plt.xlabel('Year')
plt.ylabel('GDP per capita adjusted for inflation (USD)')
plt.legend(loc=4)
plt.grid(True)
plt.show()
plt.savefig('images/usaGDPandGNI_perCapita_2014_one.png')
plt.figure()
plt.plot(years, usaGDPpercapita2014, '-k', label='Adjusted to 2014 CPI')
plt.plot(years, usaGDPpercapita2014UnderTwo, '--k', label='CPI each year adjusted +2.0%')
plt.plot(years, usaGDPpercapita2014OverTwo, '-.k', label='CPI each year adjusted -2.0%')
plt.xlabel('Year')
plt.ylabel('GDP per capita adjusted for inflation (USD)')
plt.legend(loc=4)
plt.grid(True)
plt.show()
plt.savefig('images/usaGDPandGNI_perCapita_2014_two.png')
| 2.078125
| 2
|
glue/external/wcsaxes/settings.py
|
yuvallanger/glue
| 1
|
12785210
|
<gh_stars>1-10
# Licensed under a 3-clause BSD style license - see LICENSE.rst
COORDINATE_RANGE_SAMPLES = 50
FRAME_BOUNDARY_SAMPLES = 1000
GRID_SAMPLES = 1000
| 0.863281
| 1
|
basqbot/cogs/snipe.py
|
fckvrbd/basqbot
| 0
|
12785211
|
import discord
from discord.ext import commands
class Snipe(commands.Cog):
"""Gets the last message sent."""
def __init__(self, bot):
self.bot = bot
self.cache = {}
@commands.Cog.listener()
async def on_raw_message_delete(self, payload):
message = payload.cached_message
if message is None:
return
if payload.guild_id:
guild_id = payload.guild_id
self.add_cache(message, payload.channel_id, guild_id)
else:
self.add_cache(message, payload.channel_id, None)
def add_cache(self, message, channel, guild):
if guild is not None:
if guild not in self.cache:
self.cache[guild] = {}
self.cache[guild][channel] = {
"message": message.content,
"author": message.author,
"time": message.created_at}
else:
if channel not in self.cache:
self.cache[channel] = {}
self.cache[channel] = {
"message": message.content,
"author": message.author,
"time": message.created_at}
@commands.command(description="Gets last deleted message from guild / DM and sends it.")
async def snipe(self, ctx):
"""Gets last deleted message from guild / DM and sends it."""
if ctx.message.guild:
guild_cache = self.cache.get(ctx.guild.id, None)
            channel_cache = guild_cache.get(ctx.channel.id, None) if guild_cache else None
else:
channel_cache = self.cache.get(ctx.channel.id, None)
if channel_cache is None:
await ctx.send("No snipe available!")
return
if not channel_cache["message"]:
embed = discord.Embed(
description="No message content, message might have been a file.",
timestamp=channel_cache["time"],
color=0xff0000)
else:
embed = discord.Embed(
description=channel_cache["message"],
timestamp=channel_cache["time"],
color=0xff0000)
author = channel_cache["author"]
embed.set_author(name=f"{author}", icon_url=author.avatar_url)
embed.set_footer(text=f"Sniped by {str(self.bot.user)}")
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(Snipe(bot))
| 2.6875
| 3
|
esmvalcore/preprocessor/_derive/chlora.py
|
markelg/ESMValCore
| 26
|
12785212
|
"""Derivation of variable `chlora`."""
from iris import Constraint
from ._baseclass import DerivedVariableBase
class DerivedVariable(DerivedVariableBase):
"""Derivation of variable `chlora`."""
@staticmethod
def required(project):
"""Declare the variables needed for derivation."""
required = [
{
'short_name': 'chldiatos'
},
{
'short_name': 'chlmiscos'
},
]
return required
@staticmethod
def calculate(cubes):
"""Compute surface chlorophyll concentration."""
chldiatos_cube = cubes.extract_cube(
Constraint(name='mass_concentration_of_diatoms_expressed_as' +
'_chlorophyll_in_sea_water'))
chlmiscos_cube = cubes.extract_cube(
Constraint(name='mass_concentration_of_miscellaneous' +
'_phytoplankton_expressed_as_chlorophyll' +
'_in_sea_water'))
chlora_cube = chldiatos_cube + chlmiscos_cube
return chlora_cube
| 3.3125
| 3
|
Lib/Category.py
|
StaymanHou/eBayCrawler
| 6
|
12785213
|
<filename>Lib/Category.py<gh_stars>1-10
import MySQLdb
import Mydb
class Category(object):
def __init__(self):
self.pk=None
self.fields={}
def __getitem__(self,field):
if field == 'PK':
return self.pk
else:
if field in self.fields:
return self.fields[field]
def __setitem__(self,field,value):
if field == 'PK':
self.pk = value
else:
self.fields[field] = value
def StaticUpdateAndGetNtP(datadict):
categorydict = {}
catonedict = {}
for value in datadict.values():
for k, v in value['CATEGORY'].items():
catonedict[k] = 0
if k not in categorydict: categorydict[k] = v
else: categorydict[k] = dict(categorydict[k].items()+v.items())
fmtstr = ','.join(['%s'] * len(catonedict))
cur = Mydb.MydbExec(("SELECT TITLE, PK FROM goodscategorylevelone WHERE TITLE IN (%s)"%fmtstr, tuple(catonedict.keys())))
rows = cur.fetchall()
for row in rows:
if row['TITLE'] in catonedict: catonedict[row['TITLE']] = row['PK']
catoneremainlist = [key for key in catonedict.keys() if catonedict[key]==0]
if len(catoneremainlist)>0:
fmtstr = ','.join(['(%s)'] * len(catoneremainlist))
cur = Mydb.MydbExec(("INSERT INTO goodscategorylevelone (TITLE) VALUES %s"%fmtstr, tuple(catoneremainlist)))
cur = Mydb.MydbExec(("SELECT TITLE, PK FROM goodscategorylevelone WHERE TITLE IN (%s)"%fmtstr, tuple(catoneremainlist)))
rows = cur.fetchall()
for row in rows:
if row['TITLE'] in catonedict: catonedict[row['TITLE']] = row['PK']
cattwolist = []
for key, value in categorydict.items():
cattwolist.extend([str(catonedict[key])+'@'+k for k in value.keys()])
cattwodict = dict((cattwo,0) for cattwo in set(cattwolist))
fmtstr = ','.join(['%s'] * len(cattwodict))
cur = Mydb.MydbExec(("SELECT CONCAT( `SUB_HEADING` , '@', `TITLE` ) AS CONCAT, PK FROM goodscategoryleveltwo WHERE CONCAT( `SUB_HEADING` , '@', `TITLE` ) IN (%s)"%fmtstr, tuple(cattwodict.keys())))
rows = cur.fetchall()
for row in rows:
if row['CONCAT'] in cattwodict: cattwodict[row['CONCAT']] = row['PK']
cattworemainlist = [key for key in cattwodict.keys() if cattwodict[key]==0]
if len(cattworemainlist)>0:
fmtstr = ','.join(['(%s,%s)'] * len(cattworemainlist))
fmtparlst = []
for cattwo in cattworemainlist: fmtparlst.extend(cattwo.split('@'))
cur = Mydb.MydbExec(("INSERT INTO goodscategoryleveltwo (SUB_HEADING, TITLE) VALUES %s"%fmtstr, tuple(fmtparlst)))
fmtstr = ','.join(['%s'] * len(cattworemainlist))
cur = Mydb.MydbExec(("SELECT CONCAT( `SUB_HEADING` , '@', `TITLE` ) AS CONCAT, PK FROM goodscategoryleveltwo WHERE CONCAT( `SUB_HEADING` , '@', `TITLE` ) IN (%s)"%fmtstr, tuple(cattworemainlist)))
rows = cur.fetchall()
for row in rows:
if row['CONCAT'] in cattwodict: cattwodict[row['CONCAT']] = row['PK']
for key, value in categorydict.items():
for k in value.keys():
categorydict[key][k] = cattwodict[str(catonedict[key])+'@'+k]
return categorydict
UpdateAndGetNtP = staticmethod(StaticUpdateAndGetNtP)
| 2.609375
| 3
|
tofe_eeprom_crc.py
|
timvideos/HDMI2USB-TOFE-eeprom-tools
| 0
|
12785214
|
"""
# Name Identifier-name, Poly Reverse Init-value XOR-out Check
[ 'crc-8', 'Crc8', 0x107, NON_REVERSE, 0x00, 0x00, 0xF4, ],
"""
from io import StringIO
from crcmod import Crc
c8 = 0x107
code = StringIO()
Crc(c8, rev=False).generateCode('crc8',code)
out = open('opsis_eeprom_crc.c', 'w')
out.write(code.getvalue().replace('UINT8', '__u8'))
out.close()
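# Sanity-check sketch (not part of the original script; parameter values are taken from the
# table quoted in the docstring above): the 'Check' column, 0xF4, should be the CRC of the
# ASCII string "123456789" for this polynomial with a zero init value, e.g.
#   crc = Crc(c8, initCrc=0x00, rev=False, xorOut=0x00)
#   crc.update(b'123456789')
#   assert crc.crcValue == 0xF4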
| 2.140625
| 2
|
autoencoders/mnist_old/dataloaders.py
|
dyth/generative_models
| 1
|
12785215
|
<filename>autoencoders/mnist_old/dataloaders.py<gh_stars>1-10
#!/usr/bin/env python
"""
download mnist
"""
import torch.utils.data
from torchvision import datasets, transforms
def get_mnist(path, use_cuda, batch_size, test_batch_size):
'download into folder data if folder does not exist, then create dataloader'
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
t = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(path, train=True, download=True, transform=t),
batch_size=batch_size, shuffle=True, **kwargs
)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(path, train=False, download=True, transform=t),
batch_size=test_batch_size, shuffle=True, **kwargs
)
return train_loader, test_loader
def get_2d_mnist(path, use_cuda, batch_size, test_batch_size):
'download into folder data if folder does not exist, then create dataloader'
t = transforms.Compose([
transforms.Resize((28, 28)),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(path, train=True, download=True, transform=t),
batch_size=batch_size, shuffle=True, **kwargs
)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(path, train=False, download=True, transform=t),
batch_size=test_batch_size, shuffle=True, **kwargs
)
return train_loader, test_loader
def get_cifar10(path, use_cuda, batch_size, test_batch_size):
'download into folder data if folder does not exist, then create dataloader'
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
t = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(path, train=True, download=True, transform=t),
batch_size=batch_size, shuffle=True, **kwargs
)
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(path, train=False, download=True, transform=t),
batch_size=test_batch_size, shuffle=True, **kwargs
)
return train_loader, test_loader
if __name__ == '__main__':
use_cuda = torch.cuda.is_available()
path = '../data'
get_mnist(path, use_cuda, 64, 1000)
get_cifar10(path, use_cuda, 64, 1000)
| 2.640625
| 3
|
csdl/utils/slice_to_list.py
|
LSDOlab/csdl
| 0
|
12785216
|
from typing import List, Union
import numpy as np
def slice_to_list(
start: Union[int, None],
stop: Union[int, None],
step: Union[int, None],
size: int = None,
) -> List[int]:
if start is None and stop is None:
if size is None:
raise ValueError("size required when start and stop are None")
else:
stop = size
elif stop is not None:
if stop < 0:
if size is None:
raise ValueError(
"size required when using negative stop index")
stop = size + stop
if stop < 0:
raise ValueError("negative stop index out of range")
l = list(
range(
start if start is not None else 0,
stop if stop is not None else size,
step if step is not None else 1,
))
if np.min(l) < 0:
raise ValueError("negative start index not allowed")
return l
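# Illustrative examples of the behaviour above (assumed inputs):
#   slice_to_list(1, None, 2, size=10)    -> [1, 3, 5, 7, 9]
#   slice_to_list(None, -2, None, size=5) -> [0, 1, 2]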
| 3.5625
| 4
|
all_cnn_96/t7_to_hdf5.py
|
ganow/keras-information-dropout
| 16
|
12785217
|
import torchfile
import h5py
dataset_types = ('train', 'valid', 'test')
dataset_path = 'data/cluttered_{}.t7'
outpath = 'data/cluttered_mnist.h5'
with h5py.File(outpath, 'w') as hf:
for dataset_type in dataset_types:
inpath = dataset_path.format(dataset_type)
print('... load {}'.format(inpath))
o = torchfile.load(inpath)
print('... save {}, shape: {}'.format('X_{}'.format(dataset_type), o[b'data'].shape))
hf.create_dataset('X_{}'.format(dataset_type), data=o[b'data'])
print('... save {}, shape: {}'.format('Y_{}'.format(dataset_type), o[b'labels'].shape))
hf.create_dataset('Y_{}'.format(dataset_type), data=o[b'labels'])
| 2.609375
| 3
|
zdpapi_modbus/master.py
|
zhangdapeng520/zdpapi_modbus
| 1
|
12785218
|
<reponame>zhangdapeng520/zdpapi_modbus
"""
master角色
"""
from typing import Tuple
from .libs.modbus_tk import modbus_tcp
from .libs.modbus_tk import defines as cst
from .zstruct import trans_int_to_float
import time
class Master:
def __init__(self,
host: str = "127.0.0.1",
port: int = 502,
timeout_in_sec: float = 5.0) -> None:
self.master = modbus_tcp.TcpMaster(
host=host, port=port, timeout_in_sec=timeout_in_sec)
    def read_float(self, slave_id: int, data_length: int, keep_num: int = 2):
        """
        Read float-type data in bulk.
        """
        data = []
        index = 0
        remaining = data_length
        # read up to 100 registers per request
        while remaining > 100:
            values = self.master.execute(
                slave_id, cst.READ_HOLDING_REGISTERS, index, 100)
            data.extend(values)
            index += 100
            remaining -= 100
        # final read for the remaining registers
        if remaining > 0:
            values = self.master.execute(
                slave_id, cst.READ_HOLDING_REGISTERS, index, remaining)
            data.extend(values)
        # convert the raw register values into real floats
        result = trans_int_to_float(data, keep_num=keep_num)
        return result
    def read(self, slave_id, func_code, address, length):
        """
        Read data from the modbus slave.
        """
        # more than 124 registers requested, so read in chunks
        if length > 124:
            data = []
            while length > 124:
                temp = self.master.execute(slave_id, func_code, address, 124)
                data.extend(temp)
                address += 124
                length -= 124
            else:  # read whatever is left over
                temp = self.master.execute(slave_id, func_code, address, length)
                data.extend(temp)
            return data
        # otherwise a single request is enough
        return self.master.execute(slave_id, func_code, address, length)
    def to_float(self, data: Tuple[int], keep_num: int = 2):
        """
        Convert a list of integers into a list of floats.
        """
        result = trans_int_to_float(data, keep_num=keep_num)
        return result
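# Minimal usage sketch (host, slave id and lengths are hypothetical, not from this module):
#   master = Master(host="192.168.1.10", port=502)
#   floats = master.read_float(slave_id=1, data_length=200, keep_num=2)
#   raw = master.read(1, cst.READ_HOLDING_REGISTERS, 0, 300)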
| 2.53125
| 3
|
ann_benchmarks/algorithms/lshf.py
|
PhilSk/ann-jaccard
| 0
|
12785219
|
from __future__ import absolute_import
import numpy
import sklearn.neighbors
import sklearn.preprocessing
from ann_benchmarks.algorithms.base import BaseANN
from datasketch import MinHash
class LSHF(BaseANN):
def __init__(self, metric, n_estimators=10, n_candidates=50):
self.name = 'LSHF(n_est=%d, n_cand=%d)' % (n_estimators, n_candidates)
self._metric = metric
self._n_estimators = n_estimators
self._n_candidates = n_candidates
    def fit(self, X):
        self._index_minhash = []  # keep the MinHash objects so they can be reused later
        self.index = numpy.empty([0, 128])
        for i, x in enumerate(X):
            m = MinHash(num_perm=128)
            for e in x:
                m.update(str(e).encode('utf-8'))
            self.index = numpy.vstack((self.index, m.digest()))
            self._index_minhash.append(m)
self._lshf = sklearn.neighbors.LSHForest(
n_estimators=self._n_estimators, n_candidates=self._n_candidates)
if self._metric == 'angular':
X = sklearn.preprocessing.normalize(X, axis=1, norm='l2')
self._lshf.fit(self.index)
def query(self, v, n):
if self._metric == 'angular':
v = sklearn.preprocessing.normalize([v], axis=1, norm='l2')[0]
m = MinHash(num_perm=128)
for e in v:
m.update(str(e).encode('utf-8'))
return self._lshf.kneighbors(
[m.digest()], return_distance=False, n_neighbors=n)[0]
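# Hedged usage sketch (not part of the original file): build the index on a
# small integer dataset and query the first vector. The toy data and the
# 'jaccard' metric string are illustrative assumptions, and it presumes the
# deprecated sklearn LSHForest is still available in the installed sklearn.
if __name__ == "__main__":
    toy = [[1, 2, 3], [2, 3, 4], [10, 11, 12]]
    algo = LSHF('jaccard')
    algo.fit(toy)
    print(algo.query(toy[0], n=2))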
| 2.28125
| 2
|
pattern-example-google.py
|
kemalcanbora/python-examples
| 1
|
12785220
|
# pattern library example querying Google using the context and terms option to get weights and comparisons
# author: <NAME>
# Date Created: 2015 05 18
# to install pattern, it is simple via pip: pip install pattern
import sys # need this to pass arguments at the command line
from termcolor import colored # awesome color library for printing colored text in the terminal
import argparse
import random
# terminal arguments parser globals - do not change
parser = argparse.ArgumentParser()
parser.add_argument('-s', action='store', dest='simple_value',
help='Search term')
parser.add_argument('-c', action='store', dest='context',
help='Set the context term to search on in Google')
parser.add_argument('-t', action='store_true', default=False,
dest='boolean_switch',
help='Set a switch to true')
parser.add_argument('-f', action='store_false', default=False,
dest='boolean_switch',
help='Set a switch to false')
parser.add_argument('-a', action='append', dest='collection',
default=[],
help='Add repeated values to a list',
)
parser.add_argument('-A', action='append_const', dest='const_collection',
const='value-1-to-append',
default=[],
help='Add different values to list')
parser.add_argument('-B', action='append_const', dest='const_collection',
const='value-2-to-append',
help='Add different values to list')
parser.add_argument('--version', action='version', version='%(prog)s 1.1')
results = parser.parse_args()
# check to see if the context term is set at the command line, otherwise set it to dangerous as default
if results.context is not None:
    contexter = results.context
else:
    contexter = 'dangerous'
# global dictionary list of terms - do not change
diction = []
subset = []
lengthmin = 6
numterms = 10
fname = 'assets/dictionary-list.html'
with open(fname) as f:
diction = f.readlines()
for term in diction:
if len(term) > lengthmin:
subset.append(term.strip('\n'))
# function to get a random term from the minlength dictionary in subset list
def rando(listofterms,num):
i = 0
while i < num:
randomed = random.choice(listofterms)
#print randomed
searchlist.append(randomed)
i = i + 1
return
searchlist = [] # the list of terms that will be generated in the rando function
# setup the default search terms
rando(subset,numterms) # get total list of terms based on numterms set in the globals section above
from pattern.web import sort
results = sort(terms=searchlist,context=contexter,prefix=True)
for weight, term in results:
print "%.2f" % (weight * 100) + '%', term
exit()
| 3.1875
| 3
|
server/models.py
|
sjkdmpy/noBlindEyez
| 0
|
12785221
|
<gh_stars>0
from tortoise.models import Model
from tortoise import fields
from tortoise.validators import MinLengthValidator
from tortoise.contrib.pydantic import pydantic_model_creator, pydantic_queryset_creator
from enum import Enum
from server.validators.custom import (
validate_longitude,
validate_latitude
)
class IncidentPictures(Model):
id = fields.BigIntField(pk=True)
file = fields.TextField()
incident_id = fields.ForeignKeyField("models.Incident", related_name="incident", on_delete="CASCADE")
class Meta:
table = "IncidentPictures"
def __str__(self):
return f"Incident_{self.incident_id}_Pictures"
class Incident(Model):
id = fields.BigIntField(pk=True)
title = fields.CharField(max_length=250, validators=[MinLengthValidator(10)])
description = fields.TextField(validators=[MinLengthValidator(20)])
created_at = fields.DatetimeField(auto_now_add=True)
location = fields.ForeignKeyField("models.Location", related_name="location", on_delete="CASCADE")
user = fields.ForeignKeyField("models.User", related_name="user", on_delete="CASCADE")
class Meta:
table = "Incident"
def __str__(self):
return self.title
class Location(Model):
id = fields.BigIntField(pk=True)
address = fields.TextField(validators=[MinLengthValidator(10)])
lat = fields.FloatField(validators=[validate_latitude])
lng = fields.FloatField(validators=[validate_longitude])
class Meta:
table = "Location"
def __str__(self):
return self.address
class UnityKind(str, Enum):
AERIAL = "AERIAL"
LAND = "LAND"
class Unity(Model):
id = fields.BigIntField(pk=True)
name = fields.CharField(max_length=100, unique=True, validators=[])
active = fields.BooleanField(default=False)
kind = fields.CharEnumField(UnityKind)
class Meta:
table = "Unity"
def __str__(self):
return self.name
class UserType(str, Enum):
NORMAL = "NORMAL"
PILOT = "PILOT"
class User(Model):
"""
User model to authenticate and have access to the application
"""
id = fields.BigIntField(pk=True)
username = fields.CharField(max_length=20, unique=True)
password = fields.TextField()
user_type = fields.CharEnumField(UserType)
created_at = fields.DatetimeField(auto_now_add=True)
unity_pilot = fields.ManyToManyField("models.Unity", related_name="unity_pilot", on_delete="CASCADE")
class Meta:
table = "User"
ordering = ["created_at"]
# class PydanticMeta:
# exclude = ("password",)
def __str__(self):
return self.username
IIncidentPictures = pydantic_model_creator(IncidentPictures, name="IncidentPictures")
IIncidentPicturesIn = pydantic_model_creator(IncidentPictures, name="IncidentPicturesIn", exclude_readonly=True)
IIncidentPicturesQuery = pydantic_queryset_creator(IncidentPictures)
IIncident = pydantic_model_creator(Incident, name="Incident")
IIncidentIn = pydantic_model_creator(Incident, name="IncidentIn", exclude_readonly=True)
IIncidentQuery = pydantic_queryset_creator(Incident)
ILocation = pydantic_model_creator(Location, name="Location")
ILocationIn = pydantic_model_creator(Location, name="LocationIn", exclude_readonly=True)
ILocationQuery = pydantic_queryset_creator(Location)
IUnity = pydantic_model_creator(Unity, name="Unity")
IUnityIn = pydantic_model_creator(Unity, name="UnityIn", exclude_readonly=True)
IUnityQuery = pydantic_queryset_creator(Unity)
IUser= pydantic_model_creator(User, name="User")
IUserIn = pydantic_model_creator(User, name="UserIn", exclude_readonly=True)
IUserQuery = pydantic_queryset_creator(User)
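# Hedged usage sketch (not part of the original module): serialising an ORM
# instance through one of the generated pydantic models. It assumes Tortoise
# has been initialised elsewhere and that at least one Unity row exists; both
# are illustrative assumptions.
async def _example_serialize_unity():
    # fetch one row and convert it through the generated pydantic model
    unity = await Unity.first()
    payload = await IUnity.from_tortoise_orm(unity)
    return payload.dict()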
| 2.40625
| 2
|
BackEnd/attendoBE/api/Calls/subject.py
|
PedroDuarteSH/attendance-management
| 0
|
12785222
|
<filename>BackEnd/attendoBE/api/Calls/subject.py<gh_stars>0
from django.shortcuts import render
from django.http.response import JsonResponse
from rest_framework.response import Response
from rest_framework.decorators import api_view
from django.db import connection, DatabaseError
import json
from .error import error
# Create your views here.
class SubjectViews():
    # API endpoint that allows subjects to be viewed.
    # List all subjects / Add a new subject
@api_view(http_method_names=["GET", "POST"])
def subject_manage(request):
with connection.cursor() as cursor:
#Get Subject list
if(request.method == "GET"):
cursor.execute("""SELECT * FROM cadeira""")
data = cursor.fetchall()
response = []
for user in data:
to_add = {"nome":user[1], "ano":user[2], "curso":user[3], "departamento":user[4], "universidade":user[5], "professor(id)":user[6]}
response.append(to_add)
return Response(response)
#Add a new Subject
if(request.method == "POST"):
data = json.loads(request.body)
cursor.execute("""BEGIN TRANSACTION""")
statement = ("""INSERT INTO cadeira (nome, ano, curso, departamento, univesidade, professor) VALUES(%s, %s, %s, %s, %s, %s)""")
values = (data["name"], data["year"], data["course"], data["department"], data["university"], data["teacher"])
try:
cursor.execute(statement, values)
cursor.execute("commit")
return JsonResponse({'Sucesso': 1})
#to do
except DatabaseError:
return JsonResponse(error("Erro ao inserir cadeira"))
@api_view(http_method_names=["GET", "PATCH"])
def subject_search(request, id):
#Get subject with this number
with connection.cursor() as cursor:
if(request.method == "GET"):
                cursor.execute("""SELECT * FROM TEACHER WHERE id = %s""", (id,))
                data = cursor.fetchall()
                return JsonResponse(data, safe=False)
#Update a teacher with given number
if(request.method == "PATCH"):
try:
data = json.loads(request.body)
cursor.execute("""BEGIN TRANSACTION""")
cursor.execute("""SELECT * FROM professor WHERE id = %s FOR UPDATE""", (id,))
statement = """UPDATE professor SET mail = %s, password = %s, nome = %s WHERE id = %s"""
values = (data["mail"], data["password"], data["name"], id)
cursor.execute(statement, values)
print(values)
cursor.execute("COMMIT")
return JsonResponse({'sucesso': 1})
except DatabaseError:
return JsonResponse(error("Nao foi possivel atualizar as informacoes do professor"))
| 2.5
| 2
|
main_script.py
|
tahir1069/TrafficSignClassifier
| 0
|
12785223
|
# Load pickled data
import cv2
import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
import tensorflow as tf
import MyAlexNet
import DataAugmentation as func
import glob
import csv
# TODO: Fill this in based on where you saved the training and testing data
training_file = "train.p"
validation_file = "valid.p"
testing_file = "test.p"
with open(training_file, mode='rb') as f:
train = pickle.load(f)
with open(validation_file, mode='rb') as f:
valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
test = pickle.load(f)
X_train, y_train, X_train_size, X_train_bbox = train['features'], train['labels'], train['sizes'], train['coords']
X_valid, y_valid, X_valid_size, X_valid_bbox = valid['features'], valid['labels'], valid['sizes'], valid['coords']
X_test, y_test, X_test_size, X_test_bbox = test['features'], test['labels'], test['sizes'], test['coords']
# TODO: Number of training examples
n_train = len(X_train_size)
# TODO: Number of validation examples
print(len(X_valid_size))
n_validation = len(X_valid_size)
# TODO: Number of testing examples.
n_test = len(X_test_size)
# TODO: What's the shape of a traffic sign image?
print(X_train.shape)
# TODO: How many unique classes/labels there are in the dataset.
n_classes = len(np.unique(y_train))
# TODO: Number of training examples
n_train = len(X_train_size)
# TODO: Number of testing examples.
n_test = len(X_test_size)
# TODO: What's the shape of a traffic sign image?
image_shape = X_train.shape
# TODO: How many unique classes/labels there are in the dataset.
n_classes = len(np.unique(y_train))
img_size = X_train.shape[1] # Size of input images
print(img_size)
print("Number of training examples =", n_train)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
### Data exploration visualization goes here.
# Visualizations will be shown in the notebook.
num_of_samples = []
plt.figure(figsize=(12, 16.5))
for i in range(0, n_classes):
plt.subplot(11, 4, i + 1)
x_selected = X_train[y_train == i]
plt.imshow(x_selected[0, :, :, :]) # draw the first image of each class
plt.title(i)
plt.axis('off')
num_of_samples.append(len(x_selected))
plt.show()
# Plot number of images per class
plt.figure(figsize=(12, 4))
plt.bar(range(0, n_classes), num_of_samples)
plt.title("Distribution of the training dataset")
plt.xlabel("Class number")
plt.ylabel("Number of images")
plt.show()
print("Min number of images in training data per class =", min(num_of_samples))
print("Max number of images in training data per class =", max(num_of_samples))
### Data exploration visualization goes here.
# Visualizations will be shown in the notebook.
num_of_samples = []
plt.figure(figsize=(12, 16.5))
for i in range(0, n_classes):
plt.subplot(11, 4, i + 1)
x_selected = X_valid[y_valid == i]
plt.imshow(x_selected[0, :, :, :]) # draw the first image of each class
plt.title(i)
plt.axis('off')
num_of_samples.append(len(x_selected))
plt.show()
# Plot number of images per class
plt.figure(figsize=(12, 4))
plt.bar(range(0, n_classes), num_of_samples)
plt.title("Distribution of the validation dataset")
plt.xlabel("Class number")
plt.ylabel("Number of images")
plt.show()
print("Min number of images in vlidation data per class =", min(num_of_samples))
print("Max number of images in validation data per class =", max(num_of_samples))
### Data exploration visualization goes here.
# Visualizations will be shown in the notebook.
num_of_samples = []
plt.figure(figsize=(12, 16.5))
for i in range(0, n_classes):
plt.subplot(11, 4, i + 1)
x_selected = X_test[y_test == i]
plt.imshow(x_selected[0, :, :, :]) # draw the first image of each class
plt.title(i)
plt.axis('off')
num_of_samples.append(len(x_selected))
plt.show()
# Plot number of images per class
plt.figure(figsize=(12, 4))
plt.bar(range(0, n_classes), num_of_samples)
plt.title("Distribution of the test dataset")
plt.xlabel("Class number")
plt.ylabel("Number of images")
plt.show()
print("Min number of images in test data per class =", min(num_of_samples))
print("Max number of images in test data per class =", max(num_of_samples))
### For Data Augmentation
# X_train_aug = []
# y_train_aug = []
# def create_data(n):
# for i in range(100):
# img=X_train[i]
# X_train_aug.append(img)
# y_train_aug.append(y_train[i])
# #Generate n new images out of each input image
# for j in range(n):
# X_train_aug.append(augment_img(img))
# y_train_aug.append(y_train[i])
# X_train_crop = np.ndarray(shape=[X_train.shape[0],IMAGE_SIZE,IMAGE_SIZE,
# 3],dtype = np.uint8)
# for i in range(n_train):
# X_train_crop[i] = crop_img(X_train[i])
# print(i)
print(X_train.shape)
print(X_train.dtype)
print(y_train.shape)
print(y_train.dtype)
print(X_valid.shape)
print(X_valid.dtype)
print(y_valid.shape)
print(y_train.dtype)
print(X_test.shape)
print(X_test.dtype)
print(y_test.shape)
print(y_test.dtype)
filename = "updated_test.p"
file = open(filename, 'rb')
X_test = pickle.load(file)
filename = "updated_train.p"
file = open(filename, 'rb')
X_train = pickle.load(file)
filename = "updated_valid.p"
file = open(filename, 'rb')
X_valid = pickle.load(file)
test = X_train[10000]
transformation = func.transform_img(test)
augmentation = func.augment_img(test)
func.show_imgs(test, transformation, augmentation)
print(X_train.shape)
print(X_train.dtype)
print(y_train.shape)
print(y_train.dtype)
print(X_valid.shape)
print(X_valid.dtype)
print(y_valid.shape)
print(y_train.dtype)
print(X_test.shape)
print(X_test.dtype)
print(y_test.shape)
print(y_test.dtype)
# Data Normalization
print(np.mean(X_train))
X_train = (X_train - np.mean(X_train)) / 255.0
print(np.mean(X_train))
print(np.mean(X_valid))
X_valid = (X_valid - np.mean(X_valid)) / 255.0
print(np.mean(X_valid))
print(np.mean(X_test))
X_test = (X_test - np.mean(X_test)) / 255.0
print(np.mean(X_test))
## Shuffle the training dataset
print(X_train.shape)
print(y_train.shape)
X_train, y_train = shuffle(X_train, y_train)
print(X_train.shape)
print(y_train.shape)
print('done')
EPOCHS = 90
BATCH_SIZE = 128
print('done')
tf.reset_default_graph()
x = tf.placeholder(tf.float32, (None, 51, 51, 3))
y = tf.placeholder(tf.int32, (None))
keep_prob = tf.placeholder(tf.float32) # probability to keep units
one_hot_y = tf.one_hot(y, 43)
print('done')
rate = 0.0005
save_file = './new_model.ckpt'
logits = MyAlexNet.AlexNet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=one_hot_y)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
Saver = tf.train.Saver()
def evaluate(X_data, y_data):
num_examples = len(X_data)
total_accuracy = 0
sess = tf.get_default_session()
for offset in range(0, num_examples, BATCH_SIZE):
batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: 1.0})
total_accuracy += (accuracy * len(batch_x))
return total_accuracy / num_examples
print('done')
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
num_examples = len(X_train)
print("Training...")
print()
for i in range(EPOCHS):
X_train, y_train = shuffle(X_train, y_train)
print("Epoch: ", i)
for offset in range(0, num_examples, BATCH_SIZE):
end = offset + BATCH_SIZE
batch_x, batch_y = X_train[offset:end], y_train[offset:end]
sess.run(training_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: 0.75})
validation_accuracy = evaluate(X_valid, y_valid)
print("EPOCH {} ...".format(i + 1))
print("Validation Accuracy = {:.3f}".format(validation_accuracy))
print()
Saver.save(sess,save_file)
print("Model saved")
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver2 = tf.train.import_meta_graph('./Trained Model/final_model.ckpt.meta')
saver2.restore(sess, "./Trained Model/final_model.ckpt")
test_accuracy = evaluate(X_test, y_test)
print("Test Set Accuracy = {:.3f}".format(test_accuracy))
graph = tf.get_default_graph()
signs_class=[]
with open('signnames.csv', 'rt') as csvfile:
reader = csv.DictReader(csvfile, delimiter=',')
for row in reader:
signs_class.append((row['SignName']))
my_labels = [37,38,17,15,12,13,1,0,35,20,3,5]
test = func.load_images("./new_images1/")
test_images=X_test_data=np.uint8(np.zeros((len(test),51,51,3)))
test_images_labels=np.ndarray(shape=[len(test)],dtype=np.uint8)
test_images[0:12]=test[0:12]
test_images_labels[0:12]=my_labels[0:12]
plt.figure(figsize=(12, 8))
for i in range(len(test)):
plt.subplot(3, 4, i+1)
plt.imshow(test[i])
plt.title(signs_class[my_labels[i]])
plt.axis('off')
plt.show()
test_images=(test_images-np.mean(test_images))/255.0
### Visualize the softmax probabilities here.
with tf.Session(graph=graph) as sess:
sess.run(tf.global_variables_initializer())
saver2 = tf.train.import_meta_graph('./Trained Model/final_model.ckpt.meta')
saver2.restore(sess, "./Trained Model/final_model.ckpt")
new_test_accuracy = evaluate(test_images, test_images_labels)
print("New Test Set Accuracy = {:.3f}".format(new_test_accuracy))
softmax_logits = tf.nn.softmax(logits)
top_k = tf.nn.top_k(softmax_logits, k=5)
with tf.Session(graph=graph) as sess:
sess.run(tf.global_variables_initializer())
saver2 = tf.train.import_meta_graph('./Trained Model/final_model.ckpt.meta')
saver2.restore(sess, "./Trained Model/final_model.ckpt")
my_softmax_logits = sess.run(softmax_logits, feed_dict={x: test_images, keep_prob: 1.0})
my_top_k = sess.run(top_k, feed_dict={x: test_images, keep_prob: 1.0})
print(len(test))
plt.figure(figsize=(16, 21))
for i in range(12):
plt.subplot(12, 2, 2*i+1)
plt.imshow(test[i])
plt.title(i)
plt.axis('off')
plt.subplot(12, 2, 2*i+2)
plt.barh(np.arange(1, 6, 1), my_top_k.values[i, :])
labs=[signs_class[j] for j in my_top_k.indices[i]]
plt.yticks(np.arange(1, 6, 1), labs)
plt.show()
my_labels = [3, 11, 1, 12, 38, 34, 18, 25]
test = []
for i, img in enumerate(glob.glob('./new_images2/*x.png')):
image = func.crop_img(cv2.imread(img))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
test.append(image)
test_images=X_test_data=np.uint8(np.zeros((len(test),51,51,3)))
test_images_labels=np.ndarray(shape=[len(test)],dtype=np.uint8)
test_images[0:len(test)]=test[0:len(test)]
test_images_labels[0:len(test)]=my_labels[0:len(test)]
plt.figure(figsize=(12, 8))
for i in range(len(test)):
plt.subplot(3, 4, i+1)
plt.imshow(test[i])
plt.title(signs_class[my_labels[i]])
plt.axis('off')
plt.show()
test_images=(test_images-np.mean(test_images))/255.0
### Visualize the softmax probabilities here.
with tf.Session(graph=graph) as sess:
sess.run(tf.global_variables_initializer())
saver2 = tf.train.import_meta_graph('./Trained Model/final_model.ckpt.meta')
saver2.restore(sess, "./Trained Model/final_model.ckpt")
new_test_accuracy = evaluate(test_images, test_images_labels)
print("New Test Set Accuracy = {:.3f}".format(new_test_accuracy))
softmax_logits = tf.nn.softmax(logits)
top_k = tf.nn.top_k(softmax_logits, k=5)
with tf.Session(graph=graph) as sess:
sess.run(tf.global_variables_initializer())
saver2 = tf.train.import_meta_graph('./Trained Model/final_model.ckpt.meta')
saver2.restore(sess, "./Trained Model/final_model.ckpt")
my_softmax_logits = sess.run(softmax_logits, feed_dict={x: test_images, keep_prob: 1.0})
my_top_k = sess.run(top_k, feed_dict={x: test_images, keep_prob: 1.0})
print(len(test))
plt.figure(figsize=(16, 21))
for i in range(len(test)):
plt.subplot(12, 2, 2*i+1)
plt.imshow(test[i])
plt.title(i)
plt.axis('off')
plt.subplot(12, 2, 2*i+2)
plt.barh(np.arange(1, 6, 1), my_top_k.values[i, :])
labs=[signs_class[j] for j in my_top_k.indices[i]]
plt.yticks(np.arange(1, 6, 1), labs)
plt.show()
### Visualize your network's feature maps here.
### Feel free to use as many code cells as needed.
# image_input: the test image being fed into the network to produce the feature maps
# tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer
# activation_min/max: can be used to view the activation contrast in more detail, by default matplot sets min and max to the actual min and max values of the output
# plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry
#
#def outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1 ,plt_num=1):
# # Here make sure to preprocess your image_input in a way your network expects
# # with size, normalization, ect if needed
# # image_input =
# # Note: x should be the same name as your network's tensorflow data placeholder variable
# # If you get an error tf_activation is not defined it may be having trouble
# #accessing the variable from inside a function
# activation = tf_activation.eval(session=sess,feed_dict={x : image_input})
# featuremaps = activation.shape[3]
# plt.figure(plt_num, figsize=(15,15))
# for featuremap in range(featuremaps):
# plt.subplot(6,8, featuremap+1) # sets the number of feature maps to show on each row and column
# plt.title('FeatureMap ' + str(featuremap)) # displays the feature map number
# if activation_min != -1 & activation_max != -1:
# plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin =activation_min, vmax=activation_max, cmap="gray")
# elif activation_max != -1:
# plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmax=activation_max, cmap="gray")
# elif activation_min !=-1:
# plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin=activation_min, cmap="gray")
# else:
# plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", cmap="gray")
#
#
#
#
#test1=X_train[6500]
#plt.imshow(test1)
#test1= (test1- np.mean(test1)) / 255.0
#outputFeatureMap(test1)
| 2.734375
| 3
|
apps/snake.py
|
LeLuxNet/GridPy
| 0
|
12785224
|
import datetime
import random
from config import *
from lib import app, cords, button, led
from utils import time
class Direction:
def __init__(self, x, y, left=None, right=None):
self.x = x
self.y = y
self.left = left
self.right = right
SPEED = 1000
DIR_UP = Direction(0, -1)
DIR_DOWN = Direction(0, 1)
DIR_LEFT = Direction(-1, 0, DIR_DOWN, DIR_UP)
DIR_RIGHT = Direction(1, 0, DIR_UP, DIR_DOWN)
DIR_UP.left = DIR_LEFT
DIR_UP.right = DIR_RIGHT
DIR_DOWN.left = DIR_LEFT
DIR_DOWN.right = DIR_RIGHT
class App(app.BaseApp):
def __init__(self):
super().__init__("Snake")
middle = DISPLAY_ROWS // 2
self.snake = [
cords.Cords(3, middle),
cords.Cords(2, middle),
cords.Cords(1, middle)
]
self.food = None
self.gen_food()
self.direction = DIR_RIGHT
def run(self):
self.render()
while True:
begin = datetime.datetime.now()
final_press = None
try:
while time.to_ms(datetime.datetime.now() - begin) < SPEED:
press = button.any_button_once()
if press:
final_press = press
except KeyboardInterrupt:
break
if final_press is not None:
if final_press[0] == 0:
self.direction = self.direction.left
elif final_press[0] == 1:
self.direction = self.direction.right
old_head = self.snake[0]
head = cords.Cords(old_head.x + self.direction.x, old_head.y + self.direction.y)
if 0 > head.x or DISPLAY_COLUMNS <= head.x or 0 > head.y or DISPLAY_ROWS <= head.y:
break
self.snake.insert(0, head)
if head != self.food:
self.snake.pop()
else:
self.gen_food()
self.render()
time.sleep(3)
def gen_food(self):
food = cords.Cords(random.randint(0, DISPLAY_COLUMNS - 1), random.randint(0, DISPLAY_ROWS - 1))
if food in self.snake:
self.gen_food()
else:
self.food = food
def render(self):
led.fill_func(self.render_func)
def render_func(self, cord):
if self.food == cord:
return led.COLOR_RED
elif cord in self.snake:
return led.COLOR_GREEN
return led.COLOR_BLACK
| 2.984375
| 3
|
train_NaiveBayes_model.py
|
dlhuynh/flask-app
| 0
|
12785225
|
<reponame>dlhuynh/flask-app<filename>train_NaiveBayes_model.py<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 17 09:33:14 2021
@author: dhuynh
"""
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer
import pickle
df= pd.read_csv("spam.csv", encoding="latin-1")
df.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis=1, inplace=True)
# Features and Labels
df['label'] = df['class'].map({'ham': 0, 'spam': 1})
X = df['message']
y = df['label']
# Extract Feature With CountVectorizer
cv = CountVectorizer()
X = cv.fit_transform(X) # Fit the Data
pickle.dump(cv, open('tranform.pkl', 'wb'))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
nb = MultinomialNB()
nb.fit(X_train,y_train)
nb.score(X_test,y_test)
filename = 'nb_model.pkl'
pickle.dump(nb, open(filename, 'wb'))
# #Alternative Usage of Saved Model
# joblib.dump(clf, 'NB_spam_model.pkl')
# NB_spam_model = open('NB_spam_model.pkl','rb')
# clf = joblib.load(NB_spam_model)
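# Hedged inference sketch (not part of the original script): reload the saved
# vectorizer and model and classify a new message. The sample text is an
# illustrative assumption.
# loaded_cv = pickle.load(open('tranform.pkl', 'rb'))
# loaded_nb = pickle.load(open('nb_model.pkl', 'rb'))
# sample = ["Congratulations! You have won a free prize, call now"]
# print(loaded_nb.predict(loaded_cv.transform(sample)))  # 1 -> spam, 0 -> ham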
| 2.6875
| 3
|
venv/lib/python2.7/site-packages/image/video_field.py
|
deandunbar/html2bwml
| 0
|
12785226
|
<reponame>deandunbar/html2bwml
from django.db import models
from django.db.models.fields.files import FieldFile
# A video field is exactly a file field with a different signature
class VideoFieldFile(FieldFile):
pass
class VideoField(models.FileField):
attr_class = VideoFieldFile
| 2.171875
| 2
|
swhlab/__init__.py
|
swharden/SWHLab
| 15
|
12785227
|
<reponame>swharden/SWHLab
"""
SWHLab is a python module intended to provide easy access to high level ABF
file opeartions to aid analysis of whole-cell patch-clamp electrophysiological
recordings. Although graphs can be interactive, the default mode is to output
PNGs and generate flat file HTML indexes to allow data browsing through any
browser on the network. Direct ABF access was provided by the NeoIO module.
* if a site-packages warning is thrown, force use of developmental version by:
sys.path.insert(0,'../')
"""
import logging
import sys
import os
if not os.path.abspath('../') in sys.path:
sys.path.append('../')
import swhlab
def tryLoadingFrom(tryPath,moduleName='swhlab'):
"""if the module is in this path, load it from the local folder."""
if not 'site-packages' in swhlab.__file__:
print("loaded custom swhlab module from",
os.path.dirname(swhlab.__file__))
return # no need to warn if it's already outside.
while len(tryPath)>5:
sp=tryPath+"/swhlab/" # imaginary swhlab module path
if os.path.isdir(sp) and os.path.exists(sp+"/__init__.py"):
if not os.path.dirname(tryPath) in sys.path:
sys.path.insert(0,os.path.dirname(tryPath))
print("#"*80)
print("# WARNING: using site-packages swhlab module")
print("#"*80)
tryPath=os.path.dirname(tryPath)
return
tryLoadingFrom(os.path.abspath('./'))
# from here, assume everything is fine.
logDateFormat='%m/%d/%Y %I:%M:%S %p'
logFormat='%(asctime)s\t%(levelname)s\t%(message)s'
loglevel_SILENT=logging.FATAL
loglevel_QUIET=logging.INFO
loglevel_DEBUG=logging.DEBUG
loglevel_ERROR=logging.ERROR
loglevel=loglevel_QUIET # change this at will
from swhlab.version import __version__
from swhlab.core import ABF
from swhlab.plotting.core import ABFplot as PLOT
from swhlab.analysis.ap import AP
from swhlab.indexing import imaging
| 1.875
| 2
|
train_levels.py
|
TheCherry/test_ai_game
| 0
|
12785228
|
A = 0   ## AIR - Movable field, no actions
P = 1   ## PLAYER - The Player
W = 2   ## WALL - A Wall, player can't pass
F = 3   ## FIRE - Kills the player if he doesn't have Fire protection
FP = 4  ## FPROTECT - Fire protection (renamed from P, which already names the player)
R = 5   ## ALLOW_DIRECTION_RIGHT - only moveable to right
L = 6   ## ALLOW_DIRECTION_LEFT - only moveable to left
DN = 7  ## ALLOW_DIRECTION_DOWN - only moveable to down (renamed from D, which names the door below)
U = 8   ## ALLOW_DIRECTION_UP - only moveable to up
K = 9   ## KEY - key, needed for doors
D = 10  ## DOOR - door, only passable with a key
G = 11  ## GOAL - the end
levels = [
###
[{ "best_steps": 2, "matrix": [
# 0 1 2 3 4 5 6 7 8 9 10 11
[W, W, W, W, W, W, W, W, W, W, W, W], # 0
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 1
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 2
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 3
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 4
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 5
[W, 0, 0, 0, 0, P, 0, G, 0, 0, 0, W], # 6
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 7
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 8
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 9
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 10
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 11
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 12
[W, W, W, W, W, W, W, W, W, W, W, W] # 13
]},{ "best_steps": 8, "matrix": [
# 0 1 2 3 4 5 6 7 8 9 10 11
[W, W, W, W, W, W, W, W, W, W, W, W], # 0
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 1
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 2
[W, 0, 0, 0, 0, 0, W, W, W, 0, 0, W], # 3
[W, 0, 0, 0, 0, W, G, 0, W, 0, 0, W], # 4
[W, 0, 0, 0, W, P, W, 0, W, 0, 0, W], # 5
[W, 0, 0, 0, W, 0, W, D, W, 0, 0, W], # 6
[W, 0, 0, 0, W, 0, 0, K, W, 0, 0, W], # 7
[W, 0, 0, 0, W, W, W, W, 0, 0, 0, W], # 8
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 9
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 10
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 11
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 12
[W, W, W, W, W, W, W, W, W, W, W, W] # 13
]},{ "best_steps": 12, "matrix": [
# 0 1 2 3 4 5 6 7 8 9 10 11
[W, W, W, W, W, W, W, W, W, W, W, W], # 0
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 1
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 2
[W, 0, 0, 0, W, W, W, W, W, 0, 0, W], # 3
[W, 0, 0, 0, W, P, W, G, W, 0, 0, W], # 4
[W, 0, 0, 0, W, 0, W, 0, W, 0, 0, W], # 5
[W, 0, 0, 0, W, 0, W, 0, W, 0, 0, W], # 6
[W, 0, 0, 0, W, 0, D, 0, W, 0, 0, W], # 7
[W, 0, 0, 0, W, 0, W, W, 0, 0, 0, W], # 8
[W, 0, 0, 0, W, K, W, 0, 0, 0, 0, W], # 9
[W, 0, 0, 0, 0, W, 0, 0, 0, 0, 0, W], # 10
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 11
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 12
[W, W, W, W, W, W, W, W, W, W, W, W] # 13
]},{ "best_steps": 10, "matrix": [
# 0 1 2 3 4 5 6 7 8 9 10 11
[W, W, W, W, W, W, W, W, W, W, W, W], # 0
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 1
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 2
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 3
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 4
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 5
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 6
[W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, W], # 7
[W, 0, 0, G, 0, 0, 0, 0, 0, 0, 0, W], # 8
[W, W, W, D, W, 0, 0, 0, 0, 0, 0, W], # 9
[W, 0, 0, 0, W, 0, 0, 0, 0, 0, 0, W], # 10
[W, 0, W, 0, W, 0, 0, 0, 0, 0, 0, W], # 11
[W, P, W, K, W, 0, 0, 0, 0, 0, 0, W], # 12
[W, W, W, W, W, W, W, W, W, W, W, W] # 13
]}
]
| 2.375
| 2
|
physionet-django/console/admin.py
|
partizaans/physionet-build
| 0
|
12785229
|
from django.contrib import admin
from physionet import models
# Register your models here.
admin.site.register(models.StaticPage)
admin.site.register(models.Section)
| 1.375
| 1
|
app/blog/forms.py
|
chaos-soft/velvet
| 0
|
12785230
|
from datetime import datetime
from common.forms import DocumentForm
from common.functions import create_thumbnail, delete_thumbnail
from django import forms
from django.conf import settings
from django.utils.dateformat import format
from .models import Article
class ArticleForm(DocumentForm):
UPLOAD_TO = 'blog/%Y/%m/%d/'
title = forms.CharField()
content = forms.CharField(required=False, widget=forms.Textarea)
is_comments = forms.BooleanField(required=False)
is_published = forms.BooleanField(required=False)
type = forms.TypedChoiceField(choices=Article.Type.choices, coerce=int)
cover = forms.CharField(required=False)
code = forms.CharField(required=False, widget=forms.Textarea)
get_youtube_image = forms.BooleanField(required=False)
status = forms.CharField(required=False)
class Meta:
fields = [
'title',
'content',
'is_comments',
'is_published',
'type',
'cover',
'code',
'get_youtube_image',
'status',
]
model = Article
def clean(self):
cd = super().clean()
cd['date_modified'] = format(datetime.today(), settings.DATETIME_FORMAT)
return cd
def delete_image(self, i):
delete_thumbnail(self.cleaned_data['images'][i])
super().delete_image(i)
def upload_image(self, image):
name = super().upload_image(image)
create_thumbnail(name)
return name
| 2.265625
| 2
|
src/pumpwood_deploy/deploy.py
|
Murabei-OpenSource-Codes/pumpwood-deploy
| 0
|
12785231
|
"""Pumpwood Deploy."""
import os
import stat
import shutil
from pumpwood_deploy.microservices.standard.standard import (
StandardMicroservices)
from pumpwood_deploy.kubernets.kubernets import Kubernets
class DeployPumpWood():
"""Class to perform PumpWood Deploy."""
create_kube_cmd = (
'SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"\n'
'kubectl apply -f $SCRIPTPATH/{file} --namespace={namespace}')
def __init__(self, bucket_key_path: str, model_user_password: str,
rabbitmq_secret: str, hash_salt: str, kong_db_disk_name: str,
kong_db_disk_size: str, cluster_name: str,
cluster_zone: str, cluster_project: str,
namespace="default",
gateway_health_url: str = "health-check/pumpwood-auth-app/"):
"""
__init__.
Args:
bucket_key_path (str): path to bucket service user path.
model_password (str): Password of models microservice.
beatbox_conf_path (str): Path to beatbox configuration file.
beatbox_version (str): Version of beatbox image.
hash_salt (str): Salt for hashs in deployment.
cluster_zone (str): Kubernets cluster zone.
cluster_project (str): Kubernets project name.
Kwargs:
namespace [str]: Which namespace to deploy the system.
"""
self.deploy = []
self.kube_client = Kubernets(
cluster_name=cluster_name, zone=cluster_zone,
project=cluster_project, namespace=namespace)
self.namespace = namespace
standard_microservices = StandardMicroservices(
hash_salt=hash_salt,
rabbit_username='rabbitmq',
rabbit_password=<PASSWORD>,
kong_db_disk_name=kong_db_disk_name,
kong_db_disk_size=kong_db_disk_size,
model_user_password=model_<PASSWORD>,
bucket_key_path=bucket_key_path)
self.microsservices_to_deploy = [
standard_microservices]
self.base_path = os.getcwd()
def add_microservice(self, microservice):
"""
add_microservice.
.
"""
self.microsservices_to_deploy.append(microservice)
def create_deploy_files(self):
"""create_deploy_files."""
sevice_cmds = []
deploy_cmds = []
counter = 0
service_counter = 0
###################################################################
        # Clean up the previous deploy and create the folders that will
        # receive the new deploy files
if os.path.exists('outputs/deploy_output'):
shutil.rmtree('outputs/deploy_output')
os.makedirs('outputs/deploy_output')
os.makedirs('outputs/deploy_output/resources/')
if os.path.exists('outputs/services_output'):
shutil.rmtree('outputs/services_output')
os.makedirs('outputs/services_output')
os.makedirs('outputs/services_output/resources/')
###################################################################
#####################################################################
        # Use the template files and substitute the variables to create
        # the deploy templates
print('###Creating microservices files:')
# m = self.microsservices_to_deploy[0]
for m in self.microsservices_to_deploy:
print('\nProcessing: ' + str(m))
temp_deployments = m.create_deployment_file()
for d in temp_deployments:
if d['type'] in ['secrets', 'deploy', 'volume']:
file_name_temp = 'resources/{counter}__{name}.yml'
file_name = file_name_temp.format(
counter=counter,
name=d['name'])
print('Creating secrets/deploy: ' + file_name)
with open('outputs/deploy_output/' +
file_name, 'w') as file:
file.write(d['content'])
file_name_sh_temp = (
'outputs/deploy_output/{counter}__{name}.sh')
file_name_sh = file_name_sh_temp.format(
counter=counter, name=d['name'])
with open(file_name_sh, 'w') as file:
content = self.create_kube_cmd.format(
file=file_name, namespace=self.namespace)
file.write(content)
os.chmod(file_name_sh, stat.S_IRWXU)
deploy_cmds.append({
'command': 'run', 'file': file_name_sh,
'sleep': d.get('sleep')})
counter = counter + 1
elif d['type'] == 'secrets_file':
command_formated = (
'SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"\n'
"kubectl delete secret --namespace={namespace} {name};"
"\n"
"kubectl create secret generic {name} "
"--from-file='{path}' "
"--namespace={namespace}").format(
name=d["name"], path=d["path"],
namespace=self.namespace)
file_name_temp = (
'outputs/deploy_output/{counter}__{name}.sh')
file_name = file_name_temp.format(
counter=counter, name=d['name'])
print('Creating secrets_file: ' + file_name)
with open(file_name, 'w') as file:
file.write(command_formated)
os.chmod(file_name, stat.S_IRWXU)
deploy_cmds.append({
'command': 'run', 'file': file_name,
'sleep': d.get('sleep')})
counter = counter + 1
elif d['type'] == 'configmap':
file_name_resource_temp = 'resources/{name}'
file_name_resource = file_name_resource_temp.format(
name=d['file_name'])
if 'content' in d.keys():
with open('outputs/deploy_output/' +
file_name_resource, 'w') as file:
file.write(d['content'])
elif 'file_path' in d.keys():
with open(d['file_path'], 'rb') as file:
file_data = file.read()
with open('outputs/deploy_output/' +
file_name_resource, 'wb') as file:
file.write(file_data)
command_formated = None
if d.get('keyname') is None:
command_text = (
'SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"\n'
"kubectl delete configmap {name};\n"
"kubectl create configmap {name} "
'--from-file="$SCRIPTPATH/{file_name}" '
'--namespace={namespace}')
command_formated = command_text.format(
name=d['name'], file_name=file_name_resource,
namespace=self.namespace)
else:
command_text = (
'SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"\n'
"kubectl delete configmap {name};\n"
"kubectl create configmap {name} "
'--from-file="{keyname}=$SCRIPTPATH/{file_name}" '
'--namespace={namespace}')
command_formated = command_text.format(
name=d['name'], file_name=file_name_resource,
keyname=d['keyname'], namespace=self.namespace)
file_name_temp = 'outputs/deploy_output/' + \
'{counter}__{name}.sh'
file_name = file_name_temp.format(
counter=counter,
name=d['name'])
print('Creating configmap: ' + file_name)
with open(file_name, 'w') as file:
file.write(command_formated)
deploy_cmds.append({'command': 'run', 'file': file_name,
'sleep': d.get('sleep')})
os.chmod(file_name, stat.S_IRWXU)
counter = counter + 1
elif d['type'] == 'services':
file_name_temp = 'resources/{service_counter}__{name}.yml'
file_name = file_name_temp.format(
service_counter=service_counter,
name=d['name'])
print('Creating services: ' + file_name)
with open('outputs/services_output/' +
file_name, 'w') as file:
file.write(d['content'])
file_name_sh_temp = \
'outputs/services_output/' +\
'{service_counter}__{name}.sh'
file_name_sh = file_name_sh_temp .format(
service_counter=service_counter,
name=d['name'])
with open(file_name_sh, 'w') as file:
content = self.create_kube_cmd.format(
file=file_name, namespace=self.namespace)
file.write(content)
os.chmod(file_name_sh, stat.S_IRWXU)
sevice_cmds.append({
'command': 'run', 'file': file_name_sh,
'sleep': d.get('sleep')})
service_counter = service_counter + 1
elif d['type'] == 'endpoint_services':
raise Exception('Not used anymore')
else:
raise Exception('Type not implemented: %s' % (d['type'], ))
#####################################################################
return {
'service_cmds': sevice_cmds,
'microservice_cmds': deploy_cmds}
def deploy_cluster(self):
"""Deploy cluster."""
deploy_cmds = self.create_deploy_files()
print('\n\n###Deploying Services:')
self.kube_client.run_deploy_commmands(
deploy_cmds['service_cmds'])
print('\n\n###Deploying Microservices:')
self.kube_client.run_deploy_commmands(
deploy_cmds['microservice_cmds'])
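# Hedged usage sketch (not part of the original module): a typical driver
# script for this class. Every argument value below is an illustrative
# assumption, not taken from the original source.
# deploy = DeployPumpWood(
#     bucket_key_path="./keys/bucket-key.json",
#     model_user_password="<model password>",
#     rabbitmq_secret="<rabbitmq password>",
#     hash_salt="<salt>",
#     kong_db_disk_name="kong-db-disk",
#     kong_db_disk_size="10Gi",
#     cluster_name="my-cluster",
#     cluster_zone="us-central1-a",
#     cluster_project="my-project")
# deploy.deploy_cluster()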
| 2.3125
| 2
|
src/view/gtkview/msgSupport.py
|
iivvoo-abandoned/most
| 0
|
12785232
|
"""
generic routines for classes / widgets that support printing of irc messages
"""
# $Id: msgSupport.py,v 1.11 2002/02/24 23:08:24 ivo Exp $
from gtk import *
from GDK import *
from libglade import *
red = GdkColor(-1, 0, 0)
blue = GdkColor(0, 0, -1)
class msgSupport:
def __init__(self):
pass
def insert(self, msg):
self.text.insert_defaults(msg + "\n")
if hasattr(self, "updated") and callable(self.updated):
self.updated()
def msg(self, nick, msg, isme = 0, dcc=0):
if dcc:
_nick = "=%s=" % nick
else:
_nick = "<%s>" % nick
if isme:
self.text.insert(None, blue, None, "%s " % _nick)
else:
self.text.insert(None, red, None, "%s " % _nick)
self.insert(msg)
def notice(self, nick, msg, isme=0):
if isme:
self.text.insert(None, blue, None, "-%s- " % nick)
else:
self.text.insert(None, red, None, "-%s- " % nick)
self.insert(msg)
def action(self, nick, msg, isme=0, dcc=0):
arrow = "->"
if dcc:
arrow = "=>"
if isme:
self.text.insert(None, blue, None, "%s %s " % (arrow, nick))
else:
self.text.insert(None, red, None, "%s %s " % (arrow, nick))
self.insert(msg)
def announce(self, msg):
self.insert("*** " + msg)
def sendmsg(self, nick, msg):
""" insert a message *to* someone """
self.text.insert(None, blue, None, "-> <%s> " % nick)
self.insert(msg)
def sendaction(self, nick, target, msg):
""" insert a message *to* someone """
self.text.insert(None, blue, None, "-> <%s> %s " % (target, nick))
self.insert(msg)
| 2.578125
| 3
|
src/losses.py
|
dxfg/2DVoxelmorph
| 2
|
12785233
|
# Third party inports
import tensorflow as tf
import numpy as np
# batch_sizexheightxwidthxdepthxchan
def diceLoss(y_true, y_pred):
top = 2*tf.reduce_sum(y_true * y_pred, [1, 2, 3])
bottom = tf.maximum(tf.reduce_sum(y_true+y_pred, [1, 2, 3]), 1e-5)
dice = tf.reduce_mean(top/bottom)
return -dice
def gradientLoss(penalty='l1'):
def loss(y_true, y_pred):
dy = tf.abs(y_pred[:, 1:, :, :, :] - y_pred[:, :-1, :, :, :])
dx = tf.abs(y_pred[:, :, 1:, :, :] - y_pred[:, :, :-1, :, :])
dz = tf.abs(y_pred[:, :, :, 1:, :] - y_pred[:, :, :, :-1, :])
if (penalty == 'l2'):
dy = dy * dy
dx = dx * dx
dz = dz * dz
d = tf.reduce_mean(dx)+tf.reduce_mean(dy)+tf.reduce_mean(dz)
return d/3.0
return loss
def gradientLoss2D():
def loss(y_true, y_pred):
dy = tf.abs(y_pred[:, 1:, :, :] - y_pred[:, :-1, :, :])
dx = tf.abs(y_pred[:, :, 1:, :] - y_pred[:, :, :-1, :])
dy = dy * dy
dx = dx * dx
d = tf.reduce_mean(dx)+tf.reduce_mean(dy)
return d/2.0
return loss
def cc3D(win=[9, 9, 9], voxel_weights=None):
def loss(I, J):
I2 = I*I
J2 = J*J
IJ = I*J
filt = tf.ones([win[0], win[1], win[2], 1, 1])
I_sum = tf.nn.conv3d(I, filt, [1, 1, 1, 1, 1], "SAME")
J_sum = tf.nn.conv3d(J, filt, [1, 1, 1, 1, 1], "SAME")
I2_sum = tf.nn.conv3d(I2, filt, [1, 1, 1, 1, 1], "SAME")
J2_sum = tf.nn.conv3d(J2, filt, [1, 1, 1, 1, 1], "SAME")
IJ_sum = tf.nn.conv3d(IJ, filt, [1, 1, 1, 1, 1], "SAME")
win_size = win[0]*win[1]*win[2]
u_I = I_sum/win_size
u_J = J_sum/win_size
cross = IJ_sum - u_J*I_sum - u_I*J_sum + u_I*u_J*win_size
I_var = I2_sum - 2 * u_I * I_sum + u_I*u_I*win_size
J_var = J2_sum - 2 * u_J * J_sum + u_J*u_J*win_size
cc = cross*cross / (I_var*J_var+1e-5)
# if(voxel_weights is not None):
# cc = cc * voxel_weights
return -1.0*tf.reduce_mean(cc)
return loss
def cc2D(win=[9, 9]):
def loss(I, J):
I2 = tf.multiply(I, I)
J2 = tf.multiply(J, J)
IJ = tf.multiply(I, J)
sum_filter = tf.ones([win[0], win[1], 1, 1])
I_sum = tf.nn.conv2d(I, sum_filter, [1, 1, 1, 1], "SAME")
J_sum = tf.nn.conv2d(J, sum_filter, [1, 1, 1, 1], "SAME")
I2_sum = tf.nn.conv2d(I2, sum_filter, [1, 1, 1, 1], "SAME")
J2_sum = tf.nn.conv2d(J2, sum_filter, [1, 1, 1, 1], "SAME")
IJ_sum = tf.nn.conv2d(IJ, sum_filter, [1, 1, 1, 1], "SAME")
win_size = win[0]*win[1]
u_I = I_sum/win_size
u_J = J_sum/win_size
cross = IJ_sum - u_J*I_sum - u_I*J_sum + u_I*u_J*win_size
I_var = I2_sum - 2 * u_I * I_sum + u_I*u_I*win_size
J_var = J2_sum - 2 * u_J * J_sum + u_J*u_J*win_size
cc = cross*cross / (I_var*J_var + np.finfo(float).eps)
return -1.0*tf.reduce_mean(cc)
return loss
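# Hedged usage sketch (not part of the original module): how these loss
# factories are typically handed to Keras for a 2D registration net with a
# warped-image output and a flow output. The model, optimizer and loss weight
# are illustrative assumptions.
# model.compile(optimizer='adam',
#               loss=[cc2D(win=[9, 9]), gradientLoss2D()],
#               loss_weights=[1.0, 0.01])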
| 2
| 2
|
afqueue/messages/peer_messages.py
|
appfirst/distributed_queue_manager
| 1
|
12785234
|
from afqueue.common.encoding_utilities import cast_bytes
from afqueue.messages.base_message import BaseMessage #@UnresolvedImport
from afqueue.common.exception_formatter import ExceptionFormatter #@UnresolvedImport
from afqueue.common.client_queue_lock import ClientQueueLock #@UnresolvedImport
from afqueue.messages import message_types #@UnresolvedImport
from afqueue.common.client_exchange import ClientExchange #@UnresolvedImport
from afqueue.common.client_queue import ClientQueue #@UnresolvedImport
from afqueue.data_objects.exchange_wrapper import ExchangeWrapper #@UnresolvedImport
from afqueue.data_objects.data_queue_wrapper import DataQueueWrapper #@UnresolvedImport
import simplejson as json #@UnresolvedImport
import bson #@UnresolvedImport
def build_settings_dictionary(id_string, start_time, redis_connection_string, shared_memory_max_size,
ordered_ownership_stop_threshold, ordered_ownership_start_threshold):
"""
Builds the settings dictionary which peers use to pass settings information back and forth.
"""
# Build.
settings_dict = dict()
settings_dict["id"] = id_string
settings_dict["start_time"] = start_time
settings_dict["sm_connection"] = redis_connection_string
settings_dict["sm_max"] = shared_memory_max_size
settings_dict["oq_stop"] = ordered_ownership_stop_threshold
settings_dict["oq_start"] = ordered_ownership_start_threshold
# Return.
return settings_dict
class PeerForwardedCommandMessage(BaseMessage):
def __init__(self, destination_dealer_id_tag, command_message_as_dict, sender_id_string = None):
# Build base.
super(PeerForwardedCommandMessage, self).__init__(message_types.PEER_FORWARDED_COMMAND_MESSAGE)
# Store data.
self.destination_dealer_id_tag = destination_dealer_id_tag
self.command_message_as_dict = command_message_as_dict
# Internal data.
self.sender_id_string = sender_id_string
def send(self, socket):
"""
Sends the message over the socket.
"""
try:
BaseMessage._send_with_destination(self, socket, self.destination_dealer_id_tag, bson.dumps(self.command_message_as_dict))
except:
raise ExceptionFormatter.get_full_exception()
@staticmethod
def create_from_received(raw_message, sender_id_string):
"""
Returns a new message of this type from the raw message data.
"""
try:
return PeerForwardedCommandMessage(None, bson.loads(raw_message[1]), sender_id_string)
except:
raise ExceptionFormatter.get_full_exception()
class PeerOrderedQueuesExhaustedOwnersMessage(BaseMessage):
def __init__(self, destination_dealer_id_tag, ordered_queues_owners_exhausted_dictionary, sender_id_string = None):
# Build base.
super(PeerOrderedQueuesExhaustedOwnersMessage, self).__init__(message_types.PEER_ORDERED_QUEUES_OWNERS_EXHAUSTED)
# Store data.
self.destination_dealer_id_tag = destination_dealer_id_tag
self.ordered_queues_owners_exhausted_dictionary = ordered_queues_owners_exhausted_dictionary
# Internal data.
self.sender_id_string = sender_id_string
def send(self, socket):
"""
Sends the message over the socket.
"""
try:
BaseMessage._send_with_destination(self, socket, self.destination_dealer_id_tag, bson.dumps(self.ordered_queues_owners_exhausted_dictionary))
except:
raise ExceptionFormatter.get_full_exception()
@staticmethod
def create_from_received(raw_message, sender_id_string):
"""
Returns a new message of this type from the raw message data.
"""
try:
raw_message[1] = cast_bytes(raw_message[1])
return PeerOrderedQueuesExhaustedOwnersMessage(None, bson.loads(raw_message[1]), sender_id_string)
except:
raise ExceptionFormatter.get_full_exception()
class PeerClientDeclareExchangesRequestMessage(BaseMessage):
def __init__(self, destination_dealer_id_tag, client_exchange_list, sender_id_string = None):
# Build base.
super(PeerClientDeclareExchangesRequestMessage, self).__init__(message_types.PEER_CLIENT_DECLARE_EXCHANGES_REQUEST)
# Store data.
self.destination_dealer_id_tag = destination_dealer_id_tag
self.client_exchange_list = client_exchange_list
# Internal data.
self.sender_id_string = sender_id_string
def send(self, socket):
"""
Sends the message over the socket.
"""
try:
BaseMessage._send_with_destination(self, socket, self.destination_dealer_id_tag, json.dumps(ClientExchange.create_network_tuple_list(self.client_exchange_list)))
except:
raise ExceptionFormatter.get_full_exception()
@staticmethod
def create_from_received(raw_message, sender_id_string):
"""
Returns a new message of this type from the raw message data.
"""
try:
return PeerClientDeclareExchangesRequestMessage(None, ClientExchange.create_client_exchange_list(json.loads(raw_message[1])), sender_id_string)
except:
raise ExceptionFormatter.get_full_exception()
class PeerClientDeclareQueuesRequestMessage(BaseMessage):
def __init__(self, destination_dealer_id_tag, client_queue_list, sender_id_string = None):
# Build base.
super(PeerClientDeclareQueuesRequestMessage, self).__init__(message_types.PEER_CLIENT_DECLARE_QUEUES_REQUEST)
# Store data.
self.destination_dealer_id_tag = destination_dealer_id_tag
self.client_queue_list = client_queue_list
# Internal data.
self.sender_id_string = sender_id_string
def send(self, socket):
"""
Sends the message over the socket.
"""
try:
BaseMessage._send_with_destination(self, socket, self.destination_dealer_id_tag, json.dumps(ClientQueue.create_network_tuple_list(self.client_queue_list)))
except:
raise ExceptionFormatter.get_full_exception()
@staticmethod
def create_from_received(raw_message, sender_id_string):
"""
Returns a new message of this type from the raw message data.
"""
try:
return PeerClientDeclareQueuesRequestMessage(None, ClientQueue.create_client_queue_list(json.loads(raw_message[1])), sender_id_string)
except:
raise ExceptionFormatter.get_full_exception()
class PeerClientDeleteQueuesRequestMessage(BaseMessage):
def __init__(self, destination_dealer_id_tag, queue_name_list, sender_id_string = None):
# Build base.
super(PeerClientDeleteQueuesRequestMessage, self).__init__(message_types.PEER_CLIENT_DELETE_QUEUES_REQUEST)
# Store data.
self.destination_dealer_id_tag = destination_dealer_id_tag
self.queue_name_list = queue_name_list
# Internal data.
self.sender_id_string = sender_id_string
def send(self, socket):
"""
Sends the message over the socket.
"""
try:
BaseMessage._send_with_destination(self, socket, self.destination_dealer_id_tag, json.dumps(self.queue_name_list))
except:
raise ExceptionFormatter.get_full_exception()
@staticmethod
def create_from_received(raw_message, sender_id_string):
"""
Returns a new message of this type from the raw message data.
"""
try:
return PeerClientDeleteQueuesRequestMessage(None, json.loads(raw_message[1]), sender_id_string)
except:
raise ExceptionFormatter.get_full_exception()
class PeerClientLockQueuesRequestMessage(BaseMessage):
def __init__(self, destination_dealer_id_tag, client_queue_lock_list, owner_id_string, sender_id_string = None):
# Build base.
super(PeerClientLockQueuesRequestMessage, self).__init__(message_types.PEER_CLIENT_LOCK_QUEUES_REQUEST)
# Store data.
self.destination_dealer_id_tag = destination_dealer_id_tag
self.client_queue_lock_list = client_queue_lock_list
self.owner_id_string = owner_id_string
# Internal data.
self.sender_id_string = sender_id_string
def send(self, socket):
"""
Sends the message over the socket.
"""
try:
BaseMessage._send_with_destination(self, socket, self.destination_dealer_id_tag, json.dumps(ClientQueueLock.create_network_tuple_list(self.client_queue_lock_list)), self.owner_id_string)
except:
raise ExceptionFormatter.get_full_exception()
@staticmethod
def create_from_received(raw_message, sender_id_string):
"""
Returns a new message of this type from the raw message data.
"""
try:
return PeerClientLockQueuesRequestMessage(None, ClientQueueLock.create_client_queue_lock_list(json.loads(raw_message[1])), raw_message[2], sender_id_string)
except:
raise ExceptionFormatter.get_full_exception()
class PeerClientUnlockQueuesRequestMessage(BaseMessage):
def __init__(self, destination_dealer_id_tag, queue_name_list, sender_id_string = None):
# Build base.
super(PeerClientUnlockQueuesRequestMessage, self).__init__(message_types.PEER_CLIENT_UNLOCK_QUEUES_REQUEST)
# Store data.
self.destination_dealer_id_tag = destination_dealer_id_tag
self.queue_name_list = queue_name_list
# Internal data.
self.sender_id_string = sender_id_string
self.owner_id_string = None
def send(self, socket):
"""
Sends the message over the socket.
"""
try:
BaseMessage._send_with_destination(self, socket, self.destination_dealer_id_tag, json.dumps(self.queue_name_list))
except:
raise ExceptionFormatter.get_full_exception()
@staticmethod
def create_from_received(raw_message, sender_id_string):
"""
Returns a new message of this type from the raw message data.
"""
try:
return PeerClientUnlockQueuesRequestMessage(None, json.loads(raw_message[1]), sender_id_string)
except:
raise ExceptionFormatter.get_full_exception()
class PeerHeartBeatFailureMessage(BaseMessage):
def __init__(self, destination_dealer_id_tag, disconnecting_flag, sender_id_string = None):
# Build base.
super(PeerHeartBeatFailureMessage, self).__init__(message_types.PEER_HEART_BEAT_FAILURE)
# Transmitted data.
self.destination_dealer_id_tag = destination_dealer_id_tag
self.disconnecting_flag = disconnecting_flag
# Internal data.
self.sender_id_string = sender_id_string
def send(self, socket):
"""
Sends the message over the socket.
"""
try:
BaseMessage._send_with_destination(self, socket, self.destination_dealer_id_tag, str(self.disconnecting_flag))
except:
raise ExceptionFormatter.get_full_exception()
@staticmethod
def create_from_received(raw_message, sender_id_string):
"""
Returns a new message of this type from the raw message data.
"""
try:
            disconnecting_flag = raw_message[1] == "True"
            return PeerHeartBeatFailureMessage(None, disconnecting_flag, sender_id_string)
except:
raise ExceptionFormatter.get_full_exception()
class PeerHeartBeatMessage(BaseMessage):
def __init__(self, destination_dealer_id_tag, sender_time_stamp, sender_queue_size_snapshot_dict, sender_id_string = None):
# Build base.
super(PeerHeartBeatMessage, self).__init__(message_types.PEER_HEART_BEAT)
# Transmitted data.
self.destination_dealer_id_tag = destination_dealer_id_tag
self.sender_time_stamp = sender_time_stamp
self.sender_queue_size_snapshot_dict = sender_queue_size_snapshot_dict
# Internal data.
self.sender_id_string = sender_id_string
def send(self, socket):
"""
Sends the message over the socket.
"""
try:
BaseMessage._send_with_destination(self, socket, self.destination_dealer_id_tag, self.sender_time_stamp, bson.dumps(self.sender_queue_size_snapshot_dict))
except:
raise ExceptionFormatter.get_full_exception()
@staticmethod
def create_from_received(raw_message, sender_id_string):
"""
Returns a new message of this type from the raw message data.
"""
try:
raw_message[2] = cast_bytes(raw_message[2])
return PeerHeartBeatMessage(None, raw_message[1], bson.loads(raw_message[2]), sender_id_string)
except:
raise ExceptionFormatter.get_full_exception()
class PeerMasterControlDataMessage(BaseMessage):
def __init__(self, destination_dealer_id_tag, pecking_order_list, queue_lock_owner_dict,
ordered_queue_owners_dict, push_rejection_queue_name_set, accepting_data_owner_id_list,
frozen_push_queue_list, frozen_pull_queue_list, sender_id_string = None):
# Build base.
super(PeerMasterControlDataMessage, self).__init__(message_types.PEER_MASTER_CONTROL_DATA)
# Transmitted data.
self.destination_dealer_id_tag = destination_dealer_id_tag
self.pecking_order_list = pecking_order_list
self.queue_lock_owner_dict = queue_lock_owner_dict
self.ordered_queue_owners_dict = ordered_queue_owners_dict
self.push_rejection_queue_name_set = push_rejection_queue_name_set
self.accepting_data_owner_id_list = accepting_data_owner_id_list
self.frozen_push_queue_list = frozen_push_queue_list
self.frozen_pull_queue_list = frozen_pull_queue_list
# Internal data.
self.sender_id_string = sender_id_string
def dump(self, include_destination_tag = True):
"""
Dumps the message into a format in which it can be recreated via the "load" method.
"""
try:
dump_dict = dict()
if include_destination_tag == True:
dump_dict["ddit"] = self.destination_dealer_id_tag
dump_dict["pol"] = self.pecking_order_list
dump_dict["qlod"] = self.queue_lock_owner_dict
dump_dict["oqod"] = self.ordered_queue_owners_dict
dump_dict["prqns"] = list(self.push_rejection_queue_name_set)
dump_dict["adol"] = self.accepting_data_owner_id_list
dump_dict["fpush"] = self.frozen_push_queue_list
dump_dict["fpull"] = self.frozen_pull_queue_list
return bson.dumps(dump_dict)
except:
raise ExceptionFormatter.get_full_exception()
@staticmethod
def load(dumped_string):
"""
Returns an instance object of this class built from data which was created in the "dump" method.
"""
dumped_string = cast_bytes(dumped_string)
dump_dict = bson.loads(dumped_string)
destination_dealer_id_tag = dump_dict.get("ddit", None)
pecking_order_list = dump_dict["pol"]
queue_lock_owner_dict = dump_dict["qlod"]
ordered_queue_owners_dict = dump_dict["oqod"]
push_rejection_queue_name_set = set(dump_dict["prqns"])
accepting_data_owner_id_list = dump_dict["adol"]
frozen_push_queue_list = dump_dict["fpush"]
frozen_pull_queue_list = dump_dict["fpull"]
return PeerMasterControlDataMessage(destination_dealer_id_tag, pecking_order_list, queue_lock_owner_dict, ordered_queue_owners_dict,
push_rejection_queue_name_set, accepting_data_owner_id_list,
frozen_push_queue_list, frozen_pull_queue_list)
def send(self, socket):
"""
Sends the message over the socket.
"""
try:
BaseMessage._send_with_destination(self, socket, self.destination_dealer_id_tag, self.dump(False))
except:
raise ExceptionFormatter.get_full_exception()
@staticmethod
def create_from_received(raw_message, sender_id_string):
"""
Returns a new message of this type from the raw message data.
"""
try:
message = PeerMasterControlDataMessage.load(raw_message[1])
message.sender_id_string = sender_id_string
return message
except:
raise ExceptionFormatter.get_full_exception()
class PeerMasterSetupDataMessage(BaseMessage):
def __init__(self, destination_dealer_id_tag, exchange_wrapper_list, queue_wrapper_list, sender_id_string = None):
# Build base.
super(PeerMasterSetupDataMessage, self).__init__(message_types.PEER_MASTER_SETUP_DATA)
# Transmitted data.
self.destination_dealer_id_tag = destination_dealer_id_tag
self.exchange_wrapper_list = exchange_wrapper_list
self.queue_wrapper_list = queue_wrapper_list
# Internal data.
self.sender_id_string = sender_id_string
def dump(self):
"""
Dumps the message into a format in which it can be recreated via the "load" method.
"""
try:
dump_dict = dict()
dump_dict["ddit"] = self.destination_dealer_id_tag
dump_dict["ewl"] = [ew.dump() for ew in self.exchange_wrapper_list]
dump_dict["qwl"] = [qw.dump() for qw in self.queue_wrapper_list]
return bson.dumps(dump_dict)
except:
raise ExceptionFormatter.get_full_exception()
@staticmethod
def load(dumped_string):
"""
Returns an instance object of this class built from data which was created in the "dump" method.
"""
dumped_string = cast_bytes(dumped_string)
dump_dict = bson.loads(dumped_string)
destination_dealer_id_tag = dump_dict["ddit"]
exchange_wrapper_list = [ExchangeWrapper.load(dew) for dew in dump_dict["ewl"]]
queue_wrapper_list = [DataQueueWrapper.load(dqw) for dqw in dump_dict["qwl"]]
return PeerMasterSetupDataMessage(destination_dealer_id_tag, exchange_wrapper_list, queue_wrapper_list)
def send(self, socket):
"""
Sends the message over the socket.
"""
try:
BaseMessage._send_with_destination(self, socket, self.destination_dealer_id_tag, self.dump())
except:
raise ExceptionFormatter.get_full_exception()
@staticmethod
def create_from_received(raw_message, sender_id_string):
"""
Returns a new message of this type from the raw message data.
"""
try:
message = PeerMasterSetupDataMessage.load(raw_message[1])
message.sender_id_string = sender_id_string
return message
except:
raise ExceptionFormatter.get_full_exception()
class PeerOnlineHandshakeReplyMessage(BaseMessage):
def __init__(self, reply_id_tag, settings_dict, sender_dealer_id_tag,
sender_master_flag, master_setup_data_message, master_control_data_message, master_synchronization_failure_flag,
ping_back_success_flag):
# Build base.
super(PeerOnlineHandshakeReplyMessage, self).__init__(message_types.PEER_ONLINE_HANDSHAKE_REPLY)
# Transmitted data.
self.reply_id_tag = reply_id_tag
self.settings_dict = settings_dict
self.sender_dealer_id_tag = sender_dealer_id_tag
self.sender_master_flag = sender_master_flag
self.master_setup_data_message = master_setup_data_message
self.master_control_data_message = master_control_data_message
self.master_synchronization_failure_flag = master_synchronization_failure_flag
self.ping_back_success_flag = ping_back_success_flag
def send(self, socket):
"""
Sends the message over the socket.
"""
try:
if self.master_setup_data_message != None:
master_setup_data_message = self.master_setup_data_message.dump()
else:
master_setup_data_message = ""
if self.master_control_data_message != None:
master_control_data_message = self.master_control_data_message.dump()
else:
master_control_data_message = ""
BaseMessage._send_with_destination_and_delimiter(self, socket, self.reply_id_tag,
bson.dumps(self.settings_dict),
self.sender_dealer_id_tag,
str(self.sender_master_flag),
master_setup_data_message, master_control_data_message,
str(self.master_synchronization_failure_flag),
str(self.ping_back_success_flag))
except:
raise ExceptionFormatter.get_full_exception()
@staticmethod
def create_from_received(raw_message):
"""
Returns a new message of this type from the raw message data.
"""
try:
if raw_message[4] != "":
master_setup_data_message = PeerMasterSetupDataMessage.load(raw_message[4])
else:
master_setup_data_message = None
if raw_message[5] != "":
master_control_data_message = PeerMasterControlDataMessage.load(raw_message[5])
else:
master_control_data_message = None
return PeerOnlineHandshakeReplyMessage(None, bson.loads(raw_message[1]), raw_message[2],
BaseMessage.bool_from_string(raw_message[3]),
master_setup_data_message, master_control_data_message,
BaseMessage.bool_from_string(raw_message[6]),
BaseMessage.bool_from_string(raw_message[7]))
except:
raise ExceptionFormatter.get_full_exception()
class PeerOnlineHandshakeRequestMessage(BaseMessage):
def __init__(self, settings_dict, sender_dealer_id_tag, receiver_dealer_id_tag = None):
# Build base.
super(PeerOnlineHandshakeRequestMessage, self).__init__(message_types.PEER_ONLINE_HANDSHAKE_REQUEST)
# Transmitted data.
self.settings_dict = settings_dict
self.sender_dealer_id_tag = sender_dealer_id_tag
# Internal data.
self.receiver_dealer_id_tag = receiver_dealer_id_tag
self.sending_thread_name = None
def send(self, socket):
"""
Sends the message over the socket.
"""
try:
BaseMessage._send(self, socket, bson.dumps(self.settings_dict), self.sender_dealer_id_tag)
except:
raise ExceptionFormatter.get_full_exception()
@staticmethod
def create_from_received(raw_message, qm_dealer_id_tag):
"""
Returns a new message of this type from the raw message data.
"""
try:
return PeerOnlineHandshakeRequestMessage(bson.loads(raw_message[1]), raw_message[2], qm_dealer_id_tag)
except:
raise ExceptionFormatter.get_full_exception()
class PeerOfflineMessage(BaseMessage):
def __init__(self, destination_dealer_id_tag, sender_id_string = None):
# Build base.
super(PeerOfflineMessage, self).__init__(message_types.PEER_OFFLINE)
# Transmitted data.
self.destination_dealer_id_tag = destination_dealer_id_tag
# Internal data.
self.sender_id_string = sender_id_string
def send(self, socket):
"""
Sends the message over the socket.
"""
try:
BaseMessage._send_with_destination(self, socket, self.destination_dealer_id_tag)
except:
raise ExceptionFormatter.get_full_exception()
@staticmethod
def create_from_received(raw_message, sender_id_string):
"""
Returns a new message of this type from the raw message data.
"""
try:
return PeerOfflineMessage(None, sender_id_string)
except:
raise ExceptionFormatter.get_full_exception()
class PeerRequestMasterDataMessage(BaseMessage):
def __init__(self, destination_dealer_id_tag, sender_id_string = None):
# Build base.
super(PeerRequestMasterDataMessage, self).__init__(message_types.PEER_REQUEST_MASTER_DATA)
# Store data.
self.destination_dealer_id_tag = destination_dealer_id_tag
# Internal data.
self.sender_id_string = sender_id_string
def send(self, socket):
"""
Sends the message over the socket.
"""
try:
BaseMessage._send_with_destination(self, socket, self.destination_dealer_id_tag)
except:
raise ExceptionFormatter.get_full_exception()
@staticmethod
def create_from_received(raw_message, sender_id_string):
"""
Returns a new message of this type from the raw message data.
"""
try:
return PeerRequestMasterDataMessage(None, sender_id_string)
except:
raise ExceptionFormatter.get_full_exception()
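# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of how a received PEER_HEART_BEAT_FAILURE frame is
# rebuilt into a message object. The two-frame raw_message layout below is an
# assumption based on create_from_received(); this helper is never called.
def _example_heart_beat_failure_parse():
    raw_message = ["<type frame>", "True"]  # hypothetical raw frames
    message = PeerHeartBeatFailureMessage.create_from_received(raw_message, "peer-7")
    return message.disconnecting_flag  # True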
| 1.859375
| 2
|
googledocs/fix-googledocs-html.py
|
JoshuaFox/beautiful-jekyll
| 0
|
12785235
|
<filename>googledocs/fix-googledocs-html.py
# coding utf-8
import os
import re
import urllib.parse
from pathlib import Path
from bs4 import BeautifulSoup
def out_folder():
return os.path.abspath("./yiddish")
def folder_in():
return os.path.abspath("./_yiddish_from_google_docs")
def clean_missing_font_link(filename):
if "הײַנט בין" in filename:
with open(filename, 'r') as f:
data = f.read()
inserted = data.replace("@import url(https://themes.googleusercontent.com/fonts/css?kit=wAPX1HepqA24RkYW1AuHYA);", "")
if inserted != data: # Do this after filehandle for read is closed
with open(filename, 'wt') as fout:
fout.write(inserted)
print("remove_missing_font_link wrote", filename)
else:
print("remove_missing_font_link found nothing in", filename)
def clean_half_spaces(filename):
if ("הירהורים" in filename):
with open(filename, 'r') as f:
data = f.read()
inserted = re.sub(r"(?<![A-Za-z0-9])Q(?![A-Za-z0-9])", " ", data)
if inserted != data: # Do this after filehandle for read is closed
with open(filename, 'wt') as fout:
fout.write(inserted)
print("clean_half_spaces wrote", filename)
else:
print("clean_half_spaces found nothing in", filename)
def add_analytics(filename):
ua = 'UA-24142341-1'
analytics = \
f"""
<!-- Google Analytics -->
<script>
(function(i,s,o,g,r,a,m){{i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){{
(i[r].q=i[r].q||[]).push(arguments)}},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
}})(window,document,'script','//www.google-analytics.com/analytics.js','ga');
ga('create', '{ua}', 'auto');
ga('send', 'pageview');
</script>
<!-- End Google Analytics -->
"""
inserted = None
if filename.endswith(".html"):
with open(filename, 'r') as f:
data = f.read()
if ua in data:
print('Did not add Google Analytics in', filename)
else:
inserted = data.replace("</body>", analytics + "\n</body>")
print('Added Google Analytics in', filename)
if inserted: # Do this after filehandle for read is closed
with open(filename, 'wt') as fout:
fout.write(inserted)
print("add_analytics wrote", filename)
def add_rtl(filename):
rtl_style = "body{direction:rtl}</style>"
with open(filename, 'r') as f:
def replace_rtl_align_style():
data1 = f.read()
data2 = data1.replace(";text-align:left", "")
if data2 != data1:
print("Removed text-align:left in", filename)
else:
print("Did not remove text-align:left in", filename)
return data2
def add_rtl_div(data2):
if rtl_style not in data2:
data3 = data2.replace("</style>", rtl_style)
print("Inserted RTL in", filename)
else:
data3 = data2
print("Did not insert RTL in", filename)
return data3
with_rtl_style = replace_rtl_align_style()
with_rtl_div = add_rtl_div(with_rtl_style)
# Do this after filehandle for read is closed
with open(filename, 'wt') as fout:
fout.write(with_rtl_div)
print("add_rtl wrote", filename)
def generate_md(html_filepath, folder_out):
title = os.path.basename(html_filepath).split('.')[:-1][0]
if not re.match(r'.*[א-ת]+.*', title):
print('Not making markdown from', html_filepath)
else:
md_filepath = folder_out + "/" + title + '.md'
if os.path.exists(md_filepath):
os.remove(md_filepath)
print('Deleted', md_filepath)
else:
print('Markdown', md_filepath, "does not exist")
md_header = \
f"""---
layout: page
title: "{title}"
subtitle:
tags: [ yiddish]
css: yiddish
---
"""
with open(html_filepath, 'r') as f:
html = f.read()
md_header += html
with open(md_filepath, 'w') as fout:
fout.write(md_header)
print("generate_md wrote", md_filepath)
def replace_img(html_filepath):
    return  # The below is not doing what it looks like, because img urls are long GUIDs
if html_filepath.endswith('/דער פֿראָסט־ריזעס טאָכטער.html'):
with open(html_filepath, 'r') as f:
data = f.read()
inserted = data.replace('src="images/image1.png"', 'src="/img/conan.jpg"')
if data != inserted:
print("changed image tag in Conan story")
with open(html_filepath, 'wt') as fout:
fout.write(inserted)
print("replace_image wrote", html_filepath)
def fix_google_redirects_once(data, html_filepath):
    # There is also some query-string junk, but just leaving that.
intro_s = 'https://www.google.com/url?q=https://'
try:
idx = data.index(intro_s)
except ValueError:
return data
idx_end_of_real_url = data.index('&sa', idx)
idx_end_of_link = data.index('"', idx_end_of_real_url)
before = data[0: idx]
real_url_encoded = "https://" + data[idx + len(intro_s):idx_end_of_real_url]
real_url = urllib.parse.unquote(real_url_encoded)
rest = data[idx_end_of_link:]
replaced = before + real_url + rest
# if any(x in real_url for x in ["joshuafox.com", "lesswrong.com", "doit-intl.com", "grnh.se"]):
print("replaced link", real_url, "in", html_filepath)
return replaced
# else:
# print("Did not replace link",real_url,"in", html_filepath)
# return data
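# --- Hedged sketch (not part of the original script) ---
# Shows the kind of rewrite fix_google_redirects_once() performs on a Google
# Docs redirect link. The sample markup and URL below are invented, and this
# helper is never called by the script.
def _example_fix_google_redirect():
    sample = ('<a href="https://www.google.com/url?q=https://example.com/page'
              '&sa=D&ust=123">link</a>')
    cleaned = fix_google_redirects_once(sample, "sample.html")
    # cleaned == '<a href="https://example.com/page">link</a>'
    return cleaned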
def fix_link(html_filepath):
while True:
with open(html_filepath, 'r') as f:
data = f.read()
replaced = fix_google_redirects_once(data, html_filepath)
if replaced == data:
break
else:
with open(html_filepath, 'wt') as fout:
fout.write(replaced)
print("fix_link wrote", html_filepath)
def pretty_print(html_filepath):
with open(html_filepath, 'r') as f:
data = f.read()
soup = BeautifulSoup(data) # make BeautifulSoup
prettyHTML = soup.prettify() # prettify the html
with open(html_filepath, 'wt') as fout:
fout.write(prettyHTML)
def main():
os.chdir(Path(Path(__file__).parent.absolute()).parent.absolute())
assert "_site" not in os.getcwd(), "Do not run script in _site, which is meant for generated content"
for file in os.listdir(folder_in()):
html_filepath = folder_in() + '/' + file
if html_filepath.endswith(".html"):
clean_missing_font_link(html_filepath)
clean_half_spaces(html_filepath)
add_analytics(html_filepath)
add_rtl(html_filepath)
replace_img(html_filepath)
fix_link(html_filepath)
pretty_print(html_filepath)
generate_md(html_filepath, out_folder())
if __name__ == '__main__':
main()
| 3.03125
| 3
|
routes.py
|
Isaacgv/api_car_recom_blx
| 0
|
12785236
|
<filename>routes.py<gh_stars>0
import os
from flask import Flask, request, abort
from flask_cors import CORS
from speech import recive_audio
from nlu import sentiment_nlu
app=Flask(__name__)
cors = CORS(app, resources={r"/*": {"origins": "*"}})
#{
# 'car': (string)
# 'text': (string)
# 'audio':(file)
#}
@app.route("/mutipart/form-data", methods=["POST"])
def index():
if not request.json:
abort(400)
body = request.get_json()
car = body['car'].lower().split()
print(body)
if 'audio' in body:
text_audio = recive_audio(body["audio"])
recommend = sentiment_nlu(text_audio, car)
print(text_audio)
return recommend
elif 'text' in body:
recommend = sentiment_nlu(body['text'], car)
print(body['text'])
return recommend
else:
abort(400)
def main():
port = int(os.environ.get("PORT", 5000))
app.run(host="0.0.0.0", port=port)
if __name__ == "__main__":
main()
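# --- Hedged usage sketch (not part of the original app) ---
# A concrete example of the JSON body the endpoint above expects; the values
# are invented. An 'audio' entry would be routed through recive_audio() before
# sentiment_nlu(), while a plain 'text' entry goes to sentiment_nlu() directly.
_EXAMPLE_REQUEST_BODY = {
    "car": "toyota corolla",
    "text": "great fuel economy but the trunk feels small",
}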
| 2.78125
| 3
|
pymedphys/_dicom/rtplan/core.py
|
pymedphys/pymedphys-archive-2019
| 1
|
12785237
|
# Copyright (C) 2019 Cancer Care Associates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from pymedphys._utilities.transforms import convert_IEC_angle_to_bipolar
Point = namedtuple("Point", ("x", "y", "z"))
class DICOMEntryMissing(ValueError):
pass
def require_gantries_be_zero(plan):
gantry_angles = set(get_gantry_angles_from_dicom(plan))
if gantry_angles != set([0.0]):
raise ValueError("Only Gantry angles equal to 0.0 are currently supported")
def get_surface_entry_point_with_fallback(plan) -> Point:
try:
return get_surface_entry_point(plan)
except DICOMEntryMissing:
pass
require_gantries_be_zero(plan)
iso_raw = get_single_value_from_control_points(plan, "IsocenterPosition")
iso = Point(*[float(item) for item in iso_raw])
source_to_surface = get_single_value_from_control_points(
plan, "SourceToSurfaceDistance"
)
source_to_axis = get_single_value_from_beams(plan, "SourceAxisDistance")
new_y_value = iso.y + source_to_surface - source_to_axis
source_entry_point = Point(iso.x, new_y_value, iso.z)
return source_entry_point
def get_single_value_from_control_points(plan, keyword):
"""Get a named keyword from all control points.
Raises an error if all values are not the same as each other. Raises an
error if no value is found.
"""
values = set()
for beam in plan.BeamSequence:
for control_point in beam.ControlPointSequence:
try:
value = getattr(control_point, keyword)
except AttributeError:
continue
try:
values.add(value)
except TypeError:
values.add(tuple(value))
if not values:
raise DICOMEntryMissing(f"{keyword} was not found within the plan")
if len(values) > 1:
raise ValueError(f"More than one disagreeing {keyword} found")
return values.pop()
def get_single_value_from_beams(plan, keyword):
"""Get a named keyword from all beams.
Raises an error if all values are not the same as each other. Raises an
error if no value is found.
"""
values = set()
for beam in plan.BeamSequence:
try:
value = getattr(beam, keyword)
except AttributeError:
continue
try:
values.add(value)
except TypeError:
values.add(tuple(value))
if not values:
raise DICOMEntryMissing(f"{keyword} was not found within the plan")
if len(values) > 1:
raise ValueError(f"More than one disagreeing {keyword} found")
return values.pop()
def get_surface_entry_point(plan) -> Point:
"""
Parameters
----------
plan : pydicom.Dataset
Returns
-------
surface_entry_point : Point("x", "y", "z")
Patient surface entry point coordinates (x,y,z) in the
Patient-Based Coordinate System described in
Section C.7.6.2.1.1 [1]_ (mm).
References
----------
.. [1] https://dicom.innolitics.com/ciods/rt-plan/rt-beams/300a00b0/300a0111/300a012e
"""
# Once we have DicomCollection sorted out, it will likely be worthwhile
# having this function take a beam sequence parameter, and get the entry
# point for a given beam sequence
surface_entry_point_raw = get_single_value_from_control_points(
plan, "SurfaceEntryPoint"
)
surface_entry_point = Point(*[float(item) for item in surface_entry_point_raw])
return surface_entry_point
def get_metersets_from_dicom(dicom_dataset, fraction_group):
fraction_group_sequence = dicom_dataset.FractionGroupSequence
fraction_group_numbers = [
fraction_group.FractionGroupNumber for fraction_group in fraction_group_sequence
]
fraction_group_index = fraction_group_numbers.index(fraction_group)
fraction_group = fraction_group_sequence[fraction_group_index]
beam_metersets = tuple(
float(referenced_beam.BeamMeterset)
for referenced_beam in fraction_group.ReferencedBeamSequence
)
return beam_metersets
def get_cp_attribute_leaning_on_prior(control_point_sequence, attribute):
current_result = None
results = []
for control_point in control_point_sequence:
try:
current_result = getattr(control_point, attribute)
# If a subsequent control point doesn't record an
# angle then leave current_angle as what it was in the
# previous iteration of the loop
except AttributeError:
if current_result is None:
raise
results.append(current_result)
return results
def get_gantry_angles_from_dicom(dicom_dataset):
beam_gantry_angles = []
for beam_sequence in dicom_dataset.BeamSequence:
cp_gantry_angles_IEC = get_cp_attribute_leaning_on_prior(
beam_sequence.ControlPointSequence, "GantryAngle"
)
cp_gantry_angles_bipolar = convert_IEC_angle_to_bipolar(cp_gantry_angles_IEC)
cp_unique_gantry_angles = set(cp_gantry_angles_bipolar)
beam_gantry_angles.append(cp_unique_gantry_angles)
for cp_unique_gantry_angles in beam_gantry_angles:
if len(cp_unique_gantry_angles) != 1:
raise ValueError(
"Only a single gantry angle per beam is currently supported"
)
result = tuple(list(item)[0] for item in beam_gantry_angles)
return result
def get_fraction_group_index(dicom_dataset, fraction_group_number):
fraction_group_numbers = [
fraction_group.FractionGroupNumber
for fraction_group in dicom_dataset.FractionGroupSequence
]
return fraction_group_numbers.index(fraction_group_number)
def get_referenced_beam_sequence(dicom_dataset, fraction_group_number):
fraction_group_index = get_fraction_group_index(
dicom_dataset, fraction_group_number
)
fraction_group = dicom_dataset.FractionGroupSequence[fraction_group_index]
referenced_beam_sequence = fraction_group.ReferencedBeamSequence
beam_numbers = [
referenced_beam.ReferencedBeamNumber
for referenced_beam in referenced_beam_sequence
]
return beam_numbers, referenced_beam_sequence
def get_beam_indices_of_fraction_group(dicom_dataset, fraction_group_number):
beam_numbers, _ = get_referenced_beam_sequence(dicom_dataset, fraction_group_number)
beam_sequence_numbers = [
beam_sequence.BeamNumber for beam_sequence in dicom_dataset.BeamSequence
]
beam_indexes = [
beam_sequence_numbers.index(beam_number) for beam_number in beam_numbers
]
return beam_indexes
def get_fraction_group_beam_sequence_and_meterset(dicom_dataset, fraction_group_number):
beam_numbers, referenced_beam_sequence = get_referenced_beam_sequence(
dicom_dataset, fraction_group_number
)
metersets = [
referenced_beam.BeamMeterset for referenced_beam in referenced_beam_sequence
]
beam_sequence_number_mapping = {
beam.BeamNumber: beam for beam in dicom_dataset.BeamSequence
}
beam_sequence = [
beam_sequence_number_mapping[beam_number] for beam_number in beam_numbers
]
return beam_sequence, metersets
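# --- Hedged sketch (not part of the original module) ---
# Illustrates the fallback arithmetic used by
# get_surface_entry_point_with_fallback() for a gantry-zero beam.
# The numbers below are invented for demonstration.
def _example_entry_point_fallback():
    iso = Point(x=0.0, y=-20.0, z=10.0)
    source_to_surface = 900.0  # SSD in mm
    source_to_axis = 1000.0  # SAD in mm
    # The surface entry point is shifted by (SSD - SAD) = -100 mm along y.
    return Point(iso.x, iso.y + source_to_surface - source_to_axis, iso.z)
    # -> Point(x=0.0, y=-120.0, z=10.0)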
| 2.078125
| 2
|
app/__init__.py
|
zjurelinac/FM-Radio
| 0
|
12785238
|
from flask import Flask
from flask_mail import Mail
from peewee import *
import os
cwd = os.getcwd()
frontend_dest = os.path.join( cwd, 'frontend/' )
app = Flask( __name__, static_url_path = '', static_folder = frontend_dest )
app.config.from_object( 'config' )
mail = Mail( app )
db = SqliteDatabase( app.config[ 'DATABASE' ], threadlocals = True )
from app.views import *
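# --- Hedged sketch (not part of the original package) ---
# The 'config' object loaded above is assumed to define at least the DATABASE
# path consumed by SqliteDatabase, plus any Flask-Mail settings. A minimal
# example (values invented):
_EXAMPLE_CONFIG = {
    "DATABASE": "fm_radio.db",
    "MAIL_SERVER": "smtp.example.com",
    "MAIL_PORT": 587,
}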
| 2.15625
| 2
|
webcam_handler.py
|
dain-kim/ASLingo
| 0
|
12785239
|
<reponame>dain-kim/ASLingo
import os, sys
import cv2
import numpy as np
import pandas as pd
import string
import mediapipe as mp
import pickle
from zipfile import ZipFile
from utils import StaticSignProcessor, mp_process_image, generate_dataframe, annotate_image, pred_class_to_letter
# load the model
with open('saved_model.pkl', 'rb') as f:
model = pickle.load(f)
# TODO change to generic video src handler
class WebcamHandler():
def __init__(self, vid_src=0):
self.cap = cv2.VideoCapture(vid_src)
self.image = None
self.processor = StaticSignProcessor((126,))
self.timestamps = []
self.hand_results = []
self.pose_results = []
self.framecount = 0
def generate_buffer(self, frame, buffer_size=10, sliding_window=1):
'''
Generates a buffer of fixed length from a live video stream
to be processed and passed into the recognition model.
Returns:
A dict containing timestamps, hand_results, and pose_results
if the buffer condition is met
'''
assert buffer_size > 0, 'Buffer size must be a positive number'
assert sliding_window > 0, 'Sliding window size must be a positive number'
assert buffer_size > sliding_window, 'Sliding window must be smaller than buffer'
hand_result, pose_result = mp_process_image(frame)
if not hand_result.multi_handedness:
self.timestamps = []
self.hand_results = []
self.pose_results = []
self.framecount = 0
return
# self.timestamps.append(cap.get(cv2.CAP_PROP_POS_MSEC))
# time is a construct
self.timestamps.append(0.0)
self.hand_results.append(hand_result)
self.pose_results.append(pose_result)
self.framecount += 1
if (self.framecount % buffer_size == 0) or (self.framecount % sliding_window == 0 and self.framecount > buffer_size):
buf = {'timestamps': self.timestamps,
'hand_results': self.hand_results,
'pose_results': self.pose_results}
self.timestamps = self.timestamps[sliding_window:]
self.hand_results = self.hand_results[sliding_window:]
self.pose_results = self.pose_results[sliding_window:]
return buf
def get_next_frame(self, pred_thresh=0.3):
'''
Reads the next frame from the webcam and makes a prediction when applicable.
Returns:
- None if webcam feed is closed or can't read feed
- annotated image if feed is open
- annotated image, prediction, score if feed is open and buffer condition is met
'''
if not self.cap.isOpened():
return
success, image = self.cap.read()
if not success:
return
score = ''
prediction = ''
buf = self.generate_buffer(image, buffer_size=10, sliding_window=1)
if buf:
# Make a prediction on the generated buffer
df = generate_dataframe(buf)
data = self.processor.process(df)
pred_prob = model.predict_proba([data])[0]
pred_class = list(pred_prob).index(max(pred_prob))
if max(pred_prob) < pred_thresh:
# print('PREDICTED: NEUTRAL', max(pred_prob))
pass
else:
prediction = pred_class_to_letter(pred_class)[0]
score = str(round(max(pred_prob),2))
# print('PREDICTED:', prediction, score)
# if blur:
# image = cv2.blur(image, (25,25))
if self.hand_results:
image = annotate_image(image, self.hand_results[-1], self.pose_results[-1])
else:
prediction = 'No hands detected'
image = cv2.flip(image, 1)
if prediction:
cv2.putText(image, prediction + ' ' + score, (50,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2)
return image, prediction, score
def stream_webcam(self):
'''
A helper function to demonstrate the WebcamHandler's functionality.
Note that this is a blocking function: it will keep running until the webcam feed is closed.
'''
while self.cap.isOpened():
image,_,_ = self.get_next_frame()
cv2.imshow('webcam', image)
if cv2.waitKey(5) & 0xFF == 27:
print('esc')
break
# out = self.get_next_frame()
# while out:
# image,_,_ = out
# cv2.imshow('webcam', image)
# out = self.get_next_frame()
# if cv2.waitKey(5) & 0xFF == 27:
# print('esc')
# break
def evaluate_model(self, show=False):
'''
A helper function for evaluating the recognition model's performance.
It uses pre-recorded videos in test_webcam_data to test each letter.
The videos in the test data were not used to train the model.
'''
if not os.path.isdir('test_webcam_data'):
print('Unzipping test data...')
with ZipFile('test_webcam_data.zip','r') as zipobj:
zipobj.extractall()
accuracy = 0
for i in string.ascii_uppercase:
print('input:', i)
tmp = []
vid_src = f"test_webcam_data/{i}.mp4"
self.cap = cv2.VideoCapture(vid_src)
while self.cap.isOpened():
try:
image, pred, score = self.get_next_frame()
if pred not in ('','No hands detected'):
tmp.append(pred.replace('LETTER-',''))
if show:
cv2.imshow('webcam', image)
if cv2.waitKey(5) & 0xFF == 27:
print('esc')
break
except:
break
final_pred = max(set(tmp), key = tmp.count)
print('prediction:', final_pred)
if i == final_pred:
print('CORRECT')
accuracy += 1
else:
print('INCORRECT')
        print('\n\nFinal Accuracy: {}/26 ({}%)'.format(str(accuracy), round(100 * accuracy / 26, 2)))
# # TODO move this to a separate script
# class StaticSignProcessor():
# def __init__(self, X_shape=(10,126,1)):
# self.shape = X_shape
# def process(self, df):
# '''
# Processes the parsed data (DataFrame containing MediaPipe data objects)
# just the cleanup: cut out head and tail, fill nan, (normalize)
# '''
# # # Drop the frames in the beginning and end of the video where no hands are detected
# # start_idx = (~df['lefthand_0_x'].isna() | ~df['righthand_0_x'].isna()).argmax()
# # end_idx = len(df) - (df[::-1]['lefthand_0_x'].isna() & df[::-1]['righthand_0_x'].isna()).argmin()
# # # df = df.iloc[start_idx:end_idx]
# # # for lda
# # # df = df.iloc[start_idx:end_idx,1:]
# # # Fill empty values with the previous seen value
# # df = df.fillna(method='ffill')
# # df = df.fillna(method='bfill')
# # df = df.fillna(0.)
# # # Drop the timeframe and pose data
# # df = df.iloc[start_idx:end_idx,1:127]
# # if sum(np.isnan(df.to_numpy())) != 0:
# # print('FAIL: na value found')
# # print(df)
# # # normalize
# # data = df.fillna(0).to_numpy()
# # x = np.linspace(0, len(data.T[0]), self.shape[0], endpoint=False)
# # norm_data = np.array([np.interp(x, np.arange(len(col)), col) for col in data.T]).T
# # print(norm_data.shape)
# # norm_data = np.reshape(norm_data, self.shape)
# # print(norm_data.shape)
# # normalize x and y positions based on the width of the shoulders and height from shoulders to nose
# # x1,y1,x2,y2 = df[['pose_11_x','pose_0_y','pose_12_x','pose_12_y']].mean()
# df_array = df.to_numpy().T # shape: (202,num_frames)
# # col_indices = [df.columns.get_loc(col) for col in ('pose_11_x','pose_0_y','pose_12_x','pose_12_y')]
# # x1,y1,x2,y2 = df_array[col_indices].mean(axis=1)
# for h in ['left','right']:
# x1,y1,x2,y2 = df.filter(regex=h).filter(regex='_x').min().min(),df.filter(regex=h).filter(regex='_y').min().min(),df.filter(regex=h).filter(regex='_x').max().max(),df.filter(regex=h).filter(regex='_y').max().max()
# x_cols = [df.columns.get_loc(col) for col in df.filter(regex=h).filter(regex='_x').columns]
# y_cols = [df.columns.get_loc(col) for col in df.filter(regex=h).filter(regex='_y').columns]
# df_array[x_cols] = (df_array[x_cols]-min(x1,x2))/(max(x1,x2)-min(x1,x2)+0.000001)
# df_array[y_cols] = (df_array[y_cols]-min(y1,y2))/(max(y1,y2)-min(y1,y2)+0.000001)
# # # def norm_pts(p):
# # # px = (p[0]-min(x1,x2))/(max(x1,x2)-min(x1,x2)+0.000001)
# # # py = (p[1]-min(y1,y2))/(max(y1,y2)-min(y1,y2)+0.000001)
# # # return (px,py)
# # x_cols = [df.columns.get_loc(col) for col in df.filter(regex='_x').columns]
# # y_cols = [df.columns.get_loc(col) for col in df.filter(regex='_y').columns]
# # df_array[x_cols] = (df_array[x_cols]-min(x1,x2))/(max(x1,x2)-min(x1,x2)+0.000001)
# # df_array[y_cols] = (df_array[y_cols]-min(y1,y2))/(max(y1,y2)-min(y1,y2)+0.000001)
# # # df_x = (df.filter(regex='_x')-min(x1,x2))/(max(x1,x2)-min(x1,x2)+0.000001)
# # # df_y = (df.filter(regex='_y')-min(y1,y2))/(max(y1,y2)-min(y1,y2)+0.000001)
# norm_df = pd.DataFrame(data=df_array.T, columns=df.columns)
# # Drop the frames in the beginning and end of the video where no hands are detected
# # Drop the timeframe and pose data
# start_idx = (~norm_df['lefthand_0_x'].isna() | ~norm_df['righthand_0_x'].isna()).argmax()
# end_idx = len(norm_df) - (norm_df[::-1]['lefthand_0_x'].isna() & norm_df[::-1]['righthand_0_x'].isna()).argmin()
# norm_df = norm_df.iloc[start_idx:end_idx,1:127]
# # Fill empty values with the previous seen value
# norm_df = norm_df.fillna(method='ffill').fillna(method='bfill').fillna(0.)
# # For classifiers, just return the mean of each column
# return norm_df.mean().to_numpy()
# # for now, just choose 10 frames from the middle
# # data = df.iloc[len(df)//3:len(df)//3+10].mean().to_numpy()
# # if sum(np.isnan(data)) != 0:
# # print(sum(np.isnan(data)))
# # norm_data = np.reshape(data, self.shape)
# # assert data.shape == self.shape
# # return data
# def generate_more_data(self, df_array, n=10, std=0.1):
# '''
# Generate more data from a single sample by adding noise
# '''
# samples = []
# for i in range(n):
# noise = np.random.normal(0, std, df_array.shape)
# samples.append(df_array + noise)
# return samples
if __name__ == "__main__":
webcam = WebcamHandler()
# webcam.stream_webcam()
webcam.evaluate_model(show=(len(sys.argv) > 1 and sys.argv[1] == '--show'))
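# --- Hedged sketch (not part of the original module) ---
# Mirrors the schedule in generate_buffer(): with buffer_size=10 and
# sliding_window=1, the first buffer is emitted on the 10th consecutive
# hand-containing frame, then a new buffer (overlapping the last by 9 frames)
# on every frame after that. This helper only reproduces that arithmetic.
def _example_buffer_schedule(buffer_size=10, sliding_window=1, total_frames=13):
    return [
        frame for frame in range(1, total_frames + 1)
        if frame % buffer_size == 0
        or (frame % sliding_window == 0 and frame > buffer_size)
    ]  # -> [10, 11, 12, 13] for the defaults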
| 2.609375
| 3
|
index.py
|
Unknowncmbk/Pokemon-Go-Locator-Server
| 0
|
12785240
|
<reponame>Unknowncmbk/Pokemon-Go-Locator-Server<filename>index.py
#!/usr/bin/python
# local imports
import pokemon
import cell_data
from user import transaction
from user import loc
# python modules
import time
import json
import Geohash
from flask import Flask
from flask import request
'''
Note: This module serves as the backend handler, serving app requests by
creating JSON responses.
'''
# how many steps do we query niantic for
DEFAULT_STEP_MAX = 10
# how many miles do we query our db for
DEFAULT_MILE_MAX = 3
# the cooldown for local requests
L_REQ_CD = 10
# the cooldown for fetch requests
F_REQ_CD = 300
# maps HOST -> last request from our DB
LOCAL_REQ = {}
# maps HOST -> last fetch request from niantic
FETCH_REQ = {}
app = Flask(__name__)
def can_local_request(req_ip, curr_time):
'''
Args:
req_ip: The IP of the requester
curr_time: The current time
Returns:
True if the requester can locally pull data. False otherwise.
'''
if req_ip in LOCAL_REQ:
# grab the last request time
if curr_time - LOCAL_REQ[req_ip] > L_REQ_CD:
print('Host ' + str(req_ip) + ' can request locally!')
return True
else:
print('Host ' + str(req_ip) + ' cannot request locally!')
return False
else:
LOCAL_REQ[req_ip] = curr_time
print('Adding host ' + str(req_ip) + ' to local request!')
return True
def can_fetch_request(lat, lng, curr_time):
'''
Args:
lat: The latititude of the fetch request
lng: The longitude of the fetch request
curr_time: The current time
Returns:
True if the requester can fetch data. False otherwise.
'''
# format the lat/lng to two decimals
r_lat = "{0:.2f}".format(lat)
r_lng = "{0:.2f}".format(lng)
var = Geohash.encode(float(r_lat), float(r_lng))
print('Lat/Lng pair of ' + str(lat) + '/' + str(lng) + ' rounds to ' + str(r_lat) + '/' +str(r_lng) + ' and hashes to ' + str(var))
if var in FETCH_REQ:
# grab the last request time
if curr_time - FETCH_REQ[var] > F_REQ_CD:
print('Can fetch request of ' + str(r_lat) + "/" + str(r_lng))
# update it
FETCH_REQ[var] = curr_time
return True
else:
return False
else:
print('Can fetch request of ' + str(r_lat) + "/" + str(r_lng))
FETCH_REQ[var] = curr_time
return True
def build_local_request(lat, lng, radius):
'''
Builds the local request for pokemon data.
Args:
lat: The latitude of the request
lng: The longitude of the request
Returns:
The JSON wrapped response with initial key 'result' and value
of array of pokemon data. If something went wrong, returns None.
'''
result = {}
try:
lat = float(lat)
lng = float(lng)
pokes_map = []
pokes = pokemon.get_pokemon(lat, lng, radius)
for p in pokes:
pokes_map.append(p.__json__())
result["result"] = pokes_map
return json.dumps(result)
except Exception as e:
print('Exception ' + str(e))
return None
@app.route('/req/<lat>/<lng>')
def get_pokemon(lat, lng):
'''
Args:
lat: The latitude of the requested pokemon
lng: The longitude of the requested pokemon
Returns:
A JSON object filled with pokemon data.
'''
try:
lat = float(lat)
lng = float(lng)
# get the IP of the request
req_ip = request.environ['REMOTE_ADDR']
curr_time = time.time()
result = None
if can_fetch_request(lat, lng, curr_time):
# create initial location request
initial_loc = loc.Location()
initial_loc.set_loc_coords(lat, lng, 0)
initial_loc.default_lat = lat
initial_loc.default_lng = lng
# get all the locations in radius
locs = cell_data.get_radius_locs(initial_loc, DEFAULT_STEP_MAX)
if locs is not None and len(locs) > 0:
for l in locs:
# fetch request data, by adding transaction to nexus
transaction.create_transaction(l.float_lat, l.float_lng)
if can_local_request(req_ip, curr_time):
# update time of their last request
LOCAL_REQ[req_ip] = curr_time
# build the local request
result = build_local_request(lat, lng, DEFAULT_MILE_MAX)
if result is not None:
return result
except Exception as e:
print('Unable to grab lat/lng as floats: ' + str(e))
return json.dumps(str({"result": result}))
if __name__ == "__main__":
app.debug = True
app.run(host='0.0.0.0')
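# --- Hedged sketch (not part of the original server) ---
# Two requests that round to the same 0.01-degree lat/lng cell within
# F_REQ_CD seconds share one geohash key, so the second is throttled.
# Coordinates are arbitrary and this helper is never called by the server.
def _example_fetch_throttle():
    now = time.time()
    first = can_fetch_request(40.7128, -74.0060, now)  # True, key created
    second = can_fetch_request(40.7131, -74.0062, now + 5)  # False, same cell inside cooldown
    return first, second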
| 2.609375
| 3
|
haptools/data/haplotypes.py
|
aryarm/admixtools
| 0
|
12785241
|
from __future__ import annotations
import re
from pathlib import Path
from logging import getLogger, Logger
from fileinput import hook_compressed
from dataclasses import dataclass, field, fields
from typing import Iterator, get_type_hints, Generator
import numpy as np
import numpy.typing as npt
from pysam import TabixFile
from .data import Data
from .genotypes import GenotypesRefAlt
@dataclass
class Extra:
"""
An extra field on a line in the .hap file
Attributes
----------
name: str
The name of the extra field
fmt: str = "s"
The python fmt string of the field value; indicates how to format the value
description: str = ""
A description of the extra field
"""
name: str
fmt: str = "s"
description: str = ""
_type: type = field(init=False, repr=False)
    def __post_init__(self):
if self.fmt.endswith("s"):
self._type = str
elif self.fmt.endswith("d"):
self._type = int
elif self.fmt.endswith("f"):
self._type = float
else:
raise ValueError("Unsupported extra type '{}'!".format(self.fmt[-1]))
@classmethod
def from_hap_spec(cls, line: str) -> Extra:
"""
Convert an "extra" line in the header of a .hap file into an Extra object
Parameters
----------
line: str
An "extra" field, as it appears declared in the header
Returns
-------
Extra
An Extra object
"""
line = line[3:].split("\t")
return cls(name=line[0], fmt=line[1], description=line[2])
def to_hap_spec(self, line_type_symbol: str) -> str:
"""
Convert an Extra object into a header line in the .hap format spec
Parameters
----------
        line_type_symbol: str
            The symbol of the line type this extra field is declared for (e.g. 'H' or 'V')
Returns
-------
str
            A valid extra field declaration line for the .hap header
"""
return (
"#"
+ line_type_symbol
+ "\t"
+ "\t".join((self.name, self.fmt, self.description))
)
@property
def fmt_str(self) -> str:
"""
Convert an Extra into a fmt string
        Returns
-------
str
A python format string (ex: "{beta:.3f}")
"""
return "{" + self.name + ":" + self.fmt + "}"
# We declare this class to be a dataclass to automatically define __init__ and a few
# other methods.
@dataclass
class Variant:
"""
A variant within the .hap format spec
In order to use this class with the Haplotypes class, you should
1) add properties to the class for each of extra fields
2) override the _extras property to describe the header declaration
Attributes
----------
start: int
The chromosomal start position of the variant
end: int
The chromosomal end position of the variant
In most cases this will be the same as the start position
id: str
The variant's unique ID
allele: str
The allele of this variant within the Haplotype
_extras: tuple[Extra]
Extra fields for the haplotype
Examples
--------
Let's extend this class and add an extra field called "score"
>>> from dataclasses import dataclass, field
>>> @dataclass
>>> class CustomVariant(Variant):
... score: float
... _extras: tuple = (
... Extra("score", ".3f", "Importance of inclusion"),
... )
"""
start: int
end: int
id: str
allele: str
_extras: tuple = field(default=tuple(), init=False, repr=False)
@property
def ID(self):
"""
Create an alias for the id property
"""
return self.id
@property
# TODO: use @cached_property in py3.8
def _fmt(self):
extras = ""
if len(self._extras):
extras = "\t" + "\t".join(extra.fmt_str for extra in self._extras)
return "V\t{hap:s}\t{start:d}\t{end:d}\t{id:s}\t{allele:s}" + extras
@classmethod
def from_hap_spec(cls: Variant, line: str) -> tuple[str, Variant]:
"""
Convert a variant line into a Variant object in the .hap format spec
Note that this implementation does NOT support having more extra fields than
appear in the header
Parameters
----------
line: str
A variant (V) line from the .hap file
Returns
-------
tuple[str, Variant]
The haplotype ID and Variant object for the variant
"""
assert line[0] == "V", "Attempting to init a Variant with a non-V line"
line = line[2:].split("\t")
hap_id = line[0]
var_fields = {}
idx = 1
for name, val in get_type_hints(cls).items():
if not name.startswith("_"):
var_fields[name] = val(line[idx])
idx += 1
return hap_id, cls(**var_fields)
def to_hap_spec(self, hap_id: str) -> str:
"""
Convert a Variant object into a variant line in the .hap format spec
Parameters
----------
hap_id: str
The ID of the haplotype associated with this variant
Returns
-------
str
A valid variant line (V) in the .hap format spec
"""
return self._fmt.format(**self.__dict__, hap=hap_id)
@classmethod
def extras_head(cls) -> tuple:
"""
Return the header lines of the extra fields that are supported
Returns
-------
tuple
The header lines of the extra fields
"""
return tuple(extra.to_hap_spec("V") for extra in cls._extras)
# We declare this class to be a dataclass to automatically define __init__ and a few
# other methods.
@dataclass
class Haplotype:
"""
A haplotype within the .hap format spec
In order to use this class with the Haplotypes class, you should
1) add properties to the class for each of extra fields
2) override the _extras property to describe the header declaration
Attributes
----------
chrom: str
The contig to which this haplotype belongs
start: int
The chromosomal start position of the haplotype
end: int
The chromosomal end position of the haplotype
id: str
The haplotype's unique ID
variants: list[Variant]
A list of the variants in this haplotype
_extras: tuple[Extra]
Extra fields for the haplotype
Examples
--------
Let's extend this class and add an extra field called "ancestry"
>>> from dataclasses import dataclass, field
>>> @dataclass
>>> class CustomHaplotype(Haplotype):
... ancestry: str
... _extras: tuple = (
... Extra("ancestry", "s", "Local ancestry"),
... )
"""
chrom: str
start: int
end: int
id: str
variants: tuple = field(default_factory=tuple, init=False)
_extras: tuple = field(default=tuple(), init=False, repr=False)
@property
def ID(self):
"""
Create an alias for the id property
"""
return self.id
@property
# TODO: use @cached_property in py3.8
def _fmt(self):
extras = ""
if len(self._extras):
extras = "\t" + "\t".join(extra.fmt_str for extra in self._extras)
return "H\t{chrom:s}\t{start:d}\t{end:d}\t{id:s}" + extras
@property
# TODO: use @cached_property in py3.8
def varIDs(self):
return {var.id for var in self.variants}
@classmethod
def from_hap_spec(
cls: Haplotype, line: str, variants: tuple = tuple()
) -> Haplotype:
"""
        Convert a haplotype line into a Haplotype object in the .hap format spec
Note that this implementation does NOT support having more extra fields than
appear in the header
Parameters
----------
line: str
            A haplotype (H) line from the .hap file
Returns
-------
Haplotype
The Haplotype object for the variant
"""
assert line[0] == "H", "Attempting to init a Haplotype with a non-H line"
line = line[2:].split("\t")
hap_fields = {}
idx = 0
for name, val in get_type_hints(cls).items():
if name != "variants" and not name.startswith("_"):
hap_fields[name] = val(line[idx])
idx += 1
hap = cls(**hap_fields)
hap.variants = variants
return hap
def to_hap_spec(self) -> str:
"""
Convert a Haplotype object into a haplotype line in the .hap format spec
Returns
-------
str
A valid haplotype line (H) in the .hap format spec
"""
return self._fmt.format(**self.__dict__)
@classmethod
def extras_head(cls) -> tuple:
"""
Return the header lines of the extra fields that are supported
Returns
-------
tuple
The header lines of the extra fields
"""
return tuple(extra.to_hap_spec("H") for extra in cls._extras)
def transform(
self, genotypes: GenotypesRefAlt, samples: list[str] = None
) -> npt.NDArray[bool]:
"""
Transform a genotypes matrix via the current haplotype
Each entry in the returned matrix denotes the presence of the current haplotype
in each chromosome of each sample in the Genotypes object
Parameters
----------
genotypes : GenotypesRefAlt
The genotypes which to transform using the current haplotype
If the genotypes have not been loaded into the Genotypes object yet, this
method will call Genotypes.read(), while loading only the needed variants
samples : list[str], optional
See documentation for :py:attr:`~.Genotypes.read`
Returns
-------
npt.NDArray[bool]
A 2D matrix of shape (num_samples, 2) where each entry in the matrix
denotes the presence of the haplotype in one chromosome of a sample
"""
var_IDs = self.varIDs
# check: have the genotypes been loaded yet?
# if not, we can load just the variants we need
if genotypes.unset():
start = min(var.start for var in self.variants)
end = max(var.end for var in self.variants)
region = f"{self.chrom}:{start}-{end}"
genotypes.read(region=region, samples=samples, variants=var_IDs)
genotypes.check_biallelic(discard_also=True)
genotypes.check_phase()
# create a dict where the variants are keyed by ID
var_dict = {
var["id"]: var["ref"] for var in genotypes.variants if var["id"] in var_IDs
}
var_idxs = [
idx for idx, var in enumerate(genotypes.variants) if var["id"] in var_IDs
]
missing_IDs = var_IDs - var_dict.keys()
if len(missing_IDs):
raise ValueError(
f"Variants {missing_IDs} are present in haplotype '{self.id}' but "
"absent in the provided genotypes"
)
# create a np array denoting the alleles that we want
alleles = [int(var.allele != var_dict[var.id]) for var in self.variants]
allele_arr = np.array([[[al] for al in alleles]]) # shape: (1, n, 1)
# look for the presence of each allele in each chromosomal strand
# and then just AND them together
return np.all(allele_arr == genotypes.data[:, var_idxs], axis=1)
class Haplotypes(Data):
"""
A class for processing haplotypes from a file
Attributes
----------
fname: Path
The path to the file containing the data
data: dict[str, Haplotype]
A dict of Haplotype objects keyed by their IDs
types: dict
A dict of class names keyed by the symbol denoting their line type
Ex: {'H': Haplotype, 'V': Variant}
version: str
A string denoting the current file format version
log: Logger
A logging instance for recording debug statements.
Examples
--------
Parsing a basic .hap file without any extra fields is simple:
>>> haplotypes = Haplotypes.load('tests/data/basic.hap')
>>> haps = haplotypes.data # a dictionary of Haplotype objects
If the .hap file contains extra fields, you'll need to call the read() method
manually. You'll also need to create Haplotype and Variant subclasses that support
the extra fields and then specify the names of the classes when you initialize the
Haplotypes object:
>>> haplotypes = Haplotypes('tests/data/simphenotype.hap', HaptoolsHaplotype)
>>> haplotypes.read()
>>> haps = haplotypes.data # a dictionary of Haplotype objects
"""
def __init__(
self,
fname: Path,
haplotype: type[Haplotype] = Haplotype,
variant: type[Variant] = Variant,
log: Logger = None,
):
super().__init__(fname, log)
self.data = None
self.types = {"H": haplotype, "V": variant}
self.version = "0.0.1"
@classmethod
def load(
cls: Haplotypes, fname: Path, region: str = None, haplotypes: set[str] = None
) -> Haplotypes:
"""
Load haplotypes from a .hap file
Read the file contents
Parameters
----------
fname: Path
See documentation for :py:attr:`~.Data.fname`
region: str, optional
See documentation for :py:meth:`~.Haplotypes.read`
haplotypes: list[str], optional
See documentation for :py:meth:`~.Haplotypes.read`
Returns
-------
Haplotypes
A Haplotypes object with the data loaded into its properties
"""
haps = cls(fname)
haps.read(region, haplotypes)
return haps
def check_header(self, lines: list[str], check_version=False):
"""
        Check 1) that the version number matches and 2) that extra fields declared in
        the .hap file can be handled by the Variant and Haplotype classes
        provided in __init__()
Parameters
----------
lines: list[str]
Header lines from the .hap file
check_version: bool = False
Whether to also check the version of the file
Raises
------
ValueError
If any of the header lines are not supported
"""
self.log.info("Checking header")
if check_version:
version_line = lines[0].split("\t")
assert version_line[1] == "version", (
"The version of the format spec must be declared as the first line of"
" the header."
)
if version_line[2] != self.version:
self.log.warning(
f"The version of the provided .hap file is {version_line} but this"
f" tool expected {self.version}"
)
expected_lines = [
line
for line_type in self.types.values()
for line in line_type.extras_head()
]
for line in lines:
if line[1] in self.types.keys():
try:
expected_lines.remove(line)
except ValueError:
# extract the name of the extra field
name = line.split("\t", maxsplit=1)[1]
raise ValueError(
f"The extra field '{name}' is declared in the header of the"
" .hap file but is not accepted by this tool."
)
# if there are any fields left...
if expected_lines:
names = [line.split("\t", maxsplit=2)[1] for line in expected_lines]
raise ValueError(
"Expected the input .hap file to have these extra fields, but they "
f"don't seem to be declared in the header: {*names,}"
)
def _line_type(self, line: str) -> type:
"""
Return the type of line that this line matches
Parameters
----------
line: str
A line of the .hap file
Returns
-------
type
The name of the class corresponding with the type of this line
"""
line_types = self.types.keys()
if line[0] in line_types:
return line[0]
else:
# if none of the lines matched, return None
return None
def read(self, region: str = None, haplotypes: set[str] = None):
"""
Read haplotypes from a .hap file into a list stored in :py:attr:`~.Haplotypes.data`
Parameters
----------
region: str, optional
The region from which to extract haplotypes; ex: 'chr1:1234-34566' or 'chr7'
For this to work, the .hap file must be indexed and the seqname must match!
Defaults to loading all haplotypes
haplotypes: list[str], optional
A list of haplotype IDs corresponding to a subset of the haplotypes to
extract
Defaults to loading haplotypes from all samples
For this to work, the .hap file must be indexed
"""
super().read()
self.data = {}
var_haps = {}
for line in self.__iter__(region, haplotypes):
if isinstance(line, Haplotype):
self.data[line.id] = line
elif isinstance(line, Variant):
hap_id = line.hap
del line.hap
# store the variant for later
var_haps.setdefault(hap_id, []).append(line)
for hap in var_haps:
self.data[hap].variants = tuple(var_haps[hap])
def __iter__(
self, region: str = None, haplotypes: set[str] = None
) -> Iterator[Variant | Haplotype]:
"""
Read haplotypes from a .hap file line by line without storing anything
Parameters
----------
region: str, optional
The region from which to extract haplotypes; ex: 'chr1:1234-34566' or 'chr7'
For this to work, the .hap file must be indexed and the seqname must match!
Defaults to loading all haplotypes
haplotypes: list[str], optional
A list of haplotype IDs corresponding to a subset of the haplotypes to
extract
Defaults to loading haplotypes from all samples
For this to work, the .hap file must be indexed
Yields
------
Iterator[Variant|Haplotype]
An iterator over each line in the file, where each line is encoded as a
Variant or Haplotype containing each of the class properties
Examples
--------
If you're worried that the contents of the .hap file will be large, you may
opt to parse the file line-by-line instead of loading it all into memory at
once. In cases like these, you can use the __iter__() method in a for-loop:
>>> haplotypes = Haplotypes('tests/data/basic.hap')
>>> for line in haplotypes:
... print(line)
Call the function manually to pass it the region or haplotypes params:
>>> haplotypes = Haplotypes('tests/data/basic.hap.gz')
>>> for line in haplotypes.__iter__(
... region='21:26928472-26941960', haplotypes={"chr21.q.3365*1"}
... ):
... print(line)
"""
# if the user requested a specific region or set of haplotypes, then we should
# handle it using tabix
# else, we use a regular text opener
if region or haplotypes:
haps_file = TabixFile(str(self.fname))
self.check_header(list(haps_file.header))
if region:
region_positions = region.split(":", maxsplit=1)[1]
# fetch region
# we already know that each line will start with an H, so we don't
# need to check that
for line in haps_file.fetch(region):
hap = self.types["H"].from_hap_spec(line)
if haplotypes is not None:
if hap.id not in haplotypes:
continue
haplotypes.remove(hap.id)
yield hap
else:
for line in haps_file.fetch():
# we only want lines that start with an H
line_type = self._line_type(line)
if line_type == "H":
hap = self.types["H"].from_hap_spec(line)
if hap.id in haplotypes:
yield hap
haplotypes.remove(hap.id)
elif line_type > "H":
# if we've already passed all of the H's, we can just exit
# We assume the file has been sorted so that all of the H lines
# come before the V lines
break
# query for the variants of each haplotype
for hap_id in self.data:
# exclude variants outside the desired region
hap_region = hap_id
if region:
hap_region = hap_id + ":" + region_positions
# fetch region
# we already know that each line will start with a V, so we don't
# need to check that
for line in haps_file.fetch(hap_region):
line_type = self._line_type(line)
if line_type == "V":
var = self.types["V"].from_hap_spec(line)[1]
# add the haplotype, since otherwise, the user won't know
# which haplotype this variant belongs to
var.hap = hap_id
yield var
else:
self.log.warning(
"Check that chromosomes are distinct from your hap IDs!"
)
haps_file.close()
else:
# the file is not indexed, so we can't assume it's sorted, either
# use hook_compressed to automatically handle gz files
with hook_compressed(self.fname, mode="rt") as haps:
self.log.info("Not taking advantage of indexing.")
header_lines = []
for line in haps:
line = line.rstrip("\n")
line_type = self._line_type(line)
if line[0] == "#":
# store header for later
try:
header_lines.append(line)
except AttributeError:
# this happens when we encounter a line beginning with a #
# after already having seen an H or V line
# in this case, it's usually just a comment, so we can ignore
pass
else:
if header_lines:
self.check_header(header_lines)
header_lines = None
self.log.info("Finished reading header.")
if line_type == "H":
yield self.types["H"].from_hap_spec(line)
elif line_type == "V":
hap_id, var = self.types["V"].from_hap_spec(line)
# add the haplotype, since otherwise, the user won't know
# which haplotype this variant belongs to
var.hap = hap_id
yield var
else:
self.log.warning(
f"Ignoring unsupported line type '{line[0]}'"
)
def to_str(self) -> Generator[str, None, None]:
"""
Create a string representation of this Haplotype
Yields
------
Generator[str, None, None]
A list of lines (strings) to include in the output
"""
yield "#\tversion\t" + self.version
for line_type in self.types:
yield from self.types[line_type].extras_head()
for hap in self.data.values():
yield self.types["H"].to_hap_spec(hap)
for hap in self.data.values():
for var in hap.variants:
yield self.types["V"].to_hap_spec(var, hap.id)
def __repr__(self):
return "\n".join(self.to_str())
def write(self):
"""
Write the contents of this Haplotypes object to the file given by fname
Examples
--------
To write to a .hap file, you must first initialize a Haplotypes object and then
fill out the data property:
>>> haplotypes = Haplotypes('tests/data/basic.hap')
>>> haplotypes.data = {'H1': Haplotype('chr1', 0, 10, 'H1')}
>>> haplotypes.write()
"""
with hook_compressed(self.fname, mode="wt") as haps:
for line in self.to_str():
haps.write(line + "\n")
def transform(
self,
genotypes: GenotypesRefAlt,
hap_gts: GenotypesRefAlt,
samples: list[str] = None,
low_memory: bool = False,
) -> GenotypesRefAlt:
"""
Transform a genotypes matrix via the current haplotype
Each entry in the returned matrix denotes the presence of each haplotype
in each chromosome of each sample in the Genotypes object
Parameters
----------
genotypes : GenotypesRefAlt
The genotypes which to transform using the current haplotype
If the genotypes have not been loaded into the Genotypes object yet, this
method will call Genotypes.read(), while loading only the needed variants
hap_gts: GenotypesRefAlt
An empty GenotypesRefAlt object into which the haplotype genotypes should
be stored
samples : list[str], optional
See documentation for :py:attr:`~.Genotypes.read`
low_memory : bool, optional
If True, each haplotype's genotypes will be loaded one at a time.
Returns
-------
GenotypesRefAlt
A Genotypes object composed of haplotypes instead of regular variants.
"""
hap_gts.samples = genotypes.samples
hap_gts.variants = np.array(
[(hap.id, hap.chrom, hap.start, 0, "A", "T") for hap in self.data.values()],
dtype=[
("id", "U50"),
("chrom", "U10"),
("pos", np.uint32),
("aaf", np.float64),
("ref", "U100"),
("alt", "U100"),
],
)
self.log.info(
f"Transforming a set of genotypes from {len(genotypes.variants)} total "
f"variants with a list of {len(self.data)} haplotypes"
)
hap_gts.data = np.concatenate(
tuple(
hap.transform(genotypes, samples)[:, np.newaxis]
for hap in self.data.values()
),
axis=1,
        ).astype(np.uint8)
        return hap_gts
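# A minimal usage sketch of Haplotypes.transform(), not part of the module
# itself; the file names and the way the empty GenotypesRefAlt is constructed
# are assumptions for illustration only:
#   gts = GenotypesRefAlt("samples.vcf.gz")
#   gts.read()
#   haps = Haplotypes("haplotypes.hap")
#   haps.read()
#   hap_gts = GenotypesRefAlt("output.vcf.gz")
#   haps.transform(gts, hap_gts)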
| 2.53125
| 3
|
mindhome_alpha/erpnext/selling/report/customer_acquisition_and_loyalty/customer_acquisition_and_loyalty.py
|
Mindhome/field_service
| 1
|
12785242
|
<filename>mindhome_alpha/erpnext/selling/report/customer_acquisition_and_loyalty/customer_acquisition_and_loyalty.py
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import calendar
import frappe
from frappe import _
from frappe.utils import cint, cstr, getdate
def execute(filters=None):
common_columns = [
{
'label': _('New Customers'),
'fieldname': 'new_customers',
'fieldtype': 'Int',
'default': 0,
'width': 125
},
{
'label': _('Repeat Customers'),
'fieldname': 'repeat_customers',
'fieldtype': 'Int',
'default': 0,
'width': 125
},
{
'label': _('Total'),
'fieldname': 'total',
'fieldtype': 'Int',
'default': 0,
'width': 100
},
{
'label': _('New Customer Revenue'),
'fieldname': 'new_customer_revenue',
'fieldtype': 'Currency',
'default': 0.0,
'width': 175
},
{
'label': _('Repeat Customer Revenue'),
'fieldname': 'repeat_customer_revenue',
'fieldtype': 'Currency',
'default': 0.0,
'width': 175
},
{
'label': _('Total Revenue'),
'fieldname': 'total_revenue',
'fieldtype': 'Currency',
'default': 0.0,
'width': 175
}
]
if filters.get('view_type') == 'Monthly':
return get_data_by_time(filters, common_columns)
else:
return get_data_by_territory(filters, common_columns)
def get_data_by_time(filters, common_columns):
# key yyyy-mm
columns = [
{
'label': _('Year'),
'fieldname': 'year',
'fieldtype': 'Data',
'width': 100
},
{
'label': _('Month'),
'fieldname': 'month',
'fieldtype': 'Data',
'width': 100
},
]
columns += common_columns
customers_in = get_customer_stats(filters)
# time series
from_year, from_month, temp = filters.get('from_date').split('-')
to_year, to_month, temp = filters.get('to_date').split('-')
from_year, from_month, to_year, to_month = \
cint(from_year), cint(from_month), cint(to_year), cint(to_month)
out = []
for year in range(from_year, to_year+1):
for month in range(from_month if year==from_year else 1, (to_month+1) if year==to_year else 13):
key = '{year}-{month:02d}'.format(year=year, month=month)
data = customers_in.get(key)
new = data['new'] if data else [0, 0.0]
repeat = data['repeat'] if data else [0, 0.0]
out.append({
'year': cstr(year),
'month': calendar.month_name[month],
'new_customers': new[0],
'repeat_customers': repeat[0],
'total': new[0] + repeat[0],
'new_customer_revenue': new[1],
'repeat_customer_revenue': repeat[1],
'total_revenue': new[1] + repeat[1]
})
return columns, out
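# Worked example of the month loop above: with from_date='2020-11-01' and
# to_date='2021-02-28', get_data_by_time() emits one row each for the keys
# 2020-11, 2020-12, 2021-01 and 2021-02.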
def get_data_by_territory(filters, common_columns):
columns = [{
		'label': _('Territory'),
'fieldname': 'territory',
'fieldtype': 'Link',
'options': 'Territory',
'width': 150
}]
columns += common_columns
customers_in = get_customer_stats(filters, tree_view=True)
territory_dict = {}
for t in frappe.db.sql('''SELECT name, lft, parent_territory, is_group FROM `tabTerritory` ORDER BY lft''', as_dict=1):
territory_dict.update({
t.name: {
'parent': t.parent_territory,
'is_group': t.is_group
}
})
depth_map = frappe._dict()
for name, info in territory_dict.items():
default = depth_map.get(info['parent']) + 1 if info['parent'] else 0
depth_map.setdefault(name, default)
data = []
for name, indent in depth_map.items():
condition = customers_in.get(name)
new = customers_in[name]['new'] if condition else [0, 0.0]
repeat = customers_in[name]['repeat'] if condition else [0, 0.0]
temp = {
'territory': name,
'parent_territory': territory_dict[name]['parent'],
'indent': indent,
'new_customers': new[0],
'repeat_customers': repeat[0],
'total': new[0] + repeat[0],
'new_customer_revenue': new[1],
'repeat_customer_revenue': repeat[1],
'total_revenue': new[1] + repeat[1],
'bold': 0 if indent else 1
}
data.append(temp)
loop_data = sorted(data, key=lambda k: k['indent'], reverse=True)
for ld in loop_data:
if ld['parent_territory']:
parent_data = [x for x in data if x['territory'] == ld['parent_territory']][0]
for key in parent_data.keys():
if key not in ['indent', 'territory', 'parent_territory', 'bold']:
parent_data[key] += ld[key]
return columns, data, None, None, None, 1
def get_customer_stats(filters, tree_view=False):
""" Calculates number of new and repeated customers and revenue. """
company_condition = ''
if filters.get('company'):
company_condition = ' and company=%(company)s'
customers = []
customers_in = {}
for si in frappe.db.sql('''select territory, posting_date, customer, base_grand_total from `tabSales Invoice`
where docstatus=1 and posting_date <= %(to_date)s
{company_condition} order by posting_date'''.format(company_condition=company_condition),
filters, as_dict=1):
key = si.territory if tree_view else si.posting_date.strftime('%Y-%m')
new_or_repeat = 'new' if si.customer not in customers else 'repeat'
customers_in.setdefault(key, {'new': [0, 0.0], 'repeat': [0, 0.0]})
# if filters.from_date <= si.posting_date.strftime('%Y-%m-%d'):
if getdate(filters.from_date) <= getdate(si.posting_date):
customers_in[key][new_or_repeat][0] += 1
customers_in[key][new_or_repeat][1] += si.base_grand_total
if new_or_repeat == 'new':
customers.append(si.customer)
return customers_in
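# A minimal sketch (hypothetical figures) of the structure that
# get_customer_stats() returns: keys are 'YYYY-MM' strings, or territory names
# when tree_view=True, and each value holds [count, revenue] for new and
# repeat customers, e.g.
#   {'2021-03': {'new': [2, 4500.0], 'repeat': [5, 12000.0]}}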
| 1.890625
| 2
|
hard-gists/a1178bf10729a27184b1/snippet.py
|
jjhenkel/dockerizeme
| 21
|
12785243
|
<filename>hard-gists/a1178bf10729a27184b1/snippet.py
#!/usr/bin/env python
# -*- coding: utf-8
#
# murmurCollectd.py - "murmur stats (User/Bans/Uptime/Channels)" script for collectd
# Copyright (c) 2015, Nils / <EMAIL>
#
# munin-murmur.py - "murmur stats (User/Bans/Uptime/Channels)" script for munin.
# Copyright (c) 2014, Natenom / <EMAIL>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the developer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import collectd
import Ice, sys
import Murmur
#Path to Murmur.ice, this is default for Debian
iceslice='/usr/share/slice/Murmur.ice'
#Includepath for Ice, this is default for Debian
iceincludepath="/usr/share/Ice/slice"
#Murmur-Port (not needed to work, only for display purposes)
serverport=64738
#Port where ice listen
iceport=6502
#Ice Password to get read access.
#If there is no such var in your murmur.ini, this can have any value.
#You can use the values of icesecret, icesecretread or icesecretwrite in your murmur.ini
icesecret="secureme"
#MessageSizeMax; increase this value, if you get a MemoryLimitException.
# Also check this value in murmur.ini of your Mumble-Server.
# This value is being interpreted in kibiBytes.
messagesizemax="65535"
Ice.loadSlice("--all -I%s %s" % (iceincludepath, iceslice))
props = Ice.createProperties([])
props.setProperty("Ice.MessageSizeMax", str(messagesizemax))
props.setProperty("Ice.ImplicitContext", "Shared")
id = Ice.InitializationData()
id.properties = props
ice = Ice.initialize(id)
ice.getImplicitContext().put("secret", icesecret)
meta = Murmur.MetaPrx.checkedCast(ice.stringToProxy("Meta:tcp -h 127.0.0.1 -p %s" % (iceport)))
try:
server=meta.getServer(1)
except Murmur.InvalidSecretException:
    print('Given icesecretread password is wrong.')
ice.shutdown()
sys.exit(1)
import time
def dispatch(type_instance, value):
"""
    This function dispatches the value to collectd. The metric name is given
    by type_instance.
    :param type_instance: The name of the value for collectd
    :type type_instance: str
    :param value: The value to log
    :type value: int
"""
val = collectd.Values(plugin='murmur')
val.type = 'gauge'
val.type_instance = type_instance
val.values = [value]
val.dispatch()
def read_callback(data=None):
"""
The read callback for Collectd. This function gets called on a read from
collectd.
"""
#count users
usersnotauth=0
users=server.getUsers()
for key in users.keys():
if (users[key].userid == -1):
usersnotauth+=1
dispatch("users", len(users))
#dispatch("uptime" float(meta.getUptime())/60/60/24)
dispatch("chancount",len(server.getChannels()))
dispatch("bancount", len(server.getBans()))
dispatch("usersnotauth", usersnotauth)
def shutdown():
"""
    This function is called by collectd on shutdown.
"""
ice.shutdown()
# Register the callbacks with collectd
collectd.register_read(read_callback)
collectd.register_shutdown(shutdown)
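# A minimal sketch of loading this script through collectd's python plugin
# (the ModulePath and module name are assumptions -- they must match where and
# under what name this file is installed):
#
#   <Plugin python>
#       ModulePath "/usr/lib/collectd/python"
#       Import "murmurCollectd"
#   </Plugin>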
| 1.210938
| 1
|
pyble/const/characteristic/date_time.py
|
bgromov/PyBLEWrapper
| 14
|
12785244
|
<reponame>bgromov/PyBLEWrapper
NAME="Date Time"
UUID=0x2A08
| 1.125
| 1
|
2016/day1-alt.py
|
bloy/adventofcode
| 0
|
12785245
|
#!/usr/bin/env python
from __future__ import unicode_literals
from functools import reduce  # reduce is not a builtin on Python 3
def solve1(instructions):
def step(state, instruction):
position = state[0]
direction = state[1]
turn = complex(0, 1) if instruction[0] == 'L' else complex(0, -1)
direction = direction * turn
position = position + (direction * int(instruction[1:]))
return (position, direction)
position = complex(0, 0)
direction = complex(1, 0)
state = (position, direction)
return reduce(step, instructions, state)
if __name__ == '__main__':
input_text = """R3, R1, R4, L4, R3, R1, R1, L3, L5, L5, L3, R1, R4, L2, L1, R3, L3, R2, R1, R1, L5, L2, L1, R2, L4, R1, L2, L4, R2, R2, L2, L4, L3, R1, R4, R3, L1, R1, L5, R4, L2, R185, L2, R4, R49, L3, L4, R5, R1, R1, L1, L1, R2, L1, L4, R4, R5, R4, L3, L5, R1, R71, L1, R1, R186, L5, L2, R5, R4, R1, L5, L2, R3, R2, R5, R5, R4, R1, R4, R2, L1, R4, L1, L4, L5, L4, R4, R5, R1, L2, L4, L1, L5, L3, L5, R2, L5, R4, L4, R3, R3, R1, R4, L1, L2, R2, L1, R4, R2, R2, R5, R2, R5, L1, R1, L4, R5, R4, R2, R4, L5, R3, R2, R5, R3, L3, L5, L4, L3, L2, L2, R3, R2, L1, L1, L5, R1, L3, R3, R4, R5, L3, L5, R1, L3, L5, L5, L2, R1, L3, L1, L3, R4, L1, R3, L2, L2, R3, R3, R4, R4, R1, L4, R1, L5"""
instructions = input_text.split(', ')
print(solve1(instructions))
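# The puzzle answer is the Manhattan distance of the final position; a small
# follow-up sketch using the state returned by solve1() (no new assumptions,
# just the complex-number representation above):
#   position, _ = solve1(instructions)
#   print(int(abs(position.real) + abs(position.imag)))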
| 3.5625
| 4
|
scripts/visualize/match.py
|
facebookresearch/banmo
| 201
|
12785246
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# TODO: pass ft_cse to use fine-tuned feature
# TODO: pass fine_steps -1 to use fine samples
from absl import flags, app
import sys
sys.path.insert(0,'')
sys.path.insert(0,'third_party')
import numpy as np
from matplotlib import pyplot as plt
import matplotlib
import torch
import os
import glob
import pdb
import cv2
import trimesh
from scipy.spatial.transform import Rotation as R
import imageio
from utils.io import save_vid, str_to_frame, save_bones, draw_lines, vis_match
from utils.colors import label_colormap
from nnutils.train_utils import v2s_trainer
from nnutils.geom_utils import obj_to_cam, tensor2array, vec_to_sim3,\
Kmatinv, K2mat, K2inv, sample_xy, resample_dp,\
raycast
from nnutils.loss_utils import kp_reproj, feat_match, kp_reproj_loss
from ext_utils.util_flow import write_pfm
from ext_utils.flowlib import cat_imgflo
opts = flags.FLAGS
def construct_rays(dp_feats_rsmp, model, xys, rand_inds,
Rmat, Tmat, Kinv, near_far, flip=True):
device = dp_feats_rsmp.device
bs,nsample,_ =xys.shape
opts = model.opts
embedid=model.embedid
embedid = embedid.long().to(device)[:,None]
rays = raycast(xys, Rmat, Tmat, Kinv, near_far)
rtk_vec = rays['rtk_vec']
del rays
feats_at_samp = [dp_feats_rsmp[i].view(model.num_feat,-1).T\
[rand_inds[i].long()] for i in range(bs)]
feats_at_samp = torch.stack(feats_at_samp,0) # bs,ns,num_feat
# TODO implement for se3
if opts.lbs and model.num_bone_used>0:
bone_rts = model.nerf_body_rts(embedid)
bone_rts = bone_rts.repeat(1,nsample,1)
# TODO rearrange inputs
feats_at_samp = feats_at_samp.view(-1, model.num_feat)
xys = xys.view(-1,1,2)
if flip:
rtk_vec = rtk_vec.view(bs//2,2,-1).flip(1).view(rtk_vec.shape)
bone_rts = bone_rts.view(bs//2,2,-1).flip(1).view(bone_rts.shape)
rays = {'rtk_vec': rtk_vec,
'bone_rts': bone_rts}
return rays, feats_at_samp, xys
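# Shape note for construct_rays(), derived from the reshapes above:
# feats_at_samp comes back as (bs*nsample, num_feat) and xys as
# (bs*nsample, 1, 2); the entries of `rays` keep the layout produced by
# raycast(), flipped within each consecutive image pair when flip=True.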
def match_frames(trainer, idxs, nsample=200):
idxs = [int(i) for i in idxs.split(' ')]
bs = len(idxs)
opts = trainer.opts
device = trainer.device
model = trainer.model
model.eval()
# load frames and aux data
for dataset in trainer.evalloader.dataset.datasets:
dataset.load_pair = False
batch = []
for i in idxs:
batch.append( trainer.evalloader.dataset[i] )
batch = trainer.evalloader.collate_fn(batch)
model.set_input(batch)
rtk = model.rtk
Rmat = rtk[:,:3,:3]
Tmat = rtk[:,:3,3]
Kmat = K2mat(rtk[:,3,:])
kaug = model.kaug # according to cropping, p = Kaug Kmat P
Kaug = K2inv(kaug)
Kinv = Kmatinv(Kaug.matmul(Kmat))
near_far = model.near_far[model.frameid.long()]
dp_feats_rsmp = model.dp_feats
# construct rays for sampled pixels
rand_inds, xys = sample_xy(opts.img_size, bs, nsample, device,return_all=False)
rays, feats_at_samp, xys = construct_rays(dp_feats_rsmp, model, xys, rand_inds,
Rmat, Tmat, Kinv, near_far)
model.update_delta_rts(rays)
# re-project
with torch.no_grad():
pts_pred = feat_match(model.nerf_feat, model.embedding_xyz, feats_at_samp,
model.latest_vars['obj_bound'],grid_size=20,is_training=False)
pts_pred = pts_pred.view(bs,nsample,3)
xy_reproj = kp_reproj(pts_pred, model.nerf_models, model.embedding_xyz, rays)
# draw
imgs_trg = model.imgs.view(bs//2,2,-1).flip(1).view(model.imgs.shape)
xy_reproj = xy_reproj.view(bs,nsample,2)
xys = xys.view(bs,nsample, 2)
sil_at_samp = torch.stack([model.masks[i].view(-1,1)[rand_inds[i]] \
for i in range(bs)],0) # bs,ns,1
for i in range(bs):
img1 = model.imgs[i]
img2 = imgs_trg[i]
img = torch.cat([img1, img2],2)
valid_idx = sil_at_samp[i].bool()[...,0]
p1s = xys[i][valid_idx]
p2s = xy_reproj[i][valid_idx]
p2s[...,0] = p2s[...,0] + img1.shape[2]
img = draw_lines(img, p1s,p2s)
cv2.imwrite('tmp/match_%04d.png'%i, img)
# visualize matching error
if opts.render_size<=128:
with torch.no_grad():
rendered, rand_inds = model.nerf_render(rtk, kaug, model.embedid,
nsample=opts.nsample, ndepth=opts.ndepth)
xyz_camera = rendered['xyz_camera_vis'][0].reshape(opts.render_size**2,-1)
xyz_canonical = rendered['xyz_canonical_vis'][0].reshape(opts.render_size**2,-1)
skip_idx = len(xyz_camera)//50 # vis 50 rays
trimesh.Trimesh(xyz_camera[0::skip_idx].reshape(-1,3).cpu()).\
export('tmp/match_camera_pts.obj')
trimesh.Trimesh(xyz_canonical[0::skip_idx].reshape(-1,3).cpu()).\
export('tmp/match_canonical_pts.obj')
vis_match(rendered, model.masks, model.imgs,
bs,opts.img_size, opts.ndepth)
## construct rays for all pixels
#rand_inds, xys = sample_xy(opts.img_size, bs, nsample, device,return_all=True)
#rays, feats_at_samp, xys = construct_rays(dp_feats_rsmp, model, xys, rand_inds,
# Rmat, Tmat, Kinv, near_far, flip=False)
#with torch.no_grad():
# pts_pred = feat_match(model.nerf_feat, model.embedding_xyz, feats_at_samp,
# model.latest_vars['obj_bound'],grid_size=20,is_training=False)
# pts_pred = pts_pred.view(bs,opts.render_size**2,3)
# proj_err = kp_reproj_loss(pts_pred, xys, model.nerf_models,
# model.embedding_xyz, rays)
# proj_err = proj_err.view(pts_pred.shape[:-1]+(1,))
# proj_err = proj_err/opts.img_size * 2
# results = {}
# results['proj_err'] = proj_err
## visualize current error stats
#feat_err=model.latest_vars['fp_err'][:,0]
#proj_err=model.latest_vars['fp_err'][:,1]
#feat_err = feat_err[feat_err>0]
#proj_err = proj_err[proj_err>0]
#print('feat-med: %f'%(np.median(feat_err)))
#print('proj-med: %f'%(np.median(proj_err)))
#plt.hist(feat_err,bins=100)
#plt.savefig('tmp/viser_feat_err.jpg')
#plt.clf()
#plt.hist(proj_err,bins=100)
#plt.savefig('tmp/viser_proj_err.jpg')
# visualize codes
with torch.no_grad():
fid = torch.Tensor(range(0,len(model.impath))).cuda().long()
D=model.pose_code(fid)
D = D.view(len(fid),-1)
##TODO
#px = torch.Tensor(range(len(D))).cuda()
#py = px*2
#pz = px*5+1
#D = torch.stack([px,py,pz],-1)
D = D-D.mean(0)[None]
A = D.T.matmul(D)/D.shape[0] # fxf
U,S,V=torch.svd(A) #
code_proj_3d=D.matmul(V[:,:3])
cmap = matplotlib.cm.get_cmap('cool')
time = np.asarray(range(len(model.impath)))
time = time/time.max()
code_proj_3d=code_proj_3d.detach().cpu().numpy()
trimesh.Trimesh(code_proj_3d, vertex_colors=cmap(time)).export('tmp/0.obj')
#plt.figure(figsize=(16,16))
plot_stack = []
weight_dir = opts.model_path.rsplit('/',1)[0]
bne_path = sorted(glob.glob('%s/%s-*bne-mrender*.jpg'%\
(weight_dir, opts.seqname)))
img_path = model.impath.copy()
## remove the last img for each video to make shape consistent with bone renders
#for i in model.data_offset[1:][::-1]:
# img_path.remove(img_path[i-1])
# code_proj_3d = np.delete(code_proj_3d, i-1,0)
# plot the first video
img_path = img_path [:model.data_offset[1]-2]
code_proj_3d = code_proj_3d[:model.data_offset[1]-2]
try:
bne_path = bne_path [:model.data_offset[1]-2]
except:
pass
for i in range(len(code_proj_3d)):
plt.plot(code_proj_3d[i,0], code_proj_3d[i,1], color=cmap(time[i]), marker='o')
plt.annotate(str(i), (code_proj_3d[i,0], code_proj_3d[i,1]))
plt.xlim(code_proj_3d[:,0].min(), code_proj_3d[:,0].max())
plt.ylim(code_proj_3d[:,1].min(), code_proj_3d[:,1].max())
fig = plt.gcf()
fig.canvas.draw()
plot = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
plot = plot.reshape(fig.canvas.get_width_height()[::-1] + (3,))
print('plot pose code of frame id:%03d'%i)
if len(bne_path) == len(code_proj_3d):
bneimg = cv2.imread(bne_path[i])
bneimg = cv2.resize(bneimg,\
(bneimg.shape[1]*plot.shape[0]//bneimg.shape[0], plot.shape[0]))
img=cv2.imread(img_path[i])[:,:,::-1]
img = cv2.resize(img,\
(img.shape[1]*plot.shape[0]//img.shape[0], plot.shape[0]))
plot = np.hstack([img, bneimg, plot])
plot_stack.append(plot)
save_vid('tmp/code', plot_stack, suffix='.mp4',
upsample_frame=150.,fps=30)
save_vid('tmp/code', plot_stack, suffix='.gif',
upsample_frame=150.,fps=30)
# vis dps
cv2.imwrite('tmp/match_dpc.png', model.dp_vis[model.dps[0].long()].cpu().numpy()*255)
def main(_):
opts.img_size=opts.render_size
trainer = v2s_trainer(opts, is_eval=True)
data_info = trainer.init_dataset()
trainer.define_model(data_info)
#write matching function
img_match = match_frames(trainer, opts.match_frames)
if __name__ == '__main__':
app.run(main)
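# A minimal sketch of invoking this script (only --match_frames is consumed
# directly here; the other flag names are assumptions based on the opts used
# above):
#   python scripts/visualize/match.py --seqname <seq> --model_path <weights> \
#       --match_frames "0 200"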
| 1.421875
| 1
|
caption/models/__init__.py
|
Unbabel/caption
| 3
|
12785247
|
# -*- coding: utf-8 -*-
import logging
import pytorch_lightning as ptl
import pandas as pd
import os
from .taggers import TransformerTagger
from .language_models import MaskedLanguageModel
str2model = {
"TransformerTagger": TransformerTagger,
"MaskedLanguageModel": MaskedLanguageModel,
}
def build_model(hparams) -> ptl.LightningModule:
"""
Function that builds an estimator model from the HyperOptArgumentParser
:param hparams: HyperOptArgumentParser
"""
return str2model[hparams.model](hparams)
def add_model_args(parser, model: str):
    try:
        return str2model[model].add_model_specific_args(parser)
    except KeyError:
        raise Exception(f"{model} is not a valid model type!")
| 2.5625
| 3
|
extras/api/urls.py
|
maznu/peering-manager
| 127
|
12785248
|
from peering_manager.api import OrderedDefaultRouter
from . import views
router = OrderedDefaultRouter()
router.APIRootView = views.ExtrasRootView
router.register("ix-api", views.IXAPIViewSet)
router.register("job-results", views.JobResultViewSet)
router.register("webhooks", views.WebhookViewSet)
app_name = "extras-api"
urlpatterns = router.urls
| 1.632813
| 2
|
HM0_tiFire/tiFire.py
|
PsycoTodd/TaichiJourney
| 0
|
12785249
|
<reponame>PsycoTodd/TaichiJourney
import taichi as ti
import math
# ti.init(debug=True, arch=ti.cpu)
ti.init(arch=ti.opengl)
def vec(*xs):
return ti.Vector(list(xs))
def mix(x, y, a):
return x * (1.0 - a) + y * a
GUI_TITLE = "Fire"
w, h = wh = (640, 480) # GUI size
pixels = ti.Vector(3, dt=ti.f32, shape=wh)
iResolution = vec(w, h)
@ti.func
def noise(p):
i = ti.floor(p)
a = i.dot(vec(1.0, 57.0, 21.0)) + vec(0.0, 57.0, 21.0, 78.0)
f = ti.cos((p - i) * math.acos(-1.0)) * (-0.5) + 0.5
a = mix(ti.sin(ti.cos(a) * a), ti.sin(ti.cos(1.0 + a) * (1.0 + a)), f[0])
a[0] = mix(a[0], a[1], f[1])
a[1] = mix(a[2], a[3], f[1])
return mix(a[0], a[1], f[2])
@ti.func
def sphere(p, spr):
spr_xyz = vec(spr[0], spr[1], spr[2])
w = spr[3]
return (spr_xyz - p).norm() - w
@ti.func
def flame(p, t):
d = sphere(p * vec(1.0, 0.5, 1.0), vec(0.0, -1.0, 0.0, 1.0))
return d + (noise(p + vec(0.0, t * 2.0, 0.0)) + noise(p * 3.0) * 0.5) * 0.25 * (p[1])
@ti.func
def scene(p, t):
return min(100.0 - p.norm(), abs(flame(p, t)))
@ti.func
def raymarch(org, dir, t):
d = 0.0
glow = 0.0
eps = 0.02
p = org
glowed = False
for i in range(64):
d = scene(p, t) + eps
p += d * dir
if( d > eps):
if(flame(p, t) < 0.0):
glowed = True
if(glowed):
glow = float(i) / 64.0
return vec(p[0], p[1], p[2], glow)
@ti.func
def mainImage(iTime, i, j):
fragCoord = vec(i, j)
# Normalized pixel coordinates (from 0 to 1)
uv = fragCoord / iResolution
v = -1.0 + 2.0 * uv
org = vec(0.0, -2.0, 4.0)
dir = vec(v[0] * 1.6, -v[1], -1.5)
dir /= dir.norm()
p = raymarch(org, dir, iTime)
glow = p[3]
col = mix(vec(1.0, 0.5, 0.1, 1.0), vec(0.1, 0.5, 1.0, 1.0), p[1] * 0.02 + 0.4)
# Output to screen
fragColor = mix(vec(0.0, 0.0, 0.0, 0.0), col, pow(glow * 2.0, 4.0))
return fragColor
@ti.kernel
def render(t: ti.f32):
"render ??????? mainImage ????"
for i, j in pixels:
col4 = mainImage(t, i, j)
pixels[i, j] = vec(col4[0], col4[1], col4[2])
return
def main(output_img=False):
"output_img: ??????"
gui = ti.GUI(GUI_TITLE, res=wh)
for ts in range(1000000):
if gui.get_event(ti.GUI.ESCAPE):
exit()
render(ts * 0.03)
gui.set_image(pixels.to_numpy())
if output_img:
gui.show(f'{ts:04d}.png')
else:
gui.show()
if __name__ == '__main__':
main(output_img=False)
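# Running this file opens a 640x480 GUI window titled "Fire"; press ESC to
# quit. Passing output_img=True to main() additionally writes one PNG per
# frame (named by the frame index) via gui.show().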
| 2.21875
| 2
|
hyaline/errors/MessageErrors.py
|
5elenay/hyaline
| 11
|
12785250
|
<reponame>5elenay/hyaline
class EditMessageFailed(Exception):
    """Raised when editing a message fails."""
    pass
class DeleteMessageFailed(Exception):
    """Raised when deleting a message fails."""
    pass
class BulkDeleteMessageFailed(Exception):
    """Raised when bulk-deleting messages fails."""
    pass
class AddReactionToMessageFailed(Exception):
    """Raised when adding a reaction to a message fails."""
    pass
class RemoveReactionToMessageFailed(Exception):
    """Raised when removing a reaction from a message fails."""
    pass
class FetchReactionsFromMessageFailed(Exception):
    """Raised when fetching reactions from a message fails."""
    pass
class RemoveReactionsFromMessageFailed(Exception):
    """Raised when removing reactions from a message fails."""
    pass
class CrossPostMessageFailed(Exception):
    """Raised when crossposting a message fails."""
    pass
| 2.375
| 2
|