#!/usr/bin/env python
# -*- coding:utf-8 -*-
import json
from logger import Logger
import xiaomi_data_json
import twilio_sms
import i_email
import schedule # pip install schedule
import socket
import sys
import os
import net_reconnect
import ding_talk_robot
import datetime
import global_config as gl
name = "MiGatewayMonitor"
version = "v1.0.2.191008"
# Global message store
globalMiData = {}
def get_gateway_heart():
SENDERIP = "0.0.0.0"
MYPORT = 9898
MYGROUP = '224.0.0.50'
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
# allow multiple sockets to use the same PORT number
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind to the port that we know will receive multicast data
sock.bind((SENDERIP, MYPORT))
# tell the kernel that we are a multicast socket
# sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 255)
# Tell the kernel that we want to add ourselves to a multicast group
# The address for the multicast group is the third param
status = sock.setsockopt(socket.IPPROTO_IP,
socket.IP_ADD_MEMBERSHIP,
socket.inet_aton(MYGROUP) + socket.inet_aton(SENDERIP))
# Set a 61-second timeout to avoid blocking the network check, etc.
sock.settimeout(61)
data, addr = sock.recvfrom(1024)
data_str = str(data, encoding='utf-8')
sock.close()
return data_str
# Monitoring routine
def mi_run():
udp_group_data = get_gateway_heart()
# print(udp_group_data)
global globalMiData
# Group by sid
udp_group_data_json = json.loads(udp_group_data)
# Add a timestamp
udp_group_data_json["time"] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
globalMiData[data_json.get_custom(udp_group_data,'sid')] = udp_group_data_json
print(globalMiData)
# send_ding_talk(globalMiData)
model = data_json.get_model(udp_group_data)
if model == 'sensor_wleak.aq1': # water leak sensor
cmd = data_json.get_cmd(udp_group_data)
if cmd == 'report': # only handle reported data
data = data_json.get_data(udp_group_data) # get the status data
status = data_json.get_custom(data, 'status')
if status == 'leak': # a leak occurred, send an SMS
info = '水浸传感器报警'
send_all_msg(info)
# send message
info = 'leak lou shui bao jing'
print(info)
sms.send_sms(gl.sms_send1, info)
sms.send_sms(gl.sms_send2, info)
elif status == 'no_leak': # alarm reported but no leak present
info = '水浸传感器报警解除'
send_all_msg(info)
# send message
info = 'no_leak bao jing jie chu'
print(info)
sms.send_sms(gl.sms_send1, info)
sms.send_sms(gl.sms_send2, info)
else:
pass
else:
pass
else: # other sensors
pass
# Send email
def send_family_email(email_content):
receivers = gl.email_send # recipients; can be your QQ mailbox or any other mailbox
mail_tile = '家庭卫士 ' + version
mail = i_email.iEmail()
bRet = mail.send_mail(receivers, email_content, mail_tile)
if bRet:
print("邮件发送成功")
else:
print("邮件发送失败")
# Send a DingTalk message
def send_ding_talk(text):
dingTalkRobot = ding_talk_robot.iDingTalkRobot()
dingTalkRobot.sendText(text)
# Send notifications: email, DingTalk, SMS, etc.
def send_all_msg(text):
send_family_email(text)
send_ding_talk(text)
# Restart the program
def restart_program():
python = sys.executable
print("restart_program")
os.execl(python, python, *sys.argv)
# Network check
def netCheck():
bRet = net_reconnect.is_reachable("www.baidu.com") or net_reconnect.is_reachable("www.baidu.com") # check twice; `or` short-circuits if the first check succeeds
# print(bRet)
if (bRet):
print("network ok")
else:
info = '网络异常,重启程序...'
print(info)
send_all_msg(info)
# restart the program
restart_program()
def sendMiData():
global globalMiData
send_ding_talk(globalMiData)
def sendHeartbeatData():
global globalMiData
info = '家庭卫士程序心跳包\n' + json.dumps(globalMiData)
send_all_msg(info)
sys.stdout = Logger()
data_json = xiaomi_data_json.xiaomi_data_json
sms = twilio_sms.sms_client
# mail = i_email.iEmail()
# dingTalkRobot = ding_talk_robot.iDingTalkRobot()
if __name__ == '__main__':
info = '家庭卫士程序启动'
print(info)
send_all_msg(info)
# Scheduled task 1
schedule.every().day.at("13:10").do(sendHeartbeatData)
# Scheduled task 2
# schedule.every(10).seconds.do(netCheck)
schedule.every(30).minutes.do(netCheck)
# Scheduled task 3
schedule.every().day.at("8:55").do(sendMiData)
schedule.every().day.at("18:05").do(sendMiData)
while True:
schedule.run_pending()
try:
mi_run()
except Exception as e:
print('mi_run Error:', e)
|
from .authenticator import Authenticator
from .websocket_client import WebSocketClient
from .constants import (
API_URL,
WEB_BASE_URL,
WS_BASE_URL,
START_SPLASH_TEXT,
END_SPLASH_TEXT,
codes,
)
from .tester import Tester
from .web_server import WebServer
from .logger import logger
class LocalServer:
"""
Main class for the local server
"""
def __init__(self):
self.status_code = codes.INTERNAL_SETUP
if "_is_from_test" not in self.__dict__:
self._is_from_test = False
# WebSocket client
self.ws_client = WebSocketClient(self, WS_BASE_URL)
# WebServer
self.web_server = WebServer(self)
# User authentication
self.authenticator = Authenticator(self)
# Runs tests for the system
self.tester = Tester(self)
# for testing, do not touch
@property
def status_code_name(self):
return codes.to_name(self.status_code)
def run(self):
"""
Method for starting the system
Runs all the tests
Authenticates with a user
Establishes Websocket connection
:return: Nothing
"""
print(START_SPLASH_TEXT)
logger.info("Starting Local Server")
# Start the Web Server
try:
self.set_status(codes.WEBSERVER_SETUP)
self.web_server.start()
except:
logger.error("COULD NOT START THE WEBSERVER")
self.set_status(codes.WEBSERVER_SETUP_ERROR)
# tests system
self.tester.establish_successful_tests()
# work on the authenticator
if not self.authenticator.is_authenticated:
self.authenticator.authenticate()
self.ws_client.establish_connection()
def stop(self, code=None):
"""
Stops the system
:param code: Code or reason for the shut down
:return: None
"""
logger.info("SHUTTING DOWN...")
self.web_server.stop()
logger.debug("good bye <3")
print(END_SPLASH_TEXT)
# exit(code)
def set_status(self, code):
"""
Sets the internal status code
:param code: int of the status code
:return: None
"""
self.status_code = code
|
quantity = 3
item = 'widgets'
price = 4995.345
myorder = "I want {} pieces of {} for ${:,.2f}."
print(myorder.format(quantity, item, price))
>>>I want 3 pieces of widgets for $4,995.35.
# : introduces the format spec inside {}
# $ is a literal character outside the {}
# ,.2f means comma thousands separator, fixed-point with 2 decimal places
# The string can be defined with either single or double quotes
# Can all be written on one line with a direct ".format()" call:
print("User {} is {} years old.".format("Tom", 17))
>>>User Tom is 17 years old.
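# Added illustration (not in the original notes): the same format spec works
# inside an f-string, which avoids the separate .format() call and reuses the
# variables defined above.
print(f"I want {quantity} pieces of {item} for ${price:,.2f}.")
# produces the same output as the .format() version above
print(f"User {'Tom'} is {17} years old.")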
|
def combinationSum(candidates: list[int], target: int) -> list[list[int]]:
solutions = set()
checked = set()
stack = []
def recur(total):
seq = tuple(sorted(stack[:]))
if seq in checked:
return
checked.add(seq)
if total == target:
solutions.add(seq)
return
elif total > target:
return
else:
for i in candidates:
stack.append(i)
recur(total + i)
stack.pop()
recur(0)
return [list(x) for x in solutions]
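# Added usage sketch (not part of the original function): candidate values may
# be reused any number of times; the result order is arbitrary because the
# combinations are collected in a set.
if __name__ == "__main__":
    print(combinationSum([2, 3, 6, 7], 7))  # [[2, 2, 3], [7]] in some order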
|
import numpy as np
import tensorflow as tf
class Model(object):
def __init__(self, batch_size, num_rollouts, entTotal, relTotal, embedding_size, weight_decay, learning_rate):
self.batch_size = batch_size
self.num_rollouts = num_rollouts # neg_size == num_rollouts - 1 when negative sampling
self.weight_decay = weight_decay
self.learning_rate = learning_rate
self.entTotal = entTotal
self.relTotal = relTotal
self.embedding_size = embedding_size
self.optimizer = tf.train.AdamOptimizer(learning_rate)
self.input_def()
self.embedding_def()
self.loss_def()
self.predict_def()
def get_all_instance(self, in_batch=False):
return [self.batch_h, self.batch_t, self.batch_r]
def get_predict_instance(self):
return [self.predict_h, self.predict_t, self.predict_r]
def input_def(self):
self.batch_h = tf.placeholder(tf.int32, [self.batch_size * self.num_rollouts, 2])
self.batch_t = tf.placeholder(tf.int32, [self.batch_size * self.num_rollouts, 2])
self.batch_r = tf.placeholder(tf.int32, [self.batch_size * self.num_rollouts, 2])
self.predict_h = tf.placeholder(tf.int32, [None])
self.predict_t = tf.placeholder(tf.int32, [None])
self.predict_r = tf.placeholder(tf.int32, [None])
def embedding_def(self):
pass
def loss_def(self):
pass
def predict_def(self):
pass
class DistMult(Model):
def _calc(self, h, t, r):
return h * r * t
def embedding_def(self):
# TODO: align with generator, vocab?
self.ent_embeddings = tf.get_variable(name="ent_embeddings", shape=[self.entTotal, self.embedding_size])
self.rel_embeddings = tf.get_variable(name="rel_embeddings", shape=[self.relTotal, self.embedding_size])
self.parameter_lists = {"ent_embeddings": self.ent_embeddings,
"rel_embeddings": self.rel_embeddings}
def loss_def(self):
# Obtaining the initial configuration of the model
# To get positive triples and negative triples for training
# To get labels for the triples, positive triples as 1 and negative triples as -1
h, t, r = self.get_all_instance()
# Embedding entities and relations of triples
e_h = tf.nn.embedding_lookup(self.ent_embeddings, h)
e_t = tf.nn.embedding_lookup(self.ent_embeddings, t)
e_r = tf.nn.embedding_lookup(self.rel_embeddings, r)
# Calculating score functions for all positive triples and negative triples
res = tf.reduce_sum(self._calc(e_h, e_t, e_r), -1, keep_dims=False)
# max. E[log(1 - D)] + E[log D] = min. - E[log sigma(-res)] - E[log sigma(res)] = min. - E[log sigma(y * res)]
loss_func = - tf.reduce_mean(tf.tanh(res[:, 0] - res[:, 1]))
regul_func = tf.reduce_mean(e_h ** 2) + tf.reduce_mean(e_t ** 2) + tf.reduce_mean(e_r ** 2)
# Calculating loss to get what the framework will optimize
self.loss = loss_func + self.weight_decay * regul_func
self.train_op = self.optimizer.minimize(self.loss)
def predict_def(self):
predict_h, predict_t, predict_r = self.get_predict_instance()
predict_h_e = tf.nn.embedding_lookup(self.ent_embeddings, predict_h)
predict_t_e = tf.nn.embedding_lookup(self.ent_embeddings, predict_t)
predict_r_e = tf.nn.embedding_lookup(self.rel_embeddings, predict_r)
# min E[log(1 - D)] -> max. E[log D] -> reward func.
# self.predict = tf.log(tf.nn.sigmoid(tf.reduce_sum(self._calc(predict_h_e, predict_t_e, predict_r_e), 1)))
self.predict = tf.reduce_sum(self._calc(predict_h_e, predict_t_e, predict_r_e), -1)
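# Added illustration (not part of the original model): the DistMult score used
# above is simply an element-wise product of head, relation and tail embeddings
# summed over the embedding dimension; shown here with plain numpy on toy
# 3-dimensional vectors (the values are arbitrary).
def _distmult_score_demo():
    h = np.array([0.1, 0.2, 0.3])
    r = np.array([1.0, 0.5, 2.0])
    t = np.array([0.3, 0.2, 0.1])
    # mirrors tf.reduce_sum(self._calc(e_h, e_t, e_r), -1) for a single triple
    return np.sum(h * r * t)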
|
"""
Read esri filegdb and keep original objectid
"""
from arcgis.features import FeatureLayerCollection
import fiona
import geopandas as gpd
import pandas as pd
from tohydamogml.config import COLNAME_OID
from shapely.geometry import Point, LineString
import pyproj
import warnings
def read_featureserver(url, layer_index):
"""Read featureservice with arcgis. Query the featureset to return everything. Return geopandas dataframe or pandas dataframe"""
collection = FeatureLayerCollection(url)
wkid = collection.properties['spatialReference']['wkid']
featureset = collection.layers[int(layer_index)]
if featureset.properties.geometryField is not None:
fieldnames = [field['name'] for field in featureset.properties.fields]
if COLNAME_OID in fieldnames:
col = COLNAME_OID
else:
cols = [name for name in fieldnames if name.startswith(COLNAME_OID)]
if len(cols) == 0:
raise ValueError(f"Can't find column starting with '{COLNAME_OID}', thus unable to query dataset")
else:
col = cols[0]
query_all = featureset.query(where=f'{col}>=0')
try:
geojson = query_all.to_geojson
gdf = gpd.read_file(geojson)
if type(gdf.crs) == pyproj.crs.crs.CRS:
if not gdf.crs.srs.endswith(str(wkid)):
gdf = gdf.set_crs(wkid, allow_override=True)
elif gdf.crs is not None:
if gdf.crs['init'] != wkid:
gdf = gdf.to_crs(epsg=wkid)
else:
warnings.warn("Check CRS")
else:
gdf.crs = f'EPSG:{wkid}'
except:
# For some reason, the geojson created from the esri dataset doesn't always get read by geopandas/fiona.
# If the geojson method fails, a manual operation is used to create the geodataframe anyway.
sdf = query_all.sdf
sdf['geometry'] = sdf.apply(lambda x: LineString([Point(xy[0], xy[1]) for xy in x['SHAPE']['paths'][0]]), axis=1)
gdf = gpd.GeoDataFrame(sdf)
gdf.crs = 'EPSG:'+str(wkid)
gdf[COLNAME_OID] = gdf[col].astype(int)
return gdf
else:
#Code adjusted from read_filegdb, but might not be needed
query_all = featureset.query(where=f'{COLNAME_OID}>=0')
json = query_all.to_geojson
df = pd.read_json(json) #Doesn't unpack properly! Under df['features'] are all features in another dict
df[COLNAME_OID] = df[COLNAME_OID].astype(int)
return df
def read_filegdb(filegdb, layer):
"""Read filegdb with fiona to get original objectid. Return geopandas dataframe or pandas dataframe"""
if layer in fiona.listlayers(filegdb):
features = _yield_features(filegdb, layer)
if next(features)["geometry"] is not None:
gdf = gpd.GeoDataFrame.from_features(features, crs=get_crs(filegdb, layer))
gdf[COLNAME_OID] = gdf[COLNAME_OID].astype(int)
return gdf
else:
df = pd.DataFrame.from_records(_yield_table(filegdb, layer))
df[COLNAME_OID] = df[COLNAME_OID].astype(int)
return df
else:
raise ValueError(f"layer '{layer}' not in layer list: {fiona.listlayers(filegdb)}")
def _yield_features(path, layer, colname_oid=COLNAME_OID):
"""Read filegdb with fiona to get original objectid"""
with fiona.open(path, 'r', layer=layer) as f:
for feature in f:
feature['properties'][colname_oid] = feature['id']
yield feature
def _yield_table(path, layer, colname_oid=COLNAME_OID):
"""Read filegdb table with fiona to get original objectid"""
with fiona.open(path, 'r', layer=layer) as f:
for feature in f:
feature['properties'][colname_oid] = feature['id']
yield feature['properties']
def get_crs(path, layer):
with fiona.open(path, 'r', layer=layer) as f:
if type(f.crs) == dict:
if 'init' in f.crs.keys():
return f.crs['init']
return None
if __name__ == '__main__':
a = read_featureserver('https://maps.brabantsedelta.nl/arcgis/rest/services/Extern/Kunstwerken/FeatureServer', '14')
mask = gpd.read_file(r"c:\local\TKI_WBD\aanvullende_data\Aa_of_Weerijs_v2.shp")
gdf = a[a.intersects(mask.unary_union)]
gdf.to_file(r'c:\Users\908367\Box\BH8519 WBD DHYDRO\BH8519 WBD DHYDRO WIP\04_GIS\kopie_server\Cat_A_Waterloop_Aa_of_Weerijs.shp')
gdf.to_file(r'c:\Users\908367\Box\BH8519 WBD DHYDRO\BH8519 WBD DHYDRO WIP\04_GIS\kopie_server\Cat_A_Waterloop_Aa_of_Weerijs.gpkg', driver='GPKG')
|
# Print statements with syntax errors (fixed).
print('Hello, world!')
print('This is a series')
print('of print statements.')
print('The end!')
print("But not really!")
print("Here's another print statement!")
print("And here's another one!")
print("This will be the last one of these initial print statements.")
|
import os
from flask import Flask, render_template, request
import luigi
from luigi.contrib.gcs import GCSTarget, GCSClient
import subprocess
from merge_video import MergeVideoAndAudio
app = Flask(__name__)
@app.route('/')
def hello_world():
target = os.environ.get('TARGET', 'World')
return 'Hello {}!\n'.format(target)
# http://localhost:8080/merge_video?youtube_id=asdf&text_id=pg_12
@app.route('/merge_video', methods=['GET'])
def merge_video():
youtube_id = request.args.get('youtube_id')
youtube_link = f'https://www.youtube.com/watch?v={youtube_id}'
text_id = request.args.get('text_id')
# --scheduler-url
# https://luigi.readthedocs.io/en/latest/central_scheduler.html
# $luigid --background --pidfile <PATH_TO_PIDFILE> --logdir <PATH_TO_LOGDIR> --state-path <PATH_TO_STATEFILE>
scheduler_url = os.environ.get('SCHEDULER', 'http://127.0.0.1:8082')
#if not num:
luigi.run(['detect.MergeVideoAndAudio',
'--gs-path-video', youtube_link, #'gs://amvideotest/Welcome_to_Adam_Does_Movies.mp4', # 'gs://amvideotest/battlefield1.mp4', #
'--text-generator','markov',
'--text-generator-source', 'gs://amvideotest/source/pg/pg345.txt', #'gs://amvideotest/source/pg/pg345.txt',
'--workers', '1',
'--scheduler-url', scheduler_url])
return f'Completed youtube_link: {youtube_link}\ntext_id: {text_id}'
if __name__ == "__main__":
app.run(debug=True,host='0.0.0.0',port=int(os.environ.get('PORT', 8080)))
|
"""
Snake, but multiplayer
Created by sheepy0125
2022-02-21
Client join state code
"""
### Setup ###
from multiplayer_snake.shared.common import pygame, Logger, hisock
from multiplayer_snake.shared.config_parser import parse
from multiplayer_snake.shared.shared_game import SharedGame
from multiplayer_snake.shared.pygame_tools import GlobalPygame
from multiplayer_snake.client.states.state import BaseState, update_state
from multiplayer_snake.client.snake import ClientSnakePlayer
CONFIG = parse()
GUI_CONFIG = CONFIG["gui"]
### States ###
class GameState(BaseState):
def __init__(self, client: hisock.client.ThreadedHiSockClient):
super().__init__(identifier="game")
self.key_enum = {
pygame.K_w: "up",
pygame.K_s: "down",
pygame.K_a: "left",
pygame.K_d: "right",
pygame.K_UP: "up",
pygame.K_DOWN: "up",
pygame.K_LEFT: "left",
pygame.K_RIGHT: "right",
}
self.client = client
self.snake = ClientSnakePlayer()
self.other_snake = ClientSnakePlayer()
self.grid = Grid()
self.update_called = False
@client.on("update")
def update(data: dict):
"""Called every frame"""
self.our_position = data["position"]
self.our_direction = data["direction"]
self.other_position = data["other_position"]
self.other_direction = data["other_direction"]
self.tail_length = data["tail_length"]
self.update_called = True
def handle_event(self, event: pygame.event.EventType):
# Get keyboard input
if event.type == pygame.KEYDOWN:
# event.key holds the key code of the key that was pressed
if event.key in self.key_enum:
self.next_move = self.key_enum[event.key]
def update(self):
if not self.update_called:
return
self.update_called = False
# Our snake
self.snake.update(self.our_position, self.our_direction, self.tail_length)
def draw(self):
GlobalPygame.window.fill("black")
# Grid
self.grid.draw()
GlobalPygame.window.blit(self.grid.grid_surf, (0, 0))
# Draw the snakes
self.snake.draw()
self.other_snake.draw()
def close(self):
self.client.close(emit_leave=True)
class Grid:
"""
A simple grid that uses the information stored in the
:class:`SharedGame` class
"""
def __init__(self):
self.grid_surf = pygame.Surface(
(SharedGame.window_width, SharedGame.window_height)
)
def draw(self):
# Draw lines onto the surface
for row in range(0, SharedGame.height):
pygame.draw.line(
self.grid_surf,
GUI_CONFIG["colors"]["grid"]["line"],
(row * SharedGame.grid_snap, 0),
(row * SharedGame.grid_snap, SharedGame.window_height),
)
for column in range(0, SharedGame.width):
pygame.draw.line(
self.grid_surf,
GUI_CONFIG["colors"]["grid"]["line"],
(0, column * SharedGame.grid_snap),
(SharedGame.window_width, column * SharedGame.grid_snap),
)
|
class Clock:
DEFAULT_TIME = '00:00:00'
IS_AM = 0
IS_PM = 1
IS_24H = 2
COUNTDOWN_TIMER = 0
COUNTDOWN_TO_TIME = 1
ELAPSED_TIME = 2
def __init__(self, index, clock_info, client=None):
self.index = index
self._initialize_with_clock_info(
name=clock_info.get('clockName'),
time=clock_info.get('clockTime'),
clock_type=clock_info.get('clockType'),
display_type=clock_info.get('clockIsPM'),
is_overrun=clock_info.get('clockOverrun'),
duration=clock_info.get('clockDuration'),
end_time=clock_info.get('clockEndTime'),
state=clock_info.get('clockState'),
)
self.client = client
def _initialize_with_clock_info(
self,
name=None,
time=None,
clock_type=None,
display_type=None,
is_overrun=False,
duration=None,
end_time=None,
state=False
):
self.name = name
self.time = time or self.DEFAULT_TIME
self.clock_type = clock_type or self.COUNTDOWN_TIMER
self.display_type = display_type or self.IS_24H
self.is_overrun = is_overrun
self.duration = duration or self.DEFAULT_TIME
self.end_time = end_time or self.DEFAULT_TIME
self.state = state
@property
def settings(self):
return {
'clockIndex': self.index,
'clockTime': self.time,
'clockName': self.name,
'clockType': self.clock_type,
'clockIsPM': self.display_type,
'clockOverrun': self.is_overrun,
}
def update(self, new_settings):
if self.client:
command = {'action': 'clockUpdate'}
settings = self.settings
settings.update(new_settings)
command.update(settings)
return self.client.async_send(command, expect_response=False)
def set_time(self, time):
if self.client:
return self.update({'clockTime': time})
def start(self):
if self.client:
command = {
'action': 'clockStart',
'clockIndex': self.index,
}
return self.client.async_send(command, expect_response=False)
def stop(self):
if self.client:
command = {
'action': 'clockStop',
'clockIndex': self.index,
}
return self.client.async_send(command, expect_response=False)
def reset(self):
if self.client:
command = {
'action': 'clockReset',
'clockIndex': self.index,
}
return self.client.async_send(command, expect_response=False)
def __repr__(self):
return f'<Clock: {self.name} | {self.index}>'
|
import os
import pytest
import mkdocs_jupyter
from mkdocs_jupyter.config import settings
pytestmark = [pytest.mark.pkg]
def test_import():
assert mkdocs_jupyter.__version__ is not None
assert mkdocs_jupyter.__version__ != "0.0.0"
assert len(mkdocs_jupyter.__version__) > 0
def test_assets_included():
mkdocs_html = os.path.join(settings.templates_dir, "mkdocs_html")
assert os.path.exists(os.path.join(mkdocs_html, "conf.json"))
assert os.path.exists(os.path.join(mkdocs_html, "notebook.html.j2"))
html_assets = os.path.join(mkdocs_html, "assets")
assert os.path.exists(os.path.join(html_assets, "clipboard.umd.js"))
assert os.path.exists(os.path.join(html_assets, "mkdocs-jupyter.css"))
assert os.path.exists(os.path.join(html_assets, "mkdocs-jupyter.css.map"))
assert os.path.exists(os.path.join(html_assets, "mkdocs-jupyter.js"))
# assert os.path.exists(os.path.join(html_assets, "mkdocs-jupyter.js.map"))
mkdocs_md = os.path.join(settings.templates_dir, "mkdocs_md")
assert os.path.exists(os.path.join(mkdocs_md, "conf.json"))
assert os.path.exists(os.path.join(mkdocs_md, "md-no-codecell.md.j2"))
|
class RawArgs:
def __init__(self,
cols_features, col_treatment, col_outcome, col_propensity,
col_cate, col_recommendation, min_propensity, max_propensity,
verbose, uplift_model_params, enable_ipw, propensity_model_params,
index_name, partition_name, runner, conditionally_skip):
self.cols_features = cols_features
self.col_treatment = col_treatment
self.col_outcome = col_outcome
self.col_propensity = col_propensity
self.col_cate = col_cate
self.col_recommendation = col_recommendation
self.min_propensity = min_propensity
self.max_propensity = max_propensity
self.verbose = verbose
self.uplift_model_params = uplift_model_params
self.enable_ipw = enable_ipw
self.propensity_model_params = propensity_model_params
self.index_name = index_name
self.partition_name = partition_name
self.runner = runner
self.conditionally_skip = conditionally_skip
|
import os
import ersa_utils
class BasicProcess(object):
"""
Process block is a basic running module for this repo; it runs the process after checking whether the function has
been run before, or it can be forced to re-run the process
"""
def __init__(self, name, path, func=None):
"""
:param name:name of the process, this will be used for the state file name
:param path: path to where the state file will be stored
:param func: process function, if None then it will be child class's process() function
"""
self.name = name
self.path = path
if func is None:
self.func = self.process
else:
self.func = func
self.state_file = os.path.join(self.path, '{}_state.txt'.format(self.name))
def process(self, **kwargs):
raise NotImplementedError()
def run(self, force_run=False, **kwargs):
"""
Run the process
:param force_run: if True, the process will run regardless of whether it has completed before
:param kwargs:
:return:
"""
# check if state file exists
state_exist = os.path.exists(self.state_file)
# run the function if force run or haven't run before
if force_run or not state_exist:
print(('Start running {}'.format(self.name)))
# write state log as incomplete
with open(self.state_file, 'w') as f:
f.write('Incomplete\n')
# run the process
self.func(**kwargs)
# write state log as complete
with open(self.state_file, 'w') as f:
f.write('Finished\n')
else:
# if it did not finish before, run the process again
if not self.check_finish():
self.func(**kwargs)
# write state log as complete
with open(self.state_file, 'w') as f:
f.write('Finished\n')
return self
def check_finish(self):
"""
check if state file exists
:return: True if it has finished
"""
state_exist = os.path.exists(self.state_file)
if state_exist:
with open(self.state_file, 'r') as f:
a = f.readlines()
if a[0].strip() == 'Finished':
return True
return False
class ValueComputeProcess(BasicProcess):
"""
Compute value for the given function, save value
Return the value if already exists
"""
def __init__(self, name, path, save_path, func=None):
"""
:param name:name of the process, this will be used for the state file name
:param path: path to where the state file will be stored
:param save_path: path to save the computed value
:param func: process function, if None then it will be child class's process() function
"""
self.save_path = save_path
self.val = None
super().__init__(name, path, func)
def run(self, force_run=False, **kwargs):
"""
Run the process
:param force_run: if True, the process will run regardless of whether it has completed before
:param kwargs:
:return:
"""
# check if state file exists
state_exist = os.path.exists(self.state_file)
# run the function if force run or haven't run before
if force_run or not state_exist:
print(('Start running {}'.format(self.name)))
# write state log as incomplete
with open(self.state_file, 'w') as f:
f.write('Incomplete\n')
# run the process
self.val = self.func(**kwargs)
# write state log as complete
with open(self.state_file, 'w') as f:
f.write('Finished\n')
ersa_utils.save_file(self.save_path, self.val)
else:
# if it did not finish before, run the process again
if not self.check_finish():
self.val = self.func(**kwargs)
ersa_utils.save_file(self.save_path, self.val)
# if already exists, load the file
self.val = ersa_utils.load_file(self.save_path)
# write state log as complete
with open(self.state_file, 'w') as f:
f.write('Finished\n')
return self
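# Added usage sketch (not part of the original module): a minimal subclass
# showing the intended pattern. The path '/tmp/states' is a placeholder and
# must exist; after the first successful run the state file makes later calls
# to run() skip the work unless force_run=True.
class _DummyProcess(BasicProcess):
    def process(self, **kwargs):
        print('doing the actual work with {}'.format(kwargs))
# _DummyProcess('dummy', '/tmp/states').run(value=42)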
|
import asyncio
import datetime
from typing import Dict, Union, List, Any
import pandas as pd
import numpy as np
from math import nan
from .errors import QuikLuaException
class ParamWatcher:
"""
A special class that decides which params have to be updated based on given interval and time passed since last update.
"""
def __init__(self):
self.lock = asyncio.Lock()
self._watched = pd.DataFrame(columns=['dt', 'key'])
self._watched['dt'] = self._watched['dt'].astype(float)
def subscribed(self, param_tuple_list):
dt_now = datetime.datetime.now().timestamp()
for (class_code, sec_code, param, update_interval) in param_tuple_list:
param = param.lower()
idx_key = '__'.join((class_code, sec_code, param))
self._watched.at[idx_key, 'dt'] = dt_now
self._watched.at[idx_key, 'key'] = (class_code, sec_code, param)
self._watched.at[idx_key, 'upd_int'] = update_interval
pass
def unsubscribed(self, param_tuple_list):
for (class_code, sec_code, param) in param_tuple_list:
param = param.lower()
idx_key = '__'.join((class_code, sec_code, param))
try:
self._watched = self._watched.drop(index=idx_key)  # drop the row (del would target a column)
except KeyError:
# All good, index was deleted
pass
def get_update_candidates(self):
if len(self._watched) == 0:
return []
last_upd = self._watched['dt'] + self._watched['upd_int']
dt_now = datetime.datetime.now().timestamp()
return self._watched[last_upd < dt_now]
def set_candidate_updates(self, candidates):
if len(self._watched) == 0:
return
dt_now = datetime.datetime.now().timestamp()
self._watched.loc[candidates.index, 'dt'] = dt_now
class ParamCache:
"""
Current parameters (bid/ask/quotes, etc) cache table
It can store and parse everything that returned by getParamEx2() RPC
Values are parsed depending on their types.
Missing values are marked as None (for strings, dates, times) or nan (for numeric).
"""
def __init__(self, class_code: str, sec_code: str, params_list: List[str]):
self.class_code = class_code
self.sec_code = sec_code
if params_list is None or len(params_list) == 0:
raise ValueError('params_list is empty')
self.params = {p.lower(): None for p in params_list} # type: Dict[str, Any]
def process_param(self, param_key: str, param_ex_api_response: dict):
"""
Fills the parameter cache based on getParamEx2() RPC response
:param param_key: param field name, must be `params_list` in constructor
:param param_ex_api_response: as {'param_ex': {'param_type': '1', 'result': '1', 'param_image': '152 420', 'param_value': '152420.000000'}}
:return:
"""
key = param_key.lower()
assert key in self.params, f'Param key {key} is not requested at ParamCache constructor'
assert 'param_ex' in param_ex_api_response, f'Expected to get getParamEx2() RPC response format, got ({param_ex_api_response})'
res = param_ex_api_response['param_ex']
if res['param_type'] == '1' or res['param_type'] == '2':
# Float or Int (but parse as floats)
if res['result'] == '1':
# Request was successful
self.params[key] = float(res['param_value'])
else:
self.params[key] = nan
elif res['param_type'] == '3' or res['param_type'] == '4':
# String or enumerable
if res['result'] == '1':
# Request was successful
self.params[key] = res['param_image']
else:
self.params[key] = None
elif res['param_type'] == '5':
# Time
if res['result'] == '1' and res['param_image']:
# Request was successful
t_str = res['param_image']
if ':' not in t_str:
raise QuikLuaException(f'Unknown param time format {key}: {t_str}')
t_tok = t_str.split(':')
self.params[key] = datetime.time(int(t_tok[0]), int(t_tok[1]), int(t_tok[2]))
else:
self.params[key] = None
elif res['param_type'] == '6':
# Date
if res['result'] == '1' and res['param_image']:
# Request was successful
self.params[key] = datetime.datetime.strptime(res['param_image'], '%d.%m.%Y')
else:
self.params[key] = None
elif res['param_type'] == '0':
# Highly likely it's unknown param_type
raise QuikLuaException(f'({self.class_code},{self.sec_code}): getParamEx2() unknown or invalid param key: {param_key}')
else:
# Unexpected param type
raise QuikLuaException(f'({self.class_code},{self.sec_code}): getParamEx2() returned unknown param type: {param_ex_api_response}')
class HistoryCache:
"""In-memory history quote cache for speed up Quik historical data fetching"""
def __init__(self, class_code, sec_code, interval, cache_update_min_interval_sec=0.2):
self.class_code = class_code
self.sec_code = sec_code
self.interval = interval
self._data = None
self._last_quote = datetime.datetime(1900, 1, 1)
self.ds_uuid = None
self._lock = asyncio.Lock()
self._last_update = None
self._cache_update_interval_sec = cache_update_min_interval_sec
def process_history(self, quotes_df):
assert isinstance(quotes_df, pd.DataFrame), 'quotes_df must be a Pandas DataFrame'
assert isinstance(quotes_df.index, pd.DatetimeIndex), 'quotes_df must have DatetimeIndex'
assert quotes_df.index.is_monotonic_increasing, 'quotes_df must be sorted by date ascending'
if self._data is None:
if len(quotes_df) > 0:
self._data = quotes_df
self._last_quote = quotes_df.index[-1]
self._last_update = datetime.datetime.now()
else:
if len(quotes_df) > 0:
# Update data and rewrite overlapping records in old data
self._data = quotes_df.combine_first(self._data)
self._last_quote = quotes_df.index[-1]
self._last_update = datetime.datetime.now()
@property
def data(self):
return self._data
@property
def last_bar_date(self):
return self._last_quote
@property
def lock(self):
return self._lock
@property
def can_update(self):
if self._last_update is None:
# Cache is new, let it update
return True
else:
# Permit cache update only if self._cache_update_interval_sec passed
return (datetime.datetime.now() - self._last_update).total_seconds() > self._cache_update_interval_sec
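# Added usage sketch (not part of the original module), using the response
# format documented in ParamCache.process_param above; the class/security
# codes are placeholders.
def _param_cache_demo():
    cache = ParamCache('SPBFUT', 'RIZ1', ['LAST'])
    cache.process_param('LAST', {'param_ex': {'param_type': '1', 'result': '1',
                                              'param_image': '152 420',
                                              'param_value': '152420.000000'}})
    return cache.params['last']  # -> 152420.0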
|
import discord
from dotenv import load_dotenv
import logging
import os
from discord.ext import commands
extensions = [
"cogs.clan_battle",
"cogs.help"
]
load_dotenv()
DISCORD_BOT_TOKEN = os.getenv('DISCORD_BOT_TOKEN')
formatter = '%(levelname)s : %(asctime)s : %(message)s'
logging.basicConfig(filename='logs/bot.log', level=logging.INFO, format=formatter)
bot = commands.Bot(command_prefix="ちぇる")
bot.remove_command("help")
if __name__ == '__main__':
for extension in extensions:
bot.load_extension(extension)
@bot.event
async def on_ready():
logging.info(f"Name: {bot.user.name}")
logging.info(f"ID: {bot.user.id}")
await bot.change_presence(activity=discord.Game(name="「ちぇるへるぷ」でヘルプが見れます。"))
@bot.event
async def on_command_error(ctx: commands.Context, exception):
logging.error(f"ctx.message.content: {ctx.message.content}")
logging.error(f"ctx.args: {ctx.args}")
logging.error(f"ctx.command_failed: {ctx.command_failed}")
if not ctx.command:
return
await ctx.channel.send(exception)
bot.run(DISCORD_BOT_TOKEN)
|
# Module version
version_info = (0, 6, 0)
# Module version stage suffix map
_specifier_ = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc', 'final': ''}
# Module version accessible using ipyspeck.__version__
__version__ = '%s.%s.%s'%(version_info[0], version_info[1], version_info[2])
|
# DROP TABLES
staging_events_table_drop = "drop table if exists s_songplay_event;"
staging_songs_table_drop = "drop table if exists s_song;"
songplay_table_drop = "drop table if exists f_songplay;"
user_table_drop = "drop table if exists d_user;"
song_table_drop = "drop table if exists d_song;"
artist_table_drop = "drop table if exists d_artist;"
time_table_drop = "drop table if exists d_time;"
# CREATE TABLES
# songplay_id is always 64 characters (hex digest of sha256 hash)
staging_events_table_create= ("""
create table if not exists s_songplay_event (
songplay_id varchar(64),
artist text,
auth text,
first_name text,
item_in_session integer,
last_name text,
gender char(1),
length decimal,
level text,
location text,
method text,
page text,
registration decimal,
session_id integer,
song text,
status smallint,
timestamp text,
year smallint,
week smallint,
month smallint,
day smallint,
hour smallint,
weekday smallint,
user_agent text,
user_id text
);
""")
staging_songs_table_create = ("""
create table if not exists s_song (
artist_id varchar(18),
artist_location varchar(96),
artist_latitude decimal,
artist_longitude decimal,
artist_name varchar(256),
duration decimal,
num_songs smallint,
song_id varchar(18),
title varchar(256),
year smallint
);
""")
songplay_table_create = ("""
create table if not exists f_songplay (
songplay_id text primary key not null,
start_time text not null references d_time(start_time),
user_id text not null references d_user(user_id),
artist_id text references d_artist(artist_id),
song_id text references d_song(song_id),
level text,
session_id text,
location text,
user_agent text
);
""")
user_table_create = ("""
create table if not exists d_user (
user_id text primary key not null,
first_name text,
last_name text,
gender text,
level text
);
""")
song_table_create = ("""
create table if not exists d_song (
song_id text primary key not null,
title text,
artist_id text,
year text,
duration text
);
create index idx_song_title on d_song(title);
""")
artist_table_create = ("""
create table if not exists d_artist (
artist_id text primary key not null,
name text,
location text,
latitude text,
longitude text
);
create index idx_artist_name on d_artist(name);
""")
time_table_create = ("""
create table if not exists d_time (
start_time text primary key not null,
hour text,
day text,
week text,
month text,
year text,
weekday text
);
""")
# STAGING TABLES
staging_events_copy = ("""
insert into s_songplay_event (
songplay_id,
artist,
auth,
first_name,
item_in_session,
last_name,
gender,
length,
level,
location,
method,
page,
registration,
session_id,
song,
status,
timestamp,
year,
week,
month,
day,
hour,
weekday,
user_agent,
user_id
) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
on conflict do nothing;
""")
staging_songs_copy = ("""
insert into s_song (
artist_id,
artist_latitude,
artist_location,
artist_longitude,
artist_name,
duration,
num_songs,
song_id,
title,
year
) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
on conflict do nothing;
""")
# FINAL TABLES
songplay_table_insert = ("""
insert into f_songplay (songplay_id, start_time, user_id, artist_id, song_id, level, session_id, location, user_agent)
select
songplay_id,
timestamp,
user_id,
(select artist_id from d_artist d where d.name = artist limit 1) as artist_id,
(select song_id from d_song d where d.artist_id = artist_id and d.title = song limit 1),
level,
session_id,
location,
user_agent
from s_songplay_event;
""")
user_table_insert = ("""
insert into d_user (user_id, first_name, last_name, gender, level)
select distinct(user_id), first_name, last_name, gender, level from s_songplay_event
on conflict do nothing;
""")
song_table_insert = ("""
insert into d_song (song_id, title, artist_id, year, duration)
select distinct(song_id), title, artist_id, year, duration from s_song
on conflict do nothing;
""")
artist_table_insert = ("""
insert into d_artist (artist_id, name, location, latitude, longitude)
select distinct(artist_id), artist_name, artist_location, artist_latitude, artist_longitude from s_song
on conflict do nothing;
""")
time_table_insert = ("""
insert into d_time (start_time, year, week, month, day, hour, weekday)
select distinct(timestamp), year, week, month, day, hour, weekday from s_songplay_event
on conflict do nothing;
""")
# QUERY LISTS
create_table_queries = [staging_events_table_create, staging_songs_table_create, user_table_create, song_table_create, artist_table_create, time_table_create, songplay_table_create]
drop_table_queries = [staging_events_table_drop, staging_songs_table_drop, songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]
copy_table_queries = [staging_events_copy, staging_songs_copy]
insert_table_queries = [user_table_insert, song_table_insert, artist_table_insert, time_table_insert, songplay_table_insert]
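# Added driver sketch (not part of the original file): the %s placeholders and
# "on conflict" clauses suggest PostgreSQL via psycopg2, so a create_tables
# script might consume the query lists like this. The connection parameters
# are placeholders.
def _create_tables_demo():
    import psycopg2
    conn = psycopg2.connect("host=localhost dbname=sparkifydb user=student password=student")
    cur = conn.cursor()
    for query in drop_table_queries + create_table_queries:
        cur.execute(query)
    conn.commit()
    conn.close()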
|
'''
Created on Sep 6, 2021
@author: mhindle
'''
import numpy as np
import numbers
from typing import Tuple, List, Dict, Union, Set
import itertools
from collections import defaultdict
import pandas as pd
class JointAllellicDistribution(object):
def __init__(self, snp_ordered, chromosome2snp=None, pseudocount = 1, surround_size=1):
self.pseudocount = pseudocount
self.frequency: Dict[Tuple[str,int],Dict[Tuple[str,int],Dict[Tuple[str,int],int]]] = dict()
self.n_observations: Dict[Tuple[str,str,str]] = defaultdict(int)
self.surround_size = surround_size
self.window_size = (surround_size*2)+1
self.snp_ordered = snp_ordered
self.chromosome2snp = chromosome2snp
def getWindow(self, targetSnp):
'''
targetSnp is the SNP around which to extract the symmetric window of +/- surround_size
'''
targetpos = self.snp_ordered.index(targetSnp)
startpos_snp = targetpos-self.surround_size
if startpos_snp < 0:
startpos_snp = 0
endpos_snp = targetpos+self.surround_size+1
if endpos_snp >= len(self.snp_ordered):
endpos_snp = len(self.snp_ordered)-1
snpWindow = self.snp_ordered[startpos_snp:endpos_snp]
if self.chromosome2snp is not None:
targetchr = self.chromosome2snp[targetSnp]
return([snpId for snpId in snpWindow if self.chromosome2snp[snpId] == targetchr])
return(snpWindow)
def getCountTable(self, observedstates: dict, targetSnp):
all_obs = [(snpid,observedstates[snpid]) for snpid in self.getWindow(targetSnp)]
def copypastefunc(x):
return([(snpid,state) if snpid != targetSnp else (targetSnp, x) for snpid,state in all_obs])
for state, query in enumerate(list(map(copypastefunc, [0,1,2]))):
#print("%s == %s" % (state, query))
workinghash = self.frequency
for item in query:
workinghash = workinghash[item]
if "obs" in workinghash:
yield workinghash["obs"] #it should be the result
else:
print("query %s" % query)
print("first %s" % self.frequency[query[0]])
print("workinghash %s" % workinghash)
print("item %s" % "_".join(map(str,item)))
raise Exception("incomplete traversal of nested hash: final %s state %s" % (workinghash, state))
def countJointFrq(self, table, mask, column_names: List[str], conditions_index=[0,1,2,9]):
column_names = np.array(column_names)
subset = table[np.all(mask,axis=1),:]
for values in list(itertools.product(conditions_index, repeat=self.window_size)):
conditions = list(zip(column_names, values))
nine_truth = np.ones((subset.shape[0],1), dtype=bool)
rows_that_meet = np.logical_and.reduce([nine_truth if value == 9 else np.equal(subset[:,column_names == snp],value) for snp,value in conditions])
keys = list(zip(column_names, values))
obs = np.count_nonzero(rows_that_meet)
self.recurse_set_dict(self.frequency, keys, obs)
if 9 not in values: # only count complete real value arrays
self.n_observations[tuple(column_names)] += (obs+self.pseudocount) # keep track of how many observations there have been for this window of snps
def recurse_set_dict(self, d, queue, value):
f = queue.pop(0)
if len(queue) > 0:
if f not in d:
d[f] = dict()
self.recurse_set_dict(d[f], queue, value)
else:
if f not in d:
d[f] = dict()
if "obs" not in d[f]:
d[f]["obs"] = value+self.pseudocount # we record the observations for this state combo
elif d[f]["obs"] != value+self.pseudocount:
raise Exception("overwriting value %s with %s " % (d[f]["obs"], value))
def countJointFrqAll(self, table:pd.DataFrame, mask=None):
'''
table: expects a pandas DataFrame with SNP ids as columns and observations as rows
mask: expects a numpy bool matrix, but a pandas bool DataFrame is also handled
'''
if mask is None:
mask = np.ones(table.shape,dtype=bool)
elif mask is pd.DataFrame:
mask = mask.to_numpy(dtype=bool)
for targetSnp in self.snp_ordered:
snp_window = self.getWindow(targetSnp)
indexofsnps = [x in snp_window for x in table.columns]
self.countJointFrq(table.loc[:,snp_window].to_numpy(dtype=int), mask[:,indexofsnps], snp_window)
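# Added usage sketch (not part of the original module): three ordered SNPs on
# one chromosome, genotype codes 0/1/2, then the pseudocounted counts of the
# middle SNP being 0, 1 or 2 given its observed neighbours.
if __name__ == '__main__':
    snps = ['snp1', 'snp2', 'snp3']
    dist = JointAllellicDistribution(snps,
                                     chromosome2snp={s: '1' for s in snps},
                                     surround_size=1)
    observations = pd.DataFrame([[0, 1, 2], [0, 1, 2], [1, 1, 0]], columns=snps)
    dist.countJointFrqAll(observations)
    print(list(dist.getCountTable({'snp1': 0, 'snp2': 1, 'snp3': 2}, 'snp2')))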
|
import numpy as np
import scipy.stats as stats
import pylab as pl
h = sorted({2.083, 0.315, 1.00e-03, 3.27, 0.929, 2.251,
0.867, 2.727, 0.156, 0.023, 0.081, 6.157, 2.764, 0.586,
1.374, 0.025, 4.015, 0.064, 0.777, 1.783, 0.347, 0.782,
0.391, 3.807, 0.569, 0.344, 0.434, 4.266, 1.031, 0.954,
5.396, 0.216, 1.217, 4.754, 1.205, 0.881, 0.257, 0.122,
5.127, 0.706, 1.893, 5.18, 0.308, 1.319, 0.039, 2.313,
0.985, 1.528, 2.533, 1.235, 0.477, 2.756, 1.206, 1.2,
1.444, 0.941, 3.186, 1.521, 3.631, 2.742, 0.425, 0.551,
0.751, 0.697, 0.333, 2.139, 1.05, 0.041, 0.413, 1.679,
0.773, 0.212, 0.115, 0.081, 1.647, 0.97, 1.12, 0.002,
0.114, 6.603, 0.293, 0.358, 1.031, 0.226, 1.00E-0, 3.388,
1.68, 1.682, 0.055, 0.91, 0.81, 0.915, 0.774, 4.572,
4.081, 1.403, 1.204, 0.181, 1.526, 0.22}) #sorted
fit = stats.norm.pdf(h, np.mean(h), np.std(h)) # evaluate a fitted normal PDF at the sorted data points
pl.plot(h, fit, '-o')
pl.hist(h, density=True, bins=np.arange(0, 7, .2)) # draw a normalized histogram of the data (normed was renamed to density in matplotlib)
pl.show() # you may also need to add this
|
import random
import logging
import unittest
from unittest import mock
from talkgenerator.schema import slide_schemas
from talkgenerator import generator
from talkgenerator.slide import powerpoint_slide_creator
from talkgenerator.util import os_util
class TestTalkGenerator(unittest.TestCase):
def setUp(self):
random.seed(1)
self.default_args = mock.Mock()
self.default_args.configure_mock(topic="cat")
self.default_args.configure_mock(num_slides=3)
self.default_args.configure_mock(schema="default")
self.default_args.configure_mock(title=None)
self.default_args.configure_mock(parallel=True)
self.default_args.configure_mock(
output_folder=os_util.to_actual_file("../output/test/")
)
self.default_args.configure_mock(open_ppt=False)
self.default_args.configure_mock(save_ppt=True)
self.default_args.configure_mock(int_seed=123)
def test_serial(self):
self.default_args.configure_mock(parallel=False)
ppt, _, _ = generator.generate_presentation_using_cli_arguments(
self.default_args
)
self.assertEqual(3, len(ppt.slides))
def test_to_dictionary(self):
_, slide_deck, _ = generator.generate_presentation(
schema="default",
slides=3,
topic="cat",
title=None,
presenter=None,
parallel=True,
int_seed=123,
save_ppt=False,
open_ppt=False,
print_logs=False,
)
slides_dict = slide_deck.to_slide_deck_dictionary()
logging.info(slides_dict)
self.assertIsNotNone(slides_dict)
def test_all_slide_generators(self):
basic_presentation_context = {
"topic": "dog",
"seed": "cat",
"presenter": "An O. Nymous",
"title": "Mock title",
}
presentation = powerpoint_slide_creator.create_new_powerpoint()
for slide_generator in slide_schemas.all_slide_generators:
logging.info("Testing Slide Generator: {}".format(slide_generator))
random.seed(123)
slide, _ = slide_generator.generate(
basic_presentation_context, []
)
slide.create_powerpoint_slide(presentation)
if __name__ == "__main__":
unittest.main()
|
# Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
""" Model for labels association table."""
from sqlalchemy import orm
from sqlalchemy.orm import validates
from ggrc import db
from ggrc.models import mixins
from ggrc.models import reflection
from ggrc.models.mixins import base
from ggrc.fulltext.mixin import Indexed
class Label(base.ContextRBAC, mixins.Base, db.Model, Indexed):
"""Represent object labels"""
__tablename__ = 'labels'
_fulltext_attrs = [
'name',
'object_type',
]
@validates('name')
def validate_name(self, key, value):
"""Validates and cleans name that has leading/trailing spaces"""
# pylint: disable=unused-argument,no-self-use
return value if value is None else value.strip()
name = db.Column(db.String, nullable=False)
object_type = db.Column(db.String)
_api_attrs = reflection.ApiAttributes("name")
_extra_table_args = [
db.UniqueConstraint('name', 'object_type'),
]
@classmethod
def indexed_query(cls):
return super(Label, cls).indexed_query().options(
orm.Load(cls).load_only("name", "object_type"),
)
|
from django.utils.translation import ugettext_lazy as _
HTTP_STATUS_CODES = (
# Informational
(100, _('Continue')),
(101, _('Switching Protocols')),
(102, _('Processing (WebDAV)')),
# Success
(200, _('OK')),
(201, _('Created')),
(202, _('Accepted')),
(203, _('Non-Authoritative Information')),
(204, _('No Content')),
(205, _('Reset Content')),
(206, _('Partial Content')),
(207, _('Multi-Status (WebDAV)')),
# Redirection
(300, _('Multiple Choices')),
(301, _('Moved Permanently')),
(302, _('Found')),
(303, _('See Other')),
(304, _('Not Modified')),
(305, _('Use Proxy')),
(306, _('Switch Proxy')), # No longer used
(307, _('Temporary Redirect')),
# Client Error
(400, _('Bad Request')),
(401, _('Unauthorized')),
(402, _('Payment Required')),
(403, _('Forbidden')),
(404, _('Not Found')),
(405, _('Method Not Allowed')),
(406, _('Not Acceptable')),
(407, _('Proxy Authentication Required')),
(408, _('Request Timeout')),
(409, _('Conflict')),
(410, _('Gone')),
(411, _('Length Required')),
(412, _('Precondition Failed')),
(413, _('Request Entity Too Large')),
(414, _('Request-URI Too Long')),
(415, _('Unsupported Media Type')),
(416, _('Requested Range Not Satisfiable')),
(417, _('Expectation Failed')),
(418, _('I\'m a teapot')), # April Fools
(422, _('Unprocessable Entity (WebDAV)')),
(423, _('Locked (WebDAV)')),
(424, _('Failed Dependency (WebDAV)')),
(425, _('Unordered Collection')),
(426, _('Upgrade Required')),
(449, _('Retry With')),
# Server Error
(500, _('Internal Server Error')),
(501, _('Not Implemented')),
(502, _('Bad Gateway')),
(503, _('Service Unavailable')),
(504, _('Gateway Timeout')),
(505, _('HTTP Version Not Supported')),
(506, _('Variant Also Negotiates')),
(507, _('Insufficient Storage (WebDAV)')),
(509, _('Bandwidth Limit Exceeded')),
(510, _('Not Extended')),
)
from request.router import patterns
browsers = patterns(('Unknown', {}),
# Browsers
(r'AOL (?P<version>[\d+\.\d+]+)', 'AOL'),
(r'Mozilla/(?P<mozilla_version>[-.\w]+) \(compatible; ( ?)MSIE (?P<msie_version>[-.\w]+);( ?)( ?)America Online Browser (?P<version>[-.\w]+);', 'AOL'),
(r'Camino/(?P<version>[-.\w]+)', 'Camino'),
(r'Chrome/(?P<version>[-.\w]+)', 'Google Chrome'),
(r'Firefox(/(?P<version>[-.\w]+)?)', 'Firefox'),
(r'Mozilla/(?P<mozilla_version>[-.\w]+) \(compatible; ( ?)MSIE (?P<version>[-.\w]+);( ?)( ?)(Win|Mac)', 'Internet Explorer'),
(r'Konqueror/(?P<version>[-.\w]+)', 'Konqueror'),
(r'Opera( |/)(?P<version>[-.\w]+)', 'Opera'),
(r'OmniWeb(/(?P<version>[-.\w]+)?)', 'OmniWeb'),
(r'Safari/(?P<version>[-.\w]+)', 'Safari'),
(r'(Netscape([\d]?)|Navigator)/(?P<version>[-.\w]+)', 'Netscape'),
(r'Wget/(?P<version>[-.\w]+)', 'Wget'),
(r'Minefield(/(?P<version>[-.\w]+)?)', 'Firefox'), # Firefox nightly trunk builds
(r'Shiretoko(/(?P<version>[-.\w]+)?)', 'Firefox'), # Firefox testing browser
(r'GranParadiso(/(?P<version>[-.\w]+)?)', 'Firefox'), # Firefox testing browser
(r'Iceweasel(/(?P<version>[-.\w]+)?)', 'Firefox'), # Debian re-branded firefox
# RSS Reader
(r'(NetNewsWire|NewsGatorOnline)/(?P<version>[-.\w]+)', 'NetNewsWire'),
(r'Feedfetcher-Google', 'Google Reader'),
# Bots
(r'Googlebot', 'Google'),
(r'Yahoo! Slurp', 'Yahoo'),
(r'msnbot', 'MSN Bot'),
(r'(Baiduspider|BaiduImagespider)', 'Baiduspider'),
(r'Ask Jeeves', 'Ask Jeeves'),
(r'FollowSite', 'FollowSite'),
(r'WebAlta Crawler', 'WebAlta Crawler'),
(r'ScoutJet', 'ScoutJet'),
(r'SurveyBot', 'domaintools.com'),
(r'Gigabot', 'Gigabot'),
(r'Speedy Spider', 'entireweb'),
(r'discobot', 'Discovery Engine'),
(r'Purebot(/(?P<version>[-.\w]+)?);', 'Purity search'),
(r'Yandex(/(?P<version>[-.\w]+)?)', 'Yandex'),
(r'PostRank(/(?P<version>[-.\w]+)?)', 'PostRank'),
(r'Mozilla/(?P<mozilla_version>[-.\w]+) \(compatible; DotBot/(?P<version>[-.\w]+); http://www.dotnetdotcom.org/, crawler@dotnetdotcom.org\)', 'Dotbot'),
(r'IrssiUrlLog(/(?P<version>[-.\w]+)?)', 'irssi'),
(r'Linguee Bot \(http://www.linguee.com/bot; bot@linguee.com\)', 'Linguee'),
(r'Sphider', 'Sphider'),
# Other
(r'Mediapartners-Google', 'Google Ads'),
(r'Apple-PubSub', 'Apple-PubSub'),
(r'Python-urllib', 'Python'),
)
engines = patterns(None,
(r'^https?:\/\/([\.\w]+)?yahoo.*(?:&|\?)p=(?P<keywords>[\+-_\w]+)', 'Yahoo'),
(r'^https?:\/\/([\.\w]+)?google.*(?:&|\?)q=(?P<keywords>[\+-_\w]+)', 'Google'),
(r'^https?:\/\/([\.\w]+)?bing.*(?:&|\?)q=(?P<keywords>[\+-_\w]+)', 'Bing'),
)
|
from __main__ import db
#from app import db
class Prospect(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(120), nullable=True)
email = db.Column(db.String(120), nullable=False)
gender = db.Column(db.String(20), nullable=False)
age = db.Column(db.Integer, nullable=False)
for_event = db.Column(db.String(120), nullable=False)
def __repr__(self):
return '<Prospect %r>' % self.name
def to_dict(self):
return {
'email': self.email,
'age': self.age,
'gender': self.gender,
'name': self.name,
'for_event': self.for_event
}
|
import unittest
from unittest.mock import Mock
import json
from pathlib import Path
from chris.types import CUBEAddress, CUBEToken
from caw.login.store import AbstractSecretStore, KeyringSecretStore, PlaintextSecretStore, use_keyring
from caw.login.manager import LoginManager
from tempfile import NamedTemporaryFile
class TestSecretStore(unittest.TestCase):
def can_save_clear(self, store: AbstractSecretStore):
store.set('http://localhost:8910/api/v1/', 'abcdefg')
stored = store.get('http://localhost:8910/api/v1/')
self.assertEqual(stored, 'abcdefg',
msg='Stored secret does not match what was originally set.')
store.clear('http://localhost:8910/api/v1/')
self.assertIsNone(store.get('http://localhost:8910/api/v1/'),
msg='store.clear did not work.')
@unittest.skipUnless(use_keyring, 'keyring not supported')
def test_keyring(self):
self.can_save_clear(KeyringSecretStore({}))
def test_plaintext(self):
self.can_save_clear(PlaintextSecretStore({}))
class TestLoginManager(unittest.TestCase):
def setUp(self) -> None:
with NamedTemporaryFile(suffix='.json', delete=True) as self.savefile:
pass
self.savefile = Path(self.savefile.name)
self.store: Mock = Mock(spec=AbstractSecretStore)
wrapper = Mock(return_value=self.store, spec=AbstractSecretStore.__init__)
self.lm = LoginManager(wrapper, self.savefile) # type: ignore
def getJson(self) -> dict:
"""
Load the file written to by the ``LoginManager``.
"""
with self.savefile.open('r') as f:
return json.load(f)
def test_default_address(self):
self.lm.login(CUBEAddress('https://example.com/api/v1/'), CUBEToken('berry'))
self.store.set.assert_called_once_with(CUBEAddress('https://example.com/api/v1/'), CUBEToken('berry'))
content = self.getJson()
self.assertIn('defaultAddress', content,
msg='Login manager did not set the CUBE address as default.')
self.assertEqual(content['defaultAddress'], 'https://example.com/api/v1/',
msg='Default address is incorrect.')
self.store.get = Mock(return_value='berry')
self.assertEqual(self.lm.get(), CUBEToken('berry'),
msg='Retrieved password for default CUBE address is incorrect.')
self.lm.logout()
self.store.clear.assert_called_once_with('https://example.com/api/v1/')
self.assertNotIn('defaultAddress', self.getJson(),
msg='Default address not removed after logout.')
|
# ANSWER 1, step 1
import constante
def gcl(x_n):
m = 2**32 # modulus
a = 1013904223 # multiplier
c = 1664525 # increment
x = ((a * x_n) + c) % m
return x
x_n = constante.SEMILLA
for _ in range(6):
x_n = gcl(x_n)
print(x_n)
|
import re
from typing import List
class SentenceTokenizer:
PERIOD = "。"
PERIOD_SPECIAL = "__PERIOD__"
PATTERNS = [
re.compile(r"(.*?)"),
re.compile(r"「.*?」"),
]
@staticmethod
def conv_period(item) -> str:
return item.group(0).replace(SentenceTokenizer.PERIOD, SentenceTokenizer.PERIOD_SPECIAL)
def tokenize(self, document) -> List[str]:
for pattern in SentenceTokenizer.PATTERNS:
document = re.sub(pattern, self.conv_period, document)
result = []
for line in document.split("\n"):
line = line.rstrip()
line = line.replace("\n", "")
line = line.replace("\r", "")
line = line.replace("。", "。\n")
sentences = line.split("\n")
for sentence in sentences:
if not sentence:
continue
period_special = SentenceTokenizer.PERIOD_SPECIAL
period = SentenceTokenizer.PERIOD
sentence = sentence.replace(period_special, period)
result.append(sentence)
return result
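# Added usage sketch (not part of the original module): periods inside 「」
# quotes are protected by the PERIOD_SPECIAL placeholder, so the quoted text
# stays in one sentence.
if __name__ == "__main__":
    tokenizer = SentenceTokenizer()
    print(tokenizer.tokenize("彼は「そうだ。行こう。」と言った。今日は晴れだ。"))
    # ['彼は「そうだ。行こう。」と言った。', '今日は晴れだ。']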
|
# SPDX-FileCopyrightText: 2018 Mikey Sklar for Adafruit Industries
#
# SPDX-License-Identifier: MIT
# Mindfulness Bracelet sketch for Adafruit Gemma. Briefly runs
# vibrating motor (connected through transistor) at regular intervals.
import time
import board
from digitalio import DigitalInOut, Direction
# vibrating mini motor disc connected on D1
vibrating_disc = DigitalInOut(board.D1)
vibrating_disc.direction = Direction.OUTPUT
on_time = 2 # Vibration motor run time, in seconds
interval = 60 # Time between reminders, in seconds
start_time = time.monotonic()
while True:
timer = time.monotonic() - start_time
if timer >= interval and timer <= (interval + on_time):
vibrating_disc.value = True
elif timer >= (interval + on_time):
vibrating_disc.value = False
start_time = time.monotonic()
|
from torchutils.callbacks.callbacks import *
|
# -*- coding: utf-8 -*-
from cadnano.extras.math.vector import normalToPlane, normalizeV3, applyMatrix3, applyMatrix4
from cadnano.extras.math.matrix3 import getNormalMatrix
from cadnano.extras.math.face import Face
class Solid(object):
def __init__(self, name):
self.name = name
self.vertices = []
self.faces = []
self.face_vertex_uvs = [[]]
# end def
def addFace(self, v1, v2, v3, normal=None):
""" List vertices using right hand rule so that unit
normal will point out of the surface
vertices are given by index into vertices list
Args:
v1 (Vector3):
v2 (Vector3):
v3 (Vector3):
normal (Vector3): face normal
"""
# for v in vertices:
# if v is not in self.vertices:
# self.addVertex(v)
vrts = self.vertices
if normal is None:
normal = normalToPlane(vrts[v1], vrts[v2], vrts[v3])
self.faces.append(Face(normal, v1, v2, v3))
# end def
def addVertex(self, vertex):
""" Add a vertex to the Solid
Args:
vertex (Vector3):
"""
self.vertices.append(vertex)
# end def
def applyMatrix(self, matrix4):
normal_matrix = getNormalMatrix(matrix4)
verts = self.vertices
for i in range(len(verts)):
vertex = verts[i]
verts[i] = applyMatrix4(matrix4, vertex)
faces = self.faces
for i in range(len(faces)):
face = faces[i]
normal = normalizeV3(applyMatrix3(normal_matrix, face.normal))
faces[i] = Face(normal, face.v1, face.v2, face.v3)
def computeFaceNormals(self):
vrts = self.vertices
for i in range(len(self.faces)):
face = self.faces[i]
normal = normalToPlane(vrts[face.v1], vrts[face.v2], vrts[face.v3])
self.faces[i] = Face(normal, face.v1, face.v2, face.v3)
# end for
# end class
|
#!/usr/bin/env python
import argparse
import sys
from collections import defaultdict
DEFAULT_OUT = "stackcollapse-merged.txt"
def merge(files, dst):
data = defaultdict(int)
for file in files:
with open(file, "r") as fp:
for line in fp.readlines():
stack, hits = line.rsplit(" ", 1)
hits = int(hits)
data[stack] += hits
with open(dst, "w") as fp:
for stack, hits in data.items():
print(stack, hits, file=fp)
def main():
parser = argparse.ArgumentParser(
prog=sys.argv[0],
description="merge multiple stackcollapses into a single one"
)
parser.add_argument(
"files", metavar="FILE", type=str, nargs="+", help="a stackcollapse file"
)
parser.add_argument(
"-o",
"--out",
default=DEFAULT_OUT,
help=f"write resulting stackcollapse to this file (default: {DEFAULT_OUT})",
)
opts = parser.parse_args(sys.argv[1:])
merge(opts.files, opts.out)
if __name__ == "__main__":
main()
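# Added usage sketch (not part of the original script): each collapsed-stack
# line is "frame;frame;frame <count>"; merge() sums the counts of identical
# stacks across files. The file paths below are placeholders.
def _merge_demo(tmpdir="/tmp"):
    import os.path
    a = os.path.join(tmpdir, "collapse_a.txt")
    b = os.path.join(tmpdir, "collapse_b.txt")
    with open(a, "w") as fp:
        fp.write("main;compute;hash 40\n")
    with open(b, "w") as fp:
        fp.write("main;compute;hash 2\nmain;io;read 7\n")
    merge([a, b], os.path.join(tmpdir, DEFAULT_OUT))  # the hash stack merges to 42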
|
# 023
# Ask the user to type in the first line of a nursery rhyme and display
# the length of the string. Ask for a starting number and an
# ending number and then display just that section of the text
# (remember Python starts counting from 0 and not 1).
rhyme = list()
while True:
try:
if not rhyme:
rhyme = input('Please enter the first line of a nursery '
'rhyme: ')
print(f'There are {len(rhyme)} characters in that line')
            from_to = input('Please type in the starting character position '
                            'and the final character position: ')
from_to = from_to.split(' ')
for index, value in enumerate(from_to):
from_to[index] = int(value)
print(rhyme[from_to[0] - 1:from_to[1] + 1])
break
except Exception as e:
print(e)
|
from __future__ import division
import math
class RunningMean:
"""Compute running mean.
This class computes running mean.
Recoded from https://www.johndcook.com/blog/skewness_kurtosis/
Example
-------
>>> from RunningMean import RunningMean
>>> s = RunningMean()
>>> s.mean()
0.0
>>> s.count()
0
>>> print(s([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]))
5.0
>>> s.mean()
5.0
>>> s.count()
9
>>> print(s.clear())
0.0
>>> s.mean()
0.0
>>> s.count()
0
>>> print(s(100))
100.0
>>> print(s([0.0]))
50.0
"""
def __init__(self):
self.clear()
def __call__(self, input):
"""Update running mean with input
Parameters
----------
input : list or scalar
Returns
-------
self
Modified self object
"""
try:
for scalar in input:
self.update(scalar)
except TypeError:
self.update(input)
return self
def __str__(self):
return "{}".format(self.mean())
def clear(self):
"""Clear state and running mean.
Returns
-------
self
Modified self object
"""
self.n = 0
self.M1 = 0.0
return self
def update(self, input):
"""Update running mean with input
Parameters
----------
input : scalar
Returns
-------
self
Modified self object
"""
self.n += 1
delta = float(input) - self.M1
delta_n = delta / self.n
self.M1 += delta_n
return self
def count(self):
"""Return count.
Returns
-------
int
Number of data points received.
"""
return self.n
def mean(self):
"""Return running mean.
Returns
-------
float
Running mean.
"""
return self.M1
if __name__ == "__main__":
import doctest
import sys
(failure_count, test_count) = doctest.testmod()
sys.exit(failure_count)
|
#! /usr/bin/env python
"""HistToGNU.py
Convert saved binary pickle of histograms to gnu plot output
Usage: %(program)s [options] [histogrampicklefile ...]
reads pickle filename from options if not specified
writes to stdout
"""
globalOptions = """
set grid
set xtics 5
set xrange [0.0:100.0]
"""
dataSetOptions = "smooth unique"
import sys
from spambayes.Options import options
from spambayes.safepickle import pickle_read, pickle_write
program = sys.argv[0]
def usage(code, msg=''):
"""Print usage message and sys.exit(code)."""
if msg:
print >> sys.stderr, msg
print >> sys.stderr
print >> sys.stderr, __doc__ % globals()
sys.exit(code)
def loadHist(path):
"""Load the histogram pickle object"""
return pickle_read(path)
def outputHist(hist, f=sys.stdout):
"""Output the Hist object to file f"""
hist.fill_buckets()
for i in range(len(hist.buckets)):
n = hist.buckets[i]
f.write("%.3f %d\n" % ( (100.0 * i) / hist.nbuckets, n))
def plot(files):
"""given a list of files, create gnu-plot file"""
import cStringIO
cmd = cStringIO.StringIO()
cmd.write(globalOptions)
args = []
for file in files:
args.append("""'-' %s title "%s" """ % (dataSetOptions, file))
cmd.write('plot %s\n' % ",".join(args))
for file in files:
outputHist(loadHist(file), cmd)
cmd.write('e\n')
cmd.write('pause 100\n')
print cmd.getvalue()
def main():
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], '', [])
except getopt.error, msg:
usage(1, msg)
if not args and options["TestDriver", "save_histogram_pickles"]:
args = []
for f in ('ham', 'spam'):
fname = "%s_%shist.pik" % (options["TestDriver",
"pickle_basename"], f)
args.append(fname)
if args:
plot(args)
else:
print "could not locate any files to plot"
if __name__ == "__main__":
main()
|
import sys
import harold_packaging_example.util.utils as utils
def main():
for arg in sys.argv[1:]:
print(utils.translate(arg))
if __name__ == '__main__':
main()
|
import base64
import io
from fdk import response
import json
import logging
import oci
from .base import BaseDispatch
from .dispatch import Dispatcher
from .service import SlackService, Agent, Channel
from .text import Text
LOG = logging.getLogger(__name__)
SERVICE = None
TOKEN = None
TEAM = None
NAMESPACE, BUCKET = None, None
def init(cfg):
global SERVICE, TOKEN, TEAM, NAMESPACE, BUCKET
if TEAM is None:
TEAM = load_secret(cfg, 'TEAM')
if SERVICE is None:
SERVICE = SlackService(team=TEAM,
bot_oauth=load_secret(cfg, 'BOT_OAUTH'),
user_oauth=load_secret(cfg, 'USER_OAUTH'))
if TOKEN is None:
TOKEN = load_secret(cfg, 'TOKEN')
if NAMESPACE is None:
NAMESPACE = cfg['NAMESPACE']
if BUCKET is None:
BUCKET = cfg['BUCKET']
def load_secret(cfg, setting):
"""If we have KMS_KEY and KMS_EP defined, use those to decrypt the given secret
Otherwise, pull the value out as plaintext."""
value = cfg.get(setting)
if value is None:
return value
# Retrieve key OCID and endpoint
key = cfg.get("KMS_KEY")
endpoint = cfg.get("KMS_EP")
if key is None and endpoint is None:
return value
# Create decryption client
signer = oci.auth.signers.get_resource_principals_signer()
client = oci.key_management.KmsCryptoClient({}, endpoint, signer=signer)
# The plaintext is returned as base64-encoded data. Decrypt it (providing a byte sequence)
# and then produce a UTF-8 string from the result.
return base64.b64decode(client.decrypt(oci.key_management.models.DecryptDataDetails(
key_id=key, ciphertext=value)).data.plaintext).decode("utf-8")
class Bot(BaseDispatch):
pass
def handle(ctx, data: io.BytesIO, bot_class=Bot):
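    # Request flow: verify the shared token, answer Slack URL-verification
    # challenges, check the workspace (team) id, then turn plain message
    # events into a Dispatcher.dispatch() call; unexpected errors return 500.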
init(ctx.Config())
try:
args = json.loads(data.getvalue())
LOG.debug('args are %s', {k: args[k] for k in args if k != 'token'})
token = args.get('token')
if token != TOKEN:
return response.Response(ctx, status_code=401)
if args.get('challenge') is not None:
return response.Response(ctx, status_code=200, response_data=args['challenge'])
team = args.get('team_id')
if team != TEAM:
return response.Response(ctx, status_code=404)
if SERVICE is None:
return response.Response(ctx, status_code=404)
if args.get('type') == 'event_callback':
event = args.get('event', {})
if event.get('type') == 'app_mention':
pass
elif event.get('type') == 'message' and event.get('subtype') is None:
text = Text.parse(event.get('text', ''), srv=SERVICE)
text.ts = event.get('ts')
sender = Agent(id=event.get('user'))
channel = Channel(id=event.get('channel'))
if event.get('channel_type') == 'group':
channel = channel.replace(is_private=True)
elif event.get('channel_type') == 'im':
channel = channel.replace(is_im=True)
receivers = [Agent(id=rcv, is_bot=True) for rcv in args.get('authed_users', [])]
rp = oci.auth.signers.get_resource_principals_signer()
dispatcher = Dispatcher(srv=SERVICE,
default=bot_class, factory=bot_class.load,
signer=rp, namespace=NAMESPACE, bucket=BUCKET)
dispatcher.dispatch(sender=sender, channel=channel, receivers=receivers, text=text)
except Exception as e:
LOG.exception("Problem during dispatch: %r", e)
return response.Response(ctx, status_code=500)
return response.Response(ctx, status_code=200)
|
from django.contrib import admin
from animals.models import Animal, Breed, Pet
class AnimalAdmin(admin.ModelAdmin):
list_display = ('name', 'scientific_name',)
search_fields = ('name', 'scientific_name',)
ordering = ('name',)
class BreedAdmin(admin.ModelAdmin):
list_display = ('name', 'animal',)
search_fields = ('name',)
list_filter = ('animal__name',)
ordering = ('name',)
class PetAdmin(admin.ModelAdmin):
list_display = ('name', 'animal', 'breed', 'birthday',)
search_fields = ('name', 'birthday', 'breed__name', 'breed__animal__name')
list_filter = ('breed__animal__name',)
ordering = ('name',)
def animal(self, obj):
return obj.breed.animal
animal.admin_order_field = 'breed__animal'
admin.site.register(Animal, AnimalAdmin)
admin.site.register(Breed, BreedAdmin)
admin.site.register(Pet, PetAdmin)
|
# ------------------------------------------------------------------------------
# Copyright 2020 Forschungszentrum Jülich GmbH and Aix-Marseille Université
# "Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements; and to You under the Apache License,
# Version 2.0. "
#
# Forschungszentrum Jülich
# Institute: Institute for Advanced Simulation (IAS)
# Section: Jülich Supercomputing Centre (JSC)
# Division: High Performance Computing in Neuroscience
# Laboratory: Simulation Laboratory Neuroscience
# Team: Multi-scale Simulation and Design
# ------------------------------------------------------------------------------
from EBRAINS_RichEndpoint.steering.steering_menu_cli import SteeringMenuCLI
from EBRAINS_RichEndpoint.Application_Companion.common_enums import Response
class SteeringMenuCLIHandler:
'''
Manages the Menu related functionality.
    NOTE: It is a POC as of now, and may later be extended into a separate
    process/thread.
'''
def __init__(self) -> None:
self.__steering_menu_cli = SteeringMenuCLI()
self.__current_choice = None
@property
def current_selection(self): return self.__current_choice
def display_steering_menu(self):
print('\n'+'*' * 33, flush=True)
print('*\t Steering Menu \t\t*')
print('*' * 33)
index = 1
for item in self.__steering_menu_cli.steering_menu_items:
print(f'{index}. {self.__steering_menu_cli.steering_menu[item]} ')
index += 1
print('\n')
def get_user_choice(self):
choice = input("please enter the choice number [1-3]: ")
self.__current_choice = self.__convert_str_to_int(choice)
return self.current_selection
def __convert_str_to_int(self, val_str):
try:
val_int = int(val_str)
return val_int
except ValueError:
return Response.ERROR
def parse_user_choice(self, user_choice):
if user_choice in self.__steering_menu_cli.steering_menu_items:
return user_choice
else:
return Response.ERROR
def get_menu_item(self, item):
if item in self.__steering_menu_cli.steering_menu_items:
return self.__steering_menu_cli.steering_menu[item]
else:
return Response.ERROR
|
class Empleado(object):
puesto = ""
def factory(clase):
if clase == "Maestro":
return Maestro()
elif clase == "Vendedor":
return Vendedor()
elif clase == "Repatidor":
return Repartidor()
class Maestro(Empleado):
puesto = "Maestro"
def mensaje(self):
return "Hola soy un Maestro"
class Vendedor(Empleado):
puesto = "Vendedor"
def mensaje(self):
return "Hola soy un Vendedor"
class Repartidor(Empleado):
puesto = "Repartidor"
def mensaje(self):
return "Hola soy un Repartidor"
'''Input'''
vend = Empleado.factory('Vendedor')
print("Clase: {0}\n{1}".format(vend.puesto, vend.mensaje()))
'''
Expected output:
Clase: Vendedor
Hola soy un Vendedor
'''
|
#!/usr/bin/env python
import math
import random
import scanpy.api as sc
import numpy as np
from granatum_sdk import Granatum
def main():
gn = Granatum()
adata = gn.ann_data_from_assay(gn.get_import("assay"))
num_cells_to_sample = gn.get_arg("num_cells_to_sample")
random_seed = gn.get_arg("random_seed")
np.random.seed(random_seed)
num_cells_before = adata.shape[0]
num_genes_before = adata.shape[1]
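    # A num_cells_to_sample value in (0, 1) is treated as a fraction of the
    # current cell count; anything else is rounded and used as an absolute
    # count, clamped to the range [1, num_cells_before].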
if num_cells_to_sample > 0 and num_cells_to_sample < 1:
num_cells_to_sample = round(num_cells_before * num_cells_to_sample)
else:
num_cells_to_sample = round(num_cells_to_sample)
if num_cells_to_sample > num_cells_before:
num_cells_to_sample = num_cells_before
if num_cells_to_sample < 1:
num_cells_to_sample = 1
sampled_cells_idxs = np.sort(np.random.choice(num_cells_before, num_cells_to_sample, replace=False))
adata = adata[sampled_cells_idxs, :]
gn.add_result(
"\n".join(
[
"The assay before down-sampling has **{}** cells and {} genes.".format(
num_cells_before, num_genes_before
),
"",
"The assay after down-sampling has **{}** cells and {} genes.".format(adata.shape[0], adata.shape[1]),
]
),
type="markdown",
)
gn.export(gn.assay_from_ann_data(adata), "Down-sampled Assay", dynamic=False)
gn.commit()
if __name__ == "__main__":
main()
|
'''
/*******************************************************************************
* Copyright 2016-2019 Exactpro (Exactpro Systems Limited)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
'''
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.stem.snowball import SnowballStemmer
from sklearn.feature_extraction import text
from collections import OrderedDict
from exactpro.cleaner import ClearData
from exactpro.my_multithread import Multithreaded
from sklearn import feature_selection
from sklearn.feature_selection import SelectKBest
import pandas
from pathlib import Path
from werkzeug.utils import secure_filename
from exactpro.model import Model
from exactpro.config_parser import SettingProvider
from exactpro.model import Insert
# calculation of:
# THE TOP OF THE MOST FREQUENTLY USED TERMS
# THE TOP OF THE MOST SIGNIFICANT TERMS
# STAT INFO
class StatInfo:
# STAT INFO info calculation
def get_statInfo(self, data, orig_data):
try:
            # data type conversion for correct stat processing
comments_ser = data['Comments'].apply(float)
attachments_ser = data['Attachments'].apply(float)
return {'total': str(orig_data['Issue_key'].count()),
'filtered': str(data['Issue_key'].count()),
'commentStat': {'max': str(comments_ser.max()),
'min': str(comments_ser.min()),
'mean': str(round(comments_ser.mean(), 3)),
'std': str(round(comments_ser.std(), 3))
},
'attachmentStat': {
'max': str(attachments_ser.max()),
'min': str(attachments_ser.min()),
'mean': str(round(attachments_ser.mean(), 3)),
'std': str(round(attachments_ser.std(), 3))
},
'ttrStat': {
'max': str(data['ttr'].max()),
'min': str(data['ttr'].min()),
'mean': str(round(data['ttr'].mean(), 3)),
'std': str(round(data['ttr'].std(), 3))
}
}
except KeyError:
raise
# THE TOP OF THE MOST FREQUENTLY USED TERMS info calculation
def friquency_stat(self, data, sw=text.ENGLISH_STOP_WORDS):
try:
self.tfidf = StemmedTfidfVectorizer(norm='l2',
sublinear_tf=True,
min_df=1,
stop_words=sw,
analyzer='word',
max_features=1000)
self.multithreaded = Multithreaded()
self.clear_data = ClearData()
# description cleaning
self.parall_data = self.multithreaded.parallelize(data['Description_tr'], self.clear_data.clean_descr)
tfs = self.tfidf.fit_transform(self.parall_data)
# train_bug = tfs.todense() # use it for sparse matrix
self.idf = self.tfidf.idf_
# words coefficient
self.voc_feat = dict(zip(self.tfidf.get_feature_names(), self.idf))
self.voc_feat_s = OrderedDict((k, v) for k, v in sorted(self.voc_feat.items(), key=lambda x: x[1], reverse=True))
return list(self.voc_feat_s.keys())[:100] # returns the first 100 words from the calculated list
except Exception as e:
raise Exception(str(e))
def top_terms(self, data, metric, field, sw=text.ENGLISH_STOP_WORDS):
try:
chi2 = feature_selection.chi2
tfidf = StemmedTfidfVectorizer(norm='l2',
sublinear_tf=True,
min_df=1,
stop_words=sw,
analyzer='word',
max_features=1000)
# StemmedTfidfVectorizer(norm='l2', sublinear_tf=True, min_df=1, stop_words=SW, analyzer='word', max_features = 1000, ngram_range=(2, 3)) # stemming + bigram
# TfidfVectorizer(tokenizer=LemmaTokenizer(), norm='l2',sublinear_tf=True, min_df=10, max_df=0.5,stop_words=SW, analyzer='word', ngram_range=(2, 3)) # lemmatization + bigram
# TfidfVectorizer(tokenizer=LemmaTokenizer(), norm='l2', sublinear_tf=True, min_df=10, max_df=0.5, stop_words=SW, analyzer='word') # lemmatization
# StemmedTfidfVectorizer(norm='l2', sublinear_tf=True, min_df=1, stop_words=SW, analyzer='word', max_features = 1000) # stemming with additional settings
# StemmedTfidfVectorizer(norm='l2', stop_words=SW, analyzer='word') # stemming
self.bidata = pandas.get_dummies(data, prefix=[field], columns=[field]) # data binarisation
self.multithreaded = Multithreaded()
self.clear_data = ClearData()
self.parall_data = self.multithreaded.parallelize(self.bidata['Description_tr'], self.clear_data.clean_descr)
self.tfs = tfidf.fit_transform(self.parall_data)
self.y = self.bidata[metric]
self.selector = SelectKBest(score_func=chi2, k='all') # select the most significant terms
self.selector.fit_transform(self.tfs, self.y)
self.X_new = dict(zip(tfidf.get_feature_names(), self.selector.scores_))
self.temp_dict = OrderedDict((k, v) for k, v in sorted(self.X_new.items(), key=lambda x: x[1], reverse=True))
return list(self.temp_dict.keys())[:20]
except Exception as e:
raise Exception(str(e))
def get_topPriority(self, frame, field, sw):
if field.split()[0] == 'Priority':
return StatInfo.top_terms(self,
frame,
'Priority_' + ' '.join(e for e in field.split()[1:]),
'Priority',
sw)
if field.split()[0] == 'Resolution':
return StatInfo.top_terms(self,
frame,
'Resolution_' + ' '.join(e for e in field.split()[1:]),
'Resolution',
sw)
# saving of finished calculations for significance top
    # reason: building the top of terms is a resource-consuming task
def save_significanceTop(self, data, reference_to, significance_top, sw=text.ENGLISH_STOP_WORDS):
if reference_to in significance_top.keys():
return significance_top[reference_to]
else:
significance_top[reference_to] = StatInfo.get_topPriority(self, data, reference_to, sw)
return significance_top[reference_to]
def max_data(self, frame, fields, resolution):
try:
self.count = 0
self.model = Model()
self.setting_provader = SettingProvider('single_mod.ini')
self.insert = Insert()
self.dictionary = {}
while self.count < frame.shape[0]:
self.my_list = {}
self.my_list['Summary'] = (frame['Summary'][self.count])
self.my_list['Priority'] = (frame['Priority'][self.count])
self.ttr = self.model.proc_text(frame['Description'][self.count],
self.setting_provader.get_setting('single_mod.ini'.split('.')[0], 'ttr_col_class', False).split(','),
'ttr',
str(Path(__file__).parents[2])+'/model/')
self.my_list['ttr'] = (max(self.ttr, key=self.ttr.get))
for el in resolution:
self.rez = self.model.proc_text(frame['Description'][self.count],
self.setting_provader.get_setting('single_mod.ini'.split('.')[0], el+'_col_class', False).split(','),
secure_filename(el),
str(Path(__file__).parents[2])+'/model/')
self.my_list[el] = (max(self.rez, key=self.rez.get))
self.area_prob = {}
for area in fields:
self.tmp = self.model.proc_text(frame['Description'][self.count],
self.setting_provader.get_setting('single_mod.ini'.split('.')[0], 'binary_col_class', False).split(','),
secure_filename(area),
str(Path(__file__).parents[2])+'/model/')
self.area_prob.update({area: float(self.tmp['1'])})
self.my_list['area_of_testing'] = [k for k, v in self.area_prob.items() if v > 0.5] if [k for k, v in self.area_prob.items() if v > 0.5] else 'no idea'
self.dictionary[frame['Issue_key'][self.count]] = self.my_list
self.count = self.count + 1
return self.dictionary
except FileNotFoundError:
raise
except Exception:
raise
def mean_std_data(self, frame, fields, resolution):
try:
self.count = 0
self.model = Model()
self.setting_provader = SettingProvider('single_mod.ini')
self.resolution_fin = {el: {el: [], 'not_'+el: []} for el in resolution}
self.ttr_fin = {k: [] for k in self.setting_provader.get_setting('single_mod.ini'.split('.')[0], 'ttr_col_class', False).split(',')}
self.area_fin = {field: [] for field in fields}
while self.count < frame.shape[0]:
self.ttr = self.model.proc_text(frame['Description'][self.count],
self.setting_provader.get_setting('single_mod.ini'.split('.')[0], 'ttr_col_class', False).split(','),
'ttr',
str(Path(__file__).parents[2])+'/model/')
for key in self.ttr_fin:
self.ttr_fin[key].append(self.ttr[key])
for el in resolution:
self.rez = self.model.proc_text(frame['Description'][self.count],
['not_'+el, el],
el,
str(Path(__file__).parents[2])+'/model/')
self.resolution_fin[el][el].append(self.rez[el])
self.resolution_fin[el]['not_'+el].append(self.rez['not_'+el])
for area in fields:
self.tmp = self.model.proc_text(frame['Description'][self.count],
self.setting_provader.get_setting('single_mod.ini'.split('.')[0], 'binary_col_class', False).split(','),
area,
str(Path(__file__).parents[2])+'/model/')
self.area_fin[area].append(float(self.tmp['1']))
self.count = self.count + 1
StatInfo.print_mean_std(self, self.resolution_fin)
StatInfo.print_mean_std(self, self.ttr_fin)
StatInfo.print_mean_std(self, self.area_fin)
except FileNotFoundError:
raise
except Exception:
raise
def print_mean_std(self, obj):
import numpy
for el in obj:
if isinstance(obj[el], list):
print(el+'\nmean: '+str(numpy.mean(obj[el]))+'\n'+'std: '+str(numpy.std(obj[el])))
elif isinstance(obj[el], dict):
StatInfo.print_mean_std(self, obj[el])
else:
print(el+'\nfor {} only one value'.format(el))
class StemmedTfidfVectorizer(TfidfVectorizer):
def build_analyzer(self):
analyzer = super(TfidfVectorizer, self).build_analyzer()
return lambda doc: (SnowballStemmer("english").stem(w) for w in analyzer(doc))
|
from turtle import Screen
from snake import Snake
from food import Food
from scoreboard import Scoreboard
import time
WIDTH = 600
HEIGHT = 600
COLLISION_SEG_SIZE = 15
X_COLLISION = WIDTH/2 - COLLISION_SEG_SIZE
Y_COLLISION = HEIGHT/2 - COLLISION_SEG_SIZE
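# A wall hit is registered when the head moves within one collision segment
# (15 px) of the 600x600 window edge; the same 15 px threshold is used for
# food and tail collisions below.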
# Set up game screen:
screen = Screen()
screen.setup(width=WIDTH,height=HEIGHT)
screen.bgcolor("black")
screen.title("Snake")
screen.tracer(0)
# Set up Turtle game objects:
snake = Snake()
food = Food(WIDTH, HEIGHT)
score = Scoreboard(HEIGHT)
# Setup screen listener functions
screen.listen()
screen.onkey(snake.up, 'Up')
screen.onkey(snake.down, 'Down')
screen.onkey(snake.right, 'Right')
screen.onkey(snake.left, 'Left')
def game_over():
"""Reset the Snake Game"""
score.game_over()
snake.reset()
game_is_on = True
# Basic game loop, refresh every 0.1 second
while game_is_on:
screen.update()
time.sleep(0.1)
snake.move()
    # detect collision with food by comparing the two turtles
if snake.head.distance(food) < COLLISION_SEG_SIZE:
food.refresh()
snake.extend()
score.increase_score()
    # detect collision with walls:
if (snake.head.xcor() > X_COLLISION or snake.head.xcor() < -X_COLLISION
or snake.head.ycor() > Y_COLLISION or snake.head.ycor() < -Y_COLLISION):
game_over()
    # detect collision with tail
for segment in snake.snake_segments[1:]:
if snake.head.distance(segment) < COLLISION_SEG_SIZE:
game_over()
screen.exitonclick()
|
__version__ = '0.9'
from ._vulkan import *
|
import logging
from datetime import datetime, timedelta
from typing import Any, Optional, Union
from privex.helpers.exceptions import CacheNotFound
from privex.helpers.settings import DEFAULT_CACHE_TIMEOUT
from privex.helpers.cache.CacheAdapter import CacheAdapter
log = logging.getLogger(__name__)
class MemoryCache(CacheAdapter):
"""
A very basic cache adapter which implements :class:`.CacheAdapter` - stores the cache in memory using
the static attribute :py:attr:`.__CACHE`
As the cache is simply stored in memory, any python object can be cached without needing any form of serialization.
Fully supports cache expiration.
**Basic Usage**::
>>> from time import sleep
>>> c = MemoryCache()
>>> c.set('test:example', 'hello world', timeout=60)
>>> c.get('test:example')
'hello world'
>>> sleep(60)
>>> c.get('test:example', 'NOT FOUND')
'NOT FOUND'
"""
__CACHE = {}
def get(self, key: str, default: Any = None, fail: bool = False) -> Any:
key = str(key)
c = self.__CACHE
if key in c:
log.debug('Cache key "%s" found in __CACHE. Checking expiry...', key)
vc = c[key]
if str(vc['timeout']) != 'never' and vc['timeout'] < datetime.utcnow():
log.debug('Cache key "%s" has expired. Removing from cache.')
del c[key]
if fail:
raise CacheNotFound(f'Cache key "{key}" was expired.')
return default
log.debug('Cache key "%s" is valid and not expired. Returning value "%s"', key, vc)
return vc['value']
if fail:
raise CacheNotFound(f'Cache key "{key}" was not found.')
log.debug('Cache key "%s" was not found in __CACHE. Returning default value.', key)
return default
def set(self, key: str, value: Any, timeout: Optional[int] = DEFAULT_CACHE_TIMEOUT):
key, timeout = str(key), int(timeout)
c = self.__CACHE
log.debug('Setting cache key "%s" to value "%s" with timeout %s', key, value, timeout)
c[key] = dict(value=value, timeout=datetime.utcnow() + timedelta(seconds=timeout))
return c[key]
def get_or_set(self, key: str, value: Union[Any, callable], timeout: int = DEFAULT_CACHE_TIMEOUT) -> Any:
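        # Return the cached value for `key` if present; otherwise store `value`
        # (calling it with the key when it is callable) and return it.
        # Hypothetical example: c.get_or_set('user:1', lambda k: load_user(1), timeout=120),
        # where load_user is an illustrative loader that only runs on a cache miss.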
key, timeout = str(key), int(timeout)
try:
k = self.get(key, fail=True)
except CacheNotFound:
k = value(key) if callable(value) else value
self.set(key=key, value=k, timeout=timeout)
return k
def remove(self, *key: str) -> bool:
removed = 0
for k in key:
k = str(k)
if k in self.__CACHE:
del self.__CACHE[k]
removed += 1
return removed == len(key)
def update_timeout(self, key: str, timeout: int = DEFAULT_CACHE_TIMEOUT) -> Any:
key, timeout = str(key), int(timeout)
v = self.get(key=key, fail=True)
self.__CACHE[key]['timeout'] = datetime.utcnow() + timedelta(seconds=timeout)
return v
|
import os
import shutil
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
ROOT_DIR = os.path.abspath(os.path.join(FILE_DIR, ".."))
shutil.rmtree(os.path.join(ROOT_DIR, "build"), ignore_errors = True)
shutil.rmtree(os.path.join(ROOT_DIR, "dist"), ignore_errors = True)
shutil.rmtree(os.path.join(ROOT_DIR, "happy.egg-info"), ignore_errors = True)
|
import functools
import urllib.parse
import tornado.web
from monstro.forms import forms
from monstro.views import mixins
__all__ = (
'View',
'RedirectView',
'TemplateView',
'ListView',
'DetailView',
'FormView',
'CreateView',
'UpdateView',
'DeleteView'
)
class View(tornado.web.RequestHandler):
authenticators = ()
@staticmethod
def authenticated(argument=None):
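        # Usage forms, as inferred from the argument handling below:
        #   @View.authenticated             -> raise HTTP 401 when unauthenticated
        #   @View.authenticated('/login')   -> redirect to /login?next=<current url>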
def decorator(method):
@functools.wraps(method)
async def wrapper(self, *args, **kwargs):
for authenticator in await self.get_authenticators():
self.session = await authenticator.authenticate(self)
if self.session:
break
else:
if callable(argument) or argument is None:
raise tornado.web.HTTPError(401)
redirect_url = argument
if isinstance(argument, str) and '?' not in argument:
if urllib.parse.urlparse(argument).scheme:
next_url = self.request.full_url()
else:
next_url = self.request.uri
redirect_url = '{}?{}'.format(
argument,
urllib.parse.urlencode(dict(next=next_url))
)
return self.redirect(redirect_url)
return await method(self, *args, **kwargs)
return wrapper
return decorator(argument) if callable(argument) else decorator
def initialize(self):
self.session = None
self.request.GET = {}
self.request.POST = {}
async def get_authenticators(self):
return self.authenticators
async def prepare(self):
for key, value in self.request.query_arguments.items():
self.request.GET[key] = value[0].decode('utf-8')
for key, value in self.request.body_arguments.items():
self.request.POST[key] = value[0].decode('utf-8')
class RedirectView(mixins.RedirectResponseMixin, tornado.web.RequestHandler):
async def prepare(self):
return self.redirect(await self.get_redirect_url(), self.permanent)
class TemplateView(View):
template_name = None
async def get_template_name(self):
assert self.template_name, (
'TemplateView requires either a definition of '
'"template_name" or an implementation of "get_template_name()"'
)
return self.template_name
async def get_context(self, **kwargs):
return kwargs
async def get(self, *args, **kwargs):
self.render(
await self.get_template_name(),
**await self.get_context()
)
class ListView(mixins.ListResponseMixin, TemplateView):
context_object_name = 'pagination'
async def get_context(self, **kwargs):
context = {self.context_object_name: await self.paginate()}
context.update(kwargs)
return context
class DetailView(mixins.DetailResponseMixin, TemplateView):
context_object_name = 'object'
async def get_context(self, **kwargs):
context = {self.context_object_name: await self.get_object()}
context.update(kwargs)
return context
class FormView(mixins.RedirectResponseMixin, TemplateView):
form_class = None
permanent = False
async def get_form_class(self):
assert self.form_class, (
'FormView requires either a definition of '
'"form_class" or an implementation of "get_form_class()"'
)
return self.form_class
async def get_form_kwargs(self):
return {'data': self.request.POST}
async def get_form(self):
return (await self.get_form_class())(**await self.get_form_kwargs()) # pylint:disable=E1102
async def get_context(self, **kwargs):
context = await super().get_context()
context.update(kwargs)
context.setdefault('form', await self.get_form())
return context
async def post(self, *args, **kwargs):
form = await self.get_form()
if await form.is_valid():
return await self.form_valid(form)
return await self.form_invalid(form)
async def form_valid(self, form): # pylint: disable=W0613
return self.redirect(await self.get_redirect_url(), self.permanent)
async def form_invalid(self, form):
context = await self.get_context(form=form)
self.render(self.template_name, **context)
class CreateView(mixins.ModelResponseMixin, FormView):
async def get_form_class(self):
if self.form_class: # pragma: no cover
return self.form_class
Meta = type('Meta', (), {'model': await self.get_model()})
return type('ModelForm', (forms.ModelForm,), {'Meta': Meta})
async def form_valid(self, form):
await form.save()
return await super().form_valid(form)
class UpdateView(mixins.DetailResponseMixin, CreateView): # pylint:disable=R0901
async def get_form_kwargs(self):
kwargs = await super().get_form_kwargs()
kwargs['instance'] = await self.get_object()
return kwargs
class DeleteView(mixins.RedirectResponseMixin,
mixins.DetailResponseMixin,
View):
async def delete(self, *args, **kwargs):
await (await self.get_object()).delete()
return self.redirect(await self.get_redirect_url(), self.permanent)
|
import os
import pandas as pd
import numpy as np
import glob
def load_abs_data():
"""
Loads the data from the ABSdata.csv file.
"""
    # the csv path below is relative to the current working directory
df = pd.read_csv('Utilities/DataframeCode/ABSdata.csv')
x = df['X'].values
x = x.reshape(-1, 1)
t = df['T'].values
t = t.reshape(-1, 1)
assert (x.shape == (20, 1))
assert (t.shape == (20, 1))
return x, t
def make_directory_if_not_exists(directory_path):
"""
Creates a directory if it does not exist. The directory path is relative to the current working directory.
"""
if not os.path.exists(directory_path):
os.makedirs(directory_path)
def save_df_to_csv(df, width, depths, optimizer, learning_rate):
"""
Saves a dataframe to a csv file. The file is saved based on the width, optimizer, and learning rate.
"""
directory_path = f'Results/Width-{width}/{optimizer}'
make_directory_if_not_exists(directory_path)
full_path = f'{directory_path}/{optimizer}-LearningRate-{learning_rate}.csv'
df.replace(to_replace=np.nan, value=' ', inplace=True)
df.insert(len(df.columns), ' ', [' ' for _ in range(len(depths) * 2 + 1)], True)
df.to_csv(full_path, index=True)
def combine_all_dfs_to_csv(width, optimizers):
"""
Combines all the dataframes for a given width and optimizer into one dataframe.
    The combined dataframe is saved as 'All-Results.csv' in the Results directory for the given width.
"""
def get_combined_optimizer_csvs(optimizer):
optimizer_csvs = glob.glob(f'Results/Width-{width}/{optimizer}/{optimizer}-LearningRate-*.csv')
df_holder = []
for filename in optimizer_csvs:
df = pd.read_csv(filename)
df_holder.append(df)
return pd.concat(df_holder, axis=1)
final = pd.concat([get_combined_optimizer_csvs(optimizer) for optimizer in optimizers], axis=0)
final.to_csv(f'Results/Width-{width}/All-Results.csv', index=False)
|
#
# level1_sum.py
# programmers_training
#
# Created by Chanwoo Noh on 2018. 09. 09..
# Copyright © 2018년 Chanwoo Noh. All rights reserved.
#
def solution(a, b):
sum = 0
    if a > b:
        a, b = b, a
    while a <= b:
        sum += a
        a += 1
return sum
# ... or more simply
# def solution(a, b):
# answer = 0
# if a > b: (a,b) = (b,a)
# return sum(range(a,b+1))
|
from .attribute import *
from .buffer_description import *
from .shader import *
from .shader_program import *
|
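# NOTE: this snippet assumes a `cookery` decorator provider (an object exposing
# .action() and .subject()) is already defined in the surrounding context; its
# import is not shown here.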
@cookery.action('')
def do(subject):
print('in do action with subject:', subject)
return subject
@cookery.action('(.*)')
def echo(subject, text):
return text
@cookery.subject('in', r'(.+)')
def file(path):
print('opening file:', repr(path))
f = open(path, 'r')
return f.read()
|
import unittest
from collections import OrderedDict
from robot.utils import DotDict
from robot.utils.asserts import (assert_equal, assert_false, assert_not_equal,
assert_raises, assert_true)
class TestDotDict(unittest.TestCase):
def setUp(self):
self.dd = DotDict([('z', 1), (2, 'y'), ('x', 3)])
def test_init(self):
assert_true(DotDict() == DotDict({}) == DotDict([]))
assert_true(DotDict(a=1) == DotDict({'a': 1}) == DotDict([('a', 1)]))
assert_true(DotDict({'a': 1}, b=2) ==
DotDict({'a': 1, 'b': 2}) ==
DotDict([('a', 1), ('b', 2)]))
assert_raises(TypeError, DotDict, None)
def test_get(self):
assert_equal(self.dd[2], 'y')
assert_equal(self.dd.x, 3)
assert_raises(KeyError, self.dd.__getitem__, 'nonex')
assert_raises(AttributeError, self.dd.__getattr__, 'nonex')
def test_equality(self):
assert_true(self.dd == self.dd)
assert_false(self.dd != self.dd)
assert_true(self.dd == DotDict(self.dd))
assert_false(self.dd != DotDict(self.dd))
assert_false(self.dd == DotDict())
assert_true(self.dd != DotDict())
def test_equality_with_normal_dict(self):
assert_equal(self.dd, {'z': 1, 2: 'y', 'x': 3})
def test_hash(self):
assert_raises(TypeError, hash, self.dd)
def test_set(self):
self.dd.x = 42
self.dd.new = 43
self.dd[2] = 44
self.dd['n2'] = 45
assert_equal(self.dd, {'z': 1, 2: 44, 'x': 42, 'new': 43, 'n2': 45})
def test_del(self):
del self.dd.x
del self.dd[2]
self.dd.pop('z')
assert_equal(self.dd, {})
assert_raises(KeyError, self.dd.__delitem__, 'nonex')
assert_raises(AttributeError, self.dd.__delattr__, 'nonex')
def test_same_str_and_repr_format_as_with_normal_dict(self):
D = {'foo': 'bar', '"\'': '"\'', '\n': '\r', 1: 2, (): {}, True: False}
for d in {}, {'a': 1}, D:
for formatter in str, repr:
result = formatter(DotDict(d))
assert_equal(eval(result, {}), d)
def test_is_ordered(self):
assert_equal(list(self.dd), ['z', 2, 'x'])
self.dd.z = 'new value'
self.dd.a_new_item = 'last'
self.dd.pop('x')
assert_equal(list(self.dd.items()),
[('z', 'new value'), (2, 'y'), ('a_new_item', 'last')])
self.dd.x = 'last'
assert_equal(list(self.dd.items()),
[('z', 'new value'), (2, 'y'), ('a_new_item', 'last'), ('x', 'last')])
def test_order_does_not_affect_equality(self):
d = dict(a=1, b=2, c=3, d=4, e=5, f=6, g=7)
od1 = OrderedDict(sorted(d.items()))
od2 = OrderedDict(reversed(list(od1.items())))
dd1 = DotDict(sorted(d.items()))
dd2 = DotDict(reversed(list(dd1.items())))
for d1, d2 in [(dd1, dd2), (dd1, d), (dd2, d), (dd1, od1), (dd2, od2)]:
assert_equal(d1, d2)
assert_equal(d2, d1)
for d1, d2 in [(dd1, od2), (dd2, od1)]:
assert_equal(d1, d2)
assert_equal(d2, d1)
assert_not_equal(od1, od2)
class TestNestedDotDict(unittest.TestCase):
def test_nested_dicts_are_converted_to_dotdicts_at_init(self):
leaf = {'key': 'value'}
d = DotDict({'nested': leaf, 'deeper': {'nesting': leaf}}, nested2=leaf)
assert_equal(d.nested.key, 'value')
assert_equal(d.deeper.nesting.key, 'value')
assert_equal(d.nested2.key, 'value')
def test_dicts_inside_lists_are_converted(self):
leaf = {'key': 'value'}
d = DotDict(list=[leaf, leaf, [leaf]], deeper=[leaf, {'deeper': leaf}])
assert_equal(d.list[0].key, 'value')
assert_equal(d.list[1].key, 'value')
assert_equal(d.list[2][0].key, 'value')
assert_equal(d.deeper[0].key, 'value')
assert_equal(d.deeper[1].deeper.key, 'value')
def test_other_list_like_items_are_not_touched(self):
value = ({'key': 'value'}, [{}])
d = DotDict(key=value)
assert_equal(d.key[0]['key'], 'value')
assert_false(hasattr(d.key[0], 'key'))
assert_true(isinstance(d.key[0], dict))
assert_true(isinstance(d.key[1][0], dict))
def test_items_inserted_outside_init_are_not_converted(self):
d = DotDict()
d['dict'] = {'key': 'value'}
d['list'] = [{}]
assert_equal(d.dict['key'], 'value')
assert_false(hasattr(d.dict, 'key'))
assert_true(isinstance(d.dict, dict))
assert_true(isinstance(d.list[0], dict))
def test_dotdicts_are_not_recreated(self):
value = DotDict(key=1)
d = DotDict(key=value)
assert_true(d.key is value)
assert_equal(d.key.key, 1)
def test_lists_are_not_recreated(self):
value = [{'key': 1}]
d = DotDict(key=value)
assert_true(d.key is value)
assert_equal(d.key[0].key, 1)
if __name__ == '__main__':
unittest.main()
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Creates a simple TVM modules."""
import argparse
import os
import logging
from PIL import Image
import numpy as np
def preprocess_image(image_file):
resized_image = Image.open(image_file).resize((224, 224))
image_data = np.asarray(resized_image).astype("float32")
    # after expand_dims, the layout is NHWC (channels last)
image_data = np.expand_dims(image_data, axis=0)
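    # scale each channel from [0, 255] to [-1, 1]: x -> 2 * x / 255 - 1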
image_data[:, :, :, 0] = 2.0 / 255.0 * image_data[:, :, :, 0] - 1
image_data[:, :, :, 1] = 2.0 / 255.0 * image_data[:, :, :, 1] - 1
image_data[:, :, :, 2] = 2.0 / 255.0 * image_data[:, :, :, 2] - 1
return image_data
def build_inputs():
x = preprocess_image("lib/cat.png")
print("x", x.shape)
with open("lib/input.bin", "wb") as fp:
fp.write(x.astype(np.float32).tobytes())
if __name__ == "__main__":
build_inputs()
|
# -*- coding: utf-8 -*-
"""Test the compute_current_source_density function.
For each supported file format, implement a test.
"""
# Authors: Alex Rockhill <aprockhill@mailbox.org>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import pytest
from numpy.testing import assert_allclose
from scipy.io import loadmat
from scipy import linalg
from mne.channels import make_dig_montage
from mne import create_info, EvokedArray, pick_types, Epochs
from mne.io import read_raw_fif, RawArray
from mne.io.constants import FIFF
from mne.utils import object_diff, run_tests_if_main
from mne.datasets import testing
from mne.preprocessing import compute_current_source_density
data_path = op.join(testing.data_path(download=False), 'preprocessing')
eeg_fname = op.join(data_path, 'test_eeg.mat')
coords_fname = op.join(data_path, 'test_eeg_pos.mat')
csd_fname = op.join(data_path, 'test_eeg_csd.mat')
io_path = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(io_path, 'test_raw.fif')
@pytest.fixture(scope='function', params=[testing._pytest_param()])
def evoked_csd_sphere():
"""Get the MATLAB EEG data."""
data = loadmat(eeg_fname)['data']
coords = loadmat(coords_fname)['coords'] * 1e-3
csd = loadmat(csd_fname)['csd']
sphere = np.array((0, 0, 0, 0.08500060886258405)) # meters
sfreq = 256 # sampling rate
# swap coordinates' shape
pos = np.rollaxis(coords, 1)
# swap coordinates' positions
pos[:, [0]], pos[:, [1]] = pos[:, [1]], pos[:, [0]]
# invert first coordinate
pos[:, [0]] *= -1
dists = np.linalg.norm(pos, axis=-1)
assert_allclose(dists, sphere[-1], rtol=1e-2) # close to spherical, meters
# assign channel names to coordinates
ch_names = [str(ii) for ii in range(len(pos))]
dig_ch_pos = dict(zip(ch_names, pos))
montage = make_dig_montage(ch_pos=dig_ch_pos, coord_frame='head')
# create info
info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types='eeg')
# make Evoked object
evoked = EvokedArray(data=data, info=info, tmin=-1)
evoked.set_montage(montage)
return evoked, csd, sphere
def test_csd_matlab(evoked_csd_sphere):
"""Test replication of the CSD MATLAB toolbox."""
evoked, csd, sphere = evoked_csd_sphere
evoked_csd = compute_current_source_density(evoked, sphere=sphere)
assert_allclose(linalg.norm(csd), 0.00177, atol=1e-5)
# If we don't project onto the sphere, we get 1e-12 accuracy here,
# but it's a bad assumption for real data!
# Also, we divide by (radius ** 2) to get to units of V/m², unclear
# why this isn't done in the upstream implementation
evoked_csd_data = evoked_csd.data * sphere[-1] ** 2
assert_allclose(evoked_csd_data, csd, atol=2e-7)
with pytest.raises(ValueError, match=('CSD already applied, '
'should not be reapplied')):
compute_current_source_density(evoked_csd, sphere=sphere)
# 1e-5 here if we don't project...
assert_allclose(evoked_csd_data.sum(), 0.02455, atol=2e-3)
def test_csd_degenerate(evoked_csd_sphere):
"""Test degenerate conditions."""
evoked, csd, sphere = evoked_csd_sphere
warn_evoked = evoked.copy()
warn_evoked.info['bads'].append(warn_evoked.ch_names[3])
with pytest.raises(ValueError, match='Either drop.*or interpolate'):
compute_current_source_density(warn_evoked)
with pytest.raises(TypeError, match='must be an instance of'):
compute_current_source_density(None)
fail_evoked = evoked.copy()
with pytest.raises(ValueError, match='Zero or infinite position'):
for ch in fail_evoked.info['chs']:
ch['loc'][:3] = np.array([0, 0, 0])
compute_current_source_density(fail_evoked, sphere=sphere)
with pytest.raises(ValueError, match='Zero or infinite position'):
fail_evoked.info['chs'][3]['loc'][:3] = np.inf
compute_current_source_density(fail_evoked, sphere=sphere)
with pytest.raises(ValueError, match='No EEG channels found.'):
fail_evoked = evoked.copy()
fail_evoked.set_channel_types({ch_name: 'ecog' for ch_name in
fail_evoked.ch_names})
compute_current_source_density(fail_evoked, sphere=sphere)
with pytest.raises(TypeError, match='lambda2'):
compute_current_source_density(evoked, lambda2='0', sphere=sphere)
with pytest.raises(ValueError, match='lambda2 must be between 0 and 1'):
compute_current_source_density(evoked, lambda2=2, sphere=sphere)
with pytest.raises(TypeError, match='stiffness must be'):
compute_current_source_density(evoked, stiffness='0', sphere=sphere)
with pytest.raises(ValueError, match='stiffness must be non-negative'):
compute_current_source_density(evoked, stiffness=-2, sphere=sphere)
with pytest.raises(TypeError, match='n_legendre_terms must be'):
compute_current_source_density(evoked, n_legendre_terms=0.1,
sphere=sphere)
with pytest.raises(ValueError, match=('n_legendre_terms must be '
'greater than 0')):
compute_current_source_density(evoked, n_legendre_terms=0,
sphere=sphere)
with pytest.raises(ValueError, match='sphere must be'):
compute_current_source_density(evoked, sphere=-0.1)
with pytest.raises(ValueError, match=('sphere radius must be '
'greater than 0')):
compute_current_source_density(evoked, sphere=(-0.1, 0., 0., -1.))
with pytest.raises(TypeError):
compute_current_source_density(evoked, copy=2, sphere=sphere)
# gh-7859
raw = RawArray(evoked.data, evoked.info)
epochs = Epochs(
raw, [[0, 0, 1]], tmin=0, tmax=evoked.times[-1] - evoked.times[0],
baseline=None, preload=False, proj=False)
epochs.drop_bad()
assert len(epochs) == 1
assert_allclose(epochs.get_data()[0], evoked.data)
with pytest.raises(RuntimeError, match='Computing CSD requires.*preload'):
compute_current_source_density(epochs)
epochs.load_data()
raw = compute_current_source_density(raw)
assert not np.allclose(raw.get_data(), evoked.data)
evoked = compute_current_source_density(evoked)
assert_allclose(raw.get_data(), evoked.data)
epochs = compute_current_source_density(epochs)
assert_allclose(epochs.get_data()[0], evoked.data)
def test_csd_fif():
"""Test applying CSD to FIF data."""
raw = read_raw_fif(raw_fname).load_data()
raw.info['bads'] = []
picks = pick_types(raw.info, meg=False, eeg=True)
assert 'csd' not in raw
orig_eeg = raw.get_data('eeg')
assert len(orig_eeg) == 60
raw_csd = compute_current_source_density(raw)
assert 'eeg' not in raw_csd
new_eeg = raw_csd.get_data('csd')
assert not (orig_eeg == new_eeg).any()
# reset the only things that should change, and assert objects are the same
assert raw_csd.info['custom_ref_applied'] == FIFF.FIFFV_MNE_CUSTOM_REF_CSD
raw_csd.info['custom_ref_applied'] = 0
for pick in picks:
ch = raw_csd.info['chs'][pick]
assert ch['coil_type'] == FIFF.FIFFV_COIL_EEG_CSD
assert ch['unit'] == FIFF.FIFF_UNIT_V_M2
ch.update(coil_type=FIFF.FIFFV_COIL_EEG, unit=FIFF.FIFF_UNIT_V)
raw_csd._data[pick] = raw._data[pick]
assert object_diff(raw.info, raw_csd.info) == ''
run_tests_if_main()
|
import requests
import re
import json
from bs4 import BeautifulSoup
import ssl
import time
from modules.helper import rnd, write_to_json_file
FID_URL = "https://accounts.ea.com/connect/auth?response_type=code&client_id=ORIGIN_SPA_ID&display=originXWeb/login&locale=en_US&release_type=prod&redirect_uri=https://www.origin.com/views/login.html"
LOGIN_URL = "https://accounts.ea.com/connect/auth?client_id=ORIGIN_JS_SDK&response_type=token&redirect_uri=nucleus:rest&prompt=none&release_type=prod"
class OriginAPI:
def __init__(self, username, password):
self.username = username
self.password = password
self.user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36"
self.cid = rnd()
def _get(self, url, params=None, headers={}):
headers["User-Agent"] = self.user_agent
return requests.get(url, params=params, headers=headers, verify=False, allow_redirects=False)
def _post(self, url, data=None, headers={}):
headers["User-Agent"] = self.user_agent
return requests.post(url, data=data, headers=headers, verify=False)
def _authed_get(self, url, headers={}):
headers["User-Agent"] = self.user_agent
headers["Authorization"] = f"Bearer {self.access_token['access_token']}"
return requests.get(url, headers=headers, verify=False)
def _authed_get2(self, url, headers={}):
headers["User-Agent"] = self.user_agent
headers["authtoken"] = self.access_token["access_token"]
return requests.get(url, headers=headers, verify=False)
def _fid(self):
res = self._get(FID_URL)
if res.status_code == 302:
self.fid = re.search("(?<=fid=)[a-zA-Z0-9]+?(?=&|$)", res.headers["Location"]).group(0)
return res.headers["Location"]
def _jsession_id(self, location):
res = self._get(location)
if res.status_code == 302:
self.jsession_id = re.search("(?<=JSESSIONID=)[\S]+?(?=;)", res.headers["Set-Cookie"]).group(0)
return f"https://signin.ea.com{res.headers['Location']}"
def _auth_page(self, location):
res = self._get(location, headers={
"Cookie": f"JSESSIONID={self.jsession_id}"
})
if res.status_code == 302:
self.jsession = re.search("(?<=JSESSIONID=)[\S]+?(?=;)", res.headers["Set-Cookie"]).group(0)
return res.headers["Location"]
def _post_auth(self, location):
res = self._post(location, data={
"email": self.username,
"password": self.password,
"_eventId": "submit",
"cid": self.cid,
"showAgeUp": "true",
"googleCaptchaResponse": "",
"_rememberMe": "on"
}, headers={
"Cookie": f"JSESSIONID={self.jsession_id}"
})
        if res.status_code == 200:
            with open("data//responses//origin_auth.txt", "w") as file:
                file.write(res.text)
            # print(res.text)
            return re.search("(?<=window\.location = \")\S+(?=\";)", res.text).group(0)
def _sid(self, location):
res = self._get(location)
if res.status_code == 302:
self.sid = re.search("(?<=sid=)[\S]+?(?=;)", res.headers["Set-Cookie"]).group(0)
self.code = re.search("(?<=code=)[\S]+?(?=&|$)", res.headers["Location"]).group(0)
return res.headers["Location"]
def _access_token(self):
res = self._get(LOGIN_URL, headers={
"Cookie": f"sid={self.sid}"
})
js = json.loads(res.text)
if res.status_code == 200:
self.access_token = {
"access_token": js["access_token"],
"token_type": js["token_type"]
}
def search(self, user):
res = self._authed_get2(f"https://api1.origin.com/xsearch/users?userId={self.my_uid}&searchTerm={user}&start=0")
return json.loads(res.text)
def get_users(self, user_ids):
res = self._authed_get2(f"https://api1.origin.com/atom/users?userIds={','.join(user_ids)}")
return BeautifulSoup(res.text, features="xml")
def get_uid(self, username):
searched = self.search(username)
userids = []
i = 0
if searched["infoList"] is None:
return -1
for entry in searched["infoList"]:
            if i == 5:
break
userids.append(entry["friendUserId"])
i += 1
users = self.get_users(userids)
for user in users.users:
if user.EAID.string == username:
return user.userId.string
def get_auth_code(self, access_token, client_id):
auth_code_url = f"https://accounts.ea.com/connect/auth?access_token={access_token}&client_id={client_id}&response_type=code&release_type=prod"
code = self._get(auth_code_url).headers["Location"]
code = code.split("code=")[1]
return code
def me(self):
return json.loads(self._authed_get("https://gateway.ea.com/proxy/identity/pids/me").text)
def auth(self):
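        # Login flow: fetch the fid -> obtain a JSESSIONID -> load the auth
        # page -> POST the credentials -> follow the redirect to collect the
        # sid and code -> request an access token, then resolve our own user
        # id via me().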
location = self._fid()
location = self._jsession_id(location)
self._auth_page(location)
location = self._post_auth(location)
location = self._sid(location)
self._access_token()
user_data = self.me()
self.my_uid = user_data["pid"]["pidId"]
user_data["pid"] = self.my_uid
user_data['access_token'] = self.access_token
return self.access_token, self.my_uid
|
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = os.path.realpath('.') + '/my_app/static/uploads'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
app.config['WTF_CSRF_SECRET_KEY'] = 'random key for form'
db = SQLAlchemy(app)
app.secret_key = 'some_random_key'
from my_app.catalog.views import catalog
app.register_blueprint(catalog)
db.create_all()
|
__author__ = 'Rodochenko'
import datetime
class rtsDailyOption:
def __init__(self, strike, opt_type, code, date, value):
self.strike = strike
self.code = code
        self.mat_date = self.get_mat_date_from_code()
self.opt_type = opt_type
self.date = date
self.value = value
def get_mat_date_from_code(self):
code = self.code
call_month_codes = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L']
put_month_codes = ['M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X']
mat_date_month = "00"
if code[-2] in call_month_codes:
# retrieving month code
mat_date_month = str(call_month_codes.index(code[-2]) + 1)
elif code[-2] in put_month_codes:
mat_date_month = str(put_month_codes.index(code[-2]) + 1)
mat_date_year = "0000"
        if code[-1] == '1':
            mat_date_year = "2011"
        elif code[-1] == '2':
            mat_date_year = "2012"
mat_date_day = "15"
mat_day_moment_string = mat_date_year + mat_date_month + mat_date_day
return mat_day_moment_string
def get_maturity_from_code(code):
"""getting expiration date from FORTS code"""
call_month_codes = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L']
put_month_codes = ['M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X']
mat_date_month = "00"
if code[-2] in call_month_codes:
# retrieving month code
mat_date_month = str(call_month_codes.index(code[-2]) + 1)
if len(mat_date_month)<2:
mat_date_month = '0' + mat_date_month
elif code[-2] in put_month_codes:
mat_date_month = str(put_month_codes.index(code[-2]) + 1).format()
if len(mat_date_month)<2:
mat_date_month = '0' + mat_date_month
mat_date_year = "0000"
if code[-1] == '1':
mat_date_year = "2011"
elif code[-1] == '2':
mat_date_year = "2012"
    elif code[-1] == '3':
        mat_date_year = "2013"
elif code[-1] == '4':
mat_date_year = "2014"
elif code[-1] == '5':
mat_date_year = "2015"
mat_date_day = "15"
mat_day_moment_string = mat_date_year + mat_date_month + mat_date_day
return mat_day_moment_string
def get_time_to_expiration(current_date_day, maturity_day):
days_in_the_year = 248
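    # e.g. current_date_day "20110101" and maturity_day "20110315" differ by
    # 73 calendar days, giving 73 / 248 ~= 0.294 trading years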
try:
date0 = datetime.datetime.strptime(current_date_day, "%Y%m%d")
date1 = datetime.datetime.strptime(maturity_day, "%Y%m%d")
date_dif = date1-date0
value = str(float(date_dif.days)/days_in_the_year)
except:
value = 0
return value
|
# coding=utf-8
"""
Cookbook Repository Management
"""
from oslo_log import log as logging
import svn.remote
LOG = logging.getLogger(__name__)
class SVNRepo:
"""
Cookbook Repository Object Model
"""
def __init__(self, url, user="default", pwd="default", ):
"""
Connects to a remote svn server
:param user: username
:param pwd: password
:param url: url
"""
self.r = svn.remote.RemoteClient(url, username=user, password=pwd)
self.version = self.get_version()
def list_cookbooks(self, rp=None):
"""
:return: List of all cookbooks in repo
"""
cookbooks = []
entries = self.r.list(rel_path=rp)
for filename in entries:
if self.check_cookbook(filename):
cookbooks.append(filename.replace("/", ""))
return cookbooks
def check_cookbook(self, name):
"""check if the item is a cookbook"""
LOG.info("checking %s" % name)
check = False
# check if the item is a directory
res = self.info(rel_path=name)
if res['entry_kind'] == 'dir':
# check if the item has a recipes directory
if "recipes/" in self.r.list(rel_path=name):
check = True
LOG.debug("Cookbook found: %s" % name)
if not check:
LOG.debug("Not a cookbook: %s" % name)
return check
def download_cookbooks(self, local_path='/tmp/cookbooks'):
"""
Downloads all remote cookbooks to a local path
:param local_path: path to download to
:return: operation result
"""
return self.r.run_command("export", [self.r.url, local_path, "--trust-server-cert", "--force"])
def info(self, rel_path=None):
return self.r.info(rel_path=rel_path)
def get_version(self):
vers = 'Unknown'
for l in self.r.run_command("info", [self.r.url, "--trust-server-cert"]):
if "Revision:" in l:
vers = l.split(":")[1].strip()
return vers
|
#An odd number is an integer which is not a multiple of two
#An even number is an integer which is "evenly divisible" by two
def check_number(number):
    if number % 2 != 0:
        solution = 'Weird'
    elif number % 2 == 0 and number in range(2, 6):
        solution = 'Not Weird'
    elif number % 2 == 0 and number in range(6, 21):
        solution = 'Weird'
    elif number % 2 == 0 and number > 20:
        solution = 'Not Weird'
    return solution
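# e.g. check_number(3) -> 'Weird', check_number(4) -> 'Not Weird',
# check_number(18) -> 'Weird', check_number(22) -> 'Not Weird'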
number = int(input('Enter the number: '))
print(check_number(number))
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 24 17:11:22 2020
@author: chens
"""
#from os.path import dirname
import numpy as np
import geoist as gi
import geoist.others.fetch_data as data
from geoist.others.fetch_data import _retrieve_file as downloadurl
from geoist.others.fetch_data import usgs_catalog
from geoist.catalog import QCreport as qc
from geoist.catalog import QCmulti as cp
from geoist.catalog import Catalogue as cat
from geoist.catalog import Exploration as exp
from geoist.catalog import MapTools as mapt
from geoist.catalog import Selection as sel
from geoist.catalog import Seismicity as sem
from geoist.catalog import Declusterer as declus
from geoist.catalog import Smoothing as sm
from geoist.catalog import CatUtils as ct
## Download the CENC earthquake catalog
# pathname = dirname(__file__)
# print(pathname)
url = data.ispec_catalog_url
print(url)
filename = '2020-03-25CENC-M4.7.dat'
localpath = downloadurl(url+filename, filename)
print(localpath)  # file path
## Download the USGS earthquake catalog
## Reference: https://earthquake.usgs.gov/fdsnws/event/1/
usgsfile = 'usgscat2.csv'
localpath2 = usgs_catalog(usgsfile, '2014-01-01', '2014-01-02') #, '-90','90','-180','180',minmag = '5')
print(localpath2)
dbusgs = cat.Database(usgsfile)
dbusgs.Import0(localpath2)
dbusgs.Info()
## Build the earthquake catalog database
catname = 'CENCM4.7'
db2 = cat.Database(catname)
header = ['Year', 'Month','Day','Hour','Minute','Second','Latitude','Longitude', 'Depth','MagType','MagSize','Log']
db2.Import0(localpath, Header = header, Delimiter= ' ', flag = False)
db2.Info()
db2.SetField('LocCode','CENC')
db2.SetField('MagCode','CENC4.7')
# Filter earthquakes
# Search Area (China) using internal filter
lon = [70, 135]
lat = [15, 55]
db2.Filter('Latitude',lat[0],Opr='>=')
db2.Filter('Latitude',lat[1],Opr='<=')
db2.Filter('Longitude',lon[0],Opr='>=')
db2.Filter('Longitude',lon[1],Opr='<=')
db2.Info()
# 2-D time-series plots
exp.MagTimePlot(db2)
exp.MagTimeBars(db2)
exp.RateDensityPlot(db2)
# Gutenberg-Richter (G-R) relation
enum, mbin =exp.GetKeyHisto(db2,'MagSize',Bnum=20, Norm=False)
minc= (max(mbin)-min(mbin))/10.
# fit the b-value
a,b = sem.MfdOptimize(enum, mbin, minc, max(mbin))
print('b-value=',b)
# recurrence probability
sem.MfdPlot(a,b, max(mbin),Enum=enum, Ecum=np.cumsum(enum[::-1])[::-1], Mbin=mbin, Minc=[minc])
## Decluster: remove aftershocks
dbm, log1 = declus.WindowSearch(db2, WinFun= declus.GardnerKnopoff, WinScale=1)
dbm.Info()
## Epicenter distribution map
x1,y1,z1 = exp.GetHypocenter(db2)
x2,y2,z2 = exp.GetHypocenter(dbm)
cfg = {'Bounds': [70., 15., 135., 55.],
'FigSize': [16., 12.],
'Background': ['none',[0.9,0.8,0.6],[0.5,0.8,1.]],
'Grid': [10., 10.]}
M = mapt.GeoMap(cfg)
M.BasePlot()
M.DrawBounds()
M.DrawGrid()
# epicenter distribution map
M.PointPlot(x1, y1, Set=['o','g',5,1], Label='全部')
M.PointPlot(x2, y2, Set=['*','r',2,1], Label='去余震')
M.Legend()
M.Title('中国及邻区震中分布图')
M.Show()
# Smooth the earthquake catalogue
p = [(90.,20.),(90.,40.),(105.,40.),(105.,20.),(90.,20.)]
db3 = sel.AreaSelect(db2,p)
P = ct.Polygon()
P.Load(p)
db3.Info()
wkt = ct.XYToWkt(P.x, P.y)
xsm, ysm, asm = sm.SmoothMFD(db3, 1., wkt, Delta=0.5)
cfg1 = {'Bounds': [90., 20., 105., 40.],
'FigSize': [10., 12.],
'Background': ['none',[0.9,0.8,0.6],[0.5,0.8,1.]],
'Grid': [5., 5.]}
m1 = mapt.GeoMap(cfg1)
m1.BasePlot()
m1.MeshPlot(xsm, ysm, asm)
#m1.AreaPlot(P.x, P.y, Set=['y',0.5,'k',1])
#m1.PointPlot(xsm, ysm, Set=['o','b',2,1], Label='Grid')
m1.PointPlot(x1, y1, Set=['o','g',5,1], Label='All events')
m1.DrawGrid()
m1.Title('Gaussian smoothing of the catalogue in the Sichuan-Yunnan region')
m1.Show()
## Get system paths and write to the log
#print(gi.EXAMPLES_PATH, gi.DATA_PATH, gi.TEMP_PATH)
nwcat = qc.pathname+'\\mwcat1900utc.csv'
print(qc.pathname+'\\cn-cat-mw.txt')
qc.pathname = gi.TEMP_PATH
qc.network = catname
qc.start_year = '1970'
qc.end_year = '2020'
qc.time_window = 2.0
qc.dist_window = 15.0
## gi.__version__
gi.log.info(catname+'/catalog qctest')
## Catalogue quality-control checks
pathname,prefix = qc.create_figures_new(qc.qcinit(), db2)
## Generate the HTML report
qc.generate_html(pathname,prefix,qc.to_show)
## Compare earthquake catalogues
dbm.Header['Name'] = 'CENCm'
db2.Header['Name'] = 'CENC4.7'
## Build the earthquake catalogue database
catname = 'cnmw'
localpath = qc.pathname+'\\mwcat1900utc.csv'
db6 = cat.Database(catname)
header = ['Year', 'Month','Day','Hour','Minute','Second','Latitude','Longitude','MagSize','Depth','Log']
db6.Import0(localpath, Header = header, Delimiter= ',', flag = False)
db6.SetField('MagType', 'Mw')
db6.Info()
outputname = cp.create_figures_new(db = [db2, db6], pathname = gi.TEMP_PATH,
startyear = 1970 , endyear = 2015, dhrs = 8)
## Generate the HTML report
no_output_matches = True
cp.generate_html(outputname, no_output_matches)
|
CONFIG_PATH = "/home/lfhohmann/gcp-weather-and-forecast-scraper/config.yaml"
GOOGLE_DB_TABLE = "google_forecast"
GOOGLE_URL = "https://www.google.com/search?hl=en&lr=lang_en&ie=UTF-8&q=weather"
GOOGLE_HEADER = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:89.0) Gecko/20100101 Firefox/89.0",
"Language": "en-US,en;q=0.5",
}
WUNDERGROUND_DB_TABLE = "wunderground_pws"
WUNDERGROUND_DB_SHORT_TABLE = "wunderground_pws_short"
WUNDERGROUND_URL = "https://www.wunderground.com/dashboard/pws/"
WUNDERGROUND_HEADER = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36",
"Language": "en-US,en;q=0.5",
}
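# Hedged usage sketch (this module only defines constants; the use of
# `requests` and `yaml` here is an assumption about how the scraper consumes them):
#
#     import requests, yaml
#     with open(CONFIG_PATH) as f:
#         config = yaml.safe_load(f)
#     html = requests.get(GOOGLE_URL, headers=GOOGLE_HEADER, timeout=10).text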
|
from collections import OrderedDict
import logging
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.session import SparkSession
from typeguard import typechecked
from .logging_utils import _format_sql
from .settings import complete_settings_dict, ComparisonColumn
from .ordered_set import OrderedSet
logger = logging.getLogger(__name__)
def _add_left_right(columns_to_retain, name):
columns_to_retain.add(name + "_l")
columns_to_retain.add(name + "_r")
return columns_to_retain
def _add_unique_id_and_source_dataset(
cols_set: OrderedSet, uid_name: str, sd_name: str, retain_source_dataset_col: bool
):
if retain_source_dataset_col:
cols_set.add(f"{sd_name}_l")
cols_set.add(f"{uid_name}_l")
cols_set.add(f"{sd_name}_r")
cols_set.add(f"{uid_name}_r")
else:
cols_set.add(f"{uid_name}_l")
cols_set.add(f"{uid_name}_r")
return cols_set
def _get_select_expression_gammas(
settings: dict, retain_source_dataset_col: bool, retain_tf_cols: bool
):
"""Get a select expression which picks which columns to keep in df_gammas
Args:
settings (dict): A `splink` settings dictionary
retain_source_dataset: whether to retain the source dataset columns
retain_tf_cols: whether to retain the individual term frequency columns
Returns:
str: A select expression
"""
# Use an OrderedSet so we don't end up with duplicate columns to retain (order is preserved)
select_columns = OrderedSet()
uid = settings["unique_id_column_name"]
sds = settings["source_dataset_column_name"]
select_columns = _add_unique_id_and_source_dataset(
select_columns, uid, sds, retain_source_dataset_col
)
for col in settings["comparison_columns"]:
cc = ComparisonColumn(col)
if settings["retain_matching_columns"]:
for col_name in cc.columns_used:
select_columns = _add_left_right(select_columns, col_name)
if col["term_frequency_adjustments"]:
if retain_tf_cols:
select_columns = _add_left_right(select_columns, f"tf_{cc.name}")
select_columns.add(col["case_expression"])
for c in settings["additional_columns_to_retain"]:
select_columns = _add_left_right(select_columns, c)
if "blocking_rules" in settings:
if len(settings["blocking_rules"]) > 0:
select_columns.add("match_key")
return ", ".join(select_columns)
def _retain_source_dataset_column(settings_dict, df):
# Want to retain source dataset column in all cases
# except when link type is dedupe only
# and the column does not exist in the data
if settings_dict["link_type"] != "dedupe_only":
return True
source_dataset_colname = settings_dict.get(
"source_dataset_column_name", "source_dataset"
)
if f"{source_dataset_colname}_l" in df.columns:
return True
else:
return False
def _retain_tf_columns(settings_dict, df):
# If all necessary TF columns are in the data,
# make sure they are retained
tf_cols = [
f"tf_{cc['col_name']}" if "col_name" in cc else f"tf_{cc['custom_name']}"
for cc in settings_dict["comparison_columns"]
if cc["term_frequency_adjustments"]
]
cols = OrderedSet()
for c in tf_cols:
_add_left_right(cols, c)
return all(col in df.columns for col in cols)
def _sql_gen_add_gammas(
settings: dict,
df_comparison: DataFrame,
table_name: str = "df_comparison",
):
"""Build SQL statement that adds gamma columns to the comparison dataframe
Args:
settings (dict): `splink` settings dict
unique_id_col (str, optional): Name of the unique id column. Defaults to "unique_id".
table_name (str, optional): Name of the comparison df. Defaults to "df_comparison".
Returns:
str: A SQL string
"""
retain_source_dataset = _retain_source_dataset_column(settings, df_comparison)
retain_tf_cols = _retain_tf_columns(settings, df_comparison)
select_cols_expr = _get_select_expression_gammas(
settings, retain_source_dataset, retain_tf_cols
)
sql = f"""
select {select_cols_expr}
from {table_name}
"""
return sql
@typechecked
def add_gammas(
df_comparison: DataFrame,
settings_dict: dict,
spark: SparkSession,
unique_id_col: str = "unique_id",
):
"""Compute the comparison vectors and add them to the dataframe. See
https://imai.fas.harvard.edu/research/files/linkage.pdf for more details of what is meant by comparison vectors
Args:
df_comparison (spark dataframe): A Spark dataframe containing record comparisons, with records compared using the convention col_name_l, col_name_r
settings_dict (dict): The `splink` settings dictionary
spark (Spark session): The Spark session object
Returns:
Spark dataframe: A dataframe containing new columns representing the gammas of the model
"""
settings_dict = complete_settings_dict(settings_dict, spark)
sql = _sql_gen_add_gammas(settings_dict, df_comparison)
logger.debug(_format_sql(sql))
df_comparison.createOrReplaceTempView("df_comparison")
df_gammas = spark.sql(sql)
return df_gammas
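# Hedged usage sketch (illustrative only; the settings keys shown are inferred
# from this module and are not a complete or authoritative splink settings
# dictionary):
#
#     settings = {
#         "link_type": "dedupe_only",
#         "unique_id_column_name": "unique_id",
#         "source_dataset_column_name": "source_dataset",
#         "retain_matching_columns": True,
#         "additional_columns_to_retain": [],
#         "comparison_columns": [{"col_name": "first_name",
#                                 "term_frequency_adjustments": False}],
#         "blocking_rules": ["l.surname = r.surname"],
#     }
#     df_gammas = add_gammas(df_comparison, settings, spark)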
|
# Simple unit testing for prime servers. Run with -h for details.
#
# Tested with Python 3.6
#
# Eli Bendersky [http://eli.thegreenplace.net]
# This code is in the public domain.
import argparse
import itertools
import logging
import math
import queue
import random
import socket
import subprocess
import sys
import threading
import time
# The port number the server will listen on.
PORTNUM = 8070
def is_prime(num):
if num == 2:
return True
elif num < 2 or num % 2 == 0:
return False
else:
upto = int(math.sqrt(num)) + 1
for i in range(3, upto, 2):
if num % i == 0:
return False
return True
def server_runner(path, args, stop_event):
"""Runs the server as a subprocess until stop is requested.
Run this function in a separate thread!
path is the path to the server to run, with the given args. If 'path' ends
with .js, node is prepended. The args have to be a (possibly empty)
iterable.
stop_event is a threading.Event object; when it's set, the subprocess is
killed and this function returns.
"""
if path.endswith('.js'):
runcmd = ['node', path]
elif path.endswith('.py'):
runcmd = ['python', path]
else:
runcmd = [path]
runcmd.extend(args)
logging.info('server_runner: executing subprocess "{0}"'.format(runcmd))
proc = subprocess.Popen(runcmd)
logging.info('server_runner waiting for stop event')
stop_event.wait()
logging.info('server_runner sending kill to subprocess')
proc.terminate()
try:
proc.wait(timeout=0.2)
except subprocess.TimeoutExpired:
logging.info('server_runner: subprocess did not die within timeout')
def client_thread_runner(port, nums=[]):
"""Client.
"""
tid = threading.current_thread().ident
sockobj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sockobj.settimeout(1.0 * len(nums))
sockobj.connect(('localhost', port))
logging.info('Client {0} connected to server'.format(tid))
for num in nums:
sockobj.send(bytes(str(num), encoding='ascii'))
logging.info('Client {0} sent "{1}"'.format(tid, num))
reply = sockobj.recv(20)
logging.info('Client {0} received "{1}"'.format(tid, reply))
if is_prime(num):
assert b'prime' in reply
else:
assert b'composite' in reply
sockobj.shutdown(socket.SHUT_RDWR)
sockobj.close()
def test_main():
argparser = argparse.ArgumentParser('Server test')
argparser.add_argument('server_path', help='path to the server executable')
argparser.add_argument('-n', '--num-clients', default=4, type=int,
help='number of clients to launch simultaneously')
args = argparser.parse_args()
assert args.num_clients >= 1
logging.basicConfig(
level=logging.DEBUG,
format='%(levelname)s:%(asctime)s:%(message)s')
# Launch the server in a thread, listening on the port.
stop_event = threading.Event()
server_thread = threading.Thread(
target=server_runner,
args=(args.server_path, [str(PORTNUM)], stop_event))
server_thread.start()
time.sleep(0.2)
threads = []
# Generate some pseudo-random numbers, with guaranteed repetition to hit
# the caches.
nums = [random.randint(20, 1990) // 2 for i in range(8)]
nums.extend(nums[0:2])
for i in range(args.num_clients):
tester_thread = threading.Thread(
target=client_thread_runner,
args=(PORTNUM, nums))
tester_thread.start()
threads.append(tester_thread)
for thread in threads:
thread.join()
stop_event.set()
if __name__ == '__main__':
test_main()
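# Example invocation (hedged; the file names are placeholders):
#
#     python test_prime_server.py path/to/prime_server.py -n 8
#
# The server under test must accept the listening port as its first argument
# and reply with text containing "prime" or "composite" for each number sent.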
|
#Program - enter a point inside a sphere of a chosen radius and
#compute the (dipole) magnetic field at that point by summing the
#contributions of the unit lattice sites inside the sphere
import math
r= float(input("Radius:"))
ri = int(math.ceil(r))  # round the radius up to the nearest integer (lattice half-width)
x= float(input("x-coordinate:"))
while x>r or x<-r:
print ("x-coordinate cannot be larger than radius")
x= float(input("x-coordinate:"))
y= float(input("y-coordinate:"))
while y>r or y<-r:
print ("y-coordinate cannot be larger than radius")
y= float(input("y-coordinate:"))
z= float(input("z-coordinate:"))
while z>r or z<-r:
print ("z-coordinate cannot be larger than radius")
z= float(input("z-coordinate:"))
rij=math.sqrt(((x)**2)+((y)**2)+((z)**2))
while rij>r:
print ("point is outside the circle")
x=float(input("x-coordinate:"))
while x>r or x<-r:
print ("x-coordinate cannot be larger than radius")
x=float(input("x-coordinate:"))
y=float(input("y-coordinate:"))
while y>r or y<-r:
print ("y-coordinate cannot be larger than radius")
y=float(input("y-coordinate:"))
z=float(input("z-coordinate:"))
while z>r or z<-r:
print ("z-coordinate cannot be larger than radius")
z=float(input("z-coordinate:"))
rij=math.sqrt(((x)**2)+((y)**2)+((z)**2))
print ('Point:(',x,',',y,',',z,')')
# work with the absolute values of the coordinates from here on
x = abs(x)
y = abs(y)
z = abs(z)
xone=ri-x
yone=ri-y
zone=ri-z
xtwo=(-1)*(x+ri)
ytwo=(-1)*(y+ri)
ztwo=(-1)*(z+ri)
totalx=0
totaly=0
totalz=0
while xone>=xtwo:
while yone>=ytwo:
while zone>=ztwo:
if xone==0 and yone==0 and zone==0:
zone=zone-1
else:
rij=math.sqrt(((xone)**2)+((yone)**2)+((zone)**2))
rijc=math.sqrt(((x+xone)**2)+((y+yone)**2)+((z+zone)**2))
if rijc>r:
zone=zone-1
else:
Hx=((3*xone*zone)/((rij)**5))
Hy=((3*yone*zone)/((rij)**5))
Hz=(((2*((zone)**2))-((xone)**2)-((yone)**2))/((rij)**5))
totalx=totalx+Hx
totaly=totaly+Hy
totalz=totalz+Hz
zone=zone-1
yone=yone-1
zone=ri+z
xone=xone-1
yone=ri+y
H=math.sqrt(((totalx)**2)+((totaly)**2)+((totalz)**2))
if H<(10**(-15)):
print ("total H: 0.0")
else:
print ("total H:",H)
print(totalx)
print(totaly)
print(totalz)
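# Note on the summation above: each lattice site contributes the Cartesian
# components of a unit dipole field oriented along z, evaluated at the offset
# (xone, yone, zone) from the chosen point:
#   Hx = 3*x*z / r**5,  Hy = 3*y*z / r**5,  Hz = (2*z**2 - x**2 - y**2) / r**5
# with r = sqrt(x**2 + y**2 + z**2); sites outside the sphere (rijc > r) and
# the site coinciding with the point itself are skipped.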
|
# Easy to use offline chat archive.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: August 1, 2018
# URL: https://github.com/xolox/python-chat-archive
"""
Usage: chat-archive [OPTIONS] [COMMAND]
Easy to use offline chat archive that can gather chat message
history from Google Talk, Google Hangouts, Slack and Telegram.
Supported commands:
- The 'sync' command downloads new chat messages from supported chat
services and stores them in the local archive (an SQLite database).
- The 'search' command searches the chat messages in the local archive
for the given keyword(s) and lists matching messages.
- The 'list' command lists all messages in the local archive.
- The 'stats' command shows statistics about the local archive.
- The 'unknown' command searches for conversations that contain messages from
an unknown sender and allows you to enter the name of a new contact to
associate with all of the messages from an unknown sender. Conversations
involving multiple unknown senders are not supported.
Supported options:
-C, --context=COUNT
Print COUNT messages of output context during 'chat-archive search'. This
works similarly to 'grep -C'. The default value of COUNT is 3.
-f, --force
Retry synchronization of conversations where errors were previously
encountered. This option is currently only relevant to the Google Hangouts
backend, because I kept getting server errors when synchronizing a few
specific conversations and I didn't want to keep seeing each of those
errors during every synchronization run :-).
-c, --color=CHOICE, --colour=CHOICE
Specify whether ANSI escape sequences for text and background colors and
text styles are to be used or not, depending on the value of CHOICE:
- The values 'always', 'true', 'yes' and '1' enable colors.
- The values 'never', 'false', 'no' and '0' disable colors.
- When the value is 'auto' (this is the default) then colors will
only be enabled when an interactive terminal is detected.
-l, --log-file=LOGFILE
Save logs at DEBUG verbosity to the filename given by LOGFILE. This option
was added to make it easy to capture the log output of an initial
synchronization that will be downloading thousands of messages.
-p, --profile=FILENAME
Enable profiling of the chat-archive application to make it possible to
analyze performance problems. Python profiling data will be saved to
FILENAME every time database changes are committed (making it possible to
inspect the profile while the program is still running).
-v, --verbose
Increase logging verbosity (can be repeated).
-q, --quiet
Decrease logging verbosity (can be repeated).
-h, --help
Show this message and exit.
"""
# Standard library modules.
import getopt
import html
import logging
import os
import sys
# External dependencies.
import coloredlogs
from humanfriendly import coerce_boolean, compact, concatenate, format_path, format_size, parse_path, pluralize
from humanfriendly.prompts import prompt_for_input
from humanfriendly.terminal import HTMLConverter, connected_to_terminal, find_terminal_size, output, usage, warning
from property_manager import lazy_property, mutable_property
from sqlalchemy import func
from verboselogs import VerboseLogger
# Modules included in our package.
from chat_archive import ChatArchive
from chat_archive.emoji import normalize_emoji
from chat_archive.html import HTMLStripper, text_to_html
from chat_archive.html.keywords import KeywordHighlighter
from chat_archive.html.redirects import RedirectStripper
from chat_archive.models import Contact, Conversation, Message
from chat_archive.utils import utc_to_local
FORMATTING_TEMPLATES = dict(
conversation_delimiter='<span style="color: green">{text}</span>',
conversation_name='<span style="font-weight: bold; color: #FCE94F">{text}</span>',
keyword_highlight='<span style="color: black; background-color: yellow">{text}</span>',
message_backend='<span style="color: #C4A000">({text})</span>',
message_contacts='<span style="color: blue">{text}</span>',
message_delimiter='<span style="color: #555753">{text}</span>',
message_timestamp='<span style="color: green">{text}</span>',
)
"""The formatting of output, specified as HTML with placeholders."""
UNKNOWN_CONTACT_LABEL = "Unknown"
"""The label for contacts without a name or email address (a string)."""
# Initialize a logger for this module.
logger = VerboseLogger(__name__)
def main():
"""Command line interface for the ``chat-archive`` program."""
# Enable logging to the terminal.
coloredlogs.install()
# Parse the command line options.
program_opts = dict()
command_name = None
try:
options, arguments = getopt.gnu_getopt(
sys.argv[1:],
"C:fl:c:p:vqh",
[
"context=",
"force",
"log-file=",
"color=",
"colour=",
"profile=",
"verbose",
"quiet",
"help",
],
)
for option, value in options:
if option in ("-C", "--context"):
program_opts["context"] = int(value)
elif option in ("-f", "--force"):
program_opts["force"] = True
elif option in ("-l", "--log-file"):
handler = logging.FileHandler(parse_path(value))
handler.setFormatter(
logging.Formatter(
fmt="%(asctime)s %(name)s[%(process)d] %(levelname)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
)
handler.setLevel(logging.DEBUG)
logging.root.addHandler(handler)
logging.root.setLevel(logging.NOTSET)
elif option in ("-c", "--color", "--colour"):
mapping = dict(always=True, never=False)
program_opts["use_colors"] = mapping[value] if value in mapping else coerce_boolean(value)
elif option in ("-p", "--profile"):
program_opts["profile_file"] = parse_path(value)
elif option in ("-v", "--verbose"):
coloredlogs.increase_verbosity()
elif option in ("-q", "--quiet"):
coloredlogs.decrease_verbosity()
elif option in ("-h", "--help"):
usage(__doc__)
sys.exit(0)
else:
assert False, "Unhandled option!"
# Make sure the operator provided a command.
if not arguments:
usage(__doc__)
sys.exit(0)
except Exception as e:
warning("Failed to parse command line arguments: %s", e)
sys.exit(1)
try:
# We extract any search keywords from the command line arguments before
# initializing an instance of the UserInterface class, to enable
# initialization of the KeywordHighlighter class.
if arguments[0] == "search":
program_opts["keywords"] = arguments[1:]
# Initialize the chat archive.
with UserInterface(**program_opts) as program:
# Validate the requested command.
command_name = arguments.pop(0)
method_name = "%s_cmd" % command_name
if not hasattr(program, method_name):
warning("Error: Invalid command name '%s'!", command_name)
sys.exit(1)
# Execute the requested command.
command_fn = getattr(program, method_name)
command_fn(arguments)
except KeyboardInterrupt:
logger.notice("Interrupted by Control-C ..")
sys.exit(1)
except Exception:
logger.exception("Aborting due to unexpected exception!")
sys.exit(1)
class UserInterface(ChatArchive):
"""The Python API for the command line interface for the ``chat-archive`` program."""
@mutable_property
def context(self):
"""The number of messages of output context to print during searches (defaults to 3)."""
return 3
@mutable_property(cached=True)
def use_colors(self):
"""Whether to output ANSI escape sequences for text colors and styles (a boolean)."""
return connected_to_terminal()
@lazy_property
def html_to_ansi(self):
"""
An :class:`~humanfriendly.terminal.HTMLConverter` object that uses
:func:`.normalize_emoji()` as a text pre-processing callback.
"""
return HTMLConverter(callback=normalize_emoji)
@lazy_property
def redirect_stripper(self):
"""An :class:`.RedirectStripper` object."""
return RedirectStripper()
@lazy_property
def html_to_text(self):
"""An :class:`.HTMLStripper` object."""
return HTMLStripper()
@lazy_property
def keyword_highlighter(self):
"""A :class:`.KeywordHighlighter` object based on :attr:`keywords`."""
return KeywordHighlighter(highlight_template=FORMATTING_TEMPLATES["keyword_highlight"], keywords=self.keywords)
@mutable_property
def keywords(self):
"""A list of strings with search keywords."""
return []
@mutable_property
def timestamp_format(self):
"""The format of timestamps (defaults to ``%Y-%m-%d %H:%M:%S``)."""
return "%Y-%m-%d %H:%M:%S"
def list_cmd(self, arguments):
"""List all messages in the local archive."""
self.render_messages(self.session.query(Message).order_by(Message.timestamp))
def search_cmd(self, arguments):
"""Search the chat messages in the local archive for the given keyword(s)."""
results = self.search_messages(arguments)
if self.context > 0:
results = self.gather_context(results)
self.render_messages(results)
def stats_cmd(self, arguments):
"""Show some statistics about the local chat archive."""
logger.info("Statistics about %s:", format_path(self.database_file))
logger.info(" - Number of contacts: %i", self.num_contacts)
logger.info(" - Number of conversations: %i", self.num_conversations)
logger.info(" - Number of messages: %i", self.num_messages)
logger.info(" - Database file size: %s", format_size(os.path.getsize(self.database_file)))
logger.info(
" - Size of %s: %s",
pluralize(self.num_messages, "plain text chat message"),
format_size(self.session.query(func.coalesce(func.sum(func.length(Message.text)), 0)).scalar()),
)
logger.info(
" - Size of %s: %s",
pluralize(self.num_html_messages, "HTML formatted chat message"),
format_size(self.session.query(func.coalesce(func.sum(func.length(Message.html)), 0)).scalar()),
)
def sync_cmd(self, arguments):
"""Download new chat messages from the supported services."""
self.synchronize(*arguments)
def unknown_cmd(self, arguments):
"""
Find private conversations with messages from an unknown sender and
interactively prompt the operator to provide a name for a new contact
to associate the messages with.
"""
logger.info("Searching for private conversations with unknown sender ..")
for conversation in self.session.query(Conversation).filter(Conversation.is_group_conversation == False):
if conversation.have_unknown_senders:
logger.info("Private conversation %i includes messages from unknown senders:", conversation.id)
self.render_messages(conversation.messages[:10])
full_name = prompt_for_input("Name for new contact (leave empty to skip): ")
if full_name:
words = full_name.split()
kw = dict(account=conversation.account, first_name=words.pop(0))
if words:
kw["last_name"] = " ".join(words)
contact = Contact(**kw)
self.session.add(contact)
for message in conversation.messages:
if not message.sender:
message.sender = contact
self.commit_changes()
def generate_html(self, name, text):
"""
Generate HTML based on a named format string.
:param name: The name of an HTML format string in
:data:`FORMATTING_TEMPLATES` (a string).
:param text: The text to interpolate (a string).
:returns: The generated HTML (a string).
This method does not escape the `text` given to it, in other words it
is up to the caller to decide whether embedded HTML is allowed or not.
"""
template = FORMATTING_TEMPLATES[name]
return template.format(text=text)
def gather_context(self, messages):
"""Enhance search results with context (surrounding messages)."""
related = []
for msg in messages:
# Gather older messages.
older_query = msg.older_messages.order_by(Message.timestamp.desc()).limit(self.context)
logger.debug("Querying older messages: %s", older_query)
for other_msg in reversed(older_query.all()):
if other_msg not in related:
related.append(other_msg)
yield other_msg
# Yield one of the given messages.
if msg not in related:
related.append(msg)
yield msg
# Gather newer messages.
newer_query = msg.newer_messages.order_by(Message.timestamp).limit(self.context)
logger.debug("Querying newer messages: %s", newer_query)
for other_msg in newer_query.all():
if other_msg not in related:
related.append(other_msg)
yield other_msg
def render_messages(self, messages):
"""Render the given message(s) on the terminal."""
previous_conversation = None
previous_message = None
# Render a horizontal bar as a delimiter between conversations.
num_rows, num_columns = find_terminal_size()
conversation_delimiter = self.generate_html("conversation_delimiter", "─" * num_columns)
for i, msg in enumerate(messages):
if msg.conversation != previous_conversation:
# Mark context switches between conversations.
logger.verbose("Rendering conversation #%i ..", msg.conversation.id)
self.render_output(conversation_delimiter)
self.render_output(self.render_conversation_summary(msg.conversation))
self.render_output(conversation_delimiter)
elif previous_message and self.keywords:
# Mark gaps in conversations. This (find_distance()) is a rather
# heavy check so we only do this when rendering search results.
distance = msg.find_distance(previous_message)
if distance > 0:
message_delimiter = "── %s omitted " % pluralize(distance, "message")
message_delimiter += "─" * int(num_columns - len(message_delimiter))
self.render_output(self.generate_html("message_delimiter", message_delimiter))
# We convert the message metadata and the message text separately, so
# that a chat message whose HTML contains a single <p> tag doesn't cause
# two newlines to be emitted in between the message metadata and the
# message text.
message_metadata = self.prepare_output(
" ".join(
[
self.render_timestamp(msg.timestamp),
self.render_backend(msg.conversation.account.backend),
self.render_contacts(msg),
]
)
)
message_contents = self.normalize_whitespace(self.prepare_output(self.render_text(msg)))
output(message_metadata + " " + message_contents)
# Keep track of the previous conversation and message.
previous_conversation = msg.conversation
previous_message = msg
def normalize_whitespace(self, text):
"""
Normalize the whitespace in a chat message before rendering on the terminal.
:param text: The chat message text (a string).
:returns: The normalized text (a string).
This method works as follows:
- First leading and trailing whitespace is stripped from the text.
- When the resulting text consists of a single line, it is processed
using :func:`~humanfriendly.text.compact()` and returned.
- When the resulting text contains multiple lines the text is prefixed
with a newline character, so that the chat message starts on its own
line. This ensures that messages requiring vertical alignment render
properly (for example a table drawn with ``|`` and ``-`` characters).
"""
# Check for multi-line chat messages.
stripped = text.strip()
if "\n" in stripped:
# When the message contains "significant" newline
# characters we start the message on its own line.
return "\n" + stripped
else:
# When the message doesn't contain significant newline characters
# we compact all whitespace in the message. I added this when I
# found that quite a few of the HTML fragments in my personal chat
# archive contain very inconsistent whitespace, which bothered me
# when I viewed them on the terminal.
return compact(text)
def render_conversation_summary(self, conversation):
"""Render a summary of which conversation a message is part of."""
# Gather the names of the participants in the conversation, but exclude the
# operator's name from private conversations (we can safely assume they
# know who they are 😇).
participants = sorted(
set(
contact.unambiguous_name
if conversation.is_group_conversation
else (contact.full_name or UNKNOWN_CONTACT_LABEL)
for contact in conversation.participants
if conversation.is_group_conversation or not self.is_operator(contact)
)
)
parts = [
self.get_backend_name(conversation.account.backend),
"group" if conversation.is_group_conversation else "private",
"chat",
]
if conversation.name:
parts.append(self.generate_html("conversation_name", html.escape(conversation.name)))
parts.append("with")
participants_html = concatenate(map(html.escape, participants))
if conversation.is_group_conversation:
parts.append(pluralize(len(participants), "participant"))
parts.append("(%s)" % participants_html)
else:
parts.append(self.generate_html("conversation_name", participants_html))
if conversation.account.name_is_significant:
parts.append("in %s account" % conversation.account.name)
return " ".join(parts)
def render_contacts(self, message):
"""Render a human friendly representation of a message's contact(s)."""
contacts = [self.get_contact_name(message.sender)]
if message.conversation.is_group_conversation and message.recipient:
# In Google Talk group chats can contain private messages between
# individuals. This is how we represent those messages.
contacts.append(self.get_contact_name(message.recipient))
return self.generate_html("message_contacts", "%s:" % " → ".join(contacts))
def prepare_output(self, text):
"""
Prepare text for rendering on the terminal.
:param text: The HTML text to render (a string).
:returns: The rendered text (a string).
When :attr:`use_colors` is :data:`True` this method first uses
:attr:`keyword_highlighter` to highlight search matches in the given
text and then it converts the string from HTML to ANSI escape sequences
using :attr:`html_to_ansi`.
When :attr:`use_colors` is :data:`False` then :attr:`html_to_text` is
used to convert the given HTML to plain text. In this case keyword
highlighting is skipped.
"""
# Log the HTML encoded output to enable debugging of issues in
# the HTML to ANSI conversion process (it's rather nontrivial).
logger.debug("Rendering HTML output: %r", text)
if self.use_colors:
if self.keywords:
text = self.keyword_highlighter(text)
logger.debug("HTML with keywords highlighted: %r", text)
text = self.html_to_ansi(text)
logger.debug("Text with ANSI escape sequences: %r", text)
else:
text = self.html_to_text(text)
logger.debug("HTML converted to plain text: %r", text)
return text
def render_output(self, text):
"""
Render text on the terminal.
:param text: The HTML text to render (a string).
Refer to :func:`prepare_output()` for details about how `text`
is converted from HTML to text with ANSI escape sequences.
"""
output(self.prepare_output(text))
def get_contact_name(self, contact):
"""
Get a short string describing a contact (preferably their first name,
but if that is not available then their email address will have to do).
If no useful information is available :data:`UNKNOWN_CONTACT_LABEL` is
returned so as to explicitly mark the absence of more information.
"""
if contact:
if contact.first_name:
return html.escape(contact.first_name)
for email_address in contact.email_addresses:
return html.escape(email_address.value)
return UNKNOWN_CONTACT_LABEL
def render_text(self, message):
"""Prepare the text of a chat message for rendering on the terminal."""
return self.redirect_stripper(message.html or text_to_html(message.text, callback=normalize_emoji))
def render_timestamp(self, value):
"""Render a human friendly representation of a timestamp."""
return self.generate_html("message_timestamp", utc_to_local(value).strftime(self.timestamp_format))
def render_backend(self, value):
"""Render a human friendly representation of a chat message backend."""
return self.generate_html("message_backend", value)
|
import pygame
from time import sleep
from random import randrange
pygame.init()
|
# Copyright (c) 2021 ralabo.jp
# This software is released under the MIT License.
# see https://opensource.org/licenses/mit-license.php
# ====================================================
from common import com, TargetDomain
from flow.flow_item import FlowItem
class FlowDependMachineSection(object):
"""
Control that depends on machine_type, section_index and domain.
flow = FlowDependMachineSection()
"""
machine_types = ['ToyCar','ToyTrain','fan', 'gearbox','pump','slider','valve']
section_indices = ['00','01','02','03','04','05']
targets = ['source', 'target']
def __init__(self, f_target):
self.f_target = f_target
pass
class _Item(FlowItem):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def items(self):
"""
Yield _Item objects built from (machine_type, section_index, target) tuples.
"""
target = '*' # ignore target
usable_machine_types = com.param['limit']['usable_machine_types']
for machine_type in self.machine_types:
if usable_machine_types and machine_type not in usable_machine_types:
continue # skip
for section_index in self.section_indices:
if com.mode: # dev_mode
if section_index not in ['00','01','02']:
continue # skip
yield self._Item(machine_type, section_index, target)
def domain(self, machine_type, section_index, target):
target = '*' # ignore target
return TargetDomain(machine_type, section_index, target)
|
import itertools
import json
import os
import random
import sys
from os.path import isdir, isfile, join, basename
from math import cos, pi, sin, sqrt, acos
from tqdm import tqdm
import multiprocessing
import scipy.io as sio
import numpy as np
import cv2
from cv2 import resize as resize_, imread
from joblib import Parallel, delayed
import progressbar as pb
# python3
import pickle
# from .libs import *
# safe dictionary copy
from copy import deepcopy as dc
random.seed(0)
import ref
# transform annotation to array format
def ann_to_array(H, d, max_len=1):
# junction_order = H.get('junction_order', 'off_cell_center')
region_size = H['region_size']
inp_h = inp_w = H['image_size']
grid_h = grid_w = H['grid_size']
assert region_size == inp_h // grid_h
junctions = d['junction']
theta = d['theta_bin']
num_junc = len(junctions)
junction_residual = np.zeros((grid_h, grid_w, max_len, 2), dtype=np.float)
junction_flags = np.zeros((grid_h, grid_w, max_len), dtype=np.int32)
theta_bin = np.zeros(
(grid_h, grid_w, max_len, H['num_bin']), dtype=np.int32)
theta_bin_residual = np.zeros(
(grid_h, grid_w, max_len, H['num_bin']), dtype=np.float)
focus_range = 0.5 #H['focus_size']
for h in range(grid_h):
for w in range(grid_w):
ccx, ccy = w + 0.5, h + 0.5
cell_center_w, cell_center_h = ccx * region_size, ccy * region_size
unsorted_junctions = []
for idx, (jx, jy) in enumerate(junctions):
#px, py = jx / float(region_size), jy /float(region_size)
if abs(jx - cell_center_w) <= focus_range * region_size and abs(jy - cell_center_h) <= focus_range * region_size:
ox, oy = jx - cell_center_w, jy - cell_center_h
th = theta[idx]
unsorted_junctions += [(ox, oy, th)]
if len(unsorted_junctions) == 0:
continue
unsorted_junctions = unsorted_junctions[:max_len]
#print (unsorted_junctions[0][:2])
sorted_junctions = sorted( unsorted_junctions, key=lambda x: x[0]**2 + x[1]**2)
num_keep = min(max_len, len(sorted_junctions))
for idx_sorted in range(num_keep):
ox, oy, th = sorted_junctions[idx_sorted]
junction_residual[h, w, idx_sorted, :] = np.array((ox, oy), dtype=np.float32)
order_th = len(th)
if H['num_classes'] > 2:
junction_flags[h, w, idx_sorted] = min(order_th - 1, 5)
else:
junction_flags[h, w, idx_sorted] = 1
for _, tt in enumerate(th):
bin_idx, bin_residual = tt
bin_idx = int(bin_idx)
theta_bin[h, w, idx_sorted, bin_idx] = 1
theta_bin_residual[h, w, idx_sorted, bin_idx] = float(bin_residual)
output = {}
output['junction'] = junction_residual
output['junction_flags'] = junction_flags
output['theta_bin_conf'] = theta_bin
output['theta_bin_residual'] = theta_bin_residual
return output
## Calculate the bin index and residual of junction angles.
def make_bin(bn, ths):
bin_num = bn
bin_width = 360. / float(bin_num)
bin_max_width = 4
centers = np.array(
[i * bin_width for i in range(bin_num)] + [360.], dtype=np.float32)
th_bin = [None for _ in ths]
for cnt0, th in enumerate(ths):
bin_t = [None for _ in th]
for cnt, angle in enumerate(th):
#assert scale <= line_max_len, "scale should be smaller than max length of lines."
idx = round(float(angle) / bin_width)
idx = int(idx)
idx1 = idx if idx != bin_num else 0
bin_t[cnt] = (idx1, angle - centers[idx])
th_bin[cnt0] = bin_t
return th_bin
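# Worked example for make_bin (with bn=15, i.e. a 24-degree bin width): an
# angle of 100 degrees maps to bin index round(100/24) = 4 with residual
# 100 - 4*24 = 4 degrees, so make_bin(15, [[100.]]) returns [[(4, 4.0)]].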
def dist(p1, p2):
return sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)
def lineangle(p1, p2):
""" p1 -> p2 """
x1, y1 = p1
x2, y2 = p2
dist = sqrt((x1 - x2)**2 + (y1 - y2)**2)
if y1 < y2:
theta = acos((x2 - x1) / dist)
else:
theta = acos((x1 - x2) / dist)
return theta / pi * 180.
# Resize image and annotation to input size of network.
def resize_and_transform(H, d, max_len = 1):
num_bin = H['num_bin']
inp_h = inp_w = H['image_size']
img, junctions, points = d['img'], d['junction'], d['points']
cur_h, cur_w = img.shape[:2]
if cur_h == 0 or cur_w == 0:
print("{} {} {}"%(d['imgname'], cur_h, cur_w))
sw, sh = float(inp_w) / float(cur_w), float(inp_h) / float(cur_h)
try:
resized_img = resize_(img, (inp_h, inp_w), cv2.INTER_CUBIC)
d['junction'] = [(sw * x, sh * y) for x, y in junctions]
d['points'] = [(sw * x, sh * y) for x, y in points]
except ValueError:
print("cannot resize ", d['imgname'])
raise ValueError
ntheta = resize_theta(d['theta'], (sw, sh))
d['theta'] = ntheta
d['theta_bin'] = make_bin(num_bin, ntheta)
d['img'] = resized_img
output = ann_to_array(H, d)
output['imgname'] = d['imgname']
output['image'] = resized_img
return output
# when resizing image, the junction angles should be
# calculated again.
def resize_theta(ths, scale_param, with_conf=False):
new_ths = [None for _ in ths]
for cnt0, th in enumerate(ths):
bin_t = [None for _ in th]
if with_conf:
for cnt, (t, conf_t) in enumerate(th):
x = cos(t * pi / 180.) * scale_param[0]
y = sin(t * pi / 180.) * scale_param[1]
dist = sqrt(x**2 + y**2)
if abs(y) <= 0.001:
nt = 180. if x < 0 else 0.
elif y > 0:
nt = acos(x / dist) / pi * 180.
else:
nt = 360. - acos(x / dist) / pi * 180.
bin_t[cnt] = (nt, conf_t)
else:
for cnt, t in enumerate(th):
x = cos(t * pi / 180.) * scale_param[0]
y = sin(t * pi / 180.) * scale_param[1]
dist = sqrt(x**2 + y**2)
if abs(y) <= 0.001:
nt = 180. if x < 0 else 0.
elif y > 0:
nt = acos(x / dist) / pi * 180.
else:
nt = 360. - acos(x / dist) / pi * 180.
bin_t[cnt] = nt
new_ths[cnt0] = bin_t
return new_ths
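# In effect, resize_theta maps each angle theta to
#   atan2(sh * sin(theta), sw * cos(theta))   (in degrees, in [0, 360)),
# i.e. the direction of the original unit vector after anisotropic scaling by
# (sw, sh); the acos branches above just handle the quadrants explicitly.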
def resize_points(pts, scale_param):
sx, sy = scale_param
points = [None for _ in pts]
for idx, (x, y) in enumerate(pts):
points[idx] = (x * sx, y * sy)
return points
### make crop augmentation.
def make_crop(d, bbox, suffix):
d_crop = dc(d)
(x0, y0, x1, y1) = bbox
I = d['img']
junction = d['junction']
theta = d['theta']
d_crop['img'] = I[y0:y1, x0:x1, :]
d_crop['imgname'] = "{}_{}.jpg".format(d['imgname'][:-4], suffix[0])
new_junction = []
new_theta = []
for pp, ths in zip(junction, theta):
x, y = pp
if x >= x0 and x < x1 and y >= y0 and y < y1:
new_junction += [(x - x0, y - y0)]
new_theta += [ths]
d_crop['points'] = [(x - x0, y - y0) for x, y in d['points']]
d_crop['junction'] = new_junction
d_crop['theta'] = new_theta
return d_crop
# make mirror augmentation.
def make_mirror(d, axis=1):
I = d['img']
h, w = I.shape[:2]
d_mirror = dc(d)
suffix = 'mr' if axis == 2 else 'ud'
d_mirror['imgname'] = "{}_{}.jpg".format(d['imgname'][:-4], suffix)
if axis == 2:
d_mirror['img'] = I[:, ::-1, :]
d_mirror['junction'] = [(w - x, y) for x, y in d['junction']]
d_mirror['points'] = [(w - x, y) for x, y in d['points']]
d_mirror['theta'] = [[180. - th if th < 180. else 540. - th for th in point_th]
for point_th in d['theta']]
elif axis == 1:
d_mirror['img'] = I[::-1, :, :]
d_mirror['junction'] = [(x, h - y) for x, y in d['junction']]
d_mirror['points'] = [(x, h - y) for x, y in d['points']]
d_mirror['theta'] = [[360. - th for th in point_th]
for point_th in d['theta']]
return(d_mirror)
# Transform the raw data and augment, store it.
def save_ann(H, d, save_dir, max_len = 1, split='train'):
grid_h = grid_w = H['grid_size']
image_h = image_w = H['image_size']
num_bin = H['num_bin']
I = d['img']
if len(I.shape) < 3:
raise ValueError("expected a 3-channel image: {}".format(d['imgname']))
if I.shape[2] == 4:
I = I[:, :, :3]
td = dc(d)
td['img'] = I
td['theta'] = theta = [[ x for x, _ in th] for th in d['theta']]
annlist = []
annlist += [td]
if split != 'test':
annlist += [make_mirror(td, axis=1), make_mirror(td, axis=2)]
h, w = I.shape[:2]
junction = td['junction']
crop_list = []
if h > w:
x0, x1 = 0, w
y0, y1 = 0, w
crop_list += [(x0, y0, x1, y1, 'top')]
y0, y1 = int((h - w) / 2.), int((h-w)/ 2.) + w
crop_list += [(x0, y0, x1, y1, 'middle')]
y0, y1 = h - w, h
crop_list += [(x0, y0, x1, y1, 'bottom')]
elif w > h:
y0, y1 = 0, h
x0, x1 = 0, h
crop_list += [(x0, y0, x1, y1, 'left')]
x0, x1 = int((w - h) / 2.), int( (w - h)/2. ) + h
crop_list += [(x0, y0, x1, y1, 'middle')]
x0, x1 = w - h, w
crop_list += [(x0, y0, x1, y1, 'right')]
else:
x0, y0 = int(h * 0.25), int(h * 0.25)
x1, y1 = int(h * 0.75), int(h * 0.75)
crop_list += [(x0, y0, x1, y1, 'center')]
annlist += [make_crop(td, (x0, y0, x1, y1), suffix) for x0, y0, x1, y1, suffix in crop_list]
outputs = [resize_and_transform(H, t) for t in annlist]
for fn in outputs:
junc_flags = fn['junction_flags']
num_junc = np.sum(junc_flags.astype(float))
if split != 'test' and num_junc <= 5:
continue
imgname = fn['imgname']
dir_to_save = join(save_dir, "{}_{}_{}".format(image_h, grid_h, num_bin))
if not isdir(dir_to_save):
os.mkdir(dir_to_save)
with open('{}/{}.pickle'.format(dir_to_save, imgname[:-4]), 'wb') as handle:
pickle.dump(fn, handle, protocol=pickle.HIGHEST_PROTOCOL)
cv2.imwrite(join(dir_to_save, fn['imgname']), fn['image'])
## load the pickle files.
def loading(fn, src_dir):
with open(src_dir / fn, 'rb') as handle:
d = pickle.load(handle, encoding='latin1')
return d
# Junction label augmentation cannot be easily done online.
# Precompute the label, store locally.
debug = False
def load_data_and_save(H, src_dir, save_dir, filenames, split='train', use_mp=True):
print("src_dir: {} save_dir: {}".format(src_dir, save_dir))
bar = pb.ProgressBar(widgets=[ '[ ', pb.Bar(), ' ][ ', pb.Timer(), ' ]'], max_value=pb.UnknownLength)
def loadDataset():
return Parallel(n_jobs=1)(delayed(loading)(f, src_dir) for f in bar(filenames))
if not debug:
print("== loading raw data ==")
anno = loadDataset()
total_num = len(anno)
print("== {} raw images data loaded ==".format(total_num))
cpu_num = multiprocessing.cpu_count()
cpu_num = min(30, cpu_num)
bar = pb.ProgressBar(widgets=[ '[ ', pb.Bar(), ' ][ ', pb.Timer(), ' ]'], max_value=pb.UnknownLength)
if use_mp:
Parallel(n_jobs=cpu_num)(delayed(save_ann)(H, d, save_dir, split=split)
for d in bar(anno)) # max_len default to 1.
else:
print("== single process processing")
for d in anno: # max_len default to 1.
save_ann(H, d, save_dir, split=split)
image_h, grid_h, num_bin = H['image_size'], H['grid_size'], H['num_bin']
prefix = str(join(save_dir, "{}_{}_{}".format(image_h, grid_h, num_bin)))
if split=='test':
print("### finished test split")
return 0
train_name = prefix + "_train.txt"
val_name = prefix + "_val.txt"
test_name = prefix + "_test.txt"
dirname = prefix
def valid_pickle_file(x):
return x.endswith('.jpg') and isfile("{}/{}.pickle".format(dirname, x[:-4]))
imgnames = [x for x in os.listdir(dirname) if valid_pickle_file(x)]
train, val, test = [], [], []
def read_list_from_file(f_):
with open(f_, 'r') as h:
filelist = h.read().splitlines()
filelist = [x[:-4] for x in filelist]
return filelist
def write_list_to_file(f_, filelist):
with open(f_, 'w') as h:
for l in filelist:
h.write("{}\n".format(l))
trainImgs = read_list_from_file(ref.data_root / 'v1.1/train.txt')
testImgs = read_list_from_file(ref.data_root / 'v1.1/test.txt')
random.shuffle(trainImgs)
valImgs = trainImgs[:200]  # hold out 200 shuffled images for validation
trainImgs = trainImgs[200:]
print("train: {} | val: {} | test: {}".format(len(trainImgs), len(valImgs), len(testImgs)))
for f in imgnames:
if '_' in f:
prefix_ = f.split('_')[0]
if prefix_ in trainImgs: train += [f]
if prefix_ in valImgs: val += [f]
else:
prefix_ = f[:-4]
if prefix_ in trainImgs: train += [f]
if prefix_ in valImgs: val += [f]
if prefix_ in testImgs: test += [f]
print("train: {} | val: {} | test: {}".format(len(train), len(val), len(test)))
write_list_to_file(train_name, train)
write_list_to_file(val_name, val)
def create_dataset(H, split, use_mp = True):
import time
src_dir = ref.data_root / 'pointlines'
save_dir = ref.junc_data_root / 'processed'
with open(ref.data_root / 'v1.1/{}.txt'.format(split), 'r') as f:
filelst = f.read().splitlines()
filelst = [x[:-4] + '.pkl' for x in filelst]
print(" #{split} images: {0}".format(len(filelst), split=split))
start = time.time()
load_data_and_save(H, src_dir=src_dir, save_dir=save_dir, filenames=filelst, use_mp = use_mp)
print(" Elasped time: {:.2f}".format(time.time() - start))
|
"""Unit test for the toppra.dracula.run_topp() wrapper."""
import glob
import os
import unittest
import numpy as np
import toppra.dracula as tdrac
class TestRunTopp(unittest.TestCase):
"""Test RunTopp()."""
# setup test data only once as they aren't modified
glob_regex = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"test_data",
"test_waypts_jnt_*.txt",
)
paths = sorted(glob.glob(glob_regex))
waypts_list = [np.loadtxt(path) for path in paths]
@staticmethod
def _gen_limits(waypts):
"""Generate maximum vlim and alim arrays given waypts.
Assume the last 7-dof belong to a cobot. The rest is filled with 3.
"""
n_dof = waypts.shape[1]
v_max = 3 * np.ones((n_dof, 2)) # init with 3
a_max = 3 * np.ones((n_dof, 2)) # init with 3
v_max[-7:, 0] = -tdrac.V_MAX # fill 7-dof limits
v_max[-7:, 1] = tdrac.V_MAX
a_max[-7:, 0] = -tdrac.A_MAX
a_max[-7:, 1] = tdrac.A_MAX
v_max[:-7, 0] *= -1
a_max[:-7, 0] *= -1
return v_max, a_max
def test_run_topp_spline_static_data(self):
"""Test run_topp_spline() using static test data."""
print("Starting test_run_topp_spline_static_data")
for coeff in [1, 0.5, 0.12]:
print(f"Testing with limit reduction coefficient: {coeff}...")
for i, waypts in enumerate(self.waypts_list):
print(f"Testing waypoints file {i}...")
v_max, a_max = TestRunTopp._gen_limits(waypts)
tdrac.run_topp_spline(
waypts,
coeff * v_max,
coeff * a_max,
verify_lims=True,
)
def test_run_topp_const_accel_static_data(self):
"""Test run_topp_const_accel() using static test data."""
print("Starting test_run_topp_const_accel_static_data")
for coeff in [1, 0.5, 0.12]:
print(f"Testing with limit reduction coefficient: {coeff}...")
for i, waypts in enumerate(self.waypts_list):
print(f"Testing waypoints file {i}...")
v_max, a_max = TestRunTopp._gen_limits(waypts)
tdrac.run_topp_const_accel(
waypts,
coeff * v_max,
coeff * a_max,
cmd_rate=1000,
verify_lims=True,
)
@staticmethod
def test_run_topp_spline_random_data():
"""Test run_topp_spline() using randoms."""
# 2000 waypts supported but can be commented out for speed if needed
n_dof = 7
vlim = np.asarray([1] * n_dof)
alim = np.asarray([2] * n_dof)
vlim = np.vstack([-vlim, vlim]).T
alim = np.vstack([-alim, alim]).T
for n_waypts in [2, 20, 50, 200]: # , 2000]:
print(f"Testing {n_waypts} random waypoints...")
waypts = np.random.rand(n_waypts, n_dof)
tdrac.run_topp_spline(waypts, vlim, alim, verify_lims=True)
if __name__ == "__main__":
unittest.main()
# import matplotlib.pyplot as plt
# # test using static test data
# v_max = np.vstack([-V_MAX, V_MAX]).T
# a_max = np.vstack([-A_MAX, A_MAX]).T
# # two sets of vlims, alims, two reduction coefficients (safety factor)
# for coeff in [1, 0.5, 0.2, 0.1, 0.05]:
# print(f"limit reduction coefficient: {coeff}")
# for i in range(5):
# print(f"testing waypoints file {i}...")
# waypts = np.loadtxt(
# f"/src/toppra/tests/dracula/test_waypts_jnt_{i}.txt"
# ) # (33, 7)
# _ = run_topp(
# waypts, coeff * v_max, coeff * a_max, verify_lims=True
# ) # assert no throw
# # test using randoms
# # 2000 is supported but commented out for speed
# for n in [2, 20, 50, 200]: # , 2000]:
# print(f"Testing {n} random waypoints with no truncation...")
# topp_breaks_count_final, _, _ = run_topp_random(n, False)
# # Plotting
# csplcp = copy.deepcopy(cspl)
# s_sampled = np.linspace(0, csplcp.x[-1], 100)
# fig, axs = plt.subplots(1, 4, sharex=True, figsize=[18, 4])
# for i in range(csplcp.c.shape[2]):
# axs[0].plot(
# s_sampled, csplcp(s_sampled)[:, i],
# label="J{:d}".format(i + 1)
# )
# axs[1].plot(
# s_sampled, csplcp(s_sampled, 1)[:, i],
# label="J{:d}".format(i + 1)
# )
# axs[2].plot(
# s_sampled, csplcp(s_sampled, 2)[:, i],
# label="J{:d}".format(i + 1)
# )
# axs[3].plot(
# s_sampled, csplcp(s_sampled, 3)[:, i],
# label="J{:d}".format(i + 1)
# )
# axs[0].set_xlabel("Time (s)")
# axs[0].set_ylabel("Joint position (rad)")
# axs[1].set_xlabel("Time (s)")
# axs[1].set_ylabel("Joint velocity (rad/s)")
# axs[2].set_xlabel("Time (s)")
# axs[2].set_ylabel("Joint acceleration (rad/s^2)")
# axs[3].set_xlabel("Time (s)")
# axs[3].set_ylabel("Joint jerk (rad/s^3)")
# axs[0].legend()
# axs[1].legend()
# axs[2].legend()
# axs[3].legend()
# plt.tight_layout()
# fig.suptitle("original")
# # plt.show()
# s_sampled2 = np.linspace(0, cspl.x[-1], 100)
# fig, axs = plt.subplots(1, 4, sharex=True, figsize=[18, 4])
# for i in range(cspl.c.shape[2]):
# axs[0].plot(
# s_sampled2, cspl(s_sampled2)[:, i],
# label="J{:d}".format(i + 1)
# )
# axs[1].plot(
# s_sampled2, cspl(s_sampled2, 1)[:, i],
# label="J{:d}".format(i + 1)
# )
# axs[2].plot(
# s_sampled2, cspl(s_sampled2, 2)[:, i],
# label="J{:d}".format(i + 1)
# )
# axs[3].plot(
# s_sampled2, cspl(s_sampled2, 3)[:, i],
# label="J{:d}".format(i + 1)
# )
# axs[0].set_xlabel("Time (s)")
# axs[0].set_ylabel("Joint position (rad)")
# axs[1].set_xlabel("Time (s)")
# axs[1].set_ylabel("Joint velocity (rad/s)")
# axs[2].set_xlabel("Time (s)")
# axs[2].set_ylabel("Joint acceleration (rad/s^2)")
# axs[3].set_xlabel("Time (s)")
# axs[3].set_ylabel("Joint jerk (rad/s^3)")
# axs[0].legend()
# axs[1].legend()
# axs[2].legend()
# axs[3].legend()
# plt.tight_layout()
# fig.suptitle("new")
# plt.show()
# more debugging plots from code files
# if debug_active:
# print("yay we made an instance")
# X = instance.compute_feasible_sets()
# K = instance.compute_controllable_sets(0, 0)
# _, sd_vec, _ = instance.compute_parameterization(0, 0)
# X = np.sqrt(X)
# K = np.sqrt(K)
# plt.plot(X[:, 0], c="green", label="Feasible sets")
# plt.plot(X[:, 1], c="green")
# plt.plot(K[:, 0], "--", c="red", label="Controllable sets")
# plt.plot(K[:, 1], "--", c="red")
# plt.plot(sd_vec, label="Velocity profile")
# plt.title("Path-position path-velocity plot")
# plt.xlabel("Path position")
# plt.ylabel("Path velocity square")
# plt.legend()
# plt.tight_layout()
# plt.show()
# if debugging:
# plt.figure()
# s_sampled = np.linspace(0, csplcp.x[-1], 100)
# fig, axs = plt.subplots(1, 4, sharex=True, figsize=[18, 4])
# for i in range(csplcp.c.shape[2]):
# axs[0].plot(
# s_sampled,
# csplcp(s_sampled)[:, i], label="J{:d}".format(i + 1)
# )
# axs[1].plot(
# s_sampled,
# csplcp(s_sampled, 1)[:, i],
# label="J{:d}".format(i + 1),
# )
# axs[2].plot(
# s_sampled,
# csplcp(s_sampled, 2)[:, i],
# label="J{:d}".format(i + 1),
# )
# axs[3].plot(
# s_sampled,
# csplcp(s_sampled, 3)[:, i],
# label="J{:d}".format(i + 1),
# )
# axs[0].set_xlabel("Time (s)")
# axs[0].set_ylabel("Joint position (rad)")
# axs[1].set_xlabel("Time (s)")
# axs[1].set_ylabel("Joint velocity (rad/s)")
# axs[2].set_xlabel("Time (s)")
# axs[2].set_ylabel("Joint acceleration (rad/s^2)")
# axs[3].set_xlabel("Time (s)")
# axs[3].set_ylabel("Joint jerk (rad/s^3)")
# axs[0].legend()
# axs[1].legend()
# axs[2].legend()
# axs[3].legend()
# plt.tight_layout()
# fig.suptitle("original")
# plt.show()
# s_sampled2 = np.linspace(0, cspl.x[-1], 100)
# fig, axs = plt.subplots(1, 4, sharex=True, figsize=[18, 4])
# for i in range(cspl.c.shape[2]):
# axs[0].plot(
# s_sampled2,
# cspl(s_sampled2)[:, i], label="J{:d}".format(i + 1)
# )
# axs[1].plot(
# s_sampled2,
# cspl(s_sampled2, 1)[:, i],
# label="J{:d}".format(i + 1),
# )
# axs[2].plot(
# s_sampled2,
# cspl(s_sampled2, 2)[:, i],
# label="J{:d}".format(i + 1),
# )
# axs[3].plot(
# s_sampled2,
# cspl(s_sampled2, 3)[:, i],
# label="J{:d}".format(i + 1),
# )
# axs[0].set_xlabel("Time (s)")
# axs[0].set_ylabel("Joint position (rad)")
# axs[1].set_xlabel("Time (s)")
# axs[1].set_ylabel("Joint velocity (rad/s)")
# axs[2].set_xlabel("Time (s)")
# axs[2].set_ylabel("Joint acceleration (rad/s^2)")
# axs[3].set_xlabel("Time (s)")
# axs[3].set_ylabel("Joint jerk (rad/s^3)")
# axs[0].legend()
# axs[1].legend()
# axs[2].legend()
# axs[3].legend()
# plt.tight_layout()
# fig.suptitle("new")
# plt.show()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from _utils import parse, print_stats, URL_DS3
print(URL_DS3)
rows = parse(URL_DS3)
print_stats(rows)
"""
https://darksouls.fandom.com/ru/wiki/Боссы_(Dark_Souls_III)
NAME | HEALTH | SOULS
----------------------------------------------+--------+-------
Гигант Йорм | 27822 | 36000
Демон-принц | 21135 | 100000
Отец Ариандель и сестра Фриде | 18877 | 72000
Мидир Пожиратель Тьмы | 15895 | 150000
Проклятое Великое древо | 15405 | 7000
Рыцарь-раб Гаэль | 14895 | 120000
Лотрик, младший принц и Лориан, старший принц | 13529 | 85000
Безымянный король | 11677 | 80000
Душа пепла | 10766 | 100000
Оцейрос, Снедаемый король | 8087 | 58000
Древняя виверна | 7873 | 70000
Верховный повелитель Вольнир | 7052 | 22000
Хранитель могилы чемпиона и великий волк | 6984 | 60000
Старый король демонов | 5301 | 25000
Танцовщица Холодной долины | 5111 | 60000
Понтифик Саливан | 5106 | 28000
Чемпион Гундир | 4956 | 60000
Олдрик, пожиратель богов | 4727 | 50000
Доспехи драконоборца | 4581 | 64000
Дьяконы глубин | 4099 | 13000
Копье церкви | 3379 | 80000
Хранители Бездны | 3096 | 18000
Знаток кристальных чар | 2723 | 8000
Вордт из Холодной долины | 1328 | 3000
Судия Гундир | 1037 | 3000
"""
|
from django.db import transaction, OperationalError
from django.db.models import F, Q
from django.conf import settings
from django.utils import timezone
from celery import shared_task
import os
import shutil
import string, random
from datetime import timedelta
import oj
from main.judge import run_judge_in_docker, JudgeError
from main.models import Submit
from main.utils.string_utils import generate_noise
from main.utils.directory import remkdir
class CheckConditionError(Exception):
"""自定义错误,用于条件检查错误时。"""
pass
@shared_task(autoretry_for=(OperationalError, CheckConditionError, JudgeError),
retry_kwargs={'countdown': 10, 'max_retries': 3})
def judge_submit(submit_pk):
submit = Submit.objects.get(pk=submit_pk)
problem = submit.problem
# Prepare the judge directory and the Docker volume mapping
judge_dir = os.path.join(settings.JUDGE_BASE_DIR,
'%s_%s' % (str(submit.id), generate_noise(6)))
remkdir(judge_dir)
submit.copy_code_to_dir(judge_dir)
problem.prepare_problem_dir(judge_dir)
docker_judge_dir = '/' + generate_noise(8)
volumes = {judge_dir: {'bind': docker_judge_dir, 'mode': 'ro'}}
default_check = bool(not problem.compare_file)
ta_check_file = '' if default_check else os.path.join(docker_judge_dir, 'compare.py')
# Mark the submit as being judged
with transaction.atomic():
submit.refresh_from_db()
if submit.judge_status in [Submit.JUDGE_COMPLETED, Submit.JUDGE_FAILED]:
raise CheckConditionError('Submit has already been judged.')
submit.judge_status = Submit.JUDGE_JUDGING
submit.save()
try:
(compile_status, results) = run_judge_in_docker(
image=settings.JUDGE_DOCKER_IMAGE,
src_path=os.path.join(docker_judge_dir, submit.codefile_name),
compiler=submit.get_compiler_name(),
test_case_dir=docker_judge_dir,
sample_num=problem.testdata_num,
mem_limit=problem.memory_limit,
time_limit=problem.time_limit,
volumes=volumes,
max_wait_time=max(60, problem.time_limit * problem.testdata_num + 30),
default_check=default_check,
ta_check_file=ta_check_file,)
shutil.rmtree(judge_dir)
except JudgeError as e:
shutil.rmtree(judge_dir)
submit.judge_status = Submit.JUDGE_PENDING
submit.save()
raise e
except Exception as e:
shutil.rmtree(judge_dir)
submit.judge_status = Submit.JUDGE_FAILED
submit.save()
raise e
with transaction.atomic():
submit.refresh_from_db()
if submit.judge_status != Submit.JUDGE_JUDGING:
raise CheckConditionError('Submit is not in JUDGING state.')
# Update the submit's status and results
if compile_status == oj.consts.COMPILE_OK:
submit.compile_status = Submit.COMPILE_OK
submit.run_results = [list(tp) for tp in results]
total = len(submit.run_results)
accepted = len([status for (status, _, _) in submit.run_results
if status == oj.consts.ACCEPTED])
if total == 0:
submit.score = 0.0
else:
submit.score = 100.0 * (accepted / total)
elif compile_status == oj.consts.COMPILE_ERROR:
submit.compile_status = Submit.COMPILE_ERROR
submit.error_message = results.replace('\x00', '')
submit.score = 0.0
submit.judge_status = Submit.JUDGE_COMPLETED
submit.save()
# Update the problem's statistics
problem.submit_cnt = F('submit_cnt') + 1
if submit.score == 100.0:
problem.accept_cnt = F('accept_cnt') + 1
problem.save()
@shared_task
def auto_cancel_unfinished_submits():
past_time = timezone.now() - timedelta(seconds=3600)
unfinished_submits = Submit.objects.filter(Q(judge_status=Submit.JUDGE_PENDING) |
Q(judge_status=Submit.JUDGE_JUDGING))\
.filter(create_time__lte=past_time)
for submit in unfinished_submits:
submit.judge_status = Submit.JUDGE_FAILED
submit.save()
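# Hedged sketch of a periodic schedule for auto_cancel_unfinished_submits
# (the 'main.tasks' module path and the 10-minute interval are assumptions):
#
#     # settings.py
#     CELERY_BEAT_SCHEDULE = {
#         'auto-cancel-unfinished-submits': {
#             'task': 'main.tasks.auto_cancel_unfinished_submits',
#             'schedule': 600.0,  # seconds
#         },
#     }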
|
import os
import pickle
import numpy as np
np.random.seed(1000)
from utils.generic_utils import load_dataset_at
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
def load_embedding_matrix(embedding_path, word_index, max_nb_words, embedding_dim, print_error_words=True):
if not os.path.exists('data/embedding_matrix max words %d embedding dim %d.npy' % (max_nb_words, embedding_dim)):
embeddings_index = {}
error_words = []
f = open(embedding_path, encoding='utf8')
for line in f:
values = line.split()
word = values[0]
try:
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
except Exception:
error_words.append(word)
f.close()
if len(error_words) > 0:
print("%d words were not added." % (len(error_words)))
if print_error_words:
print("Words are : \n", error_words)
print('Preparing embedding matrix.')
# prepare embedding matrix
nb_words = min(max_nb_words, len(word_index))
embedding_matrix = np.zeros((nb_words, embedding_dim))
for word, i in word_index.items():
if i >= nb_words:
continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
np.save('data/embedding_matrix max words %d embedding dim %d.npy' % (max_nb_words,
embedding_dim),
embedding_matrix)
print('Saved embedding matrix')
else:
embedding_matrix = np.load('data/embedding_matrix max words %d embedding dim %d.npy' % (max_nb_words,
embedding_dim))
print('Loaded embedding matrix')
return embedding_matrix
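# Usage sketch (the embedding path and sizes below are hypothetical; word_index is
# the mapping produced by a fitted Keras Tokenizer, e.g. via prepare_tokenized_data):
#   embedding_matrix = load_embedding_matrix('data/glove.6B.100d.txt', word_index,
#                                            max_nb_words=20000, embedding_dim=100)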
def create_ngram_set(input_list, ngram_value=2):
return set(zip(*[input_list[i:] for i in range(ngram_value)]))
def add_ngram(sequences, token_indice, ngram_range=2):
new_sequences = []
for input_list in sequences:
new_list = input_list[:]
for i in range(len(new_list) - ngram_range + 1):
for ngram_value in range(2, ngram_range + 1):
ngram = tuple(new_list[i:i + ngram_value])
if ngram in token_indice:
new_list.append(token_indice[ngram])
new_sequences.append(new_list)
return new_sequences
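# Worked example of the two helpers above: bigrams are extracted from a token id
# sequence and appended as extra features (the 1001/1002 ids are hypothetical
# n-gram indices):
#   create_ngram_set([1, 4, 9, 4], ngram_value=2)            # {(1, 4), (4, 9), (9, 4)}
#   add_ngram([[1, 4, 9]], {(1, 4): 1001, (4, 9): 1002}, 2)  # [[1, 4, 9, 1001, 1002]]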
def prepare_tokenized_data(texts, max_nb_words, max_sequence_length, ngram_range=1):
if not os.path.exists('data/tokenizer.pkl'):
tokenizer = Tokenizer(nb_words=max_nb_words)
tokenizer.fit_on_texts(texts)
with open('data/tokenizer.pkl', 'wb') as f:
pickle.dump(tokenizer, f)
print('Saved tokenizer.pkl')
else:
with open('data/tokenizer.pkl', 'rb') as f:
tokenizer = pickle.load(f)
print('Loaded tokenizer.pkl')
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print('Found %s unique 1-gram tokens.' % len(word_index))
    if ngram_range > 1:
        ngram_set = set()
        for input_list in sequences:
            for i in range(2, ngram_range + 1):
                set_of_ngram = create_ngram_set(input_list, ngram_value=i)
                ngram_set.update(set_of_ngram)
        # Dictionary mapping n-gram token to a unique integer.
        # Integer values are greater than max_nb_words in order
        # to avoid collision with existing features.
        start_index = max_nb_words + 1
        token_indice = {v: k + start_index for k, v in enumerate(ngram_set)}
        indice_token = {token_indice[k]: k for k in token_indice}
        word_index.update(token_indice)
        max_features = np.max(list(indice_token.keys())) + 1
        print('Now there are:', max_features, 'features')
        # Augment the sequences with the n-gram features
        sequences = add_ngram(sequences, token_indice, ngram_range)
print('Average sequence length: {}'.format(np.mean(list(map(len, sequences)), dtype=int)))
print('Max sequence length: {}'.format(np.max(list(map(len, sequences)))))
data = pad_sequences(sequences, maxlen=max_sequence_length)
return (data, word_index)
# def __load_embeddings(dataset_prefix, verbose=False):
#
# embedding_path = npy_path # change to numpy data format (which contains the preloaded embedding matrix)
# if os.path.exists(embedding_path):
# # embedding matrix exists, no need to create again.
# print("Loading embedding matrix for dataset \'%s\'" % (dataset_prefix))
# embedding_matrix = np.load(embedding_path)
# return embedding_matrix
#
# with open(txt_path, 'r', encoding='utf8') as f:
# header = f.readline()
# splits = header.split(' ')
#
# vocab_size = int(splits[0])
# embedding_size = int(splits[1])
#
# embeddings_index = {}
# error_words = []
#
# for line in f:
# values = line.split()
# word = values[0]
# try:
# coefs = np.asarray(values[1:], dtype='float32')
# embeddings_index[word] = coefs
# except Exception:
# error_words.append(word)
#
# if len(error_words) > 0:
# print("%d words were not added." % (len(error_words)))
# if verbose:
# print("Words are : \n", error_words)
#
# if verbose: print('Preparing embedding matrix.')
#
# embedding_matrix = np.zeros((vocab_size, embedding_size))
#
# for key, vector in embeddings_index.items():
# if vector is not None:
# # words not found in embedding index will be all-zeros.
# key = int(key)
# embedding_matrix[key] = vector
#
# if verbose: print('Saving embedding matrix for dataset \'%s\'' % (dataset_prefix))
#
# np.save(embedding_path, embedding_matrix)
# return embedding_matrix
|
from setuptools import setup, find_packages
setup(name='kfinny.finnpie',
version='1',
description='A simple library for packaging useful RE functions',
url='https://github.com/kfinny/finnpie',
author='Kevin Finnigin',
author_email='kevin@finnigin.net',
license='MIT',
packages=find_packages(),
zip_safe=False)
|
#
# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import sys
import pickle
import numpy as np
import onnx
import tensorrt as trt
import torch
try:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
except ImportError as err:
sys.stderr.write("""Error: Failed to import tensorflow module ({})\n""".format(err))
sys.exit()
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
"""
Attentions Keys
"""
WQ = "self_query_kernel"
BQ = "self_query_bias"
WK = "self_key_kernel"
BK = "self_key_bias"
WV = "self_value_kernel"
BV = "self_value_bias"
WQKV = "self_qkv_kernel"
BQKV = "self_qkv_bias"
"""
Transformer Keys
"""
W_AOUT = "attention_output_dense_kernel"
B_AOUT = "attention_output_dense_bias"
AOUT_LN_BETA = "attention_output_layernorm_beta"
AOUT_LN_GAMMA = "attention_output_layernorm_gamma"
W_MID = "intermediate_dense_kernel"
B_MID = "intermediate_dense_bias"
W_LOUT = "output_dense_kernel"
B_LOUT = "output_dense_bias"
LOUT_LN_BETA = "output_layernorm_beta"
LOUT_LN_GAMMA = "output_layernorm_gamma"
"""
Squad Output Keys
"""
SQD_W = "squad_output_weights"
SQD_B = "squad_output_bias"
def load_tf_weights(inputbase, config):
"""
Load the weights from the tensorflow checkpoint
"""
weights_dict = dict()
try:
reader = tf.train.NewCheckpointReader(inputbase)
tensor_dict = reader.get_variable_to_shape_map()
# There might be training-related variables in the checkpoint that can be discarded
param_names = [key for key in sorted(tensor_dict) if "adam" not in key and "global_step" not in key and "pooler" not in key]
count = len(param_names)
TRT_LOGGER.log(TRT_LOGGER.INFO, "Found {:} entries in weight map".format(count))
for pn in param_names:
toks = pn.lower().split("/")
if "encoder" in pn:
assert ("layer" in pn)
                l = re.findall(r"\d+", pn)[0]
outname = "l{}_".format(l) + "_".join(toks[3:])
else:
outname = "_".join(toks)
tensor = reader.get_tensor(pn)
shape = tensor.shape
if pn.find("kernel") != -1:
weights_dict[outname + "_notrans"] = trt.Weights(np.ascontiguousarray(tensor).flatten())
                TRT_LOGGER.log(TRT_LOGGER.VERBOSE, "Transposing {}\n".format(pn))
tensor = np.transpose(tensor)
shape = tensor.shape
flat_tensor = tensor.flatten()
shape_str = "{} ".format(len(shape)) + " ".join([str(d) for d in shape])
weights_dict[outname] = trt.Weights(flat_tensor)
TRT_LOGGER.log(TRT_LOGGER.VERBOSE, "Original name: {:}, TensorRT name: {:}, shape: {:}".format(pn, outname, shape_str))
N = config.num_attention_heads
H = config.head_size
additional_dict = dict()
for key, value in weights_dict.items():
pos = key.find(BQ)
if pos != -1:
hidden_size = value.size
prefix = key[:pos]
Bq_ = value
Bk_ = weights_dict[prefix + BK]
Bv_ = weights_dict[prefix + BV]
Wq_ = weights_dict[prefix + WQ]
Wk_ = weights_dict[prefix + WK]
Wv_ = weights_dict[prefix + WV]
mat_size = hidden_size * hidden_size
wcount = 3 * mat_size
Wall = np.zeros(wcount, np.float32)
bcount = 3 * hidden_size
Ball = np.zeros(bcount, np.float32)
Wall[0:mat_size] = Wq_.numpy()[0:mat_size]
Wall[mat_size:2*mat_size] = Wk_.numpy()[0:mat_size]
Wall[2*mat_size:3*mat_size] = Wv_.numpy()[0:mat_size]
Ball[0:hidden_size] = Bq_.numpy()[0:hidden_size]
Ball[hidden_size:2*hidden_size] = Bk_.numpy()[0:hidden_size]
Ball[2*hidden_size:3*hidden_size] = Bv_.numpy()[0:hidden_size]
if config.use_int8 and getattr(config, 'interleaved', False):
Wall = np.ascontiguousarray(Wall.reshape((3, N, H, N, H)), dtype=np.float32)
Ball = np.ascontiguousarray(Ball.reshape((3, N, H)), dtype=np.float32)
else:
Wall = np.ascontiguousarray(Wall.reshape((3, N, H, N, H)).transpose((1, 0, 2, 3, 4)), dtype=np.float32)
Ball = np.ascontiguousarray(Ball.reshape((3, N, H)).transpose((1, 0, 2)), dtype=np.float32)
additional_dict[prefix + WQKV] = trt.Weights(Wall)
additional_dict[prefix + BQKV] = trt.Weights(Ball)
additional_dict[prefix + WQKV + "_notrans"] = trt.Weights(np.ascontiguousarray(Wall.T))
    except Exception as error:
        TRT_LOGGER.log(TRT_LOGGER.ERROR, str(error))
    else:
        weights_dict.update(additional_dict)
return weights_dict
def onnx_to_trt_name(onnx_name):
"""
Converting variables in the onnx checkpoint to names corresponding to the naming convention used in the TF version, expected by the builder
"""
qkv_strings = {'key', 'value', 'query', 'query_key_value'}
onnx_name = onnx_name.lower()
toks = [t.strip('_') for t in onnx_name.split('.')]
if toks[0] == 'bert': #embeddings or encoder
if toks[1] == 'encoder': #transformer
# Token conversions for sparse checkpoints
if toks[-2] == 'dense_act':
toks[-2] = 'dense'
elif toks[-3] == 'dense_act':
if toks[-2] == 'input_quantizer':
toks[-2] = 'input'
elif toks[-2] == 'weight_quantizer':
toks[-2] = 'kernel'
toks[-3] = 'dense'
elif toks[-2].startswith('matmul'):
toks[-2] = {
'matmul_q_quantizer': 'qv_a_input_quantizer',
'matmul_k_quantizer': 'qv_b_input_quantizer',
'matmul_v_quantizer': 'av_b_input_quantizer',
'matmul_a_quantizer': 'av_a_input_quantizer',
}[toks[-2].replace('input_', '')]
# Token conversions for all checkpoints
if toks[-2] == 'layernorm': #bias->beta, weight->gamma
toks[-1] = 'beta' if toks[-1] == 'bias' else 'gamma'
elif (toks[-2] == 'dense' or toks[-2] in qkv_strings) and toks[-1] == 'weight':
toks[-1] = 'kernel'
elif (toks[-3] == 'dense' or toks[-3] in qkv_strings) and toks[-1] == 'amax':
if toks[-2] == 'weight_quantizer':
toks[-2] = 'kernel'
elif toks[-2] == 'input_quantizer':
toks[-2] = 'input'
if 'final_input_quantizer' not in toks[2]:
ind = toks.index('layers')+1 if 'layers' in toks else 3
toks = toks[ind:]
toks[0] = 'l{}'.format(int(toks[0]))
else:
if toks[-2] == 'layernorm': #bias->beta, weight->gamma
toks[-1] = 'beta' if toks[-1] == 'bias' else 'gamma'
else: #embeddings: drop "_weight" suffix
if toks[-1] == 'amax':
toks[-2] = 'amax'
toks = toks[:-1]
elif 'qa' in onnx_name:
name = 'cls_squad_output_bias' if toks[-1] == 'bias' else 'cls_squad_output_weights'
return name
else:
print("Encountered unknown case:", onnx_name)
assert(False)
parsed = '_'.join(toks)
return parsed
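# Example of the mapping above (hypothetical ONNX tensor name):
#   onnx_to_trt_name('bert.encoder.layer.3.attention.output.dense.weight')
#   -> 'l3_attention_output_dense_kernel'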
def get_onnx_weight_dict(tensor_dict, config):
N = config.num_attention_heads
H = config.head_size
hidden_size = config.hidden_size
weights_dict = dict()
for outname, tensor in tensor_dict.items():
if outname.find("_amax") != -1:
weights_dict[outname] = tensor
elif outname.find(BQ) != -1:
prefix = outname[:outname.find(BQ)]
Wqkv = np.zeros((3, hidden_size, hidden_size), np.float32)
Bqkv = np.zeros((3, hidden_size), np.float32)
Wqkv[0,:,:] = tensor_dict[prefix + WQ]
Wqkv[1,:,:] = tensor_dict[prefix + WK]
Wqkv[2,:,:] = tensor_dict[prefix + WV]
Bqkv[0,:] = tensor
Bqkv[1,:] = tensor_dict[prefix + BK]
Bqkv[2,:] = tensor_dict[prefix + BV]
if config.use_int8 and getattr(config, 'interleaved', False):
Wqkv = np.ascontiguousarray(Wqkv.reshape((3, N, H, N, H)))
Bqkv = np.ascontiguousarray(Bqkv.reshape((3, N, H)))
else:
Wqkv = np.ascontiguousarray(Wqkv.reshape((3, N, H, N, H)).transpose((1,0,2,3,4)))
Bqkv = np.ascontiguousarray(Bqkv.reshape((3, N, H)).transpose((1,0,2)))
weights_dict[prefix + WQKV] = trt.Weights(Wqkv)
weights_dict[prefix + BQKV] = trt.Weights(Bqkv)
weights_dict[prefix + WQKV + "_notrans"] = trt.Weights(np.ascontiguousarray(Wqkv.T))
elif outname.find(BK) != -1 or outname.find(BV) != -1 or outname.find(WQ) != -1 or outname.find(WK) != -1 or outname.find(WV) != -1:
pass
else:
flat_tensor = np.ascontiguousarray(tensor).flatten()
weights_dict[outname] = trt.Weights(flat_tensor)
if outname.find("kernel") != -1:
tensor = np.transpose(tensor)
weights_dict[outname + "_notrans"] = trt.Weights(np.ascontiguousarray(tensor).flatten())
TRT_LOGGER.log(TRT_LOGGER.INFO, "Found {:} entries in weight map".format(len(weights_dict)))
return weights_dict
def load_onnx_weights_and_quant(path, config):
"""
Load the weights from the onnx checkpoint
"""
model = onnx.load(path)
weights = model.graph.initializer
tensor_dict = dict((onnx_to_trt_name(w.name), np.frombuffer(w.raw_data, np.int8).reshape(w.dims))
if w.name.split('_')[-1] == 'mask' else
(onnx_to_trt_name(w.name), np.frombuffer(w.raw_data, np.float32).reshape(w.dims))
for w in weights)
return get_onnx_weight_dict(tensor_dict, config)
def load_pytorch_weights_and_quant(path, config):
"""
Load the weights from the pytorch checkpoint
"""
state_dict = torch.load(path, map_location='cpu')["model"]
tensor_dict = {onnx_to_trt_name(name):val.numpy() for name, val in state_dict.items()}
return get_onnx_weight_dict(tensor_dict, config)
def load_megatron_pickle_weights(path, config):
N = config.num_attention_heads
H = config.head_size
with open(path, 'rb') as f:
tensor_dict = pickle.load(f)
weight_dict = {}
for name, tensor in tensor_dict.items():
if 'scale' in name:
continue
name = (onnx_to_trt_name(name)
.replace('embedding_', 'embeddings_')
.replace('tokentype_', 'token_type_')
.replace('_av', '_self_av')
.replace('_qv', '_self_qv')
.replace('query_key_value', 'self_qkv'))
if name.endswith('self_qkv_kernel'):
tensor = np.ascontiguousarray(tensor.reshape((3, N, H, N, H))).astype(np.float32)
weight_dict[name] = trt.Weights(tensor)
elif name.endswith('self_qkv_bias'):
tensor = np.ascontiguousarray(tensor.reshape((3, N, H))).astype(np.float32)
weight_dict[name] = trt.Weights(tensor)
elif name == 'l{}_output_layernorm_output_quantizer_amax'.format(config.num_hidden_layers-1):
weight_dict['bert_encoder_final_input_quantizer_amax'] = tensor
elif name.endswith('_amax'):
weight_dict[name] = tensor
if name.endswith('_qkv_input_amax'):
weight_dict[name.replace('_qkv_input_amax', '_query_input_amax')] = tensor
weight_dict[name.replace('_qkv_input_amax', '_key_input_amax')] = tensor
weight_dict[name.replace('_qkv_input_amax', '_value_input_amax')] = tensor
else:
flat_tensor = np.ascontiguousarray(tensor).flatten().astype(np.float32)
weight_dict[name] = trt.Weights(flat_tensor)
TRT_LOGGER.log(TRT_LOGGER.INFO, "Found {:} entries in weight map".format(len(weight_dict)))
return weight_dict
|
# -*- coding: utf-8 -*-
"""Test that samplers can be executed."""
import unittest
from typing import ClassVar, Type
import numpy
import torch
from pykeen.datasets import Nations
from pykeen.sampling import BasicNegativeSampler, BernoulliNegativeSampler, NegativeSampler
from pykeen.training.schlichtkrull_sampler import GraphSampler, _compute_compressed_adjacency_list
from pykeen.triples import SLCWAInstances, TriplesFactory
def _array_check_bounds(
array: torch.LongTensor,
low: int,
high: int,
) -> bool:
"""Check if all elements lie in bounds."""
return (low <= array).all() and (array < high).all()
class _NegativeSamplingTestCase:
"""A test case for quickly defining common tests for samplers."""
#: The batch size
batch_size: int
#: The random seed
seed: int
#: The triples factory
triples_factory: TriplesFactory
#: The sLCWA instances
slcwa_instances: SLCWAInstances
#: Class of negative sampling to test
negative_sampling_cls: ClassVar[Type[NegativeSampler]]
#: The negative sampler instance, initialized in setUp
negative_sampler: NegativeSampler
#: A positive batch
positive_batch: torch.LongTensor
def setUp(self) -> None:
"""Set up the test case with a triples factory and model."""
self.batch_size = 16
self.seed = 42
self.num_negs_per_pos = 10
self.triples_factory = Nations().training
self.slcwa_instances = self.triples_factory.create_slcwa_instances()
self.negative_sampler = self.negative_sampling_cls(triples_factory=self.triples_factory)
self.scaling_negative_sampler = self.negative_sampling_cls(
triples_factory=self.triples_factory,
num_negs_per_pos=self.num_negs_per_pos,
)
random = numpy.random.RandomState(seed=self.seed)
batch_indices = random.randint(low=0, high=self.slcwa_instances.num_instances, size=(self.batch_size,))
self.positive_batch = self.slcwa_instances.mapped_triples[batch_indices]
def test_sample(self) -> None:
# Generate negative sample
negative_batch = self.negative_sampler.sample(positive_batch=self.positive_batch)
# check shape
assert negative_batch.shape == self.positive_batch.shape
# check bounds: heads
assert _array_check_bounds(negative_batch[:, 0], low=0, high=self.triples_factory.num_entities)
# check bounds: relations
assert _array_check_bounds(negative_batch[:, 1], low=0, high=self.triples_factory.num_relations)
# check bounds: tails
assert _array_check_bounds(negative_batch[:, 2], low=0, high=self.triples_factory.num_entities)
# Check that all elements got corrupted
assert (negative_batch != self.positive_batch).any(dim=1).all()
# Generate scaled negative sample
scaled_negative_batch = self.scaling_negative_sampler.sample(
positive_batch=self.positive_batch,
)
assert scaled_negative_batch.shape[0] == self.positive_batch.shape[0] * self.num_negs_per_pos
assert scaled_negative_batch.shape[1] == self.positive_batch.shape[1]
class BasicNegativeSamplerTest(_NegativeSamplingTestCase, unittest.TestCase):
"""Test the basic negative sampler."""
negative_sampling_cls = BasicNegativeSampler
def test_sample_basic(self):
"""Test if relations and half of heads and tails are not corrupted."""
# Generate negative samples
negative_batch = self.negative_sampler.sample(positive_batch=self.positive_batch)
# test that the relations were not changed
assert (self.positive_batch[:, 1] == negative_batch[:, 1]).all()
# Test that half of the subjects and half of the objects are corrupted
half_size = self.positive_batch.shape[0] // 2
num_subj_corrupted = (self.positive_batch[:, 0] != negative_batch[:, 0]).sum()
num_obj_corrupted = (self.positive_batch[:, 2] != negative_batch[:, 2]).sum()
assert num_obj_corrupted - 1 <= num_subj_corrupted
assert num_subj_corrupted - 1 <= num_obj_corrupted
assert num_subj_corrupted - 1 <= self.positive_batch.shape[0]
assert half_size - 1 <= num_subj_corrupted
class BernoulliNegativeSamplerTest(_NegativeSamplingTestCase, unittest.TestCase):
"""Test the Bernoulli negative sampler."""
negative_sampling_cls = BernoulliNegativeSampler
def test_sample_bern(self):
"""Test if relations are not corrupted."""
# Generate negative sample for additional tests
negative_batch = self.negative_sampler.sample(positive_batch=self.positive_batch)
# test that the relations were not changed
assert (self.positive_batch[:, 1] == negative_batch[:, 1]).all()
class GraphSamplerTest(unittest.TestCase):
"""Test the GraphSampler."""
def setUp(self) -> None:
"""Set up the test case with a triples factory."""
self.triples_factory = Nations().training
self.num_samples = 20
self.num_epochs = 10
self.graph_sampler = GraphSampler(triples_factory=self.triples_factory, num_samples=self.num_samples)
def test_sample(self) -> None:
"""Test drawing samples from GraphSampler."""
for e in range(self.num_epochs):
# sample a batch
batch_indices = []
for j in self.graph_sampler:
batch_indices.append(j)
batch = torch.stack(batch_indices)
# check shape
assert batch.shape == (self.num_samples,)
# get triples
triples_batch = self.triples_factory.mapped_triples[batch]
# check connected components
# super inefficient
components = [{int(e)} for e in torch.cat([triples_batch[:, i] for i in (0, 2)]).unique()]
for h, _, t in triples_batch:
h, t = int(h), int(t)
s_comp_ind = [i for i, c in enumerate(components) if h in c][0]
o_comp_ind = [i for i, c in enumerate(components) if t in c][0]
# join
if s_comp_ind != o_comp_ind:
s_comp = components.pop(max(s_comp_ind, o_comp_ind))
o_comp = components.pop(min(s_comp_ind, o_comp_ind))
so_comp = s_comp.union(o_comp)
components.append(so_comp)
else:
pass
# already joined
if len(components) < 2:
break
# check that there is only a single component
assert len(components) == 1
class AdjacencyListCompressionTest(unittest.TestCase):
"""Unittest for utility method."""
def setUp(self) -> None:
"""Set up the test case with a triples factory."""
self.triples_factory = Nations().training
def test_compute_compressed_adjacency_list(self):
"""Test method _compute_compressed_adjacency_list ."""
degrees, offsets, comp_adj_lists = _compute_compressed_adjacency_list(triples_factory=self.triples_factory)
triples = self.triples_factory.mapped_triples
uniq, cnt = torch.unique(torch.cat([triples[:, i] for i in (0, 2)]), return_counts=True)
assert (degrees == cnt).all()
assert (offsets[1:] == torch.cumsum(cnt, dim=0)[:-1]).all()
assert (offsets < comp_adj_lists.shape[0]).all()
# check content of comp_adj_lists
for i in range(self.triples_factory.num_entities):
start = offsets[i]
stop = start + degrees[i]
adj_list = comp_adj_lists[start:stop]
# check edge ids
edge_ids = adj_list[:, 0]
adjacent_edges = set(int(a) for a in ((triples[:, 0] == i) | (triples[:, 2] == i)).nonzero().flatten())
assert adjacent_edges == set(map(int, edge_ids))
|
import pytest
def test_merge_call_order(rebase_remotes, mocker):
mocker.patch.object(rebase_remotes, 'git', return_value=1)
rr = rebase_remotes
target_branch = 'test'
rr.merge(target_branch)
calls = [
mocker.call('checkout {}'.format(target_branch), ignore_err=True),
mocker.call('merge --no-ff {}'.format('br1'), interrupt_if_err=False),
mocker.call('merge --no-ff {}'.format('br2'), interrupt_if_err=False),
]
rr.git.assert_has_calls(calls, any_order=False)
def test_merge_fail_checkout(rebase_remotes, mocker):
mocker.patch.object(rebase_remotes, 'git', return_value=0)
rr = rebase_remotes
target_branch = 'test'
calls = [
mocker.call('checkout {}'.format(target_branch), ignore_err=True),
]
with pytest.raises(SystemExit):
rr.merge(target_branch)
rr.git.assert_has_calls(calls, any_order=False)
def test_merge_not_result(rebase_remotes, mocker):
mocker.patch.object(rebase_remotes, 'git', return_value=1)
rr = rebase_remotes
result = rr.merge('test')
assert not result
|
from ..builder import HEADS
from .posenc_cascade_roi_head import PositionalEncodingCascadeRoIHead
from mmdet.core import bbox2roilist
import torch
@HEADS.register_module()
class SpatialRelationCascadeRoIHead(PositionalEncodingCascadeRoIHead):
def _bbox_forward(self, stage, x, rois, n_imgs=1):
"""Box head forward function used in both training and testing."""
bbox_roi_extractor = self.bbox_roi_extractor[stage]
bbox_head = self.bbox_head[stage]
x = self.add_positional_encoding(x[:bbox_roi_extractor.num_inputs])
bbox_feats, bbox_spatial_feats = bbox_roi_extractor(x, rois)
cls_score, bbox_pred = bbox_head(bbox_feats, bbox_spatial_feats, n_imgs=n_imgs)
bbox_results = dict(
cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
return bbox_results
def _bbox_forward_train(self, stage, x, sampling_results, gt_bboxes,
gt_labels, rcnn_train_cfg):
"""Run forward function and calculate loss for box head in training."""
roilist = bbox2roilist([res.bboxes for res in sampling_results])
if roilist[0].size(0) != roilist[-1].size(0):
bbox_results = self._bbox_forward(stage, x, roilist)
rois = torch.cat(roilist, dim=0)
else:
rois = torch.cat(roilist, dim=0)
bbox_results = self._bbox_forward(stage, x, rois, n_imgs=len(roilist))
bbox_targets = self.bbox_head[stage].get_targets(
sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg)
loss_bbox = self.bbox_head[stage].loss(bbox_results['cls_score'],
bbox_results['bbox_pred'], rois,
*bbox_targets)
bbox_results.update(
loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets)
return bbox_results
|
# -*- coding: utf-8 -*-
"""
These functions aren't actually used in the code for now.
"""
import numpy as np
import scipy
from scipy.stats import truncnorm
from scipy.interpolate import NearestNDInterpolator
from scipy.interpolate.ndgriddata import _ndim_coords_from_arrays
def ltruncnorm(loc, scale, size, random_state=None):
"""
Truncated normal random numbers, cut off at locations less than 0.
Parameters
-----------
loc : float
Center coordinate of gaussian distribution
scale : float
Std deviation scale
size : int
Number of random numbers to generate
random_state : None or numpy.random.RandomState
Random number seeding object, or None.
Returns
---------
out : array shaped (size)
Output samples
"""
if scale == 0:
return np.ones(size) * loc
xmin = -loc / scale
t = truncnorm(xmin, 1e6)
s = t.rvs(size=size, random_state=random_state)
s = s * scale + loc
return s
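# Usage sketch: draw 1000 samples from a normal centred at loc=1.0 with scale=0.5,
# truncated so that no sample falls below zero.
#   samples = ltruncnorm(1.0, 0.5, 1000, random_state=np.random.RandomState(0))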
class NearestManhattanInterpolator(NearestNDInterpolator):
"""
NearestManhattanInterpolator(x, y)
Nearest-neighbour interpolation in N dimensions using Manhatten
p=1 norm.
.. versionadded:: 0.9
Methods
-------
__call__
Parameters
----------
x : (Npoints, Ndims) ndarray of floats
Data point coordinates.
y : (Npoints,) ndarray of float or complex
Data values.
"""
def __call__(self, *args):
"""
Evaluate interpolator at given points.
Parameters
----------
xi : ndarray of float, shape (..., ndim)
Points where to interpolate data at.
"""
xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
xi = self._check_call_shape(xi)
xi = self._scale_x(xi)
dist, i = self.tree.query(xi, p=1)
return self.values[i]
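# Usage sketch (hypothetical data): identical to NearestNDInterpolator, except the
# nearest neighbour is looked up under the Manhattan (p=1) metric.
#   pts = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
#   vals = np.array([10.0, 20.0, 30.0])
#   interp = NearestManhattanInterpolator(pts, vals)
#   interp(np.array([[0.9, 0.1]]))   # -> array([20.])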
def vectorized_choice(p, n, items=None):
"""
Randomly choose integers
Parameters
------------
p : array
Weights on choices
n : int
Number of choices
items : array
Choices
"""
s = p.cumsum(axis=1)
r = np.random.rand(p.shape[0], n, 1)
q = np.expand_dims(s, 1) >= r
k = q.argmax(axis=-1)
if items is not None:
k = np.asarray(items)[k]
return k
p = np.ones((20, 5))
#p[:, 1] = 0
p = p / np.sum(p, axis=1)[:, None]
n = 4
out = vectorized_choice(p, n)
|
from pokermodules.convenience_hole import add_margins, range_plot, top_hands_pct
from sys import argv
fc2 = 1326
p = float(argv[1])
print "Top", p, 'pct of hands =', fc2 * (p / 100.0), 'hands'
thp = top_hands_pct(p)
t = {'pairs':0, 'suited':0, 'unsuited':0}
mul = {'pairs':6, 'suited':4, 'unsuited':12}
for h in thp:
if h[0] == h[1]:
t['pairs'] += 1
elif 's' in h:
t['suited'] += 1
else:
t['unsuited'] += 1
running_tot = 0
for k in 'pairs suited unsuited'.split():
    print(t[k], k, '*', mul[k], '=', t[k] * mul[k])
    running_tot += t[k] * mul[k]
print("Actual number", running_tot, '=', round(running_tot / float(fc2), 4) * 100)
print(add_margins(range_plot(thp)))
|
from pypy.lang.smalltalk import shadow, constants
def bootstrap_class(instsize, w_superclass=None, w_metaclass=None,
name='?', format=shadow.POINTERS, varsized=False):
from pypy.lang.smalltalk import model
w_class = model.W_PointersObject(w_metaclass, 0)
# a dummy placeholder for testing
s = shadow.ClassShadow(w_class)
s.methoddict = {}
if w_superclass is not None:
s.s_superclass = w_superclass.as_class_get_shadow()
s.name = name
s.instance_size = instsize
s.instance_kind = format
s.instance_varsized = varsized or format != shadow.POINTERS
s.invalid = False
w_class._shadow = s
return w_class
# ___________________________________________________________________________
# Core Bootstrapping Objects
classtable = {}
def create_classtable():
    def define_core_cls(name, w_superclass, w_metaclass):
        assert name.startswith('w_')
        w_class = bootstrap_class(instsize=0,  # XXX
                                  w_superclass=w_superclass,
                                  w_metaclass=w_metaclass,
                                  name=name[2:])
        classtable[name] = w_class
        return w_class
# A complete minimal setup (including Behavior) would look like this
#
# class: superclass: metaclass:
# ------------------- ------------------- -------------------
# Object *nil Object class
# Behavior Object Behavior class
# ClassDescription Behavior ClassDescription class
# Class ClassDescription Class class
# Metaclass ClassDescription Metaclass class
# Object class *Class *Metaclass
# Behavior class Object class *Metaclass
# ClassDescription cl Behavior class *Metaclass
# Class class ClassDescription cl *Metaclass
# Metaclass class ClassDescription cl *Metaclass
# Class Name Super class name
cls_nm_tbl = [
["w_Object", "w_ProtoObject"], # there is not ProtoObject in mini.image
["w_Behavior", "w_Object"],
["w_ClassDescription", "w_Behavior"],
["w_Class", "w_ClassDescription"],
["w_Metaclass", "w_ClassDescription"],
]
define_core_cls("w_ProtoObjectClass", None, None)
w_ProtoObjectClass = classtable["w_ProtoObjectClass"]
define_core_cls("w_ProtoObject", None, w_ProtoObjectClass)
for (cls_nm, super_cls_nm) in cls_nm_tbl:
meta_nm = cls_nm + "Class"
meta_super_nm = super_cls_nm + "Class"
w_metacls = define_core_cls(meta_nm, classtable[meta_super_nm], None)
define_core_cls(cls_nm, classtable[super_cls_nm], w_metacls)
w_Class = classtable["w_Class"]
w_Metaclass = classtable["w_Metaclass"]
w_ProtoObjectClass.as_class_get_shadow().s_superclass = \
w_Class.as_class_get_shadow()
# at this point, all classes that still lack a w_class are themselves
# metaclasses
for nm, w_cls_obj in classtable.items():
if w_cls_obj.w_class is None:
w_cls_obj.w_class = w_Metaclass
create_classtable()
def copy_in_globals_classes_known_to_the_vm():
for name in constants.classes_in_special_object_table:
name = 'w_' + name
globals()[name] = classtable[name]
# ___________________________________________________________________________
# Other classes
def define_cls(cls_nm, supercls_nm, instvarsize=0, format=shadow.POINTERS,
varsized=False):
assert cls_nm.startswith("w_")
meta_nm = cls_nm + "Class"
meta_super_nm = supercls_nm + "Class"
w_Metaclass = classtable["w_Metaclass"]
w_meta_cls = classtable[meta_nm] = \
bootstrap_class(0, # XXX
classtable[meta_super_nm],
w_Metaclass,
name=meta_nm[2:])
w_cls = classtable[cls_nm] = \
bootstrap_class(instvarsize,
classtable[supercls_nm],
w_meta_cls,
format=format,
varsized=varsized,
name=cls_nm[2:])
define_cls("w_Magnitude", "w_Object")
define_cls("w_Character", "w_Magnitude", instvarsize=1)
define_cls("w_Number", "w_Magnitude")
define_cls("w_Integer", "w_Number")
define_cls("w_SmallInteger", "w_Integer")
define_cls("w_Float", "w_Number", format=shadow.BYTES)
define_cls("w_Collection", "w_Object")
define_cls("w_SequencableCollection", "w_Collection")
define_cls("w_ArrayedCollection", "w_SequencableCollection")
define_cls("w_Array", "w_ArrayedCollection", varsized=True)
define_cls("w_String", "w_ArrayedCollection", format=shadow.BYTES)
define_cls("w_UndefinedObject", "w_Object")
define_cls("w_Boolean", "w_Object")
define_cls("w_True", "w_Boolean")
define_cls("w_False", "w_Boolean")
define_cls("w_ByteArray", "w_ArrayedCollection", format=shadow.BYTES)
define_cls("w_MethodDict", "w_Object", instvarsize=2, varsized=True)
define_cls("w_CompiledMethod", "w_ByteArray", format=shadow.COMPILED_METHOD)
define_cls("w_MethodContext", "w_Object")
define_cls("w_ContextPart", "w_Object")
define_cls("w_MethodContext", "w_ContextPart")
define_cls("w_BlockContext", "w_ContextPart",
instvarsize=constants.BLKCTX_TEMP_FRAME_START)
copy_in_globals_classes_known_to_the_vm()
|
import requests
import pandas as pd
import json
csv_csi = pd.read_csv('1_CSI.dat', skiprows=4,
names=['TIMESTAMP', 'RECORD', 'Temp_Enc', 'Batt_Volt', 'RH_Enc'])
csv_fws = pd.read_csv('1_FWS.dat', skiprows=4,
names=['TIMESTAMP', 'RECORD', 'AirTemp', 'Humidity', 'Baro', 'Lat_deg', 'Lat_min', 'Long_deg',
'Long_min'])
csv_sonde = pd.read_csv('1_SONDE1.dat', skiprows=4,
names=['TIMESTAMP', 'RECORD', 'Temp_C', 'SpCond_mS', 'Sal', 'TDS_ppt', 'pH', 'ORP_mV',
'DO_percent', 'DO_ppm', 'Turb_NTU', 'Chl_ppb', 'PC_ppb'])
csi = pd.DataFrame(csv_csi)
csijson = csi.to_json(orient='records', force_ascii=False)
fws = pd.DataFrame(csv_fws)
sonde = pd.DataFrame(csv_sonde)
allconcat = pd.concat([csi,fws,sonde],axis=1)
csvcsifws = pd.merge(csi, fws)
csvall = pd.merge(csvcsifws,sonde)
sizelen = [len(fws), len(csi), len(sonde)]
sizelen.sort(reverse=True)
for index, row in csi.iterrows():
print(index, row['TIMESTAMP'], row['RECORD'])
print("csi %s" % len(csi))
print("fws %s" % len(fws))
print("sonde %s" % len(sonde))
# for index, row in csi.iterrows():
# print(index)
# print(row)
|
from collections import defaultdict
from sys import argv
import pandas as pd
import re
import string
import numpy as np
from sklearn.cluster import DBSCAN
from collections import Counter
script, strain_name = argv
table1_df = pd.read_csv('%s_table1.csv' % strain_name, sep='\t')
table1_df['product'].fillna('None', inplace=True)
#This first portion will create the distance matrix
def score_match(table, index_gene1, index_gene2):
score = 0
gene1_category = table1_df.category.loc[index_gene1]
gene2_category = table1_df.category.loc[index_gene2]
if gene1_category == 'hypothetical' or gene2_category == 'hypothetical':
if gene1_category == gene2_category:
score = score + 1
elif gene1_category != gene2_category:
score = score + 2
else:
if gene1_category == gene2_category:
score = score + 5
gene1_best_BGC = table1_df.best_hit_BGC.loc[index_gene1]
gene2_best_BGC = table1_df.best_hit_BGC.loc[index_gene2]
if gene1_best_BGC != 'None' and gene2_best_BGC != 'None':
if gene1_best_BGC == gene2_best_BGC:
score = score + 2
gene1_best_hit_pos = re.search(r'^\D*([0-9]*)',table1_df.best_hit_gene_loc.loc[index_gene1])
gene2_best_hit_pos = re.search(r'^\D*([0-9]*)',table1_df.best_hit_gene_loc.loc[index_gene2])
dif_best_hit_pos = abs(abs((int(gene2_best_hit_pos.group(1)) - int(gene1_best_hit_pos.group(1)))) - abs((index_gene2-index_gene1)))
if dif_best_hit_pos == 0:
score = score + 3
elif dif_best_hit_pos == 1:
score = score + 2
elif dif_best_hit_pos == 2:
score = score + 1
else:
score = score + 1
return score
for index,row in table1_df.iterrows():
scores = []
for gene in range(0,len(table1_df)):
scores.append(score_match(table1_df,gene,index))
if index == 0:
A = np.vstack([scores])
else:
A = np.vstack([A,scores])
#This second portion will run dbscan to create a subclusters possibilities
def repeated(db_arrays, db):
    for i in range(0, len(db_arrays) - 1):
        if np.array_equal(db_arrays[i], db):
            return True
    return False
def parse_db(db):
D = defaultdict(list)
for i,item in enumerate(db):
D[item].append(i)
D = {k:v for k,v in D.items() if len(v)>1}
return D
def find_category(categories,col5):
if 'biosynthetic' in categories:
col5.append('biosynthetic')
else:
if len(categories) > 1:
category = re.search(r'^\[\(\'(\S*)\'',str(Counter(categories).most_common(1))).group(1)
col5.append('%s'%category)
else:
col5.append('%s'%categories[0])
count = 0
for itn in range(1,len(A)):
db = DBSCAN(eps=itn, min_samples=2).fit_predict(A)
if itn == 1:
db_arrays = np.vstack([db])
else:
db_arrays = np.vstack([db_arrays,db])
if repeated(db_arrays,db) == True:
continue
else:
subcluster_dict = parse_db(db)
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
        for key, value in subcluster_dict.items():
            col1.append(strain_name)
            col2.append(string.ascii_uppercase[list(subcluster_dict.keys()).index(key)])
col3.append(len(value))
categories = []
genes = []
for item in value:
categories.append(table1_df.category.loc[item])
genes.append(table1_df.locus_tag.loc[item])
genes = ','.join(genes)
col4.append(genes)
find_category(categories,col5)
frames = {'BGC':col1, 'subcluster':col2, 'CDSs':col3, 'loci':col4, 'category':col5}
count = count + 1
table2_df = pd.DataFrame(frames, index=None)
table2_df.to_csv('%s_table2_%d.csv' % (strain_name,count), sep='\t', index=False)
|
import sys
import os
import argparse
import torch
import logging
import time
from tqdm import tqdm
from image import *
from Models.lightUnetPlusPlus import lightUnetPlusPlus
def predict(model,
threshold,
device,
dataset,
output_paths,
color):
with tqdm(desc=f'Prediction', unit=' img') as progress_bar:
for i, (image, _) in enumerate(dataset):
image = image[0, ...]
#ground_truth = ground_truth[0, ...]
image = image.to(device)
#ground_truth = ground_truth.to(device)
with torch.no_grad():
mask_predicted = model(image)
placeholder_path(output_paths[i])
save_predicted_mask(mask_predicted, device, color=color, filename=(output_paths[i]+"/predicted.png"), threshold=threshold)
progress_bar.update()
if __name__ == '__main__':
t_start = time.time()
current_path = sys.argv[0]
current_path = current_path.replace("predict.py", "")
# Hyperparameters
batch_size = 1
num_classes = 2
n_channels = 6
# Arg parse
parser = argparse.ArgumentParser()
parser.add_argument("--input", "-i",
help="path to the input directory, containing instance directories, each instance directory should contain before.png and after.png 650x650 images")
parser.add_argument("--output", "-o",
help="path to the output directory, where the change masks will be saved, can be the same as the input directory")
parser.add_argument("--threshold", "-t", type=float,
help="a value between 0 and 1, to classify each pixel, if not given the mask pixels will have continuous values between the two classes")
parser.add_argument("--color", "-c",
help="background color of the generated masks, can be 'red', 'blue' or 'black'")
args = parser.parse_args()
# Setup of log and device
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
device = torch.device('cpu' if not torch.cuda.is_available() else 'cuda')
logging.info(f'Using {device}')
    instance_names = next(os.walk(args.input))[1]
dataset, output_paths = load_dataset_predict(args.input, args.output, instance_names, batch_size)
logging.info(f'Data loaded : {len(output_paths)} instances found')
# Network creation, uncomment the one you want to use
# model = BasicUnet(n_channels= n_channels, n_classes=num_classes)
# model = modularUnet(n_channels=n_channels, n_classes=num_classes, depth=2)
# model = unetPlusPlus(n_channels=n_channels, n_classes=num_classes)
model = lightUnetPlusPlus(n_channels=n_channels, n_classes=num_classes)
model.to(device)
model.load_state_dict(torch.load('Weights/last.pth',map_location=torch.device(device)))
model.eval()
logging.info(f'Model loaded\n')
try:
predict(model=model,
threshold=args.threshold,
device=device,
dataset=dataset,
output_paths=output_paths,
color=args.color)
except KeyboardInterrupt:
logging.info(f'Interrupted by Keyboard')
finally:
t_end = time.time()
print("\nDone in " + str(int((t_end - t_start))) + " sec")
|
# Beam In Vessel Test
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
sys.path.append(os.path.join('../'))
from lib.BeamDynamicsTools.Boundary import Boundary
from lib.BeamDynamicsTools.Bfield import Bfield, BfieldTF, BfieldVF
from lib.BeamDynamicsTools.Trajectory import Trajectory
from lib.BeamDynamicsTools.Beam import Beam
from lib.BeamDynamicsTools.Ellipse import Ellipse
import pylab as pl
# ------------------------------------------------------------------------------
# Input Sigma Matrix
S1 = np.matrix([
[1.502802755999999818e+01, -1.284540872159999791e+00, 0.000000000000000000e+00,
0.000000000000000000e+00, 0.000000000000000000e+00, 0.000000000000000000e+00],
[-1.284540872159999791e+00, 1.759299135999999919e+01, 0.000000000000000000e+00,
0.000000000000000000e+00, 0.000000000000000000e+00, 0.000000000000000000e+00],
[0.000000000000000000e+00, 0.000000000000000000e+00, 2.312744280999999802e+01, -
1.934440661508000048e+01, 0.000000000000000000e+00, 0.000000000000000000e+00],
[0.000000000000000000e+00, 0.000000000000000000e+00, -1.934440661508000048e+01,
1.971182403999999977e+01, 0.000000000000000000e+00, 0.000000000000000000e+00],
[0.000000000000000000e+00, 0.000000000000000000e+00, 0.000000000000000000e+00,
0.000000000000000000e+00, 4.679517649000000290e+01, 8.473947224080001206e+01],
[0.000000000000000000e+00, 0.000000000000000000e+00, 0.000000000000000000e+00, 0.000000000000000000e+00, 8.473947224080001206e+01, 1.572014440000000093e+02]], float)
# ------------------------------------------------------------------------------
# Define Boundary
# Boundary(Rb,Zb)
#Rb = [ 0.2 , 0.25, 0.4 , 0.6 , 0.8 , 0.8 , 0.6 , 0.4 , 0.25, 0.2 ]
#Zb = [-0.55,-0.6 ,-0.6 ,-0.5 ,-0.2 , 0.2 , 0.5 , 0.6 , 0.6 , 0.55]
Rb = [0.2, 2.0, 2.0, 0.2]
Zb = [2.0, 2.0, -2.0, -2.0]
Vessel = Boundary(Rb, Zb, cw=-1)
Vessel.Plot2D(0)
# class Trajectory(self,Vessel,B,dS=1e-3,r0=[1.5,0.0,0.5],v0=[-1.0,0.0,0.0],a0=[0.0,0.0,0.0],A0=2,T0=0.9,Nmax=10000):
#T = Trajectory(Vessel,B)
ax = Vessel.Figure3D(1)
Vessel.Plot3D(ax)
B0 = [0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4]
if True:
# Inputs for 4 B-field settings
In = np.array([0.0, 1600.0, 3120, 4450.0])
# Bn = np.array([ 0.0, 0.00969697, 0.01890909, 0.0269697 ])
Bn = np.array([0.0, 0.05818182, 0.11345455, 0.16181818])
if False:
# Inputs for Fine poloidal sweep
In = np.array([
0.0000,
620.00,
1110.0,
1600.0,
1780.0,
2400.0,
3000.0,
3120.0,
3470.0,
4000.0,
4450.0,
4800.0])
Bn = np.array([
0.0000000000,
0.0225454545,
0.0403636364,
0.0581818182,
0.0647272727,
0.0872727273,
0.1090909091,
0.1134545455,
0.1261818182,
0.1454545455,
0.1618181818,
0.1745454545])
if True:
Angle = []
Coordinates = []
Path = '../output/'
# ------------------------------------------------------------------------------
# Calculate Trajectories
for i in range(len(Bn)):
B = BfieldTF(B0=Bn[i])
Bv = BfieldVF(B0=0.00000)
T = Trajectory(Vessel, B, Bv, M0=0.511e6, Method='LeapFrog')
AIMSBeam = Beam(T, S1)
AIMSBeam.Trace()
# ------------------------------------------------------------------------------
# Save beam and target parameters
if False:
np.savetxt(Path + 'Curvature_I_' + str(int(In[i])) + '.txt', T.k)
np.savetxt(Path + 'SCoord_I_' + str(int(In[i])) + '.txt', T.s)
np.savetxt(Path + 'GradB_I_' + str(int(In[i])) + '.txt', T.gradB)
np.savetxt(Path + 'GradBk_I_' + str(int(In[i])) + '.txt', T.gradBn)
np.savetxt(Path + 'GradBn_I_' + str(int(In[i])) + '.txt', T.gradBk)
np.savetxt(Path + 'TargetBasis_I_' +
str(int(In[i])) + '.txt', T.target.TargetBasis)
np.savetxt(Path + 'SigmaBasis_I_' +
str(int(In[i])) + '.txt', T.target.SigmaBasis)
np.savetxt(Path + 'SigmaFinal_I_' +
str(int(In[i])) + '.txt', AIMSBeam.Target.Sigma)
Angle.append([T.target.VAngle, T.target.HAngle])
Coordinates.append([T.target.R, T.target.Z, T.target.Phi])
# ------------------------------------------------------------------------------
# Plot Trajectories
T.Plot3D(ax)
T.target.Plot3D
plt.figure(10)
T.Plot2D()
plt.figure(11)
T.Plot2D('top')
plt.figure(10)
Vessel.Border()
plt.xlim(0.2, 1.4)
plt.ylim(-0.7, 0.5)
plt.xlabel('R [m]')
plt.ylabel('Z [m]')
plt.title('Poloidal Projection')
plt.figure(11)
Vessel.Border('top')
plt.xlim(0, 1.2)
plt.ylim(-0.6, 0.6)
plt.xlabel('x [m]')
plt.ylabel('y [m]')
plt.title('Midplane Projection')
# np.savetxt(Path+'TargetAngle_Vert_Horiz.txt',Angle)
# np.savetxt(Path+'TargetCoordinates.txt',Coordinates)
plt.show()
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline code for PythonML activation to GA."""
import datetime
import warnings
from googleapiclient import discovery
from googleapiclient.http import MediaFileUpload
import httplib2
import numpy as np
from oauth2client.service_account import ServiceAccountCredentials
from google.oauth2 import service_account
import params
import pandas as pd
from google.cloud import bigquery
import pkl_predictions
GA_ACCOUNT_ID = params.GA_ACCOUNT_ID
GA_PROPERTY_ID = params.GA_PROPERTY_ID
GA_DATASET_ID = params.GA_DATASET_ID
GA_IMPORT_METHOD = params.GA_IMPORT_METHOD
BQ_READ_QUERY = params.BQ_READ_QUERY
MODEL_FILE_NAME = params.MODEL_FILE_NAME
SERVICE_ACCOUNT_FILE = "svc_key.json"
CLOUD_SCOPES = ["https://www.googleapis.com/auth/cloud-platform"]
CSV_LOCATION = "output.csv"
GA_SCOPES = [
"https://www.googleapis.com/auth/analytics.readonly",
"https://www.googleapis.com/auth/analytics.edit",
"https://www.googleapis.com/auth/analytics"
]
GA_API_NAME = "analytics"
GA_API_VERSION = "v3"
warnings.simplefilter(action="ignore", category=FutureWarning)
def authorize_ga_api():
"""Fetches the GA API obj.
Returns:
ga_api: GA API obj.
"""
ga_credentials = ServiceAccountCredentials.from_json_keyfile_name(
SERVICE_ACCOUNT_FILE, GA_SCOPES)
http = ga_credentials.authorize(http=httplib2.Http())
ga_api = discovery.build(GA_API_NAME, GA_API_VERSION, http=http)
return ga_api
def read_from_bq():
"""Reads the prediction query from Bigquery using BQML.
Returns:
dataframe: BQML model results dataframe.
"""
credentials = service_account.Credentials.from_service_account_file(
SERVICE_ACCOUNT_FILE, scopes=["https://www.googleapis.com/auth/cloud-platform"])
bq_client = bigquery.Client(credentials=credentials, project=credentials.project_id)
#bq_client = bigquery.Client()
query_job = bq_client.query(BQ_READ_QUERY)
results = query_job.result()
dataframe = results.to_dataframe()
return dataframe
def prepare_csv(df):
"""Converts results dataframe to CSV.
Args:
df: final results dataframe for GA export.
"""
csv_string = df.to_csv(index=False)
with open(CSV_LOCATION, "w+") as f:
f.write(csv_string)
def write_to_ga_via_di(ga_api):
"""Write the prediction results into GA via data import.
Args:
ga_api: Google Analytics Management API object.
"""
media = MediaFileUpload(CSV_LOCATION,
mimetype="application/octet-stream",
resumable=False)
ga_api.management().uploads().uploadData(
accountId=GA_ACCOUNT_ID,
webPropertyId=GA_PROPERTY_ID,
customDataSourceId=GA_DATASET_ID,
media_body=media).execute()
def delete_ga_prev_uploads(ga_api):
"""Delete previous GA data import files.
Args:
ga_api: Google Analytics Management API object.
"""
response = ga_api.management().uploads().list(
accountId=GA_ACCOUNT_ID,
webPropertyId=GA_PROPERTY_ID,
customDataSourceId=GA_DATASET_ID).execute()
uploads = response["items"]
cids = [upload["id"] for upload in uploads[1:]]
delete_request_body = {"customDataImportUids": cids}
ga_api.management().uploads().deleteUploadData(
accountId=GA_ACCOUNT_ID,
webPropertyId=GA_PROPERTY_ID,
customDataSourceId=GA_DATASET_ID,
body=delete_request_body).execute()
def main():
"""Code to trigger workflow.
"""
try:
dataframe = read_from_bq()
print("Read the input data from BQ.")
processed_df = pkl_predictions.preprocess(dataframe)
print("Pre-processed the input data.")
results_df = pkl_predictions.get_predictions(MODEL_FILE_NAME,
processed_df)
print("Fetched prediction results.")
if GA_IMPORT_METHOD == "di":
print("Uploading to GA via DI.....")
prepare_csv(results_df)
ga_api = authorize_ga_api()
write_to_ga_via_di(ga_api)
delete_ga_prev_uploads(ga_api)
print("Upload via DI complete.")
elif GA_IMPORT_METHOD == "mp":
write_to_ga_via_mp(output_df)
else:
raise Exception("GA Import method not found.")
timestamp_utc = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print("{0},SUCCESS".format(timestamp_utc))
except Exception as e:
timestamp_utc = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print("{0},ERROR,{1}".format(timestamp_utc, str(e)))
main()
|
from enum import Flag, auto
class Context(Flag):
"""
Used to express either the data or program execution context.
"""
BATCH = auto()
STREAMING = auto()
ONLINE = auto()
def __str__(self):
return self.name
__repr__ = __str__
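# Usage sketch: members combine and test as bit flags.
#   ctx = Context.BATCH | Context.STREAMING
#   bool(ctx & Context.ONLINE)   # False
#   str(Context.BATCH)           # 'BATCH'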
|
from heapq import heappush, heappop
class Solution:
def solve(self, nums, k):
if not nums:
return 0
ans = 1
cur_sum = 0
maxes = []
j = -1
for i in range(len(nums)):
while maxes and maxes[0][1] < i:
heappop(maxes)
if j < i:
heappush(maxes, [-nums[i],i])
while j < len(nums)-1 and max(-maxes[0][0], nums[j+1])*(j+1-i+1)-(cur_sum+nums[j+1]) <= k:
cur_sum += nums[j+1]
heappush(maxes, [-nums[j+1],j+1])
j += 1
ans = max(ans, j-i+1)
cur_sum -= nums[i]
return ans
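# The sliding window above keeps a lazily-pruned max-heap of [-value, index]
# pairs, so the cost of raising every element of nums[i..j] to the window
# maximum (max * window_length - window_sum) can be checked as the window grows.
# Usage sketch, assuming the task is "longest sublist that can be equalised with
# at most k single-unit increments":
#   Solution().solve([1, 1, 1], 0)   # -> 3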
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import cv2
class Debugger(object):
def __init__(self, down_ratio=4):
self.imgs = {}
colors = [(color_list[_]).astype(np.uint8) \
for _ in range(len(color_list))]
self.colors = np.array(colors, dtype=np.uint8).reshape(len(colors), 1, 1, 3)
self.dim_scale = 1
self.names = class_name
self.down_ratio=down_ratio
def add_img(self, img, img_id='default', revert_color=False):
if revert_color:
img = 255 - img
self.imgs[img_id] = img.copy()
def add_blend_img(self, back, fore, img_id='blend', trans=0.7):
if fore.shape[0] != back.shape[0] or fore.shape[0] != back.shape[1]:
fore = cv2.resize(fore, (back.shape[1], back.shape[0]))
if len(fore.shape) == 2:
fore = fore.reshape(fore.shape[0], fore.shape[1], 1)
self.imgs[img_id] = (back * (1. - trans) + fore * trans)
self.imgs[img_id][self.imgs[img_id] > 255] = 255
self.imgs[img_id][self.imgs[img_id] < 0] = 0
self.imgs[img_id] = self.imgs[img_id].astype(np.uint8).copy()
def gen_colormap(self, img, output_res=None):
img = img.copy()
c, h, w = img.shape[0], img.shape[1], img.shape[2]
if output_res is None:
output_res = (h * self.down_ratio, w * self.down_ratio)
img = img.transpose(1, 2, 0).reshape(h, w, c, 1).astype(np.float32)
colors = np.array(
self.colors, dtype=np.float32).reshape(-1, 3)[:c].reshape(1, 1, c, 3)
color_map = (img * colors).max(axis=2).astype(np.uint8)
color_map = cv2.resize(color_map, (output_res[0], output_res[1]))
return color_map
def add_coco_bbox(self, bbox, cat, conf=1, show_txt=True, img_id='default'):
bbox = np.array(bbox, dtype=np.int32)
cat = int(cat)
c = self.colors[cat][0][0].tolist()
txt = '{}{:.3f}'.format(self.names[cat], conf)
font = cv2.FONT_HERSHEY_SIMPLEX
cat_size = cv2.getTextSize(txt, font, 0.5, 2)[0]
cv2.rectangle(
self.imgs[img_id], (bbox[0], bbox[1]), (bbox[2], bbox[3]), c, 2)
if show_txt:
cv2.rectangle(self.imgs[img_id],
(bbox[0], bbox[1] - cat_size[1] - 2),
(bbox[0] + cat_size[0], bbox[1] - 2), c, -1)
cv2.putText(self.imgs[img_id], txt, (bbox[0], bbox[1] - 2),
font, 0.5, (0, 0, 0), thickness=1, lineType=cv2.LINE_AA)
def show_all_imgs(self):
for i, v in self.imgs.items():
cv2.imshow('helmet detector', v)
if cv2.waitKey(1) == 27:
import sys
sys.exit(0)
class_name = [
'head','helmet',
]
color_list = np.array(
[
0.667, 0.000, 1.000,
0.929, 0.694, 0.125,
]
).astype(np.float32)
color_list = color_list.reshape((-1, 3)) * 255
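# Usage sketch (assumes `frame` is a BGR uint8 image, e.g. from cv2.imread):
#   debugger = Debugger(down_ratio=4)
#   debugger.add_img(frame, img_id='frame')
#   debugger.add_coco_bbox([50, 60, 180, 220], cat=1, conf=0.87, img_id='frame')
#   debugger.show_all_imgs()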
|
from hypernets.reader.spectrum import Spectrum
class Spectra(object):
def __init__(self, filename, line=None, plt=None):
self.index = 0
self.offset = 0
self.line = line
self.plt = plt
with open(filename, 'rb') as fd:
self.data = fd.read()
self.update()
def next_spectrum(self, event):
# TODO : boundary
self.index += 1
self.offset += self.current_spectrum.total
self.update()
def prev_spectrum(self, event):
# TODO : ensure positivity
self.index -= 1
self.offset -= self.current_spectrum.total # different when spec BOTH
self.update()
def update(self):
self.current_spectrum = Spectrum(self.data[self.offset:]) # not optim
if self.line is not None and self.plt is not None:
self.line.set_xdata(range(len(self.current_spectrum.counts)))
self.line.set_ydata(self.current_spectrum.counts)
self.plt.draw()
|
# -*- coding: utf-8 -*-
import io
import re
import tempfile
import unittest
from collections import OrderedDict
from datetime import datetime, date
from fractions import Fraction
from os.path import getsize
import six
from mock import patch
import cloudinary.utils
from cloudinary import CL_BLANK
from cloudinary.utils import build_list_of_dicts, json_encode, encode_unicode_url, base64url_encode, \
patch_fetch_format, cloudinary_scaled_url, chain_transformations, generate_transformation_string, build_eager
from test.helper_test import TEST_IMAGE, REMOTE_TEST_IMAGE
from test.test_api import API_TEST_TRANS_SCALE100, API_TEST_TRANS_SCALE100_STR, API_TEST_TRANS_SEPIA_STR, \
API_TEST_TRANS_SEPIA
DEFAULT_ROOT_PATH = 'http://res.cloudinary.com/test123/'
DEFAULT_UPLOAD_PATH = DEFAULT_ROOT_PATH + 'image/upload/'
DEFAULT_FETCH_PATH = DEFAULT_ROOT_PATH + 'image/fetch/'
VIDEO_UPLOAD_PATH = DEFAULT_ROOT_PATH + 'video/upload/'
TEST_ID = 'test'
FETCH_URL = "http://cloudinary.com/images/logo.png"
IMAGE_VERSION = "1234"
IMAGE_VERSION_STR = "v" + IMAGE_VERSION
DEFAULT_VERSION_STR = 'v1'
TEST_FOLDER = 'folder/test'
class TestUtils(unittest.TestCase):
crop_transformation = {'crop': 'crop', 'width': 100}
crop_transformation_str = 'c_crop,w_100'
raw_transformation = "c_fill,e_grayscale,q_auto"
custom_function_wasm = {"function_type": "wasm", "source": "blur.wasm"}
custom_function_wasm_str = "wasm:blur.wasm"
custom_function_remote = {
"function_type": "remote",
"source": "https://df34ra4a.execute-api.us-west-2.amazonaws.com/default/cloudinaryFn"}
custom_function_remote_str = \
"remote:aHR0cHM6Ly9kZjM0cmE0YS5leGVjdXRlLWFwaS51cy13ZXN0LTIuYW1hem9uYXdzLmNvbS9kZWZhdWx0L2Nsb3VkaW5hcnlGbg=="
def setUp(self):
cloudinary.config(cloud_name="test123",
cname=None, # for these tests without actual upload, we ignore cname
api_key="a", api_secret="b",
secure_distribution=None,
private_cdn=False)
def __test_cloudinary_url(self, public_id=TEST_ID, options=None, expected_url=None, expected_options=None):
if expected_options is None:
expected_options = {}
if options is None:
options = {}
url, options = cloudinary.utils.cloudinary_url(public_id, **options)
self.assertEqual(expected_url, url)
self.assertEqual(expected_options, options)
def test_cloud_name(self):
"""should use cloud_name from config"""
self.__test_cloudinary_url(options={}, expected_url=DEFAULT_UPLOAD_PATH + "test")
def test_cloud_name_options(self):
"""should allow overriding cloud_name in options"""
self.__test_cloudinary_url(options={"cloud_name": "test321"},
expected_url="http://res.cloudinary.com/test321/image/upload/test")
def test_secure_distribution(self):
"""should use default secure distribution if secure=True"""
self.__test_cloudinary_url(options={"secure": True},
expected_url="https://res.cloudinary.com/test123/image/upload/test")
def test_secure_distribution_overwrite(self):
"""should allow overwriting secure distribution if secure=True"""
self.__test_cloudinary_url(options={"secure": True, "secure_distribution": "something.else.com"},
expected_url="https://something.else.com/test123/image/upload/test")
    def test_secure_distribution_from_config(self):
"""should take secure distribution from config if secure=True"""
cloudinary.config().secure_distribution = "config.secure.distribution.com"
self.__test_cloudinary_url(options={"secure": True},
expected_url="https://config.secure.distribution.com/test123/image/upload/test")
def test_secure_akamai(self):
"""should default to akamai if secure is given with private_cdn and no secure_distribution"""
self.__test_cloudinary_url(options={"secure": True, "private_cdn": True},
expected_url="https://test123-res.cloudinary.com/image/upload/test")
def test_secure_non_akamai(self):
"""should not add cloud_name if private_cdn and secure non akamai secure_distribution"""
self.__test_cloudinary_url(
options={"secure": True, "private_cdn": True, "secure_distribution": "something.cloudfront.net"},
expected_url="https://something.cloudfront.net/image/upload/test")
def test_http_private_cdn(self):
"""should not add cloud_name if private_cdn and not secure"""
self.__test_cloudinary_url(options={"private_cdn": True},
expected_url="http://test123-res.cloudinary.com/image/upload/test")
def test_format(self):
"""should use format from options"""
self.__test_cloudinary_url(options={"format": "jpg"}, expected_url=DEFAULT_UPLOAD_PATH + "test.jpg")
def test_crop(self):
"""should always use width and height from options"""
self.__test_cloudinary_url(
options={"width": 100, "height": 100},
expected_url=DEFAULT_UPLOAD_PATH + "h_100,w_100/test",
expected_options={"width": 100, "height": 100})
self.__test_cloudinary_url(
options={"width": 100, "height": 100, "crop": "crop"},
expected_url=DEFAULT_UPLOAD_PATH + "c_crop,h_100,w_100/test",
expected_options={"width": 100, "height": 100})
def test_html_width_height_on_crop_fit_limit(self):
"""should not pass width and height to html in case of fit or limit crop"""
self.__test_cloudinary_url(options={"width": 100, "height": 100, "crop": "limit"},
expected_url=DEFAULT_UPLOAD_PATH + "c_limit,h_100,w_100/test")
self.__test_cloudinary_url(options={"width": 100, "height": 100, "crop": "fit"},
expected_url=DEFAULT_UPLOAD_PATH + "c_fit,h_100,w_100/test")
def test_html_width_height_on_angle(self):
"""should not pass width and height to html in case angle was used"""
self.__test_cloudinary_url(options={"width": 100, "height": 100, "crop": "scale", "angle": "auto"},
expected_url=DEFAULT_UPLOAD_PATH + "a_auto,c_scale,h_100,w_100/test")
def test_various_options(self):
"""should use x, y, radius, prefix, gravity and quality from options"""
self.__test_cloudinary_url(
options={"x": 1, "y": 2, "opacity": 20, "radius": 3, "gravity": "center", "quality": 0.4, "prefix": "a"},
expected_url=DEFAULT_UPLOAD_PATH + "g_center,o_20,p_a,q_0.4,r_3,x_1,y_2/test")
self.__test_cloudinary_url(options={"gravity": "auto", "width": 0.5, "crop": "crop"},
expected_url=DEFAULT_UPLOAD_PATH + "c_crop,g_auto,w_0.5/test")
def test_radius(self):
cases = (
({"radius": 10}, "r_10"),
({"radius": "10"}, "r_10"),
({"radius": "$v", "variables": [("$v", 10)]}, "$v_10,r_$v"),
({"radius": [10, 20]}, "r_10:20"),
({"radius": "10:20"}, "r_10:20"),
({"radius": "10:$v", "variables": [("$v", 20)]}, "$v_20,r_10:$v"),
({"radius": [10, 20, 30]}, "r_10:20:30"),
({"radius": "10:20:30"}, "r_10:20:30"),
({"radius": "10:$v:30", "variables": [("$v", 20)]}, "$v_20,r_10:$v:30"),
({"radius": [10, 20, 30, 40]}, "r_10:20:30:40"),
({"radius": "10:20:30:40"}, "r_10:20:30:40"),
({"radius": "10:$v:30:40", "variables": [("$v", 20)]}, "$v_20,r_10:$v:30:40"),
)
for options, expected_part_url in cases:
self.__test_cloudinary_url(options=options, expected_url=DEFAULT_UPLOAD_PATH + expected_part_url + "/test")
wrong_options = (
{"radius": []},
{"radius": ()},
{"radius": [10, 20, 30, 40, 50]},
)
for options in wrong_options:
with self.assertRaises(ValueError):
cloudinary.utils.cloudinary_url("test", **options)
def test_should_support_auto_width(self):
"""should support auto width"""
self.__test_cloudinary_url(options={"width": "auto:20", "crop": 'fill'},
expected_url=DEFAULT_UPLOAD_PATH + "c_fill,w_auto:20/test",
expected_options={'responsive': True})
self.__test_cloudinary_url(options={"width": "auto:20:350", "crop": 'fill'},
expected_url=DEFAULT_UPLOAD_PATH + "c_fill,w_auto:20:350/test",
expected_options={'responsive': True})
self.__test_cloudinary_url(options={"width": "auto:breakpoints", "crop": 'fill'},
expected_url=DEFAULT_UPLOAD_PATH + "c_fill,w_auto:breakpoints/test",
expected_options={'responsive': True})
self.__test_cloudinary_url(options={"width": "auto:breakpoints_100_1900_20_15", "crop": 'fill'},
expected_url=DEFAULT_UPLOAD_PATH + "c_fill,w_auto:breakpoints_100_1900_20_15/test",
expected_options={'responsive': True})
self.__test_cloudinary_url(options={"width": "auto:breakpoints:json", "crop": 'fill'},
expected_url=DEFAULT_UPLOAD_PATH + "c_fill,w_auto:breakpoints:json/test",
expected_options={'responsive': True})
def test_original_width_and_height(self):
"""should support original width and height"""
self.__test_cloudinary_url(options={"crop": "crop", "width": "ow", "height": "oh"},
expected_url=DEFAULT_UPLOAD_PATH + "c_crop,h_oh,w_ow/test")
def test_support_a_percent_value(self):
"""quality support a percent value"""
self.__test_cloudinary_url(
options={"x": 1, "y": 2, "radius": 3, "gravity": "center", "quality": 80, "prefix": "a"},
expected_url=DEFAULT_UPLOAD_PATH + "g_center,p_a,q_80,r_3,x_1,y_2/test")
self.__test_cloudinary_url(
options={"x": 1, "y": 2, "radius": 3, "gravity": "center", "quality": "80:444", "prefix": "a"},
expected_url=DEFAULT_UPLOAD_PATH + "g_center,p_a,q_80:444,r_3,x_1,y_2/test")
def test_should_support_auto_value(self):
"""quality should support auto value"""
self.__test_cloudinary_url(
options={"x": 1, "y": 2, "radius": 3, "gravity": "center", "quality": "auto", "prefix": "a"},
expected_url=DEFAULT_UPLOAD_PATH + "g_center,p_a,q_auto,r_3,x_1,y_2/test")
self.__test_cloudinary_url(
options={"x": 1, "y": 2, "radius": 3, "gravity": "center", "quality": "auto:good", "prefix": "a"},
expected_url=DEFAULT_UPLOAD_PATH + "g_center,p_a,q_auto:good,r_3,x_1,y_2/test")
self.__test_cloudinary_url(
options={"width": 100, "height": 100, "crop": 'crop', "gravity": "auto:ocr_text"},
expected_url=DEFAULT_UPLOAD_PATH + "c_crop,g_auto:ocr_text,h_100,w_100/test",
expected_options={"width": 100, "height": 100})
self.__test_cloudinary_url(
options={"width": 100, "height": 100, "crop": 'crop', "gravity": "ocr_text"},
expected_url=DEFAULT_UPLOAD_PATH + "c_crop,g_ocr_text,h_100,w_100/test",
expected_options={"width": 100, "height": 100})
self.__test_cloudinary_url(
options={"width": 100, "height": 100, "crop": 'crop', "gravity": "ocr_text:adv_ocr"},
expected_url=DEFAULT_UPLOAD_PATH + "c_crop,g_ocr_text:adv_ocr,h_100,w_100/test",
expected_options={"width": 100, "height": 100})
def test_transformation_simple(self):
"""should support named transformation"""
self.__test_cloudinary_url(options={"transformation": "blip"}, expected_url=DEFAULT_UPLOAD_PATH + "t_blip/test")
def test_transformation_array(self):
"""should support array of named transformations"""
self.__test_cloudinary_url(options={"transformation": ["blip", "blop"]},
expected_url=DEFAULT_UPLOAD_PATH + "t_blip.blop/test")
def test_base_transformations(self):
"""should support base transformation"""
self.__test_cloudinary_url(
options={"transformation": {"x": 100, "y": 100, "crop": "fill"}, "crop": "crop", "width": 100},
expected_url=DEFAULT_UPLOAD_PATH + "c_fill,x_100,y_100/c_crop,w_100/test",
expected_options={"width": 100})
def test_base_transformation_array(self):
"""should support array of base transformations"""
options = {"transformation": [{"x": 100, "y": 100, "width": 200, "crop": "fill"}, {"radius": 10}],
"crop": "crop", "width": 100}
result, options = cloudinary.utils.cloudinary_url("test", **options)
self.assertEqual(options, {"width": 100})
self.assertEqual(result, DEFAULT_UPLOAD_PATH + "c_fill,w_200,x_100,y_100/r_10/c_crop,w_100/test")
def test_no_empty_transformation(self):
"""should not include empty transformations"""
self.__test_cloudinary_url(options={"transformation": [{}, {"x": 100, "y": 100, "crop": "fill"}, {}]},
expected_url=DEFAULT_UPLOAD_PATH + "c_fill,x_100,y_100/test")
def test_raw_transformation(self):
"""should include raw_transformation"""
self.__test_cloudinary_url(options={"transformation": {"width": 100, "raw_transformation": "g_north_west"}},
expected_url=DEFAULT_UPLOAD_PATH + "w_100,g_north_west/test")
self.__test_cloudinary_url(options={"transformation": {"raw_transformation": "g_north_west"}},
expected_url=DEFAULT_UPLOAD_PATH + "g_north_west/test")
self.__test_cloudinary_url(options={"transformation": {"width": 100, "raw_transformation": ""}},
expected_url=DEFAULT_UPLOAD_PATH + "w_100/test")
def test_chain_transformations(self):
"""Should support chaining transformations at the end"""
options = {"effect": "art:incognito", "format": "png"}
chained_transformations = [
{"x": 100, "y": 100, "width": 200, "crop": "fill"},
{"radius": 10},
{"raw_transformation": self.raw_transformation}
]
actual_options = chain_transformations(options, chained_transformations)
actual_transformation_str = generate_transformation_string(**actual_options)[0]
self.assertEqual("e_art:incognito/c_fill,w_200,x_100,y_100/r_10/" + self.raw_transformation,
actual_transformation_str)
# Should support chaining transformations, when default options have no transformations
actual_options = chain_transformations({}, chained_transformations)
actual_transformation_str = generate_transformation_string(**actual_options)[0]
self.assertEqual("c_fill,w_200,x_100,y_100/r_10/" + self.raw_transformation,
actual_transformation_str)
# Should handle empty list of chained transformations
actual_options = chain_transformations(options, [])
actual_transformation_str = generate_transformation_string(**actual_options)[0]
self.assertEqual("e_art:incognito", actual_transformation_str)
# Should handle empty options and empty list of chained transformations
actual_options = chain_transformations({}, [])
actual_transformation_str = generate_transformation_string(**actual_options)[0]
self.assertEqual("", actual_transformation_str)
# Should remove transformation options from resulting options
actual_options = chain_transformations(dict(width=200, height=100), chained_transformations)
self.assertNotIn("width", actual_options)
self.assertNotIn("height", actual_options)
actual_transformation_str = generate_transformation_string(**actual_options)[0]
self.assertEqual("h_100,w_200/c_fill,w_200,x_100,y_100/r_10/c_fill,e_grayscale,q_auto",
actual_transformation_str)
# Should chain transformations with a fetch option
options["type"] = "fetch"
patch_fetch_format(options)
actual_options = chain_transformations(options, chained_transformations)
# format should be removed when we use fetch
self.assertNotIn("format", actual_options)
actual_transformation_str = generate_transformation_string(**actual_options)[0]
# Should use url format as a fetch_format
self.assertEqual("e_art:incognito,f_png/c_fill,w_200,x_100,y_100/r_10/" + self.raw_transformation,
actual_transformation_str)
options["fetch_format"] = "gif"
actual_options = chain_transformations(options, chained_transformations)
actual_transformation_str = generate_transformation_string(**actual_options)[0]
# Should use fetch_format
self.assertEqual("e_art:incognito,f_gif/c_fill,w_200,x_100,y_100/r_10/" + self.raw_transformation,
actual_transformation_str)
def test_size(self):
"""should support size"""
options = {"size": "10x10", "crop": "crop"}
result, options = cloudinary.utils.cloudinary_url("test", **options)
self.assertEqual(options, {"width": "10", "height": "10"})
self.assertEqual(result, DEFAULT_UPLOAD_PATH + "c_crop,h_10,w_10/test")
def test_type(self):
"""should use type from options"""
self.__test_cloudinary_url(options={"type": "facebook"}, expected_url=DEFAULT_ROOT_PATH + "image/facebook/test")
def test_resource_type(self):
"""should use resource_type from options"""
self.__test_cloudinary_url(options={"resource_type": "raw"}, expected_url=DEFAULT_ROOT_PATH + "raw/upload/test")
def test_ignore_http(self):
"""should ignore http links only if type is not given or is asset"""
options = {}
result, options = cloudinary.utils.cloudinary_url("http://test", **options)
self.assertEqual(options, {})
self.assertEqual(result, "http://test")
options = {"type": "fetch"}
result, options = cloudinary.utils.cloudinary_url("http://test", **options)
self.assertEqual(options, {})
self.assertEqual(result, DEFAULT_ROOT_PATH + "image/fetch/http://test")
def test_fetch(self):
"""should escape fetch urls"""
options = {"type": "fetch"}
result, options = cloudinary.utils.cloudinary_url("http://blah.com/hello?a=b", **options)
self.assertEqual(options, {})
self.assertEqual(result, DEFAULT_ROOT_PATH + "image/fetch/http://blah.com/hello%3Fa%3Db")
def test_http_escape(self):
"""should escape http urls"""
options = {"type": "youtube"}
result, options = cloudinary.utils.cloudinary_url("http://www.youtube.com/watch?v=d9NF2edxy-M", **options)
self.assertEqual(options, {})
self.assertEqual(result, DEFAULT_ROOT_PATH + "image/youtube/http://www.youtube.com/watch%3Fv%3Dd9NF2edxy-M")
def test_cname(self):
"""should support extenal cname"""
self.__test_cloudinary_url(options={"cname": "hello.com"},
expected_url="http://hello.com/test123/image/upload/test")
def test_cname_subdomain(self):
"""should support extenal cname with cdn_subdomain on"""
self.__test_cloudinary_url(options={"cname": "hello.com", "cdn_subdomain": True},
expected_url="http://a2.hello.com/test123/image/upload/test")
def test_background(self):
"""should support background"""
self.__test_cloudinary_url(options={"background": "red"}, expected_url=DEFAULT_UPLOAD_PATH + "b_red/test")
self.__test_cloudinary_url(options={"background": "#112233"},
expected_url=DEFAULT_UPLOAD_PATH + "b_rgb:112233/test")
def test_default_image(self):
"""should support default_image"""
self.__test_cloudinary_url(options={"default_image": "default"},
expected_url=DEFAULT_UPLOAD_PATH + "d_default/test")
def test_angle(self):
"""should support angle"""
self.__test_cloudinary_url(options={"angle": 12}, expected_url=DEFAULT_UPLOAD_PATH + "a_12/test")
def test_overlay(self):
"""should support overlay"""
self.__test_cloudinary_url(options={"overlay": "text:hello"},
expected_url=DEFAULT_UPLOAD_PATH + "l_text:hello/test")
# Should not pass width height to HTML with overlay
self.__test_cloudinary_url(options={"overlay": "text:hello", "height": 100, "width": 100},
expected_url=DEFAULT_UPLOAD_PATH + "h_100,l_text:hello,w_100/test")
self.__test_cloudinary_url(
options={"overlay": {"font_family": "arial", "font_size": 20, "text": "hello"}, "height": 100,
"width": 100}, expected_url=DEFAULT_UPLOAD_PATH + "h_100,l_text:arial_20:hello,w_100/test")
def test_fetch_overlay(self):
"""should support overlay"""
self.__test_cloudinary_url(
options={"overlay": "fetch:" + REMOTE_TEST_IMAGE},
expected_url=(
DEFAULT_UPLOAD_PATH
+ "l_fetch:aHR0cDovL2Nsb3VkaW5hcnkuY29tL2ltYWdlcy9vbGRfbG9nby5wbmc=/"
+ "test"))
self.__test_cloudinary_url(
options={
"overlay": {
"url":
"https://upload.wikimedia.org/wikipedia/commons/2/2b/고창갯벌.jpg"}},
expected_url=(
DEFAULT_UPLOAD_PATH +
"l_fetch:"
"aHR0cHM6Ly91cGxvYWQud2lraW1lZGlhLm9yZy93aWtpcGVkaWEvY29"
"tbW9ucy8yLzJiLyVFQSVCMyVBMCVFQyVCMCVCRCVFQSVCMCVBRiVFQiVCMiU4Qy5qcGc=/"
"test"))
def test_underlay(self):
"""should support underlay"""
self.__test_cloudinary_url(options={"underlay": "text:hello"},
expected_url=DEFAULT_UPLOAD_PATH + "u_text:hello/test")
# Should not pass width height to HTML with underlay
self.__test_cloudinary_url(options={"underlay": "text:hello", "height": 100, "width": 100},
expected_url=DEFAULT_UPLOAD_PATH + "h_100,u_text:hello,w_100/test")
def test_custom_function(self):
# should support custom function from string
options = {"custom_function": self.custom_function_wasm_str}
self.__test_cloudinary_url(
options=options,
expected_url=DEFAULT_UPLOAD_PATH + "fn_" + self.custom_function_wasm_str + "/test"
)
# should support custom function from dictionary
options = {"custom_function": self.custom_function_wasm}
self.__test_cloudinary_url(
options=options,
expected_url=DEFAULT_UPLOAD_PATH + "fn_" + self.custom_function_wasm_str + "/test"
)
# should encode custom function source for remote function
options = {"custom_function": self.custom_function_remote}
self.__test_cloudinary_url(
options=options,
expected_url=DEFAULT_UPLOAD_PATH + "fn_" + self.custom_function_remote_str + "/test"
)
def test_custom_pre_function_wasm_str(self):
# should support custom pre function from string
options = {"custom_pre_function": self.custom_function_wasm_str}
self.__test_cloudinary_url(
options=options,
expected_url=DEFAULT_UPLOAD_PATH + "fn_pre:" + self.custom_function_wasm_str + "/test"
)
def test_custom_pre_function_wasm_dictionary(self):
# should support custom pre function from dictionary
options = {"custom_pre_function": self.custom_function_wasm}
self.__test_cloudinary_url(
options=options,
expected_url=DEFAULT_UPLOAD_PATH + "fn_pre:" + self.custom_function_wasm_str + "/test"
)
def test_custom_pre_function_remote(self):
# should encode custom function source for remote function
options = {"custom_pre_function": self.custom_function_remote}
self.__test_cloudinary_url(
options=options,
expected_url=DEFAULT_UPLOAD_PATH + "fn_pre:" + self.custom_function_remote_str + "/test"
)
def test_fetch_format(self):
"""should support format for fetch urls"""
self.__test_cloudinary_url(
public_id="http://cloudinary.com/images/logo.png",
options={"format": "jpg", "type": "fetch"},
expected_url=DEFAULT_ROOT_PATH + "image/fetch/f_jpg/http://cloudinary.com/images/logo.png"
)
def test_effect(self):
"""should support effect"""
self.__test_cloudinary_url(options={"effect": "sepia"}, expected_url=DEFAULT_UPLOAD_PATH + "e_sepia/test")
def test_effect_with_dict(self):
"""should support effect with dict"""
self.__test_cloudinary_url(options={"effect": {"sepia": -10}},
expected_url=DEFAULT_UPLOAD_PATH + "e_sepia:-10/test")
def test_effect_with_array(self):
"""should support effect with array"""
self.__test_cloudinary_url(options={"effect": ["sepia", 10]},
expected_url=DEFAULT_UPLOAD_PATH + "e_sepia:10/test")
def test_keyframe_interval(self):
"""should support keyframe_interval"""
test_values = (
(10, "ki_10.0"),
(0.05, "ki_0.05"),
(3.45, "ki_3.45"),
(300, "ki_300.0"),
("10", "ki_10"),
)
for value, expected in test_values:
self.__test_cloudinary_url(options={"resource_type": "video", "keyframe_interval": value},
expected_url=VIDEO_UPLOAD_PATH + expected + "/test")
with self.assertRaises(ValueError):
cloudinary.utils.cloudinary_url("test", keyframe_interval=-1)
def test_streaming_profile(self):
"""should support streaming_profile"""
self.__test_cloudinary_url(options={"streaming_profile": "some-profile"},
expected_url=DEFAULT_UPLOAD_PATH + "sp_some-profile/test")
def test_density(self):
"""should support density"""
self.__test_cloudinary_url(options={"density": 150}, expected_url=DEFAULT_UPLOAD_PATH + "dn_150/test")
def test_page(self):
"""should support page"""
self.__test_cloudinary_url(options={"page": 3}, expected_url=DEFAULT_UPLOAD_PATH + "pg_3/test")
def test_border(self):
"""should support border"""
self.__test_cloudinary_url(options={"border": {"width": 5}},
expected_url=DEFAULT_UPLOAD_PATH + "bo_5px_solid_black/test")
self.__test_cloudinary_url(options={"border": {"width": 5, "color": "#ffaabbdd"}},
expected_url=DEFAULT_UPLOAD_PATH + "bo_5px_solid_rgb:ffaabbdd/test")
self.__test_cloudinary_url(options={"border": "1px_solid_blue"},
expected_url=DEFAULT_UPLOAD_PATH + "bo_1px_solid_blue/test")
def test_flags(self):
"""should support flags"""
self.__test_cloudinary_url(options={"flags": "abc"}, expected_url=DEFAULT_UPLOAD_PATH + "fl_abc/test")
self.__test_cloudinary_url(options={"flags": ["abc", "def"]},
expected_url=DEFAULT_UPLOAD_PATH + "fl_abc.def/test")
def test_dpr(self):
"""should support dpr (device pixel radio)"""
self.__test_cloudinary_url(options={"dpr": "2.0"}, expected_url=DEFAULT_UPLOAD_PATH + "dpr_2.0/test")
def test_folder_version(self):
"""should add version if public_id contains / """
self.__test_cloudinary_url(public_id="folder/test", expected_url=DEFAULT_UPLOAD_PATH + "v1/folder/test")
self.__test_cloudinary_url(public_id="folder/test", options={"version": 123},
expected_url=DEFAULT_UPLOAD_PATH + "v123/folder/test")
self.__test_cloudinary_url(public_id="v1234/test", expected_url=DEFAULT_UPLOAD_PATH + "v1234/test")
def test_force_version(self):
"""Should not set default version v1 to resources stored in folders if force_version is set to False"""
self.__test_cloudinary_url(TEST_FOLDER,
expected_url=DEFAULT_UPLOAD_PATH + DEFAULT_VERSION_STR + "/" + TEST_FOLDER)
self.__test_cloudinary_url(TEST_FOLDER,
options={"force_version": False},
expected_url=DEFAULT_UPLOAD_PATH + TEST_FOLDER)
# Explicitly set version is always passed
self.__test_cloudinary_url(TEST_ID,
options={"force_version": False, "version": IMAGE_VERSION},
expected_url=DEFAULT_UPLOAD_PATH + IMAGE_VERSION_STR + "/" + TEST_ID)
self.__test_cloudinary_url(TEST_FOLDER,
options={"force_version": False, "version": IMAGE_VERSION},
expected_url=DEFAULT_UPLOAD_PATH + IMAGE_VERSION_STR + "/" + TEST_FOLDER)
# Should use force_version from config
cloudinary.config(force_version=False)
self.__test_cloudinary_url(TEST_FOLDER, expected_url=DEFAULT_UPLOAD_PATH + TEST_FOLDER)
# Should override config with options
self.__test_cloudinary_url(TEST_FOLDER,
options={"force_version": True},
expected_url=DEFAULT_UPLOAD_PATH + DEFAULT_VERSION_STR + "/" + TEST_FOLDER)
def test_shorten(self):
self.__test_cloudinary_url(options={"shorten": True}, expected_url=DEFAULT_ROOT_PATH + "iu/test")
self.__test_cloudinary_url(options={"shorten": True, "type": "private"},
expected_url=DEFAULT_ROOT_PATH + "image/private/test")
def test_signed_url(self):
self.__test_cloudinary_url(
public_id="image.jpg",
options={"version": 1234, "transformation": {"crop": "crop", "width": 10, "height": 20}, "sign_url": True},
expected_url=DEFAULT_UPLOAD_PATH + "s--Ai4Znfl3--/c_crop,h_20,w_10/v1234/image.jpg")
self.__test_cloudinary_url(
public_id="image.jpg",
options={"version": 1234, "sign_url": True},
expected_url=DEFAULT_UPLOAD_PATH + "s----SjmNDA--/v1234/image.jpg")
self.__test_cloudinary_url(
public_id="image.jpg",
options={"transformation": {"crop": "crop", "width": 10, "height": 20}, "sign_url": True},
expected_url=DEFAULT_UPLOAD_PATH + "s--Ai4Znfl3--/c_crop,h_20,w_10/image.jpg")
self.__test_cloudinary_url(
public_id="image.jpg",
options={"type": "authenticated", "transformation": {"crop": "crop", "width": 10, "height": 20},
"sign_url": True},
expected_url=DEFAULT_ROOT_PATH + "image/authenticated/s--Ai4Znfl3--/c_crop,h_20,w_10/image.jpg")
self.__test_cloudinary_url(
public_id="http://google.com/path/to/image.png",
options={"version": 1234, "type": "fetch", "sign_url": True},
expected_url=DEFAULT_ROOT_PATH + "image/fetch/s--hH_YcbiS--/v1234/http://google.com/path/to/image.png")
def test_disallow_url_suffix_in_non_upload_types(self):
with self.assertRaises(ValueError):
cloudinary.utils.cloudinary_url("test", url_suffix="hello", private_cdn=True, type="facebook")
def test_disallow_url_suffix_with_slash_or_dot(self):
with self.assertRaises(ValueError):
cloudinary.utils.cloudinary_url("test", url_suffix="hello/world", private_cdn=True)
with self.assertRaises(ValueError):
cloudinary.utils.cloudinary_url("test", url_suffix="hello.world", private_cdn=True)
def test_support_url_suffix_for_private_cdn(self):
self.__test_cloudinary_url(options={"url_suffix": "hello", "private_cdn": True},
expected_url="http://test123-res.cloudinary.com/images/test/hello")
self.__test_cloudinary_url(options={"url_suffix": "hello", "angle": 0, "private_cdn": True},
expected_url="http://test123-res.cloudinary.com/images/a_0/test/hello")
def test_put_format_after_url_suffix(self):
self.__test_cloudinary_url(options={"url_suffix": "hello", "private_cdn": True, "format": "jpg"},
expected_url="http://test123-res.cloudinary.com/images/test/hello.jpg")
def test_not_sign_the_url_suffix(self):
url, options = cloudinary.utils.cloudinary_url("test", format="jpg", sign_url=True)
expected_signature = re.search(r's--[0-9A-Za-z_-]{8}--', url)
self.__test_cloudinary_url(
options={"url_suffix": "hello", "private_cdn": True, "format": "jpg", "sign_url": True},
expected_url="http://test123-res.cloudinary.com/images/" + expected_signature.group(0) + "/test/hello.jpg")
url, options = cloudinary.utils.cloudinary_url("test", format="jpg", angle=0, sign_url=True)
expected_signature = re.search(r's--[0-9A-Za-z_-]{8}--', url)
self.__test_cloudinary_url(
options={"url_suffix": "hello", "private_cdn": True, "format": "jpg", "angle": 0, "sign_url": True},
expected_url="http://test123-res.cloudinary.com/images/" + expected_signature.group(
0) + "/a_0/test/hello.jpg")
def test_support_url_suffix_for_raw_uploads(self):
self.__test_cloudinary_url(options={"url_suffix": "hello", "private_cdn": True, "resource_type": "raw"},
expected_url="http://test123-res.cloudinary.com/files/test/hello")
def test_support_use_root_path_for_shared_cdn(self):
self.__test_cloudinary_url(options={"use_root_path": True, "private_cdn": False},
expected_url=DEFAULT_ROOT_PATH + "test")
self.__test_cloudinary_url(options={"use_root_path": True, "private_cdn": False, "angle": 0},
expected_url=DEFAULT_ROOT_PATH + "a_0/test")
def test_support_use_root_path_for_private_cdn(self):
self.__test_cloudinary_url(options={"use_root_path": True, "private_cdn": True},
expected_url="http://test123-res.cloudinary.com/test")
self.__test_cloudinary_url(options={"use_root_path": True, "private_cdn": True, "angle": 0},
expected_url="http://test123-res.cloudinary.com/a_0/test")
def test_support_use_root_path_together_with_url_suffix_for_private_cdn(self):
self.__test_cloudinary_url(options={"use_root_path": True, "url_suffix": "hello", "private_cdn": True},
expected_url="http://test123-res.cloudinary.com/test/hello")
def test_disallow_use_root_path_if_not_image_upload(self):
with self.assertRaises(ValueError):
cloudinary.utils.cloudinary_url("test", use_root_path=True, private_cdn=True, type="facebook")
with self.assertRaises(ValueError):
cloudinary.utils.cloudinary_url("test", use_root_path=True, private_cdn=True, resource_type="raw")
def test_support_cdn_subdomain_with_secure_on_if_using_shared_domain(self):
self.__test_cloudinary_url(options={"secure": True, "cdn_subdomain": True},
expected_url="https://res-2.cloudinary.com/test123/image/upload/test")
def test_support_secure_cdn_subdomain_false_override_with_secure(self):
self.__test_cloudinary_url(options={"secure": True, "cdn_subdomain": True, "secure_cdn_subdomain": False},
expected_url="https://res.cloudinary.com/test123/image/upload/test")
def test_support_secure_cdn_subdomain_true_override_with_secure(self):
self.__test_cloudinary_url(
options={"secure": True, "cdn_subdomain": True, "secure_cdn_subdomain": True, "private_cdn": True},
expected_url="https://test123-res-2.cloudinary.com/image/upload/test")
def test_escape_public_id(self):
""" should escape public_ids """
tests = {
"a b": "a%20b",
"a+b": "a%2Bb",
"a%20b": "a%20b",
"a-b": "a-b",
"a??b": "a%3F%3Fb"
}
for source, target in tests.items():
result, options = cloudinary.utils.cloudinary_url(source)
self.assertEqual(DEFAULT_UPLOAD_PATH + "" + target, result)
def test_escape_public_id_with_non_ascii_characters(self):
self.__test_cloudinary_url(u"ß", expected_url=DEFAULT_UPLOAD_PATH + "%C3%9F")
def test_responsive_width(self):
"""should support responsive width"""
options = {"width": 100, "height": 100, "crop": "crop", "responsive_width": True}
result, options = cloudinary.utils.cloudinary_url("test", **options)
self.assertEqual(options, {"responsive": True})
self.assertEqual(result, DEFAULT_UPLOAD_PATH + "c_crop,h_100,w_100/c_limit,w_auto/test")
cloudinary.config(responsive_width_transformation={"width": "auto", "crop": "pad"})
options = {"width": 100, "height": 100, "crop": "crop", "responsive_width": True}
result, options = cloudinary.utils.cloudinary_url("test", **options)
self.assertEqual(options, {"responsive": True})
self.assertEqual(result, DEFAULT_UPLOAD_PATH + "c_crop,h_100,w_100/c_pad,w_auto/test")
def test_norm_range_value(self):
# should parse integer range values
self.assertEqual(cloudinary.utils.norm_range_value("200"), "200")
# should parse float range values
self.assertEqual(cloudinary.utils.norm_range_value("200.0"), "200.0")
# should parse a percent range value
self.assertEqual(cloudinary.utils.norm_range_value("20p"), "20p")
self.assertEqual(cloudinary.utils.norm_range_value("20P"), "20p")
self.assertEqual(cloudinary.utils.norm_range_value("20%"), "20p")
self.assertEqual(cloudinary.utils.norm_range_value("p"), None)
def test_norm_auto_range_value(self):
"""Should parse both auto and values supported by norm_range_value"""
self.assertEqual("auto", cloudinary.utils.norm_auto_range_value("auto"))
self.assertEqual(None, cloudinary.utils.norm_auto_range_value("non_auto"))
# Should handle regular norm_range_value values
self.assertEqual(cloudinary.utils.norm_auto_range_value("20P"), "20p")
def test_fps(self):
""" Should support a single number, a list of mixed type and a string, including open-ended and closed ranges"""
fps_test_values = (
('24-29.97', 'fps_24-29.97'),
(24, 'fps_24'),
(24.973, 'fps_24.973'),
('24', 'fps_24'),
('-24', 'fps_-24'),
('$v', 'fps_$v'),
((24, 29.97), 'fps_24-29.97'),
(['24', '$v'], 'fps_24-$v')
)
for value, expected in fps_test_values:
self.__test_cloudinary_url(public_id="video_id", options={'resource_type': 'video', 'fps': value},
expected_url=VIDEO_UPLOAD_PATH + expected + "/video_id")
def test_video_codec(self):
# should support a string value
self.__test_cloudinary_url(public_id="video_id", options={'resource_type': 'video', 'video_codec': 'auto'},
expected_url=VIDEO_UPLOAD_PATH + "vc_auto/video_id")
# should support a hash value
self.__test_cloudinary_url(public_id="video_id", options={'resource_type': 'video',
'video_codec': {'codec': 'h264', 'profile': 'basic',
'level': '3.1'}},
expected_url=VIDEO_UPLOAD_PATH + "vc_h264:basic:3.1/video_id")
def test_audio_codec(self):
# should support a string value
self.__test_cloudinary_url(public_id="video_id", options={'resource_type': 'video', 'audio_codec': 'acc'},
expected_url=VIDEO_UPLOAD_PATH + "ac_acc/video_id")
def test_bit_rate(self):
# should support an integer value
self.__test_cloudinary_url(public_id="video_id", options={'resource_type': 'video', 'bit_rate': 2048},
expected_url=VIDEO_UPLOAD_PATH + "br_2048/video_id")
# should support "<integer>k"
self.__test_cloudinary_url(public_id="video_id", options={'resource_type': 'video', 'bit_rate': '44k'},
expected_url=VIDEO_UPLOAD_PATH + "br_44k/video_id")
# should support "<integer>m"
self.__test_cloudinary_url(public_id="video_id", options={'resource_type': 'video', 'bit_rate': '1m'},
expected_url=VIDEO_UPLOAD_PATH + "br_1m/video_id")
def test_audio_frequency(self):
# should support an integer value
self.__test_cloudinary_url(public_id="video_id", options={'resource_type': 'video', 'audio_frequency': 44100},
expected_url=VIDEO_UPLOAD_PATH + "af_44100/video_id")
def test_video_sampling(self):
# should support an integer value
self.__test_cloudinary_url(public_id="video_id", options={'resource_type': 'video', 'video_sampling': 20},
expected_url=VIDEO_UPLOAD_PATH + "vs_20/video_id")
        # should support a string value in the form of "<float>s"
self.__test_cloudinary_url(public_id="video_id", options={'resource_type': 'video', 'video_sampling': "2.3s"},
expected_url=VIDEO_UPLOAD_PATH + "vs_2.3s/video_id")
def test_start_offset(self):
# should support decimal seconds
self.__test_cloudinary_url(public_id="video_id", options={'resource_type': 'video', 'start_offset': 2.63},
expected_url=VIDEO_UPLOAD_PATH + "so_2.63/video_id")
self.__test_cloudinary_url(public_id="video_id", options={'resource_type': 'video', 'start_offset': '2.63'},
expected_url=VIDEO_UPLOAD_PATH + "so_2.63/video_id")
# should support percents of the video length as "<number>p"
self.__test_cloudinary_url(public_id="video_id", options={'resource_type': 'video', 'start_offset': '35p'},
expected_url=VIDEO_UPLOAD_PATH + "so_35p/video_id")
# should support percents of the video length as "<number>%"
self.__test_cloudinary_url(public_id="video_id", options={'resource_type': 'video', 'start_offset': '35%'},
expected_url=VIDEO_UPLOAD_PATH + "so_35p/video_id")
# should support auto
self.__test_cloudinary_url(public_id="video_id", options={'resource_type': 'video', 'start_offset': 'auto'},
expected_url=VIDEO_UPLOAD_PATH + "so_auto/video_id")
def test_end_offset(self):
# should support decimal seconds
self.__test_cloudinary_url(public_id="video_id", options={'resource_type': 'video', 'end_offset': 2.63},
expected_url=VIDEO_UPLOAD_PATH + "eo_2.63/video_id")
self.__test_cloudinary_url(public_id="video_id", options={'resource_type': 'video', 'end_offset': '2.63'},
expected_url=VIDEO_UPLOAD_PATH + "eo_2.63/video_id")
# should support percents of the video length as "<number>p"
self.__test_cloudinary_url(public_id="video_id", options={'resource_type': 'video', 'end_offset': '35p'},
expected_url=VIDEO_UPLOAD_PATH + "eo_35p/video_id")
# should support percents of the video length as "<number>%"
self.__test_cloudinary_url(public_id="video_id", options={'resource_type': 'video', 'end_offset': '35%'},
expected_url=VIDEO_UPLOAD_PATH + "eo_35p/video_id")
def test_duration(self):
# should support decimal seconds
self.__test_cloudinary_url(public_id="video_id", options={'resource_type': 'video', 'duration': 2.63},
expected_url=VIDEO_UPLOAD_PATH + "du_2.63/video_id")
self.__test_cloudinary_url(public_id="video_id", options={'resource_type': 'video', 'duration': '2.63'},
expected_url=VIDEO_UPLOAD_PATH + "du_2.63/video_id")
# should support percents of the video length as "<number>p"
self.__test_cloudinary_url(public_id="video_id", options={'resource_type': 'video', 'duration': '35p'},
expected_url=VIDEO_UPLOAD_PATH + "du_35p/video_id")
# should support percents of the video length as "<number>%"
self.__test_cloudinary_url(public_id="video_id", options={'resource_type': 'video', 'duration': '35%'},
expected_url=VIDEO_UPLOAD_PATH + "du_35p/video_id")
def test_offset(self):
test_cases = {
'eo_3.21,so_2.66': '2.66..3.21',
'eo_3.22,so_2.67': (2.67, 3.22),
'eo_70p,so_35p': ('35%', '70%'),
'eo_71p,so_36p': ('36p', '71p'),
'eo_70.5p,so_35.5p': ['35.5p', '70.5p']
}
for transformation, offset in test_cases.items():
self.__test_cloudinary_url(
public_id="video_id",
options={'resource_type': 'video', 'offset': offset},
expected_url=VIDEO_UPLOAD_PATH + transformation + "/video_id")
def test_user_agent(self):
with patch('cloudinary.USER_PLATFORM', ''):
agent = cloudinary.get_user_agent()
six.assertRegex(self, agent, r'^CloudinaryPython\/\d\.\d+\.\d+ \(Python \d\.\d+\.\d+\)$')
platform = 'MyPlatform/1.2.3 (Test code)'
with patch('cloudinary.USER_PLATFORM', platform):
result = cloudinary.get_user_agent()
self.assertEqual(result, platform + ' ' + agent)
def test_aspect_ratio(self):
self.__test_cloudinary_url(
public_id="test",
options={"aspect_ratio": "1.0"},
expected_url=DEFAULT_UPLOAD_PATH + "ar_1.0/test")
self.__test_cloudinary_url(
public_id="test",
options={"aspect_ratio": "3:2"},
expected_url=DEFAULT_UPLOAD_PATH + "ar_3:2/test")
self.__test_cloudinary_url(
public_id="test",
options={"aspect_ratio": Fraction(3.0 / 4)},
expected_url=DEFAULT_UPLOAD_PATH + "ar_3:4/test")
def test_overlay_options(self):
tests = [
({'public_id': "logo"}, "logo"),
({'public_id': "folder/logo"}, "folder:logo"),
({'public_id': "logo", 'type': "private"}, "private:logo"),
({'public_id': "logo", 'format': "png"}, "logo.png"),
({'resource_type': "video", 'public_id': "cat"}, "video:cat"),
({'text': "Hello World, Nice to meet you?", 'font_family': "Arial", 'font_size': "18"},
"text:Arial_18:Hello%20World%252C%20Nice%20to%20meet%20you%3F"),
({'text': "Hello World, Nice to meet you?", 'font_family': "Arial", 'font_size': "18",
'font_weight': "bold", 'font_style': "italic", 'letter_spacing': 4,
'line_spacing': 3},
"text:Arial_18_bold_italic_letter_spacing_4_line_spacing_3:Hello%20World"
"%252C%20Nice%20to%20meet%20you%3F"),
({'resource_type': "subtitles", 'public_id': "sample_sub_en.srt"},
"subtitles:sample_sub_en.srt"),
({'text': "Hello World, Nice to meet you?", 'font_family': "Arial", 'font_size': "18",
'font_antialiasing': "best", 'font_hinting': "medium"},
"text:Arial_18_antialias_best_hinting_medium:Hello%20World%252C%20Nice%20to%20meet%20you%3F"),
({'resource_type': "subtitles", 'public_id': "sample_sub_he.srt",
'font_family': "Arial", 'font_size': 40},
"subtitles:Arial_40:sample_sub_he.srt"),
({'url': "https://upload.wikimedia.org/wikipedia/commons/2/2b/고창갯벌.jpg"},
"fetch:aHR0cHM6Ly91cGxvYWQud2lraW1lZGlhLm9yZy93aWtpcGVkaWEvY29"
"tbW9ucy8yLzJiLyVFQSVCMyVBMCVFQyVCMCVCRCVFQSVCMCVBRiVFQiVCMiU4Qy5qcGc=")
]
for options, expected in tests:
result = cloudinary.utils.process_layer(options, "overlay")
self.assertEqual(expected, result)
def test_overlay_error_1(self):
""" Must supply font_family for text in overlay """
with self.assertRaises(ValueError):
cloudinary.utils.cloudinary_url(
"test", overlay=dict(text="text", font_style="italic"))
def test_overlay_error_2(self):
""" Must supply public_id for for non-text underlay """
with self.assertRaises(ValueError):
cloudinary.utils.cloudinary_url("test", underlay=dict(resource_type="video"))
def test_translate_if(self):
all_operators = "if_"
all_operators += "w_eq_0_and"
all_operators += "_h_ne_0_or"
all_operators += "_ar_lt_0_and"
all_operators += "_pc_gt_0_and"
all_operators += "_fc_lte_0_and"
all_operators += "_w_gte_0"
all_operators += ",e_grayscale"
condition = "width = 0 && height != 0 || aspect_ratio < 0 && page_count > 0 " \
"and face_count <= 0 and width >= 0"
options = {"if": condition, "effect": "grayscale"}
transformation, options = cloudinary.utils.generate_transformation_string(**options)
self.assertEqual({}, options)
self.assertEqual(all_operators, transformation)
def test_merge(self):
a = {"foo": "foo", "bar": "foo"}
b = {"foo": "bar"}
self.assertIsNone(cloudinary.utils.merge(None, None))
self.assertDictEqual(a, cloudinary.utils.merge(a, None))
self.assertDictEqual(a, cloudinary.utils.merge(None, a))
self.assertDictEqual({"foo": "bar", "bar": "foo"}, cloudinary.utils.merge(a, b))
self.assertDictEqual(a, cloudinary.utils.merge(b, a))
def test_array_should_define_a_set_of_variables(self):
options = {
"if": "face_count > 2",
"variables": [["$z", 5], ["$foo", "$z * 2"]],
"crop": "scale",
"width": "$foo * 200"
}
transformation, options = cloudinary.utils.generate_transformation_string(**options)
self.assertEqual('if_fc_gt_2,$z_5,$foo_$z_mul_2,c_scale,w_$foo_mul_200', transformation)
def test_dollar_key_should_define_a_variable(self):
options = {"transformation": [{"$foo": 10}, {"if": "face_count > 2"},
{"crop": "scale", "width": "$foo * 200 / face_count"}, {"if": "end"}]}
transformation, options = cloudinary.utils.generate_transformation_string(**options)
self.assertEqual('$foo_10/if_fc_gt_2/c_scale,w_$foo_mul_200_div_fc/if_end', transformation)
def test_should_sort_defined_variable(self):
options = {"$second": 1, "$first": 2}
transformation, options = cloudinary.utils.generate_transformation_string(**options)
self.assertEqual('$first_2,$second_1', transformation)
def test_should_place_defined_variables_before_ordered(self):
options = {"variables": [["$z", 5], ["$foo", "$z * 2"]], "$second": 1, "$first": 2}
transformation, options = cloudinary.utils.generate_transformation_string(**options)
self.assertEqual('$first_2,$second_1,$z_5,$foo_$z_mul_2', transformation)
def test_should_support_text_values(self):
public_id = "sample"
options = {"effect": "$efname:100", "$efname": "!blur!"}
url, options = cloudinary.utils.cloudinary_url(public_id, **options)
self.assertEqual(DEFAULT_UPLOAD_PATH + "$efname_!blur!,e_$efname:100/sample", url)
def test_should_support_string_interpolation(self):
public_id = "sample"
options = {
"crop": "scale",
"overlay": {
"text": "$(start)Hello $(name)$(ext), $(no ) $( no)$(end)",
"font_family": "Arial",
"font_size": "18"
}
}
url, options = cloudinary.utils.cloudinary_url(public_id, **options)
self.assertEqual(
DEFAULT_UPLOAD_PATH + "c_scale,l_text:Arial_18:$(start)"
"Hello%20$(name)$(ext)%252C%20%24%28no%20%29"
"%20%24%28%20no%29$(end)/sample",
url)
def test_encode_context(self):
self.assertEqual("", cloudinary.utils.encode_context({}))
self.assertEqual("a=b", cloudinary.utils.encode_context({"a": "b"}))
# using OrderedDict for tests consistency
self.assertEqual("a=b|c=d", cloudinary.utils.encode_context(OrderedDict((("a", "b"), ("c", "d")))))
# test that special characters are unchanged
self.assertEqual("a=!@#$%^&*()_+<>?,./", cloudinary.utils.encode_context({"a": "!@#$%^&*()_+<>?,./"}))
# check value escaping
self.assertEqual(r"a=b\|\|\=|c=d\=a\=\|", cloudinary.utils.encode_context(OrderedDict((("a", "b||="),
("c", "d=a=|")))))
# check fallback
self.assertEqual("not a dict", cloudinary.utils.encode_context("not a dict"))
def test_build_list_of_dicts(self):
dict_data = {"one": 1, "two": 2, "three": 3}
list_of_dict_data = [dict_data]
# should convert a dict to a list of dict
self.assertListEqual(list_of_dict_data, build_list_of_dicts(dict_data))
# should leave as is a list of dict
self.assertListEqual(list_of_dict_data, build_list_of_dicts(list_of_dict_data))
# should convert a JSON string representing dict to a list of dict
string_data = '{"one": 1, "two": 2, "three": 3}'
self.assertListEqual(list_of_dict_data, build_list_of_dicts(string_data))
# should convert a JSON string representing a list of dict to a list of dict
string_array_data = '[{"one": 1, "two": 2, "three": 3}]'
self.assertListEqual(list_of_dict_data, build_list_of_dicts(string_array_data))
# should return an empty list on None
self.assertListEqual([], build_list_of_dicts(None))
# should return an empty list on []
self.assertListEqual([], build_list_of_dicts([]))
# should raise a ValueError on invalid values
invalid_values = ["", [[]], ["not_a_dict"], [7357], {"not", "a dict"}]
for invalid_value in invalid_values:
with self.assertRaises(ValueError):
build_list_of_dicts(invalid_value)
def test_json_encode(self):
# should encode simple values
self.assertEqual('[]', json_encode(list()))
self.assertEqual('{}', json_encode(dict()))
self.assertEqual('[{"k":"v"}]', json_encode([{"k": "v"}, ]))
# should encode date and datetime to ISO format
self.assertEqual('{"t":"2019-02-22T16:20:57"}', json_encode({"t": datetime(2019, 2, 22, 16, 20, 57)}))
self.assertEqual('{"t":"2019-02-22"}', json_encode({"t": date(2019, 2, 22)}))
# should raise Exception on unsupported values
with self.assertRaises(TypeError) as te:
json_encode({"t": self})
self.assertIn("is not JSON serializable", str(te.exception))
def test_encode_unicode_url(self):
self.assertEqual("string", encode_unicode_url("string"))
self.assertEqual("encoded", encode_unicode_url(u"encoded"))
def test_base64url_encode(self):
self.assertEqual("YWQ_Lix4MDl-IUAhYQ==", base64url_encode("ad?.,x09~!@!a"))
def test_is_remote_url(self):
self.assertFalse(cloudinary.utils.is_remote_url(TEST_IMAGE))
remote_urls = [
"ftp://ftp.cloudinary.com/images/old_logo.png",
"http://cloudinary.com/images/old_logo.png",
"https://cloudinary.com/images/old_logo.png",
"s3://s3-us-west-2.amazonaws.com/cloudinary/images/old_logo.png",
"gs://cloudinary/images/old_logo.png",
"data:image/gif;charset=utf8;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7",
"data:image/gif;param1=value1;param2=value2;base64," +
"R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7",
CL_BLANK
]
for url in remote_urls:
self.assertTrue(cloudinary.utils.is_remote_url(url))
def test_file_io_size(self):
"""Should return correct file size"""
test_data = b"Test data"
test_data_len = len(test_data)
with tempfile.NamedTemporaryFile() as temp_file:
temp_file.write(test_data)
actual_size = cloudinary.utils.file_io_size(temp_file)
filesystem_size = getsize(temp_file.name)
self.assertEqual(test_data_len, filesystem_size)
self.assertEqual(test_data_len, actual_size)
with io.BytesIO() as temp_stream:
temp_stream.write(test_data)
actual_size = cloudinary.utils.file_io_size(temp_stream)
self.assertEqual(test_data_len, actual_size)
with tempfile.NamedTemporaryFile() as empty_file:
actual_size = cloudinary.utils.file_io_size(empty_file)
self.assertEqual(0, actual_size)
def test_cloudinary_scaled_url(self):
"""Should correctly handle format and fetch_format with and without custom transformation"""
image_format = "jpg"
fetch_format = "gif"
resp_w = 99
resp_trans = "c_scale,w_{}".format(resp_w)
effect = "sepia"
options = {"format": image_format, "type": "fetch", "fetch_format": fetch_format}
# Without custom transformation
actual_url = cloudinary_scaled_url(FETCH_URL, resp_w, {}, options)
self.assertEqual("{p}f_{ff}/{t}/{fu}".format(p=DEFAULT_FETCH_PATH, ff=fetch_format, t=resp_trans, fu=FETCH_URL),
actual_url)
# With custom transformation
actual_url = cloudinary_scaled_url(FETCH_URL, resp_w, self.crop_transformation, options)
self.assertEqual("{p}c_crop,f_{f},w_100/{t}/{fu}".format(p=DEFAULT_FETCH_PATH, f=image_format, t=resp_trans,
fu=FETCH_URL),
actual_url)
# Add base transformation
options["effect"] = effect
actual_url = cloudinary_scaled_url(FETCH_URL, resp_w, {}, options)
self.assertEqual("{p}e_{e},f_{ff}/{t}/{fu}".format(p=DEFAULT_FETCH_PATH, e=effect, ff=fetch_format,
t=resp_trans, fu=FETCH_URL),
actual_url)
# Should ignore base transformation
actual_url = cloudinary_scaled_url(FETCH_URL, resp_w, self.crop_transformation, options)
self.assertEqual("{p}c_crop,f_{f},w_100/{t}/{fu}".format(p=DEFAULT_FETCH_PATH, f=image_format,
t=resp_trans, fu=FETCH_URL),
actual_url)
# Should include raw transformation from base options
options["raw_transformation"] = self.raw_transformation
actual_url = cloudinary_scaled_url(FETCH_URL, resp_w, {}, options)
self.assertEqual("{p}e_{e},f_{ff},{rt}/{t}/{fu}".format(p=DEFAULT_FETCH_PATH, e=effect, ff=fetch_format,
rt=self.raw_transformation, t=resp_trans, fu=FETCH_URL),
actual_url)
def test_build_eager(self):
test_data = [
["should support strings",
[API_TEST_TRANS_SCALE100_STR, API_TEST_TRANS_SEPIA_STR + "/jpg"],
"{}|{}/jpg".format(API_TEST_TRANS_SCALE100_STR, API_TEST_TRANS_SEPIA_STR)],
["should concatenate transformations using pipe",
[API_TEST_TRANS_SCALE100, API_TEST_TRANS_SEPIA],
"{}|{}".format(API_TEST_TRANS_SCALE100_STR, API_TEST_TRANS_SEPIA_STR)],
["should support transformations with multiple components",
[{"transformation": [API_TEST_TRANS_SCALE100, API_TEST_TRANS_SEPIA]}, API_TEST_TRANS_SEPIA],
"{}/{}|{}".format(API_TEST_TRANS_SCALE100_STR, API_TEST_TRANS_SEPIA_STR, API_TEST_TRANS_SEPIA_STR)],
["should concatenate format at the end of the transformation",
([dict(API_TEST_TRANS_SCALE100, **{"format": "gif"}), API_TEST_TRANS_SEPIA]),
"{}/gif|{}".format(API_TEST_TRANS_SCALE100_STR, API_TEST_TRANS_SEPIA_STR)],
["should support an empty format",
([dict(API_TEST_TRANS_SCALE100, **{"format": ""}), API_TEST_TRANS_SEPIA]),
"{}/|{}".format(API_TEST_TRANS_SCALE100_STR, API_TEST_TRANS_SEPIA_STR)],
["should treat a null format as none",
([dict(API_TEST_TRANS_SCALE100, **{"format": None}), API_TEST_TRANS_SEPIA]),
"{}|{}".format(API_TEST_TRANS_SCALE100_STR, API_TEST_TRANS_SEPIA_STR)],
["should concatenate format at the end of the transformation",
[dict(API_TEST_TRANS_SCALE100, **{"format": "gif"}),
dict(API_TEST_TRANS_SEPIA, **{"format": "jpg"})],
"{}/gif|{}/jpg".format(API_TEST_TRANS_SCALE100_STR, API_TEST_TRANS_SEPIA_STR)],
["should support transformations with multiple components and format",
[{"transformation": [API_TEST_TRANS_SCALE100, API_TEST_TRANS_SEPIA], "format": "gif"},
API_TEST_TRANS_SEPIA],
"{}/{}/gif|{}".format(API_TEST_TRANS_SCALE100_STR, API_TEST_TRANS_SEPIA_STR, API_TEST_TRANS_SEPIA_STR)],
]
for message, value, expected in test_data:
self.assertEqual(expected, build_eager(value), message)
if __name__ == '__main__':
unittest.main()
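# Standalone sketch (not part of the original test module): the same URL building the
# tests above assert on, shown outside unittest. It reuses this module's existing imports;
# the cloud_name mirrors setUp(), the api_key/api_secret values are dummies and only matter
# for signed URLs, and the helper name below is purely illustrative.
def _demo_cloudinary_url():
    cloudinary.config(cloud_name="test123", api_key="a", api_secret="b", private_cdn=False)
    url, _ = cloudinary.utils.cloudinary_url(
        "sample", width=100, height=100, crop="fill", secure=True)
    # Expected, per the assertions above:
    # https://res.cloudinary.com/test123/image/upload/c_fill,h_100,w_100/sample
    return url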
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes as OperandDef
from ...core import TilesError
from ...serialize import KeyField, BoolField
from ...utils import check_chunks_unknown_shape
from ..operands import TensorOperand, TensorOperandMixin
from ..datasource import tensor as astensor
from ..array_utils import as_same_device, device
from ..core import TensorOrder
from .ravel import ravel
class TensorIsIn(TensorOperand, TensorOperandMixin):
_op_type_ = OperandDef.ISIN
_element = KeyField('element')
_test_elements = KeyField('test_elements')
_assume_unique = BoolField('assume_unique')
_invert = BoolField('invert')
def __init__(self, assume_unique=None, invert=None, dtype=None, **kw):
dtype = np.dtype(bool) if dtype is None else dtype
super().__init__(_assume_unique=assume_unique, _invert=invert,
dtype=dtype, **kw)
@property
def element(self):
return self._element
@property
def test_elements(self):
return self._test_elements
@property
def assume_unique(self):
return self._assume_unique
@property
def invert(self):
return self._invert
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
self._element = self._inputs[0]
self._test_elements = self._inputs[1]
def __call__(self, element, test_elements):
element, test_elements = astensor(element), ravel(astensor(test_elements))
return self.new_tensor([element, test_elements], element.shape, order=TensorOrder.C_ORDER)
@classmethod
def tile(cls, op):
in_tensor = op.element
test_elements = op.test_elements
out_tensor = op.outputs[0]
if len(test_elements.chunks) != 1:
check_chunks_unknown_shape([test_elements], TilesError)
test_elements = test_elements.rechunk(len(test_elements))._inplace_tile()
test_elements_chunk = test_elements.chunks[0]
out_chunks = []
for c in in_tensor.chunks:
chunk_op = op.copy().reset_key()
out_chunk = chunk_op.new_chunk([c, test_elements_chunk], shape=c.shape,
index=c.index, order=out_tensor.order)
out_chunks.append(out_chunk)
new_op = op.copy()
return new_op.new_tensors([in_tensor, test_elements], out_tensor.shape,
order=out_tensor.order, chunks=out_chunks,
nsplits=in_tensor.nsplits)
@classmethod
def execute(cls, ctx, op):
(element, test_elements), device_id, xp = as_same_device(
[ctx[c.key] for c in op.inputs], device=op.device, ret_extra=True)
with device(device_id):
ctx[op.outputs[0].key] = xp.isin(element, test_elements,
assume_unique=op.assume_unique,
invert=op.invert)
def isin(element, test_elements, assume_unique=False, invert=False):
"""
Calculates `element in test_elements`, broadcasting over `element` only.
Returns a boolean array of the same shape as `element` that is True
where an element of `element` is in `test_elements` and False otherwise.
Parameters
----------
element : array_like
Input tensor.
test_elements : array_like
The values against which to test each value of `element`.
This argument is flattened if it is a tensor or array_like.
See notes for behavior with non-array-like parameters.
assume_unique : bool, optional
If True, the input tensors are both assumed to be unique, which
can speed up the calculation. Default is False.
invert : bool, optional
If True, the values in the returned tensor are inverted, as if
calculating `element not in test_elements`. Default is False.
``mt.isin(a, b, invert=True)`` is equivalent to (but faster
than) ``mt.invert(mt.isin(a, b))``.
Returns
-------
isin : Tensor, bool
Has the same shape as `element`. The values `element[isin]`
are in `test_elements`.
See Also
--------
in1d : Flattened version of this function.
Notes
-----
`isin` is an element-wise function version of the python keyword `in`.
``isin(a, b)`` is roughly equivalent to
``mt.array([item in b for item in a])`` if `a` and `b` are 1-D sequences.
`element` and `test_elements` are converted to tensors if they are not
already. If `test_elements` is a set (or other non-sequence collection)
it will be converted to an object tensor with one element, rather than a
tensor of the values contained in `test_elements`. This is a consequence
of the `tensor` constructor's way of handling non-sequence collections.
Converting the set to a list usually gives the desired behavior.
Examples
--------
>>> import mars.tensor as mt
>>> element = 2*mt.arange(4).reshape((2, 2))
>>> element.execute()
array([[0, 2],
[4, 6]])
>>> test_elements = [1, 2, 4, 8]
>>> mask = mt.isin(element, test_elements)
>>> mask.execute()
array([[ False, True],
[ True, False]])
>>> element[mask].execute()
array([2, 4])
>>> mask = mt.isin(element, test_elements, invert=True)
>>> mask.execute()
array([[ True, False],
[ False, True]])
    >>> element[mask].execute()
array([0, 6])
Because of how `array` handles sets, the following does not
work as expected:
>>> test_set = {1, 2, 4, 8}
>>> mt.isin(element, test_set).execute()
array([[ False, False],
[ False, False]])
Casting the set to a list gives the expected result:
>>> mt.isin(element, list(test_set)).execute()
array([[ False, True],
[ True, False]])
"""
op = TensorIsIn(assume_unique, invert)
return op(element, test_elements)
|
from __future__ import print_function
import math, random
class AsymmetricCrypto:
def __init__(self):
pass
def gcd(self, a, b):
if b==0:
return a
return self.gcd(b,a%b)
    def isPrime(self, n):
        if n < 2:  # 0 and 1 are not prime
            return False
        if n == 2:
            return True
if n%2==0:
return False
i = 3
while i*i<=n:
if n%i==0:
return False
i+=2
return True
    def generate_large_prime(self):
        # note: these 5-7 digit primes are far too small for real security; demonstration only
        while True:
            p = random.randint(10001, 1000001)
if self.isPrime(p)==True:
return p
def rsa_generate_public_key(self):
p = self.generate_large_prime()
q = self.generate_large_prime()
n = p*q
phi = (p-1)*(q-1)
e = None
while True:
e = random.randint(2,phi-1)
if self.gcd(e,phi)==1:
break
return (n, e, p, q)
def get_num_list(self, plaintext):
return [ord(ch) for ch in plaintext ]
def rsa_util_encrypt(self, num, public_key):
n = public_key[0]
e = public_key[1]
return pow(num, e, n)
def rsa_encrypt(self, plaintext, public_key=None):
        if public_key is None:
public_key = self.rsa_generate_public_key()
num_list = self.get_num_list(plaintext)
cipher_nums = []
for num in num_list:
cipher_num = self.rsa_util_encrypt(num, public_key)
cipher_nums.append(cipher_num)
return public_key, cipher_nums
def rsa_util_decrypt(self, cipher_num, private_key, public_key):
n = public_key[0]
d = private_key
return pow(cipher_num, d, n)
    def multiplicative_inverse(self, a, n):
        # extended Euclidean algorithm: returns the inverse of a modulo n, or None if gcd(a, n) != 1
if self.gcd(a, n)!=1:
return None
r1 = n
r2 = a
p1 = 0
p2 = 1
while r1>1:
q = r1//r2
r = r1 - q*r2
r1 = r2
r2 = r
p = p1 - q*p2
p1 = p2
p2 = p
return p1%n
def get_private_key(self, public_key):
n = public_key[0]
e = public_key[1]
p = public_key[2]
q = public_key[3]
phi = (p-1)*(q-1)
d = self.multiplicative_inverse(e, phi)
return d
def rsa_decrypt(self, cipher_nums, public_key=None, private_key=None):
        if public_key is None:
            return "Please provide a public key!"
        if private_key is None:
            return "Please provide a private key!"
plain_nums = []
for cipher_num in cipher_nums:
plain_num = self.rsa_util_decrypt(cipher_num, private_key, public_key)
plain_nums.append(plain_num)
plain_text = [ chr(plain_num) for plain_num in plain_nums ]
return "".join(plain_text), plain_nums
def knapsack_generate_keys(self, n=8):
## first create a superincreasing sequence of length 'n'
seed = random.randint(2,10)
super_sequence = [seed]
for i in range(n-1):
sum_so_far = sum(super_sequence)
element = random.randint(sum_so_far+1, 2*sum_so_far)
super_sequence.append(element)
## now select a random integer q such that q > sum(super_sequence)
q = random.randint(sum(super_sequence)+1, 2*sum(super_sequence))
## now select another random integer 'r' such that gcd(q,r) = 1
r = 2
while True:
r = random.randint(2,q-1)
if self.gcd(q,r) == 1:
break
## Finally, calculate beta - the public key
beta = tuple( (r*super_sequence[i])%q for i in range(n) ) ## making beta as a tuple, as tuples are immutable
private_key = (super_sequence, q, r)
return beta, private_key
    ## The function below recovers the bit vector alpha during decryption by greedily solving the superincreasing knapsack
def getalpha(self, c, w):
w = w[::-1]
alpha = []
for number in w:
if number > c:
alpha.append(0)
else:
alpha.append(1)
c = c - number
return alpha[::-1]
def knapsack_encrypt(self, plaintext, public_key):
ciphertext = []
for character in plaintext:
            # 8-bit binary representation of the character: strip the '0b' prefix and left-pad with zeros
            binary = bin(ord(character))[2:].zfill(8)
            binary = list(map(int, binary))
            # now, binary is an 8-element list containing the bits of the character
# ciphertext 'c' is calculated now
c = sum([ binary[i]*public_key[i] for i in range(len(public_key)) ])
ciphertext.append(c)
return ciphertext
def knapsack_decrypt(self, ciphertext, private_key):
        # First, compute the integer 's' with the property that r*s = 1 (mod q)
        # unpack the private key
super_sequence, q, r = private_key
s = self.multiplicative_inverse(r, q)
# finding c' = modified ciphertext
modified_ciphertext = [ (ciphertext[i]*s)%q for i in range(len(ciphertext)) ]
decrypted_text = []
        ## Now, for each modified ciphertext value, find the actual alpha bit sequence
        for c in modified_ciphertext:
            alpha = self.getalpha(c, super_sequence)
            alpha = "".join(map(str, alpha))
            decrypted_text.append(chr(int(alpha, 2)))
return "".join(decrypted_text)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from karborclient.tests.unit import base
from karborclient.tests.unit.v1 import fakes
cs = fakes.FakeClient()
mock_request_return = ({}, {'verification': {}})
class VerificationsTest(base.TestCaseShell):
@mock.patch('karborclient.common.http.HTTPClient.json_request')
def test_list_verifications_with_marker_limit(self, mock_request):
mock_request.return_value = mock_request_return
cs.verifications.list(marker=1234, limit=2)
mock_request.assert_called_with(
'GET',
'/verifications?limit=2&marker=1234', headers={})
@mock.patch('karborclient.common.http.HTTPClient.json_request')
def test_list_verifications_with_sort_key_dir(self, mock_request):
mock_request.return_value = mock_request_return
cs.verifications.list(sort_key='id', sort_dir='asc')
mock_request.assert_called_with(
'GET',
'/verifications?'
'sort_dir=asc&sort_key=id', headers={})
@mock.patch('karborclient.common.http.HTTPClient.json_request')
def test_list_verifications_with_invalid_sort_key(self, mock_request):
self.assertRaises(ValueError,
cs.verifications.list,
sort_key='invalid', sort_dir='asc')
@mock.patch('karborclient.common.http.HTTPClient.json_request')
def test_create_verification(self, mock_request):
mock_request.return_value = mock_request_return
cs.verifications.create('586cc6ce-e286-40bd-b2b5-dd32694d9944',
'2220f8b1-975d-4621-a872-fa9afb43cb6c',
'{}')
mock_request.assert_called_with(
'POST',
'/verifications',
data={
'verification':
{
'checkpoint_id': '2220f8b1-975d-4621-a872-fa9afb43cb6c',
'parameters': '{}',
'provider_id': '586cc6ce-e286-40bd-b2b5-dd32694d9944'
}}, headers={})
@mock.patch('karborclient.common.http.HTTPClient.json_request')
def test_show_verification(self, mock_request):
mock_request.return_value = mock_request_return
cs.verifications.get('1')
mock_request.assert_called_with(
'GET',
'/verifications/1',
headers={})
@mock.patch('karborclient.common.http.HTTPClient.json_request')
def test_show_verification_with_headers(self, mock_request):
mock_request.return_value = mock_request_return
cs.verifications.get('1', session_id='fake_session_id')
mock_request.assert_called_with(
'GET',
'/verifications/1',
headers={'X-Configuration-Session': 'fake_session_id'})
|
from gpiozero import Button
button = Button(2)
button2 = Button(3)
button.wait_for_press()
button2.wait_for_press()
print("Never gonna give you up never gonna let you down never gonna run around and desert you never gonna make you cry never gonna say goodbye never gonna tell a lie and hurt you")
|
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
# pylint: disable=unused-variable
import json
from pathlib import Path
from typing import Dict
import pytest
import yaml
from service_integration.osparc_config import MetaConfig, RuntimeConfig
@pytest.fixture
def labels(tests_data_dir: Path, labels_fixture_name: str) -> Dict[str, str]:
data = yaml.safe_load((tests_data_dir / "docker-compose-meta.yml").read_text())
service_name = {
"legacy": "dy-static-file-server",
"service-sidecared": "dy-static-file-server-dynamic-sidecar",
"compose-sidecared": "dy-static-file-server-dynamic-sidecar-compose-spec",
}
labels_annotations = data["services"][service_name[labels_fixture_name]]["build"][
"labels"
]
# patch -> replaces some environs
if compose_spec := labels_annotations.get("simcore.service.compose-spec"):
if compose_spec == "${DOCKER_COMPOSE_SPECIFICATION}":
labels_annotations["simcore.service.compose-spec"] = json.dumps(
yaml.safe_load((tests_data_dir / "compose-spec.yml").read_text())
)
return labels_annotations
@pytest.mark.parametrize(
"labels_fixture_name", ["legacy", "service-sidecared", "compose-sidecared"]
)
def test_load_from_labels(
labels: Dict[str, str], labels_fixture_name: str, tmp_path: Path
):
meta_cfg = MetaConfig.from_labels_annotations(labels)
runtime_cfg = RuntimeConfig.from_labels_annotations(labels)
print(meta_cfg.json(exclude_unset=True, indent=2))
print(runtime_cfg.json(exclude_unset=True, indent=2))
# create yamls from config
for model in (runtime_cfg, meta_cfg):
config_path = (
tmp_path / f"{model.__class__.__name__.lower()}-{labels_fixture_name}.yml"
)
with open(config_path, "wt") as fh:
data = json.loads(
model.json(exclude_unset=True, by_alias=True, exclude_none=True)
)
yaml.safe_dump(data, fh, sort_keys=False)
# reload from yaml and compare
new_model = model.__class__.from_yaml(config_path)
assert new_model == model
|
from django.contrib import admin
from .models import GenericContainerPlugin, CompareTwoThingsPlugin, GenericListPlugin, ImageWithThumbnailPlugin, HostService
#from .models import RenderableTextPlugin, GenericContainerPlugin, CompareTwoThingsPlugin, GenericListPlugin, ImageWithThumbnailPlugin, HostService
admin.site.register(HostService)
#admin.site.register(ImageWithThumbnailPlugin)
#admin.site.register(RenderableTextPlugin)
#admin.site.register(CompareTwoThingsPlugin)
#admin.site.register(GenericContainerPlugin)
#admin.site.register(GenericListPlugin)
|
import networkx as nx
import ndlib.models.ModelConfig as mc
import ndlib.models.CompositeModel as gc
import ndlib.models.compartments as ns
import time
from ndlib.utils import multi_runs
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
# Network Definition
for initialinfect in [1]:
for itercount in [6, 7, 8, 9, 10, 11, 12, 13, 14]:
start_time = time.time()
N = 20000
connections = 6
iterations = 30
executions = 1000
print('-----Generating Barabasi-Albert graph with {} nodes-----'.format(N))
g = nx.barabasi_albert_graph(N, connections)
# Model Selection
print('-----Configuring Model-----')
model = gc.CompositeModel(g)
# Model Statuses
model.add_status("Susceptible")
model.add_status("Exposed")
model.add_status("Symptomatic")
#model.add_status("Asymptomatic")
model.add_status("Infected")
model.add_status("Removed")
#-----------------------------------------------------
#DISEASE DATA
r0 = 3.1
disease_length = 14
people_total = 12 * disease_length
chance_of_infection = r0 / people_total
infect_chance = chance_of_infection
#print(infect_chance)
#-----------------------------------------------------
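        # Added note (illustration): with r0 = 3.1 and roughly 12 contacts per day
        # over a 14-day course, the infection probability handed to the compartments
        # below works out to 3.1 / (12 * 14) ≈ 0.0185 per iteration.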
        # Compartment Definition
        c1_1 = ns.NodeStochastic(infect_chance, triggering_status="Infected")     # infection chance when a neighbour is Infected
        c1_3 = ns.NodeStochastic(infect_chance, triggering_status="Symptomatic")  # infection chance when a neighbour is Symptomatic
        c2_1 = ns.NodeStochastic((1 - 0.5**(1/11)))  # per-iteration rate giving an ~11-day half-life for leaving Exposed
        c3 = ns.NodeStochastic((1 - 0.5**(1/14)))    # per-iteration rate giving an ~14-day half-life for leaving Symptomatic
        c4 = ns.CountDown('Testing Timer', iterations = itercount)  # fixed countdown of 'itercount' iterations
# Rule Definition
model.add_rule("Susceptible","Exposed",c1_1)
model.add_rule("Susceptible","Exposed",c1_3)
model.add_rule("Exposed","Symptomatic",c2_1)
model.add_rule("Symptomatic","Removed",c3)
model.add_rule("Infected","Removed",c4)
# Model Configuration
config = mc.Configuration()
config.add_model_parameter('fraction_infected', initialinfect/N)
model.set_initial_status(config)
# Simulation
print('-----Doing {} simulation(s) on {} day test-----'.format(executions,itercount))
trends = multi_runs(model, execution_number = executions, iteration_number = iterations, infection_sets=None)
stop_time = time.time()
total_time = stop_time - start_time
print('\n----- Total Time: {} seconds ----'.format(total_time))
print('-----Plotting Results-----')
#print(iterations)
#print(trends)
from ndlib.viz.mpl.DiffusionTrend import DiffusionTrend
daydata = []
for n in range(0, executions):
if (trends[n]['trends']['node_count'][0][-1] != (N - initialinfect)):
daydata.append(N-trends[n]['trends']['node_count'][0][-1])
print(daydata)
fig = plt.hist(daydata)
stdev = np.std(daydata)
mean = np.mean(daydata)
plt.title("Infected at Day {}".format(iterations), fontsize = 24)
plt.xlabel("Infected Population", fontsize = 24)
plt.ylabel("Number of Simulations", fontsize = 24)
plt.figtext(0.85, 0.80, 'Outbreaks: {:.0f}\nMean: {:.3f}\nStDev: {:.3f}'.format(len(daydata), mean, stdev),
fontsize = 24, bbox = dict(boxstyle = 'round', facecolor = 'white'))
plt.tight_layout()
plt.savefig("Histogram4/(HIST)Patient 0 Test: {} Days Test, {} Days Total, {} Initial.png".format(itercount, iterations, initialinfect))
plt.clf()
viz = DiffusionTrend(model, trends)
name = ("Histogram4/(COUNT)Patient 0 Test: {} Days Test, {} Days Total, {} Initial.png".format(itercount, iterations, initialinfect))
viz.plot(filename = name, percentile = 90, itercount = executions, timereq = total_time)
|
#!/usr/bin/env python
import os
from app import create_app, db  # assumption: the app package exposes its SQLAlchemy instance as `db`
from app.models import User, Role, Photo  # assumed module path for the models referenced in the shell context
from flask_script import Manager, Shell, Server
from flask_migrate import Migrate, MigrateCommand
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
    return dict(app=app, db=db, User=User, Role=Role, Photo=Photo)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command("runserver", Server(host='0.0.0.0'))
manager.add_command("db", MigrateCommand)  # register Flask-Migrate so the import above is actually used
if __name__ == '__main__':
manager.run()
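# Hedged usage notes (added illustration): with the commands registered above, the
# script is typically invoked as
#   python manage.py runserver   # serve the app on 0.0.0.0
#   python manage.py shell       # interactive shell with app/db/model objects preloaded
#   python manage.py db upgrade  # Flask-Migrate database commands via the 'db' command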
|
def evaluate(self):
if self.getMapper().getName() == self.mate._("evaluation:versioned:testee"):
comparison_test = self.mate.getTestByMapperName(self.getName(), self.mate._("evaluation:versioned:base"))
testee_result = self.getRunResults()
comparison_result = comparison_test.getRunResults()
if testee_result.correct < comparison_result.correct:
self.warn("Less correct reads than base %s"%comparison_test.getMapper().getTitle())
|
"""
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
t = int(input())
for _ in range(t):
n, l = map(int, input().strip().split())
a = []
for _ in range(n):
xl, xr = map(int, input().strip().split())
if xr - xl <= l: # 1 ≤ Xl ≤ Xr ≤ 10^6
a.append((xl, xr))
    a.sort()
    ln = len(a)
    found = False
    for i in range(ln):
        # try to cover exactly [a[i][0], a[i][0] + l] starting from segment i
        max_right = a[i][0] + l
        curr_right = a[i][1]
        if curr_right < max_right:
            # greedily extend coverage with later segments that stay within the target
            for j in range(i + 1, ln):
                if a[j][0] <= curr_right and a[j][1] <= max_right:
                    curr_right = max(curr_right, a[j][1])
                    if curr_right == max_right:
                        break
        if curr_right == max_right:
            found = True
            break
    print('Yes' if found else 'No')
|
import numpy as np
import multiprocessing as mp
import scipy.stats as stats
import os
os.environ['OMP_NUM_THREADS'] = str(1)
import statsmodels.sandbox.stats.multicomp as mc
import h5py
import nibabel as nib
from importlib import reload
import tools
import argparse
# Excluding 084
subjNums = ['013','014','016','017','018','021','023','024','026','027','028','030','031','032','033',
'034','035','037','038','039','040','041','042','043','045','046','047','048','049','050',
'053','055','056','057','058','062','063','066','067','068','069','070','072','074','075',
'076','077','081','085','086','087','088','090','092','093','094','095','097','098','099',
'101','102','103','104','105','106','108','109','110','111','112','114','115','117','119',
'120','121','122','123','124','125','126','127','128','129','130','131','132','134','135',
'136','137','138','139','140','141']
projectdir = '/home/ti61/f_mc1689_1/SRActFlow/'
# Using final partition
networkdef = np.loadtxt(projectdir + 'data/network_partition.txt')
networkorder = np.asarray(sorted(range(len(networkdef)), key=lambda k: networkdef[k]))
networkorder.shape = (len(networkorder),1)
# network mappings for final partition set
networkmappings = {'fpn':7, 'vis1':1, 'vis2':2, 'smn':3, 'aud':8, 'lan':6, 'dan':5, 'con':4, 'dmn':9,
'pmulti':10, 'none1':11, 'none2':12}
networks = networkmappings.keys()
xticks = {}
reorderednetworkaffil = networkdef[networkorder]
for net in networks:
netNum = networkmappings[net]
netind = np.where(reorderednetworkaffil==netNum)[0]
tick = np.max(netind)
xticks[tick] = net
## General parameters/variables
nParcels = 360
nSubjs = len(subjNums)
glasserfile2 = projectdir + 'data/Q1-Q6_RelatedParcellation210.LR.CorticalAreas_dil_Colors.32k_fs_RL.dlabel.nii'
glasser2 = nib.load(glasserfile2).get_data()
glasser2 = np.squeeze(glasser2)
sortednets = np.sort(list(xticks.keys()))
orderednetworks = []
for net in sortednets: orderednetworks.append(xticks[net])
networkpalette = ['royalblue','slateblue','paleturquoise','darkorchid','limegreen',
'lightseagreen','yellow','orchid','r','peru','orange','olivedrab']
networkpalette = np.asarray(networkpalette)
OrderedNetworks = ['VIS1','VIS2','SMN','CON','DAN','LAN','FPN','AUD','DMN','PMM','VMM','ORA']
parser = argparse.ArgumentParser('./main.py', description='Run decoding analyses to identify sets of regions involved for different task components')
parser.add_argument('--outfilename', type=str, default="", help='Prefix output filenames (default: analysis1)')
parser.add_argument('--decoder', type=str, default="similarity", help='decoding approach [similarity, logistic, svm]')
parser.add_argument('--ID', type=str, default="rules", help='condition/information to decode [rules, colorStim, oriStim, constantStim, pitchStim]')
parser.add_argument('--nproc', type=int, default=20, help='number of processes to run in parallel (default: 20)')
parser.add_argument('--motor_mapping', action='store_true', help="Include motor output activations")
def run(args):
outfilename = args.outfilename
decoder = args.decoder
ID = args.ID
nproc = args.nproc
outdir = '/home/ti61/f_mc1689_1/SRActFlow/data/results/MAIN/LayerID_Revision/'
outfilename = outdir + ID + '_' + decoder
if ID=='rules':
nStims = 12
data_task = np.zeros((len(glasser2),nStims,len(subjNums)))
rules = ['Logic','Sensory','Motor']
rois = np.arange(nParcels)
scount = 0
for subj in subjNums:
rulecount = 0
for rule in rules:
data_task[:,rulecount:(rulecount+4),scount] = tools.loadInputActivity(subj,rule)
rulecount += 4
scount += 1
if ID in ['colorStim', 'oriStim', 'constantStim', 'pitchStim']:
nStims = 4
data_task = np.zeros((len(glasser2),nStims,len(subjNums)))
if ID in ['colorStim','oriStim']:
rois = np.where((networkdef==networkmappings['vis1']) | (networkdef==networkmappings['vis2']))[0]
elif ID in ['constantStim','pitchStim']:
rois = np.where(networkdef==networkmappings['aud'])[0]
scount = 0
for subj in subjNums:
data_task[:,:,scount] = tools.loadInputActivity(subj,ID)
scount += 1
distances_baseline_allrules, rmatch, rmismatch, confusion_mats = tools.conditionDecodings(data_task, rois,
motorOutput=False, ncvs=1, effects=True,
confusion=True, decoder=decoder, nproc=nproc)
statistics_allrules = np.zeros((len(rois),3)) # acc, q, acc_thresh
for roicount in range(len(rois)):
ntrials = distances_baseline_allrules.shape[1]
p = stats.binom_test(np.mean(distances_baseline_allrules[roicount,:])*ntrials,n=ntrials,p=1/float(nStims))
if np.mean(distances_baseline_allrules[roicount,:])>1/float(nStims):
p = p/2.0
else:
p = 1.0-p/2.0
statistics_allrules[roicount,0] = np.mean(distances_baseline_allrules[roicount,:])
statistics_allrules[roicount,1] = p
h0, qs = mc.fdrcorrection0(statistics_allrules[:,1])
for roicount in range(len(rois)):
statistics_allrules[roicount,1] = qs[roicount]
statistics_allrules[roicount,2] = h0[roicount]*statistics_allrules[roicount,0]
# Count number of significant ROIs for LH decoding
sig_ind = np.where(statistics_allrules[:,1]<0.05)[0]
print('Number of ROIs significant for all 12 rules:', sig_ind.shape[0])
print('Accuracies:', statistics_allrules[sig_ind,0])
#### Map back to surface
# Put all data into a single matrix (since we only run a single classification)
inputStim = np.zeros((glasser2.shape[0],3))
roicount = 0
for roi in rois:
vertex_ind = np.where(glasser2==roi+1)[0]
inputStim[vertex_ind,0] = statistics_allrules[roicount,0]
inputStim[vertex_ind,1] = statistics_allrules[roicount,1]
inputStim[vertex_ind,2] = statistics_allrules[roicount,2]
roicount += 1
    # Note: the surface-map CSV written below reuses outfilename + '.csv', which would
    # overwrite this file, so the significant-ROI indices get their own (assumed) suffix.
    np.savetxt(outfilename + '_sigROIs.csv', np.where(statistics_allrules[:,1]<0.05)[0], delimiter=',')
####
# Write file to csv and run wb_command
np.savetxt(outfilename + '.csv', inputStim,fmt='%s')
wb_command = 'wb_command -cifti-convert -from-text ' + outfilename + '.csv ' + glasserfile2 + ' ' + outfilename + '.dscalar.nii' + ' -reset-scalars'
os.system(wb_command)
if __name__ == '__main__':
args = parser.parse_args()
run(args)
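# ----------------------------------------------------------------------------------
# Hedged, self-contained sketch (added illustration, not part of the original
# analysis): the significance step inside run() converts each ROI's mean decoding
# accuracy into a one-sided binomial p-value against chance (1/nStims) and then
# FDR-corrects across ROIs. The helper below mirrors that logic; it is not called
# anywhere by the script itself, and the example accuracies are hypothetical.
def _roi_binomial_pvalues(accuracies, ntrials, chance):
    pvals = []
    for acc in accuracies:
        p = stats.binom_test(int(round(acc * ntrials)), n=ntrials, p=chance)
        # halve the two-sided p-value in the above-chance direction, as in run()
        pvals.append(p / 2.0 if acc > chance else 1.0 - p / 2.0)
    return pvals
# Example usage (3 hypothetical ROIs, 96 trials, 4-way chance level of 0.25):
#   pvals = _roi_binomial_pvalues([0.30, 0.26, 0.24], ntrials=96, chance=0.25)
#   rejected, qvals = mc.fdrcorrection0(pvals)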
|
x = 2
y = 3
z = x + y
print(z)
|