content stringlengths 5 1.05M |
|---|
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
# Prefer the first CUDA GPU when available, otherwise fall back to CPU.
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
print(device)
class SimpleAE(nn.Module):
    """Minimal convolutional autoencoder (3-channel input, 64-channel code)."""

    def __init__(self):
        super(SimpleAE, self).__init__()
        # Two unpadded 5x5 convolutions shrink each spatial dim by 8 total.
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=5),
            nn.ReLU(True),
            nn.Conv2d(32, 64, kernel_size=5),
            nn.ReLU(True))
        # Mirrored transposed convolutions restore the original size.
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(64, 32, kernel_size=5),
            nn.ReLU(True),
            nn.ConvTranspose2d(32, 3, kernel_size=5),
            nn.ReLU(True))

    def forward(self, x):
        """Encode then decode; output shape matches the input shape."""
        return self.decoder(self.encoder(x))
class DeblurCNN(nn.Module):
    """Three-layer convolutional network for image deblurring.

    Every convolution uses 'same' padding (kernel // 2), so the output has
    the same spatial size and channel count (3) as the input image.
    """

    def __init__(self):
        super(DeblurCNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=9, padding=9 // 2)
        self.conv2 = nn.Conv2d(64, 32, kernel_size=5, padding=5 // 2)
        self.conv3 = nn.Conv2d(32, 3, kernel_size=5, padding=5 // 2)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # Fix: use the module's own ReLU (it was an unused attribute while
        # forward() called F.relu); the computed values are identical and
        # the printed module graph now reflects the activation in use.
        x = self.relu(self.conv1(x))
        x = self.relu(self.conv2(x))
        # No activation on the last layer: raw RGB residual output.
        x = self.conv3(x)
        return x
def get_model():
    """Build a DeblurCNN, move it to the module-level device, and return it."""
    net = DeblurCNN().to(device)
    print(net)  # log the architecture for quick inspection
    return net
#!/usr/bin/env python3
import sys
import argparse
from time import sleep
from p3lib.uio import UIO
from p3lib.helper import logTraceBack
from smotor.smotor_ctrl import SMotor
class StepItError(Exception):
    """Raised for StepIt specific failures."""
    pass
class StepIt(object):
    """Thin command wrapper around the SMotor stepper-motor driver."""

    def __init__(self, uio, options):
        """@brief Constructor
           @param uio A UIO instance handling user input and output (E.G stdin/stdout or a GUI)
           @param options An instance of the OptionParser command line options."""
        self._uio = uio
        self._options = options
        self._sMotor = SMotor()
        self._sMotor.setUIO(uio)

    def move(self, angle, revSec, relative):
        """@brief Move the motor to the required angle.
           @param angle The angle to move the motor in degrees.
           @param revSec The speed to move the motor in revolutions per second.
           @param relative If True move relative to the current position.
                           If False move to the absolute angle measured from the
                           zero/reference position.
                           (Doc fix: the original text had this inverted relative
                           to the 'absolute = not relative' call below.)"""
        self._uio.info("Moving {:.1f}° at {:.1f} revs/sec.".format(angle, revSec))
        self._sMotor.setRevSec(revSec)
        self._sMotor.enableHold(False)
        self._sMotor.move(angle, absolute=not relative)
        absPos = self._sMotor.getAbsolutePosition()
        self._uio.info("The absolute position of the motor is now {}°".format(absPos))

    def setMode(self, mode):
        """@brief Set the microstepping mode.
           @param mode The mode (1, 2, 4, 8, 16 or 32).
           @raise ValueError If mode is not one of the supported values
                  (ValueError subclasses Exception, so existing callers that
                  caught Exception still work)."""
        modeMap = {1: SMotor.MODE_1,
                   2: SMotor.MODE_1_2,
                   4: SMotor.MODE_1_4,
                   8: SMotor.MODE_1_8,
                   16: SMotor.MODE_1_16,
                   32: SMotor.MODE_1_32}
        try:
            modeStr = modeMap[mode]
        except KeyError:
            raise ValueError("{} is an invalid mode.".format(mode))
        self._uio.info("Set Mode to {}.".format(modeStr))
        self._sMotor.setStepMode(modeStr)

    def on(self, enabled):
        """@brief Enable/disable the motor hold current.
           @param enabled If True the motor current is on. If False the motor
                  current is off and the motor can be moved manually."""
        enabledS = "ON" if enabled else "OFF"
        self._uio.info("Motor current: {}".format(enabledS))
        self._sMotor.enableHold(not enabled)
        if not enabled:
            # Ensure the step pin stops pulsing once the current is removed.
            self._sMotor.stop()

    def zero(self):
        """@brief Reset the reference/zero position to the current position."""
        self._sMotor.resetRef()
        self._uio.info("Set the current motor position to the zero/reference position.")

    def stop(self):
        """@brief Stop the motor if running."""
        self._sMotor.stop()
        self._uio.info("The motor is now stopped.")

    def runUntilStopped(self):
        """@brief Run the stepper motor until stopped."""
        self._sMotor.setRevSec(self._options.speed)
        self._sMotor.runUntilStopped()
        self._uio.info("Running the stepper motor at {:.1f} revs/sec until the motor is stopped.".format(self._options.speed))
def main():
    """@brief Program entry point.

    Parses the command line, applies the requested motor actions and reports
    errors via the UIO instance (re-raising only in debug mode)."""
    uio = UIO()
    # Track debug mode outside the try block so the error handler can use it
    # even when parsing fails before 'options' is bound.
    debug = False
    try:
        parser = argparse.ArgumentParser(description="Example interface to drive a stepper motor. Before running this tool run the 'sudo pigpiod -s 1' command on the Raspberry PI.",
                                         formatter_class=argparse.RawDescriptionHelpFormatter)
        parser.add_argument("-d", "--debug", action='store_true', help="Enable debugging.")
        parser.add_argument("-a", "--angle", type=float, help="Set the angle to move the motor spindle. Set a -ve angle to reverse the motor direction (anti clockwise). If the -r option is not used then the angle set is an absolute angle with reference to the zero/reference position.", default=None)
        parser.add_argument("-m", "--mode", type=int, help="The mode of the stepper motor. 1 = Full Step, 2 = 1/2 step, 4 = 1/4 step, 8 = 1/8 step, 16 = 1/16 step, 32 = 1/32 step (default=2).", default=2)
        parser.add_argument("-s", "--speed", type=float, help="Set the speed of the motor in revolutions per second (default=1.0).", default=1.0)
        parser.add_argument("-o", "--on", action='store_true', help="Turn the motor current on. The motor will hold it's position and can be moved.")
        parser.add_argument("-f", "--off", action='store_true', help="Turn the motor current off. The motor will not draw power and can be manually moved.")
        parser.add_argument("-r", "--relative", action='store_true', help="Turn relative to the current position. By default the absolute position of the motor spindle is set.")
        # Help-text fix: "rzero" -> "zero".
        parser.add_argument("-z", "--zero", action='store_true', help="Set the zero/reference position of the motor to it's current position.")
        parser.add_argument("-p", "--stop", action='store_true', help="Stop the motor if it is running. If this option is used then the absolute position of the motor is lost.")
        parser.add_argument("-n", "--non_stop", action='store_true', help="Run the motor non stop. If this option is used then the absolute position of the motor is lost.")
        options = parser.parse_args()
        debug = options.debug
        uio.enableDebug(options.debug)
        stepIt = StepIt(uio, options)
        stepIt.setMode(options.mode)
        if options.stop:
            stepIt.stop()
            return
        if options.on:
            stepIt.on(True)
        elif options.off:
            stepIt.on(False)
        if options.zero:
            stepIt.zero()
        if options.angle is not None:
            stepIt.move(options.angle, options.speed, options.relative)
        elif options.non_stop:
            stepIt.runUntilStopped()
    # If the program throws a system exit exception (e.g. argparse --help).
    except SystemExit:
        pass
    # Don't print error information if CTRL C pressed.
    except KeyboardInterrupt:
        pass
    except Exception as ex:
        logTraceBack(uio)
        # Bug fix: the original re-raised unconditionally here, which made the
        # debug check below unreachable and crashed the tool on every error.
        if debug:
            raise
        uio.error(str(ex))
|
import datetime
from datetime import datetime
class Storage(object):
    """In-memory key/value cache partitioned by domain, with optional
    per-key expiration timestamps.

    Layout: { domain: { name: [value, datatype, expiration] } }
    Mutating methods return "0" on success or an error string, mirroring a
    simple wire protocol.
    """

    def __init__(self):
        self.__data = {}

    def set(self, name, value, datatype, expiration=None, domain="default"):
        """Store value/datatype under domain/name; "0" on success, else error text."""
        try:
            if domain not in self.__data:
                self.__data[domain] = {}
            self.__data[domain][name] = [value, datatype, expiration]
            return "0"
        except Exception as e:
            return str(e)

    def get(self, name, domain):
        """Return (value, datatype), or (None, None) when missing or expired.

        Expired entries are evicted on read.
        """
        if domain not in self.__data:
            return None, None
        rec = self.__data[domain]
        if name not in rec:
            return None, None
        expiration = rec[name][2]
        if expiration and datetime.now().timestamp() > float(expiration):
            # Bug fix: this was 'del rec.remove[name]', which raised
            # AttributeError (silently swallowed), so expired keys were
            # never actually removed from the cache.
            del rec[name]
            return None, None
        return rec[name][0], rec[name][1]  # tuple (value, datatype)

    def delete(self, name, domain):
        """Remove domain/name; "0" on success, else a diagnostic string."""
        try:
            if domain not in self.__data:
                return "Domain does not exists"
            if name not in self.__data[domain]:
                return "Key does not exists"
            del self.__data[domain][name]
            return "0"
        except Exception as e:
            return str(e)

    def reset(self):
        """Drop all domains and keys."""
        self.__data = {}
        return "0"

    def stats(self, started, set_hit, get_hit, get_miss):
        """Return a string with per-domain key counts and the given counters."""
        # Bug fix: 'get_hit' previously reported the set_hit counter.
        r = {'domains': {}, 'started': started, 'set_hit': set_hit,
             'get_hit': get_hit, 'get_miss': get_miss}
        for domain in self.__data:
            r["domains"][domain] = {'keys': len(self.__data[domain])}
        return str(r)

    def dump(self):
        """Return the raw backing dict (mutable; handle with care)."""
        return self.__data
|
import argparse
from copy import copy
try:
import ujson as json
except ModuleNotFoundError:
import json
import kaggle_environments
import numpy as np
import os
from pathlib import Path
import pandas as pd
from scipy import stats
import tqdm
from typing import *
from hungry_geese.utils import STATE_TYPE, read_json
def process_replay_file(replay_dict: Dict, index_to_mmr: pd.Series) -> List[STATE_TYPE]:
    """Re-hydrate a replay and annotate every agent-step with training targets.

    Each agent dict in every non-terminal step gains:
      * next_action: the action the agent took at the following step
      * final_rank: 0-based average rank derived from the final rewards
      * mmr: the agent's latest score, looked up by agent index
    Returns all steps except the terminal one.
    """
    env = kaggle_environments.make(
        'hungry_geese',
        configuration=replay_dict['configuration'],
        steps=replay_dict['steps'],
        info=replay_dict['info']
    )
    final_rewards = np.array([agent['reward'] for agent in env.steps[-1]])
    rankings = stats.rankdata(final_rewards, method='average') - 1.
    annotated_steps = env.steps[:-1]
    for step_idx, step in enumerate(annotated_steps):
        for agent_idx, agent in enumerate(step):
            agent['next_action'] = env.steps[step_idx + 1][agent_idx]['action']
            agent['final_rank'] = rankings[agent_idx]
            agent['mmr'] = index_to_mmr[agent_idx].item()
    return annotated_steps
def batch_split_replay_files(
        epagents_df: pd.DataFrame,
        replay_paths_to_save: List[Path],
        save_dir: Path,
        force: bool,
        delete: bool
) -> NoReturn:
    """Process replay JSONs into per-step .ljson files under save_dir.

    Skips episodes already listed in save_dir/all_processed_episodes.txt
    unless *force* is set; optionally (*delete*) removes saved episodes that
    are no longer in the requested set. The bookkeeping files
    all_processed_episodes.txt and all_saved_episodes.txt are rewritten at
    the end of every run.
    """
    all_replay_paths_to_save = copy(replay_paths_to_save)
    saved_replay_names = []
    if save_dir.exists():
        assert save_dir.is_dir()
        if (save_dir / 'all_processed_episodes.txt').exists() and not force:
            # Resume mode: drop replays already processed in a previous run.
            already_processed = set()
            if (save_dir / 'all_processed_episodes.txt').is_file():
                with open(save_dir / 'all_processed_episodes.txt', 'r') as f:
                    already_processed.update([replay_name.rstrip() for replay_name in f.readlines()])
            replay_paths_to_save = [rp for rp in replay_paths_to_save if rp.stem not in already_processed]
            if (save_dir / 'all_saved_episodes.txt').is_file():
                with open(save_dir / 'all_saved_episodes.txt', 'r') as f:
                    saved_replay_names.extend([replay_name.rstrip() for replay_name in f.readlines()])
    else:
        save_dir.mkdir()
    step_counter = 0
    print(f'Processing {len(replay_paths_to_save)} replays and saving output to {save_dir.absolute()}')
    # Iterate over a copy: the list is mutated inside the loop on failure.
    for rp in tqdm.tqdm(copy(replay_paths_to_save)):
        try:
            episode = process_replay_file(
                read_json(rp),
                epagents_df[epagents_df.EpisodeId == int(rp.stem)].set_index('Index').LatestScore
            )
            save_file_name = save_dir / (rp.stem + '.ljson')
            if not save_file_name.exists() or force:
                # One JSON object per line (.ljson), one line per game step.
                with open(save_file_name, 'w') as f:
                    f.writelines([json.dumps(step) + '\n' for step in episode])
                step_counter += len(episode)
                saved_replay_names.append(rp.stem)
            else:
                raise RuntimeError(f'Replay already exists and force is False: {(save_dir / rp.name)}')
        except (kaggle_environments.errors.InvalidArgument, RuntimeError) as e:
            print(f'Unable to save replay {rp.name}:')
            replay_paths_to_save.remove(rp)
            print(e)
        except ValueError:
            # Malformed/empty JSON: remove the source replay file entirely.
            print(f'Unable to save empty or malformed replay {rp.name} - deleting')
            all_replay_paths_to_save.remove(rp)
            os.remove(rp)
    all_replay_names = set([rp.stem for rp in all_replay_paths_to_save])
    saved_replay_names = set(saved_replay_names)
    if delete:
        # Purge previously saved episodes that are no longer requested.
        found_episodes = list(save_dir.glob('*.ljson'))
        for ep_path in found_episodes:
            if ep_path.stem not in all_replay_names:
                os.remove(ep_path)
                if ep_path.stem in saved_replay_names:
                    saved_replay_names.remove(ep_path.stem)
    # Episode names are numeric IDs; sort numerically for stable files.
    all_replay_names = sorted(list(all_replay_names), key=lambda rn: int(rn))
    saved_replay_names = sorted(list(saved_replay_names), key=lambda rn: int(rn))
    with open(save_dir / 'all_processed_episodes.txt', 'w') as f:
        f.writelines([f'{rn}\n' for rn in all_replay_names])
    with open(save_dir / 'all_saved_episodes.txt', 'w') as f:
        f.writelines([f'{rn}\n' for rn in saved_replay_names])
    print(f'Successfully saved {step_counter:,} steps from {len(replay_paths_to_save)} replays.')
    print(f'{len(saved_replay_names)} out of {len(all_replay_names)} replays saved in total.')
def load_metadata(metadata_path: Path) -> pd.DataFrame:
    """Load episode-agent metadata and attach each submission's latest score.

    Reads Episodes.csv and EpisodeAgents.csv from *metadata_path*, keeps only
    hungry-geese rows (CompetitionId 25401), and merges in a LatestScore
    column holding each submission's UpdatedScore from its most recent
    episode.
    """
    # Load Episodes and EpisodeAgents, and filter for hungry-geese competition
    episodes_df = pd.read_csv(metadata_path / 'Episodes.csv')
    episodes_df = episodes_df[episodes_df.CompetitionId == 25401]
    epagents_df = pd.read_csv(metadata_path / 'EpisodeAgents.csv')
    epagents_df = epagents_df[epagents_df.EpisodeId.isin(episodes_df.Id)]
    epagents_df.fillna(0, inplace=True)
    epagents_df = epagents_df.sort_values(by=['Id'], ascending=False)
    # Latest score per submission = UpdatedScore of its highest-numbered episode.
    latest_scores_df = epagents_df.loc[epagents_df.groupby('SubmissionId').EpisodeId.idxmax(), :].sort_values(
        by=['UpdatedScore'])
    latest_scores_df['LatestScore'] = latest_scores_df.UpdatedScore
    latest_scores_df = latest_scores_df[['SubmissionId', 'LatestScore']]
    epagents_df = epagents_df.merge(latest_scores_df, left_on='SubmissionId', right_on='SubmissionId',
                                    how='outer').sort_values(by=['LatestScore'])
    return epagents_df
def select_episodes(epagents_df: pd.DataFrame, replay_paths: List[Path], threshold: float) -> List[Path]:
    """Keep only replays whose weakest agent meets the score threshold."""
    min_scores = epagents_df.groupby('EpisodeId').LatestScore.min()
    qualifying = set(min_scores.index[min_scores >= threshold])
    return [path for path in replay_paths if int(path.stem) in qualifying]
def main() -> NoReturn:
    """CLI entry point: resolve replay paths (optionally via a glob pattern),
    filter episodes by ELO threshold, and split each replay into per-step
    .ljson files under save_dir."""
    parser = argparse.ArgumentParser(
        description='Process a list of JSON replay files and creates a new file per step.'
    )
    parser.add_argument(
        'save_dir',
        type=Path,
        help='Where to save the .pt output files'
    )
    parser.add_argument(
        'replay_paths',
        nargs='+',
        type=Path,
        help='A list of JSON replay file paths, or a relative glob pattern to match to find all replay paths'
    )
    parser.add_argument(
        '-d',
        '--delete',
        action='store_true',
        help='Including this option will delete episodes that no longer qualify for the given threshold'
    )
    parser.add_argument(
        '-f',
        '--force',
        action='store_true',
        help='Including this option will overwrite existing saved episodes'
    )
    parser.add_argument(
        '-t',
        '--threshold',
        type=float,
        default=None,
        help='The minimum ELO threshold of agents to include an episode. Leave empty to process all episodes'
    )
    parser.add_argument(
        '-m',
        '--metadata_path',
        type=Path,
        default=Path('episode_scraping/metadata'),
        help='The path to directory containing the EpisodeAgents and Episodes .csv files. '
             'Default: episode_scraping/metadata'
    )
    args = parser.parse_args()
    # A single path argument is treated as a glob pattern relative to CWD.
    if len(args.replay_paths) == 1:
        args.replay_paths = list(Path('.').glob(str(args.replay_paths[0])))
    epagents_df = load_metadata(args.metadata_path)
    args.save_dir.mkdir(exist_ok=True)
    if args.threshold is not None:
        selected_replay_paths = select_episodes(epagents_df, args.replay_paths, args.threshold)
    else:
        selected_replay_paths = args.replay_paths
    batch_split_replay_files(epagents_df, selected_replay_paths, args.save_dir, args.force, args.delete)
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from fitness.accounts.managers import FitnessUserManager
class FitnessUser(AbstractBaseUser, PermissionsMixin):
    """Custom user model authenticated by e-mail address instead of username."""

    # True for users allowed into the Django admin site.
    is_staff = models.BooleanField(
        default=False,
    )
    # Unique e-mail doubles as the login identifier (see USERNAME_FIELD).
    email = models.EmailField(
        unique=True,
    )
    # Authenticate with the e-mail field.
    USERNAME_FIELD = 'email'
    # Custom manager that creates users without a username.
    objects = FitnessUserManager()
    first_name = models.CharField(
        max_length=15,
        blank=True,
    )
    last_name = models.CharField(
        max_length=15,
        blank=True,
    )
    # Optional avatar stored under MEDIA_ROOT/profiles.
    profile_image = models.ImageField(
        upload_to='profiles',
        blank=True,
    )
|
"""Image utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import io
import matplotlib.pyplot as plt
import numpy as np
import PIL
import PIL.ExifTags
import scipy.ndimage as ndimage
import tensorflow as tf
# Haar-cascade frontal-face detector used by detect_face().
# NOTE(review): the path is relative to the working directory — confirm the
# data/ directory is present wherever this module is imported.
face_cascade = cv2.CascadeClassifier(
    "data/opencv/haarcascade_frontalface_alt.xml")
def normalize_translation_and_scale(labels):
    """Center the landmarks at the origin and normalize their scale.

    Returns (normalized_labels, center, scale) where scale is the mean
    Euclidean norm of the centered points, so the transform is invertible:
    labels == normalized_labels * scale + center.
    """
    center = labels.mean(axis=-2, keepdims=True)
    centered = labels - center
    scale = np.linalg.norm(centered, axis=-1, keepdims=True).mean(axis=-2, keepdims=True)
    return centered / scale, center, scale
def crop_image(image, labels, bbox):
    """Crop an annotated image to bbox, zero-padding when the box extends
    past the image border.

    Args:
        image: HxWxC array.
        labels: (N, 2) array of (x, y) landmark coordinates.
        bbox: (x, y, width, height) crop rectangle; may exceed the image.

    Returns:
        (cropped_image, shifted_labels). The inputs are left unmodified.
    """
    image_size = np.array([image.shape[1], image.shape[0]])
    bbox = np.array(bbox, dtype=np.int32)
    # Fix: work on a copy — the original mutated the caller's labels array
    # in place via the '+=' / '-=' operators below.
    labels = np.array(labels)
    tl = bbox[0:2]
    br = bbox[0:2] + bbox[2:4]
    if np.any(tl < 0) or np.any(br > image_size - 1):
        # Pad symmetrically just enough to contain the crop box.
        pad = np.maximum(
            np.maximum(-tl, 0),
            np.maximum(br - image_size + 1, 0))
        image = np.pad(image, ((pad[1], pad[1]),
                               (pad[0], pad[0]),
                               (0, 0)), "constant")
        labels = labels + pad[np.newaxis, :]
        tl += pad
        br += pad
    image = image[tl[1]:br[1], tl[0]:br[0], :]
    labels = labels - tl[np.newaxis, :]
    return image, labels
def compact_crop(image, labels, margin=0):
    """Crop a square region tightly around the landmarks, enlarged by *margin*
    (a fraction of the landmark extent)."""
    image = np.array(image)
    labels = np.array(labels).astype(np.int32)
    lo = np.amin(labels, axis=0)
    hi = np.amax(labels, axis=0)
    center = ((lo + hi) / 2).astype(np.int32)
    # Square half-size: the larger landmark extent, scaled by (1 + margin).
    half_size = np.amax((1 + margin) * (hi - lo) / 2).astype(np.int32)
    top_left = center - half_size
    bbox = top_left[0], top_left[1], half_size * 2, half_size * 2
    return crop_image(image, labels, bbox)
def rotate_landmarks(labels, center, angle):
    """Rotate 2-D landmark points about *center* by *angle* (radians).

    Points are row vectors, so the rotation is applied as x' = x @ R
    with R = [[cos, -sin], [sin, cos]].
    """
    center = np.array(center, dtype=np.float32)[np.newaxis]
    points = np.array(labels, dtype=np.float32) - center
    c, s = np.cos(angle), np.sin(angle)
    rotation = np.array([[c, -s],
                         [s, c]])
    return np.dot(points, rotation) + center
def rotate_image(image, labels, angle):
    """Rotate the image and its landmarks by *angle* radians about the image center."""
    height, width = image.shape[:2]
    rotated = ndimage.rotate(image, np.degrees(angle), reshape=False)
    moved = rotate_landmarks(labels, [width / 2, height / 2], angle)
    return rotated, moved
def align_to_shape(image, labels, target, extend=1.0, rotate=False):
    """Similarity-align landmarks to a target shape and warp the image.

    Estimates a partial 2-D affine transform (rotation + uniform scale +
    translation) mapping labels -> target, warps the image with it into a
    hard-coded 128x128 canvas, and applies the same transform to the points.

    NOTE(review): the 'extend' and 'rotate' parameters are currently unused —
    confirm whether they were meant to control the output size / rotation.
    """
    image = np.array(image)
    labels = np.array(labels).astype(np.float32)
    target = np.array(target).astype(np.float32)
    m, _ = cv2.estimateAffinePartial2D(labels, target)
    image_t = cv2.warpAffine(image, m, (128, 128))
    # Apply the 2x3 affine to row-vector points: x' = x @ A^T + t.
    labels_t = np.dot(labels, m[:, :2].T) + m[np.newaxis, :, 2]
    return image_t, labels_t
def scale_image(image, labels, size):
    """Scale an annotated image to size x size, scaling the labels to match."""
    image = PIL.Image.fromarray(image)
    zoom = np.array([size, size], dtype=np.float32) / np.array(image.size, np.float32)
    # Fix: Image.ANTIALIAS was deprecated and removed in Pillow 10;
    # LANCZOS is the same resampling filter under its proper name.
    image = image.resize([size, size], resample=PIL.Image.LANCZOS)
    image = np.array(image)
    labels = labels * zoom
    return image, labels
def overlap(bbox1, bbox2):
    """Return the fraction of bbox1's area covered by its intersection with bbox2.

    Boxes are (x, y, w, h) array-likes. Note: the intersection extent is not
    clamped at zero, matching the original behavior for disjoint boxes.
    """
    top_left = np.maximum(bbox1[0:2], bbox2[0:2])
    bottom_right = np.minimum(bbox1[0:2] + bbox1[2:4], bbox2[0:2] + bbox2[2:4])
    extent = bottom_right - top_left
    return float(np.prod(extent)) / np.prod(bbox1[2:4])
def detect_face(image, labels, min_overlap=0.8):
    """Detect the face box best matching the landmark extent.

    Builds an initial square box from the landmarks, runs the Haar cascade,
    and returns the detection whose overlap with the initial box is largest
    (and above min_overlap); falls back to the landmark-derived box.
    """
    cv2.ocl.setUseOpenCL(False)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(
        gray, scaleFactor=1.1, minNeighbors=5, minSize=(100, 100))
    # Fix: np.int was removed in NumPy 1.24; the builtin int is the
    # documented drop-in replacement (the alias meant exactly that).
    center = labels.mean(axis=0).astype(int)
    size = int((labels.max(axis=0) - labels.min(axis=0)).mean())
    size = np.array([size, size])
    pos = center - size // 2
    init_face = np.concatenate((pos, size))
    sel_face = init_face
    detected = False
    for face in faces:
        cur_overlap = overlap(init_face, face)
        if cur_overlap > min_overlap:
            # Keep the best candidate so far by raising the bar.
            sel_face = face
            min_overlap = cur_overlap
            detected = True
    print(["Not detected", "Detected"][detected])
    return sel_face.astype(int)
def encode_image(data, format="png"):
    """Encode a numpy array image and return the compressed bytes."""
    im = PIL.Image.fromarray(data)
    buf = io.BytesIO()
    # Fix: Image.save returns None — the previous 'data =' assignment was dead.
    im.save(buf, format=format)
    buf.seek(0)
    return buf.getvalue()
def decode_image(fp, force_rgb=True):
    """Decode the given image (file object or path) to a numpy array.

    Applies the EXIF orientation tag (0x0112) so the pixel data is upright,
    and optionally converts the result to RGB.
    """
    im = PIL.Image.open(fp)
    # Correct rotation from EXIF metadata (typical for camera/phone JPEGs).
    orientation_key = 0x0112
    if hasattr(im, "_getexif") and im._getexif():
        orientation = im._getexif().get(orientation_key, 0)
        # NOTE(review): only the pure-rotation orientations 3/6/8 are handled;
        # mirrored variants (2/4/5/7) pass through unchanged — confirm intended.
        if orientation == 3:
            im = im.rotate(180, expand=True)
        elif orientation == 6:
            im = im.rotate(270, expand=True)
        elif orientation == 8:
            im = im.rotate(90, expand=True)
    if force_rgb:
        im = im.convert(mode='RGB')
    im = np.array(im)
    return im
def visualize(image, labels):
    """Show the image with its landmarks overlaid (blocks until closed)."""
    plt.figure(figsize=(16, 12))
    plt.imshow(image)
    # labels is an (N, 2) array of (x, y) points.
    plt.plot(labels[:, 0], labels[:, 1], ".")
    plt.axis("off")
    plt.show()
|
import cv2
import random
class RandomHorizontallyFlip(object):
    """Flip image and label horizontally with probability *p*."""

    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, image, label):
        if random.random() >= self.p:
            return image, label
        # cv2.flip with flag 1 mirrors around the vertical axis.
        return cv2.flip(image, 1), cv2.flip(label, 1)
class RandomVerticallyFlip(object):
    """Flip image and label vertically with probability *p*."""

    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, image, label):
        if random.random() >= self.p:
            return image, label
        # cv2.flip with flag 0 mirrors around the horizontal axis.
        return cv2.flip(image, 0), cv2.flip(label, 0)
'''Additional CFG algorithms.'''
from util.digraph import Digraph
from table import first_sets
def has_empty_rules(grammar):
    '''Return whether a grammar has e-productions (rules with an empty right side).'''
    return any(not rule.right_side for rule in grammar.productions)
def is_left_recursive(grammar):
    '''Return whether a grammar is left-recursive.

    Builds a digraph with an edge A -> X for every nonterminal X that can
    appear leftmost in a derivation from A (i.e. X is preceded only by
    nullable symbols in some production of A); the grammar is left-recursive
    iff that graph has a cycle.
    '''
    firsts, nullable = first_sets(grammar)  # firsts unused here; nullable set needed
    G = Digraph()
    for rule in grammar.productions:
        for Xi in rule.right_side:
            if Xi.is_nonterminal():
                G.add_edge(rule.left_side, Xi)
                # A non-nullable symbol blocks any further leftmost reach.
                if Xi not in nullable:
                    break
            else:
                # A terminal can never be skipped; nothing beyond it is leftmost.
                break
    return G.cyclic()
def is_cyclic(grammar):
    '''Return whether a grammar has a cycle (some nonterminal A with A =>+ A).

    Adds an edge A -> X for every nonterminal X whose sibling symbols in the
    production are all nullable (so the production can derive exactly X);
    the grammar is cyclic iff this graph has a cycle.
    '''
    firsts, nullable = first_sets(grammar)
    G = Digraph()
    for rule in grammar.productions:
        for i, Xi in enumerate(rule.right_side):
            if Xi.is_nonterminal():
                # Bug fix: the prefix of siblings is right_side[:i], not
                # right_side[:i-1] — the latter dropped symbol i-1 from the
                # nullability check and wrapped to [:-1] when i == 0.
                siblings = rule.right_side[:i] + rule.right_side[i+1:]
                if all(symbol in nullable for symbol in siblings):
                    G.add_edge(rule.left_side, Xi)
            else:
                break
    return G.cyclic()
|
import socketserver
from video_reader import base_server
from video_reader import net_sock_handler
def video_net_socket_reader_server(data_pipe_w, ack_pipe_r, semaphore, port=9526):
    """Factory: build a named VideoPacketNetSocketReaderServer bound to *port*."""
    return VideoPacketNetSocketReaderServer(
        "video packet net socket reading server", data_pipe_w, ack_pipe_r, semaphore, port)
class _VideoPacketNetSocketServer(socketserver.ForkingMixIn, socketserver.TCPServer):
    """Forking TCP server that caps concurrent clients with a semaphore.

    The pipe endpoints are stored on the server so the request handler can
    forward packet data to, and read acknowledgements from, another process.
    """

    def __init__(self, data_pipe_w, ack_pipe_r, semaphore,
                 server_address, request_handler_class, bind_and_activate=True):
        super(_VideoPacketNetSocketServer, self).__init__(server_address, request_handler_class, bind_and_activate)
        self.data_pipe_w = data_pipe_w      # write end for outgoing packet data
        self.ack_pipe_r = ack_pipe_r        # read end for acknowledgements
        self._semaphore = semaphore         # limits simultaneous connections
        self._semaphore_acquired = False    # outcome of the latest verify_request

    def verify_request(self, request, client_address):
        """Accept the connection only if a semaphore slot is free.

        NOTE(review): _semaphore_acquired is shared instance state consulted
        later by shutdown_request — this relies on the accept loop handling
        requests sequentially in the parent process; confirm.
        """
        # Non-blocking acquire: refuse immediately instead of queueing.
        self._semaphore_acquired = self._semaphore.acquire(block=False)
        if self._semaphore_acquired:
            print("a video packet TCP client connected, client = %s:%d" % client_address)
            return True
        else:
            print("WARN: reject a connecting due to video packet TCP client max connections limit, "
                  "client = %s:%d" % client_address)
            return False

    def shutdown_request(self, request):
        """Close the request socket and release the semaphore slot if held.

        NOTE(review): calls close_request directly, skipping
        TCPServer.shutdown_request's explicit socket shutdown — confirm the
        half-close is intentionally omitted.
        """
        super(_VideoPacketNetSocketServer, self).close_request(request)
        if self._semaphore_acquired:
            self._semaphore.release()
            print("a video packet TCP client disconnected")
        else:
            print("a video packet TCP client rejected")
class VideoPacketNetSocketReaderServer(base_server.Server):
    """Server wrapper that reads video packets from TCP clients on all interfaces."""

    def __init__(self, name, data_pipe_w, ack_pipe_r, semaphore, port=9526):
        """Validate the pipe endpoints and build the (unbound) TCP server."""
        super(VideoPacketNetSocketReaderServer, self).__init__(name)
        if data_pipe_w is None:
            raise Exception("data write pipe connection is None")
        if ack_pipe_r is None:
            raise Exception("ack read pipe connection is None")
        # bind_and_activate=False: binding is deferred until serve() runs.
        self._server = _VideoPacketNetSocketServer(data_pipe_w, ack_pipe_r, semaphore,
                                                   ("0.0.0.0", port), net_sock_handler.VideoPacketNetSocketHandler,
                                                   bind_and_activate=False)

    def serve(self):
        """Bind, activate and serve forever; errors are logged, not raised."""
        try:
            self._server.server_bind()
            self._server.server_activate()
            self._server.serve_forever()
        except Exception as e:
            print("failed to read video packet: %s" % str(e))

    def stop(self):
        """Stop the serve_forever loop."""
        super(VideoPacketNetSocketReaderServer, self).stop()
        self._server.shutdown()

    def release(self):
        """Close the listening socket."""
        super(VideoPacketNetSocketReaderServer, self).release()
        self._server.server_close()
|
# coding: utf-8
from caty import UNDEFINED
from caty.core.schema import *
from caty.core.script.builder import CommandCombinator
from caty.core.script.interpreter import BaseInterpreter
from caty.core.script.node import *
from caty.core.std.command.builtin import Void
from caty.core.typeinterface import TreeCursor
from caty.core.command.usage import MiniDumper
import caty.jsontools as json
def hook(f):
    """Debug decorator (Python 2): print the wrapped callable, its arguments
    (minus the receiver) and its return value on every call."""
    def _(*args, **kwds):
        r = f(*args, **kwds)
        print f, args[1:], r
        return r
    return _
def dump(s):
    """Render a schema to text via MiniDumper."""
    return MiniDumper().visit(s)
class ScriptAnnotation(BaseInterpreter):
    u"""Annotate a Caty script (compiled to its internal form) with type
    information. (Translated from the original Japanese docstring.)

    Each visit_* method returns a list of source fragments; inferred
    input/output types are embedded as '/* ... */' comment markers.
    """

    def __remove_marker(self, s):
        # Strip the '/* */' delimiters, keeping only the annotation text.
        return s.replace('/*', '').replace('*/', '')

    def __walk_options(self, node):
        """Render the node's option/argument loaders back to CLI-style text."""
        vl = node.var_loader
        r = []
        for o in vl.opts:
            if o.type == 'option':
                if o.value == UNDEFINED:
                    r.append('--%s' % o.key)
                else:
                    r.append('--%s=%s' % (o.key, json.pp(o.value)))
            elif o.type == 'var':
                r.append('--%s=%s' % (o.key, o.value.name))
            elif o.type == 'glob':
                r.append('%--*')
            else:
                # NOTE(review): '%--%s' % key raises ValueError at runtime
                # ('-' is not a valid conversion after the '-' flag); the
                # intent was probably a literal '%%--' prefix — confirm.
                if o.optional:
                    r.append('%--%s?' % o.key)
                else:
                    r.append('%--%s' % o.key)
            r.append(' ')
        for a in vl.args:
            if a.type == 'arg':
                r.append(json.pp(a.value))
            elif a.type == 'iarg':
                r.append('%' + str(a.index))
            elif a.type == 'glob':
                r.append('%#')
            else:
                r.append('%' + a.key)
            r.append(' ')
        return r

    def __get_canonical_name(self, node):
        """Return 'module:name<T1, T2>', omitting the builtin module and empty params."""
        if node.profile_container.module and node.profile_container.module.name != 'builtin':
            name = '{0}:{1}'.format(node.profile_container.module.name, node.name)
        else:
            name = node.name
        params = []
        for p in node.profile_container.type_params:
            params.append(p.var_name)
        if not params:
            return name
        else:
            return name + '<%s>' % (', '.join(params))

    def visit(self, node):
        return node.accept(self)

    def visit_command(self, node):
        # Surround the command name with its input/output schema annotations.
        i = dump(node.in_schema)
        o = dump(node.out_schema)
        return ['/* %s */' % i, self.__get_canonical_name(node), '/* %s */' % o]

    def visit_script(self, node):
        return node.script.accept(self)

    def visit_pipe(self, node):
        a = node.bf.accept(self)
        b = node.af.accept(self)
        return a + [' | '] + b

    def visit_discard_pipe(self, node):
        a = node.bf.accept(self)
        b = node.af.accept(self)
        return a + [' ;\n '] + b

    def visit_scalar(self, node):
        return [json.pp(node.value)]

    def visit_list(self, node):
        # Collect per-element input (i) and output (o) annotations, then
        # hoist them to a single list-level marker on each side.
        r = ['[']
        i = []
        o = []
        for n in node:
            x = n.accept(self)
            if x[0].startswith('/*'):
                i.append(self.__remove_marker(x.pop(0)))
            if x[-1].startswith('/*'):
                o.append(self.__remove_marker(x.pop(-1)))
            else:
                o.extend(map(lambda a: self.__remove_marker(a), x))
            r.extend(x)
            r.append(', ')
        if r:
            r.pop(-1)  # drop the trailing ', '
        if i:
            r.insert(0, '/* %s */' % ('&'.join(i)))
        r.append(']')
        if o:
            r.append('/* [%s] */' % (', '.join(o)))
        return r

    def visit_object(self, node):
        # Same hoisting as visit_list, but output annotations are keyed.
        r = ['{']
        i = []
        o = {}
        for k, n in node.items():
            x = n.accept(self)
            if x[0].startswith('/*'):
                i.append(self.__remove_marker(x.pop(0)))
            if x[-1].startswith('/*'):
                o[k] = self.__remove_marker(x.pop(-1))
            else:
                o[k] = (map(lambda a: self.__remove_marker(a), x))
            r.extend(x)
            r.append(', ')
        if r:
            r.pop(-1)  # drop the trailing ', '
        if i:
            r.insert(0, '/* %s */' % ('&'.join(i)))
        r.append('}')
        if o:
            _ = []
            for k, v in o.items():
                _.append('%s: %s' % (k, v))
            r.append('/* {%s} */' % (', '.join(_)))
        return r

    def visit_varstore(self, node):
        return [' > ', node.var_name]

    def visit_varref(self, node):
        if node.optional:
            return ['%' + node.var_name + '?']
        else:
            return ['%' + node.var_name]

    def visit_argref(self, node):
        if node.optional:
            return ['%' + str(node.arg_num) + '?']
        else:
            return ['%' + str(node.arg_num)]

    def visit_when(self, node):
        # Branch tags become the input annotation; each branch contributes
        # its own output annotation.
        r = ['when']
        r.extend(self.__walk_options(node))
        r.append('{')
        i = []
        o = []
        for c in node.cases.values():
            if c.tag != '*':
                i.append('@' + c.tag)
            r.append(c.tag)
            r.append('=>')
            x = c.accept(self)
            r.extend(x)
            if x[-1].startswith('/*'):
                o.append(self.__remove_marker(x[-1]))
            else:
                o.append(x[-1])
        r.append('}')
        if i:
            r.insert(0, '/* %s */' % ('&'.join(i)))
        if o:
            r.append('/* [%s] */' % (', '.join(o)))
        return r

    def visit_binarytag(self, node):
        # Fold the tag into the command's trailing output annotation.
        r = node.command.accept(self)
        if r[-1].startswith('/*'):
            r[-1] = '/* @%s %s */' % (node.tag, self.__remove_marker(r[-1]))
        else:
            r[-1] = '/* @%s %s */' % (node.tag, r[-1])
        return r

    def visit_unarytag(self, node):
        return ['@' + node.tag]

    def visit_each(self, node):
        # The inner command's annotations become array annotations '[T*]'.
        r = ['each']
        r.extend(self.__walk_options(node))
        r.append('{')
        x = node.cmd.accept(self)
        if x[0].startswith('/*'):
            r.insert(0, '/* [%s*] */' % self.__remove_marker(x[0]))
            x.pop(0)
        r.extend(x)
        r.append('}')
        if x[-1].startswith('/*'):
            r.append('/* [%s*] */' % self.__remove_marker(x[-1]))
            x.pop(-1)
        else:
            r.append('/* [%s*] */' % x[-1])
        return r

    def visit_time(self, node):
        raise NotImplementedError(u'{0}#visit_time'.format(self.__class__.__name__))

    def visit_take(self, node):
        raise NotImplementedError(u'{0}#visit_take'.format(self.__class__.__name__))

    def visit_start(self, node):
        raise NotImplementedError(u'{0}#visit_start'.format(self.__class__.__name__))

    def visit_case(self, node):
        raise NotImplementedError(u'{0}#visit_case'.format(self.__class__.__name__))

    def visit_json_path(self, node):
        raise NotImplementedError(u'{0}#visit_json_path'.format(self.__class__.__name__))
|
from django.contrib import admin
from .forms import TagLayoutAdminForm
from .models import TagLayout
# Register your models here.
@admin.register(TagLayout)
class TagLayoutAdmin(admin.ModelAdmin):
    """Admin configuration for TagLayout, using its custom form for validation."""
    form = TagLayoutAdminForm
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
from nav_msgs.msg import Odometry
from tf2_msgs.msg import TFMessage
from geometry_msgs.msg import TransformStamped
# Publisher created in listener(); cache of static transforms to re-publish.
pub = None
tf_static_list = TFMessage()
def callback(datas):
    """Cache incoming static transforms, keeping one entry per child frame.

    Fix: the original appended unconditionally; because listener() also
    publishes to the same 'tf_static' topic, every re-publish re-entered
    this callback and the list grew without bound. De-duplicating by
    child_frame_id makes the callback idempotent.
    """
    for data in datas.transforms:
        # Replace any previously cached transform for the same child frame.
        tf_static_list.transforms[:] = [
            t for t in tf_static_list.transforms
            if t.child_frame_id != data.child_frame_id]
        tf_static_list.transforms.append(data)
def callbackPub(datas):
    """On every /tf message, refresh timestamps and re-publish the cached statics.

    Restamping with the incoming message's time keeps the static transforms
    looking fresh to downstream consumers.
    NOTE(review): datas.transforms[0] assumes /tf messages are never empty —
    confirm.
    """
    global pub, tf_static_list
    for tf in tf_static_list.transforms:
        tf.header.stamp = datas.transforms[0].header.stamp
    pub.publish(tf_static_list)
def listener():
    """Node entry point: subscribe to tf_static/tf and keep statics alive."""
    global pub
    rospy.init_node('tf_static_keeper', anonymous=True)
    pub = rospy.Publisher('tf_static', TFMessage, queue_size=10)
    # NOTE(review): this node both publishes and subscribes on 'tf_static',
    # so it receives its own output — confirm the feedback is intended.
    rospy.Subscriber("tf_static", TFMessage, callback)
    rospy.Subscriber("tf", TFMessage, callbackPub)
    rospy.spin()
if __name__ == '__main__':
listener() |
from ecomplexity.ecomplexity import ecomplexity
from ecomplexity.proximity import proximity
name = "ecomplexity"
|
class Solution(object):
    def maximalRectangle(self, matrix):
        """
        Return the area of the largest rectangle of '1's in a binary matrix.
        :type matrix: List[List[str]]
        :rtype: int

        Treats each row as the floor of a histogram whose bar heights are
        the run lengths of consecutive '1's ending at that row, then reuses
        largestRectangleArea per row. O(rows * cols) overall.
        """
        # Fix: xrange is Python 2 only (NameError on Python 3); range works
        # on both. Also accept any falsy matrix (None, [], [[]]) instead of
        # comparing against the literal [].
        if not matrix or not matrix[0]:
            return 0
        heights = [0] * len(matrix[0])
        maxArea = 0
        for row in matrix:
            for j, cell in enumerate(row):
                heights[j] = heights[j] + 1 if cell == '1' else 0
            maxArea = max(maxArea, self.largestRectangleArea(heights))
        return maxArea

    def largestRectangleArea(self, heights):
        """
        Return the largest rectangle area under a histogram.
        :type heights: List[int]
        :rtype: int
        """
        stack = []  # indices of bars with strictly increasing heights
        i = 0
        maxArea = 0
        h = heights + [0]  # zero sentinel flushes the stack at the end
        h_length = len(h)
        while i < h_length:
            if (not stack) or h[stack[-1]] < h[i]:
                stack.append(i)
                i += 1
            else:
                t = stack.pop()
                # Width spans back to the previous lower bar (or the start).
                maxArea = max(maxArea, h[t] * (i if not stack else i - stack[-1] - 1))
        return maxArea
from flask import Flask, request, jsonify
import requests
app = Flask(__name__)
@app.route('/products/all')
def get_all_products():
    """Proxy the full product list from the database service as JSON."""
    print("Listing all the products")
    # NOTE(review): hostname/port hard-coded; presumably resolved through
    # container-orchestration DNS — confirm against deployment config.
    response = requests.get('http://database-service:9090/products')
    return jsonify(response.json())
@app.route('/products/')
def get_matching_products():
    """Proxy a product search; the 'find' query parameter (default '') is
    forwarded as the backend's 'search' parameter."""
    response = requests.get('http://database-service:9090/products?search={0}'.format(request.args.get('find', '')))
    return jsonify(response.json())
if __name__ == '__main__':
app.run(host='0.0.0.0',port=80,debug=True) |
def extended_gcd(a, b):
    """Return (g, x, y) such that a*x + b*y == g == gcd(a, b)."""
    if a == 0:
        return b, 0, 1
    quotient, remainder = divmod(b, a)
    g, y, x = extended_gcd(remainder, a)
    return g, x - quotient * y, y


def modinv(a, m):
    """Return the multiplicative inverse of a modulo m.

    Raises Exception('Element has no inverse') when gcd(a, m) != 1.
    """
    g, x, _ = extended_gcd(a % m, m)
    if g != 1:
        raise Exception('Element has no inverse')
    return x % m
|
from __future__ import absolute_import # No implicit relative imports
from __future__ import print_function
from __future__ import division
from builtins import range
from rain.module_two import function_two
GLOBAL_VARIABLE_THREE = 3
def function_three():
    """Return function_two applied to 1 (delegates to rain.module_two)."""
    return function_two(1)
|
import wx
from vistas.ui.project import SceneNode
class FlythroughSceneSelector(wx.Dialog):
    """ A Dialog for choosing a Scene to create a Flythrough for. """

    def __init__(self, scene_list: "list[SceneNode]", parent, id):
        """Build the dialog with one Choice entry per scene label."""
        super().__init__(parent, id, "Select Flythrough Scene", style=wx.CAPTION | wx.STAY_ON_TOP)
        self.CenterOnParent()
        main_panel = wx.Panel(self)
        text = wx.StaticText(main_panel, wx.ID_ANY, "Select a scene to create a flythrough for:")
        self.choice = wx.Choice(main_panel, wx.ID_ANY, size=wx.Size(200, -1))
        for scene in scene_list:
            self.choice.Append(scene.label)
        self.choice.SetSelection(0)  # default to the first scene
        main_sizer = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(main_sizer)
        main_panel_sizer = wx.BoxSizer(wx.VERTICAL)
        main_panel.SetSizer(main_panel_sizer)
        main_panel_sizer.Add(text)
        main_panel_sizer.Add(self.choice, 0, wx.EXPAND)
        main_sizer.Add(main_panel, 1, wx.EXPAND | wx.ALL, 5)
        # Fix: the flags were passed as separate arguments
        # (..., wx.EXPAND, wx.ALL, 5), which made wx.ALL the border width;
        # combine them into one flag with a 5px border as on the line above.
        main_sizer.Add(self.CreateButtonSizer(wx.OK | wx.CANCEL), 0, wx.EXPAND | wx.ALL, 5)
        self.Fit()

    def GetSceneChoice(self):
        """Return the selected index into the scene_list given at construction."""
        return self.choice.GetSelection()
|
import wx
class MyFrame(wx.Frame):
    """Frame demonstrating absolute positioning of buttons on a panel."""

    def __init__(self):
        wx.Frame.__init__(self, parent=None, title="Panel Example")
        self.panel = wx.Panel(self)
        self.panel.SetBackgroundColour (wx.BLACK)
        # Each button's label states its own position on the panel.
        # Fix: button1's label was "50, 50)" — missing the opening parenthesis,
        # inconsistent with button2's "(250, 100)".
        self.button1 = wx.Button(self.panel, label="(50, 50)")
        self.button1.SetPosition((50, 50))
        self.button2 = wx.Button(self.panel, label="(250, 100)")
        self.button2.SetPosition((250, 100))
if __name__ == "__main__":
    # Standard wxPython bootstrap: create the app, show the frame,
    # and enter the GUI event loop (blocks until the window closes).
    app = wx.App()
    frame = MyFrame()
    frame.Show()
    app.MainLoop()
|
class Utils(object):
    ''' This class contains helpers to use in tests '''

    def __init__(self):
        pass

    # Canned registration payloads for the three user roles used in tests.
    ADMIN = {
        'firstname': 'Luda',
        'lastname': 'one',
        'othername': 'politic',
        'email': 'politician@app.com',
        'phone_number': '254726094972',
        'passportUrl' : 'some/url/pass.jpg',
        'address': '123 fith street',
        'password': 'password',
        'is_admin': True,
        'is_politician': False
    }
    POLITICIAN = {
        'firstname': 'Donald',
        'lastname': 'Duck',
        'othername': 'quack',
        'email': 'admin@app.com',
        'phone_number': '254726094972',
        'passportUrl' : 'some/url/pass.jpg',
        'home_county': 'Rongai',
        'password': 'password',
        'is_admin': False,
        'is_politician': True
    }
    USER = {
        'firstname': 'Luda',
        'lastname': 'one',
        'othername': 'luqee',
        'email': 'user@app.com',
        'phone_number': '254726094972',
        'passportUrl' : 'some/url/pass.jpg',
        'password': 'password',
        'is_admin': False,
        'is_politician': False
    }
    PARTIES = [
        {
            'name': 'Chama Kikuu',
            'hq_address': '564 Lon Road',
            'logo_url': 'url/to/log.jpg',
            'description': 'Lorem ipsum dolor sit amet, consectetur adipiscing elitsed do eiusmod tempor incididunt ut labore et dolore magna aliqua.'
        },
        {
            'name': 'Party 2',
            'hq_address': '786 city square',
            'logo_url': 'url/to/logo.jpg',
            'description': 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitatio'
        },
    ]
    OFFICES = [
        {
            'name': 'President',
            'office_type': 'State',
            'description': 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitatio'
        },
        {
            'name': 'Women Rep',
            'office_type': 'Legislative',
            'description': 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitatio'
        },
        {
            'name': 'Chief',
            'office_type': 'Local Government',
            'description': 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitatio'
        },
    ]

    def _user_for(self, user_type):
        ''' Return the canned payload for user_type ('admin'/'politician'/other->USER). '''
        if user_type == 'admin':
            return self.ADMIN
        if user_type == 'politician':
            return self.POLITICIAN
        return self.USER

    def register_user(self, client, user_type):
        ''' This method registers a specific user '''
        client.post('api/v1/auth/user/register', json=self._user_for(user_type))

    def login_user(self, client, user_type):
        ''' This method logs in a specific user '''
        user = self._user_for(user_type)
        data = {'email': user['email'], 'password': user['password']}
        return client.post('api/v1/auth/user/login', json=data)

    def create_party(self, client, party, headers):
        ''' This method creates a party '''
        client.post('api/v1/parties', json=party, headers=headers)

    def create_office(self, client, office, headers):
        ''' This method creates an office '''
        client.post('api/v1/offices', json=office, headers=headers)

    def create_parties(self, client, headers):
        ''' This method creates multiple parties '''
        for party in self.PARTIES:
            self.create_party(client, party, headers)

    def create_offices(self, client, headers):
        ''' This method creates multiple offices '''
        for office in self.OFFICES:
            self.create_office(client, office, headers)
|
class Solution:
    def spiralOrder(self, matrix: List[List[int]]) -> List[int]:
        """Return all elements of *matrix* in clockwise spiral order."""
        if not matrix or not matrix[0]:
            return []
        top, bottom = 0, len(matrix) - 1
        left, right = 0, len(matrix[0]) - 1
        result = []
        # Peel one full ring per iteration while a ring (at least 2x2) remains.
        while right > left and bottom > top:
            # top row, left -> right
            result.extend(matrix[top][left:right + 1])
            top += 1
            # right column, top -> bottom
            result.extend(matrix[r][right] for r in range(top, bottom + 1))
            right -= 1
            # bottom row, right -> left
            result.extend(matrix[bottom][left:right + 1][::-1])
            bottom -= 1
            # left column, bottom -> top
            result.extend(matrix[r][left] for r in range(bottom, top - 1, -1))
            left += 1
        if right == left:
            # A single column remains.
            result.extend(matrix[r][right] for r in range(top, bottom + 1))
        elif top == bottom:
            # A single row remains.
            result.extend(matrix[top][left:right + 1])
        return result
import numpy as np
from Utilities.ComputeSimilarityPython import ComputeSimilarityPython
class ItemBasedCollaborativeFilteringRecommender(object):
    """Item-item collaborative filtering over a playlist-item URM (CSR sparse matrix)."""

    def __init__(self, URM):
        self.URM = URM

    def fit(self, topK=150, shrink=8, normalize=True):
        """Build and store the item-item similarity matrix W_sparse."""
        similarity_builder = ComputeSimilarityPython(self.URM, shrink=shrink, topK=topK, normalize=normalize)
        self.W_sparse = similarity_builder.compute_similarity()

    def recommend(self, playlist_id, at=None, exclude_seen=True):
        """Return item ids ranked by predicted score for *playlist_id* (top *at*)."""
        profile = self.URM[playlist_id]
        # Score every item as the dot product of the profile with the similarity matrix.
        scores = profile.dot(self.W_sparse).toarray().ravel()
        if exclude_seen:
            scores = self.filter_seen(playlist_id, scores)
        return scores.argsort()[::-1][:at]

    def get_scores(self, playlist_id):
        """Return all item scores for *playlist_id*, normalized by the maximum score."""
        profile = self.URM[playlist_id]
        scores = profile.dot(self.W_sparse).toarray().ravel()
        # NOTE(review): a zero maximum would yield NaN/inf here — confirm callers guard it.
        return np.true_divide(scores, np.amax(scores))

    def filter_seen(self, playlist_id, scores):
        """Set scores of items already in the playlist to -inf so they never rank."""
        row_start = self.URM.indptr[playlist_id]
        row_end = self.URM.indptr[playlist_id + 1]
        scores[self.URM.indices[row_start:row_end]] = -np.inf
        return scores
|
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from unittest import skip
from .base import FunctionalTest
import sys
class NewVisitorTest(FunctionalTest):
    """Functional test: a new visitor can create a personal to-do list with its own URL."""

    def test_can_start_a_list_and_retrieve_it_later(self):
        # Minsoo hears about a cool new online to-do app
        # and goes to check out its website
        self.browser.get(self.server_url)
        # The page title and header mention 'To-Do'
        self.assertIn('To-Do', self.browser.title)
        header_text = self.browser.find_element_by_tag_name('h1').text
        self.assertIn('To-Do', header_text)
        # He is invited to enter a to-do item straight away
        inputbox = self.get_item_input_box()
        self.assertEqual(
            inputbox.get_attribute('placeholder'),
            '작업 아이템 입력'
        )
        # He types "Buy peacock feathers" into the text box
        # (Minsoo's hobby is making fly-fishing nets)
        inputbox.send_keys('공작깃털 사기')
        # When he hits enter, the page updates and the list now shows
        # "1: Buy peacock feathers" as an item
        inputbox.send_keys(Keys.ENTER)
        minsoo_list_url = self.browser.current_url
        self.assertRegex(minsoo_list_url, '/lists/.+')
        self.check_for_row_in_list_table('1: 공작깃털 사기')
        # There is still a text box inviting him to add another item; he enters
        # "Make a net using peacock feathers" (Minsoo is very methodical)
        inputbox = self.get_item_input_box()
        inputbox.send_keys('공작깃털을 이용해서 그물 만들기')
        inputbox.send_keys(Keys.ENTER)
        # The page updates again and now shows both items on his list
        self.check_for_row_in_list_table('1: 공작깃털 사기')
        self.check_for_row_in_list_table('2: 공작깃털을 이용해서 그물 만들기')
        # Now a new user, Euihyun, comes along to the site.
        ## We start a new browser session to make sure that none of Minsoo's
        ## information is coming through from cookies etc.
        self.browser.quit()
        self.browser = webdriver.Firefox()
        ## Euihyun visits the home page.
        ## There is no sign of Minsoo's list.
        self.browser.get(self.server_url)
        page_text = self.browser.find_element_by_tag_name('body').text
        self.assertNotIn('공작깃털 사기', page_text)
        self.assertNotIn('그물 만들기', page_text)
        ## Euihyun starts a new list by entering a new item.
        ## She is less interesting than Minsoo.
        inputbox = self.get_item_input_box()
        inputbox.send_keys('우유 사기')
        inputbox.send_keys(Keys.ENTER)
        # Euihyun gets her own unique URL
        euihyun_list_url = self.browser.current_url
        self.assertRegex(euihyun_list_url, '/lists/.+')
        self.assertNotEqual(euihyun_list_url, minsoo_list_url)
        # Again, there is no trace of Minsoo's list
        page_text = self.browser.find_element_by_tag_name('body').text
        self.assertNotIn('공작깃털 사기', page_text)
        self.assertNotIn('그물 만들기', page_text)
|
from pyrunner import tasks
from pyrunner.misc.types import TaskMetadata
# Task schedule: each TaskMetadata names a task class and how often (seconds) to run it.
TASKS = (
    TaskMetadata(name="task reloader", interval=5, task_type=tasks.TaskReloaderTask),
    TaskMetadata(name="ynet downloader", interval=10, task_type=tasks.URLDownloaderTask,
                 execute_kwargs={"url": "http://ynet.co.il", "output_path": "/usr/src/app/ynet.html"})
)
|
# Example script: run neural style transfer on two bundled example images.
from pyvision.misc.NeuralStyleTransfer import NeuralStyle
# Base directory of the bundled example images.
__PREFIX__ = "pyvision/misc/NeuralStyleTransfer/Examples/"
# provide the paths to the two images (style source and content target)
style_img, content_img = (__PREFIX__+'images/style1.jpg', __PREFIX__+'images/content2.jpg')
# if you do not wish to use gpu, pass use_gpu=False as a parameter, i.e., nst=Neural_Style(num_steps=300, use_gpu=False)
nst = NeuralStyle(num_steps=300, retain_dims=False)
# call the function to run neural style transfer
output, time = nst.run_style_transfer(style_img, content_img)
# NOTE(review): 'time' here shadows the stdlib module name; it holds the elapsed run time.
print("time taken: ", time)
|
from autorecon.plugins import PortScan
from autorecon.config import config
import re
class AllTCPPortScan(PortScan):
    """Port-scan plugin that runs nmap service/script detection over all TCP ports."""

    def __init__(self):
        super().__init__()
        self.name = 'All TCP Ports'
        self.description = 'Performs an Nmap scan of all TCP ports.'
        self.type = 'tcp'
        self.specific_ports = True
        self.tags = ['default', 'default-port-scan', 'long']

    async def run(self, target):
        """Scan *target*, streaming nmap output; return the discovered services."""
        # OS detection / traceroute (-A) is skipped when proxying through proxychains.
        if config['proxychains']:
            traceroute_os = ''
        else:
            traceroute_os = ' -A --osscan-guess'
        if target.ports:
            if target.ports['tcp']:
                # Restrict the scan to the explicitly requested TCP ports.
                process, stdout, stderr = await target.execute('nmap {nmap_extra} -sV -sC --version-all' + traceroute_os + ' -p ' + target.ports['tcp'] + ' -oN "{scandir}/_full_tcp_nmap.txt" -oX "{scandir}/xml/_full_tcp_nmap.xml" {address}', blocking=False)
            else:
                # Ports were specified for this target but none are TCP: nothing to do.
                return []
        else:
            # No restriction: scan the full TCP port range (-p-).
            process, stdout, stderr = await target.execute('nmap {nmap_extra} -sV -sC --version-all' + traceroute_os + ' -p- -oN "{scandir}/_full_tcp_nmap.txt" -oX "{scandir}/xml/_full_tcp_nmap.xml" {address}', blocking=False)
        services = []
        # Stream nmap output line by line, announcing open ports as they appear.
        while True:
            line = await stdout.readline()
            if line is not None:
                # NOTE(review): assumes the stream wrapper yields None at EOF
                # (unlike asyncio streams, which yield '') — confirm target.execute's contract.
                match = re.search('^Discovered open port ([0-9]+)/tcp', line)
                if match:
                    target.info('Discovered open port {bmagenta}tcp/' + match.group(1) + '{rst} on {byellow}' + target.address + '{rst}', verbosity=1)
                service = target.extract_service(line)
                if service:
                    services.append(service)
            else:
                break
        await process.wait()
        return services
|
import math
import time
import torch
from tqdm import tqdm as tqdm_wrap
from marshmallow.exceptions import ValidationError
from torch.utils.data import DataLoader
from patter.config import EvaluatorConfiguration
from patter.data import audio_seq_collate_fn
from patter.decoder import DecoderFactory, GreedyCTCDecoder
from patter.util import AverageMeter, TranscriptionError, split_targets
class Evaluator(object):
    """Evaluates a trained speech model over a test corpus and reports error stats."""

    def __init__(self, cfg, tqdm=False, verbose=False, out_stream=None):
        # cfg is the validated EvaluatorConfiguration payload (dict-like).
        self.cfg = cfg
        self.cuda = cfg['cuda']
        self.tqdm = tqdm
        self.verbose = verbose
        self.out_stream = out_stream

    def eval(self, model, corpus):
        """Decode *corpus* with *model*; return the result of validate()."""
        test_loader = DataLoader(corpus, num_workers=self.cfg['num_workers'], collate_fn=audio_seq_collate_fn,
                                 pin_memory=self.cuda, batch_size=self.cfg['batch_size'])
        if self.cuda:
            model = model.cuda()
        # Build the decoder named in the config (e.g. greedy or beam search).
        decoder = DecoderFactory.create(self.cfg['decoder'], model.labels, blank_index=model.blank_index)
        return validate(test_loader, model, decoder=decoder, tqdm=self.tqdm, verbose=self.verbose, out_stream=self.out_stream)

    @classmethod
    def load(cls, evaluator_config, tqdm=False, verbose=False, out_stream=None):
        """Alternate constructor: validate a raw config dict and build an Evaluator."""
        try:
            cfg = EvaluatorConfiguration().load(evaluator_config)
            if len(cfg.errors) > 0:
                raise ValidationError(cfg.errors)
        except ValidationError as err:
            raise err
        return cls(cfg.data, tqdm=tqdm, verbose=verbose, out_stream=out_stream)
def validate(val_loader, model, decoder=None, tqdm=True, training=False, log_n_examples=0, verbose=False, out_stream=None):
    """Decode every batch in *val_loader* and accumulate transcription error.

    Returns (err, avg_loss, examples) when training=True,
    (err, examples) when any examples were logged, otherwise just err.
    """
    # Unwrap DataParallel (if present) to reach the underlying model's attributes.
    labels = model.module.labels if type(model) == torch.nn.DataParallel else model.labels
    blank_index = model.module.blank_index if type(model) == torch.nn.DataParallel else model.blank_index
    target_decoder = GreedyCTCDecoder(labels, blank_index=blank_index)
    if decoder is None:
        decoder = target_decoder
    batch_time = AverageMeter()
    losses = AverageMeter()
    model.eval()
    loader = tqdm_wrap(val_loader, desc="Validate", leave=not training) if tqdm else val_loader
    end = time.time()
    err = TranscriptionError()
    examples = []
    for i, data in enumerate(loader):
        # Loss is only computed in training mode (losses=None disables it per batch).
        err_inst, example = validate_batch(i, data, model, decoder, target_decoder, verbose=verbose, losses=losses if training else None, out_stream=out_stream)
        err += err_inst
        if len(examples) < log_n_examples:
            examples.append(example)
        # measure time taken
        batch_time.update(time.time() - end)
        end = time.time()
    if training:
        return err, losses.avg, examples
    if len(examples) > 0:
        return err, examples
    return err
def validate_batch(i, data, model, decoder, target_decoder, verbose=False, losses=None, out_stream=None):
    """Decode one batch; return (TranscriptionError, (first_hypothesis, first_reference)).

    When *losses* is supplied, also computes the model loss and records it.
    """
    # Unwrap DataParallel (if present) for loss function and device flag.
    loss_fn = model.module.loss if type(model) == torch.nn.DataParallel else model.loss
    is_cuda = model.module.is_cuda if type(model) == torch.nn.DataParallel else model.is_cuda
    # create variables
    feat, target, feat_len, target_len, paths = data
    with torch.no_grad():
        if is_cuda:
            feat = feat.cuda()
        target = target.cpu()
        feat_len = feat_len.cpu()
        target_len = target_len.cpu()
        # compute output
        output, output_len = model(feat, feat_len)
        output_len = output_len.cpu()
        if losses is not None:
            mb_loss = loss_fn(output, target, output_len, target_len)
            avg_loss = mb_loss.detach().sum() / feat.size(0)  # average the loss by minibatch
            inf = math.inf
            # Guard against degenerate batches producing an infinite loss.
            if avg_loss == inf or avg_loss == -inf:
                print("WARNING: received an inf loss, setting loss value to 0")
                avg_loss = 0
            losses.update(avg_loss, feat.size(0))
        # do the decode
        decoded_output, _, _ = decoder.decode(output, output_len)
        target_strings = target_decoder.convert_to_strings(split_targets(target, target_len))
        # Keep the first (hypothesis, reference) pair as a loggable example.
        example = (decoded_output[0][0], target_strings[0][0])
        err = TranscriptionError()
        for x in range(len(target_strings)):
            transcript, reference = decoded_output[x][0], target_strings[x][0]
            err_inst = TranscriptionError.calculate(transcript, reference)
            err += err_inst
            if verbose:
                print("Path:", paths[x])
                print("Ref:", reference.lower())
                print("Hyp:", transcript.lower())
                print("WER:", err_inst.wer, "CER:", err_inst.cer, "\n")
            if out_stream is not None:
                out_stream.write(paths[x] + "," + transcript + "\n")
        # Free the (potentially large) activation tensors before returning.
        del output
        del output_len
    return err, example
|
class Product:
    """A sellable item with a name, a price, and a percentage discount."""

    def __init__(self, name="", price=0.0, discountPercent=0):
        self.name = name
        self.price = price
        self.discountPercent = discountPercent

    def getDiscountAmount(self):
        """Amount of money taken off the price by the discount."""
        return self.price * self.discountPercent / 100

    def getDiscountPrice(self):
        """Price after subtracting the discount amount."""
        return self.price - self.getDiscountAmount()

    def getDescription(self):
        """Human-readable description; subclasses extend this."""
        return self.name
class Media(Product):
    """A Product with a physical/digital format (e.g. Hardcover, DVD, cassette)."""

    def __init__(self, name="", price=0.0, discountPercent=0, format=""):
        Product.__init__(self, name, price, discountPercent)
        self.format = format
class Book(Media):
    """A Media item written by an author; format defaults to Hardcover."""

    def __init__(self, name="", price=0.0, discountPercent=0, author="", format="Hardcover"):
        Media.__init__(self, name, price, discountPercent, format)
        self.author = author

    def getDescription(self):
        """Description including the author, e.g. 'Title by Author'."""
        return Media.getDescription(self) + " by " + self.author
class Album(Media):
    """A Media item recorded by an artist; format defaults to cassette."""

    def __init__(self, name="", price=0.0, discountPercent=0, author="", format="cassette"):
        self.author = author
        Media.__init__(self, name, price, discountPercent, format)

    def getDescription(self):
        """Description including the artist, e.g. 'Title by Artist'."""
        return Media.getDescription(self) + " by " + self.author
class Movie(Media):
    """A Media item with a release year; format defaults to DVD."""

    def __init__(self, name="", price=0.0, discountPercent=0, year=0, format="DVD"):
        self.year = year
        Media.__init__(self, name, price, discountPercent, format)

    def getDescription(self):
        """Description including the year, e.g. 'Title (1999)'."""
        return Media.getDescription(self) + " (" + str(self.year) + ")"
|
"""
This assumes data is generated by format_tercom.py
"""
import sys
import parse_pra_xml
import codecs
def read_file(file_path):
    """Read a UTF-8 text file; return one list of whitespace-split tokens per line."""
    with codecs.open(file_path, 'r', "utf-8") as fid:
        lines = fid.readlines()
    return [line.rstrip().split() for line in lines]
# Command line: tercom xml, original MT file, original PE file, output alignment path.
# Fix: the original checked len(sys.argv[:1]) == 1, which is always true (that
# slice is just the script name); the 4-way unpack below needs four arguments.
assert len(sys.argv[1:]) == 4, "Expected four arguments"
(
    in_tercom_xml, in_mt_original, in_pe_original, out_align
) = sys.argv[1:]
# Read original files
mt_original = read_file(in_mt_original)
pe_original = read_file(in_pe_original)
# Parse tercom HTML
mt_tokens, pe_tokens, edits, hters = parse_pra_xml.parse_file(in_tercom_xml)
# Sanity check: Original and tercom files match in number of tokens
# Note that we will not use the tokenized tercom outputs only the alignments
nr_sentences = len(mt_tokens)
for index in range(nr_sentences):
    assert len(mt_original[index]) == len([x for x in mt_tokens[index] if x]), \
        "Lengths do not match"
    assert len(pe_original[index]) == len([x for x in pe_tokens[index] if x]), \
        "Lengths do not match"
# Convert per-sentence edit sequences into "pe_idx-mt_idx" alignment strings.
edit_alignments = []
for sent_index, sent_edits in enumerate(edits):
    pe_original_index = 0
    mt_original_index = 0
    edit_alignments_sent = []
    sent_edit_actions = []
    for edit in sent_edits:
        # Store edit action
        sent_edit_actions.append(edit.o)
        if edit.o == 'C':
            # Correct: tokens must agree case-insensitively
            # (tercom ignores case unless -s is used).
            if (
                mt_original[sent_index][mt_original_index].lower() !=
                pe_original[sent_index][pe_original_index].lower()
            ):
                raise Exception("Reading Tercom xml failed")
            edit_alignments_sent.append("-".join([
                str(pe_original_index),
                str(mt_original_index)
            ]))
            pe_original_index += 1
            mt_original_index += 1
        elif edit.o == 'S':
            # Substitution: both sides advance.
            edit_alignments_sent.append("-".join([
                str(pe_original_index),
                str(mt_original_index)
            ]))
            pe_original_index += 1
            mt_original_index += 1
        elif edit.o == 'I':
            # Insertion: only the post-edit side advances.
            edit_alignments_sent.append("-".join([str(pe_original_index), '']))
            pe_original_index += 1
        elif edit.o == 'D':
            # Deletion: only the MT side advances.
            edit_alignments_sent.append("-".join(['', str(mt_original_index)]))
            mt_original_index += 1
        else:
            # Fix: message typo "Uknown" -> "Unknown".
            raise Exception("Unknown edit %s" % edit.o)
    edit_alignments.append(edit_alignments_sent)
with codecs.open(out_align, 'w', 'utf-8') as fid:
    for sent_edits in edit_alignments:
        fid.write("%s\n" % (" ".join(sent_edits)))
|
from django.db import models
class Student(models.Model):
    """Database model for students"""
    # Full name of the student.
    name = models.CharField(max_length=100)
    # Unique roll number identifying the student.
    roll_number = models.IntegerField(unique=True)
    date_of_birth = models.DateField()
class Mark(models.Model):
    """Database model for marks"""
    # Numeric score awarded to the student.
    mark = models.FloatField()
    # One mark record per student; deleting the student cascades to the mark.
    student = models.OneToOneField(Student, on_delete=models.CASCADE)
|
# Generated by Django 3.2.4 on 2021-06-26 19:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds per-language (en/pl) text columns
    to the Answer and Question models for translation support."""
    dependencies = [
        ('survey', '0004_survey_name_pl'),
    ]
    operations = [
        migrations.AddField(
            model_name='answer',
            name='text_en',
            field=models.CharField(max_length=16, null=True, unique=True, verbose_name='text'),
        ),
        migrations.AddField(
            model_name='answer',
            name='text_pl',
            field=models.CharField(max_length=16, null=True, unique=True, verbose_name='text'),
        ),
        migrations.AddField(
            model_name='question',
            name='text_en',
            field=models.CharField(max_length=64, null=True, unique=True, verbose_name='text'),
        ),
        migrations.AddField(
            model_name='question',
            name='text_pl',
            field=models.CharField(max_length=64, null=True, unique=True, verbose_name='text'),
        ),
    ]
|
class BasicEqualityMixin(object):
    """
    Provides equals and not-equals methods based on the
    member variables of the object being equal. Can be
    inherited by any class where only those variables
    are considered for equality
    """

    def __eq__(self, other):
        # Deliberately no isinstance/type check here: we avoid making
        # assumptions about how subclasses will work.
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)
class BasicPrintMixin(object):
    """
    Provides simple str and repr methods to a class based on whatever the
    member variables of the class are
    """

    def __repr__(self):
        """Unambiguous string: '<ClassName>::<member dict>'."""
        return "{}::{}".format(self.__class__.__name__, self.__dict__)

    def __str__(self):
        """Human readable string (identical to repr)."""
        return repr(self)
class BasicPopoMixin(BasicEqualityMixin, BasicPrintMixin):
    """
    Inherits the individual Basic*Mixin classes for a class that
    can be treated as a data container (plain old python object: POPO)
    """
    # All behavior comes from the two parent mixins; nothing to add here.
    pass
|
#------------------------------------------------------------------------------
#
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# Author: David C. Morrill
# Date: 10/25/2004
#
#------------------------------------------------------------------------------
""" Defines the HTML help templates used for formatting Traits UI help pages.
"""
#-------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------
from __future__ import absolute_import
from traits.api import HasStrictTraits, Str
#-------------------------------------------------------------------------
# Constants:
#-------------------------------------------------------------------------
# Default HTML for a single Item's help window
ItemHTML = """
<HTML>
<BODY BGCOLOR="#E8E5D4">
<TABLE CELLPADDING="0">
<TR>
<TD BGCOLOR="#000000">
<TABLE CELLSPACING = "1">
<TR>
<TD WIDTH="20%%" VALIGN="TOP" BGCOLOR="#9DB8F4"><B>%s</B></TD>
<TD WIDTH="80%%" VALIGN="TOP" BGCOLOR="#C1D2F9">%s</TD>
</TR>
</TABLE>
</TD>
</TR>
</TABLE>
</BODY>
</HTML>"""
# Default HTML for a complete Group's help window
GroupHTML = """
<HTML>
<BODY BGCOLOR="#E8E5D4">%s
<TABLE CELLPADDING="0">
<TR>
<TD BGCOLOR="#000000">
<TABLE CELLSPACING="1">%s</TABLE>
</TD>
</TR>
</TABLE>
</BODY>
</HTML>"""
# Default HTML for a single Item within a Group
ItemHelp = """
<TR>
<TD WIDTH="20%%" VALIGN="TOP" BGCOLOR="#9DB8F4"><B>%s</B>:</TD>
<TD WIDTH="80%%" VALIGN="TOP" BGCOLOR="#C1D2F9">%s</TD>
</TR>"""
# Default HTML for formatting a Group's 'help' trait
GroupHelp = """
<TABLE WIDTH="100%%" CELLPADDING="0">
<TR>
<TD BGCOLOR="#000000">
<TABLE CELLSPACING="1">
<TR>
<TD BGCOLOR="#CDCDB6">%s</TD>
</TR>
</TABLE>
</TD>
</TR>
</TABLE>"""
#-------------------------------------------------------------------------
# 'HelpTemplate' class:
#-------------------------------------------------------------------------
class HelpTemplate(HasStrictTraits):
    """ Contains HTML templates for displaying help.
    """
    # Each trait holds a %-format template string (defaults defined above).
    item_html = Str(ItemHTML)  # Item popup help window HTML document
    group_html = Str(GroupHTML)  # Group help window HTML document
    item_help = Str(ItemHelp)  # Single group item HTML
    group_help = Str(GroupHelp)  # Group level help HTML
    no_group_help = Str('')  # Missing group level help HTML
#-------------------------------------------------------------------------
# Gets/Sets the current HelpTemplate in use:
#-------------------------------------------------------------------------
# Module-wide singleton holding the HelpTemplate currently in effect.
_help_template = HelpTemplate()
def help_template(template=None):
    """ Gets or sets the current HelpTemplate in use.
    """
    # Passing a template replaces the module-wide instance; either way
    # the current instance is returned.
    global _help_template
    if template is not None:
        _help_template = template
    return _help_template
|
"""
Represents an invitation message for establishing connection.
"""
from datetime import datetime, timezone
from typing import Union
from marshmallow import fields
from ...agent_message import AgentMessage, AgentMessageSchema
from ..message_types import BASIC_MESSAGE
HANDLER_CLASS = (
"indy_catalyst_agent.messaging.basicmessage."
+ "handlers.basicmessage_handler.BasicMessageHandler"
)
class BasicMessage(AgentMessage):
    """Agent message carrying free-form text content and a sent timestamp."""

    class Meta:
        handler_class = HANDLER_CLASS
        message_type = BASIC_MESSAGE
        schema_class = "BasicMessageSchema"

    def __init__(
        self, *, sent_time: Union[str, datetime] = None, content: str = None, **kwargs
    ):
        """Initialize a basic message.

        Args:
            sent_time: When the message was sent (string or datetime);
                defaults to the current UTC time.
            content: The message text.
        """
        super(BasicMessage, self).__init__(**kwargs)
        if not sent_time:
            # Fix: use an aware UTC datetime instead of the naive utcnow();
            # the branch below still normalizes it to the same string format.
            sent_time = datetime.now(timezone.utc)
        if isinstance(sent_time, datetime):
            sent_time = sent_time.replace(tzinfo=timezone.utc).isoformat(" ")
        self.sent_time = sent_time
        self.content = content
class BasicMessageSchema(AgentMessageSchema):
    """Marshmallow schema for (de)serializing BasicMessage."""
    class Meta:
        model_class = BasicMessage
    # sent_time is optional (BasicMessage fills it in when missing); content is required.
    sent_time = fields.Str(required=False)
    content = fields.Str(required=True)
|
import argparse
from deimos import __version__
from multiprocessing import cpu_count
from pkg_resources import resource_filename
from snakemake import snakemake
def main():
    """CLI entry point: parse arguments and invoke the DEIMoS snakemake workflow."""
    parser = argparse.ArgumentParser(description='DEIMoS: Data Extraction for Integrated Multidimensional Spectrometry')
    parser.add_argument('-v', '--version', action='version', version=__version__, help='print version and exit')
    parser.add_argument('--config', metavar='PATH', default='config.yaml', help='path to yaml configuration file')
    parser.add_argument('--dryrun', action='store_true', help='perform a dry run')
    parser.add_argument('--unlock', action='store_true', help='unlock directory')
    parser.add_argument('--touch', action='store_true', help='touch output files only')
    parser.add_argument('--latency', metavar='N', type=int, default=3, help='specify filesystem latency (seconds)')
    parser.add_argument('--cores', metavar='N', type=int, default=cpu_count(),
                        help='number of cores used for execution (local execution only)')
    parser.add_argument('--count', metavar='N', type=int,
                        help='number of files to process (limits DAG size)')
    parser.add_argument('--start', metavar='IDX', type=int, default=0,
                        help='starting file index (for use with --count)')
    # cluster-specific options
    clust = parser.add_argument_group('cluster arguments')
    clust.add_argument('--cluster', metavar='PATH', help='path to cluster execution yaml configuration file')
    clust.add_argument('--jobs', metavar='N', type=int, default=1000,
                       help='number of simultaneous jobs to submit to a slurm queue')
    # parse args
    args = parser.parse_args()
    # start/stop config: optionally restrict the workflow to a range of files
    if args.count is not None:
        config = {'start': args.start, 'stop': args.start + args.count}
    else:
        config = {}
    # cluster config: submit through sbatch when a cluster config file is given
    if args.cluster is not None:
        cluster = "sbatch -A {cluster.account} -N {cluster.nodes} -t {cluster.time} -J {cluster.name} --ntasks-per-node {cluster.ntasks} -p {cluster.partition}"
    else:
        cluster = None
    # Run the bundled default workflow with the assembled options.
    snakemake(resource_filename('workflows', 'default.smk'),
              configfiles=[args.config],
              config=config,
              cluster_config=args.cluster,
              cluster=cluster,
              keepgoing=True,
              force_incomplete=True,
              cores=args.cores,
              nodes=args.jobs,
              dryrun=args.dryrun,
              unlock=args.unlock,
              touch=args.touch,
              latency_wait=args.latency)
if __name__ == '__main__':
    main()
|
from utils.api import UsernameSerializer, serializers
from .models import Course, Practice
from account.models import User
from problem.serializers import ProblemSerializer
class CourseSerializer(serializers.ModelSerializer):
    """Read serializer for Course with nested creator username and problem list."""
    created_by = UsernameSerializer()
    problems = ProblemSerializer(many=True)
    class Meta:
        model = Course
        fields = "__all__"
class CreateCourseSerializer(serializers.ModelSerializer):
    """Input serializer for creating a Course (title and description only)."""
    class Meta:
        model = Course
        fields = ['title', 'description']
class EditCourseSerializer(serializers.ModelSerializer):
    """Input serializer for editing a Course; id selects the target row."""
    id = serializers.IntegerField()
    class Meta:
        model = Course
        fields = ['id', 'title', 'description']
class PracticeSerializer(serializers.ModelSerializer):
    """Read serializer for Practice with nested creator username and problem list."""
    created_by = UsernameSerializer()
    problems = ProblemSerializer(many=True)
    class Meta:
        model = Practice
        fields = "__all__"
class CreatePracticeSerializer(serializers.ModelSerializer):
    """Input serializer for creating a Practice (title and description only)."""
    class Meta:
        model = Practice
        fields = ['title', 'description']
class EditPracticeSerializer(serializers.ModelSerializer):
    """Input serializer for editing a Practice; id selects the target row."""
    id = serializers.IntegerField()
    class Meta:
        model = Practice
        fields = ['id', 'title', 'description']
class ParticipantSerializer(serializers.ModelSerializer):
    """Serializer exposing every User field.

    NOTE(review): fields = "__all__" serializes all User columns, which may
    include sensitive data (e.g. password hash) — confirm intended exposure.
    """
    class Meta:
        model = User
        fields = "__all__"
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from sciplot.skeleton import fib
__author__ = "Simon Wehle"
__copyright__ = "Simon Wehle"
__license__ = "mit"
def test_fib():
    """Check fib on small known values and that a negative argument raises."""
    assert fib(1) == 1
    assert fib(2) == 1
    assert fib(7) == 13
    with pytest.raises(AssertionError):
        # fib is expected to reject non-positive input via an assert.
        fib(-10)
|
"""Run offline learning experiments."""
import functools
import os
from typing import Iterator, Tuple
from absl import app
from absl import flags
from absl import logging
from acme import types
from acme import wrappers
import gym
import jax
from ml_collections import config_flags
import numpy as np
import wandb
import yaml
from magi.projects.baselines import dataset_utils
config_flags.DEFINE_config_file("config")
flags.DEFINE_string("workdir", None, "Where to save log results")
flags.mark_flags_as_required(["config", "workdir"])
FLAGS = flags.FLAGS
def evaluate(actor, environment, eval_episodes=10):
    """Run *actor* for eval_episodes; return the mean D4RL-normalized score."""
    # Sync the actor's parameters with the learner before evaluating.
    actor.update(wait=True)
    avg_reward = 0.0
    for _ in range(eval_episodes):
        timestep = environment.reset()
        actor.observe_first(timestep)
        while not timestep.last():
            action = actor.select_action(timestep.observation)
            timestep = environment.step(action)
            actor.observe(action, timestep)
            avg_reward += timestep.reward
    avg_reward /= eval_episodes
    # Normalize against the environment's D4RL reference scores.
    d4rl_score = environment.get_normalized_score(avg_reward)
    logging.info("---------------------------------------")
    logging.info("Evaluation over %d episodes: %.3f", eval_episodes, d4rl_score)
    logging.info("---------------------------------------")
    return d4rl_score
def normalize(dataset):
    """Rescale dataset.rewards in place by the best-minus-worst trajectory return."""
    trajs = dataset_utils.split_into_trajectories(
        dataset.observations,
        dataset.actions,
        dataset.rewards,
        dataset.masks,
        dataset.dones_float,
        dataset.next_observations,
    )
    def compute_returns(traj):
        # Sum of rewards over a single trajectory.
        episode_return = 0
        for _, _, rew, _, _, _ in traj:
            episode_return += rew
        return episode_return
    trajs.sort(key=compute_returns)
    # Divide by the return range (best minus worst), then scale by 1000.
    dataset.rewards /= compute_returns(trajs[-1]) - compute_returns(trajs[0])
    dataset.rewards *= 1000.0
def _make_dataset_iterator(dataset, batch_size: int):
    """Yield batches sampled from *dataset*, forever."""
    while True:
        batch = dataset.sample(batch_size)
        yield batch
def make_env_and_dataset(
    env_name: str, seed: int, batch_size: int
) -> Tuple[gym.Env, Iterator[types.Transition]]:
    """Create a seeded, wrapped gym env and an infinite batch iterator over its D4RL data."""
    env = gym.make(env_name)
    env.seed(seed)
    env = wrappers.wrap_all(
        env,
        [
            wrappers.GymWrapper,
            wrappers.SinglePrecisionWrapper,
        ],
    )
    dataset = dataset_utils.D4RLDataset(env)
    if "antmaze" in env_name:
        # Antmaze-specific reward shift/scale used by this baseline.
        dataset.rewards = (dataset.rewards - 0.5) * 4.0
    elif "halfcheetah" in env_name or "walker2d" in env_name or "hopper" in env_name:
        # Locomotion tasks: normalize rewards by the trajectory-return range.
        normalize(dataset)
    return env, _make_dataset_iterator(dataset, batch_size)
def run(main):
    """Entry-point wrapper: wire JAX config into absl flags, then run *main* via absl."""
    jax.config.config_with_absl()
    app.run(functools.partial(_run_main, main=main))
def _run_main(argv, *, main):
    """Prepare workdir, seeding, wandb and config dump, then invoke *main*."""
    del argv
    # Create working directory
    os.makedirs(FLAGS.workdir, exist_ok=True)
    # Fix global numpy random seed
    np.random.seed(FLAGS.config.seed)
    # Initialize wandb if needed
    if FLAGS.config.log_to_wandb:
        wandb.init(config=FLAGS.config.to_dict(), dir=FLAGS.workdir)
    # Save configuration for reproducibility
    with open(os.path.join(FLAGS.workdir, "config.yaml"), "wt") as f:
        yaml.dump(FLAGS.config.to_dict(), f)
    main(config=FLAGS.config, workdir=FLAGS.workdir)
|
#!/usr/bin/env python
# Get the segments by calling GCProfile
#
# Author: Bingxin Lu
# Affiliation : National University of Singapore
# E-mail : bingxin@comp.nus.edu.sg
#
# Input:
# The genome sequence in FASTA format
#
# Output:
# The positions for a set of intervals
#
import os
import optparse
import subprocess
def GetGenomeSize(genomefile):
    """Return the total sequence length of a FASTA file.

    The first line must be a FASTA header (contains '>'); every subsequent
    line is counted as sequence after stripping surrounding whitespace.

    Fixes vs. the original:
    - ``fin.next()`` is Python 2 only; replaced with the builtin ``next()``.
    - The original opened the file twice (and leaked the first handle);
      header validation and counting now share one ``with`` block.

    Raises:
        AssertionError: if the first line is not a FASTA header.
    """
    genomesize = 0
    with open(genomefile, 'r') as fin:
        firstLine = next(fin)
        assert ('>' in firstLine), "This is not a standard FASTA file!"
        for line in fin:
            genomesize += len(line.strip())
    return genomesize
def Segment(tool, genomefile, halt=50, minsize=5000):
    """Run GCProfile on a genome and parse its HTML report into intervals.

    Args:
        tool: Directory prefix containing the GCProfile executable.
        genomefile: FASTA file of the genome sequence.
        halt: GCProfile halting parameter (-t).
        minsize: Minimum segment length (-i).

    Returns:
        List of 1-based (start, end) tuples covering the genome, derived from
        the segmentation points in ``<basename>_SegPoints.html``.
    """
    Tag = 1
    Start = 0
    Seg_Points = []
    Primary_Seg_Invals = []
    # Invoke the external GCProfile binary; it writes an HTML report next to
    # the input file.
    subprocess.call([tool + 'GCProfile', genomefile, '-t', str(halt), '-i', str(minsize)])
    Seg_File = genomefile.split('.')[0] + '_SegPoints.html'
    # Scrape the segmentation points out of the report's table rows.
    for Seg_Line in open(Seg_File):
        if '<TR align="center">' in Seg_Line:
            for Seg_Lines in Seg_Line.split('<TR align="center">')[1:]:
                Seg_Points.append(int(Seg_Lines.split('<div align="center">')[2].split('</div></td>')[0]))
    for Seg_Point in Seg_Points:
        # NOTE(review): both branches append (Start+1, Seg_Point) and advance
        # Start identically; only the last iteration additionally appends the
        # final interval up to the genome end. Confirm this matches the
        # intended behavior before simplifying.
        if Tag != len(Seg_Points):
            Primary_Seg_Invals.append((Start + 1, Seg_Point))
            Start = Seg_Point
        else:
            Primary_Seg_Invals.append((Start + 1, Seg_Point))
            Start = Seg_Point
            Primary_Seg_Invals.append((Start + 1, GetGenomeSize(genomefile)))
        Tag += 1
    return Primary_Seg_Invals
def WriteSegments(intervals, outfile):
    """Write (start, end) interval pairs to ``outfile``, one tab-separated
    pair per line.

    Uses a context manager so the handle is closed even if a write fails
    (the bare open/close pair in the original leaked the handle on error).
    """
    with open(outfile, 'w') as fout:
        for start, end in intervals:
            fout.write('%s\t%s\n' % (start, end))
if __name__ == '__main__':
    # Command-line driver: segment a genome with GCProfile and write the
    # resulting intervals to the requested output file.
    parser = optparse.OptionParser()
    parser.add_option("-f", "--fastafile", dest="fastafile", help="input fasta file of genome sequence")
    parser.add_option("-p", "--progdir", dest="progdir", help="the directory containing GCProfile")
    parser.add_option("-o", "--output", dest="output", help="output file for the merged intervals")
    parser.add_option("-t", "--halt", dest="halt", type="int", default=50, help="the halting parameter for segmentation")
    parser.add_option("-m", "--minl", dest="minl", type="int", default=5000, help="the minimum length of segments")
    options, args = parser.parse_args()

    intervals = Segment(options.progdir, options.fastafile, options.halt, options.minl)
    WriteSegments(intervals, options.output)
|
from datetime import datetime, timezone
from typing import Optional
from pydantic import BaseModel
class Schedule(BaseModel):
    """A named schedule entry with start/end times and a creation timestamp."""

    id: Optional[int]
    name: str
    datetime_start: datetime
    datetime_end: datetime
    createdAt: Optional[datetime]

    def createdAtToLocal(self) -> Optional[datetime]:
        """Convert the UTC creation time stored in the DB to local time.

        Returns None when no creation timestamp is set.
        """
        if self.createdAt is None:
            return None
        as_utc = self.createdAt.replace(tzinfo=timezone.utc)
        return as_utc.astimezone()
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Loss.
Cells of loss function. Loss function in machine learning is the target of the model.
It shows how well the model works on a dataset and the optimization target which the optimizer is searching.
"""
from .loss import L1Loss, MSELoss, SmoothL1Loss, \
SoftmaxCrossEntropyWithLogits, BCELoss, CosineEmbeddingLoss
__all__ = ['L1Loss', 'MSELoss', 'SmoothL1Loss',
'SoftmaxCrossEntropyWithLogits', 'BCELoss',
'CosineEmbeddingLoss']
|
import numpy as np
from collections import deque
from PIL import Image
import gym
from gym import spaces
import sys
import time
import cv2
from copy import copy
# Force OpenCV onto the plain CPU code path (presumably to avoid OpenCL
# issues in forked worker processes — TODO confirm).
cv2.ocl.setUseOpenCL(False)
class NoopResetEnv(gym.Wrapper):
    """Sample initial states by taking a random number of no-ops on reset.

    No-op is assumed to be action 0 (asserted against the env's action
    meanings).
    """

    def __init__(self, env, noop_max=30):
        # noop_max: upper bound on the random no-op count.
        # override_num_noops: may be set externally to force an exact count.
        gym.Wrapper.__init__(self, env)
        self.noop_max = noop_max
        self.override_num_noops = None
        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'

    def _reset(self):
        """Do no-op action for a number of steps in [1, noop_max]."""
        self.env.reset()
        if self.override_num_noops is not None:
            noops = self.override_num_noops
        else:
            noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)  # pylint: disable=E1101
        assert noops > 0
        obs = None
        for _ in range(noops):
            obs, _, done, _ = self.env.step(0)
            if done:
                # Episode ended during the no-ops; start again from a reset.
                obs = self.env.reset()
        return obs
class FireResetEnv(gym.Wrapper):
    """Take the FIRE action on reset, for games that are frozen until firing."""

    def __init__(self, env):
        gym.Wrapper.__init__(self, env)
        # Action 1 must be FIRE and the game must have at least 3 actions.
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3
    def _reset(self):
        # Press FIRE (action 1) then action 2 to get the game going;
        # re-reset if either step ends the episode.
        # NOTE(review): if the second step returns done, the obs from that
        # step is still returned after the reset — confirm this is intended.
        self.env.reset()
        obs, _, done, _ = self.env.step(1)
        if done:
            self.env.reset()
        obs, _, done, _ = self.env.step(2)
        if done:
            self.env.reset()
        return obs
class EpisodicLifeEnv(gym.Wrapper):
    """Make end-of-life == end-of-episode, but only reset on true game over.

    Done by DeepMind for the DQN and co. since it helps value estimation.
    """

    def __init__(self, env):
        gym.Wrapper.__init__(self, env)
        self.lives = 0           # life count observed on the previous step
        self.was_real_done = True  # whether the last `done` was a true game over

    def _step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.was_real_done = done
        # check current lives, make loss of life terminal,
        # then update lives to handle bonus lives
        lives = self.env.unwrapped.ale.lives()
        if lives < self.lives and lives > 0:
            # for Qbert sometimes we stay in lives == 0 condition for a few frames
            # so it's important to keep lives > 0, so that we only reset once
            # the environment advertises done.
            done = True
        self.lives = lives
        return obs, reward, done, info

    def _reset(self):
        """Reset only when lives are exhausted.

        This way all states are still reachable even though lives are episodic,
        and the learner need not know about any of this behind-the-scenes.
        """
        if self.was_real_done:
            obs = self.env.reset()
        else:
            # no-op step to advance from terminal/lost life state
            obs, _, _, _ = self.env.step(0)
        self.lives = self.env.unwrapped.ale.lives()
        return obs
class MaxAndSkipEnv(gym.Wrapper):
    """Return only every `skip`-th frame, max-pooled over the last
    `max_over` raw frames (removes Atari sprite flicker)."""

    def __init__(self, env, skip=4, max_over=2):
        gym.Wrapper.__init__(self, env)
        # most recent raw observations (for max pooling across time steps)
        self._obs_buffer = deque(maxlen=max_over)
        self._skip = skip

    def _step(self, action):
        """Repeat action, sum reward, and max over last observations."""
        total_reward = 0.0
        done = None
        for _ in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            self._obs_buffer.append(obs)
            total_reward += reward
            if done:
                break
        # Element-wise max over the buffered frames.
        max_frame = np.max(np.stack(self._obs_buffer), axis=0)
        return max_frame, total_reward, done, info

    def _reset(self):
        """Clear past frame buffer and init. to first obs. from inner env."""
        self._obs_buffer.clear()
        obs = self.env.reset()
        self._obs_buffer.append(obs)
        return obs
class ClipRewardEnv(gym.RewardWrapper):
    def _reward(self, reward):
        """Bin reward to {+1, 0, -1} by its sign."""
        return np.sign(reward)
class ClipNegativeRewardEnv(gym.RewardWrapper):
    def _reward(self, reward):
        """Pass non-negative rewards through unchanged; clip negative rewards to 0."""
        return reward if reward >= 0 else 0
class NoRewardEnv(gym.RewardWrapper):
    def _reward(self, reward):
        # Suppress all environment reward (e.g. for pure intrinsic-reward training).
        return 0
class WarpFrame(gym.ObservationWrapper):
    """Convert frames to res x res greyscale, as in the Nature DQN paper."""

    def __init__(self, env, res=84):
        gym.ObservationWrapper.__init__(self, env)
        self.res = res
        # NOTE(review): the space advertises high=255 but _observation returns
        # values scaled to [0, 1] — confirm downstream code expects floats.
        self.observation_space = spaces.Box(low=0, high=255, shape=(self.res, self.res, 1))

    def _observation(self, obs):
        # RGB -> greyscale with ITU-R 601 luma weights.
        frame = np.dot(obs.astype('float32'), np.array([0.299, 0.587, 0.114], 'float32'))
        frame = np.array(Image.fromarray(frame).resize((self.res, self.res),
                                                       resample=Image.BILINEAR), dtype=np.uint8)
        # Add a channel axis and normalize to [0, 1].
        return frame.reshape((self.res, self.res, 1)) / 255.0
class ResizeFrame(gym.ObservationWrapper):
    """Resize single-channel [0, 1] frames to res x res (default 40x40).

    (The original docstring claimed 84x84; the default here is 40.)
    """

    def __init__(self, env, res=40):
        gym.ObservationWrapper.__init__(self, env)
        self.res = res
        self.observation_space = spaces.Box(low=0, high=1, shape=(self.res, self.res, 1))

    def _observation(self, obs):
        # Scale [0, 1] input back to [0, 255] so PIL can build an image from it.
        frame = obs.astype("float32") * 255
        frame = frame[:, :, 0]
        # frame = np.concatenate([frame, frame, frame], axis=2)
        # frame = np.dot(frame, np.array([0.299, 0.587, 0.114], 'float32'))
        frame = np.array(Image.fromarray(frame).resize((self.res, self.res),
                                                       resample=Image.BILINEAR), dtype=np.uint8)
        # Re-normalize to [0, 1] and restore the channel axis.
        return frame.reshape((self.res, self.res, 1)) / 255.0
class FrameStack(gym.Wrapper):
    """Buffer the last k observations and stack them along the channel axis."""

    def __init__(self, env, k):
        gym.Wrapper.__init__(self, env)
        self.k = k
        self.frames = deque([], maxlen=k)
        shp = env.observation_space.shape
        assert shp[2] == 1  # can only stack 1-channel frames
        self.observation_space = spaces.Box(low=0, high=255, shape=(shp[0], shp[1], k))

    def _reset(self):
        """Clear buffer and re-fill by duplicating the first observation."""
        ob = self.env.reset()
        for _ in range(self.k): self.frames.append(ob)
        return self._observation()

    def _step(self, action):
        ob, reward, done, info = self.env.step(action)
        self.frames.append(ob)
        return self._observation(), reward, done, info

    def _observation(self):
        # Channel-wise concatenation of the k buffered frames.
        assert len(self.frames) == self.k
        return np.concatenate(self.frames, axis=2)
class GreyscaleRender(gym.Wrapper):
    """Expose an rgb-style render() built from the latest (greyscale) observation."""

    def __init__(self, env):
        gym.Wrapper.__init__(self, env)
        self.latest_obs = None  # most recent observation, cached for render()

    def _step(self, action):
        stuff = self.env.step(action)
        self.latest_obs = stuff[0]
        return stuff

    def render(self, mode="human", close=False):
        if mode == "human":
            self.unwrapped._render(mode=mode, close=close)
        else:
            # Replicate the last greyscale channel into 3 channels and
            # rescale — assumes observations are in [0, 1] (TODO confirm).
            grid = self.latest_obs
            grid = grid[:, :, -1]
            grid = np.stack([grid for _ in range(3)], axis=2)
            return grid * 255
def wrap_maze(env):
    """Wrap a maze env: resize observations to 40x40, stack a single frame,
    and expose greyscale rendering."""
    wrapped = ResizeFrame(env, res=40)
    wrapped = FrameStack(wrapped, 1)
    wrapped = GreyscaleRender(wrapped)
    print("Wrapping maze to be (40, 40)")
    return wrapped
def wrap_deepmind(env, episode_life=True, clip_rewards=True, stack=4):
    """Configure environment for DeepMind-style Atari (42x42 variant).

    Args:
        env: A NoFrameskip Atari env (asserted; manual skipping is applied here).
        episode_life: Treat loss-of-life as episode end (helps value estimation).
        clip_rewards: Clip rewards to {-1, 0, +1} by sign.
        stack: Number of frames to stack; values <= 1 disable stacking.
    """
    assert 'NoFrameskip' in env.spec.id  # required for DeepMind-style skip
    if episode_life:
        env = EpisodicLifeEnv(env)
    # env = NoopResetEnv(env, noop_max=30)
    env = MaxAndSkipEnv(env, skip=4)
    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    env = WarpFrame(env, res=42)
    if clip_rewards:
        env = ClipRewardEnv(env)
    if stack > 1:
        # Bug fix: honor the `stack` argument (it was hard-coded to 4).
        env = FrameStack(env, stack)
    print("Wrapping environment with Deepmind-style setttings but 42 x 42.")
    env = GreyscaleRender(env)
    return env
# Screen resolutions accepted by Vizdoom, as "WIDTHxHEIGHT" strings.
resolutions = ['160x120', '200x125', '200x150', '256x144', '256x160', '256x192', '320x180', '320x200',
               '320x240', '320x256', '400x225', '400x250', '400x300', '512x288', '512x320', '512x384',
               '640x360', '640x400', '640x480', '800x450', '800x500', '800x600', '1024x576', '1024x640',
               '1024x768', '1280x720', '1280x800', '1280x960', '1280x1024', '1400x787', '1400x875',
               '1400x1050', '1600x900', '1600x1000', '1600x1200', '1920x1080']

__all__ = ['SetResolution']
def SetResolution(target_resolution):
    """Build a Doom wrapper class that forces `target_resolution`.

    Returns the wrapper *class* (not an instance); instantiate it with an
    env. Raises gym.error.Error at wrap time if the resolution string is
    not in `resolutions`.
    """

    class SetResolutionWrapper(gym.Wrapper):
        """
        Doom wrapper to change screen resolution
        """

        def __init__(self, env):
            super(SetResolutionWrapper, self).__init__(env)
            if target_resolution not in resolutions:
                raise gym.error.Error(
                    'Error - The specified resolution "{}" is not supported by Vizdoom.'.format(target_resolution))
            parts = target_resolution.lower().split('x')
            width = int(parts[0])
            height = int(parts[1])
            screen_res = target_resolution
            # Push the resolution down into the base env and mirror the
            # resulting observation space on this wrapper.
            self.screen_width, self.screen_height, self.unwrapped.screen_resolution = width, height, screen_res
            self.unwrapped.observation_space = gym.spaces.Box(low=0, high=255,
                                                              shape=(self.screen_height, self.screen_width, 3))
            self.observation_space = self.unwrapped.observation_space

    return SetResolutionWrapper
# Adapters
from gym.spaces import Discrete, MultiDiscrete
class DiscreteToMultiDiscrete(Discrete):
    """
    Adapter that adapts the MultiDiscrete action space to a Discrete action space of any size
    The converted action can be retrieved by calling the adapter with the discrete action
        discrete_to_multi_discrete = DiscreteToMultiDiscrete(multi_discrete)
        discrete_action = discrete_to_multi_discrete.sample()
        multi_discrete_action = discrete_to_multi_discrete(discrete_action)
    It can be initialized using 3 configurations:
    Configuration 1) - DiscreteToMultiDiscrete(multi_discrete) [2nd param is empty]
        Would adapt to a Discrete action space of size (1 + nb of discrete in MultiDiscrete)
        where
            0 returns NOOP [ 0, 0, 0, ...]
            1 returns max for the first discrete space [max, 0, 0, ...]
            2 returns max for the second discrete space [ 0, max, 0, ...]
            etc.
    Configuration 2) - DiscreteToMultiDiscrete(multi_discrete, list_of_discrete) [2nd param is a list]
        Would adapt to a Discrete action space of size (1 + nb of items in list_of_discrete)
        e.g.
        if list_of_discrete = [0, 2]
            0 returns NOOP [ 0, 0, 0, ...]
            1 returns max for first discrete in list [max, 0, 0, ...]
            2 returns max for second discrete in list [ 0, 0, max, ...]
            etc.
    Configuration 3) - DiscreteToMultiDiscrete(multi_discrete, discrete_mapping) [2nd param is a dict]
        Would adapt to a Discrete action space of size (nb_keys in discrete_mapping)
        where discrete_mapping is a dictionary in the format { discrete_key: multi_discrete_mapping }
        e.g. for the Nintendo Game Controller [ [0,4], [0,1], [0,1] ] a possible mapping might be;
        mapping = {
            0:  [0, 0, 0],  # NOOP
            1:  [1, 0, 0],  # Up
            2:  [3, 0, 0],  # Down
            3:  [2, 0, 0],  # Right
            4:  [2, 1, 0],  # Right + A
            5:  [2, 0, 1],  # Right + B
            6:  [2, 1, 1],  # Right + A + B
            7:  [4, 0, 0],  # Left
            8:  [4, 1, 0],  # Left + A
            9:  [4, 0, 1],  # Left + B
            10: [4, 1, 1],  # Left + A + B
            11: [0, 1, 0],  # A only
            12: [0, 0, 1],  # B only,
            13: [0, 1, 1],  # A + B
        }
    """

    def __init__(self, multi_discrete, options=None):
        # assert isinstance(multi_discrete, MultiDiscrete)
        self.multi_discrete = multi_discrete
        self.num_discrete_space = self.multi_discrete.n

        # Config 1: NOOP + one "max" action per discrete sub-space.
        if options is None:
            self.n = self.num_discrete_space + 1  # +1 for NOOP at beginning
            self.mapping = {i: [0] * self.num_discrete_space for i in range(self.n)}
            for i in range(self.num_discrete_space):
                self.mapping[i + 1][i] = self.multi_discrete.high[i]
        # Config 2: NOOP + one "max" action per listed sub-space.
        elif isinstance(options, list):
            assert len(options) <= self.num_discrete_space
            self.n = len(options) + 1  # +1 for NOOP at beginning
            self.mapping = {i: [0] * self.num_discrete_space for i in range(self.n)}
            for i, disc_num in enumerate(options):
                assert disc_num < self.num_discrete_space
                self.mapping[i + 1][disc_num] = self.multi_discrete.high[disc_num]
        # Config 3: explicit {discrete_key: multi_discrete_action} mapping.
        elif isinstance(options, dict):
            self.n = len(options.keys())
            self.mapping = options
        # Unknown parameter provided.
        else:
            # Bug fix: the original raised a bare, undefined `Error` (NameError);
            # use gym's error type, consistent with the rest of this file.
            raise gym.error.Error('DiscreteToMultiDiscrete - Invalid parameter provided.')

    def __call__(self, discrete_action):
        """Translate a discrete action index into its MultiDiscrete action."""
        return self.mapping[discrete_action]
# Discrete Action Wrapper
# Constants
NUM_ACTIONS = 43
# Per-level whitelists of Vizdoom action indices, indexed by level id.
# (Original comments labelled entries 11-17 all as "10"; renumbered here.)
ALLOWED_ACTIONS = [
    [0, 10, 11],                                 # 0 - Basic
    [0, 10, 11, 13, 14, 15],                     # 1 - Corridor
    [0, 14, 15],                                 # 2 - DefendCenter
    [0, 14, 15],                                 # 3 - DefendLine
    [13, 14, 15],                                # 4 - HealthGathering
    [13, 14, 15],                                # 5 - MyWayHome
    [0, 14, 15],                                 # 6 - PredictPosition
    [10, 11],                                    # 7 - TakeCover
    [x for x in range(NUM_ACTIONS) if x != 33],  # 8 - Deathmatch
    [13, 14, 15],                                # 9 - MyWayHomeFixed
    [13, 14, 15],                                # 10 - MyWayHomeFixed15
    [13, 14, 15],                                # 11 - MyWayHomeFixed15 variant
    [13, 14, 15],                                # 12 - MyWayHomeFixed15 variant
    [13, 14, 15],                                # 13 - MyWayHomeFixed15 variant
    [13, 14, 15],                                # 14 - MyWayHomeFixed15 variant
    [13, 14, 15],                                # 15 - MyWayHomeFixed15 variant
    [13, 14, 15],                                # 16 - MyWayHomeFixed15 variant
    [13, 14, 15]                                 # 17
]
def ToDiscrete(config):
    """Build a Doom wrapper class that exposes a Discrete action space.

    Config can be 'minimal', 'constant-7', 'constant-17', 'full'.
    Returns the wrapper *class*; instantiate it with an env.
    """

    class ToDiscreteWrapper(gym.Wrapper):
        """
        Doom wrapper to convert MultiDiscrete action space to Discrete
        config:
            - minimal - Will only use the levels' allowed actions (+ NOOP)
            - constant-7 - Will use the 7 minimum actions (+NOOP) to complete all levels
            - constant-17 - Will use the 17 most common actions (+NOOP) to complete all levels
            - full - Will use all available actions (+ NOOP)
        list of commands:
            - minimal:
                Basic:              NOOP, ATTACK, MOVE_RIGHT, MOVE_LEFT
                Corridor:           NOOP, ATTACK, MOVE_RIGHT, MOVE_LEFT, MOVE_FORWARD, TURN_RIGHT, TURN_LEFT
                DefendCenter        NOOP, ATTACK, TURN_RIGHT, TURN_LEFT
                DefendLine:         NOOP, ATTACK, TURN_RIGHT, TURN_LEFT
                HealthGathering:    NOOP, MOVE_FORWARD, TURN_RIGHT, TURN_LEFT
                MyWayHome:          NOOP, MOVE_FORWARD, TURN_RIGHT, TURN_LEFT
                PredictPosition:    NOOP, ATTACK, TURN_RIGHT, TURN_LEFT
                TakeCover:          NOOP, MOVE_RIGHT, MOVE_LEFT
                Deathmatch:         NOOP, ALL COMMANDS (Deltas are limited to [0,1] range and will not work properly)
            - constant-7: NOOP, ATTACK, MOVE_RIGHT, MOVE_LEFT, MOVE_FORWARD, TURN_RIGHT, TURN_LEFT, SELECT_NEXT_WEAPON
            - constant-17: NOOP, ATTACK, JUMP, CROUCH, TURN180, RELOAD, SPEED, STRAFE, MOVE_RIGHT, MOVE_LEFT, MOVE_BACKWARD
                           MOVE_FORWARD, TURN_RIGHT, TURN_LEFT, LOOK_UP, LOOK_DOWN, SELECT_NEXT_WEAPON, SELECT_PREV_WEAPON
        """

        def __init__(self, env):
            super(ToDiscreteWrapper, self).__init__(env)
            if config == 'minimal':
                allowed_actions = ALLOWED_ACTIONS[self.unwrapped.level]
            elif config == 'constant-7':
                allowed_actions = [0, 10, 11, 13, 14, 15, 31]
            elif config == 'constant-17':
                allowed_actions = [0, 2, 3, 4, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 31, 32]
            elif config == 'full':
                allowed_actions = None
            else:
                raise gym.error.Error(
                    'Invalid configuration. Valid options are "minimal", "constant-7", "constant-17", "full"')
            self.action_space = DiscreteToMultiDiscrete(self.action_space, allowed_actions)

        def _step(self, action):
            # NOTE(review): this calls the inner env's private `_step`
            # (ToDiscreteMario calls public `step`) — confirm which API the
            # wrapped env actually exposes.
            return self.env._step(self.action_space(action))

    return ToDiscreteWrapper
def wrap_vizdoom(env, stack=4, action_repeat=4):
    """Wrap a Vizdoom env: action repeat (no frame max), 42x42 greyscale
    frames, and negative rewards clipped to 0.

    Note: `stack` is currently unused (frame stacking is commented out).
    """
    # resolution_wrapper = SetResolution("160x120")
    # env = resolution_wrapper(env)
    if action_repeat > 1:
        env = MaxAndSkipEnv(env, skip=action_repeat, max_over=1)
    env = WarpFrame2(env, res=42)
    env = ClipNegativeRewardEnv(env)
    # env = FrameStack(env, 4)
    # env = GreyscaleRender(env)
    return env
def ToDiscreteMario():
    """Build a Mario wrapper class that exposes a 14-action Discrete space.

    Returns the wrapper *class*; instantiate it with an env.
    """

    class ToDiscreteWrapper(gym.Wrapper):
        """
        Wrapper to convert MultiDiscrete action space to Discrete
        Only supports one config, which maps to the most logical discrete space possible
        """

        def __init__(self, env):
            super(ToDiscreteWrapper, self).__init__(env)
            # Button layout: [Up, Left, Down, Right, A, B]
            mapping = {
                0: [0, 0, 0, 0, 0, 0],  # NOOP
                1: [1, 0, 0, 0, 0, 0],  # Up
                2: [0, 0, 1, 0, 0, 0],  # Down
                3: [0, 1, 0, 0, 0, 0],  # Left
                4: [0, 1, 0, 0, 1, 0],  # Left + A
                5: [0, 1, 0, 0, 0, 1],  # Left + B
                6: [0, 1, 0, 0, 1, 1],  # Left + A + B
                7: [0, 0, 0, 1, 0, 0],  # Right
                8: [0, 0, 0, 1, 1, 0],  # Right + A
                9: [0, 0, 0, 1, 0, 1],  # Right + B
                10: [0, 0, 0, 1, 1, 1],  # Right + A + B
                11: [0, 0, 0, 0, 1, 0],  # A
                12: [0, 0, 0, 0, 0, 1],  # B
                13: [0, 0, 0, 0, 1, 1],  # A + B
            }
            self.action_space = DiscreteToMultiDiscrete(self.action_space, mapping)

        def _step(self, action):
            return self.env.step(self.action_space(action))

    return ToDiscreteWrapper
# FROm https://github.com/openai/large-scale-curiosity/blob/master/wrappers.py
import itertools
class LimitedDiscreteActions(gym.ActionWrapper):
    KNOWN_BUTTONS = {"A", "B"}
    KNOWN_SHOULDERS = {"L", "R"}
    '''
    Reproduces the action space from curiosity paper.
    Builds a small Discrete action space out of arrow keys, A/B buttons and
    shoulder buttons, and converts each discrete action into a button mask.
    '''

    def __init__(self, env, all_buttons, whitelist=KNOWN_BUTTONS | KNOWN_SHOULDERS):
        gym.ActionWrapper.__init__(self, env)
        self._num_buttons = len(all_buttons)
        # Indices of whitelisted A/B buttons in the console's button list.
        button_keys = {i for i in range(len(all_buttons)) if all_buttons[i] in whitelist & self.KNOWN_BUTTONS}
        # Single buttons (as 1-tuples via zip) plus all 2-button combinations;
        # the leading () is the "no button" entry.
        buttons = [(), *zip(button_keys), *itertools.combinations(button_keys, 2)]
        shoulder_keys = {i for i in range(len(all_buttons)) if all_buttons[i] in whitelist & self.KNOWN_SHOULDERS}
        shoulders = [(), *zip(shoulder_keys), *itertools.permutations(shoulder_keys, 2)]
        arrows = [(), (4,), (5,), (6,), (7,)]  # (), up, down, left, right
        acts = []
        acts += arrows
        acts += buttons[1:]
        # Combine the last two arrows (left/right) with each button combo.
        acts += [a + b for a in arrows[-2:] for b in buttons[1:]]
        self._actions = acts
        self.action_space = gym.spaces.Discrete(len(self._actions))

    def action(self, a):
        """Convert a discrete action index into a 0/1 mask over all buttons."""
        mask = np.zeros(self._num_buttons)
        for i in self._actions[a]:
            mask[i] = 1
        return mask
# From https://github.com/pathak22/noreward-rl/blob/master/src/env_wrapper.py
class MarioEnv(gym.Wrapper):
    def __init__(self, env=None, tilesEnv=False):
        """Reset mario environment without actually restarting fceux everytime.
        This speeds up unrolling by approximately 10 times.
        """
        super(MarioEnv, self).__init__(env)
        self.resetCount = -1  # -1 forces a hard fceux restart on next reset
        # reward is distance travelled. So normalize it with total distance
        # https://github.com/ppaquette/gym-super-mario/blob/master/ppaquette_gym_super_mario/lua/super-mario-bros.lua
        # However, we will not use this reward at all. It is only for completion.
        self.maxDistance = 3000.0
        self.tilesEnv = tilesEnv

    def _reset(self):
        if self.resetCount < 0:
            print('\nDoing hard mario fceux reset (4 seconds wait) !')
            sys.stdout.flush()
            self.env.reset()
            time.sleep(4)
        obs, _, _, info = self.env.step(7)  # take right once to start game
        if info.get('ignore', False):  # assuming this happens only in beginning
            self.resetCount = -1
            self.env.close()
            return self._reset()
        self.resetCount = info.get('iteration', -1)
        if self.tilesEnv:
            return obs
        # Crop the HUD borders when using raw pixels.
        return obs[24:-12, 8:-8, :]

    def _step(self, action):
        obs, reward, done, info = self.env.step(action)
        # NOTE(review): default True here vs default False in _reset — a step
        # whose info lacks 'ignore' is treated as ignorable; confirm intended.
        if info.get('ignore', True):
            return self.reset(), 0, False, info
        # print('info:', info)
        # A new iteration number from the emulator signals the episode ended.
        done = info['iteration'] > self.resetCount
        reward = float(reward) / self.maxDistance  # note: we do not use this rewards at all.
        if self.tilesEnv:
            return obs, reward, done, info
        return obs[24:-12, 8:-8, :], reward, done, info

    def _close(self):
        self.resetCount = -1
        return self.env.close()
def wrap_mario(env, stack=4, buttons=None):
    """Wrap a Mario env: 4-step action repeat, 42x42 greyscale frames,
    curiosity-paper discrete actions, and all extrinsic reward removed.

    Note: `stack` is currently unused (frame stacking is commented out).
    `buttons` is the console's full button list, passed to
    LimitedDiscreteActions.
    """
    # env = MarioEnv(env)
    # buttons = env.BUTTONS
    env = MaxAndSkipEnv(env, skip=4, max_over=1)
    env = WarpFrame(env, res=42)
    # env = FrameStack(env, 4)
    env = GreyscaleRender(env)
    # discrete_action_wrapper = ToDiscreteMario()
    env = LimitedDiscreteActions(env, buttons)
    # env = discrete_action_wrapper(env)
    # No reward
    env = NoRewardEnv(env)
    print("Wrapping mario env")
    return env
class MontezumaInfoWrapper(gym.Wrapper):
    """Track which rooms the agent visits and report them in episode info."""

    def __init__(self, env, room_address):
        # room_address: RAM byte index that holds the current room id.
        super(MontezumaInfoWrapper, self).__init__(env)
        self.room_address = room_address
        self.visited_rooms = set()

    def get_current_room(self):
        """Read the current room id out of the Atari RAM."""
        ram = unwrap(self.env).ale.getRAM()
        assert len(ram) == 128
        return int(ram[self.room_address])

    def step(self, action):
        obs, rew, done, info = self.env.step(action)
        self.visited_rooms.add(self.get_current_room())
        if done:
            # Attach the visited-room set to the episode summary, then reset it.
            if 'episode' not in info:
                info['episode'] = {}
            info['episode'].update(visited_rooms=self.visited_rooms.copy())
            self.visited_rooms.clear()
        return obs, rew, done, info
class StickyActionEnv(gym.Wrapper):
    """With probability `p`, repeat the previous action instead of the one
    the agent requested (standard Atari "sticky actions")."""

    def __init__(self, env, p=0.25):
        super(StickyActionEnv, self).__init__(env)
        self.p = p
        self.last_action = 0

    def reset(self):
        self.last_action = 0
        return self.env.reset()

    def step(self, action):
        # Occasionally ignore the agent and replay the previous action.
        if self.unwrapped.np_random.uniform() < self.p:
            action = self.last_action
        self.last_action = action
        return self.env.step(action)
def unwrap(env):
    """Strip wrapper layers from an env.

    Prefers gym's `unwrapped` shortcut; otherwise recurses through `.env`
    and `.leg_env` attributes until a bare object remains.
    """
    if hasattr(env, "unwrapped"):
        return env.unwrapped
    for attr in ("env", "leg_env"):
        if hasattr(env, attr):
            return unwrap(getattr(env, attr))
    return env
class WarpFrame2(gym.ObservationWrapper):
    """Convert observations to res x res greyscale uint8 frames via OpenCV,
    as in the Nature DQN paper (default 84x84)."""

    def __init__(self, env, res=84):
        gym.ObservationWrapper.__init__(self, env)
        self.width = res
        self.height = res
        self.observation_space = spaces.Box(low=0, high=255,
                                            shape=(self.height, self.width, 1), dtype=np.uint8)

    def observation(self, frame):
        # RGB -> greyscale, then resize; INTER_AREA is suited to downscaling.
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
        # Restore the trailing channel axis.
        return frame[:, :, None]
def make_montezuma(env, max_episode_steps=4500):
    """Wrap a NoFrameskip Atari env for Montezuma's Revenge experiments.

    Applies sticky actions, 4-step frame skip with max-pooling, room-visit
    tracking (RAM address 3), and 84x84 greyscale frames. The time limit is
    multiplied by 4 to compensate for the frame skip.
    """
    # env = gym.make(env_id)
    env._max_episode_steps = max_episode_steps * 4
    assert 'NoFrameskip' in env.spec.id
    env = StickyActionEnv(env)
    env = MaxAndSkipEnv(env, skip=4)
    env = MontezumaInfoWrapper(env, room_address=3)
    env = WarpFrame2(env)
    return env
def make_montezuma_ram(env, max_episode_steps=4500):
    """Same as make_montezuma but for RAM observations: no frame warping."""
    # env = gym.make(env_id)
    env._max_episode_steps = max_episode_steps * 4
    assert 'NoFrameskip' in env.spec.id
    env = StickyActionEnv(env)
    env = MaxAndSkipEnv(env, skip=4)
    env = MontezumaInfoWrapper(env, room_address=3)
    # env = WarpFrame2(env)
    return env
def make_atari(env, max_episode_steps=4500):
    """Wrap a generic NoFrameskip Atari env: sticky actions, 4-step skip with
    max-pooling, and 84x84 greyscale frames (no room tracking)."""
    # env = gym.make(env_id)
    env._max_episode_steps = max_episode_steps * 4
    assert 'NoFrameskip' in env.spec.id
    env = StickyActionEnv(env)
    env = MaxAndSkipEnv(env, skip=4)
    # env = MontezumaInfoWrapper(env, room_address=3)
    env = WarpFrame2(env)
    return env
|
# Public submodules re-exported by this package.
__all__ = ["encoder", "mldecoder", "rltrain", "crf"]
|
# %%
import numpy as np
from rrt_planner.manipulation_station_collision_checker import \
ManipulationStationCollisionChecker
from rrt_planner.robot import Range, ConfigurationSpace
from rrt_planner.rrt_planning import Problem
class IiwaProblem(Problem):
    """RRT planning problem for a 7-DOF IIWA arm in the manipulation station.

    Builds the arm's joint-limit configuration space and delegates collision
    checks to ManipulationStationCollisionChecker, with the gripper opening
    and cupboard door angles held fixed.
    """

    def __init__(self,
                 q_start: np.array,
                 q_goal: np.array,
                 gripper_setpoint: float,
                 left_door_angle: float,
                 right_door_angle: float,
                 is_visualizing=False):
        # Fixed environment parameters used in every collision query.
        self.gripper_setpoint = gripper_setpoint
        self.left_door_angle = left_door_angle
        self.right_door_angle = right_door_angle
        self.is_visualizing = is_visualizing

        self.collision_checker = ManipulationStationCollisionChecker(
            is_visualizing=is_visualizing)

        # Construct configuration space for IIWA.
        plant = self.collision_checker.plant
        nq = 7
        joint_limits = np.zeros((nq, 2))
        for i in range(nq):
            joint = plant.GetJointByName("iiwa_joint_%i" % (i + 1))
            # NOTE(review): assigning the limit arrays into scalar slots
            # assumes each joint has exactly one position — confirm.
            joint_limits[i, 0] = joint.position_lower_limits()
            joint_limits[i, 1] = joint.position_upper_limits()

        range_list = []
        for joint_limit in joint_limits:
            range_list.append(Range(joint_limit[0], joint_limit[1]))

        def l2_distance(q: tuple):
            # Euclidean norm of a configuration tuple.
            sum = 0
            for q_i in q:
                sum += q_i**2
            return np.sqrt(sum)

        # Maximum per-joint step between tree nodes (2 degrees per joint;
        # the original comment said "three degrees").
        max_steps = nq * [np.pi / 180 * 2]
        cspace_iiwa = ConfigurationSpace(range_list, l2_distance, max_steps)

        # Call base class constructor.
        Problem.__init__(
            self,
            x=10,  # not used.
            y=10,  # not used.
            robot=None,  # not used.
            obstacles=None,  # not used.
            start=tuple(q_start),
            goal=tuple(q_goal),
            cspace=cspace_iiwa)

    def collide(self, configuration):
        """Return True if the arm at `configuration` is in collision."""
        q = np.array(configuration)
        return self.collision_checker.ExistsCollision(q, self.gripper_setpoint,
                                                      self.left_door_angle,
                                                      self.right_door_angle)

    def run_planner(self, method: str):
        """Plan, smooth, and interpolate a collision-free path.

        Args:
            method: 'rrt' or 'birrt'.

        Returns:
            The interpolated, smoothed path as a list of configurations, or
            None if planning failed or the smoothed path collides.
        """
        path = None
        if method == 'rrt':
            path = self.rrt_planning()
        elif method == 'birrt':
            path = self.bidirectional_rrt_planning()
        else:
            raise NotImplementedError

        if path is None:
            print('No path found')
            return None
        else:
            print(
                'Path found with ' + str(len(path) - 1)
                + ' movements of distance ', self.path_distance(path))
            smooth_path = self.smooth_path(path)
            print(
                'Smoothed path found with ' + str(len(smooth_path) - 1)
                + ' movements of distance ', self.path_distance(smooth_path))
            # interpolated smooth path
            spath = []
            for i in range(1, len(smooth_path)):
                spath.extend(
                    self.cspace.path(smooth_path[i - 1], smooth_path[i]))
            # make sure path is collision free
            if any([self.collide(c) for c in spath]):
                print('Collision in smoothed path')
                return None
            return spath

    def visualize_path(self, path):
        """Step through `path` in meshcat, waiting for user input each step."""
        for q in path:
            q = np.array(q)
            self.collision_checker.DrawStation(q, self.gripper_setpoint,
                                               self.left_door_angle,
                                               self.right_door_angle)
            input("next?")
|
from time import strftime
from datetime import datetime, timezone
import os
# this class logs everything that happens - detections, coordinates (if supplied) and nozzle
# will also log errors and framerates
class Logger:
    """Append-only text logger that timestamps every line with UTC time.

    Writes to a file named `<YYYYmmdd-HHMMSS>_<name>` in `saveDir`, keeps an
    in-memory copy of all lines, and can mirror lines to a per-video logfile.
    """

    def __init__(self, name, saveDir):
        # Prefix the log name with the creation timestamp so runs don't clash.
        self.name = strftime("%Y%m%d-%H%M%S_") + name
        self.saveDir = saveDir
        if not os.path.exists(self.saveDir):
            os.makedirs(self.saveDir)
        self.savePath = os.path.join(self.saveDir, self.name)
        self.logList = []  # in-memory copy of every line written

    def log_line(self, line, verbose=False):
        """Append a UTC-timestamped line to the main log (and stdout if verbose)."""
        self.line = str(datetime.now(timezone.utc)) + " " + line + "\n"
        if verbose:
            print(line)
        with open(self.savePath, 'a+') as file:
            file.write(self.line)
        self.logList.append(self.line)

    def log_line_video(self, line, verbose):
        """Log to the main log AND the current video log.

        NOTE(review): requires new_video_logfile() to have been called first,
        otherwise self.videoLog is unset and this raises AttributeError.
        """
        self.log_line(line, verbose=False)
        self.videoLine = str(datetime.now(timezone.utc)) + " " + line + "\n"
        if verbose:
            print(line)
        with open(self.videoLog, 'a+') as file:
            file.write(self.videoLine)

    def new_video_logfile(self, name):
        """Start mirroring video-related lines into the file `name`."""
        self.videoLog = name
        self.log_line_video('NEW VIDEO LOG CREATED {}'.format(name), verbose=True)
|
import discord
import ast
from discord.ext import commands
def insert_returns(body):
    """Rewrite the trailing statement of a function body into a `return`.

    Used so the final expression of an eval'd snippet becomes the wrapper
    coroutine's return value. Recurses into trailing `if` branches and
    `with` / `async with` bodies.

    Improvement over the original: also handles `ast.AsyncWith`, which the
    original missed (relevant because the snippet is wrapped in `async def`).

    Args:
        body: A list of ast statement nodes; mutated in place.
    """
    last = body[-1]
    # A bare trailing expression becomes `return <expr>`.
    if isinstance(last, ast.Expr):
        body[-1] = ast.Return(last.value)
        ast.fix_missing_locations(body[-1])
    # Recurse into both branches of a trailing `if`.
    if isinstance(last, ast.If):
        insert_returns(last.body)
        insert_returns(last.orelse)
    # Recurse into a trailing (async) with block.
    if isinstance(last, (ast.With, ast.AsyncWith)):
        insert_returns(last.body)
class Debug(commands.Cog):
    """Owner-only debugging commands: manual event dispatch and live eval."""

    def __init__(self, bot: commands.Bot):
        self.bot: commands.Bot = bot

    @commands.command(name='dispatch', aliases=['event'])
    async def manually_trigger_event(self, ctx: commands.Context, event: str) -> None:
        """Manually fire a bot event, using the invoking guild/author as payload."""
        # Accept names with or without the 'on_' prefix.
        event = event.lower().replace('on_', '', 1)
        cor = {
            'guild_join': ctx.guild,
            'guild_remove': ctx.guild,
            'member_join': ctx.author,
            'member_remove': ctx.author
        }
        if cor.get(event, None) is not None:
            e = cor.get(event, None)
            self.bot.dispatch(event, e)
            await ctx.send(f'Dispatched event `{event}`')
        else:
            await ctx.send(f'Failed to dispatch event `{event}`')

    @commands.command()
    @commands.is_owner()
    async def eval(self, ctx: commands.Context, *, cmd: str) -> None:
        """Evaluate arbitrary Python sent by the hard-coded owner account.

        SECURITY: this deliberately runs exec/eval on message content. It is
        gated by @commands.is_owner() plus a hard-coded author-id check; do
        not loosen either guard.
        """
        if ctx.message.author.id == 548803750634979340:
            fn_name = '_eval_expr'
            # Strip code fences/backticks, then indent the snippet so it can
            # be wrapped in an async function body.
            cmd = cmd.strip('` ')
            cmd = "\n".join(f'    {i}' for i in cmd.splitlines())
            body: str = f'async def {fn_name}():\n{cmd}'
            parsed = ast.parse(body)
            body = parsed.body[0].body
            # Turn the final expression into the coroutine's return value.
            insert_returns(body)
            env = {
                'bot': self.bot,
                'discord': discord,
                'commands': commands,
                'ctx': ctx,
                '__import__': __import__
            }
            exec(compile(parsed, filename='<ast>', mode='exec'), env)  # pylint: disable=exec-used
            result = (await eval(f'{fn_name}()', env))  # pylint: disable=eval-used
            try:
                await ctx.send(result)
            except discord.errors.HTTPException:
                # result was empty/unsendable (e.g. None) — report success anyway.
                await ctx.send('Evaluation successful, no output.')
def setup(bot: commands.Bot) -> None:
    """discord.py extension entry point: register the Debug cog on the bot."""
    bot.add_cog(Debug(bot))
"""Move to well command request, result, and implementation models."""
from __future__ import annotations
from pydantic import BaseModel
from typing import TYPE_CHECKING, Optional, Type
from typing_extensions import Literal
from .pipetting_common import PipetteIdMixin, WellLocationMixin
from .command import AbstractCommandImpl, BaseCommand, BaseCommandCreate
if TYPE_CHECKING:
from ..execution import MovementHandler
MoveToWellCommandType = Literal["moveToWell"]
class MoveToWellParams(PipetteIdMixin, WellLocationMixin):
    """Payload required to move a pipette to a specific well.

    Combines the pipette id field (PipetteIdMixin) with the target
    labware/well location fields (WellLocationMixin).
    """

    pass
class MoveToWellResult(BaseModel):
    """Result data from the execution of a MoveToWell command.

    Intentionally empty: a successful move produces no result payload.
    """

    pass
class MoveToWellImplementation(AbstractCommandImpl[MoveToWellParams, MoveToWellResult]):
    """Move to well command implementation."""

    def __init__(self, movement: MovementHandler, **kwargs: object) -> None:
        # Only the movement handler is used; other handlers are ignored.
        self._movement = movement

    async def execute(self, params: MoveToWellParams) -> MoveToWellResult:
        """Move the requested pipette to the requested well."""
        await self._movement.move_to_well(
            pipette_id=params.pipetteId,
            labware_id=params.labwareId,
            well_name=params.wellName,
            well_location=params.wellLocation,
        )
        return MoveToWellResult()
class MoveToWell(BaseCommand[MoveToWellParams, MoveToWellResult]):
    """Move to well command model."""

    commandType: MoveToWellCommandType = "moveToWell"
    params: MoveToWellParams
    result: Optional[MoveToWellResult]

    # Links this command model to its executor implementation.
    _ImplementationCls: Type[MoveToWellImplementation] = MoveToWellImplementation
class MoveToWellCreate(BaseCommandCreate[MoveToWellParams]):
    """Move to well command creation request model."""

    commandType: MoveToWellCommandType = "moveToWell"
    params: MoveToWellParams

    # Full command model built from this creation request.
    _CommandCls: Type[MoveToWell] = MoveToWell
|
import json
import os
from django.apps import apps
from django.conf import settings
from django.core.management.base import BaseCommand
from rest_auth.utils import import_callable
from zconnect.models import Product
# Resolve the concrete device model from settings so projects can swap in
# their own ZCONNECT_DEVICE_MODEL implementation.
_Device = apps.get_model(settings.ZCONNECT_DEVICE_MODEL)
class Command(BaseCommand):
    """Management command that provisions zconnect devices for one product."""

    help = 'Provision one or more zconnect devices'

    def add_arguments(self, parser):
        """Register the product id, device count, and output-path options."""
        parser.add_argument(
            "product",
            type=int,
            help="The product ID these devices should be assigned to"
        )
        parser.add_argument(
            "--number",
            default=1,
            type=int,
            help="The number of devices to create"
        )
        parser.add_argument(
            "--outputfile",
            default='output.json',
            type=os.path.abspath,
            help="Path to the file where output should be stored"
        )

    def handle(self, *args, **options):
        """Create the requested devices, then hand their serialized form to handle_output."""
        serializer_cls = import_callable(settings.ZCONNECT_DEVICE_SERIALIZER)
        product = Product.objects.get(pk=options['product'])
        devices = [_Device(product=product) for _ in range(options['number'])]
        for device in devices:
            device.save()
        self.handle_output([serializer_cls(device).data for device in devices], **options)

    def handle_output(self, devices, **options):
        """Write the serialized devices to the --outputfile path as JSON."""
        with open(options['outputfile'], 'w') as output_file:
            json.dump(devices, output_file)
|
import argparse
from sys import stdout
import json5
from numpy.random import normal
from numpy import round
def create_circle(size, b_mean, b_dev, c_mean, c_dev):
def create_vertex():
values = round([normal(b_mean, b_dev), normal(-b_mean, b_dev)]).astype(int).tolist()
return {
'low': min(values),
'high': max(values)
}
def create_cost():
c = 0
while c <= 0:
c = round(normal(c_mean, c_dev, size=1)).astype(int).tolist()[0]
return c
return {
'vertices': [create_vertex() for _ in range(size)],
'edge_costs': [create_cost() for _ in range(size)]
}
if __name__ == '__main__':
    parser = argparse.ArgumentParser('Create random sRND circle scenario')
    parser.add_argument('size', help='size of the circle', type=int)
    # Output defaults to stdout so the script can be piped.
    parser.add_argument('output', nargs='?', type=argparse.FileType('w'), default=stdout, help='output file (json5)')
    parser.add_argument('-m', '--distance-mean', help='the mean of the distance to 0 for the low and high values', type=float, default=3, dest='b_mean')
    parser.add_argument('-d', '--standard-deviation', help='standard deviation of the low and high values', type=float, default=1, dest='b_dev')
    parser.add_argument('-c', '--cost-mean', help='standard mean of the cost values', type=float, default=5, dest='c_mean')
    parser.add_argument('-s', '--cost-standard-deviation', help='standard deviation of the cost values', type=float, default=1, dest='c_dev')
    args = parser.parse_args()
    # Generate the scenario and serialize it as indented JSON5.
    result = create_circle(args.size, args.b_mean, args.b_dev, args.c_mean, args.c_dev)
    json5.dump(result, args.output, indent=2)
"""
Management utility to create users on Auth0 backend and as superusers locally.
"""
from __future__ import unicode_literals
import getpass
import sys
from auth0plus.management import Auth0
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.password_validation import validate_password
from django.core import exceptions
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
from django.db.models.fields import EmailField
from django.utils.encoding import force_str
from django.utils.six.moves import input
from django.utils.text import capfirst
class NotRunningInTTYException(Exception):
    """Raised to abort interactive prompting when stdin is not a TTY."""
    pass
class Command(BaseCommand):
    """Create a user on the Auth0 backend and a local site superuser.

    Interactive runs prompt for email, any REQUIRED_FIELDS, and a password,
    and look the email up on Auth0 first; ``--noinput`` runs only validate
    the supplied options and never touch Auth0.
    """

    help = 'Used to create a user on auth0 and a site superuser.'
    requires_migrations_checks = True

    def __init__(self, *args, **kwargs):
        """Cache the active user model for reuse by the other methods."""
        super(Command, self).__init__(*args, **kwargs)
        self.UserModel = get_user_model()

    def add_arguments(self, parser):
        """Register CLI options, plus one ``--<field>`` per REQUIRED_FIELDS entry."""
        parser.add_argument(
            '--email',
            dest='email', default=None,
            help='Specifies the login (email) for the superuser.',
        )
        parser.add_argument(
            '--site',
            dest='site_id', default=settings.SITE_ID,
            help='Specifies site for the superuser.',
        )
        parser.add_argument(
            '--noinput', '--no-input',
            action='store_false', dest='interactive', default=True,
            help=(
                'Tells Django to NOT prompt the user for input of any kind. '
                'You must use --email with --noinput, along with an option for '
                'any other required field. Superusers created with --noinput will '
                'not be able to log in until they\'re given a valid password.'
            ),
        )
        parser.add_argument(
            '--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS,
            help='Specifies the database to use. Default is "default".',
        )
        for field in self.UserModel.REQUIRED_FIELDS:
            parser.add_argument(
                '--%s' % field, dest=field, default=None,
                help='Specifies the %s for the superuser.' % field,
            )

    def execute(self, *args, **options):
        """Capture the stdin handle before delegating to normal execution."""
        self.stdin = options.get('stdin', sys.stdin)  # Used for testing
        return super(Command, self).execute(*args, **options)

    def handle(self, *args, **options):
        """Collect user data (from options or prompts) and create the superuser."""
        email = options['email']
        database = options['database']
        # If not provided, create the user with an unusable password
        password = None
        auth0user = None
        user_data = {}
        # Same as user_data but with foreign keys as fake model instances
        # instead of raw IDs.
        fake_user_data = {}
        # Do quick and dirty validation if --noinput
        if not options['interactive']:
            try:
                if not email:
                    raise CommandError("You must use --email with --noinput.")
                for field_name in self.UserModel.REQUIRED_FIELDS:
                    if options[field_name]:
                        field = self.UserModel._meta.get_field(field_name)
                        user_data[field_name] = field.clean(options[field_name], None)
                    else:
                        raise CommandError("You must use --%s with --noinput." % field_name)
            except exceptions.ValidationError as e:
                raise CommandError('; '.join(e.messages))
        else:
            # Prompt for username/password, and any other required fields.
            # Enclose this whole thing in a try/except to catch
            # KeyboardInterrupt and exit gracefully.
            # default_email = '%s@%s' % (get_default_username(), settings.SITE_DOMAIN)
            try:
                if hasattr(self.stdin, 'isatty') and not self.stdin.isatty():
                    raise NotRunningInTTYException("Not running in a TTY")
                # Get a username
                verbose_field_name = 'email address'
                while email is None:
                    input_msg = force_str('%s: ' % capfirst(verbose_field_name))
                    email = self.get_input_data(EmailField(), input_msg)
                    if not email:
                        continue
                    try:
                        # Reject emails already taken for this site.
                        self.UserModel.objects.get(
                            email=email,
                            site_id=options['site_id'])
                        self.stderr.write(
                            "Error: That %s is already taken." % verbose_field_name)
                        email = None
                        continue
                    except self.UserModel.DoesNotExist:
                        pass
                auth0 = Auth0(
                    settings.AUTH0_DOMAIN,
                    settings.AUTH0_JWT,
                    client_id=settings.AUTH0_CLIENT_ID,
                    default_connection=settings.AUTH0_CONNECTION)
                try:
                    # An existing Auth0 account is reused (with a warning),
                    # not treated as an error.
                    auth0user = auth0.users.get(email=email)
                    self.stderr.write(
                        'Warning: An Auth0 user with that email address '
                        'has already been created.')
                except auth0.users.DoesNotExist:
                    pass
                for field_name in self.UserModel.REQUIRED_FIELDS:
                    field = self.UserModel._meta.get_field(field_name)
                    user_data[field_name] = options[field_name]
                    while user_data[field_name] is None:
                        default_field_value = ' (%s.%s)' % (
                            field.remote_field.model._meta.object_name,
                            field.remote_field.field_name,
                        ) if field.remote_field else ''
                        message = force_str('%s%s: ' % (
                            capfirst(field.verbose_name),
                            default_field_value))
                        input_value = self.get_input_data(field, message)
                        user_data[field_name] = input_value
                        fake_user_data[field_name] = input_value
                        # Wrap any foreign keys in fake model instances
                        if field.remote_field:
                            fake_user_data[field_name] = field.remote_field.model(input_value)
                # Get a password
                while password is None:
                    password = getpass.getpass()
                    password2 = getpass.getpass(force_str('Password (again): '))
                    if password != password2:
                        self.stderr.write("Error: Your passwords didn't match.")
                        password = None
                        # Don't validate passwords that don't match.
                        continue
                    if password.strip() == '':
                        self.stderr.write("Error: Blank passwords aren't allowed.")
                        password = None
                        # Don't validate blank passwords.
                        continue
                    try:
                        validate_password(password2, self.UserModel(**fake_user_data))
                    except exceptions.ValidationError as err:
                        self.stderr.write('\n'.join(err.messages))
                        password = None
            except KeyboardInterrupt:
                self.stderr.write("\nOperation cancelled.")
                sys.exit(1)
            except NotRunningInTTYException:
                self.stdout.write(
                    "Superuser creation skipped due to not running in a TTY. "
                    "You can run `manage.py createsuperuser` in your project "
                    "to create one manually."
                )
        # email stays None when creation was skipped (no TTY); in that case
        # nothing is created.
        if email:
            user_data['email'] = email
            user_data['password'] = password
            if auth0user:
                user_data['auth0user'] = auth0user
            self.UserModel._default_manager.db_manager(database).create_superuser(**user_data)
            if options['verbosity'] >= 1:
                self.stdout.write("Superuser created successfully.")
            if not options['interactive']:
                self.stdout.write("Warning: --noinput does not create an auth0 user")

    def get_input_data(self, field, message, default=None):
        """
        Override this method if you want to customize data inputs or
        validation exceptions.
        """
        raw_value = input(message)
        if default and raw_value == '':
            raw_value = default
        try:
            val = field.clean(raw_value, None)
        except exceptions.ValidationError as e:
            self.stderr.write("Error: %s" % '; '.join(e.messages))
            val = None
        return val
|
# usage: make_locale_dirs.py locale_dir [...]
#
# This script is intended to create empty locale directories (.lproj) in a
# Cocoa .app bundle. The presence of these empty directories is sufficient to
# convince Cocoa that the application supports the named localization, even if
# an InfoPlist.strings file is not provided. Chrome uses these empty locale
# directoires for its helper executable bundles, which do not otherwise
# require any direct Cocoa locale support.
import os
import sys
def main(args):
    """Create each locale (.lproj) directory named in *args*.

    A directory that already exists is fine; any other OS error is
    propagated.

    :param args: iterable of directory paths to create
    """
    for dirname in args:
        try:
            os.makedirs(dirname)
        except FileExistsError:
            # It's OK if it already exists. (The previous code compared
            # e.errno against os.errno.EEXIST, but os.errno does not exist
            # on Python 3 — the handler itself raised AttributeError.)
            pass


if __name__ == '__main__':
    main(sys.argv[1:])
|
# coding: utf-8
"""
Implementation of convert_to_schema(data, previous_schema, new_schema)
"""
import typing
from .generate_placeholder import generate_placeholder
from .utils import get_dimensionality_for_units
def convert_to_schema(data: dict, previous_schema: dict, new_schema: dict) -> typing.Tuple[typing.Any, typing.Sequence[str]]:
    """
    Convert data from one schema to another.

    :param data: the sampledb object data
    :param previous_schema: the sampledb object schema for the given data
    :param new_schema: the target sampledb object schema
    :return: the converted data and a list of conversion warnings/notes
    """
    # Identical schemas of a known type need no conversion at all.
    if new_schema == previous_schema and new_schema['type'] in ('bool', 'text', 'datetime', 'tags', 'sample', 'measurement', 'quantity', 'array', 'objects', 'hazards'):
        return data, []

    # Special case: free text can be split into tags.
    if new_schema['type'] == 'tags' and previous_schema['type'] == 'text':
        unique_tags = []
        for raw_tag in data['text'].split(','):
            normalized_tag = raw_tag.strip().lower()
            if normalized_tag not in unique_tags:
                unique_tags.append(normalized_tag)
        return {'_type': 'tags', 'tags': unique_tags}, []

    # Any other cross-type conversion is unsupported.
    if previous_schema['type'] != new_schema['type']:
        warning = "Unable to convert property '{}' from type '{}' to type '{}'.".format(new_schema['title'], previous_schema['type'], new_schema['type'])
        return generate_placeholder(new_schema), [warning]

    # Same simple type: data carries over unchanged.
    if new_schema['type'] in ('bool', 'text', 'datetime', 'tags', 'sample', 'measurement', 'hazards'):
        return data, []

    if new_schema['type'] == 'quantity':
        previous_dimensionality = get_dimensionality_for_units(previous_schema['units'])
        new_dimensionality = get_dimensionality_for_units(new_schema['units'])
        if new_dimensionality == previous_dimensionality:
            return data, []
        warning = "Unable to convert quantity '{}' to different dimensionality: {} -> {}".format(new_schema['title'], previous_dimensionality, new_dimensionality)
        return generate_placeholder(new_schema), [warning]

    if new_schema['type'] == 'object':
        converted = generate_placeholder(new_schema)
        warnings = []
        for property_name, property_value in data.items():
            if property_name not in new_schema['properties']:
                continue
            converted_value, property_warnings = convert_to_schema(property_value, previous_schema['properties'][property_name], new_schema['properties'][property_name])
            if converted_value is not None:
                converted[property_name] = converted_value
            for warning in property_warnings:
                if warning not in warnings:
                    warnings.append(warning)
        return converted, warnings

    if new_schema['type'] == 'array':
        converted_items = []
        warnings = []
        for item in data:
            converted_item, item_warnings = convert_to_schema(item, previous_schema['items'], new_schema['items'])
            converted_items.append(converted_item)
            for warning in item_warnings:
                if warning not in warnings:
                    warnings.append(warning)
        return converted_items, warnings

    return generate_placeholder(new_schema), ["Unable to convert property '{}' of type '{}'.".format(new_schema['title'], new_schema['type'])]
|
import threading
import time
# n threads are spawned; each performs m zero-second sleeps.
n = 100000
m = 10000


def process():
    """Yield the CPU m times via zero-duration sleeps."""
    for _ in range(m):
        time.sleep(0)
def main():
    """Spawn n threads that each repeatedly yield the CPU.

    NOTE(review): 100000 threads are started without ever being joined —
    this looks like a deliberate scheduler stress test, but it can exhaust
    OS thread limits; confirm intent.
    """
    for _ in range(n):
        t1 = threading.Thread(target=process)
        t1.start()


if __name__ == '__main__':
    main()
# Copyright 2016 Intel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import wraps
import logging
import os
import tarfile
import tempfile
from oslo_config import cfg
from syntribos._i18n import _
from syntribos.clients.http.client import SynHTTPClient
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
LOG.addHandler(logging.StreamHandler())
# Directories created by download(); module-global so a caller can clean
# them up after a run.
temp_dirs = []
remote_dirs = []
def cache(func):
    """Memoize *func* by positional args when CONF.remote.enable_cache is set.

    Keyword arguments are not part of the cache key; caching is skipped
    entirely when the option is disabled.
    """
    memo = {}

    @wraps(func)
    def wrapper(*args, **kwargs):
        if not CONF.remote.enable_cache:
            return func(*args, **kwargs)
        if args not in memo:
            memo[args] = func(*args, **kwargs)
        return memo[args]
    return wrapper
def download(uri, cache_dir=None):
    """A simple file downloader.

    A simple file downloader which returns the absolute
    path to where the file has been saved. In case of tar
    files the absolute path excluding .tar extension is
    passed.

    :param str uri: The remote uri of the file
    :param str cache_dir: The directory name/handle
    :returns str: Absolute path to the downloaded file
    """
    global temp_dirs
    global remote_dirs
    if not cache_dir:
        # No cache dir supplied: download into a fresh temp directory and
        # remember it for later cleanup.
        cache_dir = tempfile.mkdtemp()
        temp_dirs.append(cache_dir)
    remote_dirs.append(cache_dir)
    LOG.debug("Remote file location: %s", remote_dirs)
    _kwargs = {'allow_redirects': True}
    resp, _ = SynHTTPClient().request("GET", uri, requestslib_kwargs=_kwargs)
    # NOTE(review): changes the process-wide CWD as a side effect — confirm
    # callers depend on this before refactoring to absolute paths.
    os.chdir(cache_dir)
    saved_umask = os.umask(0o77)  # restrict permissions on the written file
    fname = uri.split("/")[-1]
    try:
        with open(fname, 'wb') as fh:
            fh.write(resp.content)
        return os.path.abspath(fname)
    except IOError:
        # NOTE(review): on write failure this logs and implicitly returns
        # None — callers must tolerate a None path.
        LOG.error("IOError in writing the downloaded file to disk.")
    finally:
        os.umask(saved_umask)  # always restore the previous umask
def extract_tar(abs_path):
    """Extract a gzipped tar file from the given absolute_path

    The archive is removed after extraction. Changes the process CWD to
    the archive's directory as a side effect (matching previous behavior).

    :param str abs_path: The absolute path to the tar file
    :returns str untar_dir: The absolute path to untarred file
    """
    work_dir, tar_file = os.path.split(abs_path)
    os.chdir(work_dir)
    try:
        os.mkdir("remote")
    except OSError:
        LOG.error("Path exists already, not creating remote directory.")
    remote_path = os.path.abspath("remote")

    def safe_paths(tar_meta):
        """Yield only members that resolve inside work_dir (traversal guard).

        Original from https://stackoverflow.com/questions/
        10060069/safely-extract-zip-or-tar-using-python

        Uses realpath + commonpath rather than a str.startswith prefix
        check: the old check could be bypassed by a sibling directory
        sharing the prefix (e.g. /work_dir_evil) and broke when work_dir
        itself sat behind a symlink.

        :param tarfile.TarFile tar_meta: TarFile object
        :returns tarfile:TarFile fh: TarFile object
        """
        real_root = os.path.realpath(work_dir)
        for member in tar_meta:
            target = os.path.realpath(os.path.join(work_dir, member.name))
            if os.path.commonpath([target, real_root]) == real_root:
                yield member

    try:
        with tarfile.open(tar_file, mode="r:gz") as tarf:
            tarf.extractall(path=remote_path, members=safe_paths(tarf))
    except tarfile.ExtractError as e:
        LOG.error("Unable to extract the file: %s", e)
        raise
    os.remove(abs_path)
    return remote_path
@cache
def get(uri, cache_dir=None):
    """Entry method for download method

    Downloads *uri* (into *cache_dir* when writable) and, if the result is
    a gzipped tar, extracts it; otherwise the downloaded path is returned.

    :param str uri: A formatted remote URL of a file
    :param str: Absolute path to the downloaded content
    :param str cache_dir: path to save downloaded files
    """
    base_dir = cache_dir or CONF.remote.cache_dir
    if base_dir:
        abs_base = os.path.abspath(base_dir)
        # Probe that the target directory is writable before downloading.
        try:
            probe = tempfile.TemporaryFile(dir=abs_base)
            probe.close()
        except OSError:
            LOG.error("Failed to write remote files to: %s", abs_base)
            exit(1)
        abs_path = download(uri, abs_base)
    else:
        abs_path = download(uri)
    try:
        return extract_tar(abs_path)
    except Exception:
        LOG.debug(_("Not a gz file, returning abs_path"))
        return abs_path
|
"""add workflow.source_metadata column
Revision ID: b182f655505f
Revises: e7b6dcb09efd
Create Date: 2022-03-14 12:56:57.067748
"""
from alembic import op
from sqlalchemy import Column
from galaxy.model.custom_types import JSONType
from galaxy.model.migrations.util import (
drop_column,
ignore_add_column_error,
)
# revision identifiers, used by Alembic.
revision = "b182f655505f"        # this migration's id
down_revision = "e7b6dcb09efd"   # parent migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Add the workflow.source_metadata JSON column, tolerating re-runs."""
    with ignore_add_column_error("workflow", "source_metadata"):
        op.add_column("workflow", Column("source_metadata", JSONType))
def downgrade():
    """Remove the workflow.source_metadata column added by upgrade()."""
    drop_column("workflow", "source_metadata")
|
from __future__ import annotations
from prettyqt import core, gui
from prettyqt.qt import QtGui
# Re-parent Qt's class so every QLinearGradient picks up prettyqt's
# Gradient helper methods.
QtGui.QLinearGradient.__bases__ = (gui.Gradient,)
class LinearGradient(QtGui.QLinearGradient):
    """QLinearGradient subclass with serialization and PointF accessors."""

    def __repr__(self):
        return f"{type(self).__name__}({self.get_start()}, {self.get_final_stop()})"

    def serialize_fields(self):
        """Return the start/final-stop coordinates as plain tuples."""
        start = self.start()
        final_stop = self.finalStop()
        # Use the x()/y() accessors: the raw QPointF returned by start() /
        # finalStop() is not guaranteed to support index subscription, which
        # made the previous start[0]/start[1] access fragile.
        return dict(
            start=(start.x(), start.y()),
            final_stop=(final_stop.x(), final_stop.y()),
        )

    def get_start(self) -> core.PointF:
        """Start point wrapped as a prettyqt PointF."""
        return core.PointF(self.start())

    def get_final_stop(self) -> core.PointF:
        """Final stop point wrapped as a prettyqt PointF."""
        return core.PointF(self.finalStop())
|
from lxml import etree
from .. import style as main_style
class StylizedElement:
    """Base class for drawable SVG elements with lazy, styled building.

    Subclasses implement ``build(style)`` and set ``self.element``; this
    base tracks a translation offset and builds on first XML access.
    """

    def __init__(self, *args, **kwargs):
        # *args is accepted but unused here — presumably consumed by
        # subclasses; TODO confirm.
        self._built = False
        self.x = self.y = 0
        self.kwargs = kwargs

    def do_build(self, style=None, **kwargs):
        """Build the element, merging constructor and call-time kwargs into *style*."""
        if not style:
            style = main_style.STYLE
        style = style.apply({**self.kwargs, **kwargs}, this=False)
        self.build(style)
        if self.x != 0 or self.y != 0:
            self.element.set('transform', f'translate({self.x}, {self.y})')
        self._built = True
        return self

    def translate(self, x, y, add=False):
        """Set (or, with add=True, offset by) this element's translation; returns self."""
        if add:
            x, y = self.x + x, self.y + y
        self.x, self.y = x, y
        return self

    @property
    def xml(self):
        """Serialized XML bytes, building the element on first access."""
        if not self._built:
            self.do_build()
        return etree.tostring(self.element)

    def __repr__(self):
        # Use the runtime class name so subclasses get a truthful repr
        # (previously hardcoded to 'StylizedElement()').
        return f'{type(self).__name__}()'
|
"""Test /feature_association."""
from fastapi.testclient import TestClient
import pytest
from icees_api.app import APP
from ..util import load_data, do_verify_feature_matrix_response
# Shared fixtures: one HTTP test client over the ASGI app, plus the table
# and year used by every request below.
testclient = TestClient(APP)
table = "patient"
year = 2010
# Every AgeStudyStart bin present in the seeded data, ascending.
age_levels = [
    '0-2',
    '3-17',
    '18-34',
    '35-50',
    '51-69',
    '70-89',
]
@load_data(
    APP,
    """
PatientId,year,AgeStudyStart,Albuterol,AvgDailyPM2.5Exposure,EstResidentialDensity,AsthmaDx
varchar(255),int,varchar(255),varchar(255),int,int,int
1,2010,0-2,0,1,0,1
2,2010,3-17,1,1,0,1
3,2010,18-34,>1,1,0,1
4,2010,35-50,0,2,0,1
5,2010,51-69,1,2,0,1
6,2010,70-89,>1,2,0,1
7,2010,0-2,0,3,0,1
8,2010,0-2,1,3,0,1
9,2010,0-2,>1,3,0,1
10,2010,0-2,0,4,0,1
11,2010,0-2,1,4,0,1
12,2010,0-2,>1,4,0,1
""",
    """
cohort_id,size,features,table,year
COHORT:1,12,"{}",patient,2010
"""
)
def test_feature_association2_explicit_check_coverage_is_full_2():
    """POST feature_association2 with full coverage on feature_a only."""
    cohort_id = "COHORT:1"
    request_body = {
        "feature_a": {
            "feature_name": "AgeStudyStart",
            # One equality qualifier per age bin: feature_a covers all rows.
            "feature_qualifiers": [
                {"operator": "=", "value": level} for level in age_levels
            ]
        },
        "feature_b": {
            "feature_name": "AgeStudyStart",
            "feature_qualifiers": [{
                "operator": ">",
                "value": '0-2'
            }]
        },
        "check_coverage_is_full": True
    }
    response = testclient.post(
        f"/{table}/cohort/{cohort_id}/feature_association2",
        json=request_body,
    )
    body = response.json()
    assert "return value" in body
    assert isinstance(body["return value"], str)
@load_data(
    APP,
    """
PatientId,year,AgeStudyStart,Albuterol,AvgDailyPM2.5Exposure,EstResidentialDensity,AsthmaDx
varchar(255),int,varchar(255),varchar(255),int,int,int
1,2010,0-2,0,1,0,1
2,2010,3-17,1,1,0,1
3,2010,18-34,>1,1,0,1
4,2010,35-50,0,2,0,1
5,2010,51-69,1,2,0,1
6,2010,70-89,>1,2,0,1
7,2010,0-2,0,3,0,1
8,2010,0-2,1,3,0,1
9,2010,0-2,>1,3,0,1
10,2010,0-2,0,4,0,1
11,2010,0-2,1,4,0,1
12,2010,0-2,>1,4,0,1
""",
    """
cohort_id,size,features,table,year
COHORT:1,12,"{}",patient,2010
"""
)
def test_feature_association2_explicit_check_coverage_is_full_3():
    """POST feature_association2 with feature_a missing its first age bin."""
    cohort_id = "COHORT:1"
    request_body = {
        "feature_a": {
            "feature_name": "AgeStudyStart",
            # Skip the first age bin, so feature_a does not cover all rows.
            "feature_qualifiers": [
                {"operator": "=", "value": level} for level in age_levels[1:]
            ]
        },
        "feature_b": {
            "feature_name": "AgeStudyStart",
            "feature_qualifiers": [{
                "operator": ">",
                "value": '0-2'
            }, {
                "operator": "<=",
                "value": '0-2'
            }]
        },
        "check_coverage_is_full": True
    }
    response = testclient.post(
        f"/{table}/cohort/{cohort_id}/feature_association2",
        json=request_body,
    )
    body = response.json()
    assert "return value" in body
    assert isinstance(body["return value"], str)
@load_data(
    APP,
    """
PatientId,year,AgeStudyStart,Albuterol,AvgDailyPM2.5Exposure,EstResidentialDensity,AsthmaDx
varchar(255),int,varchar(255),varchar(255),int,int,int
1,2010,0-2,0,1,0,1
2,2010,3-17,1,1,0,1
3,2010,18-34,>1,1,0,1
4,2010,35-50,0,2,0,1
5,2010,51-69,1,2,0,1
6,2010,70-89,>1,2,0,1
7,2010,0-2,0,3,0,1
8,2010,0-2,1,3,0,1
9,2010,0-2,>1,3,0,1
10,2010,0-2,0,4,0,1
11,2010,0-2,1,4,0,1
12,2010,0-2,>1,4,0,1
""",
    """
cohort_id,size,features,table,year
COHORT:1,12,"{}",patient,2010
"""
)
def test_feature_association2_explicit_check_coverage_is_full():
    """POST feature_association2 with full coverage on both features."""
    cohort_id = "COHORT:1"
    request_body = {
        "feature_a": {
            "feature_name": "AgeStudyStart",
            # One equality qualifier per age bin: feature_a covers all rows.
            "feature_qualifiers": [
                {"operator": "=", "value": level} for level in age_levels
            ]
        },
        "feature_b": {
            "feature_name": "AgeStudyStart",
            "feature_qualifiers": [{
                "operator": ">",
                "value": '0-2'
            }, {
                "operator": "<=",
                "value": '0-2'
            }]
        },
        "check_coverage_is_full": True
    }
    response = testclient.post(
        f"/{table}/cohort/{cohort_id}/feature_association2",
        json=request_body,
    )
    body = response.json()
    assert "return value" in body
    do_verify_feature_matrix_response(body["return value"])
|
# -*- coding: utf-8 -*-
"""Main running script.
This script is the main chipsnet training script. Given the input
configuration it carries out the specified task, either create, train
or study.
Example:
The example below runs the example training configuration
$ python run.py ./config/train.yml
"""
import argparse
import os
import logging
# Need to setup the logging level before we use tensorflow
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TF's C++ INFO/WARN/ERROR output
logging.disable(logging.CRITICAL)  # disable Python-level logging entirely
import tensorflow as tf # noqa: E402
import chipsnet.config # noqa: E402
import chipsnet.data # noqa: E402
import chipsnet.models # noqa: E402
import chipsnet.trainer # noqa: E402
import chipsnet.study # noqa: E402
def setup_gpus():
    """Enable memory growth on the GPU's."""
    # Need to setup the GPU's before we import anything else that uses tensorflow
    gpus = tf.config.list_physical_devices("GPU")
    # Reuse the already-computed device list instead of querying twice.
    if gpus:
        try:  # Currently, memory growth needs to be the same across GPUs
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
        except RuntimeError as e:  # Memory growth must be set before GPUs have been initialized
            print(e)
def create_data(config):
    """Preprocesses input .root files into .tfrecords ready for use in training.

    Args:
        config (dotmap.DotMap): configuration namespace
    """
    print("--- Setting up data creator ---\n")
    data_creator = chipsnet.data.Creator(config)
    print("--- Running creation ---\n")
    data_creator.run()
def train_model(config):
    """Trains a model according to the configuration.

    Args:
        config (dotmap.DotMap): configuration namespace
    """
    setup_gpus()
    # Mirror the model across all visible GPUs; anything that creates
    # variables (reader, model, trainer) must live inside this scope.
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
        print("--- Setting up directories ---\n")
        chipsnet.config.setup_dirs(config, True)
        print("--- Setting up data reader ---\n")
        data = chipsnet.data.Reader(config)
        print("--- Building model ---\n")
        model = chipsnet.models.Model(config)
        # epochs == 0 is the documented way to skip training entirely.
        if config.trainer.epochs > 0:
            print("\n--- Training model ---")
            trainer = chipsnet.trainer.Trainer(config, model, data)
            trainer.train()
            print("\n--- Saving model to {} ---\n".format(config.exp.exp_dir))
            trainer.save()
            print("\n--- Running quick evaluation ---\n")
            trainer.eval()
        else:
            print("\n--- Skipping training ---\n")
def study_model(config):
    """Conducts a SHERPA study on a model according to the configuration.

    Args:
        config (dotmap.DotMap): configuration namespace
    """
    setup_gpus()
    # The study builds models itself, so it also runs under the mirrored
    # distribution scope.
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
        print("--- Setting up directories ---\n")
        chipsnet.config.setup_dirs(config, True)
        print("--- Setting up the study---\n")
        study = chipsnet.study.SherpaStudy(config)
        print("--- Running study ---\n")
        study.run()
def parse_args():
    """Parse the command line arguments."""
    arg_parser = argparse.ArgumentParser(description="chipsnet")
    arg_parser.add_argument("config", help="path to the configuration file")
    return arg_parser.parse_args()
def main():
    """Call correct chipsnet task."""
    print("\n--- Its Magic, it must be chipsnet ---\n")
    config = chipsnet.config.get(parse_args().config)
    # Dispatch on the configured task name; anything else is a config error.
    if config.task == "create":
        create_data(config)
    elif config.task == "train":
        train_model(config)
    elif config.task == "study":
        study_model(config)
    else:
        print("\nError: must define a task in configuration [create, train, study]")
        raise SystemExit
    print("--- Magic complete ---\n")


if __name__ == "__main__":
    main()
|
# Integer codes for the dataset splits; -1 marks an invalid/unset split.
DATASET_TYPE_INVALID = -1
DATASET_TYPE_TRAIN = 0
DATASET_TYPE_VAL = 1
DATASET_TYPE_TEST = 2
# All valid split codes (excludes INVALID).
DATASET_TYPE_ALL = [DATASET_TYPE_TRAIN, DATASET_TYPE_VAL, DATASET_TYPE_TEST]
# Canonical string names corresponding to the codes above.
DATASET_STR_TRAIN = 'train'
DATASET_STR_VAL = 'val'
DATASET_STR_TEST = 'test'
def dataset_type2str(ds_type: int) -> str:
    """convert dataset type to string"""
    names = {
        DATASET_TYPE_TRAIN: DATASET_STR_TRAIN,
        DATASET_TYPE_VAL: DATASET_STR_VAL,
        DATASET_TYPE_TEST: DATASET_STR_TEST,
    }
    if ds_type in names:
        return names[ds_type]
    # TypeError kept (rather than ValueError) for caller compatibility.
    raise TypeError(f'ds_type:{ds_type} is invalid')
def dataset_str2type(ds_str: str) -> int:
    """convert dataset type from string"""
    codes = {
        DATASET_STR_TRAIN: DATASET_TYPE_TRAIN,
        DATASET_STR_VAL: DATASET_TYPE_VAL,
        DATASET_STR_TEST: DATASET_TYPE_TEST,
    }
    if ds_str in codes:
        return codes[ds_str]
    # TypeError kept (rather than ValueError) for caller compatibility.
    raise TypeError(f'ds_str:{ds_str} is invalid')
|
# Modify the previous program such that only the users Alice and Bob are greeted with their names.
while True:
    name = input("nhap vao ten cua ban: ")
    # Fixed typo: the requirement greets "Bob", not "Bod".
    if name in ("Alice", "Bob"):
        print("xin chao " + name)
        break
    else:
        print("ban nhap sai ten, vui long nhap lai ten")
# Lesson 5b: Packages need to be imported into Thonny using Tools | Manage Packages.
# Plot multiple stations, centred on the United Downs Deep Geothermal Project
import folium
# Set up a list of stations. This is a list of lists, a 2 dimensional list.
# Each entry is [stationID, location name, latitude, longitude].
STATIONS = [
    ['RB30C','Falmouth',50.149,-5.095],
    ['RB5E8','Penzance',50.118,-5.539],
    ['RD93E','Redruth',50.234,-5.238],
    ['R82BD','Richard Lander',50.26,-5.103],
    ['R7FA5','Truro School',50.261,-5.043],
    ['R0353','Penair',50.267,-5.03],
    ['R9FEE','Truro High',50.257,-5.057]
]
# The following arguments will centre the map on United Downs Deep Geothermal Project, zooming in to Cornwall.
# Renamed from `map` to avoid shadowing the built-in map().
# NOTE(review): the 'Stamen Terrain' tile set has been retired upstream — confirm it still renders.
station_map = folium.Map(location=[50.230, -5.166], zoom_start=11, tiles='Stamen Terrain')
# Now you can add markers to show each station in turn
# station is a simple list showing the stationID, location, lat, long, for each station in turn
for station in STATIONS:
    folium.Marker(location=[station[2], station[3]], popup=station[1], icon=folium.Icon(color='orange')).add_to(station_map)
# Finally, add a red marker for UDDGP, the deepest borehole in mainland UK
folium.Marker(location=[50.230, -5.166], popup='UDDGP', icon=folium.Icon(color='red')).add_to(station_map)
# save the file to disk as a web page.
station_map.save('cornish-stations.html')
# Locate the file and double click on it. You have an interactive map showing the cornish Raspberry Shake stations.
# You can zoom in and out, as well as clicking on the markers to see the popup text.
|
import unittest
import cloudwatch.modules.collectd as collectd
class collectdTest(unittest.TestCase):
    """Smoke tests: the collectd stub's log and registration hooks are callable."""

    def test_stub_logging_methods_are_not_throwing_exceptions(self):
        for log_method in (collectd.debug, collectd.info, collectd.warning, collectd.error):
            log_method("msg")

    def test_stub_callbacks_are_not_throwing_exceptions(self):
        for register in (collectd.register_config, collectd.register_init, collectd.register_write):
            register()
from typing import Set
import pytest
from transformers import AutoTokenizer
from gdmtl.datasets import mask_difference, mask_whole_word
@pytest.mark.parametrize(  # type:ignore
    """arch,seq,prob,expected""",
    [
        ("bert-base-uncased", "binsheng", 1.0, {"[MASK] [MASK] [MASK]"}),
        (
            "bert-base-uncased",
            "binsheng binsheng",
            0.5,
            {"binsheng [MASK] [MASK] [MASK]", "[MASK] [MASK] [MASK] binsheng"},
        ),
    ],
)
def test_batch_assemble(arch: str, seq: str, prob: float, expected: Set[str]) -> None:
    """mask_whole_word output must be one of the acceptable maskings.

    Masking is random, so *expected* enumerates every valid outcome for the
    given mask probability.
    """
    tokenizer = AutoTokenizer.from_pretrained(arch)
    out = mask_whole_word(tokenizer, seq, prob)
    assert out in expected
@pytest.mark.parametrize(  # type:ignore
    """arch,seq,seq2,ratio,expected""",
    [
        (
            "bert-base-uncased",
            "hello binsheng",
            "binsheng",
            1.0,
            {"hello [MASK] [MASK] [MASK]"},
        ),
        (
            "bert-base-uncased",
            "hello binsheng",
            "hello binsheng",
            0.5,
            {"hello [MASK] [MASK] [MASK]", "[MASK] binsheng"},
        ),
    ],
)
def test_mask_difference(
    arch: str, seq: str, seq2: str, ratio: float, expected: Set[str]
) -> None:
    """mask_difference output must be one of the acceptable maskings.

    Masking is random, so *expected* enumerates every valid outcome for the
    given ratio.
    """
    tokenizer = AutoTokenizer.from_pretrained(arch)
    out = mask_difference(tokenizer, seq, seq2, ratio)
    assert out in expected
|
import sys
import logging
import hypercane.errors
module_logger = logging.getLogger("hypercane.order.memento_datetime")
def order_by_memento_datetime(urims, cache_storage):
    """Return *urims* ordered newest-first by Memento-Datetime.

    Datetimes are fetched concurrently; URI-Ms whose datetime cannot be
    determined are recorded in the error store and omitted from the result.
    """
    from ..utils import get_memento_http_metadata
    from datetime import datetime
    import concurrent.futures
    import traceback

    memento_datetime_to_urim = []

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        future_to_urim = { executor.submit(get_memento_http_metadata, urim, cache_storage, metadata_fields=['memento-datetime']): urim for urim in urims }

        for future in concurrent.futures.as_completed(future_to_urim):
            # Resolve the URI-M *before* the try block: previously this lookup
            # happened inside it, so a failure before the assignment made the
            # except handler itself raise NameError on 'urim'.
            urim = future_to_urim[future]
            try:
                mdt = future.result()[0]
                # mdt = datetime.strptime(mdt, "%a, %d %b %Y %H:%M:%S GMT")
                # NOTE(review): datetime.timestamp(mdt) requires mdt to already
                # be a datetime — confirm get_memento_http_metadata returns
                # parsed datetimes, not raw header strings.
                module_logger.info("memento-datetime for {} is {} or {}".format(urim, mdt, datetime.timestamp(mdt)))
                memento_datetime_to_urim.append( (datetime.timestamp(mdt), urim) )
            except Exception as exc:
                module_logger.exception("Error: {}, Failed to determine memento-datetime for {}, skipping...".format(repr(exc), urim))
                hypercane.errors.errorstore.add(urim, traceback.format_exc())

    # Sort by timestamp descending (newest first) and drop the timestamps.
    sorted_mementos = [ urim for mdt, urim in sorted( memento_datetime_to_urim, reverse=True ) ]

    return sorted_mementos
|
# from pizza_maker.dough import Dough
# from pizza_maker.pizza import Pizza
# from pizza_maker.topping import Topping
#
#
# tomato_topping = Topping("Tomato", 60)
# print(tomato_topping.topping_type)
# print(tomato_topping.weight)
#
# mushrooms_topping = Topping("Mushroom", 75)
# print(mushrooms_topping.topping_type)
# print(mushrooms_topping.weight)
#
# mozzarella_topping = Topping("Mozzarella", 80)
# print(mozzarella_topping.topping_type)
# print(mozzarella_topping.weight)
#
# cheddar_topping = Topping("Cheddar", 150)
#
# pepperoni_topping = Topping("Pepperoni", 120)
#
# white_flour_dough = Dough("White Flour", "Mixing", 200)
# print(white_flour_dough.flour_type)
# print(white_flour_dough.weight)
# print(white_flour_dough.baking_technique)
#
# whole_wheat_dough = Dough("Whole Wheat Flour", "Mixing", 200)
# print(whole_wheat_dough.weight)
# print(whole_wheat_dough.flour_type)
# print(whole_wheat_dough.baking_technique)
#
# p = Pizza("Margherita", whole_wheat_dough, 1)
# p.add_topping(tomato_topping)
# p.add_topping(tomato_topping)
# print(p.calculate_total_weight())
#
# p.add_topping(mozzarella_topping)
# print(p.calculate_total_weight())
#
# p.add_topping(mozzarella_topping)
#
import unittest
from pizza_maker.dough import Dough
from pizza_maker.pizza import Pizza
from pizza_maker.topping import Topping
class Tests(unittest.TestCase):
    """Unit tests for the pizza_maker Topping, Dough and Pizza classes.

    Each *_error test pins the exact ValueError message raised by the
    corresponding validator.
    """

    def test_topping_init(self):
        # Constructor stores type and weight unchanged.
        t = Topping("Tomato", 20)
        self.assertEqual(t.topping_type, "Tomato")
        self.assertEqual(t.weight, 20)

    def test_topping_topping_type_error(self):
        with self.assertRaises(ValueError) as ve:
            t = Topping("", 20)
        self.assertEqual("The topping type cannot be an empty string", str(ve.exception))

    def test_topping_weight_error(self):
        with self.assertRaises(ValueError) as ve:
            t = Topping("a", -1)
        self.assertEqual("The weight cannot be less or equal to zero", str(ve.exception))

    def test_dough_init(self):
        d = Dough("Sugar", "Mixing", 20)
        self.assertEqual(d.flour_type, "Sugar")
        self.assertEqual(d.baking_technique, "Mixing")
        self.assertEqual(d.weight, 20)

    def test_dough_flour_type_error(self):
        with self.assertRaises(ValueError) as ve:
            d = Dough("", 'ab', 20)
        self.assertEqual("The flour type cannot be an empty string", str(ve.exception))

    def test_dough_baking_technique_error(self):
        with self.assertRaises(ValueError) as ve:
            d = Dough("ab", '', 20)
        self.assertEqual("The baking technique cannot be an empty string", str(ve.exception))

    def test_dough_weight_error(self):
        with self.assertRaises(ValueError) as ve:
            t = Dough("a", 'ab', -1)
        self.assertEqual("The weight cannot be less or equal to zero", str(ve.exception))

    def test_pizza_init(self):
        d = Dough("Sugar", "Mixing", 20)
        p = Pizza("Burger", d, 5)
        self.assertEqual(p.name, "Burger")
        self.assertEqual(p.dough, d)
        # A new pizza starts with no toppings.
        self.assertEqual(len(p.toppings), 0)
        self.assertEqual(p.toppings_capacity, 5)

    def test_pizza_add_topping_error(self):
        # Adding beyond toppings_capacity must raise.
        d = Dough("Sugar", "Mixing", 20)
        t = Topping("Tomato", 20)
        p = Pizza("Burger", d, 1)
        p.add_topping(t)
        with self.assertRaises(ValueError) as ctx:
            p.add_topping(t)
        self.assertEqual("Not enough space for another topping", str(ctx.exception))

    def test_pizza_add_topping_new(self):
        d = Dough("Sugar", "Mixing", 20)
        t = Topping("Tomato", 20)
        p = Pizza("Burger", d, 200)
        p.add_topping(t)
        # toppings maps topping type -> accumulated weight.
        self.assertEqual(p.toppings["Tomato"], 20)
        self.assertEqual(len(p.toppings), 1)

    def test_pizza_add_topping_update(self):
        # Re-adding the same topping accumulates its weight.
        d = Dough("Sugar", "Mixing", 20)
        t = Topping("Tomato", 20)
        p = Pizza("Burger", d, 200)
        p.add_topping(t)
        p.add_topping(t)
        self.assertEqual(p.toppings["Tomato"], 40)

    def test_pizza_calculate_total_weight(self):
        # Total = dough weight (20) + toppings (2 x 20).
        d = Dough("Sugar", "Mixing", 20)
        t = Topping("Tomato", 20)
        p = Pizza("Burger", d, 200)
        p.add_topping(t)
        p.add_topping(t)
        self.assertEqual(p.calculate_total_weight(), 60)
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>.py`.
    unittest.main()
import os
import glob
import pickle
import re
# Our numerical workhorses
import numpy as np
import pandas as pd
# Import the project utils
import sys
sys.path.insert(0, '../')
import NB_sortseq_utils as utils
# Import matplotlib stuff for plotting
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from IPython.core.pylabtools import figsize
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Logo-generating module
import anylogo
utils.set_plotting_style_emat()
#===============================================================================
# Set output directory
#===============================================================================
output = 'output_figs/'
#===============================================================================
# directory where emat csv files are contained
#===============================================================================
datadir = '../sortseq/20160707_purT_xylE_dgoR/'
#===============================================================================
# plot energy matrices with logos on top.
#===============================================================================
# Set color scale - I want the colorbar to be symmetric and will pick values
# that seem appropriate for all matrices.
emat_min = -0.4
emat_max = 0.4
mid_val = 0.0
# Load the mean energy matrix and keep only the four base columns.
energy_df = pd.read_csv(datadir + '20160707_dgoR_MG1655_M9glucose_na_mut1_4bins_RNAP_emat_mean.csv')
energy_df = energy_df[['A','C','G','T']]
# Wild-type sequence spanned by the matrix; the second (shorter) assignment
# deliberately overrides the first and is the one actually plotted.
seq = 'GTACTACAAAGTTGCCGCGTTATGCATCGATCGGGGTAAAGTAGAGAAGAACATACAGAG'
seq = 'TGCCGCGTTATGCATCGATCGGGGTAAAGTA'
plt.figure(figsize=utils.cm2inch((0.18*60 + 0.2,2)))
ax = plt.gca()
ax.set_xticks([])
# Heatmap of the matrix, zeroed at the wild-type base in each column.
im = ax.imshow(utils.zero_matrix_WT(np.array(energy_df.T), seq),
               interpolation='none',
               cmap='RdBu_r',
               clim=(emat_min, emat_max),
               norm = utils.MidpointNormalize(midpoint = mid_val,
                                              vmin = emat_min, vmax = emat_max),
               zorder=100,
               aspect='auto')
ax.set_ylabel('')
ax.yaxis.set_tick_params(length=0)
# create an axes on the right side of ax. The width of cax will be 3%
# of ax and the padding between cax and ax will be fixed at 0.05 inch.
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="3%", pad=0.05)
# Bug fix: ticks were previously at [-0.5, 0, 0.5], outside the
# (-0.4, 0.4) color limits, so the outer ticks never rendered while three
# labels ('-0.4', '0', '0.4') were supplied. Place the ticks at the values
# the labels actually name.
cbar = plt.colorbar(im, cax=cax, ticks=[-0.4, 0, 0.4])
cbar.ax.set_yticklabels(['-0.4', '0', '0.4'], fontsize=6, fontname='Arial')
cbar.outline.set_visible(False)
cbar.ax.tick_params(axis=u'both', which=u'both', length=0)
plt.tight_layout()
plt.savefig(output + 'figS6_dgoR_downstreamRNAP_matrix_logo.pdf')
# save energy matrix using nearest interpolation
plt.figure()
ax = plt.gca()
L = len(seq)
im = ax.imshow(utils.zero_matrix_WT(np.array(energy_df.T), seq),
               interpolation='nearest',
               cmap='RdBu_r',
               clim=(emat_min, emat_max),
               norm = utils.MidpointNormalize(midpoint = mid_val,
                                              vmin = emat_min, vmax = emat_max))
ax.axis('off')
plt.savefig(output + 'figS9_dgoR_downstreamRNAP_matrixonly.pdf')
|
import time
import machine
# reboot the board after some delay (in seconds)
def reboot(delay=5):
    """Print a notice, wait `delay` seconds, then hard-reset the board.

    The delay gives the user a chance to see the message (and interrupt)
    before machine.reset() cuts execution.
    """
    print('rebooting ...')
    time.sleep(delay)
    machine.reset()
def string_to_millis(string):
    """Convert a duration string to milliseconds.

    The format is `Xd Yh Zm Ws` where X is days, Y hours, Z minutes and
    W seconds; every component is optional.

    Examples:
        ''      -> 0
        '5m'    -> 5 * 60 * 1000 = 300000
        '2h 3s' -> 2 * 60 * 60 * 1000 + 3 * 1000 = 7203000

    Robustness fix: components are now split on any whitespace run, so
    repeated spaces no longer crash on int('').
    """
    if not string:
        return 0
    unit_ms = {
        'd': 24 * 60 * 60 * 1000,
        'h': 60 * 60 * 1000,
        'm': 60 * 1000,
        's': 1000,
    }
    value = 0
    # split() (no argument) collapses runs of whitespace, unlike split(' ').
    for item in string.split():
        n = int(item[:-1])
        # Unknown unit suffixes contribute nothing, matching the original
        # if-chain behavior.
        value += n * unit_ms.get(item[-1], 0)
    return value
|
# Copyright 2014-2020 Chris Cummins <chrisc.101@gmail.com>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This modules defines an inter-process locking scheduler for exclusive access
to CUDA GPUs.
A frequent pattern for workloads is to require exclusive access to a GPU for the
duration of a process. This module implements a simple inter-process locking
scheduler to enable that. Scripts can call LockExclusiveProcessGpuAccess() to
get access to a single GPU which is set through CUDA_VISIBLE_DEVICES.
from labm8.py import gpu_scheduler
gpu_schduler.LockExclusiveProcessGpuAccess()
# go nuts ...
This does of course assume that all GPU users go through this interface. There
is nothing stopping another user or process from coming along and violating the
lock granted to a script which has "exclusive" GPU access.
The default arguments for LockExclusiveProcessGpuAccess() work transparently
on systems with no GPUs. If no GPUs are available, the return value is None.
Alternatively, use requires_gpus=True argument to raise an OSError.
By default, GPUs are disabled during testing (as determined by the presence
of $TEST_TARGET which bazel sets). To enable the GPUs for tests, set the
$TEST_WITH_GPU environment variable to 1. Please note that when executing large
numbers of tests concurrently, they may have to queue and execute sequentially,
causing unexpected test timeouts.
"""
import contextlib
import functools
import os
import pathlib
import time
from typing import Any
from typing import Dict
from typing import Optional
import fasteners
import GPUtil
from labm8.py import app
from labm8.py import humanize
FLAGS = app.FLAGS

# Directory holding one lock file per GPU id; shared by every process that
# participates in the scheduling protocol on this machine.
_LOCK_DIR = pathlib.Path("/tmp/phd/labm8/gpu_scheduler_locks")

# Set CUDA_DEVICE_ORDER so the IDs assigned by CUDA match those from
# nvidia-smi.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"


class NoGpuAvailable(OSError):
  """Error raised if no GPU is available."""
class GpuScheduler(object):
  """A simple interprocess locking scheduler for GPUs.

  Wraps a mapping from GPU to an inter-process lock object. A GPU is
  "available" when its lock can be acquired without blocking.
  """

  def __init__(self, gpu_locks: Dict[GPUtil.GPU, Any]):
    self.gpu_locks = gpu_locks

  def TryToAcquireGpu(self, gpu: GPUtil.GPU) -> bool:
    """Attempt a non-blocking acquire of the given GPU's lock.

    Returns True if the lock was acquired. Raises ValueError for a GPU
    this scheduler does not manage.
    """
    if gpu not in self.gpu_locks:
      raise ValueError(f"GPU not found: {gpu}")
    return self.gpu_locks[gpu].acquire(blocking=False)

  def ReleaseGpu(self, gpu: GPUtil.GPU) -> None:
    """Release a previously-acquired GPU lock."""
    if gpu not in self.gpu_locks:
      raise ValueError(f"GPU not found: {gpu}")
    self.gpu_locks[gpu].release()

  def BlockOnAvailableGpu(
    self, timeout: Optional[int] = None, print_status: bool = True
  ):
    """Block until some GPU is locked; set CUDA_VISIBLE_DEVICES; return it.

    Polls every half second. Raises NoGpuAvailable if `timeout` seconds
    elapse without acquiring any lock.
    """
    start_time = time.time()
    end_time = start_time + (timeout or 0)
    while True:
      for gpu in self.gpu_locks:
        if self.TryToAcquireGpu(gpu):
          if print_status:
            print("\r")  # terminate the in-place "waiting" status line
          wait = ""
          if time.time() - start_time > 1:
            wait = f" after {humanize.Duration(time.time() - start_time)} wait"
          app.Log(1, "Acquired GPU %s (%s)%s", gpu.id, gpu.name, wait)
          os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu.id)
          return gpu
      if timeout and time.time() > end_time:
        # Bug fix: humanize.Duration was previously interpolated without
        # being called, printing the class object instead of the wait time.
        raise NoGpuAvailable(
          f"No GPU available after waiting for "
          f"{humanize.Duration(time.time() - start_time)}"
        )
      if print_status:
        print(
          f"\rwaiting on a free gpu ... {time.time() - start_time:.1f}s",
          end="",
        )
      time.sleep(0.5)
@functools.lru_cache(1)
def GetDefaultScheduler() -> GpuScheduler:
  """Construct (once per process) a scheduler over all GPUs GPUtil reports.

  Raises NoGpuAvailable if no GPUs exist, or when running under bazel test
  (TEST_TARGET set) without TEST_WITH_GPU=1.
  """
  gpus = GPUtil.getGPUs()
  if not gpus:
    raise NoGpuAvailable("No GPUs available")
  if os.environ.get("TEST_TARGET") and os.environ.get("TEST_WITH_GPU") != "1":
    raise NoGpuAvailable("GPUs disabled for tests")
  app.Log(
    2, "Creating default scheduler for %s", humanize.Plural(len(gpus), "GPU")
  )
  # Robustness fix: ensure the lock directory exists before creating lock
  # files in it — fasteners does not create parent directories itself.
  _LOCK_DIR.mkdir(parents=True, exist_ok=True)
  return GpuScheduler(
    {gpu: fasteners.InterProcessLock(_LOCK_DIR / str(gpu.id)) for gpu in gpus}
  )
# This function is memoized since we can't acquire the same lock twice:
# repeated calls within one process return the GPU that was already locked
# instead of deadlocking on a second acquire.
@functools.lru_cache(1)
def LockExclusiveProcessGpuAccess(
  scheduler: Optional[GpuScheduler] = None,
  timeout: Optional[int] = None,
  print_status: bool = True,
  require_gpus: bool = False,
) -> Optional[GPUtil.GPU]:
  """Lock exclusive access to a single GPU for the life of the process.

  Returns the locked GPU, or None when no GPU exists and require_gpus is
  False. Raises NoGpuAvailable when require_gpus is True and none exist.
  """
  try:
    scheduler = scheduler or GetDefaultScheduler()
  except NoGpuAvailable:
    if require_gpus:
      raise  # bare raise preserves the original traceback
    return None
  gpu = scheduler.BlockOnAvailableGpu(
    timeout=timeout, print_status=print_status
  )
  return gpu
@contextlib.contextmanager
def ExclusiveGpuAccess(
  scheduler: Optional[GpuScheduler] = None,
  timeout: Optional[int] = None,
  print_status: bool = True,
  require_gpus: bool = False,
) -> Optional[GPUtil.GPU]:
  """Get exclusive access to a GPU within a scoped session.

  Yields the locked GPU (released on scope exit), or None when no GPU
  exists and require_gpus is False. Raises NoGpuAvailable when
  require_gpus is True and none exist.
  """
  try:
    scheduler = scheduler or GetDefaultScheduler()
  except NoGpuAvailable:
    if require_gpus:
      raise
    # Bug fix: a @contextmanager generator must yield exactly once.
    # The previous `return None` here stopped the generator before its
    # first yield, making contextlib raise
    # RuntimeError("generator didn't yield") in the no-GPU case.
    yield None
    return
  gpu = scheduler.BlockOnAvailableGpu(
    timeout=timeout, print_status=print_status
  )
  try:
    yield gpu
  finally:
    scheduler.ReleaseGpu(gpu)
    app.Log(1, "Released GPU %s (%s)", gpu.id, gpu.name)
|
# this file is present to add this folder to sys.path so tests work
# https://stackoverflow.com/questions/50155464/using-pytest-with-a-src-layer
|
import pandas as pd
import matplotlib.pyplot as plt
# x coordinates: map the 1147 frame samples onto a time axis
# (presumably a 382-second recording — TODO confirm against the capture).
xco = []
for i in range(1147):
    xco.append(i*382/1147)
df1 = pd.read_csv("Density_Values_Baseline_Dynamic.csv")
df2 = pd.read_csv("Density_Values_M5.csv")
plt.plot(xco, df1["Density Values"])
# NOTE(review): the dense trace is scaled x20, presumably to make the two
# curves visually comparable — confirm the factor.
plt.plot(xco, df2["Density Values"]*20)
plt.xlabel("Time (in seconds)")
plt.ylabel("Density Values")
plt.title("Comparison between Sparse and Dense Optical Flow")
plt.savefig("Method_5_Sparse_vs_Dense.png")
plt.show()
### ERROR computed for FramesToSkip = 5 is:  (dangling note — value never recorded)
from flask import Blueprint, request
from .AuthController import find_user, check_user, signup, check_token, get_home
# Auth blueprint: every route below is served under the /auth URL prefix,
# with templates looked up in auth_templates/.
auth = Blueprint('auth', __name__, template_folder='auth_templates', url_prefix='/auth')
@auth.route('/<path:path>', methods=["POST"])
def validate(path):
    """Dispatch POST auth actions by path segment.

    /auth/username, /auth/email -> availability lookup of the posted value
    /auth/login                 -> credential check
    /auth/signup                -> account creation
    Anything else               -> 404
    """
    if path in ['username', 'email']:
        return find_user(path, request.form[path])
    elif path == 'login':
        return check_user(request.form)
    elif path == 'signup':
        return signup(request.form)
    # Bug fix: an unmatched path previously fell through returning None,
    # which makes Flask raise a TypeError. Return an explicit 404 instead.
    return ('Not Found', 404)
@auth.route('/confirm_email/<token>')
def confirm_email(token):
    # Delegate email-confirmation token checking to the auth controller.
    return check_token(token)
@auth.route('/<path:path>', methods=["GET"])
def home(path):
    """Serve GET auth pages; only /auth/home is defined."""
    if path == 'home':
        return get_home()
    # Bug fix: any other path previously returned None, which makes Flask
    # raise a TypeError. Return an explicit 404 instead.
    return ('Not Found', 404)
|
from datetime import datetime
from lxml import etree
from django.urls import reverse
from share.oaipmh.util import format_datetime, SubEl, ns, nsmap
class OAIRenderer:
    """Renders OAI-PMH verb responses as serialized XML documents.

    One instance per request; `kwargs` holds the request arguments that
    are echoed back in the <request> element of the envelope.
    """

    def __init__(self, repository, request):
        self.repository = repository
        self.request = request
        self.kwargs = {}

    def identify(self, earliest_datestamp):
        """Render the Identify verb response."""
        identify = etree.Element(ns('oai', 'Identify'))
        SubEl(identify, ns('oai', 'repositoryName'), self.repository.NAME)
        SubEl(identify, ns('oai', 'baseURL'), self.request.build_absolute_uri(reverse('oai-pmh')))
        SubEl(identify, ns('oai', 'protocolVersion'), '2.0')
        if earliest_datestamp:
            SubEl(identify, ns('oai', 'earliestDatestamp'), format_datetime(earliest_datestamp))
        SubEl(identify, ns('oai', 'deletedRecord'), 'no')
        SubEl(identify, ns('oai', 'granularity'), self.repository.GRANULARITY)
        for email in self.repository.ADMIN_EMAILS:
            SubEl(identify, ns('oai', 'adminEmail'), email)
        description = SubEl(identify, ns('oai', 'description'))
        identifier = SubEl(
            description,
            ns('oai', 'oai-identifier'),
            attrib={
                ns('xsi', 'schemaLocation'): 'http://www.openarchives.org/OAI/2.0/oai-identifier http://www.openarchives.org/OAI/2.0/oai-identifier.xsd',
            },
            nsmap=nsmap('xsi', default='oai-identifier'),
        )
        SubEl(identifier, ns('oai-identifier', 'scheme'), 'oai')
        SubEl(identifier, ns('oai-identifier', 'repositoryIdentifier'), self.repository.REPOSITORY_IDENTIFIER)
        SubEl(identifier, ns('oai-identifier', 'delimiter'), self.repository.IDENTIFER_DELIMITER)
        SubEl(identifier, ns('oai-identifier', 'sampleIdentifier'), self.repository.oai_identifier(1))
        return self._render(identify)

    def listMetadataFormats(self, formats):
        """Render ListMetadataFormats from {prefix: {'schema', 'namespace'}}."""
        list_formats = etree.Element(ns('oai', 'ListMetadataFormats'))
        for metadata_prefix, format_info in formats.items():
            metadata_format = SubEl(list_formats, ns('oai', 'metadataFormat'))
            SubEl(metadata_format, ns('oai', 'metadataPrefix'), metadata_prefix)
            SubEl(metadata_format, ns('oai', 'schema'), format_info['schema'])
            SubEl(metadata_format, ns('oai', 'metadataNamespace'), format_info['namespace'])
        return self._render(list_formats)

    def listSets(self, sets):
        """Render ListSets from an iterable of (spec, name) pairs."""
        list_sets = etree.Element(ns('oai', 'ListSets'))
        for spec, name in sets:
            # Renamed from `set`, which shadowed the builtin.
            set_element = SubEl(list_sets, ns('oai', 'set'))
            SubEl(set_element, ns('oai', 'setSpec'), spec)
            SubEl(set_element, ns('oai', 'setName'), name)
        return self._render(list_sets)

    def listIdentifiers(self, records, next_token):
        """Render ListIdentifiers: record headers plus the resumption token."""
        list_identifiers = etree.Element(ns('oai', 'ListIdentifiers'))
        for record in records:
            list_identifiers.append(self._header(record))
        SubEl(list_identifiers, ns('oai', 'resumptionToken'), next_token)
        return self._render(list_identifiers)

    def listRecords(self, records, next_token):
        """Render ListRecords: full records plus the resumption token."""
        list_records = etree.Element(ns('oai', 'ListRecords'))
        for record in records:
            list_records.append(self._record(record))
        SubEl(list_records, ns('oai', 'resumptionToken'), next_token)
        return self._render(list_records)

    def getRecord(self, record):
        """Render the GetRecord verb response for a single record."""
        get_record = etree.Element(ns('oai', 'GetRecord'))
        get_record.append(self._record(record))
        return self._render(get_record)

    def errors(self, errors):
        """Render one <error> element per error, with no verb payload."""
        elements = []
        for error in errors:
            element = etree.Element(ns('oai', 'error'), code=error.code)
            element.text = error.description
            elements.append(element)
        return self._render(*elements)

    def _render(self, *elements):
        """Wrap payload elements in the OAI-PMH envelope and serialize.

        NOTE: pops 'verb' out of self.kwargs, so a renderer instance is
        effectively single-use per response.
        """
        root = etree.Element(
            ns('oai', 'OAI-PMH'),
            attrib={
                ns('xsi', 'schemaLocation'): 'http://www.openarchives.org/OAI/2.0/ http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd',
            },
            nsmap=nsmap('xsi', default='oai'),
        )
        # NOTE(review): datetime.now() is naive local time; OAI-PMH expects
        # UTC — confirm format_datetime performs the conversion.
        SubEl(root, ns('oai', 'responseDate'), format_datetime(datetime.now()))
        request = SubEl(root, ns('oai', 'request'), self.request.build_absolute_uri().rpartition('?')[0])
        verb = self.kwargs.pop('verb', None)
        if verb:
            request.set('verb', verb)
        for k, v in self.kwargs.items():
            request.set(k, v)
        for element in elements:
            root.append(element)
        return etree.tostring(root, encoding='utf-8', xml_declaration=True)

    def _header(self, record):
        """Build the <header> element shared by list/get responses."""
        header = etree.Element(ns('oai', 'header'))
        SubEl(header, ns('oai', 'identifier'), self.repository.oai_identifier(record))
        # Bug fix: a stray trailing comma previously turned this statement
        # into a discarded 1-tuple (harmless at runtime, but a typo).
        SubEl(header, ns('oai', 'datestamp'), format_datetime(record.date_modified))
        SubEl(header, ns('oai', 'setSpec'), record.suid.source_config.source.name)
        return header

    def _record(self, record):
        """Build a full <record>: header plus embedded formatted metadata."""
        record_element = etree.Element(ns('oai', 'record'))
        record_element.append(self._header(record))
        metadata = SubEl(record_element, ns('oai', 'metadata'))
        metadata.append(etree.fromstring(record.formatted_metadata))
        # TODO SHARE-730 Add <about><provenance><originDescription> elements
        return record_element
|
# -*- coding: utf-8 -*-
"""
----
-----------
- Level 314 -
-----------
----
@author: Alexander Williams
date: Sun May 3 10:16:03 2020
"""
import numpy as np
import matplotlib.pyplot as plt
def map_orientation(cur_orientation, cur_count):
    """Enumerate every valid board position of one piece orientation.

    `cur_orientation` is the piece bitmask positioned in the upper-left
    corner of the 6x6 board (bit i = row i//6, column i%6). The walk
    slides the piece right, then down, appending every on-board position
    to the global `peice_orientation_list` starting at index `cur_count`.
    Returns the next free index.
    """
    # Bitmask of the right-most column: a piece touching it cannot shift
    # right any further.
    # . . . . . x
    # . . . . . x
    # . . . . . x
    # . . . . . x
    # . . . . . x
    # . . . . . x
    right_edge = 34905131040
    # Bitmask of the bottom row: a piece touching it cannot shift down.
    # . . . . . .
    # . . . . . .
    # . . . . . .
    # . . . . . .
    # . . . . . .
    # x x x x x x
    bottom_edge = 67645734912
    # We check whether each position of the game piece is valid by testing
    # overlap against the right and bottom edges with a logical AND (&):
    # the AND is 0 when there is no overlap and non-zero when there is.
    # Pieces must be passed in positioned in the upper-left corner so that
    # this walk can move right and down to check all positions.
    room_to_move_right = True
    room_to_move_down = True
    safe_down = True
    while safe_down:
        room_to_move_right = True
        safe_right = True
        row_start = cur_orientation  # remember the row anchor for the down-shift
        while safe_right:
            peice_orientation_list[cur_count] = cur_orientation
            cur_count += 1
            # Shifting the piece left by 1 bit moves it one column right:
            # . x . . . .
            # x x x . . . = 450
            # . . x . . .
            # . x x x . . = 900
            if room_to_move_right:
                cur_orientation = cur_orientation << 1
                room_to_move_right = ((cur_orientation & right_edge) == 0)
            else:
                safe_right = False
        # Shifting left by 6 bits moves the piece one row down
        # (equivalent to multiplying by 2**6 = 64):
        # . x . . . .
        # x x x . . . = 450
        # . x . . . .
        # x x x . . . = 28,800
        if room_to_move_down:
            cur_orientation = row_start << 6
            room_to_move_down = ((cur_orientation & bottom_edge) == 0)
        else:
            safe_down = False
    return cur_count
def SolveGame(game_id, max_wins = 10):
    """Brute-force search for piece placements solving the given game.

    `game_id` is the peg-position bitfield. Pieces 1..8 are tried in every
    pre-computed orientation (po_start/po_end index into the global
    peice_orientation_list); the single-cell piece 9 always fits in the
    one remaining hole. The search stops after `max_wins` solutions.

    Returns (result_hdr, result_detail) where result_hdr = [game_id,
    win_count] and each result_detail row = [game_id, piece-1 bits, ...,
    piece-9 bits].
    """
    # game_id, number of wins
    result_hdr = [game_id, 0]
    # game_id (peg_bits), peice bits
    # NOTE(review): these 10 rows alias a single list, but every win
    # rebinds a row to single_result.copy(), so the aliasing is harmless.
    result_detail = [[0] * (total_peices + 1)] * 10
    single_result = [0] * (total_peices + 1)
    peice_counter = [0] * total_peices
    full_board = (2**36) - 1
    board_now = [0] * total_peices
    single_result[0]=game_id
    try_count = 0
    # initialize board
    # peice 1
    for peice_counter[0] in range(po_start[0], po_end[0]):
        try_count += 1
        if ((peice_orientation_list[peice_counter[0]] & game_id) == 0):
            board_now[0] = game_id | peice_orientation_list[peice_counter[0]]
            single_result[1]=peice_orientation_list[peice_counter[0]]
            # peice 2
            for peice_counter[1] in range(po_start[1], po_end[1]):
                try_count += 1
                if ((peice_orientation_list[peice_counter[1]] & board_now[0]) == 0):
                    board_now[1] = board_now[0] | peice_orientation_list[peice_counter[1]]
                    single_result[2]=peice_orientation_list[peice_counter[1]]
                    # peice 3
                    for peice_counter[2] in range(po_start[2], po_end[2]):
                        try_count += 1
                        if ((peice_orientation_list[peice_counter[2]] & board_now[1]) == 0):
                            board_now[2] = board_now[1] | peice_orientation_list[peice_counter[2]]
                            single_result[3]=peice_orientation_list[peice_counter[2]]
                            # peice 4
                            for peice_counter[3] in range(po_start[3], po_end[3]):
                                try_count += 1
                                if ((peice_orientation_list[peice_counter[3]] & board_now[2]) == 0):
                                    board_now[3] = board_now[2] | peice_orientation_list[peice_counter[3]]
                                    single_result[4]=peice_orientation_list[peice_counter[3]]
                                    # peice 5
                                    for peice_counter[4] in range(po_start[4], po_end[4]):
                                        try_count += 1
                                        if ((peice_orientation_list[peice_counter[4]] & board_now[3]) == 0):
                                            board_now[4] = board_now[3] | peice_orientation_list[peice_counter[4]]
                                            single_result[5]=peice_orientation_list[peice_counter[4]]
                                            # peice 6
                                            for peice_counter[5] in range(po_start[5], po_end[5]):
                                                try_count += 1
                                                if ((peice_orientation_list[peice_counter[5]] & board_now[4]) == 0):
                                                    board_now[5] = board_now[4] | peice_orientation_list[peice_counter[5]]
                                                    single_result[6]=peice_orientation_list[peice_counter[5]]
                                                    # peice 7
                                                    for peice_counter[6] in range(po_start[6], po_end[6]):
                                                        try_count += 1
                                                        if ((peice_orientation_list[peice_counter[6]] & board_now[5]) == 0):
                                                            board_now[6] = board_now[5] | peice_orientation_list[peice_counter[6]]
                                                            single_result[7]=peice_orientation_list[peice_counter[6]]
                                                            # peice 8
                                                            for peice_counter[7] in range(po_start[7], po_end[7]):
                                                                try_count += 1
                                                                if ((peice_orientation_list[peice_counter[7]] & board_now[6]) == 0):
                                                                    # NOTE(review): try_count is bumped twice on a
                                                                    # piece-8 hit (here and at loop top) — confirm
                                                                    # that is intended.
                                                                    try_count += 1
                                                                    board_now[7] = board_now[6] | peice_orientation_list[peice_counter[7]]
                                                                    single_result[8]=peice_orientation_list[peice_counter[7]]
                                                                    # peice 9 automatically fits
                                                                    # find it's location and save this winner
                                                                    single_result[9] = board_now[7] ^ full_board
                                                                    result_detail[result_hdr[1]]=single_result.copy()
                                                                    result_hdr[1] += 1
                                                                    if result_hdr[1] == max_wins:
                                                                        break
                                                            # Cascade the early exit up through every loop level.
                                                            if result_hdr[1] == max_wins:
                                                                break
                                                    if result_hdr[1] == max_wins:
                                                        break
                                            if result_hdr[1] == max_wins:
                                                break
                                    if result_hdr[1] == max_wins:
                                        break
                            if result_hdr[1] == max_wins:
                                break
                    if result_hdr[1] == max_wins:
                        break
            if result_hdr[1] == max_wins:
                break
    print('Number of tries: {:,}'.format(try_count))
    return result_hdr, result_detail
def StringToId(peg_positions):
    """Convert a list of peg labels (e.g. 'A3') to the game bitfield.

    Board cells are labeled A1..F6 row-major; cell k maps to bit k.
    """
    labels = [row + col
              for row in ['A', 'B', 'C', 'D', 'E', 'F']
              for col in ['1', '2', '3', '4', '5', '6']]
    bitfield = 0
    for peg in peg_positions:
        bitfield |= 2 ** labels.index(peg)
    return bitfield
def IdToString(peg_bitfield):
    """Convert a peg-position bitfield back to a list of peg labels.

    Returns a fixed-length list of 7 entries, padded with '' after the
    set positions. NOTE(review): more than 7 set bits would raise
    IndexError — confirm the 7-peg limit is intended.
    """
    labels = [row + col
              for row in ['A', 'B', 'C', 'D', 'E', 'F']
              for col in ['1', '2', '3', '4', '5', '6']]
    result_string = [''] * 7
    out_pos = 0
    # Bits 0..35 correspond to labels A1..F6 in row-major order.
    for bit in range(36):
        if peg_bitfield & (2 ** bit):
            result_string[out_pos] = labels[bit]
            out_pos += 1
    return result_string
def PlotGame(result_detail):
    """Plot one solved game on the 6x6 board.

    result_detail = [game_id (peg bits), piece-1 bits, ..., piece-9 bits].
    Pegs, each piece, and empty cells get distinct colormap values; pegs
    are additionally marked with a bold 'O'.
    """
    set_bits = 0
    board_array = np.zeros((6,6))
    # pegs
    board_array = board_array + (((board_sieve & result_detail[0]) == board_sieve) * peg_cmap_value)
    set_bits = set_bits | result_detail[0]
    # game peices
    for i in range(total_peices):
        board_array = board_array + (((board_sieve & result_detail[i+1]) == board_sieve) * peice_cmap_value[i])
        set_bits = set_bits | result_detail[i+1]
    # empty spots
    # Bug fix: the mask was previously built with `(2^36) - 1` — `^` is
    # XOR in Python, giving 37, not the full-board mask `(2**36) - 1`
    # used elsewhere (see full_board in SolveGame) — and the cell test
    # used `^` where the peg/piece lines above use `&`.
    unset_bits = set_bits ^ ((2**36) - 1)
    board_array = board_array + (((board_sieve & unset_bits) == board_sieve) * empty_cmap_value)
    # plot board
    plt.matshow(board_array, cmap=plt.cm.tab20)
    # show labels
    row_labels = ['A', 'B', 'C', 'D', 'E', 'F']
    col_labels = range(1, 7)
    plt.xticks(range(6), col_labels)
    plt.yticks(range(6), row_labels)
    # mark the pegs
    for row in range(6):
        for col in range(6):
            if (2**(row * 6 + col) & result_detail[0]) != 0:
                plt.text(col, row, 'O', horizontalalignment='center',
                         verticalalignment='center', fontweight = 'bold', fontsize = 'xx-large')
    plt.show()
# start main program logic
# initialize game peices meta variables
total_peices = 9
total_orientations = 625  # total slots across all pieces' stored positions
peice_orientation_list = [0] * total_orientations
# po_start[k]/po_end[k] delimit piece k's slice of peice_orientation_list.
po_start = [0] * total_peices
po_end = [0] * total_peices
peice_name = ['red', 'cyan', 'grey', 'yellow', 'green', 'orange', 'purple', 'brown', 'blue']
# set up color map for plotting results: one tab20 color per piece.
cmap_map = np.linspace(0,1,20)
peice_cmap_value = np.zeros(total_peices)
peice_cmap_value[0] = cmap_map[6]
peice_cmap_value[1] = cmap_map[19]
peice_cmap_value[2] = cmap_map[14]
peice_cmap_value[3] = cmap_map[17]
peice_cmap_value[4] = cmap_map[4]
peice_cmap_value[5] = cmap_map[2]
peice_cmap_value[6] = cmap_map[8]
peice_cmap_value[7] = cmap_map[10]
peice_cmap_value[8] = cmap_map[0]
peg_cmap_value = cmap_map[3]
empty_cmap_value = cmap_map[15]
# 6x6 array of single-bit masks: board_sieve[r, c] == 2**(r*6 + c).
board_sieve = np.array([2**i for i in range(36)]).reshape((6,6))
# set up game peice orientations
# Each bitmask below is one rotation/reflection of a piece anchored in the
# upper-left corner; map_orientation expands it to every board position.
cur_orientation_count = 0
""" red peice (1) orientation (1)
1.1
"""
po_start[0] = cur_orientation_count
this_peice = 387
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
# 1.2
this_peice = 4290
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
# 1.3
this_peice = 198
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
# 1.4
this_peice = 8385
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
po_end[0] = cur_orientation_count
""" cyan peice (2) orientation (1)
2.1
"""
po_start[1] = cur_orientation_count
this_peice = 452
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
# 2.2
this_peice = 8323
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
# 2.3
this_peice = 71
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
# 2.4
this_peice = 12353
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
# 2.5
this_peice = 263
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
# 2.6
this_peice = 4163
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
# 2.7
this_peice = 449
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
# 2.8
this_peice = 12418
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
po_end[1] = cur_orientation_count
""" grey peice (3) orientation (1)
3.1
"""
po_start[2] = cur_orientation_count
this_peice = 15
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
# 3.2
this_peice = 266305
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
po_end[2] = cur_orientation_count
""" yellow peice (4) orientation (1)
4.1
"""
po_start[3] = cur_orientation_count
this_peice = 450
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
# 4.2
this_peice = 4289
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
# 4.3
this_peice = 135
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
# 4.4
this_peice = 8386
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
po_end[3] = cur_orientation_count
""" green peice (5) orientation (1)
5.1
"""
po_start[4] = cur_orientation_count
this_peice = 195
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
po_end[4] = cur_orientation_count
""" orange peice (6) orientation (1)
6.1
"""
po_start[5] = cur_orientation_count
this_peice = 7
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
# 6.2
this_peice = 4161
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
po_end[5] = cur_orientation_count
""" purple peice (7) orientation (1)
7.1
"""
po_start[6] = cur_orientation_count
this_peice = 193
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
# 7.2
this_peice = 194
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
# 7.3
this_peice = 131
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
# 7.4
this_peice = 67
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
po_end[6] = cur_orientation_count
""" brown peice (8) orientation (1)
8.1
"""
po_start[7] = cur_orientation_count
this_peice = 3
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
# 8.2
this_peice = 65
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
po_end[7] = cur_orientation_count
""" blue peice (9) orientation (1)
9.1
"""
po_start[8] = cur_orientation_count
this_peice = 1
cur_orientation_count = map_orientation(this_peice, cur_orientation_count)
po_end[8] = cur_orientation_count
# print(cur_orientation_count)
# print(peice_orientation_list)
# print(po_start, po_end)
# Solve and plot one specific puzzle configuration.
game_id = StringToId(['A3', 'B2', 'B4', 'C2', 'C3', 'C5', 'C6'])
(res_hdr, res_detail) = SolveGame(game_id, 10)
print(res_hdr, res_detail)
for i in range(res_hdr[1]):
    PlotGame(res_detail[i])
|
import requests
import datetime as dt
from src.typeDefs.rawVoltageCreationResp import RawVoltageCreationResp
class RawVoltageCreationHandler():
    """Thin HTTP client for the raw-voltage creation API endpoint."""
    rawVoltageCreationUrl = ''

    def __init__(self, rawVoltageCreationUrl):
        self.rawVoltageCreationUrl = rawVoltageCreationUrl

    def createRawVoltage(self, startDate: dt.datetime, endDate: dt.datetime) -> RawVoltageCreationResp:
        """Ask the API service to create raw voltage data for a date span.

        Args:
            startDate (dt.datetime): start date
            endDate (dt.datetime): end date

        Returns:
            RawVoltageCreationResp: outcome of the creation request
        """
        payload = {
            "startDate": startDate.strftime('%Y-%m-%d'),
            "endDate": endDate.strftime('%Y-%m-%d')
        }
        response = requests.post(self.rawVoltageCreationUrl, json=payload)
        # Pessimistic default, overwritten below on any decodable reply.
        result: RawVoltageCreationResp = {
            "isSuccess": False,
            'status': response.status_code,
            'message': 'Unable to create rawVoltage...'
        }
        if response.status_code == requests.codes['ok']:
            body = response.json()
            result['isSuccess'] = True
            result['message'] = body['message']
        else:
            result['isSuccess'] = False
            try:
                body = response.json()
                print(body['message'])
                result['message'] = body['message']
            except ValueError:
                # Non-JSON error body: fall back to the raw response text.
                result['message'] = response.text
        return result
|
from spider.LogInit import log
from bs4 import BeautifulSoup
from spider import Item
import re
from urllib.parse import urlparse
# Parses a given HTML page, extracting the URL and name of each image to
# crawl. Results are stored in Item objects held on a "thread-safe" queue
# (producer/consumer model).
class HtmlParser(object):
    '''
    Parses crawled HTML pages for the sheet-music spider.

    Usage:
        parser = HtmlParser(taskManager)
        parser.pageUrlParser(url, html, threadName)
        parser.fileUrlParser(url, html, threadName)
    '''
    def __init__(self, taskManager):
        self.taskManager = taskManager

    # Derive the next score-page URL from the current one and queue it.
    def pageUrlParser(self, url, html, threadName):
        """Queue the next page URL on the task manager.

        The numeric score id is read from the URL path; the next id is
        offset by the worker's (numeric) thread name so that concurrent
        threads walk disjoint pages.
        """
        path = urlparse(url).path
        id = re.findall(r"[0-9]+", path)[0]
        url = "http://www.tan8.com/yuepu-{}-m.html".format(int(id) + int(threadName) + 1)
        self.taskManager.addPageUrl(url)

    # Extract all sheet-image URLs from the page and queue them as Items.
    def fileUrlParser(self, url, html, threadName):
        soup = BeautifulSoup(html, 'html.parser')
        try:
            title = (soup.title).string
            imgTags = soup.find('div', class_='swiper-container').find_all('img')
            total = len(imgTags)
            for imgTag in imgTags:
                img = imgTag['src']
                path = urlparse(img).path.split('/')[-1]
                id = urlparse(img).path.split('/')[-2]
                # Image files are numbered from 0; display numbers from 1.
                num = int(path.split('.')[-2]) + 1
                item = Item.Item(img, "{0} {1} {2}-{3}" .format(id, title, num, total))
                self.taskManager.addFileItem(item)
        except Exception:
            # A page without the expected structure is deliberately skipped.
            # (Bug fix: was a bare `except:`, which also swallowed
            # SystemExit and KeyboardInterrupt.)
            pass
|
import cv2
import time
import glob
import os.path
import numpy as np
from matplotlib import pyplot as plt
import skimage
import skimage.measure
import skimage.morphology
def img_to_map(img):
    """Convert an occupancy-grid image to a cost map.

    Pixel encoding: 128 -> -1.0 (unknown), 0 -> 1.0 (obstacle),
    200 -> 0.0 (free). Other pixel values pass through unchanged (as float).
    """
    # np.float was removed in NumPy 1.24; use the explicit 64-bit dtype.
    c_map = img.copy().astype(np.float64)
    c_map[img == 128] = -1.0
    c_map[img == 0] = 1.0
    c_map[img == 200] = 0.0
    return c_map
def map_to_img(c_map):
    """Inverse of img_to_map: render a cost map back to a uint8 image
    (0.0 -> 200 free, 1.0 -> 0 obstacle, -1.0 -> 128 unknown)."""
    out = c_map.copy()
    out[c_map == 0.0] = 200
    out[c_map == 1.0] = 0
    out[c_map == -1.0] = 128
    return out.astype(np.uint8)
def create_img_of_centroids(centroids, c_map):
    """Render the cost map as a grayscale RGB image with each centroid
    drawn as a filled red circle (centroids are (row, col) pairs)."""
    gray = map_to_img(c_map)
    img = np.zeros([gray.shape[0], gray.shape[1], 3], dtype=np.uint8)
    for channel in range(3):
        img[:, :, channel] = gray.copy()
    for centroid in centroids:
        # OpenCV expects (x, y) = (col, row).
        center = (int(centroid[1]), int(centroid[0]))
        img = cv2.circle(img, center, 4, (0, 0, 255), 4)
    return img
def get_kernels_response(map):
    """Combined edge response: absolute sum of four 2x2 finite-difference
    kernel responses (horizontal, vertical and both diagonals)."""
    k_x = np.array(([0, 0], [-1, 1]), dtype="int")
    k_y = np.array(([0, -1], [0, 1]), dtype="int")
    k_xy = np.array(([-1, 0], [0, 1]), dtype="int")
    k_yx = np.array(([0, -1], [1, 0]), dtype="int")
    responses = [cv2.filter2D(map, -1, k) for k in (k_x, k_y, k_xy, k_yx)]
    return np.abs(responses[0] + responses[1] + responses[2] + responses[3])
def detect_frontiers(basename, map):
    """Detect frontier regions (boundaries between free and unknown space).

    Edge responses along obstacle borders are subtracted (weighted x2) so
    only free/unknown boundaries survive; connected components of the
    resulting binary map become frontiers.

    basename is kept for interface compatibility with the (removed) debug
    image dumps. Returns (centroids, areas, frontier_map_bin).
    """
    response = get_kernels_response(map.copy())
    # np.float was removed in NumPy 1.24; use the explicit 64-bit dtype.
    obstacles = (map.copy() == 1.0).astype(np.float64)
    response_obstacles = get_kernels_response(obstacles)
    # Suppress edges caused by obstacles; remaining positives are frontiers.
    frontier_map = response - 2 * response_obstacles
    frontier_map_bin = frontier_map > 0
    labels = skimage.measure.label(frontier_map_bin, background=0)
    centroids = []
    areas = []
    # Label 0 is background; collect centroid/area per frontier component.
    for i in range(1, np.max(labels) + 1):
        props = skimage.measure.regionprops((labels == i).astype(int))
        centroids.append(props[0].centroid)
        areas.append(props[0].area)
    return centroids, areas, frontier_map_bin
def detect_consecutive(basename, current_map, previous_map, k_size):
    """Detect frontiers restricted to newly explored area.

    Cells already known in previous_map (dilated by a k_size x k_size kernel
    to absorb small alignment noise) are masked out as obstacles before
    frontier detection. With previous_map=None the whole map is used.
    Returns (centroids, areas, frontier_map).
    """
    # np.float was removed in NumPy 1.24; use the explicit 64-bit dtype.
    diff_map = current_map.copy().astype(np.float64)
    if previous_map is not None:
        mask = np.uint8(previous_map != -1.0)
        kernel = np.ones((k_size, k_size), np.uint8)
        mask = cv2.dilate(mask, kernel, iterations=1)
        # Previously-known cells are treated as obstacles so they produce
        # no frontier response.
        diff_map[mask == 1] = 1.0
    else:
        diff_map = current_map.copy()
    centroids, areas, frontier_map = detect_frontiers(basename, diff_map)
    return centroids, areas, frontier_map
def clear_path(c_map, c1, c2, dist):
    """Return 1 if the straight segment from c1 to c2 (length dist) crosses
    no obstacle cell (value 1) in c_map, else 0."""
    step = (c2 - c1) / float(dist)
    pos = c1
    for _ in range(int(dist)):
        pos = pos + step
        if c_map[int(pos[0]), int(pos[1])] == 1:
            return 0
    return 1
def clusters_from_frontiers(centroids, areas, frontier_map, current_map, max_dist=10):
    """Merge nearby, mutually visible frontier centroids into clusters.

    Two centroids are linked when they are within max_dist cells AND the
    straight line between them crosses no obstacle (clear_path). Returns
    (final_centroids, final_areas): per-cluster mean centroid (row, col)
    and summed area.
    """
    # Upper-triangular adjacency: clusters[i, j] = 1 links centroid i to j (j > i).
    clusters = np.zeros([len(centroids), len(centroids)], dtype=np.uint8)
    clusters_props = []
    for i, c in enumerate(centroids):
        for j, c2 in enumerate(centroids[i+1:]):
            c1a = np.asarray(c)
            c2a = np.asarray(c2)
            dist = np.linalg.norm(c1a - c2a)
            if dist > max_dist:
                continue
            else:
                if clear_path(current_map, c1a, c2a, dist):
                    # j indexes the slice starting at i+1, hence the offset.
                    clusters[i, i + j + 1] = 1
    # Single-pass label propagation: -1 means "not yet assigned a cluster id".
    temp_clusters = -1 * np.ones([clusters.shape[0], 1], dtype=np.int8)
    n_clusters = 0
    for i in range(clusters.shape[0]):
        if temp_clusters[i] == -1:
            temp_clusters[i] = n_clusters
            n_clusters = n_clusters + 1
        # Propagate i's label to all directly linked, unassigned centroids.
        for j in range(clusters.shape[1]):
            if clusters[i, j] == 1:
                if temp_clusters[j] == -1:
                    temp_clusters[j] = temp_clusters[i]
    # Aggregate each cluster: mean centroid, summed area.
    # (occ >= 1 always: every cluster id was seeded by at least one centroid.)
    final_centroids = []
    final_areas = []
    for i in range(n_clusters):
        occ = 0
        temp_centroid_x = 0
        temp_centroid_y = 0
        temp_area = 0
        for j in range(temp_clusters.shape[0]):
            if temp_clusters[j] == i:
                # centroids are (row, col); accumulate as (y, x).
                temp_centroid_x += centroids[j][1]
                temp_centroid_y += centroids[j][0]
                temp_area += areas[j]
                occ += 1
        final_centroids.append([temp_centroid_y / occ, temp_centroid_x / occ])
        final_areas.append(temp_area)
    return final_centroids, final_areas
def main():
    """Run frontier detection over ./input/*.png occupancy maps, write
    annotated images to ./output/, and print per-frame statistics.

    Three variants are tracked per frame:
      cs/ars            - non-consecutive (whole current map),
      centroids/areas   - consecutive (newly explored area only),
      f_centroids/f_areas - clustered ("minimal") consecutive frontiers.
    """
    files = sorted(glob.glob("./input/*.png"))
    # Per-frame counts/means and running totals for the three variants.
    css = []
    centroidss = []
    f_centroidss = []
    arss = []
    areass = []
    f_areass = []
    sum_css = []
    sum_centroidss = []
    sum_f_centroidss = []
    sum_cs = 0
    sum_centroids = 0
    sum_f_centroids = 0
    for i in range(len(files)):
        # The first frame has no predecessor map.
        if i == 0:
            previous_map = None
            current_map = img_to_map(cv2.imread(files[i], 0))
        else:
            previous_map = img_to_map(cv2.imread(files[i-1], 0))
            current_map = img_to_map(cv2.imread(files[i], 0))
        # Non-consecutive detection (previous map deliberately ignored).
        cs, ars, fmap = detect_consecutive(os.path.basename(files[i]), current_map, None, 5)
        # Consecutive detection (restricted to newly explored area).
        centroids, areas, frontier_map = detect_consecutive(os.path.basename(files[i]), current_map, previous_map, 5)
        f_centroids = []
        f_areas = 0
        if len(centroids) > 0:
            f_centroids, f_areas = clusters_from_frontiers(centroids, areas, frontier_map, current_map, 20)
            img = create_img_of_centroids(f_centroids, current_map)
            cv2.imwrite("./output/output_" + str(i) + ".png", img)
            img2 = create_img_of_centroids(centroids, current_map)
            cv2.imwrite("./output/output_c" + str(i) + ".png", img2)
        else:
            # No frontiers this frame; np.mean(0) below yields 0.0.
            areas = 0
            centroids = []
        css.append(len(cs))
        centroidss.append(len(centroids))
        f_centroidss.append(len(f_centroids))
        arss.append(np.mean(ars))
        areass.append(np.mean(areas))
        f_areass.append(np.mean(f_areas))
        sum_cs = sum_cs + len(cs)
        sum_centroids = sum_centroids + len(centroids)
        sum_f_centroids = sum_f_centroids + len(f_centroids)
        sum_css.append(sum_cs)
        sum_centroidss.append(sum_centroids)
        sum_f_centroidss.append(sum_f_centroids)
        img3 = create_img_of_centroids(cs, current_map)
        cv2.imwrite("./output/output_n" + str(i) + ".png", img3)
    print("Centroids non-consecutive")
    print(css)
    print("Centroids consecutive")
    print(centroidss)
    print("Centroids minimal")
    print(f_centroidss)
    print("Sum Centroids non-consecutive")
    print(sum_css)
    print("Sum Centroids consecutive")
    print(sum_centroidss)
    print("Sum Centroids minimal")
    print(sum_f_centroidss)
    print("areas non-consecutive")
    print(arss)
    print("areas consecutive")
    print(areass)
    print("areas minimal")
    print(f_areass)
|
import unittest
from datetime import timedelta, datetime
from time import sleep
from src.utils.datetime import strfdelta
from src.utils.timing import TimedTaskLimiter, TimedOccurrenceTracker
class TestTimedTaskLimiter(unittest.TestCase):
    """Tests for TimedTaskLimiter: allows a task at most once per interval."""

    def setUp(self) -> None:
        # 2s interval; the 0.5s margin absorbs sleep/scheduling jitter.
        self.interval_seconds = 2
        self.interval_seconds_with_error_margin = self.interval_seconds + 0.5
        self.interval_timedelta = timedelta(seconds=self.interval_seconds)
        self.ttl = TimedTaskLimiter(self.interval_timedelta)

    def test_time_interval_is_supplied_time_interval(self):
        self.assertEqual(self.ttl.time_interval, self.interval_timedelta)

    def test_last_time_that_did_task_is_min_datetime(self):
        self.assertEqual(self.ttl.last_time_that_did_task, datetime.min)

    def test_can_do_task_if_not_done_before(self):
        self.assertTrue(self.ttl.can_do_task())

    def test_can_do_task_if_not_done_before_and_wait_time_interval(self):
        sleep(self.interval_seconds_with_error_margin)
        self.assertTrue(self.ttl.can_do_task())

    def test_cannot_do_task_if_check_within_time_interval(self):
        self.ttl.did_task()
        self.assertFalse(self.ttl.can_do_task())

    # NOTE(review): name says "cannot" but the assertion checks that the task
    # CAN be done after the interval has elapsed — consider renaming.
    def test_cannot_do_task_if_check_after_time_interval(self):
        self.ttl.did_task()
        sleep(self.interval_seconds_with_error_margin)
        self.assertTrue(self.ttl.can_do_task())

    def test_do_task_updates_last_time_that_did_task_to_a_greater_time(self):
        before = self.ttl.last_time_that_did_task
        self.ttl.did_task()
        after = self.ttl.last_time_that_did_task
        self.assertGreater(after, before)

    def test_do_task_actually_allowed_even_if_cannot_do_task(self):
        # did_task() is not guarded by can_do_task(); calling it while the
        # limiter says "no" must not raise.
        self.ttl.did_task()
        self.assertFalse(self.ttl.can_do_task())
        self.ttl.did_task()

    def test_reset_sets_last_time_to_min_datetime(self):
        self.ttl.reset()
        self.assertEqual(self.ttl.last_time_that_did_task, datetime.min)

    def test_reset_sets_last_time_to_min_datetime_even_after_task_done(self):
        self.ttl.did_task()
        self.assertNotEqual(self.ttl.last_time_that_did_task, datetime.min)
        self.ttl.reset()
        self.assertEqual(self.ttl.last_time_that_did_task, datetime.min)
class TestTimedOccurrenceTracker(unittest.TestCase):
    """Tests for TimedOccurrenceTracker: flags when an action happens more
    than max_occurrences times within a sliding time interval."""

    def setUp(self) -> None:
        self.max_occurrences = 4
        # 3s window; the 0.5s margin absorbs sleep/scheduling jitter.
        self.interval_seconds = 3
        self.interval_seconds_with_error_margin = self.interval_seconds + 0.5
        self.interval_timedelta = timedelta(seconds=self.interval_seconds)
        self.ttl = TimedOccurrenceTracker(self.max_occurrences,
                                          self.interval_timedelta)

    def test_max_occurrences_is_supplied_max_occurrences(self):
        self.assertEqual(self.ttl.max_occurrences, self.max_occurrences)

    def test_time_interval_is_supplied_time_interval(self):
        self.assertEqual(self.ttl.time_interval, self.interval_timedelta)

    def test_time_interval_pretty_returns_strfdelta_result(self):
        self.assertEqual(
            self.ttl.time_interval_pretty,
            strfdelta(self.interval_timedelta,
                      "{hours}h, {minutes}m, {seconds}s"))

    def test_not_too_many_occurrence_if_no_occurrences(self):
        self.assertFalse(self.ttl.too_many_occurrences())

    def test_not_too_many_occurrences_if_just_below_limit(self):
        for i in range(self.max_occurrences - 1):
            self.ttl.action_happened()
        self.assertFalse(self.ttl.too_many_occurrences())

    def test_too_many_occurrences_if_enough_occurrences(self):
        for i in range(self.max_occurrences):
            self.ttl.action_happened()
        self.assertTrue(self.ttl.too_many_occurrences())

    def test_not_too_many_occurrences_if_enough_occurrences_but_wait(self):
        # Occurrences should age out of the window after the interval passes.
        for i in range(self.max_occurrences):
            self.ttl.action_happened()
        self.assertTrue(self.ttl.too_many_occurrences())
        sleep(self.interval_seconds_with_error_margin)
        self.assertFalse(self.ttl.too_many_occurrences())

    def test_not_too_many_occurrences_if_reset(self):
        self.ttl.reset()
        self.assertFalse(self.ttl.too_many_occurrences())

    def test_not_too_many_occurrences_if_reset_after_enough_occurrences(self):
        for i in range(self.max_occurrences):
            self.ttl.action_happened()
        self.assertTrue(self.ttl.too_many_occurrences())
        self.ttl.reset()
        self.assertFalse(self.ttl.too_many_occurrences())
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# -*- coding: utf-8 -*-
"""
# @Time : 2019/5/24
# @Author : Jiaqi&Zecheng
# @File : data_process.py
# @Software: PyCharm
"""
import json
import argparse
import nltk
import os
import pickle
try:
from irnet_utils import symbol_filter, re_lemma, fully_part_header, group_header, partial_header, num2year, group_symbol, group_values, group_digital, load_data_entry
from irnet_utils import AGG, wordnet_lemmatizer
except:
from ir.irnet_utils import symbol_filter, re_lemma, fully_part_header, group_header, partial_header, num2year, group_symbol, group_values, group_digital, load_data_entry
from ir.irnet_utils import AGG, wordnet_lemmatizer
# Locate the ConceptNet data directory; the relative path differs depending
# on the working directory this module is run from.
# NOTE(review): if neither path exists, `base` is never bound and the open()
# below raises NameError instead of a clear error — confirm intended setups.
if os.path.exists('../data/conceptNet'):
    base = '../data/conceptNet'
elif os.path.exists('../../data/conceptNet'):
    base = '../../data/conceptNet'
# ConceptNet relation dictionaries, loaded once at import time and used to
# type value tokens in the processing functions below.
with open(os.path.join(base, 'english_RelatedTo.pkl'), 'rb') as f:
    english_RelatedTo = pickle.load(f)
with open(os.path.join(base, 'english_IsA.pkl'), 'rb') as f:
    english_IsA = pickle.load(f)
def process_one(data, table):
    """Tokenize and type-annotate a single dataset entry (IRNet preprocessing).

    Lemmatizes the question, then walks its tokens left to right and labels
    maximal spans as table / column / aggregation / comparative / value
    tokens, consulting ConceptNet for quoted symbols and literal values.
    Adds 'question_arg', 'question_arg_type' and 'nltk_pos' to the entry
    and returns it.
    """
    data, _ = load_data_entry(data, table)
    if 'origin_question_toks' not in data:
        data['origin_question_toks'] = data['question_toks']
    data['question_toks'] = symbol_filter(data['question_toks'])
    origin_question_toks = symbol_filter([x for x in data['origin_question_toks'] if x.lower() != 'the'])
    question_toks = [wordnet_lemmatizer.lemmatize(x.lower()) for x in data['question_toks'] if x.lower() != 'the']
    data['question_toks'] = question_toks

    # Lemmatized and regex-lemma variants of the table names.
    table_names = []
    table_names_pattern = []
    for y in data['table_names']:
        x = [wordnet_lemmatizer.lemmatize(x.lower()) for x in y.split(' ')]
        table_names.append(" ".join(x))
        x = [re_lemma(x.lower()) for x in y.split(' ')]
        table_names_pattern.append(" ".join(x))

    # Same for the column headers (both joined and token-list forms).
    header_toks = []
    header_toks_list = []
    header_toks_pattern = []
    header_toks_list_pattern = []
    for y in data['col_set']:
        x = [wordnet_lemmatizer.lemmatize(x.lower()) for x in y.split(' ')]
        header_toks.append(" ".join(x))
        header_toks_list.append(x)
        x = [re_lemma(x.lower()) for x in y.split(' ')]
        header_toks_pattern.append(" ".join(x))
        header_toks_list_pattern.append(x)

    num_toks = len(question_toks)
    idx = 0
    tok_concol = []
    type_concol = []
    nltk_result = nltk.pos_tag(question_toks)

    def get_concept_result(toks, graph):
        # Longest-match lookup of the token span in a ConceptNet relation
        # dict; returns the first schema column among the related concepts.
        # (Hoisted out of the while loop — it was redefined every iteration.)
        for begin_id in range(0, len(toks)):
            for r_ind in reversed(range(1, len(toks) + 1 - begin_id)):
                tmp_query = "_".join(toks[begin_id:r_ind])
                if tmp_query in graph:
                    mi = graph[tmp_query]
                    # BUG FIX: the original referenced the undefined name
                    # `entry`; this function operates on `data`.
                    for col in data['col_set']:
                        if col in mi:
                            return col

    while idx < num_toks:
        # fully header
        end_idx, header = fully_part_header(question_toks, idx, num_toks, header_toks)
        if header:
            tok_concol.append(question_toks[idx: end_idx])
            type_concol.append(["col"])
            idx = end_idx
            continue
        # check for table
        end_idx, tname = group_header(question_toks, idx, num_toks, table_names)
        if tname:
            tok_concol.append(question_toks[idx: end_idx])
            type_concol.append(["table"])
            idx = end_idx
            continue
        # check for column
        end_idx, header = group_header(question_toks, idx, num_toks, header_toks)
        if header:
            tok_concol.append(question_toks[idx: end_idx])
            type_concol.append(["col"])
            idx = end_idx
            continue
        # check for partial column
        end_idx, tname = partial_header(question_toks, idx, header_toks_list)
        if tname:
            tok_concol.append(tname)
            type_concol.append(["col"])
            idx = end_idx
            continue
        # check for aggregation
        end_idx, agg = group_header(question_toks, idx, num_toks, AGG)
        if agg:
            tok_concol.append(question_toks[idx: end_idx])
            type_concol.append(["agg"])
            idx = end_idx
            continue
        # comparatives / superlatives from the POS tags
        if nltk_result[idx][1] == 'RBR' or nltk_result[idx][1] == 'JJR':
            tok_concol.append([question_toks[idx]])
            type_concol.append(['MORE'])
            idx += 1
            continue
        if nltk_result[idx][1] == 'RBS' or nltk_result[idx][1] == 'JJS':
            tok_concol.append([question_toks[idx]])
            type_concol.append(['MOST'])
            idx += 1
            continue
        # string match for Time Format: rewrite 4-digit years as 'year'
        # and re-check against the headers.
        if num2year(question_toks[idx]):
            question_toks[idx] = 'year'
            end_idx, header = group_header(question_toks, idx, num_toks, header_toks)
            if header:
                tok_concol.append(question_toks[idx: end_idx])
                type_concol.append(["col"])
                idx = end_idx
                continue
        # quoted symbols: type them via ConceptNet (IsA, then RelatedTo)
        end_idx, symbol = group_symbol(question_toks, idx, num_toks)
        if symbol:
            tmp_toks = [x for x in question_toks[idx: end_idx]]
            assert len(tmp_toks) > 0, print(symbol, question_toks)
            pro_result = get_concept_result(tmp_toks, english_IsA)
            if pro_result is None:
                pro_result = get_concept_result(tmp_toks, english_RelatedTo)
            if pro_result is None:
                pro_result = "NONE"
            # Only the first token of the span keeps the concept label.
            for tmp in tmp_toks:
                tok_concol.append([tmp])
                type_concol.append([pro_result])
                pro_result = "NONE"
            idx = end_idx
            continue
        # literal values (capitalized spans etc.), typed via ConceptNet
        end_idx, values = group_values(origin_question_toks, idx, num_toks)
        if values and (len(values) > 1 or question_toks[idx - 1] not in ['?', '.']):
            tmp_toks = [wordnet_lemmatizer.lemmatize(x) for x in question_toks[idx: end_idx] if x.isalnum() is True]
            assert len(tmp_toks) > 0, print(question_toks[idx: end_idx], values, question_toks, idx, end_idx)
            pro_result = get_concept_result(tmp_toks, english_IsA)
            if pro_result is None:
                pro_result = get_concept_result(tmp_toks, english_RelatedTo)
            if pro_result is None:
                pro_result = "NONE"
            for tmp in tmp_toks:
                tok_concol.append([tmp])
                type_concol.append([pro_result])
                pro_result = "NONE"
            idx = end_idx
            continue
        # plain numbers
        result = group_digital(question_toks, idx)
        if result is True:
            tok_concol.append(question_toks[idx: idx + 1])
            type_concol.append(["value"])
            idx += 1
            continue
        # BUG FIX: tokens are strings, so the original comparison against the
        # list ['ha'] could never match; compare/replace plain strings so the
        # lemmatized "has" -> "ha" is normalized to "have" as intended.
        if question_toks[idx] == 'ha':
            question_toks[idx] = 'have'
        # default: untyped token
        tok_concol.append([question_toks[idx]])
        type_concol.append(['NONE'])
        idx += 1
        continue

    data['question_arg'] = tok_concol
    data['question_arg_type'] = type_concol
    data['nltk_pos'] = nltk_result
    return data
def process_data(data):
    """Tokenize and type-annotate every entry of a dataset in place.

    Same span-labelling pass as process_one, applied to each entry of the
    supplied list. Returns the (mutated) list.

    :param data: list of dataset entries
    :return: the same list with 'question_arg', 'question_arg_type' and
        'nltk_pos' added to each entry
    """
    # BUG FIX: the body referred to the undefined name `datas` while the
    # parameter is called `data` (NameError on first call). The parameter
    # name is kept for backward compatibility; alias it instead.
    datas = data
    # copy of the origin question_toks
    for d in datas:
        if 'origin_question_toks' not in d:
            d['origin_question_toks'] = d['question_toks']
    for entry in datas:
        entry['question_toks'] = symbol_filter(entry['question_toks'])
        origin_question_toks = symbol_filter([x for x in entry['origin_question_toks'] if x.lower() != 'the'])
        question_toks = [wordnet_lemmatizer.lemmatize(x.lower()) for x in entry['question_toks'] if x.lower() != 'the']
        entry['question_toks'] = question_toks

        # Lemmatized and regex-lemma variants of the table names.
        table_names = []
        table_names_pattern = []
        for y in entry['table_names']:
            x = [wordnet_lemmatizer.lemmatize(x.lower()) for x in y.split(' ')]
            table_names.append(" ".join(x))
            x = [re_lemma(x.lower()) for x in y.split(' ')]
            table_names_pattern.append(" ".join(x))

        # Same for the column headers (both joined and token-list forms).
        header_toks = []
        header_toks_list = []
        header_toks_pattern = []
        header_toks_list_pattern = []
        for y in entry['col_set']:
            x = [wordnet_lemmatizer.lemmatize(x.lower()) for x in y.split(' ')]
            header_toks.append(" ".join(x))
            header_toks_list.append(x)
            x = [re_lemma(x.lower()) for x in y.split(' ')]
            header_toks_pattern.append(" ".join(x))
            header_toks_list_pattern.append(x)

        num_toks = len(question_toks)
        idx = 0
        tok_concol = []
        type_concol = []
        nltk_result = nltk.pos_tag(question_toks)

        def get_concept_result(toks, graph):
            # Longest-match ConceptNet lookup; returns the first schema
            # column found among the related concepts. (Hoisted out of the
            # while loop — it was redefined every iteration.)
            for begin_id in range(0, len(toks)):
                for r_ind in reversed(range(1, len(toks) + 1 - begin_id)):
                    tmp_query = "_".join(toks[begin_id:r_ind])
                    if tmp_query in graph:
                        mi = graph[tmp_query]
                        for col in entry['col_set']:
                            if col in mi:
                                return col

        while idx < num_toks:
            # fully header
            end_idx, header = fully_part_header(question_toks, idx, num_toks, header_toks)
            if header:
                tok_concol.append(question_toks[idx: end_idx])
                type_concol.append(["col"])
                idx = end_idx
                continue
            # check for table
            end_idx, tname = group_header(question_toks, idx, num_toks, table_names)
            if tname:
                tok_concol.append(question_toks[idx: end_idx])
                type_concol.append(["table"])
                idx = end_idx
                continue
            # check for column
            end_idx, header = group_header(question_toks, idx, num_toks, header_toks)
            if header:
                tok_concol.append(question_toks[idx: end_idx])
                type_concol.append(["col"])
                idx = end_idx
                continue
            # check for partial column
            end_idx, tname = partial_header(question_toks, idx, header_toks_list)
            if tname:
                tok_concol.append(tname)
                type_concol.append(["col"])
                idx = end_idx
                continue
            # check for aggregation
            end_idx, agg = group_header(question_toks, idx, num_toks, AGG)
            if agg:
                tok_concol.append(question_toks[idx: end_idx])
                type_concol.append(["agg"])
                idx = end_idx
                continue
            # comparatives / superlatives from the POS tags
            if nltk_result[idx][1] == 'RBR' or nltk_result[idx][1] == 'JJR':
                tok_concol.append([question_toks[idx]])
                type_concol.append(['MORE'])
                idx += 1
                continue
            if nltk_result[idx][1] == 'RBS' or nltk_result[idx][1] == 'JJS':
                tok_concol.append([question_toks[idx]])
                type_concol.append(['MOST'])
                idx += 1
                continue
            # string match for Time Format: rewrite 4-digit years as 'year'
            # and re-check against the headers.
            if num2year(question_toks[idx]):
                question_toks[idx] = 'year'
                end_idx, header = group_header(question_toks, idx, num_toks, header_toks)
                if header:
                    tok_concol.append(question_toks[idx: end_idx])
                    type_concol.append(["col"])
                    idx = end_idx
                    continue
            # quoted symbols: type them via ConceptNet (IsA, then RelatedTo)
            end_idx, symbol = group_symbol(question_toks, idx, num_toks)
            if symbol:
                tmp_toks = [x for x in question_toks[idx: end_idx]]
                assert len(tmp_toks) > 0, print(symbol, question_toks)
                pro_result = get_concept_result(tmp_toks, english_IsA)
                if pro_result is None:
                    pro_result = get_concept_result(tmp_toks, english_RelatedTo)
                if pro_result is None:
                    pro_result = "NONE"
                # Only the first token of the span keeps the concept label.
                for tmp in tmp_toks:
                    tok_concol.append([tmp])
                    type_concol.append([pro_result])
                    pro_result = "NONE"
                idx = end_idx
                continue
            # literal values, typed via ConceptNet
            end_idx, values = group_values(origin_question_toks, idx, num_toks)
            if values and (len(values) > 1 or question_toks[idx - 1] not in ['?', '.']):
                tmp_toks = [wordnet_lemmatizer.lemmatize(x) for x in question_toks[idx: end_idx] if x.isalnum() is True]
                assert len(tmp_toks) > 0, print(question_toks[idx: end_idx], values, question_toks, idx, end_idx)
                pro_result = get_concept_result(tmp_toks, english_IsA)
                if pro_result is None:
                    pro_result = get_concept_result(tmp_toks, english_RelatedTo)
                if pro_result is None:
                    pro_result = "NONE"
                for tmp in tmp_toks:
                    tok_concol.append([tmp])
                    type_concol.append([pro_result])
                    pro_result = "NONE"
                idx = end_idx
                continue
            # plain numbers
            result = group_digital(question_toks, idx)
            if result is True:
                tok_concol.append(question_toks[idx: idx + 1])
                type_concol.append(["value"])
                idx += 1
                continue
            # BUG FIX: tokens are strings, so the original comparison against
            # the list ['ha'] could never match; normalize "ha" -> "have".
            if question_toks[idx] == 'ha':
                question_toks[idx] = 'have'
            # default: untyped token
            tok_concol.append([question_toks[idx]])
            type_concol.append(['NONE'])
            idx += 1
            continue

        entry['question_arg'] = tok_concol
        entry['question_arg_type'] = type_concol
        entry['nltk_pos'] = nltk_result
    return datas
if __name__ == '__main__':
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--data_path', type=str, help='dataset', required=True)
    arg_parser.add_argument('--table_path', type=str, help='table dataset', required=True)
    arg_parser.add_argument('--output', type=str, help='output data')
    args = arg_parser.parse_args()
    args.conceptNet = './conceptNet'
    # loading dataSets
    # NOTE(review): `load_dataSets` and `process_datas` are not defined in
    # this module or its imports, so this entry point raises NameError as
    # written. They presumably correspond to helpers from irnet_utils and to
    # `process_data` above — confirm against the upstream IRNet code.
    datas, table = load_dataSets(args)
    # process datasets
    process_result = process_datas(datas, args)
    # NOTE(review): the (possibly mutated-in-place) `datas` is dumped rather
    # than `process_result` — confirm this is intentional.
    with open(args.output, 'w') as f:
        json.dump(datas, f, indent=3, sort_keys=True)
|
import gym
import numpy as np
# Types
# NOTE(review): these are list *instances* used as informal aliases
# (a Program is a nested list of strings), not typing.List[...] types —
# they document intent in annotations but are not checkable.
Program = [str]
Population = [Program]
class Agent:
    """Genetic-programming agent for CartPole-v0.

    A program is a nested list [func, arg1, arg2, arg3, arg4] encoding an
    IFLTE expression over the pole angle/velocity terminals; evolution uses
    fitness-proportionate selection only (no crossover or mutation here).
    """

    def __init__(self):
        # Agent structure parameters
        self._T_values = ["pa", 'pv', '0.0', '0.025']  # value terminals: pole angle, pole velocity, constants
        self._T_actions = ['L', 'R']  # action terminals (push left / right)
        self._F = ["IFLTE"]  # function set: if-less-than-or-equal
        self._program_depth = 2
        self._actions = {'L': 0, 'R': 1}  # action terminal -> gym action index
        # GP experiment parameters
        self._pop_size = 100
        self._num_eps = 1000  # number of episodes to evaluate each program on
        self._max_gens = 2  # max number of generations to evolve
        self._term_score = 195.0  # fitness score termination criterion
        self._init_pop = self._gen_init_pop()
        self._best_program = []

    def run(self):
        """Train if no best program exists yet, then roll it out (rendered)
        and report the average reward over self._num_eps episodes."""
        if self._best_program == []:
            self.train()
            print("\nBest program after training:")
            print(self._best_program)
        env = gym.make("CartPole-v0")
        net_reward = 0
        for _ in range(self._num_eps):
            ep_reward = 0
            done = False
            obs = env.reset()
            while not done:
                env.render()
                action = self._eval(self._best_program, obs)
                obs, reward, done, _ = env.step(action)
                ep_reward += reward
            net_reward += ep_reward
        print("\nAverage reward over {} trials: {}".format(self._num_eps, net_reward/self._num_eps))
        env.close()

    def train(self):
        """Evolve populations until a program reaches the termination score
        or the generation budget is exhausted; stores the winner in
        self._best_program."""
        best_program = []
        # Evolve generations
        current_pop = self._init_pop
        for gen_idx in range(self._max_gens):
            print("\nGeneration {}...".format(gen_idx+1))
            scores = self._batch_fit(current_pop)
            # Check termination criteria before evolving next generation
            max_score = max(scores)
            if max_score >= self._term_score:
                best_program = current_pop[scores.index(max_score)]
                break
            # Selection & reproduction
            next_pop = [self._select(current_pop, scores) for _ in range(self._pop_size)]
            current_pop = next_pop
        # If a solution wasn't found before reaching the last generation
        # pick the best program from the last generation as the solution.
        # NOTE(review): if the loop breaks ON the final generation this branch
        # still runs and re-evaluates the population — confirm intended.
        if gen_idx >= self._max_gens-1:
            last_scores = self._batch_fit(current_pop)
            max_score_idx = last_scores.index(max(last_scores))
            best_program = current_pop[max_score_idx]
        self._best_program = best_program

    def _gen_init_pop(self) -> Population:
        # Random initial population of fixed-depth programs.
        n = self._pop_size
        pop = [self._gen_program(self._program_depth) for _ in range(n)]
        return pop

    def _gen_program(self, d: int) -> Program:
        """
        Generates a program of arbitrary depth d.
        """
        p = []
        func = np.random.choice(self._F)
        arg1 = np.random.choice(self._T_values)
        arg2 = np.random.choice(self._T_values)
        # Leaves are action terminals; inner nodes recurse one level down.
        if d <= 1:
            arg3 = np.random.choice(self._T_actions)
            arg4 = np.random.choice(self._T_actions)
        else:
            arg3 = self._gen_program(d-1)
            arg4 = self._gen_program(d-1)
        p = [func, arg1, arg2, arg3, arg4]
        return p

    def _batch_fit(self, pop: Population) -> [float]:
        """
        Computes the fitness of a population of programs.
        - pop: population (list of programs)
        - return: list of fitness scores
        """
        fit_scores = []
        env = gym.make("CartPole-v0")
        fit_scores = [self._fit(p, env) for p in pop]
        env.close()
        return fit_scores

    def _fit(self, p: Program, env) -> float:
        """
        Computes the average fitness of a program over
        a certain number of runs of the environment.
        - p: program
        - env: gym environment object
        - return: fitness score
        """
        avg_reward = 0
        net_reward = 0
        num_eps = self._num_eps
        # Run episodes
        for _ in range(num_eps):
            ep_reward = 0
            done = False
            obs = env.reset()
            # Run single episode
            while not done:
                action = self._eval(p, obs)
                obs, rew, done, _ = env.step(action)
                ep_reward += rew
            net_reward += ep_reward
        avg_reward = net_reward / num_eps
        return avg_reward

    def _eval(self, p:Program, obs:[float]) -> int:
        """
        Interpreter: this function evaluates a program and outputs
        the action it takes, parameterised by an observation from the environment.
        - p: program to evaluate
        - obs: gym environment observation object
        - return: action (0 or 1 for CartPole-v0)
        """
        action = -1
        # CartPole observation: obs[2] = pole angle, obs[3] = pole velocity.
        pa = obs[2]
        pv = obs[3]
        # Evaluate arguments 1 and 2
        if p[1] == 'pa':
            arg1 = pa
        elif p[1] == 'pv':
            arg1 = pv
        else:
            arg1 = float(p[1])
        if p[2] == 'pa':
            arg2 = pa
        elif p[2] == 'pv':
            arg2 = pv
        else:
            arg2 = float(p[2])
        # Evaluate arguments 3 and 4 (sub-programs recurse; terminals map to actions)
        arg3 = self._eval(p[3], obs) if type(p[3]) is list else self._actions[p[3]]
        arg4 = self._eval(p[4], obs) if type(p[4]) is list else self._actions[p[4]]
        # Evaluate IFLTE(arg1, arg2, arg3, arg4)
        if arg1 <= arg2:
            action = arg3
        else:
            action = arg4
        return action

    # Genetic operators #
    def _select(self, pop: Population, fit_scores: [float]) -> Program:
        """
        Fitness Proportionate Selection (Roulette Wheel Selection)
        pop: population
        f_scores: fitness scores
        NOTE(review): if all scores are 0 (or r lands past the running sum
        due to float rounding) this returns [] — confirm acceptable.
        """
        selected = []
        F = sum(fit_scores)
        r = np.random.uniform(0, F)
        # Simulate roulette wheel with r as the fixed point
        counter = 0
        for i in range(len(fit_scores)):
            counter += fit_scores[i]
            if counter > r:
                selected = pop[i]
                break
        return selected
|
import re
import sys
class Token:
    """A lexical token: its type name, matched text and input position."""

    def __init__(self, type, val, pos):
        self.type = type
        self.val = val
        self.pos = pos

    def __str__(self):
        return '%s [%s] at pos %s' % (self.val, self.type, self.pos)
class LexExeption(Exception):
    """Raised when no lexer rule matches the character at the current position."""

    def __init__(self, sign, pos):
        self.sign = sign
        self.pos = pos

    def __str__(self):
        return 'Invalid sign %s at pos %s' % (self.sign, self.pos)
class Lexer:
    """Regex-driven tokenizer: rules is a list of (pattern, type) pairs,
    tried in order at each position; whitespace is skipped between tokens."""

    def __init__(self, rules):
        self.rules = [(re.compile(regex), type) for regex, type in rules]
        self.tokens = []
        self.w_space = re.compile(r'\S')

    def input_insert(self, input):
        """Load a new input string and rewind to the start."""
        self.input = input
        self.pos = 0

    def token(self):
        """Return the next Token, None at end of input, or raise LexExeption
        when no rule matches."""
        if self.pos >= len(self.input):
            return None
        # Skip whitespace by jumping to the next non-space character.
        non_space = self.w_space.search(self.input[self.pos:])
        if not non_space:
            return None
        self.pos += non_space.start()
        for pattern, tok_type in self.rules:
            m = pattern.match(self.input[self.pos:])
            if m:
                val = self.input[self.pos + m.start():self.pos + m.end()]
                tok = Token(tok_type, val, self.pos)
                self.pos += m.end()
                return tok
        raise LexExeption(self.input[self.pos], self.pos)

    def tokenize(self):
        """Yield tokens until the input is exhausted."""
        while True:
            tok = self.token()
            if tok is None:
                break
            yield tok
def main(filePath,tokens):
    """Tokenize the file at filePath using the given rules and print each
    token; lexing errors are printed instead of propagating."""
    lex = Lexer(tokens)
    # The with-statement closes the file; the original's explicit
    # file.close() inside the block was redundant.
    with open(filePath, 'r') as file:
        file_content = file.read()
    lex.input_insert(file_content)
    try:
        for tok in lex.tokenize():
            print(tok)
    except LexExeption as err:
        print(err)
if __name__ == '__main__':
    # Lexer rules, tried in order — earlier rules win, so operators and
    # parentheses are matched before the numeric rules.
    # NOTE(review): the 'int' pattern uses `\d*` (can match zero digits) with
    # a lookahead, and precedes 'float'; "3.14" only lexes as a float because
    # the int lookahead rejects '.' — confirm this ordering is intentional.
    tokens = [
        (r'^[^\d\W]\w*', 'identifier'),
        (r'\+', 'plus'),
        (r'\-', 'minus'),
        (r'\*', 'multi'),
        (r'\/', 'div'),
        (r'\^', 'pow'),
        (r'\(', 'lp'),
        (r'\)', 'rp'),
        (r'^\d*(?=\s|$|[a-zA-Z]|\(|\))','int'),
        (r'^\d+[.]\d+', 'float'),
    ]
    filePath = 'file.txt'
    main(filePath,tokens)
|
from .clotho_data_loader import get_clotho_loader
from pathlib import Path
def get_dataloaders(path_to_clotho, input_modal='features', output_modal='words_ind', num_workers=1, shuffle_train=True, batch_size=20):
    """Build the Clotho train ('development') and validation ('evaluation')
    dataloaders from <path_to_clotho>/data and return them as a pair."""
    data_dir = Path(path_to_clotho + '/data')
    train_dataloader = get_clotho_loader(
        data_dir, 'development', input_modal, output_modal,
        True, batch_size, 'max', shuffle=shuffle_train, num_workers=num_workers)
    valid_dataloader = get_clotho_loader(
        data_dir, 'evaluation', input_modal, output_modal,
        True, batch_size, 'max', shuffle=False, num_workers=num_workers)
    return train_dataloader, valid_dataloader
|
from ase.io import read
from flosic_os import calculate_flosic, flosic,xyz_to_nuclei_fod,ase2pyscf,get_multiplicity
from pyscf import dft,gto
# This example shows how FLO-SIC calculations can be done in the one-shot mode.
# The necessary input is a .xyz file with the molecular geometry and the FOD positions.
# The easiest way to do a FLO-SIC one-shot calculation is to call calculate_flosic.
# This is FULL FLO-SIC.
# Let's define some parameters for that.
b = '6-311++Gss' # Basis set.
verbose = 4 # Amount of output. 4: full output.
max_cycle = 300 # Number of SCF iterations.
conv_tol = 1e-7 # Accuracy of the SCF cycle.
grids_level = 3 # Level of the numerical grid. 3 is the standard value.
xc = 'LDA,PW' # Exchange-correlation functional in the form: (exchange,correlation)
# NOTE: As there exists only one way to express the exchange for LDA, there is only one identifier.
# For LDA correlation there exist several.
# We need the systems name (= Filename) as input.
sysname = 'H2'
# Now we can call calculate_flosic.
# calculate_flosic operates fully automatic; it performs a DFT SCF cycle and then does FLO-SIC on top of that.
# The return value is a Python dictionary.
flosic_values_1 = calculate_flosic(spin=0,fname=sysname,basis=b,verbose=verbose,max_cycle=max_cycle,conv_tol=conv_tol,grid=grids_level,xc=xc)
# ALTERNATIVELY: ASE Atoms object as input.
# We need an ASE Atoms object as input.
# We also need to specify the spin.
#molecule = read('H2.xyz')
#spin = 0
#flosic_values_1 = calculate_flosic(spin=0,ase_atoms=molecule,basis=b,verbose=verbose,max_cycle=max_cycle,conv_tol=conv_tol,grid=grids_level,xc=xc)
# Another possibility to use FLO-SIC is as an post-processing step.
# This is POST-PROCESSING one-shot.
# Here we start a regular DFT calculation and then apply FLO-SIC.
# First, set up a DFT calculation (see example 01).
# The mole object can be generated by Pyflosic routines as well.
# This routine properly parses the .xyz file (separating nuclei from FOD positions).
molecule = read(sysname+'.xyz')
geo,nuclei,fod1,fod2,included = xyz_to_nuclei_fod(molecule)
# Set spin and charge.
charge = 0
spin = get_multiplicity(sysname)
# Build the mole object.
mol = gto.M(atom=ase2pyscf(nuclei), basis={'default':b},spin=spin,charge=charge)
# Set up the DFT calculation (spin-unrestricted Kohn-Sham).
dft_object = dft.UKS(mol)
dft_object.verbose = verbose
dft_object.max_cycle = max_cycle
dft_object.conv_tol = conv_tol
dft_object.grids.level = grids_level
dft_object.xc = xc
# Perform the DFT calculation.
dft_energy = dft_object.kernel()
# Apply FLO-SIC to the converged DFT calculation (post-processing; no SIC self-consistency).
flosic_values_2 = flosic(mol,dft_object,fod1,fod2)
# Output the results. The output for FLO-SIC is given in the form of Python dictionaries.
print("ESIC: {}".format(flosic_values_1['etot_sic']-dft_energy))
print('Total energy of H2 (DFT): %0.5f (should be %0.5f)' % (dft_energy,-1.13634167738585))
print('Total energy of H2 (FLO-SIC FULL): %0.5f (should be %0.5f) ' % (flosic_values_1['etot_sic'],-1.18032726019))
print('Total energy of H2 (FLO-SIC POST-PROCESSING): % 0.5f (should be %0.5f) ' % (flosic_values_2['etot_sic'],-1.18032726019))
|
import json
import math
import string
from ast import literal_eval
from pathlib import Path
import ftfy
import jsonlines
import plac
import validators
from preshed.counter import PreshCounter
from spacy.lang.en import stop_words as en_stop_words
from spacy.lang.zh import stop_words as zh_stop_words
from tqdm import tqdm
class Word:
    """spaCy-Lexeme-like view over a word string, its Brown cluster and a
    log-probability table.

    Instances are cheap wrappers used when exporting one vocabulary row per
    word; all lexical attributes are computed lazily from the wrapped string.
    """

    # Class-wide running counter; the `id` property bumps it on every access
    # so each exported row receives a fresh sequential id.
    counter = -1

    def __init__(self, word_str, cluster, probs):
        """
        Args:
            word_str: the surface form of the word.
            cluster: Brown cluster id (string) for this word.
            probs: dict mapping word strings to log-probabilities.
        """
        self._word = word_str
        self._cluster = cluster
        self._probs = probs
        # Union of ASCII and common CJK punctuation.
        chinese_punct = "!?。。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏."
        self._punct_list = list(set(string.punctuation + chinese_punct))
        chinese_whitespace = ""
        self._whitespace_list = list(set(string.whitespace + chinese_whitespace))
        english_stopword = en_stop_words.STOP_WORDS
        chinese_stopword = zh_stop_words.STOP_WORDS
        self._stopword_list = {*english_stopword, *chinese_stopword}
        chinese_quote = "“”‘’"
        english_quote = "\"'"
        self._quote_list = list(set(english_quote + chinese_quote))
        # NOTE: the original had the english/chinese names swapped; the
        # combined sets below are unchanged.
        english_left_punct = "<([{"
        chinese_left_punct = "<([「『【〔〖〘〚{"
        self._left_punct_list = list(set(english_left_punct + chinese_left_punct))
        english_right_punct = ">)]}"
        chinese_right_punct = ">)]」』】〕〗〙〛}"
        self._right_punct_list = list(set(english_right_punct + chinese_right_punct))

    @property
    def orth(self):
        return self._word

    @property
    def id(self):
        # Deliberately mutates class state: each access yields the next id.
        self.__class__.counter += 1
        return self.__class__.counter

    @property
    def lower(self):
        return self._word.lower()

    @property
    def norm(self):
        return self._word

    @property
    def shape(self):
        # E.g. "Paris" -> "Xxxxx": upper-case chars map to X, others to x.
        return "".join(map(lambda x: "X" if x.isupper() else "x", self._word))

    @property
    def prefix(self):
        return self._word[0]

    @property
    def suffix(self):
        return self._word[-1]

    @property
    def length(self):
        return len(self._word)

    @property
    def cluster(self):
        return self._cluster

    @property
    def prob(self):
        # BUG FIX: the table is keyed by word strings (see read_freqs), so
        # the original `self._probs.get(self, 0)` keyed by the Word instance
        # and always returned the 0 default.
        return self._probs.get(self._word, 0)

    @property
    def is_alpha(self):
        return self._word.isalpha()

    @property
    def is_ascii(self):
        # str.isascii() exists only on py >= 3.7; encoding keeps us portable.
        try:
            self._word.encode('ascii')
        except UnicodeEncodeError:
            return False
        return True

    @property
    def is_digit(self):
        return self._word.isdigit()

    @property
    def is_lower(self):
        return self._word.islower()

    @property
    def is_punct(self):
        return self._word in self._punct_list

    @property
    def is_space(self):
        return self._word in self._whitespace_list

    @property
    def is_title(self):
        return self._word.istitle()

    @property
    def is_upper(self):
        return self._word.isupper()

    @property
    def like_url(self):
        return bool(validators.url(self._word))

    @property
    def like_num(self):
        # TODO(howl-anderson): fix it later
        return False

    @property
    def like_email(self):
        return bool(validators.email(self._word))

    @property
    def is_stop(self):
        return self._word in self._stopword_list

    @property
    def is_oov(self):
        return self._word not in self._probs

    @property
    def is_quote(self):
        return self._word in self._quote_list

    @property
    def is_left_punct(self):
        return self._word in self._left_punct_list

    @property
    def is_right_punct(self):
        return self._word in self._right_punct_list
def read_freqs(freqs_loc, max_length=100, min_doc_freq=5, min_freq=50):
    """Build smoothed log-probabilities from a tab-separated frequency file.

    Each line of the file is ``freq<TAB>doc_freq<TAB>key``.  Returns a tuple
    ``(probs, oov_prob)`` where ``probs`` maps words to log-probabilities and
    ``oov_prob`` is the log-probability assigned to unseen words.
    """
    print("Counting frequencies...")
    counts = PreshCounter()
    total = 0
    # First pass: accumulate raw counts so the smoother can be fitted.
    with freqs_loc.open() as handle:
        for line_no, raw in enumerate(handle):
            fields = raw.rstrip().split("\t", 2)
            occurrences = int(fields[0])
            counts.inc(line_no + 1, occurrences)
            total += occurrences
    counts.smooth()
    log_total = math.log(total)
    # Second pass: keep entries passing the document/term frequency and
    # length filters, storing smoothed log-probabilities.
    probs = {}
    with freqs_loc.open() as handle:
        for raw in tqdm(handle):
            freq_field, doc_freq_field, key = raw.rstrip().split("\t", 2)
            doc_freq = int(doc_freq_field)
            freq = int(freq_field)
            if doc_freq >= min_doc_freq and freq >= min_freq and len(key) < max_length:
                word = literal_eval(key)
                probs[word] = math.log(counts.smoother(freq)) - log_total
    oov_prob = math.log(counts.smoother(0)) - log_total
    return probs, oov_prob
def read_clusters(clusters_loc):
    """Load Brown cluster assignments from a whitespace-separated file.

    Each line is ``cluster word freq``; malformed lines are skipped.  The
    returned dict is expanded with lower/title/upper-cased variants so that
    lookups are case-robust.
    """
    print("Reading clusters...")
    clusters = {}
    with clusters_loc.open() as handle:
        for raw in tqdm(handle):
            try:
                cluster, word, freq = raw.split()
                word = ftfy.fix_text(word)
            except ValueError:
                # Wrong number of fields (or un-fixable text): skip the line.
                continue
            # A cluster seen fewer than 3 times is unreliable; lump it
            # into the catch-all cluster "0".
            clusters[word] = cluster if int(freq) >= 3 else "0"
    # Expand clusters with re-cased variants (snapshot the items first,
    # since we mutate the dict while walking it).
    for word, cluster in list(clusters.items()):
        for variant in (word.lower(), word.title(), word.upper()):
            clusters.setdefault(variant, cluster)
    return clusters
@plac.annotations(
    lang=("model language", "positional", None, str),
    output_loc=("model output directory", "positional", None, str),
    freqs_loc=("location of words frequencies file", "positional", None, Path),
    clusters_loc=("location of brown clusters data", "positional", None, Path),
)
def main(lang, output_loc, freqs_loc, clusters_loc):
    """Export a spaCy-style JSONL vocabulary file.

    Reads Brown cluster assignments and smoothed word frequencies, then
    writes a header record followed by one JSON object per word to
    ``output_loc``.
    """
    clusters = read_clusters(clusters_loc)
    probs, oov_prob = read_freqs(freqs_loc)
    with jsonlines.open(output_loc, mode="w") as writer:
        # The first record is a header carrying the language code and the
        # out-of-vocabulary log-probability.
        header = {"lang": lang, "settings": {"oov_prob": oov_prob}}
        writer.write(header)
        for word_str, cluster in clusters.items():
            # Skip empty keys that may appear after re-casing expansion.
            if not word_str:
                continue
            word = Word(word_str, cluster, probs)
            row = {
                "orth": word.orth,  # the word text
                "id": word.id,  # can correspond to row in vectors table
                "lower": word.lower,
                "norm": word.norm,
                "shape": word.shape,
                "prefix": word.prefix,
                "suffix": word.suffix,
                "length": word.length,
                "cluster": word.cluster,
                "prob": word.prob,
                "is_alpha": word.is_alpha,
                "is_ascii": word.is_ascii,
                "is_digit": word.is_digit,
                "is_lower": word.is_lower,
                "is_punct": word.is_punct,
                "is_space": word.is_space,
                "is_title": word.is_title,
                "is_upper": word.is_upper,
                "like_url": word.like_url,
                "like_num": word.like_num,
                "like_email": word.like_email,
                "is_stop": word.is_stop,
                "is_oov": word.is_oov,
                "is_quote": word.is_quote,
                "is_left_punct": word.is_left_punct,
                "is_right_punct": word.is_right_punct,
            }
            writer.write(row)
if __name__ == "__main__":
    # plac builds the CLI from the annotations above and invokes main().
    plac.call(main)
|
from urllib.request import urlopen
import json
def _fetch_json(url):
    """Download *url* and parse the body as JSON.

    The context manager guarantees the HTTP response is closed even when
    parsing raises (the original script leaked the response objects).
    """
    with urlopen(url) as response:
        return json.loads(response.read())


url = "https://raw.githubusercontent.com/pokemongo-dev-contrib/pokemongo-json-pokedex/master/output/pokemon.json"
json_data = _fetch_json(url)
final_data = []
for p in json_data:
    # Encounter sub-fields are optional in the upstream data; default to ''.
    encounter = p['encounter']
    final_data.append({
        'dex': p['dex'],
        'name': p['name'],
        'height': p['height'],
        'weight': p['weight'],
        'kmBuddyDistance': p['kmBuddyDistance'],
        'maxCP': p['maxCP'],
        'cinematicMoves': [x['name'] for x in p['cinematicMoves']],
        'quickMoves': [x['name'].replace(' Fast', '') for x in p['quickMoves']],
        'family': p['family']['name'],
        'stats': p['stats'],
        'types': [x['name'] for x in p['types']],
        'gender': encounter.get('gender', ''),
        'baseCaptureRate': encounter.get('baseCaptureRate', ''),
        'baseFleeRate': encounter.get('baseFleeRate', ''),
        'evolution': p['evolution'],
        'forms': [x['name'] for x in p['forms']],
    })
# Write a compact machine-readable copy and a pretty human-readable copy.
with open('pokedex.json', 'w') as f:
    json.dump(final_data, f, separators=(',', ':'))
with open("pokedex_pretty.json", "w") as f:
    json.dump(final_data, f, indent=2)

# ======== moves ========
url = "https://raw.githubusercontent.com/pokemongo-dev-contrib/pokemongo-json-pokedex/master/output/move.json"
json_data = _fetch_json(url)
with open('moves.json', 'w') as f:
    json.dump(json_data, f, separators=(',', ':'))
with open("moves_pretty.json", "w") as f:
    json.dump(json_data, f, indent=2)
from shared import Blob
from typing import List
from ._base import _Base
def _blob_with_header(count: int) -> Blob:
    """Create a Blob pre-filled with the war3map doodad-file header.

    BUG FIX: the return annotation was ``-> Blob()``, which *instantiated*
    a Blob at function-definition time; the annotation should be the type.

    Args:
        count: number of serialized records that will follow the header.
    """
    output = Blob()
    output.append_char4("W3do")  # file magic
    output.append_int(8)  # major version
    output.append_int(0xB)  # minor version (often set to [0B 00 00 00]h)
    output.append_int(count)
    return output
def serialize_as_blob(objects: List[_Base]) -> Blob:
    """
    Serialize unit/item objects into the binary war3map doodad format.
    The append order below IS the wire format and must not be changed.

    Data:
    Each unit/item is defined by a block of bytes (variable length) organized like this:
    char[4]: type ID (iDNR = random item, uDNR = random unit)
    int: variation
    float: coordinate X
    float: coordinate Y
    float: coordinate Z
    float: rotation angle
    float: scale X
    float: scale Y
    float: scale Z
    byte: flags*
    int: player number (owner) (player1 = 0, 16=neutral passive)
    byte: unknown (0)
    byte: unknown (0)
    int: hit points (-1 = use default)
    int: mana points (-1 = use default, 0 = unit doesn't have mana)
    int: map item table pointer (for dropped items on death)
        if -1 => no item table used
        if >= 0 => the item table with this number will be dropped on death
    int: number "s" of dropped item sets (can only be greater 0 if the item table pointer was -1)
        then we have s times a dropped item sets structures (see below)
    int: gold amount (default = 12500)
    float: target acquisition (-1 = normal, -2 = camp)
    int: hero level (set to1 for non hero units and items)
    int: strength of the hero (0 = use default)
    int: agility of the hero (0 = use default)
    int: intelligence of the hero (0 = use default)
    int: number "n" of items in the inventory
        then there is n times a inventory item structure (see below)
    int: number "n" of modified abilities for this unit
        then there is n times a ability modification structure (see below)
    int: random unit/item flag "r" (for uDNR units and iDNR items)
        0 = Any neutral passive building/item, in this case we have
            byte[3]: level of the random unit/item,-1 = any (this is actually interpreted as a 24-bit number)
            byte: item class of the random item, 0 = any, 1 = permanent ... (this is 0 for units)
            r is also 0 for non random units/items so we have these 4 bytes anyway (even if the id wasn't uDNR or iDNR)
        1 = random unit from random group (defined in the w3i), in this case we have
            int: unit group number (which group from the global table)
            int: position number (which column of this group)
            the column should of course have the item flag set (in the w3i) if this is a random item
        2 = random unit from custom table, in this case we have
            int: number "n" of different available units
            then we have n times a random unit structure
    int: custom color (-1 = none, 0 = red, 1=blue,...)
    int: Waygate: active destination number (-1 = deactivated, else it's the creation number of the target rect as in war3map.w3r)
    int: creation number
    *flags: may be similar to the war3map.doo flags
    """
    # Header: magic "W3do", format version, and the record count.
    b = _blob_with_header(count=len(objects))
    for o in objects:
        # Identity and placement.
        b.append_char4(o.type_id)
        b.append_int(o.variation)
        b.append_float(o.x)
        b.append_float(o.y)
        b.append_float(o.z)
        b.append_float(o.rotation)
        b.append_float(o.x_scale)
        b.append_float(o.y_scale)
        b.append_float(o.z_scale)
        b.append_byte(0)  # flags
        b.append_int(o.owner)
        b.append_byte(0)  # unknown
        b.append_byte(0)  # unknown
        b.append_int(o.hp)
        b.append_int(o.mp)
        # Item tables, inventories and ability modifications are not
        # supported by this writer; the asserts guard against silently
        # emitting an inconsistent record.
        # NOTE(review): asserts are stripped under `python -O`; consider
        # raising ValueError if these can be triggered by user input.
        assert o.item_table_pointer == -1 and not o.dropped_item_sets  # other stuff not supported
        b.append_int(o.item_table_pointer)
        b.append_int(o.gold_amount)
        b.append_float(o.target_acquisition)
        b.append_int(o.hero_level)
        b.append_int(o.strength)
        b.append_int(o.agility)
        b.append_int(o.intelligence)
        assert not o.items, "serializing inventories is not supported"
        b.append_int(len(o.items))
        assert not o.modified_abilities, "serializing modified abilities is not supported"
        b.append_int(len(o.modified_abilities))
        b.append_int(o.color)
        b.append_int(o.way_gate)
        b.append_int(o.creation_number)
    return b
"""
Dropped item set format
int: number "d" of dropable items
"d" times dropable items structures:
char[4]: item ID ([00 00 00 00]h = none)
this can also be a random item id (see below)
int: % chance to be dropped
Inventory item format
int: inventory slot (this is the actual slot - 1, so 1 => 0)
char[4]: item id (as in ItemData.slk) 0x00000000 = none
this can also be a random item id (see below)
Ability modification format
char[4]: ability id (as in AbilityData.slk)
int: active for autocast abilities, 0 = no, 1 = active
int: level for hero abilities
Random unit format
char[4]: unit id (as in UnitUI.slk)
this can also be a random unit id (see below)
int: percentual chance of choice
Random item ids
random item ids are of the type char[4] where the 1st letter is "Y" and the 3rd letter is "I"
the 2nd letter narrows it down to items of a certain item types
"Y" = any type
"i" to "o" = item of this type, the letters are in order of the item types in the dropdown box ("i" = charged)
the 4th letter narrows it down to items of a certain level
"/" = any level (ASCII 47)
"0" ... = specific level (this is ASCII 48 + level, so level 10 will be ":" and level 15 will be "?" and so on)
Random unit ids
random unit ids are of the type char[4] where the 1st three letters are "YYU"
the 4th letter narrows it down to units of a certain level
"/" = any level (ASCII 47)
"0" ... = specific level (this is ASCII 48 + level, so level 10 will be ":" and level 15 will be "?" and so on)
"""
|
#!/usr/bin/env python
# Command-line entry point: run the Stile single-epoch CCD systematics task.
# parseAndRun() is the LSST CmdLineTask driver and parses sys.argv itself.
from stile.hsc.base_tasks import CCDSingleEpochStileTask
CCDSingleEpochStileTask.parseAndRun()
|
# Command metadata read by the chat-command framework.
help_txt = "Give admin power to user"
isadmin = True
syntax = "/admin [nick]"


def execute(caller, args, ex):
    """Grant admin rights to the user named by ``args[0]``.

    Returns ``ex.SHOW_USAGE`` when no nick was given, ``ex.INVALID_ARG``
    when the nick is unknown, and ``None`` on success.
    """
    if not args:
        return ex.SHOW_USAGE

    target = ex.server.nick2user(args[0])
    if target is ex.server.userNotFound:
        return ex.INVALID_ARG

    target.admin = True
|
from datetime import timedelta
from rest_framework.status import HTTP_200_OK, HTTP_201_CREATED, HTTP_204_NO_CONTENT, HTTP_401_UNAUTHORIZED, \
HTTP_403_FORBIDDEN, HTTP_400_BAD_REQUEST
from rest_framework.test import APITestCase
from rest_framework_simplejwt.tokens import RefreshToken, OutstandingToken
from django.core import mail
from django.urls import reverse
from django.utils import timezone
from blog_api.users.models import User, VerificationCode
from blog_api.posts.models import Tag, Post
class PostTestsUpdate(APITestCase):
    """Integration tests for the post-delete endpoint.

    Each test drives the full register -> verify -> login -> act flow over
    the HTTP API, so statement order matters throughout.

    NOTE(review): the class name says "Update" but both active tests cover
    deletion — presumably a copy-paste leftover; confirm before renaming.
    """
    def setUp(self):
        # Two registrable accounts plus a reusable post payload.
        self.user_data = {
            'name': 'DabApps',
            'username': 'someuser00',
            'email': 'someemail@email.com',
            'password': 'Testing4321@',
            'password2': 'Testing4321@'
        }
        self.user2_data = {
            'name': 'DabApps',
            'username': 'someuser001',
            'email': 'someemail1@email.com',
            'password': 'Testing4321@',
            'password2': 'Testing4321@'
        }
        self.blog_post_data = {
            'title': 'A really cool title for some really cool blog post by a really cool developer.',
            'content': 'Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed facilisis nunc id orci hendrerit, id tempor lorem tincidunt.',
        }
    def test_user_can_delete_post(self):
        """A verified, authenticated user can delete their own post (204)."""
        print('Testing authenticated user can delete a post')
        register_url = reverse('user-register')
        verification_url = reverse('user-verify')
        login_url = reverse('user-login')
        delete_post_url = reverse('post-delete')
        # Register, then verify with the most recently issued code.
        reg_response = self.client.post(register_url, self.user_data, format='json')
        self.assertEqual(reg_response.status_code, HTTP_201_CREATED)
        verificaton_data = {
            'verification_code': VerificationCode.objects.latest('created_at').verification_code
        }
        verification_response = self.client.post(verification_url, verificaton_data, format='json')
        self.assertEqual(verification_response.status_code, HTTP_200_OK)
        login_data = {
            'email': self.user_data['email'],
            'password': self.user_data['password']
        }
        new_login = self.client.post(login_url, login_data, format='json')
        self.assertEqual(new_login.status_code, HTTP_200_OK)
        # Authenticate subsequent requests with the JWT access token.
        self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + new_login.data['access'])
        # Create the post directly in the DB; only the delete goes via HTTP.
        new_post_slug = Post.objects.create(
            author=User.objects.get(email=self.user_data['email']),
            title=self.blog_post_data['title'],
            content=self.blog_post_data['content']).slug
        delete_post_data = {
            'post_to_delete': new_post_slug
        }
        delete_post_response = self.client.post(delete_post_url, delete_post_data, format='json')
        self.assertEqual(delete_post_response.status_code, HTTP_204_NO_CONTENT)
        print('Done.....')
    def test_user_can_only_delete_own_posts(self):
        """Deleting another user's post is rejected with 400 and a message."""
        print('Testing user can only delete his own posts')
        register_url = reverse('user-register')
        verification_url = reverse('user-verify')
        login_url = reverse('user-login')
        delete_post_url = reverse('post-delete')
        reg_response = self.client.post(register_url, self.user_data, format='json')
        self.assertEqual(reg_response.status_code, HTTP_201_CREATED)
        reg2_response = self.client.post(register_url, self.user2_data, format='json')
        self.assertEqual(reg2_response.status_code, HTTP_201_CREATED)
        # Verify both accounts with their respective codes.
        for vcode in VerificationCode.objects.all():
            verificaton_data = {
                'verification_code': vcode.verification_code
            }
            verification_response = self.client.post(verification_url, verificaton_data, format='json')
            self.assertEqual(verification_response.status_code, HTTP_200_OK)
        login_data = {
            'email': self.user_data['email'],
            'password': self.user_data['password']
        }
        new_login = self.client.post(login_url, login_data, format='json')
        self.assertEqual(new_login.status_code, HTTP_200_OK)
        self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + new_login.data['access'])
        # User 1 owns the post...
        new_post_slug = Post.objects.create(
            author=User.objects.get(email=self.user_data['email']),
            title=self.blog_post_data['title'],
            content=self.blog_post_data['content']).slug
        # ...then clear credentials and re-login as user 2 for the attempt.
        self.client.credentials()
        login_data = {
            'email': self.user2_data['email'],
            'password': self.user2_data['password']
        }
        new_login = self.client.post(login_url, login_data, format='json')
        self.assertEqual(new_login.status_code, HTTP_200_OK)
        self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + new_login.data['access'])
        delete_post_data = {
            'post_to_delete': new_post_slug
        }
        delete_post_response = self.client.post(delete_post_url, delete_post_data, format='json')
        self.assertEqual(delete_post_response.status_code, HTTP_400_BAD_REQUEST)
        self.assertEqual(delete_post_response.data['deleted'], False)
        self.assertEqual(delete_post_response.data['message'], 'You can only delete your own post.')
        print('Done.....')
    # NOTE(review): the two tests below are disabled; they appear to cover
    # post-count bookkeeping on User and Tag. Re-enable or remove once that
    # behavior is settled.
    # def test_users_post_count_decrements_upon_deleting_post(self):
    #     print('Testing users post count decrements after deleting a post')
    #     register_url = reverse('user-register')
    #     verification_url = reverse('user-verify')
    #     login_url = reverse('user-login')
    #     create_post_url = reverse('post-create')
    #     delete_post_url = reverse('post-delete')
    #     reg_response = self.client.post(register_url, self.user_data, format='json')
    #     self.assertEqual(reg_response.status_code, HTTP_201_CREATED)
    #     verificaton_data = {
    #         'verification_code': VerificationCode.objects.latest('created_at').verification_code
    #     }
    #     verification_response = self.client.post(verification_url, verificaton_data, format='json')
    #     self.assertEqual(verification_response.status_code, HTTP_200_OK)
    #     login_data = {
    #         'email': self.user_data['email'],
    #         'password': self.user_data['password']
    #     }
    #     new_login = self.client.post(login_url, login_data, format='json')
    #     self.assertEqual(new_login.status_code, HTTP_200_OK)
    #     self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + new_login.data['access'])
    #     user = User.objects.first()
    #     self.blog_post_data['author'] = user.pub_id
    #     create_post_response = self.client.post(create_post_url, self.blog_post_data, format='json')
    #     self.assertEqual(create_post_response.status_code, HTTP_201_CREATED)
    #     self.assertEqual(create_post_response.data['created'], True)
    #     user.refresh_from_db()
    #     self.assertEqual(user.post_count, 1)
    #     delete_post_data = {
    #         'post_to_delete': Post.objects.first().slug
    #     }
    #     delete_post_response = self.client.post(delete_post_url, delete_post_data, format='json')
    #     self.assertEqual(delete_post_response.status_code, HTTP_204_NO_CONTENT)
    #     user.refresh_from_db()
    #     self.assertEqual(user.post_count, 0)
    #     print('Done.....')
    # def test_tags_post_count_decrements_upon_deleting_post(self):
    #     '''
    #     Ensure a tags post count decrements upon deleting a post
    #     '''
    #     print('Testing tags post count decrements after deleting a post')
    #     register_url = reverse('user-register')
    #     verification_url = reverse('user-verify')
    #     login_url = reverse('user-login')
    #     create_post_url = reverse('post-create')
    #     delete_post_url = reverse('post-delete')
    #     reg_response = self.client.post(register_url, self.user_data, format='json')
    #     self.assertEqual(reg_response.status_code, HTTP_201_CREATED)
    #     verificaton_data = {
    #         'verification_code': VerificationCode.objects.latest('created_at').verification_code
    #     }
    #     verification_response = self.client.post(verification_url, verificaton_data, format='json')
    #     self.assertEqual(verification_response.status_code, HTTP_200_OK)
    #     login_data = {
    #         'email': self.user_data['email'],
    #         'password': self.user_data['password']
    #     }
    #     new_login = self.client.post(login_url, login_data, format='json')
    #     self.assertEqual(new_login.status_code, HTTP_200_OK)
    #     self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + new_login.data['access'])
    #     tag = Tag.objects.create(name='lklkjlkj')
    #     self.assertEqual(tag.post_count, 0)
    #     self.blog_post_data['post_tags'] = tag.name
    #     create_post_response = self.client.post(create_post_url, self.blog_post_data, format='json')
    #     self.assertEqual(create_post_response.status_code, HTTP_201_CREATED)
    #     self.assertEqual(create_post_response.data['created'], True)
    #     tag.refresh_from_db()
    #     self.assertEqual(tag.post_count, 1)
    #     delete_post_data = {
    #         'post_to_delete': Post.objects.first().slug
    #     }
    #     delete_post_response = self.client.post(delete_post_url, delete_post_data, format='json')
    #     self.assertEqual(delete_post_response.status_code, HTTP_204_NO_CONTENT)
    #     tag.refresh_from_db()
    #     self.assertEqual(tag.post_count, 0)
    #     print('Done.....')
|
from flask.views import MethodView
from flask import request, session
from flask import jsonify
from orm.fields import HasField
from orm.exceptions import FieldNotValidError, ObjectNotFoundError
from .login import api_login_required
'''
These two classes implement a basic JSON REST API.
'''
class APIListView(MethodView):
    """JSON REST list endpoint: list (GET) and create (POST) objects owned
    by the logged-in session user.

    Subclasses must override :meth:`getModel` to supply the ORM model.
    """

    methods = ['GET', 'POST']
    decorators = [api_login_required]

    def getModel(self):
        """Return the ORM model class; concrete subclasses must override."""
        raise Exception("Model not defined")

    def get(self):
        """Return every object owned by the current user as ``{"data": [...]}``."""
        model = self.getModel()
        serialized = model.filter(owner__exact=session['logged_in']).serialize()
        response = jsonify({"data": serialized})
        response.status_code = 200
        return response

    def post(self):
        """Create a new object from the JSON request body (201 on success)."""
        model = self.getModel()
        try:
            instance = model.deserialize(request.json, True)
            # Ownership is always forced to the session user.
            instance.owner = session['logged_in']
            instance.save()
            response = jsonify(instance.serialize())
            response.status_code = 201  # created
            return response
        except FieldNotValidError as e:
            response = jsonify({"error": "Field '" + str(e) + "' is not valid!", "code": 400})
            response.status_code = 400
            return response
        except Exception as e:
            response = jsonify({"error": str(e), "code": 400})
            response.status_code = 400
            return response
class APIDetailView(MethodView):
    """JSON REST detail endpoint: retrieve, replace and delete a single
    object owned by the logged-in session user.

    Subclasses must override ``getModel`` to supply the ORM model.
    """
    methods = ['GET', 'PUT', 'DELETE']
    decorators = [api_login_required]
    def getModel(self):
        # Subclasses must override this to return the ORM model class.
        raise Exception("Model not defined")
    def get(self, pk, field=None):
        """Return object ``pk`` as JSON; other users' objects look like 404s.

        If ``field`` names a HasField relation, serialize the related
        object(s) instead of the object itself.
        """
        try:
            obj = self.getModel().get(pk)
            # Ownership check: deliberately reported as "not found".
            if obj.owner.pk != session['logged_in']:
                raise ObjectNotFoundError("Access denied")
        except ObjectNotFoundError as e:
            resp = jsonify({"error": "Object not found", "code": 404})
            resp.status_code = 404
            return resp
        # A relation is recognized via its backing '_<field>' descriptor.
        if field and hasattr(obj, field) and hasattr(obj, '_'+field) and isinstance(getattr(obj, '_'+field), HasField):
            ret = {}
            ret['data'] = getattr(obj, field).serialize()
            resp = jsonify(ret)
            resp.status_code = 200
            return resp
        resp = jsonify(obj.serialize())
        resp.status_code = 200
        return resp
    def put(self, pk, field=None):
        """Replace object ``pk`` with the JSON request body (200 on success)."""
        try:
            obj = self.getModel().get(pk)
            if obj.owner.pk != session['logged_in']:
                raise ObjectNotFoundError("Access denied")
            # NOTE(review): the fetched ``obj`` is not used below —
            # presumably deserialize() resolves the target row from the
            # payload; verify it updates ``pk`` rather than creating a new
            # object (the list view passes an extra flag to deserialize).
            populated_model = self.getModel().deserialize(request.json)
            populated_model.owner = session['logged_in']
            populated_model.save()
            resp = jsonify(populated_model.serialize())
            resp.status_code = 200
            return resp
        except FieldNotValidError as e:
            resp = jsonify({"error": "Field '" + str(e) + "' is not valid!", "code": 400})
            resp.status_code = 400
            return resp
        except Exception as e:
            resp = jsonify({"error": str(e), "code": 400})
            resp.status_code = 400
            return resp
    def delete(self, pk, field=None):
        """Delete object ``pk``; respond with its last serialized state."""
        try:
            obj = self.getModel().get(pk)
            if obj.owner.pk != session['logged_in']:
                raise ObjectNotFoundError("Access denied")
            # Serialize before deleting so the response can echo the object.
            resp = jsonify(obj.serialize())
            obj.delete()
            resp.status_code = 200
            return resp
        except Exception as e:
            resp = jsonify({"error": str(e), "code": 400})
            resp.status_code = 400
            return resp
# coding=utf-8
import ccxt
import sys
import time
verbose = False
# Prefer real credentials from a local config.py; fall back to empty-key
# defaults (public API only) when it is absent.
try:
    from config import markets as markets
except ImportError:
    markets = { # defaults
        '_1broker':    { 'verbose': verbose, 'apiKey': '' },
        '_1btcxe':     { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'bit2c':       { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'bitbay':      { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'bitcoincoid': { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'bitfinex':    { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'bitlish':     { 'verbose': verbose, 'apiKey': '', 'login': '', 'password': '' },
        'bitmarket':   { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'bitmex':      { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'bitso':       { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'bittrex':     { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'btcx':        { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'bxinth':      { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'ccex':        { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'cex':         { 'verbose': verbose, 'apiKey': '', 'secret': '', 'uid': '' },
        'coincheck':   { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'coinsecure':  { 'verbose': verbose, 'apiKey': '' },
        'exmo':        { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'fybse':       { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'fybsg':       { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'hitbtc':      { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'huobi':       { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'jubi':        { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'kraken':      { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'luno':        { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'okcoinusd':   { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'okcoincny':   { 'verbose': verbose, 'apiKey': '', 'secret' : '' },
        'poloniex':    { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'quadrigacx':  { 'verbose': verbose, 'apiKey': '', 'secret': '', 'uid': '' },
        'quoine':      { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'therock':     { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'vaultoro':    { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'virwox':      { 'verbose': verbose, 'apiKey': '', 'login': '', 'password': '' },
        'yobit':       { 'verbose': verbose, 'apiKey': '', 'secret': '' },
        'zaif':        { 'verbose': verbose, 'apiKey': '', 'secret': '' },
    }
#------------------------------------------------------------------------------
# Sort market ids alphabetically, then replace each config dict with an
# instantiated ccxt market object carrying its id and verbosity.
# NOTE(review): `id` shadows the builtin; harmless here, but rename if
# this section is ever refactored.
tuples = list (ccxt.Market.keysort (markets).items ())
print (tuples)
for (id, params) in tuples:
    # id, params = tuples[t]
    market = getattr (ccxt, id)
    markets[id] = market (dict (params, **({ 'id': id, 'verbose': verbose })))
def test_market(market):
    """Smoke-test one ccxt market.

    Exercises the public API (product listing plus a ticker per symbol) and,
    when API credentials are configured, the private balance endpoint.  The
    commented-out calls document the optional order-placement API.
    """
    delay = 2  # seconds between requests, to respect exchange rate limits
    print('-----------------------------------------------------------------')
    products = market.load_products()
    # BUG FIX: materialize the keys once — on Python 3, dict views are not
    # subscriptable, so the original `products.keys()[0]` raised TypeError.
    keys = list(products.keys())
    print(market.id, len(keys), 'symbols', keys)
    # Prefer a well-known liquid pair when the market lists one; fall back
    # to the first listed symbol otherwise.
    symbol = keys[0]
    for s in ['BTC/USD', 'BTC/CNY', 'BTC/ETH', 'ETH/BTC', 'BTC/JPY']:
        if s in keys:
            symbol = s
            break
    #--------------------------------------------------------------------------
    # public API
    # orderbook = market.fetch_order_book(symbol)
    # trades = market.fetch_trades(symbol)
    for symbol in keys:
        if symbol.find('.d') < 0:  # skip dark-pool symbols (".d" suffix)
            ticker = market.fetch_ticker(symbol)
            print(market.id, symbol, 'ticker',
                  ticker['datetime'],
                  'high: ' + str(ticker['high']),
                  'low: ' + str(ticker['low']),
                  'bid: ' + str(ticker['bid']),
                  'ask: ' + str(ticker['ask']),
                  'volume: ' + str(ticker['quoteVolume']),
                  )
            time.sleep(delay)
    #--------------------------------------------------------------------------
    # private API -- requires credentials; an empty/missing key means
    # there is nothing more to test.
    if not market.apiKey:
        return
    print('balance')
    balance = market.fetch_balance()
    print(balance)
    time.sleep(delay)
    # Example parameters for the optional order-placement calls below.
    amount = 1
    price = 0.0161
    # print('market buy')
    # marketBuy = market.buy(symbol, amount)
    # print(marketBuy)
    # time.sleep(delay)
    # print('market sell')
    # marketSell = market.sell(symbol, amount)
    # print(marketSell)
    # time.sleep(delay)
    # print('limit buy')
    # limitBuy = market.buy(symbol, amount, price)
    # print(limitBuy)
    # time.sleep(delay)
    # print('limit sell')
    # limitSell = market.sell(symbol, amount, price)
    # print(limitSell)
    # time.sleep(delay)
# Loop variant: smoke-test every configured market, logging failures.
# for (id, params) in tuples:
#     try:
#         test_market (markets[id])
#     except Exception as e:
#         print (type (e).__name__, e.args)
# sys.exit ()
# Currently only bitfinex is exercised.
test_market (markets['bitfinex'])
|
from alembic_utils.experimental._collect_instances import collect_instances
# Public re-export: expose the experimental instance-collection helper as
# this package's API surface.
__all__ = ["collect_instances"]
|
import random
from typing import List
from discord import Embed, Color, Member, VoiceState, VoiceChannel
from discord.ext import commands
from discord.ext.commands import Context as CommandContext, Bot
class VCRandomCog(object):
    """Cog exposing a command that mentions a random member of the
    caller's voice channel.

    NOTE(review): plain-class cog (pre-1.0 discord.py style); newer
    discord.py requires subclassing commands.Cog — confirm the pinned
    library version before porting.
    """
    def __init__(self, bot: commands.Bot):
        self.bot: commands.Bot = bot
    @commands.command(hidden=True)
    async def random_voice_member(self, ctx: CommandContext):
        """Reply with a random member of the invoker's voice channel."""
        # Guild-only: DMs have no voice channels.
        if ctx.guild is None:
            return await ctx.send(
                embed=Embed(
                    title="Only available in guilds",
                    color=Color.red()))
        member: Member = ctx.author
        voice: VoiceState = member.voice
        # The caller must themselves be connected to a voice channel.
        if voice is None:
            return await ctx.send(
                embed=Embed(
                    title="Please join a Voice Channel",
                    color=Color.red()))
        channel: VoiceChannel = voice.channel
        members: List[Member] = channel.members
        # Alone in the channel: a "random" pick would always be the caller.
        if len(members) < 2:
            return await ctx.send(
                embed=Embed(
                    title="YOU LONELY FUCK LOOOOOOOOL",
                    color=Color.red()
                ).set_image(url="http://www.locopengu.com/images/media/750xauto/HG2GT.png"))
        await ctx.send(
            embed=Embed(
                title="Random Voicechat Member",
                description=random.choice(members).mention,
                color=Color.red()))
def setup(bot: Bot):
    """Extension entry point used by ``bot.load_extension``: register the cog."""
    cog = VCRandomCog(bot)
    bot.add_cog(cog)
|
#!/usr/bin/env python
# Copyright 2019 The Vitess Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A keytar webdriver test."""
import json
import logging
import signal
import subprocess
import time
import os
from selenium import webdriver
import unittest
import urllib2
import environment
class TestKeytarWeb(unittest.TestCase):
    """Browser-level test of the keytar results dashboard.

    Python 2 module (uses ``urllib2``): launches the keytar Flask app in a
    subprocess, posts a fake webhook request, then drives a webdriver to
    verify the reported results.
    """
    @classmethod
    def setUpClass(cls):
        cls.driver = environment.create_webdriver()
        port = environment.reserve_ports(1)
        keytar_folder = os.path.join(environment.vttop, 'test/cluster/keytar')
        # Start the Flask app in its own process group (os.setsid) so the
        # entire subtree can be terminated with a single killpg on teardown.
        cls.flask_process = subprocess.Popen(
            [os.path.join(keytar_folder, 'keytar.py'),
             '--config_file=%s' % os.path.join(keytar_folder, 'test_config.yaml'),
             '--port=%d' % port, '--password=foo'],
            preexec_fn=os.setsid)
        cls.flask_addr = 'http://localhost:%d' % port
    @classmethod
    def tearDownClass(cls):
        os.killpg(cls.flask_process.pid, signal.SIGTERM)
        cls.driver.quit()
    def _wait_for_complete_status(self, timeout_s=180):
        """Refresh the page every 5s until 'Complete' appears in #results,
        failing the test after timeout_s seconds."""
        start_time = time.time()
        while time.time() - start_time < timeout_s:
            if 'Complete' in self.driver.find_element_by_id('results').text:
                return
            self.driver.refresh()
            time.sleep(5)
        self.fail('Timed out waiting for test to finish.')
    def test_keytar_web(self):
        """Post a fake docker-hub-style webhook and verify the UI output."""
        self.driver.get(self.flask_addr)
        req = urllib2.Request('%s/test_request?password=foo' % self.flask_addr)
        req.add_header('Content-Type', 'application/json')
        urllib2.urlopen(
            req, json.dumps({'repository': {'repo_name': 'test/image'}}))
        self._wait_for_complete_status()
        logging.info('Dummy test complete.')
        self.driver.find_element_by_partial_link_text('PASSED').click()
        self.assertIn('Dummy output.',
                      self.driver.find_element_by_tag_name('body').text)
if __name__ == '__main__':
    unittest.main()
|
"""Session wide fixtures."""
from __future__ import annotations
import asyncio
from pathlib import Path
from typing import TYPE_CHECKING, Generator, Optional
import numpy as np
import pytest
from fastapi.testclient import TestClient
from PIL import Image
if TYPE_CHECKING:
from _pytest.monkeypatch import MonkeyPatch
from fastapi_mail import FastMail
from numpy.typing import ArrayLike
from py.path import local
from pytest_mock import MockFixture
from imagesecrets.config import Settings
from imagesecrets.database.image.services import ImageService
from imagesecrets.database.token.services import TokenService
from imagesecrets.database.user.services import UserService
@pytest.fixture(autouse=True)
def api_settings(
    request,
    tmpdir: local,
    monkeypatch: MonkeyPatch,
) -> Optional[Settings]:
    """Return settings for testing environment."""
    if "disable_autouse" in set(request.keywords):
        return
    test_env = {
        "DATABASE_URL": "postgres://username:password@test_database:5432/imagesecrets",
        "SECRET_KEY": "test_secret_key" * 10,
        "ICON_URL": "https://www.test_icon_url.com",
        "SWAGGER_URL": "https://www.test_swagger_url.com",
        "REDOC_URL": "https://www.test_redoc_url.com",
        "REPOSITORY_URL": "https://www.test_repository_url.com",
        "MAIL_USERNAME": "test_username",
        "MAIL_PASSWORD": "test_password",
        "MAIL_PORT": "0",
        "MAIL_SERVER": "test_server",
        "MAIL_FROM": "test_mail_from@email.com",
    }
    for env_key, env_value in test_env.items():
        monkeypatch.setenv(env_key, env_value)
    # Import only after the environment is patched so Settings reads the
    # test values above.
    from imagesecrets.config import settings

    return settings
@pytest.fixture(scope="session")
def app_name(api_settings) -> str:
    """Return the default app name taken from the patched settings."""
    name: str = api_settings.app_name
    return name
@pytest.fixture(scope="function", autouse=True)
def patch_tasks(request, monkeypatch, api_settings: Settings) -> None:
    """Patch tasks with dummy functions."""
    if "disable_autouse" in set(request.keywords):
        return

    from imagesecrets.api import tasks

    async def fake_clear_tokens():
        """No-op stand-in for the real token-clearing task."""

    # The lambda mirrors the real API: calling it hands back a coroutine.
    monkeypatch.setattr(tasks, "clear_tokens", lambda: fake_clear_tokens())
@pytest.fixture(scope="function", autouse=True)
def email_client(
    request,
    monkeypatch: MonkeyPatch,
    api_settings: Settings,
) -> FastMail | None:
    """Return test email client."""
    if "disable_autouse" in set(request.keywords):
        return

    from imagesecrets.api import dependencies

    client = dependencies.get_mail()
    # Never send real mail from the test suite.
    client.config.SUPPRESS_SEND = 1
    client.config.USE_CREDENTIALS = False
    monkeypatch.setattr(dependencies, "get_mail", lambda: client)
    return dependencies.get_mail()
@pytest.fixture()
def async_context_manager():
    """Return asynchronous context manager."""
    # A hand-written class rather than `contextlib.asynccontextmanager`
    # so tests can dynamically choose what __aenter__ hands back
    # (e.g. some `Mock` objects).
    class AsyncContextManager:
        def __init__(self, obj):
            self.obj = obj

        async def __aenter__(self):
            return self.obj

        async def __aexit__(self, exc_type, exc_val, exc_tb):
            pass

    return AsyncContextManager(obj=None)
@pytest.fixture()
def async_iterator():
    """Return an asynchronous iterator yielding the numbers 0 through 4."""

    class AsyncIterator:
        def __init__(self, objs):
            self.objs = objs

        async def __aiter__(self):
            for item in self.objs:
                yield item

    return AsyncIterator(objs=range(5))
@pytest.fixture()
def database_session(mocker: MockFixture, async_context_manager):
    """Return a mocked database session with async execute/stream methods."""
    fake_session = mocker.Mock()
    fake_session.add = mocker.Mock()
    fake_session.begin_nested = mocker.Mock(return_value=async_context_manager)
    fake_session.execute = mocker.AsyncMock()
    fake_session.stream = mocker.AsyncMock()
    return fake_session
@pytest.fixture()
def user_service(database_session) -> UserService:
    """Return a UserService bound to the mocked database session."""
    from imagesecrets.database.user.services import UserService

    service = UserService(session=database_session)
    return service
@pytest.fixture()
def image_service(database_session) -> ImageService:
    """Return an ImageService bound to the mocked database session."""
    from imagesecrets.database.image.services import ImageService

    service = ImageService(session=database_session)
    return service
@pytest.fixture()
def token_service(database_session) -> TokenService:
    """Return a TokenService bound to the mocked database session."""
    from imagesecrets.database.token.services import TokenService

    service = TokenService(session=database_session)
    return service
@pytest.fixture()
def api_client(
    monkeypatch,
    mocker: MockFixture,
    api_settings: Settings,
    user_service,
    token_service,
    image_service,
) -> Generator[TestClient, None, None]:
    """Return api test client connected to fake database."""
    from imagesecrets.database.image.services import ImageService
    from imagesecrets.database.token.services import TokenService
    from imagesecrets.database.user.services import UserService
    from imagesecrets.interface import app

    # Remove the real database-connection startup hook so the client never
    # touches an actual database.
    # NOTE(review): popping by the copy's index while mutating the live list
    # is only safe while at most one hook matches — confirm if more are added.
    for index, func in enumerate(app.router.on_startup.copy()):
        if func.__module__ == "imagesecrets.database.base":
            app.router.on_startup.pop(index)
    for service, fixture in zip(
        (UserService, ImageService, TokenService),
        (user_service, image_service, token_service),
    ):
        # The current fixture is bound via a default argument so each
        # generated dependency yields its own service instance
        # (avoids the late-binding-closure pitfall).
        async def func(obj=fixture):
            yield obj
        monkeypatch.setattr(
            service,
            "from_session",
            func,
        )
        # Replace every public service method with a Mock/AsyncMock so tests
        # can assert on calls without any real database work.
        methods = [
            method
            for method in dir(service)
            if not method.startswith("__") and method != "from_session"
        ]
        for method in methods:
            func = getattr(service, method)
            if asyncio.iscoroutinefunction(func=func):
                mock = mocker.AsyncMock()
            else:
                mock = mocker.Mock()
            monkeypatch.setattr(service, method, mock)
    # testclient __enter__ and __exit__ deals with event loop
    with TestClient(app=app) as client:
        yield client
@pytest.fixture(scope="session")
def test_image_path() -> Path:
    """Return the path to the test.png image."""
    image_path = Path(__file__).parent / "test.png"
    assert image_path.is_file()
    return image_path
@pytest.fixture(scope="session")
def api_image_file(
    test_image_path: Path,
) -> dict[str, tuple[str, bytes, str]]:
    """Return the dict with file needed to use post requests.

    :param test_image_path: Path to the bundled ``test.png`` fixture image.
    :return: Mapping suitable for ``TestClient.post(..., files=...)`` —
        ``{"file": (filename, raw bytes, content type)}``.
    """
    # Path.read_bytes() opens and closes the file itself; the previous
    # ``open("rb").read()`` left the handle dangling until garbage collection.
    return {
        "file": (
            test_image_path.name,
            test_image_path.read_bytes(),
            "image/png",
        ),
    }
@pytest.fixture(scope="session")
def test_image_array(
    test_image_path: Path,
) -> ArrayLike:
    """Return numpy array of the test image.

    :param test_image_path: Path to the bundled ``test.png`` fixture image.
    :return: ``uint8`` array of the image's RGB pixel data.
    """
    # Enter the context on the image returned by Image.open itself; the
    # previous code wrapped ``convert()``'s detached copy, so the original
    # file handle was never closed.
    with Image.open(test_image_path) as img:
        array = np.array(img.convert("RGB"), dtype=np.uint8)
    return array
@pytest.fixture(scope="session")
def delimiter_array() -> ArrayLike:
    """Return a delimiter array, string form = 'dlm'."""
    # Derive the 24x1 bit column from the 8-bit ASCII codes of "dlm"
    # instead of spelling out all twenty-four bits literally.
    bits = "".join(format(ord(char), "08b") for char in "dlm")
    return np.array([[int(bit)] for bit in bits])
|
import numpy as np
def predict(x, w):
    """Perceptron prediction: threshold the weighted sum of the inputs."""
    weighted_sum = (x * w).sum()
    return step_function(weighted_sum)
def step_function(a):
    """Heaviside step: return 1 where ``a >= 0``, else 0 (elementwise).

    Uses the builtin ``int`` dtype: ``np.int`` was deprecated in NumPy 1.20
    and removed in 1.24, so the original call raises AttributeError on
    current NumPy.
    """
    return np.array(a >= 0, dtype=int)
# AND-gate training data; the leading -1 in every sample is the bias input.
x_data = np.array([[-1, 0, 0], [-1, 0, 1], [-1, 1, 0], [-1, 1, 1]])
y_data = np.array([0, 0, 0, 1])

w = np.array([0.3, 0.4, 0.1])
learning_rate = 0.05

# Perceptron learning rule: sweep the samples until one full epoch yields
# no misclassification.
while True:
    epoch_errors = 0
    for idx, x in enumerate(x_data):
        y_ = predict(x, w)
        cost = y_data[idx] - y_
        w = w + learning_rate * x * cost
        # Accumulate the *magnitude* of each error: with the original
        # ``epoch_cost += cost`` a +1 and a -1 mistake in the same epoch
        # cancelled out and training stopped before convergence.
        epoch_errors += abs(cost)
    if epoch_errors == 0:
        break

print(w)
# Generated by Django 3.0.4 on 2020-11-27 11:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional ``image`` field to the product ``category`` model."""

    dependencies = [
        ('products', '0002_auto_20201127_1236'),
    ]

    operations = [
        migrations.AddField(
            model_name='category',
            name='image',
            # blank=True makes the field optional in forms/admin;
            # upload_to='' stores uploads at the MEDIA_ROOT top level.
            field=models.ImageField(blank=True, upload_to=''),
        ),
    ]
|
import logging
import requests
from django.conf import settings
# Module-level logger shared by every API helper below.
logger = logging.getLogger('ppwa.api')

# Users of api shouldn't import requests to catch exceptions.
HTTPError = requests.HTTPError
def product_list():
    '''Product listing.

    :return: list of {id, uuid, slug, name}
    :raise ValueError: JSON parsing issues.
    :raise HTTPError: various networking issues.
    '''
    url = settings.PRODUCT_API_URL
    auth = settings.PRODUCT_API_AUTH
    response = requests.get(url, headers={'X-AUTH': auth})
    # Lazy %-args: the message is only formatted when this level is enabled,
    # and a mismatched placeholder count can no longer raise at call time.
    logger.info('GET %s: %s', response.status_code, url)
    response.raise_for_status()
    data = response.json()
    return data
def product_detail(pid):
    '''Return detail on one product from API.

    :param pid: product id.
    :return: {pid, uuid, name, slug, price, cost, inventory_on_hand, description}
    :raise ValueError: JSON parsing issues.
    :raise HTTPError: various networking issues.
    '''
    # rstrip: only a *trailing* slash should be normalised away; strip('/')
    # would also eat a leading one.
    base = settings.PRODUCT_API_URL.rstrip('/')
    auth = settings.PRODUCT_API_AUTH
    url = '%s/%s/' % (base, pid, )
    response = requests.get(url, headers={'X-AUTH': auth})
    # Lazy %-args avoid formatting when the log level is disabled.
    logger.info('GET %s: %s', response.status_code, url)
    response.raise_for_status()
    data = response.json()
    return data
def post_purchase(pid, quantity, cust_name, email, phone):
    '''POST purchase order to API.

    :param pid: product id to be purchased.
    :param quantity: quantity to be purchased.
    :param cust_name: Customer name.
    :param email: Customer email.
    :param phone: Customer phone.
    :return: {confirmation_code, }
    :raise ValueError: JSON parsing issues.
    :raise HTTPError: various networking issues.
    '''
    # rstrip: only a *trailing* slash should be normalised away; strip('/')
    # would also eat a leading one.
    base = settings.PRODUCT_API_URL.rstrip('/')
    auth = settings.PRODUCT_API_AUTH
    url = '%s/%s/purchase/' % (base, pid, )
    payload = {
        'customer_name': cust_name,
        'customer_email': email,
        'customer_phone': phone,
        'quantity': quantity,
    }
    response = requests.post(url, headers={'X-AUTH': auth}, json=payload)
    # Lazy %-args avoid formatting when the log level is disabled.
    logger.info('POST %s: %s', response.status_code, url)
    response.raise_for_status()
    data = response.json()
    return data
|
from app import db
class Team(db.Model):
    """Database model for a sports team and its visual identity."""

    __tablename__ = "teams"

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(200))
    country = db.Column(db.String(100))
    brand = db.Column(db.String(800))
    # Primary and alternate team colours — presumably hex codes or colour
    # names; TODO confirm the format against callers.
    color = db.Column(db.String(20))
    alternateColor = db.Column(db.String(20))
    abbreviation = db.Column(db.String(5))
    foundation_date = db.Column(db.DateTime, nullable=True)
    # NOTE(review): looks like image URLs/paths for the two kits — confirm.
    uniform_one = db.Column(db.String(300), nullable=True)
    uniform_two = db.Column(db.String(300), nullable=True)
    # Active flag; defaults to True and is intentionally not a constructor
    # argument below.
    activate = db.Column(db.Boolean, unique=False, default=True)
    # Many-to-many roster through the 'teams_players' association table.
    players = db.relationship('Player', secondary = 'teams_players')

    def __init__(self, name, country, brand, color, alternateColor, abbreviation, foundation_date, uniform_one, uniform_two):
        """Store the given attributes; ``activate`` keeps its column default."""
        self.name = name
        self.country = country
        self.brand = brand
        self.color = color
        self.alternateColor = alternateColor
        self.abbreviation = abbreviation
        self.uniform_one = uniform_one
        self.uniform_two = uniform_two
        self.foundation_date = foundation_date

    def __repr__(self):
        return "<Team %r>" % self.name
# Package public surface: the Settings class plus the COCO class-name tags.
# NOTE(review): the wildcard import re-exports everything COCO_classes_tag
# defines — consider declaring __all__ there to make the surface explicit.
from .settings import Settings
from .COCO_classes_tag import *

name = 'core'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.