content stringlengths 5 1.05M |
|---|
#!/usr/bin/env python3
"""Consume each character of the first input line from the letter pool given
on the second line (rotating the pool until the character is at the front),
then print the remaining distinct letters in random order followed by a
fixed whitespace line."""
import random

pattern = input()
pool = list(input())
for ch in pattern:
    # Rotate the pool until ch is the first element, then drop that copy.
    while pool[0] != ch:
        pool = pool[1:] + pool[:1]
    pool.remove(ch)
# Deduplicate whatever is left and emit it in a random order.
pool = list(set(pool))
random.shuffle(pool)
print(*pool, sep='')
print(' \t\n \n \t\t')
|
import pathlib
from .models import ConvTasNet, DCCRNet, DCUNet, DPRNNTasNet, DPTNet, LSTMTasNet, DeMask
from .utils import deprecation_utils, torch_utils # noqa
# Absolute path of the repository root (two directory levels above this file).
project_root = str(pathlib.Path(__file__).expanduser().absolute().parent.parent)
# Package version string; the "dev" suffix marks an unreleased build.
__version__ = "0.5.0dev"
def show_available_models():
    """Print the name of every pretrained model known to the hub."""
    from .utils.hub_utils import MODELS_URLS_HASHTABLE
    names = list(MODELS_URLS_HASHTABLE.keys())
    print(" \n".join(names))
def available_models():
    """Return the hub's mapping of model name to download metadata."""
    from .utils.hub_utils import MODELS_URLS_HASHTABLE
    table = MODELS_URLS_HASHTABLE
    return table
# Public API re-exported on `from <package> import *`.
__all__ = [
    "ConvTasNet",
    "DPRNNTasNet",
    "DPTNet",
    "LSTMTasNet",
    "DeMask",
    "DCUNet",
    "DCCRNet",
    "show_available_models",
    # Fix: available_models() is defined above but was missing here even
    # though its sibling show_available_models was exported.
    "available_models",
]
|
"""
Main entry point for `graphql-validate` command-line utility.
This is to enable `python -m graphql-validate` if that is needed for any reason,
normal use should be to use the `graphql-validate` command-line tool directly.
"""
from graphql_validate.cli import cli
cli()
|
#!/usr/bin/env python
from __future__ import print_function
import glob
import io
import os
import subprocess
import sys
import tooltool
# Glob patterns (relative to a gecko checkout) selecting the tooltool
# manifests to rewrite, keyed by the platform name used as the build
# directory's basename.
PLATFORM_MANIFESTS = {
    'Linux': ['browser/config/tooltool-manifests/linux32/*.manifest', 'browser/config/tooltool-manifests/linux64/*.manifest*', 'browser/config/tooltool-manifests/macosx64/cross-*.manifest', 'mobile/android/config/tooltool-manifests/*/*.manifest'],
    # Don't include cross-releng.manifest here.
    'Darwin': ['browser/config/tooltool-manifests/macosx64/asan.manifest', 'browser/config/tooltool-manifests/macosx64/clang.manifest', 'browser/config/tooltool-manifests/macosx64/releng.manifest'],
    'Windows': ['browser/config/tooltool-manifests/win32/*.manifest', 'browser/config/tooltool-manifests/win64/*.manifest'],
}
def indices(s, which):
    """Yield every index at which *which* occurs in *s*, in ascending order.

    Overlapping occurrences are included, because each search resumes one
    character after the previous hit.
    """
    pos = s.find(which)
    while pos != -1:
        yield pos
        pos = s.find(which, pos + 1)
def rewrite_manifest_entry(manifest_file, new_data, index):
    """Replace the *index*-th ``{...}`` entry of a tooltool manifest with *new_data*.

    Entries are located by scanning for literal ``{`` characters; the entry is
    assumed to end at the next ``}``.
    """
    # Fix: the file was opened in binary mode while the search markers and
    # new_data are str, which raises TypeError on Python 3; the handles were
    # also never closed. Use text mode and context managers.
    with open(manifest_file, 'r') as f:
        old_data = f.read()
    start = list(indices(old_data, '{'))[index]
    end = old_data.index('}', start)
    with open(manifest_file, 'w') as f:
        f.write(old_data[:start])
        f.write(new_data)
        f.write(old_data[end + 1:])
def update_tooltool_manifests(build_dir, gecko_dir):
    """Copy the sccache entry from build_dir/releng.manifest into every
    matching in-tree tooltool manifest under gecko_dir."""
    # The build directory's basename (e.g. 'Linux') selects the manifest globs.
    system = os.path.basename(build_dir)
    new_manifest_file = os.path.join(build_dir, 'releng.manifest')
    # NOTE(review): the handle is never closed, and on Python 3 `rev` is
    # bytes so '%s' % rev below would embed "b'...'" in the manifest text —
    # this script appears to assume Python 2 semantics; confirm before
    # running under Python 3.
    rev = open(os.path.join(build_dir, 'REV'), 'rb').read().strip()
    manifest = tooltool.open_manifest(new_manifest_file)
    b = io.BytesIO()
    manifest.dump(b)
    # Rebuild the entry text: strip the surrounding '[' ']' from the dumped
    # manifest, drop its opening line, and prepend a version line recording
    # the sccache revision.
    new_data = '\n'.join(['{', ' "version": "sccache rev %s",' % rev] + b.getvalue().strip(' \n[]').splitlines()[1:])
    for manifest_glob in PLATFORM_MANIFESTS[system]:
        for platform_manifest_file in glob.glob(os.path.join(gecko_dir, manifest_glob)):
            print(platform_manifest_file)
            platform_manifest = tooltool.open_manifest(platform_manifest_file)
            for i, f in enumerate(platform_manifest.file_records):
                if f.filename.startswith('sccache'):
                    # Swap in the freshly built record, rewrite its textual
                    # entry in place, and stop after the first sccache record.
                    platform_manifest.file_records[i] = manifest.file_records[0]
                    rewrite_manifest_entry(platform_manifest_file, new_data, i)
                    break
def main():
    """Entry point: update tooltool manifests for every build directory
    found under the destination directory given on the command line."""
    if len(sys.argv) < 3:
        print("Usage: update-gecko-manifests.py <destination directory> <gecko clone>")
        sys.exit(1)
    dest_dir, gecko_dir = sys.argv[1], sys.argv[2]
    for entry in os.listdir(dest_dir):
        update_tooltool_manifests(os.path.join(dest_dir, entry), gecko_dir)


if __name__ == '__main__':
    main()
|
import random
import numpy as np
import os
from scipy import ndimage
import math
import tensorflow as tf
def clip(image, clip_min, clip_max):
    """Clamp every element of a 3-D image to [clip_min, clip_max], in place.

    Accepts either a numpy array or a nested Python list; the (mutated)
    input object is returned.
    """
    if isinstance(image, np.ndarray):
        # Fix: the original ran a triple Python-level loop over the array,
        # which is O(H*W*C) interpreter time. Element-wise assignment keeps
        # the array's dtype, matching the original per-element writes.
        image[...] = np.clip(image, clip_min, clip_max)
        return image
    shape = np.asarray(image).shape
    for i in range(shape[0]):
        for j in range(shape[1]):
            for k in range(shape[2]):
                if image[i][j][k] > clip_max:
                    image[i][j][k] = clip_max
                elif image[i][j][k] < clip_min:
                    image[i][j][k] = clip_min
    return image
def c_occl(gradients, start_point, rect_shape):
    """Keep only a rectangular window of the gradients, zeroing the rest.

    The window of size *rect_shape* has its top-left corner at *start_point*
    and is applied across the leading (batch) dimension. A new array is
    returned; the input is not modified.
    """
    gradients = np.asarray(gradients)
    rows = slice(start_point[0], start_point[0] + rect_shape[0])
    cols = slice(start_point[1], start_point[1] + rect_shape[1])
    masked = np.zeros_like(gradients)
    masked[:, rows, cols] = gradients[:, rows, cols]
    return masked
def c_light(gradients):
    """Return an array shaped like *gradients* with every entry equal to the
    mean gradient value."""
    ones = np.ones_like(gradients)
    return np.mean(gradients) * ones
def c_black(gradients, start_point, rect_shape):
    """Return gradients that are -1 inside the given rectangle when the mean
    of the original values there is negative, and zero everywhere else
    (an all-zero array when the patch mean is non-negative)."""
    gradients = np.asarray(gradients)
    rows = slice(start_point[0], start_point[0] + rect_shape[0])
    cols = slice(start_point[1], start_point[1] + rect_shape[1])
    result = np.zeros_like(gradients)
    window = gradients[:, rows, cols]
    if np.mean(window) < 0:
        result[:, rows, cols] = -np.ones_like(window)
    return result
def generate_value_1(row, col):
    """Build a single-channel random perturbation of shape [1, row, col, 1].

    Each entry is (U(0,1) - 0.5) / (d * 100) with d drawn uniformly from
    1..20, so every value lies strictly inside (-0.005, 0.005).
    """
    matrix = []
    for _ in range(row):
        line = []
        for _ in range(col):
            div = random.randint(1, 20)
            line.append([(random.random() - 0.5) / (div * 100)])
        matrix.append(line)
    return [matrix]
# generate for RGB
def generate_value_3(row, col):
    """Build an RGB random perturbation of shape [1, row, col, 3].

    Each channel value is (U(0,1) - 0.5) / (d * 100) with d drawn uniformly
    from 1..20, so every value lies strictly inside (-0.005, 0.005).
    """
    matrix = []
    for _ in range(row):
        line = []
        for _ in range(col):
            pixel = []
            for _ in range(3):
                div = random.randint(1, 20)
                pixel.append((random.random() - 0.5) / (div * 100))
            line.append(pixel)
        matrix.append(line)
    return [matrix]
def generate_value_uniform_1(row, col, delta):
    """Uniform single-channel noise of shape [row, col, 1] with entries drawn
    from U(-delta, delta).

    Note: unlike generate_value_1, the matrix is returned without an extra
    leading batch dimension.
    """
    matrix = []
    for _ in range(row):
        line = [[random.uniform(-delta, delta)] for _ in range(col)]
        matrix.append(line)
    return matrix
# generate for RGB
def generate_value_uniform_3(row, col, delta):
    """Uniform RGB noise of shape [row, col, 3].

    Each channel value is drawn from U(-b, b) with b = delta / (32 * sqrt(3)).
    Returned without a leading batch dimension.
    """
    bound = delta / (32 * np.sqrt(3))
    matrix = []
    for _ in range(row):
        line = []
        for _ in range(col):
            line.append([random.uniform(-bound, bound) for _ in range(3)])
        matrix.append(line)
    return matrix
def generate_value_normal_1(row, col, delta):
    """Gaussian single-channel noise of shape [row, col, 1].

    Each entry is random.gauss(0, delta**2 / 4); note that gauss takes a
    standard deviation, so the spread parameter here is delta squared over
    four. Returned without a leading batch dimension.
    """
    spread = 1 / 4 * pow(delta, 2)
    matrix = []
    for _ in range(row):
        line = [[random.gauss(0, spread)] for _ in range(col)]
        matrix.append(line)
    return matrix
# generate for RGB
def generate_value_normal_3(row, col, delta):
    """Gaussian RGB noise of shape [row, col, 3].

    Each channel value is random.gauss(0, s) with
    s = (delta / (32 * sqrt(3)))**2 / 4. Returned without a leading batch
    dimension.
    """
    spread = 1 / 4 * pow(delta / (32 * np.sqrt(3)), 2)
    matrix = []
    for _ in range(row):
        line = []
        for _ in range(col):
            line.append([random.gauss(0, spread) for _ in range(3)])
        matrix.append(line)
    return matrix
def get_data_mutation_test(adv_file_path):
    '''
    :param file_path: the file path for the adversary images
    :return: the formatted data for mutation test, the actual label of the images, and the predicted label of the images
    '''
    image_list = []
    real_labels = []
    predicted_labels = []
    image_files =[]
    # File names are expected to look like ..._<real>_<predicted>_<...>.png,
    # so both labels are recovered from the trailing underscore fields.
    for img_file in os.listdir(adv_file_path):
        if img_file.endswith('.png'):
            img_file_split = img_file.split('_')
            real_label = int(img_file_split[-3])
            predicted_label = int(img_file_split[-2])
            if real_label!=predicted_label: # only extract those successfully generated adversaries
                real_labels.append(real_label)
                predicted_labels.append(predicted_label)
                # NOTE(review): scipy.ndimage.imread was deprecated and later
                # removed from SciPy; confirm the pinned SciPy version (or
                # switch to imageio.imread) before running.
                current_img = ndimage.imread(adv_file_path + os.sep + img_file)
                image_list.append(current_img)
                image_files.append(img_file)
                # if len(image_list) >= 100:
                # break
    print('--- Total number of adversary images: ', len(image_list))
    return image_list, image_files, real_labels, predicted_labels
def get_normal_data_mutation_test(adv_file_path):
    '''
    :param file_path: the file path for the adversary images
    :return: the formatted data for mutation test, the actual label of the images, and the predicted label of the images
    '''
    image_list = []
    real_labels = []
    predicted_labels = []
    image_files =[]
    # File names are expected to look like ..._<real>_<predicted>_<...>.png,
    # so both labels are recovered from the trailing underscore fields.
    for img_file in os.listdir(adv_file_path):
        if img_file.endswith('.png'):
            img_file_split = img_file.split('_')
            real_label = int(img_file_split[-3])
            predicted_label = int(img_file_split[-2])
            if real_label==predicted_label: # only extract correctly-classified (normal) images
                real_labels.append(real_label)
                predicted_labels.append(predicted_label)
                # NOTE(review): scipy.ndimage.imread was deprecated and later
                # removed from SciPy; confirm the pinned SciPy version (or
                # switch to imageio.imread) before running.
                current_img = ndimage.imread(adv_file_path + os.sep + img_file)
                image_list.append(current_img)
                image_files.append(img_file)
                # if len(image_list) >= 100:
                # break
    print('--- Total number of normal images: ', len(image_list))
    return image_list, image_files, real_labels, predicted_labels
def extract_label_change_ratios(stats_file):
    """Parse a mutation-test result CSV and report label-change statistics.

    Each line of *stats_file* is "<image_name>,<step_size>,<count>,...".
    The image name encodes the real and predicted labels as its trailing
    underscore fields: when they match the image was a normal sample,
    otherwise an adversarial one. Counts are bucketed per step size (1, 5,
    10), a (mean, std, 99%-CI) summary is printed for every bucket, and the
    six raw lists are returned in the order
    (normal_1, adv_1, normal_5, adv_5, normal_10, adv_10).

    Refactor: the original triplicated the same parse/print logic for each
    step size; it is now a single loop over the step-size buckets, with
    identical output and return value.
    """
    buckets = {step: {'normal': [], 'adv': []} for step in (1, 5, 10)}
    with open(stats_file) as f:
        for line in f:
            fields = line.split(',')
            name_parts = fields[0].split('_')
            step_size = int(fields[1])
            if step_size not in buckets:
                continue
            kind = 'normal' if name_parts[-3] == name_parts[-2] else 'adv'
            buckets[step_size][kind].append(int(fields[2]))
    print('=== Result file: ', stats_file)
    for step in (1, 5, 10):
        print('--- Step size: %d' % step)
        normal, adv = buckets[step]['normal'], buckets[step]['adv']
        if len(normal) > 0:
            print('- Normal change: ', extract_ci(normal))
        else:
            print('- Normal change not applicable')
        # Note: like the original, the adversarial summary is printed
        # unconditionally, so an empty adv bucket still reaches extract_ci.
        print('- Adv change: ', extract_ci(adv))
    return (buckets[1]['normal'], buckets[1]['adv'],
            buckets[5]['normal'], buckets[5]['adv'],
            buckets[10]['normal'], buckets[10]['adv'])
def extract_ci(label_change_number):
    """Return (mean rounded to 2 dp, std, half-width of the 99% confidence
    interval rounded to 2 dp) for a list of label-change counts."""
    values = label_change_number
    mean = round(np.mean(values), 2)
    std = np.std(values)
    # 2.576 is the two-sided z-score for a 99% confidence level.
    ci99 = round(2.576 * std / math.sqrt(len(values)), 2)
    return mean, std, ci99
def process(img):
    """Identity placeholder for an image pre-processing hook; returns the
    input unchanged."""
    return img
# "ENHANCING THE RELIABILITY OF OUT-OF-DISTRIBUTION IMAGE DETECTION IN NEURAL NETWORKS(ICLR2018)"
def input_preprocessing(preds, x, eps, clip_min, clip_max):
y = tf.reduce_max(preds, 1, keep_dims=True)
# grad, = tf.gradients(tf.log(y), x)
# normalized_grad = tf.sign(tf.multiply(-1.0, grad))
# normalized_grad = tf.stop_gradient(normalized_grad)
# scaled_grad = eps * normalized_grad
# output = x - scaled_grad
grad, = tf.gradients(y, x)
normalized_grad = tf.sign(grad)
normalized_grad = tf.stop_gradient(normalized_grad)
scaled_grad = eps * normalized_grad
output = x - scaled_grad
if (clip_min is not None) and (clip_max is not None):
output = tf.clip_by_value(output, clip_min, clip_max)
return output
# extract_label_change_ratios('/Users/jingyi/nMutant/mt_result/mnist_jsma/adv_result.csv')
# extract_label_change_ratios('/Users/jingyi/nMutant/mt_result/mnist_cw/adv_result.csv')
# extract_label_change_ratios('/Users/jingyi/nMutant/mt_result/mnist_bb/adv_result.csv')
# extract_label_change_ratios('/Users/jingyi/nMutant/mt_result/mnist_fgsm1/adv_fgsm1_result.csv')
# extract_label_change_ratios('/Users/jingyi/nMutant/mt_result/cifar10_jsma/adv_result.csv')
# extract_label_change_ratios('/Users/jingyi/nMutant/mt_result/cifar10_cw/adv_result.csv')
# extract_label_change_ratios('/Users/jingyi/nMutant/mt_result/cifar10_bb/adv_result.csv')
# extract_label_change_ratios('/Users/jingyi/nMutant/mt_result/cifar10_tf/adv_fgsm1_result.csv') |
from sqlalchemy import (
MetaData,
Table,
Column,
Integer,
NVARCHAR,
DateTime,
Date,
Boolean,
DECIMAL,
)
# Shared metadata object; bound to the migration engine inside upgrade/downgrade.
meta = MetaData()
def upgrade(migrate_engine):
    """Create the `administration` table on the given engine."""
    meta.bind = migrate_engine
    columns = [
        Column("id", Integer, primary_key=True),
        Column("administration_id", NVARCHAR(50), index=True, nullable=False),
        Column("uhl_system_number", NVARCHAR(50), index=True, nullable=False),
        Column("administration_datetime", DateTime),
        Column("medication_name", NVARCHAR(100)),
        Column("dose_id", NVARCHAR(50)),
        Column("dose", NVARCHAR(50)),
        Column("dose_unit", NVARCHAR(50)),
        Column("form_name", NVARCHAR(50)),
        Column("route_name", NVARCHAR(50)),
    ]
    administration = Table("administration", meta, *columns)
    administration.create()
def downgrade(migrate_engine):
    """Drop the `administration` table (reflected from the live database)."""
    meta.bind = migrate_engine
    administration = Table("administration", meta, autoload=True)
    administration.drop()
|
import asyncio

# Module-wide event loop used to schedule background tasks from sync code
# (Bot.__init__ and the command dispatcher both call loop.create_task).
loop = asyncio.get_event_loop()
import aiohttp
import datetime
class TelegramAPIError(Exception):
    """Raised when the Telegram Bot API answers with a non-OK response."""

    def __init__(self, code, description):
        # Fix: pass a readable message to Exception so str(exc) is
        # informative (the original never called super().__init__, leaving
        # the exception message empty).
        super().__init__(f"[{code}] {description}")
        # Error code
        self.code = code
        # Error description
        self.description = description
class UpdateError(Exception):
    """Raised when an incoming Telegram payload cannot be interpreted."""
    pass
class Bot:
    """A long-polling Telegram bot.

    Fetches updates with /getUpdates, keeps per-chat state (users, messages,
    pinned message, photo) and dispatches slash commands registered in
    ``self.commands`` to coroutine handlers called as (bot, update, args).
    """
    def __init__(self, token):
        # Bot API token, interpolated into every request URL.
        self.token = token
        # The bot's own User record; filled asynchronously by update_bot_data().
        self.user_data = None
        # Updates fetched but not yet parsed.
        self.updates = list()
        # Every chat seen so far, deduplicated by chat id.
        self.chats = list()
        # Maps command name (without the leading slash) -> coroutine handler.
        self.commands = dict()
        # /getUpdates offset so already-processed updates are not re-fetched.
        self.offset = 0
        # Update user_data
        loop.create_task(self.update_bot_data())
    def __str__(self):
        return self.user_data.first_name
    def __repr__(self):
        return f"<Bot {self.user_data.first_name}>"
    def __hash__(self):
        # The token uniquely identifies a bot instance.
        return hash(self.token)
    async def run(self):
        """Run the bot automatically."""
        while True:
            await self.get_updates()
            for u in self.updates:
                loop.create_task(self.parse_update(u))
            self.updates = list()
            # Wait 5 seconds between two requests, allowing the parsing of updates.
            await asyncio.sleep(5)
    async def update_bot_data(self):
        """Update self.user_data with the latest information from /getMe."""
        data = await self.api_request("getMe")
        self.user_data = User(data)
    async def get_updates(self):
        """Get the latest updates from the Telegram API with /getUpdates."""
        try:
            # TODO: Fix long polling
            data = await self.api_request("getUpdates", offset=self.offset)
        except asyncio.TimeoutError:
            return
        for update in data:
            try:
                self.updates.append(Update(update))
            except NotImplementedError:
                # Unsupported update types (e.g. inline queries) are skipped.
                pass
        if len(self.updates) > 0:
            # Acknowledge everything received so far.
            self.offset = self.updates[-1].update_id + 1
    async def parse_update(self, update):
        """Parse the first update in the list."""
        # Add the chat to the chat list
        if update.message.chat not in self.chats:
            self.chats.append(update.message.chat)
        else:
            # Replace the chat object in the update with the correct one
            update.message.chat = self.chats[self.chats.index(update.message.chat)]
        # Add the user to the chat
        chat = self.find_chat(update.message.chat.chat_id)
        if update.message.sent_from not in chat.users:
            chat.users.append(update.message.sent_from)
        else:
            update.message.sent_from = chat.users[chat.users.index(update.message.sent_from)]
        # Add / edit the message to the message list
        if not update.message.edited:
            chat.messages.append(update.message)
        else:
            try:
                i = chat.messages.index(chat.find_message(update.message.msg_id))
            except ValueError:
                pass
            else:
                chat.messages[i] = update.message
        # Check if a command can be run
        # TODO: use message entities?
        if isinstance(update.message.content, str) and update.message.content.startswith("/"):
            split_msg = update.message.content.split(" ")
            # Ignore the left slash and the right @botname
            command = split_msg[0].lstrip("/").split("@")[0]
            if command in self.commands:
                arguments = split_msg[1:]
                loop.create_task(self.commands[command](self, update, arguments))
        # Update message status if a service message is received
        # NOTE(review): Message constructs ServiceMessages typed
        # "new_chat_member"/"left_chat_member", but the branches below test
        # "new_chat_user"/"left_chat_user" — those two branches appear to be
        # unreachable; confirm the intended type strings.
        if isinstance(update.message.content, ServiceMessage):
            # New user in chat
            if update.message.content.type == "new_chat_user":
                new_user = update.message.content.content
                chat.users.append(new_user)
            # User left chat
            elif update.message.content.type == "left_chat_user":
                left_user = update.message.content.content
                if left_user in chat.users:
                    # Remove the user from the list
                    del chat.users[chat.users.index(left_user)]
            # Chat title changed
            elif update.message.content.type == "new_chat_title":
                chat.title = update.message.content.content
            # New chat photo
            elif update.message.content.type == "new_chat_photo":
                chat.chat_photo = update.message.content.content
            # Chat photo deleted
            elif update.message.content.type == "delete_chat_photo":
                chat.chat_photo = None
            # New pinned message
            elif update.message.content.type == "pinned_message":
                chat.pinned_msg = update.message.content.content
            # TODO: handle group -> supergroup migrations
    def find_update(self, upd_id):
        """Return the pending update with the given id, or None."""
        for update in self.updates:
            if update.update_id == upd_id:
                return update
    def find_chat(self, chat_id):
        """Return the known chat with the given id, or None."""
        for chat in self.chats:
            if chat.chat_id == chat_id:
                return chat
    async def api_request(self, endpoint, **params):
        """Send a request to the Telegram API at the specified endpoint."""
        # TODO: Reintroduce the timeout to prevent stuck requests
        # Create a new session for each request.
        async with aiohttp.ClientSession() as session:
            # Send the request to the Telegram API
            token = self.token
            async with session.request("GET", f"https://api.telegram.org/bot{token}/{endpoint}", params=params) as response:
                # Parse the json data as soon it's ready
                data = await response.json()
                # Check for errors in the request
                if response.status != 200 or not data["ok"]:
                    raise TelegramAPIError(data["error_code"], data["description"])
                # Return a dictionary containing the data
                return data["result"]
class Update:
    """One update from /getUpdates, normalized to carry a single message."""

    def __init__(self, upd_dict):
        self.update_id = upd_dict["update_id"]
        # The payload nests the message under one of four keys; the two
        # "edited_" variants mark the message as an edit.
        for key, edited in (("message", False),
                            ("edited_message", True),
                            ("channel_post", False),
                            ("edited_channel_post", True)):
            if key in upd_dict:
                self.message = Message(upd_dict[key], edited=edited)
                break
        else:
            raise NotImplementedError("No inline support yet.")
class Chat:
    """A Telegram chat (private, group, supergroup or channel) plus the
    state the bot has accumulated for it (users, messages, pinned message,
    photo)."""
    def __init__(self, chat_dict):
        self.chat_id = chat_dict["id"]
        # One of "private", "group", "supergroup", "channel".
        self.type = chat_dict["type"]
        self.users = list()
        self.admins = list()
        self.messages = list()
        self.chat_photo = None
        self.pinned_msg = None
        if self.type == "private":
            self.first_name = chat_dict["first_name"]
            if "last_name" in chat_dict:
                self.last_name = chat_dict["last_name"]
            else:
                self.last_name = None
            if "username" in chat_dict:
                self.username = chat_dict["username"]
            else:
                self.username = None
            # NOTE(review): when last_name is None this title renders as
            # "First None"; confirm whether that is intended.
            self.title = f"{self.first_name} {self.last_name}"
            self.everyone_is_admin = True
        elif self.type == "group" or self.type == "supergroup" or self.type == "channel":
            self.first_name = None
            self.last_name = None
            if self.type == "supergroup" or self.type == "channel":
                # Supergroups/channels can have a public username and never
                # grant admin rights to everyone.
                self.everyone_is_admin = False
                if "username" in chat_dict:
                    self.username = chat_dict["username"]
                else:
                    self.username = None
            else:
                self.everyone_is_admin = chat_dict["all_members_are_administrators"]
                self.username = None
            self.title = chat_dict["title"]
        else:
            raise UpdateError(f"Unknown message type: {self.type}")
    def __str__(self):
        return self.title
    def __repr__(self):
        return f"<{self.type} Chat {self.title}>"
    def __hash__(self):
        # The chat id is already a unique integer.
        return self.chat_id
    def __eq__(self, other):
        if isinstance(other, Chat):
            return self.chat_id == other.chat_id
        else:
            # NOTE(review): this TypeError is constructed but never raised,
            # so comparing against a non-Chat silently evaluates to None.
            TypeError("Can't compare Chat to a different object.")
    def find_message(self, msg_id):
        """Return the stored message with the given id, or None."""
        for msg in self.messages:
            if msg.msg_id == msg_id:
                return msg
    async def send_message(self, bot, text, **params):
        """Send a message in the chat through the bot object."""
        # TODO: This could give problems if a class inherits Bot
        if not isinstance(bot, Bot):
            raise TypeError("bot is not an instance of Bot.")
        await bot.api_request("sendMessage", text=text, chat_id=self.chat_id, **params)
    async def set_chat_action(self, bot, action):
        """Set a status for the chat.
        Valid actions are:
        typing
        upload_photo
        record_video
        upload_video
        record_audio
        upload_audio
        upload_document
        find_location"""
        # TODO: This could give problems if a class inherits Bot
        if not isinstance(bot, Bot):
            raise TypeError("bot is not an instance of Bot.")
        # Check if the action is valid
        if action not in ["typing", "upload_photo", "record_video", "upload_video", "record_audio", "upload_audio", "upload_document", "find_location"]:
            raise ValueError("Invalid action")
        # Send the request
        await bot.api_request("sendChatAction", chat_id=self.chat_id, action=action)
class User:
    """A Telegram user, built from a Bot API ``User`` JSON object."""

    def __init__(self, user_dict):
        self.user_id = user_dict["id"]
        self.first_name = user_dict["first_name"]
        # last_name and username are optional fields in the API payload.
        self.last_name = user_dict.get("last_name")
        self.username = user_dict.get("username")

    def __str__(self):
        # Prefer the @username, then "First Last", then just the first name.
        if self.username is not None:
            return f"@{self.username}"
        if self.last_name is not None:
            return f"{self.first_name} {self.last_name}"
        return self.first_name

    def __repr__(self):
        if self.username is not None:
            return f"<User {self.username}>"
        return f"<User {self.user_id}>"

    def __hash__(self):
        return self.user_id

    def __eq__(self, other):
        """Users are equal when their Telegram ids match.

        Bug fix: the original constructed a TypeError for non-User operands
        but never raised or returned it, so the comparison silently
        evaluated to None. Returning NotImplemented lets Python fall back to
        its default comparison, making ``user == 5`` a clean False.
        """
        if isinstance(other, User):
            return self.user_id == other.user_id
        return NotImplemented
class Message:
    """A single Telegram message.

    ``content`` holds the payload: a str for text messages, a media wrapper
    (Audio, Photo, ...) for attachments, or a ServiceMessage for chat
    events. Raises UpdateError when no known payload field is present.
    """
    def __init__(self, msg_dict, edited=False):
        self.msg_id = msg_dict["message_id"]
        self.date = datetime.datetime.fromtimestamp(msg_dict["date"])
        self.chat = Chat(msg_dict["chat"])
        # True when built from an edited_message / edited_channel_post field.
        self.edited = edited
        # "from" is absent for some messages (e.g. channel posts).
        if "from" in msg_dict:
            self.sent_from = User(msg_dict["from"])
        else:
            self.sent_from = None
        self.forwarded = "forward_date" in msg_dict
        if self.forwarded:
            # The original sender may be a user or (for channels) a chat.
            if "forward_from" in msg_dict:
                self.original_sender = User(msg_dict["forward_from"])
            elif "forward_from_chat" in msg_dict:
                self.original_sender = Chat(msg_dict["forward_from_chat"])
            # TODO: Add forward_from_message_id
        if "reply_to_message" in msg_dict:
            self.is_reply_to = Message(msg_dict["reply_to_message"])
        else:
            self.is_reply_to = None
        # Exactly one payload field is expected; check them in order.
        if "text" in msg_dict:
            self.content = msg_dict["text"]
            # TODO: Check for MessageEntities
        elif "audio" in msg_dict:
            self.content = Audio(msg_dict["audio"])
        elif "document" in msg_dict:
            self.content = Document(msg_dict["document"])
        elif "game" in msg_dict:
            self.content = Game(msg_dict["game"])
        elif "photo" in msg_dict:
            self.content = Photo(msg_dict["photo"])
        elif "sticker" in msg_dict:
            self.content = Sticker(msg_dict["sticker"])
        elif "video" in msg_dict:
            self.content = Video(msg_dict["video"])
        elif "voice" in msg_dict:
            self.content = Voice(msg_dict["voice"])
        elif "contact" in msg_dict:
            self.content = Contact(msg_dict["contact"])
        elif "location" in msg_dict:
            self.content = Location(msg_dict["location"])
        elif "venue" in msg_dict:
            self.content = Venue(msg_dict["venue"])
        # Service messages: chat-state changes rather than user content.
        elif "new_chat_member" in msg_dict:
            self.content = ServiceMessage("new_chat_member", User(msg_dict["new_chat_member"]))
        elif "left_chat_member" in msg_dict:
            self.content = ServiceMessage("left_chat_member", User(msg_dict["left_chat_member"]))
        elif "new_chat_title" in msg_dict:
            self.content = ServiceMessage("new_chat_title", msg_dict["new_chat_title"])
        elif "new_chat_photo" in msg_dict:
            self.content = ServiceMessage("new_chat_photo", Photo(msg_dict["new_chat_photo"]))
        elif "delete_chat_photo" in msg_dict:
            self.content = ServiceMessage("delete_chat_photo")
        elif "group_chat_created" in msg_dict:
            self.content = ServiceMessage("group_chat_created")
        elif "supergroup_chat_created" in msg_dict:
            self.content = ServiceMessage("supergroup_chat_created")
        elif "channel_chat_created" in msg_dict:
            self.content = ServiceMessage("channel_chat_created")
        elif "migrate_to_chat_id" in msg_dict:
            self.content = ServiceMessage("migrate_to_chat_id", msg_dict["migrate_to_chat_id"])
        elif "migrate_from_chat_id" in msg_dict:
            self.content = ServiceMessage("migrate_from_chat_id", msg_dict["migrate_from_chat_id"])
        elif "pinned_message" in msg_dict:
            self.content = ServiceMessage("pinned_message", Message(msg_dict["pinned_message"]))
        else:
            raise UpdateError("Message doesn't contain anything.")
    def __repr__(self):
        if isinstance(self.content, str):
            return f"<Message: {self.content}>"
        else:
            return f"<Message containing {type(self.content)}>"
    async def reply(self, bot, text, **params):
        """Reply to this message."""
        await self.chat.send_message(bot, text, reply_to_message_id=self.msg_id, **params)
class ServiceMessage:
    """A non-content chat event (user joined, title changed, pin, ...)."""
    def __init__(self, msg_type, extra=None):
        # Raw Telegram field name, e.g. "new_chat_member", "pinned_message".
        self.type = msg_type
        # Associated payload (User, Photo, Message, str or int), or None.
        self.content = extra
# Placeholder content types: constructing any of these currently fails
# because their payload parsing has not been implemented yet.
class Audio:
    def __init__(self, init_dict):
        raise NotImplementedError("Not yet.")
class Document:
    def __init__(self, init_dict):
        raise NotImplementedError("Not yet.")
class Game:
    def __init__(self, init_dict):
        raise NotImplementedError("Not yet.")
class Photo:
    def __init__(self, init_dict):
        raise NotImplementedError("Not yet.")
class Sticker:
    def __init__(self, init_dict):
        raise NotImplementedError("Not yet.")
class Video:
    def __init__(self, init_dict):
        raise NotImplementedError("Not yet.")
class Voice:
    def __init__(self, init_dict):
        raise NotImplementedError("Not yet.")
class Contact:
    def __init__(self, init_dict):
        raise NotImplementedError("Not yet.")
class Location:
    def __init__(self, init_dict):
        raise NotImplementedError("Not yet.")
class Venue:
    def __init__(self, init_dict):
        raise NotImplementedError("Not yet.")
|
def jogar():
    """Print the Forca (hangman) welcome banner and the end-of-game line."""
    border = "*********************************"
    print(border)
    print("Bem vindo ao jogo de Forca")
    print(border)
    print("Fim do jogo")
from django.contrib import admin
from habitat._common.admin import HabitatAdmin
from habitat.building.models import Module
@admin.register(Module)
class ModuleAdmin(HabitatAdmin):
    """Django admin configuration for habitat building modules."""
    # Columns shown on the change-list page.
    list_display = ['name', 'status', 'hazard', 'width', 'height', 'length', 'plan', 'capacity']
    # Sidebar filters.
    list_filter = ['status', 'hazard', 'plan', 'capacity']
    # Fields editable directly from the list view.
    list_editable = ['status', 'hazard']
    search_fields = ['name']
    ordering = ['name']
|
# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import unittest
from nose.tools import eq_, ok_
from os_ken.services.protocols.bgp.utils import validation
# Module-level logger for these validation tests.
LOG = logging.getLogger(__name__)
class Test_Utils_Validation(unittest.TestCase):
    """
    Test case for os_ken.services.protocols.bgp.utils.validation
    """
    # NOTE(review): these tests use nose's eq_/ok_ helpers; nose is
    # unmaintained — confirm the project still pins a working version.

    # --- MAC address validation ---
    def test_is_valid_mac(self):
        ok_(validation.is_valid_mac('aa:bb:cc:dd:ee:ff'))
    def test_is_valid_mac_hyphenation(self):
        ok_(validation.is_valid_mac('aa-bb-cc-dd-ee-ff'))
    def test_is_valid_mac_short(self):
        eq_(False, validation.is_valid_mac('aa:bb:cc:dd:ee'))

    # --- Generic IP prefix length validation ---
    def test_is_valid_ip_prefix(self):
        ok_(validation.is_valid_ip_prefix(24, 32))
    def test_is_valid_ip_prefix_str(self):
        ok_(validation.is_valid_ip_prefix('24', 32))
    def test_is_valid_ip_prefix_not_digit(self):
        eq_(False, validation.is_valid_ip_prefix('foo', 32))
    def test_is_valid_ip_prefix_over(self):
        eq_(False, validation.is_valid_ip_prefix(100, 32))

    # --- IPv4 address / prefix validation ---
    def test_is_valid_ipv4(self):
        ok_(validation.is_valid_ipv4('10.0.0.1'))
    def test_is_valid_ipv4_not_dot(self):
        eq_(False, validation.is_valid_ipv4('192:168:0:1'))
    def test_is_valid_ipv4_prefix(self):
        ok_(validation.is_valid_ipv4_prefix('10.0.0.1/24'))
    def test_is_valid_ipv4_prefix_not_str(self):
        eq_(False, validation.is_valid_ipv4_prefix(1234))
    def test_is_valid_ipv4_prefix_without_prefix(self):
        eq_(False, validation.is_valid_ipv4_prefix('10.0.0.1'))
    def test_is_valid_ipv4_prefix_invalid_addr(self):
        eq_(False, validation.is_valid_ipv4_prefix('xxx.xxx.xxx.xxx/24'))

    # --- IPv6 address / prefix validation ---
    def test_is_valid_ipv6(self):
        ok_(validation.is_valid_ipv6('fe80::0011:aabb:ccdd:eeff'))
    def test_is_valid_ipv6_not_colon(self):
        eq_(False, validation.is_valid_ipv6('fe80--0011-aabb-ccdd-eeff'))
    def test_is_valid_ipv6_prefix(self):
        ok_(validation.is_valid_ipv6_prefix('fe80::0011:aabb:ccdd:eeff/64'))
    def test_is_valid_ipv6_prefix_not_str(self):
        eq_(False, validation.is_valid_ipv6_prefix(1234))
    def test_is_valid_ipv6_prefix_without_prefix(self):
        eq_(False,
            validation.is_valid_ipv6_prefix('fe80::0011:aabb:ccdd:eeff'))
    def test_is_valid_ipv6_prefix_invalid_addr(self):
        eq_(False, validation.is_valid_ipv6_prefix('xxxx::xxxx/64'))

    # --- AS number validation (2-byte "old" and 4-byte) ---
    def test_is_valid_old_asn(self):
        ok_(validation.is_valid_old_asn(65000))
    def test_is_valid_old_asn_negative(self):
        eq_(False, validation.is_valid_old_asn(-1))
    def test_is_valid_old_asn_over(self):
        # 0xffff is the largest 2-byte ASN.
        eq_(False, validation.is_valid_old_asn(0xffff + 1))
    def test_is_valid_asn(self):
        ok_(validation.is_valid_asn(6553800))
    def test_is_valid_asn_old(self):
        ok_(validation.is_valid_asn(65000))
    def test_is_valid_asn_negative(self):
        eq_(False, validation.is_valid_asn(-1))
    def test_is_valid_asn_over(self):
        # 0xffffffff is the largest 4-byte ASN.
        eq_(False, validation.is_valid_asn(0xffffffff + 1))

    # --- VPNv4 / VPNv6 prefix validation (RD + prefix) ---
    def test_is_valid_vpnv4_prefix(self):
        ok_(validation.is_valid_vpnv4_prefix('100:200:10.0.0.1/24'))
    def test_is_valid_vpnv4_prefix_not_str(self):
        eq_(False, validation.is_valid_vpnv4_prefix(1234))
    def test_is_valid_vpnv4_prefix_short_rd(self):
        eq_(False, validation.is_valid_vpnv4_prefix('100:10.0.0.1/24'))
    def test_is_valid_vpnv4_prefix_invalid_rd(self):
        eq_(False, validation.is_valid_vpnv4_prefix('foo:bar:10.0.0.1/24'))
    def test_is_valid_vpnv6_prefix(self):
        ok_(validation.is_valid_vpnv6_prefix(
            '100:200:fe80::0011:aabb:ccdd:eeff/64'))
    def test_is_valid_vpnv6_prefix_not_str(self):
        eq_(False, validation.is_valid_vpnv6_prefix(1234))
    def test_is_valid_vpnv6_prefix_short_rd(self):
        eq_(False, validation.is_valid_vpnv6_prefix('100:eeff/64'))
    def test_is_valid_vpnv6_prefix_invalid_rd(self):
        eq_(False, validation.is_valid_vpnv6_prefix('foo:bar:10.0.0.1/24'))

    # --- MED attribute validation ---
    def test_is_valid_med(self):
        ok_(validation.is_valid_med(100))
    def test_is_valid_med_not_num(self):
        eq_(False, validation.is_valid_med('foo'))
    def test_is_valid_med_negative(self):
        eq_(False, validation.is_valid_med(-1))
    def test_is_valid_med_over(self):
        eq_(False, validation.is_valid_med(0xffffffff + 1))

    # --- MPLS label validation ---
    def test_is_valid_mpls_label(self):
        ok_(validation.is_valid_mpls_label(100))
    def test_is_valid_mpls_label_reserved(self):
        eq_(False, validation.is_valid_mpls_label(4))
    def test_is_valid_mpls_label_not_num(self):
        eq_(False, validation.is_valid_mpls_label('foo'))
    def test_is_valid_mpls_label_negative(self):
        eq_(False, validation.is_valid_mpls_label(-1))
    def test_is_valid_mpls_label_over(self):
        eq_(False, validation.is_valid_mpls_label(0x100000 + 1))
    def test_is_valid_mpls_labels(self):
        ok_(validation.is_valid_mpls_labels([100, 200]))
    def test_is_valid_mpls_labels_not_list(self):
        eq_(False, validation.is_valid_mpls_labels(100))
    def test_is_valid_mpls_labels_with_invalid_label(self):
        eq_(False, validation.is_valid_mpls_labels(['foo', 200]))

    # --- Route distinguisher validation ---
    def test_is_valid_route_dist(self):
        ok_(validation.is_valid_route_dist('65000:222'))
    def test_is_valid_route_dist_ipv4_based(self):
        ok_(validation.is_valid_route_dist('10.0.0.1:333'))
    def test_is_valid_route_not_str(self):
        eq_(False, validation.is_valid_route_dist(65000))
    def test_is_valid_route_dist_short(self):
        eq_(False, validation.is_valid_route_dist('65000'))
    def test_is_valid_route_dist_invalid_ipv4_addr(self):
        eq_(False, validation.is_valid_route_dist('xxx.xxx.xxx.xxx:333'))

    # --- EVPN field validation (ESI, ethernet tag, VNI) ---
    def test_is_valid_esi(self):
        ok_(validation.is_valid_esi(100))
    def test_is_valid_esi_not_int(self):
        eq_(False, validation.is_valid_esi('foo'))
    def test_is_valid_ethernet_tag_id(self):
        ok_(validation.is_valid_ethernet_tag_id(100))
    def test_is_valid_ethernet_tag_id_not_int(self):
        eq_(False, validation.is_valid_ethernet_tag_id('foo'))
    def test_is_valid_ethernet_tag_id_negative(self):
        eq_(False, validation.is_valid_ethernet_tag_id(-1))
    def test_is_valid_ethernet_tag_id_over(self):
        eq_(False, validation.is_valid_ethernet_tag_id(0xffffffff + 1))
    def test_is_valid_vni(self):
        ok_(validation.is_valid_vni(100))
    def test_is_valid_vni_not_int(self):
        eq_(False, validation.is_valid_vni('foo'))
    def test_is_valid_vni_negative(self):
        eq_(False, validation.is_valid_vni(-1))
    def test_is_valid_vni_over(self):
        eq_(False, validation.is_valid_vni(0xffffff + 1))
|
# Copyright Rein Halbersma 2020-2021.
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import numpy as np
from ..enums import Action, Card, Hand, nC, nH
class BasicStrategyAgent:
    """
    A blackjack agent that plays according to the Basic Strategy of Thorp (1966).
    """

    def __init__(self, env):
        # Start from "always HIT", then overwrite the rectangular regions of
        # the (hand, dealer-card) table where Basic Strategy says STAND.
        self.policy = np.full((nH, nC), Action.HIT)
        # Hard hands: stand on 13+ vs dealer 2-3, 12+ vs 4-6, 17+ vs 7-A.
        self.policy[Hand.H13:(Hand.H21 + 1), Card._2:(Card._3 + 1)] = Action.STAND
        self.policy[Hand.H12:(Hand.H21 + 1), Card._4:(Card._6 + 1)] = Action.STAND
        self.policy[Hand.H17:(Hand.H21 + 1), Card._7:(Card._A + 1)] = Action.STAND
        # Soft hands: stand on soft 18+ vs 2-8, soft 19+ vs 9-A.
        self.policy[Hand.S18:(Hand.BJ + 1), Card._2:(Card._8 + 1)] = Action.STAND
        self.policy[Hand.S19:(Hand.BJ + 1), Card._9:(Card._A + 1)] = Action.STAND
        # Pick the observation decoder matching the env flavor:
        # 'Blackjack-v0' observations appear to be (player total, dealer card,
        # usable-ace flag) tuples, 'Blackjack-v1' a single flat index —
        # NOTE(review): confirm against the environment implementations.
        self.decode = {
            'Blackjack-v0': (lambda obs: (Hand[('S' if obs[2] else 'H') + str(obs[0])], Card((obs[1] - 2) % nC))),
            'Blackjack-v1': (lambda obs: divmod(obs, nC))
        }[env.unwrapped.spec.id]

    def act(self, obs, reward, done):
        # reward/done are part of the common agent interface but unused here.
        return self.policy[self.decode(obs)]
|
from market.models import db, Category

# One-off seeding script: insert a single Category row and persist it.
# The category name is intentionally Polish ("Basketball - points").
db.session.add(Category(name="Koszykówka - punkty", full_name="Koszykówka - punkty"))
db.session.commit()
# db.drop_all()
# db.create_all()
|
import math
from abc import abstractmethod
from numbers import (Rational,
Real)
from typing import (Any,
Optional,
Tuple,
Union)
from cfractions import Fraction
from reprit.base import generate_repr
from .expression import Expression
from .hints import SqrtEvaluator
from .utils import (digits_count,
identity,
perfect_sqrt,
positiveness_to_sign,
square)
class Constant(Expression):
    """Base class for constant (variable-free) expressions."""

    @property
    def degree(self) -> int:
        # A constant is a degree-0 polynomial.
        return 0

    @property
    @abstractmethod
    def value(self) -> Real:
        """Returns value of the constant."""

    def evaluate(self, sqrt_evaluator: Optional[SqrtEvaluator] = None) -> Real:
        # Constants contain no radicals, so the evaluator is ignored.
        return self.value

    def is_positive(self) -> bool:
        return self.value > 0

    def lower_bound(self) -> Real:
        # A constant is its own tight bound on both sides.
        return self.value

    upper_bound = lower_bound

    def __eq__(self, other: Any) -> Any:
        # Compare by numeric value against plain numbers and other constants;
        # defer to the other operand for non-constant expressions.
        return (self.value == other
                if isinstance(other, Real)
                else (isinstance(other, Constant)
                      and self.value == other.value
                      if isinstance(other, Expression)
                      else NotImplemented))

    def __hash__(self) -> int:
        return hash(self.value)

    def __str__(self) -> str:
        return str(self.value)
class Finite(Constant):
    """Represents a rational number, stored as an exact fraction."""

    is_finite = True
    __slots__ = '_value',

    def __init__(self, value: Real = 0) -> None:
        # Normalize any supported real input to an exact fraction.
        self._value = Fraction(value)

    @property
    def value(self) -> Rational:
        return self._value

    # ``evaluate`` and ``is_positive`` are inherited from ``Constant``;
    # the byte-identical re-definitions that used to live here were redundant.

    def extract_common_denominator(self) -> Tuple[int, 'Finite']:
        """Split into (denominator, numerator-as-Finite)."""
        return self.value.denominator, Finite(self.value.numerator)

    def extract_common_numerator(self) -> Tuple[int, 'Finite']:
        """Split into (numerator, reciprocal-denominator-as-Finite)."""
        return self.value.numerator, One / self.value.denominator

    def inverse(self) -> 'Finite':
        # Swapping numerator and denominator keeps the value exact;
        # raises ZeroDivisionError for zero, like plain division would.
        return Finite(Fraction(self.value.denominator, self.value.numerator))

    def perfect_sqrt(self) -> Expression:
        # Exact square root of the perfect-square parts of numerator/denominator.
        return Finite(Fraction(perfect_sqrt(self.value.numerator),
                               perfect_sqrt(self.value.denominator)))

    def significant_digits_count(self) -> int:
        # Digit count of the value rounded to the nearest integer.
        return digits_count(self._value.limit_denominator(1).numerator)

    def square(self) -> 'Finite':
        return Finite(square(self.value))

    def __add__(self, other: Union[Real, 'Finite']) -> 'Finite':
        other = to_expression(other)
        return ((Finite(self.value + other.value)
                 if isinstance(other, Finite)
                 else other.__radd__(self))
                if isinstance(other, Expression)
                else NotImplemented)

    def __bool__(self) -> bool:
        return bool(self.value)

    def __mul__(self, other: Union[Real, 'Finite']) -> 'Finite':
        other = to_expression(other)
        return ((Finite(self.value * other.value)
                 if isinstance(other, Finite)
                 else other.__rmul__(self))
                if isinstance(other, Expression)
                else NotImplemented)

    def __neg__(self) -> 'Finite':
        return Finite(-self.value)

    def __radd__(self, other: Union[Real, 'Finite']) -> 'Finite':
        return (to_expression(other) + self
                if isinstance(other, Real)
                else NotImplemented)

    __repr__ = generate_repr(__init__)

    def __rmul__(self, other: Union[Real, 'Finite']) -> 'Finite':
        return (to_expression(other) * self
                if isinstance(other, Real)
                else NotImplemented)


# Frequently used rational constants.
Zero, One = Finite(0), Finite(1)
class Infinite(Constant):
    """Signed infinity constant."""

    is_finite = False

    @property
    def degree(self) -> int:
        return 0

    @property
    def value(self) -> Real:
        # Materialize the stored sign flag as +inf / -inf.
        return positiveness_to_sign(self.is_positive()) * math.inf

    __slots__ = '_is_positive',

    def __init__(self, is_positive: bool) -> None:
        self._is_positive = is_positive

    def evaluate(self, sqrt_evaluator: Optional[SqrtEvaluator] = None) -> Real:
        return self.value

    def extract_common_denominator(self) -> Tuple[int, 'Expression']:
        # Infinities have no useful numerator/denominator decomposition.
        return 1, self

    def extract_common_numerator(self) -> Tuple[int, 'Expression']:
        return 1, self

    def inverse(self) -> 'Expression':
        # 1 / ±inf == 0.
        return Zero

    def is_positive(self) -> bool:
        return self._is_positive

    perfect_sqrt = identity

    def significant_digits_count(self) -> int:
        return 0

    def square(self) -> 'Expression':
        # (±inf) ** 2 == +inf.
        return Infinity

    def __add__(self, other: Union[Real, 'Expression']) -> Constant:
        other = to_expression(other)
        # inf + finite keeps the infinity; inf + inf keeps it only when the
        # signs agree; inf - inf and anything involving NaN is NaN.
        return ((self
                 if (other.is_finite
                     or (other is not NaN
                         and self.is_positive() is other.is_positive()))
                 else NaN)
                if isinstance(other, Expression)
                else NotImplemented)

    def __ge__(self, other: Union[Real, 'Expression']) -> bool:
        other = to_expression(other)
        # Comparisons against NaN are always False (IEEE-style semantics).
        # NOTE: the (Real, Expression) tuple here is wider than the plain
        # Expression check used by the other comparisons, but after
        # to_expression() the Real member is redundant, not harmful.
        return (other is not NaN and (self.is_positive() or self == other)
                if isinstance(other, (Real, Expression))
                else NotImplemented)

    def __gt__(self, other: Union[Real, 'Expression']) -> bool:
        other = to_expression(other)
        return (other is not NaN and self.is_positive() and self != other
                if isinstance(other, Expression)
                else NotImplemented)

    def __le__(self, other: Union[Real, 'Expression']) -> bool:
        other = to_expression(other)
        return (other is not NaN and (not self.is_positive() or self == other)
                if isinstance(other, Expression)
                else NotImplemented)

    def __lt__(self, other: Union[Real, 'Expression']) -> bool:
        other = to_expression(other)
        return (other is not NaN and not self.is_positive() and self != other
                if isinstance(other, Expression)
                else NotImplemented)

    def __mul__(self, other: Union[Real, 'Expression']) -> Constant:
        other = to_expression(other)
        # Sign rule for products; 0 * inf and NaN * inf are NaN.
        return (((Infinity
                  if self.is_positive() is other.is_positive()
                  else -Infinity)
                 if other and other is not NaN
                 else NaN)
                if isinstance(other, Expression)
                else NotImplemented)

    def __neg__(self) -> 'Expression':
        return Infinite(not self.is_positive())

    __radd__ = __add__
    __repr__ = generate_repr(__init__)
    __rmul__ = __mul__


# Canonical positive infinity; negate it for the negative one.
Infinity = Infinite(True)
class _NaN(Constant):
    """Not-a-number singleton: absorbs arithmetic, compares False."""

    is_finite = False
    value = math.nan
    _instance = None

    def __new__(cls) -> '_NaN':
        # Enforce a single shared instance so identity checks (``is NaN``)
        # used throughout this module are reliable.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    __slots__ = ()

    def extract_common_denominator(self) -> Tuple[int, 'Expression']:
        return 1, self

    def extract_common_numerator(self) -> Tuple[int, 'Expression']:
        return 1, self

    def inverse(self) -> 'Expression':
        # 1 / NaN is still NaN.
        return self

    def is_positive(self) -> bool:
        return False

    perfect_sqrt = identity

    def significant_digits_count(self) -> int:
        return 0

    square = identity

    def __add__(self, other: Union[Real, 'Expression']) -> 'Expression':
        # NaN absorbs addition.
        return self

    def __ge__(self, other: Union[Real, 'Expression']) -> bool:
        # All ordering comparisons with NaN are False (IEEE-style).
        return (False
                if isinstance(other, (Real, Expression))
                else NotImplemented)

    def __gt__(self, other: Union[Real, 'Expression']) -> bool:
        return (False
                if isinstance(other, (Real, Expression))
                else NotImplemented)

    def __le__(self, other: Union[Real, 'Expression']) -> bool:
        return (False
                if isinstance(other, (Real, Expression))
                else NotImplemented)

    def __lt__(self, other: Union[Real, 'Expression']) -> bool:
        return (False
                if isinstance(other, (Real, Expression))
                else NotImplemented)

    def __mul__(self, other: Union[Real, 'Expression']) -> 'Expression':
        # NaN absorbs multiplication.
        return self

    __neg__ = identity

    def __radd__(self, other: Union[Real, 'Expression']) -> 'Expression':
        return self

    def __repr__(self) -> str:
        return 'NaN'

    def __rmul__(self, other: Union[Real, 'Expression']) -> 'Expression':
        return self


# The module-wide NaN singleton.
NaN = _NaN()
def to_expression(other: Union[Real, Expression]) -> Expression:
    """Coerce a plain real number into the matching constant expression.

    Rationals become ``Finite`` exactly, finite reals via ``float``,
    infinities become signed ``Infinite`` and NaN maps to the ``NaN``
    singleton.  Anything that is not a ``Real`` is returned untouched so
    callers can pass expressions straight through.
    """
    if not isinstance(other, Real):
        return other
    if isinstance(other, Rational):
        return Finite(other)
    if math.isfinite(other):
        return Finite(float(other))
    if math.isinf(other):
        return Infinite(other > 0)
    return NaN
|
from csvio.processors import FieldProcessor
from .csv_data import get_csv_reader_writer
# Expected rows after every FieldProcessor step in the tests below has been
# applied to the sample CSV fixture: quantities cast to int and incremented,
# supplier names normalized ("Big" -> "Huge" -> "Enormous",
# "Strawberries" -> "Strawberry") and origins upper-cased.
result = [
    {
        "Supplier": "Enormous Apples",
        "Fruit": "Apple",
        "Origin": "SPAIN",
        "Quantity": 2,
    },
    {
        "Supplier": "Enormous Melons",
        "Fruit": "Melons",
        "Origin": "ITALY",
        "Quantity": 3,
    },
    {
        "Supplier": "Long Mangoes",
        "Fruit": "Mango",
        "Origin": "INDIA",
        "Quantity": 4,
    },
    {
        "Supplier": "Small Strawberry",
        "Fruit": "Strawberry",
        "Origin": "FRANCE",
        "Quantity": 5,
    },
    {
        "Supplier": "Short Mangoes",
        "Fruit": "Mango",
        "Origin": "FRANCE",
        "Quantity": 6,
    },
    {
        "Supplier": "Sweet Strawberry",
        "Fruit": "Strawberry",
        "Origin": "SPAIN",
        "Quantity": 7,
    },
    {
        "Supplier": "Square Apples",
        "Fruit": "Apple",
        "Origin": "ITALY",
        "Quantity": 8,
    },
    {
        "Supplier": "Small Melons",
        "Fruit": "Melons",
        "Origin": "ITALY",
        "Quantity": 9,
    },
    {
        "Supplier": "Dark Berries",
        "Fruit": "Strawberry",
        "Origin": "AUSTRALIA",
        "Quantity": 10,
    },
    {
        "Supplier": "Sweet Berries",
        "Fruit": "Blackcurrant",
        "Origin": "AUSTRALIA",
        "Quantity": 11,
    },
]
def add1(x):
    """Return *x* incremented by one."""
    return 1 + x
def cast_to_int(x):
    """Coerce *x* to an int (raises like ``int()`` for bad input)."""
    converted = int(x)
    return converted
def replace_big_huge(x):
    """Rewrite every occurrence of ``"Big"`` in *x* as ``"Huge"``."""
    return x.replace("Big", "Huge")
def test_field_processors_csv_reader(tmp_path):
    """Rows read through a FieldProcessor-equipped reader match ``result``."""
    processor = FieldProcessor("proc1")
    # Order matters within one field: "Big" -> "Huge" must run before
    # "Huge" -> "Enormous".
    processor.add_processor("Quantity", [cast_to_int, add1])
    processor.add_processor("Supplier", replace_big_huge)
    processor.add_processor("Origin", lambda value: value.upper())
    processor.add_processor(
        "Supplier", lambda value: value.replace("Strawberries", "Strawberry")
    )
    processor.add_processor("Supplier", lambda value: value.replace("Huge", "Enormous"))
    _, reader = get_csv_reader_writer(tmp_path, {"processors": [processor]})
    assert reader.rows == result
def test_field_processors_csv_writer(tmp_path):
    """Rows written through a FieldProcessor-equipped writer match ``result``."""
    processor = FieldProcessor("proc1")
    # Same pipeline as the reader test, attached on the writer side instead.
    processor.add_processor("Quantity", [cast_to_int, add1])
    processor.add_processor("Supplier", replace_big_huge)
    processor.add_processor("Origin", lambda value: value.upper())
    processor.add_processor(
        "Supplier", lambda value: value.replace("Strawberries", "Strawberry")
    )
    processor.add_processor("Supplier", lambda value: value.replace("Huge", "Enormous"))
    writer, _ = get_csv_reader_writer(tmp_path, {}, {"processors": [processor]})
    assert writer.rows == result
|
import subprocess as sp
import multiprocessing as mp
import glob
import os
import gzip
import time
import datetime
import shutil
#Non-Standard
from pyig.backend import split_fasta
from pyig.backend import output_parser
def run_mp_and_delete(manager):
    '''Run one igblast job for a single split fasta file, parse its output
    and delete the intermediate files.

    Executed inside a multiprocessing worker; *manager* is a dict carrying
    one job's worth of arguments (see g_execute for the keys).
    NOTE: this module is Python 2 (print statements).
    '''
    # blast options
    blast_options = manager['blast_options']
    # bools
    _zip_bool = manager['zip_bool']
    # file name outputs, these will all be temp files to be parsed later
    _file = manager['split_file']
    _blast_out = _file + ".blast_out"
    if not _zip_bool:
        _json_out = _file + ".json"
    else:
        _json_out = _file + ".json.gz"
    # set the filename in the instance:
    blast_options['-query'] = _file
    blast_options['-out'] = _blast_out
    # temporary path
    _temporary_path = manager['temporary_path']
    # output options
    _output_options = manager['output_options']
    # set up the command line
    _cline = [manager['executable']]  # we know its in this directory since we copied it here to make this executable
    for argument in blast_options:
        arg = blast_options[argument]
        if arg.startswith("C"):
            # NOTE(review): quotes any value starting with "C" — presumably
            # Windows paths like C:\...; confirm the intent, this also hits
            # ordinary values that merely start with a capital C.
            arg = '"' + arg + '"'
        current_argument = [argument, arg]
        _cline += current_argument
    print "Running BLAST on processor {0} for split file {1}".format(manager['proc_number'], _file)
    print " ".join(_cline)
    sub = sp.Popen(_cline, stdout=sp.PIPE, stderr=sp.PIPE)
    stdout, stderr = sub.communicate()
    # if we have output, lets print it
    if stdout:
        print stdout
    if stderr:
        # any stderr output from blast is treated as fatal
        raise Exception("ERROR FROM BLAST {0}".format(stderr))
    _output_type = manager['output_type']
    print "Parsing BLAST output to {0} on Processor {1}".format(_output_type, manager['proc_number'])
    if _output_type == "blast_out":
        # raw blast output requested: keep .blast_out, drop the split fasta
        os.remove(_file)
        print "Removing {0}".format(_file)
    else:
        # parse the raw blast output into json/csv, then drop both temp files
        print _blast_out, _file, _temporary_path
        op = output_parser.igblast_output(_blast_out, _file, _temporary_path,
                                          _output_options, species="human", gui=True, zip_bool=_zip_bool)
        op.parse_blast_file_to_type(_json_out, _output_type)
        print "Done parsing {0} type\nRemoving {1} and {2}".format(_output_type, _file, _blast_out)
        os.remove(_file)
        os.remove(_blast_out)
def concat(_manager_dict):
    '''Concatenate the per-process output chunks into one final file.

    Handles the four combinations of output type (json/csv/blast_out) and
    gzip compression, removing each chunk (and its companion .db file for
    parsed outputs) as it is merged.  For csv, the header line of every
    chunk after the first is skipped (``f.next()`` — Python 2).
    '''
    out_file = _manager_dict['output_file']
    file_type = _manager_dict['output_type']
    zip_bool = _manager_dict['zip_bool']
    # common prefix of all chunk files: <split dir>/<input basename>
    file_names = os.path.dirname(_manager_dict['split_file']) \
        + "/" + os.path.basename(_manager_dict['non_split']).split('.')[0]
    marker = ""
    if zip_bool:
        marker = ".gz"
    print "Concatinating {0} files to {1}.{2}{3}".format(file_type, out_file, file_type, marker)
    if zip_bool and file_type == "json":
        zipped_and_json = glob.glob(file_names + "*.json.gz")
        with gzip.open(out_file + ".json.gz", 'wb') as gf:
            for file in zipped_and_json:
                f_in = gzip.open(file, 'rb')
                gf.writelines(f_in)
                f_in.close()
                os.remove(file)
                os.remove(file.split('.json.gz')[0] + '.db')
    elif file_type == "json" and not zip_bool:
        just_json = glob.glob(file_names + "*.json")
        with open(out_file + ".json", 'w') as gf:
            for file in just_json:
                f_in = open(file, 'r')
                gf.writelines(f_in)
                f_in.close()
                os.remove(file)
                os.remove(file.split('.json')[0] + '.db')
    elif zip_bool and file_type == "csv":
        csv_zip = glob.glob(file_names + "*.csv.gz")
        with gzip.open(out_file + ".csv.gz", 'wb') as gf:
            # first chunk keeps its header line...
            for line in gzip.open(csv_zip[0], 'rb'):
                gf.write(line)
            # ...remaining chunks drop theirs
            for files in csv_zip[1:]:
                f = gzip.open(files, 'rb')
                f.next()
                for line in f:
                    gf.write(line)
                f.close()
            for file in csv_zip:
                os.remove(file)
                os.remove(file.split('.csv.gz')[0] + '.db')
    elif file_type == "csv" and not zip_bool:
        just_csv = glob.glob(file_names + "*.csv")
        with open(out_file + ".csv", 'w') as gf:
            for line in open(just_csv[0]):
                gf.write(line)
            for files in just_csv[1:]:
                f = open(files)
                f.next()
                for line in f:
                    gf.write(line)
                f.close()
            for file in just_csv:
                os.remove(file)
                os.remove(file.split('.csv')[0] + '.db')
    elif file_type == "blast_out" and not zip_bool:
        blast_only = glob.glob(file_names + "*.blast_out")
        with open(out_file + ".blast_out", 'w') as gf:
            for file in blast_only:
                for line in open(file):
                    gf.write(line)
                os.remove(file)
    elif zip_bool and file_type == "blast_out":
        blast_only = glob.glob(file_names + "*.blast_out")
        with gzip.open(out_file + ".blast_out.gz", 'wb') as gf:
            for file in blast_only:
                for lines in open(file):
                    gf.write(lines)
                os.remove(file)
def g_execute(blast_options, outputoptions):
'''A function that takes in and executes options from the gui widgets'''
# variables
mp.freeze_support()
ts = time.time()
fomatted_time = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
print "Process Started {0}".format(fomatted_time)
processors = outputoptions['num_procs']
pool = mp.Pool(processes=processors)
file_name = outputoptions['pre_split_up_input']
path = outputoptions['tmp_data_directory']
if not os.path.exists(path):
msg = "{0} is not found, creating directory...".format(path)
os.makedirs(os.path.abspath(path))
print msg
if not os.path.exists(os.getcwd() + "/internal_data"):
print "Copying internal data to current directory {0} to {1}".format(outputoptions['internal_data_directory'],
os.getcwd() + "/internal_data")
shutil.copytree(outputoptions['internal_data_directory'], os.getcwd() + "/internal_data")
raw_input()
# split fasta file up
all_fasta = split_fasta.split_fasta(processors, path, file_name, suffix=".tmp_fasta")
glob_path = os.path.join(path, os.path.basename(file_name).split('.fasta')[0] + "*.tmp_fasta")
print "Splitting up file {0} into {1}".format(file_name, path)
split_up_starting_files = glob.glob(glob_path)
# output options
zip_bool = outputoptions['zip_bool']
output_file = outputoptions['final_outfile']
# manager_dict
_manager_list = []
_manager_dict = {}
for i, _file in enumerate(split_up_starting_files, start=1): # the full file name
_manager_dict['executable'] = outputoptions['executable']
_manager_dict['non_split'] = file_name
_manager_dict['split_file'] = _file
_manager_dict['zip_bool'] = zip_bool
_manager_dict['all_fasta'] = all_fasta
_manager_dict['blast_options'] = blast_options # all the blast options
_manager_dict['output_type'] = outputoptions['output_type']
_manager_dict['output_file'] = output_file
_manager_dict['output_options'] = outputoptions['output_options']
_manager_dict['temporary_path'] = path
_manager_dict['proc_number'] = i
_manager_list.append(_manager_dict)
_manager_dict = {}
# run_protocol
# for i in _manager_list:
# run_mp_and_delete(i)
pool.map(run_mp_and_delete, _manager_list)
concat(_manager_list[0])
print "Process is done"
print "Took {0}".format(time.time() - ts)
|
import functools
import typing
from collections import defaultdict
from django.core.cache import cache
from site_settings.models import Setting
# Maps the Python type of a setting's default value to the integer
# discriminator stored in Setting.value_type.
VALUES_TYPE_MAP = (
    (int, 1),
    (str, 2),
    (bool, 3),
)
# Cache key template, parameterized by the setting (or group) alias.
CACHE_SETTINGS_KEY = 'settings_%s'
_CACHE_MISS = object()  # sentinel so cached falsy values (0, '', False) still count as hits


def cached_setting(func):
    """Cache the wrapped function's result under an alias-derived key.

    The wrapped callable must take the alias as its first positional
    argument; pass ``force=True`` to bypass the cached value.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        is_force = kwargs.pop('force', False)
        key = CACHE_SETTINGS_KEY % args[0]
        if not is_force:
            # Single cache round-trip instead of the previous
            # ``key in cache`` + ``cache.get(key)`` pair, which did two
            # lookups and raced with eviction between them.
            cached = cache.get(key, _CACHE_MISS)
            if cached is not _CACHE_MISS:
                return cached
        # NOTE(review): the decorated functions in this module appear to
        # return a single value, not a (result, cache_time) pair — confirm
        # this contract before relying on ``cache_time``.
        result, cache_time = func(*args, **kwargs)
        cache.set(key, result, timeout=cache_time)
        return result
    return wrapper
@cached_setting
def get_setting(alias: str, default: typing.Optional[typing.Union[str, int, bool]] = None, get_or_create: bool = False):
    """Fetch a setting's typed value by *alias*.

    With ``get_or_create=True`` a missing setting is created from *default*
    (which must then be provided); otherwise a missing setting simply
    returns *default*.
    """
    if get_or_create:
        # Explicit check instead of ``assert``: asserts vanish under
        # ``python -O`` and would also wrongly reject valid falsy defaults
        # such as ``False`` or ``0``.
        if default is None:
            raise ValueError('default must be set when get_or_create=True')
        instance, _ = Setting.objects.values('value', 'value_type').get_or_create(
            alias=alias,
            defaults=dict(
                alias=alias,
                value=str(default),
                value_type=dict(VALUES_TYPE_MAP).get(type(default))
            )
        )
        return Setting.get_value_by_type(instance['value'], instance['value_type'])
    try:
        instance = Setting.objects.values('value', 'value_type').get(alias=alias)
        return Setting.get_value_by_type(instance['value'], instance['value_type'])
    except Setting.DoesNotExist:
        return default
@cached_setting
def get_setting_group(alias: str):
    """Return {setting alias: typed value} for every setting in the group *alias*."""
    instances = Setting.objects.filter(group__alias=alias)
    return {instance.alias: instance.get_value() for instance in instances}
def get_context_settings():
    """Collect all template-visible settings for a template context.

    Settings whose group is flagged ``load_in_template`` are nested under
    their group alias; individually flagged settings sit at the top level
    (and overwrite a clashing group alias via ``dict.update``).
    """
    result = defaultdict(dict)
    settings_values = Setting.objects.values('alias', 'value', 'value_type').filter(load_in_template=True)
    settings_values_group = Setting.objects.values('alias', 'value', 'value_type', 'group__alias').filter(
        group__load_in_template=True)
    for item in settings_values_group:
        grp = item['group__alias']
        result[grp][item['alias']] = Setting.get_value_by_type(item['value'], item['value_type'])
    settings = {instance['alias']: Setting.get_value_by_type(instance['value'], instance['value_type']) for instance in
                settings_values}
    result.update(settings)
    return dict(result)
|
"Geocoder app"
from flask import Blueprint
geocoder = Blueprint("geocoder", __name__)
|
"""
"""
import cPickle
import lasagne
import numpy as np
import sys
import theano
BATCH_SIZE = 128
in_text = ""


def gen_data(p, char_to_ix, batch_size=BATCH_SIZE, data=in_text, SEQ_LENGTH=20, vocab_size=500, return_target=True):
    """Build a one-hot batch of character windows starting at offset *p*.

    Row ``n`` one-hot encodes ``data[p + n : p + n + SEQ_LENGTH]``; when
    *return_target* is truthy, ``y[n]`` is the vocabulary index of the
    character immediately following that window (otherwise zeros).
    """
    inputs = np.zeros((batch_size, SEQ_LENGTH, vocab_size))
    targets = np.zeros(batch_size)
    for row in range(batch_size):
        start = p + row
        for offset in range(SEQ_LENGTH):
            inputs[row, offset, char_to_ix[data[start + offset]]] = 1.
        if return_target:
            targets[row] = char_to_ix[data[start + SEQ_LENGTH]]
    return inputs, np.array(targets, dtype='int32')
def main(file_name, N=1000):
    """Load a pickled character-RNN bundle and sample *N* characters,
    greedily (argmax), seeded with an HTML doctype phrase.

    NOTE: Python 2 module (``cPickle``).
    """
    # NOTE(review): pickled binary data should be opened with "rb" —
    # plain "r" happens to work on Unix Python 2 but corrupts on Windows.
    with open(file_name, "r") as f:
        net_list = cPickle.load(f)
    # Unpack the saved bundle (order fixed by the training script).
    probs = net_list[0]              # compiled theano function: x -> char probabilities
    network_output = net_list[1]
    cost = net_list[2]
    all_params = net_list[3]
    l_out = net_list[4]
    l_forward_slice = net_list[5]
    l_forward_1 = net_list[6]
    l_forward_2 = net_list[7]
    l_in = net_list[8]
    seq_len = net_list[9]
    ix_to_char = net_list[10]
    char_to_ix = net_list[11]
    vocab_size = net_list[12]
    batch_size = net_list[13]
    phrase = "<!DOCTYPE html><html>"
    # Encode the last seq_len characters of the seed phrase (no target).
    x,_ = gen_data(len(phrase)-seq_len, char_to_ix, 1, phrase, seq_len, vocab_size, 0)
    sample_ix = []
    for i in range(N):
        # Pick the character that got assigned the highest probability
        ix = np.argmax(probs(x).ravel())
        # Alternatively, to sample from the distribution instead:
        # ix = np.random.choice(np.arange(vocab_size), p=probs(x).ravel())
        sample_ix.append(ix)
        # Slide the window left by one and append the sampled character.
        x[:,0:seq_len-1,:] = x[:,1:,:]
        x[:,seq_len-1,:] = 0
        x[0,seq_len-1,sample_ix[-1]] = 1.
    random_snippet = phrase + ''.join(ix_to_char[ix] for ix in sample_ix)
    print("----\n %s \n----" % random_snippet)
if __name__ == '__main__':
    # Usage: python <script> <pickled-network-file>; N keeps its default.
    main(sys.argv[1])
#!/usr/bin/python
import ldap
import re
import datetime
import sys
# Glue service types that map onto SAM service flavours (see convertServiceFlavour).
flavors = ['org.glite.ce.CREAM', 'org.glite.ce.Monitor', 'org.glite.RTEPublisher', 'org.glite.wms.WMProxy', 'local-data-location-interface', 'SRM']
def convertServiceFlavour(flav):
    """Map a Glue service type string to its SAM service-flavour name.

    Unknown inputs pass through unchanged; substitutions are applied in a
    fixed order, like the original chained ``replace`` calls.
    """
    substitutions = (
        ('org.glite.ce.CREAM', 'CREAM-CE'),
        ('org.glite.ce.Monitor', 'CREAM-CE'),
        ('org.glite.RTEPublisher', 'CREAM-CE'),
        ('org.glite.wms.WMProxy', 'WMS'),
        ('SRM', 'SRMv2'),
        ('local-data-location-interface', 'Local-LFC'),
    )
    for glue_type, flavour in substitutions:
        flav = flav.replace(glue_type, flavour)
    return flav
def isServiceFlavour(flav):
    # True when the Glue service type is one we publish in the feed.
    return flav in flavors
l = ldap.initialize('ldap://topbdii.grif.fr:2170')
r = l.search_s('mds-vo-name=local,o=grid',ldap.SCOPE_SUBTREE,'(GlueServiceAccessControlBaseRule=VO:biomed)',['GlueServiceType','GlueForeignKey','GlueServiceEndpoint'])
if r == {}:
print "Error, no feed generate"
sys.exit(1)
sites = {};
for dn,entry in r:
site_name = entry['GlueForeignKey'][0].replace('GlueSiteUniqueID=','');
service_name = entry['GlueServiceType'][0];
endpoint = re.search('(?<=//).*:',entry['GlueServiceEndpoint'][0]);
endpoint_str = "";
try:
endpoint_str = endpoint.group(0).replace(':','');
except:
pass;
if endpoint_str != "" :
try :
sites[site_name][endpoint_str] = entry['GlueServiceType'][0];
except KeyError:
sites[site_name] = {endpoint_str : entry['GlueServiceType'][0]};
print "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>";
print "<root xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:noNamespaceSchemaLocation=\"atp_vo_feed_schema.xsd\">";
print " <title>BIOMED topology for SAM</title>"
print " <description>Groups of services defined by BIOMED VO to be used by the SAM/Nagios monitoring infrastructure</description>";
print " <feed_responsible name=\"Franck Michel\" dn=\"/O=GRID-FR/C=FR/O=CNRS/OU=I3S/CN=Franck Michel\"/>";
print " <last_update>" + datetime.datetime.now().strftime('%Y-%m-%dT%XZ%Z') + "</last_update>"
print " <vo>biomed</vo>";
for site in sorted(sites):
if not re.match('Glue.*',site) :
siteEntry = " <atp_site name=\"" + site + "\">\n";
isAtLeastOneServer = False
for box in sites[site]:
if isServiceFlavour(sites[site][box]):
siteEntry += " <service hostname=\"" + box + "\" flavour=\"" + convertServiceFlavour(sites[site][box]) + "\"/>\n";
isAtLeastOneServer = True
siteEntry += " <group name=\"Tier-2\" type=\"biomed_Tier\" />\n";
siteEntry += " <group name=\"" + site + "\" type=\"biomed_Site\" />\n";
siteEntry += " </atp_site>";
if isAtLeastOneServer:
print siteEntry;
print "</root>";
|
# -*- coding: utf-8 -*-
import json
import hashlib
import os
import re
import time
import hashlib
import datetime
os.environ['PYWIKIBOT_DIR'] = os.path.dirname(os.path.realpath(__file__))
import pywikibot
from config import config_page_name # pylint: disable=E0611,W0614
def timeRecord(op, t):
    """Update the bot's status timestamps on its user page.

    Rewrites the text between the T3r markers with *t* always, and the T3o
    markers too when *op* is True (an on-wiki edit happened this run).
    Relies on the module-level ``site`` being logged in.
    """
    user_page = pywikibot.Page(site, "User:Hamish-bot")
    user_page_text = user_page.text
    user_page_text = re.sub(r'<!-- T3rs -->(.*)<!-- T3re -->', '<!-- T3rs -->' + t + '<!-- T3re -->', user_page_text, flags=re.M)
    if op:
        user_page_text = re.sub(r'<!-- T3os -->(.*)<!-- T3oe -->', '<!-- T3os -->' + t + '<!-- T3oe -->', user_page_text, flags=re.M)
    pywikibot.showDiff(user_page.text, user_page_text)
    user_page.text = user_page_text
    user_page.save(summary = "Updating task report", minor = False)
# Main script: archive stale sections of the configured discussion page into
# monthly archive pages, then record the run time on the bot's user page.
os.environ['TZ'] = 'UTC'
print('Starting at: ' + time.asctime(time.localtime(time.time())))
# Display timestamp in UTC+8 for the on-wiki report.
rec_time = (datetime.datetime.now() + datetime.timedelta(hours = 8)).__format__('%d/%m/%y %H:%M')
site = pywikibot.Site()
site.login()
config_page = pywikibot.Page(site, config_page_name)
cfg = config_page.text
cfg = json.loads(cfg)
print(json.dumps(cfg, indent=4, ensure_ascii=False))
op = False  # becomes True once the main page has actually been saved
if not cfg["enable"]:
    exit("disabled\n")
summary_prefix = "[[Wikipedia:机器人/申请/Hamish-bot/3|T3]]:"
rsnpage = pywikibot.Page(site, cfg["main_page_name"])
text = rsnpage.text
# Split the page into level-2 sections by injecting a unique marker before
# each "== ... ==" heading and splitting on it.
rndstr = hashlib.md5(str(time.time()).encode()).hexdigest()
text = re.sub(r'^(==[^=]+==)$', rndstr + r'\1', text, flags=re.M)
text = text.split(rndstr)
mainPageText = text[0].strip()  # lead section is always kept
text = text[1:]
archivelist = {}  # {(year, month): [section, ...]}
count = 0
for section in text:
    section = section.strip()
    if section == '':
        continue
    else:
        title = section.split('\n')[0]
        print(title, end="\t")
        processed = False
        publicizing = False
        moved_pattern = r"\{\{(moveto|Movedto|Moveto|Moved to|Switchto|移动到|已移动至|移動到|已移動至)"
        status_pattern = r"\{\{(S|s)tatus\|(.*)\}\}"
        # A "moved to" template counts as done; otherwise read the status
        # template.  NOTE(review): a section with neither template makes the
        # [0] indexing raise IndexError — presumably every section carries a
        # status template; confirm.
        status = "done" if re.findall(moved_pattern, section) else re.findall(status_pattern, section)[0][1]
        print("status", status, end="\t")
        if status in cfg["publicizing_status"]:
            publicizing = True
            print("publicizing", end="\t")
        elif status in cfg["done_status"]:
            processed = True
            print("processed", end="\t")
        else:
            print("not processed", end="\t")
        # Find the newest UTC signature timestamp in the section.
        lasttime = datetime.datetime(1, 1, 1)
        for m in re.findall(r"(\d{4})年(\d{1,2})月(\d{1,2})日 \(.\) (\d{2}):(\d{2}) \(UTC\)", str(section)):
            d = datetime.datetime(int(m[0]), int(m[1]), int(m[2]), int(m[3]), int(m[4]))
            lasttime = max(lasttime, d)
        print(lasttime, end="\t")
        # Archive when the section is stale (different TTLs for processed vs
        # unprocessed), is not in a "publicizing" state, and has a timestamp.
        if (
            (
                (processed and not publicizing and time.time() - lasttime.timestamp() > cfg["time_to_live_for_processed"])
                or (not processed and not publicizing and time.time() - lasttime.timestamp() > cfg["time_to_live_for_not_processed"])
            )
            and lasttime != datetime.datetime(1, 1, 1)):
            target = (lasttime.year, lasttime.month)
            if target not in archivelist:
                archivelist[target] = []
            archivelist[target].append(section)
            count += 1
            print("archive to " + str(target), end="\t")
        else:
            mainPageText += '\n\n' + section
            print("not archive", end="\t")
        print()
if count == 0:
    # Nothing to archive: still record the run time, then stop.
    timeRecord(op, rec_time)
    exit("nothing changed")
pywikibot.showDiff(rsnpage.text, mainPageText)
rsnpage.text = mainPageText
summary = cfg["main_page_summary"].format(count)
print(summary)
rsnpage.save(summary=summary_prefix + summary, minor=False)
op = True
# Append each month's archived sections to its archive page.
for target in archivelist:
    archivepage = pywikibot.Page(site, cfg["archive_page_name"].format(target[0], target[1]))
    text = archivepage.text
    print(archivepage.title())
    if not archivepage.exists():
        text = cfg["archive_page_preload"]
    text += "\n\n" + "\n\n".join(archivelist[target])
    # Mark still-open statuses as expired-and-archived.
    text = re.sub(r"{{status2\|(讨论|討論)中}}", "{{status2|-|已過時並存檔}}", text)
    pywikibot.showDiff(archivepage.text, text)
    archivepage.text = text
    summary = cfg["archive_page_summary"].format(len(archivelist[target]))
    print(summary)
    archivepage.save(summary=summary_prefix + summary, minor=False)
timeRecord(op, rec_time)
import sys
import os
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
lib_path = os.path.abspath(os.path.join(sys.path[0], '..'))
sys.path.append(lib_path)
from src.basic_cv_tool import *
'''
This is the test file for project No.1 which consists of all the required
assignments.
'''
# NOTE(review): the input is lena.bmp but the outputs are saved as "elain*"
# — the names look copied from another run of this homework; confirm before
# relying on the output filenames.
image_2_name = '../../homework1/lena.bmp'
tool = basic_cv_tool(image_2_name)
img = tool.ImageRead(image_2_name)
# Upscale to 2048x2048 with three interpolation methods for comparison.
new_img1 = tool.image_bicubic_interpolation(img,(2048,2048))
new_img2 = tool.image_bilinear_interpolation(img,(2048,2048))
new_img3 = tool.image_Nearest_neighbor_interpolation(img,(2048,2048))
# ``Image`` comes in via the ``from src.basic_cv_tool import *`` wildcard —
# presumably PIL.Image re-exported; verify.
new_img1 = Image.fromarray(new_img1)
new_img2 = Image.fromarray(new_img2)
new_img3 = Image.fromarray(new_img3)
# Save each result in both BMP and PNG form.
new_img1.save('elaincubic.bmp')
new_img1.save('elaincubic.png')
new_img2.save('elainlinear.bmp')
new_img2.save('elainlinear.png')
new_img3.save('elainnear.bmp')
new_img3.save('elainnear.png')
# -*- coding: utf-8 -*-
import random
from datetime import datetime
from operator import itemgetter
import requests
import time
from pyinstagram.model import Media
from .exceptions import OAuthException, PyInstagramException
from .oauth import OAuth
from .constants import API_URL
from .utils import DESAdapter
class InstagramApiClient(object):
"""
Classe base per le chiamate all'API ufficiale!
"""
def __init__(self, access_token=None):
self.access_token = access_token
if isinstance(access_token, OAuth):
self.access_token = access_token.access_token
if not self.access_token:
# TODO: Gestire il caso in cui l'access token scada
raise OAuthException("Per usare la libreria devi prima autenticarti!")
@staticmethod
def go_to_sleep(seconds=3600):
"""
Questo metodo viene chiamato quando è stato raggiunto il
limite consentito dall'API, se succede metto in pausa il
programma per un'ora.
:param seconds: int - Numero di secondi di attesa
:return: None
"""
time.sleep(seconds)
def _make_request(self, uri, method='get', data=None):
"""
Metodo che effettua la richiesta alle API Instagram.
:param uri: str - L'Uri da chiamare
:param method: str - metodo http con cui fare la richiesta
:param data: dict - dizionario con i dati da passare nella richiesta
:return: list - lista di dati di risposta
"""
next_url = "" # per la paginazione
res = []
retry = 1 # serve per ripetere la chiamata dopo un ora se supero il limite di richieste
while retry:
res = getattr(requests, method)(uri, data=data)
res, next_url = self._handle_response(res)
if res == 0:
# la chiamata non è andata a buon fine perchè ho raggiunto il limite di chiamate
# ho già aspettato un'ora, adesso ci riprovo.
continue
retry = 0
return res, next_url
def _handle_response(self, request):
"""
Una volta effettuata la chiamata, ci occupiamo di
interpretarne la risposta.
Se la richiesta è andata a buon fine, restituiamo la
lista dei dati, altrimenti o mettiamo in pausa il
programma (se abbiamo raggiunto il limite dell'API)
o solleviamo un'eccezione appropriata.
:param request: requests - la risposta della chiamata
:return: list - lista dei dati ricevuti
"""
if request.status_code == 200:
# Tutto ok!
try:
res = request.json()
except Exception:
raise Exception(request.text)
else:
data = res['data']
next_url = res.get('pagination', {}).get('next_url')
return data, next_url
elif request.status_code == 429:
# OAuthRateLimitException
self.go_to_sleep()
return 0
elif request.status_code == 400:
raise OAuthException(request.json()['meta']['error_message'])
elif "<!DOCTYPE html>" in request.text:
raise PyInstagramException("Page not found")
else:
raise PyInstagramException
def get_by_user(self, id_user=None, count=0):
"""
Metodo usato per cercare gli ultimi post di un utente.
Se non viene passato il paramentro id_user, chiederemo
i post dell'utente che ha autorizzato l'app.
:param id_user: str - post dell'utente da cercare
:param count: int - limita a {count} risultati
:return: list - lista dati
"""
all_media = []
id_user = id_user or "self"
url = API_URL + "users/{0}/media/recent/?access_token={1}".format(id_user, self.access_token)
if count:
url += "&count={}".format(count)
raw_list, next_url = self._make_request(url)
all_media.extend(raw_list)
if len(all_media) > count:
return all_media[:count]
while next_url:
raw_list, next_url = self._make_request(next_url)
all_media.extend(raw_list)
return all_media[:count]
def get_by_hashtag(self, tags=(), count=0):
"""
Metodo usato per cercare i post con uno o più hashtag.
:param tags: iterable - gli hashtag da cercare
:param count: int - massimo numero di risultati da restituire
:return: list - lista di dati
"""
if isinstance(tags, str):
tags = (tags, )
all_media = []
for tag in tags:
url = API_URL + "tags/{0}/media/recent?access_token={1}".format(tag, self.access_token)
if count:
url += "&count={}".format(count)
raw_list, next_url = self._make_request(url)
all_media.extend(raw_list)
while next_url:
raw_list, next_url = self._make_request(next_url)
all_media.extend(raw_list)
return all_media
def search_for_tag(self, tag, count=3):
    """
    Search for hashtags similar to *tag*.

    :param tag: str - hashtag to look up
    :param count: int - keep at most this many hashtags
    :return: dict - {tag name: media count}
    """
    url = API_URL + "tags/search?q={0}&access_token={1}".format(tag, self.access_token)
    results, _ = self._make_request(url)
    # NOTE(review): the ascending sort keeps the *least* popular tags in the
    # first *count* slots -- confirm this is intended before changing it.
    ordered = sorted(results, key=itemgetter('media_count'))
    return {entry['name']: entry['media_count'] for entry in ordered[:count]}
class InstagramJsonClient(object):
    """
    Client for simple unauthenticated GET requests against Instagram's
    public ``?__a=1`` JSON endpoints, without access tokens or the
    official API.  Makes heavy use of URLs with query strings.
    """
    def __init__(self):
        # All requests go through one session with 3DES support mounted.
        self.base_url = "https://www.instagram.com/"
        self.session = self._init_session()
    def _init_session(self):
        """Return a requests session with 3DES support enabled for Instagram."""
        s = requests.Session()
        s.mount(self.base_url, DESAdapter())
        return s
    def get_user_info(self, user):
        """
        Return a user's public profile information.

        :param user: Instagram username
        :return: dict with the user's info ({} when the key is missing)
        """
        base_url = "{base}{user}/?__a=1".format(
            base=self.base_url,
            user=user
        )
        res = self.session.get(base_url)
        try:
            res = res.json()
        except Exception:
            raise PyInstagramException("Impossibile scaricare i dati dall'indirizzo: {}".format(base_url))
        return res.get('user', {})
    def get_by_user(self, user, count=None, since=None, until=None):
        """
        Search a user's public posts, handling pagination automatically.

        Returns a list of media dicts as emitted by Instagram's ``?__a=1``
        endpoint; each entry carries keys such as ``id``, ``code``,
        ``user``, ``images``, ``created_time``, ``caption``, ``likes``,
        ``comments``, ``type``, ``link`` and, for videos, ``videos`` /
        ``video_views``.

        :param user: str - Instagram username
        :param count: int - limit the number of results
        :param since: str - results from this date on, e.g. "20170101000000"
        :param until: str - results up to this date, e.g. "20171231235959"
        :return: list of media dicts
        """
        if since:
            try:
                since = datetime.strptime(since, "%Y%m%d%H%M%S")
            except ValueError:
                raise ValueError("Il parametro since non è in un formato corretto (es. '20170101000000')")
        if until:
            try:
                until = datetime.strptime(until, "%Y%m%d%H%M%S")
            except ValueError:
                raise ValueError("Il parametro until non è in un formato corretto (es. '20170101000000')")
        all_data = []
        # {max} is later filled with "&max_id=..." to request the next page.
        base_url = "{base}{user}?__a=1{{max}}".format(
            base=self.base_url,
            user=user
        )
        max_id = ""
        next_url = base_url.format(max=max_id)
        while True:
            res = self.session.get(next_url)
            if not res.status_code == 200:
                return all_data[:count]
            try:
                res = res.json()
            except Exception:
                raise PyInstagramException("Impossibile scaricare i dati dall'indirizzo: {}".format(next_url))
            for media_res in res['user']['media']['nodes']:
                # Instagram does not let us filter by date, but it does give
                # each post's creation time as a Unix timestamp.  So, to
                # honour an optional since/until window, check that the post
                # was created inside that interval.
                created_at = int(media_res['date'])
                if since and created_at < time.mktime(since.timetuple()):
                    # gone too far back in time; we can stop
                    return all_data[:count]
                if until and created_at > time.mktime(until.timetuple()):
                    continue
                all_data.append(media_res)
            if res['user']['media']['nodes'] and (not len(all_data) > count if count else True):
                # we have items, more pages to download, and the result
                # limit has not been reached yet
                try:
                    max_id = res['user']['media']['nodes'][-1]['id']
                    next_url = base_url.format(max="&max_id={}".format(max_id))
                except IndexError:
                    # wait a while: the index is empty and Instagram is
                    # throttling the flow
                    time.sleep(random.randint(10, 60))
                else:
                    # all good, there is more data to download
                    continue
            else:
                # no data, or already more than requested
                break
        return all_data[:count]
    def get_by_hashtag(self, tags=(), count=1000000, top_posts=True, since=None, until=None):
        """
        Search by hashtag, handling pagination automatically.

        Returns a list of SqlAlchemy ``Media`` objects built from the raw
        nodes emitted by Instagram's ``explore/tags/...?__a=1`` endpoint
        (``id``, ``owner``, ``dimensions``, ``taken_at_timestamp``,
        ``display_src``, ``shortcode``, like/comment counts, ...).

        :param tags: str or tuple - hashtag (without the #) or tuple of hashtags
        :param count: int - limit the results
        :param top_posts: bool - restrict to top posts, otherwise return everything
        :param since: str - results from this date on, e.g. "20170101000000"
        :param until: str - results up to this date, e.g. "20171231235959"
        :return: list of Media objects
        """
        if isinstance(tags, str):
            tags = (tags, )
        if since:
            try:
                since = datetime.strptime(since, "%Y%m%d%H%M%S")
            except ValueError:
                raise ValueError("Il parametro since non è in un formato corretto (es. '20170101000000')")
        if until:
            try:
                until = datetime.strptime(until, "%Y%m%d%H%M%S")
            except ValueError:
                raise ValueError("Il parametro until non è in un formato corretto (es. '20170101000000')")
        # Maps Media attribute name -> dotted path inside the raw JSON node.
        mapper = {
            'id': 'id',
            'comments': 'edge_media_to_comment.count',
            'unix_datetime': 'taken_at_timestamp',
            'user': 'owner.id',
            'likes': 'edge_liked_by.count',
            'is_video': 'is_video',
            'url': 'display_src',
            'height': 'dimensions.height',
            'width': 'dimensions.width',
            'code': 'shortcode'
        }
        all_data = []
        for tag in tags:
            all_data_tag = []
            base_url = "{base}explore/tags/{tag}?__a=1{{max}}".format(
                base=self.base_url,
                tag=tag
            )
            max_id = ""
            next_url = base_url.format(max=max_id)
            while True:
                res = self.session.get(next_url)
                try:
                    res = res.json()
                except Exception:
                    if "Sorry, this page isn't available" in res.text:
                        # Post removed or no longer reachable.
                        # NOTE(review): this `continue` re-requests the same
                        # next_url, so a permanently unavailable page loops
                        # forever -- confirm whether `break` was intended.
                        continue
                    else:
                        raise PyInstagramException("Impossibile scaricare i dati dall'indirizzo: {}".format(next_url))
                res_media = res['graphql']['hashtag']['edge_hashtag_to_top_posts'] if top_posts else res['graphql']['hashtag']['edge_hashtag_to_media']
                has_next_page = res['graphql']['hashtag']['edge_hashtag_to_media']['page_info']['has_next_page']
                # convert the raw nodes into SqlAlchemy objects
                sqlalchemy_media = []
                for element in res_media['edges']:
                    # Instagram does not let us filter by date, but it does
                    # give each post's creation time as a Unix timestamp; use
                    # it to honour the optional since/until window.
                    created_at = int(element['node']['taken_at_timestamp'])
                    if since and created_at < time.mktime(since.timetuple()):
                        # gone too far back in time; we can stop
                        break
                    if until and created_at > time.mktime(until.timetuple()):
                        continue
                    model = Media()
                    for field_to, getter in mapper.items():
                        path = getter.split('.')
                        val = element['node']
                        for key in path:
                            val = val.get(key, {})
                        if isinstance(val, dict):
                            # path missing from the JSON: store None, not {}
                            val = None
                        setattr(model, field_to, val)
                    model.json = element['node']
                    # NOTE(review): a post without a caption has no edges
                    # here, so this raises IndexError -- confirm whether
                    # captionless posts should be skipped or get caption=None.
                    model.caption = element['node']['edge_media_to_caption']['edges'][0]['node']['text']
                    sqlalchemy_media.append(model)
                all_data_tag.extend(sqlalchemy_media)
                if res_media['edges'] and has_next_page and not len(all_data_tag) > count and not top_posts:
                    try:
                        max_id = res['graphql']['hashtag']['edge_hashtag_to_media']['page_info']['end_cursor']
                        next_url = base_url.format(max="&max_id={}".format(max_id))
                    except IndexError:
                        # wait a while: the index is empty and Instagram is
                        # throttling the flow
                        time.sleep(random.randint(10, 60))
                    else:
                        # all good, there is more data to download
                        continue
                else:
                    # no data, or already more than requested
                    break
            all_data.extend(all_data_tag)
        return all_data[:count]
    def get_by_media_codes(self, codes=(), all_comments=False):
        """
        Return a list with the data of the requested posts (each identified
        by the post's 'code' string).  When the all_comments flag is set,
        extra requests page through the comments and merge them into the
        original json, so the final list has one element per requested post.

        :param codes: code string, or tuple of post codes
        :param all_comments: bool - if set, download every comment
        :return: list of json dicts with the requested posts' data
        """
        if isinstance(codes, str):
            codes = (codes,)
        all_data = []
        for code in codes:
            url = "{base}p/{code}?__a=1".format(
                base=self.base_url,
                code=code
            )
            res = self.session.get(url)
            try:
                res = res.json()
            except Exception:
                if "Sorry, this page isn't available" in res.text:
                    # Post removed or no longer reachable
                    continue
                else:
                    raise PyInstagramException("Impossibile scaricare i dati dall'indirizzo: {}".format(url))
            if all_comments:
                # Keep requesting comment pages, splicing each page's edges
                # into the first response, until pagination is exhausted.
                while True:
                    page_info = res['graphql']['shortcode_media']['edge_media_to_comment']['page_info']
                    if page_info['has_next_page']:
                        next_url = url + "&max_id={}".format(page_info['end_cursor'])
                        next_res = self.session.get(next_url)
                        # NOTE(review): unlike above, this .json() call is not
                        # guarded -- a non-JSON reply raises here; confirm.
                        next_res = next_res.json()
                        res_edges = res['graphql']['shortcode_media']['edge_media_to_comment']['edges']
                        next_edges = next_res['graphql']['shortcode_media']['edge_media_to_comment']['edges']
                        res_edges.extend(next_edges)
                    else:
                        break
            all_data.append(res)
        return all_data
|
#programador: Diego Franco
#data: 20/10/2019
''' Este programa tem como objetivo facilitar o orçamento de sessoes fotográficas'''
import os
def validaEntrada(string):
    """Prompt with *string* until the user types a strictly positive number.

    Non-numeric input is silently ignored and the prompt is repeated.
    """
    while True:
        try:
            valor = float(input(string))
        except ValueError:
            continue  # not a number: ask again
        if valor > 0:
            return valor
def recebeEntrada(string1, string2):
    """Ask the yes/no question *string1*; on 's' prompt for a cost with
    *string2* and return it, on 'n' return 0.  Repeats on other answers.
    """
    while True:
        resposta = input(string1)
        if resposta == 's':
            return validaEntrada(string2)
        if resposta == 'n':
            return 0
        print("entre com \'s\' para sim e \'n\' para não.")
def main():
    """Interactively gather a photographer's costs and print a suggested
    price (in R$) for the photo session described by the answers."""
    inputConfig = '0'
    while inputConfig != 's' and inputConfig != 'n':
        inputConfig = input("Usar as configurações de equipamento, investimento e despesas com ferramentas de edição salvas?(s/n) ") #this client's saved settings; in the future, store them in a database
    if inputConfig == 's':
        custoMaterial = 9700
        investimentoProfissional = 250
        outrasDespesas = 40 #monthly cost of the Photoshop + Lightroom subscriptions.
        custoMarketing = 0
    elif inputConfig == 'n':
        custoMaterial = validaEntrada("Qual o valor aproximado do seu equipamento?(Camêras, Lentes, Flashes, Tripés, etc): ")
        investimentoProfissional = validaEntrada("Quanto já investiu/investe em cursos e/ou workshops profissionalizantes? ")
        custoMarketing = recebeEntrada("Investe em marketing e/ou atração de clientes?(Anúncios pagos, Hospedagem de site, Impulsionamento em mídias socias): ", "Qual a sua despesa mensal com marketing? ")
    '''
    #temporariamente desabilitado por falta de uso.
    custoAluguel = recebeEntrada("Salão comercial?(s/n) ", "Qual o valor do aluguel? ")
    if custoAluguel > 0:
        despesasMensais = validaEntrada("E as despesas mensais do estabelecimento?(Água, Luz, Internet, Telefone): ")
    else:
        despesasMensais = 0
    custoFuncionarios = recebeEntrada("Possui funcionários?(s/n) ", "Qual a sua despesa mensal com funcionários? ")
    '''
    mediaTrabalhosMes = validaEntrada("Quantos trabalhos em média você prevê realizar esse mês? ")
    # Keeps the equipment-maintenance cost from being over-diluted, and the
    # divisor from being an invalid value.
    if mediaTrabalhosMes < 1:
        mediaTrabalhosMes = 1
    elif mediaTrabalhosMes > 10:
        mediaTrabalhosMes = 10
    horaTrabalho = validaEntrada("Qual o valor da sua hora de trabalho? ")
    nHoras = validaEntrada("Serão quantas horas de trabalho? ")
    nHoras *= 4 #the client spends on average 3h editing for each 1h of shooting.
    custoDeslocamento = recebeEntrada("Terá custo de deslocamento para o local ou aluguel de estúdio?(s/n) ", "Qual a sua despesa total com deslocamento ou aluguel do estúdio? ")
    # NOTE(review): this unconditionally overwrites the outrasDespesas = 40
    # set in the saved-settings branch above -- confirm whether this answer
    # should replace that value or be added to it.
    outrasDespesas = recebeEntrada("Possui alguma outra despesa não descrita acima?(s/n) ", "Qual o valor? ")
    orcamento = (custoMaterial/36) + (investimentoProfissional/mediaTrabalhosMes) + (custoMarketing/mediaTrabalhosMes) + (outrasDespesas/mediaTrabalhosMes)
    '''
    #temporariamente desabilitado por falta de uso.
    orcamento = orcamento + (custoFuncionarios/mediaTrabalhosMes) + (custoAluguel/mediaTrabalhosMes) + (despesasMensais/mediaTrabalhosMes)
    '''
    orcamento = ((orcamento/20) + horaTrabalho)*nHoras + custoDeslocamento
    print(f"O valor sugerido para este trabaho é de R$ {orcamento:.2f}")
    os.system("PAUSE") #keeps the cmd window open after showing the result.
main()
|
# Natural Language Toolkit: Recursive Descent Parser Application
#
# Copyright (C) 2001-2014 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
A graphical tool for exploring the recursive descent parser.
The recursive descent parser maintains a tree, which records the
structure of the portion of the text that has been parsed. It uses
CFG productions to expand the fringe of the tree, and matches its
leaves against the text. Initially, the tree contains the start
symbol ("S"). It is shown in the main canvas, to the right of the
list of available expansions.
The parser builds up a tree structure for the text using three
operations:
- "expand" uses a CFG production to add children to a node on the
fringe of the tree.
- "match" compares a leaf in the tree to a text token.
- "backtrack" returns the tree to its state before the most recent
expand or match operation.
The parser maintains a list of tree locations called a "frontier" to
remember which nodes have not yet been expanded and which leaves have
not yet been matched against the text. The leftmost frontier node is
shown in green, and the other frontier nodes are shown in blue. The
parser always performs expand and match operations on the leftmost
element of the frontier.
You can control the parser's operation by using the "expand," "match,"
and "backtrack" buttons; or you can use the "step" button to let the
parser automatically decide which operation to apply. The parser uses
the following rules to decide which operation to apply:
- If the leftmost frontier element is a token, try matching it.
- If the leftmost frontier element is a node, try expanding it with
the first untried expansion.
- Otherwise, backtrack.
The "expand" button applies the untried expansion whose CFG production
is listed earliest in the grammar. To manually choose which expansion
to apply, click on a CFG production from the list of available
expansions, on the left side of the main window.
The "autostep" button will let the parser continue applying
operations to the tree until it reaches a complete parse. You can
cancel an autostep in progress at any time by clicking on the
"autostep" button again.
Keyboard Shortcuts::
[Space]\t Perform the next expand, match, or backtrack operation
[a]\t Step through operations until the next complete parse
[e]\t Perform an expand operation
[m]\t Perform a match operation
[b]\t Perform a backtrack operation
[Delete]\t Reset the parser
[g]\t Show/hide available expansions list
[h]\t Help
[Ctrl-p]\t Print
[q]\t Quit
"""
import nltk.compat
import tkinter.font
from tkinter import (Listbox, IntVar, Button,
Frame, Label, Menu, Scrollbar, Tk)
from nltk.tree import Tree
from nltk.util import in_idle
from nltk.parse import SteppingRecursiveDescentParser
from nltk.draw.util import TextWidget, ShowText, CanvasFrame, EntryDialog
from nltk.draw import CFGEditor, TreeSegmentWidget, tree_to_treesegment
class RecursiveDescentApp(object):
"""
A graphical tool for exploring the recursive descent parser. The tool
displays the parser's tree and the remaining text, and allows the
user to control the parser's operation. In particular, the user
can expand subtrees on the frontier, match tokens on the frontier
against the text, and backtrack. A "step" button simply steps
through the parsing process, performing the operations that
``RecursiveDescentParser`` would use.
"""
def __init__(self, grammar, sent, trace=0):
    """Build the GUI and prime the stepping parser.

    :param grammar: the CFG used to parse ``sent``
    :param sent: list of tokens - the text to be parsed
    :param trace: int - trace level forwarded to SteppingRecursiveDescentParser
    """
    self._sent = sent
    self._parser = SteppingRecursiveDescentParser(grammar, trace)
    # Set up the main window.
    self._top = Tk()
    self._top.title('Recursive Descent Parser Application')
    # Set up key bindings.
    self._init_bindings()
    # Initialize the fonts.
    self._init_fonts(self._top)
    # Animations.  animating_lock is a lock to prevent the demo
    # from performing new operations while it's animating.
    self._animation_frames = IntVar(self._top)
    self._animation_frames.set(5)
    self._animating_lock = 0
    self._autostep = 0
    # The user can hide the grammar.
    self._show_grammar = IntVar(self._top)
    self._show_grammar.set(1)
    # Create the basic frames.
    self._init_menubar(self._top)
    self._init_buttons(self._top)
    self._init_feedback(self._top)
    self._init_grammar(self._top)
    self._init_canvas(self._top)
    # Initialize the parser.
    self._parser.initialize(self._sent)
    # Redraw whenever the canvas is resized.
    self._canvas.bind('<Configure>', self._configure)
#########################################
## Initialization Helpers
#########################################
def _init_fonts(self, root):
    """Create the fonts used by the app, sized from the system default."""
    # See: <http://www.astro.washington.edu/owen/ROTKFolklore.html>
    self._sysfont = tkinter.font.Font(font=Button()["font"])
    root.option_add("*Font", self._sysfont)
    # Track the current font size (defaults to the system font's size).
    self._size = IntVar(root)
    self._size.set(self._sysfont.cget('size'))
    base = self._size.get()
    self._boldfont = tkinter.font.Font(family='helvetica', weight='bold',
                                       size=base)
    self._font = tkinter.font.Font(family='helvetica', size=base)
    # Negative sizes are pixel-based, so "bigger" means a larger magnitude.
    big = base - 2 if base < 0 else base + 2
    self._bigfont = tkinter.font.Font(family='helvetica', weight='bold',
                                      size=big)
def _init_grammar(self, parent):
    """Build the left-hand panel listing the grammar's productions."""
    frame = self._prodframe = Frame(parent)
    frame.pack(fill='both', side='left', padx=2)
    self._prodlist_label = Label(
        frame, font=self._boldfont, text='Available Expansions')
    self._prodlist_label.pack()
    self._prodlist = Listbox(
        frame, selectmode='single', relief='groove',
        background='white', foreground='#909090', font=self._font,
        selectforeground='#004040', selectbackground='#c0f0c0')
    self._prodlist.pack(side='right', fill='both', expand=1)
    self._productions = list(self._parser.grammar().productions())
    for production in self._productions:
        self._prodlist.insert('end', ' %s' % production)
    self._prodlist.config(height=min(len(self._productions), 25))
    # More than 25 productions won't fit; add a vertical scrollbar.
    if len(self._productions) > 25:
        scrollbar = Scrollbar(self._prodframe, orient='vertical')
        self._prodlist.config(yscrollcommand=scrollbar.set)
        scrollbar.config(command=self._prodlist.yview)
        scrollbar.pack(side='left', fill='y')
    # Clicking a production applies that expansion to the tree.
    self._prodlist.bind('<<ListboxSelect>>', self._prodlist_select)
def _init_bindings(self):
    """Attach the keyboard shortcuts to the top-level window."""
    shortcuts = [
        ('<Control-q>', self.destroy),
        ('<Control-x>', self.destroy),
        ('<Escape>', self.destroy),
        ('e', self.expand),
        ('m', self.match),
        ('<Alt-m>', self.match),
        ('<Control-m>', self.match),
        ('b', self.backtrack),
        ('<Alt-b>', self.backtrack),
        ('<Control-b>', self.backtrack),
        ('<Control-z>', self.backtrack),
        ('<BackSpace>', self.backtrack),
        ('a', self.autostep),
        ('<Control-space>', self.autostep),
        ('<Control-c>', self.cancel_autostep),
        ('<space>', self.step),
        ('<Delete>', self.reset),
        ('<Control-p>', self.postscript),
        ('<Control-h>', self.help),
        ('<F1>', self.help),
        ('<Control-g>', self.edit_grammar),
        ('<Control-t>', self.edit_sentence),
    ]
    for sequence, handler in shortcuts:
        self._top.bind(sequence, handler)
def _init_buttons(self, parent):
    """Build the row of parser-control buttons along the bottom."""
    self._buttonframe = buttonframe = Frame(parent)
    buttonframe.pack(fill='none', side='bottom', padx=3, pady=2)
    # (label, background color, callback, extra Button options)
    specs = [
        ('Step', '#90c0d0', self.step, {}),
        ('Autostep', '#90c0d0', self.autostep, {}),
        ('Expand', '#90f090', self.expand, {'underline': 0}),
        ('Match', '#90f090', self.match, {'underline': 0}),
        ('Backtrack', '#f0a0a0', self.backtrack, {'underline': 0}),
    ]
    for text, color, command, extra in specs:
        Button(buttonframe, text=text, background=color,
               foreground='black', command=command,
               **extra).pack(side='left')
def _configure(self, event):
self._autostep = 0
(x1, y1, x2, y2) = self._cframe.scrollregion()
y2 = event.height - 6
self._canvas['scrollregion'] = '%d %d %d %d' % (x1,y1,x2,y2)
self._redraw()
def _init_feedback(self, parent):
    """Build the "Last Operation" status strip along the bottom."""
    frame = self._feedbackframe = Frame(parent)
    frame.pack(fill='x', side='bottom', padx=3, pady=3)
    self._lastoper_label = Label(
        frame, text='Last Operation:', font=self._font)
    self._lastoper_label.pack(side='left')
    operframe = Frame(frame, relief='sunken', border=1)
    operframe.pack(fill='x', side='right', expand=1, padx=5)
    self._lastoper1 = Label(
        operframe, foreground='#007070', background='#f0f0f0',
        font=self._font)
    self._lastoper2 = Label(
        operframe, anchor='w', width=30, foreground='#004040',
        background='#f0f0f0', font=self._font)
    self._lastoper1.pack(side='left')
    self._lastoper2.pack(side='left', fill='x', expand=1)
def _init_canvas(self, parent):
    """Create the scrollable canvas that displays the parse tree."""
    self._cframe = CanvasFrame(parent, background='white',
                               closeenough=10, border=2,
                               relief='sunken')
    self._cframe.pack(expand=1, fill='both', side='top', pady=2)
    self._canvas = self._cframe.canvas()
    # Nothing has been drawn yet: no tree, no token widgets, no divider.
    self._tree = None
    self._textwidgets = []
    self._textline = None
def _init_menubar(self, parent):
    """Build the File/Edit/Apply/View/Animate/Help menu bar."""
    menubar = Menu(parent)
    # File: reset the parser, print, exit.
    filemenu = Menu(menubar, tearoff=0)
    filemenu.add_command(label='Reset Parser', underline=0,
                         command=self.reset, accelerator='Del')
    filemenu.add_command(label='Print to Postscript', underline=0,
                         command=self.postscript, accelerator='Ctrl-p')
    filemenu.add_command(label='Exit', underline=1,
                         command=self.destroy, accelerator='Ctrl-x')
    menubar.add_cascade(label='File', underline=0, menu=filemenu)
    # Edit: change the grammar or the sentence being parsed.
    editmenu = Menu(menubar, tearoff=0)
    editmenu.add_command(label='Edit Grammar', underline=5,
                         command=self.edit_grammar,
                         accelerator='Ctrl-g')
    editmenu.add_command(label='Edit Text', underline=5,
                         command=self.edit_sentence,
                         accelerator='Ctrl-t')
    menubar.add_cascade(label='Edit', underline=0, menu=editmenu)
    # Apply: the parser operations (step / match / expand / backtrack).
    rulemenu = Menu(menubar, tearoff=0)
    rulemenu.add_command(label='Step', underline=1,
                         command=self.step, accelerator='Space')
    rulemenu.add_separator()
    rulemenu.add_command(label='Match', underline=0,
                         command=self.match, accelerator='Ctrl-m')
    rulemenu.add_command(label='Expand', underline=0,
                         command=self.expand, accelerator='Ctrl-e')
    rulemenu.add_separator()
    rulemenu.add_command(label='Backtrack', underline=0,
                         command=self.backtrack, accelerator='Ctrl-b')
    menubar.add_cascade(label='Apply', underline=0, menu=rulemenu)
    # View: toggle the production list; pick the display font size.
    viewmenu = Menu(menubar, tearoff=0)
    viewmenu.add_checkbutton(label="Show Grammar", underline=0,
                             variable=self._show_grammar,
                             command=self._toggle_grammar)
    viewmenu.add_separator()
    viewmenu.add_radiobutton(label='Tiny', variable=self._size,
                             underline=0, value=10, command=self.resize)
    viewmenu.add_radiobutton(label='Small', variable=self._size,
                             underline=0, value=12, command=self.resize)
    viewmenu.add_radiobutton(label='Medium', variable=self._size,
                             underline=0, value=14, command=self.resize)
    viewmenu.add_radiobutton(label='Large', variable=self._size,
                             underline=0, value=18, command=self.resize)
    viewmenu.add_radiobutton(label='Huge', variable=self._size,
                             underline=0, value=24, command=self.resize)
    menubar.add_cascade(label='View', underline=0, menu=viewmenu)
    # Animate: choose the animation frame count (0 disables animation).
    animatemenu = Menu(menubar, tearoff=0)
    animatemenu.add_radiobutton(label="No Animation", underline=0,
                                variable=self._animation_frames,
                                value=0)
    animatemenu.add_radiobutton(label="Slow Animation", underline=0,
                                variable=self._animation_frames,
                                value=10, accelerator='-')
    animatemenu.add_radiobutton(label="Normal Animation", underline=0,
                                variable=self._animation_frames,
                                value=5, accelerator='=')
    animatemenu.add_radiobutton(label="Fast Animation", underline=0,
                                variable=self._animation_frames,
                                value=2, accelerator='+')
    menubar.add_cascade(label="Animate", underline=1, menu=animatemenu)
    # Help: about box and the instructions text.
    helpmenu = Menu(menubar, tearoff=0)
    helpmenu.add_command(label='About', underline=0,
                         command=self.about)
    helpmenu.add_command(label='Instructions', underline=0,
                         command=self.help, accelerator='F1')
    menubar.add_cascade(label='Help', underline=0, menu=helpmenu)
    parent.config(menu=menubar)
#########################################
## Helper
#########################################
def _get(self, widget, treeloc):
    """Follow *treeloc* (a sequence of child indices) down from *widget*
    and return the widget found there; for a tree segment, return its
    label widget instead."""
    node = widget
    for index in treeloc:
        node = node.subtrees()[index]
    return node.label() if isinstance(node, TreeSegmentWidget) else node
#########################################
## Main draw procedure
#########################################
def _redraw(self):
    """Rebuild the whole canvas: the parse tree, the token row, the
    separator line, and all highlighting."""
    canvas = self._canvas
    # Delete the old tree, widgets, etc.
    if self._tree is not None:
        self._cframe.destroy_widget(self._tree)
    for twidget in self._textwidgets:
        self._cframe.destroy_widget(twidget)
    if self._textline is not None:
        self._canvas.delete(self._textline)
    # Draw the tree.
    helv = ('helvetica', -self._size.get())
    bold = ('helvetica', -self._size.get(), 'bold')
    attribs = {'tree_color': '#000000', 'tree_width': 2,
               'node_font': bold, 'leaf_font': helv,}
    tree = self._parser.tree()
    self._tree = tree_to_treesegment(canvas, tree, **attribs)
    self._cframe.add_widget(self._tree, 30, 5)
    # Draw the text.
    helv = ('helvetica', -self._size.get())
    bottom = y = self._cframe.scrollregion()[3]
    self._textwidgets = [TextWidget(canvas, word, font=self._font)
                         for word in self._sent]
    for twidget in self._textwidgets:
        self._cframe.add_widget(twidget, 0, 0)
        # Anchor each token near the bottom edge, tracking the highest top.
        twidget.move(0, bottom-twidget.bbox()[3]-5)
        y = min(y, twidget.bbox()[1])
    # Draw a line over the text, to separate it from the tree.
    self._textline = canvas.create_line(-5000, y-5, 5000, y-5, dash='.')
    # Highlight appropriate nodes.
    self._highlight_nodes()
    self._highlight_prodlist()
    # Make sure the text lines up.
    self._position_text()
def _redraw_quick(self):
    """Cheap refresh: recolor highlights and realign the text without
    rebuilding any widgets.

    This should be more-or-less sufficient after an animation.
    """
    self._highlight_nodes()
    self._highlight_prodlist()
    self._position_text()
def _highlight_nodes(self):
# Highlight the list of nodes to be checked.
bold = ('helvetica', -self._size.get(), 'bold')
for treeloc in self._parser.frontier()[:1]:
self._get(self._tree, treeloc)['color'] = '#20a050'
self._get(self._tree, treeloc)['font'] = bold
for treeloc in self._parser.frontier()[1:]:
self._get(self._tree, treeloc)['color'] = '#008080'
def _highlight_prodlist(self):
# Highlight the productions that can be expanded.
# Boy, too bad tkinter doesn't implement Listbox.itemconfig;
# that would be pretty useful here.
self._prodlist.delete(0, 'end')
expandable = self._parser.expandable_productions()
untried = self._parser.untried_expandable_productions()
productions = self._productions
for index in range(len(productions)):
if productions[index] in expandable:
if productions[index] in untried:
self._prodlist.insert(index, ' %s' % productions[index])
else:
self._prodlist.insert(index, ' %s (TRIED)' %
productions[index])
self._prodlist.selection_set(index)
else:
self._prodlist.insert(index, ' %s' % productions[index])
    def _position_text(self):
        """Align the sentence text with the tree: matched words line up
        under their leaves (dark green), unmatched words are greyed out to
        the right; on a complete parse everything turns bright green and
        the matched leaves are pulled down to the text line."""
        # Line up the text widgets that are matched against the tree
        numwords = len(self._sent)
        num_matched = numwords - len(self._parser.remaining_text())
        leaves = self._tree_leaves()[:num_matched]
        xmax = self._tree.bbox()[0]
        for i in range(0, len(leaves)):
            widget = self._textwidgets[i]
            leaf = leaves[i]
            widget['color'] = '#006040'
            leaf['color'] = '#006040'
            widget.move(leaf.bbox()[0] - widget.bbox()[0], 0)
            xmax = widget.bbox()[2] + 10
        # Line up the text widgets that are not matched against the tree.
        for i in range(len(leaves), numwords):
            widget = self._textwidgets[i]
            widget['color'] = '#a0a0a0'
            widget.move(xmax - widget.bbox()[0], 0)
            xmax = widget.bbox()[2] + 10
        # If we have a complete parse, make everything green :)
        if self._parser.currently_complete():
            for twidget in self._textwidgets:
                twidget['color'] = '#00a000'
        # Move the matched leaves down to the text.
        for i in range(0, len(leaves)):
            widget = self._textwidgets[i]
            leaf = leaves[i]
            dy = widget.bbox()[1] - leaf.bbox()[3] - 10.0
            dy = max(dy, leaf.parent().label().bbox()[3] - leaf.bbox()[3] + 10)
            leaf.move(0, dy)
def _tree_leaves(self, tree=None):
if tree is None: tree = self._tree
if isinstance(tree, TreeSegmentWidget):
leaves = []
for child in tree.subtrees(): leaves += self._tree_leaves(child)
return leaves
else:
return [tree]
#########################################
## Button Callbacks
#########################################
def destroy(self, *e):
self._autostep = 0
if self._top is None: return
self._top.destroy()
self._top = None
    def reset(self, *e):
        """Re-initialize the parser on the current sentence and redraw
        everything from scratch."""
        self._autostep = 0
        self._parser.initialize(self._sent)
        self._lastoper1['text'] = 'Reset Application'
        self._lastoper2['text'] = ''
        self._redraw()
def autostep(self, *e):
if self._animation_frames.get() == 0:
self._animation_frames.set(2)
if self._autostep:
self._autostep = 0
else:
self._autostep = 1
self._step()
    def cancel_autostep(self, *e):
        """Turn auto-stepping off without taking any further steps."""
        #self._autostep_button['text'] = 'Autostep'
        self._autostep = 0
# Make sure to stop auto-stepping if we get any user input.
def step(self, *e): self._autostep = 0; self._step()
def match(self, *e): self._autostep = 0; self._match()
def expand(self, *e): self._autostep = 0; self._expand()
def backtrack(self, *e): self._autostep = 0; self._backtrack()
    def _step(self):
        """Perform one parser operation, preferring expand, then match,
        then backtrack; report 'Finished' when none applies, and flag a
        complete parse."""
        if self._animating_lock: return
        # Try expanding, matching, and backtracking (in that order)
        if self._expand(): pass
        elif self._parser.untried_match() and self._match(): pass
        elif self._backtrack(): pass
        else:
            self._lastoper1['text'] = 'Finished'
            self._lastoper2['text'] = ''
            self._autostep = 0
        # Check if we just completed a parse.
        if self._parser.currently_complete():
            self._autostep = 0
            self._lastoper2['text'] += ' [COMPLETE PARSE]'
    def _expand(self, *e):
        """Expand the first frontier node with the parser's next untried
        production, animating the expansion.  Returns True on success,
        False when every expansion has been tried."""
        if self._animating_lock: return
        old_frontier = self._parser.frontier()
        rv = self._parser.expand()
        if rv is not None:
            self._lastoper1['text'] = 'Expand:'
            self._lastoper2['text'] = rv
            self._prodlist.selection_clear(0, 'end')
            index = self._productions.index(rv)
            self._prodlist.selection_set(index)
            self._animate_expand(old_frontier[0])
            return True
        else:
            self._lastoper1['text'] = 'Expand:'
            self._lastoper2['text'] = '(all expansions tried)'
            return False
    def _match(self, *e):
        """Match the first frontier leaf against the next input word,
        animating the match.  Returns True on success, False on failure."""
        if self._animating_lock: return
        old_frontier = self._parser.frontier()
        rv = self._parser.match()
        if rv is not None:
            self._lastoper1['text'] = 'Match:'
            self._lastoper2['text'] = rv
            self._animate_match(old_frontier[0])
            return True
        else:
            self._lastoper1['text'] = 'Match:'
            self._lastoper2['text'] = '(failed)'
            return False
    def _backtrack(self, *e):
        """Undo the parser's most recent operation, choosing the tree or
        leaf backtrack animation depending on what was removed.  Returns
        True on success, False when there is nothing left to undo."""
        if self._animating_lock: return
        if self._parser.backtrack():
            # Walk down to the element at the (new) first frontier location
            # to decide which animation applies.
            elt = self._parser.tree()
            for i in self._parser.frontier()[0]:
                elt = elt[i]
            self._lastoper1['text'] = 'Backtrack'
            self._lastoper2['text'] = ''
            if isinstance(elt, Tree):
                self._animate_backtrack(self._parser.frontier()[0])
            else:
                self._animate_match_backtrack(self._parser.frontier()[0])
            return True
        else:
            self._autostep = 0
            self._lastoper1['text'] = 'Finished'
            self._lastoper2['text'] = ''
            return False
def about(self, *e):
ABOUT = ("NLTK Recursive Descent Parser Application\n"+
"Written by Edward Loper")
TITLE = 'About: Recursive Descent Parser Application'
try:
from tkinter.messagebox import Message
Message(message=ABOUT, title=TITLE).show()
except:
ShowText(self._top, TITLE, ABOUT)
def help(self, *e):
self._autostep = 0
# The default font's not very legible; try using 'fixed' instead.
try:
ShowText(self._top, 'Help: Recursive Descent Parser Application',
(__doc__ or '').strip(), width=75, font='fixed')
except:
ShowText(self._top, 'Help: Recursive Descent Parser Application',
(__doc__ or '').strip(), width=75)
    def postscript(self, *e):
        """Stop auto-stepping and print the canvas to a PostScript file."""
        self._autostep = 0
        self._cframe.print_to_file()
    def mainloop(self, *args, **kwargs):
        """
        Enter the Tkinter mainloop.  This function must be called if
        this demo is created from a non-interactive program (e.g.
        from a script); otherwise, the demo will close as soon as
        the script completes.
        """
        # Skip the mainloop when running under IDLE, which has its own.
        if in_idle(): return
        self._top.mainloop(*args, **kwargs)
def resize(self, size=None):
if size is not None: self._size.set(size)
size = self._size.get()
self._font.configure(size=-(abs(size)))
self._boldfont.configure(size=-(abs(size)))
self._sysfont.configure(size=-(abs(size)))
self._bigfont.configure(size=-(abs(size+2)))
self._redraw()
#########################################
## Expand Production Selection
#########################################
    def _toggle_grammar(self, *e):
        """Show or hide the production-list frame according to the
        _show_grammar checkbox variable."""
        if self._show_grammar.get():
            self._prodframe.pack(fill='both', side='left', padx=2,
                                 after=self._feedbackframe)
            self._lastoper1['text'] = 'Show Grammar'
        else:
            self._prodframe.pack_forget()
            self._lastoper1['text'] = 'Hide Grammar'
        self._lastoper2['text'] = ''
# def toggle_grammar(self, *e):
# self._show_grammar = not self._show_grammar
# if self._show_grammar:
# self._prodframe.pack(fill='both', expand='y', side='left',
# after=self._feedbackframe)
# self._lastoper1['text'] = 'Show Grammar'
# else:
# self._prodframe.pack_forget()
# self._lastoper1['text'] = 'Hide Grammar'
# self._lastoper2['text'] = ''
    def _prodlist_select(self, event):
        """Expand the frontier node using the production the user selected
        in the listbox; if that production is not applicable, restore the
        selection to the currently expandable productions."""
        selection = self._prodlist.curselection()
        if len(selection) != 1: return
        index = int(selection[0])
        old_frontier = self._parser.frontier()
        production = self._parser.expand(self._productions[index])
        if production:
            self._lastoper1['text'] = 'Expand:'
            self._lastoper2['text'] = production
            self._prodlist.selection_clear(0, 'end')
            self._prodlist.selection_set(index)
            self._animate_expand(old_frontier[0])
        else:
            # Reset the production selections.
            self._prodlist.selection_clear(0, 'end')
            for prod in self._parser.expandable_productions():
                index = self._productions.index(prod)
                self._prodlist.selection_set(index)
#########################################
## Animation
#########################################
    def _animate_expand(self, treeloc):
        """Replace the node at *treeloc* with its new expansion, drawn in
        white, position it over the old node, then fade it in via
        _animate_expand_frame."""
        oldwidget = self._get(self._tree, treeloc)
        oldtree = oldwidget.parent()
        # 'top' is true when the expanded node is the root of the display.
        top = not isinstance(oldtree.parent(), TreeSegmentWidget)
        tree = self._parser.tree()
        for i in treeloc:
            tree = tree[i]
        widget = tree_to_treesegment(self._canvas, tree,
                                     node_font=self._boldfont,
                                     leaf_color='white',
                                     tree_width=2, tree_color='white',
                                     node_color='white',
                                     leaf_font=self._font)
        widget.label()['color'] = '#20a050'
        # Align the new widget's label with the old one.
        (oldx, oldy) = oldtree.label().bbox()[:2]
        (newx, newy) = widget.label().bbox()[:2]
        widget.move(oldx-newx, oldy-newy)
        if top:
            self._cframe.add_widget(widget, 0, 5)
            widget.move(30-widget.label().bbox()[0], 0)
            self._tree = widget
        else:
            oldtree.parent().replace_child(oldtree, widget)
        # Move the children over so they don't overlap.
        # Line the children up in a strange way.
        if widget.subtrees():
            dx = (oldx + widget.label().width()/2 -
                  widget.subtrees()[0].bbox()[0]/2 -
                  widget.subtrees()[0].bbox()[2]/2)
            for subtree in widget.subtrees(): subtree.move(dx, 0)
        self._makeroom(widget)
        if top:
            self._cframe.destroy_widget(oldtree)
        else:
            oldtree.destroy()
        # Grayscale fade-in sequence, lightest first.
        colors = ['gray%d' % (10*int(10*x/self._animation_frames.get()))
                  for x in range(self._animation_frames.get(),0,-1)]
        # Move the text string down, if necessary.
        dy = widget.bbox()[3] + 30 - self._canvas.coords(self._textline)[1]
        if dy > 0:
            for twidget in self._textwidgets: twidget.move(0, dy)
            self._canvas.move(self._textline, 0, dy)
        self._animate_expand_frame(widget, colors)
    def _makeroom(self, treeseg):
        """
        Make sure that no sibling tree bbox's overlap, shifting the right
        siblings (or *treeseg* itself) apart and recursing up toward the
        root.
        """
        parent = treeseg.parent()
        if not isinstance(parent, TreeSegmentWidget): return
        index = parent.subtrees().index(treeseg)
        # Handle siblings to the right
        rsiblings = parent.subtrees()[index+1:]
        if rsiblings:
            dx = treeseg.bbox()[2] - rsiblings[0].bbox()[0] + 10
            for sibling in rsiblings: sibling.move(dx, 0)
        # Handle siblings to the left
        if index > 0:
            lsibling = parent.subtrees()[index-1]
            dx = max(0, lsibling.bbox()[2] - treeseg.bbox()[0] + 10)
            treeseg.move(dx, 0)
        # Keep working up the tree.
        self._makeroom(parent)
    def _animate_expand_frame(self, widget, colors):
        """Show one fade-in frame: paint *widget* and its children with
        colors[0] and schedule the next frame in 50ms; when the color list
        is exhausted, finish in black and release the animation lock."""
        if len(colors) > 0:
            self._animating_lock = 1
            widget['color'] = colors[0]
            for subtree in widget.subtrees():
                if isinstance(subtree, TreeSegmentWidget):
                    subtree.label()['color'] = colors[0]
                else:
                    subtree['color'] = colors[0]
            self._top.after(50, self._animate_expand_frame,
                            widget, colors[1:])
        else:
            widget['color'] = 'black'
            for subtree in widget.subtrees():
                if isinstance(subtree, TreeSegmentWidget):
                    subtree.label()['color'] = 'black'
                else:
                    subtree['color'] = 'black'
            self._redraw_quick()
            widget.label()['color'] = 'black'
            self._animating_lock = 0
            if self._autostep: self._step()
    def _animate_backtrack(self, treeloc):
        """Animate removal of the subtree at *treeloc*: flash red, then
        fade to light gray before deletion (see _animate_backtrack_frame)."""
        # Flash red first, if we're animating.
        if self._animation_frames.get() == 0: colors = []
        else: colors = ['#a00000', '#000000', '#a00000']
        colors += ['gray%d' % (10*int(10*x/(self._animation_frames.get())))
                   for x in range(1, self._animation_frames.get()+1)]
        # Collect the parent node plus all of its children to recolor.
        widgets = [self._get(self._tree, treeloc).parent()]
        for subtree in widgets[0].subtrees():
            if isinstance(subtree, TreeSegmentWidget):
                widgets.append(subtree.label())
            else:
                widgets.append(subtree)
        self._animate_backtrack_frame(widgets, colors)
    def _animate_backtrack_frame(self, widgets, colors):
        """Show one fade-out frame every 50ms; when the colors run out,
        destroy the children of the backtracked node and release the
        animation lock."""
        if len(colors) > 0:
            self._animating_lock = 1
            for widget in widgets: widget['color'] = colors[0]
            self._top.after(50, self._animate_backtrack_frame,
                            widgets, colors[1:])
        else:
            # widgets[0] is the parent node; remove and destroy its children.
            for widget in widgets[0].subtrees():
                widgets[0].remove_child(widget)
                widget.destroy()
            self._redraw_quick()
            self._animating_lock = 0
            if self._autostep: self._step()
    def _animate_match_backtrack(self, treeloc):
        """Animate un-matching the leaf at *treeloc*: slide it back up
        toward its parent node, then remove it."""
        widget = self._get(self._tree, treeloc)
        node = widget.parent().label()
        # Per-frame vertical displacement (total distance / frame count).
        dy = (1.0 * (node.bbox()[3] - widget.bbox()[1] + 14) /
              max(1, self._animation_frames.get()))
        self._animate_match_backtrack_frame(self._animation_frames.get(),
                                            widget, dy)
    def _animate_match(self, treeloc):
        """Animate a successful match: slide the leaf at *treeloc* down to
        the input text."""
        widget = self._get(self._tree, treeloc)
        # Per-frame vertical displacement (total distance / frame count).
        dy = ((self._textwidgets[0].bbox()[1] - widget.bbox()[3] - 10.0) /
              max(1, self._animation_frames.get()))
        self._animate_match_frame(self._animation_frames.get(), widget, dy)
    def _animate_match_frame(self, frame, widget, dy):
        """Move *widget* down by *dy* each frame (10ms apart); on the last
        frame color it as matched and release the animation lock."""
        if frame > 0:
            self._animating_lock = 1
            widget.move(0, dy)
            self._top.after(10, self._animate_match_frame,
                            frame-1, widget, dy)
        else:
            widget['color'] = '#006040'
            self._redraw_quick()
            self._animating_lock = 0
            if self._autostep: self._step()
    def _animate_match_backtrack_frame(self, frame, widget, dy):
        """Move *widget* by *dy* each frame (10ms apart); on the last
        frame detach and destroy it, then release the animation lock."""
        if frame > 0:
            self._animating_lock = 1
            widget.move(0, dy)
            self._top.after(10, self._animate_match_backtrack_frame,
                            frame-1, widget, dy)
        else:
            widget.parent().remove_child(widget)
            widget.destroy()
            self._animating_lock = 0
            if self._autostep: self._step()
    def edit_grammar(self, *e):
        """Open a CFGEditor on the parser's grammar; set_grammar receives
        the edited grammar."""
        CFGEditor(self._top, self._parser.grammar(), self.set_grammar)
    def set_grammar(self, grammar):
        """Install *grammar* in the parser and repopulate the production
        listbox."""
        self._parser.set_grammar(grammar)
        self._productions = list(grammar.productions())
        self._prodlist.delete(0, 'end')
        for production in self._productions:
            self._prodlist.insert('end', (' %s' % production))
    def edit_sentence(self, *e):
        """Prompt the user for a new sentence; set_sentence receives the
        result."""
        sentence = " ".join(self._sent)
        title = 'Edit Text'
        instr = 'Enter a new sentence to parse.'
        EntryDialog(self._top, sentence, instr, self.set_sentence, title)
    def set_sentence(self, sentence):
        """Split *sentence* on whitespace and restart the parser on it."""
        self._sent = sentence.split() #[XX] use tagged?
        self.reset()
def app():
    """
    Create a recursive descent parser demo, using a simple grammar and
    text.
    """
    from nltk.grammar import CFG
    grammar = CFG.fromstring("""
    # Grammatical productions.
        S -> NP VP
        NP -> Det N PP | Det N
        VP -> V NP PP | V NP | V
        PP -> P NP
    # Lexical productions.
        NP -> 'I'
        Det -> 'the' | 'a'
        N -> 'man' | 'park' | 'dog' | 'telescope'
        V -> 'ate' | 'saw'
        P -> 'in' | 'under' | 'with'
    """)
    sent = 'the dog saw a man in the park'.split()
    RecursiveDescentApp(grammar, sent).mainloop()
# Run the demo when executed as a script.
if __name__ == '__main__':
    app()
__all__ = ['app']
|
import gzip
from common.utils import *
from common.metrics import *
from aad.aad_base import *
from aad.query_model import *
from aad.aad_loss import *
from aad.forest_aad_detector import *
from aad.loda_aad import *
from aad.precomputed_aad import *
def get_aad_model(x, opts, random_state=None):
    """Construct an AAD detector according to opts.detector_type.

    :param x: data matrix; only x.shape[0] is used (to cap subsample size)
    :param opts: options object (AadOpts-style) supplying detector settings
    :param random_state: random state forwarded to the detector
    :return: the constructed detector model
    :raises ValueError: if opts.detector_type is not supported
    """
    if opts.detector_type == LODA:
        model = AadLoda(sparsity=opts.sparsity, mink=opts.mink, maxk=opts.maxk)
    elif is_forest_detector(opts.detector_type):
        # Cap max_samples at the dataset size so subsampling never exceeds it.
        model = AadForest(n_estimators=opts.forest_n_trees,
                          max_samples=min(opts.forest_n_samples, x.shape[0]),
                          score_type=opts.forest_score_type, random_state=random_state,
                          add_leaf_nodes_only=opts.forest_add_leaf_nodes_only,
                          max_depth=opts.forest_max_depth,
                          ensemble_score=opts.ensemble_score,
                          detector_type=opts.detector_type, n_jobs=opts.n_jobs,
                          tree_update_type=opts.tree_update_type,
                          forest_replace_frac=opts.forest_replace_frac,
                          feature_partitions=opts.feature_partitions)
    elif opts.detector_type == PRECOMPUTED_SCORES:
        model = AadPrecomputed(opts, random_state=random_state)
    else:
        raise ValueError("Unsupported ensemble")
    return model
class SequentialResults(object):
    """Container for the results of a sequential (active-learning) anomaly
    detection run; all attributes are optional arrays."""
    def __init__(self, num_seen=None, num_not_seen=None, num_seen_baseline=None,
                 true_queried_indexes=None, true_queried_indexes_baseline=None,
                 stream_window=None, stream_window_baseline=None,
                 aucs=None):
        # cumulative #anomalies seen per query (AAD)
        self.num_seen = num_seen
        # cumulative #anomalies not seen per query
        self.num_not_seen = num_not_seen
        # cumulative #anomalies seen per query for the unsupervised baseline
        self.num_seen_baseline = num_seen_baseline
        # queried instance indexes (1-relative; AAD ordering)
        self.true_queried_indexes = true_queried_indexes
        # queried instance indexes (1-relative; baseline ordering)
        self.true_queried_indexes_baseline = true_queried_indexes_baseline
        # stream-window information, if running in streaming mode
        self.stream_window = stream_window
        self.stream_window_baseline = stream_window_baseline
        # per-iteration AUCs, if computed
        self.aucs = aucs
def summarize_aad_metrics(ensembles, metrics_struct):
    """Aggregate per-file/per-rerun AAD metrics into a SequentialResults.

    Each output row is [fid, runidx, <nqueried values>]; queried indexes
    in the result are 1-relative (NOT 0-relative).
    """
    nqueried = len(metrics_struct.metrics[0][0].queried)
    num_seen = np.zeros(shape=(0, nqueried+2))
    num_seen_baseline = np.zeros(shape=(0, nqueried+2))
    true_queried_indexes = np.zeros(shape=(0, nqueried+2))
    true_queried_indexes_baseline = np.zeros(shape=(0, nqueried + 2))
    for i in range(len(metrics_struct.metrics)):
        # file level
        submetrics = metrics_struct.metrics[i]
        subensemble = ensembles[i]
        for j in range(len(submetrics)):
            # rerun level
            queried = submetrics[j].queried
            lbls = subensemble[j].labels
            # Columns 0-1 hold (fid, runidx); the rest hold the cumulative counts.
            nseen = np.zeros(shape=(1, nqueried+2))
            nseen[0, 0:2] = [metrics_struct.fids[i], metrics_struct.runidxs[j]]
            nseen[0, 2:(nseen.shape[1])] = np.cumsum(lbls[queried])
            num_seen = rbind(num_seen, nseen)
            qlbls = subensemble[j].labels[subensemble[j].ordered_anom_idxs[0:nqueried]]
            nseen = np.zeros(shape=(1, nqueried+2))
            nseen[0, 0:2] = [metrics_struct.fids[i], metrics_struct.runidxs[j]]
            nseen[0, 2:(nseen.shape[1])] = np.cumsum(qlbls)
            num_seen_baseline = rbind(num_seen_baseline, nseen)
            # the ensembles store samples in sorted order of default anomaly
            # scores. The corresponding indexes are stored in ensemble.original_indexes
            t_idx = np.zeros(shape=(1, nqueried + 2))
            t_idx[0, 0:2] = [metrics_struct.fids[i], metrics_struct.runidxs[j]]
            t_idx[0, 2:(t_idx.shape[1])] = subensemble[j].original_indexes[queried]
            # Note: make the queried indexes relative 1 (NOT zero)
            true_queried_indexes = rbind(true_queried_indexes, t_idx + 1)
            # the ensembles store samples in sorted order of default anomaly
            # scores. The corresponding indexes are stored in ensemble.original_indexes
            b_idx = np.zeros(shape=(1, nqueried + 2))
            b_idx[0, 0:2] = [metrics_struct.fids[i], metrics_struct.runidxs[j]]
            b_idx[0, 2:(b_idx.shape[1])] = subensemble[j].original_indexes[np.arange(nqueried)]
            # Note: make the queried indexes relative 1 (NOT zero)
            true_queried_indexes_baseline = rbind(true_queried_indexes_baseline, b_idx + 1)
    return SequentialResults(num_seen=num_seen, num_seen_baseline=num_seen_baseline,
                             true_queried_indexes=true_queried_indexes,
                             true_queried_indexes_baseline=true_queried_indexes_baseline)
def save_aad_summary(alad_summary, opts):
    """Persist the AAD metrics summary when opts.resultsdir names an
    existing directory; otherwise do nothing."""
    if opts.resultsdir != "" and os.path.isdir(opts.resultsdir):
        save(alad_summary, filepath=opts.get_metrics_summary_path())
def get_score_ranges(x, w):
    """Return [min, 10th..90th percentiles, max] of the scores x.dot(w)."""
    s = x.dot(w)
    qvals = [np.min(s)]
    qvals.extend(quantile(s, i * 10.0) for i in range(1, 10))
    qvals.append(np.max(s))
    return qvals
def get_linear_score_variance(x, w):
    """Return (score, variance) for one sparse row *x* under weights *w*.

    score is the sum of the per-feature contributions x_i * w_i over the
    nonzero features; variance is np.var of those contributions.
    """
    indxs = x.nonzero()[1]  # column indexes of the nonzero features
    x_ = x[0, indxs].todense()
    xw = np.array(x_) * w[indxs]
    # xw = x_.reshape(-1, 1) * w[indxs]
    # logger.debug("xw:\n%s" % str(list(xw)))
    #xw = np.array(x[0, indxs].multiply(w[indxs]))
    #xw_mean = xw.mean(axis=1)[0]
    #xw_sq = xw ** 2
    #var = xw_sq.mean(axis=1)[0] - xw_mean ** 2
    var = np.var(xw)
    score = np.sum(xw)
    if False:
        # Debug-only consistency check against the full dot product.
        s = x.dot(w)
        if s != score:
            logger.debug("size of x: %s" % str(x.shape))
            logger.debug("x_: %s" % str(list(x_)))
            logger.debug("w : %s" % str(list(w[indxs])))
            logger.debug("xw: %s" % str(list(xw)))
            raise ArithmeticError("s=%f != score=%f" % (s, score))
    return score, var
def get_closest_indexes(inst, test_set, num=1, dest_set=None):
    """Return the indexes of the *num* rows of *test_set* closest to
    *inst* under squared euclidean distance.

    Handles both dense and sparse row layouts.  If *dest_set* is given,
    the returned indexes are also added to that set (in place).
    """
    n = test_set.shape[0]
    dists = np.zeros(n)
    for i in np.arange(n):
        ts = test_set[i, :]
        if ts.shape[0] > 1:
            # dense matrix
            ts = matrix(ts, nrow=1)
            diff = inst - ts
            dist = np.sum(diff**2)
        else:
            # sparse matrix
            diff = inst - ts
            tmp = diff * diff.T
            if tmp.shape[0] != 1:
                raise ValueError("dot product is %s" % str(tmp.shape))
            dist = tmp[0, 0]
        dists[i] = dist
    ordered = np.argsort(dists)[np.arange(num)]
    if False:
        # Debug-only diagnostics.
        logger.debug("last ts:\n%s" % str(ts))
        logger.debug("last diff:\n%s" % str(diff))
        logger.debug("ordered indexes: %s" % str(list(ordered)))
        logger.debug("dists: %s" % str(list(dists[ordered])))
        # logger.debug("dists: %s" % str(list(dists)))
        logger.debug("inst:\n%s" % str(inst))
        logger.debug("points:\n%s" % str(test_set[ordered, :]))
        ts = test_set[ordered[1], :]
        ts = matrix(ts, nrow=1)
        logger.debug("dist 2:\n%s" % str(np.sum((inst - ts)**2)))
    if dest_set is not None:
        for indx in ordered:
            dest_set.add(indx)
    return ordered
def get_score_variances(x, w, n_test, ordered_indexes=None, queried_indexes=None,
                        test_indexes=None,
                        eval_set=None, n_closest=9):
    """Compute per-instance linear score means and variances.

    If *test_indexes* is None, the test set is the top *n_test* ranked
    unlabeled instances (ordered_indexes minus queried_indexes).  If
    *eval_set* is given, scores/variances are also computed for the
    eval-set rows closest to the test instances.

    :return: (means, vars, test indexes, eval variances or None,
        eval means or None)
    """
    if test_indexes is None:
        n_test = min(x.shape[0], n_test)
        top_ranked_indexes = ordered_indexes[np.arange(len(queried_indexes) + n_test)]
        tmp = np.array(SetList(top_ranked_indexes) - SetList(queried_indexes))
        test = tmp[np.arange(n_test)]
        # logger.debug("test:\n%s" % str(list(test)))
    else:
        test = test_indexes
        n_test = len(test)
    tm = Timer()
    vars = np.zeros(len(test))
    means = np.zeros(len(test))
    for i, idx in enumerate(test):
        means[i], vars[i] = get_linear_score_variance(x[idx], w)
    # logger.debug(tm.message("Time for score variance computation on test set:"))
    v_eval = None
    m_eval = None
    if eval_set is not None:
        tm = Timer()
        v_eval = np.zeros(eval_set.shape[0], dtype=float)
        m_eval = np.zeros(eval_set.shape[0], dtype=float)
        closest_indexes = set()  # all indexes from test_set that are closest to any unlabeled instances
        for i in range(n_test):
            test_index = test[i]
            get_closest_indexes(x[test_index, :], eval_set, num=n_closest, dest_set=closest_indexes)
        logger.debug("# Closest: %d" % len(closest_indexes))
        for i, idx in enumerate(closest_indexes):
            m_eval[idx], v_eval[idx] = get_linear_score_variance(eval_set[idx, :], w)
        logger.debug(tm.message("Time for score variance computation on eval set:"))
    return means, vars, test, v_eval, m_eval
def get_queried_indexes(scores, labels, opts):
    """Return (num_seen, queried) for a top-scores query strategy.

    :param scores: 1-d array of anomaly scores (higher = more anomalous)
    :param labels: 1-d array of 0/1 ground-truth labels
    :param opts: options object; only opts.budget (#queries) is read
    :return: (cumulative #anomalies seen per query, queried indexes)
    """
    # logger.debug("computing queried indexes...")
    queried = np.argsort(-scores)[0:opts.budget]
    # Index labels with 'queried' directly.  The original
    # labels[queried[np.arange(opts.budget)]] was redundant and raised an
    # IndexError whenever fewer than opts.budget instances were available.
    num_seen = np.cumsum(labels[queried])
    return num_seen, queried
def write_baseline_query_indexes(queried_info, opts):
    """Write baseline query indexes to '<prefix>-queried-baseline.csv'.

    queried_info is a list of (num_seen, queried_indexes) pairs as
    returned by get_queried_indexes; written indexes are 1-relative.
    """
    logger.debug("writing baseline queries...")
    queried = np.zeros(shape=(len(queried_info), opts.budget + 2), dtype=int)
    num_seen = np.zeros(shape=(len(queried_info), opts.budget + 2), dtype=int)
    for i, info in enumerate(queried_info):
        num_seen[i, 2:(opts.budget + 2)] = info[0]
        num_seen[i, 0] = 1
        queried[i, 2:(opts.budget + 2)] = info[1] + 1  # make indexes relative 1, *not* 0
        queried[i, 0] = 1
    prefix = opts.get_alad_metrics_name_prefix()
    baseline_file = os.path.join(opts.resultsdir, "%s-baseline.csv" % (prefix,))
    # np.savetxt(baseline_file, num_seen, fmt='%d', delimiter=',')
    queried_idxs_baseline_file = os.path.join(opts.resultsdir, "%s-queried-baseline.csv" % (prefix,))
    np.savetxt(queried_idxs_baseline_file, queried, fmt='%d', delimiter=',')
def write_sequential_results_to_csv(results, opts):
    """Write each populated field of a SequentialResults to its own CSV
    file under opts.resultsdir; fields that are None are skipped.

    :param results: SequentialResults
    :param opts: AadOpts
    :return: None
    """
    prefix = opts.get_alad_metrics_name_prefix()
    num_seen_file = os.path.join(opts.resultsdir, "%s-num_seen.csv" % (prefix,))
    num_not_seen_file = os.path.join(opts.resultsdir, "%s-num_not_seen.csv" % (prefix,))
    num_total_anoms_file = os.path.join(opts.resultsdir, "%s-num_total_anoms.csv" % (prefix,))
    baseline_file = os.path.join(opts.resultsdir, "%s-baseline.csv" % (prefix,))
    stream_window_file = os.path.join(opts.resultsdir, "%s-window.csv" % (prefix,))
    stream_window_baseline_file = os.path.join(opts.resultsdir, "%s-window-baseline.csv" % (prefix,))
    queried_idxs_file = os.path.join(opts.resultsdir, "%s-queried.csv" % (prefix,))
    queried_idxs_baseline_file = os.path.join(opts.resultsdir, "%s-queried-baseline.csv" % (prefix,))
    aucs_file = os.path.join(opts.resultsdir, "%s-aucs.csv" % (prefix,))
    if results.num_seen is not None:
        np.savetxt(num_seen_file, results.num_seen, fmt='%d', delimiter=',')
    if results.num_not_seen is not None:
        np.savetxt(num_not_seen_file, results.num_not_seen, fmt='%d', delimiter=',')
        # total anomalies = seen + not seen (first two id columns kept as-is)
        tmp = np.copy(results.num_seen)
        tmp[:, 2:tmp.shape[1]] += results.num_not_seen[:, 2:results.num_not_seen.shape[1]]
        np.savetxt(num_total_anoms_file, tmp, fmt='%d', delimiter=',')
    if results.num_seen_baseline is not None:
        np.savetxt(baseline_file, results.num_seen_baseline, fmt='%d', delimiter=',')
    if results.true_queried_indexes is not None:
        np.savetxt(queried_idxs_file, results.true_queried_indexes, fmt='%d', delimiter=',')
    if results.true_queried_indexes_baseline is not None:
        np.savetxt(queried_idxs_baseline_file, results.true_queried_indexes_baseline, fmt='%d', delimiter=',')
    if results.stream_window is not None:
        np.savetxt(stream_window_file, results.stream_window, fmt='%d', delimiter=',')
    if results.stream_window_baseline is not None:
        np.savetxt(stream_window_baseline_file, results.stream_window_baseline, fmt='%d', delimiter=',')
    if results.aucs is not None:
        np.savetxt(aucs_file, results.aucs, fmt='%f', delimiter=',')
def summarize_ensemble_num_seen(ensemble, metrics, fid=0, runidx=0):
    """
    Summarize one ensemble/metrics pair into four 1-row arrays of the form
    [fid, runidx, <nqueried values>]: anomalies seen (AAD and baseline)
    and queried indexes (AAD and baseline).

    IMPORTANT: returned queried_indexes and queried_indexes_baseline are 1-indexed (NOT 0-indexed)
    """
    nqueried = len(metrics.queried)
    num_seen = np.zeros(shape=(1, nqueried + 2))
    num_seen_baseline = np.zeros(shape=(1, nqueried + 2))
    num_seen[0, 0:2] = [fid, runidx]
    num_seen[0, 2:(num_seen.shape[1])] = np.cumsum(ensemble.labels[metrics.queried])
    # Baseline: query in the ensemble's default anomaly-score order.
    queried_baseline = ensemble.ordered_anom_idxs[0:nqueried]
    qlbls = ensemble.labels[queried_baseline]
    num_seen_baseline[0, 0:2] = [fid, runidx]
    num_seen_baseline[0, 2:(num_seen_baseline.shape[1])] = np.cumsum(qlbls)
    # the ensembles store samples in sorted order of default anomaly
    # scores. The corresponding indexes are stored in ensemble.original_indexes
    true_queried_indexes = np.zeros(shape=(1, nqueried + 2))
    true_queried_indexes[0, 0:2] = [fid, runidx]
    # Note: make the queried indexes relative 1 (NOT zero)
    true_queried_indexes[0, 2:(true_queried_indexes.shape[1])] = ensemble.original_indexes[metrics.queried] + 1
    true_queried_indexes_baseline = np.zeros(shape=(1, nqueried + 2))
    true_queried_indexes_baseline[0, 0:2] = [fid, runidx]
    # Note: make the queried indexes relative 1 (NOT zero)
    true_queried_indexes_baseline[0, 2:(true_queried_indexes_baseline.shape[1])] = \
        queried_baseline + 1
    return num_seen, num_seen_baseline, true_queried_indexes, true_queried_indexes_baseline
def write_sparsemat_to_file(fname, X, fmt='%.18e', delimiter=','):
    """Write a dense or CSR sparse matrix to a delimited text file.

    :param fname: output path
    :param X: np.ndarray or scipy csr_matrix
    :param fmt: per-value format string, applied row by row (sparse branch)
    :param delimiter: value separator (sparse branch)
    :raises ValueError: if X is neither an ndarray nor a csr_matrix
    """
    if isinstance(X, np.ndarray):
        # NOTE(review): fmt/delimiter are NOT applied here; the historical
        # hard-coded '%3.2f' / ',' output is preserved for compatibility —
        # confirm callers before unifying with the sparse branch.
        np.savetxt(fname, X, fmt='%3.2f', delimiter=",")
    elif isinstance(X, csr_matrix):
        # 'with' guarantees the handle is closed even if formatting raises
        # (the original leaked the open file on error).
        with open(fname, 'w') as f:
            for i in range(X.shape[0]):
                a = X[i, :].toarray()[0]
                f.write(delimiter.join([fmt % v for v in a]))
                f.write(os.linesep)
                # Flush periodically so long writes make steady progress.
                if (i + 1) % 10 == 0:
                    f.flush()
    else:
        raise ValueError("Invalid matrix type")
def save_aad_model(filepath, model):
    """Pickle *model* into a gzip-compressed file at *filepath*.

    Uses the Python 3 'pickle' module — the original imported the
    Python 2-only 'cPickle', which no longer exists — and a 'with' block
    so the file is closed even if pickling fails.
    """
    import pickle
    with gzip.open(filepath, 'wb') as f:
        pickle.dump(model, f, protocol=pickle.HIGHEST_PROTOCOL)
def load_aad_model(filepath):
    """Load a model previously written by save_aad_model.

    Uses the Python 3 'pickle' module instead of the Python 2-only
    'cPickle', and a 'with' block so the file is always closed.
    """
    import pickle
    with gzip.open(filepath, 'rb') as f:
        return pickle.load(f)
def save_aad_metrics(metrics, opts):
    """Persist AAD metrics when opts.resultsdir names an existing
    directory; otherwise do nothing."""
    if opts.resultsdir != "" and os.path.isdir(opts.resultsdir):
        save(metrics, filepath=opts.get_metrics_path())
def load_aad_metrics(opts):
    """Load previously-saved AAD metrics, or return None (after printing
    a message) when the metrics file is unavailable."""
    fpath = opts.get_metrics_path()
    if opts.resultsdir != "" and os.path.isfile(fpath):
        # print "Loading metrics" + fpath
        return load(fpath)
    print ("Cannot load %s" % fpath)
    return None
|
from src.video_player import VideoPlayer
from unittest import mock
@mock.patch('builtins.input', lambda *args: 'No')
def test_search_videos_with_no_answer(capfd):
    """Answering 'No' to a search prompt lists results but plays nothing."""
    player = VideoPlayer()
    player.search_videos("cat")
    out, err = capfd.readouterr()
    lines = out.splitlines()
    assert len(lines) == 5
    assert "Here are the results for cat:" in lines[0]
    assert "1) Amazing Cats (amazing_cats_video_id) [#cat #animal]" in lines[1]
    assert "2) Another Cat Video (another_cat_video_id) [#cat #animal]" in lines[2]
    assert ("Would you like to play any of the above? If yes, "
            "specify the number of the video.") in lines[3]
    assert (
        "If your answer is not a valid number, we will assume "
        "it's a no.") in lines[4]
    assert "Playing video" not in out
@mock.patch('builtins.input', lambda *args: '2')
def test_search_videos_and_play_answer(capfd):
    """Answering '2' plays the second listed search result."""
    player = VideoPlayer()
    player.search_videos("cat")
    out, err = capfd.readouterr()
    lines = out.splitlines()
    assert len(lines) == 6
    assert "Here are the results for cat:" in lines[0]
    assert "1) Amazing Cats (amazing_cats_video_id) [#cat #animal]" in lines[1]
    assert "2) Another Cat Video (another_cat_video_id) [#cat #animal]" in lines[2]
    assert ("Would you like to play any of the above? If yes, "
            "specify the number of the video.") in lines[3]
    assert ("If your answer is not a valid number, we will assume "
            "it's a no.") in lines[4]
    assert "Playing video: Another Cat Video" in lines[5]
@mock.patch('builtins.input', lambda *args: '6')
def test_search_videos_number_out_of_bounds(capfd):
    """An out-of-range answer ('6') is treated as a no: nothing plays."""
    player = VideoPlayer()
    player.search_videos("cat")
    out, err = capfd.readouterr()
    lines = out.splitlines()
    assert len(lines) == 5
    assert "Here are the results for cat:" in lines[0]
    assert "1) Amazing Cats (amazing_cats_video_id) [#cat #animal]" in lines[1]
    assert "2) Another Cat Video (another_cat_video_id) [#cat #animal]" in lines[2]
    assert ("Would you like to play any of the above? If yes, "
            "specify the number of the video.") in lines[3]
    assert ("If your answer is not a valid number, we will assume "
            "it's a no.") in lines[4]
    assert "Playing video" not in out
@mock.patch('builtins.input', lambda *args: 'ab3g')
def test_search_videos_invalid_number(capfd):
    """A non-numeric answer ('ab3g') is treated as a no: nothing plays."""
    player = VideoPlayer()
    player.search_videos("cat")
    out, err = capfd.readouterr()
    lines = out.splitlines()
    assert len(lines) == 5
    assert "Here are the results for cat:" in lines[0]
    assert "1) Amazing Cats (amazing_cats_video_id) [#cat #animal]" in lines[1]
    assert "2) Another Cat Video (another_cat_video_id) [#cat #animal]" in lines[2]
    assert ("Would you like to play any of the above? If yes, "
            "specify the number of the video.") in lines[3]
    assert ("If your answer is not a valid number, we will assume "
            "it's a no.") in lines[4]
    assert "Playing video" not in out
def test_search_videos_no_results(capfd):
    """A term with no matches prints a single 'no results' line."""
    player = VideoPlayer()
    player.search_videos("blah")
    out, err = capfd.readouterr()
    lines = out.splitlines()
    assert len(lines) == 1
    assert "No search results for blah" in lines[0]
@mock.patch('builtins.input', lambda *args: 'No')
def test_search_videos_with_tag_no_answer(capfd):
    """Answering 'No' to a tag search lists results without playing."""
    player = VideoPlayer()
    player.search_videos_tag("#cat")
    out, err = capfd.readouterr()
    lines = out.splitlines()
    assert len(lines) == 5
    assert "Here are the results for #cat:" in lines[0]
    assert "1) Amazing Cats (amazing_cats_video_id) [#cat #animal]" in lines[1]
    assert "2) Another Cat Video (another_cat_video_id) [#cat #animal]" in lines[2]
    assert ("Would you like to play any of the above? If yes, "
            "specify the number of the video.") in lines[3]
    assert ("If your answer is not a valid number, we will assume "
            "it's a no.") in lines[4]
@mock.patch('builtins.input', lambda *args: '1')
def test_search_videos_with_tag_play_answered_number(capfd):
    """Answering '1' to a tag search plays the first listed result."""
    player = VideoPlayer()
    player.search_videos_tag("#cat")
    out, err = capfd.readouterr()
    lines = out.splitlines()
    assert len(lines) == 6
    assert "Here are the results for #cat:" in lines[0]
    assert "1) Amazing Cats (amazing_cats_video_id) [#cat #animal]" in lines[1]
    assert "2) Another Cat Video (another_cat_video_id) [#cat #animal]" in lines[2]
    assert ("Would you like to play any of the above? If yes, "
            "specify the number of the video.") in lines[3]
    assert ("If your answer is not a valid number, we will assume "
            "it's a no.") in lines[4]
    assert "Playing video: Amazing Cats" in lines[5]
@mock.patch('builtins.input', lambda *args: '5')
def test_search_videos_with_tag_number_out_of_bounds(capfd):
    """An out-of-range answer ('5') to a tag search plays nothing."""
    player = VideoPlayer()
    player.search_videos_tag("#cat")
    out, err = capfd.readouterr()
    lines = out.splitlines()
    assert len(lines) == 5
    assert "Here are the results for #cat:" in lines[0]
    assert "1) Amazing Cats (amazing_cats_video_id) [#cat #animal]" in lines[1]
    assert "2) Another Cat Video (another_cat_video_id) [#cat #animal]" in lines[2]
    assert ("Would you like to play any of the above? If yes, "
            "specify the number of the video.") in lines[3]
    assert ("If your answer is not a valid number, we will assume "
            "it's a no.") in lines[4]
    assert "Playing video" not in out
def test_search_videos_tag_no_results(capfd):
    """A tag with no matches prints a single 'no results' line."""
    player = VideoPlayer()
    player.search_videos_tag("#blah")
    out, err = capfd.readouterr()
    lines = out.splitlines()
    assert len(lines) == 1
    assert "No search results for #blah" in lines[0]
|
import pandas as pd
from urllib.error import HTTPError
from .utils.transform import normalize
def make_url(version: str) -> str:
    """Return the FRED-MD monthly-vintage CSV download URL for *version*.

    PEP 8 (E731): use a def rather than assigning a lambda to a name, so
    the callable carries a proper __name__ in tracebacks.
    """
    return f"https://s3.amazonaws.com/files.fred.stlouisfed.org/fred-md/monthly/{version}.csv"
def import_raw_fred(version: str = "current") -> pd.DataFrame:
    """
    Takes a version name and imports the data. Defaults to the latest data.
    Parameters
    ----------
    version : str, optional
        version in format YYYY-MM, by default "current"
    Returns
    -------
    pd.DataFrame
        The raw data
    Raises
    ------
    ValueError
        If the download fails with an HTTP error (e.g. unknown version).
    """
    url = make_url(version)
    try:
        return pd.read_csv(url)
    except HTTPError as err:
        # Chain the original HTTPError so the full failure context is kept.
        raise ValueError(f"Got a HTTP error, check your version: {version}!") from err
def import_transformed_data(version: str = "current") -> pd.DataFrame:
    """
    Takes a version name and imports the data and applies the transformation.
    Defaults to the latest data.
    Parameters
    ----------
    version : str, optional
        version in format YYYY-MM, by default "current"
    Returns
    -------
    pd.DataFrame
        The transformed data
    """
    return normalize(import_raw_fred(version))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ******************************************************************************
#
# Project: GDAL utils.auxiliary
# Purpose: OGR utility functions
# Author: Idan Miara <idan@miara.com>
#
# ******************************************************************************
# Copyright (c) 2021, Idan Miara <idan@miara.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ******************************************************************************
import os
from typing import Sequence
from osgeo import ogr
from osgeo_utils.auxiliary.base import PathLikeOrStr
from osgeo_utils.auxiliary.osr_util import AnySRS, get_srs
from osgeo_utils.auxiliary.rectangle import GeoRectangle
def ogr_create_geometries_from_wkt(path: PathLikeOrStr, wkt_list: Sequence[str],
                                   of='ESRI Shapefile', srs: AnySRS = 4326):
    """Create a vector datasource at *path* with one feature per WKT string.

    Parameters:
        path: output datasource path.
        wkt_list: geometries in WKT form; one feature is created per entry.
        of: OGR driver (output format) name.
        srs: spatial reference for the layer (anything accepted by get_srs).
    """
    driver = ogr.GetDriverByName(of)
    ds = driver.CreateDataSource(os.fspath(path))
    srs = get_srs(srs)
    layer = ds.CreateLayer('', srs, ogr.wkbUnknown)
    for wkt in wkt_list:
        feature = ogr.Feature(layer.GetLayerDefn())
        geom = ogr.CreateGeometryFromWkt(wkt)
        feature.SetGeometry(geom)  # Set the feature geometry
        layer.CreateFeature(feature)  # Create the feature in the layer
        # Dereference instead of the deprecated Feature.Destroy().
        feature = None
    # Dereferencing the datasource flushes pending writes and closes it
    # (Dataset.Destroy() is deprecated in the GDAL Python bindings).
    ds = None
def ogr_get_layer_extent(lyr: ogr.Layer) -> GeoRectangle:
    """Return the layer's extent as a GeoRectangle built from OGR's min/max tuple."""
    return GeoRectangle.from_min_max(*lyr.GetExtent())
|
from JEDatabase.Core.SQLiteCore import SQLiteCore

# All DDL statements for the student-system database, executed in order.
_DDL_STATEMENTS = (
    'CREATE TABLE IF NOT EXISTS Account('
    'PersonnelNumber VARCHAR(20) PRIMARY KEY ,'
    'Password VARCHAR(20))',
    'CREATE TABLE IF NOT EXISTS PersonnelDetail('
    'PersonnelNumber VARCHAR(20) PRIMARY KEY ,'
    'PersonnelName VARCHAR(20),'
    'EnrollYear VARCHAR(10))',
    'CREATE TABLE IF NOT EXISTS LessonDetail('
    'LessonCode VARCHAR (10) PRIMARY KEY ,'
    'LessonName VARCHAR (20),'
    'LessonCredit VARCHAR (5),'
    'LessonProfessor VARCHAR (20),'
    'LessonType VARCHAR (3))',
    'CREATE TABLE IF NOT EXISTS SemesterLesson('
    'LessonCode VARCHAR (10) PRIMARY KEY,'
    'PersonnelNumber VARCHAR(20),'
    'Semester VARCHAR(5))',
    'CREATE TABLE IF NOT EXISTS LessonContent('
    'LessonCode VARCHAR(10) PRIMARY KEY ,'
    'LessonName VARCHAR(20),'
    'LessonContent VARCHAR(3000),'
    'Semester VARCHAR (5))',
    'CREATE TABLE IF NOT EXISTS LessonGrade('
    'LessonCode VARCHAR(20) PRIMARY KEY ,'
    'PersonnelNumber VARCHAR(20),'
    'Grade VARCHAR(5),'
    'Semester VARCHAR(5),'
    'LessonProfessor VARCHAR(20))',
    'CREATE TABLE IF NOT EXISTS PersonnelAccess('
    'PersonnelNumber VARCHAR(20) PRIMARY KEY ,'
    'Access VARCHAR(10))',
)

SQL = SQLiteCore(db_name=r'StudentSystemData.sqlite', table_name='StudentSystem')
for _ddl in _DDL_STATEMENTS:
    SQL.create_table(_ddl)
SQL.close()
|
# GUI automation script: fills an order form 38 times by clicking hard-coded
# screen coordinates with pyautogui.
# NOTE(review): the x/y values are specific to one display resolution and
# window layout — re-capture them with pyautogui.position() on other setups.
import pyautogui
import time
#screenWidth, screenHeight = pyautogui.size()
#pyautogui.position()
time.sleep(5)  # grace period to bring the target window to the foreground
for i in range(38):
    # fill amount page: focus the quantity field, type the amount, submit
    pyautogui.click(x=895, y=580, button='left')
    pyautogui.typewrite('15\n')
    pyautogui.click(x=723,y=656,button='left')
    time.sleep(4)  # wait for the next page to load
    # check out page
    pyautogui.click(x=1185,y=301,button='left')
    time.sleep(4)
    # place order
    pyautogui.click(x=1149,y=292,button='left')
    # back to the order page
    time.sleep(4)
    pyautogui.click(x=1217,y=135,button='left')
    # back to the item page for the next iteration
    time.sleep(4)
    pyautogui.click(x=512,y=541,button='left')
    time.sleep(4)
    #pyautogui.typewrite('15')
|
# -*- coding: utf8 -*-
def is_unique(sentence):
    """ 1.1 Is Unique: Implement an algorithm to determine if a string has
        all unique characters. What if you cannot use additional data structures?
        Complexity: O(n) time, O(n) space
    """
    seen = set()
    for char in sentence:
        if char in seen:
            return False
        seen.add(char)
    return True
def is_unique_no_ds(sentence):
    """ Complexity: O(n) time, O(1) space — a single integer acts as a bit set. """
    bitset = 0
    for char in sentence:
        mask = 1 << ord(char)
        if bitset & mask:
            return False
        bitset |= mask
    return True
def check_permutation(str1, str2):
    """ 1.2 Check Permutation: Given two strings, write a method to decide if
        one is a permutation of the other.
        Complexity: O(n) time, O(n) space
    """
    from collections import Counter  # local import: this file has no import section
    # Different lengths can never be permutations; bail out before counting.
    if len(str1) != len(str2):
        return False
    return Counter(str1) == Counter(str2)
def check_permutation_sort(str1, str2):
    """Complexity: O(nlogn) time, O(n) space for the sorted copies.

    Fix: the original sorted both strings BEFORE comparing lengths, wasting
    two O(n log n) sorts when the answer is trivially False (and its claimed
    O(1) space was wrong — sorted() copies its input).
    """
    if len(str1) != len(str2):
        return False
    return sorted(str1) == sorted(str2)
def urlify(sentence):
    """ 1.3 URLify: Write a method to replace all spaces in a string with '%20'.
    You may assume that the string has sufficient space at the end to hold the
    additional characters, and that you are given the "true" length of the string.
    EXAMPLE
    Input: "Mr John Smith"
    Output: "Mr%20John%20Smith"
    Complexity: O(n) time, O(1) space
    """
    chars = list(sentence)
    true_length = len(chars)
    num_spaces = chars.count(' ')
    # Grow the buffer: each space needs two extra slots ('%20' vs ' ').
    chars.extend([' '] * (num_spaces * 2))
    write = len(chars) - 1
    # Fill from the back so nothing is overwritten before it is read.
    for read in range(true_length - 1, -1, -1):
        if chars[read] == ' ':
            chars[write - 2:write + 1] = ['%', '2', '0']
            write -= 3
        else:
            chars[write] = chars[read]
            write -= 1
    return "".join(chars)
def palindrome_permutation(sentence):
    """ 1.4 Palindrome Permutation: Given a string, write a function to check
        if it is a permutation of a palindrome. A palindrome is a word or
        phrase that is the same forwards and backwards. A permutation is a
        rearrangement of letters. The palindrome does not need to be limited to
        just dictionary words.
        EXAMPLE
        Input: Tact Coa
        Output: True (permutations: "taco cat", "atco eta", etc.)
        Complexity: O(n) in time, O(n) in space
    """
    from collections import Counter  # local import: this file has no import section
    counts = Counter(sentence.lower().replace(" ", ""))
    # A palindrome permutation allows at most one character with an odd count.
    return sum(1 for cnt in counts.values() if cnt % 2 != 0) <= 1
def palindrome_permutation_no_ds(sentence):
    """ Complexity: O(nlogn) in time, O(1) in space

    Sorts the lowercased, space-stripped characters so equal characters become
    adjacent, then counts run lengths; a palindrome permutation allows at most
    one character with an odd count.
    """
    sentence = sorted(list(sentence.lower().replace(" ", "")))
    has_odd_count = False
    char_count = 1  # length of the current run of equal characters
    i = 1
    while i < len(sentence):
        if sentence[i] == sentence[i-1]:
            char_count += 1
        else:
            # Run ended: a second odd-length run disqualifies the string.
            if char_count % 2 != 0:
                if has_odd_count:
                    return False
                else:
                    has_odd_count = True
            char_count = 1
        i += 1
    # The final run never closes inside the loop, so check its parity here.
    if char_count % 2 != 0 and has_odd_count == True:
        return False
    return True
def _one_insert_away(shorter, longer):
    """True if inserting exactly one character into *shorter* yields *longer*."""
    i = j = 0
    while i < len(shorter) and j < len(longer):
        if shorter[i] == longer[j]:
            i += 1
        j += 1
        # More than one unmatched character in *longer* means > 1 edit.
        if j > i + 1:
            return False
    return True


def one_away(str1, str2):
    """ 1.5 One Away: There are three types of edits that can be performed on strings:
        insert a character, remove a character, or replace a character.
        Given two strings, write a function to check if they are
        one edit (or zero edits) away.
        EXAMPLE
        pales, pale -> true
        pale, bale -> true
        pale, bake -> false
        pale, ple -> true
        Complexity: O(n) time, O(1) space

    Refactor: the original duplicated the same scanning loop for the
    insertion and deletion cases; both are the single helper above with the
    arguments ordered shorter-first.
    """
    if len(str1) == len(str2):
        # Replacement case: at most one mismatched position.
        return sum(1 for a, b in zip(str1, str2) if a != b) <= 1
    if len(str1) == len(str2) - 1:
        return _one_insert_away(str1, str2)
    if len(str1) == len(str2) + 1:
        return _one_insert_away(str2, str1)
    # Length differs by 2+: never reachable with a single edit.
    return False
def compression(sentence):
    """ 1.6 String Compression: Implement a method to perform basic string
    compression using the counts of repeated characters.
    For example, the string aabcccccaaa would become a2b1c5a3.
    If the "compressed" string would not become smaller than the original string,
    your method should return the original string.
    You can assume the string has only uppercase and lowercase letters (a - z).
    Complexity: O(n) time, O(n) space
    """
    from itertools import groupby  # local import: this file has no import section
    if len(sentence) <= 2:
        # 0-2 characters can never shrink under this scheme.
        return sentence
    pieces = []
    for char, run in groupby(sentence):
        pieces.append(char)
        pieces.append(str(sum(1 for _ in run)))
    compressed = ''.join(pieces)
    # Bug fix: the original compared len(output) — the number of LIST ELEMENTS —
    # against the input length, so multi-digit run counts (e.g. '10') were
    # counted as one character. For 'aaaaaaaaaabcdefgh' it returned a 17-char
    # "compressed" string that is NOT shorter than the 17-char input.
    # Compare actual character lengths instead.
    return compressed if len(compressed) < len(sentence) else sentence
def rotate_matrix(mat):
    """ 1.7 Rotate Matrix: Given an image represented by an NxN matrix,
        where each pixel in the image is 4 bytes, write a method to rotate the
        image by 90 degrees (clockwise), in place.
    """
    n = len(mat)
    # Bug fix: range(n/2) raises TypeError in Python 3 because '/' yields a
    # float; integer division is required.
    for i in range(n // 2):
        for j in range(i, n - 1 - i):
            # Four-way cyclic swap of the ring elements:
            # (i,j) <- (n-1-j,i) <- (n-1-i,n-1-j) <- (j,n-1-i) <- (i,j)
            tmp = mat[i][j]
            mat[i][j] = mat[n-1-j][i]
            mat[n-1-j][i] = mat[n-1-i][n-1-j]
            mat[n-1-i][n-1-j] = mat[j][n-1-i]
            mat[j][n-1-i] = tmp
    return mat
def zero_matrix(mat):
    """ 1.8 Zero Matrix: Write an algorithm such that if an element in an MxN
        matrix is 0, its entire row and column are set to 0.
    """
    n, m = len(mat), len(mat[0])
    # First pass: record the coordinates of every zero.
    coords = [(i, j) for i in range(n) for j in range(m) if mat[i][j] == 0]
    zero_rows = {i for i, _ in coords}
    zero_cols = {j for _, j in coords}
    # Second pass: clear every marked row and column.
    for i in range(n):
        for j in range(m):
            if i in zero_rows or j in zero_cols:
                mat[i][j] = 0
    return mat
def is_string_rotation(s1, s2):
    """ 1.9 String Rotation: Assume you have a method isSubstring which checks
        if one word is a substring of another. Given two strings, s1 and s2, write
        code to check if s2 is a rotation of s1 using only one call to isSubstring
        (e.g., "waterbottle" is a rotation of "erbottlewat").
    """
    # Bug fix: without the length check every substring of s2 passed, e.g.
    # "at" was wrongly reported as a rotation of "waterbottle".
    return len(s1) == len(s2) and s1 in s2 + s2
|
"""
Main function for network training using GeoConv
"""
import torch
from torch import optim
from torch.utils.data import DataLoader
import os, argparse
import numpy as np
from model.GeoConvNet import GeoConvNet
from utils.process_data import collate_ball, collect_file, data_reader, PointData
from train import train
def parse_arguments():
    """Build and parse the command-line arguments for the training script."""
    arg_parser = argparse.ArgumentParser(description='Training Script for Particle Latent Representation')
    # training parameters
    arg_parser.add_argument('-b', '--batch-size', type=int, default=128, help='input batch size for training')
    arg_parser.add_argument('--sample-size', type=int, default=2000, help='sample size per file ')
    arg_parser.add_argument('--epochs', type=int, default=10, help='number of epochs to train (default: 10)')
    arg_parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
    arg_parser.add_argument('--seed', type=int, default=1, help='random seed (default: 1)')
    arg_parser.add_argument('--learning-rate', dest='lr', type=float, default=0.001, help='learning-rate')
    arg_parser.add_argument('--sample-type', type=str, default='even', choices=['even', 'random'], help='Method used to sample')
    # model parameters (saved for inference)
    arg_parser.add_argument('-v', '--lat-dim', type=int, default=16, help='Letent vector length')
    arg_parser.add_argument('--ball', action='store_true', default=False, help='Train with ball or knn neighbor')
    arg_parser.add_argument('-d', '--data-source', dest='source', type=str, default="fpm", help='Data source type', choices=['fpm', 'cos', 'jet3b'])
    arg_parser.add_argument('-k', dest='k', type=int, default=256, help='k in knn')
    arg_parser.add_argument('-r', dest='r', type=float, default=0.03, help='r in ball query')
    arg_parser.add_argument('--enc-out', type=int, default=256, help='Encoder output channel in geoGonv')
    # control parameters
    arg_parser.add_argument('-l', '--load', dest='load', type=str, help='load file model')
    arg_parser.add_argument("--result-dir", dest="result_dir", type=str, default="states", help='the directory to save the result')
    return arg_parser.parse_args()
if __name__ == "__main__":
args = parse_arguments()
load = args.load
#Continue training
if load:
state_dict = torch.load(load)
state = state_dict['state']
config = state_dict['config']
start_epoch = state_dict['end_epoch']
args_dict = vars(args)
args_dict.update(vars(config))
args = argparse.Namespace(**args_dict) #Update with new parameters
else:
start_epoch = 1
if not os.path.isdir(args.result_dir):
os.mkdir(args.result_dir)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# Total dimensionality of the dataset
if args.source == "fpm":
input_dim = 7
elif args.source == "cos":
input_dim = 10
elif args.source == 'jet3b':
input_dim = 5
#Data directory
try:
data_path = os.environ['data']
except KeyError:
data_path = './data/'
torch.manual_seed(args.seed)
model = GeoConvNet(args.lat_dim, input_dim, args.ball, args.enc_out, args.r).float().to(device)
if load:
model.load_state_dict(state)
print('Model loaded from {}'.format(load))
optimizer = optim.Adam(model.parameters(), lr=args.lr)
kwargs = {'pin_memory': True} if use_cuda else {}
print(args)
# prepare data
if args.source == "fpm":
file_list = collect_file(os.path.join(data_path,"2016_scivis_fpm/0.44/run41"),args.source,shuffle=True)
elif args.source == "cos":
file_list = collect_file(os.path.join(data_path,"ds14_scivis_0128/raw"),args.source,shuffle=True)
elif args.source == "jet3b":
file_list = collect_file(os.path.join(data_path,"jet3b"),args.source,shuffle=True)
for epoch in range(start_epoch, args.epochs + 1):
epoch_loss = 0
for i,f in enumerate(file_list):
print("===> File in process: ",f)
data_source = data_reader(f, args.source)
if args.sample_type == 'random':
choice = np.random.choice(len(data_source),args.sample_size)
pd = PointData(data_source, args.k, args.r, args.ball, choice)
elif args.sample_type == 'even':
pd = PointData(data_source, args.k, args.r, args.ball, args.sample_size)
loader = DataLoader(pd, batch_size=args.batch_size, shuffle=True, drop_last=True, collate_fn=collate_ball if args.ball else None, **kwargs,num_workers=0)
train_loss = train(model,loader,optimizer,args.ball,device)
epoch_loss += train_loss
save_dict = {
"state": model.state_dict(),
"config":args,
"end_epoch": epoch,
}
torch.save(save_dict,os.path.join(args.result_dir,'current_model.pth'))
print("===> File processed: {}/{}".format(i+1,len(file_list)))
epoch_loss /= len(file_list)
print('==> Epoch average loss: {:.6f}'.format(epoch_loss))
with open(os.path.join(args.result_dir,'epoch_loss_log.txt'),'a') as f:
f.write("%f\n" % epoch_loss)
save_dict = {
"state": model.state_dict(),
"config":args,
"end_epoch": epoch,
}
torch.save(save_dict,os.path.join(args.result_dir,'final_model.pth'))
print('Training complete. Final model saved!') |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# utils.py --- utils for musicbox
# Copyright (c) 2015-2016 omi & Contributors
import platform
import os
def notify_command_osx(msg, msg_type, t=None):
    """Build the osascript command that shows *msg* as a macOS notification.

    msg_type == 1 additionally plays the system 'Ping' sound.
    t is accepted for interface parity with the Linux variant but is unused:
    macOS notifications do not support a duration.
    """
    command = '/usr/bin/osascript -e \'display notification "' + msg
    if msg_type == 1:
        # Bug fix: a space is required between the closing quote of the
        # message and the 'sound name' clause, otherwise AppleScript fails
        # to parse the statement.
        command += '" sound name "/System/Library/Sounds/Ping.aiff'
    command += '"\''
    return command
def notify_command_linux(msg, t=None):
    """Build the notify-send command for *msg*; t is the timeout in ms."""
    parts = ['/usr/bin/notify-send "' + msg + '"']
    if t:
        parts.append('-t ' + str(t))
    # transient hint: the notification is not kept in the history tray
    parts.append('-h int:transient:1')
    return ' '.join(parts)
def notify(msg, msg_type=0, t=None):
    "Show system notification with duration t (ms)"
    on_mac = platform.system() == 'Darwin'
    command = notify_command_osx(msg, msg_type, t) if on_mac else notify_command_linux(msg, t)
    os.system(command)
if __name__ == "__main__":
notify("test", t=1000)
|
# region [Imports]
# * Standard Library Imports ---------------------------------------------------------------------------->
import os
import sys
import json
import pickle
import shutil
import hashlib
import datetime
import configparser
from pprint import pformat
from contextlib import contextmanager
# * Gid Imports ----------------------------------------------------------------------------------------->
import gidlogger as glog
# endregion [Imports]
# region [Logging]
# Module-level logger wired into the gidlogger framework; the import
# notification records in the logs that this module was loaded.
log = glog.aux_logger(__name__)
glog.import_notification(log, __name__)
# endregion [Logging]
# region [Constants]
# endregion [Constants]
def loadjson(in_file):
    """Read the JSON file at *in_file* and return the parsed object."""
    with open(in_file, 'r') as json_source:
        return json.load(json_source)
def writejson(in_object, in_file, sort_keys=True, indent=4):
    """Serialize *in_object* to *in_file* as pretty-printed JSON."""
    with open(in_file, 'w') as json_target:
        json.dump(in_object, json_target, sort_keys=sort_keys, indent=indent)
def hash_to_solidcfg(in_file, in_name=None, in_config_loc='default'):
    """
    Stores the md5 hash of *in_file* under the ['hashes'] section of
    solid_config.ini, then re-reads the config to verify it persisted.

    Parameters
    ----------
    in_file : str
        path of the file to hash
    in_name : str, optional
        option name to store the hash under; defaults to the file's basename
        with all dots removed
    in_config_loc : str, optional
        config location; 'default' resolves to <cwd>/config/solid_config.ini

    Raises
    ------
    configparser.Error
        if the hash read back does not match the computed hash
    """
    _cfg = configparser.ConfigParser()
    _cfg_loc = pathmaker('cwd', 'config', 'solid_config.ini') if in_config_loc == 'default' else in_config_loc
    _bin_file = readbin(in_file)
    _name = splitoff(in_file)[1].replace('.', '') if in_name is None else in_name
    _cfg.read(_cfg_loc)
    _hash = hashlib.md5(_bin_file).hexdigest()
    if _cfg.has_section('hashes') is False:
        log.info("section ['hashes'] does not exist in solid_config.ini, creating it now!")
        _cfg.add_section('hashes')
        log.info("section ['hashes'] added to solid_config.ini")
    _cfg.set('hashes', _name, _hash)
    log.info(f"added hash [{_hash}] to section ['hashes'] in solid_config.ini")
    with open(_cfg_loc, 'w') as configfile:
        _cfg.write(configfile)
    # Bug fix: this call was missing the f-prefix, so the literal text
    # "{_cfg_loc}" was logged instead of the actual path.
    log.debug(f"saved new solid_config.ini at '{_cfg_loc}'")
    _cfg.read(_cfg_loc)
    if _cfg.get('hashes', _name) != _hash:
        raise configparser.Error("recently saved hash does not match the file hash")
def ishash_same(in_file, in_name=None, in_config_loc='default'):
    """
    Checks whether the current md5 hash of *in_file* matches the hash stored
    for it in the ['hashes'] section of solid_config.ini.

    Parameters
    ----------
    in_file : str
        path of the file to verify
    in_name : str, optional
        option name the hash was stored under; defaults to the file's
        basename with all dots removed
    in_config_loc : str, optional
        config location; 'default' resolves to <cwd>/config/solid_config.ini

    Returns
    -------
    bool
        True if the stored hash equals the current hash, False if it differs
        or no entry exists for the name

    Raises
    ------
    configparser.Error
        if the ['hashes'] section is missing entirely
    """
    _cfg = configparser.ConfigParser()
    _cfg_loc = pathmaker('cwd', 'config', 'solid_config.ini') if in_config_loc == 'default' else in_config_loc
    _bin_file = readbin(in_file)
    _name = splitoff(in_file)[1].replace('.', '') if in_name is None else in_name
    _cfg.read(_cfg_loc)
    _hash = hashlib.md5(_bin_file).hexdigest()
    if _cfg.has_section('hashes') is True:
        if _cfg.has_option('hashes', _name):
            if _cfg.get('hashes', _name) != _hash:
                _out = False
                log.info("hashes are !NOT! the same")
            elif _cfg.get('hashes', _name) == _hash:
                _out = True
                log.info("hashes are the same")
        else:
            # No stored hash for this name counts as "not the same".
            _out = False
            log.info('missing option')
    else:
        log.critical("section ['hashes'] is missing in solid_config.ini, it is absolutely needed")
        raise configparser.Error("section ['hashes'] does not exist!!")
    return _out
def absolute_listdir(in_dir, in_filter=None, in_filter_type=True):
    """Yield full paths for the entries of *in_dir*, optionally filtered.

    With in_filter set: in_filter_type True keeps names CONTAINING the filter,
    False keeps names NOT containing it. With in_filter None everything is
    yielded.
    """
    for entry in os.listdir(in_dir):
        if in_filter is None:
            yield pathmaker(in_dir, entry)
        elif in_filter_type is True and in_filter in entry:
            yield pathmaker(in_dir, entry)
        elif in_filter_type is False and in_filter not in entry:
            yield pathmaker(in_dir, entry)
def readbin(in_file):
    """
    Reads a binary file.
    Parameters
    ----------
    in_file : str
        A file path
    Returns
    -------
    bytes
        the raw, undecoded file contents
    """
    with open(pathmaker(in_file), 'rb') as binaryfile:
        return binaryfile.read()
def readit(in_file, per_lines=False, in_encoding='utf-8', in_errors=None):
    """
    Reads a text file.
    Parameters
    ----------
    in_file : str
        A file path
    per_lines : bool, optional
        If True, returns a list of all lines, by default False
    in_encoding : str, optional
        Sets the encoding, by default 'utf-8'
    in_errors : str, optional
        How to handle encoding errors ('strict'/'ignore'); None means strict
    Returns
    -------
    str/list
        the read in file as string or list (if per_lines is True)
    """
    with open(in_file, 'r', encoding=in_encoding, errors=in_errors) as source:
        content = source.read()
    return content.splitlines() if per_lines is True else content
def linereadit(in_file, in_encoding='utf-8', in_errors='strict'):
    """Read *in_file* and return its content as a list of lines (newlines stripped)."""
    with open(in_file, 'r', encoding=in_encoding, errors=in_errors) as source:
        return source.read().splitlines()
def from_dict_to_file(in_out_file, in_dict_name, in_dict):
    """Append *in_dict* to *in_out_file* as a Python assignment statement.

    The dict is pretty-printed with pprint.pformat and then patched by a
    chain of replace() calls to fix up the brace layout.
    """
    appendwriteit(in_out_file, '\n\n')
    _dict_string = in_dict_name + ' = {' + pformat(in_dict) + '\n}'
    # NOTE(review): pformat already emits surrounding braces, so the line
    # above produces doubled braces which the replace() chain then tries to
    # undo — this looks fragile for nested dicts; verify against real output.
    _dict_string = _dict_string.replace('{{', '{\n').replace('}}', '}').replace('}\n}', '\n}')
    appendwriteit(in_out_file, _dict_string)
def writebin(in_file, in_data):
    """
    Writes raw data to a file in binary mode.
    Parameters
    ----------
    in_file : str
        The target file path
    in_data : bytes
        The data to write (the 'wb' mode requires a bytes-like object)
    """
    with open(in_file, 'wb') as binary_target:
        binary_target.write(in_data)
def writeit(in_file, in_data, append=False, in_encoding='utf-8', in_errors=None):
    """
    Writes text to a file.
    Parameters
    ----------
    in_file : str
        The target file path
    in_data : str
        The data to write
    append : bool, optional
        If True appends the data to the file, by default False
    in_encoding : str, optional
        Sets the encoding, by default 'utf-8'
    in_errors : str, optional
        How to handle encoding errors; None means strict
    """
    mode = 'w' if append is False else 'a'
    with open(in_file, mode, encoding=in_encoding, errors=in_errors) as target:
        target.write(in_data)
def appendwriteit(in_file, in_data, in_encoding='utf-8'):
    """Append *in_data* to *in_file*, creating the file if it is missing."""
    with open(in_file, 'a', encoding=in_encoding) as target:
        target.write(in_data)
def clearit(in_file):
    """
    Deletes the contents of a file.
    Parameters
    ----------
    in_file : str
        The target file path
    """
    # Opening with 'w' truncates the file; the empty write is a no-op kept
    # for explicitness.
    with open(in_file, 'w') as target:
        target.write('')
    log.debug(f"contents of file '{in_file}' was cleared")
def pathmaker(first_segment, *in_path_segments, rev=False):
    """
    Normalizes input path or path fragments, replaces '\\\\' with '/' and combines fragments.
    Parameters
    ----------
    first_segment : str
        first path segment; if it is 'cwd' it gets replaced by 'os.getcwd()'
    rev : bool, optional
        If 'True' reverts path back to Windows default, by default False
    Returns
    -------
    str
        New path from segments and normalized.
    """
    # Bug fix: 'cwd' was documented as a placeholder for os.getcwd() (and the
    # callers in this module, e.g. hash_to_solidcfg, rely on it) but the
    # substitution was never implemented.
    _path = os.getcwd() if first_segment == 'cwd' else first_segment
    _path = os.path.join(_path, *in_path_segments)
    if rev is True or sys.platform not in ['win32', 'linux']:
        return os.path.normpath(_path)
    # Normalize to forward slashes on the platforms listed above.
    return os.path.normpath(_path).replace(os.path.sep, '/')
@contextmanager
def work_in(in_dir):
    """
    A context manager which changes the working directory to the given path,
    and then changes it back to its previous value on exit.
    Parameters
    ----------
    in_dir : str
        A file directory path
    """
    prev_cwd = os.getcwd()
    os.chdir(in_dir)
    log.debug(f"starting to work in directory [{in_dir}]")
    try:
        yield
    finally:
        # Bug fix: without try/finally an exception in the with-body left the
        # process stranded in *in_dir* instead of restoring the previous cwd.
        log.debug(f"stopped to work in directory [{in_dir}] and returned to directory [{prev_cwd}]")
        os.chdir(prev_cwd)
def path_part_remove(in_file):
    """
    Removes last segment of path, to get parent path.
    Parameters
    ----------
    in_file : str
        A file path
    Returns
    -------
    str
        A new file path, parent path of input.
    """
    _file = pathmaker(in_file)
    # pathmaker guarantees '/' separators, so splitting on '/' is safe here.
    _path = _file.split('/')
    _useless = _path.pop(-1)  # dropped last segment (kept only for the log line)
    # Re-append '/' so a root/drive prefix survives the re-join below.
    _first = _path.pop(0) + '/'
    _out = pathmaker(_first, *_path)
    log.debug(f"path segment [{_useless}] was removed from path [{_file}] to get [{_out}]")
    return _out
def dir_change(*args, in_adress_home=False):
    """
    changes directory to script location or provided path.
    Parameters
    ----------
    in_adress_home : bool, optional
        if True, changes to the directory containing this file and *args are
        ignored, by default False
    """
    if in_adress_home is True:
        _target = os.path.abspath(os.path.dirname(__file__))
    else:
        _target = pathmaker(*args)
    os.chdir(_target)
    log.debug('We are now in ' + _target)
def get_absolute_path(in_path='here', include_file=False):
    """
    Generates an absolute path from a relative path; by default the file
    segment is removed so only the containing folder is returned.
    Parameters
    ----------
    in_path : str, optional
        A relative filepath; 'here' is replaced by this file, by default 'here'
    include_file : bool, optional
        if False the last path segment is dropped, by default False
    Returns
    -------
    str
        An absolute file path
    """
    relative = __file__ if in_path == 'here' else in_path
    absolute = os.path.abspath(relative)
    return splitoff(absolute)[0] if include_file is False else absolute
def file_name_time(var_sep='_', date_time_sep='-', box=('[', ']')):
    """
    creates a name that is the date and time.
    Parameters
    ----------
    var_sep : str, optional
        symbol used between the date/time components, by default '_'
    date_time_sep : str, optional
        symbol used between the date and the time, by default '-'
    box : tuple, optional
        symbols used to frame the datetime; None disables framing,
        by default ('[', ']')
    Returns
    -------
    str
        New file name
    """
    now = datetime.datetime.today()
    date_part = now.strftime('%Y-%m-%d').replace('-', var_sep)
    # Only hours and minutes, seconds are deliberately dropped.
    time_part = now.strftime('%H:%M').replace(':', var_sep)
    stamp = date_part + date_time_sep + time_part
    if box is not None:
        stamp = box[0] + stamp + box[1]
    log.debug(f"created file name [{stamp}]")
    return stamp
def number_rename(in_file_name, in_round=1):
    """
    Appends a number to a file name if it already exists, increases the number and checks again.
    Parameters
    ----------
    in_file_name : str
        the file name to make unique; split on the FIRST dot, so names with
        multiple dots (or dotted directories) are not supported
    in_round : int, optional
        specifies the number to start on, by default 1
    Returns
    -------
    str
        new file name
    """
    _temp_path = in_file_name
    _temp_path = _temp_path.split('.')
    log.debug(f" Parts of rename: [0] = {_temp_path[0]}, [1] = {_temp_path[1]}")
    _output = _temp_path[0] + str(in_round) + '.' + _temp_path[1]
    log.debug(f"Setting name to {_output}")
    _new_round = int(in_round) + 1
    # Mutually recursive with _exist_handle until a non-existing name is found.
    return _exist_handle(_output, _new_round, _temp_path[0] + '.' + _temp_path[1])
def cascade_rename(in_file_name, in_folder, in_max_files=3):
    """Rotate numbered copies of a file (log-rotation style) inside *in_folder*.

    Collects the files in *in_folder* whose (casefolded) names contain the
    stem of *in_file_name*, creates the next numbered slot while still under
    *in_max_files*, then shifts each slot's contents down by one and deletes
    the bare-named (slot 0) file. Returns the path of slot 0.

    NOTE(review): the digit check treats ANY digit in the name as a slot
    number, and slot keys are assigned in directory-listing order — confirm
    this matches the intended rotation scheme.
    """
    _temp_file_dict = {}
    _name = ext_splitter(in_file_name)
    _ext = ext_splitter(in_file_name, _out='ext')
    file_index = 1
    for files in os.listdir(in_folder):
        files = files.casefold()
        if _name in files:
            # Numbered copies go to slots 1..n, the bare name to slot 0.
            if any(letter.isdigit() for letter in files):
                _temp_file_dict[str(file_index)] = pathmaker(in_folder, files)
                file_index = int(file_index) + 1
            else:
                _temp_file_dict[str(0)] = pathmaker(in_folder, files)
    if file_index + 1 <= in_max_files:
        # Create the next numbered slot as an (almost) empty placeholder file.
        if file_index == 1:
            writeit(pathmaker(in_folder, _name + str(file_index) + '.' + _ext), ' ')
            _temp_file_dict[str(file_index)] = pathmaker(in_folder, _name + str(file_index) + '.' + _ext)
        else:
            writeit(pathmaker(in_folder, _name + str(file_index + 1) + '.' + _ext), ' ')
            _temp_file_dict[str(file_index + 1)] = pathmaker(in_folder, _name + str(file_index + 1) + '.' + _ext)
    # Shift contents down one slot; slot 0 is removed to make room.
    for i in range(len(_temp_file_dict) - 1):
        if i != 0:
            shutil.copy(_temp_file_dict[str(i)], _temp_file_dict[str(i - 1)])
        else:
            os.remove(_temp_file_dict[str(0)])
    return pathmaker(in_folder, _temp_file_dict[str(0)])
def _exist_handle(in_path, in_round, original_path):
    """
    internal use for the "number_rename" function.

    Returns *in_path* unchanged if no file exists there yet; otherwise
    recurses via number_rename with an incremented counter until a free
    name is found.
    """
    if os.path.exists(in_path) is True:
        log.debug(f"{in_path} already exists")
        _new_path = number_rename(original_path, in_round)
        log.debug(f" variables for rename round {in_round} are: original_path = {original_path}, in_round = {in_round}")
    else:
        _new_path = in_path
        # Bug fix: the previous message accidentally contained a second,
        # un-expanded copy of the f-string template.
        log.debug(f"{in_path} does not exist, setting it to {_new_path}")
    return _new_path
def splitoff(in_file):
    """splitoff, wraps os.path.dirname and os.path.basename to return both as tuple.
    Args:
        in_file (str): the full file path
    Returns:
        tuple: where '[0]' is the dirname and '[1]' is the basename(filename)"""
    # os.path.split returns exactly (dirname, basename) in one call.
    return os.path.split(pathmaker(in_file))
def timenamemaker(in_full_path):
    """
    Creates a filename that has the current UTC time embedded before the
    extension.
    Parameters
    ----------
    in_full_path : str
        full path of the file name that is to be modified
    Returns
    -------
    str
        the new file name
    """
    stamp = datetime.datetime.utcnow().strftime('_[%Y-%m-%dT%H-%M]')
    log.debug(f"_time is [{stamp}]")
    directory, file_name = splitoff(in_full_path)
    stem, extension = os.path.splitext(file_name)
    result = pathmaker(directory, stem + stamp + extension)
    log.debug(f"created file name [{result}] from original name [{in_full_path}]")
    return result
def ext_splitter(in_file, _out='file'):
    """
    Splits a file name by the extension and returns either the name or the extension.
    Parameters
    ----------
    in_file : str
        a file name
    _out : str, optional
        the part to return: 'file', 'ext' or 'both', by default 'file'
    Returns
    -------
    str or tuple
        the file name, the extension, or a (name, extension) tuple
    Raises
    ------
    ValueError
        if *_out* is not one of 'file', 'ext' or 'both' (previously this
        surfaced as an UnboundLocalError)
    """
    if '.' in in_file:
        name, extension = in_file.rsplit('.', maxsplit=1)
    else:
        # Entries without any extension are treated as folders.
        name, extension = in_file, 'folder'
    if _out == 'file':
        return name
    if _out == 'ext':
        return extension
    if _out == 'both':
        return (name, extension)
    raise ValueError(f"_out must be 'file', 'ext' or 'both', not {_out!r}")
def file_name_modifier(in_path, in_string, pos='prefix', new_ext=None, seperator=None):
    """
    changes a file name by inserting a string.
    Parameters
    ----------
    in_path : str
        the file path
    in_string : str
        the string inserted in the name
    pos : str, optional
        the position where to insert the string, either "prefix" or "postfix", by default 'prefix'
    new_ext : str, optional
        a new extension for the file name if not None, by default None
    seperator : str, optional
        the symbol that is used to seperate the old and new name, by default None
    Returns
    -------
    str
        the new file path
    Raises
    ------
    Exception
        checks the input for forbidden characters for filenames on Windows.
    """
    # Characters Windows forbids in file names; each string input is checked.
    _forbiden_chars = ['<', '>', ':', '"', '/', '\\', '|', '?', '*']
    if new_ext is not None and any(chars in new_ext for chars in _forbiden_chars):
        raise Exception(f"You can't use the following symbols in file names {str(_forbiden_chars)}")
    if seperator is not None and any(chars in seperator for chars in _forbiden_chars):
        raise Exception(f"You can't use the following symbols in file names {str(_forbiden_chars)}")
    if any(chars in in_string for chars in _forbiden_chars):
        raise Exception(f"You can't use the following symbols in file names {str(_forbiden_chars)}")
    _path, _file = splitoff(pathmaker(in_path))
    if new_ext is not None:
        # Swap the extension first; 'ext' and '.ext' are both accepted.
        _file = _file.rsplit('.', 1)[0] + new_ext if '.' in new_ext else _file.rsplit('.', 1)[0] + '.' + new_ext
    _file, _ext = _file.rsplit('.', 1)
    if seperator is None:
        _outfile = in_string + _file + '.' + _ext if pos == 'prefix' else _file + in_string + '.' + _ext
    else:
        _outfile = in_string + seperator + _file + '.' + _ext if pos == 'prefix' else _file + seperator + in_string + '.' + _ext
    _out = pathmaker(_path, _outfile)
    log.debug(f"created file name [{_out}] from original name [{in_path}]")
    return _out
def pickleit(obj, in_path):
    """
    saves an object as pickle file.
    Parameters
    ----------
    obj : object
        the object to save
    in_path : str
        the target file path for the pickle file
    """
    with open(pathmaker(in_path), 'wb') as filetopickle:
        log.debug(f"saved object [{str(obj)}] as pickle file [{in_path}]")
        pickle.dump(obj, filetopickle, pickle.HIGHEST_PROTOCOL)
def get_pickled(in_path):
    """
    Loads and returns the object stored in a pickle file.

    Parameters
    ----------
    in_path : str
        the file path to the pickle file

    Returns
    -------
    object
        the pickled object
    """
    target_file = pathmaker(in_path)
    with open(target_file, 'rb') as pickletoretrieve:
        log.debug(f"loaded pickle file [{in_path}]")
        return pickle.load(pickletoretrieve)
def file_walker(in_path, in_with_folders=False):
    """
    walks recursively through a file system and returns a list of file paths.

    Parameters
    ----------
    in_path : str
        the path to the directory from where to start
    in_with_folders : bool, optional
        if True, also include every visited sub-directory (but never the
        starting directory itself) in the result, by default False

    Returns
    -------
    list
        a list of all files found as file paths.
    """
    _out_list = []
    log.debug(f"start to walk and find all files in [{in_path}]")
    for root, _, filelist in os.walk(in_path):
        for files in filelist:
            _out = os.path.join(root, files)
            _out_list.append(_out)
        # optionally record the folder itself; the start directory is excluded
        if in_with_folders is True and root != in_path:
            _out_list.append(root)
    log.debug(f"finished walking [{in_path}]")
    return _out_list
def _filter_by_fileage(file_path):
    """Sort-key helper: returns the file's creation timestamp (``st_ctime``)."""
    return os.path.getctime(file_path)
def limit_amount_files_absolute(in_basename, in_directory, in_amount_max):
    """Deletes the oldest files matching ``in_basename`` in ``in_directory``
    until at most ``in_amount_max`` of them remain."""
    matching_files = [pathmaker(entry.path) for entry in os.scandir(in_directory)
                      if in_basename in entry.name]
    # oldest first, so the surplus at the front of the list gets removed
    matching_files.sort(key=_filter_by_fileage)
    surplus = len(matching_files) - in_amount_max
    for stale_file in matching_files[:max(surplus, 0)]:
        os.remove(stale_file)
def limit_amount_of_files(in_basename, in_directory, in_amount_max):
    """
    limits the amount of files in a folder that have a certain basename,
    if needed deletes the oldest and renames every file to move up namewise.
    (second oldest gets named to the oldest,...)

    Parameters
    ----------
    in_basename : str
        the common string all file names that should be affected share.
    in_directory : str
        path of the directory to affect
    in_amount_max : int
        the max amount of files allowed
    """
    log.debug(f"checking amount of files with name [{in_basename}] in [{in_directory}], if more than [{in_amount_max}]")
    _existing_file_list = []
    for files in os.listdir(pathmaker(in_directory)):
        if in_basename in files:
            _existing_file_list.append(pathmaker(in_directory, files))
    if len(_existing_file_list) > in_amount_max:
        log.debug(f"files are exceding max amount by [{len(_existing_file_list)-in_amount_max}]")
        # oldest first (by modification time)
        _existing_file_list.sort(key=os.path.getmtime)
        for index, files in enumerate(_existing_file_list):
            _rename_index = index - 1
            if index == 0:
                # the oldest file is dropped entirely
                os.remove(files)
                log.debug(f"removing oldest file [{files}]")
            elif index > in_amount_max:
                # NOTE(review): shifting stops once past the limit, so when more
                # than in_amount_max + 1 matching files exist the extra newest
                # ones are left under their original names -- confirm intended.
                break
            else:
                # shift this file onto the name just freed by its older neighbour
                os.rename(files, _existing_file_list[_rename_index])
                log.debug(f"renaming file [{files}] to [{_existing_file_list[_rename_index]}]")
def create_file(in_path, overwrite=False):
    """Creates an empty file at ``in_path``; an existing file is only
    truncated when ``overwrite`` is True."""
    if overwrite or not os.path.isfile(in_path):
        with open(in_path, 'w') as f:
            f.write('')
def create_folder(in_path):
    """
    Creates the folder ``in_path`` (including parents) if it does not exist yet.
    """
    if os.path.isdir(in_path) is False:
        log.error(f"Folder '{in_path}' does **NOT** exist!")
        os.makedirs(in_path)
        # BUG FIX: the original used a plain string literal here, so the log
        # showed the text "{in_path}" instead of the actual path.
        log.info(f"Created Folder '{in_path}'")
    else:
        log.info(f"Folder '{in_path}' does exist!")
def bytes2human(n, annotate=False):
    """Converts a byte count to a human-readable quantity using 1024-based
    units; returns a formatted string when ``annotate`` is True, otherwise
    the scaled number (or ``n`` unchanged below 1024).

    # http://code.activestate.com/recipes/578019
    # >>> bytes2human(10000)
    # '9.8K'
    # >>> bytes2human(100001221)
    # '95.4M'
    """
    units = ('Kb', 'Mb', 'Gb', 'Tb', 'Pb', 'Eb', 'Zb', 'Yb')
    for exponent in range(len(units) - 1, -1, -1):
        threshold = 1 << ((exponent + 1) * 10)
        if n >= threshold:
            scaled = float(n) / threshold
            return '%.1f %s' % (scaled, units[exponent]) if annotate else scaled
    return "%s b" % n if annotate else n
# region [Main_Exec]
if __name__ == '__main__':
    # nothing to run directly; this module is a utility library
    pass
# endregion [Main_Exec]
|
import numpy as np
from sklearn import datasets
from sklearn import preprocessing
from sklearn.model_selection import (
GridSearchCV,
RepeatedStratifiedKFold,
)
from sklearn.pipeline import make_pipeline
from .. import GLVQ
def test_shared_memory_glvq():
    """Checks that a fitted GLVQ model's prototypes and its flat variable
    buffer alias the same memory, also after setting new model params."""
    X, y = datasets.load_iris(return_X_y=True)
    m = GLVQ(activation_type="identity").fit(X, y)
    p = m.prototypes_
    m.set_model_params(np.random.random(size=(3, 4)))
    # the array captured before set_model_params still shares the buffer
    assert np.shares_memory(p, m.get_variables())
    assert np.all(m.get_variables() == m.prototypes_.ravel())
    model_params = m.to_model_params_view(m.get_variables())
    assert np.all(m.prototypes_.shape == model_params.shape)
    assert np.shares_memory(m.prototypes_, m.get_variables())
    assert np.shares_memory(model_params, m.get_variables())
def test_glvq():
    """Grid-searches GLVQ over solver/discriminant/distance/activation
    combinations on iris and checks the train/test ROC-AUC stays above 0.75."""
    iris = datasets.load_iris()
    estimator = GLVQ(random_state=31415)
    pipeline = make_pipeline(preprocessing.StandardScaler(), estimator)
    scipy_solvers_types = ["lbfgs", "bfgs"]
    # Run each solver ones
    solvers_types = [
        "steepest-gradient-descent",
        "waypoint-gradient-descent",
        "adaptive-moment-estimation",
    ]
    discriminant_types = ["relative-distance"]
    # Every compatible distance
    distance_types = ["squared-euclidean", "euclidean"]
    # Every compatible activation
    activation_types = ["identity", "sigmoid", "soft-plus", "swish"]
    # scipy-backed solvers additionally toggle the analytic jacobian
    param_grid = [
        {
            "glvq__solver_type": scipy_solvers_types,
            "glvq__solver_params": [{"jac": None}, {}],
            "glvq__discriminant_type": discriminant_types,
            "glvq__distance_type": distance_types,
            "glvq__activation_type": activation_types,
        },
        {
            "glvq__solver_type": solvers_types,
            "glvq__discriminant_type": discriminant_types,
            "glvq__distance_type": distance_types,
            "glvq__activation_type": activation_types,
        },
    ]
    repeated_kfolds = RepeatedStratifiedKFold(n_splits=2, n_repeats=1)
    search = GridSearchCV(
        pipeline,
        param_grid,
        scoring=["accuracy", "roc_auc_ovo", "precision_macro", "recall_macro"],
        cv=repeated_kfolds,
        return_train_score=True,
        refit="roc_auc_ovo",
    )
    search.fit(iris.data, iris.target)
    assert np.all(search.cv_results_["mean_train_roc_auc_ovo"] > 0.75)
    assert np.all(search.cv_results_["mean_test_roc_auc_ovo"] > 0.75)
    print("\nBest parameter (CV roc_auc=%0.3f):" % search.best_score_)
    print(search.best_params_)
|
# Copyright (c) 2016, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = "Graham Schelle, Giuseppe Natale, Yun Rock Qu"
__copyright__ = "Copyright 2016, Xilinx"
__email__ = "pynq_support@xilinx.com"
import time
from pynq import MMIO
from pynq.iop import request_iop
from pynq.iop import iop_const
from pynq.iop import PMODA
from pynq.iop import PMODB
# Binary program loaded onto the IO processor to drive the DAC Pmod.
PMOD_DAC_PROGRAM = "pmod_dac.bin"
class Pmod_DAC(object):
    """This class controls a Digital to Analog Converter Pmod.

    The Pmod DA4 (PB 200-245) is an 8 channel 12-bit digital-to-analog
    converter run via AD5628.

    Attributes
    ----------
    iop : _IOP
        I/O processor instance used by the DAC
    mmio : MMIO
        Memory-mapped I/O instance to read and write instructions and data.
    """

    def __init__(self, if_id, value=None):
        """Return a new instance of a DAC object.

        Note
        ----
        The floating point number to be written should be in the range
        of [0.00, 2.00].

        Parameters
        ----------
        if_id : int
            The interface ID (1, 2) corresponding to (PMODA, PMODB).
        value: float
            The value to be written to the DAC Pmod.
        """
        if if_id not in [PMODA, PMODB]:
            raise ValueError("No such IOP for Pmod device.")

        self.iop = request_iop(if_id, PMOD_DAC_PROGRAM)
        self.mmio = self.iop.mmio
        self.iop.start()

        # BUG FIX: the original tested ``if value:``, which silently skipped a
        # requested initial value of 0.0 even though 0.0 is in the valid range.
        if value is not None:
            self.write(value)

    def write(self, value):
        """Write a floating point number onto the DAC Pmod.

        Note
        ----
        User is not allowed to use a number outside of the range [0.00, 2.00]
        as the input value.

        Parameters
        ----------
        value : float
            The value to be written to the DAC Pmod

        Returns
        -------
        None
        """
        if not 0.00 <= value <= 2.00:
            raise ValueError("Requested value not in range [0.00, 2.00].")

        # Calculate the voltage value and write to DAC
        # (0.000610351 V per LSB; the 12-bit code is packed into bits 31:20,
        # command bits in the low nibble)
        int_val = int(value / 0.000610351)
        self.mmio.write(iop_const.MAILBOX_OFFSET +
                        iop_const.MAILBOX_PY2IOP_CMD_OFFSET,
                        (int_val << 20) | 0x3)

        # Wait for I/O Processor to clear the command bit (completion signal)
        while (self.mmio.read(iop_const.MAILBOX_OFFSET +
                              iop_const.MAILBOX_PY2IOP_CMD_OFFSET)
               & 0x1) == 0x1:
            time.sleep(0.001)
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
from aiida.orm.calculation.job import JobCalculation
from aiida.orm.calculation.work import WorkCalculation
from aiida.orm.calculation import *
from aiida.orm.data import *
from aiida.orm.utils import *
from aiida.orm.code import Code
from aiida.orm.computer import Computer, delete_computer
from aiida.orm.querybuilder import QueryBuilder
from aiida.orm.workflow import Workflow
from .authinfo import *
from .user import *
from aiida.orm.group import Group
# Public API: explicit names plus everything re-exported by the star imports
# above (NOTE(review): relies on ``calculation``, ``utils``, ``user`` and
# ``authinfo`` module objects being in scope via those imports -- verify).
__all__ = (['JobCalculation', 'WorkCalculation', 'Code', 'Computer',
            'CalculationFactory', 'DataFactory', 'WorkflowFactory',
            'QueryBuilder', 'Workflow', 'Group', 'delete_computer'] +
           calculation.__all__ + utils.__all__ + user.__all__ + authinfo.__all__)
|
import CppHeaderParser
from CppHeaderParser import CppVariable, CppClass, CppMethod, CppEnum
from typing import List, Dict
def gen_variables(variables: List[CppVariable]):
    """Generate pybind11 ``m.attr`` lines for module-level variables,
    skipping using-declarations (type aliases, not exportable values)."""
    attr_lines = []
    template = 'm.attr("{name}") = {name};'
    for variable in variables:
        if "using" in variable["type"]:
            continue
        attr_lines.append(template.format(name=variable["name"]))
    return attr_lines
def pybind_overload(method):
    """Build the ``py::overload_cast<...>(`` prefix and its matching suffix
    (adding ``py::const_`` for const methods)."""
    suffix = ", py::const_)" if method["const"] else ")"
    prefix = "py::overload_cast<{types}>(".format(
        types=gen_args_types(method["parameters"]),
    )
    return prefix, suffix
def gen_method(class_name, method: CppMethod, needs_overload=False):
    """Generate one ``cls_X.def(...)`` binding line for a public method,
    wrapping it in ``py::overload_cast`` when the name is overloaded."""
    overload_open = ""
    overload_shut = ""
    if needs_overload:
        overload_open, overload_shut = pybind_overload(method)
    named_args = ""
    if method["parameters"]:
        named_args = ", " + gen_args_names(method["parameters"])
    template = 'cls_{class_name}.def("{name}", {overload_call}&{class_name}::{name}{overload_close}{args});'
    return template.format(
        class_name=class_name,
        name=method["name"],
        args=named_args,
        overload_call=overload_open,
        overload_close=overload_shut,
    )
def gen_args_types(params: List):
    """Render the C++ parameter type list (const/ref/pointer aware) used
    inside ``py::overload_cast<...>``; enums keep their declared type."""
    rendered = []
    for param in params:
        qualifier = "const " if param["constant"] else ""
        base = param["type"] if param.get("enum") else param["raw_type"]
        reference = " &" if param["reference"] else ""
        pointer = " *" if param["pointer"] else ""
        rendered.append(qualifier + base + reference + pointer)
    return ", ".join(rendered)
def gen_args_names(params: List):
    """Render the pybind11 ``"name"_a`` argument annotations, appending
    whitespace-stripped default values when present."""
    annotations = []
    for param in params:
        default_value = param.get("defaultValue")
        suffix = "=" + default_value.replace(" ", "") if default_value else ""
        annotations.append('"{name}"_a{default}'.format(name=param["name"], default=suffix))
    return ", ".join(annotations)
def gen_constructor(class_name, method: CppMethod):
    """Generate a ``py::init<...>`` constructor binding line with optional
    named-argument annotations."""
    names = gen_args_names(method["parameters"])
    types = gen_args_types(method["parameters"])
    template = 'cls_{class_name}.def(py::init<{args_types}>(){args_names});'
    return template.format(
        class_name=class_name,
        args_types=types,
        args_names=", " + names if names else "",
    )
def gen_classes(classes: Dict[str, CppClass]):
    """Generate a ``py::class_`` declaration plus constructor/method binding
    lines for every parsed class; skips operators, destructors and the
    dump/report debug helpers."""
    lines = []
    for class_name, class_info in classes.items():
        lines.append('py::class_<{name}> cls_{name}(m, "{name}");'.format(name=class_name))
        public_methods = class_info["methods"]["public"]
        public_names = [entry["name"] for entry in public_methods]
        for entry in public_methods:
            if "operator" in entry["name"]:
                continue
            if entry["constructor"]:
                # these two classes are not user-constructible
                if class_name not in ("Node", "E57Exception"):
                    lines.append(gen_constructor(class_name, entry))
            elif entry["destructor"]:
                continue
            elif entry["name"] in ("dump", "report"):
                continue
            else:
                # a repeated name means C++ overloads -> needs overload_cast
                overloaded = public_names.count(entry["name"]) >= 2
                lines.append(gen_method(class_name, entry, needs_overload=overloaded))
        lines.append("")
        lines.append("")
    return lines
def gen_enums(enums: List[CppEnum]):
    """Generate ``py::enum_`` binding lines (one declaration, one ``.value``
    per enumerator, closed by ``.export_values();``) for each parsed enum."""
    out = []
    for e in enums:
        enum_lines = ['py::enum_<{name}>(m, "{name}")']
        for value in e["values"]:
            enum_lines.append('    .value("%s", {name}::%s)' % (value["name"], value["name"]))
        enum_lines.append("    .export_values();")
        # substitute the enum name into every accumulated template line
        for line in enum_lines:
            out.append(line.format(name=e["name"]))
    return out
def generate_lines(lines, indent=""):
    """Join ``lines`` with newlines, prefixing every line with ``indent``."""
    return indent + ("\n" + indent).join(lines)
def main(path):
    """Parse the C++ header at ``path`` and print the generated pybind11
    binding code (variables, enums, then classes), indented one level."""
    base_indent = "    "
    header = CppHeaderParser.CppHeader(path)
    variables = gen_variables(header.variables)
    enums = gen_enums(header.enums)
    classes = gen_classes(header.classes)
    print(generate_lines(variables + enums + classes, base_indent))
if __name__ == '__main__':
    path = "../libE57Format/include/E57Foundation.h"
    # NOTE(review): class_order is never passed to main() or referenced below;
    # it appears to document the intended binding order -- confirm before use.
    class_order = ["Node",
                   "StructureNode",
                   "VectorNode",
                   "SourceDestBuffer",
                   "CompressedVectorNode",
                   "CompressedVectorReader",
                   "CompressedVectorWriter",
                   "IntegerNode",
                   "ScaledIntegerNode",
                   "FloatNode",
                   "StringNode",
                   "BlobNode",
                   "ImageFile",
                   "E57Exception",
                   "E57Utilities",
                   ]
    main(path)
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains various utility functions for Dynet models."""
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import numpy as np
def compute_loss(gold_seq,
                 scores,
                 index_to_token_maps,
                 gold_tok_to_id,
                 noise=0.00000001):
    """ Computes the loss of a gold sequence given scores.

    Args:
        gold_seq (`list`): A sequence of gold tokens.
        scores (`list`): Expressions representing the scores of
            potential output tokens for each token in gold_seq.
        index_to_token_maps (`list`): Maps from index in the
            sequence to a dictionary mapping from a string to a set of integers.
        gold_tok_to_id (`func`): Maps from the gold token
            and some lookup function to the indices in the probability distribution
            where the gold token occurs.
        noise (`float`, optional): The amount of noise to add to the loss.

    Returns:
        `Tensor`: representing the sum of losses over the sequence.
    """
    assert len(gold_seq) == len(scores) == len(index_to_token_maps)

    losses = []
    for i, gold_tok in enumerate(gold_seq):
        probdist = scores[i]
        token_map = index_to_token_maps[i]

        gold_indices = gold_tok_to_id(gold_tok, token_map)
        assert len(gold_indices) > 0

        # total probability mass assigned to the gold token's indices
        prob_of_tok = paddle.sum(
            paddle.index_select(probdist, paddle.to_tensor(gold_indices)))
        # clamp away from 0 and 1 so the log below stays finite
        # (removed: unused `predicted_sql` local and a dead commented-out
        # branch that conditionally zeroed the noise)
        if prob_of_tok < noise:
            prob_of_tok = prob_of_tok + noise
        elif prob_of_tok > 1 - noise:
            prob_of_tok = prob_of_tok - noise
        losses.append(-paddle.log(prob_of_tok))

    return paddle.sum(paddle.stack(losses))
def get_seq_from_scores(scores, index_to_token_maps):
    """Gets the argmax sequence from a set of scores.

    Args:
        scores (`list`): Sequences of output scores.
        index_to_token_maps (`list`): For each output token, maps
            the index in the probability distribution to a string.

    Returns:
        `list`: Representing the argmax sequence.
    """
    argmax_tokens = []
    for distribution, token_map in zip(scores, index_to_token_maps):
        values = distribution.cpu().numpy()
        assert distribution.shape[0] == len(token_map) == len(list(values))
        argmax_tokens.append(token_map[np.argmax(values)])
    return argmax_tokens
def per_token_accuracy(gold_seq, pred_seq):
    """ Returns the per-token accuracy comparing two strings (recall).

    Args:
        gold_seq (`list`): A list of gold tokens.
        pred_seq (`list`): A list of predicted tokens.

    Returns:
        `float`: Representing the accuracy.
    """
    # zip truncates at the shorter sequence, matching the original bounds check
    matches = sum(1 for gold, pred in zip(gold_seq, pred_seq) if gold == pred)
    return float(matches) / len(gold_seq)
def forward_one_multilayer(rnns, lstm_input, layer_states, dropout_amount=0.):
    """ Goes forward for one multilayer RNN cell step.

    Args:
        rnns (`list`): One RNN cell per layer.
        lstm_input (`Tensor`): Some input to the step.
        layer_states (`list`): The states of each layer in the cell.
        dropout_amount (`float`, optional): The amount of dropout to apply, in
            between the layers.

    Returns:
        (`list` , `list`), `Tensor`, (`list`): Representing (each layer's cell memory,
        each layer's cell hidden state), the final hidden state, and (each layer's updated RNNState).
    """
    num_layers = len(layer_states)
    new_states = []
    cell_states = []
    hidden_states = []
    state = lstm_input
    for i in range(num_layers):
        # feed the previous layer's (squeezed) hidden state into layer i,
        # adding back the batch dimension the cell expects
        layer_h, new_state = rnns[i](paddle.unsqueeze(state, 0),
                                     layer_states[i])
        new_states.append(new_state)
        layer_h = layer_h.squeeze()
        layer_c = new_state[1].squeeze()
        state = layer_h
        # dropout only between layers, never after the last one
        if i < num_layers - 1:
            # p stands for probability of an element to be zeroed. i.e. p=1 means switch off all activations.
            state = F.dropout(state, p=dropout_amount)
        cell_states.append(layer_c)
        hidden_states.append(layer_h)
    return (cell_states, hidden_states), state, new_states
def encode_sequence(sequence, rnns, embedder, dropout_amount=0.):
    """ Encodes a sequence given RNN cells and an embedding function.

    Args:
        sequence (`list`): The sequence to encode.
        rnns (`list`): The RNN cells (one per layer) to use.
        embedder (`func`): Function that embeds tokens to word vectors.
        dropout_amount (`float`, optional): The amount of dropout to apply.

    Returns:
        (`list`, `list`), `list`: The first pair is the (final cell memories, final cell states)
        of all layers, and the second list is a list of the final layer's cell
        state for all tokens in the sequence.
    """
    batch_size = 1
    # start every layer with zero hidden/cell states
    layer_states = []
    for rnn in rnns:
        hidden_size = rnn.weight_hh.shape[1]
        h_0 = paddle.zeros([batch_size, hidden_size])
        c_0 = paddle.zeros([batch_size, hidden_size])
        layer_states.append((h_0, c_0))
    outputs = []
    for token in sequence:
        rnn_input = embedder(token)
        (cell_states,
         hidden_states), output, layer_states = forward_one_multilayer(
             rnns, rnn_input, layer_states, dropout_amount)
        outputs.append(output)
    # cell_states/hidden_states are those of the final token's step
    return (cell_states, hidden_states), outputs
def mask_fill(input, mask, value):
    """Returns ``input`` with the entries selected by ``mask`` replaced by
    ``value`` (masked-fill via arithmetic on cast masks)."""
    keep = paddle.cast(paddle.logical_not(mask), input.dtype)
    fill = paddle.cast(mask, input.dtype) * value
    return input * keep + fill
def LSTM_output_transfer(utterance_states, final_utterance_state):
    """Reshapes paddle LSTM outputs into a per-token list of state tensors
    plus a flattened (hidden, cell) final-state pair.

    NOTE(review): the concatenation of ``hidden_state[0]``/``[1]`` assumes a
    2-direction (bidirectional) LSTM final state -- confirm against the
    encoder producing these values.
    """
    if len(utterance_states) != 0:
        # (1, seq_len, dim) -> list of seq_len tensors of shape (dim,)
        utterance_states = utterance_states.squeeze(0)
        utterance_states = paddle.split(utterance_states,
                                        utterance_states.shape[0])
        for idx in range(len(utterance_states)):
            utterance_states[idx] = utterance_states[idx].squeeze(0)
    if len(final_utterance_state) != 0:
        (hidden_state, cell_memory) = final_utterance_state
        # join the two directions' final states into single vectors
        hidden_states = paddle.concat(
            [hidden_state[0], hidden_state[1]], axis=-1).squeeze(0)
        cell_memories = paddle.concat(
            [cell_memory[0], cell_memory[1]], axis=-1).squeeze(0)
        final_utterance_state = (hidden_states.squeeze(0),
                                 cell_memories.squeeze(0))
    return utterance_states, final_utterance_state
|
# Generated by Django 2.1.9 on 2019-06-24 22:49
import django.contrib.postgres.fields
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Initial schema for the keller app: raw/clean score imports
    (RawPanelist, RawSong, CleanPanelist, CleanSong, CleanFlat) and the
    reviewed linking models (Complete, Selection, Flat).

    Auto-generated by Django 2.1.9; do not edit field definitions by hand.
    """

    initial = True

    dependencies = [
        ('rmanager', '0001_initial'),
        ('bhs', '0001_initial'),
    ]

    operations = [
        # join row between a CleanPanelist and a CleanSong with its points
        migrations.CreateModel(
            name='CleanFlat',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('points', models.IntegerField()),
            ],
        ),
        # cleaned judge/panelist rows linked to rmanager.Panelist
        migrations.CreateModel(
            name='CleanPanelist',
            fields=[
                ('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
                ('year', models.IntegerField()),
                ('season', models.IntegerField(choices=[(1, 'Summer'), (2, 'Midwinter'), (3, 'Fall'), (4, 'Spring')])),
                ('district', models.CharField(max_length=255)),
                ('convention', models.CharField(max_length=255)),
                ('session', models.IntegerField(choices=[(32, 'Chorus'), (41, 'Quartet'), (42, 'Mixed'), (43, 'Senior'), (44, 'Youth'), (45, 'Unknown'), (46, 'VLQ')])),
                ('round', models.IntegerField(choices=[(1, 'Finals'), (2, 'Semi-Finals'), (3, 'Quarter-Finals')])),
                ('category', models.IntegerField(choices=[(30, 'Music'), (40, 'Performance'), (50, 'Singing')])),
                ('num', models.IntegerField()),
                ('legacy_person', models.CharField(max_length=255)),
                ('scores', django.contrib.postgres.fields.jsonb.JSONField()),
                ('panelist', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='rmanager.Panelist')),
            ],
        ),
        # cleaned song rows linked to rmanager.Appearance/Song
        migrations.CreateModel(
            name='CleanSong',
            fields=[
                ('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
                ('year', models.IntegerField()),
                ('season', models.IntegerField(choices=[(1, 'Summer'), (2, 'Midwinter'), (3, 'Fall'), (4, 'Spring')])),
                ('district', models.CharField(max_length=255)),
                ('convention', models.CharField(max_length=255)),
                ('session', models.IntegerField(choices=[(32, 'Chorus'), (41, 'Quartet'), (42, 'Mixed'), (43, 'Senior'), (44, 'Youth'), (45, 'Unknown'), (46, 'VLQ')])),
                ('round', models.IntegerField(choices=[(1, 'Finals'), (2, 'Semi-Finals'), (3, 'Quarter-Finals')])),
                ('appearance_num', models.IntegerField()),
                ('song_num', models.IntegerField()),
                ('legacy_group', models.CharField(max_length=255)),
                ('legacy_chart', models.CharField(max_length=255)),
                ('scores', django.contrib.postgres.fields.jsonb.JSONField()),
                ('appearance', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='rmanager.Appearance')),
                ('song', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='rmanager.Song')),
            ],
        ),
        # reviewed panelist record linked to bhs.Person
        migrations.CreateModel(
            name='Complete',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('row_id', models.IntegerField(blank=True, null=True)),
                ('year', models.IntegerField(blank=True, null=True)),
                ('season_kind', models.IntegerField(blank=True, choices=[(1, 'Summer'), (2, 'Midwinter'), (3, 'Fall'), (4, 'Spring')], null=True)),
                ('district_code', models.CharField(blank=True, max_length=255)),
                ('convention_name', models.CharField(blank=True, max_length=255)),
                ('session_kind', models.IntegerField(blank=True, choices=[(32, 'Chorus'), (41, 'Quartet'), (42, 'Mixed'), (43, 'Senior'), (44, 'Youth'), (45, 'Unknown'), (46, 'VLQ')], null=True)),
                ('round_kind', models.IntegerField(blank=True, choices=[(1, 'Finals'), (2, 'Semi-Finals'), (3, 'Quarter-Finals')], null=True)),
                ('category_kind', models.IntegerField(blank=True, choices=[(30, 'Music'), (40, 'Performance'), (50, 'Singing')], null=True)),
                ('points', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(blank=True, null=True), blank=True, null=True, size=None)),
                ('panelist', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='rmanager.Panelist')),
                ('person', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='completes', to='bhs.Person')),
            ],
        ),
        # join of Complete x Selection with the underlying rmanager.Score
        migrations.CreateModel(
            name='Flat',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('complete', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='flats', to='keller.Complete')),
                ('score', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='rmanager.Score')),
            ],
        ),
        # unreviewed panelist import rows (season/round etc. still free text)
        migrations.CreateModel(
            name='RawPanelist',
            fields=[
                ('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
                ('year', models.IntegerField()),
                ('season', models.CharField(max_length=255)),
                ('district', models.CharField(max_length=255)),
                ('convention', models.CharField(max_length=255)),
                ('session', models.CharField(max_length=255)),
                ('round', models.CharField(max_length=255)),
                ('category', models.CharField(max_length=255)),
                ('num', models.IntegerField(blank=True, null=True)),
                ('judge', models.CharField(max_length=255)),
                ('scores', django.contrib.postgres.fields.jsonb.JSONField()),
            ],
        ),
        # unreviewed song import rows
        migrations.CreateModel(
            name='RawSong',
            fields=[
                ('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
                ('season', models.CharField(max_length=255)),
                ('year', models.IntegerField()),
                ('district', models.CharField(max_length=255)),
                ('event', models.CharField(max_length=255)),
                ('session', models.CharField(max_length=255)),
                ('group_name', models.CharField(max_length=255)),
                ('appearance_num', models.IntegerField()),
                ('song_num', models.IntegerField()),
                ('song_title', models.CharField(max_length=255)),
                ('totals', models.IntegerField()),
                ('scores', django.contrib.postgres.fields.jsonb.JSONField()),
            ],
        ),
        # reviewed song selection linked to rmanager.Song
        migrations.CreateModel(
            name='Selection',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('mark', models.BooleanField(default=False)),
                ('row_id', models.IntegerField(blank=True, null=True)),
                ('year', models.IntegerField(blank=True, null=True)),
                ('season_kind', models.IntegerField(blank=True, choices=[(1, 'Summer'), (2, 'Midwinter'), (3, 'Fall'), (4, 'Spring')], null=True)),
                ('district_code', models.CharField(blank=True, max_length=255)),
                ('convention_name', models.CharField(blank=True, max_length=255)),
                ('session_kind', models.IntegerField(blank=True, choices=[(32, 'Chorus'), (41, 'Quartet'), (42, 'Mixed'), (43, 'Senior'), (44, 'Youth'), (45, 'Unknown'), (46, 'VLQ')], null=True)),
                ('round_kind', models.IntegerField(blank=True, choices=[(1, 'Finals'), (2, 'Semi-Finals'), (3, 'Quarter-Finals')], null=True)),
                ('group_name', models.CharField(blank=True, max_length=255)),
                ('appearance_num', models.IntegerField(blank=True, null=True)),
                ('song_num', models.IntegerField(blank=True, null=True)),
                ('song_title', models.CharField(blank=True, max_length=255)),
                ('totals', models.IntegerField(blank=True, null=True)),
                ('points', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(blank=True, null=True), blank=True, null=True, size=None)),
                ('song', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='rmanager.Song')),
            ],
        ),
        migrations.AddField(
            model_name='flat',
            name='selection',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='flats', to='keller.Selection'),
        ),
        migrations.AddField(
            model_name='cleanflat',
            name='cleanpanelist',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cleanflats', to='keller.CleanPanelist'),
        ),
        migrations.AddField(
            model_name='cleanflat',
            name='cleansong',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cleanflats', to='keller.CleanSong'),
        ),
        migrations.AddField(
            model_name='cleanflat',
            name='score',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='rmanager.Score'),
        ),
        migrations.AlterUniqueTogether(
            name='flat',
            unique_together={('complete', 'selection', 'score')},
        ),
        migrations.AlterUniqueTogether(
            name='cleanflat',
            unique_together={('cleanpanelist', 'cleansong')},
        ),
    ]
|
# coding: utf-8
from __future__ import print_function, division
import networkx as nx
from gurobipy import GRB, GurobiError, LinExpr, Model, quicksum
class Basic_Model(object):
def create_graph(self):
G = nx.Graph()
G.add_nodes_from(range(self.n_vertices))
for v1, v2 in self.edges:
G.add_edge(v1, v2)
self.graph = G
self.node_sets = set()
self.node_set_vars = dict()
def connectivity_vars(self, cluster, v1, v2):
assert((v1, v2) not in self.edges)
connectivity_vars = []
for path in nx.all_simple_paths(self.graph, v1, v2):
node_set = tuple(sorted(path[1:-1]))
n = len(node_set)
if n == 1:
node = node_set[0]
cvar = self.mvars[cluster][node]
else:
# check if the node set is new
if not node_set in self.node_sets:
n = len(node_set)
for i in range(self.k):
var = self.model.addVar(lb=0.0, ub=1.0, vtype=GRB.BINARY)
ns_vars = [self.mvars[i][j] for j in node_set]
self.node_set_vars[(node_set, i)] = var
self.model.addConstr(quicksum(ns_vars) - n*var <= n-1)
self.model.addConstr(quicksum(ns_vars) - n*var >= 0)
self.node_sets.add(node_set)
cvar = self.node_set_vars[(node_set, cluster)]
connectivity_vars.append(cvar)
return connectivity_vars
def __init__(self, n_vertices, edges, constraints, k, gamma,
verbosity=0,
symmetry_breaking=True,
overlap=False,
timeout=None):
self.check_graph(n_vertices, edges)
self.n_vertices = n_vertices
self.edges = edges
self.k = k
self.verbosity = verbosity
self.timeout = timeout
self.create_graph()
self.model = Model('graph_clustering')
self.model.params.updatemode = 1
self.mvars = []
for i in range(k):
cvars = []
for j in range(n_vertices):
v = self.model.addVar(lb=0.0, ub=1.0, vtype=GRB.BINARY)
cvars.append(v)
self.mvars.append(cvars)
ineq_sense = GRB.GREATER_EQUAL if overlap else GRB.EQUAL
# constraint: each vertex in exactly/at least one cluster
for v in range(n_vertices):
self.model.addConstr(quicksum([self.mvars[i][v] for i in range(k)]),
ineq_sense, 1)
# connectivity constraints:
for v1 in range(n_vertices):
for v2 in range(v1+1, n_vertices):
if (v1, v2) in self.edges: continue
for i in range(k):
cvars = self.connectivity_vars(i, v1, v2)
self.model.addConstr(self.mvars[i][v1] + self.mvars[i][v2],
GRB.LESS_EQUAL,
quicksum(cvars) + 1)
# symmetry-breaking constraints
if symmetry_breaking:
self.model.addConstr(self.mvars[0][0], GRB.EQUAL, 1)
for i in range(2, k):
self.model.addConstr(quicksum([self.mvars[i-1][j] for j in range(n_vertices)]),
GRB.LESS_EQUAL,
quicksum([self.mvars[i][j] for j in range(n_vertices)]))
obj_expr = LinExpr()
wsum = sum(w for (_, _, w) in constraints)
gamma = gamma/wsum
# indicators for violation of cl constraints
for (u, v, w) in constraints:
for i in range(k):
y = self.model.addVar(lb=0.0, ub=1.0, vtype=GRB.BINARY)
self.model.addConstr(y >= self.mvars[i][u] + self.mvars[i][v] - 1)
obj_expr.add(y, -w * gamma)
# size of smallest cluster
s = self.model.addVar(lb=0.0, ub=n_vertices, vtype=GRB.INTEGER)
for i in range(k):
self.model.addConstr(s <= quicksum([self.mvars[i][v] for v in range(n_vertices)]))
s_coef = 1/n_vertices if overlap else k/n_vertices
obj_expr.add(s_coef * s)
self.model.setObjective(obj_expr, GRB.MAXIMIZE)
self.model.update()
self.model.params.OutputFlag = self.verbosity
def check_graph(self, n_vertices, edges):
vertices = set([i for (i, _) in edges])
vertices |= set([i for (_, i) in edges])
assert(vertices == set(range(n_vertices)))
for u, v in edges:
assert(u < v)
assert(u < n_vertices)
def solve(self):
if self.timeout:
self.model.Params.TimeLimit = self.timeout
try:
self.model.optimize()
except GurobiError:
print(GurobiError.message)
self.objective = None
self.clusters = None
self.optimal = (self.model.Status == GRB.OPTIMAL)
self.runtime = self.model.Runtime
self.node_count = self.model.nodecount
self.mip_gap = self.model.mipgap
self.objective = self.model.ObjVal
if self.model.solcount > 0:
clusters = []
for i in range(self.k):
cluster = []
for j in range(self.n_vertices):
if abs(self.mvars[i][j].x) > 1e-4:
cluster.append(j)
clusters.append(cluster)
self.clusters = clusters
    def print_stat(self):
        """No-op placeholder; no statistics are printed in this implementation."""
        pass
|
import time
import random
from bisect import bisect_right

start_time = time.time()
random.seed(0)  # fixed seed keeps every run reproducible
# Random one-way fares for the outbound and return legs.
ROUTES_TO = [random.randint(0, 10000) for n in range(10000)]
ROUTES_BACK = [random.randint(0, 10000) for n in range(10000)]
MAX = 10000

# Performance fix: the original double loop tested all 10^8 pairs.
# Count duplicates of every return fare and keep the distinct fares sorted,
# so the best affordable partner for each outbound fare is found by binary
# search — O(n log n) overall, same results.
back_counts = {}
for fare in ROUTES_BACK:
    back_counts[fare] = back_counts.get(fare, 0) + 1
distinct_backs = sorted(back_counts)

# Pass 1: largest combined fare that does not exceed MAX.
current_max = 0
for fare in ROUTES_TO:
    idx = bisect_right(distinct_backs, MAX - fare)
    if idx == 0:
        continue  # no return fare fits within this outbound fare's budget
    best_total = fare + distinct_backs[idx - 1]
    if best_total > current_max:
        current_max = best_total

# Pass 2: count every (outbound, return) pair reaching that maximum.
count = 0
for fare in ROUTES_TO:
    count += back_counts.get(current_max - fare, 0)

end_time = time.time()
print(f'upper limit set to = {MAX}')
print(f'current maximum possible = {current_max}, number of combinations = {count}')
print(f'time elapsed = {round(end_time - start_time, 2)}')
|
"""
MIT License
Copyright (c) 2020 xPolar
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Packages.
## Packages default to Python.
from typing import Union
## Packages that have to be installed through the package manager.
import discord
from colorama import Fore, Style
from discord.ext import commands
## Packages on this machine.
import Config
from Utilities import embed_color
class Block(commands.Cog):
    """Owner-only commands for blocking and unblocking users from making
    suggestions. Blocked users are stored in the MongoDB collection
    Config.CLUSTER["users"]["blocked"].
    """
    def __init__(self, bot):
        self.bot = bot
    async def cog_check(self, ctx):
        # Every command in this cog is restricted to the bot owners.
        return ctx.author.id in Config.OWNERIDS
    @commands.command(aliases = ["blacklist"])
    async def block(self, ctx, user : Union[discord.Member, discord.User, int] = None, reason = None):
        """Block a user from creating suggestions."""
        if user is None:
            embed = discord.Embed(
                title = "Empty Argument",
                description = "Please provide a user to block!",
                color = Config.ERRORCOLOR
            )
            # Bug fix: this error embed was previously built but never sent.
            return await ctx.send(embed = embed)
        if isinstance(user, int):
            try: # Use self.bot.fetch_user to turn an int into discord.User, if discord.NotFound is raised ask the user to provide a valid user.
                user = await self.bot.fetch_user(user)
            except discord.NotFound:
                embed = discord.Embed(
                    title = "Invalid Argument",
                    description = "Please provide a valid user!",
                    color = Config.ERRORCOLOR
                )
                return await ctx.send(embed = embed)
        embed = discord.Embed(
            title = "Blocked",
            description = f"You have blocked `{user}` from making suggestions{ f' for: {reason}' if reason is not None else '!' }",
            color = embed_color(ctx.author) if ctx.guild else Config.MAINCOLOR
        )
        # Upsert so re-blocking a user simply refreshes the stored reason.
        Config.CLUSTER["users"]["blocked"].update_one({"_id": user.id}, {"$set": {"reason": "No reason specified." if reason is None else reason}}, upsert = True)
        # Bug fix: added the missing leading space before "for:" in the log line.
        print(f"{Style.BRIGHT}{Fore.RED}[USER-BLOCKED]{Fore.WHITE} {Fore.YELLOW}{user.id}{Fore.WHITE} has been blocked by {Fore.YELLOW}{ctx.author.name}{Fore.WHITE}{ f' for: {reason}' if reason is not None else '!' }{Fore.RESET}")
        await ctx.send(embed = embed)
    @block.error
    async def block_error(self, ctx, error):
        """Block command error handler.
        Args:
            ctx (discord.Context): discord.py's context object.
            error (Exception): The exception that was raised.
        """
        if isinstance(error, commands.BadUnionArgument):
            embed = discord.Embed(
                title = "Invalid Argument",
                description = "Please provide a valid user to block!",
                color = Config.ERRORCOLOR
            )
            await ctx.send(embed = embed)
    @commands.command(aliases = ["whitelist"])
    async def unblock(self, ctx, user : Union[discord.Member, discord.User, int] = None):
        """Unblock a user from creating suggestions."""
        if user is None:
            embed = discord.Embed(
                title = "Empty Argument",
                description = "Please provide a user to block!",
                color = Config.ERRORCOLOR
            )
            # Bug fix: this error embed was previously built but never sent.
            return await ctx.send(embed = embed)
        if isinstance(user, int):
            try: # Use self.bot.fetch_user to turn an int into discord.User, if discord.NotFound is raised ask the user to provide a valid user.
                user = await self.bot.fetch_user(user)
            except discord.NotFound:
                embed = discord.Embed(
                    title = "Invalid Argument",
                    description = "Please provide a valid user!",
                    color = Config.ERRORCOLOR
                )
                return await ctx.send(embed = embed)
        embed = discord.Embed(
            title = "Unblocked",
            description = f"You have unblocked `{user}` from making suggestions!",
            color = embed_color(ctx.author) if ctx.guild else Config.MAINCOLOR
        )
        Config.CLUSTER["users"]["blocked"].delete_one({"_id": user.id})
        print(f"{Style.BRIGHT}{Fore.CYAN}[USER-UNBLOCKED]{Fore.WHITE} {Fore.YELLOW}{user.id}{Fore.WHITE} has been unblocked by {Fore.YELLOW}{ctx.author.name}{Fore.WHITE}!{Fore.RESET}")
        await ctx.send(embed = embed)
    @unblock.error
    async def unblock_error(self, ctx, error):
        """Unblock command error handler.
        Args:
            ctx (discord.Context): discord.py's context object.
            error (Exception): The exception that was raised.
        """
        if isinstance(error, commands.BadUnionArgument):
            embed = discord.Embed(
                title = "Invalid Argument",
                description = "Please provide a valid user to unblock!",
                color = Config.ERRORCOLOR
            )
            await ctx.send(embed = embed)
def setup(bot):
    """Entry point used by discord.py to register this cog on the bot."""
    cog = Block(bot)
    bot.add_cog(cog)
|
#!/usr/bin/env python
from math import atan2, sin, cos
from .controller import Controller
class PIDController(Controller):
    """Discrete PID trajectory-tracking controller.

    Runs two independent PID loops — one for the linear speed (v) and one
    for the angular speed (w) — using the incremental (velocity) form of
    the discrete PID, so only the previous control action and the last two
    errors are stored between iterations.
    """
    def __init__(self, trajectory, simulation_data, control_constants, speed_limits):
        """Store the PID gains and speed limits and reset the state.

        Arguments:
            trajectory -- reference trajectory (must provide get_position_at)
            simulation_data -- forwarded to the base Controller
            control_constants -- dict with keys kpv/kiv/kdv and kpw/kiw/kdw
            speed_limits -- dict with keys 'linear' and 'angular'
        """
        Controller.__init__(self, trajectory, simulation_data)
        # Linear-speed loop gains.
        self.K_P_V = control_constants['kpv']
        self.K_I_V = control_constants['kiv']
        self.K_D_V = control_constants['kdv']
        # Angular-speed loop gains.
        self.K_P_W = control_constants['kpw']
        self.K_I_W = control_constants['kiw']
        self.K_D_W = control_constants['kdw']
        # Previous control actions (n-1) and errors (n-1, n-2).
        self.v_c_nm1 = 0
        self.w_c_nm1 = 0
        self.e_v_nm1 = 0
        self.e_w_nm1 = 0
        self.e_v_nm2 = 0
        self.e_w_nm2 = 0
        # Symmetric saturation limits for actions and references.
        self.MAX_V = speed_limits['linear']
        self.MAX_W = speed_limits['angular']
    @staticmethod
    def _clamp(value, limit):
        """Saturate value to the symmetric interval [-limit, limit].

        Consolidates the four identical clamping blocks the class used to
        repeat for references and control actions.
        """
        if value > limit:
            return limit
        if value < -limit:
            return -limit
        return value
    def set_next_reference(self):
        """Cache the reference position for the next sample (n + 1)."""
        reference = self.trajectory.get_position_at((self.i + 1) * self.delta)
        self.x_ref_n_plus_1 = reference.x
        self.y_ref_n_plus_1 = reference.y
    def compute_control_actions(self, pose, twist, i):
        """Compute the v/w control actions for iteration i."""
        self.i = i
        self.set_current_orientation(pose.orientation)
        self.set_current_position(pose.position)
        self.set_current_reference(self.trajectory.get_position_at(i * self.delta))
        self.set_next_reference()
        v, w = self.get_velocities(twist)
        # Saturated speed references derived from the trajectory.
        v_ref_n = self.limit_linear_speed_reference(self.compute_linear_speed_reference())
        w_ref_n = self.limit_angular_speed_reference(self.compute_angular_speed_reference())
        self.e_v_n, self.e_w_n = self.compute_errors(v, v_ref_n, w, w_ref_n)
        # Incremental PID: u_n = u_{n-1} + a*e_n + b*e_{n-1} + c*e_{n-2}.
        a_v, b_v, c_v = self.compute_v_pid_factors()
        a_w, b_w, c_w = self.compute_w_pid_factors()
        self.v_c_n = self.v_c_nm1 + a_v * self.e_v_n + b_v * self.e_v_nm1 + c_v * self.e_v_nm2
        self.w_c_n = self.w_c_nm1 + a_w * self.e_w_n + b_w * self.e_w_nm1 + c_w * self.e_w_nm2
        self.limit_linear_speed_control_action()
        self.limit_angular_speed_control_action()
        self.store_values_for_next_iteration()
    def store_values_for_next_iteration(self):
        """Shift the control actions and errors one sample back in time."""
        self.v_c_nm1 = self.v_c_n
        self.w_c_nm1 = self.w_c_n
        self.e_v_nm2 = self.e_v_nm1
        self.e_w_nm2 = self.e_w_nm1
        self.e_v_nm1 = self.e_v_n
        self.e_w_nm1 = self.e_w_n
    def limit_angular_speed_control_action(self):
        """Saturate the angular control action to [-MAX_W, MAX_W]."""
        self.w_c_n = self._clamp(self.w_c_n, self.MAX_W)
    def limit_linear_speed_control_action(self):
        """Saturate the linear control action to [-MAX_V, MAX_V]."""
        self.v_c_n = self._clamp(self.v_c_n, self.MAX_V)
    def compute_errors(self, v, v_ref_n, w, w_ref_n):
        """Return the (linear, angular) speed tracking errors."""
        return v_ref_n - v, w_ref_n - w
    def compute_w_pid_factors(self):
        """Return the incremental-PID coefficients for the angular loop
        (trapezoidal integral, backward-difference derivative).
        """
        a_w = self.K_P_W + self.K_I_W * self.delta / 2 + self.K_D_W / self.delta
        b_w = -self.K_P_W + self.K_I_W * self.delta / 2 - 2 * self.K_D_W / self.delta
        c_w = self.K_D_W / self.delta
        return a_w, b_w, c_w
    def compute_v_pid_factors(self):
        """Return the incremental-PID coefficients for the linear loop."""
        a_v = self.K_P_V + self.K_I_V * self.delta / 2 + self.K_D_V / self.delta
        b_v = -self.K_P_V + self.K_I_V * self.delta / 2 - 2 * self.K_D_V / self.delta
        c_v = self.K_D_V / self.delta
        return a_v, b_v, c_v
    def limit_angular_speed_reference(self, w_ref_n):
        """Saturate the angular speed reference to [-MAX_W, MAX_W]."""
        return self._clamp(w_ref_n, self.MAX_W)
    def compute_angular_speed_reference(self):
        """Angular speed needed to head towards the next reference point.

        NOTE(review): theta_n is normalised via atan2(sin, cos), but the
        difference theta_ref_n - theta_n is not wrapped to [-pi, pi]; the
        saturation bounds the effect — confirm this is intended.
        """
        self.theta_ref_n = atan2(self.y_ref_n_plus_1 - self.y_n, self.x_ref_n_plus_1 - self.x_n)
        self.theta_n = atan2(sin(self.theta_n), cos(self.theta_n))
        return (self.theta_ref_n - self.theta_n) / self.delta
    def limit_linear_speed_reference(self, v_ref_n):
        """Saturate the linear speed reference to [-MAX_V, MAX_V]."""
        return self._clamp(v_ref_n, self.MAX_V)
    def compute_linear_speed_reference(self):
        """Speed required to reach the next reference point in one period."""
        v_x_ref = (self.x_ref_n_plus_1 - self.x_n) / self.delta
        v_y_ref = (self.y_ref_n_plus_1 - self.y_n) / self.delta
        return (v_x_ref ** 2 + v_y_ref ** 2) ** 0.5
    def get_velocities(self, twist):
        """Extract the planar linear speed magnitude and yaw rate from a twist."""
        v = (twist.linear.x ** 2 + twist.linear.y ** 2) ** 0.5
        return v, twist.angular.z
|
# -*- coding: utf-8 -*-
# Copyright 2013 Dev in Cachu authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import unittest
from django.db import models as django_models
from .. import models
class ParticipanteTestCase(unittest.TestCase):
    """Schema tests for the Participante model: field presence, types,
    lengths, choices, defaults and string representations.

    NOTE(review): relies on legacy Django meta APIs
    (`get_all_field_names` / `get_field_by_name`, removed in Django 1.10)
    and on the Python 2 `unicode` builtin — this suite targets the
    Python 2 / old-Django stack the project was written for.
    """
    @classmethod
    def setUpClass(cls):
        # Fetched once: every test only reads from the model's _meta.
        cls.field_names = models.Participante._meta.get_all_field_names()
    def test_deve_ter_campo_com_o_nome_do_participante(self):
        self.assertIn("nome", self.field_names)
    def test_nome_deve_ser_do_tipo_CharField(self):
        field = models.Participante._meta.get_field_by_name("nome")[0]
        self.assertIsInstance(field, django_models.CharField)
    def test_nome_deve_ter_no_maximo_100_caracteres(self):
        field = models.Participante._meta.get_field_by_name("nome")[0]
        self.assertEqual(100, field.max_length)
    def test_deve_ter_campo_nome_no_cracha(self):
        self.assertIn("nome_cracha", self.field_names)
    def test_nome_no_cracha_deve_ser_do_tipo_CharField(self):
        field = models.Participante._meta.get_field_by_name("nome_cracha")[0]
        self.assertIsInstance(field, django_models.CharField)
    def test_nome_no_cracha_deve_ter_no_maximo_100_caracteres(self):
        field = models.Participante._meta.get_field_by_name("nome_cracha")[0]
        self.assertEqual(100, field.max_length)
    def test_nome_no_cracha_nao_deve_ser_obrigatorio(self):
        field = models.Participante._meta.get_field_by_name("nome_cracha")[0]
        self.assertTrue(field.blank)
        self.assertTrue(field.null)
    def test_nome_no_cracha_deve_ter_verbose_name_com_acento(self):
        field = models.Participante._meta.get_field_by_name("nome_cracha")[0]
        self.assertEqual(u"Nome no crachá", field.verbose_name)
    def test_deve_ter_campo_sexo(self):
        self.assertIn("sexo", self.field_names)
    def test_campo_sexo_deve_ser_do_tipo_CharField(self):
        field = models.Participante._meta.get_field_by_name("sexo")[0]
        self.assertIsInstance(field, django_models.CharField)
    def test_campo_sexo_deve_ter_no_maximo_1_caracter(self):
        field = models.Participante._meta.get_field_by_name("sexo")[0]
        self.assertEqual(1, field.max_length)
    def test_campo_sexo_deve_ter_m_ou_f(self):
        esperado = (
            (u"M", u"Masculino"),
            (u"F", u"Feminino"),
        )
        field = models.Participante._meta.get_field_by_name("sexo")[0]
        self.assertEqual(esperado, field.choices)
    def test_deve_ter_campo_email(self):
        self.assertIn("email", self.field_names)
    def test_email_deve_ser_do_tipo_EmailField(self):
        field = models.Participante._meta.get_field_by_name("email")[0]
        self.assertIsInstance(field, django_models.EmailField)
    def test_email_deve_ter_no_maximo_100_caracteres(self):
        field = models.Participante._meta.get_field_by_name("email")[0]
        self.assertEqual(100, field.max_length)
    def test_email_e_status_devem_ser_unique_juntos(self):
        self.assertEqual((u'email', u'status'),
                         models.Participante._meta.unique_together[0])
    def test_deve_ter_campo_status(self):
        self.assertIn("status", self.field_names)
    def test_campo_status_deve_ser_do_tipo_CharField(self):
        field = models.Participante._meta.get_field_by_name("status")[0]
        self.assertIsInstance(field, django_models.CharField)
    def test_campo_status_deve_ter_no_maximo_20_caracteres(self):
        field = models.Participante._meta.get_field_by_name("status")[0]
        self.assertEqual(20, field.max_length)
    def test_campo_stauts_deve_ser_AGUARDANDO_por_padrao(self):
        field = models.Participante._meta.get_field_by_name("status")[0]
        self.assertEqual(u"AGUARDANDO", field.default)
    def test_campo_status_deve_ter_choices_adequados(self):
        esperado = (
            (u'AGUARDANDO', u'Aguardando pagamento'),
            (u'CONFIRMADO', u'Confirmado'),
            (u'CANCELADO', u'Cancelado'),
            (u'CORTESIA', u'Cortesia'),
            (u'PALESTRANTE', u'Palestrante'),
            (u'ORGANIZACAO', u'Organização'),
            (u'CARAVANA', u'Caravana'),
        )
        field = models.Participante._meta.get_field_by_name("status")[0]
        self.assertEqual(esperado, field.choices)
    def test_deve_ter_campo_para_tamanho_de_camiseta(self):
        self.assertIn("tamanho_camiseta", self.field_names)
    def test_tamanho_de_camiseta_deve_ser_CharField(self):
        field = models.Participante._meta.get_field_by_name(
            "tamanho_camiseta")[0]
        self.assertIsInstance(field, django_models.CharField)
    def test_tamanho_de_camiseta_deve_ter_no_maximo_2_caracteres(self):
        field = models.Participante._meta.get_field_by_name(
            "tamanho_camiseta")[0]
        self.assertEqual(2, field.max_length)
    def test_tamanho_de_camiseta_deve_ter_verbose_name_descritivo(self):
        field = models.Participante._meta.get_field_by_name(
            "tamanho_camiseta")[0]
        self.assertEqual(u"Tamanho da camiseta", field.verbose_name)
    def test_tamanho_de_camiseta_deve_ter_options_limitadas(self):
        esperado = (
            (u'P', u'P (53cm x 71cm)'),
            (u'M', u'M (56cm x 74cm)'),
            (u'G', u'G (58cm x 76cm)'),
            (u'GG', u'GG (62cm x 80cm)'),
        )
        field = models.Participante._meta.get_field_by_name(
            "tamanho_camiseta")[0]
        self.assertEqual(esperado, field.choices)
    def test_deve_ter_instituicao_de_ensino(self):
        self.assertIn("instituicao_ensino", self.field_names)
    def test_instituicao_de_ensino_deve_ser_do_tipo_CharField(self):
        field = models.Participante._meta.get_field_by_name(
            "instituicao_ensino")[0]
        self.assertIsInstance(field, django_models.CharField)
    def test_instituicao_de_ensino_deve_ter_no_maximo_100_caracteres(self):
        field = models.Participante._meta.get_field_by_name(
            "instituicao_ensino")[0]
        self.assertEqual(100, field.max_length)
    def test_instituicao_de_ensino_nao_deve_ser_obrigatorio(self):
        field = models.Participante._meta.get_field_by_name(
            "instituicao_ensino")[0]
        self.assertTrue(field.blank)
        self.assertTrue(field.null)
    def test_instituicao_de_ensino_verbose_name_deve_citar_estudante(self):
        field = models.Participante._meta.get_field_by_name(
            "instituicao_ensino")[0]
        self.assertEqual(u"Instituição de ensino (estudantes)",
                         field.verbose_name)
    def test_deve_ter_empresa(self):
        self.assertIn("empresa", self.field_names)
    def test_empresa_deve_ser_do_tipo_CharField(self):
        field = models.Participante._meta.get_field_by_name("empresa")[0]
        self.assertIsInstance(field, django_models.CharField)
    def test_empresa_deve_ter_no_maximo_100_caracteres(self):
        field = models.Participante._meta.get_field_by_name("empresa")[0]
        self.assertEqual(100, field.max_length)
    def test_empresa_nao_deve_ser_obrigatorio(self):
        field = models.Participante._meta.get_field_by_name("empresa")[0]
        self.assertTrue(field.blank)
        self.assertTrue(field.null)
    def test_empresa_deve_ter_verbose_name_empresa_onde_trabalha(self):
        field = models.Participante._meta.get_field_by_name("empresa")[0]
        self.assertEqual(u"Empresa onde trabalha", field.verbose_name)
    def test_deve_ter_cidade_estado(self):
        self.assertIn("cidade", self.field_names)
    def test_cidade_estado_deve_ser_CharField(self):
        field = models.Participante._meta.get_field_by_name("cidade")[0]
        self.assertIsInstance(field, django_models.CharField)
    def test_cidade_estado_deve_ter_no_maximo_255_caracteres(self):
        field = models.Participante._meta.get_field_by_name("cidade")[0]
        self.assertEqual(255, field.max_length)
    def test_cidade_estado_deve_ter_verbose_name_com_barra(self):
        field = models.Participante._meta.get_field_by_name("cidade")[0]
        self.assertEqual(u"Cidade/Estado", field.verbose_name)
    def test_deve_ter_field_presente(self):
        self.assertIn("presente", self.field_names)
    def test_presente_deve_ser_boolean_field(self):
        field = models.Participante._meta.get_field_by_name("presente")[0]
        self.assertIsInstance(field, django_models.BooleanField)
    def test_presente_deve_ser_False_por_padrao(self):
        field = models.Participante._meta.get_field_by_name("presente")[0]
        self.assertEqual(False, field.default)
    def test_deve_ter_campo_observacao(self):
        self.assertIn("observacao", self.field_names)
    def test_observacao_deve_ter_char_field(self):
        field = models.Participante._meta.get_field_by_name("observacao")[0]
        self.assertIsInstance(field, django_models.CharField)
    def test_observacao_deve_ter_no_maximo_1000_chars(self):
        field = models.Participante._meta.get_field_by_name("observacao")[0]
        self.assertEqual(1000, field.max_length)
    def test_observacao_nao_deve_ser_obrigatorio(self):
        field = models.Participante._meta.get_field_by_name("observacao")[0]
        self.assertTrue(field.blank)
        self.assertTrue(field.null)
    def test_observacao_deve_ter_verbose_name(self):
        field = models.Participante._meta.get_field_by_name("observacao")[0]
        self.assertEqual(u"Observação", field.verbose_name)
    def test__repr__deve_ter_nome(self):
        participante = models.Participante(nome=u"Francisco Souza")
        self.assertEqual(u"<Participante: Francisco Souza>",
                         repr(participante))
    def test__unicode__deve_ser_o_nome(self):
        # Python 2 only: `unicode` does not exist on Python 3.
        participante = models.Participante(nome=u"Francisco Souza")
        self.assertEqual(u"Francisco Souza", unicode(participante))
|
#!/usr/bin/env python3
# Copyright 2022 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
import stat
import unittest
import tempfile
import os
from common.common_arguments import build_common_argument_parser
from common.benchmark_config import BenchmarkConfig, TraceCaptureConfig
class BenchmarkConfigTest(unittest.TestCase):
  """Tests for BenchmarkConfig.build_from_args."""

  def setUp(self):
    # Scratch build/tmp trees plus the tool locations the config refers to.
    self.build_dir = tempfile.TemporaryDirectory()
    self.tmp_dir = tempfile.TemporaryDirectory()
    self.normal_tool_dir = os.path.join(self.build_dir.name, "normal_tool")
    os.mkdir(self.normal_tool_dir)
    self.traced_tool_dir = os.path.join(self.build_dir.name, "traced_tool")
    os.mkdir(self.traced_tool_dir)
    self.trace_capture_tool = tempfile.NamedTemporaryFile()
    os.chmod(self.trace_capture_tool.name, stat.S_IEXEC)

  def tearDown(self):
    self.tmp_dir.cleanup()
    self.build_dir.cleanup()

  def _parse_args(self, *flags):
    """Parse the given flags followed by the positional build directory."""
    return build_common_argument_parser().parse_args(
        [*flags, self.build_dir.name])

  def test_build_from_args(self):
    args = self._parse_args(
        f"--tmp_dir={self.tmp_dir.name}",
        f"--normal_benchmark_tool_dir={self.normal_tool_dir}",
        f"--traced_benchmark_tool_dir={self.traced_tool_dir}",
        f"--trace_capture_tool={self.trace_capture_tool.name}",
        f"--capture_tarball=capture.tar", f"--driver_filter_regex=a",
        f"--model_name_regex=b", f"--mode_regex=c", f"--keep_going",
        f"--benchmark_min_time=10")

    config = BenchmarkConfig.build_from_args(args=args, git_commit_hash="abcd")

    commit_tmp_dir = os.path.join(self.tmp_dir.name, "abcd")
    expected_capture_config = TraceCaptureConfig(
        traced_benchmark_tool_dir=self.traced_tool_dir,
        trace_capture_tool=self.trace_capture_tool.name,
        capture_tarball=os.path.realpath("capture.tar"),
        capture_tmp_dir=os.path.join(commit_tmp_dir, "captures"))
    expected_config = BenchmarkConfig(
        root_benchmark_dir=os.path.join(self.build_dir.name,
                                        "benchmark_suites"),
        benchmark_results_dir=os.path.join(commit_tmp_dir,
                                           "benchmark-results"),
        normal_benchmark_tool_dir=self.normal_tool_dir,
        trace_capture_config=expected_capture_config,
        driver_filter="a",
        model_name_filter="b",
        mode_filter="c",
        keep_going=True,
        benchmark_min_time=10)
    self.assertEqual(config, expected_config)

  def test_build_from_args_benchmark_only(self):
    args = self._parse_args(
        f"--tmp_dir={self.tmp_dir.name}",
        f"--normal_benchmark_tool_dir={self.normal_tool_dir}")

    config = BenchmarkConfig.build_from_args(args=args, git_commit_hash="abcd")

    self.assertIsNone(config.trace_capture_config)

  def test_build_from_args_invalid_capture_args(self):
    # Supplying a traced tool dir without the capture tool is invalid.
    args = self._parse_args(
        f"--tmp_dir={self.tmp_dir.name}",
        f"--normal_benchmark_tool_dir={self.normal_tool_dir}",
        f"--traced_benchmark_tool_dir={self.traced_tool_dir}")

    with self.assertRaises(ValueError):
      BenchmarkConfig.build_from_args(args=args, git_commit_hash="abcd")
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
  unittest.main()
|
from typing import Dict
import pythonbible as bible
# noinspection SpellCheckingInspection
# Mapping from pythonbible Book values to their short textual IDs
# (the abbreviations appear to follow the OSIS convention — e.g. "Gen",
# "1Kgs" — TODO confirm). Commented entries are books the library does
# not currently expose.
BOOK_IDS: Dict[bible.Book, str] = {
    bible.Book.GENESIS: "Gen",
    bible.Book.EXODUS: "Exod",
    bible.Book.LEVITICUS: "Lev",
    bible.Book.NUMBERS: "Num",
    bible.Book.DEUTERONOMY: "Deut",
    bible.Book.JOSHUA: "Josh",
    bible.Book.JUDGES: "Judg",
    bible.Book.RUTH: "Ruth",
    bible.Book.SAMUEL_1: "1Sam",
    bible.Book.SAMUEL_2: "2Sam",
    bible.Book.KINGS_1: "1Kgs",
    bible.Book.KINGS_2: "2Kgs",
    bible.Book.CHRONICLES_1: "1Chr",
    bible.Book.CHRONICLES_2: "2Chr",
    bible.Book.EZRA: "Ezra",
    bible.Book.NEHEMIAH: "Neh",
    bible.Book.ESTHER: "Esth",
    bible.Book.JOB: "Job",
    bible.Book.PSALMS: "Ps",
    bible.Book.PROVERBS: "Prov",
    bible.Book.ECCLESIASTES: "Eccl",
    bible.Book.SONG_OF_SONGS: "Song",
    bible.Book.ISAIAH: "Isa",
    bible.Book.JEREMIAH: "Jer",
    bible.Book.LAMENTATIONS: "Lam",
    bible.Book.EZEKIEL: "Ezek",
    bible.Book.DANIEL: "Dan",
    bible.Book.HOSEA: "Hos",
    bible.Book.JOEL: "Joel",
    bible.Book.AMOS: "Amos",
    bible.Book.OBADIAH: "Obad",
    bible.Book.JONAH: "Jonah",
    bible.Book.MICAH: "Mic",
    bible.Book.NAHUM: "Nah",
    bible.Book.HABAKKUK: "Hab",
    bible.Book.ZEPHANIAH: "Zeph",
    bible.Book.HAGGAI: "Hag",
    bible.Book.ZECHARIAH: "Zech",
    bible.Book.MALACHI: "Mal",
    bible.Book.MATTHEW: "Matt",
    bible.Book.MARK: "Mark",
    bible.Book.LUKE: "Luke",
    bible.Book.JOHN: "John",
    bible.Book.ACTS: "Acts",
    bible.Book.ROMANS: "Rom",
    bible.Book.CORINTHIANS_1: "1Cor",
    bible.Book.CORINTHIANS_2: "2Cor",
    bible.Book.GALATIANS: "Gal",
    bible.Book.EPHESIANS: "Eph",
    bible.Book.PHILIPPIANS: "Phil",
    bible.Book.COLOSSIANS: "Col",
    bible.Book.THESSALONIANS_1: "1Thess",
    bible.Book.THESSALONIANS_2: "2Thess",
    bible.Book.TIMOTHY_1: "1Tim",
    bible.Book.TIMOTHY_2: "2Tim",
    bible.Book.TITUS: "Titus",
    bible.Book.PHILEMON: "Phlm",
    bible.Book.HEBREWS: "Heb",
    bible.Book.JAMES: "Jas",
    bible.Book.PETER_1: "1Pet",
    bible.Book.PETER_2: "2Pet",
    bible.Book.JOHN_1: "1John",
    bible.Book.JOHN_2: "2John",
    bible.Book.JOHN_3: "3John",
    bible.Book.JUDE: "Jude",
    bible.Book.REVELATION: "Rev",
    # bible.Book.BARUCH: "Bar",
    # bible.Book.ADDITIONS_TO_DANIEL: "AddDan",
    # bible.Book.PRAYER_OF_AZARIAH: "PrAzar",
    # bible.Book.BEL_AND_THE_DRAGON: "Bel",
    # bible.Book.SONG_OF_THE_THREE_YOUNG_MEN: "SgThree",
    # bible.Book.SUSANNA: "Sus",
    bible.Book.ESDRAS_1: "1Esd",
    # bible.Book.ESDRAS_2: "2Esd",
    # bible.Book.ADDITIONS_TO_ESTHER: "AddEsth",
    # bible.Book.EPISTLE_OF_JEREMIAH: "EpJer",
    # bible.Book.JUDITH: "Jdt",
    bible.Book.MACCABEES_1: "1Macc",
    bible.Book.MACCABEES_2: "2Macc",
    # bible.Book.MACCABEES_3: "3Macc",
    # bible.Book.MACCABEES_4: "4Macc",
    # bible.Book.PRAYER_OF_MANASSEH: "PrMan",
    bible.Book.ECCLESIASTICUS: "Sir",
    bible.Book.TOBIT: "Tobit",
    bible.Book.WISDOM_OF_SOLOMON: "Wis",
}

# Performance: reverse index built once at import time so lookups by ID
# are O(1) instead of scanning the whole table on every call.
_BOOKS_BY_ID: Dict[str, bible.Book] = {
    book_id: book for book, book_id in BOOK_IDS.items()
}


def get_book_by_id(book_id: str) -> bible.Book:
    """Return the Book whose short ID equals `book_id`.

    Raises:
        bible.InvalidBookError: when no book uses that ID.
    """
    try:
        return _BOOKS_BY_ID[book_id]
    except KeyError:
        raise bible.InvalidBookError from None
|
import argparse
import os
from tqdm import tqdm
import torch
import pandas as pd
from pathlib import Path
from models.experimental import attempt_load
from utils.datasets import LoadImages
from utils.general import check_img_size, non_max_suppression, scale_coords
from utils.torch_utils import select_device
parser = argparse.ArgumentParser()
# Cross-validation folds and external image sources to run inference on.
parser.add_argument("--folds", default=[0,1,2,3,4], nargs="+", type=int)
parser.add_argument("--sources", default=['padchest', 'pneumothorax', 'vin'], nargs="+", type=str)
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--batch-size', type=int, default=16)
parser.add_argument("--workers", default=4, type=int)
# Detection thresholds.
parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
# Output / visualisation options (standard YOLOv5 detect.py flags).
parser.add_argument('--view-img', action='store_true', help='display results')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--update', action='store_true', help='update all models')
parser.add_argument('--project', default='runs/detect', help='save results to project/name')
parser.add_argument('--name', default='exp', help='save results to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
parser.add_argument('--frac', default=1.0, type=float)
# Parsed once at import time; the __main__ section below reads `opt`.
opt = parser.parse_args()
print(opt)
def refine_det(boxes, labels, scores):
    """Clip boxes to [0, 1], drop zero-area boxes and order their corners.

    Arguments:
        boxes -- (n, 4) array of [x1, y1, x2, y2] in normalised coordinates
                 (assumed numpy-like: must support .clip — TODO confirm)
        labels, scores -- per-box class labels and confidences

    Returns the surviving (boxes, labels, scores) as parallel lists, with
    each box rewritten so x1 <= x2 and y1 <= y2.
    """
    clipped = boxes.clip(0, 1)
    kept_boxes, kept_labels, kept_scores = [], [], []
    for (x1, y1, x2, y2), label, score in zip(clipped, labels, scores):
        # Degenerate boxes (zero width or zero height) are discarded.
        if x1 == x2 or y1 == y2:
            continue
        kept_boxes.append([min(x1, x2), min(y1, y2), max(x1, x2), max(y1, y2)])
        kept_labels.append(label)
        kept_scores.append(score)
    return kept_boxes, kept_labels, kept_scores
if __name__ == "__main__":
    os.makedirs('predictions', exist_ok = True)
    device = select_device(opt.device)
    stride = None
    models = {}
    # Load one checkpoint per fold; all folds must share the same stride.
    for fold in opt.folds:
        print('*'*20, 'Fold {}'.format(fold), '*'*20)
        CHECKPOINT = 'runs/train/fold{}/weights/best.pt'.format(fold)
        model = attempt_load(CHECKPOINT, map_location=device) # load FP32 model
        if stride is None:
            stride = int(model.stride.max()) # model stride
        else:
            assert stride == int(model.stride.max())
        model.half() # to FP16
        model.eval()
        models[fold] = model
    imgsz = check_img_size(opt.img_size, s=stride) # check img_size
    # Run every fold's model over every external dataset and accumulate
    # per-image predictions (normalised boxes, scores, labels).
    for source in opt.sources:
        print('*'*20, source, '*'*20)
        if source == 'padchest':
            img_source = '../../dataset/external_dataset/padchest/images'
        elif source == 'pneumothorax':
            img_source = '../../dataset/external_dataset/pneumothorax/images'
        elif source == 'vin':
            img_source = '../../dataset/external_dataset/vinbigdata/images'
        else:
            raise ValueError('source !!!')
        predict_dict = {}
        dataset = LoadImages(img_source, img_size=imgsz, stride=stride)
        for path, img, im0s, vid_cap in tqdm(dataset):
            # Normalise the path to be relative to the dataset root.
            path = '../../dataset'+ path.split('../../dataset')[-1]
            img_height, img_width = im0s.shape[0:2]
            if path not in list(predict_dict.keys()):
                # [boxes, scores, labels, width, height] per image.
                predict_dict[path] = [[],[],[], img_width, img_height]
            img = torch.from_numpy(img).to(device)
            img = img.half()
            img /= 255.0  # 0-255 -> 0.0-1.0
            if img.ndimension() == 3:
                img = img.unsqueeze(0)
            for fold in opt.folds:
                with torch.no_grad():
                    pred = models[fold](img, augment=True)[0]
                pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=None, agnostic=opt.agnostic_nms)
                boxes = []
                scores = []
                labels = []
                for det in pred:
                    if det is not None and len(det):
                        # Rescale boxes back to the original image, then
                        # normalise to [0, 1] by width/height.
                        det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0s.shape).round()
                        det = det.data.cpu().numpy()
                        box_pred = det[:,:4].astype(float)
                        box_pred[:,[0,2]] = box_pred[:,[0,2]]/float(img_width)
                        box_pred[:,[1,3]] = box_pred[:,[1,3]]/float(img_height)
                        score_pred = det[:,4]
                        label_pred = det[:,5].astype(int)
                        box_pred, label_pred, score_pred = refine_det(box_pred, label_pred, score_pred)
                        predict_dict[path][0] += [box_pred]
                        predict_dict[path][1] += [score_pred]
                        predict_dict[path][2] += [label_pred]
        pred_dict_path = 'predictions/yolov5x6_{}_fold{}_{}_pred.pth'.format(opt.img_size, '_'.join(str(x) for x in opt.folds), source)
        torch.save(predict_dict, pred_dict_path)
|
# this is a demo of UserFacts which uses files baked into
# /etc/opsmop/facts.d to implement feature flags
from opsmop.core.easy import *
import getpass
# Resolved once at import; the file resources below are owned by this user.
USERNAME = getpass.getuser()
class FeatureOne(Role):
    """Role that only runs when the 'ff01' feature flag is enabled."""

    def should_process_when(self):
        # Skipped entirely unless the flag file marks ff01 as truthy.
        flags = UserFacts.feature_flags
        return flags.get('ff01', False)

    def set_resources(self):
        # Copy a python value into template scope via Set, then use it.
        warp_command = T("Set warp speed to {{ UserFacts.warp_factor }}, Engage!")
        resources = Resources(
            DebugFacts(),
            Echo("The security officer is {{ UserFacts.crew.officers.security }}"),
            Set(command=warp_command),
            Echo("The singularity is becoming unstable. {{ command }}")
        )
        return resources
class FeatureTwo(Role):
    """Role gated behind the 'ff02' feature flag (disabled by default)."""

    def should_process_when(self):
        flags = UserFacts.feature_flags
        return flags.get('ff02', False)

    def set_resources(self):
        notice = Echo("This won't actually run until you modify feature_flags.json to enable ff02")
        return Resources(
            notice,
            DebugFacts()
        )
class CommonSetup(Role):
    """Installs the /etc/opsmop/facts.d files that drive the feature flags."""

    def set_resources(self):
        # All files signal the 'yep' handler when they change.
        managed = [
            Directory("/etc/opsmop/facts.d", owner=USERNAME),
            File("/etc/opsmop/facts.d/feature_flags.json", from_file="files/feature_flags.json", owner=USERNAME, mode=0o644, signals='yep'),
            File("/etc/opsmop/facts.d/star_trek.yml", from_file="files/star_trek.yml", owner=USERNAME, mode=0o644, signals='yep'),
            File("/etc/opsmop/facts.d/dynamic_facts.sh", from_file="files/dynamic_facts.sh", owner=USERNAME, mode=0o755, signals='yep')
        ]
        resources = Resources()
        resources.add(managed)
        return resources

    def set_handlers(self):
        notify = Resources(
            Echo("fyi, we just set up /etc/opsmop/facts.d for you"),
            Echo("check out the file contents and edit them if you like")
        )
        return Handlers(yep=notify)

    def post(self):
        # The facts files may have just changed; force a fresh read.
        UserFacts.invalidate()
class Demo(Policy):
    """Top-level policy wiring the common setup ahead of both feature roles."""

    def set_roles(self):
        selected_roles = [
            CommonSetup(),
            FeatureOne(),
            FeatureTwo(),
        ]
        return Roles(*selected_roles)
# Run the demo policy through the opsmop CLI when executed directly.
if __name__ == '__main__':
    Cli(Demo())
|
# Copyright 2018-2020 Jérôme Dumonteil
# Copyright (c) 2009-2013 Ars Aperta, Itaapy, Pierlis, Talend.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Authors (odfdo project): jerome.dumonteil@gmail.com
# The odfdo project is a derivative work of the lpod-python project:
# https://github.com/lpod/lpod-python
# Authors: David Versmisse <david.versmisse@itaapy.com>
# Hervé Cauwelier <herve@itaapy.com>
# Romain Gauthier <romain@itaapy.com>
# Jerome Dumonteil <jerome.dumonteil@itaapy.com>
"""Paragraph class for "text:p", Span class for "text:span"
"""
import re
from functools import wraps # for keeping trace of docstring with decorators
from .bookmark import Bookmark, BookmarkStart, BookmarkEnd
from .element import FIRST_CHILD, NEXT_SIBLING, register_element_class, Element
from .paragraph_base import ParagraphBase, Spacer, Tab, LineBreak
from .note import Note, Annotation, AnnotationEnd
from .reference import Reference, ReferenceMark, ReferenceMarkStart, ReferenceMarkEnd
from .link import Link
def _by_regex_offset(method):
    """Decorator for Paragraph methods that insert an element at a position
    given either by a regular expression or by an (offset, length) pair.

    The wrapped ``method(element, match, tail, *args, **kwargs)`` receives
    the matched text and the text following it, and returns the element to
    insert in place of the match.
    """

    @wraps(method)
    def wrapper(element, *args, **kwargs):
        """Insert the result of method(element, ...) at the place matching the
        regex OR the positional arguments offset and length.

        Arguments:

            method -- wrapped method

            element -- self

            regex -- unicode regular expression

            offset -- int

            length -- int
        """
        offset = kwargs.get("offset", None)
        regex = kwargs.get("regex", None)
        # Compare against None explicitly: an offset of 0 (start of the
        # text) is a valid position and must not fall through to the regex
        # branch as the previous truthiness test made it do.
        if offset is not None:
            length = kwargs.get("length", 0)
            counted = 0
            # NOTE(review): "//text()" searches from the document root while
            # the regex branch below uses "descendant::text()" — confirm
            # this asymmetry is intentional.
            for text in element.xpath("//text()"):
                if len(text) + counted <= offset:
                    counted += len(text)
                    continue
                if length > 0:
                    length = min(length, len(text))
                else:
                    length = len(text)
                # Static information about the text node
                container = text.parent
                upper = container.parent
                is_text = text.is_text()
                start = offset - counted
                end = start + length
                # Do not use the text node as it changes at each loop
                if is_text:
                    text = container.text
                else:
                    text = container.tail
                before = text[:start]
                match = text[start:end]
                tail = text[end:]
                result = method(element, match, tail, *args, **kwargs)
                if is_text:
                    container.text = before
                    # Insert as first child
                    container.insert(result, position=0)
                else:
                    container.tail = before
                    # Insert as next sibling
                    index = upper.index(container)
                    upper.insert(result, position=index + 1)
                return
        if regex:
            pattern = re.compile(regex)
            for text in element.xpath("descendant::text()"):
                # Static information about the text node
                container = text.parent
                upper = container.parent
                is_text = text.is_text()
                # Group positions are calculated and static, so apply in
                # reverse order to preserve positions
                for group in reversed(list(pattern.finditer(text))):
                    start, end = group.span()
                    # Do not use the text node as it changes at each loop
                    if is_text:
                        text = container.text
                    else:
                        text = container.tail
                    before = text[:start]
                    match = text[start:end]
                    tail = text[end:]
                    result = method(element, match, tail, *args, **kwargs)
                    if is_text:
                        container.text = before
                        # Insert as first child
                        container.insert(result, position=0)
                    else:
                        container.tail = before
                        # Insert as next sibling
                        index = upper.index(container)
                        upper.insert(result, position=index + 1)

    return wrapper
class Paragraph(ParagraphBase):
    """Specialised element for paragraphs "text:p". The "text:p" element
    represents a paragraph, which is the basic unit of text in an OpenDocument
    file.
    """

    _tag = "text:p"

    def __init__(self, text_or_element=None, style=None, **kwargs):
        """Create a paragraph element of the given style containing the optional
        given text.

        Arguments:

            text_or_element -- str or Element

            style -- str

        Return: Paragraph
        """
        super().__init__(**kwargs)
        if self._do_init:
            if isinstance(text_or_element, Element):
                self.append(text_or_element)
            else:
                self.text = text_or_element
            if style is not None:
                self.style = style

    def insert_note(
        self,
        note_element=None,
        after=None,
        note_class="footnote",
        note_id=None,
        citation=None,
        body=None,
    ):
        """Insert a footnote or endnote in the paragraph.

        If note_element is None, a new Note is built from note_class,
        note_id, citation and body; otherwise the provided Note is modified
        in place with whichever of those arguments are set.

        The note is inserted after the text matching "after" when it is a
        str regex, as first child of "after" when it is an Element, else as
        first child of the paragraph.

        Arguments:

            note_element -- Note or None

            after -- str regular expression, Element or None

            note_class -- 'footnote' or 'endnote'

            note_id -- str

            citation -- str

            body -- str or Element
        """
        if note_element is None:
            note_element = Note(
                note_class=note_class, note_id=note_id, citation=citation, body=body
            )
        else:
            # XXX clone or modify the argument?
            if note_class:
                note_element.note_class = note_class
            if note_id:
                note_element.note_id = note_id
            if citation:
                note_element.citation = citation
            if body:
                note_element.note_body = body
        note_element.check_validity()
        if isinstance(after, str):
            self._insert(note_element, after=after, main_text=True)
        elif isinstance(after, Element):
            after.insert(note_element, FIRST_CHILD)
        else:
            self.insert(note_element, FIRST_CHILD)

    def insert_annotation(
        self,
        annotation_element=None,
        before=None,
        after=None,
        position=0,
        content=None,
        body=None,
        creator=None,
        date=None,
    ):
        """Insert an annotation, at the position defined by the regex (before,
        after, content) or by positionnal argument (position). If content is
        provided, the annotation covers the full content regex. Else, the
        annotation is positionned either 'before' or 'after' provided regex.

        If content is an odf element (ie: paragraph, span, ...), the full inner
        content is covered by the annotation (of the position just after if
        content is a single empty tag).

        If content/before or after exists (regex) and return a group of matching
        positions, the position value is the index of matching place to use.

        annotation_element can contain a previously created annotation, else
        the annotation is created from the body, creator and optional date
        (current date by default).

        Arguments:

            annotation_element -- Annotation or name

            before -- str regular expression or None

            after -- str regular expression or None

            content -- str regular expression or None, or Element

            position -- int or tuple of int

            body -- str or Element

            creator -- str

            date -- datetime
        """
        if annotation_element is None:
            annotation_element = Annotation(
                text_or_element=body, creator=creator, date=date, parent=self
            )
        else:
            # XXX clone or modify the argument?
            if body:
                annotation_element.body = body
            if creator:
                annotation_element.dc_creator = creator
            if date:
                annotation_element.dc_date = date
        annotation_element.check_validity()
        # special case: content is an odf element (ie: a paragraph)
        if isinstance(content, Element):
            if content.is_empty():
                # Empty tag: annotate the position just after it.
                content.insert(annotation_element, xmlposition=NEXT_SIBLING)
                return annotation_element
            # Cover the whole inner content: start tag first, end tag last.
            content.insert(annotation_element, start=True)
            annotation_end = AnnotationEnd(annotation_element)
            content.append(annotation_end)
            return annotation_element
        # special case
        if isinstance(after, Element):
            after.insert(annotation_element, FIRST_CHILD)
            return annotation_element
        # With "content" => automatically insert a "start" and an "end"
        # bookmark
        if (
            before is None
            and after is None
            and content is not None
            and isinstance(position, int)
        ):
            # Start tag
            self._insert(
                annotation_element, before=content, position=position, main_text=True
            )
            # End tag
            annotation_end = AnnotationEnd(annotation_element)
            self._insert(
                annotation_end, after=content, position=position, main_text=True
            )
            return annotation_element
        # With "(int, int)" => automatically insert a "start" and an "end"
        # bookmark
        if (
            before is None
            and after is None
            and content is None
            and isinstance(position, tuple)
        ):
            # Start
            self._insert(annotation_element, position=position[0], main_text=True)
            # End
            annotation_end = AnnotationEnd(annotation_element)
            self._insert(annotation_end, position=position[1], main_text=True)
            return annotation_element
        # Without "content" nor "position"
        if content is not None or not isinstance(position, int):
            raise ValueError("bad arguments")
        # Insert
        self._insert(
            annotation_element,
            before=before,
            after=after,
            position=position,
            main_text=True,
        )
        return annotation_element

    def insert_annotation_end(
        self, annotation_element, before=None, after=None, position=0
    ):
        """Insert an annotation end tag for an existing annotation. If some end
        tag already exists, replace it. Annotation end tag is set at the
        position defined by the regex (before or after).

        If content/before or after (regex) returns a group of matching
        positions, the position value is the index of matching place to use.

        Arguments:

            annotation_element -- Annotation (mandatory)

            before -- str regular expression or None

            after -- str regular expression or None

            position -- int
        """
        if annotation_element is None:
            raise ValueError
        if not isinstance(annotation_element, Annotation):
            raise ValueError("Not a <office:annotation> Annotation")
        # remove existing end tag
        name = annotation_element.name
        existing_end_tag = self.get_annotation_end(name=name)
        if existing_end_tag:
            existing_end_tag.delete()
        # create the end tag
        end_tag = AnnotationEnd(annotation_element)
        # Insert
        self._insert(
            end_tag, before=before, after=after, position=position, main_text=True
        )
        return end_tag

    def set_reference_mark(
        self, name, before=None, after=None, position=0, content=None
    ):
        """Insert a reference mark, at the position defined by the regex
        (before, after, content) or by positionnal argument (position). If
        content is provided, the annotation covers the full range content regex
        (instances of ReferenceMarkStart and ReferenceMarkEnd are
        created). Else, an instance of ReferenceMark is positionned either
        'before' or 'after' provided regex.

        If content is an ODF Element (ie: Paragraph, Span, ...), the full inner
        content is referenced (of the position just after if content is a single
        empty tag).

        If content/before or after exists (regex) and return a group of matching
        positions, the position value is the index of matching place to use.

        Name is mandatory and shall be unique in the document for the preference
        mark range.

        Arguments:

            name -- str

            before -- str regular expression or None

            after -- str regular expression or None, or Element

            content -- str regular expression or None, or Element

            position -- int or tuple of int

        Return: the created ReferenceMark or ReferenceMarkStart
        """
        # special case: content is an odf element (ie: a paragraph)
        if isinstance(content, Element):
            if content.is_empty():
                # Empty tag: point reference just after it.
                reference = ReferenceMark(name)
                content.insert(reference, xmlposition=NEXT_SIBLING)
                return reference
            # Range reference around the whole inner content.
            reference_start = ReferenceMarkStart(name)
            content.insert(reference_start, start=True)
            reference_end = ReferenceMarkEnd(name)
            content.append(reference_end)
            return reference_start
        # With "content" => automatically insert a "start" and an "end"
        # reference
        if (
            before is None
            and after is None
            and content is not None
            and isinstance(position, int)
        ):
            # Start tag
            reference_start = ReferenceMarkStart(name)
            self._insert(
                reference_start, before=content, position=position, main_text=True
            )
            # End tag
            reference_end = ReferenceMarkEnd(name)
            self._insert(
                reference_end, after=content, position=position, main_text=True
            )
            return reference_start
        # With "(int, int)" => automatically insert a "start" and an "end"
        if (
            before is None
            and after is None
            and content is None
            and isinstance(position, tuple)
        ):
            # Start
            reference_start = ReferenceMarkStart(name)
            self._insert(reference_start, position=position[0], main_text=True)
            # End
            reference_end = ReferenceMarkEnd(name)
            self._insert(reference_end, position=position[1], main_text=True)
            return reference_start
        # Without "content" nor "position"
        if content is not None or not isinstance(position, int):
            raise ValueError("bad arguments")
        # Insert a positional reference mark
        reference = ReferenceMark(name)
        self._insert(
            reference, before=before, after=after, position=position, main_text=True
        )
        return reference

    def set_reference_mark_end(
        self, reference_mark, before=None, after=None, position=0
    ):
        """Insert/move a ReferenceMarkEnd for an existing reference mark. If
        some end tag already exists, replace it. Reference tag is set at the
        position defined by the regex (before or after).

        If content/before or after (regex) returns a group of matching
        positions, the position value is the index of matching place to use.

        Arguments:

            reference_mark -- ReferenceMark or ReferenceMarkStart (mandatory)

            before -- str regular expression or None

            after -- str regular expression or None

            position -- int
        """
        if not isinstance(reference_mark, (ReferenceMark, ReferenceMarkStart)):
            raise ValueError("Not a ReferenceMark or ReferenceMarkStart")
        name = reference_mark.name
        if isinstance(reference_mark, ReferenceMark):
            # change it to a range reference:
            reference_mark.tag = ReferenceMarkStart._tag
        existing_end_tag = self.get_reference_mark_end(name=name)
        if existing_end_tag:
            existing_end_tag.delete()
        # create the end tag
        end_tag = ReferenceMarkEnd(name)
        # Insert
        self._insert(
            end_tag, before=before, after=after, position=position, main_text=True
        )
        return end_tag

    def insert_variable(self, variable_element, after):
        """Insert the given variable element after the text matching the
        "after" regular expression, in the main text of the paragraph.

        Arguments:

            variable_element -- Element

            after -- str regular expression
        """
        self._insert(variable_element, after=after, main_text=True)

    @_by_regex_offset
    def set_span(self, match, tail, style, regex=None, offset=None, length=0):
        """
        set_span(style, regex=None, offset=None, length=0)

        Apply the given style to text content matching the regex OR the
        positional arguments offset and length.

        (match, tail: provided by regex decorator)

        Arguments:

            style -- style element or name

            regex -- str regular expression

            offset -- int

            length -- int
        """
        span = Span(match, style=style)
        span.tail = tail
        return span

    def remove_spans(self, keep_heading=True):
        """Send back a copy of the element, without span styles.

        If keep_heading is True (default), the first level heading style is left
        unchanged.
        """
        strip = (Span._tag,)
        if keep_heading:
            protect = ("text:h",)
        else:
            protect = None
        return self.strip_tags(strip=strip, protect=protect)

    def remove_span(self, spans):
        """Send back a copy of the element, the spans (not a clone) removed.

        Arguments:

            spans -- Element or list of Element
        """
        return self.strip_elements(spans)

    @_by_regex_offset
    def set_link(self, match, tail, url, regex=None, offset=None, length=0):
        """
        set_link(url, regex=None, offset=None, length=0)

        Make a link to the provided url from text content matching the regex
        OR the positional arguments offset and length.

        (match, tail: provided by regex decorator)

        Arguments:

            url -- str

            regex -- str regular expression

            offset -- int

            length -- int
        """
        link = Link(url, text=match)
        link.tail = tail
        return link

    def remove_links(self):
        """Send back a copy of the element, without links tags."""
        strip = (Link._tag,)
        return self.strip_tags(strip=strip)

    def remove_link(self, links):
        """Send back a copy of the element (not a clone), with the sub links
        removed.

        Arguments:

            links -- Link or list of Link
        """
        return self.strip_elements(links)

    def insert_reference(
        self, name, ref_format="", before=None, after=None, position=0, display=None
    ):
        """Create and insert a reference to a content marked by a reference
        mark. The Reference element ("text:reference-ref") represents a
        field that references a "text:reference-mark-start" or
        "text:reference-mark" element. Its "text:reference-format" attribute
        specifies what is displayed from the referenced element. Default is
        'page'. Actual content is not automatically updated except for the 'text'
        format.

        name is mandatory and should represent an existing reference mark of the
        document.

        ref_format is the argument for format reference (default is 'page').

        The reference is inserted the position defined by the regex (before /
        after), or by positionnal argument (position). If 'display' is provided,
        it will be used as the text value for the reference.

        If after is an ODF Element, the reference is inserted as first child of
        this element.

        Arguments:

            name -- str

            ref_format -- one of : 'chapter', 'direction', 'page', 'text',
                                   'caption', 'category-and-value', 'value',
                                   'number', 'number-all-superior',
                                   'number-no-superior'

            before -- str regular expression or None

            after -- str regular expression or odf element or None

            position -- int

            display -- str or None
        """
        # NOTE(review): unlike set_reference_mark, nothing is returned —
        # confirm callers do not expect the created Reference back.
        reference = Reference(name, ref_format)
        if display is None and ref_format == "text":
            # get reference content
            body = self.document_body
            if not body:
                body = self.root
            mark = body.get_reference_mark(name=name)
            if mark:
                display = mark.referenced_text
        if not display:
            # Never leave the field empty: use a single space placeholder.
            display = " "
        reference.text = display
        if isinstance(after, Element):
            after.insert(reference, FIRST_CHILD)
        else:
            self._insert(
                reference, before=before, after=after, position=position, main_text=True
            )

    def set_bookmark(
        self, name, before=None, after=None, position=0, role=None, content=None
    ):
        """Insert a bookmark before or after the characters in the text which
        match the regex before/after. When the regex matches more of one part
        of the text, position can be set to choose which part must be used.
        If before and after are None, we use only position that is the number
        of characters.

        So, by default, this function inserts a bookmark before the first
        character of the content. Role can be None, "start" or "end", we
        insert respectively a position bookmark a bookmark-start or a
        bookmark-end.

        If content is not None these 2 calls are equivalent:

          paragraph.set_bookmark("bookmark", content="xyz")

        and:

          paragraph.set_bookmark("bookmark", before="xyz", role="start")
          paragraph.set_bookmark("bookmark", after="xyz", role="end")

        If position is a 2-tuple, these 2 calls are equivalent:

          paragraph.set_bookmark("bookmark", position=(10, 20))

        and:

          paragraph.set_bookmark("bookmark", position=10, role="start")
          paragraph.set_bookmark("bookmark", position=20, role="end")

        Arguments:

            name -- str

            before -- str regex

            after -- str regex

            position -- int or (int, int)

            role -- None, "start" or "end"

            content -- str regex
        """
        # With "content" => automatically insert a "start" and an "end"
        # bookmark
        if (
            before is None
            and after is None
            and role is None
            and content is not None
            and isinstance(position, int)
        ):
            # Start
            start = BookmarkStart(name)
            self._insert(start, before=content, position=position, main_text=True)
            # End
            end = BookmarkEnd(name)
            self._insert(end, after=content, position=position, main_text=True)
            return start, end
        # With "(int, int)" => automatically insert a "start" and an "end"
        # bookmark
        if (
            before is None
            and after is None
            and role is None
            and content is None
            and isinstance(position, tuple)
        ):
            # Start
            start = BookmarkStart(name)
            self._insert(start, position=position[0], main_text=True)
            # End
            end = BookmarkEnd(name)
            self._insert(end, position=position[1], main_text=True)
            return start, end
        # Without "content" nor "position"
        if content is not None or not isinstance(position, int):
            raise ValueError("bad arguments")
        # Role
        if role is None:
            bookmark = Bookmark(name)
        elif role == "start":
            bookmark = BookmarkStart(name)
        elif role == "end":
            bookmark = BookmarkEnd(name)
        else:
            raise ValueError("bad arguments")
        # Insert
        self._insert(
            bookmark, before=before, after=after, position=position, main_text=True
        )
        return bookmark
class Span(Paragraph):
    """Specialised element for "text:span": an inline run of text inside a
    paragraph, optionally carrying its own style.
    """

    _tag = "text:span"
    _properties = (("style", "text:style-name"), ("class_names", "text:class-names"))

    def __init__(self, text=None, style=None, **kwargs):
        """Create a span element of the given style containing the optional
        given text.

        Arguments:

            text -- str

            style -- str

        Return: Span
        """
        super().__init__(**kwargs)
        if not self._do_init:
            return
        if text:
            self.text = text
        if style:
            self.style = style
# Generate the attribute-backed properties declared in Span._properties.
Span._define_attribut_property()

# Make the element factory return these classes for their tags.
register_element_class(Span)
register_element_class(Paragraph)
|
from src.base.solution import Solution
from src.structures.listnode import ListNode
from src.tests.part1.q328_test_odd_even_linkedlist import OddEvenLinkedlistTestCases
class OddEvenLinkedlist(Solution):
    """LeetCode 328: group the nodes at odd 1-based positions ahead of the
    nodes at even positions, preserving relative order, in O(1) space.
    """

    def run_test(self, input):
        # Adapter called by the Solution test harness.
        return self.oddEvenList(input)

    def gen_test_cases(self):
        return OddEvenLinkedlistTestCases()

    def verify_output(self, test_output, output):
        # Compare list contents, not node identity.
        return test_output.to_str() == output.to_str()

    def print_output(self, output):
        print(output.to_str())

    def oddEvenList(self, head):
        """Relink the list in place and return its (unchanged) head.

        The previous version printed both partial lists via to_str() on
        every iteration — O(n^2) debug noise — and relied on a fragile
        shared dummy node; this is the standard two-pointer relink.

        :type head: ListNode
        :rtype: ListNode
        """
        if not head:
            return head
        odd = head
        # Remember where the even sublist starts so it can be appended
        # after the last odd node.
        even_head = even = head.next
        while even and even.next:
            odd.next = even.next
            odd = odd.next
            even.next = odd.next
            even = even.next
        odd.next = even_head
        return head
if __name__ == '__main__':
    # Run the bundled test cases through the Solution harness.
    sol = OddEvenLinkedlist()
    sol.run_tests()
"""Configurações."""
import logging
import os
from functools import lru_cache
from pydantic import BaseSettings
log = logging.getLogger("uvicorn")
class Config(BaseSettings):
    """Application settings, sourced from environment variables."""

    # Current environment name ("dev", "stage", "prod", ...).
    env: str = os.getenv("ENV", "dev")
    # bool(os.getenv("TESTING", True)) was True for ANY non-empty string,
    # including "0" and "false"; parse the common falsy spellings instead.
    # The default when TESTING is unset remains True, as before.
    testing: bool = os.getenv("TESTING", "1").lower() not in ("0", "false", "no", "")
@lru_cache()
def carregar_config() -> Config:
    """Load the settings.

    This function is injected into the "pong" route. Loading the settings
    directly from code (instead of from a file) is computationally cheaper.
    Even so, this function would run on every request made to the "pong"
    route, so "lru_cache" is used to make it execute only once.

    See the official "lru_cache" documentation for details:
    https://docs.python.org/3/library/functools.html#functools.lru_cache
    """
    log.info("Carregando as configurações de ambiente...")
    return Config()
|
from pydantic import BaseModel
class ProductsSchema(BaseModel):
    """Validation schema for a product payload."""

    name: str
    description: str
    brand: str
    price: float
    quantity: int
    # Whether the product is currently offered for sale.
    is_active: bool
|
'''
Faça um programa que leia um ângulo qualquer e mostre na tela o valor do seno, cosseno e tangente desse ângulo.
'''
from math import cos, tan, sin, radians

angulo = int(input("Digite um ângulo qualquer: "))

# Use dedicated names for the results: the original rebound the imported
# cos/tan/sin functions, which would break any further trigonometric call.
rad = radians(angulo)
seno = sin(rad)
cosseno = cos(rad)
tangente = tan(rad)

print("Do angulo {} podemos identificar que seu:\nSeno é de {:.2f}.\nCosseno é de {:.2f}.\nE a Tangente é de {:.2f}.".format(angulo, seno, cosseno, tangente))
# Copyright (c) 2017-2019 Neogeo-Technologies.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.contrib.gis.db import models
class Granularity(models.Model):
    """Granularity level of a territorial coverage."""

    class Meta:
        verbose_name = "Granularité de la couverture territoriale"
        verbose_name_plural = "Granularités des couvertures territoriales"

    # The slug doubles as the primary key of the table.
    slug = models.SlugField(
        verbose_name="Slug", max_length=100, blank=True,
        unique=True, db_index=True, primary_key=True)

    name = models.TextField(verbose_name="Nom")

    # Optional unique integer; presumably a coarse-to-fine rank — the
    # ordering logic is not defined in this model.
    order = models.IntegerField(unique=True, null=True, blank=True)

    def __str__(self):
        return self.name
|
# Python
# Signature generation helpers.
import hashlib
import hmac
import base64
import sys
sys.path.append("/")
from control import constant
#
appid = constant.appid1
appkey = constant.appkey1
# Base64-encode the raw HMAC-SHA256 digest (eWeLink signing convention).
def makeSign(key, value):
    """Return the base64 text of HMAC-SHA256(key, value).

    Arguments:
        key -- str secret (app key)
        value -- str payload to sign

    Return: str
    """
    digest = hmac.new(
        key.encode('utf-8'),
        value.encode('utf-8'),
        digestmod=hashlib.sha256,
    ).digest()
    return base64.b64encode(digest).decode('utf-8')
def main():
    """Build a sample login payload and return its signature."""
    import json

    payload = {
        "appid": appid,
        "phoneNumber": "+8618682218203",
        "password": "66668888",
        "ts": "1558004249",
        "version": "8",
        "nonce": "12345678"
    }
    # Serialize without re-sorting: the signature is computed over this
    # exact byte sequence, so key order must match the request body.
    serialized = json.dumps(payload, sort_keys=False)
    return makeSign(appkey, serialized)
if __name__ == "__main__":
    # Compute (and discard) a sample signature when run as a script.
    main()
|
"""
#######################
UTILITIES
#######################
"""
import json
import os
from os.path import exists, join
"""
Method that returns data paths and checks whether they really exist
"""
def check_data_path(args):
    """Return the dataset paths required for the current run mode.

    Arguments:
        args -- parsed CLI namespace; only ``args.mode`` is read

    Return: dict mapping "train"/"val" (mode == "train") or "test" to paths

    Raises:
        FileNotFoundError -- if any expected file is missing
    """
    data_paths = {}
    if args.mode == "train":
        data_paths["train"] = "data/train_CNNDM_bert.jsonl"  # change this according to your own dataset
        data_paths["val"] = "data/val_CNNDM_bert.jsonl"  # change this according to your own dataset
    else:
        data_paths["test"] = "data/test_CNNDM_bert.jsonl"  # change this according to your own dataset
    for mode in data_paths:
        # assert would be silently skipped under "python -O"; raise instead.
        if not exists(data_paths[mode]):
            raise FileNotFoundError("missing data file: {}".format(data_paths[mode]))
    return data_paths
"""
Method that paths for decoded and reference pairs
"""
def get_results_path(save_path, curr_model):
    """Create (if needed) and return the decoded/reference output directories.

    Layout: <save_path>/results/<curr_model>/{dec,ref}

    Arguments:
        save_path -- base output directory
        curr_model -- model name used as sub-directory

    Return: (dec_path, ref_path) tuple of str
    """
    result_path = join(save_path, "results")
    model_path = join(result_path, curr_model)
    dec_path = join(model_path, "dec")
    ref_path = join(model_path, "ref")
    # makedirs creates all intermediate directories; exist_ok makes repeated
    # runs for the same model idempotent (the original makedirs(dec_path)
    # crashed with FileExistsError on a re-run).
    os.makedirs(dec_path, exist_ok=True)
    os.makedirs(ref_path, exist_ok=True)
    return dec_path, ref_path
"""
Method that creates generator that reads sequential json datapoints from the file.
Arguments:
- path = path to the json file
- fields = fields in the file that are important
- encoding = file encoding
"""
def read_json(path, fields=None, encoding='utf-8'):
    """Yield (line_index, record, line_length) for each JSON line in *path*.

    Arguments:
        path -- path to a JSON-lines file
        fields -- iterable of keys to keep, or None to keep every key
        encoding -- file encoding

    Raises:
        ValueError -- when a line lacks one of the requested fields
    """
    wanted = None if fields is None else set(fields)
    with open(path, 'r', encoding=encoding) as stream:
        for idx, raw in enumerate(stream):
            record = json.loads(raw)
            if wanted is None:
                yield idx, record, len(raw)
                continue
            kept = {k: v for k, v in record.items() if k in wanted}
            if len(kept) < len(wanted):
                raise ValueError("Invalid instance at line: {}".format(idx))
            yield idx, kept, len(raw)
"""
Method that prints current progress of certain task.
"""
def print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█', print_end="\r"):
    """Render one step of a textual progress bar.

    NOTE: the actual output line is currently disabled (commented out
    below); only the final newline on completion is emitted.
    """
    percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
    done = int(length * iteration // total)
    bar = fill * done + '-' * (length - done)
    # print(f'\r{prefix} |{bar}| {percent}% {suffix}', end=print_end)
    if iteration == total:
        # Terminate the (would-be) carriage-returned line.
        print()
"""
Method that saves last parameter configuration into a json file.
"""
def save_last_parameter_configuration(args):
    """Persist the run's hyper-parameters to ./parameters/params.json.

    Arguments:
        args -- parsed CLI namespace holding the hyper-parameter attributes
    """
    params = {'candidate_num': args.candidate_num, 'batch_size': args.batch_size, 'accum_count': args.accum_count,
              'max_lr': args.max_lr, 'margin': args.margin, 'warmup_steps': args.warmup_steps,
              'n_epochs': args.n_epochs, 'valid_steps': args.valid_every}
    # Create the target directory on first use instead of failing with
    # FileNotFoundError when ./parameters does not exist yet.
    os.makedirs('./parameters', exist_ok=True)
    with open('./parameters/params.json', 'w') as f:
        json.dump(params, f, indent=4)
|
# Generated by Django 2.2.4 on 2019-08-10 19:19
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration of the app: creates the House table.

    initial = True

    dependencies = [
        # House references the (possibly swapped) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='House',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_approved', models.BooleanField(default=False)),
                ('is_available', models.BooleanField(default=False)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('street_number', models.IntegerField(default=0)),
                ('street_name', models.CharField(default='', max_length=400)),
                ('city', models.CharField(default='', max_length=400)),
                ('prov_state', models.CharField(max_length=2)),
                ('postal_code', models.CharField(default='', max_length=7)),
                ('country', models.CharField(default='', max_length=100)),
                ('hide_address', models.BooleanField(default=False)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
from utils.epa.sdw_downloader import (SDW_Downloader)
import os
from django.conf import settings
from django.core.management.base import BaseCommand
from utils.epa.sdw_downloader import ( SDW_Downloader )
class Command(BaseCommand):
    # NOTE(review): this class is shadowed by the second ``class Command``
    # defined later in this module, so it never runs.  The two commands look
    # like they were meant to live in separate management/commands files —
    # confirm and split them.
    def handle(self, *args, **options):
        # Download EPA Safe Drinking Water data into the configured directory.
        target_dir = os.path.join(
            settings.BASE_DIR, settings.EPA_DATA_DIRECTORY)
        downloader = SDW_Downloader()
        downloader.get_water_data(target_dir)
class Command(BaseCommand):
    # This definition overrides the identically named class above, so running
    # this management command fetches facility data only.
    def handle(self, *args, **options):
        # Download EPA facility data into the configured directory.
        target_dir = os.path.join(
            settings.BASE_DIR, settings.EPA_DATA_DIRECTORY)
        downloader = SDW_Downloader()
        downloader.get_facility_data(target_dir)
|
import json
import locale
import os
class BabelFish():
    """Loads JSON translation tables and resolves UI strings, falling back
    from the detected (or caller-supplied) locale to English.
    """

    def __init__(self, subpath=["resources","app","meta"], lang=None):
        """Detect the locale and load the standard translation files.

        Arguments:
            subpath -- accepted for interface compatibility but unused; the
                       constructor loads a fixed set of translation files
            lang -- two-letter language code overriding the system locale
        """
        localization_string = locale.getdefaultlocale()[0]  # e.g. "en_US"
        self.locale = localization_string[:2] if lang is None else lang  # caller may override
        self.langs = ["en"]  # always keep English as the fallback
        if not self.locale == "en":  # add the active localization
            self.langs.append(self.locale)
        self.lang_defns = {}  # lang code -> {domain key -> translations}
        self.add_translation_file()  # default translation file
        self.add_translation_file(["resources","app","cli"])  # help strings
        self.add_translation_file(["resources","app","gui"])  # gui labels
        self.add_translation_file(["resources","user","meta"])  # user overrides

    def add_translation_file(self, subpath=["resources","app","meta"]):
        """Load <subpath>/lang/<lang>.json for every active language.

        Definitions are stored in self.lang_defns keyed by the dotted path
        with the "resources"/"app"/"user" components stripped (so the user
        "meta" file overrides the app "meta" file).  Missing files are
        silently skipped; lookups then fall back to English or the raw key.
        """
        if not isinstance(subpath, list):
            subpath = [subpath]
        else:
            # Work on a copy: appending below would otherwise mutate the
            # caller's list — and, worse, the shared default argument.
            subpath = list(subpath)
        if "lang" not in subpath:
            subpath.append("lang")  # look in the lang folder
        subpath = os.path.join(*subpath)  # put in path separators
        key = subpath.split(os.sep)
        for check in ["resources","app","user"]:
            if check in key:
                key.remove(check)
        key = os.path.join(*key)  # put in path separators
        for lang in self.langs:
            if lang not in self.lang_defns:
                self.lang_defns[lang] = {}
            langs_filename = os.path.join(subpath, lang + ".json")  # translation file
            if os.path.isfile(langs_filename):
                with open(langs_filename, encoding="utf-8") as f:
                    # Store under the dotted key without the trailing ".lang".
                    self.lang_defns[lang][key[:key.rfind(os.sep)].replace(os.sep,'.')] = json.load(f)

    def translate(self, domain="", key="", subkey="", uselang=None):
        """Resolve a string through three levels of keys (domain.key.subkey).

        Falls back from the active locale to English, then to the raw
        subkey text.  "Exit" and " - <room>" suffixes are stripped before
        lookup and re-appended to a found translation.
        """
        # start with nothing
        display_text = ""
        # exits: look up the name without "Exit", then append " Exit" at end
        # multiRoom: look up without the chest name, then re-append it at end
        specials = {
            "exit": False,
            "multiRoom": False
        }
        # Domain
        if os.sep in domain:
            domain = domain.replace(os.sep,'.')
        # display_text = domain
        # Operate on Key
        if key != "":
            if display_text != "":
                display_text += '.'
            # display_text += key
            # NOTE(review): the key is never appended to display_text (the
            # appends above are commented out), so the fallback text is the
            # subkey alone — confirm this is intended.
            # Exits
            if "exit" in key and "gui" not in domain:
                key = key.replace("exit","")
                specials["exit"] = True
            if "Exit" in key and "gui" not in domain:
                key = key.replace("Exit","")
                specials["exit"] = True
            # Locations
            tmp = key.split(" - ")
            if len(tmp) >= 2:
                specials["multiRoom"] = tmp[len(tmp) - 1]
                tmp.pop()
                key = " - ".join(tmp)
            key = key.strip()
        # Operate on Subkey
        if subkey != "":
            if display_text != "":
                display_text += '.'
            display_text += subkey
            # Exits
            if "exit" in subkey and "gui" not in domain:
                subkey = subkey.replace("exit","")
                specials["exit"] = True
            if "Exit" in subkey and "gui" not in domain:
                subkey = subkey.replace("Exit","")
                specials["exit"] = True
            # Locations
            tmp = subkey.split(" - ")
            if len(tmp) >= 2:
                specials["multiRoom"] = tmp[len(tmp) - 1]
                tmp.pop()
                subkey = " - ".join(tmp)
            subkey = subkey.strip()
        my_lang = self.lang_defns[uselang if uselang is not None else self.locale]  # handle for localization
        en_lang = self.lang_defns["en"]  # handle for English
        if domain in my_lang and key in my_lang[domain] and subkey in my_lang[domain][key] and not my_lang[domain][key][subkey] == "":  # get localization first
            display_text = my_lang[domain][key][subkey]
        elif domain in en_lang and key in en_lang[domain] and subkey in en_lang[domain][key] and not en_lang[domain][key][subkey] == "":  # gracefully degrade to English
            display_text = en_lang[domain][key][subkey]
        elif specials["exit"]:
            # No translation found: clear the flag so " Exit" is not
            # re-appended and the untouched raw text is returned.
            specials["exit"] = False
        if specials["exit"]:
            display_text += " Exit"
        elif specials["multiRoom"] and specials["multiRoom"] not in display_text:
            display_text += " - " + specials["multiRoom"]
        return display_text
|
from bs4 import BeautifulSoup
from abc import abstractmethod
from IzVerifier.exceptions.IzVerifierException import MissingFileException
class IzContainer():
    """
    Abstract class responsible for containing some izpack spec resource.

    For example implementers, see:
    izconditions (for izpack conditions)
    izstrings (for izpack localized strings)
    izvariables (for izpack variables)
    """
    # NOTE(review): the @abstractmethod decorators below are not enforced
    # because this class does not use ABCMeta (e.g. inherit from abc.ABC);
    # instantiating an incomplete subclass will not raise — confirm whether
    # enforcement is wanted.

    def __init__(self, path):
        # Parsed entities, keyed by id (filled by the subclass's parse()).
        self.container = {}
        # Entity id -> locations referencing it (see get_referenced()).
        self.referenced = {}
        try:
            # NOTE(review): the file handle from open() is never closed
            # explicitly; it is left to garbage collection.
            self.soup = BeautifulSoup(open(path), 'xml')
        except IOError:
            raise MissingFileException("spec not found at: " + path)
        self.parse(self.soup)

    def get_referenced(self):
        """
        Return a dict containing all referenced entities and the location of their references:
        {
            'id1' => [file1, file2, ...],
            ...
        }
        """
        return self.referenced

    @abstractmethod
    def parse(self, soup):
        """
        Parse the xml soup generated from the izpack descriptor file.
        """
        pass

    @abstractmethod
    def get_keys(self):
        """
        Return all unique keys found for the container's entity.
        """
        pass

    @abstractmethod
    def count(self):
        """
        Return a count of all unique keys found for the container's entity.
        """
        pass

    @abstractmethod
    def has_definition(self, element):
        """
        Return true if the given element contains an izpack definition for the container item.
        """
        pass

    @abstractmethod
    def has_reference(self, element):
        """
        Return true if the given element contains an izpack string reference.

        This method is used to define all the rules that allow the verifier to find
        references to the type of izpack entity being searched for.
        """
        pass

    @abstractmethod
    def get_spec_elements(self):
        """
        Returns a set of the elements defining each of the container's entities.
        """
        pass

    @abstractmethod
    def element_sort_key(self):
        """
        Returns the key to use when sorting elements for this container.
        """
        pass
|
import asyncio
def main():
    """Acquire the default asyncio event loop and block on it forever."""
    print("Creating our event loop")
    event_loop = asyncio.get_event_loop()
    # run_forever() never returns unless loop.stop() is called elsewhere,
    # so the final print is unreachable by design.
    event_loop.run_forever()
    print("Our Loop will now run forever, this will never execute")


if __name__ == '__main__':
    main()
"""
Collection of exceptions raised by the dnsimple2 package
"""
class InvalidAccountError(Exception):
    """Raised by the dnsimple2 package when account data is invalid."""
    pass
|
import numpy as np
import scipy.sparse as sp
import properties
from .. import utils
from .base import BaseSimilarityMeasure
class LinearCorrespondence(BaseSimilarityMeasure):
    """
    The petrophysical linear constraint for joint inversions.
    ..math::
        \\phi_c({\\mathbf m}_{\\mathbf1},{\\mathbf m}_{\\mathbf2})=\\lambda\\sum_{i=1}^M
        (k_1*m_1 + k_2*m_2 + k_3)
    Assuming that we are working with two models only.
    """
    # (k1, k2, k3) of the relation f(m1, m2) = k1*m1 + k2*m2 + k3.
    # Default (1, -1, 0) penalizes the pointwise difference m1 - m2.
    coefficients = properties.Array(
        "coefficients for the linear relationship between parameters",
        shape=(3,),
        default=np.array([1.0, -1.0, 0.0]),
    )
    def relation(self, model):
        """
        Computes the values of petrophysical linear relationship between two different
        geophysical models.
        The linear relationship is defined as:
        f(m1, m2) = k1*m1 + k2*m2 + k3
        :param numpy.ndarray model: stacked array of individual models
                                    np.c_[model1, model2,...]
        :rtype: float
        :return: linearly related petrophysical values of two different models,
                 dimension: M by 1, :M number of model parameters.
        """
        # wire_map splits the stacked model vector into the two individual
        # models (presumably provided by BaseSimilarityMeasure -- confirm).
        m1, m2 = self.wire_map * model
        k1, k2, k3 = self.coefficients
        return k1 * m1 + k2 * m2 + k3
    def __call__(self, model):
        """
        Computes the sum of values of petrophysical linear relationship
        between two different geophysical models.
        :param numpy.ndarray model: stacked array of individual models
                                    np.c_[model1, model2,...]
        :rtype: float
        :return: a scalar value.
        """
        # 0.5 * ||f(m1, m2)||^2
        result = self.relation(model)
        return 0.5 * result.T @ result
    def deriv(self, model):
        """Computes the Jacobian of the coupling term.
        :param list of numpy.ndarray ind_models: [model1, model2,...]
        :rtype: numpy.ndarray
        :return: result: gradient of the coupling term with respect to model1, model2,
                 :dimension 2M by 1, :M number of model parameters.
        """
        # d(0.5*||r||^2)/dm_i = k_i * r; the constant k3 only enters via r.
        k1, k2, k3 = self.coefficients
        r = self.relation(model)
        dc_dm1 = k1 * r
        dc_dm2 = k2 * r
        result = np.r_[dc_dm1, dc_dm2]
        return result
    def deriv2(self, model, v=None):
        """Computes the Hessian of the linear coupling term.
        :param list of numpy.ndarray ind_models: [model1, model2, ...]
        :param numpy.ndarray v: vector to be multiplied by Hessian
        :rtype: scipy.sparse.csr_matrix if v is None
                numpy.ndarray if v is not None
        :return Hessian matrix: | h11, h21 | :dimension 2M*2M.
                                |          |
                                | h12, h22 |
        """
        # Constant Hessian J^T J with J = [k1*I, k2*I]; independent of model.
        k1, k2, k3 = self.coefficients
        if v is not None:
            # Matrix-free Hessian-vector product on the two halves of v.
            v1, v2 = self.wire_map * v
            p1 = k1**2 * v1 + k2 * k1 * v2
            p2 = k2 * k1 * v1 + k2**2 * v2
            return np.r_[p1, p2]
        else:
            # NOTE(review): relies on self.regmesh from the base class --
            # confirm the attribute name against the installed SimPEG version.
            n = self.regmesh.nC
            A = utils.sdiag(np.ones(n) * (k1**2))
            B = utils.sdiag(np.ones(n) * (k2**2))
            C = utils.sdiag(np.ones(n) * (k1 * k2))
            return sp.bmat([[A, C], [C, B]], format="csr")
|
import datetime
import jwt
from sqlalchemy import and_
from project.server import app, db, bcrypt
from Crypto.PublicKey import RSA
class User(db.Model):
    """
    User Model for storing user related details

    Also provides static helpers for lookup by email/id and for encoding and
    decoding JWT auth tokens that carry RSA key material.
    """
    __tablename__ = "user"
    id = db.Column(db.Integer, primary_key = True, autoincrement = True)
    email = db.Column(db.String(), unique=True, nullable=False)
    password = db.Column(db.String(), nullable=False)  # bcrypt hash, set in __init__
    bday = db.Column(db.DateTime(), nullable=False)
    fullname = db.Column(db.String(), nullable=False)
    job = db.Column(db.String(), nullable=True)
    country = db.Column(db.String(), nullable=True)
    registered_on = db.Column(db.DateTime, nullable=False)
    is_confirmed= db.Column(db.Boolean, nullable=True, default=False)
    device_list = db.relationship('DeviceList', backref='user',lazy='dynamic')
    def __init__(self, email, password, bday, fullname,
                    job=None, country=None):
        # The plaintext password is never stored; only its bcrypt hash is.
        self.email = email
        self.password = bcrypt.generate_password_hash(
            password, app.config.get('BCRYPT_LOG_ROUNDS')
        ).decode()
        self.registered_on = datetime.datetime.now()
        self.bday=bday
        self.fullname=fullname
        self.country=country
        self.job=job
    @staticmethod
    def get_user_by_email(user_email):
        """Return the User with the given email, or None if not found."""
        return User.query.filter_by(email=user_email).first()
    @staticmethod
    def get_user_by_id(user_id):
        """Return the User with the given id, or None if not found."""
        return User.query.filter_by(id=user_id).first()
    @staticmethod
    def encode_auth_token(user_id, modulus=None, exponent=None, main_key=None, hasRoot=False):
        """
        Generates the Auth Token
        :param user_id key:
        :return: string

        Token expires one day after issue.  Missing key material is encoded
        as placeholder strings.  Supplying main_key forces hasRoot=True.
        NOTE(review): on failure the exception *object* is returned instead
        of being raised -- callers appear to depend on this; confirm before
        changing.
        """
        try:
            if main_key:
                hasRoot = True
            payload = {
                'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1),
                'iat': datetime.datetime.utcnow(),
                'sub': user_id,
                'modulus': modulus if modulus else "No modulus is available",
                'exponent': exponent if exponent else "No exponent is available",
                'key': main_key if main_key else "No main key is available",
                'hasRoot': hasRoot
            }
            return jwt.encode(
                payload,
                app.config.get('SECRET_KEY'),
            )
        except Exception as e:
            return e
    @staticmethod
    def decode_public_key(auth_token):
        """Return (modulus, exponent) from the token payload, or an error string."""
        # NOTE(review): jwt.decode is called without `algorithms=`; that works
        # on PyJWT 1.x but raises on PyJWT >= 2.0 -- confirm the pinned version.
        try:
            payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'))
            return payload['modulus'], payload['exponent']
        # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt.
        except:
            return 'Invalid auth token'
    @staticmethod
    def decode_auth_token_key(auth_token):
        """Return (user_id, [modulus, exponent]) for a valid, non-blacklisted token.

        Returns a human-readable error string (not a tuple) on blacklist,
        expiry, or invalid signature.
        """
        try:
            payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'))
            is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)
            if is_blacklisted_token:
                return 'Token is blacklisted. Please login again.'
            else:
                key = [payload['modulus'], payload['exponent']]
                return payload['sub'], key
        except jwt.ExpiredSignatureError:
            return 'Signature expired. Please login again.'
        except jwt.InvalidTokenError:
            return 'Invalid token. Please login again.'
    @staticmethod
    def decode_auth_token(auth_token):
        """
        Decodes the authentication token
        :param auth_token:
        :return: integer|string
        """
        try:
            payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'))
            is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)
            if is_blacklisted_token:
                return 'Token is blacklisted. Please login again.'
            else:
                return payload['sub']
        except jwt.ExpiredSignatureError:
            return 'Signature expired. Please login again.'
        except jwt.InvalidTokenError:
            return 'Invalid token. Please login again.'
    @staticmethod
    def decode_auth_key(auth_token):
        """Return (modulus, exponent) from the token payload.

        Near-duplicate of decode_public_key, but with narrower exception
        handling and per-error messages.
        """
        try:
            payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'))
            return payload['modulus'], payload['exponent']
        except jwt.ExpiredSignatureError:
            return 'Signature expired. Please login again.'
        except jwt.InvalidTokenError:
            return 'Invalid token. Please login again.'
class DeviceList(db.Model):
    """
    Model for storing list of devices associate with a user
    """
    __tablename__ = 'device_list'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    registered_on = db.Column(db.DateTime, nullable=False)
    mac_address = db.Column(db.String(), nullable=False)
    os = db.Column(db.String(), nullable=False)
    root = db.Column(db.Boolean, nullable=True, default=False)
    main_key = db.Column(db.String(), nullable=False)
    backup_key = db.Column(db.String(), nullable=False)
    otp_modulus = db.Column(db.String(), nullable=False)
    otp_exponent = db.Column(db.Integer, nullable=False)
    encrypted_key = db.Column(db.String(), nullable=True)

    def __init__(self, user, mac_address, backup_key,
                 main_key, otp_modulus, otp_exponent, os="Unknown", is_root=False):
        self.user = user
        self.mac_address = mac_address
        self.os = os
        self.root = is_root
        self.registered_on = datetime.datetime.now()
        self.main_key = main_key
        self.backup_key = backup_key
        self.otp_modulus = otp_modulus
        self.otp_exponent = otp_exponent
        self.encrypted_key = None

    def serialize(self):
        """Return a JSON-serializable view of this device."""
        return {
            'id': self.id,
            'mac_address': self.mac_address,
            'os': self.os,
            'registered_on': self.registered_on
        }

    @staticmethod
    def get_root_device(user_id):
        """Return the user's root device, or None."""
        # Multiple filter() criteria are implicitly AND-ed by SQLAlchemy.
        return DeviceList.query.filter(
            DeviceList.user.has(id=user_id),
            DeviceList.root == True,
        ).first()

    @staticmethod
    def get_device_by_user_id_and_mac(user_id, mac):
        """Return the user's device with the given MAC address, or None."""
        return DeviceList.query.filter(
            DeviceList.user.has(id=user_id),
            DeviceList.mac_address == mac,
        ).first()

    @staticmethod
    def get_device_by_mac(mac):
        """Return the first device with the given MAC address, or None."""
        return DeviceList.query.filter_by(mac_address=mac).first()

    @staticmethod
    def get_device_by_user_id(user_id):
        """Return a query over all devices belonging to the user."""
        return DeviceList.query.filter(DeviceList.user.has(id=user_id))

    @staticmethod
    def is_root(mac):
        """Return the root flag of the device with the given MAC address."""
        device = DeviceList.get_device_by_mac(mac)
        return device.root
class RSAPair(db.Model):
    """
    RSAPair model for database mapping to create RSAPair table
    which store RSA Key Pairs generated for each of login session
    """
    __tablename__ = 'rsa_key'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    public_modulus = db.Column(db.String(), nullable=False)
    public_exponent = db.Column(db.Integer, nullable=False)
    private_exponent = db.Column(db.String(), nullable=False)

    def __init__(self, public_modulus, public_exponent, private_exponent):
        """
        RSAPair Model Constructor
        :params:
            :modulus: public modulus
            :exponent: public exponent
            :key_mod: private modulus
        :returns: void
        """
        self.public_modulus = public_modulus
        self.public_exponent = public_exponent
        self.private_exponent = private_exponent

    @staticmethod
    def is_existed(key):
        """
        Check if provided key is existed
        :params: :key: list or RSA instance of the key
        :returns: True or False
        """
        # Normalize the three accepted key forms down to a modulus string,
        # then do a single lookup.
        if isinstance(key, str):
            modulus = key
        elif isinstance(key, list):
            modulus = key[0]
        else:
            modulus = str(key.n)
        return RSAPair.query.filter_by(public_modulus=modulus).first() is not None

    @staticmethod
    def get_RSA_by_public(public_key):
        """
        Get stored RSAPair from the public key
        :params: :public_key: the corresponding public key
        :returns: :RSAPair:
        """
        if isinstance(public_key, list):
            modulus = public_key[0]
        elif isinstance(public_key, str):
            modulus = public_key
        elif isinstance(public_key, int):
            modulus = str(public_key)
        else:
            modulus = str(public_key.n)
        return RSAPair.query.filter_by(public_modulus=modulus).first()
class BlacklistToken(db.Model):
    """
    Token Model for storing JWT tokens

    Tokens inserted here are considered revoked; decode helpers check this
    table before accepting a token.
    """
    __tablename__ = 'blacklist_tokens'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    token = db.Column(db.String(1024), unique=True, nullable=False)
    blacklisted_on = db.Column(db.DateTime, nullable=False)

    def __init__(self, token):
        self.token = token
        self.blacklisted_on = datetime.datetime.now()

    def __repr__(self):
        # Fixed: the original format string '<id: token: {}' dropped the id
        # value and never closed the bracket.
        return '<id: {} token: {}>'.format(self.id, self.token)

    @staticmethod
    def check_blacklist(auth_token):
        """Return True if *auth_token* has been blacklisted."""
        return BlacklistToken.query.filter_by(token=str(auth_token)).first() is not None
|
import io
from PIL import Image as PilImage
from typing import Tuple, Type, Union
from .attachments import Thumbnail
from .descriptors import StreamDescriptor
from .helpers import validate_width_height_ratio
from .typing_ import FileLike
def generate_thumbnail(
        original_image: Union[FileLike, StreamDescriptor],
        width: int = None,
        height: int = None,
        ratio: float = None,
        ratio_precision: int = 5,
        thumbnail_type: Type[Thumbnail] = Thumbnail,
) -> Tuple[int, int, float, Thumbnail]:
    """Render a resized thumbnail of *original_image* into a new attachment.

    :param original_image: source image stream/descriptor readable by PIL
    :param width: target width; may also be a callable taking the original
        (width, height) tuple and returning a width (see callable() checks)
    :param height: target height; same callable convention as width
    :param ratio: scale ratio; exactly one of width/height/ratio is resolved
        by validate_width_height_ratio
    :param ratio_precision: rounding precision for the returned ratio
    :param thumbnail_type: attachment class used to wrap the result
    :return: (width, height, ratio, thumbnail) with the final pixel sizes and
        the ratio recomputed from the final width
    """
    width, height, ratio = validate_width_height_ratio(
        width, height, ratio
    )
    thumbnail_buffer = io.BytesIO()
    format_ = 'jpeg'
    extension = '.jpg'
    # generating thumbnail and storing in buffer
    img = PilImage.open(original_image)
    # JPEG does not handle Alpha channel, switch to PNG
    # NOTE(review): only 'RGBA' is checked; 'LA'/'P' images can also carry
    # transparency and would still be saved as JPEG -- confirm intended.
    if img.mode == 'RGBA':
        format_ = 'png'
        extension = '.png'
    with img:
        original_size = img.size
        # width/height given as callables are resolved against the original size.
        if callable(width):
            width = width(original_size)
        if callable(height):
            height = height(original_size)
        width = int(width)
        height = int(height)
        thumbnail_image = img.resize((width, height))
        thumbnail_image.save(thumbnail_buffer, format_)
    thumbnail_buffer.seek(0)
    # Recompute the effective ratio from the final integer width.
    ratio = round(width / original_size[0], ratio_precision)
    thumbnail = thumbnail_type.create_from(
        thumbnail_buffer,
        content_type=f'image/{format_}',
        extension=extension,
        dimension=(width, height)
    )
    return width, height, ratio, thumbnail
|
def printa(z):
    """Print each row of grid *z* as a joined string, then an '@' marker."""
    rows = (''.join(row) for row in z)
    for line in rows:
        print(line)
    print('@')
# Read board sizes p from stdin (0 terminates) and animate an 'X' walking a
# square spiral outward from the center of a p-by-p board of 'O's, printing
# the board after every step via printa().
while(True):
    p = int(input())
    if(p==0):
        break
    else:
        x = []
        # Start at the center cell.
        i = j = int(p/2)
        for k in range(p):
            x.append(['O']*p)
        x[i][j] = 'X'
        printa(x)
        x[i][j] = 'O'
        # a is the current arm length of the spiral; it grows by one after
        # every two direction changes (right+up, then left+down).
        a = int(0)
        while(True):
            a+=1
            # Move right a steps; stop everything once j walks off the board.
            for q in range(a):
                j+=1
                if p == j:
                    break
                x[i][j] = 'X'
                printa(x)
                x[i][j] = 'O'
            if p ==j:
                break
            # Move up a steps.
            for q in range(a):
                i-=1
                x[i][j] = 'X'
                printa(x)
                x[i][j] = 'O'
            a+=1
            # Move left a+1 steps.
            for q in range(a):
                j-=1
                x[i][j] = 'X'
                printa(x)
                x[i][j] = 'O'
            # Move down a+1 steps.
            for q in range(a):
                i+=1
                x[i][j] = 'X'
                printa(x)
                x[i][j] = 'O'
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for dit.npdist.
"""
from __future__ import division
from nose.tools import (assert_almost_equal, assert_equal, assert_false,
assert_raises, assert_true)
from numpy.testing import assert_array_almost_equal
from six.moves import map, zip # pylint: disable=redefined-builtin
from dit.npdist import Distribution, ScalarDistribution, _make_distribution
from dit.exceptions import ditException, InvalidDistribution, InvalidOutcome
from dit.samplespace import CartesianProduct
import numpy as np
from itertools import product
def test_init1():
    """Empty Distributions are invalid unless a sample space is supplied."""
    # Invalid initializations.
    assert_raises(InvalidDistribution, Distribution, [])
    assert_raises(InvalidDistribution, Distribution, [], [])
    Distribution([], [], sample_space=[(0, 1)], validate=False)
def test_init2():
    """Outcomes must be a sequence; a bare iterator raises TypeError."""
    # Cannot initialize with an iterator.
    # Must pass in a sequence for outcomes.
    outcomes = map(int, ['0', '1', '2', '3', '4'])
    pmf = [1/5] * 5
    assert_raises(TypeError, Distribution, outcomes, pmf)
def test_init3():
    """A dict of outcome->probability initializes both outcomes and pmf."""
    dist = {'0': 1/2, '1': 1/2}
    d = Distribution(dist)
    assert_equal(d.outcomes, ('0', '1'))
    assert_array_almost_equal(d.pmf, [1/2, 1/2])
def test_init4():
    """Passing both a dict and a separate pmf is rejected."""
    dist = {'0': 1/2, '1': 1/2}
    pmf = [1/2, 1/2]
    assert_raises(InvalidDistribution, Distribution, dist, pmf)
def test_init5():
    """A length mismatch between outcomes and pmf is rejected."""
    outcomes = ['0', '1', '2']
    pmf = [1/2, 1/2]
    assert_raises(InvalidDistribution, Distribution, outcomes, pmf)
def test_init6():
    """An unordered set of outcomes is rejected."""
    outcomes = set(['0', '1', '2'])
    pmf = [1/3]*3
    assert_raises(ditException, Distribution, outcomes, pmf)
def test_init7():
    """from_distribution produces an approximately equal copy."""
    outcomes = ['0', '1']
    pmf = [1/2, 1/2]
    d1 = Distribution(outcomes, pmf)
    d2 = Distribution.from_distribution(d1)
    assert_true(d1.is_approx_equal(d2))
def test_init8():
    """from_distribution lifts a ScalarDistribution to tuple outcomes."""
    outcomes = [(0,), (1,)]
    pmf = [1/2, 1/2]
    d1 = ScalarDistribution(pmf)
    d2 = Distribution.from_distribution(d1)
    d3 = Distribution(outcomes, pmf)
    assert_true(d2.is_approx_equal(d3))
def test_init9():
    """from_distribution honors a base change, matching set_base(10)."""
    outcomes = [(0,), (1,)]
    pmf = [1/2, 1/2]
    d1 = ScalarDistribution(pmf)
    d2 = Distribution.from_distribution(d1, base=10)
    d3 = Distribution(outcomes, pmf)
    d3.set_base(10)
    assert_true(d2.is_approx_equal(d3))
def test_atoms():
    """atoms() spans the full sample space; patoms=True yields only nonnull atoms."""
    pmf = [.125, .125, .125, .125, .25, 0, .25]
    outcomes = ['000', '011', '101', '110', '222', '321', '333']
    d = Distribution(outcomes, pmf)
    atoms = d._product(['0','1','2', '3'], repeat=3)
    assert_equal(list(d.atoms()), list(atoms))
    # '321' has probability 0, so it is excluded from the probabilistic atoms.
    patoms = ['000', '011', '101', '110', '222', '333']
    assert_equal(list(d.atoms(patoms=True)), patoms)
    # An explicit sample space widens the atom alphabet.
    ss = CartesianProduct.from_outcomes(outcomes + ['444'])
    d = Distribution(outcomes, pmf, sample_space=ss)
    atoms = d._product(['0','1','2', '3', '4'], repeat=3)
    assert_equal(list(d.atoms()), list(atoms))
def test_zipped():
    """zipped() round-trips the distribution in all three modes."""
    pmf = [.125, .125, .125, .125, .25, 0, .25]
    outcomes = ['000', '011', '101', '110', '222', '321', '333']
    d = Distribution(outcomes, pmf)
    outcomes_, pmf_ = list(zip(*d.zipped()))
    d2 = Distribution(outcomes_, pmf_)
    assert_true(d.is_approx_equal(d2))
    outcomes_, pmf_ = list(zip(*d.zipped(mode='atoms')))
    d3 = Distribution(outcomes_, pmf_)
    assert_true(d.is_approx_equal(d3))
    # 'patoms' drops null outcomes, so compare against the sparse pmf.
    outcomes_, pmf_ = list(zip(*d.zipped(mode='patoms')))
    d4 = Distribution(outcomes_, pmf_)
    d.make_sparse()
    np.testing.assert_allclose(d.pmf, d4.pmf)
def test_make_distribution():
    """_make_distribution builds a plain Distribution without validation."""
    outcomes = ['0', '1']
    pmf = [1/2, 1/2]
    d = _make_distribution(outcomes, pmf, None)
    assert_true(type(d) is Distribution)
    assert_equal(d.outcomes, ('0', '1'))
def test_setitem1():
    """Setting an outcome outside the sample space raises InvalidOutcome."""
    d = Distribution(['0', '1'], [1/2, 1/2])
    assert_raises(InvalidOutcome, d.__setitem__, '2', 0)
def test_setitem2():
    """A previously-null outcome can be assigned on a sparse distribution."""
    d = Distribution(['00', '11'], [1, 0])
    d.make_sparse()
    d['11'] = 1/2
    d.normalize()
    assert_true('11' in d)
    assert_almost_equal(d['11'], 1/3)
def test_coalesce():
    """coalesce merges random variables into the requested groups."""
    outcomes = ['000', '011', '101', '110']
    pmf = [1/4]*4
    d = Distribution(outcomes, pmf)
    d = d.coalesce([[0, 1], [2]])
    assert_equal(d.outcome_length(), 2)
def test_copy():
    """copy(base=10) equals an explicit set_base(10) on a fresh copy."""
    outcomes = ['0', '1']
    pmf = [1/2, 1/2]
    d1 = Distribution(outcomes, pmf)
    d2 = d1.copy(base=10)
    d3 = Distribution(outcomes, pmf)
    d3.set_base(10)
    assert_true(d2.is_approx_equal(d3))
def test_outcome_length():
    """outcome_length reflects the marginal; masked=True keeps the original length."""
    outcomes = ['000', '011', '101', '110']
    pmf = [1/4]*4
    d = Distribution(outcomes, pmf)
    d = d.marginal([0, 2])
    assert_equal(d.outcome_length(), 2)
    assert_equal(d.outcome_length(masked=True), 3)
def test_has_outcome1():
    """On a sparse distribution, a null outcome is absent when null=False."""
    d = Distribution(['0', '1'], [1, 0])
    d.make_sparse()
    assert_false(d.has_outcome('1', null=False))
def test_has_outcome2():
    """Even on a dense distribution, a null outcome is absent when null=False."""
    d = Distribution(['0', '1'], [1, 0])
    assert_false(d.has_outcome('1', null=False))
def test_is_homogeneous1():
    """A distribution whose variables share one alphabet is homogeneous."""
    outcomes = ['00', '11']
    pmf = [1/2, 1/2]
    d = Distribution(outcomes, pmf)
    assert_true(d.is_homogeneous())
def test_is_homogeneous2():
    """Variables with differing alphabets make the distribution non-homogeneous."""
    outcomes = ['00', '01']
    pmf = [1/2, 1/2]
    d = Distribution(outcomes, pmf)
    assert_false(d.is_homogeneous())
def test_marginalize():
    """marginalize([1]) equals marginal([0, 2]) -- complementary index sets."""
    outcomes = ['000', '011', '101', '110']
    pmf = [1/4]*4
    d = Distribution(outcomes, pmf)
    d1 = d.marginal([0, 2])
    d2 = d.marginalize([1])
    assert_true(d1.is_approx_equal(d2))
def test_set_rv_names1():
    """set_rv_names rejects a name sequence shorter than the outcome length."""
    outcomes = ['00', '11']
    pmf = [1/2, 1/2]
    d = Distribution(outcomes, pmf)
    assert_raises(ditException, d.set_rv_names, 'X')
def test_set_rv_names2():
    """set_rv_names rejects a name sequence longer than the outcome length."""
    outcomes = ['00', '11']
    pmf = [1/2, 1/2]
    d = Distribution(outcomes, pmf)
    assert_raises(ditException, d.set_rv_names, 'XYZ')
|
from .models import Message, Room
import turbo
@turbo.register(Message)
class MessageBroadcast(turbo.ModelBroadcast):
    """Broadcasts Message model changes to the message's room via turbo streams."""
    def on_save(self, message, created, *args, **kwargs):
        # Only newly created messages are rendered and appended to the room's
        # "messages" element; plain updates are ignored here.
        if created:
            message.room.turbo.render(
                "chat/components/message.html", {"message": message}
            ).append(id="messages")
    def on_delete(self, message, *args, **kwargs):
        # Remove the rendered element whose DOM id encodes the message pk.
        message.room.turbo.remove(id=f"message-{message.id}")
|
from http import HTTPStatus
from threading import Thread
from flask import request, current_app, redirect
from flask_restful import Resource
from src.config import config
from src.translators.InstallTranslator import InstallTranslator
from src.utilities.logging import get_logger
class InstallResource(Resource):
    """Flask-RESTful resource handling Slack app installation (OAuth)."""
    def __init__(self):
        self.logger = get_logger(self.__class__.__name__)
    def get(self):
        """Redirect the user to the Slack OAuth flow"""
        return redirect(f'https://slack.com/oauth/authorize?client_id={config["CLIENT_ID"]}&scope={config["SCOPES"]}')
    def post(self):
        """Receive installation requests from the Strand UI"""
        args = request.get_json()
        self.logger.debug(f'Received Install request: {args}')
        # Intentional: Omitting schema validation due to simplicity
        # NOTE(review): request.get_json() can return None for non-JSON
        # bodies, in which case `'code' in args` raises TypeError -- confirm
        # callers always send JSON, or guard with `args or {}`.
        if 'code' in args:
            translator = InstallTranslator(code=args['code'],
                                           slack_client_wrapper=current_app.slack_client_wrapper,
                                           strand_api_client_wrapper=current_app.strand_api_client_wrapper)
            # Translate on a daemon thread so the HTTP response returns immediately.
            Thread(target=translator.translate, daemon=True).start()
            # TODO Wait until DB has new entry or timeout. Future optimization: replace with socket.
            return {}, HTTPStatus.OK
        return {'error': 'No code in request body'}, HTTPStatus.BAD_REQUEST
|
#
# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import pandas as pd
import matplotlib.pyplot as plt
from pyserini.collection import Collection, Cord19Article
def load(path):
    """Aggregate CORD-19 article counts by publish date.

    Reads the collection at *path*, splits articles into those with and
    without a publish_time, and returns six (publish_date, counts)
    DataFrames: year-only entries, per-year totals, and monthly/weekly
    breakdowns split around 2003 and 2019-12.
    """
    empty_date = dict()
    normal_dates = dict()
    cnt = 0
    collection = Collection('Cord19AbstractCollection', path)
    articles = collection.__next__()
    #iterate through raw collection
    for (i, d) in enumerate(articles):
        article = Cord19Article(d.raw)
        # documents with empty abstract
        metadata = article.metadata()
        date = metadata['publish_time']
        if len(date) == 0:
            # No publish_time: track (doi, title length) keyed by cord_uid.
            empty_date.setdefault(article.cord_uid(), [])
            empty_date[article.cord_uid()].append(article.metadata()["doi"])
            empty_date[article.cord_uid()].append(len(article.title()))
        else:
            normal_dates.setdefault(article.cord_uid(), [])
            normal_dates[article.cord_uid()].append(article.metadata()["doi"])
            normal_dates[article.cord_uid()].append(len(article.title()))
            normal_dates[article.cord_uid()].append(date)
        cnt = cnt + 1
        if cnt % 1000 == 0:
            print(f'{cnt} articles read...')
    #this df has 4 columns: docid, DOI, title, publish_date
    normal_dates_df = pd.DataFrame([([k] + v) for k, v in normal_dates.items()])
    normal_dates_df = normal_dates_df.loc[:, [0, 1, 2, 3]]
    normal_dates_df.columns = ['docid', 'DOI', 'title', 'publish_date']
    df1 = pd.DataFrame(normal_dates_df)
    date_df = df1.sort_values('publish_date').groupby('publish_date')
    #date_df_counts has two columns: publish_date, counts
    date_df_counts = date_df.size().reset_index(name='counts')
    #all dfs below have two columns: publish_date, counts (they are massaged df based on date_df_counts)
    #two dfs based on year unit
    # A 4-character publish_date is a bare year; longer strings carry month/day.
    only_year_filter = date_df_counts['publish_date'].str.len() == 4
    with_date_filter = date_df_counts['publish_date'].str.len() > 4
    only_year = date_df_counts.loc[only_year_filter].loc[date_df_counts['publish_date'] >= '2003'] #before 2003 are all under 2000
    exact_year_total = date_df_counts.groupby(date_df_counts['publish_date'].str[:4])['counts'].agg('sum').reset_index(name='counts')
    exact_year_total = exact_year_total.loc[exact_year_total['publish_date'] >= '2003']
    #on monthly basis
    exact_date = date_df_counts.loc[with_date_filter].groupby(date_df_counts['publish_date'].str[:7])['counts'].agg('sum').reset_index(name='counts')
    before_2003 = exact_date.loc[exact_date['publish_date'] <= '2002-12']
    between_03_19 = exact_date.loc[exact_date['publish_date'] > '2002-12'].loc[exact_date['publish_date'] <= '2019-12']
    after_19 = exact_date.loc[exact_date['publish_date'] >= '2019-12']
    #weekly basis after 2019-12
    weekly_update_19 = date_df_counts.loc[with_date_filter].loc[date_df_counts['publish_date'] >= '2019-12'].groupby(date_df_counts['publish_date'])['counts'].agg('sum').reset_index(name='counts')
    weekly_update_19['publish_date'] = pd.to_datetime(weekly_update_19['publish_date'])
    weekly_update_19 = weekly_update_19.groupby(pd.Grouper(key='publish_date', freq='W'))['counts'].agg('sum').reset_index(name='counts')
    return only_year, exact_year_total, before_2003, between_03_19, after_19, weekly_update_19
def plot_bars(only_year, exact_year_total, before_2003, between_03_19, after_19, weekly_update_19):
    """Draw the six publish-date bar charts and save to bar_plots.pdf.

    NOTE(review): each DataFrame.plot.bar call opens a new figure, while
    plt.savefig writes only the *current* (last) figure -- the saved PDF
    likely contains just the weekly-growth chart, not all six. Confirm and,
    if needed, save each figure individually (or use PdfPages).
    """
    only_year.plot.bar(x='publish_date', y='counts', title='number of publishes only has year')
    exact_year_total.plot.bar(x='publish_date', y='counts', title='number of publishes for all in year units')
    before_2003.plot.bar(x='publish_date', y='counts', title='publish_date before 2003', figsize=(30, 10), fontsize=6)
    between_03_19.plot.bar(x='publish_date', y='counts', title='between_03_19', figsize=(30, 10), fontsize=6)
    after_19.plot.bar(x='publish_date', y='counts', title='after_19', figsize=(20, 10), fontsize=8)
    graph_weekly_19 = weekly_update_19.loc[weekly_update_19['publish_date'] < '2020-08-09'] # omit after 2020-08-09 to make graph readable
    graph_weekly_19.plot.bar(x='publish_date', y='counts', title='after 2019-12 weekly growth', figsize=(20, 10))
    plt.savefig('bar_plots.pdf')
    print(f'draw 6 bar plots for documents based on their publish_dates and saved into file bar_plots.pdf')
if __name__ == '__main__':
    # CLI entry point: parse --path, aggregate publish-date counts, and plot.
    parser = argparse.ArgumentParser(description='Return bar charts of temporal analysis on CORD-19')
    parser.add_argument('--path', type=str, required=True, help='Path to input collection')
    args = parser.parse_args()
    only_year, exact_year_total, before_2003, between_03_19, after_19, weekly_update_19 = load(args.path)
    plot_bars(only_year, exact_year_total, before_2003, between_03_19, after_19, weekly_update_19)
|
import graphene
from django.contrib.auth import get_user_model
from graphene_django import DjangoObjectType
from insta.models import Profile
class ProfileType(DjangoObjectType):
    """GraphQL type generated from the Profile model."""
    class Meta:
        model = Profile
class UserType(DjangoObjectType):
    """GraphQL type generated from the active Django user model."""
    class Meta:
        model = get_user_model()
class Query(graphene.ObjectType):
    """Root GraphQL query: current user, user by id, all users, profiles."""
    me = graphene.Field(UserType)
    user = graphene.Field(UserType, id=graphene.Int(required=True))
    users = graphene.List(UserType)
    # NOTE: no resolver is defined for `profile` in this schema chunk.
    profile = graphene.List(ProfileType)

    def resolve_users(self, info):
        """Return every registered user."""
        return get_user_model().objects.all()

    def resolve_me(self, info):
        """Return the authenticated user, raising for anonymous requests."""
        user = info.context.user
        if user.is_anonymous:
            raise Exception("Please Log In!!")
        return user

    def resolve_user(self, info, id):
        """Return the user with the given id, raising a friendly error if absent.

        Fixed: `objects.get(id=id)` raises DoesNotExist instead of returning
        None, so the original `if not user` branch was unreachable; use
        filter().first() so the custom error message is actually produced.
        """
        user = get_user_model().objects.filter(id=id).first()
        if not user:
            raise Exception("Sorry, user doesn't exist")
        return user
class CreateUser(graphene.Mutation):
    """Mutation that registers a new user with a hashed password."""
    user = graphene.Field(UserType)
    class Arguments:
        first_name = graphene.String(required=True)
        last_name = graphene.String(required=True)
        username = graphene.String(required=True)
        email = graphene.String(required=True)
        password = graphene.String(required=True)
    def mutate(self, info, first_name, last_name, username, email, password):
        # set_password hashes the raw password; it is never stored in plaintext.
        user = get_user_model()(
            first_name=first_name, last_name=last_name, username=username, email=email,
        )
        user.set_password(password)
        user.save()
        return CreateUser(user=user)
class Mutation(graphene.ObjectType):
    """Root GraphQL mutation: exposes user creation."""
    create_user = CreateUser.Field()
|
def OcttoBin(n):
    """Convert an octal number to its binary string representation.

    Both branches of the original performed the identical conversion (str()
    is a no-op on strings), so a single path handles every input type.

    :param n: octal digits as a str or an int whose decimal digits are octal
              (e.g. '17' or 17)
    :return: binary representation without the '0b' prefix
    """
    return bin(int(str(n), 8))[2:]
|
# -*- coding: utf-8 -*-
# Third party
from rest_framework import serializers
class DeviceTokenSerializer(serializers.Serializer):
    """Validates a device token payload: a required string of >= 32 chars."""
    token = serializers.CharField(label='Token', min_length=32, required=True)
|
'''This is an unsupervised ML algo demonstration
Applying the Flat_KMeans clustering algorithms
on the Titanic dataset
Performed by Sahil Chauhan'''
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
from sklearn.cluster import KMeans
from sklearn import preprocessing
style.use('ggplot')
# Load the Titanic dataset and drop identifier-like columns.
df = pd.read_excel('titanic.xls')
# NOTE(review): positional axis argument (the bare `1`) is deprecated in
# recent pandas -- prefer axis=1 or columns=[...] when touching this.
df.drop(['body','name'], 1, inplace=True)
#df.convert_objects(convert_numeric=True)
df.fillna(0, inplace=True)
def handle_non_numerical_data(df):
    """Replace every non-numeric column of *df* with small integer codes.

    Each distinct value in a non-numeric column is assigned an int in
    first-appearance order. Fixed: the original iterated a set() of the
    values, whose order varies across runs under hash randomization, making
    the encoding (and any downstream model) nondeterministic.

    :param pandas.DataFrame df: frame to encode (columns replaced in place)
    :return: the same DataFrame, with all columns numeric
    """
    for column in df.columns.values:
        # Only encode columns that are not already numeric.
        if df[column].dtype != np.int64 and df[column].dtype != np.float64:
            text_digit_vals = {}
            # Deterministic: codes follow first appearance in the column.
            for value in df[column].values.tolist():
                if value not in text_digit_vals:
                    text_digit_vals[value] = len(text_digit_vals)
            df[column] = list(map(text_digit_vals.get, df[column]))
    return df
df = handle_non_numerical_data(df)
# TO see the list of all the columns
#print(df.columns.values)
# TO tweak the columns to see if there is an affect on the accuracy.
#df.drop(['ticket','fare','boat'], 1, inplace=True)
# Features: everything except the target; scaled to zero mean / unit variance.
X = np.array(df.drop(['survived'], 1).astype(float))
X = preprocessing.scale(X)
y = np.array(df['survived'])
boolean = True
counters = 0
''' to set the counters that how many time this'''
''' algorithm will work'''
loop = 50
# Re-fit KMeans `loop` times and print the per-run label agreement with y.
# NOTE(review): KMeans cluster labels 0/1 are arbitrary, so a printed value v
# may correspond to an agreement of either v or 1-v -- confirm interpretation.
while boolean:
    clf = KMeans(n_clusters=2)
    clf.fit(X)
    correct = 0
    for i in range(len(X)):
        predict_me = np.array(X[i].astype(float))
        predict_me = predict_me.reshape(-1, len(predict_me))
        prediction = clf.predict(predict_me)
        if prediction[0] == y[i]:
            correct += 1
    print(correct/len(X))
    counters += 1
    if counters == loop:
        boolean = False
        break
|
# Ledger role codes; the empty string denotes an ordinary identity with no
# special role. NOTE(review): these values match Hyperledger Indy's NYM role
# codes -- confirm against the ledger documentation before reuse.
NO_ROLE_CODE = ''
TRUSTEE_CODE = '0'
STEWARD_CODE = '2'
TGB_CODE = '100'
TRUST_ANCHOR_CODE = '101'
|
from django.test import TestCase, override_settings
from .models import Trunk, Branch, Leaf
@override_settings(ROOT_URLCONF = 'tests.nested_includes.urls')
class NestedIncludesTestCase(TestCase):
    """Exercises two-level JSON:API `include` paths (trunk -> branch -> leaf).

    Uses assertEqual throughout; the assertEquals alias is deprecated and
    removed in Python 3.12.
    """
    def setUp(self):
        # One object per level so each include resolves to exactly one record.
        self.trunk = Trunk.objects.create(name="Trunk")
        self.branch = Branch.objects.create(name="Branch", trunk=self.trunk)
        self.leaf = Leaf.objects.create(name="Leaf", branch=self.branch)
    def test_trunks_include_leaves(self):
        """Including branches.leaves sideloads both the branch and the leaf."""
        response = self.client.get('/trunks?include=branches.leaves')
        self.assertEqual(
            len(response.data['data'][0]['relationships']['branches']['data']),
            1
        )
        self.assertEqual(
            len(response.data['included']),
            2
        )
    def test_leaves_include_trunk(self):
        """Including branch.trunk sideloads both the branch and the trunk."""
        response = self.client.get('/leaves?include=branch.trunk')
        self.assertEqual(
            response.data['data'][0]['relationships']['branch']['data']['type'],
            'branch'
        )
        self.assertEqual(
            len(response.data['included']),
            2
        )
    def test_invalid_include(self):
        """An unknown include path yields a 400 Bad Request."""
        response = self.client.get('/leaves?include=foo.bar')
        self.assertEqual(response.status_code, 400)
|
import argparse
# Showing both short and long argument options
def get_args():
    """Build a demo argparse parser (one required, two optional options) and
    print the parsed namespace."""
    parser = argparse.ArgumentParser(
        description="A simple argument parser",
        epilog="This is where you might put example usage"
    )
    # required argument
    parser.add_argument('-x', '--execute', action="store", required=True,
                        help='Help text for option X')
    # optional arguments
    parser.add_argument('-y', '--message1', help='Help text for option Y', default=False)
    parser.add_argument('-z', '--message2', help='Help text for option Z', type=int)
    print(parser.parse_args())
if __name__ == '__main__':
    get_args()
# The string below is an example shell transcript, kept for reference.
'''
$ arg_demo_3.py
usage: arg_demo_3.py [-h] -x EXECUTE [-y MESSAGE1] [-z MESSAGE2]
arg_demo_3.py: error: the following arguments are required: -x/--execute
$ arg_demo_3.py --execute something
Namespace(execute='something', message1=False, message2=None)
$ arg_demo_3.py --execute something --message1 text1 --message2 text2
usage: arg_demo_3.py [-h] -x EXECUTE [-y MESSAGE1] [-z MESSAGE2]
arg_demo_3.py: error: argument -z/--message2: invalid int value: 'text2'
$ arg_demo_3.py --execute something --message1 text1 --message2 100
Namespace(execute='something', message1='text1', message2=100)
'''
import random
import string
from robotpt_common_utils import lists
import re
def random_string(length=8):
    """Return a random lowercase ASCII string of the given length.

    Uses random.choices (sampling WITH replacement). The original used
    random.sample, which can never repeat a letter and raises ValueError
    for length > 26.

    :param length: number of characters to generate (default 8)
    """
    letters = string.ascii_lowercase
    return ''.join(random.choices(letters, k=length))
def wildcard_search_in_list(pattern, list_, wildcard_symbol='*'):
    """Return the items of *list_* that match the wildcard *pattern*."""
    list_ = lists.make_sure_is_iterable(list_)
    return [
        item for item in list_
        if is_wildcard_match(pattern, item, wildcard_symbol=wildcard_symbol)
    ]
def is_wildcard_match(pattern, str_, wildcard_symbol='*'):
    """Return True if *str_* matches *pattern*, where the wildcard symbol
    matches any (possibly empty) run of characters.

    :raises ValueError: if the pattern contains regex metacharacters other
        than the wildcard symbol (they would change the regex semantics).
    """
    for special_char in ['^', '$', '.', '+', '?', '{', '}', '/', '\\',
                         '|', '[', ']', '(', ')', ':', '<', '>', '*']:
        # Fixed: compare by value (!=); the original's `is not` relied on
        # CPython string interning for single-character strings.
        if special_char in pattern and special_char != wildcard_symbol:
            raise ValueError("May conflict with REGEX parsing and do weird things")
    regex_glob_star = ".*"
    pattern = pattern.replace(wildcard_symbol, regex_glob_star)
    # Anchor at both ends so the whole string must match.
    pattern = '^' + pattern + '$'
    matches = re.search(pattern, str_)
    return matches is not None
|
import numpy as np
from tensorflow.keras.optimizers import Optimizer
from tensorflow.keras import backend as K
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops, control_flow_ops
from .. import utils
class LossDiffAvgMaximization(Optimizer):
    """Adaptive gradient-descent optimizer ("adapow2" family).

    Adjusts the power-of-2 multiplier of a tiny base step size by watching
    an exponential moving average (EMA) of the per-batch loss difference
    after two gradient-descent steps on each batch: when the EMA stops
    improving over an adjustment period, the direction of the power-of-2
    adjustment is flipped.

    Arguments:
        config['tiny_step_size']: float > 0. The tiny unit step size on one parameter.
        config['pow2_delta']: float > 0. Amount the power-of-2 exponent is
            increased/decreased per adjustment.
        config['loss_diff_ema_beta']: float in (0, 1). EMA decay for the
            loss-difference average; also determines the adjustment period
            1 / (1 - beta).
        config['store_history_state']: bool. If True, stores 'pow2, loss,
            loss_ema' of each batch under config['history_state_path'].
        config['history_state_path']: string. Directory path for storing
            history state.

    NOTE: relies on TF1-style graph sessions (``K.get_session``) and a
    custom training loop that calls ``on_iteration_end`` — not compatible
    with the stock Keras fit loop. Verify against the project's trainer.
    """
    def __init__(self, config = None, **kwargs):
        super(LossDiffAvgMaximization, self).__init__(**kwargs)
        # Defaults; user-supplied config entries override them key-by-key.
        default_config = {
            'tiny_step_size': 1e-6,
            'pow2_delta': 1.,
            'loss_diff_ema_beta': 0.98,
            'store_history_state': False,
            'history_state_path': 'data.LossDiffAvgMaximization',
        }
        if config is None:
            self._config = default_config
        else:
            self._config = dict(list(default_config.items()) + list(config.items()))
        # False while the second (comparison) step of a batch is pending;
        # the training loop keeps feeding the same batch until True.
        self.stop_training_batch = K.variable(True, dtype='bool')
        # Exponent of the step-size multiplier: effective step = 2**pow2 * tiny.
        self.pow2 = K.variable(5., dtype='float32')
    def get_config(self):
        base_config = super(LossDiffAvgMaximization, self).get_config()
        return dict(list(base_config.items()) + list(self._config.items()))
    def on_epoch_begin(self, epoch_logs):
        # Reset the per-epoch history accumulator; it is lazily created on
        # the first recorded iteration (see on_iteration_end).
        self._history_state = None
    def on_epoch_end(self, epoch_logs):
        # Persist per-batch state for this epoch (JSON + plot) if requested.
        if self._config['store_history_state']:
            utils.mkdir_p(self._config['history_state_path'])
            utils.store_history_state(self._history_state, self._config['history_state_path'] + '/epoch-' + str(epoch_logs['epoch'] + 1))
    def on_iteration_end(self, batch_logs = None):
        # Returns whether the current batch is finished (phase 2 done).
        # History is only sampled on finished batches to record one entry
        # per batch rather than per half-step.
        stop = K.get_session().run(self.stop_training_batch)
        if self._config['store_history_state'] and stop:
            if self._history_state is None:
                self._history_state = {}
                for k, _ in self.state.items():
                    self._history_state[k] = []
            state = K.get_session().run(self.state)
            for k, v in state.items():
                self._history_state[k].append(v)
        return stop
    def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        # Global L2 norm of all gradients; used to normalize the step so the
        # parameter-space displacement has a controlled magnitude.
        grad_norm = math_ops.sqrt(math_ops.reduce_sum([math_ops.reduce_sum(math_ops.square(g)) for g in grads]))
        # Norm of a "tiny step" across the whole parameter vector.
        tiny_step_norm = np.float32(self._config['tiny_step_size'] * np.sqrt(np.sum([np.prod(K.int_shape(p)) for p in params])))
        # Graph-state variables for the two-phase loss comparison machine.
        loss_now = K.variable(np.inf, dtype='float32')
        loss_cmp = K.variable(np.inf, dtype='float32')
        loss_diff = K.variable(np.inf, dtype='float32')
        loss_diff_ema_i = K.variable(0., dtype='float32')
        loss_diff_ema_unfixed = K.variable(0., dtype='float32')
        loss_diff_ema = K.variable(0., dtype='float32')
        # +1/-1: current direction in which pow2 is being nudged.
        loss_direction = K.variable(1., dtype='float32')
        # Number of batches between pow2 adjustments (EMA time constant).
        adjustment_period = 1 / (1 - self._config['loss_diff_ema_beta'])
        adjustment_i = K.variable(0., dtype='float32')
        loss_diff_ema_prev = K.variable(0., dtype='float32')
        # Diagnostic: 0 = phase 1, 1 = phase 2, 2 = zero-gradient skip.
        code = K.variable(0, dtype='float32')
        self.state = {
            'pow2': self.pow2,
            'loss_now': loss_now,
            'loss_diff': loss_diff,
            'loss_diff_ema': loss_diff_ema,
            'code': code,
        }
        def on_phase_1():
            # First half-step on the batch: take a step, remember the loss,
            # and flag that the batch must be run again (phase 2).
            alpha_grads = math_ops.pow(2., self.pow2) * tiny_step_norm / grad_norm
            with ops.control_dependencies(
                [p.assign_sub(g * alpha_grads) for p, g in zip(params, grads)] +
                [
                    self.stop_training_batch.assign(False),
                    loss_cmp.assign(loss_now),
                ]
            ):
                return K.constant(0)
        def on_phase_2():
            # Second half-step: measure the loss improvement from phase 1,
            # update the (bias-corrected) EMA, and once per adjustment
            # period decide whether to keep or flip the pow2 direction.
            with ops.control_dependencies([
                loss_diff.assign(loss_cmp - loss_now),
                loss_diff_ema_i.assign_add(1.),
                loss_diff_ema_unfixed.assign(
                    (loss_cmp - loss_now) * (1 - self._config['loss_diff_ema_beta']) +
                    loss_diff_ema_unfixed * self._config['loss_diff_ema_beta']
                ),
                adjustment_i.assign(control_flow_ops.cond(
                    math_ops.greater(adjustment_i, adjustment_period),
                    lambda: 0.,
                    lambda: adjustment_i + 1.)),
            ]):
                # Bias correction (Adam-style): divide by 1 - beta**t.
                new_loss_diff_ema = loss_diff_ema_unfixed / (1 - math_ops.pow(self._config['loss_diff_ema_beta'], loss_diff_ema_i))
                # Flip direction when the EMA failed to improve this period.
                new_loss_direction = control_flow_ops.cond(
                    math_ops.logical_and(
                        math_ops.greater(adjustment_i, adjustment_period),
                        math_ops.less_equal(new_loss_diff_ema, loss_diff_ema_prev)),
                    lambda: -loss_direction,
                    lambda: loss_direction)
                with ops.control_dependencies([
                    self.pow2.assign(control_flow_ops.cond(
                        math_ops.greater(adjustment_i, adjustment_period),
                        lambda: self.pow2 + new_loss_direction * self._config['pow2_delta'],
                        lambda: self.pow2)),
                ]):
                    alpha_grads = math_ops.pow(2., self.pow2) * tiny_step_norm / grad_norm
                    with ops.control_dependencies(
                        [p.assign_sub(g * alpha_grads) for p, g in zip(params, grads)] +
                        [
                            self.stop_training_batch.assign(True),
                            loss_cmp.assign(np.inf),
                            loss_diff_ema.assign(new_loss_diff_ema),
                            loss_direction.assign(new_loss_direction),
                            loss_diff_ema_prev.assign(control_flow_ops.cond(
                                math_ops.greater(adjustment_i, adjustment_period),
                                lambda: new_loss_diff_ema,
                                lambda: loss_diff_ema_prev)),
                        ]
                    ):
                        return K.constant(1)
        def on_zero_grads():
            # All-zero gradients: skip the update entirely (avoids a
            # division by zero in the step normalization) and end the batch.
            with ops.control_dependencies([
                self.stop_training_batch.assign(True),
                loss_cmp.assign(np.inf),
            ]):
                return K.constant(2)
        with ops.control_dependencies([
            loss_now.assign(loss),
            # Clamp the exponent so the step never shrinks below the tiny base.
            self.pow2.assign(math_ops.maximum(0., self.pow2)),
        ]):
            new_code = control_flow_ops.cond(
                math_ops.equal(grad_norm, 0.),
                on_zero_grads,
                lambda: control_flow_ops.cond(
                    self.stop_training_batch,
                    on_phase_1,
                    on_phase_2))
            self.updates = [code.assign(new_code)]
        return self.updates
|
from django.conf.urls import url, include
# API URL routing: delegate each prefix to its dedicated urlconf module,
# namespacing the included patterns so views can reverse() them unambiguously.
urlpatterns = [
    # /account/... -> bmaccounter.api.urls.account (namespace 'account-api')
    url(r'^account/', include("bmaccounter.api.urls.account", namespace='account-api')),
    # /account-group/... -> bmaccounter.api.urls.account_group (namespace 'account-group-api')
    url(r'^account-group/', include("bmaccounter.api.urls.account_group", namespace='account-group-api')),
]
|
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Code starts here
# NOTE(review): `path` is not defined in this file — it is presumably
# injected by the hosting notebook platform. TODO confirm.
data = pd.read_csv(path)
# Bar chart of approved vs. rejected loan counts.
loan_status = data.Loan_Status.value_counts()
loan_status.plot(kind='bar')

# --------------
#Code starts here
# Loan status counts per property area, one column per status.
property_and_loan = data.groupby(['Property_Area', 'Loan_Status']).size().unstack()
# BUG FIX: the original passed stacked="False" — a non-empty string, which is
# truthy, so the chart was *stacked*; the boolean False gives grouped bars.
property_and_loan.plot(kind='bar', stacked=False)
plt.xlabel("Property Area", rotation=45)
plt.ylabel('Loan Status')

# --------------
#Code starts here
# Loan status counts per education level.
education_and_loan = data.groupby(['Education', 'Loan_Status']).size().unstack()
education_and_loan.plot(kind='bar')
plt.xlabel("Education Status", rotation=45)
plt.ylabel('Loan Status')

# --------------
#Code starts here
# Compare loan-amount distributions for graduates vs. non-graduates.
graduate = data[data['Education'] == 'Graduate']
not_graduate = data[data['Education'] == 'Not Graduate']
graduate['LoanAmount'].plot(kind='density', label='Graduate')
not_graduate['LoanAmount'].plot(kind='density', label='Not Graduate')
#Code ends here

#For automatic legend display
plt.legend()

# --------------
#Code starts here
# Scatter plots of loan amount against each income measure.
fig, (ax_1, ax_2, ax_3) = plt.subplots(3, 1, figsize=(20, 10))

data.plot(x="ApplicantIncome", y="LoanAmount", kind="scatter", ax=ax_1)
ax_1.set_title('Applicant Income')

data.plot(x="CoapplicantIncome", y="LoanAmount", kind="scatter", ax=ax_2)
ax_2.set_title('Coapplicant Income')

data["TotalIncome"] = data["ApplicantIncome"] + data["CoapplicantIncome"]
data.plot(x="TotalIncome", y="LoanAmount", kind="scatter", ax=ax_3)
ax_3.set_title("Total Income")
|
# Demo: parse a VASP force calculation into HDF5 and visualise it in Inviwo.
# (The original had the same stdlib modules imported twice; merged into one line.)
import os, sys, inspect, inviwopy

# Make the envisionpy package (one directory up) importable.
path_to_current_folder = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.append(path_to_current_folder + "/../")

import envisionpy
import envisionpy.hdf5parser
from envisionpy.network import VisualisationManager

VASP_DIR = path_to_current_folder + "/../unit_testing/resources/TiPO4_bandstructure"
HDF5_FILE = path_to_current_folder + "/../demo_force2.hdf5"

# Start from a clean slate: remove any HDF5 file left over from a previous
# run. BUG FIX: the original used a bare `except:` that swallowed every error
# and printed a misleading "Somthing went wrong" even on the perfectly normal
# first run; only the expected missing-file case should be ignored.
try:
    os.remove(HDF5_FILE)
except FileNotFoundError:
    pass  # first run — nothing to remove

envisionpy.hdf5parser.force_parser(HDF5_FILE, VASP_DIR)

inviwopy.app.network.clear()

# Initialize inviwo network
visManager = VisualisationManager(HDF5_FILE, inviwopy.app)
visManager.start("test")
|
from pyparsing import *
import act
# Grammar nonterminals, declared as pyparsing Forward placeholders so the
# productions can reference each other recursively; the actual definitions
# (via `<<` / `<<=`) are presumably made further down this module — TODO
# confirm. Each rule is bound immediately to its semantic action in `act`.
change = Forward().setParseAction(act.change)
amount = Forward().setParseAction(act.amount)
uptoamount = Forward().setParseAction(act.uptoamount)
an = Forward().setParseAction(act.an)
another = Forward().setParseAction(act.another)
alll = Forward().setParseAction(act.alll)
quantity = Forward()  # purely structural: no semantic action attached
target = Forward().setParseAction(act.target)
quantitytarget = Forward().setParseAction(act.quantitytarget)
this = Forward().setParseAction(act.this)
that = Forward().setParseAction(act.that)
other = Forward().setParseAction(act.other)
each = Forward().setParseAction(act.each)
the = Forward().setParseAction(act.the)
det = Forward().setParseAction(act.det)
globaldet = Forward()
# Import required libraries
import numpy as np
import pandas as pd
import re
import spacy
import pickle
import time
from datetime import timedelta
# Show up to 100 characters per column when printing DataFrames.
pd.set_option('display.max_colwidth', 100)

# Load the largest english language vector collection from Spacy
nlp = spacy.load('en_vectors_web_lg')

# Read train and test data.
# NOTE(review): assumes 'train.csv'/'test.csv' have 'tweet' and 'label'
# columns (used below) — confirm against the dataset.
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
print(train.shape, test.shape)

# Check the distribution of positive and negative target labels in the training dataset
print(train['label'].value_counts(normalize = True))

# Characters (ASCII punctuation plus common unicode/mojibake symbols) to be
# replaced with whitespace; consumed by clean_text() below.
puncts = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', '#', '*', '+', '\\', '•', '~', '@', '£',
 '·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…',
 '“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─',
 '▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞',
 '∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√']
# Code to replace punctuation marks with whitespace
def clean_text(x):
    """Return ``x`` as a string with every character in the module-level
    ``puncts`` list replaced by a single space.

    Improvement: a single ``str.translate`` pass replaces the original
    loop of up to ~130 chained ``str.replace`` calls — same result, one
    C-level scan of the string.
    """
    table = str.maketrans({p: ' ' for p in puncts})
    return str(x).translate(table)
# Remove URL's from train and test datasets
train['clean_tweet'] = train['tweet'].apply(lambda x: re.sub(r'http\S+', '', x))
test['clean_tweet'] = test['tweet'].apply(lambda x: re.sub(r'http\S+', '', x))

# Remove user handles @  (raw strings avoid invalid-escape warnings)
train['clean_tweet'] = train['clean_tweet'].apply(lambda x: re.sub(r"@[\w]*", '', x))
test['clean_tweet'] = test['clean_tweet'].apply(lambda x: re.sub(r"@[\w]*", '', x))

# Remove punctuation marks (pass the function directly; the lambda wrapper
# added nothing)
train['clean_tweet'] = train['clean_tweet'].apply(clean_text)
test['clean_tweet'] = test['clean_tweet'].apply(clean_text)

# Convert text to lowercase
train['clean_tweet'] = train['clean_tweet'].str.lower()
test['clean_tweet'] = test['clean_tweet'].str.lower()

# Remove numbers. BUG FIX: pass regex=True explicitly — pandas >= 2.0
# changed Series.str.replace's default to regex=False, under which the
# pattern "[0-9]" would be treated literally and digits left untouched.
train['clean_tweet'] = train['clean_tweet'].str.replace(r"[0-9]", " ", regex=True)
test['clean_tweet'] = test['clean_tweet'].str.replace(r"[0-9]", " ", regex=True)

# Collapse runs of whitespace into single spaces
train['clean_tweet'] = train['clean_tweet'].apply(lambda x: ' '.join(x.split()))
test['clean_tweet'] = test['clean_tweet'].apply(lambda x: ' '.join(x.split()))
# Function to lemmatize the tokens to their basic forms to normalize the tweet
# text and focus on key words for the classification tasks
def lemmatization(texts):
    """Lemmatize each document in ``texts``.

    Args:
        texts: Iterable of strings (tweets).

    Returns:
        list of str: Each input rewritten as the space-joined lemmas of its
        tokens, produced by the module-level spaCy pipeline ``nlp``.
    """
    # Comprehension replaces the original build-and-append loop.
    return [' '.join(token.lemma_ for token in nlp(doc)) for doc in texts]
# Record the time to lemmatize the tweets and converting them into vectors using spaCy
start_time = time.monotonic()

# Lemmatise the tokens; drop spaCy's "-PRON-" pronoun placeholder afterwards.
train['clean_tweet'] = lemmatization(train['clean_tweet'])
train['clean_tweet'] = train['clean_tweet'].str.replace("-PRON-", "")
test['clean_tweet'] = lemmatization(test['clean_tweet'])
test['clean_tweet'] = test['clean_tweet'].str.replace("-PRON-", "")
print(train.tail(), test.tail())

# Convert cleaned tweets into Spacy word vectors
# The model returns 300-dimensional embeddings (one vector per tweet).
X_tr = np.array([nlp(tweet).vector for tweet in train['clean_tweet']])
X_te = np.array([nlp(tweet).vector for tweet in test['clean_tweet']])
print(X_tr.shape, X_te.shape)

end_time = time.monotonic()
# Print the time taken to finish the process by spaCy
print(f'Time taken to lemmitize and vectorize 1.6m tweets: {timedelta(seconds=end_time - start_time)}')

# Persist the embedding matrices. Improvement: `with` guarantees the file
# handles are closed even if pickling raises (the original open/close pairs
# leaked the handle on error).
with open("Spacy_train.pickle", "wb") as pickle_out:
    pickle.dump(X_tr, pickle_out)

with open("Spacy_test.pickle", "wb") as pickle_out:
    pickle.dump(X_te, pickle_out)
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for the Keras implementations of models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import time
from absl import logging
import tensorflow as tf
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import tf2
from tensorflow.python.eager import profiler
class BatchTimestamp(object):
  """Value object pairing a global batch index with its wall-clock time."""

  def __init__(self, batch_index, timestamp):
    self.batch_index = batch_index
    self.timestamp = timestamp

  def __repr__(self):
    # Same textual form as before, expressed as an f-string.
    return f"'BatchTimestamp<batch_index: {self.batch_index}, timestamp: {self.timestamp}>'"
class TimeHistory(tf.keras.callbacks.Callback):
  """Callback for Keras models.

  Tracks training throughput: logs examples/second every `log_steps` steps
  (optionally also to TensorBoard) and records per-epoch runtimes.
  """

  def __init__(self, batch_size, log_steps, logdir=None):
    """Callback for logging performance.

    Args:
      batch_size: Total batch size.
      log_steps: Interval of steps between logging of batch level stats.
      logdir: Optional directory to write TensorBoard summaries.
    """
    # TODO(wcromar): remove this parameter and rely on `logs` parameter of
    # on_train_batch_end()
    self.batch_size = batch_size
    super(TimeHistory, self).__init__()
    self.log_steps = log_steps
    # Global step at which stats were last logged.
    self.last_log_step = 0
    # Steps completed in finished epochs / in the current epoch; their sum
    # is the 1-indexed global step (see `global_steps`).
    self.steps_before_epoch = 0
    self.steps_in_epoch = 0
    # Wall-clock time of the first batch in the current logging window;
    # reset to None after each log so the next batch restarts the window.
    self.start_time = None

    if logdir:
      self.summary_writer = tf.summary.create_file_writer(logdir)
    else:
      self.summary_writer = None

    # Logs start of step 1 then end of each step based on log_steps interval.
    self.timestamp_log = []

    # Records the time each epoch takes to run from start to finish of epoch.
    self.epoch_runtime_log = []

  @property
  def global_steps(self):
    """The current 1-indexed global step."""
    return self.steps_before_epoch + self.steps_in_epoch

  @property
  def average_steps_per_second(self):
    """The average training steps per second across all epochs."""
    return self.global_steps / sum(self.epoch_runtime_log)

  @property
  def average_examples_per_second(self):
    """The average number of training examples per second across all epochs."""
    return self.average_steps_per_second * self.batch_size

  def on_train_end(self, logs=None):
    self.train_finish_time = time.time()

    if self.summary_writer:
      self.summary_writer.flush()

  def on_epoch_begin(self, epoch, logs=None):
    self.epoch_start = time.time()

  def on_batch_begin(self, batch, logs=None):
    if not self.start_time:
      self.start_time = time.time()

    # Record the timestamp of the first global step
    if not self.timestamp_log:
      self.timestamp_log.append(BatchTimestamp(self.global_steps,
                                               self.start_time))

  def on_batch_end(self, batch, logs=None):
    """Records elapse time of the batch and calculates examples per second."""
    self.steps_in_epoch = batch + 1
    steps_since_last_log = self.global_steps - self.last_log_step
    if steps_since_last_log >= self.log_steps:
      now = time.time()
      elapsed_time = now - self.start_time
      steps_per_second = steps_since_last_log / elapsed_time
      examples_per_second = steps_per_second * self.batch_size

      self.timestamp_log.append(BatchTimestamp(self.global_steps, now))
      logging.info(
          "TimeHistory: %.2f examples/second between steps %d and %d",
          examples_per_second, self.last_log_step, self.global_steps)

      if self.summary_writer:
        with self.summary_writer.as_default():
          tf.summary.scalar('global_step/sec', steps_per_second,
                            self.global_steps)
          tf.summary.scalar('examples/sec', examples_per_second,
                            self.global_steps)

      self.last_log_step = self.global_steps
      self.start_time = None

  def on_epoch_end(self, epoch, logs=None):
    epoch_run_time = time.time() - self.epoch_start
    self.epoch_runtime_log.append(epoch_run_time)

    # Fold the finished epoch's steps into the global counter.
    self.steps_before_epoch += self.steps_in_epoch
    self.steps_in_epoch = 0
def get_profiler_callback(model_dir, profile_steps, enable_tensorboard,
                          steps_per_epoch):
  """Validate profile_steps flag value and return profiler callback."""
  profile_steps_error_message = (
      'profile_steps must be a comma separated pair of positive integers, '
      'specifying the first and last steps to be profiled.'
  )
  # Parse "start,stop" into two ints; any non-integer token is a user error.
  try:
    step_range = [int(i) for i in profile_steps.split(',')]
  except ValueError:
    raise ValueError(profile_steps_error_message)
  # Exactly two values, non-negative, and in increasing order.
  if len(step_range) != 2 or step_range[0] < 0 or step_range[0] > step_range[1]:
    raise ValueError(profile_steps_error_message)
  start_step, stop_step = step_range
  if enable_tensorboard:
    logging.warning(
        'Both TensorBoard and profiler callbacks are used. Note that the '
        'TensorBoard callback profiles the 2nd step (unless otherwise '
        'specified). Please make sure the steps profiled by the two callbacks '
        'do not overlap.')
  return ProfilerCallback(model_dir, start_step, stop_step, steps_per_epoch)
class ProfilerCallback(tf.keras.callbacks.Callback):
  """Save profiles in specified step range to log directory."""

  def __init__(self, log_dir, start_step, stop_step, steps_per_epoch):
    super(ProfilerCallback, self).__init__()
    self.log_dir = log_dir
    self.start_step = start_step
    self.stop_step = stop_step
    # Translate absolute step bounds into (epoch, step-within-epoch) pairs
    # so the batch hooks can compare against Keras' per-epoch batch index.
    self.start_epoch = start_step // steps_per_epoch
    self.stop_epoch = stop_step // steps_per_epoch
    self.start_step_in_epoch = start_step % steps_per_epoch
    self.stop_step_in_epoch = stop_step % steps_per_epoch
    # Armed at epoch boundaries, consumed by the batch hooks below.
    self.should_start = False
    self.should_stop = False

  def on_epoch_begin(self, epoch, logs=None):
    if epoch == self.start_epoch:
      self.should_start = True
    if epoch == self.stop_epoch:
      self.should_stop = True

  def on_batch_begin(self, batch, logs=None):
    if batch == self.start_step_in_epoch and self.should_start:
      self.should_start = False
      profiler.start()
      logging.info('Profiler started at Step %s', self.start_step)

  def on_batch_end(self, batch, logs=None):
    if batch == self.stop_step_in_epoch and self.should_stop:
      self.should_stop = False
      results = profiler.stop()
      profiler.save(self.log_dir, results)
      logging.info(
          'Profiler saved profiles for steps between %s and %s to %s',
          self.start_step, self.stop_step, self.log_dir)
def set_session_config(enable_eager=False,
                       enable_xla=False):
  """Sets the session config."""
  if not is_v2_0():
    # TF 1.x path: build a ConfigProto and install it either on the eager
    # context or on a fresh graph-mode session.
    config = get_config_proto_v1(enable_xla=enable_xla)
    if enable_eager:
      tf.compat.v1.enable_eager_execution(config=config)
    else:
      tf.keras.backend.set_session(tf.Session(config=config))
  else:
    # TF 2.x configures the (always-eager) context directly.
    set_config_v2(enable_xla=enable_xla)
def get_config_proto_v1(enable_xla=False):
  """Return config proto according to flag settings, or None to use default."""
  # Guard clause: no flags set means "use TF defaults".
  if not enable_xla:
    return None
  config = tf.compat.v1.ConfigProto()
  config.graph_options.optimizer_options.global_jit_level = (
      tf.OptimizerOptions.ON_2)
  return config
def set_config_v2(enable_xla=False):
  """Config eager context according to flag values using TF 2.0 API."""
  if not enable_xla:
    return
  tf.config.optimizer.set_jit(True)
def is_v2_0():
  """Returns true if using tf 2.0."""
  # Delegates to tensorflow.python.tf2.enabled(), which reports whether
  # TF2 behavior is active for this process.
  return tf2.enabled()
def set_gpu_thread_mode_and_count(gpu_thread_mode, num_gpus,
                                  per_gpu_thread_count):
  """Set GPU thread mode and count, and recommend dataset threads count."""
  num_cpu_cores = multiprocessing.cpu_count()
  logging.info('Logical CPU cores: %s', num_cpu_cores)

  # Allocate private thread pool for each GPU to schedule and launch kernels
  gpu_thread_count = per_gpu_thread_count if per_gpu_thread_count else 2
  os.environ['TF_GPU_THREAD_MODE'] = gpu_thread_mode
  os.environ['TF_GPU_THREAD_COUNT'] = str(gpu_thread_count)
  logging.info('TF_GPU_THREAD_COUNT: %s',
               os.environ['TF_GPU_THREAD_COUNT'])
  logging.info('TF_GPU_THREAD_MODE: %s',
               os.environ['TF_GPU_THREAD_MODE'])

  # Limit data preprocessing threadpool to CPU cores minus number of total GPU
  # private threads and memory copy threads.
  threads_reserved_for_gpus = gpu_thread_count * num_gpus + num_gpus
  datasets_num_private_threads = min(
      num_cpu_cores - threads_reserved_for_gpus,
      num_gpus * 8)
  logging.info('Recommended datasets_num_private_threads: %s',
               datasets_num_private_threads)
  return datasets_num_private_threads
|
import os
from unittest import TestCase
from helpers.consts import *
from db_methods import db
from .initial_test_data import test_students, test_teachers
class DatabaseMethodsTest(TestCase):
    """Integration tests for the db module, run against a throwaway SQLite file."""

    def setUp(self) -> None:
        self.db = db
        test_db_filename = 'db/unittest.db'
        # ensure there is no trash file from previous incorrectly handled tests
        # present (SQLite also leaves -shm/-wal sidecar files in WAL mode)
        for file in [test_db_filename, test_db_filename + '-shm', test_db_filename + '-wal']:
            try:
                os.unlink(file)
            except FileNotFoundError:
                pass
        # create shiny new db instance from scratch and connect
        self.db.setup(test_db_filename)
        self.insert_dummy_users()

    def insert_dummy_users(self):
        # Insert the fixture rows and write the generated ids back into the
        # fixture dicts so later assertEqual/assertDictEqual comparisons match.
        for row in test_students + test_teachers:
            real_id = self.db.add_user(row)
            row['id'] = real_id

    def tearDown(self) -> None:
        # Close the connection and remove the db file so each test is isolated.
        self.db.disconnect()
        os.unlink(self.db.db_file)

    def test_users_add_and_fetch_all(self):
        students = self.db.fetch_all_users_by_type(USER_TYPE.STUDENT)
        teachers = self.db.fetch_all_users_by_type(USER_TYPE.TEACHER)
        # No type argument -> all users regardless of type.
        all_users = self.db.fetch_all_users_by_type()
        self.assertEqual(len(students), len(test_students))
        self.assertEqual(len(teachers), len(test_teachers))
        self.assertEqual(len(all_users), len(students) + len(teachers))
        self.assertListEqual(students, test_students)
        self.assertListEqual(teachers, test_teachers)
        self.assertListEqual(all_users, test_students + test_teachers)

    def test_users_get_by(self):
        student = test_students[-1]
        teacher = test_teachers[-1]
        # Each lookup key (id, token, chat_id) must return the same row.
        self.assertDictEqual(self.db.get_user_by_id(student['id']), student)
        self.assertDictEqual(self.db.get_user_by_token(student['token']), student)
        self.assertDictEqual(self.db.get_user_by_chat_id(student['chat_id']), student)
        self.assertDictEqual(self.db.get_user_by_id(teacher['id']), teacher)
        self.assertDictEqual(self.db.get_user_by_token(teacher['token']), teacher)
        self.assertDictEqual(self.db.get_user_by_chat_id(teacher['chat_id']), teacher)
        # Unknown keys resolve to None rather than raising.
        self.assertIsNone(self.db.get_user_by_id(-1))
        self.assertIsNone(self.db.get_user_by_token('-1'))
        self.assertIsNone(self.db.get_user_by_chat_id(-1))

    def test_user_set_chat_id(self):
        student1 = test_students[-1]
        student2 = test_students[0]
        teacher = test_teachers[-1]
        self.assertDictEqual(student1, self.db.get_user_by_id(student1['id']))
        self.assertDictEqual(student2, self.db.get_user_by_id(student2['id']))
        new_chat_id = 12345
        self.db.set_user_chat_id(student1['id'], new_chat_id)
        self.assertEqual(12345, self.db.get_user_by_id(student1['id'])['chat_id'])
        self.assertEqual(student2['chat_id'], self.db.get_user_by_id(student2['id'])['chat_id'])
        # The same user logs in with a different password (account token):
        # a chat_id may belong to only one user, so reassigning it to
        # student2 clears it from student1.
        self.db.set_user_chat_id(student2['id'], new_chat_id)
        self.assertIsNone(self.db.get_user_by_id(student1['id'])['chat_id'])
        self.assertEqual(new_chat_id, self.db.get_user_by_id(student2['id'])['chat_id'])
        # The same user logs in with a different password (now as a teacher).
        self.db.set_user_chat_id(teacher['id'], new_chat_id)
        self.assertEqual(new_chat_id, self.db.get_user_by_id(teacher['id'])['chat_id'])

    def test_user_set_level(self):
        student1 = test_students[-1]
        student2 = test_students[0]
        # Updating one user's level must not leak into other users.
        new_level1 = 'level1'
        self.db.set_user_level(student1['id'], new_level1)
        self.assertEqual(self.db.get_user_by_id(student1['id'])['level'], new_level1)
        self.assertEqual(self.db.get_user_by_id(student2['id'])['level'], student2['level'])
        new_level2 = 'level2'
        self.db.set_user_level(student2['id'], new_level2)
        self.assertEqual(self.db.get_user_by_id(student1['id'])['level'], new_level1)
        self.assertEqual(self.db.get_user_by_id(student2['id'])['level'], new_level2)

    def test_webtokens(self):
        student1 = self.db.get_user_by_token(test_students[-1]['token'])
        student2 = self.db.get_user_by_token(test_students[0]['token'])
        token1 = 'token1'
        token2 = 'token2'
        # No webtokens exist yet: both directions of the mapping are empty.
        self.assertIsNone(self.db.get_webtoken_by_user_id(student1['id']))
        self.assertIsNone(self.db.get_webtoken_by_user_id(student2['id']))
        self.assertIsNone(self.db.get_user_id_by_webtoken(token1))
        self.assertIsNone(self.db.get_user_id_by_webtoken(token2))
        self.db.add_webtoken(student1['id'], token1)
        self.assertEqual(self.db.get_webtoken_by_user_id(student1['id']), token1)
        self.assertIsNone(self.db.get_webtoken_by_user_id(student2['id']))
        self.assertEqual(self.db.get_user_id_by_webtoken(token1), student1['id'])
        self.assertIsNone(self.db.get_user_id_by_webtoken(token2))
        self.db.add_webtoken(student2['id'], token2)
        self.assertEqual(self.db.get_webtoken_by_user_id(student1['id']), token1)
        self.assertEqual(self.db.get_webtoken_by_user_id(student2['id']), token2)
        self.assertEqual(self.db.get_user_id_by_webtoken(token1), student1['id'])
        self.assertEqual(self.db.get_user_id_by_webtoken(token2), student2['id'])
        # Re-adding a webtoken for the same user replaces the old one.
        token2new = 'token2new'
        self.db.add_webtoken(student2['id'], token2new)
        self.assertEqual(self.db.get_webtoken_by_user_id(student1['id']), token1)
        self.assertEqual(self.db.get_webtoken_by_user_id(student2['id']), token2new)
        self.assertEqual(self.db.get_user_id_by_webtoken(token1), student1['id'])
        self.assertEqual(self.db.get_user_id_by_webtoken(token2new), student2['id'])

    # TODO: the following db methods do not have tests yet:
    # def add_problem(self, data: dict)
    # def fetch_all_problems(self)
    # def fetch_all_lessons(self)
    # def get_last_lesson_num(self)
    # def fetch_all_problems_by_lesson(self, level: str, lesson: int)
    # def get_problem_by_id(self, id: int)
    # def get_problem_by_text_number(self, level: str, lesson: int, prob: int, item: '')
    # def fetch_all_states(self)
    # def get_state_by_user_id(self, user_id: int)
    # def update_state(self, user_id: int, state: int, problem_id: int = 0, last_student_id: int = 0, last_teacher_id: int = 0, oral_problem_id: int = None)
    # def update_oral_problem(self, user_id: int, oral_problem_id: int = None)
    # def add_result(self, student_id: int, problem_id: int, level: str, lesson: int, teacher_id: int, verdict: int, answer: str, res_type: int = None)
    # def check_num_answers(self, student_id: int, problem_id: int)
    # def delete_plus(self, student_id: int, problem_id: int, verdict: int)
    # def check_student_solved(self, student_id: int, level: str, lesson: int)
    # def check_student_sent_written(self, student_id: int, lesson: int)
    # def insert_into_written_task_queue(self, student_id: int, problem_id: int, cur_status: int, ts: datetime = None)
    # def get_written_tasks_to_check(self, teacher_id)
    # def get_written_tasks_count(self)
    # def upd_written_task_status(self, student_id: int, problem_id: int, new_status: int, teacher_id: int = None)
    # def delete_from_written_task_queue(self, student_id: int, problem_id: int)
    # def add_user_to_waitlist(self, student_id: int, problem_id: int)
    # def remove_user_from_waitlist(self, student_id: int)
    # def get_waitlist_top(self, top_n: int)
    # def insert_into_written_task_discussion(self, student_id: int, problem_id: int, teacher_id: int, text: str, attach_path: str, chat_id: int, tg_msg_id: int)
    # def fetch_written_task_discussion(self, student_id: int, problem_id: int)
    # def add_message_to_log(self, from_bot: bool, tg_msg_id: int, chat_id: int, student_id: int, teacher_id: int, msg_text: str, attach_path: str)
    # def calc_last_lesson_stat(self)
|
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
import cohesity_management_sdk.models.vault_encryption_key
class CreateRemoteVaultSearchJobParameters(object):
    """Implementation of the 'CreateRemoteVaultSearchJobParameters' model.

    Specifies settings required to create a search of a
    remote Vault for data that has been archived from other Clusters.

    Attributes:
        cluster_match_string (string): Filter by specifying a Cluster name
            prefix string. Only Clusters with names that start with this
            prefix string are returned in the search result. If not set, all
            Clusters archiving data to the Vault are returned in the search
            results.
        encryption_keys (list of VaultEncryptionKey): Array of Encryption
            Keys. Specifies an optional list of encryption keys that may be
            needed to search and restore data that was archived to a remote
            Vault. Archived data cannot be searched or restored without the
            corresponding encryption key used by the original Cluster to
            archive the data. Encryption keys can be uploaded using the REST
            API public/remoteVaults/encryptionKeys operation. If the key is
            already uploaded, this field does not need to be specified.
        end_time_usecs (long|int): Filter by a end time specified as a Unix
            epoch Timestamp (in microseconds). Only Job Runs that completed
            before the specified end time are included in the search results.
        job_match_string (string): Filter by specifying a Protection Job name
            prefix string. Only Protection Jobs with names that start with
            this prefix string are returned in the search result. If not set,
            all Protection Jobs archiving data to the Vault are returned in
            the search results.
        search_job_name (string): Specifies the search Job name.
        start_time_usecs (long|int): Filter by a start time specified as a
            Unix epoch Timestamp (in microseconds). Only Job Runs that started
            after the specified time are included in the search results.
        vault_id (long|int): Specifies the id of the Vault to search. This id
            was assigned by the local Cohesity Cluster when Vault was
            registered as an External Target.
    """

    # Create a mapping from Model property names to API property names
    _names = {
        "search_job_name":'searchJobName',
        "vault_id":'vaultId',
        "cluster_match_string":'clusterMatchString',
        "encryption_keys":'encryptionKeys',
        "end_time_usecs":'endTimeUsecs',
        "job_match_string":'jobMatchString',
        "start_time_usecs":'startTimeUsecs'
    }

    def __init__(self,
                 search_job_name=None,
                 vault_id=None,
                 cluster_match_string=None,
                 encryption_keys=None,
                 end_time_usecs=None,
                 job_match_string=None,
                 start_time_usecs=None):
        """Constructor for the CreateRemoteVaultSearchJobParameters class"""

        # Initialize members of the class
        self.cluster_match_string = cluster_match_string
        self.encryption_keys = encryption_keys
        self.end_time_usecs = end_time_usecs
        self.job_match_string = job_match_string
        self.search_job_name = search_job_name
        self.start_time_usecs = start_time_usecs
        self.vault_id = vault_id

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object as
            obtained from the deserialization of the server's response. The keys
            MUST match property names in the API description.

        Returns:
            object: An instance of this structure class, or None when the
            input dictionary is None.

        """
        if dictionary is None:
            return None

        # Extract variables from the dictionary
        search_job_name = dictionary.get('searchJobName')
        vault_id = dictionary.get('vaultId')
        cluster_match_string = dictionary.get('clusterMatchString')
        # Deserialize nested encryption keys if present.
        # FIX: compare against None with `is not None` (PEP 8) instead of
        # `!= None`; build the list with a comprehension.
        encryption_keys = None
        if dictionary.get('encryptionKeys') is not None:
            encryption_keys = [
                cohesity_management_sdk.models.vault_encryption_key.VaultEncryptionKey.from_dictionary(structure)
                for structure in dictionary.get('encryptionKeys')
            ]
        end_time_usecs = dictionary.get('endTimeUsecs')
        job_match_string = dictionary.get('jobMatchString')
        start_time_usecs = dictionary.get('startTimeUsecs')

        # Return an object of this model
        return cls(search_job_name,
                   vault_id,
                   cluster_match_string,
                   encryption_keys,
                   end_time_usecs,
                   job_match_string,
                   start_time_usecs)
|
#!/usr/bin/env python
# encoding: utf-8
from setuptools import setup
# Package metadata for the random-wallpaper distribution.
setup(
    name='random-wallpaper',
    description='Script to maintain a collection of random wallpapers.',
    version='1.2',
    author='Michael Killough',
    author_email='michaeljkillough@gmail.com',
    url='https://github.com/mjkillough/random-wallpaper',
    platforms=['linux'],
    # Fixed: setuptools expects `license` to be a string, not a list.
    license='MIT',
    install_requires=[
        'requests',
        'pyxdg',
    ],
    py_modules=['random_wallpaper'],
    entry_points={
        'console_scripts': [
            'random-wallpaper=random_wallpaper:main',
        ],
    },
)
|
from textplot.text import Text
from collections import OrderedDict
def test_term_count_buckets():
    """
    term_count_buckets() should map integer counts to the list of terms in the
    text that appear that many times.
    """
    text = Text('aa bb bb cc cc dd dd dd')
    expected = {
        1: ['aa'],
        2: ['bb', 'cc'],
        3: ['dd'],
    }
    assert text.term_count_buckets() == expected
|
from .custom_unet_model import CustomUnetModel
from .satellite_unet_model import SatelliteUnetModel
from .small_unet_model import SmallUnetModel
|
"""
To trace the falcon web framework, install the trace middleware::
import falcon
from ddtrace import tracer
from ddtrace.contrib.falcon import TraceMiddleware
mw = TraceMiddleware(tracer, 'my-falcon-app')
falcon.API(middleware=[mw])
"""
from ..util import require_modules
# Only expose the falcon integration when falcon itself is importable;
# otherwise this package imports cleanly but exports nothing.
required_modules = ['falcon']

with require_modules(required_modules) as missing_modules:
    if not missing_modules:
        from .middleware import TraceMiddleware
        from .patch import patch

        __all__ = ['TraceMiddleware', 'patch']
|
import os
import torch
import pickle
import numpy as np
import matplotlib.pyplot as plt
from skimage import io
from skimage.transform import resize
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from torchvision.transforms import Compose
import warnings
warnings.filterwarnings("ignore")
#with open('./data/velocity64.pickle','rb') as handle:
# velocities = pickle.load(handle)
class SmokeDataset(Dataset):
    """Dataset of smoke-simulation velocity fields generated from Mantaflow.

    Each item is a pair ``(image, resized_image)`` where ``resized_image``
    is the result of applying ``transform`` to the image, or the untouched
    image itself when no transform was supplied.
    """

    def __init__(self, data, transform=None):
        """data: array indexable as [N, ...] (first axis is the sample axis
        — presumably (N, H, W, C); TODO confirm against the caller).
        transform: optional callable applied to each sample."""
        self.velocities = data  # np.load(data, allow_pickle=True)
        self.transform = transform

    def __len__(self):
        # Number of samples is the size of the first axis.
        return self.velocities.shape[0]

    def __getitem__(self, index):
        image = self.velocities[index, :, :, :]
        # Bug fix: the original referenced `resized_image` even when no
        # transform was set, raising NameError; fall back to the raw image.
        if self.transform:
            resized_image = self.transform(image)
        else:
            resized_image = image
        return image, resized_image
class Normalize(object):
    """Min-max normalise an image into the [0, 1] range."""

    def __call__(self, image):
        # Shift to zero minimum, then scale by the value range.
        lo = np.min(image)
        hi = np.max(image)
        return (image - lo) / (hi - lo)

    def __repr__(self):
        return self.__class__.__name__ + '()'
class Resize(object):
    """Resize an image for the generator.

    Generalized: the target shape is now a constructor parameter; the
    default (8, 8) preserves the original hard-coded behaviour, so
    ``Resize()(img)`` is unchanged for existing callers.
    """

    def __init__(self, output_shape=(8, 8)):
        self.output_shape = output_shape

    def __call__(self, image):
        # anti_aliasing disabled to match the original downsampling output.
        return resize(image, self.output_shape, anti_aliasing=False)

    def __repr__(self):
        return self.__class__.__name__ + '()'
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Reusable command line interface options for Code commands."""
import click
from aiida.cmdline.params import options, types
from aiida.cmdline.params.options.interactive import InteractiveOption, TemplateInteractiveOption
from aiida.cmdline.params.options.overridable import OverridableOption
def is_on_computer(ctx):
    """Return whether the ``on_computer`` flag is set in the context params."""
    on_computer = ctx.params.get('on_computer')
    return bool(on_computer)
def is_not_on_computer(ctx):
    """Return whether the code is NOT installed on the target computer."""
    # Inlined lookup of the `on_computer` flag; `not` already yields a bool.
    return not bool(ctx.params.get('on_computer'))
def validate_label_uniqueness(ctx, _, value):
    """Validate the uniqueness of the label of the code.

    The exact uniqueness criterion depends on the type of the code, whether it is "local" or "remote". For the former,
    the `label` itself should be unique, whereas for the latter it is the full label, i.e., `label@computer.label`.

    .. note:: For this to work in the case of the remote code, the computer parameter already needs to have been parsed
    In interactive mode, this means that the computer parameter needs to be defined after the label parameter in the
    command definition. For non-interactive mode, the parsing order will always be determined by the order the
    parameters are specified by the caller and so this validator may get called before the computer is parsed. For
    that reason, this validator should also be called in the command itself, to ensure it has both the label and
    computer parameter available.

    :param ctx: click context, consulted for the `computer` and `on_computer` parameters.
    :param _: unused click parameter object.
    :param value: the proposed label.
    :returns: the validated label.
    :raises click.BadParameter: when a code with the same (full) label already exists.
    """
    # Imported lazily to keep CLI start-up cheap.
    from aiida.common import exceptions
    from aiida.orm import load_code

    computer = ctx.params.get('computer', None)
    on_computer = ctx.params.get('on_computer', None)

    # "Local" code (stored in the DB): the bare label must be unique.
    if on_computer is False:
        try:
            load_code(value)
        except exceptions.NotExistent:
            # No clash: the label is free.
            pass
        except exceptions.MultipleObjectsError:
            raise click.BadParameter(f'multiple copies of the remote code `{value}` already exist.')
        else:
            # try succeeded => a code with this label already exists.
            raise click.BadParameter(f'the code `{value}` already exists.')

    # "Remote" code: uniqueness applies to `label@computer.label`, so it can
    # only be checked once the computer parameter has been parsed.
    if computer is not None:
        full_label = f'{value}@{computer.label}'

        try:
            load_code(full_label)
        except exceptions.NotExistent:
            pass
        except exceptions.MultipleObjectsError:
            raise click.BadParameter(f'multiple copies of the local code `{full_label}` already exist.')
        else:
            raise click.BadParameter(f'the code `{full_label}` already exists.')

    return value
# Reusable `verdi code setup` options. Each is an OverridableOption /
# cloned standard option rendered interactively (prompting) by default.

# Whether the executable already lives on the target computer, or must be
# uploaded from a local folder on every submission.
ON_COMPUTER = OverridableOption(
    '--on-computer/--store-in-db',
    is_eager=False,
    default=True,
    cls=InteractiveOption,
    prompt='Installed on target computer?',
    help='Whether the code is installed on the target computer, or should be copied to the target computer each time '
    'from a local path.'
)

# Options below use required_fn/prompt_fn so that only the set relevant to
# the chosen --on-computer/--store-in-db mode is required and prompted for.
REMOTE_ABS_PATH = OverridableOption(
    '--remote-abs-path',
    prompt='Remote absolute path',
    required_fn=is_on_computer,
    prompt_fn=is_on_computer,
    type=types.AbsolutePathParamType(dir_okay=False),
    cls=InteractiveOption,
    help='[if --on-computer]: Absolute path to the executable on the target computer.'
)

FOLDER = OverridableOption(
    '--code-folder',
    prompt='Local directory containing the code',
    required_fn=is_not_on_computer,
    prompt_fn=is_not_on_computer,
    type=click.Path(file_okay=False, exists=True, readable=True),
    cls=InteractiveOption,
    help='[if --store-in-db]: Absolute path to directory containing the executable and all other files necessary for '
    'running it (to be copied to target computer).'
)

REL_PATH = OverridableOption(
    '--code-rel-path',
    prompt='Relative path of executable inside code folder',
    required_fn=is_not_on_computer,
    prompt_fn=is_not_on_computer,
    type=click.Path(dir_okay=False),
    cls=InteractiveOption,
    help='[if --store-in-db]: Relative path of the executable inside the code-folder.'
)

USE_DOUBLE_QUOTES = OverridableOption(
    '--use-double-quotes/--not-use-double-quotes',
    default=False,
    cls=InteractiveOption,
    prompt='Escape CLI arguments in double quotes',
    help='Whether the executable and arguments of the code in the submission script should be escaped with single '
    'or double quotes.'
)

# Label uniqueness is enforced by the callback (see validate_label_uniqueness).
LABEL = options.LABEL.clone(
    prompt='Label',
    callback=validate_label_uniqueness,
    cls=InteractiveOption,
    help="This label can be used to identify the code (using 'label@computerlabel'), as long as labels are unique per "
    'computer.'
)

DESCRIPTION = options.DESCRIPTION.clone(
    prompt='Description',
    cls=InteractiveOption,
    help='A human-readable description of this code, ideally including version and compilation environment.'
)

INPUT_PLUGIN = options.INPUT_PLUGIN.clone(
    required=False,
    prompt='Default calculation input plugin',
    cls=InteractiveOption,
    help="Entry point name of the default calculation plugin (as listed in 'verdi plugin list aiida.calculations')."
)

COMPUTER = options.COMPUTER.clone(
    prompt='Computer',
    cls=InteractiveOption,
    required_fn=is_on_computer,
    prompt_fn=is_on_computer,
    help='Name of the computer, on which the code is installed.'
)

# The prepend/append scripts open an editor template; lines starting with
# `#=` in the template are stripped from the stored value.
PREPEND_TEXT = OverridableOption(
    '--prepend-text',
    cls=TemplateInteractiveOption,
    prompt='Prepend script',
    type=click.STRING,
    default='',
    help='Bash commands that should be prepended to the executable call in all submit scripts for this code.',
    extension='.bash',
    header='PREPEND_TEXT: if there is any bash commands that should be prepended to the executable call in all '
    'submit scripts for this code, type that between the equal signs below and save the file.',
    footer='All lines that start with `#=` will be ignored.'
)

APPEND_TEXT = OverridableOption(
    '--append-text',
    cls=TemplateInteractiveOption,
    prompt='Append script',
    type=click.STRING,
    default='',
    help='Bash commands that should be appended to the executable call in all submit scripts for this code.',
    extension='.bash',
    header='APPEND_TEXT: if there is any bash commands that should be appended to the executable call in all '
    'submit scripts for this code, type that between the equal signs below and save the file.',
    footer='All lines that start with `#=` will be ignored.'
)
|
import stripe
from stripe.test.helper import StripeResourceTest
class MetadataTest(StripeResourceTest):
    """Verify how Charge.save() serializes metadata updates.

    The four scenarios previously duplicated the same request assertion;
    it now lives in a single private helper for consistency.
    """

    def _save_and_assert_posted(self, charge, expected_params):
        """Save `charge` and assert the exact POST payload sent to the API."""
        charge.save()
        self.requestor_mock.request.assert_called_with(
            'post',
            '/v1/charges/ch_foo',
            expected_params,
            None
        )

    def test_noop_metadata(self):
        # Saving without touching metadata must not send a metadata key.
        charge = stripe.Charge(id='ch_foo')
        charge.description = 'test'
        self._save_and_assert_posted(charge, {'description': 'test'})

    def test_unset_metadata(self):
        # An empty dict clears all metadata server-side.
        charge = stripe.Charge(id='ch_foo')
        charge.metadata = {}
        self._save_and_assert_posted(charge, {'metadata': {}})

    def test_whole_update(self):
        # Assigning a dict replaces the whole metadata object.
        charge = stripe.Charge(id='ch_foo')
        charge.metadata = {'whole': 'update'}
        self._save_and_assert_posted(charge, {'metadata': {'whole': 'update'}})

    def test_individual_delete(self):
        # A None value deletes a single metadata key.
        charge = stripe.Charge(id='ch_foo')
        charge.metadata = {'whole': None}
        self._save_and_assert_posted(charge, {'metadata': {'whole': None}})
|
import pandas as pd
import os
import pickle
import collections
import random
import tensorflow as tf
from ampligraph.evaluation import train_test_split_no_unseen
from ampligraph.datasets import load_from_csv
from tqdm import tqdm
# --- Input/output locations, all relative to this file's INPUT directory ---
filedir = os.path.dirname(__file__)
Input_dir = os.path.join(filedir,'INPUT')
code_desc_filename = os.path.join(Input_dir,"code_desc.csv")
code_relation_filename = os.path.join(Input_dir,"code_relation_code.csv") #columns = ['code_1', 'relation', 'code_2']
#code_desc_filename = "./INPUT/code_desc_100.csv"
#code_relation_filename = "./INPUT/code_relation_code_100.csv" #columns = ['code_1', 'relation', 'code_2']

# Generated tab-separated text files (full corpus plus train/test splits).
text_out_filename = os.path.join(Input_dir,"codes_input_full.txt")
text_out_filename_train = os.path.join(Input_dir,"codes_input_train.txt")
text_out_filename_test = os.path.join(Input_dir,"codes_input_test.txt")
desc_out_filename = os.path.join(Input_dir,"desc_input_train.txt")
kg_out_filename = os.path.join(Input_dir,"kg_input_train.txt")

# Pickled vocabularies: code->index, relation->index, node->relation->tails.
code_index_filename = os.path.join(Input_dir,"codes_vocab.pkl")
rel_index_filename = os.path.join(Input_dir,"rel_vocab.pkl")
node_rel_index_filename = os.path.join(Input_dir,"node_rel_vocab.pkl")
#text_out_filename = "./INPUT/codes_input_train.txt"

# Column names in the description CSV and the train/test split fraction.
code_column = "code"
desc_column = "description"
test_size = .2
def _parse(line):
    """Parse one tab-separated line of train data into three int columns."""
    # record_defaults: three integer columns, each defaulting to 1.
    cols_types = [[1], [1], [1]]
    return tf.decode_csv(line, record_defaults=cols_types, field_delim='\t')
# if not os.path.isfile(text_out_filename):
# NOTE(review): `if True:` replaces the file-existence guard above, so the
# preprocessing always reruns; restore the guard to enable caching.
if True:
    # Load and shuffle the code descriptions, then dump the text column.
    df_desc = pd.read_csv(code_desc_filename)
    df_desc = df_desc.sample(frac=1).reset_index(drop=True)
    df_desc[[desc_column]].to_csv(desc_out_filename, header=False, index=False, sep='\t')
    # create input text filename for training
    #with open(text_out_filename, "w", encoding='utf-8') as text_file:
    #    for ind, row in df_desc.iterrows():
    #        text_file.write(row[desc_column] + "\n")
    #    text_file.close()
    num_codes = df_desc.shape[0]

    # save dict of code, index and desc (if required)
    code_desc = dict(zip(df_desc[code_column], df_desc[desc_column]))
    code_index = dict(zip(df_desc[code_column], range(df_desc.shape[0])))
    index_code = dict(zip(range(df_desc.shape[0]), df_desc[code_column]))

    df_rel = pd.read_csv(code_relation_filename)
    print("df_rel Shape" , df_rel.shape)

    # save relations tuple in a dict { key : code_index , value: list of list([code_1, relation, code_2])}
    num_rel = 0
    rel_index = dict()
    #rel_tuple = dict(zip(index_code.keys(), [{'t':[],'r':[]} for _ in range(len(index_code))]))
    rel_tuple = dict(zip(index_code.keys(), [[] for _ in range(len(index_code))]))
    node_rel_dict = dict()
    l_h = []
    l_t = []
    l_r = []

    # Replace every code by its integer index.
    for ind, row in tqdm(df_desc.iterrows()):
        df_desc.loc[ind, code_column] = code_index[row[code_column]]

    # Build (head, relation, tail) triples, assigning relation ids on first use.
    for ind, row in tqdm(df_rel.iterrows()):
        try:
            code_1 = code_index[row['code_1']]
            code_2 = code_index[row['code_2']]
            if row['relation'] in rel_index:
                relation = rel_index[row['relation']]
            else:
                rel_index[row['relation']] = num_rel
                relation = rel_index[row['relation']]
                num_rel += 1
            #rel_tuple[code_1]['t'].append(code_2)
            #rel_tuple[code_1]['r'].append(relation)
            rel_tuple[code_1].append([code_1, relation, code_2])
            if code_1 not in node_rel_dict:
                node_rel_dict[code_1] = {}
            if relation not in node_rel_dict[code_1]:
                node_rel_dict[code_1][relation] = [code_2]
            else:
                node_rel_dict[code_1][relation].append(code_2)
            l_h.append(code_1)
            l_t.append(code_2)
            l_r.append(relation)
        except:
            # NOTE(review): bare except silently drops triples whose codes are
            # missing from code_index (and masks any other error) — consider
            # narrowing to `except KeyError`.
            pass

    new_rel = pd.DataFrame()
    new_rel['code_1'] = l_h
    new_rel['code_2'] = l_t
    new_rel['relation'] = l_r

    # Attach triples to descriptions; codes without a relation fall back to
    # a self-loop (code_1 == code_2) with relation id "1".
    df_desc = pd.merge(df_desc, new_rel, how='left', left_on=[code_column], right_on=['code_1'])
    df_desc['code_1'] = df_desc.apply(lambda x: str(int(x.code_1)) if pd.notnull(x.code_1) else str(int(x[code_column])), axis=1)
    df_desc['code_2'] = df_desc.apply(lambda x: str(int(x.code_2)) if pd.notnull(x.code_2) else str(int(x[code_column])), axis=1)
    df_desc['relation'] = df_desc['relation'].apply(lambda x: str(int(x)) if pd.notnull(x) else str(1))
    df_desc['code_1'] = df_desc['code_1'].astype(str)
    df_desc['code_2'] = df_desc['code_2'].astype(str)
    df_desc['relation'] = df_desc['relation'].astype(str)
    df_desc = df_desc.dropna()
    df_desc[[desc_column, 'code_1', 'code_2', 'relation']].to_csv(text_out_filename, header=False, index=False, sep='\t')

    # Persist the vocabularies for downstream consumers.
    pickle.dump(code_index, open(code_index_filename, 'wb'))
    pickle.dump(rel_index, open(rel_index_filename, 'wb'))
    pickle.dump(node_rel_dict, open(node_rel_index_filename, 'wb'))
    #df_desc = df_desc.drop_duplicates(subset=['code_1']).reset_index(drop=True)

    # Split without introducing unseen entities in the test set.
    print ("Splitting into train and test for KG validation")
    X = load_from_csv(directory_path="",file_name=text_out_filename, sep='\t')
    X_train, X_test = train_test_split_no_unseen(X, test_size=int(test_size*X.shape[0]))
    X_train = pd.DataFrame(X_train, columns=[desc_column, 'code_1', 'code_2', 'relation'])
    X_test = pd.DataFrame(X_test, columns=[desc_column, 'code_1', 'code_2', 'relation'])
    X_train[[desc_column, 'code_1', 'code_2', 'relation']].to_csv(text_out_filename_train, header=False, index=False, sep='\t')
    X_test[[desc_column, 'code_1', 'code_2', 'relation']].to_csv(text_out_filename_test, header=False, index=False, sep='\t')
    new_rel.to_csv(kg_out_filename, sep='\t', index=False, header=False)

    # save count of relations for each code in dict { key : code_index , value: #relation with head as code_index }
    rel_count = dict()
    for key in rel_tuple:
        rel_count[key] = len(rel_tuple[key])
    #print("rel_count dict = ", rel_count)
    #print("___rel_tuple = ", rel_tuple)

    # Build the TF dataset of triples plus one random head per example.
    print ("Building TF KG dataset")
    tf_kg_dataset = tf.data.TextLineDataset(kg_out_filename)
    tf_kg_dataset = tf_kg_dataset.map(_parse, num_parallel_calls=4)
    tf_kg_dataset = tf_kg_dataset.map(lambda hi, ti, ri:
                                      tf.tuple([tf.cast(hi, tf.int32),
                                                tf.cast(ti, tf.int32),
                                                tf.cast(ri, tf.int32),
                                                tf.cast(random.choice(list(rel_tuple.keys())), tf.int32)]))
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""This plugin is useful for building parts that use maven.
The maven build system is commonly used to build Java projects.
The plugin requires a pom.xml in the root of the source tree.
This plugin uses the common plugin keywords as well as those for "sources".
For more information check the 'plugins' topic for the former and the
'sources' topic for the latter.
Additionally, this plugin uses the following plugin-specific keywords:
- maven-options:
(list of strings)
flags to pass to the build using the maven semantics for parameters.
"""
import glob
import logging
import os
from urllib.parse import urlparse
from xml.etree import ElementTree
import snapcraft
import snapcraft.common
import snapcraft.plugins.jdk
logger = logging.getLogger(__name__)
class MavenPlugin(snapcraft.plugins.jdk.JdkPlugin):
    """Build a part with `mvn package` and install its built archives."""

    @classmethod
    def schema(cls):
        schema = super().schema()

        schema['properties']['maven-options'] = {
            'type': 'array',
            'minitems': 1,
            'uniqueItems': True,
            'items': {
                'type': 'string',
            },
            'default': [],
        }
        schema['properties']['maven-targets'] = {
            'type': 'array',
            'minitems': 1,
            'uniqueItems': True,
            'items': {
                'type': 'string',
            },
            'default': [''],
        }
        return schema

    def __init__(self, name, options, project):
        super().__init__(name, options, project)
        # Maven itself is needed on the build host.
        self.build_packages.append('maven')

    def _use_proxy(self):
        # True when a proxy is configured via the conventional env vars.
        return any(k in os.environ for k in ('http_proxy', 'https_proxy'))

    @classmethod
    def get_build_properties(cls):
        # Inform Snapcraft of the properties associated with building. If these
        # change in the YAML Snapcraft will consider the build step dirty.
        return ['maven-options', 'maven-targets']

    def build(self):
        super().build()

        mvn_cmd = ['mvn', 'package']
        if self._use_proxy():
            # Mirror the proxy environment into a Maven settings file.
            settings_path = os.path.join(self.partdir, 'm2', 'settings.xml')
            _create_settings(settings_path)
            mvn_cmd += ['-s', settings_path]

        self.run(mvn_cmd + self.options.maven_options)

        for target in self.options.maven_targets:
            src = os.path.join(self.builddir, target, 'target')
            jarfiles = glob.glob(os.path.join(src, '*.jar'))
            warfiles = glob.glob(os.path.join(src, '*.war'))
            # Fixed glob typo: '.*.tgz' only matched hidden files; '*.tgz'
            # picks up ordinary .tgz archives.
            archive_globs = ('*.tar.gz', '*.zip', '*.tgz', '*.war', '*.jar')
            arfiles = []
            for pattern in archive_globs:
                arfiles.extend(glob.glob(os.path.join(src, pattern)))

            if len(arfiles) == 0:
                raise RuntimeError("Could not find any "
                                   "built files for part")

            # Single-module builds land in jar/ or war/; everything else
            # (multi-target or other archives) goes under zip/.
            if len(jarfiles) > 0 and len(target) == 0:
                basedir = 'jar'
            elif len(warfiles) > 0 and len(target) == 0:
                basedir = 'war'
            else:
                basedir = 'zip'

            targetdir = os.path.join(self.installdir, basedir)
            os.makedirs(targetdir, exist_ok=True)
            for artifact in arfiles:
                # Hard-link each built archive into the install dir.
                base = os.path.basename(artifact)
                os.link(artifact, os.path.join(targetdir, base))
def _create_settings(settings_path):
    """Write a Maven settings.xml that disables interactive mode and
    mirrors any http(s)_proxy environment variables as proxy entries."""
    root = ElementTree.Element('settings', attrib={
        'xmlns': 'http://maven.apache.org/SETTINGS/1.0.0',
        'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
        'xsi:schemaLocation': (
            'http://maven.apache.org/SETTINGS/1.0.0 '
            'http://maven.apache.org/xsd/settings-1.0.0.xsd'),
    })

    def append_text_element(parent, tag, text):
        # Small helper: append <tag>text</tag> under parent.
        node = ElementTree.Element(tag)
        node.text = text
        parent.append(node)

    append_text_element(root, 'interactiveMode', 'false')

    proxies = ElementTree.Element('proxies')
    for protocol in ('http', 'https'):
        env_name = '{}_proxy'.format(protocol)
        if env_name not in os.environ:
            continue

        proxy_url = urlparse(os.environ[env_name])
        proxy = ElementTree.Element('proxy')
        proxy_tags = [
            ('id', env_name),
            ('active', 'true'),
            ('protocol', protocol),
            ('host', proxy_url.hostname),
            ('port', str(proxy_url.port)),
        ]
        if proxy_url.username is not None:
            proxy_tags.append(('username', proxy_url.username))
            proxy_tags.append(('password', proxy_url.password))
        proxy_tags.append(('nonProxyHosts', _get_no_proxy_string()))

        for tag, text in proxy_tags:
            append_text_element(proxy, tag, text)
        proxies.append(proxy)
    root.append(proxies)

    os.makedirs(os.path.dirname(settings_path), exist_ok=True)
    with open(settings_path, 'w') as settings_file:
        ElementTree.ElementTree(root).write(settings_file, encoding='unicode')
        settings_file.write('\n')
def _get_no_proxy_string():
    """Build Maven's pipe-separated nonProxyHosts value from $no_proxy."""
    hosts = os.environ.get('no_proxy', 'localhost').split(',')
    return '|'.join(host.strip() for host in hosts)
|
#!/usr/bin/python3
import os
import re
import shutil
import sys
import urllib.error
import urllib.parse
import urllib.request
import xml.etree.ElementTree as ET
from pathlib import Path
def fetch(url, target: Path):
    """Download *url* to *target*, skipping targets that already exist.

    Returns the target path on success (or when skipped), None when the
    server answers with an HTTP error. A partially written file is removed
    on failure so that a later retry is not mistakenly skipped.
    """
    if target.exists() and target.stat().st_size > 0:
        print(f"Skipped: {url}")
        return target
    try:
        print(f"Downloading: {url}")
        with urllib.request.urlopen(url) as rs:
            os.makedirs(target.parent, exist_ok=True)
            with open(target, "wb") as fp:
                shutil.copyfileobj(rs, fp)
        return target
    # urllib.error is now imported explicitly; before, this relied on
    # urllib.request importing it as a side effect.
    except urllib.error.HTTPError as e:
        print(f"Failed: {url} ({e})")
        if target.exists():
            target.unlink()
        return None
    except KeyboardInterrupt:
        if target.exists():
            target.unlink()
        raise
def getMeetingId(url):
    """Extract the meeting id from a BBB playback URL.

    Supports the 2.0 layout (`playback.html?meetingId=<id>`) and the 2.3
    layout (`/playback/presentation/2.3/<id>`).

    Raises ValueError for unrecognised URLs.
    """
    for pattern in (
        r"^.*/playback/presentation/2\.0/playback.html\?meetingId=(\S+)$",
        # Fixed: the dot in "2.3" was unescaped and matched any character.
        r"^.*/playback/presentation/2\.3/(\S+)$",
    ):
        m = re.match(pattern, url)
        if m:
            return m.group(1)
    raise ValueError(f"Unsupported presentation URL: {url}")
def download(url, outputPath: Path):
    """Mirror all playback assets of a BBB presentation into outputPath."""
    meetingId = getMeetingId(url)
    base = urllib.parse.urljoin(url, f"/presentation/{meetingId}/")

    def sfetch(name):
        # Fetch one asset relative to the presentation base URL.
        return fetch(urllib.parse.urljoin(base, name), outputPath / name)

    sfetch("metadata.xml")
    sfetch("shapes.svg")

    # Slide images are referenced from inside shapes.svg.
    with open(outputPath / "shapes.svg", "rb") as fp:
        shapes = ET.parse(fp)
    for img in shapes.iterfind(".//{http://www.w3.org/2000/svg}image"):
        sfetch(img.get("{http://www.w3.org/1999/xlink}href"))

    # Remaining fixed-name assets, fetched in the original order.
    for asset in (
        "panzooms.xml",
        "cursor.xml",
        "deskshare.xml",
        "presentation_text.json",
        "captions.json",
        "slides_new.xml",
        "video/webcams.webm",
        "deskshare/deskshare.webm",
    ):
        sfetch(asset)
# CLI: bbb-download.py <presentation-url> <output-directory>
if __name__ == "__main__":
    download(sys.argv[1], Path(sys.argv[2]))
|
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from tqdm import tqdm
import torch
from Utils.FittingNL3DMM_LossUtils import FittingNL3DMMLossUtils
from NL3DMMRenderer import NL3DMMRenderer
import cv2
from tool_funcs import soft_load_model, convert_loss_dict_2_str, draw_res_img
import pickle as pkl
from os.path import join
from DatasetforFitting3DMM import DatasetforFitting3DMM
from HeadNeRFOptions import BaseOptions
import argparse
class FittingNL3DMM(object):
    """Fit nonlinear-3DMM latent codes (identity, expression, texture,
    illumination) and per-image camera offsets to all images in a directory,
    saving one `*_nl3dmm.pkl` result file per image.
    """

    def __init__(self, img_size, intermediate_size, gpu_id, batch_size, img_dir) -> None:
        # img_size: size of the input images; intermediate_size: resolution
        # at which fitting/rendering is performed.
        self.img_size = img_size
        self.intermediate_size = intermediate_size
        self.batch_size = batch_size
        self.img_dir = img_dir
        self.device = torch.device("cuda:%d" % gpu_id)

        self.opt = BaseOptions()
        # Dimensions of each latent-code segment, taken from the options.
        self.iden_code_dims = self.opt.iden_code_dims
        self.expr_code_dims = self.opt.expr_code_dims
        self.text_code_dims = self.opt.text_code_dims
        self.illu_code_dims = self.opt.illu_code_dims

        self.build_dataset()
        self.build_tool_funcs()

    def build_tool_funcs(self):
        """Construct the differentiable renderer (with pretrained nl3dmm
        weights) and the loss helper."""
        self.nl3dmm_render = NL3DMMRenderer(self.intermediate_size, self.opt).to(self.device)
        self.nl3dmm_render = soft_load_model(self.nl3dmm_render, "ConfigModels/nl3dmm_net_dict.pth")
        self.loss_utils = FittingNL3DMMLossUtils()

    def build_dataset(self):
        """Build the dataset wrapper over the image directory."""
        self.data_utils = DatasetforFitting3DMM(self.img_dir, self.img_size, self.intermediate_size)

    @staticmethod
    def compute_rotation(angles):
        """
        Return:
            rot -- torch.tensor, size (B, 3, 3) pts @ trans_mat
        Parameters:
            angles -- torch.tensor, size (B, 3), radian
        """
        cur_device = angles.device
        batch_size = angles.shape[0]

        ones = torch.ones([batch_size, 1]).to(cur_device)
        zeros = torch.zeros([batch_size, 1]).to(cur_device)
        x, y, z = angles[:, :1], angles[:, 1:2], angles[:, 2:],

        # Per-axis rotation matrices built row-major from the Euler angles.
        rot_x = torch.cat([
            ones, zeros, zeros,
            zeros, torch.cos(x), -torch.sin(x),
            zeros, torch.sin(x), torch.cos(x)
        ], dim=1).reshape([batch_size, 3, 3])

        rot_y = torch.cat([
            torch.cos(y), zeros, torch.sin(y),
            zeros, ones, zeros,
            -torch.sin(y), zeros, torch.cos(y)
        ], dim=1).reshape([batch_size, 3, 3])

        rot_z = torch.cat([
            torch.cos(z), -torch.sin(z), zeros,
            torch.sin(z), torch.cos(z), zeros,
            zeros, zeros, ones
        ], dim=1).reshape([batch_size, 3, 3])

        rot = rot_z @ rot_y @ rot_x
        # Transposed so the result right-multiplies row-vector points.
        return rot.permute(0, 2, 1)

    def opt_batch_data(self, w2c_Rmats, w2c_Tvecs, inmats, imgs, lms, base_names):
        """Optimize latent codes and camera offsets for one image batch.

        Stage 1 fits only the camera offset (Euler angles + translation);
        stage 2 jointly refines the offset and all latent codes. Results
        are written to disk via save_res().
        """
        batch_size = imgs.size(0)
        batch_imgs, gt_lm2ds = imgs.to(self.device), lms.to(self.device)
        w2c_Rmats, w2c_Tvecs, w2c_inmats = w2c_Rmats.to(self.device), w2c_Tvecs.to(self.device), inmats.to(self.device)

        # Latent codes start at zero and are optimized per batch.
        iden_codes = torch.zeros((batch_size, self.iden_code_dims), dtype=torch.float32, requires_grad=True, device=self.device)
        expr_codes = torch.zeros((batch_size, self.expr_code_dims), dtype=torch.float32, requires_grad=True, device=self.device)
        text_codes = torch.zeros((batch_size, self.text_code_dims), dtype=torch.float32, requires_grad=True, device=self.device)

        # Illumination (SH) codes: initialize the first band to 0.8 before
        # enabling gradients (in-place edit requires requires_grad=False).
        illu_codes = torch.zeros((batch_size, self.illu_code_dims), dtype=torch.float32, requires_grad=False, device=self.device)
        illu_codes = illu_codes.view(batch_size, 9, 3)
        illu_codes[:, 0, :] += 0.8
        illu_codes = illu_codes.view(batch_size, 27)
        illu_codes.requires_grad = True

        # Camera-to-local offset: Euler angles, translation and fixed scale.
        c2l_eulur = torch.zeros((batch_size, 3), dtype=torch.float32, requires_grad=True, device=self.device)
        c2l_Tvecs = torch.zeros((batch_size, 3), dtype=torch.float32, requires_grad=True, device=self.device)
        c2l_Scales = 1.0

        # ---- Stage 1: optimize only the camera offset. ----
        init_lr_1 = 0.01
        params_group = [
            {'params': [c2l_eulur, c2l_Tvecs], 'lr': init_lr_1},
        ]
        optimizer = torch.optim.Adam(params_group, betas=(0.9, 0.999))
        iter_num_1 = 50
        for iter_ in range(iter_num_1):
            with torch.set_grad_enabled(True):
                c2l_Rmats = self.compute_rotation(c2l_eulur)
                rendered_img, mask_c3b, proj_lm2d, sh_vcs = self.nl3dmm_render(
                    iden_codes, text_codes, expr_codes, illu_codes,
                    w2c_Rmats, w2c_Tvecs, w2c_inmats, eval = False,
                    c2l_Scales = c2l_Scales, c2l_Rmats = c2l_Rmats, c2l_Tvecs = c2l_Tvecs
                )
                # mask_c3b_backup = mask_c3b.clone()
                # masks = masks.expand(-1, -1, -1, 3)
                # mask_c3b[masks != 1] = False
                pred_and_gt_data_dict = {
                    "batch_vcs":sh_vcs,
                    "rendered_imgs":rendered_img,
                    "gt_imgs":batch_imgs,
                    "mask_c3d":mask_c3b,
                    "proj_lm2ds":proj_lm2d,
                    "gt_lm2ds":gt_lm2ds
                }
                norm_code_info = {
                    "iden_codes":iden_codes,
                    "text_codes":text_codes,
                    "expr_codes":expr_codes
                }
                # Landmark weight is high (100) so stage 1 aligns the pose.
                batch_loss_dict = self.loss_utils.calc_total_loss(
                    cur_illus = illu_codes,
                    **pred_and_gt_data_dict,
                    **norm_code_info,
                    lm_w=100.0
                )
            optimizer.zero_grad()
            batch_loss_dict["total_loss"].backward()
            optimizer.step()
            # print("Step 1, Iter: %04d |"%iter_, convert_loss_dict_2_str(batch_loss_dict))
            # res_img = draw_res_img(rendered_img, batch_imgs, mask_c3b_backup, proj_lm2ds=proj_lm2d, gt_lm2ds=gt_lm2ds, num_per_row=3)
            # res_img = res_img[:, :, ::-1]
            # cv2.imwrite("./temp_res/opt_imgs_2/opt_image_res%03d.png" % iter_, cv2.resize(res_img, (0,0), fx=0.5, fy=0.5))

        # ---- Stage 2: jointly refine offset + all latent codes. ----
        init_lr_2 = 0.01
        params_group = [
            {'params': [c2l_eulur, c2l_Tvecs], 'lr': init_lr_2 * 1.0},
            {'params': [iden_codes, text_codes, expr_codes], 'lr': init_lr_2 * 0.5},
            {'params': [illu_codes], 'lr': init_lr_2 * 0.5},
        ]
        optimizer = torch.optim.Adam(params_group, betas=(0.9, 0.999))
        iter_num_2 = iter_num_1 + 200
        for iter_ in range(iter_num_1, iter_num_2):
            # Lower landmark weight now that the pose is roughly aligned.
            lm_w = 25.0
            with torch.set_grad_enabled(True):
                c2l_Rmats = self.compute_rotation(c2l_eulur)
                rendered_img, mask_c3b, proj_lm2d, sh_vcs = self.nl3dmm_render(
                    iden_codes, text_codes, expr_codes, illu_codes,
                    w2c_Rmats, w2c_Tvecs, w2c_inmats, eval = False,
                    c2l_Scales = c2l_Scales, c2l_Rmats = c2l_Rmats, c2l_Tvecs = c2l_Tvecs
                )
                # mask_c3b_backup = mask_c3b.clone()
                # masks = masks.expand(-1, -1, -1, 3)
                # mask_c3b[masks != 1] = False
                pred_and_gt_data_dict = {
                    "batch_vcs":sh_vcs,
                    "rendered_imgs":rendered_img,
                    "gt_imgs":batch_imgs,
                    "mask_c3d":mask_c3b,
                    "proj_lm2ds":proj_lm2d,
                    "gt_lm2ds":gt_lm2ds
                }
                norm_code_info = {
                    "iden_codes":iden_codes,
                    "text_codes":text_codes,
                    "expr_codes":expr_codes
                }
                batch_loss_dict = self.loss_utils.calc_total_loss(
                    cur_illus = illu_codes,
                    **pred_and_gt_data_dict,
                    **norm_code_info,
                    lm_w=lm_w
                )
            optimizer.zero_grad()
            batch_loss_dict["total_loss"].backward()
            optimizer.step()
            # print("Step 1, Iter: %04d |"%iter_, convert_loss_dict_2_str(batch_loss_dict))
            # res_img = draw_res_img(rendered_img, batch_imgs, mask_c3b_backup, proj_lm2ds=proj_lm2d, gt_lm2ds=gt_lm2ds, num_per_row=3)
            # res_img = res_img[:, :, ::-1]
            # cv2.imwrite("./temp_res/opt_imgs_2/opt_image_res%03d.png" % iter_, cv2.resize(res_img, (0,0), fx=0.5, fy=0.5))

        # Fold the fitted camera-to-local offset into the world-to-camera
        # extrinsics before saving.
        w2c_Tvecs = torch.bmm(w2c_Rmats, c2l_Tvecs.view(-1, 3, 1)).view(-1, 3) + w2c_Tvecs.view(-1, 3)
        w2c_Rmats = torch.bmm(w2c_Rmats, c2l_Rmats)
        # w2c_Tvecs = bmm_self_define_dim3(w2c_Rmats, c2l_Tvecs.view(-1, 3, 1)).view(-1, 3) + w2c_Tvecs.view(-1, 3)
        # w2c_Rmats = bmm_self_define_dim3(w2c_Rmats, c2l_Rmats)
        self.save_res(iden_codes, expr_codes, text_codes, illu_codes, w2c_Rmats, w2c_Tvecs, inmats, base_names)

    def save_res(self, iden_code, expr_code, text_code, illu_code, w2c_Rmats, w2c_Tvecs, inmats, base_names):
        """Write one `<name>_nl3dmm.pkl` per image with the concatenated
        latent code plus forward/inverse camera matrices."""
        iden_expr_text_illu_code = torch.cat([iden_code, expr_code, text_code, illu_code], dim=-1).detach().cpu()
        w2c_Rmats = w2c_Rmats.detach().cpu()
        w2c_Tvecs = w2c_Tvecs.detach().cpu()
        ori_inmats = inmats.detach().cpu()
        for cnt, str_name in enumerate(base_names):
            cur_code = iden_expr_text_illu_code[cnt]
            cur_w2c_Rmat = w2c_Rmats[cnt]
            cur_w2c_Tvec = w2c_Tvecs[cnt]
            # Undo the landmark scaling applied by the dataset on the
            # intrinsics' first two rows.
            inmat = ori_inmats[cnt]
            inmat[:2] /= self.data_utils.lm_scale

            # Camera-to-world is the inverse of world-to-camera.
            cur_c2w_Rmat = cur_w2c_Rmat.t()
            cur_c2w_Tvec = -(cur_c2w_Rmat.mm(cur_w2c_Tvec.view(3, 1)))
            cur_c2w_Tvec = cur_c2w_Tvec.view(3)

            # Closed-form inverse of the (axis-aligned) intrinsic matrix.
            inv_inmat = torch.eye(3, dtype=torch.float32)
            inv_inmat[0, 0] = 1.0 / inmat[0, 0]
            inv_inmat[1, 1] = 1.0 / inmat[1, 1]
            inv_inmat[0, 2] = - (inv_inmat[0, 0] * inmat[0, 2])
            inv_inmat[1, 2] = - (inv_inmat[1, 1] * inmat[1, 2])
            # print(inmat)
            res = {
                "code": cur_code,
                "w2c_Rmat":cur_w2c_Rmat,
                "w2c_Tvec":cur_w2c_Tvec,
                "inmat":inmat,
                "c2w_Rmat":cur_c2w_Rmat,
                "c2w_Tvec":cur_c2w_Tvec,
                "inv_inmat":inv_inmat,
            }
            save_path = join(self.img_dir, str_name + "_nl3dmm.pkl")
            with open(save_path, "wb") as f:
                pkl.dump(res, f)

    def main_process(self):
        """Fit every image in the directory, batch by batch."""
        total_sample_num = self.data_utils.total_sample_num
        loop_bar = tqdm(range(0, total_sample_num, self.batch_size), desc="Fitting 3DMM")
        for id_s in loop_bar:
            # The last batch may be smaller than batch_size.
            if id_s + self.batch_size < total_sample_num:
                idx_list = [x for x in range(id_s, id_s + self.batch_size)]
            else:
                idx_list = [x for x in range(id_s, total_sample_num)]
            temp_data = self.data_utils.load_batch_sample(idx_list)
            self.opt_batch_data(**temp_data)
            # exit(0)
# CLI entry point: fit 3DMM + camera parameters for a directory of images.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='The code for generating 3DMM parameters and camera parameters.')
    # parser.add_argument("--gpu_id", type=int, default=0)
    parser.add_argument("--img_size", type=int, required=True, help="the size of the input image.")
    parser.add_argument("--intermediate_size", type=int, required=True, help="Before fitting, the input image is resized as [intermediate_size, intermediate_size]")
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument("--img_dir", type=str, required=True)
    args = parser.parse_args()

    # gpu_id is currently fixed to 0 (see the commented-out argument above).
    tt = FittingNL3DMM(img_size=args.img_size, intermediate_size=args.intermediate_size,
                       gpu_id=0, batch_size=args.batch_size, img_dir=args.img_dir)
    tt.main_process()
|
# Copyright (c) OpenMMLab. All rights reserved.
import random
from tempfile import TemporaryDirectory
import numpy as np
import pytest
import torch
from scipy import stats
from torch import nn
from mmcv.cnn import (Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit,
PretrainedInit, TruncNormalInit, UniformInit, XavierInit,
bias_init_with_prob, caffe2_xavier_init, constant_init,
initialize, kaiming_init, normal_init, trunc_normal_init,
uniform_init, xavier_init)
def test_constant_init():
    """constant_init fills weights with ``val`` and biases with zero."""
    conv_module = nn.Conv2d(3, 16, 3)
    constant_init(conv_module, 0.1)
    assert conv_module.weight.allclose(
        torch.full_like(conv_module.weight, 0.1))
    assert conv_module.bias.allclose(torch.zeros_like(conv_module.bias))
    conv_module_no_bias = nn.Conv2d(3, 16, 3, bias=False)
    constant_init(conv_module_no_bias, 0.1)
    # Check the *bias-free* module's weight — the original re-asserted
    # `conv_module` here (copy-paste slip), leaving this path untested.
    assert conv_module_no_bias.weight.allclose(
        torch.full_like(conv_module_no_bias.weight, 0.1))
def test_xavier_init():
    """xavier_init sets the bias and rejects unknown distributions."""
    module = nn.Conv2d(3, 16, 3)
    xavier_init(module, bias=0.1)
    assert module.bias.allclose(torch.full_like(module.bias, 0.1))
    xavier_init(module, distribution='uniform')
    # TODO: sanity check of weight distribution, e.g. mean, std
    with pytest.raises(AssertionError):
        # Only 'normal' and 'uniform' are legal distributions.
        xavier_init(module, distribution='student-t')
    bias_free = nn.Conv2d(3, 16, 3, bias=False)
    xavier_init(bias_free)
def test_normal_init():
    """normal_init sets the bias and accepts bias-free modules."""
    module = nn.Conv2d(3, 16, 3)
    normal_init(module, bias=0.1)
    # TODO: sanity check of weight distribution, e.g. mean, std
    assert module.bias.allclose(torch.full_like(module.bias, 0.1))
    bias_free = nn.Conv2d(3, 16, 3, bias=False)
    normal_init(bias_free)
    # TODO: sanity check distribution, e.g. mean, std
def test_trunc_normal_init():
    """trunc_normal_init draws weights from a truncated normal (KS-tested)."""

    def _rand_between(lo, hi):
        # Uniform float in [lo, hi).
        return lo + (hi - lo) * random.random()

    def _looks_trunc_normal(tensor, mean, std, lo, hi):
        # scipy's truncnorm is parameterized for N(0, 1), so standardize the
        # samples (and the bounds) before running the Kolmogorov-Smirnov test.
        standardized = ((tensor.view(-1) - mean) / std).tolist()
        lo_std = (lo - mean) / std
        hi_std = (hi - mean) / std
        p_value = stats.kstest(
            standardized, 'truncnorm', args=(lo_std, hi_std))[1]
        return p_value > 0.0001

    module = nn.Conv2d(3, 16, 3)
    mean = _rand_between(-3, 3)
    std = _rand_between(.01, 1)
    lower = _rand_between(mean - 2 * std, mean)
    upper = _rand_between(mean, mean + 2 * std)
    trunc_normal_init(module, mean, std, lower, upper, bias=0.1)
    assert _looks_trunc_normal(module.weight, mean, std, lower, upper)
    assert module.bias.allclose(torch.full_like(module.bias, 0.1))

    bias_free = nn.Conv2d(3, 16, 3, bias=False)
    trunc_normal_init(bias_free)
    # TODO: sanity check distribution, e.g. mean, std
def test_uniform_init():
    """uniform_init sets the bias and accepts bias-free modules."""
    module = nn.Conv2d(3, 16, 3)
    uniform_init(module, bias=0.1)
    # TODO: sanity check of weight distribution, e.g. mean, std
    assert module.bias.allclose(torch.full_like(module.bias, 0.1))
    bias_free = nn.Conv2d(3, 16, 3, bias=False)
    uniform_init(bias_free)
def test_kaiming_init():
    """kaiming_init sets the bias and rejects unknown distributions."""
    module = nn.Conv2d(3, 16, 3)
    kaiming_init(module, bias=0.1)
    # TODO: sanity check of weight distribution, e.g. mean, std
    assert module.bias.allclose(torch.full_like(module.bias, 0.1))
    kaiming_init(module, distribution='uniform')
    with pytest.raises(AssertionError):
        # Only 'normal' and 'uniform' are legal distributions.
        kaiming_init(module, distribution='student-t')
    bias_free = nn.Conv2d(3, 16, 3, bias=False)
    kaiming_init(bias_free)
def test_caffe_xavier_init():
    """caffe2_xavier_init runs without error (smoke test only)."""
    caffe2_xavier_init(nn.Conv2d(3, 16, 3))
def test_bias_init_with_prob():
    """bias_init_with_prob maps a prior probability p to -log((1 - p) / p)."""
    conv_module = nn.Conv2d(3, 16, 3)
    prior_prob = 0.1
    # Pass the named constant (the original repeated the literal 0.1, which
    # could silently drift out of sync with the expected value below).
    normal_init(conv_module, bias=bias_init_with_prob(prior_prob))
    # TODO: sanity check of weight distribution, e.g. mean, std
    bias = float(-np.log((1 - prior_prob) / prior_prob))
    assert conv_module.bias.allclose(torch.full_like(conv_module.bias, bias))
def test_constaninit():
    """test ConstantInit class.

    ConstantInit fills weights with ``val`` and biases with ``bias`` (or the
    value derived from ``bias_prob``), but only for layers whose type name
    matches the ``layer`` key.
    """
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    func = ConstantInit(val=1, bias=2, layer='Conv2d')
    func(model)
    # Only the Conv2d (model[0]) is touched; the Linear (model[2]) keeps its
    # default initialization.
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.))
    assert not torch.equal(model[2].weight,
                           torch.full(model[2].weight.shape, 1.))
    assert not torch.equal(model[2].bias, torch.full(model[2].bias.shape, 2.))
    # Now target the Linear layer; the Conv2d values from above must survive.
    func = ConstantInit(val=3, bias_prob=0.01, layer='Linear')
    func(model)
    res = bias_init_with_prob(0.01)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 3.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, res))
    # test layer key with base class name
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = ConstantInit(val=4., bias=5., layer='_ConvNd')
    func(model)
    assert torch.all(model[0].weight == 4.)
    assert torch.all(model[2].weight == 4.)
    assert torch.all(model[0].bias == 5.)
    assert torch.all(model[2].bias == 5.)
    # test bias input type
    with pytest.raises(TypeError):
        func = ConstantInit(val=1, bias='1')
    # test bias_prob type
    with pytest.raises(TypeError):
        func = ConstantInit(val=1, bias_prob='1')
    # test layer input type
    with pytest.raises(TypeError):
        func = ConstantInit(val=1, layer=1)
def test_xavierinit():
    """test XavierInit class.

    XavierInit should touch only the layers named in ``layer``, honour
    ``bias``/``bias_prob``, and accept a base-class layer key ('_ConvNd').
    """
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    func = XavierInit(bias=0.1, layer='Conv2d')
    func(model)
    # Compare each bias against a constant tensor of its *own* shape — the
    # original mixed model[0].bias with full_like(model[2].bias, ...) and
    # only worked by accident of broadcasting.
    assert model[0].bias.allclose(torch.full_like(model[0].bias, 0.1))
    assert not model[2].bias.allclose(torch.full_like(model[2].bias, 0.1))
    # Zero everything first so the subsequent Xavier init is detectable.
    constant_func = ConstantInit(val=0, bias=0, layer=['Conv2d', 'Linear'])
    func = XavierInit(gain=100, bias_prob=0.01, layer=['Conv2d', 'Linear'])
    model.apply(constant_func)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.))
    res = bias_init_with_prob(0.01)
    func(model)
    assert not torch.equal(model[0].weight,
                           torch.full(model[0].weight.shape, 0.))
    assert not torch.equal(model[2].weight,
                           torch.full(model[2].weight.shape, 0.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, res))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, res))
    # test layer key with base class name
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = ConstantInit(val=4., bias=5., layer='_ConvNd')
    func(model)
    assert torch.all(model[0].weight == 4.)
    assert torch.all(model[2].weight == 4.)
    assert torch.all(model[0].bias == 5.)
    assert torch.all(model[2].bias == 5.)
    func = XavierInit(gain=100, bias_prob=0.01, layer='_ConvNd')
    func(model)
    assert not torch.all(model[0].weight == 4.)
    assert not torch.all(model[2].weight == 4.)
    assert torch.all(model[0].bias == res)
    assert torch.all(model[2].bias == res)
    # test bias input type
    with pytest.raises(TypeError):
        func = XavierInit(bias='0.1', layer='Conv2d')
    # test layer input type
    with pytest.raises(TypeError):
        func = XavierInit(bias=0.1, layer=1)
def test_normalinit():
    """NormalInit fills weights from N(mean, std) and sets biases."""
    net = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    init_fn = NormalInit(
        mean=100, std=1e-5, bias=200, layer=['Conv2d', 'Linear'])
    init_fn(net)
    # With a tiny std every weight is effectively equal to the mean.
    assert net[0].weight.allclose(torch.tensor(100.))
    assert net[2].weight.allclose(torch.tensor(100.))
    assert net[0].bias.allclose(torch.tensor(200.))
    assert net[2].bias.allclose(torch.tensor(200.))

    init_fn = NormalInit(
        mean=300, std=1e-5, bias_prob=0.01, layer=['Conv2d', 'Linear'])
    expected_bias = bias_init_with_prob(0.01)
    init_fn(net)
    assert net[0].weight.allclose(torch.tensor(300.))
    assert net[2].weight.allclose(torch.tensor(300.))
    assert net[0].bias.allclose(torch.tensor(expected_bias))
    assert net[2].bias.allclose(torch.tensor(expected_bias))

    # A base-class layer key ('_ConvNd') matches both Conv1d and Conv2d.
    net = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    init_fn = NormalInit(mean=300, std=1e-5, bias_prob=0.01, layer='_ConvNd')
    init_fn(net)
    assert net[0].weight.allclose(torch.tensor(300.))
    assert net[2].weight.allclose(torch.tensor(300.))
    assert torch.all(net[0].bias == expected_bias)
    assert torch.all(net[2].bias == expected_bias)
def test_truncnormalinit():
    """TruncNormalInit sets biases and, with tiny std, near-constant weights."""
    net = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    init_fn = TruncNormalInit(
        mean=100, std=1e-5, bias=200, a=0, b=200, layer=['Conv2d', 'Linear'])
    init_fn(net)
    # With a tiny std every weight is effectively equal to the mean.
    assert net[0].weight.allclose(torch.tensor(100.))
    assert net[2].weight.allclose(torch.tensor(100.))
    assert net[0].bias.allclose(torch.tensor(200.))
    assert net[2].bias.allclose(torch.tensor(200.))

    init_fn = TruncNormalInit(
        mean=300,
        std=1e-5,
        a=100,
        b=400,
        bias_prob=0.01,
        layer=['Conv2d', 'Linear'])
    expected_bias = bias_init_with_prob(0.01)
    init_fn(net)
    assert net[0].weight.allclose(torch.tensor(300.))
    assert net[2].weight.allclose(torch.tensor(300.))
    assert net[0].bias.allclose(torch.tensor(expected_bias))
    assert net[2].bias.allclose(torch.tensor(expected_bias))

    # A base-class layer key ('_ConvNd') matches both Conv1d and Conv2d.
    net = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    init_fn = TruncNormalInit(
        mean=300, std=1e-5, a=100, b=400, bias_prob=0.01, layer='_ConvNd')
    init_fn(net)
    assert net[0].weight.allclose(torch.tensor(300.))
    assert net[2].weight.allclose(torch.tensor(300.))
    assert torch.all(net[0].bias == expected_bias)
    assert torch.all(net[2].bias == expected_bias)
def test_uniforminit():
""""test UniformInit class."""
model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
func = UniformInit(a=1, b=1, bias=2, layer=['Conv2d', 'Linear'])
func(model)
assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.))
assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 1.))
assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.))
assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 2.))
func = UniformInit(a=100, b=100, layer=['Conv2d', 'Linear'], bias=10)
func(model)
assert torch.equal(model[0].weight, torch.full(model[0].weight.shape,
100.))
assert torch.equal(model[2].weight, torch.full(model[2].weight.shape,
100.))
assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 10.))
assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 10.))
# test layer key with base class name
model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
func = UniformInit(a=100, b=100, bias_prob=0.01, layer='_ConvNd')
res = bias_init_with_prob(0.01)
func(model)
assert torch.all(model[0].weight == 100.)
assert torch.all(model[2].weight == 100.)
assert torch.all(model[0].bias == res)
assert torch.all(model[2].bias == res)
def test_kaiminginit():
    """test KaimingInit class.

    KaimingInit should touch only the layers named in ``layer`` and accept a
    base-class layer key ('_ConvNd').
    """
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    func = KaimingInit(bias=0.1, layer='Conv2d')
    func(model)
    # Only the Conv2d bias is set; the Linear layer is left untouched.
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.1))
    assert not torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.1))
    func = KaimingInit(a=100, bias=10, layer=['Conv2d', 'Linear'])
    constant_func = ConstantInit(val=0, bias=0, layer=['Conv2d', 'Linear'])
    model.apply(constant_func)
    # Zero everything first so the subsequent Kaiming init is detectable.
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.))
    func(model)
    assert not torch.equal(model[0].weight,
                           torch.full(model[0].weight.shape, 0.))
    assert not torch.equal(model[2].weight,
                           torch.full(model[2].weight.shape, 0.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 10.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 10.))
    # test layer key with base class name
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = KaimingInit(bias=0.1, layer='_ConvNd')
    func(model)
    assert torch.all(model[0].bias == 0.1)
    assert torch.all(model[2].bias == 0.1)
    func = KaimingInit(a=100, bias=10, layer='_ConvNd')
    constant_func = ConstantInit(val=0, bias=0, layer='_ConvNd')
    model.apply(constant_func)
    # Re-zero and re-run with the base-class key: both convs must change.
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.))
    func(model)
    assert not torch.equal(model[0].weight,
                           torch.full(model[0].weight.shape, 0.))
    assert not torch.equal(model[2].weight,
                           torch.full(model[2].weight.shape, 0.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 10.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 10.))
def test_caffe2xavierinit():
    """Caffe2XavierInit touches only layers named in ``layer``."""
    net = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    init_fn = Caffe2XavierInit(bias=0.1, layer='Conv2d')
    init_fn(net)
    assert torch.equal(net[0].bias, torch.full(net[0].bias.shape, 0.1))
    assert not torch.equal(net[2].bias, torch.full(net[2].bias.shape, 0.1))
class FooModule(nn.Module):
    """Minimal module mixing layer types, used by the init tests below."""

    def __init__(self):
        super().__init__()
        # Attribute names matter: 'Pretrained' prefixes and override configs
        # in the tests refer to 'linear', 'conv2d' and 'conv2d_2' by name.
        self.linear = nn.Linear(1, 2)
        self.conv2d = nn.Conv2d(3, 1, 3)
        self.conv2d_2 = nn.Conv2d(3, 2, 3)
def test_pretrainedinit():
    """test PretrainedInit class.

    A constant-initialized model is saved to disk, then its weights are
    loaded into a fresh model (and, with ``prefix``, into a bare submodule).
    """
    import os.path as osp

    modelA = FooModule()
    constant_func = ConstantInit(val=1, bias=2, layer=['Conv2d', 'Linear'])
    modelA.apply(constant_func)
    modelB = FooModule()
    modelC = nn.Linear(1, 2)
    # Save the checkpoint *inside* the temporary directory. The original code
    # entered TemporaryDirectory() without binding it and wrote 'modelA.pth'
    # into the CWD, leaking the file after the test finished.
    with TemporaryDirectory() as tmp_dir:
        ckpt_path = osp.join(tmp_dir, 'modelA.pth')
        torch.save(modelA.state_dict(), ckpt_path)
        funcB = PretrainedInit(checkpoint=ckpt_path)
        funcC = PretrainedInit(checkpoint=ckpt_path, prefix='linear.')
        funcB(modelB)
        assert torch.equal(modelB.linear.weight,
                           torch.full(modelB.linear.weight.shape, 1.))
        assert torch.equal(modelB.linear.bias,
                           torch.full(modelB.linear.bias.shape, 2.))
        assert torch.equal(modelB.conv2d.weight,
                           torch.full(modelB.conv2d.weight.shape, 1.))
        assert torch.equal(modelB.conv2d.bias,
                           torch.full(modelB.conv2d.bias.shape, 2.))
        assert torch.equal(modelB.conv2d_2.weight,
                           torch.full(modelB.conv2d_2.weight.shape, 1.))
        assert torch.equal(modelB.conv2d_2.bias,
                           torch.full(modelB.conv2d_2.bias.shape, 2.))
        funcC(modelC)
        assert torch.equal(modelC.weight, torch.full(modelC.weight.shape, 1.))
        assert torch.equal(modelC.bias, torch.full(modelC.bias.shape, 2.))
def test_initialize():
    """test initialize function.

    Covers: plain layer keys, list-of-cfg, ``override`` (with and without an
    explicit type), the 'Pretrained' type, and the error paths for malformed
    ``init_cfg`` / ``override`` values.
    """
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    foonet = FooModule()
    # test layer key
    init_cfg = dict(type='Constant', layer=['Conv2d', 'Linear'], val=1, bias=2)
    initialize(model, init_cfg)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 1.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 2.))
    # initialize() must not mutate the config it was given.
    assert init_cfg == dict(
        type='Constant', layer=['Conv2d', 'Linear'], val=1, bias=2)
    # test init_cfg with list type
    init_cfg = [
        dict(type='Constant', layer='Conv2d', val=1, bias=2),
        dict(type='Constant', layer='Linear', val=3, bias=4)
    ]
    initialize(model, init_cfg)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 3.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 4.))
    assert init_cfg == [
        dict(type='Constant', layer='Conv2d', val=1, bias=2),
        dict(type='Constant', layer='Linear', val=3, bias=4)
    ]
    # test layer key and override key
    init_cfg = dict(
        type='Constant',
        val=1,
        bias=2,
        layer=['Conv2d', 'Linear'],
        override=dict(type='Constant', name='conv2d_2', val=3, bias=4))
    initialize(foonet, init_cfg)
    # The override wins for 'conv2d_2'; all other layers get val=1/bias=2.
    assert torch.equal(foonet.linear.weight,
                       torch.full(foonet.linear.weight.shape, 1.))
    assert torch.equal(foonet.linear.bias,
                       torch.full(foonet.linear.bias.shape, 2.))
    assert torch.equal(foonet.conv2d.weight,
                       torch.full(foonet.conv2d.weight.shape, 1.))
    assert torch.equal(foonet.conv2d.bias,
                       torch.full(foonet.conv2d.bias.shape, 2.))
    assert torch.equal(foonet.conv2d_2.weight,
                       torch.full(foonet.conv2d_2.weight.shape, 3.))
    assert torch.equal(foonet.conv2d_2.bias,
                       torch.full(foonet.conv2d_2.bias.shape, 4.))
    assert init_cfg == dict(
        type='Constant',
        val=1,
        bias=2,
        layer=['Conv2d', 'Linear'],
        override=dict(type='Constant', name='conv2d_2', val=3, bias=4))
    # test override key
    init_cfg = dict(
        type='Constant', val=5, bias=6, override=dict(name='conv2d_2'))
    initialize(foonet, init_cfg)
    # Without a layer key only the named override target is initialized.
    assert not torch.equal(foonet.linear.weight,
                           torch.full(foonet.linear.weight.shape, 5.))
    assert not torch.equal(foonet.linear.bias,
                           torch.full(foonet.linear.bias.shape, 6.))
    assert not torch.equal(foonet.conv2d.weight,
                           torch.full(foonet.conv2d.weight.shape, 5.))
    assert not torch.equal(foonet.conv2d.bias,
                           torch.full(foonet.conv2d.bias.shape, 6.))
    assert torch.equal(foonet.conv2d_2.weight,
                       torch.full(foonet.conv2d_2.weight.shape, 5.))
    assert torch.equal(foonet.conv2d_2.bias,
                       torch.full(foonet.conv2d_2.bias.shape, 6.))
    assert init_cfg == dict(
        type='Constant', val=5, bias=6, override=dict(name='conv2d_2'))
    init_cfg = dict(
        type='Pretrained',
        checkpoint='modelA.pth',
        override=dict(type='Constant', name='conv2d_2', val=3, bias=4))
    modelA = FooModule()
    constant_func = ConstantInit(val=1, bias=2, layer=['Conv2d', 'Linear'])
    modelA.apply(constant_func)
    # NOTE(review): TemporaryDirectory() is never bound with `as`, so
    # 'modelA.pth' is written into the CWD and leaks after the test —
    # confirm and save inside the temp dir instead.
    with TemporaryDirectory():
        torch.save(modelA.state_dict(), 'modelA.pth')
        initialize(foonet, init_cfg)
        assert torch.equal(foonet.linear.weight,
                           torch.full(foonet.linear.weight.shape, 1.))
        assert torch.equal(foonet.linear.bias,
                           torch.full(foonet.linear.bias.shape, 2.))
        assert torch.equal(foonet.conv2d.weight,
                           torch.full(foonet.conv2d.weight.shape, 1.))
        assert torch.equal(foonet.conv2d.bias,
                           torch.full(foonet.conv2d.bias.shape, 2.))
        assert torch.equal(foonet.conv2d_2.weight,
                           torch.full(foonet.conv2d_2.weight.shape, 3.))
        assert torch.equal(foonet.conv2d_2.bias,
                           torch.full(foonet.conv2d_2.bias.shape, 4.))
        assert init_cfg == dict(
            type='Pretrained',
            checkpoint='modelA.pth',
            override=dict(type='Constant', name='conv2d_2', val=3, bias=4))
    # test init_cfg type
    with pytest.raises(TypeError):
        init_cfg = 'init_cfg'
        initialize(foonet, init_cfg)
    # test override value type
    with pytest.raises(TypeError):
        init_cfg = dict(
            type='Constant',
            val=1,
            bias=2,
            layer=['Conv2d', 'Linear'],
            override='conv')
        initialize(foonet, init_cfg)
    # test override name
    with pytest.raises(RuntimeError):
        init_cfg = dict(
            type='Constant',
            val=1,
            bias=2,
            layer=['Conv2d', 'Linear'],
            override=dict(type='Constant', name='conv2d_3', val=3, bias=4))
        initialize(foonet, init_cfg)
    # test list override name
    with pytest.raises(RuntimeError):
        init_cfg = dict(
            type='Constant',
            val=1,
            bias=2,
            layer=['Conv2d', 'Linear'],
            override=[
                dict(type='Constant', name='conv2d', val=3, bias=4),
                dict(type='Constant', name='conv2d_3', val=5, bias=6)
            ])
        initialize(foonet, init_cfg)
    # test override with args except type key
    with pytest.raises(ValueError):
        init_cfg = dict(
            type='Constant',
            val=1,
            bias=2,
            override=dict(name='conv2d_2', val=3, bias=4))
        initialize(foonet, init_cfg)
    # test override without name
    with pytest.raises(ValueError):
        init_cfg = dict(
            type='Constant',
            val=1,
            bias=2,
            override=dict(type='Constant', val=3, bias=4))
        initialize(foonet, init_cfg)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.