content stringlengths 5 1.05M |
|---|
from django.db import models
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.search import (
SearchVector,
SearchVectorField,
SearchQuery,
SearchRank,
SearchHeadline,
)
from regcore.models import Part
class SearchIndexQuerySet(models.QuerySet):
    """QuerySet helpers for Postgres full-text search over indexed Part content."""

    def effective(self, date):
        """Limit to rows whose Part is effective on *date*.

        Bug fix: this previously called ``date.today()``, which ignored the
        *date* argument entirely and always filtered by the current day.
        """
        return self.filter(
            part__in=models.Subquery(Part.objects.effective(date).values("id"))
        )

    def search(self, query):
        """Rank, highlight, and order rows matching *query*.

        Matches are highlighted in ``headline`` with a search-highlight span,
        ordered by descending rank, with the related Part prefetched.
        """
        search_query = SearchQuery(query)  # build once, reuse in all clauses
        return self\
            .filter(search_vector=search_query)\
            .annotate(rank=SearchRank("search_vector", search_query))\
            .annotate(
                headline=SearchHeadline(
                    "content",
                    search_query,
                    start_sel='<span class="search-highlight">',
                    stop_sel='</span>',
                ),
            )\
            .order_by('-rank')\
            .prefetch_related('part')
class SearchIndexManager(models.Manager.from_queryset(SearchIndexQuerySet)):
    # Default manager exposing the chainable effective()/search() methods
    # from SearchIndexQuerySet on SearchIndex.objects.
    pass
class SearchIndex(models.Model):
    """One searchable node (a SECTION) extracted from a Part's document tree."""
    type = models.CharField(max_length=32)  # node_type of the source JSON node
    label = ArrayField(base_field=models.CharField(max_length=32))  # label path of the node
    content = models.TextField()  # text that is searched: title/text plus child text
    parent = models.JSONField(null=True)  # JSON node stored by create_search()
    part = models.ForeignKey(Part, on_delete=models.CASCADE)
    search_vector = SearchVectorField()  # populated in bulk by update_search()
    objects = SearchIndexManager()

    class Meta:
        # A given label may be indexed at most once per part.
        unique_together = ['label', 'part']
def create_search(part, piece, memo, parent=None, ):
    """Recursively collect unsaved SearchIndex rows for every SECTION node.

    :param part: Part instance the index rows belong to.
    :param piece: JSON node (dict) from the part's document tree.
    :param memo: accumulator list; SECTION rows are appended to it.
    :param parent: enclosing node, passed down during recursion.
        NOTE(review): the SECTION branch stores ``piece`` itself in the
        ``parent`` field and never reads this argument — confirm whether
        ``parent=parent`` was intended.
    :return: *memo*, for caller convenience.
    """
    if piece.get("node_type", None) == "SECTION":
        si = SearchIndex(
            label=piece["label"],
            part=part,
            parent=piece,
            type=piece["node_type"],
            # Prefer the title; fall back to the node's own text.
            content=piece.get("title", piece.get("text", "")),
        )
        # Fold each child's text into the section's searchable content.
        children = piece.pop("children", []) or []
        for child in children:
            si.content = si.content + child.get("text", "")
        memo.append(si)
    else:
        # Non-section node: recurse into children, tracking this node as parent.
        children = piece.pop("children", []) or []
        for child in children:
            create_search(part, child, memo, parent=piece)
    return memo
def update_search(sender, instance, created, **kwargs):
    """post_save-style handler: rebuild all SearchIndex rows for a saved Part.

    *sender* and *created* are accepted for the signal signature but unused.
    Deletes the part's previous rows, recreates them from the part's document
    tree, then fills the search_vector column in a second pass (bulk_create
    cannot evaluate the SearchVector expression per row).
    """
    SearchIndex.objects.filter(part=instance).delete()
    contexts = create_search(instance, instance.document, [])
    SearchIndex.objects.bulk_create(contexts, ignore_conflicts=True)
    SearchIndex.objects.filter(part=instance).update(search_vector=SearchVector('content'))
|
import copy
from typing import List, Optional, Union
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
def cast(
    value: Union[tf.Tensor, np.ndarray], dtype: tf.DType, name: Optional[str] = None
) -> tf.Tensor:
    """Cast *value* to *dtype*, converting non-tensor inputs directly."""
    if tf.is_tensor(value):
        return tf.cast(value, dtype, name=name)
    # Workaround for https://github.com/tensorflow/tensorflow/issues/35938
    # (resolved by TF 2.2): convert plain arrays with the target dtype up front.
    return tf.convert_to_tensor(value, dtype, name=name)
def eye(num: int, value: tf.Tensor, dtype: Optional[tf.DType] = None) -> tf.Tensor:
    """Return a [num, num] diagonal matrix with *value* repeated on the diagonal."""
    diag_value = value if dtype is None else cast(value, dtype)
    return tf.linalg.diag(tf.fill([num], diag_value))
def leading_transpose(
    tensor: tf.Tensor, perm: List[Union[int, type(...)]], leading_dim: int = 0
) -> tf.Tensor:
    """
    Transposes tensors with leading dimensions. Leading dimensions in
    permutation list represented via ellipsis `...`.
    When leading dimensions are found, `transpose` method
    considers them as a single grouped element indexed by 0 in `perm` list. So, passing
    `perm=[-2, ..., -1]`, you assume that your input tensor has [..., A, B] shape,
    and you want to move leading dims between A and B dimensions.
    Dimension indices in permutation list can be negative or positive. Valid positive
    indices start from 1 up to the tensor rank, viewing leading dimensions `...` as zero
    index.
    Example:
        a = tf.random.normal((1, 2, 3, 4, 5, 6))
        # [..., A, B, C],
        # where A is 1st element,
        # B is 2nd element and
        # C is 3rd element in
        # permutation list,
        # leading dimensions are [1, 2, 3]
        # which are 0th element in permutation
        # list
        b = leading_transpose(a, [3, -3, ..., -2])  # [C, A, ..., B]
        sess.run(b).shape
        output> (6, 4, 1, 2, 3, 5)
    :param tensor: TensorFlow tensor.
    :param perm: List of permutation indices.
    :returns: TensorFlow tensor.
    :raises: ValueError when `...` cannot be found.
    """
    perm = copy.copy(perm)  # don't mutate the caller's list
    idx = perm.index(...)   # position of the ellipsis; raises ValueError if absent
    perm[idx] = leading_dim  # placeholder int so the modulo below is well defined
    rank = tf.rank(tensor)
    perm_tf = perm % rank   # maps negative indices onto [0, rank)
    # Explicit indices of the grouped leading dims that `...` stands for.
    leading_dims = tf.range(rank - len(perm) + 1)
    perm = tf.concat([perm_tf[:idx], leading_dims, perm_tf[idx + 1 :]], 0)
    return tf.transpose(tensor, perm)
def broadcasting_elementwise(op, a, b):
    """
    Apply the binary operation *op* to every pair drawn from *a* and *b*.

    :param op: binary operator on tensors, e.g. tf.add, tf.subtract
    :param a: tf.Tensor, shape [n_1, ..., n_a]
    :param b: tf.Tensor, shape [m_1, ..., m_b]
    :return: tf.Tensor, shape [n_1, ..., n_a, m_1, ..., m_b]
    """
    a_col = tf.reshape(a, [-1, 1])   # flatten a into a column
    b_row = tf.reshape(b, [1, -1])   # flatten b into a row
    pairwise = op(a_col, b_row)      # broadcast to all pairs
    result_shape = tf.concat([tf.shape(a), tf.shape(b)], 0)
    return tf.reshape(pairwise, result_shape)
def square_distance(X, X2):
    """
    Returns ||X - X2ᵀ||²
    Due to the implementation and floating-point imprecision, the
    result may actually be very slightly negative for entries very
    close to each other.
    This function can deal with leading dimensions in X and X2.
    In the sample case, where X and X2 are both 2 dimensional,
    for example, X is [N, D] and X2 is [M, D], then a tensor of shape
    [N, M] is returned. If X is [N1, S1, D] and X2 is [N2, S2, D]
    then the output will be [N1, S1, N2, S2].

    When X2 is None the pairwise distances of X with itself are returned.
    """
    if X2 is None:
        # Self-distance: expand ||x||² − 2·x·x'ᵀ + ||x'||² around the Gram matrix.
        Xs = tf.reduce_sum(tf.square(X), axis=-1, keepdims=True)
        dist = -2 * tf.matmul(X, X, transpose_b=True)
        dist += Xs + tf.linalg.adjoint(Xs)
        return dist
    Xs = tf.reduce_sum(tf.square(X), axis=-1)
    X2s = tf.reduce_sum(tf.square(X2), axis=-1)
    # tensordot contracts the feature axis; broadcasting_elementwise then adds
    # ||x||² + ||x2||² across every combination of leading dimensions.
    dist = -2 * tf.tensordot(X, X2, [[-1], [-1]])
    dist += broadcasting_elementwise(tf.add, Xs, X2s)
    return dist
def difference_matrix(X, X2):
    """
    Returns (X - X2ᵀ)
    This function can deal with leading dimensions in X and X2.
    For example, If X has shape [M, D] and X2 has shape [N, D],
    the output will have shape [M, N, D]. If X has shape [I, J, M, D]
    and X2 has shape [K, L, N, D], the output will have shape
    [I, J, M, K, L, N, D].

    When X2 is None, differences of X with itself are returned.
    """
    if X2 is None:
        # Self-difference: broadcast over the last two point axes only,
        # keeping the shared leading dimensions aligned.
        X2 = X
        diff = X[..., :, tf.newaxis, :] - X2[..., tf.newaxis, :, :]
        return diff
    # Flatten all leading dims, compute pairwise differences, then restore
    # the combined leading shapes of both inputs.
    Xshape = tf.shape(X)
    X2shape = tf.shape(X2)
    X = tf.reshape(X, (-1, Xshape[-1]))
    X2 = tf.reshape(X2, (-1, X2shape[-1]))
    diff = X[:, tf.newaxis, :] - X2[tf.newaxis, :, :]
    diff = tf.reshape(diff, tf.concat((Xshape[:-1], X2shape[:-1], [Xshape[-1]]), 0))
    return diff
def pca_reduce(X: tf.Tensor, latent_dim: tf.Tensor) -> tf.Tensor:
    """
    Linearly project the points in *X* down to *latent_dim* dimensions via PCA.

    :param X: data array of size N (number of points) x D (dimensions)
    :param latent_dim: Number of latent dimensions Q < D
    :return: PCA projection array of size [N, Q].
    """
    if latent_dim > X.shape[1]:  # pragma: no cover
        raise ValueError("Cannot have more latent dimensions than observed")
    covariance = tfp.stats.covariance(X)
    # eigh returns eigenvalues ascending; keep the top-Q eigenvectors.
    _, eigenvectors = tf.linalg.eigh(covariance)
    projection = eigenvectors[:, -latent_dim:]
    X_centered = X - tf.reduce_mean(X, axis=0, keepdims=True)
    return X_centered @ projection
|
import io
from enum import Enum
from typing import Optional, Union
import numpy as np
import onnxruntime as ort
from PIL import Image
from PIL.Image import Image as PILImage
from pymatting.alpha.estimate_alpha_cf import estimate_alpha_cf
from pymatting.foreground.estimate_foreground_ml import estimate_foreground_ml
from pymatting.util.util import stack_images
from scipy.ndimage.morphology import binary_erosion
from .detect import ort_session, predict
class ReturnType(Enum):
    # Output format of remove(); mirrors the type of the input it received.
    BYTES = 0
    PILLOW = 1
    NDARRAY = 2
def alpha_matting_cutout(
    img: Image,
    mask: Image,
    foreground_threshold: int,
    background_threshold: int,
    erode_structure_size: int,
) -> Image:
    """Cut out the foreground with closed-form alpha matting.

    Builds a trimap (sure-foreground / sure-background / unknown) from the
    mask, estimates a soft alpha with pymatting, and returns an RGBA cutout.
    """
    img = np.asarray(img)
    mask = np.asarray(mask)
    # Hard classification of mask pixels; values in between stay "unknown".
    is_foreground = mask > foreground_threshold
    is_background = mask < background_threshold
    structure = None
    if erode_structure_size > 0:
        structure = np.ones(
            (erode_structure_size, erode_structure_size), dtype=np.uint8
        )
    # Erode both regions so uncertain borders fall into the unknown band.
    is_foreground = binary_erosion(is_foreground, structure=structure)
    is_background = binary_erosion(is_background, structure=structure, border_value=1)
    # Trimap convention: 255 = foreground, 0 = background, 128 = unknown.
    trimap = np.full(mask.shape, dtype=np.uint8, fill_value=128)
    trimap[is_foreground] = 255
    trimap[is_background] = 0
    # pymatting expects floats in [0, 1].
    img_normalized = img / 255.0
    trimap_normalized = trimap / 255.0
    alpha = estimate_alpha_cf(img_normalized, trimap_normalized)
    foreground = estimate_foreground_ml(img_normalized, alpha)
    cutout = stack_images(foreground, alpha)  # RGB + alpha channel
    cutout = np.clip(cutout * 255, 0, 255).astype(np.uint8)
    cutout = Image.fromarray(cutout)
    return cutout
def naive_cutout(img: Image, mask: Image) -> Image:
    """Composite *img* over a fully transparent canvas, using *mask* as alpha."""
    transparent = Image.new("RGBA", img.size, 0)
    return Image.composite(img, transparent, mask)
def remove(
    data: Union[bytes, PILImage, np.ndarray],
    alpha_matting: bool = False,
    alpha_matting_foreground_threshold: int = 240,
    alpha_matting_background_threshold: int = 10,
    alpha_matting_erode_size: int = 10,
    session: Optional[ort.InferenceSession] = None,
    only_mask: bool = False,
) -> Union[bytes, PILImage, np.ndarray]:
    """Remove the background from *data*, returning the same type that came in.

    Accepts a PIL image, raw encoded bytes, or a numpy array. The cutout is
    produced either naively, via alpha matting, or — with ``only_mask`` —
    the raw segmentation mask is returned instead.
    """
    # Decode the input, remembering which format to return.
    if isinstance(data, PILImage):
        return_type, img = ReturnType.PILLOW, data
    elif isinstance(data, bytes):
        return_type, img = ReturnType.BYTES, Image.open(io.BytesIO(data))
    elif isinstance(data, np.ndarray):
        return_type, img = ReturnType.NDARRAY, Image.fromarray(data)
    else:
        raise ValueError("Input type {} is not supported.".format(type(data)))

    if session is None:
        session = ort_session("u2net")  # default segmentation model

    # Predict the mask and scale it back to the source resolution.
    mask = predict(session, np.array(img.convert("RGB"))).convert("L")
    mask = mask.resize(img.size, Image.LANCZOS)

    if only_mask:
        cutout = mask
    elif alpha_matting:
        cutout = alpha_matting_cutout(
            img,
            mask,
            alpha_matting_foreground_threshold,
            alpha_matting_background_threshold,
            alpha_matting_erode_size,
        )
    else:
        cutout = naive_cutout(img, mask)

    if return_type is ReturnType.PILLOW:
        return cutout
    if return_type is ReturnType.NDARRAY:
        return np.asarray(cutout)
    # BYTES: re-encode the cutout as PNG.
    buffer = io.BytesIO()
    cutout.save(buffer, "PNG")
    return buffer.getvalue()
|
# -*- coding:utf-8 -*-
"""
Project: rocekpl_api_client
File: /phonecalls.py
File Created: 2022-02-28, 13:04:13
Author: Wojciech Sobczak (wsobczak@gmail.com)
-----
Last Modified: 2022-02-28, 13:07:41
Modified By: Wojciech Sobczak (wsobczak@gmail.com)
-----
Copyright © 2021 - 2022 by vbert
"""
from .api_client import ApiClient
class PhoneCalls(ApiClient):
    """CRUD wrapper around the apiPhoneCalls endpoints."""

    def __init__(self, config) -> None:
        super().__init__(config)

    def _prepare(self, endpoint: str, params: dict, payload: dict) -> None:
        # Stage the request state that the ApiClient verb methods consume.
        self.endpoint = endpoint
        self.params = params
        self.payload = payload

    def list(self) -> str:
        """Fetch every phone call."""
        self._prepare('apiPhoneCalls', {}, {})
        return super().get()

    def get(self, id: int) -> str:
        """Fetch one phone call by id."""
        self._prepare('apiPhoneCalls', {'id': id}, {})
        return super().get()

    def create(self, payload: dict) -> str:
        """Create a phone call from *payload*."""
        self._prepare('apiPhoneCalls/create', {}, payload)
        return super().create()

    def update(self, id: int, payload: dict) -> str:
        """Update the phone call *id* with *payload*."""
        self._prepare('apiPhoneCalls/update', {'id': id}, payload)
        return super().update()

    def delete(self, id: int) -> str:
        """Delete the phone call *id*."""
        self._prepare('apiPhoneCalls/delete', {'id': id}, {})
        return super().delete()
|
"""
The provided code stub will read in a dictionary containing key/value pairs of name:[marks] for a list of students. Print the average of the marks array for the student name provided, showing 2 places after the decimal.
Example
The query_name is 'beta'. Print beta's average score, correct to 2 decimal places.
Input Format
The first line contains the integer n, the number of students' records. The next n lines contain the names and marks obtained by a student, each value separated by a space. The final line contains query_name, the name of a student to query.
Constraints
Output Format
Print one line: The average of the marks obtained by the particular student correct to 2 decimal places.
Sample Input 0
3
Krishna 67 68 69
Arjun 70 98 63
Malika 52 56 60
Malika
Sample Output 0
56.00
Explanation 0
Marks for Malika are {52, 56, 60}, whose average is (52 + 56 + 60) / 3 = 56.00.
Sample Input 1
2
Harsh 25 26.5 28
Anurag 26 28 30
Harsh
Sample Output 1
26.50
"""
def check_num(n):
    """Return True if *n* is within the allowed record-count range [2, 10].

    Bug fix: the previous version returned None (not False) for
    out-of-range values; it now always returns a bool.
    """
    return 2 <= n <= 10
def average(lst):
    """Print the arithmetic mean of *lst*, formatted to two decimal places."""
    mean = sum(lst) / len(lst)
    print("{0:.2f}".format(mean))
if __name__ == '__main__':
    # Read n records of "name mark1 mark2 ...", then print the queried
    # student's average correct to 2 decimal places.
    n = int(input())
    if check_num(n):  # problem constraint: 2 <= n <= 10
        student_marks = {}
        for i in range(n):
            # First token is the name; the rest are that student's marks.
            name, *line = input().split()
            scores = list(map(float, line))
            student_marks[name] = scores
        query_name = input()
        average(student_marks[query_name])
|
#!/usr/bin/python3
if __name__ == "__main__":
    # Report how many command-line arguments were given, then list them.
    import sys

    arg_count = len(sys.argv) - 1
    if arg_count == 0:
        print("0 arguments.")
    elif arg_count == 1:
        print("1 argument:")
    else:
        print("{:d} arguments:".format(arg_count))
    for position in range(1, len(sys.argv)):
        print("{:d}: {}".format(position, sys.argv[position]))
|
# -*- coding: utf-8 -*-
"""
pypages
~~~~~~~
A module that brings easier pagination. Mainly useful for web
applications.
:copyright: (c) 2014 by Shipeng Feng.
:license: BSD, see LICENSE for more details.
"""
import math
class Paginator(object):
    """Paginator.

    Basic usage::

        p = Paginator(100)
        p.object_num
        p.per_page
        p.current
        p.start
        p.range_num
        p.end
        p.page_num
        p.pages
        p.has_previous
        p.has_next
        p.previous
        p.next

    :param object_num: The total number of items.
    :param per_page: The maximum number of items to include on a page,
                     default 10
    :param current: The current page number, default 1
    :param start: The start index for your page range, default to be current
                  page minus half of the page range length.
    :param range_num: The maximum page range length, default 10
    """
    def __init__(self, object_num, per_page=10, current=1, start=None,
                 range_num=10):
        # Lazily computed caches for the derived properties below.
        self._start = self._end = self._current = self._page_num = None
        self.object_num = int(object_num)
        self.per_page = int(per_page)
        self.current = current
        self.range_num = int(range_num)
        assert self.object_num >= 0, "object_num must be positive or 0"
        assert self.per_page > 0, "per_page must be positive"
        assert self.range_num > 0, "range_num must be positive"
        # Must run after current/range_num: the default start derives from them.
        self.start = start

    def _get_current(self):
        """Returns the current page."""
        return self._current

    def _set_current(self, current):
        """Set the current page that does make sense. Any invalid value
        passed in will be regarded as 1.
        """
        try:
            current = int(current)
        except (TypeError, ValueError):
            # Bug fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            current = 1
        if current < 1:
            current = 1
        elif current > self.page_num:
            current = self.page_num  # clamp to the last page
        self._current = current
    current = property(_get_current, _set_current)
    del _get_current, _set_current

    def _get_start(self):
        """Returns the start index."""
        return self._start

    def _set_start(self, start):
        """Set the start index that does make sense."""
        if not start:
            # Default: center the visible page range around the current page.
            start = self.current - self.range_num / 2
        self._start = int(start) if int(start) > 0 else 1
    start = property(_get_start, _set_start)
    del _get_start, _set_start

    @property
    def end(self):
        """Returns the end index."""
        if self._end is None:
            self._end = min(self.page_num, self.start + self.range_num - 1)
        return self._end

    @property
    def page_num(self):
        """Returns the total number of pages."""
        if self._page_num is None:
            self._page_num = int(math.ceil(self.object_num /
                                           float(self.per_page)))
        return self._page_num

    @property
    def has_previous(self):
        return self.current > 1

    @property
    def has_next(self):
        return self.current < self.page_num

    @property
    def previous(self):
        """Returns the previous page number (None on the first page)."""
        if self.has_previous:
            return self.current - 1

    @property
    def next(self):
        """Returns the next page number (None on the last page)."""
        if self.has_next:
            return self.current + 1

    @property
    def pages(self):
        """Returns a 1-based range of pages for loop."""
        return range(self.start, self.end + 1)
|
import requests
import json
import argparse
import unicodecsv as csv
api_version = "v6"
base_url = "https://channelstore.roku.com/api/" + api_version + "/channels/detailsunion"
# ======================================================================================================================
# util functions for producing query param tuples (which can be provided to requests lib)
def qp_country(value="US"):
    """Query-param tuple for the storefront country (defaults to US)."""
    return ("country", value)
def qp_language(value="en"):
    """Query-param tuple for the storefront language (defaults to English)."""
    return ("language", value)
# ======================================================================================================================
def get_channel_details(chan_id):
    """
    Get channel details for a given channel.
    :param chan_id: The ID of the channel to fetch details for.
    :return: The channel details in JSON format, or None on a non-200 response.
    """
    url = "{}/{}".format(base_url, chan_id)
    resp = requests.get(url, params=[qp_country(), qp_language()])
    if resp.status_code == requests.codes.ok:
        return resp.json()
    print("WARNING: failed getting details for channel with id=" + str(chan_id))
    return None
def write_json(data, file_out):
    """
    Write JSON to a file.

    Opening with mode "w" already truncates the file, so the previous
    seek(0)/truncate() dance was redundant and has been removed; json.dump
    streams directly to the file object.

    :param data: In-memory JSON-serializable object.
    :param file_out: The file to output the JSON to.
    """
    with open(file_out, "w") as jf:
        json.dump(data, jf, sort_keys=False, indent=2)
def write_csv(json_result, csv_filepath):
    """
    Write a subset of the details-crawl JSON as CSV (currently rating and price).
    :param json_result: The json resulting from the details crawl (for a set of channels).
    :param csv_filepath: The path to the csv file.
    :return: None
    """
    header_row = ["channel_id", "rating", "star_rating", "star_rating_count", "price_as_number"]
    with open(csv_filepath, "wb") as csv_file:
        writer = csv.writer(csv_file, encoding='utf-8')
        writer.writerow(header_row)
        for chan_id, entry in json_result.items():
            details = entry["details"]
            writer.writerow([
                chan_id,
                details["rating"],
                details["starRating"],
                details["starRatingCount"],
                details["priceAsNumber"],
            ])
if __name__ == '__main__':
    # CLI: read channel ids from a file, crawl details, write JSON (+ optional CSV).
    ap = argparse.ArgumentParser(description="Crawls the Roku Channel Store for channel details for a set of channels.")
    ap.add_argument("channel_ids_file", help="A file that defines the set of channels to fetch channel details for. " +
                    "The format should be one channel ID (integer) per line. Lines starting with '#' are interpreted " +
                    "as comments and are ignored.")
    ap.add_argument("out_json_file", help="Output JSON file where channel details are to be written.")
    ap.add_argument("--csv", help="If a path to a .csv file is provided for this argument, a subset of the full " +
                    "channel details (the JSON) will be written to this csv file (currently only rating and price).")
    args = ap.parse_args()
    json_result = {}
    with open(args.channel_ids_file, "r") as in_file:
        # Remove duplicate channel ids in input.
        chan_ids = set()
        for line in in_file.readlines():
            if line.startswith("#"):
                continue
            chan_ids.add(int(line))
        # Crawl channel details for all unique channel ids.
        for chan_id in sorted(chan_ids):
            # NOTE(review): progress counts successful fetches only
            # (len(json_result)), so skipped channels stall the percentage.
            progress = round((len(json_result) / len(chan_ids)) * 100)
            print(f'[{progress:3d}%] Fetching channel details for channel id={str(chan_id)}.')
            details_json = get_channel_details(chan_id)
            if details_json is None:
                continue  # warning already printed by get_channel_details
            json_result[chan_id] = details_json
    print("Writing .json file...")
    write_json(json_result, args.out_json_file)
    if args.csv is not None:
        print("Writing .csv file...")
        write_csv(json_result, args.csv)
|
import torch
import random
import torch.nn.functional as F
import os
import numpy as np
from scipy.spatial.distance import cdist
def get_accuracy(prototypes, embeddings, targets):
    """Compute the accuracy of the prototypical network on the test/query points.
    Parameters
    ----------
    prototypes : `torch.FloatTensor` instance
        A tensor containing the prototypes for each class. This tensor has shape
        `(meta_batch_size, num_classes, embedding_size)`.
    embeddings : `torch.FloatTensor` instance
        A tensor containing the embeddings of the query points. This tensor has
        shape `(meta_batch_size, num_examples, embedding_size)`.
    targets : `torch.LongTensor` instance
        A tensor containing the targets of the query points. This tensor has
        shape `(meta_batch_size, num_examples)`.
    Returns
    -------
    (accuracy, softprob, predictions) : tuple
        Mean accuracy on the query points, the elementwise -p*log(p) terms of
        the softmax over normalized distances, and the predicted class indices.
        (The docstring previously claimed only the accuracy was returned.)

    NOTE(review): the min-max normalization below mutates `prototypes` and
    `embeddings` in place (`-=`, `/=`), so the caller's tensors are modified —
    confirm this is intended.
    """
    # Squared distances on the raw embeddings; these drive the predictions.
    sq_distances = torch.sum((prototypes.unsqueeze(1)
        - embeddings.unsqueeze(2)) ** 2, dim=-1)
    # Per-vector min-max normalization (in place) along the embedding axis.
    prototypes -= prototypes.min(-1, keepdim=True)[0]
    prototypes /= prototypes.max(-1, keepdim=True)[0]
    embeddings -= embeddings.min(-1, keepdim=True)[0]
    embeddings /= embeddings.max(-1, keepdim=True)[0]
    # Distances recomputed after normalization; these feed the entropy term.
    norm_distances = torch.sum((prototypes.unsqueeze(1)
        - embeddings.unsqueeze(2)) ** 2, dim=-1)
    tau = 1.0  # softmax temperature
    norm_distances = norm_distances/tau
    softprob = -1.0*F.softmax(norm_distances, dim=-1) * F.log_softmax(norm_distances, dim=-1)
    # Nearest prototype (smallest squared distance) wins.
    min_dist, predictions = torch.min(sq_distances, dim=-1)
    return torch.mean(predictions.eq(targets).float()), softprob, predictions
def gauss_kernel(X, y, sigma):
    """
    Gaussian (RBF) kernel between each row of X and the single point y.

    Parameters
    ----------
    X: np.ndarray (n, d)
    y: np.ndarray (d,)
    sigma: float, bandwidth of the kernel.

    Returns
    -------
    k (n,): kernel value for each row of X against y.
    """
    sq_dists = cdist(X, y[np.newaxis, :], metric='sqeuclidean')  # (n, 1)
    return np.squeeze(np.exp(-sq_dists / (2 * sigma ** 2)))
def apply_grad(model, grad):
    '''
    Assign the name->gradient dict *grad* to the parameters of *model*
    (accumulating into any existing .grad). Returns the L2 norm of *grad*.
    '''
    total_sq = 0
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue  # frozen parameters are skipped entirely
        g = grad[name]
        if param.grad is None:
            param.grad = g
        else:
            param.grad += g
        total_sq += torch.sum(g ** 2)
    return (total_sq ** (1 / 2)).item()
def apply_grad_ind(model, grad):
    '''
    Assign a positional list of gradients to model parameters (accumulating
    into existing .grad). Returns the L2 norm of the gradient list.
    '''
    sq_norm = 0
    for param, g in zip(model.parameters(), grad):
        if param.grad is None:
            param.grad = g
        else:
            param.grad += g
        sq_norm += torch.sum(g ** 2)
    return (sq_norm ** (1 / 2)).item()
def mix_grad_ind(grad_list, weight_list):
    '''
    Weighted combination of per-task gradient lists, computed per parameter
    position: mixed[j] = sum_i weight_list[i] * grad_list[i][j].
    '''
    mixed = []
    for per_param in zip(*grad_list):
        weighted = [w * g for w, g in zip(weight_list, per_param)]
        mixed.append(torch.sum(torch.stack(weighted), dim=0))
    return mixed
def mix_grad(grad_list, weight_list):
    '''
    Weighted combination of per-task gradient dicts (name -> tensor):
    mixed[name] = sum_i weight_list[i] * grad_list[i][name].
    The first dict initializes each entry; later dicts accumulate into it.
    '''
    mixed = {}
    for index, g_dict in enumerate(grad_list):
        weight = weight_list[index]
        for name, grad in g_dict.items():
            if index == 0:
                mixed[name] = grad * weight
            else:
                mixed[name] += grad * weight
    return mixed
def grad_to_cos(grad_list):
    '''
    Per-task inner products between each task gradient and the summed
    gradient, accumulated over parameters and normalized to sum to 1.
    '''
    scores = 0.
    for per_param in zip(*grad_list):
        stacked = torch.stack(per_param)
        stacked = stacked.reshape(stacked.shape[0], -1)   # (n, p)
        total = torch.sum(stacked, dim=0)                 # (p,)
        scores = scores + torch.sum(stacked * total.unsqueeze(0), dim=1)  # (n,)
    return scores / torch.sum(scores)
def get_accuracy_ANIL(logits, targets):
    """Mean accuracy of argmax-decoded logits against integer targets.

    Parameters
    ----------
    logits : `torch.FloatTensor` instance, shape `(num_examples, num_classes)`
        Model outputs on the query points.
    targets : `torch.LongTensor` instance, shape `(num_examples,)`
        Ground-truth class indices.

    Returns
    -------
    accuracy : `torch.FloatTensor` instance
        Mean accuracy on the query points.
    """
    predictions = torch.max(logits, dim=-1).indices
    return torch.mean((predictions == targets).float())
def loss_to_ent(loss_list, lamb=1.0, beta=1.0):
    '''
    Inverse-affine weights from a list of losses: 1 / (lamb + beta * loss).
    Larger losses (more uncertainty) yield smaller weights.
    '''
    losses = np.array(loss_list)
    return 1. / (lamb + beta * losses)
def set_seed(seed):
    """Seed every RNG in use (python, numpy, torch, CUDA) and force
    deterministic cuDNN behavior for reproducible runs."""
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.initial_seed()  # dataloader multi processing
    for seeder in (torch.manual_seed, torch.cuda.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    return None
def set_gpu(x):
    """Restrict visible CUDA devices to the ids in *x* (ints or strings)."""
    devices = ','.join(str(device) for device in x)
    os.environ['CUDA_VISIBLE_DEVICES'] = devices
    print('using gpu:', devices)
def check_dir(args):
    """Ensure the result directory <args.result_path>/<args.alg> exists."""
    save_path = os.path.join(args.result_path, args.alg)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    return None
# -*- coding: utf-8 -*-
from __future__ import division #对未来版本兼容 只能放第一句
import Adafruit_PCA9685 #舵机控制库 pwm、频率等
import time #time库,用于延时
import cv2
import threading
import socket
import RPi.GPIO as GPIO #树莓派的gpio库
GPIO.setmode(GPIO.BCM)  # use Broadcom (BCM) pin numbering
GPIO.setup(16, GPIO.OUT)  # pin 16 drives the buzzer
GPIO.output(16,True)  # True = silent (active-low buzzer; see schematic)
#---------------------------- communication setup -------------------------------------
# Create a TCP/IP socket.
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
# Static IP of this host; adjust address/port to match the PC side.
address='192.168.137.102'
port =8888
# Bind the address and port.
s.bind((address,port))
# Allow up to 2 queued connections.
s.listen(2)
print("请运行电脑端的通信程序,确保通信已建立后程序才会运行!")
sock,addr=s.accept()
# Initialize the PCA9685 servo driver.
pwm = Adafruit_PCA9685.PCA9685()
pwm.set_pwm_freq(60)  # PWM frequency in Hz
time.sleep(0.5)  # settle for 0.5 s
pwm.set_pwm(1,0,90)  # args: (channel, on-tick, pulse count); channel 1 = bottom (pan) servo
time.sleep(0.5)
pwm.set_pwm(2,0,325)  # channel 2 = tilt (Y axis); larger pulse tilts down. 325 from earlier lab calibration.
time.sleep(1)
# Load the Haar cascade classifiers for face and eye detection.
face_cascade = cv2.CascadeClassifier( '123.xml' )
eye_cascade = cv2.CascadeClassifier('eye.xml')
# Tracking state shared with play_thread(); documented where used.
x=0
y=0
w=0
h=0
thisError_x=0
lastError_x=0
thisError_y=0
lastError_y=0
Y_P = 425  # servo initial position
X_P = 425  # servo initial position
# Servo control function below.
def xx():
    """Servo drive loop: continuously push the current X_P/Y_P positions
    to the PCA9685.

    Bug fix: CON was reset to 0 at the top of every iteration, so the
    "normal operation" else-branch could never execute; it is now
    initialized once before the loop, making the offset centering a
    genuine one-time power-on step.
    """
    CON = 0  # 0 only on the very first pass (power-on centering)
    while True:
        if CON == 0:
            pwm.set_pwm(1, 0, 650 - X_P + 200)
            pwm.set_pwm(2, 0, 650 - Y_P + 200)
            CON = 1
        else:
            pwm.set_pwm(1, 0, 650 - X_P)  # normal operation: pan servo (bottom axis)
            pwm.set_pwm(2, 0, 650 - Y_P)  # normal operation: tilt servo (top axis)
class Stack:
    """Fixed-capacity frame stack: pushing past capacity drops the oldest items."""

    def __init__(self, stack_size):
        self.items = []
        self.stack_size = stack_size  # maximum number of frames retained

    def is_empty(self):
        return len(self.items) == 0

    def pop(self):
        # Pop the newest item.
        return self.items.pop()

    def peek(self):
        # Return the newest item without removing it (None when empty).
        # Bug fix: previously called self.isEmpty(), which does not exist
        # (the method is named is_empty), raising AttributeError.
        if not self.is_empty():
            return self.items[len(self.items) - 1]

    def size(self):
        return len(self.items)

    def push(self, item):
        # If the stack would overflow, drop enough of the oldest items
        # (at index 0) so the newest item always fits.
        if self.size() >= self.stack_size:
            for i in range(self.size() - self.stack_size + 1):
                self.items.remove(self.items[0])
        self.items.append(item)
def capture_thread(video_path, frame_buffer, lock):  # producer: push video frames
    """Read frames from the MJPEG network stream and push them onto the stack.

    :param video_path: unused — the capture URL is hard-coded below.
    :param frame_buffer: shared Stack of recent frames.
    :param lock: lock guarding the shared stack.
    """
    print("capture_thread start")
    #cap = cv2.VideoCapture(0)  # alternative: local camera
    cap = cv2.VideoCapture('http://192.168.137.102:8080/?action=stream')  # network camera stream
    cap.set(3, 640)  # frame width
    cap.set(4, 480)  # frame height
    if not cap.isOpened():
        raise IOError("摄像头不能被调用")
    while True:
        return_value, frame = cap.read()  # success flag and frame
        if return_value is not True:
            break
        lock.acquire()  # protect the shared stack
        frame_buffer.push(frame)  # add the newest frame
        lock.release()
        if cv2.waitKey(1)==27:  # exit on ESC
            break
    cap.release()
    cv2.destroyAllWindows()
def play_thread(frame_buffer, lock):
    """Consumer: pop the newest frame, detect face/eyes, and PD-track the
    face with the pan/tilt servos; also drive the buzzer from the fatigue
    flag received over the socket.

    Bug fix: the lower travel-limit guard for the tilt axis tested
    ``X_P < 0`` (copy-paste from the pan axis) while resetting ``Y_P``,
    so Y_P could drift below 0; it now tests Y_P.
    """
    print("detect_thread start")
    print("detect_thread frame_buffer size is", frame_buffer.size())
    global thisError_x, lastError_x, thisError_y, lastError_y, Y_P, X_P
    while True:
        try:
            # Fatigue alarm flag from the PC over TCP: '1' = alarm on.
            t = sock.recv(1024).decode('utf8')
            if t == '1':
                print('请勿疲劳驾驶')
                GPIO.output(16, False)  # False = buzzer on
                cv2.waitKey(1)
            elif t == '0':
                GPIO.output(16, True)   # True = buzzer off
            else:
                GPIO.output(16, True)
        except Exception:
            continue
        if frame_buffer.size() > 0:
            lock.acquire()
            frame = frame_buffer.pop()  # newest frame
            lock.release()
            frame = cv2.flip(frame, 0, dst=None)  # vertical flip
            frame = cv2.flip(frame, 1, dst=None)  # horizontal flip
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(
                gray,
                scaleFactor=1.3,    # smaller = more precise but more compute
                minNeighbors=2,     # detections required to accept a face
                minSize=(300, 300)  # reject faces smaller than this
            )
            if len(faces) > 0:
                for (x, y, w, h) in faces:  # (x, y) top-left corner; w/h extent
                    cv2.rectangle(frame, (x, y), (x + h, y + w), (0, 255, 0), 2)
                    eyeh = int(h / 2)  # search eyes only in the upper half of the face
                    fac_gray = gray[y: (y + eyeh), x: (x + w)]
                    eye_result = []
                    eye = eye_cascade.detectMultiScale(fac_gray, 1.1, 7)
                    for (ex, ey, ew, eh) in eye:
                        eye_result.append((x + ex, y + ey, ew, eh))
                    for (ex, ey, ew, eh) in eye_result:
                        cv2.rectangle(frame, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
                # Track the center of the (last detected) face.
                result = (x, y, w, h)
                x = result[0] + w / 2
                y = result[1] + h / 2
                thisError_x = x - 320  # horizontal offset from image center
                if thisError_x < 10 and thisError_x > -10:
                    thisError_x = 0  # dead zone: close enough to center
                thisError_y = y - 240
                if thisError_y < 10 and thisError_y > -10:
                    thisError_y = 0
                # PD control; tune the P and D gains (both 7) as needed.
                pwm_x = thisError_x * 7 + 7 * (thisError_x - lastError_x)
                pwm_y = thisError_y * 7 + 7 * (thisError_y - lastError_y)
                lastError_x = thisError_x  # remember errors for the next D term
                lastError_y = thisError_y
                XP = pwm_x / 100  # scale down to servo steps
                YP = pwm_y / 100
                X_P = X_P + int(XP)
                Y_P = Y_P + int(YP)
                if X_P > 670:  # travel limits, to avoid over-rotation
                    X_P = 650
                if X_P < 0:
                    X_P = 0
                if Y_P > 650:
                    Y_P = 650
                if Y_P < 0:  # bug fix: previously tested X_P here
                    Y_P = 0
            cv2.imshow("capture", frame)
    s.close()  # unreachable (the loop never exits); kept from the original
if __name__ == '__main__':
    path = 0  # unused: capture_thread hard-codes its own stream URL
    frame_buffer = Stack(3)  # keep only the 3 newest frames
    lock = threading.RLock()
    # Producer thread: pushes frames from the camera stream.
    t1 = threading.Thread(target=capture_thread, args=(path, frame_buffer, lock))
    t1.start()
    # Consumer thread: processes the newest frame and tracks the face.
    t2 = threading.Thread(target=play_thread, args=(frame_buffer, lock))
    t2.start()
    tid=threading.Thread(target=xx)  # servo PWM output loop
    tid.setDaemon(True)  # don't block interpreter exit
    tid.start()
|
from msrest.serialization import Model
class LineGroup(Model):
    """LineGroup.

    :param naptan_id_reference:
    :type naptan_id_reference: str
    :param station_atco_code:
    :type station_atco_code: str
    :param line_identifier:
    :type line_identifier: list of str
    """
    # msrest serialization map: python attribute -> wire key/type.
    # NOTE(review): the wire key 'stationActoCode' looks like a typo for
    # 'stationAtcoCode' — confirm against the upstream API schema before
    # changing it, since deserialization must match the server's JSON.
    _attribute_map = {
        'naptan_id_reference': {'key': 'naptanIdReference', 'type': 'str'},
        'station_atco_code': {'key': 'stationActoCode', 'type': 'str'},
        'line_identifier': {'key': 'lineIdentifier', 'type': '[str]'}
    }

    def __init__(self, naptan_id_reference=None, station_atco_code=None, line_identifier=None):
        super(LineGroup, self).__init__()
        self.naptan_id_reference = naptan_id_reference
        self.station_atco_code = station_atco_code
        self.line_identifier = line_identifier
import sys
import boto3
def main():
    """Entry point: create the bucket named by argv[1], reporting errors.

    Bug fix: the try/except previously wrapped the *definition* of main
    (which never raises), so runtime errors from create_s3bucket were
    never caught; the handler now wraps the call itself.
    """
    try:
        create_s3bucket(bucket_name, region=None)
    except Exception as e:
        print(e)
def create_s3bucket(bucket_name, region=None):
    """Create an S3 bucket in a specified region.

    If a region is not specified — neither via the parameter nor as the
    optional second CLI argument — the bucket is created in the S3 default
    region (us-east-1).

    Bug fix: *region* was unconditionally overwritten with ``sys.argv[2]``,
    which made the parameter useless and raised IndexError whenever the
    script was run without a region argument. The CLI value is now only a
    fallback when no region is passed in.

    :param bucket_name: Bucket to create
    :param region: String region to create bucket in, e.g., 'us-west-2'
    :print: Prints the create_bucket response
    """
    # Fall back to the optional second CLI argument when no region is given.
    if region is None and len(sys.argv) > 2:
        region = sys.argv[2]
    if region is None:
        s3_bucket = boto3.client(
            's3',
        )
        bucket = s3_bucket.create_bucket(
            Bucket=bucket_name,
            ACL='private',
        )
    else:
        s3_bucket = boto3.client(
            's3',
            region_name=region
        )
        # LocationConstraint is how the CreateBucket API expects the region
        # outside us-east-1.
        location = {'LocationConstraint': region}
        bucket = s3_bucket.create_bucket(
            Bucket=bucket_name,
            ACL='private',
            CreateBucketConfiguration=location
        )
    print(bucket)
# NOTE(review): evaluated at import time — running this file without a CLI
# argument raises IndexError here, before main() is ever called.
bucket_name = sys.argv[1]
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
__version__ = '0.1'
from getpass import getpass
from smtplib import SMTP
from email.header import Header
from email.mime.text import MIMEText
class Postbox(object):
    """Thin SMTP wrapper: connects (and optionally logs in) on construction,
    then sends UTF-8 plain-text messages via send().

    Class attributes are overridable defaults; any of them may be passed as
    keyword arguments to __init__/connect. Usable as a context manager.
    """
    host = None
    port = None
    user = None
    password = None
    tls = True  # issue STARTTLS after connecting
    prompt_user = 'username? '      # interactive prompt used when user is unset
    prompt_password = 'password? '  # interactive prompt used when password is unset
    debuglevel = None  # forwarded to SMTP.set_debuglevel when truthy
    dry_run = False    # when True, send() builds the message but does not transmit

    def _update(self, attrs):
        # Copy keyword overrides onto the instance.
        for key, value in attrs.items():
            setattr(self, key, value)

    def __init__(self, **attrs):
        self.server = None
        self.connect(**attrs)

    def connect(self, **attrs):
        """Open the SMTP connection; prompt for missing credentials and log in."""
        if attrs:
            self._update(attrs)
        self.server = SMTP(self.host, self.port)
        if self.debuglevel:
            self.server.set_debuglevel(self.debuglevel)
        if self.tls:
            self.server.starttls()
        # Prompt only for what is missing; login only with full credentials.
        if not self.user and self.prompt_user:
            self.user = input(self.prompt_user)
        if self.user and not self.password and self.prompt_password:
            self.password = getpass(self.prompt_password)
        if self.user and self.password:
            self.server.login(self.user, self.password)

    def send(self, body, **headers_dict):
        """Send *body* as UTF-8 plain text.

        Header kwargs: trailing underscores are stripped and inner underscores
        become dashes (e.g. ``reply_to_`` -> ``reply-to``). Keys matching
        'from'/'to' also set the sendmail envelope; iterable (non-string)
        values are joined with ", ".
        """
        sendmail_args = {'from': self.user, 'to': ''}
        message = MIMEText(body, 'plain', 'utf-8')
        message['From'] = Header(self.user, 'utf-8')
        for key, value in headers_dict.items():
            key = key.rstrip('_').lower().replace('_', '-')
            if key in sendmail_args:
                sendmail_args[key] = value
            if hasattr(value, '__iter__') and not isinstance(value, str):
                value = ', '.join(value)
            message[key] = Header(value, 'utf-8')
        if not self.dry_run:
            self.server.sendmail(
                sendmail_args['from'],
                sendmail_args['to'],
                message.as_string()
            )

    def close(self):
        self.server.quit()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()
class Gmail(Postbox):
    """Postbox preconfigured for Gmail's STARTTLS submission endpoint."""
    host = 'smtp.gmail.com'
    # Fix: use an int for the port. The original string '587' only worked
    # because getaddrinfo accepts service strings; smtplib documents an
    # integer port.
    port = 587
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utility for Interacting with SDAccel Tools"""
import subprocess
import os
import tvm._ffi
from . import util
@tvm._ffi.register_func("tvm_callback_sdaccel_compile")
def compile_vhls(kernel_info, device_name):
    """Compile Vivado HLS code for SDAccel.

    Parameters
    ----------
    kernel_info : list of (str, str)
        List of kernel information. The kernel information is a tuple of
        function name and source code.

    device_name : str
        The name of the target device

    Return
    ------
    xclbin : bytearray
        The bytearray of the xclbin
    """
    tmp_dir = util.tempdir()
    sdk = os.environ.get("XILINX_SDX", None)
    xocc = os.path.join(sdk, "bin/xocc") if sdk else "xocc"
    # Default to software emulation when XCL_EMULATION_MODE is set.
    target = os.environ.get("XCL_TARGET",
                            "sw_emu" if os.environ.get("XCL_EMULATION_MODE") else "hw")
    advanced_params = ["--xp", "param:compiler.preserveHlsOutput=1",
                       "--xp", "param:compiler.generateExtraRunData=true"]
    platform = device_name
    if not platform:
        platform = os.environ.get("XCL_PLATFORM", os.environ.get("AWS_PLATFORM"))
    if platform is None:
        raise RuntimeError("No Xilinx device specified.")
    tmp_xo_files = []
    for funcname, code in kernel_info:
        funcname = funcname.value
        code = code.value
        tmp_cpp = tmp_dir.relpath(funcname + ".cpp")
        tmp_xo = tmp_dir.relpath(funcname + ".xo")
        with open(tmp_cpp, "wb") as out_file:
            # Fix: the FFI may hand back str; bytes(str) raises TypeError on
            # Python 3, so encode explicitly in that case.
            out_file.write(code.encode("utf-8") if isinstance(code, str)
                           else bytes(code))
        # build xo (compile one kernel to an object container)
        args = [xocc, "-c", "-t", target, "--platform", platform, "-o", tmp_xo, "-k", funcname] + \
               advanced_params + [tmp_cpp]
        returncode = subprocess.call(args)
        if returncode != 0:
            raise RuntimeError("Compile error")
        tmp_xo_files.append(tmp_xo)
    # build xclbin (link all kernel objects into one binary)
    tmp_xclbin = tmp_dir.relpath("output.xclbin")
    args = [xocc, "-l", "-t", target, "--platform", platform, "-o", tmp_xclbin] + tmp_xo_files + \
           advanced_params
    returncode = subprocess.call(args)
    if returncode != 0:
        raise RuntimeError("Link error")
    # Fix: close the file deterministically instead of leaking the handle.
    with open(tmp_xclbin, "rb") as xclbin_file:
        return bytearray(xclbin_file.read())
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-17 16:42
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: widens/redefines Catagories.regex as an
    # ArrayField of CharField(50). Do not edit applied migrations by hand.

    dependencies = [
        ('Manager', '0008_auto_20170417_1602'),
    ]

    operations = [
        migrations.AlterField(
            model_name='catagories',
            name='regex',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(blank=True, max_length=50), size=None),
        ),
    ]
|
from os import environ
from fastapi import FastAPI
from models import Student, Settings
from config import EXAMPLE_KEY
# Application-wide singletons created at import time.
settings = Settings()  # app configuration (Settings imported from models; presumably env-backed — confirm)
app = FastAPI()
database = []  # in-memory "database": a plain list of student dicts
@app.get('/')
async def read_index():
    """Landing endpoint: returns a static welcome message."""
    payload = {'msg': 'welcome'}
    return payload
@app.get('/health')
async def read_health():
    """Liveness probe endpoint."""
    status = {'status': 'ok'}
    return status
@app.get('/env')
async def read_env():
    # NOTE(review): this exposes *all* process environment variables (which
    # may include credentials) to any caller — confirm this endpoint is only
    # reachable in trusted/dev environments before deploying.
    return dict(environ)
@app.get("/info")
async def info():
return {
"app_name": settings.app_name,
"admin_email": settings.admin_email,
"items_per_user": settings.items_per_user,
"test_key": EXAMPLE_KEY,
}
@app.post('/seed')
def seed():
    """Populate the in-memory database with three demo users."""
    demo_users = [
        {'userid': 'ruanb', 'email': 'ruanb@localhost'},
        {'userid': 'jamesd', 'email': 'jamesd@localhost'},
        {'userid': 'deant', 'email': 'deant@localhost'},
    ]
    database.extend(demo_users)
    return {'msg': 'seeded 3 users'}
@app.get('/students')
def get_students():
    # Returns the live list object; FastAPI serializes it to JSON.
    return database
@app.get('/students/{student_userid}')
def get_student(student_userid: str):
    """Look up students by userid; returns a (possibly empty) list of matches."""
    matches = []
    for record in database:
        if record['userid'] == student_userid:
            matches.append(record)
    return matches
@app.post('/students')
def register_student(student: Student):
    """Append the posted student to the store and echo the stored record."""
    record = student.dict()
    database.append(record)
    return database[-1]
@app.delete('/students/{student_userid}')
def delete_student(student_userid: str):
    """Remove every student whose userid matches; rebinds the module-level list."""
    global database
    kept = []
    for record in database:
        if record['userid'] != student_userid:
            kept.append(record)
    database = kept
    return {'msg': 'deleted {}'.format(student_userid)}
|
from django.db import models
from django.db.models import Q
from enum import Enum
# custom
from Tools.model_util import CModel
# Create your models here.
# Stock threshold below which a product counts as "low" (see ProductStock.if_low).
LOW_QTY_COUNT = 5
class ProductStatus(Enum):
    """Product listing states, stored in the database as string codes."""
    ProductAll = '0'
    ProductOnsale = '1'
    ProductOffsale = '2'
class Category(CModel):
    # Product category; parent_id/level presumably encode a tree
    # (no FK constraint — the table is unmanaged). TODO confirm hierarchy use.
    category_id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=200, blank=True, null=True)
    description = models.CharField(max_length=200, blank=True, null=True)
    parent_id = models.IntegerField(blank=True, null=True)
    level = models.IntegerField(blank=True, null=True)

    class Meta:
        managed = False  # table maintained outside Django migrations
        db_table = 'category'
class CategoryProduct(CModel):
    # Join table mapping products to categories (plain int columns, no FKs).
    product_id = models.IntegerField(blank=True, null=True)
    category_id = models.IntegerField(blank=True, null=True)
    status = models.IntegerField(blank=True, null=True)

    class Meta:
        managed = False  # table maintained outside Django migrations
        db_table = 'category_product'
        unique_together = (('product_id', 'category_id'),)
class Product(CModel):
    # Catalog product row. Note: price/date columns are stored as CharFields
    # (legacy schema — the table is unmanaged).
    product_id = models.AutoField(primary_key=True)
    sku = models.CharField(unique=True, max_length=50, blank=True, null=True)
    en_name = models.CharField(max_length=200, blank=True, null=True)
    name = models.CharField(max_length=200, blank=True, null=True)
    price = models.CharField(max_length=11, blank=True, null=True)
    purchase_price = models.CharField(max_length=11, blank=True, null=True)
    description = models.CharField(max_length=500, blank=True, null=True)
    manufacturer = models.CharField(max_length=30, blank=True, null=True)
    images_num = models.IntegerField(blank=True, null=True)
    search_key = models.CharField(max_length=300, blank=True, null=True)
    status = models.IntegerField(blank=True, null=True)
    tax_id = models.CharField(max_length=11, blank=True, null=True)
    shop_ids = models.TextField(blank=True, null=True)
    cat_ids = models.CharField(max_length=200, blank=True, null=True)
    special_price = models.CharField(max_length=11, blank=True, null=True)
    special_price_start = models.CharField(max_length=32, blank=True, null=True)
    special_price_end = models.CharField(max_length=32, blank=True, null=True)
    total_sell = models.IntegerField()
    create_at = models.CharField(max_length=30, blank=True, null=True)
    quality_date = models.CharField(max_length=30, blank=True, null=True)
    show_index = models.IntegerField(blank=True, null=True)

    class Meta:
        managed = False  # table maintained outside Django migrations
        db_table = 'product'

    @classmethod
    def get_lowcount_outofdate_products(cls):
        """Return on-sale products whose stock is low.

        NOTE(review): despite the name, only the low-stock check is
        implemented — there is no "out of date" (quality_date) filter here.
        """
        onsale_products = Product.objects.filter(status=ProductStatus.ProductOnsale.value)
        products = []
        # One ProductStock lookup per product (N+1 queries) — fine for small
        # catalogs, consider a single annotated query if this grows.
        lowcount_products = [product for product in onsale_products if ProductStock.if_low(product_id=product.product_id)]
        products.extend(lowcount_products)
        return products
class ProductComment(CModel):
    # Customer comment on a product, tied to the originating order.
    product_id = models.IntegerField(blank=True, null=True)
    order_id = models.IntegerField(blank=True, null=True)
    customer_id = models.IntegerField(blank=True, null=True)
    content = models.TextField(blank=True, null=True)
    create_at = models.CharField(max_length=30, blank=True, null=True)

    class Meta:
        managed = False  # table maintained outside Django migrations
        db_table = 'product_comment'
class ProductDetail(CModel):
    # Long-form product description, split out of the main product table.
    product_id = models.IntegerField(blank=True, null=True)
    detail = models.TextField(blank=True, null=True)

    class Meta:
        managed = False  # table maintained outside Django migrations
        db_table = 'product_detail'
class ProductOrder(CModel):
    # Join table linking products to orders.
    product_id = models.IntegerField(blank=True, null=True)
    order_id = models.IntegerField(blank=True, null=True)

    class Meta:
        managed = False  # table maintained outside Django migrations
        db_table = 'product_order'
class ProductStock(CModel):
    # Per-shop stock level for a product.
    shop_id = models.IntegerField(blank=True, null=True)
    product_id = models.IntegerField(blank=True, null=True)
    qty = models.IntegerField(blank=True, null=True)

    class Meta:
        managed = False  # table maintained outside Django migrations
        db_table = 'product_stock'
        unique_together = (('shop_id', 'product_id'),)

    @classmethod
    def if_low(cls, product_id):
        """Return True when the product's stock is missing or below LOW_QTY_COUNT.

        Bug fix: the original called .get() (which returns a single model
        instance, not a queryset) and then indexed it with [0]; that always
        raised TypeError, so the except branch reported every existing
        product as low. Use .filter(...).first() instead (several rows per
        product are possible — unique_together is (shop_id, product_id)).
        """
        try:
            stock = cls.objects.filter(product_id=product_id).first()
        except Exception:
            # Preserve the original fail-safe: treat lookup errors as "low".
            return True
        if stock is None or stock.qty is None:
            return True
        return int(stock.qty) < LOW_QTY_COUNT
class ProductTierPrice(CModel):
    # Quantity-based pricing per customer group (price stored as text — legacy schema).
    product_id = models.IntegerField(blank=True, null=True)
    tier_price = models.CharField(max_length=30, blank=True, null=True)
    tier_count = models.IntegerField(blank=True, null=True)
    customer_group_id = models.IntegerField(blank=True, null=True)

    class Meta:
        managed = False  # table maintained outside Django migrations
        db_table = 'product_tier_price'
        unique_together = (('product_id', 'customer_group_id'),)
# Generated by Django 3.2.9 on 2021-12-06 20:09
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds CellLine/TissueType lookup tables and
    # wires them to ExperimentDataFile via M2M fields. Do not edit applied
    # migrations by hand.

    dependencies = [
        ('search', '0020_file_facet_values'),
    ]

    operations = [
        migrations.CreateModel(
            name='CellLine',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('line_name', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='TissueType',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tissue_type', models.CharField(max_length=100)),
            ],
        ),
        migrations.AddField(
            model_name='experimentdatafile',
            name='cell_lines',
            field=models.ManyToManyField(related_name='experiment_data', to='search.CellLine'),
        ),
        migrations.AddField(
            model_name='experimentdatafile',
            name='tissue_types',
            field=models.ManyToManyField(related_name='experiment_data', to='search.TissueType'),
        ),
    ]
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from pooling.legacy_sort import sort_p2x2, sort_p3x3
def sort_p(x):
    """Sort the window elements of each pooling region in descending order.

    Expects x shaped (batch, pool_height, pool_width, channels, elems);
    only the last axis is sorted. Uses tf.contrib.framework.sort — a
    TF1-era API (tf.contrib is gone in TF2).
    """
    _, pool_height, pool_width, channels, elems = x.get_shape().as_list()
    # Flatten everything but the element axis so a single 1-D sort suffices.
    x = tf.reshape(x, [-1, elems])
    x = tf.contrib.framework.sort (x, axis=-1,
                                   direction='DESCENDING', name=None)
    # Restore the (batch, h, w, channels, elems) layout.
    x = tf.reshape(x, [-1, pool_height, pool_width, channels, elems])
    return x
def tf_repeat(tensor, repeats):
    """Tile *tensor* so that dimension i is repeated repeats[i] times.

    Args:
        tensor: A Tensor, 1-D or higher.
        repeats: list with one repeat count per dimension of *tensor*.
    Returns:
        A Tensor of the same type with shape tensor.shape * repeats.
    """
    with tf.variable_scope("repeat"):
        expanded = tf.expand_dims(tensor, -1)
        tiled = tf.tile(expanded, multiples=[1] + repeats)
        return tf.reshape(tiled, tf.shape(tensor) * repeats)
def ow_pooling(x, weights, padding='SAME', strides=(2, 2), pool_size = (2, 2), norm='None', sort=True):
    """Ordered-weighted pooling: sort each window's values, weight them, sum.

    Args:
        x: NHWC input tensor.
        weights: pooling weights (one per window element; a matrix for the
            'w2_*' normalisations).
        padding: 'SAME' pre-pads the input before patch extraction.
        strides, pool_size: standard pooling geometry.
        norm: 'None', 'w_norm', 'w_norm_p', 'w2_norm' or 'w2_norm_p' — how
            to renormalise `weights` (via an assign op) before applying them.
        sort: when True, window elements are sorted descending first.
    Returns:
        Pooled NHWC tensor (window axis reduced by the weighted sum).
    """
    _, height, width, channels = x.get_shape().as_list()
    # NOTE(review): `a * h % a` is always 0, so these pads are no-ops; the
    # likely intent was (pool_size - dim % pool_size) % pool_size. Left
    # unchanged because extract_image_patches below pads with 'SAME' anyway.
    pad_bottom = pool_size[0] * height%pool_size[0]
    pad_right = pool_size[1] * width%pool_size[1]
    if(padding=='SAME'): # Complete size to pad 'SAME'
        paddings = tf.constant([[0, 0], [0, pad_bottom], [0, pad_right], [0, 0]])
        x = tf.pad(x, paddings, "CONSTANT")
    # Extract pooling regions
    stride = [1, strides[0], strides[1], 1]
    ksize = [1, pool_size[0], pool_size[1], 1]
    x = tf.extract_image_patches(x, ksizes = ksize, strides = stride,
                                 rates = [1, 1, 1, 1], padding='SAME')
    _, pool_height, pool_width, elems = x.get_shape().as_list()
    # Separate the channel axis from the window-element axis.
    elems = int(elems / channels)
    x = tf.reshape(x, [-1, pool_height, pool_width, elems, channels])
    x = tf.transpose(x, perm=[0, 1, 2, 4, 3])
    # Sort values for pooling.
    if sort:
        # Bug fix: the original used two independent `if`s followed by an
        # `else` bound to the 3x3 test, so a 2x2 pool ran sort_p2x2 and then
        # the generic sort_p as well (redundant second sort). Use a single
        # if/elif/else chain.
        if pool_size[0] == 2 and pool_size[1] == 2:
            x = sort_p2x2(x)
        elif pool_size[0] == 3 and pool_size[1] == 3:
            x = sort_p3x3(x)
        else:
            x = sort_p(x)
    # Normalise the weights in-place (assign op) and apply them.
    if norm == 'w_norm':
        assign_op = weights.assign(tf.div(weights,tf.reduce_sum(tf.abs(weights))))
        with tf.control_dependencies([assign_op]):
            x = weights * x
    elif norm == 'w_norm_p':
        assign_op = weights.assign(tf.div(tf.maximum(weights, 0.0001),
                                          tf.reduce_sum(tf.maximum(weights, 0.0001))))
        with tf.control_dependencies([assign_op]):
            x = weights * x
    elif norm == 'w2_norm':
        assign_op = weights.assign(
            tf.div(weights,
                   tf.transpose(tf_repeat([tf.reduce_sum(tf.abs(weights),1)],
                                          [tf.shape(weights)[1],1])))
        )
        with tf.control_dependencies([assign_op]):
            x = weights * x
    elif norm == 'w2_norm_p':
        assign_op = weights.assign(
            tf.div(tf.maximum(weights, 0.0001),
                   tf.transpose(tf_repeat([tf.reduce_sum(tf.maximum(weights, 0.0001),1)],
                                          [tf.shape(weights)[1],1])))
        )
        with tf.control_dependencies([assign_op]):
            x = weights * x
    else:
        x = weights * x
    # Weighted sum over the window-element axis.
    x = tf.reduce_sum(x,4)
    return x
|
#!/usr/bin/env python3
"""Compile the DreamMaker project and echo the compiler output."""
import sys, subprocess

# Equivalent shell command: "dm -l -verbose GesuProject.dme"
result = subprocess.run(['dm', '-l', '-verbose', 'GesuProject.dme'], stdout=subprocess.PIPE)
# Bug fix: the decoded output was computed and discarded; print it so the
# caller actually sees the compiler log.
print(result.stdout.decode('utf-8'))
# Bug fix: propagate the compiler's exit status instead of always exiting 0.
sys.exit(result.returncode)
|
"""
Test writing python dictionaries to csv files
"""
import csv
import itertools
#test dictionary
testD1 = {'d1key1':1111, 'd1key2':2222, 'd1key3':3333}
#test dictionary with dictionary
testD2 = {'d2key1':1111, 'd2key2':2222, 'd2dict2':{'d2key3':3333, 'd2key4':4444}}
#test dictionary lists
testD3 = [ {'d3key1':1111, 'd3key2':1222, 'd3key3':1333},
{'d3key1':2111, 'd3key2':2222, 'd3key3':2333},
{'d3key1':3111, 'd3key2':3222, 'd3key3':3333}
]
testD4 = [ {'d4key1':1111, 'd4key2':1222, 'd4dict2':{'d4key3':1333, 'd4key4':1444}},
{'d4key1':1111, 'd4key2':1222, 'd4dict2':{'d4key3':1333, 'd4key4':1444}},
{'d4key1':1111, 'd4key2':1222, 'd4dict2':{'d4key3':1333, 'd4key4':1444}}
]
print(testD1)
print(testD2)
print(testD3)
print(testD4)
#Tests work at this point
#myKeys = list(testD1)#works for the single examples
myKeys = ['d3key1','d3key2', 'd3key3', 'd3key4']
with open('dict-To-CSVtest.csv', 'w', newline='') as csvfile:
#fieldnames = myKeys
writer = csv.DictWriter(csvfile,fieldnames=myKeys,extrasaction='ignore')
writer.writeheader()
#writer.writerows(testD3)
print("finished")
|
import tensorflow as tf
import tensorflow_probability as tfp
import experiments.models.model_factory as model_factory
import experiments.models.unitary_rnn as urnn
def get_unitary_matrix(vector):
    """Map a packed vector to a unitary matrix via expm of a skew-Hermitian matrix."""
    lower = tfp.math.fill_triangular(vector)
    skew = lower - tf.linalg.adjoint(lower)
    return tf.linalg.expm(skew)
@tf.keras.utils.register_keras_serializable()
class MatrixExponentialUnitaryRNN(tf.keras.layers.AbstractRNNCell):
    """RNN cell whose recurrence matrix is expm of a packed skew-Hermitian matrix."""

    def __init__(self, state_size, output_size, capacity_measure=1, use_fft=False, trainable_initial_state=False, **kwargs):
        super().__init__(**kwargs)
        self.state_size_value = state_size
        self.output_size_value = output_size
        assert 0 <= capacity_measure <= 1
        # Bug fix: store capacity_measure so get_config() can round-trip it.
        # It was missing from the serialized config, so restored models
        # silently fell back to the default of 1.
        self.capacity_measure = capacity_measure
        # Fraction of the full skew-Hermitian parameter count actually trained;
        # the remainder is zero-padded in call().
        self.full_capacity = self.state_size * (self.state_size + 1) // 2
        self.capacity = int(capacity_measure * self.full_capacity)
        self.remaining_capacity = self.full_capacity - self.capacity
        self.use_fft = use_fft
        self.trainable_initial_state = trainable_initial_state
        # Real/imag parts of the packed recurrence parameter vector.
        self.real_state_vector = self.add_weight('real_state_vector', (self.capacity,), tf.float32, tf.keras.initializers.Constant())
        self.imag_state_vector = self.add_weight('imag_state_vector', (self.capacity,), tf.float32, tf.keras.initializers.Constant())
        self.real_initial_state = self.add_weight('real_initial_state', (self.state_size,), tf.float32, tf.keras.initializers.Constant(), trainable=self.trainable_initial_state)
        self.imag_initial_state = self.add_weight('imag_initial_state', (self.state_size,), tf.float32, tf.keras.initializers.Constant(), trainable=self.trainable_initial_state)
        self.bias = self.add_weight('bias', (self.state_size,), tf.float32, tf.keras.initializers.Constant())
        self.output_layer = tf.keras.layers.Dense(self.output_size)
        # Input projections are created in build() once the input size is known.
        self.real_input_matrix = None
        self.imag_input_matrix = None

    @property
    def state_size(self):
        return self.state_size_value

    @property
    def output_size(self):
        return self.output_size_value

    def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
        # Broadcast the (possibly trainable) complex initial state across the batch.
        return tf.repeat(tf.complex(self.real_initial_state, self.imag_initial_state)[tf.newaxis, ...], batch_size, 0)

    def build(self, input_shape):
        inputs_size = model_factory.get_concat_input_shape(input_shape)
        # With FFT augmentation the effective input is twice as wide
        # (time-domain concatenated with frequency-domain).
        factor = 2 if self.use_fft else 1
        self.real_input_matrix = self.add_weight('real_input_matrix', (self.state_size, factor * inputs_size), tf.float32, tf.keras.initializers.GlorotUniform())
        self.imag_input_matrix = self.add_weight('imag_input_matrix', (self.state_size, factor * inputs_size), tf.float32, tf.keras.initializers.GlorotUniform())

    def call(self, inputs, states):
        inputs = model_factory.get_concat_inputs(inputs)
        # Zero-pad the trainable parameters up to the full triangular size.
        state_matrix = get_unitary_matrix(tf.concat((tf.complex(self.real_state_vector, self.imag_state_vector), tf.zeros((self.remaining_capacity,), tf.complex64)), -1))
        input_matrix = tf.complex(self.real_input_matrix, self.imag_input_matrix)
        time_domain_inputs = tf.cast(inputs, tf.complex64)
        if self.use_fft:
            frequency_domain_inputs = tf.signal.fft(time_domain_inputs)
            augmented_inputs = tf.concat((time_domain_inputs, frequency_domain_inputs), -1)
        else:
            augmented_inputs = time_domain_inputs
        input_parts = tf.matmul(input_matrix, augmented_inputs[..., tf.newaxis])
        state_parts = tf.matmul(state_matrix, states[0][..., tf.newaxis])
        # modrelu nonlinearity on the complex pre-activation.
        next_states = tf.squeeze(urnn.modrelu(state_parts + input_parts, self.bias[..., tf.newaxis]), -1)
        # Output layer sees the real and imaginary parts concatenated.
        outputs = self.output_layer(tf.concat((tf.math.real(next_states), tf.math.imag(next_states)), -1))
        return outputs, (next_states,)

    def get_config(self):
        config = super().get_config().copy()
        config.update({
            'state_size': self.state_size,
            'output_size': self.output_size,
            # Bug fix: serialize capacity_measure too (see __init__).
            'capacity_measure': self.capacity_measure,
            'use_fft': self.use_fft,
            'trainable_initial_state': self.trainable_initial_state
        })
        return config
|
from hippy.builtin import wrap
from rpython.rlib.rstring import StringBuilder
@wrap(['interp', str])
def escapeshellarg(interp, arg):
    """PHP escapeshellarg(): wrap *arg* in single quotes, escaping embedded ones.

    Each embedded ' becomes '\\'' (close quote, backslash-escaped quote,
    reopen quote), matching POSIX shell quoting.
    """
    builder = StringBuilder(len(arg) + 2)
    builder.append("'")
    for ch in arg:
        if ch == "'":
            builder.append("'\\''")
        else:
            builder.append(ch)
    builder.append("'")
    return interp.space.wrap(builder.build())
@wrap(['interp', str])
def passthru(interp, cmd):
    # Stub: PHP's passthru() is not implemented — emit a warning and return
    # PHP false, matching PHP's failure return value.
    interp.warn("passthru not implemented")
    return interp.space.w_False
|
from setuptools import setup, find_packages
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name="timeseries_generator",
description="Library for generating time series data",
long_description=long_description,
long_description_content_type="text/markdown",
packages=find_packages(),
# TODO: Once we use apis to import public data, the `package_data` is no longer required.
package_data={
"timeseries_generator": ["resources/public_data/*.csv"]
},
version="0.1.0",
url='https://github.com/Nike-Inc/ts-generator',
author='Zhe Sun, Jaap Langemeijer',
author_email='zhe.sun@nike.com',
install_requires=[
"pandas==1.2.0",
"workalendar==15.0.1",
"matplotlib==3.3.3"
],
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent"
],
license='Apache License, v2.0',
python_requires='>=3.6',
)
|
from low_rank_data import XY, XY_incomplete, missing_mask
from common import reconstruction_error
from fancyimpute import IterativeSVD
def test_iterative_svd_with_low_rank_random_matrix():
    """IterativeSVD at rank 3 should reconstruct the low-rank matrix closely."""
    imputer = IterativeSVD(rank=3)
    completed = imputer.fit_transform(XY_incomplete)
    _, missing_mae = reconstruction_error(
        XY,
        completed,
        missing_mask,
        name="IterativeSVD")
    assert missing_mae < 0.1, "Error too high!"


if __name__ == "__main__":
    test_iterative_svd_with_low_rank_random_matrix()
|
"""
Define various propagators for the PDP framework.
"""
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file
# in the project root for full license information.
import torch
import torch.nn as nn
import torch.nn.functional as F
from pdp.nn import util
# from transformer.Models import Transformer
# from transformer.Optim import ScheduledOptim
###############################################################
### The Propagator Classes
###############################################################
class NeuralMessagePasser(nn.Module):
    "Implements the neural propagator."

    def __init__(self, device, edge_dimension, decimator_dimension, meta_data_dimension, hidden_dimension, mem_hidden_dimension,
                 mem_agg_hidden_dimension, agg_hidden_dimension, dropout):
        super(NeuralMessagePasser, self).__init__()
        self._device = device
        self._module_list = nn.ModuleList()
        self._drop_out = dropout
        # Kept for the commented-out transformer experiment in forward().
        self.n_layers = 6
        self.n_head = 8
        # One learned aggregator per message direction.
        self._variable_aggregator = util.MessageAggregator(device, decimator_dimension + edge_dimension + meta_data_dimension,
                                                           hidden_dimension, mem_hidden_dimension,
                                                           mem_agg_hidden_dimension, agg_hidden_dimension, edge_dimension, include_self_message=False)
        self._function_aggregator = util.MessageAggregator(device, decimator_dimension + edge_dimension + meta_data_dimension,
                                                           hidden_dimension, mem_hidden_dimension,
                                                           mem_agg_hidden_dimension, agg_hidden_dimension, edge_dimension, include_self_message=False)
        self._module_list.append(self._variable_aggregator)
        self._module_list.append(self._function_aggregator)
        self._hidden_dimension = hidden_dimension
        self._mem_hidden_dimension = mem_hidden_dimension
        self._agg_hidden_dimension = agg_hidden_dimension
        self._mem_agg_hidden_dimension = mem_agg_hidden_dimension

    def forward(self, init_state, decimator_state, sat_problem, is_training, active_mask=None):
        """One round of learned message passing; returns (variable_state, function_state)."""
        loss = None
        variable_mask, variable_mask_transpose, function_mask, function_mask_transpose = sat_problem._graph_mask_tuple
        b_variable_mask, _, _, _ = sat_problem._batch_mask_tuple
        # Edge-level activity mask: 1 for edges in active problems.
        if active_mask is not None:
            mask = torch.mm(b_variable_mask, active_mask.float())
            mask = torch.mm(variable_mask_transpose, mask)
        else:
            edge_num = init_state[0].size(0)
            mask = torch.ones(edge_num, 1, device=self._device)
        # Broadcast per-problem meta data down to the edge level.
        if sat_problem._meta_data is not None:
            graph_feat = torch.mm(b_variable_mask, sat_problem._meta_data)
            graph_feat = torch.mm(variable_mask_transpose, graph_feat)
        if len(decimator_state) == 3:
            decimator_variable_state, decimator_function_state, edge_mask = decimator_state
        else:
            decimator_variable_state, decimator_function_state = decimator_state
            edge_mask = sat_problem._edge_mask
        # variable_state, function_state, var_permutation, func_permutation = init_state
        # transformer = Transformer(
        #     n_src_vocab = variable_state.shape[0],
        #     len_max_seq = function_state.shape[0],
        #     d_k=self._hidden_dimension,
        #     d_v=self._hidden_dimension,
        #     d_model=self._hidden_dimension,
        #     d_word_vec=self._hidden_dimension,
        #     d_inner=self._hidden_dimension,
        #     n_layers=self.n_layers,
        #     n_head=self.n_head,
        #     dropout=self._dropout).to(self._devicedevice)
        variable_state, function_state = init_state
        ## variables --> functions
        decimator_variable_state = torch.cat((decimator_variable_state, sat_problem._edge_feature), 1)
        if sat_problem._meta_data is not None:
            decimator_variable_state = torch.cat((decimator_variable_state, graph_feat), 1)
        function_state, func_loss = self._variable_aggregator(
            decimator_variable_state, sat_problem._edge_feature, variable_mask, variable_mask_transpose, edge_mask)
        # NOTE(review): both terms below are the *new* function_state, so the
        # expression reduces to `function_state` and `mask` has no effect.
        # Compare SurveyPropagator.forward, which blends the new state with
        # the *previous* one on inactive edges — confirm whether (1 - mask)
        # should multiply the pre-update state from init_state instead.
        function_state = mask * function_state + (1 - mask) * function_state
        function_state = F.dropout(function_state, p=self._drop_out, training=is_training)
        ## functions --> variables
        decimator_function_state = torch.cat((decimator_function_state, sat_problem._edge_feature), 1)
        if sat_problem._meta_data is not None:
            decimator_function_state = torch.cat((decimator_function_state, graph_feat), 1)
        variable_state, variable_loss = self._function_aggregator(
            decimator_function_state, sat_problem._edge_feature, function_mask, function_mask_transpose, edge_mask)
        # NOTE(review): same no-op blend as above.
        variable_state = mask * variable_state + (1 - mask) * variable_state
        variable_state = F.dropout(variable_state, p=self._drop_out, training=is_training)
        del mask
        if func_loss is not None and variable_loss is not None:
            loss = func_loss + variable_loss
        # NOTE(review): `loss` is computed but not returned (the returning
        # variant is commented out below) — confirm this is intentional.
        # return (variable_state, function_state), loss, (var_permutation, func_permutation)
        return (variable_state, function_state)

    def get_init_state(self, graph_map, batch_variable_map, batch_function_map, edge_feature, graph_feat, randomized, batch_replication):
        """Build initial per-edge states (uniform random in [-1, 1] or zeros)."""
        edge_num = graph_map.size(1) * batch_replication
        variable_num = batch_variable_map.size()[0]
        # function_num = batch_function_map.size()[0]
        if randomized:
            variable_state = 2.0*torch.rand(edge_num, self._hidden_dimension, dtype=torch.float32, device=self._device) - 1.0
            function_state = 2.0*torch.rand(edge_num, self._hidden_dimension, dtype=torch.float32, device=self._device) - 1.0
            # Permutation states support the commented-out transformer variant;
            # they are currently computed but not returned.
            var_permutation = 2.0*torch.rand(variable_num, self._hidden_dimension, dtype=torch.float32, device=self._device) - 1.0
            func_permutation = 2.0 * torch.rand(variable_num, self._hidden_dimension, dtype = torch.float32,
                                                device = self._device) - 1.0
        else:
            variable_state = torch.zeros(edge_num, self._hidden_dimension, dtype=torch.float32, device=self._device)
            function_state = torch.zeros(edge_num, self._hidden_dimension, dtype=torch.float32, device=self._device)
            var_permutation = torch.zeros(variable_num, self._hidden_dimension, dtype=torch.float32, device=self._device)
            func_permutation = torch.zeros(variable_num, self._hidden_dimension, dtype=torch.float32, device=self._device)
        # return (variable_state, function_state, var_permutation, func_permutation)
        return (variable_state, function_state)
###############################################################
class SurveyPropagator(nn.Module):
    "Implements the Survey Propagator (SP)."

    def __init__(self, device, decimator_dimension, include_adaptors=False, pi=0.0):
        super(SurveyPropagator, self).__init__()
        self._device = device
        # Message widths: function messages carry 3 numbers (q_u, q_s,
        # don't-care), variable messages carry 2 (survey + force channel).
        self._function_message_dim = 3
        self._variable_message_dim = 2
        self._include_adaptors = include_adaptors
        # Numerical clamps used by safe_log / safe_exp.
        self._eps = torch.tensor([1e-40], device=self._device)
        self._max_logit = torch.tensor([30.0], device=self._device)
        self._pi = torch.tensor([pi], dtype=torch.float32, device=device)
        if self._include_adaptors:
            # Optional learned projections from decimator space to SP message space.
            self._variable_input_projector = nn.Linear(decimator_dimension, self._variable_message_dim, bias=False)
            self._function_input_projector = nn.Linear(decimator_dimension, 1, bias=False)
            self._module_list = nn.ModuleList([self._variable_input_projector, self._function_input_projector])

    def safe_log(self, x):
        # log clamped away from zero to avoid -inf.
        return torch.max(x, self._eps).log()

    def safe_exp(self, x):
        # exp with clamped logits to avoid overflow.
        return torch.min(x, self._max_logit).exp()

    def forward(self, init_state, decimator_state, sat_problem, is_training, active_mask=None):
        """One SP iteration; returns (variable_state, function_state with force)."""
        variable_mask, variable_mask_transpose, function_mask, function_mask_transpose = sat_problem._graph_mask_tuple
        b_variable_mask, _, _, _ = sat_problem._batch_mask_tuple
        p_variable_mask, _, _, _ = sat_problem._pos_mask_tuple
        n_variable_mask, _, _, _ = sat_problem._neg_mask_tuple
        # Edge-level activity mask: inactive edges keep their previous state.
        if active_mask is not None:
            mask = torch.mm(b_variable_mask, active_mask.float())
            mask = torch.mm(variable_mask_transpose, mask)
        else:
            edge_num = init_state[0].size(0)
            mask = torch.ones(edge_num, 1, device=self._device)
        if len(decimator_state) == 3:
            decimator_variable_state, decimator_function_state, edge_mask = decimator_state
        else:
            decimator_variable_state, decimator_function_state = decimator_state
            edge_mask = None
        variable_state, function_state = init_state
        # First pass — aggregation over function_mask.
        # NOTE(review): the original comment labelled *both* passes
        # "functions --> variables"; this first one appears to be the
        # variables-to-functions direction — confirm.
        if self._include_adaptors:
            decimator_variable_state = F.logsigmoid(self._function_input_projector(decimator_variable_state))
        else:
            decimator_variable_state = self.safe_log(decimator_variable_state[:, 0]).unsqueeze(1)
        if edge_mask is not None:
            decimator_variable_state = decimator_variable_state * edge_mask
        # Sum of log-messages per function, excluding the current edge's own message.
        aggregated_variable_state = torch.mm(function_mask, decimator_variable_state)
        aggregated_variable_state = torch.mm(function_mask_transpose, aggregated_variable_state)
        aggregated_variable_state = aggregated_variable_state - decimator_variable_state
        function_state = mask * self.safe_exp(aggregated_variable_state) + (1 - mask) * function_state[:, 0].unsqueeze(1)
        # functions --> variables
        if self._include_adaptors:
            decimator_function_state = self._variable_input_projector(decimator_function_state)
            decimator_function_state[:, 0] = F.sigmoid(decimator_function_state[:, 0])
            decimator_function_state[:, 1] = torch.sign(decimator_function_state[:, 1])
        # Channel 1 carries the per-edge decimation "external force".
        external_force = decimator_function_state[:, 1].unsqueeze(1)
        decimator_function_state = self.safe_log(1 - decimator_function_state[:, 0]).unsqueeze(1)
        if edge_mask is not None:
            decimator_function_state = decimator_function_state * edge_mask
        # Aggregate log(1 - eta) over positive / negative occurrences separately.
        pos = torch.mm(p_variable_mask, decimator_function_state)
        pos = torch.mm(variable_mask_transpose, pos)
        neg = torch.mm(n_variable_mask, decimator_function_state)
        neg = torch.mm(variable_mask_transpose, neg) # edge*1
        same_sign = 0.5 * (1 + sat_problem._edge_feature) * pos + 0.5 * (1 - sat_problem._edge_feature) * neg
        same_sign = same_sign - decimator_function_state
        same_sign += self.safe_log(1.0 - self._pi * (external_force == sat_problem._edge_feature).float())
        opposite_sign = 0.5 * (1 - sat_problem._edge_feature) * pos + 0.5 * (1 + sat_problem._edge_feature) * neg
        # The opposite sign edge aggregation does not include the current edge by definition, therefore no need for subtraction.
        opposite_sign += self.safe_log(1.0 - self._pi * (external_force == -sat_problem._edge_feature).float())
        dont_care = same_sign + opposite_sign
        bias = 0 # (2 * dont_care) / 3.0
        same_sign = same_sign - bias
        opposite_sign = opposite_sign - bias
        dont_care = self.safe_exp(dont_care - bias)
        same_sign = self.safe_exp(same_sign)
        opposite_sign = self.safe_exp(opposite_sign)
        # Unnormalised survey components, then normalise to a distribution.
        q_u = same_sign * (1 - opposite_sign)
        q_s = opposite_sign * (1 - same_sign)
        total = q_u + q_s + dont_care
        temp = torch.cat((q_u, q_s, dont_care), 1) / total
        # Blend with previous state so inactive edges are left untouched.
        variable_state = mask * temp + (1 - mask) * variable_state
        del mask
        return variable_state, torch.cat((function_state, external_force), 1)

    def get_init_state(self, graph_map, batch_variable_map, batch_function_map, edge_feature, graph_feat, randomized, batch_replication):
        """Initial per-edge SP messages (random points on the simplex, or uniform)."""
        edge_num = graph_map.size(1) * batch_replication
        if randomized:
            variable_state = torch.rand(edge_num, self._function_message_dim, dtype=torch.float32, device=self._device)
            # Normalise each row to sum to 1.
            variable_state = variable_state / torch.sum(variable_state, 1).unsqueeze(1)
            function_state = torch.rand(edge_num, self._variable_message_dim, dtype=torch.float32, device=self._device)
            function_state[:, 1] = 0  # force channel starts unset
        else:
            variable_state = torch.ones(edge_num, self._function_message_dim, dtype=torch.float32, device=self._device) / self._function_message_dim
            function_state = 0.5 * torch.ones(edge_num, self._variable_message_dim, dtype=torch.float32, device=self._device)
            function_state[:, 1] = 0  # force channel starts unset
        return (variable_state, function_state)
###############################################################
# class MaxPoolPropagator(nn.Module):
|
import logging
import pytest
@pytest.fixture(scope='session', autouse=True)
def setup_logging():
    """Session-wide, automatic logging setup for the test run.

    Clears any handlers previously attached to the root logger, installs a
    basic INFO-level configuration, then raises the 'tests' and 'peerscout'
    loggers to DEBUG.
    """
    logging.root.handlers = []
    logging.basicConfig(level='INFO')
    for verbose_logger in ('tests', 'peerscout'):
        logging.getLogger(verbose_logger).setLevel('DEBUG')
|
from js9 import j
import sys
import capnp
from collections import OrderedDict
import capnp
from .ModelBase import ModelBase
from .ModelBase import ModelBaseWithData
from .ModelBase import ModelBaseCollection
JSBASE = j.application.jsbase_get_class()
class Tools(JSBASE):
    """Small helpers shared by the capnp tooling."""

    def __init__(self):
        JSBASE.__init__(self)

    def listInDictCreation(self, listInDict, name, manipulateDef=None):
        """
        Normalize ``listInDict[name]`` into a list, in place.

        - if the value is already a list, each item is converted with
          ``manipulateDef`` (default: ``str(item).strip()``)
        - otherwise, a string like ``'a,b,c'`` is split on commas (empty
          parts dropped); any other scalar becomes a one-element list

        @param manipulateDef: optional converter applied to every element
            (e.g. ``int``); when None elements are stringified
        @return: the (mutated) ``listInDict``
        """
        if name not in listInDict:
            return listInDict
        value = listInDict[name]
        if j.data.types.list.check(value):
            convert = (lambda item: str(item).strip()) if manipulateDef is None else manipulateDef
            listInDict[name] = [convert(item) for item in value]
        elif manipulateDef is not None:
            listInDict[name] = [manipulateDef(value)]
        elif "," in str(value):
            listInDict[name] = [part.strip() for part in value.split(",") if part.strip() != ""]
        else:
            listInDict[name] = [str(value)]
        return listInDict
class Capnp(JSBASE):
    """
    Capnp schema tooling.

    Parses capnp schema text, caches the parsed schemas both in memory
    (keyed by schema id) and on disk under $VARDIR/capnp, and builds capnp
    message objects and model collections from them.
    """

    def __init__(self):
        self.__jslocation__ = "j.data.capnp"
        self.__imports__ = "pycapnp"
        self._schema_cache = {}
        # Schema text is persisted here so capnp's parser can load it from a file.
        self._capnpVarDir = j.sal.fs.joinPaths(j.dirs.VARDIR, "capnp")
        j.sal.fs.createDir(self._capnpVarDir)
        if self._capnpVarDir not in sys.path:
            sys.path.append(self._capnpVarDir)
        self.tools = Tools()
        JSBASE.__init__(self)

    def getModelBaseClass(self):
        """Return the plain model base class."""
        return ModelBase

    def getModelBaseClassWithData(self):
        """Return the model base class that carries capnp data."""
        return ModelBaseWithData

    def getModelBaseClassCollection(self):
        """Return the model collection base class."""
        return ModelBaseCollection

    def getModelCollection(self, schema, category, namespace=None, modelBaseClass=None,
                           modelBaseCollectionClass=None, db=None, indexDb=None):
        """
        Build a model collection for *schema*.

        @param schema is capnp_schema
        example to use:
        ```
        #if we use a modelBaseClass do something like
        ModelBaseWithData = j.data.capnp.getModelBaseClass()
        class MyModelBase(ModelBaseWithData):
            def index(self):
                # put indexes in db as specified
                ind = "%s" % (self.dbobj.path)
                self._index.index({ind: self.key})
        import capnp
        #there is model.capnp in $libdir/JumpScale/tools/issuemanager
        from JumpScale9.tools.issuemanager import model as ModelCapnp
        mydb=j.data.kvs.getMemoryStore(name="mymemdb")
        collection=j.data.capnp.getModelCollection(schema=ModelCapnp,category="issue",modelBaseClass=MyModelBase,db=mydb)
        ```
        """
        if modelBaseCollectionClass is None:
            modelBaseCollectionClass = ModelBaseCollection
        return modelBaseCollectionClass(schema=schema, category=category, namespace=namespace,
                                        db=db, indexDb=indexDb, modelBaseClass=modelBaseClass)

    def getId(self, schemaInText):
        """Extract the capnp id from the schema text.

        Assumes the first non-empty line is the id declaration, e.g.
        '@0x93c1ac9f09464fc9;' -> '93c1ac9f09464fc9' (strips '@0x' and ';').
        """
        first_line = [item for item in schemaInText.split("\n") if item.strip() != ""][0]
        # renamed from 'id' to avoid shadowing the builtin
        schema_id = first_line[3:-1]
        return schema_id

    def removeFromCache(self, schemaId):
        """Drop the in-memory cached schema (no-op when absent)."""
        self._schema_cache.pop(schemaId, None)

    def resetSchema(self, schemaId):
        """Drop the cached schema from memory AND remove its file on disk."""
        self._schema_cache.pop(schemaId, None)
        nameOnFS = "schema_%s.capnp" % (schemaId)
        path = j.sal.fs.joinPaths(self._capnpVarDir, nameOnFS)
        if j.sal.fs.exists(path):
            j.sal.fs.remove(path)

    def _getSchemas(self, schemaInText):
        """Parse *schemaInText* (cached by schema id) and return the loaded schema module."""
        schemaInText = j.data.text.strip(schemaInText)
        schemaInText = schemaInText.strip() + "\n"
        schemaId = self.getId(schemaInText)
        if schemaId not in self._schema_cache:
            nameOnFS = "schema_%s.capnp" % (schemaId)
            path = j.sal.fs.joinPaths(self._capnpVarDir, nameOnFS)
            # capnp parses from files, so persist the text first.
            j.sal.fs.writeFile(filename=path, contents=schemaInText, append=False)
            parser = capnp.SchemaParser()
            schema = parser.load(path)
            self._schema_cache[schemaId] = schema
        return self._schema_cache[schemaId]

    def getSchemaFromText(self, schemaInText, name="Schema"):
        """Return the struct schema called *name* from the schema text.

        Empty text yields a minimal empty 'Schema' struct with a fresh capnp id.
        """
        if not schemaInText.strip():
            schemaInText = """
            @%s;
            struct Schema {
            }
            """ % j.data.idgenerator.generateCapnpID()
        schemas = self._getSchemas(schemaInText)
        # 'name' may be a dotted path to a nested struct; eval resolves it.
        # NOTE: only call with trusted struct names (eval on untrusted input is unsafe).
        schema = eval("schemas.%s" % name)
        return schema

    def getSchemaFromPath(self, path, name):
        """
        @param path is path to schema
        """
        content = j.sal.fs.fileGetContents(path)
        return self.getSchemaFromText(schemaInText=content, name=name)

    def _ensure_dict(self, args):
        """
        make sure the argument schema are of the type dict
        capnp doesn't handle building a message with OrderedDict properly
        """
        if isinstance(args, OrderedDict):
            args = dict(args)
            for k, v in args.items():
                args[k] = self._ensure_dict(v)
        if isinstance(args, list):
            # FIX: convert elements in place (was insert()+pop(), which did the
            # same thing with an extra O(n) shift per element).
            for i, v in enumerate(args):
                args[i] = self._ensure_dict(v)
        return args

    def _raiseInputError(self, e, args, schemaInText, headline):
        """Build and raise a readable Input exception from a capnp construction error."""
        msg = headline
        msg += "arguments:\n%s\n" % j.data.serializer.json.dumps(args, sort_keys=True, indent=True)
        msg += "schema:\n%s" % schemaInText
        ee = str(e).split("stack:")[0]
        ee = ee.split("failed:")[1]
        msg += "capnperror:%s" % ee
        self.logger.debug(msg)
        raise j.exceptions.Input(message=msg, level=1, source="", tags="", msgpub="")

    def getObj(self, schemaInText, name="Schema", args=None, binaryData=None):
        """
        @PARAM schemaInText is capnp schema
        @PARAM name is the name of the obj in the schema e.g. Issue
        @PARAM args are the starting data for the obj, normally a dict
        @PARAM binaryData if given, the binary data to create the obj from;
               cannot be used together with args (it's one or the other)
        """
        # FIX: default was a mutable dict (args={}); use the None sentinel instead.
        if args is None:
            args = {}
        # . are removed from . to Uppercase
        args = args.copy()  # to not change the args passed in argument
        for key in list(args.keys()):
            sanitize_key = j.data.text.sanitize_key(key)
            if key != sanitize_key:
                args[sanitize_key] = args[key]
                args.pop(key)
        schema = self.getSchemaFromText(schemaInText, name=name)
        if binaryData is not None and binaryData != b'':
            obj = schema.from_bytes_packed(binaryData).as_builder()
        else:
            try:
                args = self._ensure_dict(args)
                obj = schema.new_message(**args)
            except Exception as e:
                # Translate the two known capnp construction failures into Input errors.
                if "has no such member" in str(e):
                    self._raiseInputError(
                        e, args, schemaInText,
                        "cannot create data for schema from arguments, property missing\n")
                if "Value type mismatch" in str(e):
                    self._raiseInputError(
                        e, args, schemaInText,
                        "cannot create data for schema from arguments, value type mismatch.\n")
                raise e
        return obj

    def test(self):
        '''
        js9 'j.data.capnp.test()'
        '''
        import time
        capnpschema = '''
        @0x93c1ac9f09464fc9;
        struct Issue {
          state @0 :State;
          enum State {
            new @0;
            ok @1;
            error @2;
            disabled @3;
          }
          #name of actor e.g. node.ssh (role is the first part of it)
          name @1 :Text;
        }
        '''
        # dummy test, not used later
        obj = self.getObj(capnpschema, name="Issue")
        obj.state = "ok"
        # now we just get the capnp schema for this object
        schema = self.getSchemaFromText(capnpschema, name="Issue")
        # mydb = j.data.kvs.getRedisStore(name="mymemdb")
        mydb = None  # is memory
        collection = self.getModelCollection(schema, category="test", modelBaseClass=None, db=mydb)
        start = time.time()
        self.logger.debug("start populate 100.000 records")
        collection.logger.disabled = True  # silence per-record logging during the bulk insert
        for i in range(100000):
            obj = collection.new()
            obj.dbobj.name = "test%s" % i
            obj.save()
        self.logger.debug("population done")
        end_populate = time.time()
        collection.logger.disabled = False
        self.logger.debug(collection.find(name="test839"))
        end_find = time.time()
        self.logger.debug("population in %.2fs" % (end_populate - start))
        self.logger.debug("find in %.2fs" % (end_find - end_populate))
        # interactive inspection shell (manual test only)
        from IPython import embed;embed(colors='Linux')

    def testWithRedis(self):
        """Manual test against a local redis unix socket (requires redis in TMPDIR)."""
        capnpschema = '''
        @0x93c1ac9f09464fc9;
        struct Issue {
          state @0 :State;
          enum State {
            new @0;
            ok @1;
            error @2;
            disabled @3;
          }
          #name of actor e.g. node.ssh (role is the first part of it)
          name @1 :Text;
          tlist @2: List(Text);
          olist @3: List(Issue2);
          struct Issue2 {
            state @0 :State;
            enum State {
              new @0;
              ok @1;
              error @2;
              disabled @3;
            }
            text @1: Text;
          }
        }
        '''
        # mydb = j.data.kvs.getRedisStore("test")
        mydb = j.data.kvs.getRedisStore(name="test", unixsocket="%s/redis.sock" % j.dirs.TMPDIR)
        schema = self.getSchemaFromText(capnpschema, name="Issue")
        collection = self.getModelCollection(schema, category="test", modelBaseClass=None, db=mydb, indexDb=mydb)
        for i in range(100):
            obj = collection.new()
            obj.dbobj.name = "test%s" % i
            obj.save()
        self.logger.debug(collection.list())
        # exercise the generated sub-item constructors/accessors
        subobj = collection.list_olist_constructor(state="new", text="something")
        obj.addSubItem("olist", subobj)
        subobj = collection.list_tlist_constructor("sometext")
        obj.addSubItem(name="tlist", data=subobj)
        obj.addSubItem(name="tlist", data="sometext2")
        self.logger.debug(obj)
        obj.initSubItem("tlist")
        assert len(obj.list_tlist) == 2
        obj.addSubItem(name="tlist", data="sometext3")
        assert len(obj.list_tlist) == 3
        obj.reSerialize()

    def getJSON(self, obj):
        """Serialize a capnp object to pretty-printed, key-sorted JSON."""
        configdata2 = obj.to_dict()
        ddict2 = OrderedDict(configdata2)
        return j.data.serializer.json.dumps(ddict2, sort_keys=True, indent=True)

    def getBinaryData(self, obj):
        """Serialize a capnp object to packed binary bytes."""
        return obj.to_bytes_packed()

    # def getMemoryObj(self, schema, *args, **kwargs):
    #     """
    #     creates an object similar as a capnp message but without the constraint of the capnpn on the type and list.
    #     Use this to store capnp object in memory instead of using directly capnp object, BUT BE AWARE THIS WILL TAKE MUCH MORE MEMORY
    #     It will be converted in capnp message when saved
    #     """
    #     msg = schema.new_message(**kwargs)
    #     obj = MemoryObject(msg.to_dict(verbose=True), schema=schema)
    #     return obj
|
import wx
import wx.grid as gridlib
from wx.lib import masked
from wx.grid import GridCellNumberEditor
import wx.lib.buttons
import math
import Model
import Utils
from ReorderableGrid import ReorderableGrid
from HighPrecisionTimeEdit import HighPrecisionTimeEdit
from PhotoFinish import TakePhoto
from SendPhotoRequests import SendRenameRequests
import OutputStreamer
def formatTime( secs ):
    """Render *secs* via Utils.formatTime at full precision with forced two-digit hours."""
    return Utils.formatTime(secs, highPrecision=True, extraPrecision=True, forceHours=True, twoDigitHours=True)
def StrToSeconds( tStr ):
    """Parse a time string to seconds, compensating for float truncation.

    If adding a tenth of a millisecond would tip the value over to the next
    millisecond, nudge it up so the last decimal digit is not lost.
    """
    seconds = Utils.StrToSeconds( tStr )
    bumped = seconds + 0.0001
    if int(bumped * 1000.0) == int(seconds * 1000.0) + 1:
        seconds = bumped
    return seconds
class HighPrecisionTimeEditor(gridlib.GridCellEditor):
    """wx.grid cell editor backed by a HighPrecisionTimeEdit control.

    Edits times as 'HH:MM:SS.mmm'; an all-zero value is written back as the
    empty string.
    """
    Empty = '00:00:00.000'  # sentinel meaning "no time entered"

    def __init__(self):
        self._tc = None  # the HighPrecisionTimeEdit control, created lazily in Create()
        self.startValue = self.Empty
        super().__init__()

    def Create( self, parent, id = wx.ID_ANY, evtHandler = None ):
        # Called by the grid to build the actual edit control.
        self._tc = HighPrecisionTimeEdit(parent, id, allow_none = False, style = wx.TE_PROCESS_ENTER)
        self.SetControl( self._tc )
        if evtHandler:
            self._tc.PushEventHandler( evtHandler )

    def SetSize( self, rect ):
        # +2 padding so the control covers the cell border.
        self._tc.SetSize(rect.x, rect.y, rect.width+2, rect.height+2, wx.SIZE_ALLOW_MINUS_ONE )

    def BeginEdit( self, row, col, grid ):
        # Snapshot the cell value so Reset()/EndEdit can compare against it.
        self.startValue = grid.GetTable().GetValue(row, col).strip()
        self._tc.SetValue( self.startValue )
        self._tc.SetFocus()

    def EndEdit( self, row, col, grid, value = None ):
        # NOTE(review): writes the value into the table here and returns None;
        # the wx Phoenix convention is to return the changed value and apply it
        # in ApplyEdit - confirm this works with the grid version in use.
        changed = False
        val = self._tc.GetValue()
        if val != self.startValue:
            if val == self.Empty:
                val = ''  # an all-zero time means "cleared"
            changed = True
            grid.GetTable().SetValue( row, col, val )
        self.startValue = self.Empty
        self._tc.SetValue( self.startValue )

    def Reset( self ):
        # Restore the value captured in BeginEdit (edit cancelled).
        self._tc.SetValue( self.startValue )

    def Clone( self ):
        return HighPrecisionTimeEditor()
class TimeTrialRecord( wx.Panel ):
    """Panel for recording time-trial finishes.

    Workflow: tap the button (or press 't') to capture a race time into the
    grid, type the bib number next to it, then Save ('s') to commit rows that
    have both time and bib; Cleanup ('c') compacts the grid to complete rows.
    """

    def __init__( self, parent, controller, id = wx.ID_ANY ):
        super().__init__(parent, id)
        self.SetBackgroundColour( wx.WHITE )
        self.controller = controller
        # Two columns: captured time and bib number (labels via gettext `_`).
        self.headerNames = (' {} '.format(_('Time')), ' {} '.format(_('Bib')))

        fontSize = 18
        self.font = wx.Font( (0,fontSize), wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL )
        self.bigFont = wx.Font( (0,int(fontSize*1.30)), wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL )

        self.vbs = wx.BoxSizer(wx.VERTICAL)

        # ThemedGenButton looks right on Mac; plain Button elsewhere.
        self.recordTimeButton = (wx.lib.buttons.ThemedGenButton if 'WXMAC' in wx.Platform else wx.Button)( self, label=_('Tap for Time') )
        # Off Mac, bind mouse-down rather than click so the timestamp is taken on press.
        self.recordTimeButton.Bind( wx.EVT_BUTTON if 'WXMAC' in wx.Platform else wx.EVT_LEFT_DOWN, self.doRecordTime )
        self.recordTimeButton.SetFont( self.bigFont )
        self.recordTimeButton.SetToolTip( wx.ToolTip('\n'.join([
            _('Tap to get a Time, or press "t".'),
            _('Then enter the Bib number(s) and Save.')
        ])) )

        tapExplain = wx.StaticText( self, label=_('(or press "t")') )
        tapExplain.SetFont( self.font )

        hbs = wx.BoxSizer( wx.HORIZONTAL )
        hbs.Add( self.recordTimeButton, 0 )
        hbs.Add( tapExplain, flag=wx.ALIGN_CENTRE_VERTICAL|wx.LEFT, border=20 )

        # Grid holding the time/bib rows; fixed layout, no user resizing.
        self.grid = ReorderableGrid( self, style = wx.BORDER_SUNKEN )
        self.grid.SetFont( self.font )
        self.grid.EnableReorderRows( False )
        self.grid.DisableDragColSize()
        self.grid.DisableDragRowSize()

        # Size the row-label column to fit a 3-digit row number.
        dc = wx.WindowDC( self.grid )
        dc.SetFont( self.font )
        width, height = dc.GetTextExtent(' 999 ')
        self.rowLabelSize = width
        self.grid.SetRowLabelSize( self.rowLabelSize )

        self.grid.CreateGrid( 0, len(self.headerNames) )
        self.grid.Bind( gridlib.EVT_GRID_LABEL_LEFT_CLICK, self.doClickLabel )
        for col, name in enumerate(self.headerNames):
            self.grid.SetColLabelValue( col, name )
        self.grid.SetLabelFont( self.font )
        # Column 0 edits as a high-precision time, column 1 as a number.
        for col in range(self.grid.GetNumberCols()):
            attr = gridlib.GridCellAttr()
            attr.SetFont( self.font )
            if col == 0:
                attr.SetEditor( HighPrecisionTimeEditor() )
            elif col == 1:
                attr.SetRenderer( gridlib.GridCellNumberRenderer() )
                attr.SetEditor( GridCellNumberEditor() )
            self.grid.SetColAttr( col, attr )

        self.saveButton = (wx.lib.buttons.ThemedGenButton if 'WXMAC' in wx.Platform else wx.Button)( self, label=_('Save') )
        self.saveButton.Bind( wx.EVT_BUTTON, self.doSave )
        self.saveButton.SetFont( self.bigFont )
        self.saveButton.SetToolTip(wx.ToolTip(_('Save Entries (or press "s")')))

        saveExplain = wx.StaticText( self, label=_('(or press "s")') )
        saveExplain.SetFont( self.font )

        self.cleanupButton = (wx.lib.buttons.ThemedGenButton if 'WXMAC' in wx.Platform else wx.Button)( self, label=_('Cleanup') )
        self.cleanupButton.Bind( wx.EVT_BUTTON, self.doCleanup )
        self.cleanupButton.SetFont( self.bigFont )
        self.cleanupButton.SetToolTip(wx.ToolTip(_('Cleanup Empty Entries (or press "c")')))

        hbsCommit = wx.BoxSizer( wx.HORIZONTAL )
        hbsCommit.Add( self.saveButton, 0 )
        hbsCommit.Add( saveExplain, flag=wx.ALIGN_CENTRE_VERTICAL|wx.RIGHT, border=20 )
        hbsCommit.Add( self.cleanupButton, 0 )

        self.vbs.Add( hbs, 0, flag=wx.ALL, border = 4 )
        self.vbs.Add( self.grid, 1, flag=wx.ALL|wx.EXPAND, border = 4 )
        self.vbs.Add( hbsCommit, 0, flag=wx.ALL|wx.ALIGN_RIGHT, border = 4 )

        # Keyboard accelerators: t = record time, s = save, c = cleanup.
        idRecordAcceleratorId, idSaveAccelleratorId, idCleanupAccelleratorId = wx.NewIdRef(), wx.NewIdRef(), wx.NewIdRef()
        self.Bind(wx.EVT_MENU, self.doRecordTime, id=idRecordAcceleratorId)
        self.Bind(wx.EVT_MENU, self.doSave, id=idSaveAccelleratorId)
        self.Bind(wx.EVT_MENU, self.doCleanup, id=idCleanupAccelleratorId)
        accel_tbl = wx.AcceleratorTable([
            (wx.ACCEL_NORMAL, ord('T'), idRecordAcceleratorId),
            (wx.ACCEL_NORMAL, ord('S'), idSaveAccelleratorId),
            (wx.ACCEL_NORMAL, ord('C'), idCleanupAccelleratorId),
        ])
        self.SetAcceleratorTable(accel_tbl)

        self.SetSizer(self.vbs)
        self.Fit()

    def doClickLabel( self, event ):
        # Clicking the Time column header also records a time.
        if event.GetCol() == 0:
            self.doRecordTime( event )

    def doRecordTime( self, event ):
        """Capture the current race time into a new grid row."""
        race = Model.race
        if not race:
            return
        t = race.curRaceTime()

        # Trigger the camera.
        if race.enableUSBCamera:
            race.photoCount += TakePhoto( 0, StrToSeconds(formatTime(t)) )

        # Grow the table to accomodate the next entry.
        with gridlib.GridUpdateLocker(self.grid) as gridLocker:
            Utils.AdjustGridSize( self.grid, rowsRequired=self.grid.GetNumberRows()+1 )
            self.grid.SetCellValue( self.grid.GetNumberRows()-1, 0, formatTime(t) )
            self.grid.AutoSize()

        # Set the edit cursor at the first empty bib position.
        for row in range(self.grid.GetNumberRows()):
            text = self.grid.GetCellValue(row, 1)
            if not text or text == '0':
                self.grid.SetGridCursor( row, 1 )
                break

    def getTimesBibs( self ):
        """Split grid rows into (time, bib) pairs and times still missing a bib.

        Returns (timesBibs, timesNoBibs); rows without a time are ignored.
        """
        self.grid.SetGridCursor( 0, 0, )	# Forces current edit cell to commit.

        # Find the last row without a time.
        timesBibs, timesNoBibs = [], []
        for row in range(self.grid.GetNumberRows()):
            tStr = self.grid.GetCellValue(row, 0).strip()
            if not tStr:
                continue
            bib = self.grid.GetCellValue(row, 1).strip()
            try:
                bib = int(bib)
            except (TypeError, ValueError):
                bib = 0  # unparseable bib counts as missing
            if bib:
                timesBibs.append( (tStr, bib) )
            else:
                timesNoBibs.append( tStr )
        return timesBibs, timesNoBibs

    def doSave( self, event ):
        """Commit complete (time, bib) rows to the race; keep bib-less times in the grid."""
        timesBibs, timesNoBibs = self.getTimesBibs()
        if timesBibs and Model.race:
            with Model.LockRace() as race:
                bibRaceSeconds = []
                for tStr, bib in timesBibs:
                    raceSeconds = StrToSeconds(tStr)
                    race.addTime( bib, raceSeconds )
                    OutputStreamer.writeNumTime( bib, raceSeconds )
                    bibRaceSeconds.append( (bib, raceSeconds) )
            wx.CallAfter( Utils.refresh )

        # Rebuild the grid with only the times that still need a bib.
        with gridlib.GridUpdateLocker(self.grid) as gridLocker:
            for row, tStr in enumerate(timesNoBibs):
                self.grid.SetCellValue(row, 0, tStr )
                self.grid.SetCellValue(row, 1, '' )
            Utils.AdjustGridSize( self.grid, rowsRequired=len(timesNoBibs) )
        if timesNoBibs:
            self.grid.SetGridCursor( 0, 1, )

    def doCleanup( self, event ):
        """Compact the grid to only the rows that have both a time and a bib."""
        timesBibs, timesNoBibs = self.getTimesBibs()
        with gridlib.GridUpdateLocker(self.grid) as gridLocker:
            for row, (tStr, bib) in enumerate(timesBibs):
                self.grid.SetCellValue(row, 0, tStr )
                self.grid.SetCellValue(row, 1, str(bib) )
            Utils.AdjustGridSize( self.grid, rowsRequired=len(timesBibs) )
        if timesBibs:
            self.grid.SetGridCursor( len(timesBibs)-1, 1 )

    def refresh( self ):
        """Resize the grid for the current race state (empty when not running)."""
        race = Model.race
        if not race or not race.isRunning():
            Utils.AdjustGridSize( self.grid, rowsRequired=0 )
            return

        self.grid.AutoSizeRows()

        # Fix the column widths to fit the widest expected values.
        dc = wx.WindowDC( self.grid )
        dc.SetFont( self.font )
        widthTotal = self.rowLabelSize
        width, height = dc.GetTextExtent(" 00:00:00.000 ")
        self.grid.SetColSize( 0, width )
        widthTotal += width
        width, height = dc.GetTextExtent(" 9999 ")
        self.grid.SetColSize( 1, width )
        widthTotal += width

        scrollBarWidth = 48  # leave room for a vertical scrollbar
        self.grid.SetSize( (widthTotal + scrollBarWidth, -1) )
        self.GetSizer().SetMinSize( (widthTotal + scrollBarWidth, -1) )
        self.grid.ForceRefresh()

        self.GetSizer().Layout()
        wx.CallAfter( self.recordTimeButton.SetFocus )

    def commit( self ):
        # Nothing to commit automatically; saving is explicit via doSave.
        pass
if __name__ == '__main__':
    # Manual test harness: run this module directly to exercise the panel
    # against a simulated race.
    Utils.disable_stdout_buffering()
    app = wx.App(False)
    mainWin = wx.Frame(None,title="CrossMan", size=(600,600))
    Model.setRace( Model.Race() )
    Model.getRace()._populate()  # fill the race with simulated data
    timeTrialRecord = TimeTrialRecord(mainWin, None)
    timeTrialRecord.refresh()
    mainWin.Show()
    app.MainLoop()
|
# Filip Jenis, kvinta B
# Exercise: "Rigorozka" - print every ordering of the words in a sentence.
from itertools import permutations

words = input("Zadaj vetu:").split(" ")
for ordering in permutations(words):
    print(" ".join(ordering))
# NOTE(review): `S` is not defined in this snippet - presumably supplied by the
# hosting integration runtime to parse `input` as a navigable JSON node; confirm.
node = S(input, "application/json")
# Child nodes for two top-level collections of the payload.
childNode1 = node.prop("customers")
childNode2 = node.prop("orderDetails")
# NOTE: `list` shadows the Python builtin; kept as-is because these are
# module-level names that downstream code may reference.
list = childNode1.elements()
customerNode = list.get(0)  # first customer element
property1 = node.prop("order")
property2 = customerNode.prop("name")
property3 = childNode2.prop("article")
# Extract the string values for downstream use.
value1 = property1.stringValue()
value2 = property2.stringValue()
value3 = property3.stringValue()
|
#!/usr/bin/python3
import requests, argparse, sys
from configparser import ConfigParser
requests.packages.urllib3.disable_warnings()
# Read the API token and endpoint URL from the shared EDC config file.
config = ConfigParser()
config.read('/etc/config.edc')
token = config.get('auth', 'token')
url = config.get('instance', 'curl')
# Token-based auth header sent with the request.
headers = {'Authorization': 'Token {}'.format(token)}

parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description='''\
Example:
cred -u cmduser4 -p cmdpass4 -n hashgoeshere -f james -l tubb -r users -d keyboarder'''
)
parser.add_argument("-u","--creduser", default="", help="(Required) enter username")
parser.add_argument("-p","--credpass", default="", help="enter password")
parser.add_argument("-n","--credhash", default="", help="enter hash")
parser.add_argument("-f","--credfirst", default="", help="enter first name")
parser.add_argument("-l","--credlast", default="", help="enter last name")
parser.add_argument("-r","--credrole", default="", help="enter a role")
parser.add_argument("-d","--creddesc", default="", help="enter a description")
args = parser.parse_args()

# Username is mandatory: show usage and exit (print_help() returns None,
# so this exits with status 0).
if not args.creduser:
    sys.exit(parser.print_help())

data = {"username":args.creduser,"passwd":args.credpass,"hashw":args.credhash,"first":args.credfirst,"last":args.credlast,"role":args.credrole,"description":args.creddesc}
# NOTE(review): verify=False disables TLS certificate verification, and the
# response status is ignored - confirm both are intentional for this tool.
requests.post(url,data=data,headers=headers,verify=False)
|
"""
Just find file. Needs to be independent to avoid circular imports
"""
import os
def find_file(file_name: str, executing_file: str) -> str:
    """
    Create/find a valid file name relative to a source file, e.g.
    find_file("foo/bar.txt", __file__)

    Backslashes are normalized to forward slashes so the result is
    uniform across platforms.
    """
    base_dir = os.path.dirname(os.path.abspath(executing_file))
    return os.path.join(base_dir, file_name).replace("\\", "/")
|
#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2015 Corrado Ubezio
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''Extend Python Imaging Library (PIL) wrapper
According to Fredrik Lundh, the author of PIL, the Image class isn't designed
to be subclassed by application code.
If you want custom behaviour, use a delegating wrapper.
See: https://stackoverflow.com/a/5165352
https://mail.python.org/pipermail/image-sig/2006-March/003832.html
http://effbot.org/pyfaq/what-is-delegation.htm
'''
from __future__ import print_function
from PIL import Image
class ExtendedImage(object):
    """Delegating wrapper around PIL.Image.

    PIL's Image class is not designed to be subclassed, so this wraps an
    Image instance and forwards attribute access to it.
    """

    def __init__(self, imageFileName):
        '''Opens and identifies the given image file'''
        # FIX: Image.VERSION was removed in Pillow 7; prefer it when present
        # (old behavior), otherwise fall back to the modern __version__.
        self.__VERSION__ = 'PIL ' + getattr(Image, 'VERSION', getattr(Image, '__version__', 'unknown'))
        self._img = Image.open(imageFileName)

    def __getattr__(self, key):
        '''Delegate (almost) everything to self._img'''
        if key == '_img':
            # Guard against infinite recursion before _img is set; see
            # http://nedbatchelder.com/blog/201010/surprising_getattr_recursion.html
            raise AttributeError()
        return getattr(self._img, key)

    def greyscale(self):
        '''Convert to greyscale (mode 'L'), in place; returns self for chaining.'''
        self._img = self._img.convert(mode='L')  # <-- ExtendedImage delegates to self._img
        return self
def convert2greyscale(src_image_file):
    '''Make a greyscale copy of the source image_file.

    The copy is written next to the source with a '_grey' suffix before the
    extension (e.g. photo.jpg -> photo_grey.jpg).
    '''
    # FIX: `os` was only imported inside the __main__ guard, so calling this
    # function from an importing module raised NameError; import it locally.
    import os
    src_image_name, image_extension = os.path.splitext(src_image_file)
    dst_image_file = src_image_name + '_grey' + image_extension
    print('Save greyscale image to:\n%s' % dst_image_file)
    gsimg = ExtendedImage(src_image_file).greyscale()
    gsimg.save(dst_image_file)
if __name__ == "__main__":
    # Smoke test for ExtendedImage: greyscale every file named on the command line.
    import os
    import sys
    for image_path in sys.argv[1:]:
        convert2greyscale(image_path)
|
#!/usr/bin/env python3
"""
Module for interacting with the database
"""
# -*- encoding: utf-8 -*-
#============================================================================================
# Imports
#============================================================================================
# Standard imports
# Third-party imports
import cx_Oracle
from flask import make_response, jsonify
from pydb.database import DatabaseType
from pydb.oracle_db import OracleDB
from pydb.sql_alchemy_oracle_db import SqlAlchemyOracleDB
# Local imports
from flask_utils.config_util import CONFIG_UTIL
from flask_utils.logger_util import get_common_logger
from pylog.pylog import get_common_logging_format
logger = get_common_logger(__name__)
class DBUtil:
    """
    Class for interacting with the database.

    Wraps a pydb database object (Oracle or SQLAlchemy-Oracle) chosen in
    setup(); all query/update calls are delegated to it.
    """

    def __init__(self):
        # The underlying pydb database object; None until setup() is called.
        self._db = None

    def setup(self, db_type: DatabaseType = DatabaseType.ORACLE):
        """Create the underlying DB object from CONFIG_UTIL's db settings."""
        db_config = CONFIG_UTIL.db_config
        if db_type == DatabaseType.ORACLE:
            self._db = OracleDB(host=db_config['host'],
                                port=db_config['port'],
                                service=db_config['dbname'],
                                user=db_config['username'],
                                pwd=db_config['password'],
                                logging_level=logger.level,
                                logging_format=get_common_logging_format())
        elif db_type == DatabaseType.SQL_ALCHEMY_ORACLE:
            self._db = SqlAlchemyOracleDB(host=db_config['host'],
                                          port=db_config['port'],
                                          service=db_config['dbname'],
                                          user=db_config['username'],
                                          pwd=db_config['password'],
                                          logging_level=logger.level,
                                          logging_format=get_common_logging_format())
        else:
            self._db = None

    @staticmethod
    def _log_db_error(err):
        """Log a database error.

        cx_Oracle packs a single error object (with .context/.message) into
        err.args; fall back to logging the exception itself for anything else
        so the unpack cannot raise and mask the original error.
        """
        if len(err.args) == 1 and hasattr(err.args[0], 'context'):
            obj = err.args[0]
            logger.error("Context: %s", obj.context)
            logger.error("Message: %s", obj.message)
        else:
            logger.error("Database error: %s", err)

    def execute_query(self, query_string, args=None):
        """
        Function for executing a query against the database via the session pool.

        Returns the query result, or a 500 flask response on failure.
        """
        try:
            return self._db.execute_query(query_string, args)
        except Exception as err:
            self._log_db_error(err)
            return make_response(jsonify(
                message=str("Unable to execute query against the database")
            ), 500)

    def execute_update(self, query_string, args=None):
        """
        Function for executing an insert/update query against the database via the session pool.

        Re-raises on failure (as Exception, preserving the cause chain).
        """
        try:
            self._db.execute_update(query_string=query_string, args=args)
        except Exception as err:
            self._log_db_error(err)
            # Keep the Exception type callers expect, but chain the cause.
            raise Exception(err) from err

    def health_check(self):
        """
        provides a means to verify connectivity with a simple query
        :return:
        """
        return self._db.health_check()

    def cleanup(self):
        """Release the underlying DB resources, if any."""
        if self._db is not None:
            self._db.cleanup()

    def create_connection(self):
        """Return a new raw connection (None when setup() has not run)."""
        if self._db is not None:
            return self._db.create_connection()

    def get_session(self):
        """Return a session from the underlying DB (None when setup() has not run)."""
        if self._db is not None:
            return self._db.get_session()


# Module-level singleton used by the rest of the application.
DB_UTIL = DBUtil()
|
#!/usr/bin/env python3
"""This is a nice simple server that we could be using if there were any JS
ZLIB libraries that actually worked (or the browsers exposed the ones they
have built in).
"""
import http.server
from http.server import HTTPServer, BaseHTTPRequestHandler
import socketserver
PORT = 8000  # local port to serve on
Handler = http.server.SimpleHTTPRequestHandler
# NOTE(review): this REPLACES the handler's entire class-level MIME map, so
# any extension not listed below falls back to the '' entry (octet-stream).
Handler.extensions_map={
    '.css':	'text/css',
    '.glsl': 'text/plain',
    '.html': 'text/html',
    '.jpg': 'image/jpg',
    '.js':	'application/x-javascript',
    '.manifest': 'text/cache-manifest',
    '.png': 'image/png',
    '.svg':	'image/svg+xml',
    '': 'application/octet-stream', # Default
    }
# Bind to all interfaces and serve the current directory.
httpd = socketserver.TCPServer(("", PORT), Handler)
print("serving at port", PORT)
httpd.serve_forever()  # blocks until interrupted (Ctrl-C)
|
# Different ways to define a small function.

# way 1: a regular def
def squre(num):
    return num * num

print(squre(5))

# way 2: a lambda bound to a name
squre2 = lambda num: num * num
print(squre2(5))
print("--------------")

# another lambda example: two parameters
sumNumbers = lambda first, second: first + second
print(sumNumbers(5, 10))
print("---------------")

# A lambda DOES have __name__, but it is always '<lambda>' -
# only `def` gives a function its own name.
print(squre.__name__)
print(squre2.__name__)
|
#!/usr/bin/env python
import time
import six
from flyteidl.core.errors_pb2 import ErrorDocument
from flyteidl.core.literals_pb2 import LiteralMap
from flytekit.clients import helpers as _helpers
from flytekit.clients.friendly import SynchronousFlyteClient
from flytekit.clis.sdk_in_container.pyflyte import update_configuration_file
from flytekit.common.core.identifier import WorkflowExecutionIdentifier as _WorkflowExecutionIdentifier
from flytekit.common.utils import load_proto_from_file
from flytekit.configuration.platform import URL, INSECURE
from flytekit.interfaces.data.s3.s3proxy import AwsS3Proxy
from flytekit.models.literals import LiteralMap as SdkLiteralMap
from flytekit.models.core.execution import WorkflowExecutionPhase as _WorkflowExecutionPhase
# Flyte project/domain the end-to-end executions run under.
PROJECT = 'flytetester'
DOMAIN = 'development'

# These are the names of the launch plans (workflows) kicked off in the run.sh script
# This is what we'll be looking for in Admin.
EXPECTED_EXECUTIONS = [
    'app.workflows.failing_workflows.DivideByZeroWf',
    'app.workflows.work.WorkflowWithIO',
    'app.workflows.failing_workflows.RetrysWf',
    'app.workflows.failing_workflows.FailingDynamicNodeWF',
    'app.workflows.failing_workflows.RunToCompletionWF',
]

# This tells Python where admin is, and also to hit Minio instead of the real S3
# NOTE: importing this module has side effects - it rewrites the flyte config
# and opens a client connection to Admin.
update_configuration_file('end2end/end2end.config')
client = SynchronousFlyteClient(URL.get(), insecure=INSECURE.get())

# For every workflow that we test on in run.sh, have a function that can validate the execution response
# It will be supplied an execution object, a node execution list, and a task execution list
# Should return
#   - True, if the execution has completed and everything checks out
#   - False, if things have failed for whatever reason, either the execution failed, or the outputs are wrong, whatever
#   - None, if things aren't complete yet
def workflow_with_io_validator(execution, node_execution_list, task_execution_list):
    """
    Validation logic for app.workflows.work.WorkflowWithIO
    :param flytekit.models.execution.Execution execution:
    :param list[flytekit.models.node_execution.NodeExecution] node_execution_list:
    :param list[flytekit.models.admin.task_execution.TaskExecution] task_execution_list:
    :rtype: option[bool]
    """
    phase = execution.closure.phase
    if phase != _WorkflowExecutionPhase.SUCCEEDED:
        # Terminal failure phases fail the check; anything else means "try again later".
        terminal = (_WorkflowExecutionPhase.ABORTED,
                    _WorkflowExecutionPhase.FAILED,
                    _WorkflowExecutionPhase.TIMED_OUT)
        return False if phase in terminal else None

    # Check node executions
    assert len(node_execution_list) == 3  # one task, plus start/end nodes
    task_node = next((n for n in node_execution_list if n.id.node_id == 'odd-nums-task'), None)
    assert task_node is not None
    assert len(task_execution_list) > 0
    print('Done validating app-workflows-work-workflow-with-i-o!')
    return True
def failing_workflows_divide_by_zero_wf_validator(execution, node_execution_list, task_execution_list):
    """
    Validation logic for app.workflows.failing_workflows.DivideByZeroWf
    This workflow should always fail.
    :param flytekit.models.execution.Execution execution:
    :param list[flytekit.models.node_execution.NodeExecution] node_execution_list:
    :param list[flytekit.models.admin.task_execution.TaskExecution] task_execution_list:
    :rtype: option[bool]
    """
    phase = execution.closure.phase
    if phase != _WorkflowExecutionPhase.FAILED:
        # Any other terminal phase is unacceptable; otherwise keep waiting.
        terminal = (_WorkflowExecutionPhase.ABORTED,
                    _WorkflowExecutionPhase.SUCCEEDED,
                    _WorkflowExecutionPhase.TIMED_OUT)
        return False if phase in terminal else None

    # Check node executions
    assert len(node_execution_list) == 2  # one task, plus start
    node_execution = next((n for n in node_execution_list if n.id.node_id == 'div-by-zero'), None)
    assert node_execution is not None
    assert len(task_execution_list) > 0

    # Inspect the recorded error and make sure it contains what we think it should contain
    error_message = node_execution.closure.error.message
    assert 'division by zero' in error_message
    print('Done validating app-workflows-failing-workflows-divide-by-zero-wf!')
    return True
def retrys_wf_validator(execution, node_execution_list, task_execution_list):
    """
    Validation logic for app.workflows.failing_workflows.RetrysWf
    This workflow should always fail, but should run a total of three times, since the task has two retries
    :param flytekit.models.execution.Execution execution:
    :param list[flytekit.models.node_execution.NodeExecution] node_execution_list:
    :param list[flytekit.models.admin.task_execution.TaskExecution] task_execution_list:
    :rtype: option[bool]
    """
    phase = execution.closure.phase
    if phase != _WorkflowExecutionPhase.FAILED:
        # Any other terminal phase is unacceptable; otherwise keep waiting.
        terminal = (_WorkflowExecutionPhase.ABORTED,
                    _WorkflowExecutionPhase.SUCCEEDED,
                    _WorkflowExecutionPhase.TIMED_OUT)
        return False if phase in terminal else None

    # 1 original attempt + 2 retries.
    assert len(task_execution_list) == 3
    print('Done validating app.workflows.failing_workflows.RetrysWf!')
    return True
def retrys_dynamic_wf_validator(execution, node_execution_list, task_execution_list):
    """
    Validation logic for app.workflows.failing_workflows.FailingDynamicNodeWF

    This workflow should always fail, but the dynamic node should retry twice,
    yielding three task executions in total.

    :param flytekit.models.execution.Execution execution:
    :param list[flytekit.models.node_execution.NodeExecution] node_execution_list:
    :param list[flytekit.models.admin.task_execution.TaskExecution] task_execution_list:
    :rtype: option[bool]
    """
    current_phase = execution.closure.phase
    if current_phase != _WorkflowExecutionPhase.FAILED:
        # Other terminal phases are a hard failure of the test.
        if current_phase in (
            _WorkflowExecutionPhase.ABORTED,
            _WorkflowExecutionPhase.SUCCEEDED,
            _WorkflowExecutionPhase.TIMED_OUT,
        ):
            return False
        if current_phase == _WorkflowExecutionPhase.RUNNING:
            return None  # come back and check later
        # Any remaining phase is unexpected; treat it as a failure.
        return False
    print('FailingDynamicNodeWF finished with {} task(s)'.format(len(task_execution_list)))
    assert len(task_execution_list) == 3
    print('Done validating app.workflows.failing_workflows.FailingDynamicNodeWF!')
    return True
def run_to_completion_wf_validator(execution, node_execution_list, task_execution_list):
    """
    Validation logic for app.workflows.failing_workflows.RunToCompletionWF

    The workflow should always fail, but all four tasks should have been
    attempted before the failure is reported.

    :param flytekit.models.execution.Execution execution:
    :param list[flytekit.models.node_execution.NodeExecution] node_execution_list:
    :param list[flytekit.models.admin.task_execution.TaskExecution] task_execution_list:
    :rtype: option[bool]
    """
    current_phase = execution.closure.phase
    if current_phase != _WorkflowExecutionPhase.FAILED:
        terminal = (
            _WorkflowExecutionPhase.ABORTED,
            _WorkflowExecutionPhase.SUCCEEDED,
            _WorkflowExecutionPhase.TIMED_OUT,
        )
        # FAILING is an expected intermediate state for run-to-completion.
        in_progress = (
            _WorkflowExecutionPhase.RUNNING,
            _WorkflowExecutionPhase.FAILING,
        )
        if current_phase in terminal:
            return False
        if current_phase in in_progress:
            return None  # come back and check later
        print('Got unexpected phase [{}]'.format(current_phase))
        return False
    print('RunToCompletionWF finished with {} task(s)'.format(len(task_execution_list)))
    assert len(task_execution_list) == 4
    print('Done validating app.workflows.failing_workflows.RunToCompletionWF!')
    return True
# Map of launch plan name -> validator callable. Each validator receives the
# execution object, its node executions, and its task executions, and returns
# True (validated), False (failed), or None (still in progress; poll again).
validators = {
    'app.workflows.work.WorkflowWithIO': workflow_with_io_validator,
    'app.workflows.failing_workflows.DivideByZeroWf': failing_workflows_divide_by_zero_wf_validator,
    'app.workflows.failing_workflows.RetrysWf': retrys_wf_validator,
    'app.workflows.failing_workflows.FailingDynamicNodeWF': retrys_dynamic_wf_validator,
    'app.workflows.failing_workflows.RunToCompletionWF': run_to_completion_wf_validator,
}
def process_executions(execution_names):
    """
    This is the main loop of the end to end test. Basically it's an infinite loop, that only exits if everything
    has finished (either successfully or unsuccessfully), pausing for five seconds at a time.
    For each execution in the list of expected executions given in the input, it will query the Admin service for
    the execution object, all node executions, and all task executions, and hand these objects off to the validator
    function for the respective workflow.

    Exits the process with status 1 if any validator reported failure, 0 otherwise.
    :param dict[Text, Text] execution_names: map of lp names to execution name
    """
    # Launch plan names whose executions have been fully validated / failed.
    succeeded = set()
    failed = set()
    while True:
        # Every expected execution has reached a verdict -> stop polling.
        if len(succeeded) + len(failed) == len(EXPECTED_EXECUTIONS):
            print('All done verifying...')
            break
        for lp_name, execution_name in six.iteritems(execution_names):
            # Skip executions that already have a verdict.
            if lp_name in succeeded or lp_name in failed:
                continue
            # Get an updated execution object
            workflow_execution_id = _WorkflowExecutionIdentifier(project=PROJECT, domain=DOMAIN, name=execution_name)
            execution_object = client.get_execution(workflow_execution_id)
            # Get updated list of all the node executions for it
            node_executions = []
            for ne in _helpers.iterate_node_executions(client, workflow_execution_id):
                node_executions.append(ne)
            # Get an updated list of all task executions
            task_executions = []
            for n in node_executions:
                for te in _helpers.iterate_task_executions(client, n.id):
                    task_executions.append(te)
            # Send response to the appropriate handler
            # Validators return a tri-state: True (ok), False (failed),
            # None (not finished yet -- check again on the next pass).
            result = validators[lp_name](execution_object, node_executions, task_executions)
            if result is None:
                print('LP {} with execution {} still not ready...'.format(lp_name, execution_name))
            elif result:
                print('Adding {} to succeeded'.format(lp_name))
                succeeded.add(lp_name)
            else:
                print('Adding {} to failed'.format(lp_name))
                failed.add(lp_name)
        # This python script will hang forever for now - relies on the timeout implemented in the bash script to exit
        # in the failure case
        print('Sleeping...')
        time.sleep(5)
    if len(failed) > 0:
        print('Some tests failed :(')
        exit(1)
    print('All tests passed!')
    exit(0)
def get_executions():
    """
    Retrieve all relevant executions from Admin and sanity-check the listing.
    :rtype: list[flytekit.models.execution.Execution]
    """
    executions, next_token = client.list_executions_paginated(PROJECT, DOMAIN, filters=[])
    # The executions returned should correspond to the workflows launched in run.sh
    assert len(executions) == len(EXPECTED_EXECUTIONS)
    # pagination token should be an empty string, since we're running from an empty database, and we don't kick
    # off that many executions in an end-to-end test.
    assert next_token == ''
    return executions
def pair_lp_names_with_execution_ids(lp_names, executions):
    """
    Match every launch plan name with the name of the execution it spawned.

    Asserts that each launch plan has at least one non-empty execution name;
    when several executions match, the last one in `executions` wins.

    :param list[Text] lp_names:
    :param list[flytekit.models.execution.Execution] executions:
    :rtype: dict[Text, Text]
    """
    pairing = {}
    for lp_name in lp_names:
        for candidate in executions:
            if candidate.spec.launch_plan.name == lp_name:
                # the name of the execution identifier, like ff8b30386aafa4daba79
                pairing[lp_name] = candidate.id.name
        assert lp_name in pairing
        assert pairing[lp_name] != ''
    return pairing
def validate_endtoend_test():
    """Drive the end-to-end validation: fetch the executions kicked off by the
    test harness, map each expected launch plan to its execution, and poll
    until every one has been validated or failed."""
    # Get execution objects from Admin
    all_executions = get_executions()
    # For each launch plan name, find the execution id associated with it
    execution_names = pair_lp_names_with_execution_ids(EXPECTED_EXECUTIONS, all_executions)
    process_executions(execution_names)
# Script entry point: run the full end-to-end validation loop.
if __name__ == '__main__':
    validate_endtoend_test()
|
"""
Copyright (c) 2020, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import argparse
import time
import csv
import logging
import random
import numpy as np
import torch
from classifier_filter.run_filter import *
from transformers import BertConfig, BertTokenizer
from classifier_filter.modeling import BertForMultiLabelSequenceClassification
import json
class DataProcessor():
    """Load dialogue data and prepare multi-label classification examples.

    The input is a JSON file containing a list of dialogues; each dialogue has
    a "dialogue_idx" and a list of turns with "domain", "system_transcript"
    and "transcript" fields.
    """

    def __init__(self, path):
        # Turn-level records indexed by "<dialogue_idx><turn_index>".
        self.data = self.load_data(path)

    def load_data(self, path):
        """Read `path` and index every turn by dialogue id plus turn number."""
        with open(path) as handle:
            dialogues = json.load(handle)
        indexed = {}
        for dialogue in dialogues:
            history = ""
            for turn_idx, turn in enumerate(dialogue["dialogue"]):
                # Looked up for every turn; a malformed turn without "domain"
                # raises KeyError (same as the original field access).
                turn_domain = turn["domain"]
                indexed[dialogue["dialogue_idx"] + str(turn_idx)] = {
                    "text_a": history,  # dialogue history before this turn
                    "text_b": turn["system_transcript"],
                    "label_list": [],
                }
                # Extend the running history with this turn's utterances.
                history = (
                    history + " " + turn["system_transcript"] + " " + turn["transcript"]
                ).strip()
        return indexed

    def get_labels(self):
        """Return the fixed list of domain-slot labels."""
        return [
            "attraction-area",
            "attraction-name",
            "attraction-type",
            "hotel-area",
            "hotel-book day",
            "hotel-book people",
            "hotel-book stay",
            "hotel-internet",
            "hotel-name",
            "hotel-parking",
            "hotel-pricerange",
            "hotel-stars",
            "hotel-type",
            "restaurant-area",
            "restaurant-book day",
            "restaurant-book people",
            "restaurant-book time",
            "restaurant-food",
            "restaurant-name",
            "restaurant-pricerange",
            "taxi-arriveby",
            "taxi-departure",
            "taxi-destination",
            "taxi-leaveat",
            "train-arriveby",
            "train-book people",
            "train-day",
            "train-departure",
            "train-destination",
            "train-leaveat",
        ]

    def create_examples(self, dialogue_idx, turn_id, user_utters, turn_label):
        """Build one InputExample per candidate user utterance for a turn."""
        meta_info = self.data[dialogue_idx + str(turn_id)]
        examples = []
        for user_utter in user_utters:
            # Build a fresh label list per example (labels are the slot names,
            # i.e. the first element of each [slot, value] pair).
            labels = [slot_value[0] for slot_value in turn_label]
            examples.append(
                InputExample(
                    text_a=meta_info["text_a"].strip(),
                    text_b=(meta_info["text_b"] + " " + user_utter).strip(),
                    label=labels,
                )
            )
        return examples
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
    """Loads a data file into a list of `InputBatch`s.

    Each example is tokenized into up to three segments ([CLS] A [SEP] B [SEP]
    with an optional C appended to B), truncated/padded to `max_seq_length`,
    and paired with a multi-hot label vector over `label_list`.
    """
    # Map each label name to its index in the multi-hot target vector.
    label_map = {}
    for (i, label) in enumerate(label_list):
        label_map[label] = i
    features = []
    for (ex_index, example) in enumerate(examples):
        tokens_a = tokenizer.tokenize(example.text_a)
        tokens_b = None
        tokens_c = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
        # NOTE(review): text_c is never set by DataProcessor.create_examples in
        # this file; presumably InputExample (from classifier_filter.run_filter)
        # defaults it to None/empty -- confirm.
        if example.text_c:
            tokens_c = tokenizer.tokenize(example.text_c)
        if tokens_c:
            # Three segments: reserve 4 slots for [CLS] and three [SEP] tokens.
            truncate_seq_tuple(tokens_a, tokens_b, tokens_c, max_seq_length - 4)
            tokens_b = tokens_b + ["[SEP]"] + tokens_c
        elif tokens_b:
            # Two segments: reserve 3 slots for [CLS] and two [SEP] tokens.
            truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        else:
            # Single segment: reserve 2 slots for [CLS] and [SEP].
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[0:(max_seq_length - 2)]
        # Assemble the BERT input with segment ids 0 for A and 1 for B.
        tokens = []
        segment_ids = []
        tokens.append("[CLS]")
        segment_ids.append(0)
        for token in tokens_a:
            tokens.append(token)
            segment_ids.append(0)
        tokens.append("[SEP]")
        segment_ids.append(0)
        if tokens_b:
            for token in tokens_b:
                tokens.append(token)
                segment_ids.append(1)
            tokens.append("[SEP]")
            segment_ids.append(1)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)
        # Zero-pad up to the sequence length.
        while len(input_ids) < max_seq_length:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        # Multi-hot target vector: 1 at the index of every label on the example.
        label_id = len(label_map)*[0]
        for label in example.label:
            label_id[label_map[label]] = 1
        features.append(
            InputFeatures(
                input_ids=input_ids,
                input_mask=input_mask,
                segment_ids=segment_ids,
                label_id=label_id))
    return features
def convert_examples_to_tensor(examples, label_list, max_seq_length, tokenizer):
    """Featurize `examples` and stack the feature fields into torch tensors.

    Returns a 4-tuple of (input_ids, input_mask, segment_ids, label_ids).
    """
    feats = convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
    all_input_ids = torch.tensor([f.input_ids for f in feats], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in feats], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in feats], dtype=torch.long)
    # Note the extra nesting: labels come out with shape (N, 1, num_labels),
    # which is flattened again via `view(-1, num_labels)` at evaluation time.
    all_label_ids = torch.tensor([[f.label_id] for f in feats], dtype=torch.float32)
    return (all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
class BERTFilter(object):
    """Multi-label BERT classifier used to filter candidate user utterances.

    Wraps a fine-tuned BertForMultiLabelSequenceClassification model together
    with the dialogue data needed to build classification examples.
    """
    def __init__(self,data_file):
        # Dialogue turns indexed by dialogue id + turn index (see DataProcessor).
        self.processor = DataProcessor(data_file)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.label_list = self.processor.get_labels()
        bert_config = BertConfig.from_pretrained("bert-base-uncased",num_labels=len(self.label_list))
        self.max_seq_length = 512
        self.tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", do_lower_case=True)
        self.model = BertForMultiLabelSequenceClassification.from_pretrained("bert-base-uncased",config = bert_config)
        # import pdb;
        # pdb.set_trace();
        # import sys
        # Load the fine-tuned weights; the path is hard-coded relative to the
        # working directory the script is launched from.
        self.model.load_state_dict(torch.load("./classifier_filter/filter/best_model.pt", map_location='cpu'))
        self.model.to(self.device)
    def query_filter(self,dialogue_idx,turn_id,user_utters,turn_label,thresh):
        """Classify each candidate utterance for the given turn.

        Returns a list of booleans, one per entry of `user_utters`: True iff
        every label predicted above `thresh` is contained in the target labels
        derived from `turn_label` (see `evaluation`).
        """
        examples = self.processor.create_examples(dialogue_idx,turn_id,user_utters,turn_label)
        data = convert_examples_to_tensor(examples, self.label_list, self.max_seq_length, self.tokenizer)
        result = self.evaluation(data,thresh)
        # print(result)
        return result
    def evaluation(self,data,thresh):
        """Run the model on a featurized batch and compare predictions to targets."""
        self.model.eval()
        prediction_list = []
        target_list = []
        input_ids, input_mask, segment_ids, label_ids = data
        input_ids = input_ids.to(self.device)
        input_mask = input_mask.to(self.device)
        segment_ids = segment_ids.to(self.device)
        label_ids = label_ids.to(self.device)
        with torch.no_grad():
            logits = self.model(input_ids = input_ids, token_type_ids = segment_ids, attention_mask = input_mask)
            # Independent sigmoid per label (multi-label classification).
            probs = logits.sigmoid()
            prediction_list,target_list = self.acc_pred(probs, label_ids.view(-1,len(self.label_list)),self.label_list,thresh)
        result = []
        for idx in range(len(prediction_list)):
            prediction_set = set(prediction_list[idx])
            target_set = set(target_list[idx])
            # print("pred: ",prediction_set)
            # print("target: ",target_set)
            # print("*************************")
            # Accept only if no label outside the target set was predicted.
            if(prediction_set.issubset(target_set)):
                result.append(True)
            else:
                result.append(False)
        return result
    def acc_pred(self,probs,labels,label_list,thresh):
        """Convert probabilities and multi-hot targets into lists of label names.

        Returns (prediction_list, target_list), each a list with one sublist of
        label names per batch item.
        """
        batch_size = probs.size(0)
        # Threshold the per-label probabilities to get binary predictions.
        preds = (probs>thresh)
        preds = preds.cpu().numpy()
        labels = labels.byte().cpu().numpy()
        prediction_list = []
        target_list = []
        for idx in range(batch_size):
            pred = preds[idx]
            label = labels[idx]
            prediction_list.append([])
            target_list.append([])
            # NOTE: the inner loops reuse the name `idx`, shadowing the batch
            # index; harmless because the outer `idx` is not used afterwards.
            for idx,each_pred in enumerate(pred):
                if(each_pred):
                    prediction_list[-1].append(label_list[idx])
            for idx,each_label in enumerate(label):
                if(each_label):
                    target_list[-1].append(label_list[idx])
        return prediction_list,target_list
if __name__ == "__main__":
    # Smoke test: run the filter once on a hand-crafted example.
    #
    # Bug fix: BERTFilter was previously constructed with no arguments even
    # though BERTFilter.__init__ requires the dialogue data file, so the
    # script raised TypeError before doing anything. The path is now taken
    # from the command line. The original also looped forever over the same
    # example and dropped into pdb; a single pass is enough for a smoke test.
    cli = argparse.ArgumentParser(description="Run the BERT filter once on a hand-crafted example.")
    cli.add_argument("data_file", help="Path to the dialogue JSON file used to build examples.")
    cli_args = cli.parse_args()
    classifier_filter = BERTFilter(cli_args.data_file)
    dialogue_idx = "PMUL3688.json"
    turn_id = 4
    thresh=0.5
    user_utters =["that will work. i will need tickets for 3 people.", "that will work. thank you."]
    turn_label = [
        [
            "train-book people",
            "3"
        ]
    ]
    flag = classifier_filter.query_filter(dialogue_idx,turn_id,user_utters,turn_label,thresh)
    print(flag)
|
# Generated by Django 3.2.8 on 2021-11-09 20:06
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add e-filing bookkeeping fields to Case and drop EFilingSubmission."""

    dependencies = [
        ('api', '0005_case_description'),
    ]
    operations = [
        # Timestamps for when the case was last filed / printed; nullable
        # because existing rows predate these fields.
        migrations.AddField(
            model_name='case',
            name='last_filed',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='case',
            name='last_printed',
            field=models.DateTimeField(blank=True, null=True),
        ),
        # Identifiers for the filing transaction; nullable for existing rows.
        # NOTE(review): presumably these are returned by the e-filing
        # provider's API -- confirm against the application code.
        migrations.AddField(
            model_name='case',
            name='submission_id',
            field=models.CharField(max_length=100, null=True),
        ),
        migrations.AddField(
            model_name='case',
            name='transaction_id',
            field=models.CharField(max_length=100, null=True),
        ),
        # Superseded by the fields added above.
        migrations.DeleteModel(
            name='EFilingSubmission',
        ),
    ]
|
def go_to_beach():
    """Print a small resolution message to stdout."""
    message = "I will go to the beach more often"
    print(message)


go_to_beach()
|
r"""
This module implements differential operators on cylindrical grids
.. autosummary::
:nosignatures:
make_laplace
make_gradient
make_divergence
make_vector_gradient
make_vector_laplace
make_tensor_divergence
.. codeauthor:: David Zwicker <david.zwicker@ds.mpg.de>
"""
from typing import Callable
from ...tools.docstrings import fill_in_docstring
from ...tools.numba import jit_allocate_out, nb
from ..boundaries import Boundaries
from ..cylindrical import CylindricalGrid
from .common import PARALLELIZATION_THRESHOLD_2D
@CylindricalGrid.register_operator("laplace", rank_in=0, rank_out=0)
@fill_in_docstring
def make_laplace(bcs: Boundaries) -> Callable:
    """make a discretized laplace operator for a cylindrical grid
    {DESCR_CYLINDRICAL_GRID}
    Args:
        bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):
            {ARG_BOUNDARIES_INSTANCE}
    Returns:
        A function that can be applied to an array of values
    """
    assert isinstance(bcs.grid, CylindricalGrid)
    bcs.check_value_rank(0)
    boundary_r, boundary_z = bcs
    # calculate preliminary quantities
    dim_r, dim_z = bcs.grid.shape
    # inverse squared grid spacings in the radial and axial directions
    dr_2, dz_2 = 1 / bcs.grid.discretization ** 2
    # evaluator for the virtual point just beyond the outer radial boundary
    value_outer = boundary_r.high.make_virtual_point_evaluator()
    # evaluator returning (lower, center, upper) values along the axial axis
    region_z = boundary_z.make_region_evaluator()
    # use processing for large enough arrays
    parallel = dim_r * dim_z >= PARALLELIZATION_THRESHOLD_2D ** 2
    @jit_allocate_out(parallel=parallel, out_shape=(dim_r, dim_z))
    def laplace(arr, out=None):
        """ apply laplace operator to array `arr` """
        for j in nb.prange(0, dim_z):  # iterate axial points
            # inner radial boundary condition
            i = 0
            arr_z_l, arr_c, arr_z_h = region_z(arr, (i, j))
            # Axis treatment: with cell centers at r = (i + 0.5)*dr, mirror
            # symmetry makes the virtual point equal arr[0], so the radial
            # second derivative and the 1/r term each contribute
            # (arr[1] - arr[0]) / dr^2, giving the factor 2 below.
            out[i, j] = (
                2 * (arr[i + 1, j] - arr_c) * dr_2
                + (arr_z_l - 2 * arr_c + arr_z_h) * dz_2
            )
            if dim_r == 1:
                continue  # deal with singular radial dimension
            for i in range(1, dim_r - 1):  # iterate radial points
                arr_z_l, arr_c, arr_z_h = region_z(arr, (i, j))
                arr_r_l, arr_r_h = arr[i - 1, j], arr[i + 1, j]
                # Bulk stencil: radial second derivative, the cylindrical
                # (1/r) d/dr term (2*i + 1 == 2*(i + 0.5), i.e. 2*r/dr),
                # and the axial second derivative.
                out[i, j] = (
                    (arr_r_h - 2 * arr_c + arr_r_l) * dr_2
                    + (arr_r_h - arr_r_l) / (2 * i + 1) * dr_2
                    + (arr_z_l - 2 * arr_c + arr_z_h) * dz_2
                )
            # outer radial boundary condition
            i = dim_r - 1
            arr_z_l, arr_c, arr_z_h = region_z(arr, (i, j))
            # the value beyond the outer radius comes from the boundary condition
            arr_r_l, arr_r_h = arr[i - 1, j], value_outer(arr, (i, j))
            out[i, j] = (
                (arr_r_h - 2 * arr_c + arr_r_l) * dr_2
                + (arr_r_h - arr_r_l) / (2 * i + 1) * dr_2
                + (arr_z_l - 2 * arr_c + arr_z_h) * dz_2
            )
        return out
    return laplace  # type: ignore
@CylindricalGrid.register_operator("gradient", rank_in=0, rank_out=1)
@fill_in_docstring
def make_gradient(bcs: Boundaries) -> Callable:
    """make a discretized gradient operator for a cylindrical grid
    {DESCR_CYLINDRICAL_GRID}
    Args:
        bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):
            {ARG_BOUNDARIES_INSTANCE}
    Returns:
        A function that can be applied to an array of values
    """
    assert isinstance(bcs.grid, CylindricalGrid)
    bcs.check_value_rank(0)
    boundary_r, boundary_z = bcs
    # calculate preliminary quantities
    dim_r, dim_z = bcs.grid.shape
    # central-difference prefactors 1/(2*dr) and 1/(2*dz)
    scale_r, scale_z = 1 / (2 * bcs.grid.discretization)
    # evaluator for the virtual point just beyond the outer radial boundary
    value_outer = boundary_r.high.make_virtual_point_evaluator()
    # evaluator returning (lower, center, upper) values along the axial axis
    region_z = boundary_z.make_region_evaluator()
    # use processing for large enough arrays
    parallel = dim_r * dim_z >= PARALLELIZATION_THRESHOLD_2D ** 2
    @jit_allocate_out(parallel=parallel, out_shape=(3, dim_r, dim_z))
    def gradient(arr, out=None):
        """ apply gradient operator to array `arr` """
        for j in nb.prange(0, dim_z):  # iterate axial points
            # inner radial boundary condition
            i = 0
            arr_z_l, _, arr_z_h = region_z(arr, (i, j))
            # BUG FIX: this difference previously read `arr[1, i] - arr[0, i]`,
            # i.e. with i == 0 it always used axial column 0 instead of the
            # current column j. The one-sided radial difference at the axis
            # must act on column j, mirroring the equivalent stencil in the
            # central branch of `gradient_squared` below.
            out[0, i, j] = (arr[1, j] - arr[0, j]) * scale_r
            out[1, i, j] = (arr_z_h - arr_z_l) * scale_z
            out[2, i, j] = 0  # no phi dependence by definition
            for i in range(1, dim_r - 1):  # iterate radial points
                arr_z_l, _, arr_z_h = region_z(arr, (i, j))
                out[0, i, j] = (arr[i + 1, j] - arr[i - 1, j]) * scale_r
                out[1, i, j] = (arr_z_h - arr_z_l) * scale_z
                out[2, i, j] = 0  # no phi dependence by definition
            # outer radial boundary condition
            i = dim_r - 1
            arr_z_l, _, arr_z_h = region_z(arr, (i, j))
            # the value beyond the outer radius comes from the boundary condition
            arr_r_h = value_outer(arr, (i, j))
            out[0, i, j] = (arr_r_h - arr[i - 1, j]) * scale_r
            out[1, i, j] = (arr_z_h - arr_z_l) * scale_z
            out[2, i, j] = 0  # no phi dependence by definition
        return out
    return gradient  # type: ignore
@CylindricalGrid.register_operator("gradient_squared", rank_in=0, rank_out=0)
@fill_in_docstring
def make_gradient_squared(bcs: Boundaries, central: bool = True) -> Callable:
    """make a discretized gradient squared operator for a cylindrical grid
    {DESCR_CYLINDRICAL_GRID}
    Args:
        bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):
            {ARG_BOUNDARIES_INSTANCE}
        central (bool):
            Whether a central difference approximation is used for the gradient
            operator. If this is False, the squared gradient is calculated as
            the mean of the squared values of the forward and backward
            derivatives.
    Returns:
        A function that can be applied to an array of values
    """
    assert isinstance(bcs.grid, CylindricalGrid)
    bcs.check_value_rank(0)
    boundary_r, boundary_z = bcs
    # calculate preliminary quantities
    dim_r, dim_z = bcs.grid.shape
    # evaluator for the virtual point just beyond the outer radial boundary
    value_outer = boundary_r.high.make_virtual_point_evaluator()
    # evaluator returning (lower, center, upper) values along the axial axis
    region_z = boundary_z.make_region_evaluator()
    # use processing for large enough arrays
    parallel = dim_r * dim_z >= PARALLELIZATION_THRESHOLD_2D ** 2
    if central:
        # use central differences
        # prefactor (1/(2*d))^2 of a squared central difference
        scale_r, scale_z = 1 / (2 * bcs.grid.discretization) ** 2
        @jit_allocate_out(parallel=parallel, out_shape=(dim_r, dim_z))
        def gradient_squared(arr, out=None):
            """ apply gradient operator to array `arr` """
            for j in nb.prange(0, dim_z):  # iterate axial points
                # inner radial boundary condition (Neumann condition)
                i = 0
                arr_z_l, _, arr_z_h = region_z(arr, (i, j))
                # one-sided radial difference at the axis
                term_r = (arr[1, j] - arr[0, j]) ** 2
                term_z = (arr_z_h - arr_z_l) ** 2
                out[i, j] = term_r * scale_r + term_z * scale_z
                for i in range(1, dim_r - 1):  # iterate radial points
                    arr_z_l, _, arr_z_h = region_z(arr, (i, j))
                    term_r = (arr[i + 1, j] - arr[i - 1, j]) ** 2
                    term_z = (arr_z_h - arr_z_l) ** 2
                    out[i, j] = term_r * scale_r + term_z * scale_z
                # outer radial boundary condition
                i = dim_r - 1
                arr_z_l, _, arr_z_h = region_z(arr, (i, j))
                arr_r_h = value_outer(arr, (i, j))
                term_r = (arr_r_h - arr[i - 1, j]) ** 2
                term_z = (arr_z_h - arr_z_l) ** 2
                out[i, j] = term_r * scale_r + term_z * scale_z
            return out
    else:
        # use forward and backward differences
        # prefactor 1/(2*d^2): averages the squared forward and backward
        # differences, each scaled by 1/d^2
        scale_r, scale_z = 1 / (2 * bcs.grid.discretization ** 2)
        @jit_allocate_out(parallel=parallel, out_shape=(dim_r, dim_z))
        def gradient_squared(arr, out=None):
            """ apply gradient operator to array `arr` """
            for j in nb.prange(0, dim_z):  # iterate axial points
                # inner radial boundary condition (Neumann condition)
                i = 0
                arr_z_l, arr_c, arr_z_h = region_z(arr, (i, j))
                term_r = (arr[1, j] - arr[0, j]) ** 2
                # mean of squared forward and backward axial differences
                term_z = (arr_z_h - arr_c) ** 2 + (arr_c - arr_z_l) ** 2
                out[i, j] = term_r * scale_r + term_z * scale_z
                for i in range(1, dim_r - 1):  # iterate radial points
                    arr_z_l, arr_c, arr_z_h = region_z(arr, (i, j))
                    term_r = (arr[i + 1, j] - arr_c) ** 2 + (arr_c - arr[i - 1, j]) ** 2
                    term_z = (arr_z_h - arr_c) ** 2 + (arr_c - arr_z_l) ** 2
                    out[i, j] = term_r * scale_r + term_z * scale_z
                # outer radial boundary condition
                i = dim_r - 1
                arr_z_l, arr_c, arr_z_h = region_z(arr, (i, j))
                arr_r_h = value_outer(arr, (i, j))
                term_r = (arr_r_h - arr_c) ** 2 + (arr_c - arr[i - 1, j]) ** 2
                term_z = (arr_z_h - arr_c) ** 2 + (arr_c - arr_z_l) ** 2
                out[i, j] = term_r * scale_r + term_z * scale_z
            return out
    return gradient_squared  # type: ignore
@CylindricalGrid.register_operator("divergence", rank_in=1, rank_out=0)
@fill_in_docstring
def make_divergence(bcs: Boundaries) -> Callable:
    """make a discretized divergence operator for a cylindrical grid
    {DESCR_CYLINDRICAL_GRID}
    Args:
        bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):
            {ARG_BOUNDARIES_INSTANCE}
    Returns:
        A function that can be applied to an array of values
    """
    assert isinstance(bcs.grid, CylindricalGrid)
    # NOTE(review): rank 0 is checked here although the operator acts on a
    # rank-1 (vector) field -- presumably the boundary conditions apply per
    # scalar component; confirm against the other operators in this module.
    bcs.check_value_rank(0)
    boundary_r, boundary_z = bcs
    # calculate preliminary quantities
    dim_r, dim_z = bcs.grid.shape
    dr = bcs.grid.discretization[0]
    # central-difference prefactors 1/(2*dr) and 1/(2*dz)
    scale_r, scale_z = 1 / (2 * bcs.grid.discretization)
    value_outer = boundary_r.high.make_virtual_point_evaluator()
    region_z = boundary_z.make_region_evaluator()
    # use processing for large enough arrays
    parallel = dim_r * dim_z >= PARALLELIZATION_THRESHOLD_2D ** 2
    @jit_allocate_out(parallel=parallel, out_shape=(dim_r, dim_z))
    def divergence(arr, out=None):
        """ apply divergence operator to array `arr` """
        # arr[0] is differenced radially and arr[1] axially, i.e. they hold
        # the radial and axial vector components respectively
        for j in nb.prange(0, dim_z):  # iterate axial points
            # inner radial boundary condition
            i = 0
            arr_z_l, _, arr_z_h = region_z(arr[1], (i, j))
            # axis stencil for the radial part; the coefficients encode the
            # behavior of the radial component at the symmetry axis -- taken
            # as given here, confirm against the discretization derivation
            d_r = (arr[0, 1, j] + 3 * arr[0, 0, j]) * scale_r
            d_z = (arr_z_h - arr_z_l) * scale_z
            out[i, j] = d_r + d_z
            for i in range(1, dim_r - 1):  # iterate radial points
                arr_z_l, _, arr_z_h = region_z(arr[1], (i, j))
                # central radial difference plus the v_r / r term with cell
                # centers at r = (i + 0.5) * dr
                d_r = (arr[0, i + 1, j] - arr[0, i - 1, j]) * scale_r
                d_r += arr[0, i, j] / ((i + 0.5) * dr)
                d_z = (arr_z_h - arr_z_l) * scale_z
                out[i, j] = d_r + d_z
            # outer radial boundary condition
            i = dim_r - 1
            arr_z_l, _, arr_z_h = region_z(arr[1], (i, j))
            # value beyond the outer radius comes from the boundary condition
            arr_r_h = value_outer(arr[0], (i, j))
            d_r = (arr_r_h - arr[0, i - 1, j]) * scale_r
            d_r += arr[0, i, j] / ((i + 0.5) * dr)
            d_z = (arr_z_h - arr_z_l) * scale_z
            out[i, j] = d_z + d_r
        return out
    return divergence  # type: ignore
@CylindricalGrid.register_operator("vector_gradient", rank_in=1, rank_out=2)
@fill_in_docstring
def make_vector_gradient(bcs: Boundaries) -> Callable:
    """make a discretized vector gradient operator for a cylindrical grid
    {DESCR_CYLINDRICAL_GRID}
    Args:
        bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):
            {ARG_BOUNDARIES_INSTANCE}
    Returns:
        A function that can be applied to an array of values
    """
    assert isinstance(bcs.grid, CylindricalGrid)
    bcs.check_value_rank(1)
    # one scalar gradient operator per vector component, each using the
    # boundary conditions extracted for that component
    component_ops = tuple(make_gradient(bcs.extract_component(c)) for c in range(3))
    gradient_r, gradient_z, gradient_phi = component_ops
    @jit_allocate_out(out_shape=(3, 3) + bcs.grid.shape)
    def vector_gradient(arr, out=None):
        """ apply vector gradient operator to array `arr` """
        gradient_r(arr[0], out=out[:, 0])
        gradient_z(arr[1], out=out[:, 1])
        gradient_phi(arr[2], out=out[:, 2])
        return out
    return vector_gradient  # type: ignore
@CylindricalGrid.register_operator("vector_laplace", rank_in=1, rank_out=1)
@fill_in_docstring
def make_vector_laplace(bcs: Boundaries) -> Callable:
    """make a discretized vector laplace operator for a cylindrical grid
    {DESCR_CYLINDRICAL_GRID}
    Args:
        bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):
            {ARG_BOUNDARIES_INSTANCE}
    Returns:
        A function that can be applied to an array of values
    """
    assert isinstance(bcs.grid, CylindricalGrid)
    bcs.check_value_rank(1)
    # one scalar laplacian per vector component, each using the boundary
    # conditions extracted for that component
    component_ops = tuple(make_laplace(bcs.extract_component(c)) for c in range(3))
    laplace_r, laplace_z, laplace_phi = component_ops
    @jit_allocate_out(out_shape=(3,) + bcs.grid.shape)
    def vector_laplace(arr, out=None):
        """ apply vector laplace operator to array `arr` """
        laplace_r(arr[0], out=out[0])
        laplace_z(arr[1], out=out[1])
        laplace_phi(arr[2], out=out[2])
        return out
    return vector_laplace  # type: ignore
@CylindricalGrid.register_operator("tensor_divergence", rank_in=2, rank_out=1)
@fill_in_docstring
def make_tensor_divergence(bcs: Boundaries) -> Callable:
    """make a discretized tensor divergence operator for a cylindrical grid
    {DESCR_CYLINDRICAL_GRID}
    Args:
        bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):
            {ARG_BOUNDARIES_INSTANCE}
    Returns:
        A function that can be applied to an array of values
    """
    assert isinstance(bcs.grid, CylindricalGrid)
    bcs.check_value_rank(1)
    # one vector divergence per tensor row, each using the boundary
    # conditions extracted for that component
    component_ops = tuple(make_divergence(bcs.extract_component(c)) for c in range(3))
    divergence_r, divergence_z, divergence_phi = component_ops
    @jit_allocate_out(out_shape=(3,) + bcs.grid.shape)
    def tensor_divergence(arr, out=None):
        """ apply tensor divergence operator to array `arr` """
        divergence_r(arr[0], out=out[0])
        divergence_z(arr[1], out=out[1])
        divergence_phi(arr[2], out=out[2])
        return out
    return tensor_divergence  # type: ignore
|
#!/usr/bin/python
import argparse as ap
import sys
import re
from scipy.stats import chisquare
from itertools import compress
# Genotype-string -> dosage-code lookup tables. Keys are VCF GT strings
# (diploid and tetraploid forms); values are the allele-dosage codes written
# to the output.
gens_d = {'0/0':0, '0/1':1, '1/1':2, '1/2':3, '2/2':4, '0/0/0/0':0, '0/0/0/1':1, '0/0/1/1':2, '0/1/1/1':3, '1/1/1/1':4, '1/1/1/2':5, '1/1/2/2':6, '1/2/2/2':7, '2/2/2/2':8}
# Same as gens_d but without the diploid allelic-series keys '1/2' and '2/2'.
# NOTE(review): the -t help text says allelic series are "no longer filtered
# out" for TASSEL output -- confirm the intended key set.
gens_tassel = {'0/0':0, '0/1':1, '1/1':2, '0/0/0/0':0, '0/0/0/1':1, '0/0/1/1':2, '0/1/1/1':3, '1/1/1/1':4, '1/1/1/2':5, '1/1/2/2':6, '1/2/2/2':7, '2/2/2/2':8}
# Presence/absence coding (-a): 1 if the alternative allele appears at all.
gens_pa = {'0/0':0, '0/1':1, '1/1':1, '0/0/0/0':0, '0/0/0/1':1, '0/0/1/1':1, '0/1/1/1':1, '1/1/1/1':1}
# Command-line interface; each option documents itself via its help string.
par = ap.ArgumentParser(description = 'Convert from VCF format to the SNP Loc format expected by TetraploidSNPMap. Variant sites with more than two alleles, along with variant sites for which one or both parents has an unknown genotype, will be removed because TetraploidSNPMap can\'t handle such sites.')
par.add_argument('-v', '--vcf', metavar = 'input.vcf', required = True, help = 'VCF file to process.', dest = 'v')
par.add_argument('-o', '--output', metavar = 'output.snploc', required = True, help = 'SnpLoc file to output.', dest = 'o')
par.add_argument('-p1', metavar = 'parent1', required = False, help = 'Sample name of first parent (must match a column in the VCF header line).')
par.add_argument('-p2', metavar = 'parent2', required = False, help = 'Sample name of second parent (must match a column in the VCF header line).')
par.add_argument('-f', '--offset', metavar = 'offset', required = False, help = 'Column of first genotype, numbered from 0. Default 9.', dest = 'f')
par.add_argument('-d', '--fix-double-reductions', required = False, help = 'Look for double-reducion genotypes and mark as unknown instead of the actual genotype. TSM can\'t analyze loci with double-reducitons and will refuse any locus that has any.', dest='dr', action = "store_true")
par.add_argument('-z', '--remove-homozygotic-parents', required = False, help = 'Remove loci where the parents are identical homozygotes (AAAAxAAAA or BBBBxBBBB). These loci are not informative for linkage mapping.', dest='rh', action = "store_true")
par.add_argument('-gq', '--min-gen-qscore', metavar = 'phred-qscore', required = False, help = 'Mark as unknown any genotypes with a genotype phred q-score (GQ) less than phred-score', dest = 'q')
par.add_argument('-vq', '--min-var-qscore', metavar = 'phred-qscore', required = False, help = 'Remove any variant sites with a locus phred q-score (GQ) less than phred-score', dest = 'vq')
par.add_argument('-c', '--chromosome', metavar = 'chromosome', required = False, help = 'Output SNPs on the specified chromosome', dest = 'c')
par.add_argument('-r', '--min-genotype-reads', metavar = 'M', required = False, help = 'Drop (mark as 9, unknown) genotypes supported by fewer than N total reads.', dest='r')
par.add_argument('-u', '--max-unknown-genotypes', metavar='f', required = False, help = 'Discard loci for which the fraction of unknown genotypes is more than f. The value f should be a decimal number between 0 and 1. Default is not to do any such filtering (equivalent to f = 1.0). Parents do not count toward this total (but if either is unknown then the locus will be discarded regardless of this setting).', dest='u')
par.add_argument('-a', '--presence-absence', required = False, help = 'Produce binary, presence/absence output only, with 1 = alternative allele is present in the genotype, 0 = it is not present, 9 = unknown', dest='a', action = "store_true")
par.add_argument('-p', '--min-chisq-p-val', metavar='cutoff', required = False, help = 'Cutoff for chi-square significance (p-value). Smaller p means less chance offspring ratios are good. Loci with p < cutoff will be rejected. Default is 0.001, the same as TetraploidSNPMap. Calculated after any adjustments that reject individual genotypes such as -d, -r, and -gq. Setting cutoff = 0.0 disables this filtering.', dest = 'p')
par.add_argument('-m', '--minor-allele-freq', metavar = 'freq', required = False, help = 'Minimum minor allele frequency accross all genotypes at a locus. Loci wherein either allele is less frequent than freq will be dropped. Must be a number between 0.0 and 1.0. Each allele at a locus counts separately, so e.g. three genotypes, AAAA ABBB AABB, would have frequency 5/12 = .42 for allele B.', dest = 'm')
par.add_argument('-t', '--tassel-output', required = False, help = 'Alternative output format for TASSEL 5. Header line with sample names is included instead of matrix dimensions, genotype signifiers up to 8 for 2/2/2/2 are included (with allelic series no longer filtered out) and unknowns are represented by -1 rather than 9. Cannot be used with chi-square filtering (-p)', dest = 't', action = "store_true")
par.add_argument('-us', '--max-unknown-sample-genotpyes', metavar = 'f', required = False, help = 'Discard samples for which the fraction of unknown genotypes is greater than f. The value f should be a decimal number between 0 and 1. If parents are specified, this filtering will not be applied to them.', dest = 'us')
par.add_argument('--debug', action = 'store_true', help = 'Show debug messages', dest = 'debug')
# Line terminator used when writing output records.
newline = '\n'
# Expected offspring ratios for each possible cross. Used for chi-square
# filtering (-p). 3-dimensional array with first index = parent 1 dosage,
# second index = parent 2 dosage, 3rd index = offspring dosage, value =
# fraction of offspring of those parents expected to have that dosage.
expected = [[[1.0, 0.0, 0.0, 0.0, 0.0], # AAAAxAAAA
             [1.0/2.0,1.0/2.0,0.0,0.0,0.0], # AAAAxAAAB
             [1.0/6.0,4.0/6.0,1.0/6.0,0.0,0.0], # AAAAxAABB
             [0.0,1.0/2.0,1.0/2.0,0.0,0.0], # AAAAxABBB
             [0.0,0.0,1.0,0.0,0.0]], # AAAAxBBBB
            [[1.0/2.0,1.0/2.0,0.0,0.0,0.0], # AAABxAAAA
             [1.0/4.0,2.0/4.0,1.0/4.0,0.0,0.0], # AAABxAAAB
             [1.0/12.0,5.0/12.0,5.0/12.0,1.0/12.0,0.0], # AAABxAABB
             [0.0,1.0/4.0,2.0/4.0,1.0/4.0,0.0], # AAABxABBB
             [0.0,0.0,1.0/2.0,1.0/2.0,0.0]], # AAABxBBBB
            [[1.0/6.0,4.0/6.0,1.0/6.0,0.0,0.0], # AABBxAAAA
             [1.0/12.0,5.0/12.0,5.0/12.0,1.0/12.0,0.0], # AABBxAAAB
             [1.0/36.0,8.0/36.0,18.0/36.0,8.0/36.0,1.0/36.0],# AABBxAABB
             [0.0,1.0/12.0,5.0/12.0,5.0/12.0,1.0/12.0], # AABBxABBB
             [0.0,0.0,1.0/6.0,4.0/6.0,1.0/6.0]], # AABBxBBBB
            [[0.0,1.0/2.0,1.0/2.0,0.0,0.0], # ABBBxAAAA
             [0.0,1.0/4.0,2.0/4.0,1.0/4.0,0.0], # ABBBxAAAB
             [0.0,1.0/12.0,5.0/12.0,5.0/12.0,1.0/12.0], # ABBBxAABB
             [0.0,0.0,1.0/4.0,2.0/4.0,1.0/4.0], # ABBBxABBB
             [0.0,0.0,0.0,1.0/2.0,1.0/2.0]], # ABBBxBBBB
            [[0.0,0.0,1.0,0.0,0.0], # BBBBxAAAA
             [0.0,0.0,1.0/2.0,1.0/2.0,0.0], # BBBBxAAAB
             [0.0,0.0,1.0/6.0,4.0/6.0,1.0/6.0], # BBBBxAABB
             [0.0,0.0,0.0,1.0/2.0,1.0/2.0], # BBBBxABBB
             [0.0,0.0,0.0,0.0,1.0]]] # BBBBxBBBB
args = par.parse_args()
def dbg_msg(message):
    """Write *message* (plus a newline) to stderr, but only when --debug was given."""
    if args.debug:
        sys.stderr.write('%s\n' % message)
# Both parents must be named for any of the parent-dependent features below.
have_parents = args.p1 and args.p2
if not have_parents:
    if not args.t:
        # TetraploidSNPMap output is the default and requires parental genotypes
        sys.stderr.write('Parents must be specified for TetraploidSNPMap output.')
        exit(1)
    if args.dr:
        sys.stderr.write('Fixing double reductions (-d) requires both parents be specified (-p1 and -p2)')
        exit(1)
    if args.rh:
        sys.stderr.write('Removing heterozygous parents (-z) requires both parents be specified (-p1 and -p2)')
        exit(1)
    if args.p:
        sys.stderr.write('Filtering by p-value (-p) requires both parents be specified (-p1 and -p2)')
        exit(1)
# Number of fixed (non-genotype) columns at the left of the VCF; default 9
if(args.f):
    offs = int(args.f)
else:
    offs = 9
# Maximum allowed fraction of unknown genotypes per locus (-u)
if(args.u):
    u = float(args.u)
else:
    u = 0.0
# Genotype-string lookup table: presence/absence coding (-a) or dosage coding.
# gens_pa and gens_d are defined earlier in the file.
if(args.a):
    gens = gens_pa
else:
    gens = gens_d
# Chi-square p-value cutoff (-p); 0.001 matches TetraploidSNPMap's own default
if(args.p):
    p_cutoff = float(args.p)
else:
    p_cutoff = 0.001
# Minor-allele-frequency window (-m): later we keep loci with m < maf < 1-m
if(args.m):
    m = float(args.m)
    neg_m = 1.0 - m
else:
    # NOTE(review): neg_m is not assigned on this branch; the later MAF test
    # guards with `args.m is not None`, so this only stays safe as long as -m
    # is never passed an empty string -- confirm.
    m = 0.0
if(args.t):
    # TASSEL output: tab-separated, unknowns written as 'NA'.
    # NOTE(review): the -t help text says unknowns are "-1", but this code
    # writes 'NA' -- confirm which is intended.
    unknown = 'NA'
    separator = '\t'
    if(args.p):
        sys.stderr.write("Error: Cannot use p-value filtering (-p) with tassel output format (-t)")
        exit(1)
    if(args.a):
        sys.stderr.write("Error: Cannot use presence-absence output (-a) with tassel output format (-t)")
        exit(1)
else:
    # TPM output: space-separated, unknowns written as the integer 9
    separator = ' '
    unknown = 9
# Open input VCF (-v) and output file (-o), failing with a clear message
try:
    infile = open(args.v)
except IOError as e:
    sys.stderr.write("Could not open input file '%s': %s.\n"%(args.v,e.strerror))
    exit(1)
try:
    outfile = open(args.o,'w')
except IOError as e:
    sys.stderr.write("Could not open output file '%s' for writing: %s.\n"%(args.o,e.strerror))
    exit(1)
# In this section, we read the VCF header and find what columns the two parents
# are in, stored as p1 and p2 (these are stored as their ordering among the
# offspring, so minus the fixed, non-genotype columns as specified by --offset)
while True:
    headerline = infile.readline()
    if (not headerline): # Ran out of file and haven't found a header line. Exit.
        sys.stderr.write("Error: VCF file %s has no header line.\n"%args.v)
        exit(1)
    if (headerline[0:6] == '#CHROM'): break # Found the header line, continue on
    if (headerline[0] != '#'): # Encountered data and haven't found a header line. Exit.
        sys.stderr.write("Error: VCF file %s has no header line.\n"%args.v)
        exit(1)
# Parent column indices, relative to the first genotype column (-1 = not found)
p1 = -1
p2 = -1
fields = headerline.split()
n_fields = len(fields)
num_gt = n_fields - offs # number of genotype (sample) columns
if(have_parents):
    # Locate each parent among the sample columns by name
    for i in range(offs,n_fields):
        if(fields[i] == args.p1):
            p1 = int(i-offs)
        if(fields[i] == args.p2):
            p2 = int(i-offs)
    if(p1 < 0):
        sys.stderr.write("Error: Could not find parent %s in header line.\n"%args.p1)
        exit(1)
    if(p2 < 0):
        sys.stderr.write("Error: Could not find parent %s in header line.\n"%args.p2)
        exit(1)
    # pmin and pmax are here so we can make sure we remove the parents in the
    # reverse order they appear in the list of samples, regardless of whether p1 or
    # p2 is first. If we removed the first one first then the list would shift over
    # and we'd end up removing the wrong one for the second parent.
    pmin = min(p1,p2)
    pmax = max(p1,p2)
    p1_name = fields[p1+offs]
    p2_name = fields[p2+offs]
    # NOTE(review): p1/p2 are sample-relative indices, but pop() here is applied
    # to the full `fields` list, which still contains the `offs` fixed columns.
    # It looks like these should be fields.pop(pmax+offs) / fields.pop(pmin+offs)
    # -- confirm against expected output.
    fields.pop(pmax)
    fields.pop(pmin)
    # Parents are listed first in the output, followed by the offspring
    samples = [p1_name,p2_name]+fields[offs:]
    #tassel_header = separator.join(["Chr_loc", p1_name, p2_name] + fields[offs:] + ["Alleles"]) # Output header for tassel format (-t) if parents specified
else:
    samples = fields[offs:]
    #tassel_header = separator.join(["Chr_loc"] + fields[offs:] + ["Alleles"]) # Output header for tassel format (-t) if parents unspecified
all_dosages = [] # 2D array that contains all genotype dosages in the file. First index is locus, second is sample. Created progressively by appending the array of locus genotypes to it for each locus in the input file (i.e. for each loop iteration)
alleles_list = [] # List of strings describing alleles at each locus
names_list = [] # List of locus names
unknown_by_sample = [0] * (num_gt+1) # Number of unknown genotypes for each sample. Used in -us filtering. (Index 0 corresponds to the marker-name column added later.)
snp_n = 0 # Number of loci retained so far
havent_warned_no_qscore = True # One-shot flags so the missing-GQ/DP warnings print only once
havent_warned_no_reads = True
qscore_pos = -1 # Position of GQ within the colon-separated genotype subfields (-1 = not yet found)
reads_pos = -1  # Position of DP within the genotype subfields (-1 = not yet found)
# Numeric thresholds for the optional per-genotype filters
if(args.q): q = float(args.q)
if(args.r): r = int(args.r)
if(args.vq): vq = float(args.vq)
# outstr will hold the entire contents of the snploc file we're going to write.
# We cache everything in this string and write it out at the end so that we can
# put an accurate count of SNPs in the snploc header line, since we won't know
# until the end how many SNPs will end up making it through filtering.
outstr = ''
# Counts of loci and genotypes rejected for various reasons, used to output the
# statistics at the end
rejected_chr = 0
rejected_alleles = 0
rejected_monomorphic = 0
rejected_unk_parent = 0
rejected_homozygous = 0
rejected_vqscore = 0
rejected_unknown = 0
rejected_chisq = 0
rejected_maf = 0
rejected_qscore = 0
rejected_reads = 0
rejected_dr = 0
retained = 0
# The main loop, one iteration per line in the VCF file. Each iteration either
# rejects the locus (incrementing one of the rejected_* counters and
# continuing) or appends a row of dosages to all_dosages for output at the end.
while True:
    line = infile.readline()
    if (not line): break # Reached EOF
    fields = line.split()
    chromosome = fields[0]
    # Keep only the requested chromosome (-c), if one was given
    if (args.c and chromosome != args.c):
        rejected_chr += 1
        continue
    unknown_gt = 0 # Number of unknown genotypes at this locus. Used for filtering by fraction of unknown genotypes (-u option)
    loc = fields[1]
    # Name the locus with chromosome and physical map position, so we can
    # easily compare the physical vs linkage map afterward
    name = '%s_%s'%(chromosome,loc)
    # Filter on the site-level variant quality column (-vq); non-numeric QUAL
    # values (e.g. '.') are ignored rather than filtered
    try:
        if args.vq and float(fields[5]) < vq:
            rejected_vqscore += 1
            continue
    except ValueError:
        pass
    dosage = [None]*(n_fields-offs) # Per-sample dosage values for this locus
    # Find which field within the genotypes has the q-score (GQ)
    if (args.q and qscore_pos < 0 and havent_warned_no_qscore):
        gen_field_names = fields[offs-1].split(':')
        for i in range(0,len(gen_field_names)):
            if (gen_field_names[i] == 'GQ'):
                qscore_pos = i
                break
        if (qscore_pos < 0 and havent_warned_no_qscore):
            sys.stderr.write("Warning: Filter by genotype q-score requested, but one or more loci do not have genotype q-scores.\n")
            havent_warned_no_qscore = False
    # Find which field within the genotypes has the reads (DP)
    if (args.r and reads_pos < 0 and havent_warned_no_reads):
        gen_field_names = fields[offs-1].split(':')
        for i in range(0,len(gen_field_names)):
            if (gen_field_names[i] == 'DP'):
                reads_pos = i
                break
        if (reads_pos < 0 and havent_warned_no_reads):
            sys.stderr.write("Warning: Filter by genotype read depth requested, but one or more loci do not have genotype read depth.\n")
            havent_warned_no_reads = False
    try:
        # Iterate over the genotypes at this locus
        ploidy = 0 # Inferred as the longest '/'-separated genotype call seen
        for g in range(offs,n_fields):
            gen_fields = fields[g].split(':')
            ploidy = max(ploidy, len(gen_fields[0].split('/')))
            if (gen_fields[0] == '.'):
                # Missing call in the VCF
                dosage[g-offs] = unknown
                unknown_gt += 1
                continue
            # Filter by qscore if requested (a nonnegative qscore_pos must mean
            # it was requested, so no need to check again explicitly)
            if qscore_pos >= 0 and float(gen_fields[qscore_pos]) < q:
                rejected_qscore += 1
                dosage[g-offs] = unknown
                unknown_gt += 1
                continue
            # Filter by read depth if requested (a nonnegative reads_pos must
            # mean it was requested, so no need to check again explicitly)
            if reads_pos >= 0 and int(gen_fields[reads_pos]) < r:
                rejected_reads += 1
                dosage[g-offs] = unknown
                unknown_gt += 1
                continue
            # Look up the genotype number (will throw an exception if it isn't
            # found, caught below)
            dosage[g-offs] = gens[gen_fields[0]]
        # Rescue non-reference genotypes. If no genotypes have the reference
        # (allele 0), then we may still have a useable locus even if it at
        # first appears to be an allelic series. Try to fix it by moving all
        # alleles "down" by 1 (i.e. 1 -> 0, 2 -> 1)
        ref_absent = True
        allelic_series = False
        # Go through and determine whether there are no reference alleles and
        # whether this is an allelic series
        for i in range(0,len(dosage)):
            if(dosage[i] != unknown):
                if(dosage[i] <= ploidy-1):
                    ref_absent = False
                if(dosage[i] > ploidy):
                    allelic_series = True
        if (ref_absent):
            # No reference alleles, go through and apply the correction
            for i in range(0,len(dosage)):
                if(dosage[i] != unknown):
                    dosage[i] = dosage[i]-ploidy
        elif (allelic_series):
            # There was a reference allele 0 somewhere (ref_absent is false)
            # and there's a 2 somewhere (allelic_series is true) and if there's
            # a 2 then there's also a 1. It's therefore a real allelic series,
            # and TPM can't handle those. Reject.
            dbg_msg('Rejected '+name+' due to series logic')
            rejected_alleles += 1
            continue
    except KeyError as e:
        # Found a locus with >3 alleles. Can't rescue these at all. Reject.
        dbg_msg('Rejected '+name+' due to KeyError')
        rejected_alleles += 1
        continue
    # If either parent is unknown, TPM won't be able to do anything with this
    # snp. Reject.
    if have_parents and (dosage[p1] == unknown or dosage[p2] == unknown):
        rejected_unk_parent += 1
        continue
    # Fix (some) double reductions if requested by the user; if parents are
    # AAAB (1) x AAAA (0) then mark any dosages in the offspring other than
    # AAAB and AAAA as unknown. Also do the equivalent for the reverse
    # (ABBB (3) x BBBB (4)).
    if (args.dr):
        if (ploidy != 4):
            sys.stderr.write('Error: fixing double reductions only works with ploidy 4, but this file has genotypes of ploidy %i. Genotype is %s\n'%(ploidy,gen_fields[0]))
            exit(1)
        if ((dosage[p1] == 0 and dosage [p2] == 1) or (dosage[p1] == 1 and dosage[p2] == 0)):
            for i in range(0,len(dosage)):
                if(dosage[i] != 1 and dosage[i] != 0):
                    rejected_dr += 1
                    dosage[i] = unknown
                    unknown_gt += 1
        if ((dosage[p1] == 3 and dosage [p2] == 4) or (dosage[p1] == 4 and dosage[p2] == 3)):
            for i in range(0,len(dosage)):
                if(dosage[i] != 4 and dosage[i] != 3):
                    rejected_dr += 1
                    dosage[i] = unknown
                    unknown_gt += 1
    # Reject loci with too high a fraction of unknown genotypes (-u)
    if(args.u):
        if(float(unknown_gt) / num_gt > u):
            rejected_unknown += 1
            continue
    # Tally unknowns per sample for -us filtering. Index i+1 because position
    # 0 will correspond to the marker-name column added to each output row.
    for i in range(num_gt):
        if(dosage[i] == unknown):
            unknown_by_sample[i+1] += 1
    # Remove AAAA x AAAA, BBBB x BBBB, and AAAA x BBBB loci if requested by the user
    if (args.rh):
        if((dosage[p1] == 0 or dosage[p1] == 4) and (dosage[p2] == 4 or dosage[p2] == 0)):
            rejected_homozygous += 1
            continue
    # Pull the parents out of the offspring list (highest index first so the
    # second pop isn't shifted); keep their dosages for the chi-square test
    if(have_parents):
        p1_str = dosage[p1]
        p2_str = dosage[p2]
        dosage.pop(pmax)
        dosage.pop(pmin)
    # Now we will calculate the p-value for difference between the observed
    # offspring ratios at this locus and what we'd expect based on the parents'
    # genotypes. A low p-value means less chance that observed deviation from
    # expected ratios could be due to chance rather than an actual problem with
    # the calls. TetraploidSNPMap rejects loci where p < 0.001, so that is our
    # cutoff unless overridden by the user. In these calculations, we ignore
    # offspring genotypes for which the expected number is 0 (which would cause
    # a divide by zero error) because this is what TPM does and we're trying to
    # replicate their p-value calculation.
    if(args.p):
        # NOTE(review): the message says only ploidy 4 is supported, but the
        # condition also lets ploidy 1 through -- confirm that is intended.
        if (ploidy !=4 and ploidy != 1):
            sys.stderr.write('Error: p-value filtering is only supported for ploidy 4 at this time; ploidy is %i.\n'%ploidy)
            exit(1)
        # Iterate over offspring and get totals of each genotype
        observed_offspr = [0,0,0,0,0]
        expected_offspr = expected[p1_str][p2_str]
        known_offspr = 0
        for offspr in dosage:
            offspr = int(offspr)
            if(offspr <= 4): # unknowns are 9 here (TPM mode), so this skips them
                observed_offspr[offspr] += 1
                known_offspr += 1
        # Iterate over the five possible genotypes and keep only those for
        # which the expected number is nonzero, since chi-squared will divide
        # by it
        expected_nz = []
        observed_nz = []
        for i in range(0,len(expected_offspr)):
            if(expected_offspr[i] != 0):
                expected_nz += [expected_offspr[i]]
                observed_nz += [observed_offspr[i]]
        # Calculate the chi-squared and p value and reject this locus if the
        # latter is below the cutoff
        (chisq,p) = chisquare(observed_nz,[e*known_offspr for e in expected_nz])
        if(p < p_cutoff):
            rejected_chisq += 1
            continue
    # Filter by minor allele frequency > m. NOTE(review): known_alleles is
    # incremented by a hard-coded 4 per sample (assumes tetraploid) -- confirm
    # this should not be `ploidy`.
    n_alt_allele = 0
    known_alleles = 0
    for offspr in dosage:
        if(offspr != unknown):
            n_alt_allele += offspr
            known_alleles += 4
    maf = float(n_alt_allele) / known_alleles
    if args.m is not None and (maf <= m or maf >= neg_m):
        rejected_maf += 1
        continue
    # Locus survived all filters: record it
    snp_n += 1
    all_dosages += [[name]+dosage]
    if args.t:
        alleles_list += [separator+fields[3] + ',' + fields[4]]
    else:
        alleles_list += [""]
# Prepend the header cell for the marker-name column
samples = ['<Marker>']+samples
rejected_sample_unknowns = 0
if(args.us is not None): # Filtering samples by fraction unknown genotypes
    if have_parents: # We don't want to filter out either parent even if they fail this test because the whole file would be rendered useless if we did. If there are parents, then we want to remove their entries from the list of unknown counts and replace them with 0's tacked onto the beginning, to match the configuration of the all_dosages arrays. The 0's garauntee that they'll be kept no mater what the cutoff is.
        # NOTE(review): pmax/pmin index samples, but unknown_by_sample carries
        # an extra leading entry for the marker column -- these pops look like
        # they should be pmax+1 / pmin+1. Confirm against expected output.
        unknown_by_sample.pop(pmax)
        unknown_by_sample.pop(pmin)
        unknown_by_sample = [0,0]+unknown_by_sample
    # Here we're going to do the filtering. First calculate a threshold telling us how many unknowns are allowable for the given number of snps (faster then dividing each time), then generate a list of booleans telling us which samples are under this limit and should therefore be kept (samples_to_keep), then apply that to each entry in all_dosages using the compress() funciton
    keep_sample_threshold = float(args.us) * snp_n
    dbg_msg('Unknowns by sample: %s'%unknown_by_sample)
    dbg_msg('Keep threshold: %s'%keep_sample_threshold)
    samples_to_keep = [sample_unknowns <= keep_sample_threshold for sample_unknowns in unknown_by_sample]
    rejected_sample_unknowns = len(samples)-sum(samples_to_keep)
    dbg_msg('Keeping samples: %s'%samples_to_keep)
    all_dosages = [list(compress(dosages_for_locus, samples_to_keep)) for dosages_for_locus in all_dosages]
    samples = list(compress(samples, samples_to_keep))
if(args.t):
    all_dosages = [samples] + all_dosages # Add the header for the TASSEL format
    all_dosages = map(list, zip(*all_dosages)) # Transpose the whole things since TASSEL wants samples in rows and markers in columns, the opposite arrangement from that of VCF and TPM
else: # If we're outputting for TPM, we need to truncate to 300 samples, since TPM won't accept a file with more than this
    all_dosages = [dosages_for_locus[0:min(len(dosages_for_locus),301)] for dosages_for_locus in all_dosages]
    # NOTE(review): the sample count is written as n_fields-11, a hard-coded
    # constant (9 fixed columns + 2 parents?); with --offset != 9 this looks
    # wrong -- confirm it should be derived from num_gt.
    outfile.write(('%i %i'+newline)%(n_fields-11,snp_n)) # The header for the TPM format
# Write every retained row (header first in TASSEL mode)
for l in all_dosages:
    outfile.write(separator.join([str(d) for d in l])+newline)
#outfile.write(outstr)
infile.close()
outfile.close()
# Summary statistics for the run, written to stderr
sys.stderr.write("Retained Loci: %i\nRejected Loci:\n\tWrong Chromosome: %i\n\tMore than Two Alleles: %i\n\tUnknown Parent(s): %i\n\tHomozygous Parents: %i\n\tLow Q-score: %i\n\tP < %f: %i\n\tMinor Allele Frequency <= %f: %i\n\tMore than %i%% Unknown Genotypes: %i\nRejected Genotypes:\n\tLow Q-Score: %i\n\tDouble Reduction: %i\n\tRead Count: %i\nRejected Samples:\n\tUnknown Genotypes: %i\n"%(snp_n, rejected_chr, rejected_alleles, rejected_unk_parent, rejected_homozygous, rejected_vqscore, p_cutoff, rejected_chisq, m, rejected_maf, int(100*u), rejected_unknown, rejected_qscore, rejected_dr, rejected_reads, rejected_sample_unknowns))
# TPM (as of March 2017) silently rejects files with more than 8,000 loci
if(snp_n > 8000) and not args.t:
    sys.stderr.write("Warning: Number of retained loci is %i. As of March 2017, the current version of TetraploidSNPMap cannot handle more than 8,000 loci. This output file may be rejected by the software."%snp_n)
|
import os
from simplySQL import SQL
from flask_session import Session
from flask import Flask, render_template, redirect, request, session, jsonify
from datetime import datetime
import sqlalchemy.dialects.postgresql
# --- Application setup -------------------------------------------------------
# DATABASE_URL comes from the environment (e.g. Heroku). Heroku issues URLs
# with the deprecated "postgres://" scheme; SQLAlchemy requires
# "postgresql://", so rewrite the scheme while keeping everything after it.
DATABASE_URL = os.environ.get('DATABASE_URL', None)
if DATABASE_URL is None:
    # BUG FIX: previously slicing None raised an opaque TypeError at import
    # time; fail fast with an actionable message instead.
    raise RuntimeError("DATABASE_URL environment variable is not set")
DATABASE_URL = 'postgresql' + DATABASE_URL[8:]
# # Instantiate Flask object named app
app = Flask(__name__)
# NOTE(review): a hard-coded secret key is insecure for production -- it
# should be loaded from the environment.
app.secret_key = 'super secret key'
app.config['SESSION_TYPE'] = 'filesystem'
# # Configure sessions (non-permanent: cleared when the browser closes)
app.config["SESSION_PERMANENT"] = False
# app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# cloud heroku
db = SQL (DATABASE_URL)
#local
#db = SQL ('postgresql://postgres:123456@localhost/snapcartt') # database engine object from SQLAlchemy that manages connections to the database
@app.route("/")
def index():
shirts = db.execute("SELECT * FROM shirts ORDER BY team ASC")
shirtsLen = len(shirts)
# Initialize variables
shoppingCart = []
shopLen = len(shoppingCart)
totItems, total, display = 0, 0, 0
if 'user' in session:
shoppingCart = db.execute("SELECT * FROM cart WHERE uid=:uid", uid=str(session["uid"]))
shopLen = len(shoppingCart)
for i in range(shopLen):
total += shoppingCart[i]["subtotal"]
totItems += shoppingCart[i]["qty"]
shirts = db.execute("SELECT * FROM shirts ORDER BY team ASC")
shirtsLen = len(shirts)
return render_template ("index.html", shoppingCart=shoppingCart, shirts=shirts, shopLen=shopLen, shirtsLen=shirtsLen, total=total, totItems=totItems, display=display, session=session )
return render_template ( "index.html", shirts=shirts, shoppingCart=shoppingCart, shirtsLen=shirtsLen, shopLen=shopLen, total=total, totItems=totItems, display=display)
@app.route("/buy/")
def buy():
# Initialize shopping cart variables
shoppingCart = []
shopLen = len(shoppingCart)
totItems, total, display = 0, 0, 0
qty = int(request.args.get('quantity'))
if session:
# Store id of the selected shirt
id = int(request.args.get('id'))
# Select info of selected shirt from database
goods = db.execute("SELECT * FROM shirts WHERE id = :id", id=id)
# Extract values from selected shirt record
# Check if shirt is on sale to determine price
if(goods[0]["onsale"] == 1):
price = goods[0]["onsaleprice"]
else:
price = goods[0]["price"]
team = goods[0]["team"]
image = goods[0]["image"]
subtotal = qty * price
# Insert selected shirt into shopping cart
db.execute("INSERT INTO cart (id, qty, team, image, price, subtotal, uid) VALUES (:id, :qty, :team, :image, :price, :subtotal, :uid)", id=id, qty=qty, team=team, image=image, price=price, subtotal=subtotal, uid=str(session["uid"]))
shoppingCart = db.execute("SELECT * FROM cart WHERE uid=:uid", uid=str(session["uid"]))
shopLen = len(shoppingCart)
# Rebuild shopping cart
for i in range(shopLen):
total += shoppingCart[i]["subtotal"]
totItems += shoppingCart[i]["qty"]
# Select all shirts for home page view
shirts = db.execute("SELECT * FROM shirts ORDER BY team ASC")
shirtsLen = len(shirts)
# Go back to home page
return render_template ("index.html", shoppingCart=shoppingCart, shirts=shirts, shopLen=shopLen, shirtsLen=shirtsLen, total=total, totItems=totItems, display=display, session=session )
@app.route("/update/")
def update():
# Initialize shopping cart variables
shoppingCart = []
shopLen = len(shoppingCart)
totItems, total, display = 0, 0, 0
qty = int(request.args.get('quantity'))
if session:
# Store id of the selected shirt
id = int(request.args.get('id'))
db.execute("DELETE FROM cart WHERE id = :id and uid = :uid", id=id, uid=str(session["uid"]))
# Select info of selected shirt from database
goods = db.execute("SELECT * FROM shirts WHERE id = :id", id=id)
# Extract values from selected shirt record
# Check if shirt is on sale to determine price
if(goods[0]["onsale"] == 1):
price = goods[0]["onsaleprice"]
else:
price = goods[0]["price"]
team = goods[0]["team"]
image = goods[0]["image"]
subtotal = qty * price
# Insert selected shirt into shopping cart
db.execute("INSERT INTO cart (id, qty, team, image, price, subtotal, uid) VALUES (:id, :qty, :team, :image, :price, :subtotal, :uid)", id=id, qty=qty, team=team, image=image, price=price, subtotal=subtotal, uid=str(session["uid"]))
shoppingCart = db.execute("SELECT * FROM cart WHERE uid=:uid", uid=str(session["uid"]))
shopLen = len(shoppingCart)
# Rebuild shopping cart
for i in range(shopLen):
total += shoppingCart[i]["subtotal"]
totItems += shoppingCart[i]["qty"]
# Go back to cart page
return render_template ("cart.html", shoppingCart=shoppingCart, shopLen=shopLen, total=total, totItems=totItems, display=display, session=session )
@app.route("/filter/")
def filter():
if request.args.get('continent'):
query = request.args.get('continent')
shirts = db.execute("SELECT * FROM shirts WHERE continent = :query ORDER BY team ASC", query=query )
if request.args.get('sale'):
query = request.args.get('sale')
shirts = db.execute("SELECT * FROM shirts WHERE onsale = :query ORDER BY team ASC", query=query)
if request.args.get('id'):
query = int(request.args.get('id'))
shirts = db.execute("SELECT * FROM shirts WHERE id = :query ORDER BY team ASC", query=query)
if request.args.get('kind'):
query = request.args.get('kind')
shirts = db.execute("SELECT * FROM shirts WHERE kind = :query ORDER BY team ASC", query=query)
if request.args.get('price'):
query = request.args.get('price')
shirts = db.execute("SELECT * FROM shirts ORDER BY onsaleprice ASC")
shirtsLen = len(shirts)
# Initialize shopping cart variables
shoppingCart = []
shopLen = len(shoppingCart)
totItems, total, display = 0, 0, 0
if 'user' in session:
# Rebuild shopping cart
shoppingCart = db.execute("SELECT * FROM cart WHERE uid=:uid", uid=str(session["uid"]))
shopLen = len(shoppingCart)
for i in range(shopLen):
total += shoppingCart[i]["subtotal"]
totItems += shoppingCart[i]["qty"]
# Render filtered view
return render_template ("index.html", shoppingCart=shoppingCart, shirts=shirts, shopLen=shopLen, shirtsLen=shirtsLen, total=total, totItems=totItems, display=display, session=session )
# Render filtered view
return render_template ( "index.html", shirts=shirts, shoppingCart=shoppingCart, shirtsLen=shirtsLen, shopLen=shopLen, total=total, totItems=totItems, display=display)
@app.route("/checkout/")
def checkout():
order = db.execute("SELECT * from cart WHERE uid=:uid", uid=str(session["uid"]))
# Update purchase history of current customer
for item in order:
db.execute("INSERT INTO purchases (uid, id, team, image, quantity) VALUES(:uid, :id, :team, :image, :quantity)", uid=str(session["uid"]), id=item["id"], team=item["team"], image=item["image"], quantity=item["qty"] )
# Clear shopping cart
db.execute("DELETE from cart WHERE uid=:uid", uid=str(session["uid"]))
shoppingCart = []
shopLen = len(shoppingCart)
totItems, total, display = 0, 0, 0
# Redirect to home page
return redirect('/')
@app.route("/remove/", methods=["GET", "POST"])
def remove():
# Get the id of shirt selected to be removed
out = int(request.args.get("id"))
# Remove shirt from shopping cart
db.execute("DELETE from cart WHERE id=:id", id=out)
# Initialize shopping cart variables
totItems, total, display = 0, 0, 0
# Rebuild shopping cart
shoppingCart = db.execute("SELECT * FROM cart WHERE uid=:uid", uid=str(session["uid"]))
shopLen = len(shoppingCart)
for i in range(shopLen):
total += shoppingCart[i]["subtotal"]
totItems += shoppingCart[i]["qty"]
# Turn on "remove success" flag
display = 1
# Render shopping cart
return render_template ("cart.html", shoppingCart=shoppingCart, shopLen=shopLen, total=total, totItems=totItems, display=display, session=session )
@app.route("/login/", methods=["GET"])
def login():
return render_template("login.html")
@app.route("/new/", methods=["GET"])
def new():
# Render log in page
return render_template("new.html")
@app.route("/logged/", methods=["POST"] )
def logged():
# Get log in info from log in form
user = request.form["username"].lower()
pwd = request.form["password"]
#pwd = str(sha1(request.form["password"].encode('utf-8')).hexdigest())
# Make sure form input is not blank and re-render log in page if blank
if user == "" or pwd == "":
return render_template ( "login.html" )
# Find out if info in form matches a record in user database
query = "SELECT * FROM users WHERE username = :user AND password = :pwd"
rows = db.execute ( query, user=user, pwd=pwd )
# If username and password match a record in database, set session variables
if len(rows) == 1:
session['user'] = user
session['time'] = datetime.now( )
session['uid'] = str(rows[0]["id"])
# Redirect to Home Page
if 'user' in session:
return redirect ( "/" )
# If username is not in the database return the log in page
return render_template ( "login.html", msg="Wrong username or password." )
@app.route("/history/")
def history():
# Initialize shopping cart variables
shoppingCart = []
shopLen = len(shoppingCart)
totItems, total, display = 0, 0, 0
# Retrieve all shirts ever bought by current user
myShirts = db.execute("SELECT * FROM purchases WHERE uid=:uid", uid=str(session["uid"]))
myShirtsLen = len(myShirts)
# Render table with shopping history of current user
return render_template("history.html", shoppingCart=shoppingCart, shopLen=shopLen, total=total, totItems=totItems, display=display, session=session, myShirts=myShirts, myShirtsLen=myShirtsLen)
@app.route("/logout/")
def logout():
# clear shopping cart
#db.execute("DELETE from cart WHERE uid=:uid", uid=str(session["uid"]))
# Forget any user_id
session.clear()
# Redirect user to login form
return redirect("/")
@app.route("/register/", methods=["POST"] )
def registration():
# Get info from form
username = request.form["username"]
password = request.form["password"]
confirm = request.form["confirm"]
fname = request.form["fname"]
lname = request.form["lname"]
email = request.form["email"]
if password != confirm:
return render_template("new.html", msg="Passwords did not match !!")
# See if username already in the database
rows = db.execute( "SELECT * FROM users WHERE username = :username ", username = username )
# If username already exists, alert user
if len( rows ) > 0:
return render_template ( "new.html", msg="Username already exists!" )
# If new user, upload his/her info into the users database
new = db.execute ( "INSERT INTO users (username, password, fname, lname, email) VALUES (:username, :password, :fname, :lname, :email)",
username=username, password=password, fname=fname, lname=lname, email=email )
# Render login template
return render_template ( "login.html" )
@app.route("/cart/")
def cart():
if 'user' in session:
# Clear shopping cart variables
totItems, total, display = 0, 0, 0
# Grab info currently in database
shoppingCart = db.execute("SELECT * FROM cart WHERE uid=:uid", uid=str(session["uid"]))
# Get variable values
shopLen = len(shoppingCart)
for i in range(shopLen):
total += shoppingCart[i]["subtotal"]
totItems += shoppingCart[i]["qty"]
# Render shopping cart
return render_template("cart.html", shoppingCart=shoppingCart, shopLen=shopLen, total=total, totItems=totItems, display=display, session=session)
@app.errorhandler(404)
def pageNotFound( e ):
    """Render the custom 404 page (always with a 404 status code)."""
    if 'user' in session:
        # BUG FIX: this branch previously returned no status code, so
        # logged-in users received the 404 page with HTTP 200.
        return render_template ( "404.html", session=session ), 404
    return render_template ( "404.html" ), 404
# Only needed if Flask run is not used to execute the server
if __name__ == "__main__":
    # NOTE(review): debug=True combined with host='0.0.0.0' exposes the
    # Werkzeug debugger to the network -- disable debug in production.
    app.run(debug=True, host='0.0.0.0')
#if __name__ == '__main__':
#    app.run()
|
import os
import unittest
from common_helper_yara.yara_scan import _parse_yara_output, scan
# Directory containing this test file; fixtures live in ./data beneath it
DIR_OF_CURRENT_FILE = os.path.dirname(os.path.abspath(__file__))
class TestYaraScan(unittest.TestCase):
    """Tests for yara output parsing and the scan() wrapper."""
    def test_parse_yara_output(self):
        fixture = os.path.join(DIR_OF_CURRENT_FILE, 'data', 'yara_matches')
        with open(fixture, 'r') as fd:
            raw_output = fd.read()
        matches = _parse_yara_output(raw_output)
        self.assertIsInstance(matches, dict, 'matches should be dict')
        self.assertIn('PgpPublicKeyBlock', matches.keys(), 'Pgp block should have been matched')
        self.assertIn(0, matches['PgpPublicKeyBlock']['strings'][0], 'first block should start at 0x0')
    def test_scan(self):
        rules = os.path.join(DIR_OF_CURRENT_FILE, 'data/rules', 'signatures.yara')
        target = os.path.join(DIR_OF_CURRENT_FILE, 'data/data_files', 'scan_file')
        result = scan(rules, target)
        self.assertIsInstance(result, dict, "result is not a dict")
        self.assertEqual(len(result), 2, "number of matches not correct")
        self.assertEqual(result['another_test_rule']['meta']['description'], 'test rule', 'meta data not correct')
    def test_scan_ext_variable_and_magic(self):
        rules = os.path.join(DIR_OF_CURRENT_FILE, 'data/rules', 'signatures_ext_var.yara')
        target = os.path.join(DIR_OF_CURRENT_FILE, 'data/data_files', 'scan_file')
        # The rule should fire only when the external variable is "true"
        self.assertEqual(len(scan(rules, target, external_variables={'test_flag': "true"})), 1, "number of results not correct")
        self.assertEqual(len(scan(rules, target, external_variables={'test_flag': "false"})), 0, "number of results not correct")
    def test_scan_recursive(self):
        rules = os.path.join(DIR_OF_CURRENT_FILE, 'data/rules', 'signatures.yara')
        target_dir = os.path.join(DIR_OF_CURRENT_FILE, 'data/data_files')
        result = scan(rules, target_dir, recursive=True)
        self.assertEqual(len(result['another_test_rule']['strings']), 2, 'string in second file not found')
|
import numpy as np
from ...utils import mkvc
import discretize
import warnings
def edge_basis_function(t, a1, l1, h1, a2, l2, h2):
    """
    Evaluate the four bilinear edge basis functions at parameter ``t`` along
    the segment (a1 + t*l1, a2 + t*l2) inside a cell of size h1 x h2.

    Returns the weights for the four surrounding edges; they always sum to 1.
    """
    # Normalized local coordinates in [0, 1] across the cell
    u = (a1 + t * l1) / h1
    v = (a2 + t * l2) / h2
    return np.r_[
        (1.0 - u) * (1.0 - v),  # edge at (0, 0)
        u * (1.0 - v),          # edge at (1, 0)
        (1.0 - u) * v,          # edge at (0, 1)
        u * v,                  # edge at (1, 1)
    ]
def _simpsons_rule(a1, l1, h1, a2, l2, h2):
    """Integrate the edge basis functions over t in [0, 1] using Simpson's rule."""
    # Sample the basis weights at the two endpoints and the midpoint
    w_left, w_mid, w_right = (
        edge_basis_function(s, a1, l1, h1, a2, l2, h2) for s in (0.0, 0.5, 1.0)
    )
    # Simpson's rule: (f(0) + 4 f(1/2) + f(1)) / 6
    return (w_left + 4.0 * w_mid + w_right) / 6.0
# TODO: Extend this when current is defined on cell-face
def getStraightLineCurrentIntegral(hx, hy, hz, ax, ay, az, bx, by, bz):
    """
    Compute integral int(W . J dx^3) in brick of size hx x hy x hz
    where W denotes the 12 local bilinear edge basis functions
    and where J prescribes a unit line current
    between points (ax,ay,az) and (bx,by,bz).

    Returns the (sx, sy, sz) weight arrays (4 entries each) for the
    x-, y- and z-directed edges of the brick.
    """
    # Components of the segment vector
    lx, ly, lz = bx - ax, by - ay, bz - az
    # Each directional contribution integrates the basis functions over the
    # transverse coordinates and scales by the segment length component
    sx = _simpsons_rule(ay, ly, hy, az, lz, hz) * lx
    sy = _simpsons_rule(ax, lx, hx, az, lz, hz) * ly
    sz = _simpsons_rule(ax, lx, hx, ay, ly, hy) * lz
    return sx, sy, sz
def findlast(x):
    """Return the index of the last True entry of boolean array ``x``, or -1 if none."""
    hits = np.flatnonzero(x)
    return hits[-1] if hits.size else -1
def segmented_line_current_source_term(mesh, locs):
    """Calculate a source term for a line current source on a mesh

    Given a discretize mesh, compute the source vector for a unit current flowing
    along the segmented line path with vertices defined by `locs`.

    Parameters
    ----------
    mesh : discretize.TreeMesh or discretize.TensorMesh
        The Mesh (3D) for the system.
    locs : numpy.ndarray
        The array of locations of consecutive points along the polygonal path.
        in a shape of (n_points, 3)

    Returns
    -------
    numpy.ndarray of length (mesh.nE)
        Contains the source term for all x, y, and z edges of the mesh.

    Raises
    ------
    TypeError
        If `mesh` is neither a TensorMesh nor a TreeMesh.

    Notes
    -----
    You can create a closed loop by setting the first and end point to be the same.
    """
    if isinstance(mesh, discretize.TensorMesh):
        return _poly_line_source_tens(mesh, locs)
    elif isinstance(mesh, discretize.TreeMesh):
        return _poly_line_source_tree(mesh, locs)
    # BUG FIX: previously an unsupported mesh type fell through and silently
    # returned None; fail loudly instead.
    raise TypeError(
        "Unsupported mesh type: {}; expected discretize.TensorMesh or "
        "discretize.TreeMesh".format(type(mesh).__name__)
    )
def _poly_line_source_tens(mesh, locs):
    """
    Given a tensor product mesh with origin at (x0,y0,z0) and cell sizes
    hx, hy, hz, compute the source vector for a unit current flowing along
    the polygon with vertices px, py, pz.

    The 3-D arrays sx, sy, sz contain the source terms for all x/y/z-edges
    of the tensor product mesh; they are flattened (Fortran order, via mkvc)
    and concatenated as the return value.

    Parameters: `mesh` is a discretize.TensorMesh, `locs` is an (n_points, 3)
    array of polygon vertices.

    Modified from matlab code:
        getSourceTermLineCurrentPolygon(x0,y0,z0,hx,hy,hz,px,py,pz)
        Christoph Schwarzbach, February 2014
    """
    # Get some mesh properties
    nx, ny, nz = mesh.shape_cells
    hx, hy, hz = mesh.h
    x = mesh.nodes_x
    y = mesh.nodes_y
    z = mesh.nodes_z
    # Source points (polygon vertices)
    px = locs[:, 0]
    py = locs[:, 1]
    pz = locs[:, 2]
    # discrete edge function: one accumulator array per edge orientation
    sx = np.zeros((nx, ny + 1, nz + 1))
    sy = np.zeros((nx + 1, ny, nz + 1))
    sz = np.zeros((nx + 1, ny + 1, nz))
    # number of line segments
    nP = len(px) - 1
    # check that all polygon vertices are inside the mesh.
    # NOTE(review): an outside vertex only *prints* a message here, while the
    # TreeMesh variant raises ValueError -- confirm whether this should raise.
    # NOTE(review): the slices x[: nx - 1] / x[1:nx] test only the first
    # nx-1 cells, so a vertex in the last cell looks "outside" -- confirm
    # these should be x[:nx] / x[1:nx+1].
    for ip in range(nP + 1):
        ax = px[ip]
        ay = py[ip]
        az = pz[ip]
        ix = findlast(np.logical_and(ax >= x[: nx - 1], ax <= x[1:nx]))
        iy = findlast(np.logical_and(ay >= y[: ny - 1], ay <= y[1:ny]))
        iz = findlast(np.logical_and(az >= z[: nz - 1], az <= z[1:nz]))
        if (ix < 0) or (iy < 0) or (iz < 0):
            msg = "Polygon vertex (%.1f, %.1f, %.1f) is outside the mesh"
            print((msg) % (ax, ay, az))
    # integrate each line segment
    for ip in range(nP):
        # start and end vertices
        ax = px[ip]
        ay = py[ip]
        az = pz[ip]
        bx = px[ip + 1]
        by = py[ip + 1]
        bz = pz[ip + 1]
        # find intersection with mesh planes: parameters t in [0,1] where the
        # segment crosses node planes in each direction
        dx = bx - ax
        dy = by - ay
        dz = bz - az
        d = np.sqrt(dx**2 + dy**2 + dz**2)
        # tolerance below which a direction component is treated as zero
        tol = d * np.finfo(float).eps
        if abs(dx) > tol:
            tx = (x - ax) / dx
            tx = tx[np.logical_and(tx >= 0, tx <= 1)]
        else:
            tx = []
        if abs(dy) > tol:
            ty = (y - ay) / dy
            ty = ty[np.logical_and(ty >= 0, ty <= 1)]
        else:
            ty = []
        if abs(dz) > tol:
            tz = (z - az) / dz
            tz = tz[np.logical_and(tz >= 0, tz <= 1)]
        else:
            tz = []
        # sorted, de-duplicated breakpoints partition the segment into
        # sub-segments that each lie within a single cell
        t = np.unique(np.r_[0.0, tx, ty, tz, 1.0])
        nq = len(t) - 1
        # midpoints of each sub-segment, used to locate its containing cell
        tc = 0.5 * (t[:nq] + t[1 : nq + 1])
        for iq in range(nq):
            cx = ax + tc[iq] * dx
            cy = ay + tc[iq] * dy
            cz = az + tc[iq] * dz
            # locate cell id containing the sub-segment midpoint
            ix = findlast(np.logical_and(cx >= x[: nx - 1], cx <= x[1:nx]))
            iy = findlast(np.logical_and(cy >= y[: ny - 1], cy <= y[1:ny]))
            iz = findlast(np.logical_and(cz >= z[: nz - 1], cz <= z[1:nz]))
            # local coordinates of the sub-segment endpoints within the cell
            hxloc = hx[ix]
            hyloc = hy[iy]
            hzloc = hz[iz]
            axloc = ax + t[iq] * dx - x[ix]
            ayloc = ay + t[iq] * dy - y[iy]
            azloc = az + t[iq] * dz - z[iz]
            bxloc = ax + t[iq + 1] * dx - x[ix]
            byloc = ay + t[iq + 1] * dy - y[iy]
            bzloc = az + t[iq + 1] * dz - z[iz]
            # integrate the unit current over this sub-segment
            sxloc, syloc, szloc = getStraightLineCurrentIntegral(
                hxloc, hyloc, hzloc, axloc, ayloc, azloc, bxloc, byloc, bzloc
            )
            # scatter the 4 edge weights onto the cell's edges (Fortran order
            # matches the ordering produced by getStraightLineCurrentIntegral)
            sx[ix, iy : iy + 2, iz : iz + 2] += np.reshape(sxloc, (2, 2), order="F")
            sy[ix : ix + 2, iy, iz : iz + 2] += np.reshape(syloc, (2, 2), order="F")
            sz[ix : ix + 2, iy : iy + 2, iz] += np.reshape(szloc, (2, 2), order="F")
    return np.r_[mkvc(sx), mkvc(sy), mkvc(sz)]
def _poly_line_source_tree(mesh, locs):
    """Calculate a source term for a line current source on a TreeMesh.

    Given an OcTree mesh, compute the edge source vector for a unit current
    flowing along the polygon whose consecutive vertices are the rows of
    ``locs``.

    Parameters
    ----------
    mesh : discretize.TreeMesh
        The OcTree mesh (3D) for the system.
    locs : (n, 3) numpy.ndarray
        The x, y, and z locations of consecutive points along the polygonal
        path.

    Returns
    -------
    numpy.ndarray of length (mesh.nE)
        Contains the source term for all x, y, and z edges of the OcTree mesh.
    """
    points = np.c_[locs[:, 0], locs[:, 1], locs[:, 2]]
    # accumulators over the *total* (non-deflated) edge sets
    sx = np.zeros(mesh.ntEx)
    sy = np.zeros(mesh.ntEy)
    sz = np.zeros(mesh.ntEz)
    # number of line segments
    nP = len(points) - 1
    x0 = mesh.x0
    dim = mesh.dim
    # Validate that every polygon vertex is inside the mesh bounds.
    # xF (upper corner) is loop-invariant, so compute it once.
    xF = np.array([mesh.vectorNx[-1], mesh.vectorNy[-1], mesh.vectorNz[-1]])
    for ip in range(nP + 1):
        # BUG FIX: previously this checked points[0] on every iteration, so
        # only the first vertex was ever validated.
        A = points[ip]
        if np.any(A < x0) or np.any(A > xF):
            # BUG FIX: '{.1f}' is attribute lookup, not a format spec, and
            # raised AttributeError; the correct spec is '{:.1f}'.
            msg = "Polygon vertex ({:.1f}, {:.1f}, {:.1f}) is outside the mesh".format(*A)
            raise ValueError(msg)
    # Loop over each line segment
    for ip in range(nP):
        # Start and end vertices
        A = points[ip]
        B = points[ip + 1]
        # Components of vector (dx, dy, dz) along the wirepath
        ds = B - A
        # Find indices of all cells intersected by the wirepath
        srcCellIds = mesh.get_cells_along_line(A, B)
        levels = mesh.cell_levels_by_index(srcCellIds)
        if np.any(levels != levels[0]):
            warnings.warn("Warning! Line path crosses a cell level change.")
        # Walk the segment cell by cell, starting at point A
        p0 = A
        for cell_id in srcCellIds:
            cell = mesh[cell_id]
            cell_x0 = cell.x0
            h = cell.h
            cell_xF = cell_x0 + h
            edges = cell.edges
            edges_x = edges[0:4]
            edges_y = edges[4:8]
            edges_z = edges[8:12]
            # find the parameter t of the next intersection with a cell face
            ts = np.ones(dim)
            for i in range(dim):
                if ds[i] > 0:
                    ts[i] = (cell_xF[i] - A[i]) / ds[i]
                elif ds[i] < 0:
                    ts[i] = (cell_x0[i] - A[i]) / ds[i]
                else:
                    ts[i] = np.inf
            t = min(*ts, 1)  # the last value should be 1
            p1 = A + t * ds  # the next intersection point
            # local (cell-relative) coordinates of the sub-segment endpoints
            cA = p0 - cell_x0
            cB = p1 - cell_x0
            cell_s = getStraightLineCurrentIntegral(*h, *cA, *cB)
            sx[edges_x] += cell_s[0]
            sy[edges_y] += cell_s[1]
            sz[edges_z] += cell_s[2]
            p0 = p1
    s = np.r_[sx, sy, sz]
    # Deflate from the total (hanging) edge set to the active edges.
    R = mesh._deflate_edges()
    s = R.T.dot(s)
    return s
def line_through_faces(
    mesh,
    locations,
    normalize_by_area=True,
    check_divergence=True,
    tolerance_divergence=1e-9,
):
    """
    Define the current through cell faces given path locations.

    Note that this will perform best if your path locations are at cell
    centers. Only paths that align with the mesh (e.g. a straight line in
    x, y, z) are currently supported.

    :param mesh: 3D mesh exposing n_faces, gridCC, faces_x/y/z, hx/hy/hz
    :param locations: (n, 3) array of consecutive path vertices
    :param normalize_by_area: divide face values by face area (default True)
    :param check_divergence: validate the discrete divergence of the result
    :param tolerance_divergence: threshold used by the divergence check
    :return: numpy array of length mesh.n_faces with signed face currents
    """
    current = np.zeros(mesh.n_faces)
    def not_aligned_error(i):
        # Raised when a segment varies along more than one axis.
        raise NotImplementedError(
            "With the current implementation, the line between points "
            "must align with the axes of the mesh. The points "
            f"{locations[i, :]} and {locations[i+1, :]} do not."
        )
    # pre-processing step: snap every path vertex to its closest cell center
    cell_centers = discretize.utils.closest_points_index(mesh, locations, "CC")
    locations = mesh.gridCC[cell_centers, :]
    # next step: find segments between lines
    for i in range(locations.shape[0] - 1):
        # the single axis along which this segment varies
        dimension = np.nonzero(np.abs(locations[i, :] - locations[i + 1, :]))[0]
        if len(dimension) > 1:
            not_aligned_error(i)
        dimension = dimension[0]
        # sign of the current flow along that axis
        direction = np.sign(locations[i, dimension] - locations[i + 1, dimension])
        if dimension == 0:
            grid_loc = "x"
            current_inds = slice(0, mesh.n_faces_x)
        elif dimension == 1:
            grid_loc = "y"
            start = mesh.n_faces_x
            current_inds = slice(start, start + mesh.n_faces_y)
        elif dimension == 2:
            grid_loc = "z"
            start = mesh.n_faces_x + mesh.n_faces_y
            current_inds = slice(start, start + mesh.n_faces_z)
        # interpolate to closest face
        loca = discretize.utils.closest_points_index(
            mesh, locations[i, :], f"F{grid_loc}"
        )
        locb = discretize.utils.closest_points_index(
            mesh, locations[i + 1, :], f"F{grid_loc}"
        )
        if len(loca) > 1 or len(locb) > 1:
            raise Exception(
                "Current across multiple faces is not implemented. "
                "Please put path through a cell rather than along edges"
            )
        grid = getattr(mesh, f"faces_{grid_loc}")
        loca = grid[loca[0]]
        locb = grid[locb[0]]
        # find all faces between these points; the quarter-cell offsets form
        # a thin selection window around the segment in the two off-axes
        if dimension == 0:
            xlocs = np.r_[locations[i, 0], locations[i + 1, 0]]
            if not (np.allclose(loca[1], locb[1]) and np.allclose(loca[2], locb[2])):
                not_aligned_error(i)
            ylocs = loca[1] + mesh.hy.min() / 4 * np.r_[-1, 1]
            zlocs = loca[2] + mesh.hz.min() / 4 * np.r_[-1, 1]
        elif dimension == 1:
            ylocs = np.r_[locations[i, 1], locations[i + 1, 1]]
            if not (np.allclose(loca[0], locb[0]) and np.allclose(loca[2], locb[2])):
                not_aligned_error(i)
            xlocs = loca[0] + mesh.hx.min() / 4 * np.r_[-1, 1]
            zlocs = loca[2] + mesh.hz.min() / 4 * np.r_[-1, 1]
        elif dimension == 2:
            zlocs = np.r_[locations[i, 2], locations[i + 1, 2]]
            if not (np.allclose(loca[0], locb[0]) and np.allclose(loca[1], locb[1])):
                not_aligned_error(i)
            xlocs = loca[0] + mesh.hx.min() / 4 * np.r_[-1, 1]
            ylocs = loca[1] + mesh.hy.min() / 4 * np.r_[-1, 1]
        src_inds = (
            (grid[:, 0] >= xlocs.min())
            & (grid[:, 0] <= xlocs.max())
            & (grid[:, 1] >= ylocs.min())
            & (grid[:, 1] <= ylocs.max())
            & (grid[:, 2] >= zlocs.min())
            & (grid[:, 2] <= zlocs.max())
        )
        # NOTE: current[current_inds] is a view (slice indexing), so this
        # chained assignment does write through to `current`.
        current[current_inds][src_inds] = direction
    if normalize_by_area:
        current = current / mesh.area
    # check that there is only a divergence at the ends if not a loop
    if check_divergence:
        div = mesh.vol * mesh.face_divergence * current
        nonzero = np.abs(div) > tolerance_divergence
        # check if the source is a loop or grounded
        if not np.allclose(locations[0, :], locations[-1, :]):  # grounded source
            if nonzero.sum() > 2:
                raise Exception(
                    "The source path is not connected. Check that all points go through cell centers"
                )
            if np.abs(div.sum()) > tolerance_divergence:
                raise Exception(
                    "The integral of the divergence is not zero. Something is wrong :("
                )
        else:  # loop source
            if nonzero.sum() > 0:
                raise Exception(
                    "The source path is not connected. Check that all points go through cell centers"
                )
    return current
def getSourceTermLineCurrentPolygon(xorig, hx, hy, hz, px, py, pz):
    """Deprecated wrapper around :func:`segmented_line_current_source_term`.

    Builds a TensorMesh from the cell widths and origin, stacks the vertex
    coordinate arrays, and delegates to the replacement function.
    """
    warnings.warn(
        # BUG FIX: the implicit string concatenation produced "will beremoved"
        # (missing space between the two literal fragments).
        "getSourceTermLineCurrentPolygon has been deprecated and will be "
        "removed in SimPEG 0.17.0. Please use segmented_line_current_source_term.",
        FutureWarning,
    )
    mesh = discretize.TensorMesh((hx, hy, hz), x0=xorig)
    locs = np.c_[px, py, pz]
    return segmented_line_current_source_term(mesh, locs)
|
from inspect import isabstract
from typing import Dict
from clean.request.filters.abs import BaseFilter
class FooFilter(BaseFilter):
    """Minimal concrete BaseFilter used to exercise the abstract interface.

    Holds an inclusive timestamp range as two strings (``gte``/``lte``).
    """

    def __init__(self, gte: str = "", lte: str = ""):
        self.gte = gte
        self.lte = lte

    def to_dict(self):
        """Serialize the filter bounds to a plain dict."""
        return {
            'gte': self.gte,
            'lte': self.lte
        }

    def is_valid(self):
        """This toy filter accepts any bounds."""
        return True

    @classmethod
    def from_dict(cls, params: Dict, defaults: Dict = None):
        """Build a filter from ``params``, falling back to ``defaults``.

        BUG FIX: ``defaults`` was accepted but silently ignored, and keys
        missing from ``params`` produced ``None`` instead of the ``""``
        default declared by ``__init__``.
        """
        merged = dict(defaults or {})
        merged.update(params)
        return cls(gte=merged.get('gte', ''), lte=merged.get('lte', ''))
def test_base_filter_is_abstract_class():
    """BaseFilter must remain abstract (not directly instantiable)."""
    assert isabstract(BaseFilter)
def test_factory_abs_has_create_method_as_abstract():
    """All three interface methods are declared abstract on BaseFilter."""
    expected = frozenset({'from_dict', 'to_dict', 'is_valid'})
    assert BaseFilter.__abstractmethods__ == expected
def test_create_filter():
    """The constructor stores both bounds verbatim."""
    bounds = ('20160101000000', '20170101235959')
    filt = FooFilter(gte=bounds[0], lte=bounds[1])
    assert (filt.gte, filt.lte) == bounds
def test_create_from_dict():
    """from_dict round-trips the values supplied in the params mapping."""
    expected = {'gte': '20160101000000', 'lte': '20170101235959'}
    filt = FooFilter.from_dict(expected)
    assert filt.gte == expected['gte']
    assert filt.lte == expected['lte']
def test_create_filter_to_dict():
    """to_dict reflects the constructor arguments."""
    filt = FooFilter(gte='20160101000000', lte='20170101235959')
    serialized = filt.to_dict()
    assert serialized['gte'] == '20160101000000'
    assert serialized['lte'] == '20170101235959'
|
"""
...
"""
# Per-device secret keys, indexed by zero-padded device id.
devicekeys = {
    '0001': 'asdfsfgw3g',
    '0002': 'uhwefuihwef',
    '0003': 'uihwefiuhawefawef',
    '0004': 'uoihwefiuhwef',
    '0005': 'iqweroih1r',
}


def getDeviceKey(deviceid):
    """Return the secret key for ``deviceid``, or None when unknown."""
    return devicekeys.get(deviceid)
|
# Demonstration of Python `for` loops over lists.
# General form:
#     for <temporary variable> in <list variable>:
#         <action>
dog_breeds = ['french_bulldog', 'dalmation', 'shihtzu', 'poodle', 'collie']

for dog in dog_breeds:
    print(dog)

## Examples
board_games = ['Settlers of Catan', 'Carcassone', 'Power Grid', 'Agricola', 'Scrabble']
sport_games = ['football', 'football - American', 'hockey', 'baseball', 'cricket']

for title in board_games:
    print(title)

for name in sport_games:
    print(name)
"""Integration tests configuration file."""
from {{cookiecutter.package_name}}.tests.conftest import pytest_configure # pylint: disable=unused-import
|
"""Setup oaff-app"""
from setuptools import find_namespace_packages, setup # type: ignore
# PyPI long description comes straight from the README.
with open("README.md") as f:
    long_description = f.read()
# Pinned runtime dependencies: ASGI server stack + geo filtering + Postgres drivers.
inst_reqs = [
    "uvicorn==0.13.4",
    "gunicorn==20.1.0",
    "uvloop==0.15.2",
    "httptools==0.2.0",
    "pygeofilter==0.0.2",
    "psycopg2==2.8.6",
    "asyncpg==0.23.0",
]
# Optional extras, installed with `pip install oaff.app[test]`.
extra_reqs = {
    "test": ["pytest"],
}
setup(
    name="oaff.app",
    version="0.0.1",
    description=u"Business logic for oaff",
    long_description=long_description,
    long_description_content_type="text/markdown",
    python_requires=">=3.8",
    classifiers=[
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
    # Namespace packages under oaff.*; tests are excluded from the wheel.
    packages=find_namespace_packages(exclude=["tests*"]),
    include_package_data=False,
    zip_safe=True,
    install_requires=inst_reqs,
    extras_require=extra_reqs,
)
|
"""
Coefficient rule
f(n) == O(g(n)) -> c * f(n) == O(g(n)); c > 0
It means that if n -> oo, then c is not important, because it is not related
to the length of the input (n).
# TIP: think in terms of infinity when forming big-O notations
"""
def linear(n: int) -> int:
    """Perform one unit of work per element: O(n) for input size n."""
    total: int = 0
    for _ in range(n):
        total += 1
    return total
def c_linear(n: int) -> int:
    """Perform c*n unit steps; the constant c does not change the O(n) class."""
    constant: int = 10
    total: int = 0
    for _ in range(constant * n):
        total += 1
    return total
if __name__ == "__main__":
    # Demo: both variants return a value proportional to n (same O(n) class).
    n = 4
    print(f"linear -> {linear(n)}")
    print(f"c_linear -> {c_linear(n)}")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import time
'''
Caveat of this approach: between reading the file and rewriting it, another
process may write to the file, and those concurrent writes would be lost.
'''
def del_lines(path, key_word):
    """Rewrite ``path``, deleting the block that starts at the first line
    containing ``key_word`` and ends at the next line containing '}'
    (both boundary lines are removed as well)."""
    with open(path, 'r') as src:
        original = src.readlines()
    kept = []
    inside_block = False
    for line in original:
        if key_word in line or inside_block:
            # Stay in skip mode until the closing brace line is consumed.
            inside_block = '}' not in line
        else:
            kept.append(line)
    with open(path, 'w') as dst:
        dst.writelines(kept)
if __name__ == "__main__":
    # Usage: script.py <config-path> <ip>   (Python 2: note the print statement)
    path = sys.argv[1]
    ip = sys.argv[2]
    local_time = time.asctime(time.localtime(time.time()))
    # Logs (in Chinese): "[time] operating on <path>, releasing IP: <ip>"
    print '[%s] 操作%s释放IP:%s' % (local_time, path, ip)
    del_lines(path, ip)
|
"""定义learning_logs的URL模式"""
from django.urls import path
from . import views
# URL namespace for reversing, e.g. 'learning_logs:index'.
app_name = 'learning_logs'
urlpatterns = [
    # Home page
    path('', views.index, name='index'),
    # Show all topics
    path('topics/', views.topics, name='topics'),
    # Detail page for a single topic
    path('topics/<int:topic_id>/', views.topic, name='topic'),
]
|
import random
import discord
client = discord.Client()
@client.event
async def on_ready():
    # Log the bot identity once the gateway connection is established.
    print("Running as {}".format(client.user))
@client.event
async def on_message(message):
    # Reply to any message containing "@someone" with a random member mention.
    if "@someone" in str(message.content).lower():
        members_array = []
        # Paginates the full guild member list; presumably requires the
        # privileged members intent — TODO confirm bot configuration.
        async for member in message.guild.fetch_members(limit=None):
            members_array.append(member)
        await message.channel.send(random.choice(members_array).mention)
client.run("your bot token here")
|
#!/usr/bin/env python3
import logging
from dataclasses import dataclass, field
from .parsing import parse_input, parse_output
LOGGER = logging.getLogger(__name__)
@dataclass
class Score:
    """Accumulates individual scores and keeps a running total."""
    scores: list = field(default_factory=list)
    total: int = 0

    def add(self, other):
        """Record one score and fold it into the total."""
        self.total += other
        self.scores.append(other)
def set_log_level(args):
    """Configure root logging: DEBUG when --debug was passed, else INFO."""
    level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(level=level)
def compute_score(file_in, file_out):
    """
    Compute score (with bonus) of submission

    :param file_in: input file
    :param file_out: output file (solution)
    :return: Score with one point per satisfied client
    """
    # read input and output files
    problem_set = parse_input(file_in)
    solution = parse_output(file_out)
    s = Score()
    # problem_set[0] appears to be the client list — TODO confirm parse_input's shape
    for client in problem_set[0]:
        # check if all likable ingredients are in,
        # and none of the disliked ones
        if client.likes.issubset(solution) and client.dislikes.isdisjoint(solution):
            s.add(1)
    return s
if __name__ == '__main__':
    import argparse

    # CLI: score an output file against its input problem definition.
    parser = argparse.ArgumentParser(
        description='print score',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('file_in', type=str, help='input file e.g. a_example.in')
    parser.add_argument('file_out', type=str, help='output file e.g. a_example.out')
    parser.add_argument('--debug', action='store_true', help='set debug level')
    cli_args = parser.parse_args()
    set_log_level(cli_args)
    result = compute_score(cli_args.file_in, cli_args.file_out)
    print("Score for {}: {} points".format(cli_args.file_out, result.total))
|
"""
Utilities functions for A* algorithm.
Author : Ismail Sunni
Email : imajimatika@gmail.com
Date : Jun 2019
"""
import json
from osgeo import ogr, osr
def get_nodes(G, key, value):
    """Return the list of nodes of ``G`` whose attribute ``key`` equals ``value``.

    Uses the legacy networkx ``G.node`` attribute-dict accessor.
    """
    return [n for n in G.nodes if G.node[n].get(key) == value]
def pretty_node(node):
    """Render a 2-tuple node as the string '(x, y)'."""
    x, y = node
    return '({}, {})'.format(x, y)
def print_dictionary(dictionary, prefix=''):
    """Print each key/value pair on its own line, prepended with ``prefix``."""
    for key, value in dictionary.items():
        print(f"{prefix}{key}: {value}")
def print_node(graph, node):
    """Helper to print `node` from `graph` together with its attribute dict
    (attributes are indented with a tab)."""
    print("Key: %s" % pretty_node(node))
    print_dictionary(graph.node[node], prefix='\t')
def calculate_distance(a, b):
    """Euclidean distance between 2D points ``a`` and ``b`` (x, y tuples)."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return (dx * dx + dy * dy) ** 0.5
def nodes_from_path(G, path, key=''):
    """Map a path to the nodes' attribute values under ``key``.

    With an empty ``key`` the path itself is returned unchanged.
    """
    if not key:
        return path
    return [G.node[node][key] for node in path]
def get_spatial_reference(path):
    """Helper to get the spatial reference of the first layer that has one
    in the OGR data source at ``path``.

    Raises RuntimeError when the data source cannot be opened.
    NOTE(review): if the source contains no layers at all, the variable is
    never bound and the return raises NameError — confirm callers only pass
    non-empty sources.
    """
    layers = ogr.Open(path)
    if layers is None:
        raise RuntimeError("Unable to open {}".format(path))
    for layer in layers:
        spatial_reference = layer.GetSpatialRef()
        if spatial_reference:
            break
    return spatial_reference
def create_path_layer(G, path, output_file, spatial_reference):
    """Write the edges along ``path`` (a node sequence in graph ``G``) to an
    ESRI Shapefile at ``output_file``, preserving selected edge attributes
    plus a sequential 'order' field.

    Returns ``output_file``.
    """
    # Create geometry from the edges
    print('Create geometry')
    # set up the shapefile driver
    driver = ogr.GetDriverByName("ESRI Shapefile")
    # create the data source
    data_source = driver.CreateDataSource(output_file)
    # create the layer
    layer = data_source.CreateLayer("A Star Shortest Path", spatial_reference, ogr.wkbLineString)
    # Create fields (using default field first, TODO:read from data)
    # fid
    # layer.CreateField(ogr.FieldDefn("fid", ogr.OFTReal))
    # streetID
    layer.CreateField(ogr.FieldDefn("streetID", ogr.OFTInteger64))
    # length
    layer.CreateField(ogr.FieldDefn("length", ogr.OFTReal))
    # u
    layer.CreateField(ogr.FieldDefn("u", ogr.OFTInteger64))
    # v
    layer.CreateField(ogr.FieldDefn("v", ogr.OFTInteger64))
    # length_sc
    layer.CreateField(ogr.FieldDefn("length_sc", ogr.OFTReal))
    # order (added field to get the order)
    layer.CreateField(ogr.FieldDefn("order", ogr.OFTInteger))
    # Edge attributes copied verbatim into the shapefile schema above.
    fields = [
        # 'fid',
        'streetID',
        'length',
        'u',
        'v',
        'length_sc'
    ]
    # Iterate over the path edges
    for i in range(len(path) - 1):
        node1 = path[i]
        node2 = path[i+1]
        edge = G.edges[node1, node2]
        feature = ogr.Feature(layer.GetLayerDefn())
        for field in fields:
            feature.SetField(field, edge[field])
        # record the position of this edge along the path
        feature.SetField('order', i)
        # Create geometry from the Wkt
        geom = ogr.CreateGeometryFromWkt(edge['Wkt'])
        # Set the feature geometry using the geom
        feature.SetGeometry(geom)
        # Create the feature in the layer (shapefile)
        layer.CreateFeature(feature)
        # Dereference the feature
        feature = None
    # Save and close the data source
    data_source = None
    return output_file
def get_points(G, edge_key):
    """Return the coordinate list stored in the 'Json' attribute of the edge
    ``edge_key`` in graph ``G``."""
    raw = G.edges[edge_key]['Json']
    return json.loads(raw)['coordinates']
def graph_summary(graph):
    """Print the summary (node and edge counts) of a `graph`."""
    print('Summary of Graph:')
    print('Number of nodes in G: %s' % graph.number_of_nodes())
    print('Number of edges in G: %s' % graph.number_of_edges())
if __name__ == "__main__":
    # Smoke test against a hard-coded local data directory.
    path = '/home/ismailsunni/Documents/GeoTech/Routing/processed/small_data/'
    spatial_reference = get_spatial_reference(path)
    print(spatial_reference)
    print(spatial_reference.exportToEPSG())
    print('fin')
|
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import resnet50, resnet18
from torch.hub import download_url_to_file
from face_recognition.utils.constants import TRAINED_WEIGHTS_DIR, FACESECURE_MODEL
device = "cuda" if torch.cuda.is_available() else "cpu"
def load_pretrained(weight_path=TRAINED_WEIGHTS_DIR):
    """Load a FaceNetResnet from a checkpoint containing 'model_state_dict'.

    Weights are mapped onto the detected device (CUDA if available).
    """
    model = FaceNetResnet()
    model.load_state_dict(torch.load(weight_path, map_location=torch.device(device))['model_state_dict'])
    return model
def get_model(pretrained=True, model_path=FACESECURE_MODEL):
    """Load a FaceNet from a plain state-dict checkpoint at ``model_path``.

    ``pretrained`` is forwarded to the FaceNet constructor (resnet50 backbone
    init) before the saved weights overwrite the parameters.
    """
    model = FaceNet(pretrained)
    model.load_state_dict(torch.load(model_path, map_location=torch.device(device)))
    return model
class FaceNet(nn.Module):
    """Face-embedding network: resnet50 convolutional backbone (no avgpool/fc)
    followed by a flatten + linear projection to ``embedding_size``.

    forward() returns L2-normalized embeddings scaled by alpha = 10.
    """
    def __init__(self, pretrained=False, embedding_size=128):
        super(FaceNet, self).__init__()
        resnet = resnet50(pretrained)
        # Reuse all resnet50 stages but drop its avgpool and fc head.
        self.backbone = nn.Sequential(
            resnet.conv1,
            resnet.bn1,
            resnet.relu,
            resnet.maxpool,
            resnet.layer1,
            resnet.layer2,
            resnet.layer3,
            resnet.layer4
        )
        in_features = 2048 * 7 * 7  # input image of backbone is of shape (3, 224, 224)
        self.embedder = nn.Sequential(
            nn.Flatten(),
            nn.Linear(in_features, embedding_size)
        )
    def forward(self, x):
        """Return scaled unit-norm embeddings for a batch of images."""
        x = self.backbone(x)
        x = self.embedder(x)
        # Project onto the unit hypersphere, then scale.
        x = F.normalize(x, p=2, dim=1)
        x = x * 10 # alpha = 10
        return x
class FaceNetResnet(nn.Module):
    '''FaceNet variant with a resnet18 backbone whose fc head is replaced by
    a linear + BatchNorm1d projection to ``embedding_dimension``.

    forward() returns L2-normalized embeddings scaled by alpha = 10.
    '''
    def __init__(self, embedding_dimension=256, pretrained=False):
        super(FaceNetResnet, self).__init__()
        self.model = resnet18(pretrained=pretrained)
        input_features_fc_layer = self.model.fc.in_features
        # Swap the classification head for an embedding projection.
        self.model.fc = nn.Sequential(
            nn.Linear(input_features_fc_layer, embedding_dimension, bias=False),
            nn.BatchNorm1d(embedding_dimension, eps=0.001, momentum=0.1, affine=True)
        )
    def forward(self, x):
        """Return scaled unit-norm embeddings for a batch of images."""
        x = self.model(x)
        x = F.normalize(x, p=2, dim=1)
        alpha = 10
        x = x * alpha
        return x
|
import logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
from multiprocessing import Process, Queue
from queue import Empty
import RPi.GPIO as GPIO
import os
import json
import requests
import serial
import time
from signal import *
import secrets
DEBUG = os.environ.get('DEBUG', False)
RELAY_PIN = 17
RFID_EN_PIN = 27
CARDS_FILE = 'card_data.json'
OPEN_DURATION = 4
API_STATS = 'https://api.my.protospace.ca/stats/'
API_DOOR = 'https://api.my.protospace.ca/door/'
API_SEEN = lambda x: 'https://api.my.protospace.ca/door/{}/seen/'.format(x)
ser = None
def unlock_door():
    """Open the door for OPEN_DURATION seconds, then re-lock.

    Blocks the caller for the whole open interval. The RFID enable pin is
    raised together with the relay — presumably to mute the reader while
    the door is open (TODO confirm wiring).
    """
    GPIO.output(RELAY_PIN, GPIO.HIGH)
    GPIO.output(RFID_EN_PIN, GPIO.HIGH)
    time.sleep(OPEN_DURATION)
    GPIO.output(RELAY_PIN, GPIO.LOW)
    GPIO.output(RFID_EN_PIN, GPIO.LOW)
def lock_door_on_exit(*args):
    """Signal handler: force the door locked, then hard-exit the process."""
    logging.info('Exiting, locking door...')
    GPIO.output(RELAY_PIN, GPIO.LOW)
    GPIO.output(RFID_EN_PIN, GPIO.LOW)
    # os._exit skips cleanup handlers; the pins are already safe at this point.
    os._exit(0)
def init():
    """Initialize GPIO outputs (locked state), the RFID serial port, and
    signal handlers that re-lock the door on termination."""
    # NOTE(review): 'cards' is declared global but never assigned here.
    global ser, cards
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(RELAY_PIN, GPIO.OUT)
    GPIO.output(RELAY_PIN, GPIO.LOW)
    GPIO.setup(RFID_EN_PIN, GPIO.OUT)
    GPIO.output(RFID_EN_PIN, GPIO.LOW)
    logging.info('GPIO initialized')
    ser = serial.Serial(port='/dev/ttyAMA0', baudrate=2400, timeout=0.1)
    logging.info('Serial initialized')
    # Any fatal signal locks the door before exiting.
    for sig in (SIGABRT, SIGILL, SIGINT, SIGSEGV, SIGTERM):
        signal(sig, lock_door_on_exit)
    logging.info('Signals initialized')
def reader_thread(card_data_queue):
    """Read card numbers from the RFID serial line and unlock for known cards.

    Runs forever in its own process. Starts from the card table cached on
    disk and swaps in refreshed tables published by update_thread via
    ``card_data_queue``.
    """
    recent_scans = {}  # card number -> last accepted scan time (debounce)
    with open(CARDS_FILE, 'r') as f:
        card_data = json.load(f)
    logging.info('Read {} card numbers from disk'.format(str(len(card_data))))
    while True:
        # Non-blocking check for a refreshed card table.
        try:
            card_data = card_data_queue.get_nowait()
        except Empty:
            pass
        card = ser.readline()
        if not card: continue
        card = card.decode().strip()
        if len(card) != 10: continue  # reader emits fixed 10-char ids; drop noise
        # debounce card scans
        now = time.time()
        if card in recent_scans:
            if now - recent_scans[card] < 5.0:
                continue
        recent_scans[card] = now
        logging.info('Read card: ' + card)
        if card in card_data:
            logging.info('Card recognized')
        else:
            logging.info('Card not recognized, denying access')
            continue
        logging.info('DOOR ACCESS - Card: {} | Name: {}'.format(
            card, card_data[card],
        ))
        unlock_door()
        # Best-effort: report the scan to the API; failures are only logged.
        try:
            res = requests.post(API_SEEN(card), timeout=2)
            res.raise_for_status()
        except BaseException as e:
            logging.error('Problem POSTing seen: {} - {}'.format(e.__class__.__name__, str(e)))
            continue
def update_thread(card_data_queue):
    """Poll the API every 5 s and publish refreshed card tables to the reader.

    Uses ``last_card_change`` from the stats endpoint to skip downloads when
    nothing changed; on a failed download the marker is reset so the next
    iteration retries the full fetch.
    """
    last_card_change = None
    while True:
        time.sleep(5)
        try:
            res = requests.get(API_STATS, timeout=5)
            res.raise_for_status()
            res = res.json()
        except BaseException as e:
            logging.error('Problem GETting stats: {} - {}'.format(e.__class__.__name__, str(e)))
            continue
        if res['last_card_change'] == last_card_change:
            continue
        last_card_change = res['last_card_change']
        logging.info('Cards changed, pulling update from API')
        try:
            headers = {'Authorization': 'Bearer ' + secrets.DOOR_API_KEY}
            res = requests.get(API_DOOR, headers=headers, timeout=5)
            res.raise_for_status()
            res = res.json()
        except BaseException as e:
            logging.error('Problem GETting door: {} - {}'.format(e.__class__.__name__, str(e)))
            last_card_change = None
            continue
        logging.info('Got {} cards from API'.format(str(len(res))))
        # Hand the new table to reader_thread, then persist it for restarts.
        card_data_queue.put(res)
        logging.info('Writing data to file')
        with open(CARDS_FILE, 'w') as f:
            json.dump(res, f)
def watchdog_thread():
    """Kick the hardware watchdog once per second so the Pi reboots on hang."""
    while True:
        with open('/dev/watchdog', 'w') as wdt:
            wdt.write('1')
        time.sleep(1)
if __name__ == '__main__':
    logging.info('Initializing...')
    init()
    # Queue carries refreshed card tables from update_thread to reader_thread.
    card_data = Queue()
    Process(target=reader_thread, args=(card_data,)).start()
    Process(target=update_thread, args=(card_data,)).start()
    # Watchdog would reboot the Pi during debugging sessions, so skip it then.
    if not DEBUG: Process(target=watchdog_thread).start()
|
# Copyright 2013 the Neutrino authors (see AUTHORS).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
# Test case generator for python
from framework import *
class PythonAssembly(AbstractAssembly):
  """Builds a chained-call expression string for assembly operations.

  Each builder method records one rendered call; to_string() joins them
  into a lambda that replays the calls on an ``assm`` object.
  """

  def __init__(self):
    # Rendered ops in insertion order, e.g. ['tag(3)', 'int32(7)'].
    self.ops = []

  def _push(self, op):
    """Record one rendered op and return self for chaining."""
    self.ops.append(op)
    return self

  def tag(self, code):
    return self._push('tag(%i)' % code)

  def int32(self, value):
    return self._push('int32(%i)' % value)

  def uint32(self, value):
    return self._push('uint32(%i)' % value)

  def blob(self, bytes):
    return self._push('blob(bytearray(%s))' % str(list(bytes)))

  def to_string(self):
    """Render the recorded ops as a chained lambda expression."""
    return "lambda assm: assm.%s" % (".".join(self.ops))
class PythonGenerator(object):
  """Emits Python unittest source code for plankton serialization test cases."""

  def new_assembly(self):
    """Return a fresh assembly builder for one test case."""
    return PythonAssembly()

  def emit_value(self, value, out):
    """Write the Python expression that reconstructs ``value`` into ``out``.

    Handles ints/None, strings, sequences, dicts, environment references and
    plain objects. Reference types are emitted once and then reused through
    ``ctx.get_ref``, tracked per-test in ``out.refs``.
    """
    # Idiom fix: 'is None' instead of '== None'. Note bools also pass the
    # isinstance(int) check, matching the original behavior.
    if isinstance(value, int) or (value is None):
      out.append(str(value))
    elif isinstance(value, str):
      out.append("\"%s\"" % value)
    elif isinstance(value, (tuple, list)):  # merged the two isinstance checks
      out.append('[')
      first = True
      for element in value:
        if first:
          first = False
        else:
          out.append(', ')
        out.append(E(element))
      out.append(']')
    elif isinstance(value, dict):
      # Keys are sorted so generated source is deterministic.
      out.append('{\n').indent(+1)
      first = True
      for key in sorted(value.keys()):
        if first:
          first = False
        else:
          out.append(',\n')
        out.append(E(key), ': ', E(value[key]))
      out.indent(-1).append('}')
    elif isinstance(value, EnvironmentReference):
      serial = value.serial
      if serial in out.refs:
        out.append('ctx.get_ref(%i)' % serial)
      else:
        out.refs[serial] = value
        out.append('ctx.new_env_ref(', E(value.key), ', id=%i)' % serial)
    else:
      assert isinstance(value, Object)
      serial = value.serial
      if serial in out.refs:
        out.append('ctx.get_ref(%i)' % serial)
      else:
        out.refs[serial] = value
        (out
          .append('(ctx.new_object(id=%i)\n' % serial)
          .indent(+1)
          .append('.set_header(', E(value.header), ')\n')
          .append('.set_payload(', E(value.payload), '))')
          .indent(-1))

  def emit_test(self, test, out):
    """Emit one test method for ``test``; resets the shared-reference table."""
    out.refs = {}
    (out
      .append('\ndef test_%s(self):\n' % test.name)
      .indent(+1)
      .append('ctx = self.new_context()\n')
      .append('input = ', E(test.input), '\n')
      .append('assemble = ', test.assembly.to_string(), '\n')
      .append('self.run_test(input, assemble)\n')
      .indent(-1))

  def emit_preamble(self, out):
    """Emit the generated-file header, imports, and the test-class opening."""
    (out
      .append('# This test was generated from tests/gen/plankton/testdata.py.\n')
      .append('# Don\'t modify it by hand.\n\n\n')
      .append('import unittest\n')
      .append('import planktontest\n\n\n')
      .append('class PlanktonTests(planktontest.TestCase):\n')
      .indent(+1))

  def emit_epilogue(self, out):
    """Emit the unittest main-runner footer."""
    (out
      .indent(-1)
      .append('\n\nif __name__ == \'__main__\':\n')
      .indent(+1)
      .append('runner = unittest.TextTestRunner(verbosity=0)\n')
      .append('unittest.main(testRunner=runner)\n')
      .indent(-1))
|
from django.db import models
class BaseTable(models.Model):
    """
    Abstract base model providing shared audit timestamp columns.
    """
    class Meta:
        abstract = True
        verbose_name = "公共字段表"
        db_table = 'BaseTable'
    # Set once when the row is inserted.
    create_time = models.DateTimeField('创建时间', auto_now_add=True)
    # Refreshed automatically on every save().
    update_time = models.DateTimeField('更新时间', auto_now=True)
class Project(BaseTable):
    """
    Project information table (name, short description, owner).
    """
    class Meta:
        verbose_name = "项目信息"
        verbose_name_plural = verbose_name
    name = models.CharField("项目名称", unique=True, null=False, max_length=100)
    desc = models.CharField("简要介绍", max_length=100, null=False)
    responsible = models.CharField("负责人", max_length=20, null=False)
    def __str__(self):
        return self.name
class Config(BaseTable):
    """
    Environment configuration table; unique per (project, name).
    """
    class Meta:
        verbose_name = "配置信息"
        verbose_name_plural = verbose_name
        unique_together = [['project', 'name']]
    name = models.CharField("环境名称", null=False, max_length=100)
    body = models.TextField("主体信息", null=False)
    base_url = models.CharField("请求地址", null=True, blank=True, max_length=100)
    project = models.ForeignKey(Project, on_delete=models.CASCADE)
    def __str__(self):
        return self.name
class API(BaseTable):
    """
    API definition table (request URL/method/body, attached to a tree node).
    """
    class Meta:
        verbose_name = "接口信息"
        verbose_name_plural = verbose_name
    name = models.CharField("接口名称", null=False, max_length=100)
    body = models.TextField("主体信息", null=False)
    url = models.CharField("请求地址", null=False, max_length=200)
    method = models.CharField("请求方式", null=False, max_length=10)
    project = models.ForeignKey(Project, on_delete=models.CASCADE)
    # id of the node in the project's Relation tree this API belongs to
    relation = models.IntegerField("节点id", null=False)
    def __str__(self):
        return self.name
class Case(BaseTable):
    """
    Test-case table (ordered collection of API steps, see CaseStep).
    """
    class Meta:
        verbose_name = "用例信息"
        verbose_name_plural = verbose_name
    # Choice tuples for the 'tag' field below; the name is deliberately
    # rebound to the IntegerField afterwards.
    tag = (
        (1, "冒烟用例"),
        (2, "集成用例"),
        (3, "监控脚本")
    )
    name = models.CharField("用例名称", null=False, max_length=500)
    project = models.ForeignKey(Project, on_delete=models.CASCADE, help_text="所属项目")
    # id of the node in the project's Relation tree this case belongs to
    relation = models.IntegerField("节点id", null=False)
    length = models.IntegerField("API个数", null=False)
    tag = models.IntegerField("用例标签", choices=tag, default=2)
    def __str__(self):
        return self.name
class CaseStep(BaseTable):
    """
    One ordered API step inside a test Case (snapshot of the source API).
    """
    class Meta:
        verbose_name = "用例信息 Step"
        verbose_name_plural = verbose_name
    name = models.CharField("api名称", null=False, max_length=100)
    body = models.TextField("主体信息", null=False)
    url = models.CharField("请求地址", null=False, max_length=300)
    method = models.CharField("请求方式", null=False, max_length=10)
    case = models.ForeignKey(Case, on_delete=models.CASCADE, help_text="所属case")
    # execution order within the case
    step = models.IntegerField("api顺序", null=False)
    # id of the API this step was copied from (0 = no source API)
    apiId = models.IntegerField('所属api_id', null=False, default=0)
    def __str__(self):
        return self.name
class HostIP(BaseTable):
    """
    Per-project host/environment mapping; unique per (project, name).
    """
    class Meta:
        verbose_name = "HOST配置"
        verbose_name_plural = verbose_name
        unique_together = [['project', 'name']]
    name = models.CharField(null=False, max_length=20, help_text="环境名称")
    hostInfo = models.TextField(null=False, help_text="环境信息详情")
    project = models.ForeignKey(Project, on_delete=models.CASCADE, help_text="所属项目")
    base_url = models.URLField(null=True, blank=True, help_text="环境根地址")
    # desc = models.CharField(null=True, blank=True, help_text="描述信息", max_length=100, default="")
    def __str__(self):
        return self.name
class Variables(BaseTable):
    """
    Project-scoped global variables (key/value pairs).
    """
    class Meta:
        verbose_name = "全局变量"
        verbose_name_plural = verbose_name
    key = models.CharField(null=False, max_length=100)
    value = models.CharField(null=False, max_length=1024)
    project = models.ForeignKey(Project, on_delete=models.CASCADE)
    def __str__(self):
        return self.key
class Report(BaseTable):
    """
    Test report summary storage (full body lives in ReportDetail).
    """
    # Choice tuples for the 'type' field: debug / async / scheduled runs.
    report_type = (
        (1, "调试"),
        (2, "异步"),
        (3, "定时")
    )
    class Meta:
        verbose_name = "测试报告"
        verbose_name_plural = verbose_name
    name = models.CharField("报告名称", null=False, max_length=100)
    type = models.IntegerField("报告类型", choices=report_type)
    summary = models.TextField("简要主体信息", null=False)
    project = models.ForeignKey(Project, on_delete=models.CASCADE)
    def __str__(self):
        return self.name
class ReportDetail(BaseTable):
    """
    Full report body, one-to-one with its Report summary row.
    """
    class Meta:
        verbose_name = "测试报告详情"
        verbose_name_plural = verbose_name
    name = models.CharField("报告名称", null=False, max_length=100)
    summary = models.TextField("主体信息", null=False)
    project = models.ForeignKey(Project, on_delete=models.CASCADE)
    report = models.OneToOneField(Report, on_delete=models.CASCADE)
    def __str__(self):
        return self.name
class Relation(models.Model):
    """
    Tree structure of a project's nodes, stored as serialized text.
    """
    class Meta:
        verbose_name = "树形结构关系"
        verbose_name_plural = verbose_name
    project = models.ForeignKey(Project, on_delete=models.CASCADE)
    # BUG FIX: default was the mutable list ``[]`` (Django fields.W010); a
    # TextField default must be a string, so use the serialized empty list —
    # the stored DB value (str([]) == "[]") is unchanged.
    tree = models.TextField("结构主题", null=False, default="[]")
    type = models.IntegerField("树类型", default=1)
class ModelWithFileField(BaseTable):
    """
    Uploaded test-data file registry; unique per (project, name).
    """
    class Meta:
        verbose_name = "文件信息表"
        verbose_name_plural = verbose_name
        unique_together = [['project', 'name']]
    project = models.ForeignKey(Project, on_delete=models.CASCADE)
    name = models.CharField(max_length=50)
    file = models.FileField(upload_to='testdatas', unique=True, null=True, blank=True)
    # id of the node in the project's Relation tree this file belongs to
    relation = models.IntegerField("节点id", null=False, default=1)
    # cascaded sheet/cell data extracted from the uploaded Excel file
    excel_tree = models.TextField("excel的级联数据", null=True, blank=True)
    def __str__(self):
        return self.name
class LockFiles(BaseTable):
    """
    Records locks held on rows of other tables (currently test-data files).
    """
    class Meta:
        verbose_name = "锁定文件信息表"
        verbose_name_plural = verbose_name
        unique_together = [['project', 'lock_type', 'file_id']]
        # locks are only ever created through the app, never edited/deleted
        default_permissions = ('add',)
    # Choice tuples for lock_type below.
    tag = (
        (1, "测试数据"),
    )
    project = models.ForeignKey(Project, on_delete=models.CASCADE)
    # NOTE(review): CharField with integer choice values — stored values will
    # be the string form of the ints; confirm this is intentional.
    lock_type = models.CharField(choices=tag, max_length=2, verbose_name="锁定哪个信息表")
    file_id = models.IntegerField(verbose_name="锁定文件的id")
    def __str__(self):
        return 'table:%s - id:%s' % (self.lock_type, self.file_id)
class Pycode(BaseTable):
    """
    Driver-code library: user-maintained Python snippets per project;
    unique per (project, name).
    """
    class Meta:
        verbose_name = "驱动文件库"
        verbose_name_plural = verbose_name
        unique_together = [['project', 'name']]
    code = models.TextField("python代码", default="# _*_ coding:utf-8 _*_", null=False)
    project = models.ForeignKey(Project, on_delete=models.CASCADE)
    name = models.CharField(max_length=30, null=False)
    desc = models.CharField("简要介绍", max_length=100, null=True, blank=True)
    def __str__(self):
        return self.name
|
"""
TeSS to iAnn events sync v 0.0
This module provides a schedulable process that automatically syncs events
from TeSS to the iAnn event registry.
"""
from dateutil.parser import parse
from pytz import UTC as utc
from datetime import datetime
import pysolr
import urllib2
import json
from docs import conf
import logging
import daemon
import time
import click
import os
import sys
WELCOME_MSJ = 'ELIXIR TeSS to iAnn events synchronizer script V 0.0'
def init():
    """Configure file logging (append mode) using conf.LOG_FILE at INFO level."""
    logging.basicConfig(filename=conf.LOG_FILE, level=logging.INFO,
                        format='%(levelname)s[%(asctime)s]: %(message)s',
                        datefmt='%m/%d/%Y %I:%M:%S %p', filemode='a')
def map_tess_to_iann(tess_event=None):
    """
    Map a TeSS event to an iAnn event (Python 2 module: uses iteritems).

    :param tess_event: dictionary that represents an event in TeSS
    :return: dictionary that represents an event in iAnn (empty for falsy input)
    """
    iann_event = dict()
    if not tess_event:
        return iann_event
    # Copy every mapped field; falsy source values are normalized to None.
    for tess_field, iann_field in conf.TESS_TO_IANN_MAPPER.iteritems():
        if tess_field in tess_event:
            if tess_event[tess_field]:
                iann_event[iann_field] = tess_event[tess_field]
            else:
                iann_event[iann_field] = None
    # Every synced record carries the 'event' category.
    iann_event['category'] = iann_event['category'] + ['event']\
        if 'category' in iann_event and iann_event['category'] else ['event']
    # NOTE(review): these two lines raise KeyError unless the mapper always
    # yields 'city' and 'country' keys — confirm TESS_TO_IANN_MAPPER.
    iann_event['city'] = " " if not iann_event['city'] else iann_event['city']
    iann_event['country'] = " " if not iann_event['country'] else iann_event['country']
    # Rename TeSS category values to the iAnn vocabulary.
    for index, item in enumerate(iann_event['category']):
        if item == 'workshops_and_courses':
            iann_event['category'][index] = 'course'
        if item == 'meetings_and_conferences':
            iann_event['category'][index] = 'meeting'
    return iann_event
def get_tess_events_from_url(query_url):
    """
    Gets the events data for a given TeSS query URL.

    Retries every 60 seconds until TeSS answers. BUG FIX: an iterative retry
    loop replaces the original recursive self-call, which could exhaust the
    interpreter stack during a long outage.

    :param query_url: query url
    :return: all the events retrieved for the page
    """
    logging.info("Query to TeSS web service: " + query_url)
    while True:
        try:
            # Execute the HTTP query
            page_data = urllib2.urlopen(query_url)
            # Load the JSON data into a Python dict
            return json.load(page_data)
        except urllib2.URLError:
            logging.error("Failed to get events from tess, retrying in 60 seconds")
            time.sleep(60)
def get_tess_all_events(start_dt, expired=False):
    """
    Retrieves all the data available from TeSS.

    :param start_dt: date to start harvesting events
    :param expired: flag to indicate if expired events must be fetched
    :return: dict with the harvest window ('start', 'end') and the converted 'events'
    """
    harvested = []
    end_dt = utc.localize(datetime.now())
    logging.info('Starting harvesting of TeSS events data from ' + str(start_dt) + ' to ' + str(end_dt))
    page = 1
    # Walk the paginated TeSS listing until an empty page signals the end.
    while True:
        logging.info('Retrieving TeSS data for page # %d' % page)
        url = conf.TESS_URL + 'events.json' + '?' + 'page=%d' % page
        if expired:
            url += '&include_expired=true'
        batch = get_tess_events_from_url(url)
        if not batch:
            logging.info('Finishing harvesting')
            break
        harvested.extend(batch)
        page += 1
    logging.info('Performing conversion from TeSS events to iAnn events')
    # Keep only events updated inside the harvest window, mapped onto iAnn fields.
    iann_events = [map_tess_to_iann(ev) for ev in harvested
                   if start_dt < parse(ev['updated_at']) < end_dt]
    logging.info('Conversion done! %d updated events retrieved and converted' % len(iann_events))
    return dict(start=start_dt, end=end_dt, events=iann_events)
def push_to_iann(events):
    """
    Adds data to iAnn Solr from a Solr data structure.

    :param events: list of events to be pushed to iAnn Solr
    """
    # Connect to the target Solr core and submit all documents in one call.
    iann_solr = pysolr.Solr(conf.IANN_URL, timeout=10)
    iann_solr.add(events)
def delete_all_iann():
    # Wipe every document from the iAnn Solr core ('*:*' matches all docs).
    solr = pysolr.Solr(conf.IANN_URL, timeout=10)
    solr.delete(q='*:*')
def reset_iann():
    """
    Deletes all the data contained in iAnn and re-pushes the full TeSS
    history (including expired events).

    On Solr errors the whole reset (including the re-fetch from TeSS) is
    retried every 10 minutes. BUG FIXES: the original slept 36000 seconds
    (10 hours) while the log message promised 10 minutes, and retried via
    unbounded recursion that could exhaust the stack.
    """
    while True:
        start = utc.localize(parse('2000-01-01'))
        results = get_tess_all_events(start, True)
        logging.info('Trying to delete all iAnn Events')
        try:
            delete_all_iann()
            push_to_iann(results['events'])
            logging.info('Last End:' + str(results['end']))
            return
        except pysolr.SolrError:
            logging.info('There was an error trying to push the events to iAnn Solr, will try again in 10 minutes')
            time.sleep(600)  # 10 minutes, matching the message above
def get_start_from_log():
    """
    Scan the log file backwards for the most recent 'Last End:' marker.

    :return: the text following the marker (a timestamp string), or None
             if the marker is not present
    """
    log_start = None
    # BUG FIX: 'with' guarantees the log file handle is closed; the original
    # left the handle from open() dangling.
    with open(conf.LOG_FILE) as log_file:
        for line in reversed(log_file.readlines()):
            if 'Last End:' in line:
                log_start = line.split('Last End:')[1]
                break
    return log_start
@click.command()
@click.option('--delay', default=10, help='Seconds between executions when the script is run as a daemon (eg. 60)')
@click.option('--log', default=conf.LOG_FILE, help='Log file absolute path, if not defined will use'
                                                   'the one on the conf.py (eg. /Users/niceusername/logs/ny_log.txt)')
@click.option('--tess_url', default=conf.TESS_URL, help='TeSS service URL, if not defined will use the one on conf.py'
                                                        '(eg. http://tess.elixir-uk.org/)')
@click.option('--iann_url', default=conf.IANN_URL, help='iAnn Solr URL, if not defined will use the one on conf.py'
                                                        '(eg. http://localhost:8983/solr/iann)')
@click.option('--daemonize', is_flag=True, help='Flag to run the script as a daemon')
@click.option('--include_expired', is_flag=True, help='Flag to fetch expired events from TeSS')
@click.option('--start', default=None, help='Start date (eg. 2000-01-01)')
@click.option('--reset', is_flag=True, help='Flag to reset the Solr target instance and retrieve all the TeSS events'
                                            '\nBE CAREFUL!!! '
                                            'This option will erase all the events on iAnn and will do a'
                                            'complete fetch of the TeSS events.')
@click.option('--daily_reset_time', default=None, help='Time of the day to do the Solr instance reset (eg. 10:30)'
                                                       '\nBE CAREFUL!!!'
                                                       'This option will erase all the events on iAnn and will do a'
                                                       'complete fetch of the TeSS events.')
def run(delay, log, tess_url, iann_url, daemonize, start, include_expired, reset, daily_reset_time):
    """
    ELIXIR TeSS to iAnn events synchronizer script V 0.0
    Script to get TeSS events and push them to iAnn. It can be used as a batch process or as a daemon process.
    """
    # CLI values override the module defaults read from docs.conf.
    conf.LOG_FILE = log
    conf.TESS_URL = tess_url
    conf.IANN_URL = iann_url
    # Start-date precedence: --start flag > last 'Last End:' recorded in the
    # log of a previous run > epoch default of 2000-01-01.
    log_start = get_start_from_log()
    if start:
        start = utc.localize(parse(start))
    elif log_start:
        start = parse(log_start)
    else:
        start = utc.localize(parse('2000-01-01'))
    click.secho(WELCOME_MSJ, fg='yellow', bg='red', bold=True)
    if not daemonize:
        # One-shot (batch) mode: single harvest + push, then exit.
        click.secho('Fetching events from TeSS', fg='blue', bold=True)
        init()
        if reset:
            reset_iann()
            return
        results = get_tess_all_events(start, include_expired)
        try:
            push_to_iann(results['events'])
            click.secho('Done!', fg='blue', bold=True)
            # 'Last End:' is the marker get_start_from_log() looks for.
            logging.info('Last End:' + str(results['end']))
        except pysolr.SolrError:
            logging.info('There was an error trying to push the events to iAnn Solr')
        return
    click.secho('Fetching events from TeSS every %d seconds' % delay, fg='blue', bold=True)
    # NOTE(review): stderr is redirected to sys.stdout here — confirm intentional.
    with daemon.DaemonContext(stdout=sys.stdout, stderr=sys.stdout):
        init()
        click.secho('Process ID: %d' % os.getpid(), fg='red', bold=True, blink=True)
        logging.info('Fetching events from TeSS every %d seconds' % delay)
        logging.info('Process ID: %d' % os.getpid())
        while True:
            if daily_reset_time:
                hour = int(daily_reset_time.split(':')[0])
                minute = int(daily_reset_time.split(':')[1])
                t = utc.localize(datetime.now())
                reset_time = utc.localize(datetime(t.year, t.month, t.day, hour, minute))
                # A reset is due when the configured daily time falls inside
                # the (last-sync, now) window.
                if start < reset_time < t:
                    reset_iann()
                    start = t
                    continue
            results = get_tess_all_events(start, include_expired)
            try:
                push_to_iann(results['events'])
                start = results['end']
                logging.info('Last End:' + str(start))
            except pysolr.SolrError:
                logging.info('There was an error trying to push the events to iAnn Solr, will try again in '
                             + str(delay) + ' seconds')
            time.sleep(delay)
# Python 2 entry point: note the legacy "except E, err" and print-statement
# syntax — this module targets Python 2 (it also uses urllib2 above).
if __name__ == "__main__":
    try:
        run()
    except AttributeError, err:
        print err
        print "Try 'python tess_to_iann.py --help' to get information about usage"
|
# https://www.acmicpc.net/problem/2581
m = int(input())
n = int(input())
isPrime = True
firstNum = 0
sumNum = 0
for i in range(m, n+1):
if i > 1:
for j in range(2, i):
if i%j == 0:
isPrime = False
break
isPrime = True
if isPrime:
if firstNum == 0:
firstNum = i
sumNum += i
if sumNum == 0:
print(-1)
else:
print(sumNum)
print(firstNum) |
# Copyright (C) 2015-2020 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from hamcrest import ( assert_that,
contains_exactly,
equal_to,
has_item,
has_entries,
has_entry,
matches_regexp )
from pprint import pformat
import os
import pytest
import requests
from ycmd.utils import ReadFile
from ycmd.tests.python import PathToTestFile, SharedYcmd
from ycmd.tests.test_utils import ( BuildRequest,
CombineRequest,
LocationMatcher,
ErrorMatcher )
# Absolute path to the typeshed stub for builtins bundled with jedi; GoTo
# results for builtin symbols are expected to land in this file.
TYPESHED_PATH = os.path.normpath(
  PathToTestFile( '..', '..', '..', '..', 'third_party', 'jedi_deps', 'jedi',
                  'jedi', 'third_party', 'typeshed', 'stdlib', '2and3', 'builtins.pyi' ) )
def RunTest( app, test ):
  """Post the /run_completer_command request described by |test| and check
  both the response code and the response body against test[ 'expect' ]."""
  file_contents = ReadFile( test[ 'request' ][ 'filepath' ] )
  request = CombineRequest( test[ 'request' ], {
    'contents': file_contents,
    'filetype': 'python',
    'command_arguments': ( [ test[ 'request' ][ 'command' ] ]
                           + test[ 'request' ].get( 'arguments', [] ) )
  } )
  # We ignore errors here and check the response code ourself.
  # This is to allow testing of requests returning errors.
  response = app.post_json( '/run_completer_command',
                            request,
                            expect_errors = True )
  print( 'completer response: {0}'.format( pformat( response.json ) ) )
  assert_that( response.status_code,
               equal_to( test[ 'expect' ][ 'response' ] ) )
  assert_that( response.json, test[ 'expect' ][ 'data' ] )
def Subcommands_GoTo( app, test, command ):
  """Run a GoTo-style |command| at the location given by test[ 'request' ].

  A tuple in test[ 'response' ] describes the expected jump target; a string
  is the message of the expected RuntimeError."""
  if isinstance( test[ 'response' ], tuple ):
    target_file, target_line, target_column = test[ 'response' ]
    expect = {
      'response': requests.codes.ok,
      'data': LocationMatcher( PathToTestFile( 'goto', target_file ),
                               target_line,
                               target_column )
    }
  else:
    expect = {
      'response': requests.codes.internal_server_error,
      'data': ErrorMatcher( RuntimeError, test[ 'response' ] )
    }
  source_file, source_line, source_column = test[ 'request' ]
  RunTest( app, {
    'description': command + ' jumps to the right location',
    'request': {
      'command'   : command,
      'filetype'  : 'python',
      'filepath'  : PathToTestFile( 'goto', source_file ),
      'line_num'  : source_line,
      'column_num': source_column
    },
    'expect': expect,
  } )
# Cross-product parametrization: every (request, response) fixture below is
# exercised once per GoTo flavour.
@pytest.mark.parametrize( 'cmd', [ 'GoTo',
                                   'GoToDefinition',
                                   'GoToDeclaration' ] )
@pytest.mark.parametrize( 'test', [
  # Nothing
  { 'request': ( 'basic.py', 3, 5 ), 'response': 'Can\'t jump to '
                                                 'definition.' },
  # Keyword
  { 'request': ( 'basic.py', 4, 3 ), 'response': 'Can\'t jump to '
                                                 'definition.' },
  # Builtin
  { 'request': ( 'basic.py', 1, 4 ), 'response': ( 'basic.py', 1, 1 ) },
  { 'request': ( 'basic.py', 1, 12 ), 'response': ( TYPESHED_PATH, 947, 7 ) },
  { 'request': ( 'basic.py', 2, 2 ), 'response': ( 'basic.py', 1, 1 ) },
  # Class
  { 'request': ( 'basic.py', 4, 7 ), 'response': ( 'basic.py', 4, 7 ) },
  { 'request': ( 'basic.py', 4, 11 ), 'response': ( 'basic.py', 4, 7 ) },
  { 'request': ( 'basic.py', 7, 19 ), 'response': ( 'basic.py', 4, 7 ) },
  # Instance
  { 'request': ( 'basic.py', 7, 1 ), 'response': ( 'basic.py', 7, 1 ) },
  { 'request': ( 'basic.py', 7, 11 ), 'response': ( 'basic.py', 7, 1 ) },
  { 'request': ( 'basic.py', 8, 23 ), 'response': ( 'basic.py', 7, 1 ) },
  # Instance reference
  { 'request': ( 'basic.py', 8, 1 ), 'response': ( 'basic.py', 8, 1 ) },
  { 'request': ( 'basic.py', 8, 5 ), 'response': ( 'basic.py', 8, 1 ) },
  { 'request': ( 'basic.py', 9, 12 ), 'response': ( 'basic.py', 8, 1 ) },
  # Member access
  { 'request': ( 'child.py', 4, 12 ),
    'response': ( 'parent.py', 2, 7 ) },
  # Builtin from different file
  { 'request': ( 'multifile1.py', 2, 30 ),
    'response': ( 'multifile2.py', 1, 24 ) },
  { 'request': ( 'multifile1.py', 4, 5 ),
    'response': ( 'multifile1.py', 2, 24 ) },
  # Function from different file
  { 'request': ( 'multifile1.py', 1, 24 ),
    'response': ( 'multifile3.py', 3, 5 ) },
  { 'request': ( 'multifile1.py', 5, 4 ),
    'response': ( 'multifile1.py', 1, 24 ) },
  # Alias from different file
  { 'request': ( 'multifile1.py', 2, 47 ),
    'response': ( 'multifile2.py', 1, 51 ) },
  { 'request': ( 'multifile1.py', 6, 14 ),
    'response': ( 'multifile1.py', 2, 36 ) },
] )
@SharedYcmd
def Subcommands_GoTo_test( app, cmd, test ):
  # Delegates to the shared GoTo helper above.
  Subcommands_GoTo( app, test, cmd )
def Subcommands_GetType( app, position, expected_message ):
  """Request GetType at |position| in GetType.py and assert the returned
  'message' equals |expected_message|."""
  test_file = PathToTestFile( 'GetType.py' )
  request = BuildRequest( filepath = test_file,
                          filetype = 'python',
                          line_num = position[ 0 ],
                          column_num = position[ 1 ],
                          contents = ReadFile( test_file ),
                          command_arguments = [ 'GetType' ] )
  response = app.post_json( '/run_completer_command', request ).json
  assert_that( response, has_entry( 'message', expected_message ) )
# ( line, column ) positions in GetType.py and the expected GetType message.
@pytest.mark.parametrize( 'position,expected_message', [
  ( ( 11, 7 ), 'instance int' ),
  ( ( 11, 20 ), 'def some_function()' ),
  ( ( 12, 15 ), 'class SomeClass(*args, **kwargs)' ),
  ( ( 13, 8 ), 'instance SomeClass' ),
  ( ( 13, 17 ), 'def SomeMethod(first_param, second_param)' ),
  # The regexp accepts the tuple element types in either order.
  ( ( 19, 4 ), matches_regexp( '^(instance str, instance int|'
                               'instance int, instance str)$' ) )
] )
@SharedYcmd
def Subcommands_GetType_test( app, position, expected_message ):
  Subcommands_GetType( app, position, expected_message )
@SharedYcmd
def Subcommands_GetType_NoTypeInformation_test( app ):
  """GetType at a position with no type information yields a RuntimeError."""
  test_file = PathToTestFile( 'GetType.py' )
  request = BuildRequest( filepath = test_file,
                          filetype = 'python',
                          line_num = 6,
                          column_num = 3,
                          contents = ReadFile( test_file ),
                          command_arguments = [ 'GetType' ] )
  response = app.post_json( '/run_completer_command',
                            request,
                            expect_errors = True ).json
  assert_that( response,
               ErrorMatcher( RuntimeError, 'No type information available.' ) )
@SharedYcmd
def Subcommands_GetDoc_Method_test( app ):
  """GetDoc on a module method returns its dedented docstring."""
  doc_file = PathToTestFile( 'GetDoc.py' )
  request = BuildRequest( filepath = doc_file,
                          filetype = 'python',
                          line_num = 17,
                          column_num = 9,
                          contents = ReadFile( doc_file ),
                          command_arguments = [ 'GetDoc' ] )
  expected_doc = ( '_ModuleMethod()\n\n'
                   'Module method docs\n'
                   'Are dedented, like you might expect' )
  response = app.post_json( '/run_completer_command', request ).json
  assert_that( response, has_entry( 'detailed_info', expected_doc ) )
@SharedYcmd
def Subcommands_GetDoc_Class_test( app ):
  """GetDoc on a class reference returns the class docstring."""
  doc_file = PathToTestFile( 'GetDoc.py' )
  request = BuildRequest( filepath = doc_file,
                          filetype = 'python',
                          line_num = 19,
                          column_num = 6,
                          contents = ReadFile( doc_file ),
                          command_arguments = [ 'GetDoc' ] )
  assert_that( app.post_json( '/run_completer_command', request ).json,
               has_entry( 'detailed_info',
                          'TestClass()\n\nClass Documentation' ) )
@SharedYcmd
def Subcommands_GetDoc_NoDocumentation_test( app ):
  """GetDoc on an undocumented symbol yields a RuntimeError."""
  doc_file = PathToTestFile( 'GetDoc.py' )
  request = BuildRequest( filepath = doc_file,
                          filetype = 'python',
                          line_num = 8,
                          column_num = 23,
                          contents = ReadFile( doc_file ),
                          command_arguments = [ 'GetDoc' ] )
  response = app.post_json( '/run_completer_command',
                            request,
                            expect_errors = True ).json
  assert_that( response,
               ErrorMatcher( RuntimeError, 'No documentation available.' ) )
# int literal -> typeshed builtins stub; instance -> its class definition;
# positions without type information -> error message.
@pytest.mark.parametrize( 'test', [
  { 'request': ( 'basic.py', 2, 1 ), 'response': ( TYPESHED_PATH, 947, 7 ) },
  { 'request': ( 'basic.py', 8, 1 ), 'response': ( 'basic.py', 4, 7 ) },
  { 'request': ( 'basic.py', 3, 1 ),
    'response': 'Can\'t jump to type definition.' },
] )
@SharedYcmd
def Subcommands_GoToType_test( app, test ):
  Subcommands_GoTo( app, test, 'GoToType' )
@SharedYcmd
def Subcommands_GoToReferences_Function_test( app ):
  """GoToReferences on a function returns its definition plus every call."""
  refs_file = PathToTestFile( 'goto', 'references.py' )
  request = BuildRequest( filepath = refs_file,
                          filetype = 'python',
                          line_num = 4,
                          column_num = 5,
                          contents = ReadFile( refs_file ),
                          command_arguments = [ 'GoToReferences' ] )
  # ( line_num, description ) for each expected reference; all at column 5.
  expected = [ ( 1, 'def f' ),
               ( 4, 'f' ),
               ( 5, 'f' ),
               ( 6, 'f' ) ]
  matchers = [ has_entries( { 'filepath': refs_file,
                              'line_num': line,
                              'column_num': 5,
                              'description': description } )
               for line, description in expected ]
  assert_that( app.post_json( '/run_completer_command', request ).json,
               contains_exactly( *matchers ) )
@SharedYcmd
def Subcommands_GoToReferences_Builtin_test( app ):
  """GoToReferences on a builtin includes the queried usage location."""
  refs_file = PathToTestFile( 'goto', 'references.py' )
  request = BuildRequest( filepath = refs_file,
                          filetype = 'python',
                          line_num = 8,
                          column_num = 1,
                          contents = ReadFile( refs_file ),
                          command_arguments = [ 'GoToReferences' ] )
  expected_entry = has_entries( { 'filepath': refs_file,
                                  'line_num': 8,
                                  'column_num': 1,
                                  'description': 'str' } )
  assert_that( app.post_json( '/run_completer_command', request ).json,
               has_item( expected_entry ) )
@SharedYcmd
def Subcommands_GoToReferences_NoReferences_test( app ):
  """GoToReferences where the symbol has no references yields a RuntimeError."""
  refs_file = PathToTestFile( 'goto', 'references.py' )
  request = BuildRequest( filepath = refs_file,
                          filetype = 'python',
                          line_num = 2,
                          column_num = 5,
                          contents = ReadFile( refs_file ),
                          command_arguments = [ 'GoToReferences' ] )
  response = app.post_json( '/run_completer_command',
                            request,
                            expect_errors = True ).json
  assert_that( response,
               ErrorMatcher( RuntimeError, 'Can\'t find references.' ) )
|
from django.http import HttpResponse, HttpResponseNotFound, HttpResponseServerError, HttpResponsePermanentRedirect, HttpResponseNotAllowed
from django.core.exceptions import ObjectDoesNotExist
from uriredirect.models import UriRegister,Profile
from uriredirect.http import HttpResponseNotAcceptable, HttpResponseSeeOther
import re
import json
from rdflib import Graph,namespace
from rdflib.term import URIRef, Literal
from rdflib.namespace import Namespace,NamespaceManager,RDF, RDFS
from django.template.loader import render_to_string
from mimeparse import best_match
from django.conf import settings
# URIs/namespaces for the W3C "alternate representations" (altr) vocabulary
# used by the content-negotiation-by-profile machinery below.
ALTR="http://www.w3.org/ns/dx/conneg/altr"
ALTRNS = Namespace("http://www.w3.org/ns/dx/conneg/altr#")
ALTR_HASREPRESENTATION = URIRef( "#".join( (ALTR,'hasRepresentation')))
ALTR_REPRESENTATION = URIRef( "#".join( (ALTR,'Representation')))
# Dublin Core terms used to describe each representation.
DCT = Namespace("http://purl.org/dc/terms/")
DCT_CONFORMSTO= URIRef("http://purl.org/dc/terms/conformsTo")
DCT_FORMAT= URIRef("http://purl.org/dc/terms/format")
PROF_TOKEN= URIRef("http://www.w3.org/ns/prof/token")
# Media type -> rdflib serialisation format name for the altr graph.
RDFLIBFORMATS = {
    'application/ld+json': 'json-ld' ,
    'text/html' :'html',
    'text/turtle': 'ttl',
    'application/json': 'json-ld' ,
    'application/rdf+xml': 'xml' }
# Lazily-created singleton Profile for the altr view; see getALTR().
ALTR_PROFILE = None
def getALTR():
    """Return the lazily-created singleton Profile for the W3C 'alternates' view."""
    global ALTR_PROFILE
    if not ALTR_PROFILE:
        # Created once on first use; subsequent calls reuse the cached row.
        defaults = { 'label': 'alternates using W3C model' , 'comment' : 'Implements the https://www.w3.org/TR/dx-prof-conneg/ standard alternates view of available profiles and media types.' }
        ALTR_PROFILE, _created = Profile.objects.get_or_create(token="alt", uri=ALTR, defaults=defaults)
    return ALTR_PROFILE
def resolve_register_uri(request, registry_label=None,requested_extension=None):
    """
    resolve a request to the register itself - just another URI
    (delegates to resolve_uri with no path below the register)
    """
    return resolve_uri(request, registry_label, None, requested_extension )
def resolve_registerslash_uri(request, registry_label,requested_extension=None):
    """
    resolve a request to the register itself - with a trailing slash
    (delegates to resolve_uri with "/" as the requested path)
    """
    return resolve_uri(request, registry_label, "/", requested_extension )
def qordered_prefs(prefstring):
    """
    Parse an HTTP preference list (e.g. an Accept-Profile header value) into
    a list of identifiers ordered by their q parameter (ascending).

    :param prefstring: header value such as '<urn:a>;q=0.5, <urn:b>'
    :return: list of identifiers sorted by q (default q is 1.0)
    """
    profile_qs = {}
    for pref in (x.strip() for x in prefstring.split(',')):
        if not pref:
            continue
        parts = [x.strip() for x in pref.split(';')]
        prof = parts[0]
        # Strip the <...> URI delimiters when present.
        if prof[0] == '<':
            prof = prof[1:-1]
        profile_qs[prof] = 1.0  # default q value
        for param in parts[1:]:
            kvp = [x.strip() for x in param.split('=')]
            if kvp[0] == 'q':
                # BUG FIX: store q as a number. The original kept the raw
                # string, so sorting a mix of explicit (str) and default
                # (int 1) q values raised TypeError on Python 3. Malformed
                # q parameters now fall back to the default instead of
                # crashing with IndexError/ValueError.
                try:
                    profile_qs[prof] = float(kvp[1])
                except (IndexError, ValueError):
                    pass
    # NOTE(review): ascending sort puts the *lowest* q first; callers that
    # take the first match may actually want the highest-q entry first —
    # original ordering preserved, TODO confirm intent.
    return sorted(profile_qs, key=profile_qs.get)
def resolve_uri(request, registry_label, requested_uri, requested_extension=None):
    """Resolve a redirect request against the rule registry.

    Returns an HttpResponse: a 303 redirect to the resolved document, a
    rendered 'alternates' (altr) view, a debug dump, or an error response.
    Only GET and HEAD are allowed.
    """
    if request.META['REQUEST_METHOD'] == 'GET':
        req=request.GET
        head=False
    elif request.META['REQUEST_METHOD'] == 'HEAD':
        req=request.GET
        head=True
    else:
        return HttpResponseNotAllowed([request.META['REQUEST_METHOD']])
    # NOTE(review): 'head' is assigned but never used below — presumably HEAD
    # is simply treated like GET; confirm.
    if requested_extension :
        requested_extension = requested_extension.replace('.','')
    debug = False
    # ?pdb= drops into the debugger; ?debug= switches to the diagnostic dump.
    # Broad excepts: absent query parameters are simply ignored.
    try:
        if req['pdb'] :
            import pdb; pdb.set_trace()
    except:
        pass
    try:
        if req['debug'] :
            debug=True
    except: pass
    # NOTE(review): str.replace returns a new string and the result is
    # discarded here, so this X-Forwarded-Proto scheme rewrite is a no-op —
    # confirm intent.
    try:
        requested_uri.replace('http:', request.headers['X-Forwarded-Proto'] + ":",1)
    except:
        pass
    clientaccept = request.META.get('HTTP_ACCEPT', '*')
    try:
        profile_prefs = qordered_prefs(request.META['HTTP_ACCEPT_PROFILE'])
    except:
        profile_prefs = []
    # Determine if this server is aware of the requested registry
    requested_register=None
    default_register=None
    try:
        requested_register = UriRegister.objects.get(label=registry_label)
    except UriRegister.DoesNotExist:
        # Unknown register: fold the label back into the path so the
        # catch-all '*' register (below) can try to match the full URI.
        if requested_uri in [ "/", None ] :
            requested_uri = "".join( filter(None,(registry_label,requested_uri if requested_uri else '')))
        else:
            requested_uri = "/".join( filter(None,(registry_label,requested_uri)))
    try:
        default_register = UriRegister.objects.get(label='*')
    except UriRegister.DoesNotExist:
        if not requested_register:
            return HttpResponseNotFound('The requested URI registry does not exist')
    # Determine if this server can resolve a URI for the requested registry
    if requested_register and not requested_register.can_be_resolved:
        return HttpResponsePermanentRedirect(requested_register.construct_remote_uri(requested_uri))
    # Find rewrite rules matching the requested uri Base - including the rule that binds the rules to a service - so bundled into
    # rulechains - where first rule is the one bound to the register and service location.
    #
    # we need to find this anyway (unless we have cached it in some future enhancement - as we need to be able to spit out the alternates view
    # based on the matching rules even if we are not trying to then match a rule to the conneg parameters
    #
    rulechains = []
    if requested_register:
        rulechains = requested_register.find_matching_rules(requested_uri)
    if default_register and (not requested_register or len(rulechains) == 0) :
        if requested_register and registry_label:
            # register but no rules, so havent joined yest
            requested_uri = "/".join( (registry_label, "" if requested_uri == '/' else requested_uri )) if requested_uri else registry_label
        rulechains = default_register.find_matching_rules(requested_uri)
        requested_register= default_register
    if len(rulechains) == 0:
        if debug:
            return HttpResponse("Debug mode: Not register found with matching rules. \n Headers %s\n" % ( request.headers, ),content_type="text/plain")
        else:
            return HttpResponseNotFound('The requested URI does not match any resource - no rules found for URI base')
    # at this point we have all the URIs that match the base URI - thats enough to list the available profiles for the base resource
    if requested_register.url:
        register_uri_base = requested_register.url
    else:
        host_base = "://".join((request.scheme,request.get_host()))
        register_uri_base = "".join((host_base,request.path[:request.path.index(registry_label)-1]))
    # rebuild full URI
    if requested_uri :
        uri= "/".join((register_uri_base.replace("http",request.scheme,1) ,requested_uri))
    else:
        uri= register_uri_base
    links,tokens,labels,descs = collate_alternates(rulechains)
    response_body = None
    # Serve the W3C 'alternates' (altr) view directly when requested via
    # Accept-Profile or ?_profile=alt (missing '_profile' raises and skips).
    try:
        if ALTR in profile_prefs or request.GET['_profile'] == "alt" :
            matched_profile = getALTR()
            try:
                content_type=request.GET['_mediatype']
            except:
                content_type= best_match( RDFLIBFORMATS.keys() , clientaccept)
            if content_type == 'text/html' :
                # call templating to turn to HTMLmake_altr_graph
                template = 'altrbase.html'
                try:
                    template = settings.URIREDIRECT_ALTR_BASETEMPLATE
                except:
                    pass
                response_body= render_to_string('altr.html', { 'page_template': template, 'links':links, 'uri':uri, 'tokens':tokens, 'labels':labels, 'descs':descs , 'stylesheets': [] })
            else:
                response_body = make_altr_graph (uri,links,tokens,labels,RDFLIBFORMATS[content_type])
    except Exception as e:
        print (e)
        pass
    if not response_body:
        # now to resolve redirect we need to find subset of rules matching viewname, and other query param constraints
        rule,matched_profile,content_type,exception,url,substitutable_vars = match_rule( request , uri, rulechains, requested_register, register_uri_base, registry_label, requested_uri, profile_prefs, requested_extension,clientaccept)
    else:
        rule=None
        substitutable_vars= None
        url=None
    #import pdb; pdb.set_trace()
    proflinks = generate_links_for_profiles("/".join(filter(None,(register_uri_base.replace("http",request.scheme,1) ,requested_uri))), links, tokens, matched_profile, content_type)
    # Perform the redirection if the resolver returns something, or a 404 instead
    if debug:
        response = HttpResponse("Debug mode: rule matched (%s , %s) generated %s \n\n template variables available: \n %s \n\n Link: \n\t%s\n\n Body \n%s \nHeaders %s\n" % ( rule, content_type, url, json.dumps(substitutable_vars , indent = 4),'\n\t'.join( proflinks.split(',')), response_body, request.headers ),content_type="text/plain")
    elif response_body:
        response = HttpResponse(response_body,content_type=content_type)
    elif url:
        response = HttpResponseSeeOther(url)
    elif exception:
        response = HttpResponseNotFound(exception)
    else:
        response = HttpResponseNotFound('The requested URI did not return any document')
    # Advertise the matched profile (and its transitive profiles) plus the
    # full set of alternate representations via HTTP headers.
    if matched_profile:
        mps = "<" + matched_profile.uri + ">"
        for p in matched_profile.profilesTransitive.values_list('uri'):
            mps += ",<%s>" % p
        response.setdefault("Content-Profile", mps)
    response.setdefault('Access-Control-Allow-Origin', '*' )
    response.setdefault("Link",proflinks)
    return response
def match_rule( request, uri, rulechains,requested_register,register_uri_base,registry_label, requested_uri, profile_prefs, requested_extension ,clientaccept ):
    """Find the first rewrite rule matching the request's view/profile/format
    constraints and resolve its URL template.

    Returns (rule, matched_profile, content_type, exception, url, vars).
    NOTE(review): the early return in the viewprops error branch below yields
    only a 4-tuple, which would break the 6-value unpack in resolve_uri —
    looks like a latent bug; confirm.
    """
    rule = None # havent found anything yet until we check params
    matched_profile = None
    content_type = None
    exception = None
    # note will ignore accept header and allow override format/lang in conneg if LDA convention in use
    for rulechain in rulechains :
        if rule:
            break
        # First element binds the chain to the register/service; the rest are
        # the pattern rules to try in order.
        binding = rulechain[0]
        for patrule in rulechain[1:] :
            if rule:
                break
            (use_lda, ignore) = patrule.get_prop_from_tree('use_lda')
            if use_lda :
                # Linked Data API convention: ?_format / ?_mediatype override
                # the Accept header entirely.
                try:
                    requested_extension= request.GET['_format']
                except :
                    try:
                        requested_extension= request.GET['_mediatype']
                    except : pass
                if requested_extension :
                    accept = None
                else :
                    accept = clientaccept # allow content negotiation only if not specified
            else :
                accept = clientaccept
            # check query string args before HTTP headers
            #(matchpatterns, prule) = patrule.get_prop_from_tree('view_pattern')
            matchpatterns = patrule.view_pattern
            if matchpatterns :
                viewprops = getattr(patrule,'view_param') # prule ?
                if not viewprops :
                    exception = 'resource matches pattern set but the query parameter to match is not set for rule %s' % patrule
                    return (rule,matched_profile,content_type,exception)
                else:
                    # Take the first configured view parameter present in the
                    # query string.
                    for viewprop in re.split(',|;',viewprops) :
                        try:
                            requested_view = request.GET[viewprop]
                            break
                        except:
                            requested_view = None
                    viewpats = re.split(',|;',matchpatterns)
                    for viewpat in viewpats :
                        # Empty pattern matches "no view requested".
                        if ((viewpat == "") and not requested_view) or ( requested_view and re.match(requested_view,viewpat)):
                            url_template,content_type,default_profile = patrule.get_url_template(requested_extension, accept)
                            if url_template :
                                rule = patrule
                                matched_profile = default_profile
                                break
                    if rule:
                        break
            elif patrule.profile.exists() :
                # may be set in header - but try to match query string arg with profile first
                rplist = getattr(patrule,'view_param')
                requested_profile = None
                matched_profile = None
                if rplist:
                    for rp in re.split(',|;',rplist):
                        try:
                            requested_profile_list = request.GET[rp]
                        except:
                            continue
                        # Match each q-ordered requested profile token against
                        # the rule's profiles and their transitive closures.
                        for requested_profile in qordered_prefs(requested_profile_list):
                            for p in patrule.profile.all() :
                                if( requested_profile in p.token.split(',')):
                                    matched_profile = p
                                else:
                                    for toklist in p.profilesTransitive.values_list('token', flat=True):
                                        if( requested_profile in toklist.split(',')):
                                            matched_profile = p
                                if matched_profile :
                                    url_template,content_type,default_profile = patrule.get_url_template(requested_extension, accept)
                                    if url_template :
                                        rule = patrule
                                        break;
                            if rule:
                                break ;
                # Fall back to the Accept-Profile header preferences (by URI).
                if not rule and not requested_profile and profile_prefs:
                    for rp in profile_prefs :
                        for p in patrule.profile.all() :
                            if( p.uri==rp):
                                matched_profile = p
                            else:
                                try:
                                    matched_profile = p.profilesTransitive.get(uri=rp)
                                except:
                                    matched_profile = None
                            if matched_profile :
                                url_template,content_type,default_profile = patrule.get_url_template(requested_extension, accept)
                                if url_template :
                                    rule = patrule
                                    matched_profile=p
                                    break
                        if rule:
                            break
            elif not rule : # if no specific query set, then set - otherwise respect any match made by the more specific rule
                url_template,content_type,profile = binding.get_url_template(requested_extension, accept)
                if url_template :
                    rule = patrule
    # Template substitution variables available to resolve_url_template.
    vars = {
        'uri_base' : "://".join((request.scheme,request.get_host())) ,
        'server' : binding.service_location.replace("http:",request.scheme+":",1) if binding.service_location else '' ,
        'server_http' : binding.service_location.replace("https:","http:",1) if binding.service_location else '' ,
        'server_https' : binding.service_location.replace("http:","https:",1) if binding.service_location else '' ,
        'path' : requested_uri,
        'register_name' : registry_label,
        'register' : requested_register.url.replace("http",request.scheme,1),
        'profile' : matched_profile.token if matched_profile else ''
    }
    if not rule :
        exception = 'A profile for the requested URI base exists but no rules match for the requested format'
        url=None
    else:
        # print url_template
        # set up all default variables
        if requested_uri :
            # 'term' is the final path segment; 'path_base' everything before it.
            try:
                term = requested_uri[requested_uri.rindex("/")+1:]
                vars.update({ 'uri' : uri, 'term' : term , 'path_base' : requested_uri[: requested_uri.rindex("/")] })
            except:
                vars.update({ 'uri' : uri , 'term' : requested_uri , 'path_base' : requested_uri })
        else:
            vars.update({ 'uri' : register_uri_base , 'term' : '' , 'path_base' : '' })
        # Convert the URL template to a resolvable URL - passing context variables, query param values and headers)
        url = rule.resolve_url_template(requested_uri, url_template, vars, request )
    return rule,matched_profile,content_type,exception, url, vars
def generate_links_for_profiles(uri,links,tokens,matched_profile,content_type):
    """Build the combined Link header value for a resource: the profile token
    mappings followed by one link entry per available representation."""
    token_part = ",".join(tokenmappings(tokens))
    link_part = ",".join(makelinkheaders(uri, links, tokens, matched_profile, content_type))
    return ",".join((token_part, link_part))
def collate_alternates(rulechains):
    """Collate the available representations for a set of resolving rules.

    Returns (links, tokens, labels, descs) dicts keyed by profile URI. The
    W3C canonical ALTR view is always included.
    """
    alt_profile = getALTR()
    links = { ALTR : RDFLIBFORMATS.keys() }
    tokens = { ALTR: 'alt'}
    labels = { ALTR: alt_profile.label }
    descs = { ALTR: alt_profile.comment }
    for chain in rulechains:
        for rule in chain[1:]:
            if not rule.profile :
                continue
            for prof in rule.profile.all():
                key = prof.uri
                links[key] = rule.extension_list()
                tokens[key] = prof.token
                labels[key] = prof.label or key
                descs[key] = prof.comment
    return links,tokens,labels,descs
def makelinkheaders (uri,links,tokens,matched_profile,content_type):
    """Serialise the available profiles in Link header syntax.

    The representation matching both the negotiated profile and media type is
    marked rel="self"; every other one is rel="alternate".
    """
    headers = []
    if matched_profile:
        headers.append('<%s>; rel="profile" ; anchor=<%s>' % (matched_profile.uri, uri))
    for prof in links.keys():
        profile_matches = bool(matched_profile) and matched_profile.uri == prof
        for media_type in links[prof]:
            rel = 'self' if profile_matches and media_type == content_type else 'alternate'
            headers.append('<%s>; rel="%s"; type="%s"; profile="%s"' % (uri, rel, media_type, prof))
    return headers
def make_altr_graph (uri,links,tokens,labels,content_type):
    """Serialise the altR model for the W3C profile listing, using the rdflib
    format name given in content_type."""
    graph = Graph()
    ns_manager = NamespaceManager(graph)
    ns_manager.bind("altr", ALTRNS)
    ns_manager.bind("dct", DCT)
    subject = URIRef(uri)
    for prof in links.keys():
        profile_ref = URIRef(prof)
        # Each representation is addressed via the ?_profile=<token> query form.
        representation = URIRef( "?_profile=".join((uri, tokens[prof])))
        graph.add( (subject, ALTR_HASREPRESENTATION , representation) )
        graph.add( (profile_ref, RDFS.label , Literal(labels[prof])) )
        graph.add( (representation, DCT_CONFORMSTO , profile_ref) )
        graph.add( (representation, RDF.type , ALTR_REPRESENTATION) )
        graph.add( (representation, PROF_TOKEN , Literal(tokens[prof])) )
        for media_type in links[prof]:
            graph.add( (representation, DCT_FORMAT , Literal( media_type)) )
    return graph.serialize(format=content_type)
def tokenmappings (tokens):
    """Serialise token -> profile mappings as Link header entries, one per
    comma-separated token of each profile."""
    return [
        '<http://www.w3.org/ns/dx/prof/Profile>; rel="type"; token="%s"; anchor=<%s>' % ( tok, prof )
        for prof in tokens.keys()
        for tok in tokens[prof].split(',')
    ]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
{% if group == 'beat' %}from setuptools import setup, find_packages
def load_requirements(f):
retval = [str(k.strip()) for k in open(f, 'rt')]
return [k for k in retval if k and k[0] not in ('#', '-')]
install_requires=load_requirements('requirements.txt')
{% else %}from setuptools import setup, dist
dist.Distribution(dict(setup_requires=['bob.extension']))
from bob.extension.utils import find_packages, load_requirements
install_requires = load_requirements()
{% endif %}
# Standard setuptools metadata; the double-brace placeholders are filled in
# by the template engine when the package skeleton is generated.
setup(
    name='{{ name }}',
    version=open("version.txt").read().rstrip(),
    description='{{ title }}',
    url='https://gitlab.idiap.ch/{{ package }}',
    {% if license == 'gplv3' %}license='GPLv3'{% else %}license='BSD'{% endif %},
    # there may be multiple authors (separate entries by comma)
    author='{{ author }}',
    author_email='{{ email }}',
    # there may be a maintainer apart from the author - you decide
    #maintainer='?',
    #maintainer_email='email@example.com',
    # you may add more keywords separating those by commas (a, b, c, ...)
    keywords = "{{ group }}",
    long_description=open('README.rst').read(),
    # leave this here, it is pretty standard
    packages=find_packages(),
    include_package_data=True,
    zip_safe = False,
    install_requires=install_requires,
    entry_points={
        # add entry points (scripts, {{ group }} resources here, if any)
    },
    # check classifiers, add and remove as you see fit
    # full list here: https://pypi.org/classifiers/
    # don't remove the Bob framework unless it's not a {{ group }} package
    classifiers = [
        {% if group == 'bob' %}'Framework :: Bob',
        {% endif %}'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        {% if license == 'gplv3' %}'License :: OSI Approved :: GNU General Public License v3 (GPLv3)'{% else %}'License :: OSI Approved :: BSD License'{% endif %},
        'Natural Language :: English',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
|
import datetime, io, os, socket
from contextlib import contextmanager
from unittest.mock import patch
from umatobi import constants
constants.SIMULATION_DIR = os.path.join(os.path.dirname(__file__), 'umatobi-simulation')
constants.FIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'fixtures')
constants.LOGGER_STREAM = open(os.path.join(constants.SIMULATION_DIR, 'stdout.log'), 'w')
from umatobi.constants import *
# Total wall-clock length of a simulated run, in seconds.
SIMULATION_SECONDS = 30
# Per-test timedelta offsets used with the time_machine fixture below;
# keys are test names, values are how far the mocked clock is advanced.
D_TIMEDELTA = {
    "test_watson_start": \
        datetime.timedelta(0, SIMULATION_SECONDS - 1, 0),
    "test_elapsed_time": \
        datetime.timedelta(0, 73, 138770),
}
# Zero offset, i.e. "no simulated time has passed".
TD_ZERO = datetime.timedelta(0, 0, 0)
class MockIO(io.BytesIO):
    """In-memory stand-in for a socket: recv() simply reads from the buffer."""
    def recv(self, bufsize, flags=0):
        # `flags` is accepted for socket API compatibility but ignored.
        return self.read(bufsize)
@contextmanager
def recv_the_script_from_sock(speaking, bufsize=0):
    """Patch umatobi.lib.sock_recv so every recv yields `speaking` verbatim.

    He is such a ventriloquist: the socket only ever says what we told it to.
    `bufsize` is accepted for signature compatibility but unused.
    """
    with patch('umatobi.lib.sock_recv') as mocked_recv:
        mocked_recv.return_value = speaking
        yield
@contextmanager
def time_machine(the_era):
    """Patch umatobi.lib.datetime_now to always report `the_era`."""
    with patch('umatobi.lib.datetime_now') as frozen_now:
        frozen_now.return_value = the_era
        yield
|
# The package directory name contains hyphens ("stack-queue-brackets"), which
# is a SyntaxError inside a dotted `from ... import` statement.  importlib
# resolves modules from a string, so the literal path can still be loaded.
import importlib
check = importlib.import_module(
    "data_structure_and_algorithms.python.code_challenges."
    "stack-queue-brackets.stack_queue_brackets"
).check
def test_true():
    """check() accepts a single balanced pair of braces."""
    expected = True  # fixed misspelled local name ('exbected')
    actual = check('{}')
    assert actual == expected
def test_true_0():
    """check() accepts several balanced pairs in sequence."""
    expected = True  # fixed misspelled local name ('exbected')
    actual = check('{}(){}')
    assert actual == expected
def test_true_1():
    """check() ignores non-bracket characters between balanced brackets."""
    actual = check('()[[Extra Characters]]')
    expected = True  # fixed misspelled local name ('exbected')
    assert actual == expected
|
from configparser import ConfigParser
from spaghettiqueue.logparser import LogsParser
from spaghettiqueue.client import APIClient
from spaghettiqueue.ui import Spaghetti
from PyQt5.QtWidgets import QApplication
from sys import platform, exit
from os import getenv
from os.path import expanduser, join, isfile
def main():
    """Entry point: load (or create) spaghetti.ini, wire up the API client,
    log parser and Qt UI, then run the application event loop."""
    # Initializing configuration file parser and checking if the config file
    # exists; if not, create one with default values.
    config = ConfigParser()
    if isfile('spaghetti.ini'):
        config.read('spaghetti.ini')
    else:
        config['spaghettiqueue'] = {
            'mcpath': join(getenv('APPDATA') if platform == "win32" else expanduser('~'), ".minecraft"),
            'hostip': 'https://api.spaghettiqueue.app',
            'uuid': "",
            "queuesubstring": "Position in queue:"
        }
        # Context manager closes the handle; the original passed a bare
        # open() into config.write() and leaked the file object.
        with open('spaghetti.ini', 'w') as config_file:
            config.write(config_file)
    # Init classes: the client which communicates between the app and the API
    # server, the parser which parses data from latest.log, and the UI.
    latestfilepath = join(config['spaghettiqueue']['mcpath'], "logs", "latest.log")  # .minecraft/logs/latest.log
    queuesubstring = config['spaghettiqueue']['queuesubstring']
    client = APIClient(uuid=config['spaghettiqueue']['uuid'], host=config['spaghettiqueue']['hostip'])
    # Persist the (possibly newly assigned) client UUID back to disk.
    config['spaghettiqueue']['uuid'] = client.uuid
    with open('spaghetti.ini', 'w') as config_file:
        config.write(config_file)
    app = QApplication([""])
    parser = LogsParser(client, latestfilepath, queuesubstring)
    ui = Spaghetti(client, parser, config)
    ui.setupUi()
    ui.show()
    exit(app.exec_())
if __name__ == "__main__":
main() |
"""
pipeline_utils.py
------------------------------------
Contains utilities for:
- Saving and loading piplelines
- Constructing pipelines
- Running pipelines
- Loading primitives
"""
import sys
import glob
import pdb
import shutil
import uuid
import json
import random
import pandas
import numpy as np
import os
import traceback
import time
import copy
from d3m.metadata import base as metadata_base, hyperparams as hyperparams_module, pipeline as pipeline_module, problem
import d3m
from d3m import metadata
from d3m.metadata import problem
from d3m.metadata.problem import *
from d3m.metadata.pipeline import Pipeline, PrimitiveStep
from d3m.metadata.base import ArgumentType, Context
from d3m.metadata.base import Metadata
from d3m.container.dataset import D3MDatasetLoader, Dataset
from d3m import primitives
import d3m_outputs
from d3m_outputs import Predictions
from d3m.runtime import Runtime
def load_pipeline(pipeline_json):
    """Deserialise and return a d3m Pipeline from its JSON representation."""
    return Pipeline.from_json(pipeline_json)
def get_primitive_names():
    """Return list of names of primitives."""
    # d3m.index.search() enumerates the python paths of installed primitives.
    return d3m.index.search()
def get_primitive_with_name(name):
    """Loads and returns a primitive with given name."""
    # Resolves e.g. "d3m.primitives...." to the primitive class via the index.
    return d3m.index.get_primitive(name)
def get_data_primitives_names():
    """Helper to return names of all primitives that deal with data."""
    data_markers = (".data.", ".datasets.")
    return [
        name
        for name in get_primitive_names()
        if any(marker in name for marker in data_markers)
    ]
def get_sklearn_primitives_names():
    """Helper to return names of all sklearn primitives."""
    def _is_sklearn(name):
        # sklearn wrappers follow the "*.SKlearn*" naming convention
        return ".SKlearn" in name
    return list(filter(_is_sklearn, get_primitive_names()))
def load_all_primitives():
    """Loads all primitives."""
    # Forces d3m to import every registered primitive (slow, but populates
    # the index consulted by get_loaded_primitives()).
    d3m.index.load_all()
def get_loaded_primitives():
    """Returns loaded primitives."""
    # Only primitives imported so far (see load_all_primitives) are returned.
    return d3m.index.get_loaded_primitives()
def get_primitive_attribute(primitive_or_primitive_name, attribute_selector):
    """Given a primitive or primitive name, gets the value in the metadata of primitive associated with attribute_selector.
    Example:
    => get_primitive_attribute("d3m.primitives.bbn.sklearn_wrap.BBNMLPClassifier", ("primitive_code","class_type_arguments","Inputs"))
    => <class 'd3m.container.pandas.DataFrame'>
    """
    # Resolve a primitive name to the actual primitive class if necessary.
    if type(primitive_or_primitive_name) == str:
        primitive = get_primitive_with_name(primitive_or_primitive_name)
    else:
        primitive = primitive_or_primitive_name
    # Walk the metadata mapping one selector component at a time; .get()
    # yields None (and then AttributeError on the next step) for missing
    # keys, matching the original lookup behaviour.
    node = primitive.metadata.query()
    for key in attribute_selector:
        node = node.get(key)
    return node
def get_primitive_input_type(primitive_or_primitive_name):
    """Returns the input type of given primitive or primitive name."""
    selector = ("primitive_code", "class_type_arguments", "Inputs")
    return get_primitive_attribute(primitive_or_primitive_name, selector)
def get_primitive_output_type(primitive_or_primitive_name):
    """Returns the output type of given primitive or primitive name."""
    selector = ("primitive_code", "class_type_arguments", "Outputs")
    return get_primitive_attribute(primitive_or_primitive_name, selector)
def get_primitive_family_type(primitive_or_primitive_name):
    """Returns the primitive type of given primitive or primitive name."""
    selector = ("primitive_family",)
    return get_primitive_attribute(primitive_or_primitive_name, selector)
class PipelineWrapper(object):
    """
    Wrapper around the d3m pipeline object which allows fast and easy construction
    of pipelines.
    """
    # Sentinel stage-input name referring to the pipeline's own input.
    PIPELINE_INPUT = "PIPELINE_INPUT"

    def __init__(self, pipeline_architecture):
        """
        Create a new PipelineWrapper object. pipeline_architecture should be a
        list of dictionaries specifiying the stages of the pipeline.
        Each stage dictionary in the pipeline_architecture list should specify:
            "primitive" - primitive name or class to instantiate (required)
            "stage_name" - name of the stage (required)
            "input" - name of the stage whose output should be passed
                      as input to the current stage (required). Use
                      PipelineWrapper.PIPELINE_INPUT for referencing the
                      input of the pipeline.
            "hyperparameters" - dict mapping names to data. (optional)
        Example:
        [
            {
                "stage_name" : "denormalize",
                "primitive" : "d3m.primitives.dsbox.Denormalize",
                "input" : PipelineWrapper.PIPELINE_INPUT,
            },
            {
                "stage_name" : "dataset_to_dataframe",
                "primitive" : "d3m.primitives.data_transformation.dataset_to_dataframe.Common",
                "input" : "denormalize",
            },
            {
                "stage_name" : "column_parser",
                "primitive" : "d3m.primitives.data_transformation.column_parser.DataFrameCommon",
                "input" : "dataset_to_dataframe",
            },
            {
                "stage_name" : "extract_attrs",
                "primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
                "input" : "column_parser",
                "hyperparameters" : {
                    "semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Attribute']
                }
            },
            {
                "stage_name" : "extract_targets",
                "primitive" : "d3m.primitives.data_transformation.extract_columns_by_semantic_types.DataFrameCommon",
                "input" : "column_parser",
                "hyperparameters" : {
                    "semantic_types" : ['https://metadata.datadrivendiscovery.org/types/Target']
                }
            }
        ]
        """
        # Deep-copy so later add_stage() calls never mutate the caller's list.
        self.pipeline_architecture_dict = copy.deepcopy(pipeline_architecture)
        self.verify_pipeline_architecture_dict(self.pipeline_architecture_dict)

    def get_stage_names(self):
        # Set of all stage names currently in the architecture.
        return set([x["stage_name"] for x in self.pipeline_architecture_dict])

    def run(self, input_dataset, eval_datasets=[], return_pipeline=False):
        """
        Converts internal pipeline architecture dict into pipeline and runs it.
        Args:
            - input_dataset: Input dataset to train
            - eval_dataset: Dataset to evaluate
            - return_pipeline: Whether to return the pipeline which fitted and produced the preds
        Returns:
            - If return_pipeline is False, returns just the predictions, otherwise returns a tuple
              (preds, pipeline)
        """
        pipeline = self.load_pipeline_architecture(self.pipeline_architecture_dict)
        # Raises if the assembled pipeline is structurally invalid.
        pipeline.check()
        runtime = Runtime(pipeline, context=Context.TESTING)
        runtime.fit(inputs=[input_dataset], return_values=['outputs.0'])
        all_preds = []
        for dataset in eval_datasets:
            all_preds.append(runtime.produce(inputs=[dataset], return_values=['outputs.0']))
        results = all_preds
        if return_pipeline:
            results = (all_preds, pipeline)
        return results

    def load_pipeline_architecture(self, pipeline_architecture_dict):
        """
        Loads pipeline architecture dictionary and returns a d3m Pipeline object.
        Return pipeline
        """
        pipeline_description = Pipeline(context=Context.TESTING)
        pipeline_description.add_input(name='inputs')
        # For each corresponding stage in the dictionary create a step.
        # Stages must be listed in topological order: a stage can only
        # reference the outputs of stages already processed (see
        # verify_pipeline_architecture_dict).
        steps = []
        stage_name_to_reference_name = {}
        for stage_dict in pipeline_architecture_dict:
            # Extract stage attributes
            primitive = stage_dict["primitive"]
            if type(primitive) == str:
                primitive = get_primitive_with_name(primitive)
            cur_stage_name = stage_dict["stage_name"]
            input_stage = stage_dict["input"]
            # Create primitive step
            step = PrimitiveStep(primitive_description=primitive.metadata.query())
            data_reference = "inputs.0" if input_stage == PipelineWrapper.PIPELINE_INPUT else stage_name_to_reference_name[input_stage]
            step.add_argument(name="inputs", argument_type=ArgumentType.CONTAINER, data_reference=data_reference)
            # Optional hyperparameters are passed by value.
            if "hyperparameters" in stage_dict:
                for k,v in stage_dict["hyperparameters"].items():
                    step.add_hyperparameter(name=k, argument_type=ArgumentType.VALUE, data=v)
            # Optional extra arguments reference other stages' outputs by name.
            if "arguments" in stage_dict:
                for k,v in stage_dict["arguments"].items():
                    step.add_argument(name=k, argument_type=ArgumentType.CONTAINER, data_reference=stage_name_to_reference_name[v])
            step.add_output("produce")
            pipeline_description.add_step(step)
            reference_name = next(iter(step.get_output_data_references()))
            # Update accounting
            stage_name_to_reference_name[cur_stage_name] = reference_name
            steps.append(step)
        # Output is output of the last step
        last_output_reference = next(iter(steps[-1].get_output_data_references()))
        pipeline_description.add_output(name="output", data_reference=last_output_reference)
        return pipeline_description

    def get_branches(self):
        """
        Returns stagenames that are not being used as inputs for
        any other stages.
        """
        stage_names = [x["stage_name"] for x in self.pipeline_architecture_dict]
        for stage_dict in self.pipeline_architecture_dict:
            input_name = stage_dict["input"]
            if input_name in stage_names:
                stage_names.remove(input_name)
        return stage_names

    def add_stage(self, new_stage):
        """
        Adds a stage to the pipeline architecture dict; then asserts validity
        of stages.
        """
        # If new_stage does not have a stage_name, give it a unique name
        if "stage_name" not in new_stage:
            primitive_name = str(new_stage["primitive"])
            stage_names = [x["stage_name"] for x in self.pipeline_architecture_dict]
            candidate_stage_name = primitive_name
            # Append a fresh UUID until the name is unique.
            while candidate_stage_name in stage_names:
                candidate_stage_name = primitive_name + "_" + str(uuid.uuid4())
            new_stage["stage_name"] = candidate_stage_name
        self.pipeline_architecture_dict.append(new_stage)
        self.verify_pipeline_architecture_dict(self.pipeline_architecture_dict)

    def verify_pipeline_architecture_dict(self, pipeline_json):
        """
        Verifies correctness of pipeline architecture dict.
        - Make sure that stage dictionaries have "stage_name", "primitive", "input"
        - Make sure that stage dictionaries have valid "primitive"
        - Make sure that stage_names are unique.
        - Make sure the inputs reference an existing stage name.
        Raises Exception on the first violation found.
        """
        # Verify that dictionaries have "stage_name" "primitive" "input" keys
        for stage_dict in pipeline_json:
            keys_to_check = ["stage_name", "primitive", "input"]
            for k in keys_to_check:
                if k not in stage_dict.keys():
                    raise Exception("Key '%s' not in pipeline dictionary" % k)
        # Verify that "primitives" are valid
        valid_primitives_names = get_primitive_names()
        for stage_dict in pipeline_json:
            primitive_or_primitive_name = stage_dict["primitive"]
            if type(primitive_or_primitive_name) == str:
                if primitive_or_primitive_name not in valid_primitives_names:
                    raise Exception("Primitive name '%s' not in list of valid primitives." % primitive_or_primitive_name)
            elif type(primitive_or_primitive_name) != d3m.primitive_interfaces.base.PrimitiveBaseMeta:
                raise Exception("Primitive '%s' not the right type (got object of type %s instead of %s" %
                                (str(primitive_or_primitive_name),
                                 str(type(primitive_or_primitive_name)),
                                 str(type(d3m.primitive_interfaces.base.PrimitiveBaseMeta))))
        # Verify that stage names are unique
        stage_names = set([x["stage_name"] for x in pipeline_json])
        n_unique_stage_names = len(stage_names)
        if n_unique_stage_names != len(pipeline_json):
            raise Exception("Stage names in pipeline dictionary are not unique.")
        # Make sure inputs reference previous stage names (i.e. the stage
        # list is topologically ordered).
        stage_names_so_far = set()
        for stage_dict in pipeline_json:
            input_stage = stage_dict["input"]
            if input_stage != PipelineWrapper.PIPELINE_INPUT and input_stage not in stage_names_so_far:
                raise Exception("Stage not found: %s" % input_stage)
            stage_names_so_far.add(stage_dict["stage_name"])
class SKLearnPipeliner(object):
    """
    Represents an object which can construct random simple sklearn classification
    pipelines and run them on inputs. One can either
    (1) provide a specific pipeline with 'data_loading_pipeline', or
    (2) provide a list of candidate piplines via 'data_loading_pipeline_candidates'
    """
    def __init__(self, data_loading_pipeline=None, data_loading_pipeline_candidates=None, sklearn_predictor=None):
        """
        Create SimpleSKLearnPipeliner.
        Note: a "pipeline" specifies data loading pipeline (see PipelineWrapper).
        Expect stage output branches to be ["attributes", "targets"]
        - data_loading_pipeline - a pipeline architecture
        - data_loading_pipelines - a list of pipeline architecture dicts
        - sklearn_predictor - the sklearn predictor to use as last step of the pipeline
        """
        self.data_loading_pipeline = data_loading_pipeline
        self.data_loading_pipeline_candidates = data_loading_pipeline_candidates
        self.sklearn_predictor = sklearn_predictor
        # check that we are given either a specific pipeline or candidates
        # (exactly one of the two, hence xor)
        assert bool(data_loading_pipeline) ^ bool(data_loading_pipeline_candidates)
        # Without a fixed predictor, run() samples from all sklearn primitives.
        if sklearn_predictor is None:
            self.sklearn_primitives = get_sklearn_primitives_names()

    def run(self, dataset_train, eval_datasets=[], return_pipeline=False):
        """
        If we are given a configuration of primitives, create a pipeline
        and run it on the given input datasets
        If we are not given a specific configuration, samples a simple sklearn
        based classification pipeline and runs it on the given input datasets.
        """
        # Load data loading pipeline (fixed one, or a random candidate)
        pipeline = random.choice(self.data_loading_pipeline_candidates) if (self.data_loading_pipeline is None) \
            else self.data_loading_pipeline
        pipeline_wrapper = PipelineWrapper(pipeline)
        # The data loading pipeline must end in exactly these two branches,
        # which feed the predictor's "inputs" and "outputs" arguments below.
        expected_stage_outputs = set(["attributes", "targets"])
        stage_outputs = set(pipeline_wrapper.get_branches())
        if expected_stage_outputs != stage_outputs:
            raise Exception("Expected stage outputs (%s) does not match actual (%s)." % (str(expected_stage_outputs),
                                                                                         str(stage_outputs)))
        # Make sure we have `dataset_to_dataframe` since that's required for predictions
        assert("column_parser" in pipeline_wrapper.get_stage_names())
        # Sample a random sklearn primitive (or use the fixed one)
        sklearn_primitive = random.choice(self.sklearn_primitives) if (self.sklearn_predictor is None) \
            else self.sklearn_predictor
        pipeline_wrapper.add_stage({
            "stage_name": "predict",
            "input" : "attributes",
            "primitive": sklearn_primitive,
            "arguments": {
                "outputs": "targets"
            },
            "hyperparameters": {
                # Todo(maxlam): Better way to handle GeneralRelationalDataset....
                "use_semantic_types": True if random.random() <= .5 else False,
            }
        })
        # Make sure to write predictions in the correct format
        pipeline_wrapper.add_stage({
            "input": "predict",
            "primitive": "d3m.primitives.data_transformation.construct_predictions.DataFrameCommon",
            "arguments": {
                "reference": "column_parser"
            }
        })
        # Run the pipeline
        return pipeline_wrapper.run(dataset_train, eval_datasets=eval_datasets,
                                    return_pipeline=return_pipeline)
\
|
# Falola Yusuf, Github: falfat
class Matrix:
    """
    A matrix class to handle an intersection matrix appropriately.

    Attributes
    ----------
    matrix: matrix
        intersection matrix
    rows: int
        number of rows of matrix
    cols: int
        number of columns of matrix
    size: int
        matrix size (rows * cols)

    Methods
    -------
    PrintMatrix()
        prints matrix in an appropriate form
    MatrixToFile()
        save matrix to text file
    ConvertObjectToIndex(object_type, object_guid, boundary_list,
                         domain_fractures)
        converts an object guid to its index in the matrix
    """
    def __init__(self, matrix):
        """
        matrix: matrix
            intersection matrix (non-empty, rectangular)
        """
        self.matrix = matrix
        self.rows = len(matrix)
        self.cols = len(matrix[0])
        self.size = self.rows * self.cols

    def PrintMatrix(self):
        """
        Print the matrix, one row per line, each row as a plain list.
        """
        for row in self.matrix:
            # print a list copy of the row, matching the original
            # element-by-element output format
            print(list(row))

    def MatrixToFile(self):
        """
        Save the matrix to 'intersection_matrix.txt' in the current
        directory: rows inside square brackets, separated by ",\\n".
        """
        # Context manager guarantees the handle is closed even on error
        # (the original relied on an explicit close() that an exception
        # could skip).
        with open("intersection_matrix.txt", 'w') as file:
            file.write("[")
            for i, row in enumerate(self.matrix):
                if i != 0:
                    # separate rows with a comma, and avoid a leading
                    # blank line by only breaking before subsequent rows
                    file.write(",\n")
                file.write(str(list(row)))
            file.write("]")
        return

    def ConvertObjectToIndex(self, object_type, object_guid,
                             boundary_list, domain_fractures):
        """
        Convert an object guid to its index in the matrix and return it.

        Parameters
        ----------
        object_type: str
            the type of object (either "boundary" or "fracture")
        object_guid: guid
            guid of the object
        boundary_list: list
            list of boundary guids
        domain_fractures: list
            list of fracture guids

        Returns None for any other object_type (original behaviour).
        Raises IndexError if the guid is not found in the relevant list.
        """
        if object_type == "boundary":
            # index of object_guid in the boundary list
            p1 = [i for i, guid in enumerate(boundary_list) if guid == object_guid]
            # boundaries occupy the last len(boundary_list) rows of the
            # matrix, so shift the list index accordingly
            return p1[0] + self.rows - len(boundary_list)
        if object_type == "fracture":
            # fracture index in the matrix is the same as in the list
            p1 = [i for i, guid in enumerate(domain_fractures) if guid == object_guid]
            return p1[0]
|
from classes import Api
class AptTradeDetail(Api):
    """Apartment trade-detail API endpoint; inherits all behaviour from Api."""
    pass
from ds2.orderedmapping import BSTMapping as BST
from ds_viz.style import Style
def x(node, offset):
    """Horizontal coordinate for `node`: 40 units per node in its left
    subtree plus one slot for the node itself, shifted right by `offset`."""
    NODE_WIDTH = 40
    if node.left is None:
        left_size = 0
    else:
        left_size = len(node.left)
    return NODE_WIDTH * (left_size + 1) + offset
# textstyle = {"stroke_width" : "0", "stroke" : "black",
#              "fill" : "black", "fill_opacity" : "1",
#              "font_size" : "20pt"}
# Drawing styles: node circles, node key labels, tree edges, and the thick
# pale-yellow stroke used to highlight a root-to-key search path.
nodestyle = [Style(radius = 18, stroke= (0,0,0), stroke_width=2, fill=(1,1,1))]
nodelabel = [Style(fontfamily='monospace', fontsize=20, fill=(0,0,0))]
treeedge = [Style(stroke=(0,0,0), stroke_width=2)]
highlight = [Style(stroke=(0.9,0.9,0.3), stroke_width=20)]
def drawtree(T, canvas):
    """Draw a whole tree (or a BST wrapper around one) onto `canvas`."""
    root = T._root if isinstance(T, BST) else T
    drawsubtree(root, 0, 20, canvas)
def drawsubtree(T, xoffset, yoffset, canvas):
    """Recursively draw the subtree rooted at `T`.

    Edges to non-empty children are drawn first, then the node's circle and
    key label on top.  `xoffset` shifts this subtree horizontally; the right
    child's subtree starts at this node's own x coordinate.
    """
    if T is None:
        return
    RADIUS = 18
    LEVEL_HEIGHT = 50
    cx = x(T, xoffset)
    cy = yoffset
    child_y = yoffset + LEVEL_HEIGHT
    if T.left is not None and len(T.left) > 0:
        canvas.drawline((cx, cy), (x(T.left, xoffset), child_y), treeedge)
        drawsubtree(T.left, xoffset, child_y, canvas)
    if T.right is not None and len(T.right) > 0:
        canvas.drawline((cx, cy), (x(T.right, cx), child_y), treeedge)
        drawsubtree(T.right, cx, child_y, canvas)
    canvas.drawcircle((cx, cy), RADIUS, nodestyle)
    canvas.text(T.key, (cx, cy), nodelabel)
def drawpathtoroot(T, key, canvas):
    """Highlight the search path for `key`, starting from the root of `T`."""
    root = T._root if isinstance(T, BST) else T
    _drawpathtoroot(root, key, 0, 20, canvas)
def _drawpathtoroot(T, key, xoffset, yoffset, canvas):
    """Recursive helper: draw a highlighted edge toward the child that the
    BST search for `key` would visit next, then recurse into that child."""
    if T is None:
        return
    LEVEL_HEIGHT = 50
    cx, cy = x(T, xoffset), yoffset
    child_y = yoffset + LEVEL_HEIGHT
    if key < T.key and T.left is not None:
        canvas.drawline((cx, cy), (x(T.left, xoffset), child_y), highlight)
        _drawpathtoroot(T.left, key, xoffset, child_y, canvas)
    elif key > T.key and T.right is not None:
        canvas.drawline((cx, cy), (x(T.right, cx), child_y), highlight)
        _drawpathtoroot(T.right, key, cx, child_y, canvas)
|
# Copyright 2020 The Caer Authors. All Rights Reserved.
#
# Licensed under the MIT License (see LICENSE);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at <https://opensource.org/licenses/MIT>
#
# ==============================================================================
def _check_target_size(size):
"""
Common check to enforce type and sanity check on size tuples
:param size: Should be a tuple of size 2 (width, height)
:returns: True, or raises a ValueError
"""
if not isinstance(size, (list, tuple)):
raise ValueError("Size must be a tuple")
if len(size) != 2:
raise ValueError("Size must be a tuple of length 2")
if size[0] < 0 or size[1] < 0:
raise ValueError("Width and height must be >= 0")
return True
def _check_mean_sub_values(value, channels):
"""
Checks if mean subtraction values are valid based on the number of channels
'value' must be a tuple of dimensions = number of channels
Returns boolean:
True -> Expression is valid
False -> Expression is invalid
"""
if value is None:
raise ValueError('Value(s) specified is of NoneType()')
if isinstance(value, tuple):
# If not a tuple, we convert it to one
try:
value = tuple(value)
except TypeError:
value = tuple([value])
if channels not in [1,3]:
raise ValueError('Number of channels must be either 1 (Grayscale) or 3 (RGB/BGR)')
if len(value) not in [1,3]:
raise ValueError('Tuple length must be either 1 (subtraction over the entire image) or 3 (per channel subtraction)', value)
if len(value) == channels:
return True
else:
raise ValueError(f'Expected a tuple of dimension {channels}', value) |
class Wrapper(object):
    """Wraps a raw response body and a request environment, and renders a
    minimal HTTP/1.1 response string."""
    def __init__(self, data, env):
        """
        data: raw response body as bytes (assumed UTF-8 encoded)
        env: request environment mapping (currently unused)
        """
        # Keep the byte length before decoding: per HTTP, Content-Length
        # counts bytes, not characters.  The original used len() of the
        # decoded str, which undercounts for non-ASCII bodies.
        self._content_length = len(data)
        self.data = data.decode("utf8")
        self.env = env
    def get_resp(self):
        """Return a complete HTTP/1.1 200 response string for the data."""
        # Note: Nginx add additional response headers if and only if
        # format of response accord with HTTP protocol
        # 200
        return "HTTP/1.1 200 OK\r\nContent-Length: {0}\r\n\r\n{1}" \
            .format(self._content_length, self.data)
        # 500: TODO — no error path is implemented yet
|
from .base import *
# Production overrides: debugging off, SQLite database kept on persistent storage.
DEBUG = False
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        # assumes /storage is a mounted persistent volume — TODO confirm deployment layout
        'NAME': '/storage/db.sqlite3',
    }
}
|
#%%
# Objective: determine a basic normalized hero data format.
|
#!/usr/bin/env python
import json
import logging
import os
import sys
from collections import defaultdict
from typing import Union, Dict, Tuple
import fire
import numpy as np
import pandas as pd
from datasets import load_dataset
from gensim.models import KeyedVectors
from pandas import DataFrame
from tqdm.auto import tqdm
from experiments import basic_logger_config
from experiments.evaluation.utils import get_avg_precision, get_reciprocal_rank, compute_dcg_at_k
from hf_datasets.paperswithcode_aspects import get_test_split
from experiments.utils import get_local_hf_dataset_path
logging.basicConfig(**basic_logger_config)
logger = logging.getLogger(__name__)
def evaluate_vectors(
        hf_dataset: str,
        aspect: str,
        input_path: str,
        name: str,
        folds: Union[str, list],
        top_ks: Union[str, list],
        output_path: str
):
    """
    Evaluate pre-computed document vectors with standard IR metrics
    (precision, recall, MAP, MRR) at several cut-offs, per fold, and append
    the results to a CSV file.

    Run with: $ ./eval_cli.py evaluate_vectors paperswithcode_aspects task ./output/pwc_doc_id2st.txt --name=sentence_transformers --folds=1,2,3,4 --top_ks=5,10,25,50 --output_path=./output/eval.csv
    :param aspect: aspect name used to select the test split
    :param folds: fold numbers (list, single int, or comma-separated string)
    :param top_ks: cut-off values (list, single int, or comma-separated string)
    :param name: system name recorded in the output rows
    :param hf_dataset: HuggingFace dataset name
    :param input_path: word2vec-format file with one vector per document ID
    :param output_path: CSV file to create or append to
    :return:
    """
    if isinstance(folds, str):
        folds = folds.split(',')
    elif isinstance(folds, int):
        folds = [folds]
    if isinstance(top_ks, str):
        top_ks = top_ks.split(',')
    elif isinstance(top_ks, int):
        top_ks = [top_ks]
    # Normalize both lists to ints.  Values parsed from comma-separated CLI
    # strings stayed strings in the original, which broke max(top_ks),
    # list slicing ([:top_k]) and most_similar(topn=...) below with an
    # uncaught TypeError.
    folds = [int(fold) for fold in folds]
    top_ks = [int(top_k) for top_k in top_ks]
    logger.info(f'Folds: {folds}')
    logger.info(f'Top-Ks: {top_ks}')
    if len(folds) < 1:
        logger.error('No folds provided')
        return
    if len(top_ks) < 1:
        logger.error('No top-k values provided')
        return
    # Load documents
    doc_model = KeyedVectors.load_word2vec_format(input_path)
    logger.info(f'Document vectors: {doc_model.vectors.shape}')
    # Normalize vectors (in place) so most_similar uses cosine similarity
    doc_model.init_sims(replace=True)
    # Init dataframe
    metrics = ['retrieved_docs', 'relevant_docs', 'relevant_retrieved_docs', 'precision', 'recall', 'avg_p',
               'reciprocal_rank']
    df = pd.DataFrame([], columns=['name', 'fold', 'top_k'] + metrics)
    # Iterate over folds
    for fold in folds:
        logger.info(f'Current fold: {fold}')
        # Dataset
        test_ds = load_dataset(
            get_local_hf_dataset_path(hf_dataset),
            name='relations',
            cache_dir='./data/nlp_cache',
            split=get_test_split(aspect, fold)
        )
        logger.info(f'Test samples: {len(test_ds):,}')
        # Unique paper IDs in test set
        test_paper_ids = set(test_ds['from_paper_id']).union(set(test_ds['to_paper_id']))
        logger.info(f'Test paper IDs: {len(test_paper_ids):,}')
        logger.info(f'Examples: {list(test_paper_ids)[:10]}')
        # Relevance mapping: symmetric — if a is relevant to b, b is to a
        doc_id2related_ids = defaultdict(set)  # type: Dict[str, Set[str]]
        for row in test_ds:
            if row['label'] == 'y':
                a = row['from_paper_id']
                b = row['to_paper_id']
                doc_id2related_ids[a].add(b)
                doc_id2related_ids[b].add(a)
        # Filter for documents in test set
        test_doc_model = KeyedVectors(vector_size=doc_model.vector_size)
        test_doc_ids = []
        test_doc_vectors = []
        missed_doc_ids = 0
        for doc_id in doc_model.vocab:
            if doc_id in test_paper_ids:
                vec = doc_model.get_vector(doc_id)
                if len(vec) != doc_model.vector_size:
                    raise ValueError(f'Test document as invalid shape: {doc_id} => {vec.shape}')
                test_doc_ids.append(doc_id)
                test_doc_vectors.append(vec)
            else:
                missed_doc_ids += 1
        if len(test_doc_ids) != len(test_doc_vectors):
            raise ValueError(f'Test document IDs does not match vector count: {len(test_doc_ids)} vs {len(test_doc_vectors)}')
        logger.info(f'Test document IDs: {len(test_doc_ids)} (missed {missed_doc_ids})')
        logger.info(f'Test document vectors: {len(test_doc_vectors)}')
        test_doc_model.add(test_doc_ids, test_doc_vectors)
        test_doc_model.init_sims(replace=True)
        logger.info(f'Test document vectors: {test_doc_model.vectors.shape}')
        # Actual evaluation: retrieve once at the largest cut-off, then
        # derive the metrics for every smaller top_k from that ranking.
        seed_ids_without_recommendations = []
        max_top_k = max(top_ks)
        eval_rows = {top_k: defaultdict(list) for top_k in top_ks}  # top_k => metric_name => list of value
        for seed_id in tqdm(test_paper_ids, desc=f'Evaluation (fold={fold})'):
            try:
                rel_docs = doc_id2related_ids[seed_id]
                max_ret_docs = [d for d, score in test_doc_model.most_similar(seed_id, topn=max_top_k)]
                for top_k in top_ks:
                    ret_docs = max_ret_docs[:top_k]
                    rel_ret_docs_count = len(set(ret_docs) & set(rel_docs))
                    if ret_docs and rel_docs:
                        # Precision = No. of relevant documents retrieved / No. of total documents retrieved
                        precision = rel_ret_docs_count / len(ret_docs)
                        # Recall = No. of relevant documents retrieved / No. of total relevant documents
                        recall = rel_ret_docs_count / len(rel_docs)
                        # Avg. precision (for MAP)
                        avg_p = get_avg_precision(ret_docs, rel_docs)
                        # Reciprocal rank (for MRR)
                        reciprocal_rank = get_reciprocal_rank(ret_docs, rel_docs)
                        # Save metrics
                        eval_rows[top_k]['retrieved_docs'].append(len(ret_docs))
                        eval_rows[top_k]['relevant_docs'].append(len(rel_docs))
                        eval_rows[top_k]['relevant_retrieved_docs'].append(rel_ret_docs_count)
                        eval_rows[top_k]['precision'].append(precision)
                        eval_rows[top_k]['recall'].append(recall)
                        eval_rows[top_k]['avg_p'].append(avg_p)
                        eval_rows[top_k]['reciprocal_rank'].append(reciprocal_rank)
            except (IndexError, ValueError, KeyError) as e:
                # seed document has no vector or retrieval failed; skip it
                seed_ids_without_recommendations.append(seed_id)
                logger.warning(f'Cannot retrieve recommendations for #{seed_id}: {e}')
        logger.info(
            f'Completed with {len(eval_rows[top_ks[0]][metrics[0]]):,} rows (missed {len(seed_ids_without_recommendations):,})')
        # Summarize evaluation: one row per top_k, averaging each metric
        # over all evaluated seed documents.
        for top_k in top_ks:
            try:
                row = [name, fold, top_k]
                for metric in metrics:
                    # mean over all metrics
                    values = eval_rows[top_k][metric]
                    if len(values) > 0:
                        row.append(np.mean(values))
                    else:
                        row.append(None)
                df.loc[len(df)] = row
            except ValueError as e:
                logger.error(f'Cannot summarize row: {top_k} {fold} {metrics} {e}')
    logger.info(f'Writing {len(df)} rows to {output_path}')
    if os.path.exists(output_path):
        # Append new rows to evaluation file
        df.to_csv(output_path, mode='a', header=False, index=False)
    else:
        # Write new files
        df.to_csv(output_path, header=True, index=False)
    logger.info('Done')
def reevaluate():
    """
    Evaluate all systems again!

    Re-runs retrieval evaluation for two families of document vectors found
    under ``output_path``: generic ``*.w2v.txt`` models and per-aspect/fold
    ``pwc_id2vec.w2v.txt`` models. Summary metrics are appended to
    ``reeval.csv`` and seed->retrieved-docs mappings are dumped as JSON.

    :return:
    """
    # Fixed evaluation setup: dataset name, CV folds, aspects and k cutoffs.
    hf_dataset = 'paperswithcode_aspects'
    folds = [1, 2, 3, 4]
    aspects = ['task', 'method', 'dataset']
    top_ks = [1, 2, 3, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 75, 100]
    output_path = './output/pwc'
    eval_path = os.path.join(output_path, 'reeval.csv')
    def get_evaluation_df(name, doc_model, hf_dataset, aspect, fold) -> Tuple[DataFrame, Dict]:
        # Evaluate one document model on one aspect/fold. Returns the
        # per-top_k summary dataframe and a seed_id -> retrieved-doc-ids map.
        # Init dataframe
        metrics = ['retrieved_docs', 'relevant_docs', 'relevant_retrieved_docs', 'precision', 'recall', 'avg_p',
                   'reciprocal_rank', 'ndcg']
        df = pd.DataFrame([], columns=['name', 'aspect', 'fold', 'top_k'] + metrics)
        # Dataset
        test_ds = load_dataset(
            get_local_hf_dataset_path(hf_dataset),
            name='relations',
            cache_dir='./data/nlp_cache',
            split=get_test_split(aspect, fold)
        )
        logger.info(f'Test samples: {len(test_ds):,}')
        # Unique paper IDs in test set
        test_paper_ids = set(test_ds['from_paper_id']).union(set(test_ds['to_paper_id']))
        logger.info(f'Test paper IDs: {len(test_paper_ids):,}')
        logger.info(f'Examples: {list(test_paper_ids)[:10]}')
        # Relevance mapping: positive pairs are treated as symmetric.
        doc_id2related_ids = defaultdict(set)  # type: Dict[str, Set[str]]
        for row in test_ds:
            if row['label'] == 'y':
                a = row['from_paper_id']
                b = row['to_paper_id']
                doc_id2related_ids[a].add(b)
                doc_id2related_ids[b].add(a)
        # Filter for documents in test set (build a sub-model so that
        # most_similar() only ever returns test-set candidates).
        test_doc_model = KeyedVectors(vector_size=doc_model.vector_size)
        test_doc_ids = []
        test_doc_vectors = []
        missed_doc_ids = 0
        for doc_id in doc_model.vocab:
            if doc_id in test_paper_ids:
                vec = doc_model.get_vector(doc_id)
                if len(vec) != doc_model.vector_size:
                    raise ValueError(f'Test document as invalid shape: {doc_id} => {vec.shape}')
                test_doc_ids.append(doc_id)
                test_doc_vectors.append(vec)
            else:
                missed_doc_ids += 1
                # logger.warning(f'Document ID is not part of test set: {doc_id} ({type(doc_id)})')
        if len(test_doc_ids) != len(test_doc_vectors):
            raise ValueError(
                f'Test document IDs does not match vector count: {len(test_doc_ids)} vs {len(test_doc_vectors)}')
        logger.info(f'Test document IDs: {len(test_doc_ids)} (missed {missed_doc_ids})')
        logger.info(f'Test document vectors: {len(test_doc_vectors)}')
        test_doc_model.add(test_doc_ids, test_doc_vectors)
        # Normalize vectors in place so similarities are cosine similarities.
        test_doc_model.init_sims(replace=True)
        logger.info(f'Test document vectors: {test_doc_model.vectors.shape}')
        # Actual evaluation
        # k2eval_rows = defaultdict(list)
        seed_ids_without_recommendations = []
        # Retrieve once at the largest cutoff, then slice for smaller ones.
        max_top_k = max(top_ks)
        eval_rows = {top_k: defaultdict(list) for top_k in top_ks}  # top_k => metric_name => list of value
        seed_id2ret_docs = {}
        for seed_id in tqdm(test_paper_ids, desc=f'Evaluation ({name},aspect={aspect},fold={fold})'):
            try:
                rel_docs = doc_id2related_ids[seed_id]
                max_ret_docs = [d for d, score in test_doc_model.most_similar(seed_id, topn=max_top_k)]
                seed_id2ret_docs[seed_id] = max_ret_docs
                for top_k in top_ks:
                    ret_docs = max_ret_docs[:top_k]
                    rel_ret_docs_count = len(set(ret_docs) & set(rel_docs))
                    # Seeds with no retrieved or no relevant docs are skipped
                    # for this top_k (they contribute no metric rows).
                    if ret_docs and rel_docs:
                        # Precision = No. of relevant documents retrieved / No. of total documents retrieved
                        precision = rel_ret_docs_count / len(ret_docs)
                        # Recall = No. of relevant documents retrieved / No. of total relevant documents
                        recall = rel_ret_docs_count / len(rel_docs)
                        # Avg. precision (for MAP)
                        avg_p = get_avg_precision(ret_docs, rel_docs)
                        # Reciprocal rank (for MRR)
                        reciprocal_rank = get_reciprocal_rank(ret_docs, rel_docs)
                        # # NDCG@k
                        predicted_relevance = [1 if ret_doc_id in rel_docs else 0 for ret_doc_id in ret_docs]
                        true_relevances = [1] * len(rel_docs)
                        ndcg_value = compute_dcg_at_k(predicted_relevance, top_k) / compute_dcg_at_k(true_relevances,
                                                                                                    top_k)
                        # Save metrics
                        eval_rows[top_k]['retrieved_docs'].append(len(ret_docs))
                        eval_rows[top_k]['relevant_docs'].append(len(rel_docs))
                        eval_rows[top_k]['relevant_retrieved_docs'].append(rel_ret_docs_count)
                        eval_rows[top_k]['precision'].append(precision)
                        eval_rows[top_k]['recall'].append(recall)
                        eval_rows[top_k]['avg_p'].append(avg_p)
                        eval_rows[top_k]['reciprocal_rank'].append(reciprocal_rank)
                        eval_rows[top_k]['ndcg'].append(ndcg_value)
            except (IndexError, ValueError, KeyError) as e:
                # e.g. seed not in the vector model vocabulary.
                seed_ids_without_recommendations.append(seed_id)
                logger.warning(f'Cannot retrieve recommendations for #{seed_id}: {e}')
        logger.info(
            f'Completed with {len(eval_rows[top_ks[0]][metrics[0]]):,} rows (missed {len(seed_ids_without_recommendations):,})')
        # Summarize evaluation: one dataframe row per top_k (means over seeds).
        for top_k in top_ks:
            try:
                row = [
                    name,
                    aspect,
                    fold,
                    top_k
                ]
                for metric in metrics:
                    # mean over all metrics
                    values = eval_rows[top_k][metric]
                    if len(values) > 0:
                        row.append(np.mean(values))
                    else:
                        row.append(None)
                df.loc[len(df)] = row
            except ValueError as e:
                logger.error(f'Cannot summarize row: {top_k} {fold} {metrics} {e}')
        return df, seed_id2ret_docs
    # generic embeddings
    generic_models = {aspect: {fold: {} for fold in folds} for aspect in aspects}
    generic_seed_id2ret_docs = {aspect: {fold: {} for fold in folds} for aspect in aspects}
    for fn in os.listdir(output_path):
        if fn.endswith('.w2v.txt') and (fn != 'fasttext.w2v.txt' or '_cls' in fn):  # exclude word vectors, CLS pooling
            input_path = os.path.join(output_path, fn)
            name = fn.replace('.w2v.txt', '')
            # Load documents
            doc_model = KeyedVectors.load_word2vec_format(input_path)
            logger.info(f'Document vectors: {doc_model.vectors.shape}')
            # Normalize vectors
            doc_model.init_sims(replace=True)
            # For folds and aspects
            for aspect in aspects:
                for fold in folds:
                    # Compute results
                    df, seed_id2ret_docs = get_evaluation_df(name, doc_model, hf_dataset, aspect, fold)
                    generic_models[aspect][fold][name] = doc_model
                    generic_seed_id2ret_docs[aspect][fold][name] = seed_id2ret_docs
                    logger.info(f'Writing {len(df)} rows to {eval_path}')
                    if os.path.exists(eval_path):
                        # Append new rows to evaluation file
                        df.to_csv(eval_path, mode='a', header=False, index=False)
                    else:
                        # Write new files
                        df.to_csv(eval_path, header=True, index=False)
    # save to disk
    json.dump(generic_seed_id2ret_docs, open(os.path.join(output_path, 'generic_seed_id2ret_docs.json'), 'w'))
    # special embeddings (trained per aspect/fold under output/<aspect>/<fold>/<name>/)
    special_models = {aspect: {fold: {} for fold in folds} for aspect in aspects}
    special_seed_id2ret_docs = {aspect: {fold: {} for fold in folds} for aspect in aspects}
    for aspect in aspects:
        for fold in folds:
            aspect_fold_dir = os.path.join(output_path, aspect, str(fold))
            for name in os.listdir(aspect_fold_dir):
                input_path = os.path.join(aspect_fold_dir, name, 'pwc_id2vec.w2v.txt')
                if not os.path.exists(input_path):
                    continue
                if name in special_models[aspect][fold] or name in special_seed_id2ret_docs[aspect][fold]:
                    # results exist already
                    continue
                # Load documents
                doc_model = KeyedVectors.load_word2vec_format(input_path)
                logger.info(f'Document vectors: {doc_model.vectors.shape}')
                # Normalize vectors
                doc_model.init_sims(replace=True)
                # Compute results
                df, seed_id2ret_docs = get_evaluation_df(name, doc_model, hf_dataset, aspect, fold)
                special_models[aspect][fold][name] = doc_model
                special_seed_id2ret_docs[aspect][fold][name] = seed_id2ret_docs
                logger.info(f'Writing {len(df)} rows to {eval_path}')
                if os.path.exists(eval_path):
                    # Append new rows to evaluation file
                    df.to_csv(eval_path, mode='a', header=False, index=False)
                else:
                    # Write new files
                    df.to_csv(eval_path, header=True, index=False)
    # save retrieved docs to disk
    json.dump(special_seed_id2ret_docs, open(os.path.join(output_path, 'special_seed_id2ret_docs.json'), 'w'))
    logger.info('done')
if __name__ == '__main__':
    # Expose this module's functions as CLI subcommands via python-fire.
    fire.Fire()
    sys.exit(0)
|
import torch.nn.functional as F
import scipy.sparse as ssp
import numpy as np
import torch
from models import AGD
from deeprobust.graph.data import Dataset, PrePtbDataset
import argparse
# ---- Command-line configuration -------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=15, help='Random seed.')
# NOTE(review): type=bool on argparse is misleading -- any non-empty string
# (including "False") parses as True; action='store_true' would be clearer,
# but that would change the CLI interface, so it is only flagged here.
parser.add_argument('--fastmode', type=bool, default=True)
parser.add_argument('--epochs', type=int, default=200)
parser.add_argument('--cuda_id', type=int, default=0)
parser.add_argument('--gcn_model', type=str, default='GCN')
parser.add_argument('--dataset', type=str, default='pubmed')
parser.add_argument('--lr', type=float, default=0.02)
parser.add_argument('--weight_decay', type=float, default=5e-4)
parser.add_argument('--ptb_rate', type=float, default=0.2)
parser.add_argument('--attack_rate', type=float, default=0.2)
parser.add_argument('--denoise_rate', type=float, default=0.01)
parser.add_argument('--lmda', type=float, default=0.1)
args = parser.parse_args()
# Pick the requested GPU when available, otherwise fall back to CPU.
args.device = device = torch.device(
    f'cuda:{args.cuda_id:d}' if torch.cuda.is_available() else 'cpu')
# Seed all RNGs for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed(args.seed)
    torch.backends.cudnn.deterministic = True
# Load the clean graph (deeprobust 'nettack' split convention, fixed seed).
data = Dataset(root='./datasets/',
               name=args.dataset, seed=15, setting='nettack')
adj, features, labels = data.adj, data.features, data.labels
idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
# Swap in a pre-computed perturbed adjacency for the requested attack rate.
if args.ptb_rate != 0:
    adj = ssp.load_npz(
        f'./datasets/{args.dataset}_meta_adj_{args.ptb_rate:g}.npz')
# Densify features and move graph tensors to the target device.
features = torch.Tensor(features.todense()).to(args.device)
adj = adj.tocoo()
edge_index = torch.LongTensor([adj.row, adj.col]).to(args.device)
edge_values = torch.Tensor(adj.data).to(args.device)
labels = torch.LongTensor(labels).to(args.device)
idx_train = torch.LongTensor(idx_train).to(args.device)
idx_val = torch.LongTensor(idx_val).to(args.device)
idx_test = torch.LongTensor(idx_test).to(args.device)
# Model hyper-parameters derived from the data.
args.num_nodes = features.shape[0]
args.num_features = features.shape[1]
args.num_classes = int(max(labels) + 1)
args.num_hiddens = 16
args.num_heads = 8
model: AGD = AGD(args).to(device)
# Track best validation/test accuracy for the GCN head and the encoder head.
best_gval, best_gtest = 0, 0
best_eval, best_etest = 0, 0
for args.epoch in range(1, args.epochs + 1):
    # `enc_val`/`enc_test` renamed from `eval`/`etest` so the local no longer
    # shadows the `eval` builtin.
    gval, gtest, enc_val, enc_test = model.train_all(
        features, edge_index, edge_values, labels, idx_train, idx_val, idx_test)
    if gval > best_gval:
        best_gval = gval
        best_gtest = gtest
    if enc_val > best_eval:
        best_eval = enc_val
        best_etest = enc_test
print(f"This is the result of {args.dataset}")
print(f"{args.gcn_model} test accuracy: {best_gtest:.4f}")
print(f"Encoder test accuracy: {best_etest:.4f}")
import ast
import tingle
class ExtraSyntax(ast.NodeTransformer):
    """AST transformer that rewrites top-level ``return``/``yield``
    statements into IPython ``display()`` calls.

    Function and async-function bodies are returned untouched (no
    ``generic_visit``), so only statements outside real functions are
    rewritten.
    """

    def visit_FunctionDef(self, node):
        # Do not descend into function bodies; returns inside real
        # functions keep their normal semantics.
        return node

    visit_AsyncFunctionDef = visit_FunctionDef

    def visit_Return(self, node):
        """Replace ``return expr`` with ``display(expr)``.

        A tuple value is unpacked into separate display arguments; a bare
        ``return`` (``node.value is None``) displays nothing instead of
        crashing on a ``None`` argument node.
        """
        replace = ast.parse(
            '''__import__('IPython').display.display()''').body[0]
        if node.value is None:
            # Bare `return` / `yield`: nothing to display.
            replace.value.args = []
        elif isinstance(node.value, ast.Tuple):
            # `return a, b` displays each element separately.
            replace.value.args = node.value.elts
        else:
            replace.value.args = [node.value]
        return ast.copy_location(replace, node)

    def visit_Expr(self, node):
        # A bare `yield` / `yield from` expression statement is displayed
        # like a return; anything else passes through unchanged.
        if isinstance(node.value, (ast.Yield, ast.YieldFrom)):
            return ast.copy_location(self.visit_Return(node.value), node)
        return node

    visit_Expression = visit_Expr
def load_ipython_extension(shell):
    """Register the ExtraSyntax AST transformer with the IPython *shell*."""
    tingle.util.append_ast_transformers(shell, ExtraSyntax)
def unload_ipython_extension(shell):
    """Remove the ExtraSyntax AST transformer from the IPython *shell*."""
    tingle.util.remove_ast_transformers(shell, ExtraSyntax)
|
from selenium.webdriver import ChromeOptions,Chrome,PhantomJS
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import time
class SeleniumDriver:
    """Factory for pre-configured Selenium WebDriver instances (Chrome,
    Firefox, PhantomJS) plus a helper that waits for text to appear on a
    page. Targets the legacy Selenium 2.4-3.141 API (see comments below)."""
    def __init__(self):
        # Shared Chrome options: spoofed desktop user agent, no sandbox,
        # GPU disabled, minimal logging, images blocked.
        self.options = ChromeOptions()
        self.options.add_argument(
            'user-agent=Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36')
        # self.options.add_argument('--headless')  # enable to hide the browser window
        self.options.add_argument('--no-sandbox')
        self.options.add_argument('--disable-gpu')
        self.options.add_argument('--log-level=3')
        # self.options.add_experimental_option('excludeSwitches', ['enable-automation'])
        # self.options.add_experimental_option('debuggerAddress', '127.0.0.1:9222')
        prefs = {"profile.managed_default_content_settings.images": 2}
        self.options.add_experimental_option("prefs", prefs)  # do not load images
    # selenium2.4 ~ 3.141
    # Chrome browser
    def Chrome(self):
        """Return a Chrome driver using ./chromedriver.exe and the shared options."""
        # PROXY_IP = get_proxy()
        # PROXY_IP = proxyclient.get_ip()
        # self.options.add_argument('--proxy-server=http://{}'.format(PROXY_IP))
        # `chrome_options=` is the legacy (pre-Selenium-4) keyword.
        driver = Chrome(executable_path="./chromedriver.exe", chrome_options=self.options)
        return driver
    # selenium3.141
    def FireFox(self):
        """Return a headless Firefox driver using ./geckodriver.exe."""
        from selenium import webdriver
        from selenium.webdriver.firefox.options import Options
        options = Options()
        options.add_argument('-headless')
        options.add_argument('--disable-gpu')  # disable GPU acceleration
        options.set_preference('permissions.default.image', 2)  # do not load images
        options.add_argument('--window-size=1280,800')  # set window size
        browser = webdriver.Firefox(executable_path='./geckodriver.exe',
                                    firefox_options=options)
        return browser
    # PhantomJS browser
    @classmethod
    def PhantomJS(cls):
        """Return a PhantomJS driver with a spoofed user agent and
        image loading disabled."""
        dcap = dict(DesiredCapabilities.PHANTOMJS)
        dcap["phantomjs.page.settings.userAgent"] = ("Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36")
        # proxy = get_proxy()
        SERVICE_ARGS = [
            '--disk-cache=true',  # use disk cache
            '--load-images=false',  # do not load images
            # '--proxy={}'.format(proxy),  # proxy ip
            # '--proxy-type=http',  # proxy type
            '--ignore-ssl-errors=true',
        ]
        # NOTE(review): executable_path points at geckodriver.exe (the Firefox
        # driver) -- presumably this should be the phantomjs binary; confirm.
        driver = PhantomJS(executable_path='./geckodriver.exe', desired_capabilities=dcap,
                           service_args=SERVICE_ARGS, service_log_path='./log/ghostdriver.log')
        return driver
    @classmethod
    def wait_load(cls, driver, text, timeout=30):
        """Poll driver.page_source once a second until *text* appears or
        *timeout* seconds elapse; returns None either way."""
        now_time = time.time()
        while text not in driver.page_source:
            if time.time() > now_time + timeout:
                print("等待超时")
                return
            time.sleep(1)
        print("获取到: {}".format(text))
        return
"""
abc-classroom.roster
====================
"""
import csv
from pathlib import Path
from . import config as cf
def column_to_split_exists(input_file_path, column_to_split):
    """Check whether *column_to_split* is a header column of a CSV file.

    Given a path to the input file and a column name to split into
    first_name, last_name, this method checks that the column_to_split
    exists.

    Parameters
    ----------
    input_file_path : str or Path
        Path to the CSV file to inspect.
    column_to_split : str
        Header name to look for.

    Returns
    -------
    bool
        True if the column is present, False otherwise -- including when
        the file is empty (``DictReader.fieldnames`` is None in that case,
        which previously raised ``TypeError``).
    """
    with open(input_file_path, newline="") as csv_input:
        reader = csv.DictReader(csv_input)
        columns = reader.fieldnames
    # fieldnames is None for a file with no header row; guard the `in` test.
    return columns is not None and column_to_split in columns
def create_roster(
    input_file, output_file="nbgrader_roster.csv", column_to_split="name"
):
    """Given a roster file downloaded from GitHub Classroom, creates
    a roster file suitable for use with abc-classroom and nbgrader.

    The output file is written into the configured course-materials
    directory. Rows without a GitHub username are skipped; for the rest,
    an ``id`` column is added (set to the GitHub username) and, when
    *column_to_split* exists, ``first_name``/``last_name`` columns are
    derived by splitting on the final space.

    Parameters
    ----------
    input_file : string
        Path to the GitHub Classroom roster
    output_file: string
        Name of the output file. Default is nbgrader_roster.csv
    column_to_split : string
        The column that we want to split to create the new columns
        first_name and last_name. Default is "name". If the column
        provided does not exist, does not create first and last name
        columns.
    """
    # create path to input file
    classroom_roster_path = Path(input_file)
    # get the materials_dir from the config and set the path of the
    # output file
    config = cf.get_config()
    materials_dir = cf.get_config_option(config, "course_materials", False)
    # if the course materials dir does not exist, return
    if not Path(materials_dir).is_dir():
        print(
            "Course materials directory '{}' as specified in config "
            "file does not exist. Please create "
            "it and then re-run abc-roster".format(materials_dir)
        )
        return
    output_file_path = Path(materials_dir, output_file)
    # if the output file exists, return (never overwrite silently)
    if output_file_path.exists():
        print(
            "Output file '{}' already exists. Please delete, rename, "
            "or move this file (or specify a different output file "
            "with the -o or --output flag) "
            "before re-running abc-roster.".format(output_file_path)
        )
        return
    # check whether we are going to split an input columm into
    # first_name, last_name
    split_column = True
    if not column_to_split_exists(classroom_roster_path, column_to_split):
        print(
            "Warning: The column '{}' does not exist in {}. Will not "
            "create first_name, last_name columns in output "
            "file".format(column_to_split, classroom_roster_path)
        )
        split_column = False
    try:
        with open(classroom_roster_path, newline="") as csv_input, open(
            output_file_path, "w", newline=""
        ) as csv_output:
            reader = csv.DictReader(csv_input)
            # Output keeps all input columns plus the new ones.
            columns = reader.fieldnames
            columns.append("id")
            if split_column:
                columns.append("first_name")
                columns.append("last_name")
            writer = csv.DictWriter(csv_output, fieldnames=columns)
            writer.writeheader()
            for row in reader:
                # newrow aliases row; mutations below affect both names.
                newrow = row
                ghname = row["github_username"]
                if ghname == "":
                    print("Warning: Skipping row; no GitHub username found:")
                    print("  ", list(row.values()))
                    continue
                # nbgrader's id column is the GitHub username.
                row["id"] = ghname
                if split_column:
                    name = row[column_to_split]
                    # split into two parts based on final space in field
                    # assume first part is first name and second part is
                    # last name
                    twonames = name.rsplit(" ", 1)
                    try:
                        newrow["first_name"] = twonames[0]
                    except IndexError:
                        newrow["first_name"] = ""
                    try:
                        newrow["last_name"] = twonames[1]
                    except IndexError:
                        # single-word name: no last name
                        newrow["last_name"] = ""
                writer.writerow(newrow)
            print("New roster file at {}".format(output_file_path))
    except FileNotFoundError as err:
        # prints the error [Errno 2] No such file or directory:
        # 'classroom_roster_path'
        print(err)
    except KeyError as ke:
        # prints error Error: Input file does not contain column
        # Happens when no github_username column
        print(
            "Error: Input file does not contain required column {}".format(ke)
        )
|
import streamlit as st
import numpy as np
import pandas as pd
from prophet import Prophet
from prophet.diagnostics import performance_metrics
from prophet.diagnostics import cross_validation
from prophet.plot import plot_cross_validation_metric
import base64
# Streamlit app: upload a ds/y CSV, fit Prophet, visualize and download the
# forecast. Bare string literals below are Streamlit "magic" -- they are
# rendered as markdown in the app, so they are part of runtime behavior.
st.title('📈 Automated Time Series Forecasting')
"""
Nama Kelompok 1 :
1. Alif Hafian Fathurrahman
2. Vincent Wongso
3. Nina Indah Gusmiarti
"""
"""
### Step 1: Import Data
"""
# NOTE(review): the `encoding` keyword of st.file_uploader was deprecated and
# later removed in newer Streamlit releases -- confirm the pinned version.
df = st.file_uploader('Import the time series csv file here. Columns must be labeled ds and y. The input to Prophet is always a dataframe with two columns: ds and y. The ds (datestamp) column should be of a format expected by Pandas, ideally YYYY-MM-DD for a date or YYYY-MM-DD HH:MM:SS for a timestamp. The y column must be numeric, and represents the measurement we wish to forecast.', type='csv', encoding='auto')
if df is not None:
    data = pd.read_csv(df)
    # Unparseable dates become NaT rather than raising.
    data['ds'] = pd.to_datetime(data['ds'],errors='coerce')
    st.write(data)
    # Last observed date; used later to show only future rows.
    max_date = data['ds'].max()
    #st.write(max_date)
"""
### Step 2: Select Forecast Horizon
Keep in mind that forecasts become less accurate with larger forecast horizons.
"""
periods_input = st.number_input('How many periods would you like to forecast into the future?',
min_value = 1, max_value = 365)
if df is not None:
    # Fit Prophet on the uploaded data.
    m = Prophet()
    m.fit(data)
"""
### Step 3: Visualize Forecast Data
The below visual shows future predicted values. "yhat" is the predicted value, and the upper and lower limits are (by default) 80% confidence intervals.
"""
if df is not None:
    future = m.make_future_dataframe(periods=periods_input)
    forecast = m.predict(future)
    fcst = forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']]
    # Keep only rows beyond the last observed date.
    fcst_filtered = fcst[fcst['ds'] > max_date]
    st.write(fcst_filtered)
    """
    The next visual shows the actual (black dots) and predicted (blue line) values over time.
    """
    fig1 = m.plot(forecast)
    st.write(fig1)
    """
    The next few visuals show a high level trend of predicted values, day of week trends, and yearly trends (if dataset covers multiple years). The blue shaded area represents upper and lower confidence intervals.
    """
    fig2 = m.plot_components(forecast)
    st.write(fig2)
"""
### Step 4: Download the Forecast Data
The below link allows you to download the newly created forecast to your computer for further analysis and use.
"""
if df is not None:
    csv_exp = fcst_filtered.to_csv(index=False)
    # When no file name is given, pandas returns the CSV as a string, nice.
    b64 = base64.b64encode(csv_exp.encode()).decode()  # some strings <-> bytes conversions necessary here
    href = f'<a href="data:file/csv;base64,{b64}">Download CSV File</a> (right-click and save as ** <forecast_name>.csv**)'
    st.markdown(href, unsafe_allow_html=True)
|
# Python 2 demo script: verifies that X.__init__() runs on instance
# creation AND again when restoring from both pickle and gnosis.xml.pickle.
import pickle, gnosis.xml.pickle
import funcs
funcs.set_parser()
# Paranoia 0: allow unpickling of arbitrary classes.
gnosis.xml.pickle.setParanoia(0)
print "DESIRED BEHAVIOR:"
print "  X.__init__() should be called when instances are created,"
print "  and also when either a plain or xml pickle is restored."
class X:
    # __getinitargs__ + this flag make (un)pickling call __init__().
    __safe_for_unpickling__ = 1
    def __init__(self):
        print "In __init__()"
    def __getinitargs__(self):
        return ()
print "\nCREATE X INSTANCE:"
x = X()
print "\nPICKLE DUMP:"
p = pickle.dumps(x)
print `p`
print "\nPICKLE LOAD:"
x2 = pickle.loads(p)
print "\nGNOSIS.XML.PICKLE DUMP:"
px = gnosis.xml.pickle.dumps(x)
print px,
print "\nGNOSIS.XML.PICKLE LOAD:"
x3 = gnosis.xml.pickle.loads(px)
|
from .login import watch_login
from .login import watch_logout
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-"
# vim: set expandtab tabstop=4 shiftwidth=4:
"""
$Id$
This file is part of the xsser project, http://xsser.03c8.net
Copyright (c) 2011/2016 psy <epsylon@riseup.net>
xsser is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation version 3 of the License.
xsser is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along
with xsser; if not, write to the Free Software Foundation, Inc., 51
Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import sys
import urllib
import urllib2
import urlparse
import pycurl
import time
import traceback
import curlcontrol
import threadpool
from Queue import Queue
from collections import defaultdict
from BeautifulSoup import BeautifulSoup
class EmergencyLanding(Exception):
    """Raised internally to abort a crawl early (URL limit reached or
    the crawler was disarmed)."""
    pass
class Crawler(object):
    """
    Crawler class.
    Crawls a webpage looking for url arguments.
    Dont call from several threads! You should create a new one
    for every thread.

    Pages are fetched through a thread pool; discovered query-string
    parameters are turned into attack URLs (param value replaced by
    "VECTOR") and appended to the parent's crawled_urls list.
    """
    def __init__(self, parent, curlwrapper=None, crawled=None, pool=None):
        # verbose: 0-no printing, 1-prints dots, 2-prints full output
        self.verbose = 1
        self._parent = parent
        self._to_crawl = []  # pending [basepath, href, depth, width] jobs
        self._parse_external = True  # follow links outside the start host
        self._requests = []  # paths currently in flight
        self._ownpool = False
        self._reporter = None
        self._armed = True  # set False to stop accepting/doing work
        self._poolsize = 10
        self._found_args = defaultdict(list)  # (arg_name, netloc) -> [[path, url], ...]
        self.pool = pool
        if crawled:
            self._crawled = crawled
        else:
            self._crawled = []
        # Allow injecting a custom curl wrapper (e.g. for testing).
        if curlwrapper:
            self.curl = curlwrapper
        else:
            self.curl = curlcontrol.Curl
    def report(self, msg):
        # Route messages through the attached reporter, else stdout.
        if self._reporter:
            self._reporter.report(msg)
        else:
            print msg
    def set_reporter(self, reporter):
        self._reporter = reporter
    def _find_args(self, url):
        """
        find parameters in given url.

        Records each (parameter, host) pair once per path and disarms the
        crawler once the configured maximum number of findings is reached.
        """
        parsed = urllib2.urlparse.urlparse(url)
        qs = urlparse.parse_qs(parsed.query)
        if parsed.scheme:
            path = parsed.scheme + "://" + parsed.netloc + parsed.path
        else:
            path = parsed.netloc + parsed.path
        for arg_name in qs:
            key = (arg_name, parsed.netloc)
            zipped = zip(*self._found_args[key])
            # Only record this parameter once per distinct path.
            if not zipped or not path in zipped[0]:
                self._found_args[key].append([path, url])
                self.generate_result(arg_name, path, url)
        ncurrent = sum(map(lambda s: len(s), self._found_args.values()))
        if ncurrent >= self._max:
            self._armed = False
    def cancel(self):
        # Disarm: in-flight callbacks will raise EmergencyLanding.
        self._armed = False
    def crawl(self, path, depth=3, width=0, local_only=True):
        """
        setup and perform a crawl on the given url.

        width == 0 means effectively unlimited findings; local_only
        restricts link-following to the starting host.
        """
        if not self._armed:
            return []
        parsed = urllib2.urlparse.urlparse(path)
        basepath = parsed.scheme + "://" + parsed.netloc
        self._parse_external = not local_only
        if not self.pool:
            self.pool = threadpool.ThreadPool(self._poolsize)
        if self.verbose == 2:
            self.report("crawling: " + path)
        if width == 0:
            self._max = 1000000000
        else:
            self._max = int(width)
        self._path = path
        self._depth = depth
        # NOTE(review): attack_urls is never populated here -- results are
        # delivered via self._parent.crawled_urls instead; confirm intent.
        attack_urls = []
        if not self._parent._landing and self._armed:
            self._crawl(basepath, path, depth, width)
            if self._ownpool:
                self.pool.dismissWorkers(len(self.pool.workers))
                self.pool.joinAllDismissedWorkers()
        return attack_urls
    def shutdown(self):
        # Only tear the pool down if this crawler created it.
        if self._ownpool:
            self.pool.dismissWorkers(len(self.pool.workers))
            self.pool.joinAllDismissedWorkers()
    def generate_result(self, arg_name, path, url):
        """Build an attack URL with arg_name's value replaced by "VECTOR"
        and record it on the parent (deduplicated)."""
        parsed = urllib2.urlparse.urlparse(url)
        qs = urlparse.parse_qs(parsed.query)
        qs_joint = {}
        # parse_qs returns lists; keep only the first value per key.
        for key, val in qs.iteritems():
            qs_joint[key] = val[0]
        attack_qs = dict(qs_joint)
        attack_qs[arg_name] = "VECTOR"
        attack_url = path + '?' + urllib.urlencode(attack_qs)
        if not attack_url in self._parent.crawled_urls:
            self._parent.crawled_urls.append(attack_url)
    def _crawl(self, basepath, path, depth=3, width=0):
        """
        perform a crawl on the given url.
        this function downloads and looks for links.
        """
        self._crawled.append(path)
        if not path.startswith("http"):
            return
        # NOTE(review): _cb is defined but never used -- the pool request
        # below uses _get_done_dummy instead; confirm this is intentional.
        def _cb(request, result):
            self._get_done(depth, width, request, result)
        self._requests.append(path)
        # Fetch asynchronously; _curl_main runs in a worker thread.
        self.pool.addRequest(self._curl_main, [[path, depth, width, basepath]],
                             self._get_done_dummy, self._get_error)
    def _curl_main(self, pars):
        # Worker-thread entry point: fetch one page and process it.
        path, depth, width, basepath = pars
        if not self._armed or len(self._parent.crawled_urls) >= self._max:
            raise EmergencyLanding
        c = self.curl()
        c.set_timeout(5)
        try:
            res = c.get(path)
        except Exception as error:
            c.close()
            del c
            raise error
        c_info = c.info().get('content-type', None)
        c.close()
        del c
        self._get_done(basepath, depth, width, path, res, c_info)
    def _get_error(self, request, error):
        # Pool error callback: classify the failure, keep the queue moving.
        try:
            path, depth, width, basepath = request.args[0]
            e_type, e_value, e_tb = error
            if e_type == pycurl.error:
                errno, message = e_value.args
                if errno == 28:
                    print("requests pyerror -1")
                    self.enqueue_jobs()
                    self._requests.remove(path)
                    return # timeout
                else:
                    self.report('crawler curl error: '+message+' ('+str(errno)+')')
            elif e_type == EmergencyLanding:
                # Deliberate early abort -- not an error.
                pass
            else:
                traceback.print_tb(e_tb)
                self.report('crawler error: '+str(e_value)+' '+path)
            if not e_type == EmergencyLanding:
                for reporter in self._parent._reporters:
                    reporter.mosquito_crashed(path, str(e_value))
            self.enqueue_jobs()
            self._requests.remove(path)
        except:
            return
    def _emergency_parse(self, html_data, start=0):
        # Crude regex-free href scanner used when BeautifulSoup fails
        # (or to top up its results); returns [{'href': ...}, ...].
        links = set()
        pos = 0
        if not html_data:
            return
        data_len = len(html_data)
        while pos < data_len:
            if len(links)+start > self._max:
                break
            pos = html_data.find("href=", pos)
            if not pos == -1:
                sep = html_data[pos+5]
                # Unquoted href (href=http...): back up and cut at '>'.
                if sep == "h":
                    pos -= 1
                    sep=">"
                href = html_data[pos+6:html_data.find(sep, pos+7)].split("#")[0]
                pos = pos+1
                links.add(href)
            else:
                break
        return map(lambda s: {'href': s}, links)
    def _get_done_dummy(self, request, result):
        # Pool success callback: page already processed in _curl_main;
        # just keep the crawl queue moving.
        path = request.args[0][0]
        self.enqueue_jobs()
        self._requests.remove(path)
    def enqueue_jobs(self):
        # Refill the pool from the backlog while it has spare capacity.
        if len(self.pool.workRequests) < int(self._max/2):
            while self._to_crawl:
                next_job = self._to_crawl.pop()
                self._crawl(*next_job)
    def _get_done(self, basepath, depth, width, path, html_data, content_type): # request, result):
        """Parse a fetched page: extract links and form targets, record
        found parameters, and queue further URLs to crawl."""
        if not self._armed or len(self._parent.crawled_urls) >= self._max:
            raise EmergencyLanding
        try:
            # charset from the Content-Type header, e.g. "...; charset=utf-8"
            encoding = content_type.split(";")[1].split("=")[1].strip()
        except:
            encoding = None
        try:
            soup = BeautifulSoup(html_data, from_encoding=encoding)
            links = None
        except:
            # Fall back to the crude scanner when parsing fails.
            soup = None
            links = self._emergency_parse(html_data)
        for reporter in self._parent._reporters:
            reporter.start_crawl(path)
        if not links and soup:
            links = soup.find_all('a')
            # Turn each form into a pre-filled GET URL with dummy values.
            forms = soup.find_all('form')
            for form in forms:
                pars = {}
                if form.has_key("action"):
                    action_path = urlparse.urljoin(path, form["action"])
                else:
                    action_path = path
                for input_par in form.find_all('input'):
                    if not input_par.has_key("name"):
                        continue
                    value = "foo"
                    if input_par.has_key("value") and input_par["value"]:
                        value = input_par["value"]
                    pars[input_par["name"]] = value
                for input_par in form.findAll('select'):
                    pars[input_par["name"]] = "1"
                if pars:
                    links.append({"url":action_path + '?' + urllib.urlencode(pars)})
                else:
                    self.report("form with no pars")
                    links.append({"url":action_path})
            links += self._emergency_parse(html_data, len(links))
        if self.verbose == 2:
            self.report(" "*(self._depth-depth) + path +" "+ str(len(links)))
        elif self.verbose:
            sys.stdout.write(".")
            sys.stdout.flush()
        if not links:
            return
        if len(links) > self._max:
            links = links[:self._max]
        for a in links:
            try:
                href = str(a['href'].encode('utf-8'))
            except KeyError:
                # this link has no href
                continue
            except:
                # can't decode or something darker..
                continue
            if href.startswith("javascript") or href.startswith('mailto:'):
                continue
            href = urlparse.urljoin(path, href)
            if not href.startswith("http") or not "." in href:
                continue
            href = href.split('#',1)[0]
            scheme_rpos = href.rfind('http://')
            if not scheme_rpos in [0, -1]:
                # looks like some kind of redirect so we try both too ;)
                href1 = href[scheme_rpos:]
                href2 = href[:scheme_rpos]
                self._check_url(basepath, path, href1, depth, width)
                self._check_url(basepath, path, href2, depth, width)
            self._check_url(basepath, path, href, depth, width)
        return self._found_args
    def _check_url(self, basepath, path, href, depth, width):
        """
        process the given url for a crawl
        check to see if we have to continue crawling on the given url.
        """
        do_crawling = self._parse_external or href.startswith(basepath)
        if do_crawling and not href in self._crawled:
            self._find_args(href)
            for reporter in self._parent._reporters:
                reporter.add_link(path, href)
            self.report("\n[Info] Spidering: " + str(href))
            if self._armed and depth>0:
                if len(self._to_crawl) < self._max:
                    self._to_crawl.append([basepath, href, depth-1, width])
|
# Author: Mohit Sakhuja
def karatsuba(x, y):
    """Multiply two integers with the Karatsuba divide-and-conquer algorithm.

    Handles positive, zero and negative integers: the sign is factored out
    up front so the recursion only ever sees non-negative operands. (With
    floor division, a negative operand like -1 splits into a = -1, b = 9
    and never shrinks, so e.g. karatsuba(-1, -1) previously recursed
    forever.)

    Parameters
    ----------
    x, y : int

    Returns
    -------
    int
        The product x * y.
    """
    # Factor out the sign and recurse on the magnitudes.
    if x < 0 or y < 0:
        sign = -1 if (x < 0) != (y < 0) else 1
        return sign * karatsuba(abs(x), abs(y))
    # Calculate the length of the two integers and take the minimum of the two.
    n = min(len(str(x)), len(str(y)))
    # Perform simple multiplication if any of the two numbers is less than 10.
    if n == 1:
        return x * y
    # Calculate half length and the power of 10 raised to that half length.
    half = n // 2
    power_of_10 = 10 ** half
    # Divide and conquer:
    #   x = (a * power_of_10) + b
    #   y = (c * power_of_10) + d
    a, b = divmod(x, power_of_10)
    c, d = divmod(y, power_of_10)
    # Karatsuba's trick: 3 recursive multiplications instead of 4, since
    # ad + bc = (a + b)(c + d) - ac - bd.
    ac = karatsuba(a, c)
    bd = karatsuba(b, d)
    ad_plus_bc = karatsuba(a + b, c + d) - ac - bd
    # Recombine the three partial products with the right powers of 10.
    return (ac * pow(power_of_10, 2)) + (ad_plus_bc * power_of_10) + bd
def main():
    """Read two integers from stdin and print their Karatsuba product."""
    # Sample inputs kept around for quick manual testing:
    # x = 3141592653589793238462643383279502884197169399375105820974944592
    # y = 2718281828459045235360287471352662497757247093699959574966967627
    first = input("Enter first number: ")
    second = input("Enter second number: ")
    print(karatsuba(int(first), int(second)))
main()
|
from pyparsing import *
class FirewallFilter(object):
    """pyparsing grammar for Juniper-style ``firewall { filter ... }``
    configuration blocks, exposing sub-grammars for whole documents,
    individual terms, and destination-address blocks."""
    def __init__(self):
        # Braces delimit blocks but are not kept in parse results.
        startDelim = Literal('{').suppress()
        endDelim = Literal('}').suppress()
        # A generic configuration token (identifiers, options, quoted bits).
        word = Word(alphanums + ',:-=[]"\'_\\')
        # IPv4 address and optional /prefix (CIDR); trailing ';' dropped.
        ip_octet = Word(nums, min=1, max=3)
        ip = Combine(ip_octet + '.' + ip_octet + '.' + ip_octet + '.' + ip_octet)
        cidr = Combine(ip + Optional('/' + Word(nums, min=1, max=2) + Literal(';').suppress()))
        # Any "token token ... ;" statement, discarded from results.
        expr = Group(ZeroOrMore(word) + ';').suppress()
        label = word.suppress()
        # source-address keeps its keyword; destination-address drops it.
        source_addr_blk = Group( Keyword('source-address') + startDelim + ZeroOrMore(cidr) + endDelim)
        destination_addr_blk = Group( Keyword('destination-address').suppress() + startDelim + ZeroOrMore(cidr) + endDelim)
        # `from { ... }` / `then { ... }` bodies: statements around optional
        # address blocks.
        from_then_blk = Group( (Keyword('from') | Keyword('then')) + startDelim + ZeroOrMore(expr) + ZeroOrMore(source_addr_blk | destination_addr_blk) + ZeroOrMore(expr) + endDelim)
        # `term <name> { ... }`; term_original preserves the raw text span.
        term_keyword = Keyword('term').suppress() + word
        term = Group(term_keyword) + startDelim + ZeroOrMore(from_then_blk) + endDelim
        term_original = originalTextFor(term)
        # `filter <name> { term ... }` and the top-level `firewall { ... }`.
        filters = Keyword('filter').suppress() + word.suppress() + startDelim + ZeroOrMore(term) + endDelim
        doc = Keyword('firewall').suppress() + startDelim + label + ZeroOrMore(filters) + endDelim
        # Skip /* ... */ comments anywhere in the input.
        doc.ignore(cStyleComment)
        self.doc = doc
        self.term = term
        self.term_keyword = term_keyword
        self.term_original = term_original
        self.destination_addr_blk = destination_addr_blk
    def firewall_filter_full_grammar(self):
        """Grammar for a complete firewall configuration document."""
        return self.doc
    def firewall_filter_term(self):
        """Grammar for a single parsed term."""
        return self.term
    def firewall_filter_term_key(self):
        """Grammar matching just `term <name>` (name kept, keyword dropped)."""
        return self.term_keyword
    def firewall_filter_term_original(self):
        """Grammar returning a term's original, unparsed text."""
        return self.term_original
    def firewall_filter_dest_addr_block(self):
        """Grammar for a destination-address CIDR block."""
        return self.destination_addr_blk
|
import hashlib
import responses
from georef_ar_etl.extractors import DownloadURLStep
from georef_ar_etl.exceptions import ProcessException
from . import ETLTestCase
# pylint: disable=no-member
class TestDownloadURLStep(ETLTestCase):
    """Tests for DownloadURLStep using mocked HTTP responses."""
    # These tests never touch the database.
    _uses_db = False
    @responses.activate
    def test_download(self):
        """The step should download a remote file and return its local
        path."""
        filename = 'file.txt'
        url = 'https://example.com/file.txt'
        body = 'foobar'
        responses.add(responses.GET, url, status=200, body=body, stream=True)
        step = DownloadURLStep(filename, url)
        path = step.run(None, self._ctx)
        # Exactly one HTTP request, and the returned path is the filename.
        self.assertEqual(len(responses.calls), 1)
        self.assertEqual(path, filename)
        # The downloaded contents match the mocked body.
        with self._ctx.fs.open(path) as f:
            contents = f.read()
        self.assertEqual(body, contents)
    @responses.activate
    def test_download_error(self):
        """The step should raise an appropriate exception if the HTTP
        request fails."""
        url = 'https://example.com/file.txt'
        responses.add(responses.GET, url, status=404)
        step = DownloadURLStep('foobar', url)
        with self.assertRaises(ProcessException):
            step.run(None, self._ctx)
    @responses.activate
    def test_download_hash(self):
        """The step should compute the MD5 hash of the downloaded file."""
        filename = 'file.txt'
        url = 'https://example.com/file.txt'
        body = 'testing de hash md5'
        responses.add(responses.GET, url, status=200, body=body, stream=True)
        DownloadURLStep(filename, url).run(None, self._ctx)
        # Recompute the hash locally and compare with the step's report.
        md5 = hashlib.md5()
        md5.update(body.encode())
        report_data = self._ctx.report.get_data('download_url')
        self.assertEqual(report_data[url], md5.hexdigest())
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ContainerImageLayer(object):
    """Metadata describing a single layer of a container image.

    Supported keyword arguments (mirroring the properties of this class):

    * ``digest`` (str) -- sha256 digest of the layer
    * ``size_in_bytes`` (int) -- layer size in bytes
    * ``time_created`` (datetime) -- RFC 3339 creation timestamp
    """

    def __init__(self, **kwargs):
        """Initialize a new ContainerImageLayer from keyword arguments."""
        # Swagger type of each attribute, and its name on the wire (JSON).
        self.swagger_types = {
            'digest': 'str',
            'size_in_bytes': 'int',
            'time_created': 'datetime'
        }
        self.attribute_map = {
            'digest': 'digest',
            'size_in_bytes': 'sizeInBytes',
            'time_created': 'timeCreated'
        }
        self._digest = None
        self._size_in_bytes = None
        self._time_created = None

    @property
    def digest(self):
        """**[Required]** The sha256 digest of the image layer (str)."""
        return self._digest

    @digest.setter
    def digest(self, digest):
        """Set the sha256 digest of the image layer."""
        self._digest = digest

    @property
    def size_in_bytes(self):
        """**[Required]** The size of the layer in bytes (int)."""
        return self._size_in_bytes

    @size_in_bytes.setter
    def size_in_bytes(self, size_in_bytes):
        """Set the size of the layer in bytes."""
        self._size_in_bytes = size_in_bytes

    @property
    def time_created(self):
        """**[Required]** RFC 3339 timestamp of when the layer was created
        (datetime)."""
        return self._time_created

    @time_created.setter
    def time_created(self, time_created):
        """Set the RFC 3339 creation timestamp of the layer."""
        self._time_created = time_created

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        # Two layers are equal when every attribute matches; anything is
        # unequal to None.
        return other is not None and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
|
# pathlib with a working expanduser() (3.5+); fall back to the pathlib2
# backport on older interpreters.
try:
    from pathlib import Path
    Path().expanduser()
except (ImportError,AttributeError): # Python < 3.5
    from pathlib2 import Path
#
from . import hwm14
import logging
from numpy import append, arange, ceil, floor, meshgrid, ones,reshape
# Plotting is optional: when matplotlib (or a usable backend) is missing,
# ``figure`` is left as None and plot methods become no-ops.
try:
    from matplotlib.pyplot import figure,show,subplots,cm
    from matplotlib.colors import Normalize
    import seaborn
except (ImportError, RuntimeError):
    figure=None
#
# Basemap is optional as well; map-based plots check ``Basemap is None``.
try:
    from mpl_toolkits.basemap import Basemap
except ImportError:
    Basemap=None
from os import environ
# Defines a shell variable 'HWMPATH' which indicates the location of
# 'hwm123114.bin', 'dwm07b104i.dat', and 'gd2qd.dat'
#
environ['HWMPATH'] = str(Path(__file__).parent/ 'data')
class HWM14:
    """Driver for the Horizontal Wind Model 2014 (HWM14).

    Computes quiet-time, disturbance (DWM07) and total horizontal winds
    along one coordinate axis selected by ``option``.
    """
    def __init__( self, alt=300., altlim=[0., 400.], altstp=25., ap=[-1, 35],
        f107=-1., f107a=-1., day=323, glat=-11.95, glatlim=[-10.,10.],
        glatstp=2., glon=-76.77, glonlim=[-20., 20.], glonstp=2., option=1,
        stl=-1, ut=12., utlim=[0., 23.], utstp=1., verbose=True, year=1993 ):
        """ Constructor for the Horizontal Wind Model 14

            Input arguments:
            alt - altitude in km
            altlim - altitude range in km
            altstp - altitude resolution in km
            ap - 2-element list with
                 ap[0] -> not used
                 ap[1] -> 3hr ap index
            day - day of year (DOY)
            glat - geog. latitude
            glon - geog. longitude
            f107a - not used
            f107 - not used
            option - profile selection:
                1 (height), 2 (latitude), 3 (local time), 4 (geog. longitude)
            stl - solar local time in hr (not used)
            ut - universal time in hr
            verbose - print message to user
            year - year (YYYY)

            Output:
            The zonal and meridional winds are stored in "self" as follows
                Zonal quiet -> self.QUwind
                Zonal disturbance -> self.DUwind
                Zonal total -> self.Uwind
                Meridional quiet -> self.QVwind
                Meridional disturbance -> self.DVwind
                Meridional total -> self.Vwind
        """
        self.option = option
        self.year, self.doy = year, day
        # Each option fixes every coordinate except the one being swept;
        # the swept coordinate's attribute is deliberately left unset and
        # used for dispatch below.
        if option == 1: # Height profile
            self.glat = glat
            self.glon = glon
            self.stl = stl
            self.altlim = altlim
            self.altstp = altstp
        elif option == 2: # Latitude profile
            self.alt = alt
            self.glon = glon
            self.stl = stl
            self.glatlim = glatlim
            self.glatstp = glatstp
        elif option == 3: # GMT profile
            self.alt = alt
            self.glat = glat
            self.glon = glon
            self.utlim = utlim
            self.utstp = utstp
        elif option == 4: # Longitude profile
            self.ut = ut
            self.alt = alt
            self.glat = glat
            self.glonlim = glonlim
            self.glonstp = glonstp
            self.stl = stl
        else:
            logging.error('Invalid option!')
            return
        # Date in the conventional YYDDD ("IYD") form, e.g. 93323.
        self.iyd = int((year - (2000 if year > 1999 else 1900)) * 1000) + day
        if option != 3:
            self.sec = ut * 3600.
            self.stl = stl
            self.ut = ut
        self.ap = ap
        self.apqt = -ones(2) # Required for quiet time component
        self.f107 = f107
        self.f107a = f107a
        self.verbose = verbose
        self.QUwind = []
        self.DUwind = []
        self.Uwind = []
        self.QVwind = []
        self.DVwind = []
        self.Vwind = []
        # Dispatch on whichever coordinate was left unset by the option above.
        if 'alt' not in self.__dict__: self.HeiProfile()
        elif 'glat' not in self.__dict__: self.LatProfile()
        elif 'ut' not in self.__dict__: self.GMTProfile()
        elif 'glon' not in self.__dict__: self.LonProfile()
        else:
            print()
    def HeiProfile( self ):
        """ Height Profile """
        if self.verbose:
            print( 'HEIGHT PROFILE' )
            print( '          quiet            disturbed               total' )
            print( ' alt      mer      zon      mer      zon      mer      zon' )
        self.altbins = arange( self.altlim[ 0 ], self.altlim[ 1 ] + self.altstp, self.altstp )
        for alt in self.altbins:
            # Quiet-time, disturbance (DWM07) and total winds at this height.
            wqt = hwm14.hwm14( self.iyd, self.sec, alt, self.glat, self.glon, self.stl, \
                self.f107a, self.f107, self.apqt )
            wdt = hwm14.dwm07( self.iyd, self.sec, alt, self.glat, self.glon, self.ap )
            w = hwm14.hwm14( self.iyd, self.sec, alt, self.glat, self.glon, self.stl, \
                self.f107a, self.f107, self.ap )
            if self.verbose : print( ' %3i %8.3f %8.3f %8.3f %8.3f %8.3f %8.3f' % \
                ( alt, wqt[0], wqt[1], wdt[0], wdt[1], w[0], w[1] ) )
            # Index 0 is meridional (V), index 1 zonal (U).
            self.Uwind.append( w[ 1 ] )
            self.Vwind.append( w[ 0 ] )
            self.QUwind.append( wqt[1] )
            self.DUwind.append( wdt[1] )
            self.QVwind.append( wqt[0] )
            self.DVwind.append( wdt[0] )
    def LatProfile( self ):
        """ Latitude Profile """
        if self.verbose:
            print( 'LATITUDE PROFILE' )
            print( '          quiet            disturbed               total' )
            print( ' glat     mer      zon      mer      zon      mer      zon' )
        self.glatbins = arange( self.glatlim[0], self.glatlim[1] + self.glatstp, self.glatstp )
        for glat in self.glatbins:
            wqt = hwm14.hwm14( self.iyd, self.sec, self.alt, glat, self.glon, self.stl, \
                self.f107a, self.f107, self.apqt )
            wdt = hwm14.dwm07( self.iyd, self.sec, self.alt, glat, self.glon, self.ap )
            w = hwm14.hwm14( self.iyd, self.sec, self.alt, glat, self.glon, self.stl, \
                self.f107a, self.f107, self.ap )
            if self.verbose: print( ' %5.1f %8.3f %8.3f %8.3f %8.3f %8.3f %8.3f' % \
                ( glat, wqt[0], wqt[1], wdt[0], wdt[1], w[0], w[1] ) )
            self.Uwind.append( w[ 1 ] )
            self.Vwind.append( w[ 0 ] )
            self.QUwind.append( wqt[1] )
            self.DUwind.append( wdt[1] )
            self.QVwind.append( wqt[0] )
            self.DVwind.append( wdt[0] )
    def GMTProfile( self ):
        """ GMT Profile """
        if self.verbose:
            print( 'GMT PROFILE' )
            print( '          quiet            disturbed               total' )
            print( ' stl      mer      zon      mer      zon      mer      zon' )
        self.utbins = arange(self.utlim[0], self.utlim[1] + self.utstp, self.utstp)
        self.mltbins = []
        for ut in self.utbins:
            # Track the magnetic local time corresponding to each UT bin.
            self.toMLT(ut)
            self.mltbins.append(self.mlt)
            sec = ut * 3600
            # stl is -1 here: local time is not applicable when sweeping UT.
            wqt = hwm14.hwm14( self.iyd, sec, self.alt, self.glat, self.glon, -1, \
                self.f107a, self.f107, self.apqt )
            wdt = hwm14.dwm07( self.iyd, sec, self.alt, self.glat, self.glon, self.ap )
            w = hwm14.hwm14( self.iyd, sec, self.alt, self.glat, self.glon, -1, \
                self.f107a, self.f107, self.ap )
            if self.verbose: print( ' %5.1f %8.3f %8.3f %8.3f %8.3f %8.3f %8.3f' % \
                ( ut, wqt[0], wqt[1], wdt[0], wdt[1], w[0], w[1] ) )
            self.Uwind.append( w[ 1 ] )
            self.Vwind.append( w[ 0 ] )
            self.QUwind.append( wqt[1] )
            self.DUwind.append( wdt[1] )
            self.QVwind.append( wqt[0] )
            self.DVwind.append( wdt[0] )
    def LonProfile( self ):
        """ Longitude Profile """
        if self.verbose:
            print( 'LONGITUDE PROFILE' )
            print( '          quiet            disturbed               total' )
            print( ' glon     mer      zon      mer      zon      mer      zon' )
        self.glonbins = arange(self.glonlim[0], self.glonlim[1] + self.glonstp, self.glonstp)
        for glon in self.glonbins:
            wqt = hwm14.hwm14( self.iyd, self.sec, self.alt, self.glat, glon, -1, \
                self.f107a, self.f107, self.apqt )
            wdt = hwm14.dwm07( self.iyd, self.sec, self.alt, self.glat, glon, self.ap )
            w = hwm14.hwm14( self.iyd, self.sec, self.alt, self.glat, glon, -1, \
                self.f107a, self.f107, self.ap )
            if self.verbose: print( ' %5.1f %8.3f %8.3f %8.3f %8.3f %8.3f %8.3f' % \
                ( glon, wqt[0], wqt[1], wdt[0], wdt[1], w[0], w[1] ) )
            self.Uwind.append( w[ 1 ] )
            self.Vwind.append( w[ 0 ] )
            self.QUwind.append( wqt[1] )
            self.DUwind.append( wdt[1] )
            self.QVwind.append( wqt[0] )
            self.DVwind.append( wdt[0] )
    def toMLT(self, ut):
        """ Magnetic Local Time """
        # gd2qd converts geodetic to quasi-dipole coordinates; mltcalc then
        # derives magnetic local time for this day-of-year and UT.
        hwm14.inithwm()
        mlat, mlon, f1e, f1n, f2e, f2n = hwm14.gd2qd(self.glat, self.glon)
        self.mlt = hwm14.mltcalc(mlat, mlon, self.doy, ut)
class HWM14Plot:
    """Graphical report for a :class:`HWM14` profile object.

    Reads the winds computed by :class:`HWM14` and renders the matching
    1-D plot (height / latitude / GMT / longitude profile).
    """
    def __init__( self, profObj=None ):
        """Build and show the plot for ``profObj`` (a :class:`HWM14`)."""
        # Fixed: identity comparison with None (was ``!= None``).
        if profObj is not None:
            self.option = profObj.option
            self.year, self.doy = profObj.year, profObj.doy
            # Each profile type leaves exactly one coordinate unset on profObj.
            if self.option != 3: self.ut = profObj.ut
            if self.option != 2: self.glat = profObj.glat
            if self.option != 4: self.glon = profObj.glon
            if self.option != 1: self.alt = profObj.alt
            self.ap = profObj.ap
            if self.option >= 1 and self.option <= 4:
                self.Uwind = profObj.Uwind
                self.Vwind = profObj.Vwind
                valid = True
                if self.option == 1:
                    self.altbins = profObj.altbins
                    self.HeiProfPlot()
                elif self.option == 2:
                    self.glatbins = profObj.glatbins
                    self.LatProfPlot()
                elif self.option == 3:
                    self.utbins = profObj.utbins
                    self.GMTProfPlot()
                elif self.option == 4:
                    self.glonbins = profObj.glonbins
                    self.LonProfPlot()
                else:
                    print( 'Invalid option!' )
                    valid = False
                # Only display when matplotlib could be imported.
                if valid and figure is not None:
                    show()
        else:
            print( 'Wrong inputs!' )
    def GetHHMMSS(self):
        """Split the fractional UT hour into hour/minute/second fields."""
        hh = floor(self.ut)
        dummy = self.ut - hh
        mm = floor(dummy * 60)
        dummy = dummy * 60 - mm
        self.second = int(floor(dummy * 60))
        self.hour, self.minute = int(hh), int(mm)
    def GetTitle(self):
        """Compose ``self.title`` from the profile's fixed coordinates.

        Pieces that do not apply to the current option (e.g. altitude for a
        height profile) simply fail inside their try blocks and are skipped.
        """
        dateStr = 'DATE: {:4d}.{:03d}'.format(self.year, self.doy)
        try:
            self.GetHHMMSS()
            timeStr = 'TIME: {:02d}:{:02d} UT'.format(self.hour, self.minute)
        except Exception:
            pass
        apStr = 'ap: {:3d}'.format(self.ap[1])
        try:
            altStr = 'ALT: {:7.2f} km'.format(self.alt)
        except Exception:
            pass
        try:
            # Raw strings: '\c' in '$^\circ$' is an invalid escape otherwise.
            latStr = r'{:6.2f}$^\circ${:s}'.format(abs(self.glat),
                'N' if self.glat > 0 else 'S')
        except Exception:
            pass
        try:
            lonStr = r'{:6.2f}$^\circ${:s}'.format(abs(self.glon),
                'E' if self.glon > 0 else 'W')
        except Exception:
            pass
        try:
            locStr = '{:s}, {:s}'.format(latStr, lonStr)
        except Exception:
            pass
        if self.option == 1:
            self.title = '{:s} - {:s} - {:s} - {:s}'.format(dateStr, timeStr, apStr, locStr)
        elif self.option == 2:
            self.title = '{:s} - {:s} - {:s} - {:s} - GEOG. LON.: {:s}'.format(dateStr, timeStr, apStr, altStr, lonStr)
        elif self.option == 3:
            self.title = '{:s} - {:s} - {:s} - {:s}'.format(dateStr, apStr, altStr, locStr)
        elif self.option == 4:
            self.title = '{:s} - {:s} - {:s} - {:s} - GEOG. LAT.: {:s}'.format(dateStr, timeStr, apStr, altStr, latStr)
    def HeiProfPlot( self ):
        """Wind components vs altitude (option 1)."""
        if figure is None:
            return
        self.GetTitle()
        ax = figure().gca()
        ax.plot( self.Uwind, self.altbins, label='U' )
        ax.plot( self.Vwind, self.altbins, label='V' )
        ax.set_ylim(self.altbins[[0, -1]])
        ax.set_title(self.title)
        ax.set_xlabel( r'(m/s)' )
        ax.set_ylabel( r'(km)')
        ax.legend( loc='best' )
    def LatProfPlot( self ):
        """Wind components vs geographic latitude (option 2)."""
        if figure is None:
            return
        self.GetTitle()
        ax = figure().gca()
        ax.plot( self.glatbins, self.Uwind, label='U' )
        ax.plot( self.glatbins, self.Vwind, label='V' )
        ax.set_xlim(self.glatbins[[0, -1]])
        ax.set_title(self.title)
        ax.set_xlabel( r'Geog. Lat. ($^\circ$)' )
        ax.set_ylabel( r'Wind speed (m/s)')
        ax.legend( loc='best' )
    def GMTProfPlot( self ):
        """Wind components vs universal time (option 3)."""
        if figure is None:
            return
        self.GetTitle()
        ax = figure().gca()
        ax.plot( self.utbins, self.Uwind, label='U' )
        ax.plot( self.utbins, self.Vwind, label='V' )
        ax.set_xlim(self.utbins[[0, -1]])
        ax.set_title(self.title)
        ax.set_xlabel( r'Hour (GMT)' )
        ax.set_ylabel( r'Wind speed (m/s)')
        ax.legend( loc='best' )
    def LonProfPlot( self ):
        """Wind components vs geographic longitude (option 4)."""
        if figure is None:
            return
        self.GetTitle()
        ax = figure().gca()
        ax.plot( self.glonbins, self.Uwind, label='U' )
        ax.plot( self.glonbins, self.Vwind, label='V' )
        ax.set_xlim(self.glonbins[[0, -1]])
        ax.set_title(self.title)
        ax.set_xlabel( r'Geog. Lon. ($^\circ$)' )
        ax.set_ylabel( r'Wind speed (m/s)')
        ax.legend( loc='best' )
class HWM142D:
    """Compute HWM14 winds over a 2-D grid of coordinates.

    ``option`` selects the axis pair:
        1 - Time vs Height        2 - Latitude vs Height
        3 - GMT vs Latitude       4 - Longitude vs Height
        5 - GMT vs Longitude      6 - Longitude vs Latitude
    Zonal winds accumulate in ``self.Uwind`` and meridional winds in
    ``self.Vwind`` (2-D numpy arrays once the sweep finishes).
    """
    def __init__( self, alt=300., altlim=[0., 400.], altstp=25., ap=[-1, 35],
        day=323, f107=-1, f107a=-1, glat=-11.95, glatlim=[-40., 40.],
        glatstp=5., glon=-76.77, glonlim=[-40., 40], glonstp=5., option=1,
        stl=-1, utlim=[0., 24.], utstp=1., ut=12., verbose=True, year=1993 ):
        """Store the sweep limits for the chosen ``option`` and run it.

        Parameters follow :class:`HWM14`; the ``*lim``/``*stp`` pairs define
        the two swept axes for the selected option.
        """
        self.option = option
        self.year, self.doy = year, day
        # Options 3 and 5 sweep over UT themselves, so no single ut applies.
        if option not in (3, 5): self.ut = ut
        if option == 1: # Time vs Height
            self.glat = glat
            self.glon = glon
            self.stl = stl
            self.utlim = utlim
            self.utstp = utstp
            self.altlim = altlim
            self.altstp = altstp
        elif option == 2: # Latitude vs Height
            self.alt = alt
            self.glon = glon
            self.stl = stl
            self.altlim = altlim
            self.altstp = altstp
            self.glatlim = glatlim
            self.glatstp = glatstp
        elif option == 3: # GMT vs Latitude
            self.alt = alt
            self.glon = glon
            self.glatlim = glatlim
            self.glatstp = glatstp
            self.utlim = utlim
            self.utstp = utstp
        elif option == 4: # Longitude vs Height
            self.alt = alt
            self.glat = glat
            self.altlim = altlim
            self.altstp = altstp
            self.glonlim = glonlim
            self.glonstp = glonstp
        elif option == 5: # GMT vs Longitude
            self.alt = alt
            self.glon = glon
            self.glonlim = glonlim
            self.glonstp = glonstp
            self.utlim = utlim
            self.utstp = utstp
        elif option == 6: # Longitude vs Latitude
            self.alt = alt
            self.glatlim = glatlim
            self.glatstp = glatstp
            self.glonlim = glonlim
            self.glonstp = glonstp
        else:
            logging.error('Invalid option!')
            return
        # Date in the conventional YYDDD ("IYD") form, e.g. 93323.
        # Bug fix: the multiplier was 10000, which is inconsistent with
        # HWM14 (yy * 1000 + doy) and the YYDDD convention.
        self.iyd = int((year - (2000 if year > 1999 else 1900)) * 1000) + day
        if option != 3: self.sec = ut * 3600.
        self.ap = ap
        self.apqt = -ones( 2 ) # Required for quiet time component
        self.f107 = f107
        self.f107a = f107a
        self.verbose = verbose
        self.Uwind = []
        self.Vwind = []
        # Dispatch on which coordinates were left unset by the option above.
        if 'alt' not in self.__dict__: self.HeiVsLTArray()
        elif 'glat' not in self.__dict__ and 'glon' not in self.__dict__: self.LonVsLatArray()
        elif 'glat' not in self.__dict__ and 'ut' not in self.__dict__: self.LatVsGMTArray()
        elif 'glat' not in self.__dict__: self.LatVsHeiArray()
        elif 'glon' not in self.__dict__: self.LonVsHeiArray()
        else: print( '' )
    def HeiVsLTArray( self ):
        """Sweep UT, computing one height profile per universal time."""
        self.utbins = arange( self.utlim[ 0 ], self.utlim[ 1 ] + self.utstp, self.utstp )
        for ut in self.utbins:
            # Generates model data
            hwm14Obj = HWM14( altlim=self.altlim, altstp=self.altstp, ap=self.ap,
                glat=self.glat, glon=self.glon, option=self.option, ut=ut, \
                verbose=self.verbose )
            Uwind = reshape( hwm14Obj.Uwind, ( len( hwm14Obj.Uwind ), 1 ) )
            Vwind = reshape( hwm14Obj.Vwind, ( len( hwm14Obj.Vwind ), 1 ) )
            # First column starts the array; later columns are appended.
            self.Uwind = Uwind if ut == self.utlim[ 0 ] else append( self.Uwind, Uwind, axis=1 )
            self.Vwind = Vwind if ut == self.utlim[ 0 ] else append( self.Vwind, Vwind, axis=1 )
        self.altbins = hwm14Obj.altbins
    def LatVsHeiArray(self):
        """Sweep altitude, computing one latitude profile per height."""
        self.altbins = arange(self.altlim[0], self.altlim[1] + self.altstp, self.altstp)
        for _alt in self.altbins:
            hwm14Obj = HWM14( alt=_alt, ap=self.ap, glatlim=self.glatlim,
                glatstp=self.glatstp, glon=self.glon, option=self.option,
                verbose=self.verbose, ut=self.ut )
            Uwind = reshape( hwm14Obj.Uwind, ( len( hwm14Obj.Uwind ), 1 ) )
            Vwind = reshape( hwm14Obj.Vwind, ( len( hwm14Obj.Vwind ), 1 ) )
            self.Uwind = Uwind if _alt == self.altlim[ 0 ] else append( self.Uwind, Uwind, axis=1 )
            self.Vwind = Vwind if _alt == self.altlim[ 0 ] else append( self.Vwind, Vwind, axis=1 )
        self.glatbins = hwm14Obj.glatbins
        # Transpose so the first axis follows the swept (latitude) bins.
        self.Uwind = self.Uwind.T
        self.Vwind = self.Vwind.T
    def LonVsHeiArray(self):
        """Sweep altitude, computing one longitude profile per height."""
        self.altbins = arange(self.altlim[0], self.altlim[1] + self.altstp, self.altstp)
        for alt in self.altbins:
            hwm14Obj = HWM14(alt=alt, ap=self.ap, glat=self.glat,
                glonlim=self.glonlim, glonstp=self.glonstp,
                option=self.option, verbose=self.verbose, ut=self.ut )
            Uwind = reshape( hwm14Obj.Uwind, ( len( hwm14Obj.Uwind ), 1 ) )
            Vwind = reshape( hwm14Obj.Vwind, ( len( hwm14Obj.Vwind ), 1 ) )
            self.Uwind = Uwind if alt == self.altlim[ 0 ] else append( self.Uwind, Uwind, axis=1 )
            self.Vwind = Vwind if alt == self.altlim[ 0 ] else append( self.Vwind, Vwind, axis=1 )
        self.glonbins = hwm14Obj.glonbins
        self.Uwind = self.Uwind.T
        self.Vwind = self.Vwind.T
    def LonVsLatArray(self):
        """Sweep latitude, computing one longitude profile per latitude."""
        self.glatbins = arange(self.glatlim[0], self.glatlim[1] + self.glatstp, self.glatstp)
        for glat in self.glatbins:
            # Each row is a longitude profile (HWM14 option 4) at fixed glat.
            hwm14Obj = HWM14(alt=self.alt, ap=self.ap, glat=glat,
                glonlim=self.glonlim, glonstp=self.glonstp,
                option=4, verbose=self.verbose, ut=self.ut )
            Uwind = reshape( hwm14Obj.Uwind, ( len( hwm14Obj.Uwind ), 1 ) )
            Vwind = reshape( hwm14Obj.Vwind, ( len( hwm14Obj.Vwind ), 1 ) )
            self.Uwind = Uwind if glat == self.glatlim[ 0 ] else append( self.Uwind, Uwind, axis=1 )
            self.Vwind = Vwind if glat == self.glatlim[ 0 ] else append( self.Vwind, Vwind, axis=1 )
        self.glonbins = hwm14Obj.glonbins
        self.Uwind = self.Uwind.T
        self.Vwind = self.Vwind.T
    def LatVsGMTArray(self):
        """Not implemented yet (option 3: GMT vs Latitude)."""
        pass
#
# End of 'HWM142D'
#####
class HWM142DPlot:
    """Graphical report for a :class:`HWM142D` object (2-D wind maps)."""
    def __init__( self, profObj=None, WF=False, zMax=[None]*2, zMin=[None]*2 ):
        """
        :param profObj: a populated :class:`HWM142D` instance
        :param WF: for option 6, draw a quiver wind-field map instead of
            two pcolor panels
        :param zMax: color-scale maxima for the [zonal, meridional] panels
        :param zMin: color-scale minima for the [zonal, meridional] panels
        """
        # Fixed: identity comparison with None (was ``!= None``).
        if profObj is not None:
            self.zMin, self.zMax = zMin, zMax
            self.WF = WF
            self.option = profObj.option
            self.year, self.doy = profObj.year, profObj.doy
            # NOTE(review): HWM142D does not set ``ut`` for options 3 and 5;
            # those options are not handled below either.
            self.ut = profObj.ut
            if self.option != 1: self.alt = profObj.alt
            if self.option != 2 and self.option != 6: self.glat = profObj.glat
            if self.option != 4 and self.option != 6: self.glon = profObj.glon
            self.ap = profObj.ap
            if self.option >= 1 and self.option <= 6:
                self.Uwind = profObj.Uwind
                self.Vwind = profObj.Vwind
                valid = True
                if self.option == 1:
                    self.altbins = profObj.altbins
                    self.altlim = profObj.altlim
                    self.utbins = profObj.utbins
                    self.utlim = profObj.utlim
                    self.HeiVsLTPlot()
                elif self.option == 2:
                    self.glatbins = profObj.glatbins
                    self.glatlim = profObj.glatlim
                    self.altbins = profObj.altbins
                    self.altlim = profObj.altlim
                    self.LatVsHeiPlot()
                elif self.option == 4:
                    self.glonbins = profObj.glonbins
                    self.glonlim = profObj.glonlim
                    self.altbins = profObj.altbins
                    self.altlim = profObj.altlim
                    self.LonVsHeiPlot()
                elif self.option == 6:
                    self.glonbins = profObj.glonbins
                    self.glonlim = profObj.glonlim
                    self.glatbins = profObj.glatbins
                    self.glatlim = profObj.glatlim
                    self.LonVsLatPlot()
                else:
                    # Options 3 and 5 have no plotting support yet.
                    print( 'Invalid option!' )
                    valid = False
                if valid:
                    show()
        else:
            print( 'Wrong inputs!' )
    def GetHHMMSS(self):
        """Split the fractional UT hour into hour/minute/second fields."""
        hh = floor(self.ut)
        dummy = self.ut - hh
        mm = floor(dummy * 60)
        dummy = dummy * 60 - mm
        self.second = int(floor(dummy * 60))
        self.hour, self.minute = int(hh), int(mm)
    def GetTitle(self):
        """Compose ``self.title`` from the map's fixed coordinates.

        Pieces that do not apply to the current option simply fail inside
        their try blocks and are skipped.
        """
        dateStr = 'DATE: {:4d}.{:03d}'.format(self.year, self.doy)
        self.GetHHMMSS()
        timeStr = 'TIME: {:02d}:{:02d} UT'.format(self.hour, self.minute)
        apStr = 'ap: {:3d}'.format(self.ap[1])
        try:
            altStr = 'ALT: {:7.2f} km'.format(self.alt)
        except Exception:
            pass
        try:
            # Raw strings: '\c' in '$^\circ$' is an invalid escape otherwise.
            latStr = r'{:6.2f}$^\circ${:s}'.format(abs(self.glat),
                'N' if self.glat > 0 else 'S')
        except Exception:
            pass
        try:
            lonStr = r'{:6.2f}$^\circ${:s}'.format(abs(self.glon),
                'E' if self.glon > 0 else 'W')
        except Exception:
            pass
        try:
            locStr = '{:s}, {:s}'.format(latStr, lonStr)
        except Exception:
            pass
        if self.option == 1:
            self.title = '{:s} - {:s} - {:s}'.format(dateStr, apStr, locStr)
        elif self.option == 2:
            self.title = '{:s} - {:s} - {:s} - GEOG. LON.: {:s}'.format(dateStr, timeStr, apStr, lonStr)
        elif self.option == 4:
            self.title = '{:s} - {:s} - {:s} - GEOG. LAT.: {:s}'.format(dateStr, timeStr, apStr, latStr)
        elif self.option == 6:
            self.title = '{:s} - {:s} - {:s} - {:s}'.format(dateStr, timeStr, apStr, altStr)
    def XVsY2DWindMap(self, ax, xVal, yVal, uVal, vVal, title=None, xlabel=None,
        xlim=None, ylabel=None, ylim=None, zlabel=None, zMax=None, zMin=None):
        """Quiver map of the (u, v) wind field over a lon/lat Basemap."""
        if Basemap is None:
            return
        m = Basemap(llcrnrlon=self.glonlim[0], llcrnrlat=self.glatlim[0],
            urcrnrlon=self.glonlim[-1], urcrnrlat=self.glatlim[-1], resolution='l')
        m.drawcoastlines()
        # Lines at constant "latitude"
        parallelsLim = self._RoundLim([yVal[0], yVal[-1]])
        m.drawparallels(arange(parallelsLim[0], parallelsLim[1], 20.), labels=[True,False,False,True])
        # Lines at constant "longitude"
        meridiansLim = self._RoundLim([xVal[0], xVal[-1]])
        m.drawmeridians(arange(meridiansLim[0], meridiansLim[1], 30.), labels=[True,False,False,True])
        X, Y = meshgrid(xVal, yVal)
        # Color arrows by total wind speed, then overlay outlined arrows.
        totalWind = (uVal**2 + vVal**2)**.5
        ipc = m.quiver(X, Y, uVal.T, vVal.T, totalWind.T,
            alpha=.5, angles='uv', cmap=cm.jet, pivot='middle', units='xy')
        ipc2 = m.quiver(X, Y, uVal.T, vVal.T,
            angles='uv', edgecolor='k', facecolor='None', linewidth=.5, pivot='middle',
            units='xy')
        ax.set_xlim( xlim )
        ax.set_ylim( ylim )
        ax.set_title( title )
        cbpn = m.colorbar(ipc)
        cbpn.set_label(zlabel)
    def XVsY2DMap(self, ax, xVal, yVal, zVal, cmap=None, title=None, xlabel=None,
        xlim=None, ylabel=None, ylim=None, zlabel=None, zMax=None, zMin=None):
        """Pcolor map of a scalar field over a lon/lat Basemap."""
        if Basemap is None:
            return
        m = Basemap(llcrnrlon=self.glonlim[0], llcrnrlat=self.glatlim[0],
            urcrnrlon=self.glonlim[-1], urcrnrlat=self.glatlim[-1], resolution='l')
        m.drawcoastlines()
        # Lines at constant "latitude"
        parallelsLim = self._RoundLim([yVal[0], yVal[-1]])
        m.drawparallels(arange(parallelsLim[0], parallelsLim[1], 20.), labels=[True,False,False,True])
        # Lines at constant "longitude"
        meridiansLim = self._RoundLim([xVal[0], xVal[-1]])
        m.drawmeridians(arange(meridiansLim[0], meridiansLim[1], 30.), labels=[True,False,False,True])
        X, Y = meshgrid(xVal, yVal)
        ipc = m.pcolor(X, Y, zVal.T, cmap=cmap, edgecolors='None',
            norm=Normalize(), vmax=zMax, vmin=zMin)
        ax.set_xlim( xlim )
        ax.set_ylim( ylim )
        ax.set_title( title )
        cbpn = m.colorbar(ipc)
        cbpn.set_label(zlabel)
    def XVsY2DPlot( self, ax, xVal, yVal, zVal, cmap=None, title=None,
        xlabel=None, xlim=None, ylabel=None, ylim=None, zlabel=None, zMax=None, zMin=None ):
        """Plain pcolor panel of a scalar field (no map projection)."""
        if figure is None:
            return
        X, Y = meshgrid( xVal, yVal )
        X = X.T
        Y = Y.T
        C = zVal.T
        ipn = ax.pcolor( X, Y, C, cmap=cmap, edgecolors='None', norm=Normalize(),
            vmax=zMax, vmin=zMin )
        ax.set_xlim( xlim )
        ax.set_ylim( ylim )
        ax.set_title( title )
        ax.set_xlabel( xlabel )
        ax.set_ylabel( ylabel )
        cbpn = ax.figure.colorbar( ipn,ax=ax )
        cbpn.set_label( zlabel )
    def HeiVsLTPlot( self ):
        """Zonal/meridional panels of wind vs (GMT, altitude) -- option 1."""
        self.GetTitle()
        cmap = cm.RdBu_r
        fg,axs = subplots(1,2, figsize=(15,6) )
        self.XVsY2DPlot( axs[0], self.utbins, self.altbins, self.Uwind, cmap=cmap,
            title=self.title, xlabel=r'Hour (GMT)', xlim=self.utlim, ylabel=r'Altitude (km)',
            ylim=self.altlim, zlabel=r'Zonal (U), m/s', zMax=self.zMax[0], zMin=self.zMin[0] )
        self.XVsY2DPlot( axs[1], self.utbins, self.altbins, self.Vwind, cmap=cmap,
            title=self.title, xlabel=r'Hour (GMT)', xlim=self.utlim, ylabel=r'Altitude (km)',
            ylim=self.altlim, zlabel=r'Meridional (V), m/s', zMax=self.zMax[1], zMin=self.zMin[1] )
    def LatVsHeiPlot(self):
        """Zonal/meridional panels of wind vs (latitude, altitude) -- option 2."""
        self.GetTitle()
        cmap = cm.RdBu_r
        fg,axs = subplots(1,2, figsize=(15,6) )
        self.XVsY2DPlot( axs[0], self.glatbins, self.altbins, self.Uwind, cmap=cmap,
            title=self.title, xlabel=r'Geog. Lat. ($^o$)', xlim=self.glatlim,
            ylabel=r'Altitude (km)', ylim=self.altlim, zlabel=r'Zonal (U), m/s',
            zMax=self.zMax[0], zMin=self.zMin[0] )
        self.XVsY2DPlot( axs[1], self.glatbins, self.altbins, self.Vwind, cmap=cmap,
            title=self.title, xlabel=r'Geog. Lat. ($^o$)',
            xlim=self.glatlim, ylabel=r'Altitude (km)', ylim=self.altlim,
            zlabel=r'Meridional (V), m/s', zMax=self.zMax[1], zMin=self.zMin[1] )
    def LonVsHeiPlot(self):
        """Zonal/meridional panels of wind vs (longitude, altitude) -- option 4."""
        self.GetTitle()
        cmap = cm.RdBu_r
        fg,axs = subplots(1,2, figsize=(15,6) )
        self.XVsY2DPlot( axs[0], self.glonbins, self.altbins, self.Uwind, cmap=cmap,
            title=self.title, xlabel=r'Geog. Lon. ($^o$)', xlim=self.glonlim,
            ylabel=r'Altitude (km)', ylim=self.altlim, zlabel=r'Zonal (U), m/s',
            zMax=self.zMax[0], zMin=self.zMin[0] )
        self.XVsY2DPlot( axs[1], self.glonbins, self.altbins, self.Vwind, cmap=cmap,
            title=self.title, xlabel=r'Geog. Lon. ($^o$)',
            xlim=self.glonlim, ylabel=r'Altitude (km)', ylim=self.altlim,
            zlabel=r'Meridional (V), m/s', zMax=self.zMax[1], zMin=self.zMin[1] )
    def LonVsLatPlot(self):
        """Wind over a lon/lat map -- option 6.

        With ``WF`` True a single quiver wind-field map is drawn; otherwise
        two pcolor panels (zonal and meridional components).
        """
        self.GetTitle()
        if not self.WF:
            cmap = cm.RdBu_r
            fg,axs = subplots(2,1, figsize=(8,8) )
            self.XVsY2DMap( axs[0], self.glonbins, self.glatbins, self.Uwind, cmap=cmap,
                title=self.title, xlabel=r'Geog. Lon. ($^o$)', xlim=self.glonlim,
                ylabel=r'Geog. Lat. ($^o$)', ylim=self.glatlim, zlabel=r'Zonal (U), m/s',
                zMax=self.zMax[0], zMin=self.zMin[0] )
            self.XVsY2DMap( axs[1], self.glonbins, self.glatbins, self.Vwind, cmap=cmap,
                title=self.title, xlabel=r'Geog. Lon. ($^o$)',
                xlim=self.glonlim, ylabel=r'Geog. Lat. ($^o$)', ylim=self.glatlim,
                zlabel=r'Meridional (V), m/s', zMax=self.zMax[1], zMin=self.zMin[1] )
        else:
            ax = figure(figsize=(16,12)).gca()
            self.XVsY2DWindMap(ax, self.glonbins, self.glatbins, self.Uwind, self.Vwind,
                title=self.title, \
                xlabel=r'Geog. Lon. ($^o$)', xlim=self.glonlim, \
                ylabel=r'Geog. Lat. ($^o$)', ylim=self.glatlim, \
                zlabel='Wind (m/s)', zMax=self.zMax[0], zMin=self.zMin[0])
    def _RoundLim(self, lim):
        """Round [lo, hi] outward to the nearest multiples of 10."""
        return list(map(lambda x : x * 10., [floor(lim[0] / 10.), ceil(lim[1] / 10.)]))
if __name__ == '__main__':
    def main():
        """Example: longitude profile of HWM14 winds plus a simple plot."""
        model = HWM14( altlim=[0, 200], altstp=5., glat=-12., glon=283.13, option=4, verbose=False )
        HWM14Plot( profObj=model )
    main()
|
import numpy as np
# TODO(Jefferson): after switching to use numpy for general calculation, data model
# here feels less optimal and could likely be improved.
class Vec3:
    """A 3-component vector backed by a numpy array.

    Components are readable geometrically (x/y/z) or as a color (r/g/b).
    Arithmetic operators return new Vec3 instances and never mutate self.
    """
    def __init__(self, data=None):
        """Create a vector from any 3-element sequence; defaults to zeros.

        ``data=None`` (instead of a mutable numpy-array default) avoids the
        mutable-default-argument pitfall of sharing one array object.
        """
        self.data = np.array([0., 0., 0.]) if data is None else np.array(data)
    def x(self):
        return self.data[0]
    def y(self):
        return self.data[1]
    def z(self):
        return self.data[2]
    def r(self):
        return self.data[0]
    def g(self):
        return self.data[1]
    def b(self):
        return self.data[2]
    def __add__(self, other):
        return Vec3(data = (self.x() + other.x(), self.y() + other.y(), self.z() + other.z()))
    def __sub__(self, other):
        return Vec3(data = (self.x() - other.x(), self.y() - other.y(), self.z() - other.z()))
    def __truediv__(self, scalar):
        return Vec3(data = (self.x() / scalar, self.y() / scalar, self.z() / scalar))
    def __mul__(self, scalar):
        return Vec3(data = (self.x() * scalar, self.y() * scalar, self.z() * scalar))
    def __getitem__(self, index):
        return self.data[index]
    def __len__(self):
        return len(self.data)
    def __str__(self):
        return 'Vec3: ' + str(self.data)
    def dot(self, other):
        """Scalar (dot) product of two vectors."""
        return self.x() * other.x() + self.y() * other.y() + self.z() * other.z()
    def cross(self, other):
        '''cross product of two vectors'''
        # Bug fix: the y component previously read -(x1*z2 - z1*y2); the
        # correct cross-product term is -(x1*z2 - z1*x2).
        return Vec3(data=(
            self[1] * other[2] - self[2] * other[1],
            -(self[0] * other[2] - self[2] * other[0]),
            self[0] * other[1] - self[1] * other[0]
        ))
    def squared_length(self):
        """Squared Euclidean norm (cheaper than ``length`` -- no sqrt)."""
        return self.dot(self)
    def length(self):
        """Euclidean norm."""
        return np.sqrt(self.squared_length())
    def normalized(self):
        """Unit vector in the same direction (undefined for zero vectors)."""
        return self / self.length()
|
import requests
import re
from util import Profile, write_poem
from bs4 import BeautifulSoup
import time
from random import randint
BASE_URL = 'http://www.zgshige.com'
# Matches the javascript redirect embedded in poet pages; captures the poem
# list URL prefix, the (URI-encoded) poet name, and the query-string suffix.
list_reg = re.compile(r"javascript:window.location.href='(http://www.zgshige.com/zcms/poem/list\?SiteID=\d+&poetname=)'\+encodeURI\('(.+?)'\)\+'(&articleID=\d+&articleContributeUID=\d+&catalogID=\d+)';")
# Matches one poem entry on a list page; captures the poem URL, its date
# (YYYY-MM-DD path segment) and its title (between 《 》).
title_reg = re.compile(r'class="sr_dt_title">\s*<a href="(http://www.zgshige.com/c/([\d-]+)/\d+.shtml)">《(.+?)》</a>')
def get(url):
    """Fetch ``url`` and return its body decoded as UTF-8 text."""
    resp = requests.get(url)
    resp.encoding = 'utf-8'
    return resp.text
def read_content(url):
    """Download a poem page and return the text of its ``#content`` div."""
    html = get(url)
    # Turn explicit line-break tags into newlines before stripping markup,
    # in the same order as before.
    for tag, repl in (('<br>', '\n'), ('<BR>', '\n'), ('<br/>', '\n'),
                      ('<BR/>', '\n'), ('</P>', '\n</P>'), ('</p>', '\n</p>')):
        html = html.replace(tag, repl)
    soup = BeautifulSoup(html, 'lxml')
    return soup.find('div', id='content').text
def read_list(poet_name, list_url):
    """Read every poem title/link on a list page and save each poem.

    (Original docstring: 读取所有的标题和链接 -- read all titles and links.)
    """
    text = get(list_url).replace(' ', '')
    for url, date, poem_title in title_reg.findall(text):
        # Append the publication date after the poem body.
        content = read_content(url) + '\n' + date
        write_poem(Profile(href=url, author=poet_name, title=poem_title), content)
def read_by_poet(url):
    """Resolve a poet page (relative ``url``) to the poet's poem-list URL
    and read every poem from it.

    (Original docstring: 根据诗人的 url 获取作品绝对链接.)
    """
    page = get(BASE_URL + url)
    matches = list_reg.findall(page)
    if matches:
        prefix, poet_name, suffix = matches[0]
        # The three captured fragments concatenate into the full list URL.
        read_list(poet_name, prefix + poet_name + suffix)
# Matches poet article links (relative URLs) on the poet index pages.
poet_reg = re.compile(r'<a href="(/c/[\d-]+?/\d+.shtml)" class="p-t-xs p-b-xs block text-center" target="_blank">.*?</a>')
def read_teyao():
    """Crawl the invited-poets index page (特邀诗人)."""
    listing = get('http://www.zgshige.com/tysr/')
    for poet_url in poet_reg.findall(listing):
        read_by_poet(poet_url)
        print(BASE_URL + poet_url)
        # Be polite to the server: random delay between poets.
        time.sleep(randint(4, 10))
def read_zhuzhan():
    """Crawl the resident-poets index page (驻站诗人)."""
    listing = get('http://www.zgshige.com/zzsr/')
    for poet_url in poet_reg.findall(listing):
        read_by_poet(poet_url)
        print(BASE_URL + poet_url)
        # Be polite to the server: random delay between poets.
        time.sleep(randint(4, 10))
# Matches poet article links on the famous-poets (名家) index pages.
mingjia_reg = re.compile(r'<a class="h4 bold" href="(/c/[\d-]+/\d+?.shtml)" target="_blank">')
def read_mingjia_page(page_num):
    """Crawl one page of the "famous poets" column (pages are 1-based).

    Page 1 lives at index.shtml; later pages are index_<n>.shtml.
    """
    page_url = ('http://www.zgshige.com/zzmjx/index.shtml'
                if page_num == 1
                else 'http://www.zgshige.com/zzmjx/index_{}.shtml'.format(str(page_num)))
    for poet_url in mingjia_reg.findall(get(page_url)):
        read_by_poet(poet_url)
        print(BASE_URL + poet_url)
        # time.sleep(randint(4, 10))  # rate-limit deliberately disabled here
def read_mingjia(n_pages=13):
    """Crawl the "famous poets" column, page by page.

    Parameters
    ----------
    n_pages : int, optional
        Number of index pages to walk. Defaults to 13, the count the site
        had when this was written; pass a different value if it changes.
    """
    for page_num in range(1, n_pages + 1):
        read_mingjia_page(page_num)
        # Progress indicator: which page just finished.
        print(page_num)
# Entry point: crawl the "famous poets" column. The calls below are kept
# for manual one-off runs; uncomment the one you need.
read_mingjia()
# read_zhuzhan()
# read_teyao()
# read_by_poet('/c/2018-08-26/6985461.shtml')
# read_content('http://www.zgshige.com/c/2021-01-15/16545743.shtml')
|
import json
import os
import socket
import subprocess
from pathlib import Path
import requests
from repo2docker.app import Repo2Docker
from repo2docker.utils import chdir
from tqdm import tqdm
from . import __version__
from .cache import REPO2SINGULARITY_CACHEDIR, TMPDIR
class Repo2Singularity(Repo2Docker):
    """
    An application for converting git repositories to singularity images.

    Pipeline: repo2docker builds a docker image, which is converted to a
    Singularity Image File (SIF), optionally pushed to a library registry,
    and optionally run locally via a writable sandbox directory.
    """
    # CLI identity surfaced by the Application machinery inherited
    # from Repo2Docker.
    name = 'repo2singularity'
    version = __version__
    description = __doc__
    def build_sif(self):
        """
        Build Singularity Image File (SIF) from built docker image

        Reads the image straight out of the local docker daemon via the
        docker-daemon:// transport. A cached SIF is reused unless
        ``self.force`` is set.
        """
        if os.path.exists(self.sif_image) and not self.force:
            return
        docker_uri = f'docker-daemon://{self.output_image_spec}:latest'
        cmd = ['singularity', 'build']
        if self.force:
            cmd.append('--force')
        cmd.extend([self.sif_image, docker_uri])
        self.log.info(
            f'\nBuilding singularity container from the built docker image...\n{cmd}\n',
            extra=dict(phase='building'),
        )
        subprocess.check_output(cmd)
    def push_image(self):
        """
        Push Singularity image to registry

        Pushes the local SIF to ``library://<username_prefix>/<image>:latest``.
        """
        URI = f'library://{self.username_prefix}/{self.output_image_spec}:latest'
        self.log.info(
            f'Pushing image to {URI}\n', extra=dict(phase='pushing'),
        )
        # -U: push without signing (mirrors the --allow-unsigned pull below).
        cmd = [
            'singularity',
            'push',
            '-U',
            self.sif_image,
            URI,
        ]
        self.log.info(
            f'{cmd}', extra=dict(phase='pushing'),
        )
        subprocess.check_output(cmd)
    def create_container_sandbox(self):
        """
        Pre-convert the Singularity Image File (SIF) to a directory based format (sandbox)

        The sandbox lives under TMPDIR and is reused on subsequent runs
        unless ``self.force`` is set.
        """
        if (Path(TMPDIR) / self.sandbox_name).exists() and not self.force:
            self.log.info('Using existing sandbox directory\n', extra=dict(phase='launching'))
            return
        self.log.info('Creating sandbox directory\n', extra=dict(phase='launching'))
        cmd = ['singularity', 'build']
        if self.force:
            cmd.append('--force')
        cmd.extend(['--sandbox', f'{TMPDIR}/{self.sandbox_name}', self.sif_image])
        self.log.info(
            f'{cmd}\n', extra=dict(phase='building'),
        )
        subprocess.check_output(cmd)
    def start_container(self):
        """
        Start singularity container from built image
        Returns running container
        """
        host_name = socket.gethostname()
        self.host_name = host_name
        if not self.run_cmd:
            # No user command given: default to launching JupyterLab on a
            # free port, advertised under this machine's hostname.
            port = str(self._get_free_port())
            ports = {f'{port}/tcp': port}
            run_cmd = [
                'jupyter',
                'lab',
                '--ip',
                self.host_name,
                '--port',
                port,
                f'--NotebookApp.custom_display_url=http://{host_name}:{port}',
                '--notebook-dir',
                '/opt/notebooks',
            ]
        else:
            run_cmd = self.run_cmd
            if self.ports:
                ports = self.ports
            else:
                ports = {}
        self.ports = ports
        # Run from the sandbox's parent directory so the sandbox can be
        # referenced by bare name in the exec command.
        with chdir(Path(self.container_sandbox_dir).parent):
            cmd = ['singularity', 'exec', '--writable', '--userns']
            if self.bind:
                cmd.extend(['--bind', self.bind])
            cmd.append(self.sandbox_name)
            cmd += run_cmd
            self.log.info(
                f'{cmd}\n', extra=dict(phase='launching'),
            )
            # NOTE(review): check_output blocks until the containerized
            # command exits, so nothing is returned despite the docstring.
            subprocess.check_output(cmd)
    def run_image(self):
        """
        Run docker container from built image

        Creates (or reuses) the sandbox, then execs the run command in it.
        """
        self.create_container_sandbox()
        self.start_container()
        # TODO: wait for it to finish.
    def start(self):
        # Entry point invoked by the repo2docker Application machinery.
        # Derived artifact names/locations for this invocation.
        self.singularity_image_name = f'{self.output_image_spec}.sif'
        self.sif_image = f'{REPO2SINGULARITY_CACHEDIR}/{self.singularity_image_name}'
        self.sandbox_name = f'sandbox-{self.output_image_spec}'
        self.container_sandbox_dir = f'{TMPDIR}/{self.sandbox_name}'
        # Fast path: run a locally cached SIF.
        # NOTE(review): execution falls through to the remote/build logic
        # below even after run_image() succeeds — confirm whether an early
        # return is intended after these two branches.
        if self.run and os.path.exists(self.sif_image):
            self.run_image()
        elif self.run and self.username_prefix:
            # Try pulling a pre-built image from the library registry.
            try:
                URI = f'library://{self.username_prefix}/{self.output_image_spec}'
                cmd = [
                    'singularity',
                    'pull',
                    '--allow-unsigned',
                    '--dir',
                    REPO2SINGULARITY_CACHEDIR.as_posix(),
                    '--name',
                    self.singularity_image_name,
                ]
                if self.force:
                    cmd.append('--force')
                cmd.append(URI)
                self.log.info(
                    f'{cmd}\n', extra=dict(phase='pulling'),
                )
                subprocess.check_output(cmd)
                self.run_image()
            except Exception:
                # NOTE(review): any pull/run failure is silently swallowed
                # and we fall through to a full local build — consider at
                # least logging the exception here.
                pass
        if self.remote:
            # Delegate the build to a remote endpoint, then download the SIF.
            if self.ref is None:
                ref = 'master'
            else:
                ref = self.ref
            data = {'url': self.repo, 'ref': ref, 'image_name': self.output_image_spec}
            downloader(data, self.sif_image, self.endpoint_url)
        else:
            # Local path: docker build via repo2docker, then convert to SIF.
            self.build()
            self.build_sif()
        if self.push:
            self.push_image()
        if self.run:
            self.run_image()
def downloader(
    data: dict, output_file: str, endpoint_url: str, chunk_size: int = 2048,
):
    """POST *data* as JSON to *endpoint_url* and stream the response body
    into *output_file*, showing a download progress bar.

    Parameters
    ----------
    data : dict
        JSON-serializable payload describing the image to build/fetch.
    output_file : str
        Path the downloaded image is written to (truncated if it exists).
    endpoint_url : str
        Remote builder endpoint to POST to.
    chunk_size : int, optional
        Size in bytes of each chunk requested from the response stream.

    Raises
    ------
    requests.HTTPError
        If the endpoint responds with a non-2xx status.
    """
    with requests.Session() as session:
        # stream=True so the body is consumed chunk-by-chunk instead of
        # being buffered in memory first; json= also sets the JSON
        # content-type header (the old data=json.dumps(...) did not).
        response = session.post(endpoint_url, json=data, stream=True)
        response.raise_for_status()
        # content-length may be absent (chunked transfer); tqdm treats a
        # total of 0 as unknown.
        total = int(response.headers.get('content-length', 0))
        progressbar = tqdm(
            total=total, ncols=82, unit='B', unit_scale=True, leave=True, desc='Downloading image'
        )
        try:
            with open(output_file, 'w+b') as fout:
                for chunk in response.iter_content(chunk_size=chunk_size):
                    if chunk:
                        fout.write(chunk)
                        # Advance by the bytes actually received: the last
                        # chunk is usually shorter than chunk_size.
                        progressbar.update(len(chunk))
        finally:
            progressbar.close()
|
import Keywords
import sys
class Evaluator:
    """Tree-walking interpreter for the parsed program.

    The AST is a list of nodes; each node is a dict mapping a keyword token
    (see ``Keywords``) to its operand, which is either a plain value or a
    nested list of statement dicts.
    """
    def __init__(self, AST):
        self.AST = AST
    def execute(self, loc):
        """Dispatch one ``[keyword, operand]`` pair to its handler."""
        if isinstance(loc[1], list):
            # A list operand is a nested block: run its statements.
            self.run(loc[1])
        elif loc[0] == Keywords.t_print:
            self.echo(loc[1])
        elif loc[0] == Keywords.t_stop:
            self.stop()
        elif loc[0] == Keywords.t_call:
            # BUG FIX: dispatch on the operand (the callee's name, loc[1]),
            # not on the keyword token itself (loc[0]).
            self.call(loc[1])
    def run(self, node):
        """Execute a node: a list of statement dicts, or a single dict."""
        if isinstance(node, list):
            for n in node:
                for key, value in n.items():
                    self.execute([key, value])
        elif isinstance(node, dict):
            for key, value in node.items():
                self.execute([key, value])
    def call(self, value):
        """Find the named block in the AST and run its body."""
        for node in self.AST:
            if value in node:
                self.run(node[value])
    def echo(self, value):
        """Print *value* to stdout."""
        print(value)
    def stop(self):
        """Terminate the program. sys.exit() is used instead of quit(),
        which is only injected by the site module and not guaranteed."""
        sys.exit()
from floodsystem.stationdata import build_station_list
from floodsystem.geo import stations_by_distance
from floodsystem.station import MonitoringStation
from floodsystem.geo import stations_within_radius
from floodsystem.station import inconsistent_typical_range_stations
def test_stations_by_distance():
    """stations_by_distance pairs each station with its distance from the
    reference point, sorted nearest-first."""
    s_id = "test-s-id"
    m_id = "test-m-id"
    trange = (-2.3, 3.4445)
    river = "River X"
    town = "My Town"
    # Three fabricated stations at increasing distance from the origin.
    specs = [("Station 1", (1, 1)), ("Station 2", (2, 2)), ("Station 3", (3, 3))]
    a, b, c = (MonitoringStation(s_id, m_id, label, coord, trange, river, town)
               for label, coord in specs)
    # Deliberately shuffled input order: sorting must come from the function.
    stations = [a, c, b]
    distances = stations_by_distance(stations, (0, 0))
    stations_and_distances = [[station.name, distance]
                              for station, distance in distances]
    # Sorted output should be a, b, c — so "Station 2" sits in the middle.
    assert stations_and_distances[1][0] == "Station 2"
def test_stations_within_radius():
    """stations_within_radius keeps only stations inside the given radius."""
    s_id = "test-s-id"
    m_id = "test-m-id"
    trange = (-2.3, 3.4445)
    river = "River X"
    town = "My Town"
    specs = [("Station 1", (1, 1)), ("Station 2", (2, 2)), ("Station 3", (3, 3))]
    a, b, c = (MonitoringStation(s_id, m_id, label, coord, trange, river, town)
               for label, coord in specs)
    stations = [a, c, b]
    # Radius 400 should capture stations 1 and 2 (roughly 157 km and 314 km
    # from the origin) while excluding station 3.
    in_radius = stations_within_radius(stations, (0, 0), 400)
    stations_in_radius = [station.name for station, distance in in_radius]
    assert len(stations_in_radius) == 2
def test_inconsistent_typical_range_stations():
    """Stations whose typical range is reversed (high < low) are flagged."""
    s_id = "test-s-id"
    m_id = "test-m-id"
    river = "River X"
    town = "My Town"
    # Stations 1 and 3 get reversed (inconsistent) ranges; station 2 is valid.
    specs = [("Station 1", (1, 1), (2, -2)),
             ("Station 2", (2, 2), (3, 10)),
             ("Station 3", (3, 3), (1, -1))]
    a, b, c = (MonitoringStation(s_id, m_id, label, coord, trange, river, town)
               for label, coord, trange in specs)
    stations = [a, c, b]
    inconsistent_stations = inconsistent_typical_range_stations(stations)
    names = [station.name for station in inconsistent_stations]
    # Exactly the two reversed-range stations should be reported.
    assert len(names) == 2
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from .aperture import aperture_photometry
from .detection import (background, sepfind, daofind, calc_fwhm,
recenter_sources, starfind)
from .solve_photometry import (solve_photometry_median,
solve_photometry_average,
solve_photometry_montecarlo)
# from ._phot import process_photometry
# PSF models supported by the photometry routines in this subpackage.
psf_available_models = ['gaussian', 'moffat']
# Photometry extraction methods currently implemented.
photometry_available_methods = ['aperture']
# Strategies available for solving the photometric calibration
# (see the solve_photometry_* functions imported above).
solve_photometry_available_methods = ['median', 'average', 'montecarlo']
|
import click
import requests
import time
import os
import psycopg2
from flask import current_app, g
from flask.cli import with_appcontext
from bs4 import BeautifulSoup
def get_db():
    """Return the per-request PostgreSQL connection, creating it on first use.

    The connection is cached on Flask's ``g`` so repeated calls within one
    request/app context share a single connection.
    """
    try:
        return g.db
    except AttributeError:
        # First call in this context: open a TLS-required connection
        # using the DATABASE_URL environment variable and cache it.
        g.db = psycopg2.connect(os.environ["DATABASE_URL"], sslmode='require')
        return g.db
def close_db(e=None):
    """Teardown hook: close and discard the request's DB connection, if any.

    *e* is the (ignored) exception Flask passes to teardown callbacks.
    """
    connection = g.pop('db', None)
    if connection is not None:
        connection.close()
def init_db():
    """(Re)create the schema and seed it by scraping dofus.com.

    Runs schema.sql, then populates:
      * ``server``  — one row per game server, tagged with its community lang
      * ``monster`` — wanted-notice monsters, Cania bandits, archimonsters

    Network-heavy: issues several HTTP requests with 3s pauses between them.
    """
    db = get_db()
    cursor = db.cursor()
    with current_app.open_resource('schema.sql') as f:
        cursor.execute(f.read().decode('utf8'))
    # populate servers
    # url="https://www.dofus.com/fr/mmorpg/communaute/serveurs#jt_list"
    print('Servers')
    servers = {}
    # Each (i, commu) pair is a community filter on the servers page:
    # 0=fr, 2=international, 4=es, 6=port.
    for (i, commu) in [(0,"fr"),(2,"int"),(4,"es"),(6, "port")]:
        url = f"https://www.dofus.com/fr/mmorpg/communaute/serveurs?server_community%5B%5D={i}#jt_list"
        page = requests.get(url)
        soup = BeautifulSoup(page.content, 'html.parser')
        results = soup.find("table").find("tbody").find_all("tr")
        for elem in results:
            # The server name is the first class-less <span> in the row.
            name = elem.find("span", {"class":None}).next_element
            servers[name] = { "lang" : commu }
        time.sleep(3)
    # Second pass by access type: servers not seen above get no language tag.
    url="https://www.dofus.com/fr/mmorpg/communaute/serveurs?server_access%5B%5D=1&server_access%5B%5D=0#jt_list"
    page = requests.get(url)
    soup = BeautifulSoup(page.content, 'html.parser')
    results = soup.find("table").find("tbody").find_all("tr")
    for elem in results:
        name = elem.find("span", {"class":None}).next_element
        if name not in servers :
            servers[name] = { "lang" : None }
    for name in servers:
        cursor.execute('INSERT INTO server (name, lang) VALUES (%s, %s)', (name, servers[name]["lang"]))
    db.commit()
    print(servers)
    # add wanted notices
    # monsterType 2 = wanted notice; zoneId -1 presumably means "no zone
    # assigned yet" — confirm against schema.sql.
    url="https://www.dofus.com/fr/mmorpg/encyclopedie/monstres?monster_category[]=32&monster_category[]=156&monster_category[]=127&monster_category[]=90"
    print('Scraping notices')
    page = requests.get(url)
    soup = BeautifulSoup(page.content, 'html.parser')
    results = soup.find("table").find("tbody").find_all("tr")
    for elem in results:
        nameFr = elem.find("a").getText()
        img = elem.find("img")["src"]
        cursor.execute(
            'INSERT INTO monster (nameFr, img, zoneId, monsterType)'
            ' VALUES (%s, %s, %s, %s)',
            (nameFr, img, -1, 2)
        )
    db.commit()
    print('Scraped ! Waiting 3s ...')
    time.sleep(3)
    # add cania bandits
    # These three are hard-coded (monsterType 3) since they have no
    # encyclopedia listing to scrape; img is left empty.
    for name in ["Eratz le revendicateur", "Nomekop le Crapoteur", "Edasse le Trouble Fête"]:
        cursor.execute(
            'INSERT INTO monster (nameFr, img, zoneId, monsterType)'
            ' VALUES (%s, %s, %s, %s)',
            (name, "", -1, 3)
        )
    db.commit()
    # add archimonsters
    # monsterType 1 = archimonster; the listing is paginated.
    url="https://www.dofus.com/fr/mmorpg/encyclopedie/monstres?monster_type[0]=archimonster&size=96&page="
    n_pages=3
    for i in range(1,n_pages+1):
        print('Scraping page',i,'/',n_pages)
        page = requests.get(url + str(i))
        soup = BeautifulSoup(page.content, 'html.parser')
        results = soup.find("table").find("tbody").find_all("tr")
        for elem in results:
            nameFr = elem.find("a").getText()
            img = elem.find("img")["src"]
            cursor.execute(
                'INSERT INTO monster (nameFr, img, zoneId, monsterType)'
                ' VALUES (%s, %s, %s, %s)',
                (nameFr, img, -1, 1)
            )
        db.commit()
        print('Scraped ! Waiting 3s ...')
        time.sleep(3)
# Exposed as `flask init-db`; the docstring doubles as the CLI help text.
@click.command('init-db')
@with_appcontext
def init_db_command():
    """Clear the existing data and create new tables."""
    init_db()
    click.echo('Initialized the database.')
def init_app(app):
    """Hook this module into *app*: close the DB connection at context
    teardown and register the ``init-db`` CLI command."""
    app.teardown_appcontext(close_db)
    app.cli.add_command(init_db_command)
|
__author__ = '4ikist'
import data_handler
if __name__ == '__main__':
    dh = data_handler.FlickrDataHandler()
    # Fetch square thumbnails tagged 'enot' (single size, list of tags)...
    photo_objects = dh.get_photo_objects(size='Square', tags=['enot'])
    # ...and the mirrored argument shapes (list of sizes, single tag) —
    # presumably exercising both calling conventions; confirm intent.
    photo_objects2 = dh.get_photo_objects(size=['Square', 'Original'], tags='test')
    # BUG FIX: `print ''` is Python-2-only syntax; the parenthesized call
    # prints the same empty line and is valid under Python 2 and 3.
    print('')
# queen.py - QuantumChess
# Author: Cody Lewis
# Date: 24-FEB-2018
import piece
import functions
from functions import Direction
class Queen(piece.Piece):
    """A queen: moves any distance along a single rank, file, or diagonal."""
    def __init__(self, superposNum, frstSuperPos, col, idT):
        """Tag the piece id with 'Q ' and defer to the base initializer."""
        piece.Piece.__init__(self, superposNum, frstSuperPos, col, 'Q ' + str(idT))
    def canMove(self, movement):
        """A move is legal iff every step repeats one queen direction."""
        steps = functions.splitMovement(movement)
        heading = steps[0]
        # All eight compass directions a queen may travel in.
        queen_dirs = (
            Direction.DOWN.value, Direction.UP.value,
            Direction.RIGHT.value, Direction.LEFT.value,
            Direction.DOWNRIGHT.value, Direction.DOWNLEFT.value,
            Direction.UPRIGHT.value, Direction.UPLEFT.value,
        )
        if heading not in queen_dirs:
            return False
        # Every subsequent step must keep the same heading.
        return all(step == heading for step in steps[1:])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.