| max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
|---|---|---|---|---|---|---|
basic/exercise1.py
|
jspw/Basic_Python
| 6
|
12779251
|
<gh_stars>1-10
print(" this is \\\\ double backslash \nthis is //\\//\\//\\//\\//\\ mountain \nhe is awesome\b y")
print("\\\"\\n\\t\\\'")
| 2.234375
| 2
|
Exercicios/ex033_Maior e menor valores.py
|
GabrielMazzuchello/Curso-Em-Video
| 2
|
12779252
|
num1 = int(input('Primeiro valor: '))
num2 = int(input('Segundo valor: '))
num3 = int(input('Terceiro valor: '))
# Find the smallest number (<= handles ties between the inputs)
menor = num3
if num2 <= num3 and num2 <= num1:
    menor = num2
if num1 <= num3 and num1 <= num2:
    menor = num1
# Find the largest number
maior = num3
if num2 >= num1 and num2 >= num3:
    maior = num2
if num1 >= num2 and num1 >= num3:
    maior = num1
print('O menor numero é {}'.format(menor))
print('O maior numero é {}'.format(maior))
| 4.15625
| 4
|
compare_one_sr_alpha_mask.py
|
lmmx/emoji-liif
| 1
|
12779253
|
<gh_stars>1-10
from pathlib import Path
import sqlite3
import pandas as pd
from tqdm import tqdm
from sys import stderr
from imageio import imread, imwrite
import numpy as np
from skimage import transform as tf
from matplotlib import pyplot as plt
from transform_utils import scale_pixel_box_coordinates, crop_image
SAVING_PLOT = False
JUPYTER = True
osx_dir = Path("osx/catalina/").absolute()
source_dir = osx_dir / "png"
preproc_dir = osx_dir / "bg/"
png_dir = Path("enlarged/").absolute()
out_dir = Path("transparent/").absolute()
png = png_dir / "glyph-u1F343.png"
osx_bw_db = osx_dir / "emoji_bw_calc.db"
NO_OVERWRITE = False
def get_emoji_rgb_bg(glyph):
with sqlite3.connect(osx_bw_db) as conn:
query_sql = "SELECT * FROM images WHERE filename == (?)"
query_df = pd.read_sql(query_sql, con=conn, params=[glyph])
[r] = [g] = [b] = query_df.loc[:, "furthest_shade"].values
return r, g, b
def alpha_composite_bg(img, background_shade):
"""
    Linearly composite an RGBA image against a grayscale background. Image dtype
    is preserved. Output height/width will match those of `img`, but the alpha
    channel dimension will be dropped, making the result RGB only.
"""
if not isinstance(background_shade, int):
raise TypeError("background_shade must be an integer")
im = img.astype(float)
bg = background_shade / 255
im_max = im.max()
im /= im_max # scale im to [0,1]
im_rgb = im[:,:,:3]
bg_rgb = np.ones_like(im_rgb) * bg
# Scale RGB according to A
alpha_im = im[:,:,3]
alpha_bg = 1 - alpha_im
im_rgb *= alpha_im[:,:,None]
bg_rgb *= alpha_bg[:,:,None]
composited = im_rgb + bg_rgb
# Rescale to original range and return to original dtype
composited *= im_max
composited = composited.astype(img.dtype)
return composited
def plot_comparison(
scaled_source_img_sub_alpha,
scaled_source_img_sub,
img_sub,
composited_grad,
decomp_alpha,
SAVING_PLOT
):
fig, (ax0, ax1, ax2, ax3, ax4) = plt.subplots(1, 5, sharex=True, sharey=True)
ax0.imshow(scaled_source_img_sub_alpha)
ax0.set_title("Alpha")
#ax1.imshow(scaled_preproc_img_sub[:,:,:3])
ax1.imshow(np.zeros_like(scaled_source_img_sub[:,:,:3]))
ax1.imshow(scaled_source_img_sub)
ax1.set_title("Source image (resize: nearest neighbour)")
ax2.imshow(img_sub)
ax2.set_title("LIIF superresolution")
ax3.imshow(composited_grad)
ax3.set_title("Difference of LIIF\nfrom resized composite")
ax4.imshow(decomp_alpha)
ax4.set_title("Difference of LIIF alpha from\nresized composite (estimated)")
fig.tight_layout()
if SAVING_PLOT:
fig.set_size_inches((20,6))
fig_name = "alpha_composite_comparison.png"
fig.savefig(fig_name)
reload_fig = imread(fig_name)
fig_s = reload_fig.shape
clip_y_t = fig_s[0] // 15 # ~7% top crop
clip_y_b = -(fig_s[0] // 10) # ~10% bottom crop
clip_x_l = fig_s[1] // 17 # ~6% left crop
clip_x_r = -(fig_s[1] // 50) # ~ 2% right crop
cropped_fig = reload_fig[clip_y_t:clip_y_b, clip_x_l:clip_x_r]
imwrite(fig_name, cropped_fig)
else:
return fig, (ax0, ax1, ax2, ax3, ax4)
| 2.359375
| 2
|
clusterpy/core/data/spatialLag.py
|
CentroGeo/clusterpy_python3
| 48
|
12779254
|
<reponame>CentroGeo/clusterpy_python3
# encoding: latin1
"""spatial lag of a variable
"""
__author__ = "<NAME>, <NAME>"
__credits__ = "Copyright (c) 2010-11 <NAME>"
__license__ = "New BSD License"
__version__ = "1.0.0"
__maintainer__ = "RiSE Group"
__email__ = "<EMAIL>"
__all__ = ['spatialLag']
import numpy
def spatialLag(data,w):
"""
    This method receives a dictionary of variables and
    returns a copy of the dictionary with the variables
    spatially lagged.
:param data: data dictionary to be lagged
:type data: dictionary
:rtype: dictionary (Y dictionary with the lag of vars)
"""
data = [data[x] for x in data]
data = numpy.matrix(data)
data = data.transpose()
w = numpy.matrix(w)
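    # each row of the transposed matrix holds one variable across all areas, so
    # right-multiplying by the weights matrix w lags every variable in one step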
data = data*w
data = data.transpose()
y = {}
for nd, d in enumerate(data):
y[nd] = d.tolist()[0]
return y
| 3.015625
| 3
|
AI Project/Movielens Experiment/kmeans_weighted_average.py
|
anshikam/CSCE-625
| 0
|
12779255
|
# -*- coding: utf-8 -*-
"""
A program that carries out mini-batch k-means clustering on the MovieLens dataset"""
from __future__ import print_function, division, absolute_import, unicode_literals
from decimal import *
#other stuff we need to import
import csv
import numpy as np
from sklearn.cluster import MiniBatchKMeans
from sklearn.metrics.cluster import v_measure_score
from math import *
def distance(user_id,i):
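    # Euclidean distance between two users, computed only over movies that both users have rated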
distance=0
for j in range(len(user_movie_matrix[0])):
if user_movie_matrix[user_id][j] !=0 and user_movie_matrix[i][j]!=0:
distance+=Decimal(pow(Decimal(user_movie_matrix[user_id][j] - user_movie_matrix[i][j]),2))
distance=sqrt(distance)
return distance
#beginning of main program
#read in u1.base
training_file = open('ml-100k/u1.base','r')
rows = training_file.readlines()
training_file.close()
training_data=[]
for row in rows:
    fields = row.split('\t')
    int_list = [int(item) for item in fields]
training_data.append(int_list)
#read in u1.test
test_file = open('ml-100k/u1.test','r')
rows = test_file.readlines()
test_file.close()
test_data=[]
for row in rows:
    fields = row.split('\t')
    int_list = [int(item) for item in fields]
test_data.append(int_list)
print(len(training_data))
print(len(test_data))
user_ids = [row[0] for row in training_data]
user_ids = set(user_ids)
user_ids = sorted(user_ids)
number_of_users = len(user_ids)
#print(user_ids)
print(number_of_users)
movie_ids = [row[1] for row in training_data]
movie_ids = set(movie_ids)
movie_ids = sorted(movie_ids)
number_of_movies = len(movie_ids)
#print(movie_ids)
print(number_of_movies)
#create a user movie matrix
#pre-processing could be done in one of three ways :
# a. either ignore ratings <= 3 so rating of 4 or 5 = 1 in matrix and <=3 is 0
# b. calculate a mean for each user
# c. or simply give 1 if rated and 0 if not rated
user_movie_matrix = np.zeros((number_of_users,number_of_movies))
#user_movie_matrix.fill(0.001)
for row in training_data:
user_id = user_ids.index(row[0])
movie_id = movie_ids.index(row[1])
user_movie_matrix[user_id,movie_id] = row[2]
#user_movie_matrix[user_id,movie_id] = row[2]
#print(user_movie_matrix[0])
#print(user_movie_matrix[942][1])
#print(user_movie_matrix[942][8])
#Normalizing user-movie matrix
#Additional step
'''for i in range(number_of_users):
tempList = []
tempList = user_movie_matrix[i].tolist()
print('templist')
print(tempList)
minVal = min(tempList)
maxVal = max(tempList)
for j in tempList:
j=Decimal(Decimal(j-minVal)/Decimal(maxVal-minVal))
j=j*5
user_movie_matrix[i] = tempList'''
print(user_movie_matrix)
print(len(user_movie_matrix))
print(len(user_movie_matrix[0]))
#print(user_movie_matrix)
#initialize and carry out clustering
K=50
#km = KMeans(n_clusters = K)
#km.fit(user_movie_matrix)
#km = KMeans(n_clusters = K)
km = MiniBatchKMeans(n_clusters = K)
km.fit(user_movie_matrix)
#labels
labels = km.labels_
print(str(labels))
#find which cluster each user is in
cluster_num_users=np.zeros(K)
#maintain a list of users per cluster
cluster_list_users=[]
for i in range(K):
cluster_list_users.append([])
print(cluster_list_users)
prediction = km.predict(user_movie_matrix)
print('\n--------Which cluster each user is in--------')
print('{:<15}\t{}'.format('User','Cluster'))
for i in range(len(prediction)):
print('{:<15}\t{}'.format(user_ids[i],prediction[i]))
cluster_num_users[prediction[i]]+=1
list_of_users = []
list_of_users = cluster_list_users[prediction[i]]
list_of_users.append(i)
cluster_list_users[prediction[i]]=list_of_users
f=open('cluster_num_users','w')
for i in range(K):
f.write(str(i))
f.write('\t')
f.write(str(cluster_num_users[i]))
f.write('\n')
f.close()
print(cluster_num_users)
print(cluster_list_users)
#Number of users in each cluster
print('\n--------Number of users in a cluster--------')
for i in range(K):
print('{:<15}\t{}'.format(i,cluster_num_users[i]))
print(sum(cluster_num_users))
print('The total distance of the solution found is',sum((km.transform(user_movie_matrix)).min(axis=1)))
#predicting rating for a movie by a user
print('Number of test data ')
print(len(test_data))
accuracy=0
root_mean_accuracy=0
weighted_sum=0
sum_of_weights=0
for row in test_data:
print('Testing for user and movie in test : ' + str(row))
movie = row[1]
rating = row[2]
#print('Cluster for this user : ')
user = row[0]
#print(user)
user_id = user_ids.index(user)
#print(user_id)
#print(labels)
cluster_index = labels[user_id]
#print(cluster_index)
print('Other user ids in this cluster : ')
print(cluster_num_users[cluster_index])
#print(len(cluster_list_users[cluster_index]))
other_user_ids_in_same_cluster=cluster_list_users[cluster_index]
print(other_user_ids_in_same_cluster)
#print('Have they rated movie ')
#print(movie)
if movie in movie_ids:
movie_id=movie_ids.index(movie)
else:
continue
    number_of_users_who_rated_movie=0
    sum_total_rating=0
    # reset the weighted-average accumulators for this test row so ratings do not leak across predictions
    weighted_sum=0
    sum_of_weights=0
for i in other_user_ids_in_same_cluster:
if user_movie_matrix[i][movie_id] > 0:
#print(i)
#print('index has rated movie ')
#print(movie_id)
#print(user_movie_matrix[i][movie_id])
if(Decimal(round(distance(user_id,i),2)) > Decimal(0.0)):
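                # inverse-distance weighting: closer users in the cluster contribute more to the predicted rating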
weight = Decimal(1/(distance(user_id,i)))
weighted_sum += weight*Decimal(user_movie_matrix[i][movie_id])
sum_of_weights += Decimal(weight)
number_of_users_who_rated_movie += 1
sum_total_rating += user_movie_matrix[i][movie_id]
print('Predicted Rating for this movie :')
#print(sum_total_rating)
if(number_of_users_who_rated_movie > 0 and sum_of_weights > 0):
print(weighted_sum)
print(sum_of_weights)
rating_predicted = weighted_sum/sum_of_weights
print(rating_predicted)
print(rating)
#rating_predicted = round(rating_predicted)
root_mean_accuracy += Decimal(pow(Decimal(rating_predicted-rating),2))
if abs(Decimal(rating_predicted - rating)) <= Decimal(1.0):
print("HERE")
accuracy += 1
'''elif Decimal(rating - rating_predicted) < Decimal(0.5):
print("HERE")
accuracy += 1'''
print(accuracy)
print('% accuracy')
print(accuracy*100/len(test_data))
root_mean_accuracy = root_mean_accuracy/len(test_data)
root_mean_accuracy = sqrt(root_mean_accuracy)
print(root_mean_accuracy)
| 3.0625
| 3
|
example/sales/models.py
|
browniebroke/django-admin-lightweight-date-hierarchy
| 91
|
12779256
|
from django.db import models
class Sale(models.Model):
created = models.DateTimeField()
def __str__(self):
return f'[{self.id}] {self.created:%Y-%m-%d}'
class SaleWithDrilldown(Sale):
"""
We will use this model in the admin to illustrate the difference
between date hierarchy with and without drilldown.
"""
class Meta:
proxy = True
verbose_name = 'Sale model with default drilldown'
class SaleWithCustomDrilldown(Sale):
class Meta:
proxy = True
verbose_name = 'Sale model with custom drilldown'
| 2.328125
| 2
|
libs/gym/tests/spaces/test_spaces.py
|
maxgold/icml22
| 0
|
12779257
|
import json # note: ujson fails this test due to float equality
import copy
import numpy as np
import pytest
from gym.spaces import Tuple, Box, Discrete, MultiDiscrete, MultiBinary, Dict
@pytest.mark.parametrize(
"space",
[
Discrete(3),
Discrete(5, start=-2),
Box(low=0.0, high=np.inf, shape=(2, 2)),
Tuple([Discrete(5), Discrete(10)]),
Tuple(
[
Discrete(5),
Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32),
]
),
Tuple((Discrete(5), Discrete(2), Discrete(2))),
Tuple((Discrete(5), Discrete(2, start=6), Discrete(2, start=-4))),
MultiDiscrete([2, 2, 100]),
MultiBinary(10),
Dict(
{
"position": Discrete(5),
"velocity": Box(
low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32
),
}
),
],
)
def test_roundtripping(space):
sample_1 = space.sample()
sample_2 = space.sample()
assert space.contains(sample_1)
assert space.contains(sample_2)
json_rep = space.to_jsonable([sample_1, sample_2])
json_roundtripped = json.loads(json.dumps(json_rep))
samples_after_roundtrip = space.from_jsonable(json_roundtripped)
sample_1_prime, sample_2_prime = samples_after_roundtrip
s1 = space.to_jsonable([sample_1])
s1p = space.to_jsonable([sample_1_prime])
s2 = space.to_jsonable([sample_2])
s2p = space.to_jsonable([sample_2_prime])
assert s1 == s1p, "Expected {} to equal {}".format(s1, s1p)
assert s2 == s2p, "Expected {} to equal {}".format(s2, s2p)
@pytest.mark.parametrize(
"space",
[
Discrete(3),
Discrete(5, start=-2),
Box(low=np.array([-10, 0]), high=np.array([10, 10]), dtype=np.float32),
Box(low=-np.inf, high=np.inf, shape=(1, 3)),
Tuple([Discrete(5), Discrete(10)]),
Tuple(
[
Discrete(5),
Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32),
]
),
Tuple((Discrete(5), Discrete(2), Discrete(2))),
Tuple((Discrete(5), Discrete(2), Discrete(2, start=-6))),
MultiDiscrete([2, 2, 100]),
MultiBinary(6),
Dict(
{
"position": Discrete(5),
"velocity": Box(
low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32
),
}
),
],
)
def test_equality(space):
space1 = space
space2 = copy.copy(space)
assert space1 == space2, "Expected {} to equal {}".format(space1, space2)
@pytest.mark.parametrize(
"spaces",
[
(Discrete(3), Discrete(4)),
(Discrete(3), Discrete(3, start=-1)),
(MultiDiscrete([2, 2, 100]), MultiDiscrete([2, 2, 8])),
(MultiBinary(8), MultiBinary(7)),
(
Box(low=np.array([-10, 0]), high=np.array([10, 10]), dtype=np.float32),
Box(low=np.array([-10, 0]), high=np.array([10, 9]), dtype=np.float32),
),
(
Box(low=-np.inf, high=0.0, shape=(2, 1)),
Box(low=0.0, high=np.inf, shape=(2, 1)),
),
(Tuple([Discrete(5), Discrete(10)]), Tuple([Discrete(1), Discrete(10)])),
(
Tuple([Discrete(5), Discrete(10)]),
Tuple([Discrete(5, start=7), Discrete(10)]),
),
(Dict({"position": Discrete(5)}), Dict({"position": Discrete(4)})),
(Dict({"position": Discrete(5)}), Dict({"speed": Discrete(5)})),
],
)
def test_inequality(spaces):
space1, space2 = spaces
assert space1 != space2, "Expected {} != {}".format(space1, space2)
@pytest.mark.parametrize(
"space",
[
Discrete(5),
Discrete(8, start=-20),
Box(low=0, high=255, shape=(2,), dtype="uint8"),
Box(low=-np.inf, high=np.inf, shape=(3, 3)),
Box(low=1.0, high=np.inf, shape=(3, 3)),
Box(low=-np.inf, high=2.0, shape=(3, 3)),
],
)
def test_sample(space):
space.seed(0)
n_trials = 100
samples = np.array([space.sample() for _ in range(n_trials)])
expected_mean = 0.0
if isinstance(space, Box):
if space.is_bounded():
expected_mean = (space.high + space.low) / 2
elif space.is_bounded("below"):
expected_mean = 1 + space.low
elif space.is_bounded("above"):
expected_mean = -1 + space.high
else:
expected_mean = 0.0
elif isinstance(space, Discrete):
expected_mean = space.start + space.n / 2
else:
raise NotImplementedError
np.testing.assert_allclose(expected_mean, samples.mean(), atol=3.0 * samples.std())
@pytest.mark.parametrize(
"spaces",
[
(Discrete(5), MultiBinary(5)),
(
Box(low=np.array([-10, 0]), high=np.array([10, 10]), dtype=np.float32),
MultiDiscrete([2, 2, 8]),
),
(
Box(low=0, high=255, shape=(64, 64, 3), dtype=np.uint8),
Box(low=0, high=255, shape=(32, 32, 3), dtype=np.uint8),
),
(Dict({"position": Discrete(5)}), Tuple([Discrete(5)])),
(Dict({"position": Discrete(5)}), Discrete(5)),
(Tuple((Discrete(5),)), Discrete(5)),
(
Box(low=np.array([-np.inf, 0.0]), high=np.array([0.0, np.inf])),
Box(low=np.array([-np.inf, 1.0]), high=np.array([0.0, np.inf])),
),
],
)
def test_class_inequality(spaces):
assert spaces[0] == spaces[0]
assert spaces[1] == spaces[1]
assert spaces[0] != spaces[1]
assert spaces[1] != spaces[0]
@pytest.mark.parametrize(
"space_fn",
[
lambda: Dict(space1="abc"),
lambda: Dict({"space1": "abc"}),
lambda: Tuple(["abc"]),
],
)
def test_bad_space_calls(space_fn):
with pytest.raises(AssertionError):
space_fn()
def test_seed_Dict():
test_space = Dict(
{
"a": Box(low=0, high=1, shape=(3, 3)),
"b": Dict(
{
"b_1": Box(low=-100, high=100, shape=(2,)),
"b_2": Box(low=-1, high=1, shape=(2,)),
}
),
"c": Discrete(5),
}
)
seed_dict = {
"a": 0,
"b": {
"b_1": 1,
"b_2": 2,
},
"c": 3,
}
test_space.seed(seed_dict)
# "Unpack" the dict sub-spaces into individual spaces
a = Box(low=0, high=1, shape=(3, 3))
a.seed(0)
b_1 = Box(low=-100, high=100, shape=(2,))
b_1.seed(1)
b_2 = Box(low=-1, high=1, shape=(2,))
b_2.seed(2)
c = Discrete(5)
c.seed(3)
for i in range(10):
test_s = test_space.sample()
a_s = a.sample()
assert (test_s["a"] == a_s).all()
b_1_s = b_1.sample()
assert (test_s["b"]["b_1"] == b_1_s).all()
b_2_s = b_2.sample()
assert (test_s["b"]["b_2"] == b_2_s).all()
c_s = c.sample()
assert test_s["c"] == c_s
def test_box_dtype_check():
# Related Issues:
# https://github.com/openai/gym/issues/2357
# https://github.com/openai/gym/issues/2298
space = Box(0, 2, tuple(), dtype=np.float32)
# casting will match the correct type
assert space.contains(0.5)
# float64 is not in float32 space
assert not space.contains(np.array(0.5))
assert not space.contains(np.array(1))
@pytest.mark.parametrize(
"space",
[
Discrete(3),
Discrete(3, start=-4),
Box(low=0.0, high=np.inf, shape=(2, 2)),
Tuple([Discrete(5), Discrete(10)]),
Tuple(
[
Discrete(5),
Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32),
]
),
Tuple((Discrete(5), Discrete(2), Discrete(2))),
MultiDiscrete([2, 2, 100]),
MultiBinary(10),
Dict(
{
"position": Discrete(5),
"velocity": Box(
low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32
),
}
),
],
)
def test_seed_returns_list(space):
def assert_integer_list(seed):
assert isinstance(seed, list)
assert len(seed) >= 1
assert all([isinstance(s, int) for s in seed])
assert_integer_list(space.seed(None))
assert_integer_list(space.seed(0))
def convert_sample_hashable(sample):
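    # recursively convert numpy arrays, lists and dicts into nested tuples so samples can be compared and hashed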
if isinstance(sample, np.ndarray):
return tuple(sample.tolist())
if isinstance(sample, (list, tuple)):
return tuple(convert_sample_hashable(s) for s in sample)
if isinstance(sample, dict):
return tuple(
(key, convert_sample_hashable(value)) for key, value in sample.items()
)
return sample
def sample_equal(sample1, sample2):
return convert_sample_hashable(sample1) == convert_sample_hashable(sample2)
@pytest.mark.parametrize(
"space",
[
Discrete(3),
Discrete(3, start=-4),
Box(low=0.0, high=np.inf, shape=(2, 2)),
Tuple([Discrete(5), Discrete(10)]),
Tuple(
[
Discrete(5),
Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32),
]
),
Tuple((Discrete(5), Discrete(2), Discrete(2))),
MultiDiscrete([2, 2, 100]),
MultiBinary(10),
Dict(
{
"position": Discrete(5),
"velocity": Box(
low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32
),
}
),
],
)
def test_seed_reproducibility(space):
space1 = space
space2 = copy.deepcopy(space)
space1.seed(None)
space2.seed(None)
assert space1.seed(0) == space2.seed(0)
assert sample_equal(space1.sample(), space2.sample())
@pytest.mark.parametrize(
"space",
[
Tuple([Discrete(100), Discrete(100)]),
Tuple([Discrete(5), Discrete(10)]),
Tuple([Discrete(5), Discrete(5, start=10)]),
Tuple(
[
Discrete(5),
Box(low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32),
]
),
Tuple((Discrete(5), Discrete(2), Discrete(2))),
Dict(
{
"position": Discrete(5),
"velocity": Box(
low=np.array([0, 0]), high=np.array([1, 5]), dtype=np.float32
),
}
),
],
)
def test_seed_subspace_incorrelated(space):
subspaces = space.spaces if isinstance(space, Tuple) else space.spaces.values()
space.seed(0)
states = [
convert_sample_hashable(subspace.np_random.bit_generator.state)
for subspace in subspaces
]
assert len(states) == len(set(states))
def test_multidiscrete_as_tuple():
# 1D multi-discrete
space = MultiDiscrete([3, 4, 5])
assert space.shape == (3,)
assert space[0] == Discrete(3)
assert space[0:1] == MultiDiscrete([3])
assert space[0:2] == MultiDiscrete([3, 4])
assert space[:] == space and space[:] is not space
assert len(space) == 3
# 2D multi-discrete
space = MultiDiscrete([[3, 4, 5], [6, 7, 8]])
assert space.shape == (2, 3)
assert space[0, 1] == Discrete(4)
assert space[0] == MultiDiscrete([3, 4, 5])
assert space[0:1] == MultiDiscrete([[3, 4, 5]])
assert space[0:2, :] == MultiDiscrete([[3, 4, 5], [6, 7, 8]])
assert space[:, 0:1] == MultiDiscrete([[3], [6]])
assert space[0:2, 0:2] == MultiDiscrete([[3, 4], [6, 7]])
assert space[:] == space and space[:] is not space
assert space[:, :] == space and space[:, :] is not space
def test_multidiscrete_subspace_reproducibility():
# 1D multi-discrete
space = MultiDiscrete([100, 200, 300])
space.seed(None)
assert sample_equal(space[0].sample(), space[0].sample())
assert sample_equal(space[0:1].sample(), space[0:1].sample())
assert sample_equal(space[0:2].sample(), space[0:2].sample())
assert sample_equal(space[:].sample(), space[:].sample())
assert sample_equal(space[:].sample(), space.sample())
# 2D multi-discrete
space = MultiDiscrete([[300, 400, 500], [600, 700, 800]])
space.seed(None)
assert sample_equal(space[0, 1].sample(), space[0, 1].sample())
assert sample_equal(space[0].sample(), space[0].sample())
assert sample_equal(space[0:1].sample(), space[0:1].sample())
assert sample_equal(space[0:2, :].sample(), space[0:2, :].sample())
assert sample_equal(space[:, 0:1].sample(), space[:, 0:1].sample())
assert sample_equal(space[0:2, 0:2].sample(), space[0:2, 0:2].sample())
assert sample_equal(space[:].sample(), space[:].sample())
assert sample_equal(space[:, :].sample(), space[:, :].sample())
assert sample_equal(space[:, :].sample(), space.sample())
def test_space_legacy_state_pickling():
legacy_state = {
"shape": (
1,
2,
3,
),
"dtype": np.int64,
"np_random": np.random.default_rng(),
"n": 3,
}
space = Discrete(1)
space.__setstate__(legacy_state)
assert space.shape == legacy_state["shape"]
assert space._shape == legacy_state["shape"]
assert space.np_random == legacy_state["np_random"]
assert space._np_random == legacy_state["np_random"]
assert space.n == 3
assert space.dtype == legacy_state["dtype"]
| 2.28125
| 2
|
tests/integration_tests/tests/agentless_tests/policies/__init__.py
|
yeshess/cloudify-manager
| 0
|
12779258
|
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from integration_tests.tests.test_cases import BaseTestCase
from integration_tests.framework import riemann
from integration_tests.tests.utils import do_retries
from integration_tests.tests.utils import get_resource as resource
class PoliciesTestsBase(BaseTestCase):
NUM_OF_INITIAL_WORKFLOWS = 2
def tearDown(self):
super(PoliciesTestsBase, self).tearDown()
riemann.reset_data_and_restart()
def launch_deployment(self, yaml_file, expected_num_of_node_instances=1):
deployment, _ = self.deploy_application(resource(yaml_file))
self.deployment = deployment
self.node_instances = self.client.node_instances.list(
deployment_id=deployment.id
)
self.assertEqual(
expected_num_of_node_instances,
len(self.node_instances)
)
self.wait_for_executions(self.NUM_OF_INITIAL_WORKFLOWS,
expect_exact_count=False)
def get_node_instance_by_name(self, name):
for nodeInstance in self.node_instances:
if nodeInstance.node_id == name:
return nodeInstance
def wait_for_executions(self, expected_count, expect_exact_count=True):
def assertion():
executions = self.client.executions.list(
deployment_id=self.deployment.id)
if expect_exact_count:
self.assertEqual(len(executions), expected_count)
else:
self.assertGreaterEqual(len(executions), expected_count)
self.do_assertions(assertion)
def wait_for_invocations(self, deployment_id, expected_count):
def assertion():
invocations = self.get_plugin_data(
plugin_name='testmockoperations',
deployment_id=deployment_id
)['mock_operation_invocation']
self.assertEqual(expected_count, len(invocations))
return invocations
return do_retries(assertion)
def publish(self, metric, ttl=60, node_name='node',
service='service', node_id=''):
if node_id == '':
node_id = self.get_node_instance_by_name(node_name).id
deployment_id = self.deployment.id
self.publish_riemann_event(
deployment_id,
node_name=node_name,
node_id=node_id,
metric=metric,
service='{}.{}.{}.{}'.format(
deployment_id,
service,
node_name,
node_id
),
ttl=ttl
)
| 1.8125
| 2
|
Cours 3 - Language Models/solutions/ngrams.py
|
AntoineSimoulin/m2-data-sciences
| 7
|
12779259
|
from collections import Counter

def sentence_2_n_grams(sentences, n=3, start_token='<s>', end_token='</s>'):
ngrams = []
for s in sentences:
tokens = [start_token] + s + [end_token]
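        # zipping n shifted copies of the token list yields every length-n sliding window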
ngrams += zip(*[tokens[i:] for i in range(n)])
return Counter([" ".join(ngram) for ngram in ngrams])
| 3.28125
| 3
|
src/niweb/apps/noclook/migrations/0009_auto_20190902_0759.py
|
SUNET/ni
| 0
|
12779260
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.21 on 2019-09-02 07:59
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.deletion
from apps.noclook.models import DEFAULT_ROLEGROUP_NAME, DEFAULT_ROLE_KEY, DEFAULT_ROLES
def init_default_roles(Role):
    # fetch each default role and fill in any missing name or description
    for role_slug, roledict in DEFAULT_ROLES.items():
        role = Role.objects.get(slug=role_slug)
        if role:
            # add a default description and name to the role, then save once
            if not role.description and roledict['description']:
                role.description = roledict['description']
            if roledict['name']:
                role.name = roledict['name']
            role.save()
def forwards_func(apps, schema_editor):
Role = apps.get_model("noclook", "Role")
init_default_roles(Role)
def reverse_func(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('noclook', '0008_role_squashed_0013_auto_20190725_1153'),
]
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
| 1.929688
| 2
|
03a_sec-dsrg/DSRG.py
|
lyndonchan/wsss-analysis
| 47
|
12779261
|
import numpy as np
import tensorflow as tf
from lib.crf import crf_inference
from lib.CC_labeling_8 import CC_lab
def single_generate_seed_step(params):
"""Implemented seeded region growing
Parameters
----------
params : 3-tuple of numpy 4D arrays
(tag) : numpy 4D array (size: B x 1 x 1 x C), where B = batch size, C = number of classes
GT label
(cue) : numpy 4D array (size: B x H_c x W_c x C), where H_c = cue height, W_c = cue width
Weak cue
(prob) : numpy 4D array (size: B x H_c x W_c x C), where H_c = cue height, W_c = cue width
Final feature map
Returns
-------
(cue) : numpy 4D array (size: B x H_c x W_c x C), where H_c = cue height, W_c = cue width
Weak cue, after seeded region growing
"""
# th_f,th_b = 0.85,0.99
th_f, th_b = 0.5, 0.7
tag, cue, prob = params
existing_prob = prob * tag
    existing_prob_argmax = np.argmax(existing_prob,
                                     axis=2) + 1  # distinguishes background pixels from pixels that satisfy no condition
tell_where_is_foreground_mask = (existing_prob_argmax > 1).astype(np.uint8)
    existing_prob_fg_th_mask = (np.sum((existing_prob[:, :, 1:] > th_f).astype(np.uint8), axis=2) > 0.5).astype(
        np.uint8)  # the mask is 1 for a pixel if any existing foreground category's score exceeds th_f
existing_prob_bg_th_mask = (np.sum((existing_prob[:, :, 0:1] > th_b).astype(np.uint8), axis=2) > 0.5).astype(
np.uint8)
label_map = (existing_prob_fg_th_mask * tell_where_is_foreground_mask + existing_prob_bg_th_mask * (
1 - tell_where_is_foreground_mask)) * existing_prob_argmax
    # label_map is a two-dimensional map showing which category satisfies the following three conditions at each pixel:
    # 1. the category is among the tags of the image
    # 2. the category has the maximum probability among the tags
    # 3. the probability of the category is bigger than the threshold
    # these three conditions form the similarity criterion
    # in label_map, 0 means no category satisfies the conditions, and n means category n-1 satisfies them
cls_index = np.where(tag > 0.5)[2] # the existing labels index
for c in cls_index:
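        # region growing for class c: connected components of high-confidence pixels that touch an existing cue pixel are absorbed into the cue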
mat = (label_map == (c + 1))
mat = mat.astype(int)
cclab = CC_lab(mat)
        cclab.connectedComponentLabel()  # divides the mask into connected regions and updates cclab.labels, a 2D list holding each pixel's region index
        high_confidence_set_label = set()  # collects the indices of connected regions that overlap an existing cue pixel
for (x, y), value in np.ndenumerate(mat):
if value == 1 and cue[x, y, c] == 1:
high_confidence_set_label.add(cclab.labels[x][y])
elif value == 1 and np.sum(cue[x, y, :]) == 1:
cclab.labels[x][y] = -1
for (x, y), value in np.ndenumerate(np.array(cclab.labels)):
if value in high_confidence_set_label:
cue[x, y, c] = 1
return np.expand_dims(cue, axis=0)
class DSRG():
"""Class for the DSRG method"""
def __init__(self, config):
self.config = config
self.dataset = self.config.get('dataset')
self.h, self.w = (self.config.get('img_size'), self.config.get('img_size'))
self.num_classes = self.config.get('num_classes')
self.batch_size = self.config.get("batch_size")
self.phase = self.config.get('phase')
self.img_mean = self.config.get('img_mean')
self.seed_size = self.config.get('seed_size')
self.init_model_path = self.config.get('init_model_path', None)
self.crf_config_train = {"g_sxy":3/12,"g_compat":3,"bi_sxy":80/12,"bi_srgb":13,"bi_compat":10,"iterations":5}
self.crf_config_test = {"g_sxy":3,"g_compat":3,"bi_sxy":80,"bi_srgb":13,"bi_compat":10,"iterations":10}
self.net = {}
self.weights = {}
self.trainable_list = []
self.loss = {}
self.metric = {}
self.variables={"total":[]}
self.min_prob = 0.0001
self.stride = {}
self.stride["input"] = 1
# different lr for different variable
self.lr_1_list = []
self.lr_2_list = []
self.lr_10_list = []
self.lr_20_list = []
self.pool = self.config.get('pool')
def build(self,net_input=None,net_label=None,net_cues=None,net_id=None,phase='train'):
"""Build DSRG model
Parameters
----------
net_input : Tensor, optional
Input images in batch, after resizing and normalizing
net_label : Tensor, optional
GT segmentation in batch, after resizing
net_cues : Tensor, optional
Weak cue labels in batch, after resizing
net_id : Tensor, optional
Filenames in batch
phase : str, optional
Phase to run DSRG model
Returns
-------
(output) : Tensor
Final layer of FCN model of DSRG
"""
if "output" not in self.net:
if phase == 'train':
with tf.name_scope("placeholder"):
self.net["input"] = net_input
self.net["label"] = net_label # [None, self.num_classes], int32
self.net["cues"] = net_cues # [None,41,41,self.num_classes])
self.net["id"] = net_id
self.net["drop_prob"] = tf.Variable(0.5, trainable=False)
self.net["output"] = self.create_network(phase)
self.pred()
elif phase in ['val', 'tuning', 'segtest', 'test']:
with tf.name_scope("placeholder"):
self.net["input"] = net_input
# self.net["label"] = net_label # [None, self.num_classes], int32
# self.net["cues"] = net_cues # [None,41,41,self.num_classes])
self.net["id"] = net_id
self.net["drop_prob"] = tf.Variable(0.0, trainable=False)
self.net["output"] = self.create_network(phase)
self.pred()
elif phase == 'debug':
with tf.name_scope("placeholder"):
self.net["input"] = net_input
self.net["label"] = net_label # [None, self.num_classes], int32
self.net["cues"] = net_cues # [None,41,41,self.num_classes])
self.net["id"] = net_id
self.net["drop_prob"] = tf.Variable(0.0, trainable=False)
self.net["output"] = self.create_network(phase)
self.pred()
self.net["epoch"] = tf.Variable(0.0, trainable=False)
return self.net["output"]
def create_network(self, phase):
"""Helper function to build DSRG model
Parameters
----------
phase : str, optional
Phase to run DSRG model
Returns
-------
(crf) : Tensor
Final layer of FCN model of DSRG
"""
if self.init_model_path is not None:
self.load_init_model()
with tf.name_scope("vgg") as scope:
# build block
block = self.build_block("input",["conv1_1","relu1_1","conv1_2","relu1_2","pool1"])
block = self.build_block(block,["conv2_1","relu2_1","conv2_2","relu2_2","pool2"])
block = self.build_block(block,["conv3_1","relu3_1","conv3_2","relu3_2","conv3_3","relu3_3","pool3"])
block = self.build_block(block,["conv4_1","relu4_1","conv4_2","relu4_2","conv4_3","relu4_3","pool4"])
block = self.build_block(block,["conv5_1","relu5_1","conv5_2","relu5_2","conv5_3","relu5_3","pool5","pool5a"])
fc1 = self.build_fc(block,["fc6_1","relu6_1","drop6_1","fc7_1","relu7_1","drop7_1","fc8_1"], dilate_rate=6)
fc2 = self.build_fc(block,["fc6_2","relu6_2","drop6_2","fc7_2","relu7_2","drop7_2","fc8_2"], dilate_rate=12)
fc3 = self.build_fc(block,["fc6_3","relu6_3","drop6_3","fc7_3","relu7_3","drop7_3","fc8_3"], dilate_rate=18)
fc4 = self.build_fc(block,["fc6_4","relu6_4","drop6_4","fc7_4","relu7_4","drop7_4","fc8_4"], dilate_rate=24)
self.net["fc8"] = self.net[fc1]+self.net[fc2]+self.net[fc3]+self.net[fc4]
# DSRG
softmax = self.build_sp_softmax("fc8","fc8-softmax")
if phase in ['train', 'debug']:
new_seed = self.build_dsrg_layer("cues","fc8-softmax","new_cues")
crf = self.build_crf("fc8-softmax", "crf") # new
return self.net[crf] # NOTE: crf is log-probability
def build_block(self,last_layer,layer_lists):
"""Build a block of the DSRG model
Parameters
----------
last_layer : Tensor
The most recent layer used to build the DSRG model
layer_lists : list of str
List of strings of layer names to build inside the current block
Returns
-------
last_layer : Tensor
The output layer of the current block
"""
for layer in layer_lists:
if layer.startswith("conv"):
if layer[4] != "5":
with tf.name_scope(layer) as scope:
self.stride[layer] = self.stride[last_layer]
weights,bias = self.get_weights_and_bias(layer)
self.net[layer] = tf.nn.conv2d( self.net[last_layer], weights, strides = [1,1,1,1], padding="SAME", name="conv")
self.net[layer] = tf.nn.bias_add( self.net[layer], bias, name="bias")
last_layer = layer
if layer[4] == "5":
with tf.name_scope(layer) as scope:
self.stride[layer] = self.stride[last_layer]
weights,bias = self.get_weights_and_bias(layer)
self.net[layer] = tf.nn.atrous_conv2d( self.net[last_layer], weights, rate=2, padding="SAME", name="conv")
self.net[layer] = tf.nn.bias_add( self.net[layer], bias, name="bias")
last_layer = layer
if layer.startswith("relu"):
with tf.name_scope(layer) as scope:
self.stride[layer] = self.stride[last_layer]
self.net[layer] = tf.nn.relu( self.net[last_layer],name="relu")
last_layer = layer
elif layer.startswith("pool5a"):
with tf.name_scope(layer) as scope:
self.stride[layer] = self.stride[last_layer]
self.net[layer] = tf.nn.avg_pool( self.net[last_layer], ksize=[1,3,3,1], strides=[1,1,1,1],padding="SAME",name="pool")
last_layer = layer
elif layer.startswith("pool"):
if layer[4] not in ["4","5"]:
with tf.name_scope(layer) as scope:
self.stride[layer] = 2 * self.stride[last_layer]
self.net[layer] = tf.nn.max_pool( self.net[last_layer], ksize=[1,3,3,1], strides=[1,2,2,1],padding="SAME",name="pool")
last_layer = layer
if layer[4] in ["4","5"]:
with tf.name_scope(layer) as scope:
self.stride[layer] = self.stride[last_layer]
self.net[layer] = tf.nn.max_pool( self.net[last_layer], ksize=[1,3,3,1], strides=[1,1,1,1],padding="SAME",name="pool")
last_layer = layer
return last_layer
def build_fc(self,last_layer, layer_lists, dilate_rate=12):
"""Build a block of fully-connected layers
Parameters
----------
last_layer : Tensor
The most recent layer used to build the DSRG model
layer_lists : list of str
List of strings of layer names to build inside the current block
dilate_rate : int, optional
Dilation rate for atrous 2D convolutional layers
Returns
-------
last_layer : Tensor
The output layer of the current block
"""
for layer in layer_lists:
if layer.startswith("fc"):
with tf.name_scope(layer) as scope:
weights,bias = self.get_weights_and_bias(layer)
if layer.startswith("fc6"):
self.net[layer] = tf.nn.atrous_conv2d( self.net[last_layer], weights, rate=dilate_rate, padding="SAME", name="conv")
else:
self.net[layer] = tf.nn.conv2d( self.net[last_layer], weights, strides = [1,1,1,1], padding="SAME", name="conv")
self.net[layer] = tf.nn.bias_add( self.net[layer], bias, name="bias")
last_layer = layer
if layer.startswith("batch_norm"):
with tf.name_scope(layer) as scope:
self.net[layer] = tf.contrib.layers.batch_norm(self.net[last_layer])
last_layer = layer
if layer.startswith("relu"):
with tf.name_scope(layer) as scope:
self.net[layer] = tf.nn.relu( self.net[last_layer])
last_layer = layer
if layer.startswith("drop"):
with tf.name_scope(layer) as scope:
self.net[layer] = tf.nn.dropout( self.net[last_layer], keep_prob=1-self.net["drop_prob"])
last_layer = layer
return last_layer
def build_sp_softmax(self,last_layer,layer):
"""Build a block of a fully-connected layer and softmax
Parameters
----------
last_layer : Tensor
The most recent layer used to build the DSRG model
layer : str
Name of the softmax output layer
Returns
-------
layer : str
Name of the softmax output layer
"""
preds_max = tf.reduce_max(self.net[last_layer],axis=3,keepdims=True)
preds_exp = tf.exp(self.net[last_layer] - preds_max)
self.net[layer] = preds_exp / tf.reduce_sum(preds_exp,axis=3,keepdims=True) + self.min_prob
self.net[layer] = self.net[layer] / tf.reduce_sum(self.net[layer],axis=3,keepdims=True)
return layer
def build_crf(self,featmap_layer,layer):
"""Build a custom dense CRF layer
Parameters
----------
        featmap_layer : str
Layer name of the feature map inputted to dense CRF layer
layer : str
Layer name of the dense CRF layer
Returns
-------
layer : str
Layer name of the dense CRF layer
"""
origin_image = self.net["input"] + self.img_mean
origin_image_zoomed = tf.image.resize_bilinear(origin_image,(self.seed_size, self.seed_size))
featemap = self.net[featmap_layer]
featemap_zoomed = tf.image.resize_bilinear(featemap,(self.seed_size, self.seed_size))
def crf(featemap,image):
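            # runs dense CRF inference on every image in the batch and returns the result as log-probabilities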
batch_size = featemap.shape[0]
image = image.astype(np.uint8)
ret = np.zeros(featemap.shape,dtype=np.float32)
for i in range(batch_size):
ret[i,:,:,:] = crf_inference(image[i],self.crf_config_train,self.num_classes,featemap[i],use_log=True)
ret[ret < self.min_prob] = self.min_prob
ret /= np.sum(ret,axis=3,keepdims=True)
ret = np.log(ret)
return ret.astype(np.float32)
crf = tf.py_func(crf,[featemap_zoomed,origin_image_zoomed],tf.float32) # shape [N, h, w, C], RGB or BGR doesn't matter
self.net[layer] = crf
return layer
def build_dsrg_layer(self,seed_layer,prob_layer,layer):
"""Build DSRG layer
Parameters
----------
seed_layer : str
Layer name of the weak cues
prob_layer : str
Layer name of softmax
layer : str
Layer name of the DSRG layer
Returns
-------
layer : str
Layer name of the DSRG layer
"""
def generate_seed_step(tags,cues,probs):
tags = np.reshape(tags,[-1,1,1,self.num_classes])
params_list = []
for i in range(self.batch_size):
params_list.append([tags[i],cues[i],probs[i]])
ret = self.pool.map(single_generate_seed_step, params_list)
new_cues = ret[0]
for i in range(1,self.batch_size):
new_cues = np.concatenate([new_cues,ret[i]],axis=0)
return new_cues
self.net[layer] = tf.py_func(generate_seed_step,[self.net["label"],self.net[seed_layer],self.net[prob_layer]],tf.float32)
return layer
def load_init_model(self):
"""Load initialized layer"""
model_path = self.config["init_model_path"]
self.init_model = np.load(model_path, encoding="latin1", allow_pickle=True).item()
def get_weights_and_bias(self,layer,shape=None):
"""Load saved weights and biases for saved network
Parameters
----------
layer : str
Name of current layer
shape : list of int (size: 4), optional
4D shape of the convolutional or fully-connected layer
Returns
-------
weights : Variable
Saved weights
bias : Variable
Saved biases
"""
if layer in self.weights:
return self.weights[layer]
if shape is not None:
pass
elif layer.startswith("conv"):
shape = [3,3,0,0]
if layer == "conv1_1":
shape[2] = 3
else:
shape[2] = 64 * self.stride[layer]
if shape[2] > 512: shape[2] = 512
if layer in ["conv2_1","conv3_1","conv4_1"]: shape[2] = int(shape[2]/2)
shape[3] = 64 * self.stride[layer]
if shape[3] > 512: shape[3] = 512
elif layer.startswith("fc"):
if layer.startswith("fc6"):
shape = [3,3,512,1024]
if layer.startswith("fc7"):
shape = [1,1,1024,1024]
if layer.startswith("fc8"):
shape = [1,1,1024,self.num_classes]
if self.init_model_path is None:
init = tf.random_normal_initializer(stddev=0.01)
weights = tf.get_variable(name="%s_weights" % layer,initializer=init, shape = shape)
init = tf.constant_initializer(0)
bias = tf.get_variable(name="%s_bias" % layer,initializer=init, shape = [shape[-1]])
else:
if layer.startswith("fc8"):
init = tf.contrib.layers.xavier_initializer(uniform=True)
else:
init = tf.constant_initializer(self.init_model[layer]["w"])
weights = tf.get_variable(name="%s_weights" % layer,initializer=init,shape = shape)
if layer.startswith("fc8"):
init = tf.constant_initializer(0)
else:
init = tf.constant_initializer(self.init_model[layer]["b"])
bias = tf.get_variable(name="%s_bias" % layer,initializer=init,shape = [shape[-1]])
self.weights[layer] = (weights,bias)
if layer.startswith("fc8"):
self.lr_10_list.append(weights)
self.lr_20_list.append(bias)
else:
self.lr_1_list.append(weights)
self.lr_2_list.append(bias)
self.trainable_list.append(weights)
self.trainable_list.append(bias)
self.variables["total"].append(weights)
self.variables["total"].append(bias)
return weights,bias
def pred(self):
"""Implement final segmentation prediction as argmax of final feature map"""
if self.h is not None:
self.net["rescale_output"] = tf.image.resize_bilinear(self.net["output"], (self.h, self.w))
else:
label_size = tf.py_func(lambda x: x.shape[1:3], [self.net["input"]], [tf.int64, tf.int64])
self.net["rescale_output"] = tf.image.resize_bilinear(self.net["output"], [tf.cast(label_size[0], tf.int32),
tf.cast(label_size[1],
tf.int32)])
self.net["pred"] = tf.argmax(self.net["rescale_output"], axis=3)
def getloss(self):
"""Construct overall loss function
Returns
-------
loss : Tensor
Output of overall loss function
"""
loss = 0
# for DSRG
seed_loss = self.get_balanced_seed_loss(self.net["fc8-softmax"],self.net["new_cues"])
constrain_loss = self.get_constrain_loss(self.net["fc8-softmax"],self.net["crf"])
self.loss["seed"] = seed_loss
self.loss["constrain"] = constrain_loss
loss += seed_loss + constrain_loss
return loss
def get_balanced_seed_loss(self,softmax,cues):
"""Balanced seeding loss function
Parameters
----------
softmax : Tensor
Final feature map
cues : Tensor
Weak cues
Returns
-------
(loss) : Tensor
Output of balanced seeding loss function (sum of foreground/background losses)
"""
count_bg = tf.reduce_sum(cues[:,:,:,0:1],axis=(1,2,3),keepdims=True)
loss_bg = -tf.reduce_mean(tf.reduce_sum(cues[:,:,:,0:1]*tf.log(softmax[:,:,:,0:1]),axis=(1,2,3),keepdims=True)/(count_bg+1e-8))
count_fg = tf.reduce_sum(cues[:,:,:,1:],axis=(1,2,3),keepdims=True)
loss_fg = -tf.reduce_mean(tf.reduce_sum(cues[:,:,:,1:]*tf.log(softmax[:,:,:,1:]),axis=(1,2,3),keepdims=True)/(count_fg+1e-8))
return loss_bg+loss_fg
def get_constrain_loss(self,softmax,crf):
"""Constrain loss function
Parameters
----------
softmax : Tensor
Final feature map
crf : Tensor
Output of dense CRF
Returns
-------
loss : Tensor
Output of constrain loss function
"""
probs_smooth = tf.exp(crf)
loss = tf.reduce_mean(tf.reduce_sum(probs_smooth * tf.log(probs_smooth/(softmax+1e-8)+1e-8), axis=3))
return loss
| 2.375
| 2
|
asv_oggm_plugin.py
|
skachuck/oggm
| 156
|
12779262
|
<reponame>skachuck/oggm
import subprocess
import requests
import tempfile
import os
import logging
from asv.plugins.conda import _find_conda, Conda
from asv.console import log
from asv import util
logging.getLogger("requests").setLevel(logging.WARNING)
OGGM_CONDA_ENV_URL = ("https://raw.githubusercontent.com/OGGM/"
"OGGM-dependency-list/master/Linux-64/{0}")
OGGM_CONDA_ENVS = {
"36": "oggmdev-1.2.0.202002022248_20200202_py36.yml",
"37": "oggmdev-1.2.0.202002022248_20200202_py37.yml",
}
class OggmVirtualenv(Conda):
tool_name = "oggm_conda"
def _setup(self):
log.info("Creating oggm conda environment for {0}".format(self.name))
env_file = tempfile.NamedTemporaryFile(mode="w", delete=False,
suffix=".yml")
try:
pyver = str(self._python).replace(".", "")[:2]
oggm_env = OGGM_CONDA_ENVS[pyver]
req = requests.get(OGGM_CONDA_ENV_URL.format(oggm_env))
req.raise_for_status()
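            # copy the published environment file, dropping its "prefix:" line and renaming the environment for this benchmark run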
for line in req.text.splitlines():
if line.startswith("prefix:"):
continue
elif line.startswith("name:"):
env_file.write("name: {0}\n".format(self.name))
else:
env_file.write(line + "\n")
env_file.close()
self._conda_channels = ["conda-forge", "defaults"]
self._conda_environment_file = env_file.name
return super()._setup()
except Exception as exc:
if os.path.isfile(env_file.name):
with open(env_file.name, "r") as f:
text = f.read()
log.info("oggm conda env create failed: in {} with:\n{}"
.format(self._path, text))
raise
finally:
os.unlink(env_file.name)
| 2.3125
| 2
|
src/fleets/water_heater_fleet/load_config.py
|
GMLC-1-4-2/battery_interface
| 1
|
12779263
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 19 13:46:46 2019
Description:
Last update:
Version: 1.0
Author: <NAME> (NREL)
"""
import pandas as pd
class LoadConfig(object):
def __init__(self, config_file):
self.config_file = config_file
def str_to_bool(self, s):
if s == 'True':
return True
elif s == 'False':
return False
else:
raise ValueError('Insert boolean values in the config file')
def get_config_models(self):
df = pd.DataFrame()
df['MaxAnnualConditions'] = self.config_file.get('Water Heater Models', 'MaxAnnualConditions', fallback = None).split(',')
#df['TtankInitial'] = self.config_file.get('Water Heater Models', 'TtankInitial', fallback = None).split(',')
#df['TsetInitial'] = list(map(float, self.config_file.get('Water Heater Models', 'TsetInitial', fallback = None).split(',')))
#df['Capacity'] = list(map(int, self.config_file.get('Water Heater Models', 'Capacity', fallback = None).split(',')))
#df['Type'] = list(map(float, self.config_file.get('Water Heater Models', 'Type', fallback = None).split(',')))
#df['Location'] = list(map(float, self.config_file.get('Water Heater Models', 'Location', fallback = None).split(',')))
#df['MaxServiceCalls'] = list(map(float, self.config_file.get('Water Heater Models', 'MaxServiceCalls', fallback = None).split(',')))
return df
def get_n_subfleets(self):
return int(self.config_file.get('Water Heater Fleet', 'NumberSubfleets', fallback = 100))
def get_run_baseline(self):
return self.str_to_bool(self.config_file.get('Water Heater Fleet', 'RunBaseline', fallback = False))
def get_n_days_MC(self):
return int(self.config_file.get('Water Heater Fleet', 'NumberDaysBase', fallback = 10))
def get_fleet_config(self):
is_p_priority = self.str_to_bool(self.config_file.get('Fleet Configuration', 'Is_P_Priority', fallback = True))
is_aut = self.str_to_bool(self.config_file.get('Fleet Configuration', 'IsAutonomous', fallback = False))
return [is_p_priority, is_aut]
def get_FW(self):
fw_enabled = list()
fw_enabled.append(self.str_to_bool(self.config_file.get('FW', 'FW21_Enabled', fallback = True)))
# Discrete version of the response to frequency deviations (artificial inertia service)
fw_enabled.append(list(map(float, (self.config_file.get('FW', 'db_UF', fallback = None).split(',')))))
fw_enabled.append(list(map(float, (self.config_file.get('FW', 'db_OF', fallback = None).split(',')))))
        # TODO: These parameters must be removed in future releases of the API
fw_enabled.append(float(self.config_file.get('FW', 'k_UF', fallback = 0.05)))
fw_enabled.append(float(self.config_file.get('FW', 'k_UF', fallback = 0.05)))
fw_enabled.append(float(self.config_file.get('FW', 'P_avl', fallback = 1.0)))
fw_enabled.append(float(self.config_file.get('FW', 'P_min', fallback = 0.0)))
fw_enabled.append(float(self.config_file.get('FW', 'P_pre', fallback = 1.0)))
return fw_enabled
def get_impact_metrics_params(self):
metrics = list()
        # Average tank temperature under baseline
        metrics.append(float(self.config_file.get('Impact Metrics', 'ave_Tin_base', fallback = 123)))
        # Average tank temperature under grid service
        metrics.append(float(self.config_file.get('Impact Metrics', 'ave_Tin_grid', fallback = 123)))
        # Cycles in baseline
        metrics.append(float(self.config_file.get('Impact Metrics', 'cycle_basee', fallback = 100)))
        # Cycles in grid operation
metrics.append(float(self.config_file.get('Impact Metrics', 'cycle_grid', fallback = 100)))
# State of Charge of the battery equivalent model under baseline
metrics.append(float(self.config_file.get('Impact Metrics', 'SOCb_metric', fallback = 100)))
# State of Charge of the battery equivalent model
metrics.append(float(self.config_file.get('Impact Metrics', 'SOC_metric', fallback = 1.0)))
# Unmet hours of the fleet
metrics.append(float(self.config_file.get('Impact Metrics', 'unmet_hours', fallback = 1.0)))
return metrics
def get_service_weight(self):
return float(self.config_file.get('Service Weighting Factor', 'ServiceWeight', fallback=0.5))
| 2.84375
| 3
|
test/utils_fetch_pytest.py
|
LaraFerCue/jail_manager
| 1
|
12779264
|
import os
import shutil
from pathlib import PosixPath
from tempfile import TemporaryDirectory, mkdtemp
from urllib.error import URLError
import pytest
from jmanager.models.distribution import Architecture, Version, VersionType, Component
from jmanager.utils.fetch import HTTPFetcher
from test.globals import TEST_DISTRIBUTION
TEMPORARY_RELEASE_FTP_DIR = "releases/amd64/12.0-RELEASE"
TEMPORARY_SNAPSHOT_FTP_DIR = "snapshots/amd64/12.0-STABLE"
class MockingFetcher(HTTPFetcher):
FTP_BASE_DIRECTORY = PosixPath()
def __init__(self):
self.tmp_dir = mkdtemp()
self.SERVER_URL = f"file://{self.tmp_dir}"
def __enter__(self):
for folder in [TEMPORARY_RELEASE_FTP_DIR, TEMPORARY_SNAPSHOT_FTP_DIR]:
temporary_folder = f"{self.tmp_dir}/{folder}"
os.makedirs(temporary_folder)
with open(f"{temporary_folder}/base.txz", "w") as base_file:
base_file.write("base.txz")
return self
def __exit__(self, exc_type, exc_val, exc_tb):
shutil.rmtree(self.tmp_dir, ignore_errors=True)
class TestFetchUtils:
class ErrorToBeRaised(BaseException):
pass
def test_fetch_base_tarball(self):
with MockingFetcher() as http_fetcher:
with TemporaryDirectory() as temp_dir:
temp_dir_path = PosixPath(temp_dir)
http_fetcher.fetch_tarballs_into(
version=TEST_DISTRIBUTION.version,
architecture=TEST_DISTRIBUTION.architecture,
components=TEST_DISTRIBUTION.components,
temp_dir=temp_dir_path)
assert temp_dir_path.joinpath('base.txz').is_file()
def test_fetch_tarballs_invalid_version(self):
distribution_version = Version(major=10, minor=6, version_type=VersionType.RELEASE)
with MockingFetcher() as http_fetcher:
with pytest.raises(URLError, match=r'\[Errno 2\] No such file or directory: '):
http_fetcher.fetch_tarballs_into(
version=distribution_version,
architecture=Architecture.AMD64,
components=[Component.BASE],
temp_dir=PosixPath('/tmp'))
def test_fetch_tarballs_from_snapshots(self):
distribution_version = Version(major=12, minor=0, version_type=VersionType.STABLE)
with MockingFetcher() as http_fetcher:
with TemporaryDirectory() as temp_dir:
temp_dir_path = PosixPath(temp_dir)
http_fetcher.fetch_tarballs_into(
version=distribution_version,
architecture=Architecture.AMD64,
components=[Component.BASE],
temp_dir=temp_dir_path)
assert temp_dir_path.joinpath('base.txz').is_file()
def test_fetch_with_callback_function(self):
def callback_function(text_to_show: str, received_bytes: int, total_bytes: int, time_elapsed: float):
assert isinstance(text_to_show, str)
assert isinstance(received_bytes, int)
assert isinstance(total_bytes, int)
assert isinstance(time_elapsed, float)
            raise TestFetchUtils.ErrorToBeRaised("test message")
distribution_version = Version(major=12, minor=0, version_type=VersionType.STABLE)
with pytest.raises(TestFetchUtils.ErrorToBeRaised):
with MockingFetcher() as http_fetcher:
http_fetcher.fetch_tarballs_into(
version=distribution_version,
architecture=Architecture.AMD64,
components=[Component.BASE],
temp_dir=PosixPath('/tmp'),
callback=callback_function)
| 2.203125
| 2
|
Source/GUI_1/SimulatorGUI.py
|
pranavjain110/CarHealthMonitor
| 1
|
12779265
|
<gh_stars>1-10
from tkinter import *
from tkinter import ttk
import sender
window = Tk()
window.iconbitmap(r'carsensors.ico')
window.title("Car Health Monitor")
window.minsize(800, 600)
window.maxsize(800, 600)
window.configure(bg='black')
temp = 40
press = 35
oil_time_days = 0
tire_dist_kms = 0
mailID = StringVar()
email = "<EMAIL>"
# setting up the font size and style
fontstyle = "Helvetica"
small_fsize = 13
large_fsize = 15
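# define a custom ttk theme so the Combobox widgets match the dark colour scheme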
combostyle = ttk.Style()
combostyle.theme_create('combostyle', parent='alt',
settings={'TCombobox':
{'configure':
{'selectbackground': 'black',
'foreground': 'white',
'fieldbackground': 'black',
'background': 'dark grey',
'arrowcolor': 'black',
'bordercolor': 'white'
}}})
combostyle.theme_use('combostyle')
# Initializing an object of sender
rabbit_mq = sender.RabbitMq(queue='Hello',
host='localhost',
exchange='',
routing_key='Hello')
# x = 0
sensor1Data = []
sensor2Data = []
# Main titles
lbl_Title = Label(window, text="\n Sensor Data \n", bg="black",
fg="cyan3", font=(large_fsize, large_fsize))
lbl_Temp = Label(window, text="Engine Temperature (°C)", font=(
fontstyle, small_fsize), bg="black", fg="white")
lbl_Press = Label(window, text="Tire Pressure (psi)", font=(
fontstyle, small_fsize), bg="black", fg="white")
lbl_blank2 = Label(window, text=" ", bg="black")
lbl_cat1 = Label(window, text="Vehicle Type", font=(
fontstyle, small_fsize), bg="black", fg="white")
lbl_blank3 = Label(window, text=" ", bg="black")
lbl_cat2 = Label(window, text="Driving Weather Condition",
font=(fontstyle, small_fsize), bg="black", fg="white")
lbl_blank4 = Label(window, text=" ", bg="black")
lbl_heading = Label(window, text="\nTime elapsed after last replacement\n ",
bg="black", fg="cyan3",
font=(fontstyle, large_fsize))
lbl_blank5 = Label(window, text=" ", bg="black")
lbl_oilTime = Label(window, text="For Oil (in days)", font=(
fontstyle, small_fsize), bg="black", fg="white")
lbl_blank3 = Label(window, text=" ", bg="black")
lbl_tireDist = Label(window, text="For Tire (in kms)", font=(
fontstyle, small_fsize), bg="black", fg="white")
lbl_blank6 = Label(window, text=" \n ", bg="black")
lbl_blank7 = Label(window, text=" \n ", bg="black")
email_input = Entry(window, textvariable=mailID, bg="dark grey",
font=(fontstyle, large_fsize), fg="black")
lbl_blank8 = Label(window, text=" \n ", bg="black")
lbl_email = Label(window, text="Please enter email id to receive notifications",
font=(fontstyle, small_fsize),
bg="black", fg="white")
# Number labels
count_Temp = Label(window, text=temp, bg="black",
fg="white", font=(fontstyle, small_fsize))
count_Press = Label(window, text=press, bg="black",
fg="white", font=(fontstyle, small_fsize))
count_oil_time = Label(window, text=oil_time_days,
bg="black", fg="white", font=(fontstyle, small_fsize))
count_tire_dist = Label(window, text=tire_dist_kms,
bg="black", fg="white", font=(fontstyle, small_fsize))
makes = ['Car', 'Truck']
make_select = ttk.Combobox(window, values=makes, width=0)
weather_condition = ['Summer', 'Winter']
weather_select = ttk.Combobox(window, values=weather_condition, width=0)
# Placing Labels using grid function
# Text Labels
lbl_Title.grid(row=0, column=1, sticky=N + S + E + W, columnspan=3)
lbl_Temp.grid(row=4, column=0, sticky=N + S + E + W)
lbl_Press.grid(row=6, column=0, sticky=N + S + E + W)
lbl_blank2.grid(row=7, column=0, sticky=N + S + E + W)
lbl_cat1.grid(row=8, column=0, sticky=N + S + E + W)
lbl_blank3.grid(row=9, column=0, sticky=N + S + E + W)
lbl_cat2.grid(row=10, column=0, sticky=N + S + E + W)
lbl_blank4.grid(row=11, column=0, sticky=N + S + E + W)
lbl_heading.grid(row=12, column=1, sticky=N + S + E + W, columnspan=3)
lbl_blank5.grid(row=13, column=0, sticky=N + S + E + W)
lbl_oilTime.grid(row=14, column=0, sticky=N + S + E + W)
lbl_blank6.grid(row=15, column=4, sticky=N + S + E + W)
lbl_tireDist.grid(row=16, column=0, sticky=N + S + E + W)
lbl_blank7.grid(row=17, column=4, sticky=N + S + E + W)
email_input.grid(row=18, column=2, sticky=N + S + E + W)
lbl_email.grid(row=18, column=0, sticky=N + S + E + W, columnspan=1)
lbl_blank8.grid(row=19, column=4, sticky=N + S + E + W)
# Number Labels
count_Temp.grid(row=4, column=2, sticky=N + S + E + W)
count_Press.grid(row=6, column=2, sticky=N + S + E + W)
make_select.grid(row=8, column=2, sticky=N + S + E + W)
weather_select.grid(row=10, column=2, sticky=N + S + E + W)
count_oil_time.grid(row=14, column=2, sticky=N + S + E + W)
count_tire_dist.grid(row=16, column=2, sticky=N + S + E + W)
# Reading image files
# image file used for + and - button
img_increase = PhotoImage(file='increase.png')
img_decrease = PhotoImage(file='decrease.png')
def increase_temp():
"""Function to increase temperature variable
"""
global temp
temp = temp + 10
count_Temp.configure(text=temp)
def decrease_temp():
"""Function to decrease temperature variable
"""
global temp
if temp > 0:
temp = temp - 10
count_Temp.configure(text=temp)
def increase_press():
"""Function to increase pressure variable
"""
global press
if press < 40:
press = press + 1
count_Press.configure(text=press)
def decrease_press():
"""Function to decrease pressure variable
"""
global press
if press > 0:
press = press - 1
count_Press.configure(text=press)
def increase_oilTime():
"""Function to increase oil variable
Oil time represents time elapsed since
oil was replaced.
"""
global oil_time_days
oil_time_days = oil_time_days + 5
count_oil_time.configure(text=oil_time_days)
def decrease_oilTime():
"""Function to decrease oilTime variable
Oil time represents time elapsed since
oil was replaced.
"""
global oil_time_days
# Condition to limit the minimum value of the variable to 0
if oil_time_days > 0:
oil_time_days = oil_time_days - 5
count_oil_time.configure(text=oil_time_days)
def increase_tireDist():
"""Function to increase tire Distance variable
Tire distance represents distance car has moved
since tire was changed.
"""
global tire_dist_kms
tire_dist_kms = tire_dist_kms + 5000
count_tire_dist.configure(text=tire_dist_kms)
def decrease_tireDist():
"""Function to decrease tire Distance variable
Tire distance represents distance car has moved
since tire was changed.
"""
global tire_dist_kms
# Condition to limit the minimum value of the variable to 0
if tire_dist_kms > 0:
tire_dist_kms = tire_dist_kms - 5000
count_tire_dist.configure(text=tire_dist_kms)
def transmit():
"""Function to set the value of email to
the one entered by the user in the text box
"""
global mailID
global email
email = mailID.get()
# Button to increment and decrement the parameters
btn_decTemp = Button(window, image=img_decrease, command=decrease_temp,
bg="black", borderwidth=0, activebackground="black")
btn_incTemp = Button(window, image=img_increase, command=increase_temp,
bg="black", borderwidth=0, activebackground="black")
btn_decPress = Button(window, image=img_decrease, command=decrease_press,
                      bg="black", borderwidth=0, activebackground="black")
btn_incPress = Button(window, image=img_increase, command=increase_press,
                      bg="black", borderwidth=0, activebackground="black")
btn_decOilTime = Button(window, image=img_decrease, command=decrease_oilTime,
bg="black", borderwidth=0,
activebackground="black")
btn_incOilTime = Button(window, image=img_increase, command=increase_oilTime,
bg="black", borderwidth=0,
activebackground="black")
btn_decTireDist = Button(window, image=img_decrease, command=decrease_tireDist,
bg="black", borderwidth=0,
activebackground="black")
btn_incTireDist = Button(window, image=img_increase, command=increase_tireDist,
bg="black", borderwidth=0,
activebackground="black")
btn_mailID = Button(window, text="Enter", command=transmit,
bg="black", fg="cyan3", height=2, width=4)
# Placing buttons using grid function
btn_decTemp.grid(row=4, column=1, sticky=N + S + E + W)
btn_incTemp.grid(row=4, column=3, sticky=N + S + E + W)
btn_decPress.grid(row=6, column=1, sticky=N + S + E + W)
btn_incPress.grid(row=6, column=3, sticky=N + S + E + W)
btn_decOilTime.grid(row=14, column=1, sticky=N + S + E + W)
btn_incOilTime.grid(row=14, column=3, sticky=N + S + E + W)
btn_decTireDist.grid(row=16, column=1, sticky=N + S + E + W)
btn_incTireDist.grid(row=16, column=3, sticky=N + S + E + W)
btn_mailID.grid(row=18, column=3, sticky=N + S + E + W)
window.grid_columnconfigure(0, weight=1)
window.grid_columnconfigure(1, weight=1)
window.grid_columnconfigure(2, weight=1)
window.grid_columnconfigure(3, weight=1)
window.grid_rowconfigure(0, weight=4)
window.grid_rowconfigure(5, weight=4)
def timer():
# global x
# if x < 1100:
# call this function again in 1,000 milliseconds
window.after(1000, timer)
print("Updating ...")
sensor1Data.append(temp)
sensor2Data.append(press)
category1 = weather_select.get()
category2 = make_select.get()
# store 10 reading in a list and then publish at once
if len(sensor1Data) == 10:
rabbit_mq.publish(payload={"sensor1Data": sensor1Data,
"sensor2Data": sensor2Data,
"oilTime_hrs": oil_time_days * 24,
"tire_dist_kms": tire_dist_kms,
"email": email,
"category1": category1,
"category2": category2})
sensor2Data.clear()
sensor1Data.clear()
# Set X=0 to run timer function in an infinite loop
# if x > 1000:
# x = 0
# x += 1
timer()
window.mainloop()
| 2.828125
| 3
|
code/kuantum-komputers/randomp python shit.py
|
SoftwareCornwall/m2m-teams-july-2019
| 0
|
12779266
|
<gh_stars>0
users = ["will", "ok", "roberto", "nou"]
LoggedIn=False
while not LoggedIn:
user = input("Username>> ")
    for i in range(0, len(users) - 1, 2):  # usernames sit at even indices, passwords follow them
if user == users[i]:
password = input("Password>> ")
if password == users[i+1]:
print("Correct login details")
LoggedIn=True
else:
print("User details wrong")
| 3.796875
| 4
|
python/132_Palindrome_Partition_II.py
|
liaison/LeetCode
| 17
|
12779267
|
<filename>python/132_Palindrome_Partition_II.py
class Solution:
def minCut(self, s: str) -> int:
# element indicates the minimal number of partitions
# we need to divide the corresponding substring
dp = [0] * (len(s)+1)
for right in range(1, len(s)+1):
num_parts = float('inf')
for left in range(0, right):
substr = s[left:right]
# isPalindrome(s[left:right])
if substr == substr[::-1]:
num_parts = min(num_parts, dp[left]+1)
if left == 0:
# cannot have less than one partition
break
dp[right] = num_parts
return dp[len(s)] - 1
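# Illustrative usage sketch (added for clarity, not part of the original solution):
# "aab" needs one cut (["aa", "b"]) and a single character needs none.
if __name__ == "__main__":
    assert Solution().minCut("aab") == 1
    assert Solution().minCut("a") == 0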
| 3.390625
| 3
|
model_trainer/tf_logger.py
|
NeverendingNotification/nnlibs
| 0
|
12779268
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 30 12:07:19 2018
@author: nn
"""
from collections import OrderedDict
import os
import cv2
from . import tf_metrics
def get_logger(arc_type, logger_params):
if arc_type == "sl":
logger = SlLogger(**logger_params)
elif arc_type == "ae":
logger = AELogger(**logger_params)
elif arc_type == "gan":
logger = GanLogger(**logger_params)
    else:
        raise ValueError("unknown arc_type: {}".format(arc_type))
    return logger
class BaseLogger:
def __init__(self, log_dir=None, out_root=None, metrics={}, metric_period=1,
sample_dirname="sample"):
if out_root is not None:
log_dir = os.path.join(out_root, log_dir)
if log_dir is not None:
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
self.log_dir = log_dir
self.metrics = metrics
self.sample = sample_dirname
def start_epoch(self, trainer, loader, epoch):
self.losses = OrderedDict()
def log_batch(self, batch, loss_keys=["loss"]):
for key in loss_keys:
if key not in batch:
continue
if key not in self.losses:
self.losses[key] = 0.0
self.losses[key] += batch[key]
def end_epoch(self, trainer, loader, epoch):
raise NotImplementedError()
def get_loss_str(self):
key = ", ".join(["{} : {:.04f}".format(k, v) for k, v in self.losses.items()])
return key
def log_end(self, trainer, loader):
pass
class SlLogger(BaseLogger):
def end_epoch(self, trainer, loader, epoch):
out = tf_metrics.get_metrics_classifier(loader, trainer,
metrics=self.metrics)
loss_key = self.get_loss_str()
key = ", ".join(["{} : {}".format(metric, out[metric]) for metric in self.metrics])
print("Epoch : {}, {}, {}".format(epoch, loss_key, key))
class AELogger(BaseLogger):
def end_epoch(self, trainer, loader, epoch):
out, images = tf_metrics.get_metrics_generator(loader, trainer,
metrics=self.metrics)
o_dir = os.path.join(self.log_dir, self.sample)
if not os.path.isdir(o_dir):
os.makedirs(o_dir)
for i, image in enumerate(images):
cv2.imwrite(os.path.join(o_dir, "{:05d}_{:04d}.png".format(epoch, i)), image)
loss_key = self.get_loss_str()
key = ", ".join(["{} : {}".format(metric, out[metric]) for metric in self.metrics])
print("Epoch : {}, {}, {}".format(epoch, loss_key, key))
class GanLogger(BaseLogger):
def end_epoch(self, trainer, loader, epoch):
out, images = tf_metrics.get_metrics_generator(loader, trainer,
metrics=self.metrics)
o_dir = os.path.join(self.log_dir, self.sample)
if not os.path.isdir(o_dir):
os.makedirs(o_dir)
for i, image in enumerate(images):
cv2.imwrite(os.path.join(o_dir, "{:05d}_{:04d}.png".format(epoch, i)), image)
loss_key = self.get_loss_str()
key = ", ".join(["{} : {}".format(metric, out[metric]) for metric in self.metrics])
print("Epoch : {}, {}, {}".format(epoch, loss_key, key))
| 2.03125
| 2
|
utils/fortran_utils.py
|
IAEA-NDS/FENDL-Code
| 1
|
12779269
|
<gh_stars>1-10
from fortranformat import FortranRecordReader, FortranRecordWriter
from .generic_utils import flatten, static_vars
@static_vars(frr_cache={})
def fort_read(fobj, formatstr, none_as=None, varnames=None, debug=False):
"""Read from a file or string using a format descriptor
Keyword arguments:
fobj -- file object or string to read from
formatstr -- the Fortran format description string
none_as -- conversion of None resulting from incomplete reads
varnames -- a list with variable names used as keys in the resulting
dictionary that contains the values read.
If None, return the values read as a list
debug -- print extra information on stdout for debugging purposes
"""
if formatstr not in fort_read.frr_cache:
fort_read.frr_cache[formatstr] = FortranRecordReader(formatstr)
frr = fort_read.frr_cache[formatstr]
if not isinstance(fobj, str):
fname = fobj.name
inpline = fobj.readline()
else:
fname = 'console'
inpline = fobj
res = frr.read(inpline)
if none_as is not None:
res = [none_as if x is None else x for x in res]
if varnames:
res = {k: res[i] for i, k in enumerate(varnames)}
if debug:
print('--- reading ---')
print('file: ' + fname)
print('fmt: ' + formatstr)
print('str: ' + inpline)
return res
@static_vars(frw_cache={})
def fort_write(fobj, formatstr, values, debug=False):
"""Write values to a file in a specified Fortran format.
Keyword arguments:
fobj -- file object for output
formatstr -- the Fortran format description string
values -- values given in a (potentially nested) list
debug -- print extra information for debugging purposes
"""
vals = list(flatten(values))
vals = [v for v in vals if v is not None]
if debug:
print('--- writing ---')
try:
print('file: ' + fobj.name)
except AttributeError:
print('file: console')
print('fmt: ' + formatstr)
print('values: ')
print(vals)
if formatstr not in fort_write.frw_cache:
fort_write.frw_cache[formatstr] = FortranRecordWriter(formatstr)
frw = fort_write.frw_cache[formatstr]
line = frw.write(vals)
if fobj is None:
print(line)
else:
fobj.write(line + '\n')
def fort_range(*args):
"""Specify a range Fortran style.
For instance, fort_range(1,3) equals range(1,4).
"""
if len(args) == 2:
return range(args[0], args[1]+1)
elif len(args) == 3:
return range(args[0], args[1]+1, args[2])
else:
raise IndexError
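# Illustrative usage sketch (added for clarity; assumes the fortranformat package
# used above is installed). fort_read parses a fixed-format record, fort_write
# emits one, and fort_range mirrors Fortran's inclusive DO-loop bounds.
def _usage_example():
    vals = fort_read('   3  12', '(2I4)', varnames=['n', 'm'])
    print(vals)                                          # {'n': 3, 'm': 12}
    fort_write(None, '(2I4)', [vals['n'], vals['m']])    # prints "   3  12"
    print(list(fort_range(1, 3)))                        # [1, 2, 3]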
| 2.8125
| 3
|
tests/test_auth.py
|
ported-pw/asgi-webdav
| 0
|
12779270
|
from base64 import b64encode
import pytest
from asgi_webdav.constants import DAVPath, DAVUser
from asgi_webdav.config import update_config_from_obj, get_config
from asgi_webdav.auth import DAVPassword, DAVPasswordType, DAVAuth
from asgi_webdav.request import DAVRequest
USERNAME = "username"
PASSWORD = "password"
HASHLIB_USER = "user-hashlib"
basic_authorization = b"Basic " + b64encode(
"{}:{}".format(USERNAME, PASSWORD).encode("utf-8")
)
basic_authorization_bad = b"Basic bad basic_authorization"
def get_basic_authorization(username, password) -> bytes:
return b"Basic " + b64encode("{}:{}".format(username, password).encode("utf-8"))
def fake_call():
pass
request = DAVRequest(
{"method": "GET", "headers": {b"authorization": b"placeholder"}, "path": "/"},
fake_call,
fake_call,
)
def test_dev_password_class():
pw_obj = DAVPassword("password")
assert pw_obj.type == DAVPasswordType.RAW
pw_obj = DAVPassword(
"<hashlib>:sha256:salt:291e247d155354e48fec2b579637782446821935fc96a5a08a0b7885179c408b"
)
assert pw_obj.type == DAVPasswordType.HASHLIB
pw_obj = DAVPassword("<digest>:ASGI-WebDAV:c1d34f1e0f457c4de05b7468d5165567")
assert pw_obj.type == DAVPasswordType.DIGEST
pw_obj = DAVPassword(
"<ldap>#1#ldaps://rexzhang.myds.me#SIMPLE#"
"uid=user-ldap,cn=users,dc=rexzhang,dc=myds,dc=me"
)
assert pw_obj.type == DAVPasswordType.LDAP
@pytest.mark.asyncio
async def test_basic_access_authentication():
config_data = {
"account_mapping": [
{"username": USERNAME, "password": PASSWORD, "permissions": list()},
{
"username": HASHLIB_USER,
"password": "<<PASSWORD>:"
"<PASSWORD>",
"permissions": list(),
},
]
}
update_config_from_obj(config_data)
dav_auth = DAVAuth(get_config())
request.headers[b"authorization"] = get_basic_authorization(USERNAME, PASSWORD)
user, message = await dav_auth.pick_out_user(request)
print(basic_authorization)
print(user)
print(message)
assert isinstance(user, DAVUser)
request.headers[b"authorization"] = get_basic_authorization(HASHLIB_USER, PASSWORD)
user, message = await dav_auth.pick_out_user(request)
assert isinstance(user, DAVUser)
request.headers[b"authorization"] = basic_authorization_bad
user, message = await dav_auth.pick_out_user(request)
print(user)
print(message)
assert user is None
def test_verify_permission():
username = USERNAME
password = PASSWORD
admin = False
# "+"
permissions = ["+^/aa"]
dav_user = DAVUser(username, password, permissions, admin)
assert not dav_user.check_paths_permission([DAVPath("/a")])
assert dav_user.check_paths_permission([DAVPath("/aa")])
assert dav_user.check_paths_permission([DAVPath("/aaa")])
permissions = ["+^/bbb"]
dav_user = DAVUser(username, password, permissions, admin)
assert not dav_user.check_paths_permission(
[DAVPath("/aaa")],
)
# "-"
permissions = ["-^/aaa"]
dav_user = DAVUser(username, password, permissions, admin)
assert not dav_user.check_paths_permission(
[DAVPath("/aaa")],
)
# "$"
permissions = ["+^/a$"]
dav_user = DAVUser(username, password, permissions, admin)
assert dav_user.check_paths_permission(
[DAVPath("/a")],
)
assert not dav_user.check_paths_permission(
[DAVPath("/ab")],
)
assert not dav_user.check_paths_permission(
[DAVPath("/a/b")],
)
# multi-rules
permissions = ["+^/a$", "+^/a/b"]
dav_user = DAVUser(username, password, permissions, admin)
assert dav_user.check_paths_permission(
[DAVPath("/a")],
)
assert dav_user.check_paths_permission(
[DAVPath("/a/b")],
)
permissions = ["+^/a$", "+^/a/b", "-^/a/b/c"]
dav_user = DAVUser(username, password, permissions, admin)
assert dav_user.check_paths_permission(
[DAVPath("/a")],
)
assert dav_user.check_paths_permission(
[DAVPath("/a/b")],
)
assert not dav_user.check_paths_permission(
[DAVPath("/a/b/c")],
)
permissions = ["+^/a$", "+^/a/b1", "-^/a/b2"]
dav_user = DAVUser(username, password, permissions, admin)
assert dav_user.check_paths_permission(
[DAVPath("/a")],
)
assert dav_user.check_paths_permission(
[DAVPath("/a/b1")],
)
assert not dav_user.check_paths_permission(
[DAVPath("/a/b2")],
)
| 2.203125
| 2
|
examples/basic_bot.py
|
nkpro2000sr/discord-reargparse
| 1
|
12779271
|
<filename>examples/basic_bot.py
import discord
from discord.ext import commands
from discord_reargparse import *
import random
import shlex
description = "An example bot to showcase the discord_reargparse module."
bot = commands.Bot(command_prefix='?', description=description)
@bot.event
async def on_ready():
print('Logged in as')
print(bot.user.name)
print(bot.user.id)
print('------')
re_convert = RegExArgConverter(
r"(\d+) (\d+)",
left = Parameter(int),
right = Parameter(int),
)
@bot.command()
async def add(ctx, *, param:re_convert=re_convert.defaults):
"""Adds two numbers together."""
await ctx.send(param["left"] + param["right"])
re_convert = RegExArgConverter(
r"(\d+d\d+)",
dice = Parameter(),
)
@bot.command()
async def roll(ctx, *, param:re_convert=re_convert.defaults):
"""Rolls a dice in NdN format."""
rolls, limit = map(int, param["dice"].split('d'))
result = ', '.join(str(random.randint(1, limit)) for r in range(rolls))
await ctx.send(result)
re_convert = RegExArgConverter(
r"(\S+)",
choices = Parameter(shlex.split),
)
@bot.command(description='For when you wanna settle the score some other way')
async def choose(ctx, *, param:re_convert=re_convert.defaults):
"""Chooses between multiple choices."""
await ctx.send(random.choice(param["choices"]))
re_convert = RegExArgConverter(
r"(\d+)(?:\ (\S+))?",
times = Parameter(int),
content = Parameter(default='repeating...'),
)
@bot.command()
async def repeat(ctx, *, param:re_convert=re_convert.defaults):
"""Repeats a message multiple times."""
for i in range(param["times"]):
await ctx.send(param["content"])
re_convert = RegExArgConverter(
r"(\S+)",
member = Parameter(discord.Member),
)
@bot.command()
async def joined(ctx, *, param:re_convert=re_convert.defaults):
"""Says when a member joined."""
await ctx.send('{0.name} joined in {0.joined_at}'.format(param["member"]))
@bot.group()
async def cool(ctx):
"""Says if a user is cool.
In reality this just checks if a subcommand is being invoked.
"""
if ctx.invoked_subcommand is None:
await ctx.send('No, {0.subcommand_passed} is not cool'.format(ctx))
@cool.command(name='bot')
async def _bot(ctx):
"""Is the bot cool?"""
await ctx.send('Yes, the bot is cool.')
bot.run('token')
| 3.296875
| 3
|
code/wheel_examples.py
|
garyjames/building-skills-oo-design-book
| 32
|
12779272
|
<filename>code/wheel_examples.py
"""
Building Skills in Object-Oriented Design V4
Wheel Examples
"""
from typing import List, Any
import random
Bin = Any
class Wheel_RNG:
def __init__(self, bins: List[Bin], rng: random.Random=None) -> None:
self.bins = bins
self.rng = rng or random.Random()
def choose(self) -> Bin:
return self.rng.choice(self.bins)
class Wheel:
def __init__(self, bins: List[Bin]) -> None:
self.bins = bins
self.rng = random.Random()
def choose(self) -> Bin:
return self.rng.choice(self.bins)
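# Illustrative usage sketch (added for clarity; the integer bins are placeholders):
if __name__ == "__main__":
    wheel = Wheel(list(range(38)))                       # e.g. a 38-bin roulette wheel
    print(wheel.choose())                                # random bin
    seeded = Wheel_RNG([1, 2, 3], rng=random.Random(42))
    print(seeded.choose())                               # reproducible with a seeded RNG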
| 3.703125
| 4
|
evaluate.py
|
sYeaLumin/SketchGNN
| 11
|
12779273
|
import os
import ndjson
import json
import time
from options import TestOptions
from framework import SketchModel
from utils import load_data
from writer import Writer
import numpy as np
from evalTool import *
def run_eval(opt=None, model=None, loader=None, dataset='test', write_result=False):
if opt is None:
opt = TestOptions().parse()
if model is None:
model = SketchModel(opt)
if loader is None:
loader = load_data(opt, datasetType=dataset, permutation=opt.permutation)
# print(len(loader))
if opt.eval_way == 'align':
predictList, lossList = eval_align_batchN(model, loader, P=opt.points_num)
elif opt.eval_way == 'unalign':
predictList, lossList = eval_unalign_batch1(model, loader)
else:
raise NotImplementedError('eval_way {} not implemented!'.format(opt.eval_way))
# print(predictList.shape)
testData = []
with open(os.path.join('data', opt.dataset, 'train',
'{}_{}.ndjson'.format(opt.class_name, dataset)), 'r') as f:
testData = ndjson.load(f)
if opt.metric_way == 'wlen':
p_metric_list, c_metric_list = eval_with_len(testData, predictList)
elif opt.metric_way == 'wolen':
p_metric_list, c_metric_list = eval_without_len(testData, predictList)
else:
raise NotImplementedError('metric_way {} not implemented!'.format(opt.metric_way))
if write_result:
testData = get_eval_result(testData, predictList)
result_path = os.path.join('data', opt.dataset, 'train', '{}_{}.ndjson'.format(opt.class_name, 'res'))
with open(result_path, 'w') as f:
ndjson.dump(testData, f)
loss_avg = np.average(lossList)
P_metric = np.average(p_metric_list)
C_metric = np.average(c_metric_list)
# print('P_metric:{:.4}%\tC_metric:{:.4}%'.format(P_metric*100, C_metric*100))
return loss_avg, P_metric, C_metric
if __name__ == "__main__":
_, P_metric, C_metric = run_eval(write_result=True)
print('P_metric:{:.4}%\tC_metric:{:.4}%'.format(P_metric*100, C_metric*100))
| 2.125
| 2
|
tests/modeling/test_split_modeled_energy_trace.py
|
tsennott/eemeter
| 0
|
12779274
|
<reponame>tsennott/eemeter
import tempfile
from datetime import datetime
import pandas as pd
import numpy as np
from numpy.testing import assert_allclose
import pytest
import pytz
from eemeter.modeling.formatters import ModelDataFormatter
from eemeter.modeling.models.seasonal import SeasonalElasticNetCVModel
from eemeter.modeling.split import SplitModeledEnergyTrace
from eemeter.structures import (
EnergyTrace,
ModelingPeriod,
ModelingPeriodSet,
)
from eemeter.testing.mocks import MockWeatherClient
from eemeter.weather import ISDWeatherSource
@pytest.fixture
def trace():
data = {
"value": np.tile(1, (365,)),
"estimated": np.tile(False, (365,)),
}
columns = ["value", "estimated"]
index = pd.date_range('2000-01-01', periods=365, freq='D', tz=pytz.UTC)
df = pd.DataFrame(data, index=index, columns=columns)
return EnergyTrace("ELECTRICITY_CONSUMPTION_SUPPLIED", df, unit="KWH")
@pytest.fixture
def mock_isd_weather_source():
tmp_url = "sqlite:///{}/weather_cache.db".format(tempfile.mkdtemp())
ws = ISDWeatherSource("722880", tmp_url)
ws.client = MockWeatherClient()
return ws
@pytest.fixture
def modeling_period_set():
modeling_period_1 = ModelingPeriod(
"BASELINE",
end_date=datetime(2000, 9, 1, tzinfo=pytz.UTC),
)
modeling_period_2 = ModelingPeriod(
"REPORTING",
start_date=datetime(2001, 1, 1, tzinfo=pytz.UTC),
)
modeling_periods = {
"modeling_period_1": modeling_period_1,
"modeling_period_2": modeling_period_2,
}
grouping = [
("modeling_period_1", "modeling_period_2"),
]
return ModelingPeriodSet(modeling_periods, grouping)
def test_basic_usage(trace, modeling_period_set, mock_isd_weather_source):
# create SplitModeledEnergyTrace
formatter = ModelDataFormatter('D')
model_mapping = {
'modeling_period_1': SeasonalElasticNetCVModel(65, 65),
'modeling_period_2': SeasonalElasticNetCVModel(65, 65),
}
smet = SplitModeledEnergyTrace(
trace, formatter, model_mapping, modeling_period_set)
# fit normally
outputs = smet.fit(mock_isd_weather_source)
assert 'modeling_period_1' in smet.fit_outputs
assert 'modeling_period_2' in smet.fit_outputs
assert len(smet.fit_outputs) == 2
assert outputs['modeling_period_1']['status'] == 'SUCCESS'
assert outputs['modeling_period_1']['start_date'] == \
datetime(2000, 1, 1, tzinfo=pytz.UTC)
assert outputs['modeling_period_1']['end_date'] == \
datetime(2000, 9, 1, tzinfo=pytz.UTC)
assert outputs['modeling_period_1']['n_rows'] == 245
index = pd.date_range('2001-01-01', periods=6, freq='D', tz=pytz.UTC)
demand_fixture_data = \
smet.formatter.create_demand_fixture(index, mock_isd_weather_source)
mp1_pred, variance = smet.predict(
'modeling_period_1', demand_fixture_data, summed=False)
mp2_pred = smet.predict('modeling_period_2', demand_fixture_data)
assert mp1_pred.shape == (6,)
assert mp2_pred is None
assert variance > 0
with pytest.raises(KeyError):
smet.predict('modeling_period_3', demand_fixture_data)
def callable_(formatter, model, returnme):
return returnme
mp1_deriv = smet.compute_derivative(
'modeling_period_1', callable_, {"returnme": "A"})
mp2_deriv = smet.compute_derivative(
'modeling_period_2', callable_, {"returnme": "A"})
assert mp1_deriv == "A"
assert mp2_deriv is None
pred, variance = smet.predict(
'modeling_period_1', demand_fixture_data, summed=True)
# predict summed
assert_allclose(pred, 6.035919)
assert variance > 0
# bad weather source
smet.fit(None)
assert outputs['modeling_period_1']['status'] == 'FAILURE'
def test_bad_weather_source(trace, modeling_period_set):
# create SplitModeledEnergyTrace
formatter = ModelDataFormatter('D')
model_mapping = {
'modeling_period_1': SeasonalElasticNetCVModel(65, 65),
'modeling_period_2': SeasonalElasticNetCVModel(65, 65),
}
smet = SplitModeledEnergyTrace(
trace, formatter, model_mapping, modeling_period_set)
# need to see that it gives a data sufficiency exception
outputs = smet.fit(None)
assert 'DataSufficiencyException' in outputs['modeling_period_1']['traceback']
assert 'DataSufficiencyException' in outputs['modeling_period_2']['traceback']
| 2.140625
| 2
|
AutoApiTest/apps.py
|
github-xiaoh/httpRunnerManager
| 1
|
12779275
|
<reponame>github-xiaoh/httpRunnerManager
from django.apps import AppConfig
class AutoapitestConfig(AppConfig):
name = 'AutoApiTest'
| 1.257813
| 1
|
2020/day6/1.py
|
darkterbear/advent-of-code-2015
| 0
|
12779276
|
import re
file = open('./input', 'r')
lines = file.readlines()
lines = list(map(lambda line: line[:-1], lines))
sum = 0
exists = set()
for line in lines:
if len(line) == 0:
sum += len(exists)
exists = set()
else:
for c in line:
exists.add(c)
sum += len(exists)  # count the final group even when the input lacks a trailing blank line
print(sum)
| 3.609375
| 4
|
efficientdet/test_now.py
|
Rambledeng/automl
| 0
|
12779277
|
<reponame>Rambledeng/automl
import os
import sys
import tensorflow.compat.v1 as tf
import PIL
import wget
def download(m):
if m not in os.listdir():
# !wget https://storage.googleapis.com/cloud-tpu-checkpoints/efficientdet/coco/{m}.tar.gz
dl_url = 'https://storage.googleapis.com/cloud-tpu-checkpoints/efficientdet/coco/'+m+'.tar.gz'
wget.download(dl_url)
        os.system('tar zxf {}.tar.gz'.format(m))  # was an IPython "!" shell escape
ckpt_path = os.path.join(os.getcwd(), m)
return ckpt_path
if __name__ == '__main__':
MODEL = 'efficientdet-d0' #@param
# Download checkpoint.
ckpt_path = download(MODEL)
print('Use model in {}'.format(ckpt_path))
# Prepare image and visualization settings.
image_url = 'https://user-images.githubusercontent.com/11736571/77320690-099af300-6d37-11ea-9d86-24f14dc2d540.png'#@param
image_name = 'test0.png' #@param
# wget.download(image_url, 'img.png')
img_path = os.path.join(os.getcwd(), 'test0.png')
min_score_thresh = 0.35 #@param
max_boxes_to_draw = 200 #@param
line_thickness = 2#@param
# Get the largest of height/width and round to 128.
image_size = max(PIL.Image.open(img_path).size)
# In case you need to specify different image size or batch size or #boxes, then
# you need to export a new saved model and run the inferernce.
serve_image_out = 'serve_image_out'
    os.makedirs(serve_image_out, exist_ok=True)  # was "!mkdir {serve_image_out}"
saved_model_dir = 'savedmodel'
# !rm -rf {saved_model_dir}
# # Step 1: export model
# !python model_inspect.py --runmode=saved_model \
# --model_name=efficientdet-d0 --ckpt_path=efficientdet-d0 \
# --hparams="image_size=1920x1280" --saved_model_dir={saved_model_dir}
# Step 2: do inference with saved model.
    os.system(
        'python model_inspect.py --runmode=saved_model_infer '
        '--model_name=efficientdet-d0 --saved_model_dir={} '
        '--input_image=test0.png --output_image_dir={} '
        '--min_score_thresh={} --max_boxes_to_draw={}'.format(
            saved_model_dir, serve_image_out, min_score_thresh, max_boxes_to_draw))
from IPython import display
display.display(display.Image(os.path.join(serve_image_out, '0.jpg')))
| 2.625
| 3
|
application/response/LinearClassifier.py
|
librairy/explainable-qa
| 1
|
12779278
|
<reponame>librairy/explainable-qa
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 25 12:54:44 2021
@author: cbadenes
"""
import logging
import os
import pandas
import joblib
import spacy
from time import time
from scipy.sparse import csr_matrix
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
logger = logging.getLogger(__name__)
class TRECClassifier:
def __init__(self):
self.en_nlp_l = spacy.load("en_core_web_lg")
training_data_path = os.path.join("./application/question/train/train_5500.csv")
self.df_question_train = pandas.read_csv(training_data_path, sep='|', header=0, encoding = "ISO-8859-1")
self.df_question_class = self.df_question_train.pop('Class')
self.df_question_train.pop('Question')
self.df_question_train.pop('WH-Bigram')
self.df_question_train = pandas.get_dummies(self.df_question_train)
self.question_clf = joblib.load("./application/question/question_classifier.pkl")
print("TREC Question Classifier ready")
def get_category(self, question):
en_doc_l = self.en_nlp_l(u'' + question)
question_class = self.classify_question(en_doc=en_doc_l, df_question_train=self.df_question_train)
return question_class
def pre_process(self,dta):
return pandas.get_dummies(dta)
def remove_irrelevant_features(self, df_question):
df_question_class = df_question.pop('Class')
df_question.pop('Question')
df_question.pop('WH-Bigram')
return df_question_class
def transform_data_matrix(self, df_question_train, df_question_predict):
df_question_train_columns = list(df_question_train.columns)
df_question_predict_columns = list(df_question_predict.columns)
df_question_trans_columns = list(set(df_question_train_columns + df_question_predict_columns))
logger.debug("Union Columns: {0}".format(len(df_question_trans_columns)))
trans_data_train = {}
for feature in df_question_trans_columns:
if feature not in df_question_train:
trans_data_train[feature] = [0 for i in range(len(df_question_train.index))]
else:
trans_data_train[feature] = list(df_question_train[feature])
df_question_train = pandas.DataFrame(trans_data_train)
logger.debug("Training data: {0}".format(df_question_train.shape))
df_question_train = csr_matrix(df_question_train)
trans_data_predict = {}
for feature in trans_data_train:
if feature not in df_question_predict:
trans_data_predict[feature] = 0
else:
trans_data_predict[feature] = list(df_question_predict[feature]) # KeyError
df_question_predict = pandas.DataFrame(trans_data_predict)
logger.debug("Target data: {0}".format(df_question_predict.shape))
df_question_predict = csr_matrix(df_question_predict)
return df_question_train, df_question_predict
def naive_bayes_classifier(self, x_train, y, x_predict):
gnb = GaussianNB()
gnb.fit(x_train, y)
prediction = gnb.predict(x_predict)
return prediction
def support_vector_machine(self, df_question_train, df_question_class, df_question_predict):
lin_clf = LinearSVC()
lin_clf.fit(df_question_train, df_question_class)
prediction = lin_clf.predict(df_question_predict)
return prediction, lin_clf
def predict_question_class(self, question_clf, df_question_predict):
return question_clf.predict(df_question_predict), question_clf
def load_classifier_model(self, model_type="linearSVC"):
# HELP: Not using the persistent classifier. SVC fails when it encounters previously unseen features at training.
# Refer the comment in query_container
training_model_path = os.path.join("./application/question/question_classifier.pkl")
if model_type == "linearSVC":
return joblib.load(training_model_path)
def get_question_predict_data(self, en_doc=None, df_question_test=None):
if df_question_test is None:
# currently only supports single sentence classification
sentence_list = list(en_doc.sents)[0:1]
else:
sentence_list = df_question_test["Question"].tolist()
import spacy
en_nlp = spacy.load("en_core_web_lg")
question_data_frame = []
for sentence in sentence_list:
wh_bi_gram = []
root_token, wh_pos, wh_nbor_pos, wh_word = [""] * 4
if df_question_test is not None:
en_doc = en_nlp(u'' + sentence)
sentence = list(en_doc.sents)[0]
for token in sentence:
if token.tag_ == "WDT" or token.tag_ == "WP" or token.tag_ == "WP$" or token.tag_ == "WRB":
wh_pos = token.tag_
wh_word = token.text
wh_bi_gram.append(token.text)
wh_bi_gram.append(str(en_doc[token.i + 1]))
wh_nbor_pos = en_doc[token.i + 1].tag_
if token.dep_ == "ROOT":
root_token = token.tag_
question_data_frame_obj = {'WH': wh_word, 'WH-POS': wh_pos, 'WH-NBOR-POS': wh_nbor_pos, 'Root-POS': root_token}
question_data_frame.append(question_data_frame_obj)
logger.debug("WH : {0} | WH-POS : {1} | WH-NBOR-POS : {2} | Root-POS : {3}"
.format(wh_word, wh_pos, wh_nbor_pos, root_token))
df_question = pandas.DataFrame(question_data_frame)
return df_question
def classify_question(self, en_doc=None, df_question_train=None, df_question_test=None):
""" Determine whether this is a who, what, when, where or why question """
if df_question_test is None:
df_question_predict = self.get_question_predict_data(en_doc=en_doc)
else:
df_question_predict = self.get_question_predict_data(df_question_test=df_question_test)
df_question_predict = self.pre_process(df_question_predict)
df_question_train, df_question_predict = self.transform_data_matrix(self.df_question_train, df_question_predict)
#logger.debug("Classifier: {0}".format(question_clf))
predicted_class, svc_clf = self.support_vector_machine(df_question_train, self.df_question_class, df_question_predict)
if df_question_test is not None:
return predicted_class, svc_clf, self.df_question_class, df_question_train
else:
return predicted_class
| 2.640625
| 3
|
bot/python/handlers/message_handlers.py
|
darrso/parse_channels
| 0
|
12779279
|
import sys
import time
import aiogram.types
from aiogram import types, Dispatcher, Bot
from aiogram.dispatcher import FSMContext
sys.path.append('bot')
from database.sess import get_users_by_link
from database.sess import create_new_user, check_on_off, switch_on_off, check_parse_channels, check_channel, \
add_channels, reemove_channels
from python.States.StatesClasses import Adding, Removing
from python.config import bToken, admin_chat, admin_id
bot = Bot(token=bToken)
async def start_command(message: types.Message):
await message.answer("Hey!\n"
"I am a parser for telegram channels.\n\n"
"For the main menu, send - /menu.\n"
"Everything will be written in detail there.")
await message.answer_sticker(r'CAACAgIAAxkBAAIKpWHbI3SO<KEY>')
    # Add the new user to the database
await create_new_user(message.from_user.id, message.from_user.username, 'on')
async def main_menu(message: types.Message):
await message.answer(f"Welcome to the main menu, {message.from_user.username}!\n"
f"I am a bot that parses telegram channels.\n\n"
"Here are my commands:\n"
"/menu - main menu\n"
"/parse_channels - a list of channels you are following\n"
"/add_parse_channel - add to the channel list\n"
"/remove_parse_channel - remove a channel from the list\n"
"/off - turn off parsing of your channels\n"
"/on - enable parsing of your channels\n\n"
f"P.S. now - {await check_on_off(message.from_user.id)}\n\n"
"My creator is @darrso")
await message.answer_sticker(r'CAACAgIAAxkBAAIKrmHbJw6ckgI0IrCLe_TJrbUyCJ_xAALRAAM27BsFCW1Sl32PAAEsIwQ')
async def switch_parametr(message: types.Message):
text = (message.text).replace("/", "")
if text == 'on':
await switch_on_off('on', message.from_user.id)
elif text == 'off':
await switch_on_off('off', message.from_user.id)
await message.answer(f'Parameter changed to: {text}'
f'\nSend /menu to check it out!')
async def parse_channel(message: types.Message):
data = (await check_parse_channels(message.from_user.id))
if data:
await message.answer(f'Here is the list of channels you are parsing:\n{data}\n\n'
f'Delete channel - /remove_parse_channel\n'
f'Add channel - /add_parse_channel\n'
f'Main menu - /menu')
else:
await message.answer("You are not parsing any channels yet.\n\nTo add channels send /add_parse_channel")
async def add_channel(message: types.Message):
await message.answer("To add a new channel send LINK TO CHANNEL\n\n"
"Example:\n"
"https://t.me/test\n\n"
"P.S. The bot cannot join private channels.\n"
"You can add a channel to the list of those that you are parsing, but the bot will subscribe to it only after a while\n"
"(you will receive a notification about this)")
await Adding.first.set()
async def adding_channel(message: types.Message, state: FSMContext):
res = await check_channel(message.text, message.from_user.id)
if res == 'NOT LINK!':
await message.answer('This link is not working!\n'
'Try again - /add_parse_channel')
elif res:
await bot.send_message(chat_id=admin_chat, text="/add " + message.text)
await add_channels(message.from_user.id, message.text)
await message.answer('Successfully!\n\nSend /menu for main menu!')
else:
if await add_channels(message.from_user.id, message.text):
await message.answer('Successfully!\n\nSend /menu for main menu!')
else:
await message.answer('This channel is already on your list!\n\n'
'View a list of your channels - /parse_channels')
await state.finish()
async def remove_channel(message: types.Message):
data = (await check_parse_channels(message.from_user.id))
if data == 'No one channels':
await message.answer("You cannot remove telegram channels from the list, because you have not added any!\n\n"
"Checking the list of channels - /parse_channels")
else:
await message.answer("Choose number of channel and send it!\n"
"Example:\n"
"1\n\n"
f"Here is the list of channels you are parsing:\n{data}")
await Removing.first.set()
async def removing_channel(message: types.Message, state: FSMContext):
data = await reemove_channels(message.from_user.id, message.text)
if data:
await message.answer('Success!\n\n'
'List of your channels - /parse_channels')
else:
await message.answer('Error!\n\n'
'Try again - /remove_parse_channel\n'
'Main menu - /menu')
await state.finish()
async def new_post(message: types.Message):
try:
        time.sleep(2)
if message.chat.id == admin_chat:
if message.text[0:9] != "/NEW_POST":
pass
else:
messageid = message.message_id + 1
users = await get_users_by_link(message.text[10:])
if users:
for i in users:
await bot.forward_message(chat_id=int(i), from_chat_id=(admin_chat), message_id=messageid)
else:
pass
except:
pass
def register_message_handlers(dp: Dispatcher):
dp.register_message_handler(start_command, commands="start")
dp.register_message_handler(main_menu, commands="menu")
dp.register_message_handler(switch_parametr, commands=['on', 'off'])
dp.register_message_handler(parse_channel, commands="parse_channels")
dp.register_message_handler(add_channel, commands='add_parse_channel')
dp.register_message_handler(adding_channel, state=Adding.first)
dp.register_message_handler(remove_channel, commands='remove_parse_channel')
dp.register_message_handler(removing_channel, state=Removing.first)
dp.register_channel_post_handler(new_post, lambda message: message.text[0:9] == "/NEW_POST")
| 2.421875
| 2
|
art-generator/genius.py
|
GFX-Automated/GFX
| 0
|
12779280
|
<filename>art-generator/genius.py
#!/usr/bin/env python3.8
import os
import lyricsgenius as genius
credentials = {
"id": "",
"secret": "",
"access_token": "",
}
os.environ["GENIUS_ACCESS_TOKEN"] = credentials["access_token"]
GENIUS_ACCESS_TOKEN = os.environ["GENIUS_ACCESS_TOKEN"]  # the bare name alone would raise a NameError
| 1.6875
| 2
|
CommandLineCalculator.py
|
CoderDuino/CommandLineCalculator
| 0
|
12779281
|
import time
def is_number(s):
try:
float(s)
return True
except ValueError:
pass
return False
def ValidateInput(equation):
if len(equation) != 3:
return False
if not (is_number(equation[0]) and is_number(equation[2])):
return False
if not equation[1] in ["*", "/", "-", "+"]:
return False
return True
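# Illustrative sanity checks (added for clarity): the calculator expects exactly
# three space-separated tokens such as "3 + 4".
assert ValidateInput(["3", "+", "4"])
assert not ValidateInput(["3", "+"])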
print("Welcome to command line calculator!")
time.sleep(1) # sleep for a second
while True:
equation = input("Enter your calculation: ") # look for calculation
if equation.lower() in ["exit", "stop", "quit", "end", "leave", "done", "finish"]:
break
if equation.lower() in ["expression", "test", "expr", "exec"]:
expressionInput = input("Enter Expression: ")
        print(eval(expressionInput))  # evaluate the entered expression string (note: eval runs arbitrary code)
continue
equation = equation.split(" ") # split equation into three pieces
if not ValidateInput(equation):
continue
equation[0] = float(equation[0])
equation[2] = float(equation[2])
if equation[1] == "*":
print(str(equation[0] * equation[2]))
elif equation[1] == "/":
print(str(equation[0] / equation[2]))
elif equation[1] == "+":
print(str(equation[0] + equation[2]))
elif equation[1] == "-":
print(str(equation[0] - equation[2]))
| 4
| 4
|
HW Tasks/hw6.py
|
bzhumakova/FirstProject
| 2
|
12779282
|
# Given a list of spam e-mail objects received in a mailbox, use the split method to find the word
# that occurs most often across the list (remember that split turns a string into a list).
# Store the found word in a variable.
# Then a single string is given as input - check whether it is spam or not.
data = [
{'text':'oh hi duuuude how r uy??check this 1xbet'},
{'text':'Dear <NAME>, i am <NAME> i represent 1xbet company.Best bet service'},
{'text':'wooooh yoow harry look at my jackpot 100000000$ at 1xbet service'},
{'text':'Harry , today i saw the man who looks like Hawkeye from Avengers on 100% and he dont use 1xbet service'},
]
final_mail = 'Hello Harry, my name is Maksim, Im still waiting for the letter from Hogwarts'
final_words = final_mail.split()
# print(final_words)
q_spam = 0
spam_word = ''
database = []
for mail in data:
    words = mail['text'].lower().split()
    database.extend(words)
for word in database:
quantity=database.count(word)
#print(quantity)
if quantity>q_spam:
q_spam=quantity
spam_word=word
print(q_spam,spam_word)
if spam_word in final_mail.lower():
print('Mail is not OK')
else:
print("Mail is OK")
| 3.5625
| 4
|
00_Code/01_LeetCode/590_N-aryTreePostorderTraversal.py
|
KartikKannapur/Data_Structures_and_Algorithms_Python
| 1
|
12779283
|
"""
Given an n-ary tree, return the postorder traversal of its nodes' values.
For example, given a 3-ary tree:
Return its postorder traversal as: [5,6,3,2,4,1].
"""
"""
# Definition for a Node.
class Node(object):
def __init__(self, val, children):
self.val = val
self.children = children
"""
class Solution(object):
def postorder(self, root):
"""
:type root: Node
:rtype: List[int]
"""
"""
Method 1: BFS + Stack + Return in Reverse Order
Your runtime beats 81.43 % of python submissions.
"""
# #Boundary Conditions:
if not root:
return []
res = []
stack = [root]
while stack:
node = stack.pop()
for ele in node.children:
stack.append(ele)
res += [node.val]
return res[::-1]
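# Illustrative usage sketch (added for clarity): a minimal Node stand-in matching the
# commented definition above, reproducing the [5,6,3,2,4,1] example from the docstring.
if __name__ == "__main__":
    class Node(object):
        def __init__(self, val, children):
            self.val = val
            self.children = children
    root = Node(1, [Node(3, [Node(5, []), Node(6, [])]), Node(2, []), Node(4, [])])
    assert Solution().postorder(root) == [5, 6, 3, 2, 4, 1]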
| 4.09375
| 4
|
losses.py
|
Liut2016/ecg-supcontrast
| 2
|
12779284
|
"""
Author: <NAME> (<EMAIL>)
Date: May 07, 2020
"""
from __future__ import print_function
import torch
import torch.nn as nn
import numpy as np
from itertools import combinations
class SupConLoss(nn.Module):
"""Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf.
It also supports the unsupervised contrastive loss in SimCLR"""
def __init__(self, temperature=0.07, contrast_mode='all',
base_temperature=0.07):
super(SupConLoss, self).__init__()
self.temperature = temperature
self.contrast_mode = contrast_mode
self.base_temperature = base_temperature
def forward(self, features, labels=None, mask=None):
"""Compute loss for model. If both `labels` and `mask` are None,
it degenerates to SimCLR unsupervised loss:
https://arxiv.org/pdf/2002.05709.pdf
Args:
features: hidden vector of shape [bsz, n_views, ...].
labels: ground truth of shape [bsz].
mask: contrastive mask of shape [bsz, bsz], mask_{i,j}=1 if sample j
has the same class as sample i. Can be asymmetric.
Returns:
A loss scalar.
"""
device = (torch.device('cuda')
if features.is_cuda
else torch.device('cpu'))
if len(features.shape) < 3:
raise ValueError('`features` needs to be [bsz, n_views, ...],'
'at least 3 dimensions are required')
if len(features.shape) > 3:
features = features.view(features.shape[0], features.shape[1], -1)
batch_size = features.shape[0]
if labels is not None and mask is not None:
raise ValueError('Cannot define both `labels` and `mask`')
elif labels is None and mask is None:
mask = torch.eye(batch_size, dtype=torch.float32).to(device)
elif labels is not None:
labels = labels.contiguous().view(-1, 1)
if labels.shape[0] != batch_size:
raise ValueError('Num of labels does not match num of features')
mask = torch.eq(labels, labels.T).float().to(device)
else:
mask = mask.float().to(device)
contrast_count = features.shape[1]
contrast_feature = torch.cat(torch.unbind(features, dim=1), dim=0)
if self.contrast_mode == 'one':
anchor_feature = features[:, 0]
anchor_count = 1
elif self.contrast_mode == 'all':
anchor_feature = contrast_feature
anchor_count = contrast_count
else:
raise ValueError('Unknown mode: {}'.format(self.contrast_mode))
# compute logits
anchor_dot_contrast = torch.div(
torch.matmul(anchor_feature, contrast_feature.T),
self.temperature)
# for numerical stability
logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)
logits = anchor_dot_contrast - logits_max.detach()
a = anchor_feature.detach().cpu().numpy()
b = contrast_feature.T.detach().cpu().numpy()
c = anchor_dot_contrast.detach().cpu().numpy()
d = np.matmul(a, b)
# tile mask
mask = mask.repeat(anchor_count, contrast_count)
# mask-out self-contrast cases
logits_mask = torch.scatter(
torch.ones_like(mask),
1,
torch.arange(batch_size * anchor_count).view(-1, 1).to(device),
0
)
mask = mask * logits_mask
# compute log_prob
exp_logits = torch.exp(logits) * logits_mask
log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))
# compute mean of log-likelihood over positive
mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)
# loss
loss = - (self.temperature / self.base_temperature) * mean_log_prob_pos
loss = loss.view(anchor_count, batch_size).mean()
return loss
def testNan(self, x):
x = x.detach().cpu().numpy()
return np.isnan(x).any()
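# Illustrative usage sketch (added for clarity; batch size, view count and feature
# dimension below are arbitrary). Features follow the [bsz, n_views, dim] layout
# described in the docstring and should be L2-normalised.
def _supcon_usage_example():
    criterion = SupConLoss(temperature=0.07)
    features = nn.functional.normalize(torch.randn(8, 2, 128), dim=2)
    labels = torch.randint(0, 4, (8,))
    return criterion(features, labels)        # scalar loss tensor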
# Loss used for contrastive learning in CLOCS
def obtain_contrastive_loss(latent_embeddings, pids, trial):
""" Calculate NCE Loss For Latent Embeddings in Batch
Args:
latent_embeddings (torch.Tensor): embeddings from model for different perturbations of same instance (BxHxN)
pids (list): patient ids of instances in batch
Outputs:
loss (torch.Tensor): scalar NCE loss
"""
if trial in ['CMSC', 'CMLC', 'CMSMLC']:
        pids = np.array(pids, dtype=object)  # np.object was removed in recent NumPy releases
pid1, pid2 = np.meshgrid(pids, pids)
pid_matrix = pid1 + '-' + pid2
pids_of_interest = np.unique(pids + '-' + pids) # unique combinations of pids of interest i.e. matching
bool_matrix_of_interest = np.zeros((len(pids), len(pids)))
for pid in pids_of_interest:
bool_matrix_of_interest += pid_matrix == pid
rows1, cols1 = np.where(np.triu(bool_matrix_of_interest, 1))
rows2, cols2 = np.where(np.tril(bool_matrix_of_interest, -1))
nviews = set(range(latent_embeddings.shape[2]))
view_combinations = combinations(nviews, 2)
loss = 0
ncombinations = 0
loss_terms = 2
    # If you hit "UnboundLocalError: local variable 'loss_terms' referenced before assignment",
    # just restart PyCharm!
for combination in view_combinations:
view1_array = latent_embeddings[:, :, combination[0]] # (BxH)
view2_array = latent_embeddings[:, :, combination[1]] # (BxH)
norm1_vector = view1_array.norm(dim=1).unsqueeze(0)
norm2_vector = view2_array.norm(dim=1).unsqueeze(0)
sim_matrix = torch.mm(view1_array, view2_array.transpose(0, 1))
norm_matrix = torch.mm(norm1_vector.transpose(0, 1), norm2_vector)
temperature = 0.1
argument = sim_matrix / (norm_matrix * temperature)
sim_matrix_exp = torch.exp(argument)
if trial == 'CMC':
""" Obtain Off Diagonal Entries """
# upper_triangle = torch.triu(sim_matrix_exp,1)
# lower_triangle = torch.tril(sim_matrix_exp,-1)
# off_diagonals = upper_triangle + lower_triangle
diagonals = torch.diag(sim_matrix_exp)
""" Obtain Loss Terms(s) """
loss_term1 = -torch.mean(torch.log(diagonals / torch.sum(sim_matrix_exp, 1)))
loss_term2 = -torch.mean(torch.log(diagonals / torch.sum(sim_matrix_exp, 0)))
loss += loss_term1 + loss_term2
loss_terms = 2
elif trial == 'SimCLR':
self_sim_matrix1 = torch.mm(view1_array, view1_array.transpose(0, 1))
self_norm_matrix1 = torch.mm(norm1_vector.transpose(0, 1), norm1_vector)
temperature = 0.1
argument = self_sim_matrix1 / (self_norm_matrix1 * temperature)
self_sim_matrix_exp1 = torch.exp(argument)
self_sim_matrix_off_diagonals1 = torch.triu(self_sim_matrix_exp1, 1) + torch.tril(self_sim_matrix_exp1, -1)
self_sim_matrix2 = torch.mm(view2_array, view2_array.transpose(0, 1))
self_norm_matrix2 = torch.mm(norm2_vector.transpose(0, 1), norm2_vector)
temperature = 0.1
argument = self_sim_matrix2 / (self_norm_matrix2 * temperature)
self_sim_matrix_exp2 = torch.exp(argument)
self_sim_matrix_off_diagonals2 = torch.triu(self_sim_matrix_exp2, 1) + torch.tril(self_sim_matrix_exp2, -1)
denominator_loss1 = torch.sum(sim_matrix_exp, 1) + torch.sum(self_sim_matrix_off_diagonals1, 1)
denominator_loss2 = torch.sum(sim_matrix_exp, 0) + torch.sum(self_sim_matrix_off_diagonals2, 0)
diagonals = torch.diag(sim_matrix_exp)
loss_term1 = -torch.mean(torch.log(diagonals / denominator_loss1))
loss_term2 = -torch.mean(torch.log(diagonals / denominator_loss2))
loss += loss_term1 + loss_term2
loss_terms = 2
elif trial in ['CMSC', 'CMLC', 'CMSMLC']: # ours #CMSMLC = positive examples are same instance and same patient
triu_elements = sim_matrix_exp[rows1, cols1]
tril_elements = sim_matrix_exp[rows2, cols2]
diag_elements = torch.diag(sim_matrix_exp)
triu_sum = torch.sum(sim_matrix_exp, 1)
tril_sum = torch.sum(sim_matrix_exp, 0)
loss_diag1 = -torch.mean(torch.log(diag_elements / triu_sum))
loss_diag2 = -torch.mean(torch.log(diag_elements / tril_sum))
loss_triu = -torch.mean(torch.log(triu_elements / triu_sum[rows1]))
loss_tril = -torch.mean(torch.log(tril_elements / tril_sum[cols2]))
loss = loss_diag1 + loss_diag2
loss_terms = 2
if len(rows1) > 0:
loss += loss_triu # technically need to add 1 more term for symmetry
loss_terms += 1
if len(rows2) > 0:
loss += loss_tril # technically need to add 1 more term for symmetry
loss_terms += 1
# print(loss,loss_triu,loss_tril)
ncombinations += 1
loss = loss / (loss_terms * ncombinations)
return loss
| 2.78125
| 3
|
tests/test_helper_node.py
|
GNaive/naive-rete
| 54
|
12779285
|
# -*- coding: utf-8 -*-
from rete import Has, Filter, Rule
from rete.common import WME, Bind
from rete.network import Network
def test_filter_compare():
net = Network()
c0 = Has('spu:1', 'price', '$x')
f0 = Filter('$x>100')
f1 = Filter('$x<200')
f2 = Filter('$x>200 and $x<400')
f3 = Filter('$x>300')
p0 = net.add_production(Rule(c0, f0, f1))
p1 = net.add_production(Rule(c0, f2))
p2 = net.add_production(Rule(c0, f3))
net.add_wme(WME('spu:1', 'price', '100'))
net.add_wme(WME('spu:1', 'price', '150'))
net.add_wme(WME('spu:1', 'price', '300'))
assert len(p0.items) == 1
token = p0.items.pop()
assert token.get_binding('$x') == '150'
assert len(p1.items) == 1
token = p1.items.pop()
assert token.get_binding('$x') == '300'
assert not p2.items
def test_bind():
net = Network()
c0 = Has('spu:1', 'sales', '$x')
b0 = Bind('len(set($x) & set(range(1, 100)))', '$num')
f0 = Filter('$num > 0')
p0 = net.add_production(Rule(c0, b0, f0))
b1 = Bind('len(set($x) & set(range(100, 200)))', '$num')
p1 = net.add_production(Rule(c0, b1, f0))
b2 = Bind('len(set($x) & set(range(300, 400)))', '$num')
p2 = net.add_production(Rule(c0, b2, f0))
net.add_wme(WME('spu:1', 'sales', 'range(50, 110)'))
assert len(p0.items) == 1
assert len(p1.items) == 1
assert len(p2.items) == 0
t0 = p0.items[0]
t1 = p1.items[0]
assert t0.get_binding('$num') == 50
assert t1.get_binding('$num') == 10
| 2.078125
| 2
|
util/util.py
|
VMReyes/keypointgan
| 11
|
12779286
|
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
import time
# Converts a Tensor into an image array (numpy)
# |imtype|: the desired type of the converted numpy array
def tensor2im(input_image, imtype=np.uint8):
if isinstance(input_image, torch.Tensor):
image_tensor = input_image.data
else:
return input_image
image_numpy = image_tensor[0].cpu().float().numpy()
if image_numpy.shape[0] == 1:
image_numpy = (image_numpy - np.min(image_numpy)) / (np.max(image_numpy) - np.min(image_numpy))
image_numpy = image_numpy * 2 - 1
image_numpy = np.tile(image_numpy, (3, 1, 1))
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
image_numpy = np.clip(image_numpy, 0.0, 255.0)
return image_numpy.astype(imtype)
def diagnose_network(net, name='network'):
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
def save_image(image_numpy, image_path):
image_pil = Image.fromarray(image_numpy)
image_pil.save(image_path)
def print_numpy(x, val=True, shp=False):
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
def mkdirs(paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
class Timer(object):
def __init__(self, name=None, acc=False, avg=False):
self.name = name
self.acc = acc
self.avg = avg
self.total = 0.0
self.iters = 0
def __enter__(self):
self.start()
def __exit__(self, type, value, traceback):
self.stop()
def start(self):
self.tstart = time.time()
def stop(self):
self.iters += 1
self.total += time.time() - self.tstart
if not self.acc:
self.reset()
def reset(self):
name_string = ''
if self.name:
name_string = '[' + self.name + '] '
value = self.total
msg = 'Elapsed'
if self.avg:
value /= self.iters
msg = 'Avg Elapsed'
print('%s%s: %.4f' % (name_string, msg, value))
self.total = 0.0
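# Illustrative usage sketch (added for clarity): Timer works as a context manager or
# via explicit start()/stop(); acc=True accumulates timings until reset() is called.
def _timer_usage_example():
    with Timer('block'):                      # prints "[block] Elapsed: ..." on exit
        sum(range(100000))
    t = Timer('loop', acc=True, avg=True)
    for _ in range(3):
        t.start()
        sum(range(100000))
        t.stop()
    t.reset()                                 # prints "[loop] Avg Elapsed: ..."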
| 2.71875
| 3
|
gradertools/isolation/isolate_simple.py
|
david58/gradertools
| 0
|
12779287
|
import subprocess
import shutil
import os
import time
from .interface import IsolateInterface
class IsolateSimple(IsolateInterface):
def isolate(self, files, command, parameters, envvariables, directories, allowmultiprocess, stdinfile, stdoutfile):
if os.path.isdir("/tmp/gradertools/isolation/"):
shutil.rmtree("/tmp/gradertools/isolation/")
os.makedirs("/tmp/gradertools/isolation/")
box = "/tmp/gradertools/isolation/"
for file in files:
shutil.copy(file, os.path.join(box, os.path.basename(file)))
isolateio=" "
if stdinfile is not None:
isolateio+="< "+stdinfile
if stdoutfile is not None:
isolateio+="> "+stdoutfile
t0 = time.perf_counter()
out = subprocess.run(" ".join(["cd "+ box+ ";"]+[command]+parameters+[isolateio]), shell=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
t1 = time.perf_counter()
self._boxdir = box
self._status = 'OK'
self._runtime = t1-t0
self._walltime = t1-t0
self._maxrss = 0 # Maximum resident set size of the process (in kilobytes).
self._cswv = 0 # Number of context switches caused by the process giving up the CPU voluntarily.
self._cswf = 0 # Number of context switches forced by the kernel.
self._cgmem = 0 # Total memory use by the whole control group (in kilobytes).
self._exitcode = out.returncode
self._stdout = out.stdout
def clean(self):
shutil.rmtree("/tmp/gradertools/isolation/")
| 2.390625
| 2
|
apps/common/api/exceptions.py
|
YC-Cheung/hattori
| 1
|
12779288
|
from rest_framework import status
class APIException(Exception):
"""
    Generic API exception
"""
def __init__(self, message='', code=4999, status_code=status.HTTP_400_BAD_REQUEST):
self.message = message
self.code = code
self.status_code = status_code
@property
def data(self):
return {
'err_code': self.code,
'msg': self.message,
}
class AuthFailedException(APIException):
"""
    Raised when login authentication fails
"""
def __init__(self, message='Unauthenticated.', code=4001, status_code=status.HTTP_401_UNAUTHORIZED):
super().__init__(message, code, status_code)
class CrossDomainException(AuthFailedException):
"""
    Cross-domain token authentication
"""
def __init__(self, message='Invalid token.', code=4003, status_code=status.HTTP_403_FORBIDDEN):
super().__init__(message, code, status_code)
class AdminUserForbiddenException(AuthFailedException):
"""
    Backend administrator account is disabled
"""
def __init__(self, message='You have been banned.', code=4003, status_code=status.HTTP_403_FORBIDDEN):
super().__init__(message, code, status_code)
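# Illustrative usage sketch (added for clarity): each exception carries a serialisable
# payload via .data, which an exception handler could return as the response body.
def _exception_usage_example():
    try:
        raise AuthFailedException()
    except APIException as exc:
        return exc.status_code, exc.data      # (401, {'err_code': 4001, 'msg': 'Unauthenticated.'})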
| 2.609375
| 3
|
main_tts.py
|
archity/rg_text_to_sound
| 1
|
12779289
|
import os, sys
sys.path.append( os.path.join(os.path.dirname(os.path.abspath(__file__)),'tts_websocketserver','src') )
from tts_websocketserver.tts_server import run
if __name__ == '__main__':
run()
| 1.65625
| 2
|
libs/visualization/vis.py
|
FullStackD3vs/Detectron-PYTORCH
| 37
|
12779290
|
<reponame>FullStackD3vs/Detectron-PYTORCH<gh_stars>10-100
import cv2
import numpy as np
import PIL.ImageColor as ImageColor
STANDARD_COLORS = [
'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Crimson',
'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
'Chocolate', 'MistyRose', 'OliveDrab', 'Cornsilk', 'Cyan', 'Violet',
'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'Moccasin',
'NavajoWhite', 'OldLace', 'Olive', 'Orange', 'OrangeRed',
'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Wheat', 'White', 'Coral',
'WhiteSmoke', 'Yellow', 'YellowGreen', 'Beige', 'Bisque', 'CornflowerBlue'
]
STANDARD_COLORS = [
'darkblue', 'aqua', 'blueviolet', 'brown', 'chocolate', 'darkcyan', 'darkgreen', 'darkmagenta',
'darkolivegreen', 'darkturquoise', 'deeppink', 'deepskyblue', 'dodgerblue', 'gold', 'indigo',
'lawngreen', 'lightseagreen', 'limegreen', 'magenta', 'olive', 'orange', 'purple', 'seagreen',
'violet', 'yellowgreen', 'tomato', 'sienna',
] + STANDARD_COLORS
# STANDARD_COLORS.sort()
# for display
############################
def _to_color(indx, base):
    """Map a class index to a color tuple, returned as (r, g, b) scaled by 127."""
    base2 = base * base
    b = 2 - indx // base2
    r = 2 - (indx % base2) // base
    g = 2 - (indx % base2) % base
    return r * 127, g * 127, b * 127
# def get_color(indx, cls_num=-1):
# if indx < 0:
# return (255, 255, 255)
# if indx >= cls_num:
# return (23 * indx % 255, 47 * indx % 255, 137 * indx % 255)
# base = int(np.ceil(pow(cls_num, 1. / 3)))
# return _to_color(indx, base)
def get_color(indx, cls_num=-1):
return ImageColor.getrgb(STANDARD_COLORS[indx])[::-1] # BGR
def draw_detection(im, bboxes, scores=None, cls_inds=None, cls_name=None, color=None, thick=None, ellipse=False):
# draw image
    bboxes = np.round(bboxes).astype(int)
    if cls_inds is not None:
        cls_inds = cls_inds.astype(int)
    cls_num = len(cls_name) if cls_name is not None else -1
    imgcv = np.copy(im)
    h, w, _ = imgcv.shape
    for i, box in enumerate(bboxes):
        cls_indx = cls_inds[i] if cls_inds is not None else None
        color_ = get_color(cls_indx, cls_num) if color is None else color
        color_ = (0, 0, 0) if cls_indx is not None and cls_indx < 0 else color_
        thick = int((h + w) / 500) if thick is None else thick
if not ellipse:
cv2.rectangle(imgcv,
(box[0], box[1]), (box[2], box[3]),
color_, thick)
else:
            cv2.ellipse(imgcv,
                        (box[0] // 2 + box[2] // 2, box[1] // 2 + box[3] // 2),
                        (box[2] // 2 - box[0] // 2, box[3] // 2 - box[1] // 2),
                        0, 0, 360,
                        color=color_, thickness=thick)
if cls_indx is not None:
score = scores[i] if scores is not None else 1
name = cls_name[cls_indx] if cls_name is not None else str(cls_indx)
name = 'ign' if cls_indx < 0 else name
mess = '%s: %.2f' % (name[:4], score)
cv2.putText(imgcv, mess, (box[0], box[1] - 8),
0, 1e-3 * h, color_, thick // 3)
return imgcv
| 1.96875
| 2
|
tests/utils.py
|
rolandmueller/rita-dsl
| 0
|
12779291
|
import re
import pytest
import rita
def load_rules(rules_path):
with open(rules_path, "r") as f:
return f.read()
def spacy_engine(rules, **kwargs):
spacy = pytest.importorskip("spacy", minversion="2.1")
patterns = rita.compile_string(rules, **kwargs)
nlp = spacy.load("en")
ruler = spacy.pipeline.EntityRuler(nlp, overwrite_ents=True)
print(patterns)
ruler.add_patterns(patterns)
nlp.add_pipe(ruler)
def parse(text):
doc = nlp(text)
return list([(e.text, e.label_) for e in doc.ents])
return parse
def standalone_engine(rules, **kwargs):
parser = rita.compile_string(rules, use_engine="standalone", **kwargs)
print(parser.patterns)
def parse(text):
results = list(parser.execute(text, include_submatches=False))
return list([(r["text"], r["label"]) for r in results])
return parse
def rust_engine(rules, **kwargs):
from rita.engine.translate_rust import load_lib
lib = load_lib()
if lib is None:
pytest.skip("Missing rita-rust dynamic lib, skipping related tests")
print("Trying to run: {}".format(rules))
parser = rita.compile_string(rules, use_engine="rust", **kwargs)
print(parser.patterns)
def parse(text):
results = list(parser.execute(text, include_submatches=False))
return list([(r["text"], r["label"]) for r in results])
return parse
def normalize_output(r):
return re.sub(r"\s+", " ", r.strip().replace("\n", ""))
def raw_compare(r1, r2):
r1 = normalize_output(r1)
r2 = normalize_output(r2)
assert r1 == r2
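# --- Illustrative usage (added sketch, not part of the original helpers) ---
# A hypothetical example of how the engine factories above are typically used.
# The rule string follows rita-dsl's documented `{...}->MARK(...)` form but is
# an assumption, as is the expected output shown in the comment.
def example_standalone_usage():
    rules = '{WORD("Hello"), WORD("world")}->MARK("GREETING")'
    parse = standalone_engine(rules)
    results = parse("Hello world and goodbye")
    # `results` is a list of (text, label) tuples, e.g. [("Hello world", "GREETING")]
    return results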
| 2.265625
| 2
|
clash/fastest/fast-clash1.py
|
a93-git/codingame-solutions
| 0
|
12779292
|
import sys
import math
# Auto-generated code below aims at helping you parse
# the standard input according to the problem statement.
n = int(input())
w = input()
# Write an action using print
# To debug: print("Debug messages...", file=sys.stderr)
for i in range(n):
print(w)
| 3.578125
| 4
|
mlcomp/parallelm/components/external_component.py
|
lisapm/mlpiper
| 7
|
12779293
|
"""
This file is intended to be used by external components that can use Python (like R + reticulate).
It is the single place that needs to be imported (hence the short name) in order to get the API functions for
working with connected components, and possibly more in the future.
"""
from parallelm.common.singleton import Singleton
from parallelm.components.connectable_external_component import ConnectableExternalComponent
# An instance of the MLOps to be used when importing the pm library.
@Singleton
class ConnectableExternalComponentSingleton(ConnectableExternalComponent):
pass
mlcomp = ConnectableExternalComponentSingleton.Instance()
| 1.804688
| 2
|
src/main-gui.py
|
FlingJLJ/ThrowawayNameGenerator
| 0
|
12779294
|
import generator
from tkinter import Tk
| 1.21875
| 1
|
sailenv/dynamics/uniform_movement_random_bounce.py
|
sailab-code/SAILenv
| 0
|
12779295
|
from dataclasses import dataclass
from sailenv import Vector3
from sailenv.dynamics import Dynamic
@dataclass
class UniformMovementRandomBounce(Dynamic):
start_direction: Vector3 = Vector3(0, 0, 1)
speed: float = 5
angular_speed: float = 2
seed: int = 42
@staticmethod
def get_type():
return "uniform_movement_random_bounce"
| 2.921875
| 3
|
tests/unit/compress/CompressCssCompiler.py
|
wangjeaf/CSSCheckStyle
| 21
|
12779296
|
from helper import *
def doTest():
_no_space()
_has_space()
_just_prefix()
def _no_space():
msg = doCssCompress('@-css-compiler{selector-compile:no-combinator;rule-compile:all}html{width:100px;}')
equal(msg, 'html{width:100px}', '@css-compiler compressed')
def _has_space():
msg = doCssCompress('@-css-compiler {selector-compile:no-combinator;rule-compile:all}html{width:100px;}')
equal(msg, 'html{width:100px}', '@css-compiler compressed')
def _just_prefix():
msg = doCssCompress('@-css-compiler-prefix fdsafdsafdsa;html{width:100px;}')
equal(msg, 'html{width:100px}', '@css-compiler compressed')
| 2.328125
| 2
|
features/bvp_features.py
|
anascacais/MLB-P5-P6
| 0
|
12779297
|
<filename>features/bvp_features.py<gh_stars>0
# built-in
import json
import os
# third-party
import numpy as np
# local
from biosppy import utils
from biosppy import ppg
from biosppy import tools as st
from . import statistic_features, hrv_features
def bvp_features(signal=None, sampling_rate=1000.):
""" Compute BVP characteristic metrics describing the signal.
Parameters
----------
signal : array
Input signal.
sampling_rate : float
Sampling frequency.
Returns
-------
ons : list
Signal onsets.
    hr : list
        BVP heart rate.
"""
    # ensure numpy array
    signal = np.array(signal)
    args, names = [], []
    ons = ppg.find_onsets_elgendi2013(signal, sampling_rate)['onsets']
    _, hr = st.get_heart_rate(beats=ons, sampling_rate=sampling_rate, smooth=True, size=3)
    hr_stats = statistic_features.signal_stats(hr)
    hr_ppg_ft = hrv_features.hrv_features(np.diff(ons))
    # return the onsets and heart rate promised by the docstring (previously
    # args/names were left empty and the computed values were discarded);
    # hr_stats and hr_ppg_ft remain available for further feature extensions
    args += [ons, hr]
    names += ['onsets', 'hr']
    return utils.ReturnTuple(tuple(args), tuple(names))
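# --- Illustrative usage (added sketch, not part of the original module) ---
# A rough example of calling bvp_features on a synthetic pulse-like signal.
# The waveform below is an assumption purely for demonstration; real BVP data
# from a sensor should be used in practice. Because of the relative imports,
# run it as `python -m features.bvp_features` from the project root (assumed layout).
if __name__ == "__main__":
    fs = 1000.0                      # sampling rate in Hz
    t = np.arange(0, 20, 1.0 / fs)   # 20 seconds of signal
    synthetic_bvp = np.sin(2 * np.pi * 1.2 * t) + 0.1 * np.random.randn(t.size)
    feats = bvp_features(signal=synthetic_bvp, sampling_rate=fs)
    print(feats.keys())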
| 2.390625
| 2
|
main/views.py
|
ArighnaIITG/NoDues-Portal
| 0
|
12779298
|
<filename>main/views.py
from django.http import HttpResponse
from django.template import loader
from .models import *
from django.contrib.auth import authenticate, login
from django.contrib.auth import logout
from django.http import JsonResponse
from django.shortcuts import render, get_object_or_404, redirect, HttpResponseRedirect
from django.db.models import Q
from .forms import UserForm
def login_user(request):
if request.method == "POST":
username = request.POST['username']
password = request.POST['password']
role = request.POST['role']
# print str(role)
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
role = str(role)
username=request.user.username
username = str(username)
if role == "Student":
students = Student.objects.all()
flag=0
for stud in students:
if stud.webmail == username:
flag=1
if flag==1:
return redirect('/student_profile')
else:
return render(request, 'main/login.html', {'error_message': 'Invalid Role'})
elif role == "Faculty":
faculty = Faculty.objects.all()
flag=0
for fac in faculty:
if fac.webmail == username:
flag=1
if flag==1:
return redirect('/faculty_profile')
else:
return render(request, 'main/login.html', {'error_message': 'Invalid Role'})
elif role == "Lab":
labs = Lab.objects.all()
flag=0
for lab in labs:
if lab.webmail == username:
flag=1
if flag==1:
return redirect('/lab_profile')
else:
return render(request, 'main/login.html', {'error_message': 'Invalid Role'})
elif role == "Caretaker":
caretaker = Caretaker.objects.all()
flag=0
for care in caretaker:
if care.webmail == username:
flag=1
if flag==1:
return redirect('/caretaker_profile')
else:
return render(request, 'main/login.html', {'error_message': 'Invalid Role'})
elif role == "Warden":
warden = Warden.objects.all()
flag=0
for ward in warden:
if ward.webmail == username:
flag=1
if flag==1:
return redirect('/warden_profile')
else:
return render(request, 'main/login.html', {'error_message': 'Invalid Role'})
elif role == "Gymkhana":
gymkhana = Gymkhana.objects.all()
flag=0
for gym in gymkhana:
if gym.webmail == username:
flag=1
if flag==1:
return redirect('/gymkhana_profile')
else:
return render(request, 'main/login.html', {'error_message': 'Invalid Role'})
elif role == "OnlineCC":
onlinecc = OnlineCC.objects.all()
flag=0
for onl in onlinecc:
if onl.webmail == username:
flag=1
if flag==1:
return redirect('/onlinecc_profile')
else:
return render(request, 'main/login.html', {'error_message': 'Invalid Role'})
elif role == "CC":
cc = CC.objects.all()
flag=0
for c in cc:
if c.webmail == username:
flag=1
if flag==1:
return redirect('cc_profile')
else:
return render(request, 'main/login.html', {'error_message': 'Invalid Role'})
elif role == "Thesis Manager":
thesis = SubmitThesis.objects.all()
flag=0
for thes in thesis:
if thes.webmail == username:
flag=1
if flag==1:
return redirect('/thesis_manager_profile')
else:
return render(request, 'main/login.html', {'error_message': 'Invalid Role'})
elif role == "Library":
library = Library.objects.all()
flag=0
for lib in library:
if lib.webmail == username:
flag=1
if flag==1:
return redirect('/library_profile')
else:
return render(request, 'main/login.html', {'error_message': 'Invalid Role'})
elif role == "Assistant Registrar":
asst = asstreg.objects.all()
flag=0
for a in asst:
if a.webmail == username:
flag=1
if flag==1:
return redirect('/assistant_registrar_profile')
else:
return render(request, 'main/login.html', {'error_message': 'Invalid Role'})
elif role == "HOD":
hod = HOD.objects.all()
flag=0
for h in hod:
if h.webmail == username:
flag=1
if flag==1:
return redirect('/hod_profile')
else:
return render(request, 'main/login.html', {'error_message': 'Invalid Role'})
elif role == "Account":
account = Account.objects.all()
flag=0
for acc in account:
if acc.webmail == username:
flag=1
if flag==1:
return redirect('/account_profile')
else:
return render(request, 'main/login.html', {'error_message': 'Invalid Role'})
else:
return render(request, 'main/login.html', {'error_message': 'Invalid Credentials'})
else:
return render(request, 'main/login.html', {'error_message': 'Invalid Credentials'})
else:
return render(request, 'main/login.html', {'error_message': 'Invalid Credentials'})
return render(request, 'main/login.html',{'error_message': 'Valid Login'})
def logout_user(request):
logout(request)
form = UserForm(request.POST or None)
context = {
"form": form,
}
return render(request, 'main/login.html', context)
def student_profile(request):
username=request.user.username
username=str(username)
student = Student.objects.get(webmail=username)
return render(request, 'main/student.html',{'error_message': 'valid login', 'student': student})
def student_dept_detail(request):
username=request.user.username
username=str(username)
student = Student.objects.get(webmail=username)
faculty_dept = Faculty.objects.filter(dept=student.dept)
stud_fac_status = StudFacStatus.objects.filter(student=student)
return render(request,'main/student_dept_detail.html',{'error_message': 'valid login','student': student, 'faculty':faculty_dept, 'StudFacStatus': stud_fac_status})
def student_lab_detail(request):
username = request.user.username
username = str(username)
student = Student.objects.get(webmail=username)
labs = Lab.objects.all()
stud_lab_status = StudLabStatus.objects.filter(student=student)
return render(request, 'main/student_lab_detail.html', {'error_message': 'valid login', 'student': student, 'labs' : labs, 'StudLabStatus':stud_lab_status})
def account_profile(request):
if request.method == "GET":
username = request.user.username
account = Account.objects.get(webmail=username)
students = Student.objects.filter(hod_approval=True)
return render(request, 'main/account.html',
{'error_message': 'valid login', 'students': students,
'account': account})
elif request.method == "POST":
username = request.user.username
account = Account.objects.get(webmail=username)
students = Student.objects.filter(hod_approval=True)
for stud in students:
if request.POST.get(stud.webmail, "") == 'on':
stud.account_approval = True
stud.save()
else:
stud.account_approval = False
stud.save()
return redirect('/account_profile',{'error_message': 'valid login', 'students': students,'account': account})
def assistant_registrar_profile(request):
if request.method == "GET":
username = request.user.username
assistant_registrar= asstreg.objects.get(webmail=username)
students = Student.objects.filter(caretaker_approval=True,warden_approval=True,gymkhana_approval=True)
return render(request, 'main/assistant_registrar.html',
{'error_message': 'valid login', 'students': students, 'assistant_registrar': assistant_registrar})
elif request.method == "POST":
username = request.user.username
assistant_registrar= asstreg.objects.get(webmail=username)
students = Student.objects.filter(caretaker_approval=True, warden_approval=True, gymkhana_approval=True)
for stud in students:
if request.POST.get(stud.webmail, "") == 'on':
stud.assistant_registrar_approval = True
stud.save()
else:
stud.assistant_registrar_approval = False
stud.hod_approval = False
stud.account_approval = False
stud.save()
return redirect('/assistant_registrar_profile',{'students': students, 'assistant_registrar': assistant_registrar})
def caretaker_profile(request):
if request.method == "GET":
username = request.user.username
caretaker = Caretaker.objects.get(webmail=username)
hostel = caretaker.hostel
students = Student.objects.filter(hostel=hostel)
return render(request, 'main/caretaker.html',
{'error_message': 'valid login', 'students': students, 'caretaker': caretaker, 'hostel': hostel})
elif request.method=="POST":
username = request.user.username
caretaker = Caretaker.objects.get(webmail=username)
hostel = caretaker.hostel
students = Student.objects.filter(hostel=hostel)
for stud in students:
if request.POST.get(stud.webmail,"") == 'on':
stud.caretaker_approval=True
stud.save()
else :
stud.caretaker_approval = False
stud.warden_approval = False
stud.assistant_registrar_approval = False
stud.hod_approval = False
stud.account_approval = False
stud.save()
return redirect('/caretaker_profile',{ 'students': students, 'caretaker': caretaker, 'hostel': hostel })
def onlinecc_profile(request):
if request.method == "GET":
username = request.user.username
onlinecc = OnlineCC.objects.get(webmail=username)
students = Student.objects.all()
return render(request, 'main/onlinecc.html',
{'error_message': 'valid login', 'students': students, 'onlinecc': onlinecc})
elif request.method == "POST":
username = request.user.username
onlinecc = OnlineCC.objects.get(webmail=username)
students = Student.objects.all()
for stud in students:
if request.POST.get(stud.webmail, "") == 'on':
stud.online_cc_approval = True
stud.save()
else:
stud.online_cc_approval = False
stud.cc_approval = False
stud.hod_approval = False
stud.account_approval = False
stud.save()
return redirect('/onlinecc_profile',{'students': students, 'onlinecc': onlinecc})
def cc_profile(request):
if request.method == "GET":
username = request.user.username
cc = CC.objects.get(webmail=username)
students = Student.objects.filter(online_cc_approval=True)
return render(request, 'main/cc.html',
{'error_message': 'valid login', 'students': students, 'cc': cc})
elif request.method == "POST":
username = request.user.username
cc = CC.objects.get(webmail=username)
students = Student.objects.filter(online_cc_approval=True)
for stud in students:
if request.POST.get(stud.webmail, "") == 'on':
stud.cc_approval = True
stud.save()
else:
stud.cc_approval = False
stud.hod_approval = False
stud.account_approval = False
stud.save()
return redirect('/cc_profile',{'students': students, 'cc': cc})
def faculty_profile(request):
if request.method == "GET":
username = request.user.username
fac = Faculty.objects.get(webmail=username)
dept = fac.dept
students = Student.objects.filter(dept=dept)
stud_fac_status = StudFacStatus.objects.filter(faculty=fac)
return render(request, 'main/faculty.html',
{'error_message': 'valid login', 'students': students, 'faculty': fac, 'dept': dept,'StudFacStatus': stud_fac_status})
elif request.method=="POST":
username = request.user.username
fac = Faculty.objects.get(webmail=username)
dept = fac.dept
students = Student.objects.filter(dept=dept)
stud_fac_status = StudFacStatus.objects.filter(faculty=fac)
for stud in students:
for i in stud_fac_status :
if i.student.name == stud.name:
if request.POST.get(stud.webmail,"") == 'on':
x=StudFacStatus.objects.get(student=stud, faculty=fac)
x.approval=True
x.save()
else :
x = StudFacStatus.objects.get(student=stud, faculty=fac)
                        print(x)
x.approval=False
x.save()
stud.dept_status = False
stud.hod_approval = False
stud.account_approval = False
stud.save()
return redirect('/faculty_profile',{'students': students, 'faculty': fac, 'dept':dept,'StudFacStatus': stud_fac_status})
def gymkhana_profile(request):
if request.method == "GET":
username = request.user.username
gymkhana = Gymkhana.objects.get(webmail=username)
students = Student.objects.all()
return render(request, 'main/gymkhana.html',
{'error_message': 'valid login', 'students': students, 'gymkhana': gymkhana})
elif request.method == "POST":
username = request.user.username
gymkhana = Gymkhana.objects.get(webmail=username)
students = Student.objects.all()
for stud in students:
if request.POST.get(stud.webmail, "") == 'on':
stud.gymkhana_approval = True
stud.save()
else:
stud.gymkhana_approval = False
stud.assistant_registrar_approval = False
stud.hod_approval = False
stud.account_approval = False
stud.save()
return redirect('/gymkhana_profile',{'students': students, 'gymkhana': gymkhana})
def hod_profile(request):
if request.method == "GET":
username = request.user.username
hod = HOD.objects.get(webmail=username)
students = Student.objects.filter(dept=hod.dept, assistant_registrar_approval=True,library_approval=True, cc_approval=True)
return render(request, 'main/hod.html',
{'error_message': 'valid login', 'students': students,
'hod': hod})
elif request.method == "POST":
username = request.user.username
hod = HOD.objects.get(webmail=username)
students = Student.objects.filter(dept=hod.dept, assistant_registrar_approval=True,library_approval=True, cc_approval=True)
for stud in students:
if stud.lab_status() == True and stud.dept_status() == True:
if request.POST.get(stud.webmail, "") == 'on':
stud.hod_approval = True
stud.save()
else:
stud.hod_approval = False
stud.account_approval = False
stud.save()
return redirect('/hod_profile',{'error_message': 'valid login', 'students': students,'hod': hod})
def lab_profile(request):
if request.method == "GET":
username = request.user.username
lab = Lab.objects.get(webmail=username)
students = Student.objects.all()
stud_lab_status = StudLabStatus.objects.filter(lab=lab)
return render(request, 'main/lab.html',
{'error_message': 'valid login', 'students': students, 'lab': lab, 'StudLabStatus': stud_lab_status})
elif request.method=="POST":
username = request.user.username
lab = Lab.objects.get(webmail=username)
students = Student.objects.all()
stud_lab_status = StudLabStatus.objects.filter(lab=lab)
for stud in students:
for i in stud_lab_status :
if i.student.name == stud.name:
if request.POST.get(stud.webmail,"") == 'on':
x=StudLabStatus.objects.get(student=stud, lab=lab)
x.approval=True
x.save()
else :
x = StudLabStatus.objects.get(student=stud, lab=lab)
x.approval=False
x.save()
stud.lab_status = False
stud.hod_approval = False
stud.account_approval = False
stud.save()
return redirect('/lab_profile',{'students': students, 'lab': lab, 'StudLabStatus': stud_lab_status})
def library_profile(request):
if request.method == "GET":
username = request.user.username
library = Library.objects.get(webmail=username)
students = Student.objects.filter(submit_thesis=True)
return render(request, 'main/library.html',
{'error_message': 'valid login', 'students': students, 'library': library})
elif request.method == "POST":
username = request.user.username
library = Library.objects.get(webmail=username)
students = Student.objects.filter(submit_thesis=True)
for stud in students:
if request.POST.get(stud.webmail, "") == 'on':
stud.library_approval = True
stud.save()
else:
stud.library_approval = False
stud.hod_approval = False
stud.account_approval = False
stud.save()
return redirect('/library_profile',{'students': students, 'library': library})
def thesis_manager_profile(request):
if request.method == "GET":
username = request.user.username
thesis_manager = SubmitThesis.objects.get(webmail=username)
students = Student.objects.all()
return render(request, 'main/thesis_manager.html',
{'error_message': 'valid login', 'students': students, 'thesis_manager':thesis_manager})
elif request.method == "POST":
username = request.user.username
thesis_manager = SubmitThesis.objects.get(webmail=username)
students = Student.objects.all()
for stud in students:
if request.POST.get(stud.webmail, "") == 'on':
stud.submit_thesis = True
stud.save()
else:
stud.submit_thesis = False
stud.library_approval = False
stud.hod_approval = False
stud.account_approval = False
stud.save()
return redirect('/thesis_manager_profile',{'students': students, 'thesis_manager':thesis_manager})
def warden_profile(request):
if request.method == "GET":
username = request.user.username
warden = Warden.objects.get(webmail=username)
hostel = warden.hostel
students = Student.objects.filter(hostel=hostel, caretaker_approval=True)
return render(request, 'main/warden.html',
{'error_message': 'valid login', 'students': students, 'warden': warden,
'hostel': hostel})
elif request.method == "POST":
username = request.user.username
warden = Warden.objects.get(webmail=username)
hostel = warden.hostel
students = Student.objects.filter(hostel=hostel, caretaker_approval=True)
for stud in students:
if request.POST.get(stud.webmail, "") == 'on':
stud.warden_approval = True
stud.save()
else:
stud.warden_approval = False
stud.assistant_registrar_approval = False
stud.hod_approval = False
stud.account_approval = False
stud.save()
return redirect('/warden_profile', {'students': students, 'warden': warden,
'hostel': hostel })
def rules(request):
return render(request,'main/rules.html')
def contact(request):
return render(request,'main/contact.html')
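# --- Illustrative refactor sketch (added, not part of the original views) ---
# The role branches in login_user above all follow the same pattern: look up
# the logged-in webmail in a role-specific table and redirect to that role's
# profile page. A table-driven version of that check could look like the
# helper below; the mapping reuses only models and URLs that already appear
# in this file, but the helper itself is an assumption, not existing code.
ROLE_DISPATCH = {
    'Student': (Student, '/student_profile'),
    'Faculty': (Faculty, '/faculty_profile'),
    'Lab': (Lab, '/lab_profile'),
    'Caretaker': (Caretaker, '/caretaker_profile'),
    'Warden': (Warden, '/warden_profile'),
    'Gymkhana': (Gymkhana, '/gymkhana_profile'),
    'OnlineCC': (OnlineCC, '/onlinecc_profile'),
    'CC': (CC, '/cc_profile'),
    'Thesis Manager': (SubmitThesis, '/thesis_manager_profile'),
    'Library': (Library, '/library_profile'),
    'Assistant Registrar': (asstreg, '/assistant_registrar_profile'),
    'HOD': (HOD, '/hod_profile'),
    'Account': (Account, '/account_profile'),
}

def redirect_for_role(request, role, username):
    """Redirect to the profile page of `role` if `username` belongs to that role."""
    model, url = ROLE_DISPATCH[role]
    if model.objects.filter(webmail=username).exists():
        return redirect(url)
    return render(request, 'main/login.html', {'error_message': 'Invalid Role'})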
| 2.21875
| 2
|
pdpy/core.py
|
olihawkins/pdpy
| 13
|
12779299
|
# -*- coding: utf-8 -*-
"""Core download functions."""
# Imports ---------------------------------------------------------------------
import datetime
import json
import numpy as np
import pandas as pd
import requests
from . import constants
from . import errors
from . import settings
# Functions ------------------------------------------------------------------
def request(query):
"""Send an http request with a query and return the response.
request sends a SPARQL query to the api endpoint and returns the response
    object. It is a simple wrapper around requests.post. It sets the appropriate
headers and sends the query as the request body. It does not validate the
query or handle the response in any way. The response format is JSON.
Parameters
----------
query : str
A SPARQL query as a string.
Returns
-------
out : Response
The http response object from requests.
"""
url = settings.get_api_url()
headers = {}
headers['content-type'] = 'application/sparql-query'
headers['accept'] = 'application/sparql-results+json'
response = requests.post(url, headers=headers, data=query)
return response
def sparql_select(query):
"""Send a select query and return the response as a DataFrame.
sparql_select sends a SPARQL query to the api endpoint and returns the
response as a DataFrame. The SPARQL should be a SELECT query as the
response is processed as tabular data. The function will convert datatypes
that it recognises. It currently recognises date types. All other data
returned in the DataFrame will be strings. If the query syntax is not valid
or the request fails for any other reason a RequestError will be raised
with the response text.
Parameters
----------
query : str
A SPARQL SELECT query as a string.
Returns
-------
out : DataFrame
A pandas dataframe containing the results of the query.
"""
# Send the query and get the response
response = request(query)
# If the server returned an error raise it with the response text
if not response.ok:
raise errors.RequestError(response.text)
# Process the response as tabular data and return it as a DataFrame
json = response.json()
rows = []
headers = json['head']['vars']
records = json['results']['bindings']
# For each record build a row and assign values based on the data type
for record in records:
row = []
for header in headers:
if header in record:
if 'datatype' in record[header] and \
record[header]['datatype'] == constants.XML_DATE:
row.append(
datetime.datetime.strptime(
record[header]['value'], '%Y-%m-%d+%H:%M').date())
else:
row.append(record[header]['value'].strip())
else:
row.append(None)
rows.append(row)
return pd.DataFrame(data=rows, columns=headers).fillna(value=np.NaN)
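# --- Illustrative usage (added sketch, not part of the original module) ---
# A minimal example of sending a SELECT query through sparql_select. The query
# text is an assumption for illustration; any valid SPARQL SELECT against the
# configured endpoint works the same way. Run it as `python -m pdpy.core` so
# the package-relative imports resolve.
if __name__ == '__main__':
    example_query = """
        SELECT ?s ?p ?o
        WHERE { ?s ?p ?o }
        LIMIT 5
    """
    df = sparql_select(example_query)
    print(df.head())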
| 3.40625
| 3
|
wavelet_product_edge_detector.py
|
ryagi97/wavelet-product-edge-detection
| 0
|
12779300
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 14 09:10:29 2021
Author: <NAME>
Functions for implementing the edge detection scheme first proposed by Zhang and Bao [1].
Modified for use with pywt's SWT2 transform; employs double thresholding, similar to Canny, to improve noise resilience and recovery of weak edges.
Portions of code adapted from scikit-image's implementation of the canny edge detector;
Title: canny.py - Canny Edge detector
Author: <NAME>
Date: 11/02/2020
Code version: 0.17.2
Availability: https://github.com/scikit-image/scikit-image/blob/master/skimage/feature/_canny.py
[1] <NAME>. and <NAME>., 2002. Edge detection by scale multiplication in wavelet domain. Pattern Recognition Letters, 23(14), pp.1771-1784.
"""
import numpy as np
from pywt import swt2, Wavelet
from scipy.ndimage import generate_binary_structure, binary_erosion, label
from scipy import ndimage as ndi
def wavelet_edge_detector(image, start_level=0, levels=2, wavelet='rbio3.1', c=0.15, noise_var=40, t1=1, t2=2, dbl_th=True):
"""
Extracts the edge local maxima of the passed image using the product of two
consecutive stationary wavelet coefficients
    Parameters
    -----------
image : 2D array
Input image, grayscale
start_level : int
Initial coefficient scale level to be extracted by the SWT
levels : int
number of levels to consider, must be even
wavelet : string
Name of wavelet as listed by pywt.wavelist()
c : float
Multiplier for calculating the threshold
noise_var : float
Estimate of the Gaussian Noise variance present in the image
t1 : float
Threshold multiplier for the lower threshold
t2 : float
        Threshold multiplier for the upper threshold
Returns
-------
local_maxima : 2D array
local maxima extracted by the local maxima method
edge_mask : 2D array
Binary array marking edges present in the local maxima
-----
"""
assert(levels%2 == 0)
#calculate the maximum level to decompose the image with
max_level = start_level+levels
    # Decompose the image into its detail coefficients using the 2D SWT
coeffs = swt2(image, wavelet=wavelet, level=max_level,
start_level=start_level, norm=False,
trim_approx=True)
#create empty arrays to store the detail coefficients
    # the algorithm only requires the Horizontal and Vertical details, so the Diagonal is not computed
coeff_arr_H = np.empty((image.shape + (max_level-start_level,)))
coeff_arr_V = np.empty((image.shape + (max_level-start_level,)))
#offset the coefficients based on the decomposition scale
for i in range(max_level-start_level):
coeff_arr_H[:,:,i] = np.roll(coeffs[-1-i][0], 2**(i+start_level))
coeff_arr_V[:,:,i] = np.roll(coeffs[-1-i][1], 2**(i+start_level))
#Get the Horizontal and Vertical products; the magnitude gradient matrices
Mdx = np.prod(coeff_arr_H, axis=2)
Mdy = np.prod(coeff_arr_V, axis=2)
#Remove negative coefficients, as these are solely due to noise
pts_Mdx_plus = (Mdx >= 0)
Mdx = pts_Mdx_plus * Mdx
pts_Mdy_plus = (Mdy >= 0)
Mdy = pts_Mdy_plus * Mdy
#Get the angle gradient matrices
Adx = np.sign(coeff_arr_H[:,:,1])*np.sqrt(Mdx)
Ady = np.sign(coeff_arr_V[:,:,1])*np.sqrt(Mdy)
#Obtain the local modulus maximum in the direction of the normal of the edge
local_maxima = local_modulus_maxima(Adx, Ady, Mdx, Mdy)
if dbl_th:
#Perform double thresholding and return the edge mask
edge_mask = dbl_thresholding_ZhangBao(local_maxima, wavelet=wavelet,
start_level=start_level,
c=c, noise_var=noise_var,
t1=t1, t2=t2)
else:
edge_mask = None
return local_maxima, edge_mask
def local_modulus_maxima(Adx, Ady, Mdx, Mdy, mask=None):
"""
Code adapted from scikit-image's canny implementation for faster execution
Title: canny.py - Canny Edge detector
Author: <NAME>
Date: 11/02/2020
Code version: 0.17.2
Availability: https://github.com/scikit-image/scikit-image/blob/master/skimage/feature/_canny.py
"""
"""Fast computation of the local maxima using custom gradient and angle matrices
Parameters
-----------
Adx : 2D array
Gradient array along axis 0 (Horizontal Detail Coefficients) to be used
for calculating the normal to the edges
Ady : 2D array
Gradient array along axis 1 (Vertical Detail Coefficients) to be used
for calculating the normal to the edges
Mdx : 2D array
Gradient array along axis 0 (Horizontal Detail Coefficients) to be used
for calculating the value of the edges
Mdy : 2D array
Gradient array along axis 1 (Vertical Detail Coefficients) to be used
for calculating the value of the edges
mask : array, dtype=bool, optional
Mask to limit the application of Canny to a certain area.
Returns
-------
output : 2D array
The local maxima
-----
The steps of the algorithm are as follows:
* Thin potential edges to 1-pixel wide curves. First, find the normal
to the edge at each point. This is done by looking at the
signs and the relative magnitude of the X-Sobel and Y-Sobel
to sort the points into 4 categories: horizontal, vertical,
diagonal and antidiagonal. Then look in the normal and reverse
directions to see if the values in either of those directions are
greater than the point in question. Use interpolation to get a mix of
points instead of picking the one that's the closest to the normal.
"""
#
# The steps involved:
#
# * Find the normal to the edge at each point using the arctangent of the
# ratio of the Y sobel over the X sobel - pragmatically, we can
# look at the signs of X and Y and the relative magnitude of X vs Y
# to sort the points into 4 categories: horizontal, vertical,
# diagonal and antidiagonal.
#
# * Look in the normal and reverse directions to see if the values
# in either of those directions are greater than the point in question.
# Use interpolation to get a mix of points instead of picking the one
# that's the closest to the normal.
#
assert (Mdx.shape == Mdy.shape)
assert (Mdx.shape == Adx.shape)
assert (Adx.shape == Ady.shape)
if mask is None:
mask = np.ones(Mdx.shape, dtype=bool)
jsobel = Ady
isobel = Adx
abs_isobel = np.abs(isobel)
abs_jsobel = np.abs(jsobel)
magnitude = np.hypot(Mdx, Mdy)
#
# Make the eroded mask. Setting the border value to zero will wipe
# out the image edges for us.
#
s = generate_binary_structure(2, 2)
eroded_mask = binary_erosion(mask, s, border_value=0)
eroded_mask = eroded_mask & (magnitude > 0)
#
#--------- Find local maxima --------------
#
# Assign each point to have a normal of 0-45 degrees, 45-90 degrees,
# 90-135 degrees and 135-180 degrees.
#
local_maxima = np.zeros(Mdx.shape)
#----- 0 to 45 degrees ------
pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
# Get the magnitudes shifted left to make a matrix of the points to the
# right of pts. Similarly, shift left and down to get the points to the
# top right of pts.
c1 = magnitude[1:, :][pts[:-1, :]]
c2 = magnitude[1:, 1:][pts[:-1, :-1]]
m = magnitude[pts]
w = abs_jsobel[pts] / abs_isobel[pts]
c_plus = c2 * w + c1 * (1 - w) <= m
c1 = magnitude[:-1, :][pts[1:, :]]
c2 = magnitude[:-1, :-1][pts[1:, 1:]]
c_minus = c2 * w + c1 * (1 - w) <= m
local_maxima[pts] = c_plus & c_minus
#----- 45 to 90 degrees ------
# Mix diagonal and vertical
#
pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
c1 = magnitude[:, 1:][pts[:, :-1]]
c2 = magnitude[1:, 1:][pts[:-1, :-1]]
m = magnitude[pts]
w = abs_isobel[pts] / abs_jsobel[pts]
c_plus = c2 * w + c1 * (1 - w) <= m
c1 = magnitude[:, :-1][pts[:, 1:]]
c2 = magnitude[:-1, :-1][pts[1:, 1:]]
c_minus = c2 * w + c1 * (1 - w) <= m
local_maxima[pts] = c_plus & c_minus
#----- 90 to 135 degrees ------
# Mix anti-diagonal and vertical
#
pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
c1a = magnitude[:, 1:][pts[:, :-1]]
c2a = magnitude[:-1, 1:][pts[1:, :-1]]
m = magnitude[pts]
w = abs_isobel[pts] / abs_jsobel[pts]
c_plus = c2a * w + c1a * (1.0 - w) <= m
c1 = magnitude[:, :-1][pts[:, 1:]]
c2 = magnitude[1:, :-1][pts[:-1, 1:]]
c_minus = c2 * w + c1 * (1.0 - w) <= m
local_maxima[pts] = c_plus & c_minus
#----- 135 to 180 degrees ------
# Mix anti-diagonal and anti-horizontal
#
pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
pts = pts_plus | pts_minus
pts = eroded_mask & pts
c1 = magnitude[:-1, :][pts[1:, :]]
c2 = magnitude[:-1, 1:][pts[1:, :-1]]
m = magnitude[pts]
w = abs_jsobel[pts] / abs_isobel[pts]
c_plus = c2 * w + c1 * (1 - w) <= m
c1 = magnitude[1:, :][pts[:-1, :]]
c2 = magnitude[1:, :-1][pts[:-1, 1:]]
c_minus = c2 * w + c1 * (1 - w) <= m
local_maxima[pts] = c_plus & c_minus
return local_maxima * magnitude
def dbl_thresholding_ZhangBao(local_maxima, start_level=0, wavelet='rbio3.1', c=20, noise_var=1, t1=1, t2=2):
"""
Portions of code adapted from scikit-image's canny implementation for faster execution
Title: canny.py - Canny Edge detector
Author: <NAME>
Date: 11/02/2020
Code version: 0.17.2
Availability: https://github.com/scikit-image/scikit-image/blob/master/skimage/feature/_canny.py
"""
"""Performs double thresholding based the wavelet energy and noise variance values
Parameters
-----------
local_maxima : 2D array
Local maxima extracted by the local maxima method, same shape as input image
wavelet : string
Name of wavelet as listed by pywt.wavelist()
start_level : int
Initial coefficient scale level to be extracted by the SWT
c : float
Multiplier for calculating the threshold
noise_var : float
Estimate of the Gaussian Noise variance present in the image
t1 : float
Threshold multiplier for the lower threshold
t2 : float
        Threshold multiplier for the upper threshold
Returns
-------
edge_mask : 2D array
Binary array marking edges present in the local maxima
-----
"""
#
#---- Create two masks at the two thresholds.
#
# * Label all points above the high threshold as edges.
# * Recursively label any point above the low threshold that is 8-connected
# to a labeled point as an edge.
#
# Regarding masks, any point touching a masked point will have a gradient
# that is "infected" by the masked point, so it's enough to erode the
# mask by one and then mask the output. We also mask out the border points
# because who knows what lies beyond the edge of the image?
#
#First lower threshold is the same as in Zhang and Bao's paper
#Set to remove the majority of the noise present
#threshold = c * energy of wavelet at scale j, energy at scale j+1,
#noise_var, scaled noise_var
#get wavelet coefficients
w = Wavelet(wavelet)
if w.orthogonal:
(_, psi_d1, _) = w.wavefun(level=start_level+1)
(_, psi_d2, _) = w.wavefun(level=start_level+2)
else:
(_, psi_d1, _, _, _) = w.wavefun(level=start_level+1)
(_, psi_d2, _, _, _) = w.wavefun(level=start_level+2)
    # compute their energies (in reality, the square root of the energy)
energy_psi_d1 = np.sqrt(np.sum(psi_d1**2))
energy_psi_d2 = np.sqrt(np.sum(psi_d2**2))
#add zeros to psi_d1 to compute the next variable
psi_d1_up = psi_d1.repeat(2)
psi_d1_up[1::2] = 0
if wavelet == 'haar':
psi_d1_up = psi_d1_up[1:-1]
#get the sigma_i value
sigma_i_sq = 2*np.sum((psi_d1_up/energy_psi_d1 + psi_d2/energy_psi_d2)**2)
t = c * energy_psi_d1 * energy_psi_d2 * noise_var * sigma_i_sq
T_low = t*t1
T_high = t*t2
high_mask = (local_maxima >= T_high)
low_mask = (local_maxima >= T_low)
# Segment the low-mask, then only keep low-segments that have
# some high_mask component in them
strel = np.ones((3, 3), bool)
labels, count = label(low_mask, strel)
if count == 0:
return low_mask
sums = (np.array(ndi.sum(high_mask, labels, np.arange(count, dtype=np.int32) + 1),
copy=False, ndmin=1))
good_label = np.zeros((count + 1,), bool)
good_label[1:] = sums > 0
output_mask = good_label[labels]
return output_mask
#run demo
if __name__ == "__main__":
import cv2 as cv
lvl = 0
c = 0.345
t1 = 1.0
t2 = 2.75
noise_var = 7237.754103671255
cv.namedWindow('Camera Capture', cv.WINDOW_NORMAL)
cv.namedWindow('Product Local Maxima - Haar Wavelet', cv.WINDOW_NORMAL)
cv.namedWindow('Product Local Maxima - Reverse Biorthogonal 3.1 Wavelet', cv.WINDOW_NORMAL)
cv.namedWindow('Edges - Haar Wavelet', cv.WINDOW_NORMAL)
cv.namedWindow('Edges - Reverse Biorthogonal 3.1 Wavelet', cv.WINDOW_NORMAL)
cv.namedWindow('Overlay - Haar Wavelet', cv.WINDOW_NORMAL)
cv.namedWindow('Overlay - Reverse Biorthogonal 3.1 Wavelet', cv.WINDOW_NORMAL)
image = cv.imread('test_images/USAF.tiff', cv.IMREAD_GRAYSCALE)
#convert image from 8-bit to 12-bit, same as camera depth
    image = image.astype(float)
image = image * 4095/256
image = image.astype(np.uint16)
#find local maxima and edges using the Haar wavelet
local_maxima_hr, edges_hr = wavelet_edge_detector(image, start_level=lvl,
wavelet='haar',c=c,
noise_var=noise_var, t1=t1, t2=t2)
local_maxima_hr = local_maxima_hr / np.max(local_maxima_hr) * 65535
local_maxima_hr = local_maxima_hr.astype(np.uint16)
edges_hr = edges_hr * np.ones(edges_hr.shape) * 65535
edges_hr = edges_hr.astype(np.uint16)
comb_hr = np.zeros((image.shape + (3,)))
comb_hr[:,:,0] = image / 4096
comb_hr[:,:,1] = comb_hr[:,:,0]
comb_hr[:,:,2] = comb_hr[:,:,0]
comb_hr[:,:,2] += (edges_hr/65535)
comb_hr[:,:,2] = np.clip(comb_hr[:,:,2], 0, 1)
#find local maxima and edges using the Reverse Biorthogonal 3.1 wavelet
local_maxima_rb, edges_rb = wavelet_edge_detector(image, start_level=lvl,
wavelet='rbio3.1',c=c,
noise_var=noise_var, t1=t1, t2=t2)
local_maxima_rb = local_maxima_rb / np.max(local_maxima_rb) * 65535
local_maxima_rb = local_maxima_rb.astype(np.uint16)
edges_rb = edges_rb * np.ones(edges_rb.shape) * 65535
edges_rb = edges_rb.astype(np.uint16)
comb_rb = np.zeros((image.shape + (3,)))
comb_rb[:,:,0] = image / 4096
comb_rb[:,:,1] = comb_rb[:,:,0]
comb_rb[:,:,2] = comb_rb[:,:,0]
comb_rb[:,:,2] += (edges_rb/65535)
comb_rb[:,:,2] = np.clip(comb_rb[:,:,2], 0, 1)
    image = image.astype(float)
image = image * 65535/4096
image = image.astype(np.uint16)
try:
while True:
cv.imshow('Camera Capture', image)
cv.imshow('Product Local Maxima - Haar Wavelet', local_maxima_hr)
cv.imshow('Product Local Maxima - Reverse Biorthogonal 3.1 Wavelet', local_maxima_rb)
cv.imshow('Edges - Haar Wavelet', edges_hr)
cv.imshow('Edges - Reverse Biorthogonal 3.1 Wavelet', edges_rb)
cv.imshow('Overlay - Haar Wavelet', comb_hr)
cv.imshow('Overlay - Reverse Biorthogonal 3.1 Wavelet', comb_rb)
cv.waitKey(1)
except KeyboardInterrupt:
cv.destroyAllWindows()
| 3.078125
| 3
|
flaskr/blog.py
|
LSWarss/Flask-Blog-App
| 1
|
12779301
|
from flask import (
Blueprint, flash, g, redirect, render_template, request, url_for
)
from werkzeug.exceptions import abort
from flask_login import login_required, current_user
from flaskr.models import Post, db, PostComment, User
from flaskr import csrf
blog = Blueprint('blog', __name__)
@blog.route('/')
def index():
posts = Post.query.order_by(Post.created).all()
return render_template('blog/index.html', posts=posts, getPostUser=getPostUser)
@blog.route('/create', methods=('GET', 'POST'))
@login_required
def create():
if request.method == 'POST':
title = request.form['title']
body = request.form['body']
error = None
if not title:
error = 'Title is required.'
if error is not None:
flash(error)
else:
data = Post(current_user.id,title,body)
db.session.add(data)
db.session.commit()
return redirect(url_for('blog.index'))
return render_template('blog/create.html')
def get_post(id, check_author=True):
post = Post.query.get(id)
if post is None:
abort(404, "Post id {0} doesn't exist.".format(id))
return post
@blog.route('/<int:id>/update', methods=('GET', 'POST'))
@login_required
def update(id):
post = get_post(id)
if request.method == 'POST':
title = request.form['title']
body = request.form['body']
error = None
if not title:
error = 'Title is required.'
if error is not None:
flash(error)
else:
post.title = title
post.body = body
db.session.commit()
return redirect(url_for('blog.index'))
return render_template('blog/update.html', post=post)
@blog.route("/<int:id>/delete", methods=('POST',))
@login_required
def delete(id):
Post.query.filter_by(id=id).delete()
db.session.commit()
return redirect(url_for('blog.index'))
@blog.route("/<int:id>/<action>")
@login_required
def like(id, action):
post = get_post(id)
if action == 'like':
current_user.like_post(post)
db.session.commit()
if action == 'unlike':
current_user.unlike_post(post)
db.session.commit()
return redirect(request.referrer)
@blog.route("/<int:id>/comments", methods=('GET', 'POST'))
def showComments(id):
post = get_post(id)
comments = post.comments
return render_template('blog/comments.html', comments = comments, post = post)
@blog.route("/addComment", methods=('POST',))
def addComment():
if request.method == 'POST':
post_id = int(request.form['post_id'])
print(post_id)
body = request.form['body']
error = None
if body == '':
error = 'Body is required.'
if error is not None:
flash(error)
else:
comment = PostComment(current_user.id, post_id, body)
print(current_user.id)
db.session.add(comment)
db.session.commit()
return redirect(request.referrer)
def getPostUser(id):
return User.query.get(id).username
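# --- Illustrative wiring (added sketch, not part of the original blueprint) ---
# The blueprint still needs to be registered on the Flask application; a
# helper like the one below could be called from the app factory. The helper
# name and the '/' endpoint alias are assumptions for illustration.
def register_blog(app):
    """Attach the blog blueprint and make url_for('index') resolve to '/'."""
    app.register_blueprint(blog)
    app.add_url_rule('/', endpoint='index')
    return app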
| 2.375
| 2
|
pyvmodule/tools/modules/amba/axi/axi2ram.py
|
tanhongze/pyvmodule
| 0
|
12779302
|
from pyvmodule.develope import *
from pyvmodule.tools.modules.sram.dual import SRamR,SRamW
from pyvmodule.tools.modules.fifo import Fifo
from .common import AxiComponent,update_data_burst_addr,compute_address
class Axi2RamR(SRamR):
class FifoAR(Fifo):
def update_data_araddr(self,field):
return update_data_burst_addr(field,self.data.arburst)
def __init__(self,axi,io=None,**kwargs):
SRamR.__init__(self,awidth=axi.awidth,bwidth=axi.bwidth,io=io,**kwargs)
self.reset = ~axi.aresetn
self.rcur = Reg(4)
axi.rresp[:]=0
for name in ['rid','rlast','rvalid']:
driver = Reg(len(getattr(axi,name)))
getattr(axi,name)[:] = driver
setattr(self,name,driver)
self.a = self.FifoAR(self.reset,axi,push=axi.arvalid,
names=['arid','araddr','arlen','arsize','arburst'],depth=0)
compute_address(self.a.data.araddr,self.a.data.arlen,axi.size_v)
axi.arready[:]= ~self.a.full
self.allow_out = Wire(axi.rready|~axi.rvalid)
self.a.data.arlen.last = Wire(self.a.data.arlen.equal_to(self.rcur))
self.a.pop[:] = self.en&self.a.data.arlen.last
self.a.data.araddr.update[:] = self.en&~self.a.data.arlen.last
self.rcur.reset = Wire(self.reset|self.a.pop)
When(self.rcur.reset)[self.rcur:0]\
.When(self.en)[self.rcur:self.rcur+1]
When(self.reset)[self.rvalid:0]\
.When(self.en)[self.rvalid:1]\
.When(axi.rready)[self.rvalid:0]
When(self.a.pop)[self.rid:self.a.data.arid]
When(self.en)[self.rlast:self.a.data.arlen.last]
self.en [:] = self.a.valid&self.allow_out
self.addr[:] = self.a.data.araddr
axi.rdata[:] = self.data
class Axi2RamW(SRamW):
class FifoAW(Fifo):
def update_data_awaddr(self,field):
return update_data_burst_addr(field,self.data.awburst)
def __init__(self,axi,io=None,**kwargs):
SRamW.__init__(self,awidth=axi.awidth,bwidth=axi.bwidth,io=io,**kwargs)
self.reset = ~axi.aresetn
self.w = VStruct()
for name in ['wdata','wstrb','wlast','wvalid']:
setattr(self,name,Reg(len(getattr(axi,name))))
self.a = self.FifoAW(self.reset,axi,push=axi.awvalid,
names=['awid','awaddr','awlen','awsize','awburst'],depth=0)
self.b = Fifo(self.reset,self.a.data.awid,pop=axi.bready)
axi.bid [:]= self.b.data
axi.bvalid[:]= self.b.valid
axi.bresp [:]= 0
compute_address(self.a.data.awaddr,self.a.data.awlen,axi.size_v)
self.allow_out = Wire(self.a.valid&~self.b.full)
self.a.data.awaddr.update[:] = self.en&~self.wlast
self.a.pop [:] = self.en& self.wlast
self.b.push [:] = self.a.pop
self.go = Wire(axi.wvalid&axi.wready)
blk = When(self.go)
for name in ['wdata','wstrb','wlast']:
blk[getattr(self,name):getattr(axi,name)]
When(self.reset)[self.wvalid:0]\
.When(self.go)[self.wvalid:1]\
.When(self.en)[self.wvalid:0]
axi.awready[:] = ~self.a.full
axi.wready [:] = self.allow_out|~self.wvalid
self.en [:] = self.wvalid&self.allow_out&~self.reset
self.addr[:] = self.a.data.awaddr
self.data[:] = self.wdata
self.strb[:] = self.wstrb
| 2.46875
| 2
|
bankManage/backEndService/test.py
|
ShangziXue/A-simple-bank-system
| 0
|
12779303
|
<reponame>ShangziXue/A-simple-bank-system
from flask import Flask
from flask import request
from flask import jsonify
from flask import make_response
from flask_cors import *
import json
import time
app = Flask(__name__)
CORS(app, supports_credentials=True)
#==============================================================================================
# Helper that converts Oracle cursor rows into dictionaries
def makeDictFactory(cursor):
columnNames = [d[0].lower() for d in cursor.description]
def createRow(*args):
return dict(zip(columnNames, args))
return createRow
#==============================================================================================
# Login: backend endpoint
@app.route('/login', methods=['POST'])
def login():
username = request.form['username']
password = request.form['password']
custype = request.form['custype']
print(username)
print(password)
print(custype)
# print("登录成功")
response = make_response(jsonify({
'code':200,
'msg':'get',
'token':username
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
#==============================================================================================
# Branch management: backend endpoints
@app.route('/bank',methods=['POST'])
def bank():
rstype=request.form['type']
if (rstype=="Search"):
        # Todo: implement the database query and return its results
response = make_response(jsonify({
'code':200,
'list':[
{'name': '合肥城南支行','city': '合肥','money': 100000000},
{'name': '南京城北支行','city': '南京','money': 102500000},
{'name': '无锡城北支行','city': '无锡','money': 1000}
]
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if (rstype=="Update"):
        # Todo: implement the database operation to update or insert a record
response = make_response(jsonify({
'code':200,
'msg': 'ok'
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if (rstype=="Delete"):
        # Todo: implement the database operation to delete the record
response = make_response(jsonify({
'code':200,
'msg': 'ok'
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
@app.route('/staff',methods=['POST'])
def staff():
rstype=request.form['type']
if (rstype=="Search"):
        # Todo: implement the database query and return its results
print('Search')
response = make_response(jsonify({
'code':200,
'list':[
{'id':'331002199802021545','name': '张三','dept':'人事处','tel':'10086','addr':'黄山路','date_s':'2010-12-30'},
{'id':'33100220001002002X','name': '李四','dept':'财务处','tel':'10010','addr':'合作化路','date_s':'2011-02-00'},
{'id':'331002199011110010','name': '王五','dept':'前台','tel':'10000','addr':'肥西路','date_s':'2019-04-30'} ]
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if (rstype=="Update"):
        # Todo: implement the database operation to update or insert a record
print('Update')
date_s=request.form['date_s']
print(date_s)
response = make_response(jsonify({
'code':200,
'msg': 'ok'
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if (rstype=="Delete"):
        # Todo: implement the database operation to delete the record
print('Delete')
response = make_response(jsonify({
'code':200,
'msg': 'ok'
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
@app.route('/staffCustomer',methods=['POST'])
def staffCustomer():
rstype=request.form['type']
    # staffID=request.form['staffID'] # staff ID number, used for search, update and delete
    # custID=request.form['custID'] # customer ID number, used for update and delete
    # serviceType=request.form['serviceType'] # service type, used for update
    # old_custID=request.form['old_custID'] # previous customer ID number, used for update; null means insert
    # old_staffID=request.form['old_staffID'] # previous staff ID number, used for update
if (rstype=="SearchByStaff"):
        # Todo: implement the database query and return its results
        staffID=request.form['staffid'] # staff ID number; look up all customer relations for this staff member
print('SearchByStaff')
print(staffID)
response = make_response(jsonify({
'code':200,
'list':[
{'id':'331002199802021545','name': '张三','type':'1'},
{'id':'331002195602021545','name': '李四','type':'0'},
]
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if (rstype=='SearchByCustomer'):
        # Todo: implement the database query and return its results
        custID=request.form['custid'] # customer ID number; look up all staff relations for this customer
print('SearchByCustomer')
print(custID)
response = make_response(jsonify({
'code':200,
'list':[
{'staffid':'331002199802021545','staffname': '张三','type':'1'},
{'staffid':'331002195602021545','staffname': '李四','type':'0'},
]
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if (rstype=="Update"):
        # Todo: implement the database operation to update or insert a record (using a view is recommended)
        # and return the updated or inserted record to the front end (it mainly needs the names, but for compatibility the whole record should be returned)
print('Update')
response = make_response(jsonify({
'code':200,
'record': {'id':'331002199802021545','name': '张三','staffid':'331002199802021545','staffname': '李四','type':'1'}
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if (rstype=="Delete"):
        # Todo: implement the database operation to delete the record
        print('Delete')
        staffID=request.form['staffid'] # staff ID number
        custID=request.form['custid'] # customer ID number; together these two keys identify the relation to delete
print(staffID)
print(custID)
response = make_response(jsonify({
'code':200,
'msg': 'ok'
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
@app.route('/pay',methods=['POST'])
def pay():
rstype=request.form['type']
    # id=request.form['loanID'] # loan number, used for querying and inserting payment records
    # date=request.form['date'] # payment date, used when inserting a record
    # money=request.form['money'] # payment amount, used when inserting a record
    if (rstype=="Search"):
        # Todo: implement the database query and return its results
print('Search')
response = make_response(jsonify({
'code':400,
'list':[
#{'date':'2019-05-03','money':2500},
#{'date':'2019-05-04','money':2000},
#{'date':'2019-05-05','money':3000}
]
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if (rstype=="Insert"):
        # Todo: implement the database operation to update or insert a record
print('Insert')
response = make_response(jsonify({
'code':200,
'msg': 'ok'
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
@app.route('/accountCustomer',methods=['POST'])
def accountCustomer():
rstype=request.form['type']
    # id=request.form['accID'] # account number, used for querying and adding owners
    # bank=request.form['bank'] # branch where the account was opened
    # ownerID=request.form['ownerID'] # owner ID number, used when inserting a record
    print(rstype)
    if (rstype=="Search"):
        # Todo: implement the database query and return its results
print('Search')
response = make_response(jsonify({
'code':200,
'list':[
{'ownerID':'11111','ownerName':'柳树'},
{'ownerID':'11112','ownerName':'杨树'},
{'ownerID':'11222','ownerName':'柏树'}
]
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if (rstype=="Insert"):
        # Todo: implement the database operation to insert a record
        print('Insert')
        id=request.form['accID'] # account number, used for querying and adding owners
        bank=request.form['bank'] # branch where the account was opened
        ownerID=request.form['ownerID'] # owner ID number, used when inserting a record
response = make_response(jsonify({
'code':200,
'record': {'ID':id,'bank':bank,'ownerID':ownerID,'ownerName':'王五'}
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if (rstype=="Delete"):
        # Todo: implement the database operation to delete the record
print('Delete')
response = make_response(jsonify({
'code':200,
'msg': 'ok'
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
@app.route('/customer',methods=['POST'])
def customer():
rstype=request.form['type']
if (rstype=="Search"):
        # Todo: implement the database query and return its results
print('Search')
response = make_response(jsonify({
'code':200,
'list':[
{'id':'331002199802021545','name': '张三','tel':'10086','addr':'黄山路',
'name_link':'张三丰','tel_link':'112','email_link':'<EMAIL>','relation':'父子'},
{'id':'331002195602021545','name': '李四','tel':'10086','addr':'黄山路',
'name_link':'张三丰','tel_link':'112','email_link':'<EMAIL>','relation':'父子'},
{'id':'331002199802021555','name': '王五','tel':'10086','addr':'黄山路',
'name_link':'张三丰','tel_link':'112','email_link':'<EMAIL>','relation':'父子'}
]
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if (rstype=="Update"):
        # Todo: implement the database operation to update or insert a record
print('Update')
response = make_response(jsonify({
'code':200,
'msg': 'ok'
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if (rstype=="Delete"):
        # Todo: implement the database operation to delete the record
print('Delete')
response = make_response(jsonify({
'code':200,
'msg': 'ok'
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
@app.route('/account',methods=['POST'])
def account():
rstype=request.form['type']
if (rstype=="Search"):
        # Todo: implement the database operation and return the query results
print('Search')
response = make_response(jsonify({
'code':200,
'list':[
{'id': "123000",'owner': "张三,李四,王五,马云,刘强东",'bank': "合肥支行",'money':2563.00,
'open_date': '2016-2-20','visit_date': '2018-5-6','type': '0','interest': 0.043,'cashtype': '1'},
{'id': "123020",'owner': "刘强东",'bank': "合肥支行",'money':23563.00,
'open_date': '2016-2-20','visit_date': '2018-5-6','type': '1','overdraft': 25000000}
]
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if (rstype=="Update"):
        # Todo: implement the database operation to update or insert the record
print('Update')
ownerid=request.form['ownerid']
print(ownerid)
response = make_response(jsonify({
'code':200,
'msg': 'ok'
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if (rstype=="Delete"):
        # Todo: implement the database operation to delete the record
print('Delete')
response = make_response(jsonify({
'code':200,
'msg': 'ok'
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
@app.route('/loan',methods=['POST'])
def loan():
rstype=request.form['type']
if (rstype=="Search"):
        # Todo: implement the database operation and return the query results
print('Search')
response = make_response(jsonify({
'code':200,
'list':[
{'id': "123000",'customer': "10000 张三",'bank': "合肥支行",'amount':2563.00,'status':'0'},
{'id': "123001",'customer': "10001 李四",'bank': "合肥支行",'amount':252263.00,'status':'1'},
{'id': "123023",'customer': "10002 王五",'bank': "合肥支行",'amount':25.00,'status':'2'}
]
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if (rstype=="Update"):
        # Todo: implement the database operation to insert the record; note that the customer field holds the ID numbers of all borrowers separated by ASCII commas, and using a transaction when issuing the loan is recommended
print('Update')
response = make_response(jsonify({
'code':200,
'customer': '10000 张三\n10001 李四\n10002 王五'
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
if (rstype=="Delete"):
        # Todo: implement the database operation to delete the record
print('Delete')
response = make_response(jsonify({
'code':200,
'msg': 'ok'
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
@app.route('/summary',methods=['POST'])
def summary():
    # Todo: implement the database operation per the front-end request and return the statistics; a chart can also be generated at static/summary.png for the front end to use
response = make_response(jsonify({
'code':200,
'columnList':['合肥支行','南京支行','上海支行','杭州支行','宁波支行'],
'rawData':[
{'time':'2018.4','合肥支行':52,'南京支行':5,'杭州支行':52,'宁波支行':20},
{'time':'2018.12','合肥支行':25,'南京支行':45,'上海支行':21,'杭州支行':41,'宁波支行':25},
{'time':'2020.2','南京支行':35,'上海支行':54,'杭州支行':29,'宁波支行':17}
]
})
)
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
return response
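
# Hedged sketch (not part of the original app): every "Todo: implement the
# database operation" branch above follows the same pattern -- run a query or
# statement, then jsonify the rows. The helper below shows one possible way a
# Search branch could be filled in using sqlite3 from the standard library.
# The database file name ('bank.db') and the table/column names ('customer',
# 'id', 'name', 'tel', 'addr') are assumptions for illustration only.
import sqlite3

def _search_customers_sketch(keyword):
    # Parameterized LIKE query; returns a list of dicts shaped like the mock
    # 'list' payloads returned by the routes above.
    with sqlite3.connect('bank.db') as conn:
        cur = conn.execute(
            "SELECT id, name, tel, addr FROM customer WHERE name LIKE ?",
            ('%' + keyword + '%',))
        return [dict(zip(('id', 'name', 'tel', 'addr'), row)) for row in cur]
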
if __name__ == '__main__':
app.run(host='0.0.0.0')
# app.run()
| 2.984375
| 3
|
server01.py
|
maysrp/ESP32-monitor
| 0
|
12779304
|
<reponame>maysrp/ESP32-monitor<filename>server01.py
import psutil
import serial
import time
import requests
uid="1369152"
ser=serial.Serial("com3",115200,timeout=0.5)
url="http://api.bilibili.com/x/relation/stat?vmid="+uid
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE' }
def x(cv):
if cv<1024*1024*1024:
return str(cv//(1024*1024))+"MB"
else:
return str(round(cv/(1024*1024*1024)))+"GB"
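# Example of x() (added for clarity): x(500 * 1024 * 1024) -> '500MB',
# x(8 * 1024**3) -> '8GB'.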
re=requests.get(url,headers=headers)
my=re.json()
if my['code']==0:
e= my['data']['follower']
else:
e=0
while True:
tm=time.localtime()
if tm.tm_min%5==0:
re=requests.get(url,headers=headers)
my=re.json()
if my['code']==0:
e= my['data']['follower']
else:
e=0
f=str(tm.tm_hour)+':'+str(tm.tm_min)
a=psutil.cpu_count()
b=psutil.cpu_percent()
c=psutil.virtual_memory()
# svmem(total=8071716864, available=6532554752, percent=19.1, used=1258717184, free=6526308352, active=1153519616, inactive=194592768, buffers=2129920, cached=284561408, shared=9011200, slab=39006208)
d=psutil.disk_usage('c://')
# sdiskusage(total=85857402880, used=3858100224, free=81999302656, percent=4.5)
while b==0:
b=psutil.cpu_percent()
aa=str(a)+" CPU "+str(b)+"%"
bb=x(c.total)+" Mem "+str(c.percent)+"%"
cc="C: "+x(d.total)+" "+str(d.percent)+"%"
time.sleep(1)
print(aa,bb,cc)
q="tu('%s','%s','%s','%s','%s')\r" % (aa,bb,cc,e,f)
print(q)
ser.write(q.encode())
| 2.328125
| 2
|
ics/icalendar.py
|
tomschr/ics.py
| 0
|
12779305
|
from typing import Dict, Iterable, List, Optional, Union
import attr
from attr.validators import instance_of
from ics.component import Component
from ics.event import Event
from ics.grammar.parse import Container, calendar_string_to_containers
from ics.parsers.icalendar_parser import CalendarParser
from ics.serializers.icalendar_serializer import CalendarSerializer
from ics.timeline import Timeline
from ics.todo import Todo
@attr.s
class CalendarAttrs(Component):
version: str = attr.ib(validator=instance_of(str)) # default set by Calendar.Meta.DEFAULT_VERSION
prodid: str = attr.ib(validator=instance_of(str)) # default set by Calendar.Meta.DEFAULT_PRODID
scale: Optional[str] = attr.ib(default=None)
method: Optional[str] = attr.ib(default=None)
version_params: Dict[str, List[str]] = attr.ib(factory=dict)
prodid_params: Dict[str, List[str]] = attr.ib(factory=dict)
scale_params: Dict[str, List[str]] = attr.ib(factory=dict)
method_params: Dict[str, List[str]] = attr.ib(factory=dict)
_timezones: Dict = attr.ib(factory=dict, init=False, repr=False, eq=False, order=False, hash=False)
events: List[Event] = attr.ib(factory=list, converter=list)
todos: List[Todo] = attr.ib(factory=list, converter=list)
class Calendar(CalendarAttrs):
"""
    Represents a unique RFC 5545 iCalendar.
Attributes:
events: a list of `Event`s contained in the Calendar
todos: a list of `Todo`s contained in the Calendar
timeline: a `Timeline` instance for iterating this Calendar in chronological order
"""
class Meta:
name = 'VCALENDAR'
parser = CalendarParser
serializer = CalendarSerializer
DEFAULT_VERSION = "2.0"
DEFAULT_PRODID = "ics.py - http://git.io/lLljaA"
def __init__(
self,
imports: Union[str, Container] = None,
events: Optional[Iterable[Event]] = None,
todos: Optional[Iterable[Todo]] = None,
creator: str = None,
**kwargs
):
"""Initializes a new Calendar.
Args:
imports (**str**): data to be imported into the Calendar,
events (**Iterable[Event]**): `Event`s to be added to the calendar
todos (**Iterable[Todo]**): `Todo`s to be added to the calendar
creator (**string**): uid of the creator program.
"""
if events is None:
events = tuple()
if todos is None:
todos = tuple()
kwargs.setdefault("version", self.Meta.DEFAULT_VERSION)
kwargs.setdefault("prodid", creator if creator is not None else self.Meta.DEFAULT_PRODID)
super(Calendar, self).__init__(events=events, todos=todos, **kwargs) # type: ignore
self.timeline = Timeline(self, None)
if imports is not None:
if isinstance(imports, Container):
self._populate(imports)
else:
containers = calendar_string_to_containers(imports)
if len(containers) != 1:
raise NotImplementedError(
'Multiple calendars in one file are not supported by this method. Use ics.Calendar.parse_multiple()')
self._populate(containers[0]) # Use first calendar
@property
def creator(self) -> str:
return self.prodid
@creator.setter
def creator(self, value: str):
self.prodid = value
@classmethod
def parse_multiple(cls, string):
""""
Parses an input string that may contain mutiple calendars
and retruns a list of :class:`ics.event.Calendar`
"""
containers = calendar_string_to_containers(string)
return [cls(imports=c) for c in containers]
def __repr__(self) -> str:
return "<Calendar with {} event{} and {} todo{}>" \
.format(len(self.events),
"s" if len(self.events) > 1 else "",
len(self.todos),
"s" if len(self.todos) > 1 else "")
def __iter__(self) -> Iterable[str]:
"""Returns:
iterable: an iterable version of __str__, line per line
(with line-endings).
Example:
Can be used to write calendar to a file:
>>> c = Calendar(); c.events.append(Event(name="My cool event"))
>>> open('my.ics', 'w').writelines(c)
"""
return iter(str(self).splitlines(keepends=True))
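
# Hedged usage sketch (not part of the library): build a calendar and write it
# to disk, mirroring the example in the Calendar.__iter__ docstring above.
if __name__ == '__main__':
    c = Calendar()
    c.events.append(Event(name="My cool event"))
    with open('my.ics', 'w') as f:
        f.writelines(c)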
| 2.390625
| 2
|
secret/tests/test_utils.py
|
MinisterioPublicoRJ/apilabcontas
| 2
|
12779306
|
from unittest import TestCase
from freezegun import freeze_time
from secret.utils import create_secret
class Utils(TestCase):
@freeze_time('2019-03-12 12:00:00')
def test_create_secret_key(self):
secret = create_secret()
self.assertEqual(secret, 'd3a4646728a9de9a74d8fc4c41966a42')
| 2.71875
| 3
|
tests/test_utils.py
|
cyan-at/cq-cam
| 7
|
12779307
|
import unittest
from typing import List
import cadquery as cq
from cq_cam.utils import utils
class ProjectFaceTest(unittest.TestCase):
def setUp(self):
pass
def test_face_with_hole(self):
# This should create a projected face that is 2x4 (XY)
box = (
cq.Workplane('XZ')
.lineTo(2, 0)
.lineTo(2, 6)
.close()
.extrude(4)
.faces('<Z')
.workplane()
.moveTo(1, 2)
.rect(1, 1)
.cutThruAll()
)
face_wp = cq.Workplane(obj=box.faces().objects[1])
plane = face_wp.workplane().plane
# Make sure we picked the right face
self.assertEqual(plane.xDir, cq.Vector(0.0, -1.0, 0.0))
self.assertEqual(plane.yDir, cq.Vector(0.316227766016838, 0.0, 0.9486832980505139))
self.assertEqual(plane.zDir, cq.Vector(-0.9486832980505139, -0.0, 0.316227766016838))
result = utils.project_face(face_wp.objects[0])
class TestVector(cq.Vector):
def __eq__(self, other):
if getattr(other, 'wrapped', None):
return super().__eq__(other)
return False
expected_outer_wire = [
TestVector(2, 0, 0),
TestVector(2, -4, 0),
TestVector(0, -4, 0),
TestVector(0, 0, 0)
]
expected_inner_wire = [
TestVector(0.5, -1.5, 0),
TestVector(0.5, -2.5, 0),
TestVector(1.5, -1.5, 0),
TestVector(1.5, -2.5, 0)
]
def wire_to_vectors(wire: cq.Wire) -> List[cq.Vector]:
return [to_vector(vertex) for vertex in wire.Vertices()]
def to_vector(vertex: cq.Vertex) -> cq.Vector:
return TestVector(vertex.toTuple())
self.assertCountEqual(wire_to_vectors(result.outerWire()), expected_outer_wire)
inner_wires = result.innerWires()
self.assertEqual(len(inner_wires), 1)
self.assertCountEqual(wire_to_vectors(inner_wires[0]), expected_inner_wire)
| 2.671875
| 3
|
secretsharing/charset.py
|
ml31415/secret-sharing
| 0
|
12779308
|
<gh_stars>0
from six import integer_types
def int_to_charset(x, charset):
""" Turn a non-negative integer into a string.
"""
if not (isinstance(x, integer_types) and x >= 0):
raise ValueError("x must be a non-negative integer.")
if x == 0:
return charset[0]
output = ""
while x > 0:
x, digit = divmod(x, len(charset))
output += charset[digit]
return output
def charset_to_int(s, charset):
""" Turn a string into a non-negative integer.
"""
if not isinstance(s, (str)):
raise ValueError("s must be a string.")
if (set(s) - set(charset)):
raise ValueError("s has chars that aren't in the charset.")
return sum(len(charset)**i * charset.index(char) for i, char in enumerate(s))
""" Base16 includes numeric digits and the letters a through f. Here,
we use the lowercase letters.
"""
base16_chars = '0123456789abcdef'
""" The Base58 character set allows for strings that avoid visual ambiguity
when typed. It consists of all the alphanumeric characters except for
"0", "O", "I", and "l", which look similar in some fonts.
https://en.bitcoin.it/wiki/Base58Check_encoding
"""
base58_chars = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
""" The Base32 character set allows for accurate transcribing by hand.
It consists of uppercase letters + numerals, excluding "0", "1", + "8",
which could look similar to "O", "I", and "B" and so are omitted.
http://en.wikipedia.org/wiki/Base32
"""
base32_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"
""" The z-base-32 character set is similar to the standard Base32 character
set, except it uses lowercase letters + numerals and chooses to exclude
"0", "l", "v", + "2". The set is also permuted so that easier chars
occur more frequently.
http://philzimmermann.com/docs/human-oriented-base-32-encoding.txt
"""
zbase32_chars = "ybndrfg8ejkmcpqxot1uwisza345h769"
""" The Base64 character set is a popular encoding for transmitting data
over media that are designed for textual data. It includes all alphanumeric
characters plus two bonus characters, usually "+" and "/".
http://en.wikipedia.org/wiki/Base64
"""
base64_chars = ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
"0123456789+/")
| 3.875
| 4
|
scripts/generate-database-data.py
|
Crown-Commercial-Service/digitalmarketplace-scripts
| 1
|
12779309
|
<filename>scripts/generate-database-data.py
#!/usr/bin/env python3
"""
N.B.: this is a work in progress with only a few steps implemented.
The aim is to populate your local database with enough randomly generated data to run the DMp using the API.
Usage:
./scripts/generate-database-data.py
"""
import sys
from docopt import docopt
from dmapiclient import DataAPIClient
sys.path.insert(0, '.')
from dmscripts.generate_database_data import (
open_gcloud_12,
make_gcloud_12_live,
create_buyer_email_domain_if_not_present,
generate_user,
set_all_frameworks_to_expired,
)
from dmscripts.helpers.auth_helpers import get_auth_token
from dmscripts.helpers.updated_by_helpers import get_user
from dmutils.env_helpers import get_api_endpoint_from_stage
STAGE = 'development'
if __name__ == "__main__":
args = docopt(__doc__)
print("Generating test data...")
user = get_user()
api_token = get_auth_token('api', STAGE)
data = DataAPIClient(
base_url=get_api_endpoint_from_stage(STAGE),
auth_token=api_token,
user=user,
)
# TODO complete the minimum set of data (see rest of this comment).
# This document shows the data needed to make functional tests pass:
# https://docs.google.com/document/d/1NE7owPrdUO3pW8Wri6sQu57LDsHH8CJd3kfMqXAd678
# If you can't access the document, the steps are:
# - add users: one for each type of admin, 2 buyers, 2 users per supplier
# Nice to have: add also the test logins the team knows about so that people can login to test
# - add suppliers and services: 2 suppliers per lot, each supplier is on somewhere between 1 and all of the lots,
# 1 service per lot per supplier on DOS frameworks, 2 services per lot per supplier on G-Cloud frameworks
# - add opportunities: 1 closed per lot, 1 awarded per lot, 1 withdrawn per lot, 2 open per lot
# Applying only the database migrations to an empty database creates several frameworks in various states. Set
# them all to expired before we start adding new data
set_all_frameworks_to_expired(data)
open_gcloud_12(data)
create_buyer_email_domain_if_not_present(data, "user.marketplace.team")
generate_user(data, "buyer")
make_gcloud_12_live(data)
print("Generation has been completed.")
| 2.640625
| 3
|
main.py
|
QIN2DIM/armour-email
| 6
|
12779310
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
# Time : 2021/12/16 16:28
# Author : QIN2DIM
# Github : https://github.com/QIN2DIM
# Description: SSPanel-Uim email verification example
# ==============================================
# TODO [√] Running instances used for the project demo; a proxy may (or may not) be needed
# ==============================================
# - `anti_email` marks instances that require email verification
# - unmarked instances serve as the control group
ActionNiuBiCloud = {
"register_url": "https://niubi.cyou/auth/register",
}
ActionFreeDogCloud = {
"register_url": "https://www.freedog.pw/auth/register",
"anti_email": True
}
ActionSavierCloud = {
"register_url": "https://savier.xyz/auth/register",
"anti_email": True
}
# ==============================================
# TODO [√] Check the chromedriver configuration before running
# ==============================================
from examples import demo_email2walk
if __name__ == '__main__':
demo_email2walk(
        # control group without email verification
# atomic=ActionNiuBiCloud,
        # experimental group with email verification
atomic=ActionSavierCloud,
)
| 1.804688
| 2
|
src/class_namespaces/__init__.py
|
mwchase/class-namespace
| 1
|
12779311
|
"""Class Namespaces.
Class namespaces implemented using metaclasses and context managers.
Classes that contain namespaces need to have NamespaceableMeta as a metaclass.
Namespaces are context managers within a class definition. They can be
manipulated after they're defined.
"""
from . import namespaces
NamespaceableMeta = namespaces.NamespaceableMeta
Namespaceable = namespaces.Namespaceable
Namespace = namespaces.Namespace
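
# Hedged usage sketch (commented out, not part of the package), based only on
# the module docstring above: a Namespace used as a context manager inside a
# class whose metaclass support comes from the Namespaceable base. The exact
# attribute layout shown is an assumption for illustration.
# class Thing(Namespaceable):
#     with Namespace() as ns:
#         value = 1
# # then accessed as Thing.ns.value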
| 2.484375
| 2
|
utils/LSL_Tests/SendSphericalData.py
|
xfleckx/BeMoBI_Tools
| 0
|
12779312
|
<reponame>xfleckx/BeMoBI_Tools
import sys
sys.path.append('./pylsl') # help python find pylsl relative to this example program
from pylsl import StreamInfo, StreamOutlet
import random
import time
import math
# Send spherical coordinates
info = StreamInfo('RandomSpehricalData','3DCoord',3,100,'float32','myuid34234')
# next make an outlet
outlet = StreamOutlet(info)
print("name="+ info.name() + "\n" + "type=" + info.type() + "\n")
print("now sending data...")
while True:
current = time.time()
# make a new random 3-channel sample; this is converted into a pylsl.vectorf (the data type that is expected by push_sample)
sample = [1 + math.sin(current), 1 + math.cos(current), 1 + math.sin(current) ]
# now send it and wait for a bit
outlet.push_sample(sample)
time.sleep(0.01)
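
# Hedged companion sketch (commented out, not part of the original script): a
# minimal receiver for the stream above, assuming the standard pylsl
# resolve_stream/StreamInlet API.
# from pylsl import StreamInlet, resolve_stream
# streams = resolve_stream('type', '3DCoord')
# inlet = StreamInlet(streams[0])
# while True:
#     sample, timestamp = inlet.pull_sample()
#     print(timestamp, sample)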
| 2.53125
| 3
|
dockerfiles/examples/read-bytes-seed/scale-job.py
|
kaydoh/scale
| 121
|
12779313
|
<gh_stars>100-1000
import argparse
import datetime
import json
import logging
import sys
import os
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
level=logging.DEBUG, stream=sys.stdout)
def run_algorithm(bytes_total, input_file, out_dir):
"""Read the indicated number of bytes from input file and store in output directory
:param bytes_total:
:param input_file:
:param out_dir:
:return:
"""
bytes_read = 0
chunk_size = 512
logging.info('Reading %s bytes from %s and storing at %s.' % (bytes_total, input_file, out_dir))
base_path = os.path.join(out_dir, 'output_file')
start = datetime.datetime.utcnow().isoformat()
os.makedirs(base_path)
output_file = os.path.join(base_path, os.path.basename(input_file))
logging.info('Data being stored in %s' % output_file)
with open(input_file, 'rb') as infile:
with open(output_file, 'wb') as outfile:
while bytes_read <= bytes_total:
if bytes_read + chunk_size > bytes_total:
chunk_size = bytes_total - bytes_read
chunk = infile.read(chunk_size)
# Break if EOF is encountered
if not chunk: break
outfile.write(chunk)
bytes_read += chunk_size
logging.info('Copy complete')
end = datetime.datetime.utcnow().isoformat()
# Output metadata file for testing capture
metadata = {
'type': 'Feature',
'geometry': None,
'properties':
{
'dataStarted': start + 'Z',
'dataEnded': end + 'Z'
}
}
metadata_file = output_file + '.metadata.json'
with open(metadata_file, 'w') as outfile:
json.dump(metadata, outfile)
logging.info('Metadata written to %s' % metadata_file)
return output_file
if __name__ == '__main__':
for key in os.environ.keys():
print "%30s %s" % (key,os.environ[key])
parser = argparse.ArgumentParser(description='Copy x number of bytes from input file to output file.')
parser.add_argument('bytes_total', type=int, help='number of bytes to copy from input to output file')
parser.add_argument('input_file', help='absolute path to input file')
parser.add_argument('output_dir', help='absolute output directory path')
args = parser.parse_args()
logging.debug('Bytes to copy: {}'.format(args.bytes_total))
logging.debug('Input file: {}'.format(args.input_file))
logging.debug('Output directory: {}'.format(args.output_dir))
output_file = run_algorithm(args.bytes_total, args.input_file, args.output_dir)
# Write an output manifest for testing JSON property capture
with open(os.path.join(args.output_dir, 'seed.outputs.json'), 'w') as output_json:
input_size = os.path.getsize(args.input_file)
contents = {'INPUT_FILE_NAME': args.input_file, 'INPUT_SIZE': input_size}
json.dump(contents, output_json)
sys.exit(0)
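
# Hedged usage note (not in the original script), assuming a local test file:
#   python scale-job.py 1024 /tmp/input.bin /tmp/out
# copies the first 1024 bytes of /tmp/input.bin to /tmp/out/output_file/input.bin,
# writes input.bin.metadata.json next to it, and drops seed.outputs.json in /tmp/out.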
| 2.40625
| 2
|
phy/gui/tests/test_gui.py
|
m-beau/phy
| 0
|
12779314
|
# -*- coding: utf-8 -*-
"""Test gui."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
from pytest import raises
from ..qt import Qt, QApplication, QWidget, QMessageBox
from ..gui import (GUI, GUIState,
_try_get_matplotlib_canvas,
_try_get_vispy_canvas,
)
from phy.utils import Bunch
from phy.utils._color import _random_color
#------------------------------------------------------------------------------
# Utilities and fixtures
#------------------------------------------------------------------------------
def _create_canvas():
"""Create a VisPy canvas with a color background."""
from vispy import app
c = app.Canvas()
c.color = _random_color()
@c.connect
def on_draw(e): # pragma: no cover
c.context.clear(c.color)
return c
#------------------------------------------------------------------------------
# Test views
#------------------------------------------------------------------------------
def test_vispy_view():
from vispy.app import Canvas
assert isinstance(_try_get_vispy_canvas(Canvas()), QWidget)
def test_matplotlib_view():
from matplotlib.pyplot import Figure
assert isinstance(_try_get_matplotlib_canvas(Figure()), QWidget)
#------------------------------------------------------------------------------
# Test GUI
#------------------------------------------------------------------------------
def test_gui_noapp(tempdir):
if not QApplication.instance():
with raises(RuntimeError): # pragma: no cover
GUI(config_dir=tempdir)
def test_gui_1(tempdir, qtbot):
gui = GUI(position=(200, 100), size=(100, 100), config_dir=tempdir)
qtbot.addWidget(gui)
assert gui.name == 'GUI'
# Increase coverage.
@gui.connect_
def on_show():
pass
gui.unconnect_(on_show)
qtbot.keyPress(gui, Qt.Key_Control)
qtbot.keyRelease(gui, Qt.Key_Control)
assert isinstance(gui.dialog("Hello"), QMessageBox)
view = gui.add_view(_create_canvas(), floating=True, closable=True)
gui.add_view(_create_canvas())
view.setFloating(False)
gui.show()
assert gui.get_view('Canvas')
assert len(gui.list_views('Canvas')) == 2
# Check that the close_widget event is fired when the gui widget is
# closed.
_close = []
@view.connect_
def on_close_widget():
_close.append(0)
@gui.connect_
def on_close_view(view):
_close.append(1)
view.close()
assert _close == [1, 0]
gui.close()
assert gui.state.geometry_state['geometry']
assert gui.state.geometry_state['state']
gui.default_actions.exit()
def test_gui_status_message(gui):
assert gui.status_message == ''
gui.status_message = ':hello world!'
assert gui.status_message == ':hello world!'
gui.lock_status()
gui.status_message = ''
assert gui.status_message == ':hello world!'
gui.unlock_status()
gui.status_message = ''
assert gui.status_message == ''
def test_gui_geometry_state(tempdir, qtbot):
_gs = []
gui = GUI(size=(100, 100), config_dir=tempdir)
qtbot.addWidget(gui)
gui.add_view(_create_canvas(), 'view1')
gui.add_view(_create_canvas(), 'view2')
gui.add_view(_create_canvas(), 'view2')
@gui.connect_
def on_close():
_gs.append(gui.save_geometry_state())
gui.show()
qtbot.waitForWindowShown(gui)
assert len(gui.list_views('view')) == 3
assert gui.view_count() == {
'view1': 1,
'view2': 2,
}
gui.close()
# Recreate the GUI with the saved state.
gui = GUI(config_dir=tempdir)
gui.add_view(_create_canvas(), 'view1')
gui.add_view(_create_canvas(), 'view2')
gui.add_view(_create_canvas(), 'view2')
@gui.connect_
def on_show():
gui.restore_geometry_state(_gs[0])
assert gui.restore_geometry_state(None) is None
qtbot.addWidget(gui)
gui.show()
assert len(gui.list_views('view')) == 3
assert gui.view_count() == {
'view1': 1,
'view2': 2,
}
gui.close()
#------------------------------------------------------------------------------
# Test GUI state
#------------------------------------------------------------------------------
def test_gui_state_view(tempdir):
view = Bunch(name='MyView0')
state = GUIState(config_dir=tempdir)
state.update_view_state(view, dict(hello='world'))
assert not state.get_view_state(Bunch(name='MyView'))
assert not state.get_view_state(Bunch(name='MyView1'))
assert state.get_view_state(view) == Bunch(hello='world')
| 2.09375
| 2
|
elm/model/gconv_vae.py
|
jinxu06/gsubsampling
| 12
|
12779315
|
import os
import sys
from collections import OrderedDict
from absl import logging
import torch
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms.functional as TF
import pytorch_lightning as pl
import e2cnn.gspaces
import e2cnn.nn
from .base import VariationalAutoEncoderModule
from elm.nn import MLP, GConvNN, GConvTransposeNN
class GConvVAE(VariationalAutoEncoderModule):
def __init__(self,
in_channels,
out_channels,
n_channels,
img_size,
dim_latent,
activation=F.relu,
readout_fn=None,
fiber_group='rot_2d',
n_rot=4,
optim_lr=0.0001,
profiler=None):
super().__init__(in_channels=in_channels,
out_channels=out_channels,
n_channels=n_channels,
img_size=img_size,
dim_latent=dim_latent,
activation=activation,
readout_fn=readout_fn,
optim_lr=optim_lr,
profiler=profiler)
self.fiber_group = fiber_group
self.n_rot = n_rot
self._create_networks()
self.params = self.parameters()
logging.debug("-------- GConv VAE ---------")
logging.debug("-------- Trainable Variables ---------")
for name, p in self.named_parameters():
logging.debug("{}, {}".format(name, p.size()))
logging.debug("--------------------------------------")
def _create_networks(self):
self.n_flip = 1
if 'flip' in self.fiber_group:
self.n_flip = 2
nc = self.n_channels
self.encoder = torch.nn.Sequential(
GConvNN(in_channels=self.in_channels,
out_channels=[nc,nc,2*nc,2*nc,2*nc],
kernel_size=[3,3,3,3,5],
stride=[2,2,2,2,2],
padding_mode='circular',
activation=self.activation,
out_activation=None,
use_bias=True,
fiber_group=self.fiber_group,
n_rot=self.n_rot))
self.flatten = torch.nn.Flatten()
self.encoder_mlp = MLP(in_sizes=self.n_rot*self.n_flip*2*2*2*nc,
out_sizes=[2*self.dim_latent])
self.decoder = torch.nn.Sequential(
GConvTransposeNN(in_channels=2*nc,
out_channels=[2*nc,2*nc,nc,nc,nc,self.in_channels],
kernel_size=[5,3,3,3,3,3],
stride=[2,2,2,2,2,1],
padding_mode='circular',
activation=self.activation,
out_activation=self.readout_fn,
use_bias=True,
fiber_group=self.fiber_group,
n_rot=self.n_rot))
self.decoder_mlp = MLP(in_sizes=self.dim_latent,
out_sizes=[self.n_rot*self.n_flip*2*2*2*nc],
out_activation=self.activation)
self.unflatten = torch.nn.Unflatten(dim=1, unflattened_size=(2*nc*self.n_flip*self.n_rot,2,2))
def encode(self, x):
z = self.encoder(x)
z = self.encoder_mlp(self.flatten(z))
mu, log_sigma_sq = torch.chunk(z, chunks=2, dim=-1)
return mu, log_sigma_sq
def decode(self, z):
z = self.unflatten(self.decoder_mlp(z))
x_hat = self.decoder(z)
return x_hat
def reparameterize(self, mu, log_sigma_sq):
sigma = torch.exp(log_sigma_sq/2.)
eps = torch.normal(torch.zeros_like(mu), torch.ones_like(sigma))
return eps * sigma + mu
def reconstruct(self, x):
mu, _ = self.encode(x)
x_hat = self.decode(mu)
return x_hat
def generate(self, n_samples=16):
z = torch.normal(torch.zeros(n_samples, self.dim_latent), torch.ones(n_samples, self.dim_latent))
x_hat = self.decode(z)
return x_hat
def forward(self, x):
mu, _ = self.encode(x)
return mu
def compute_loss_and_metrics(self, x, y=None):
mu, log_sigma_sq = self.encode(x)
z = self.reparameterize(mu, log_sigma_sq)
x_hat = self.decode(z)
recon_loss = F.mse_loss(x_hat, x, reduction='sum') / x.size()[0]
kl_loss = torch.sum(-0.5 * torch.sum(1 + log_sigma_sq - mu ** 2 - log_sigma_sq.exp(), dim = 1), dim = 0) / x.size()[0]
loss = recon_loss + kl_loss
logs = {
"recon": recon_loss,
"kl": kl_loss,
"elbo": loss
}
return loss, logs
| 2.015625
| 2
|
code/oldtmpcodes/biasplay.py
|
modichirag/21cmhod
| 0
|
12779316
|
<filename>code/oldtmpcodes/biasplay.py
import numpy as np
from pmesh.pm import ParticleMesh
from nbodykit.lab import BigFileCatalog, BigFileMesh, FFTPower
from nbodykit.source.mesh.field import FieldMesh
import matplotlib.pyplot as plt
plt.switch_backend('Agg')
#
import sys
sys.path.append('./utils')
import tools, dohod #
from time import time
#
#Global, fixed things
scratch = '/global/cscratch1/sd/yfeng1/m3127/'
project = '/project/projectdirs/m3127/H1mass/'
cosmodef = {'omegam':0.309167, 'h':0.677, 'omegab':0.048}
aafiles = [0.1429, 0.1538, 0.1667, 0.1818, 0.2000, 0.2222, 0.2500, 0.2857, 0.3333]
#aafiles = aafiles[4:]
zzfiles = [round(tools.atoz(aa), 2) for aa in aafiles]
#Paramteres
#Maybe set up to take in as args file?
bs, nc = 256, 512
#ncsim, sim, prefix = 256, 'lowres/%d-9100-fixed'%256, 'lowres'
ncsim, sim, prefix = 2560, 'highres/%d-9100-fixed'%2560, 'highres'
def HI_masscutfiddle(mhalo,aa, mcutf=1.0):
"""Makes a 21cm "mass" from a box of halo masses.
Use mcutf to fiddle with Mcut
"""
print('Assigning weights')
zp1 = 1.0/aa
zz = zp1-1
alp = (1+2*zz)/(2+2*zz)
norm = 2e9*np.exp(-1.9*zp1+0.07*zp1**2)
mcut= 1e10*(6.11-1.99*zp1+0.165*zp1**2)*mcutf
xx = mhalo/mcut+1e-10
mHI = xx**alp * np.exp(-1/xx)
mHI*= norm
return(mHI)
def fiddlebias(aa, saveb=False, bname='h1bias.txt', ofolder=None):
#Fiddling bias for halos. Deprecated.
print('Read in catalogs')
halocat = readincatalog(aa, matter=False)
hpos = halocat['Position']
hmass = halocat['Mass']
h1mass = dohod.HI_mass(hmass, aa)
dm = BigFileMesh(project + sim + '/fastpm_%0.4f/'%aa + '/dmesh_N%04d'%nc, '1').paint()
if ofolder is None: ofolder = project + '/%s/fastpm_%0.4f/'%(sim, aa)
#measure power
pm = ParticleMesh(BoxSize = bs, Nmesh = [nc, nc, nc])
pkm = FFTPower(dm/dm.cmean(), mode='1d').power
k, pkm = pkm['k'], pkm['power']
##H1
print("H1")
h1mesh = pm.paint(hpos, mass=h1mass)
pkh1 = FFTPower(h1mesh/h1mesh.cmean(), mode='1d').power['power']
pkh1m = FFTPower(h1mesh/h1mesh.cmean(), second=dm/dm.cmean(), mode='1d').power['power']
#Bias
b1h1 = pkh1m/pkm
b1h1sq = pkh1/pkm
if saveb:
np.savetxt(ofolder+bname, np.stack((k, b1h1, b1h1sq**0.5), axis=1),
header='k, pkh1xm/pkm, pkh1/pkm^0.5')
return k, b1h1, b1h1sq
def fiddlebiasgal(aa, suff, nc=nc, mcfv=[1.], saveb=False, bname='h1bias', ofolder=None):
'''Fiddle bias for galaxies'''
if ofolder is None: ofolder = project + '/%s/fastpm_%0.4f/'%(sim, aa)
pm = ParticleMesh(BoxSize = bs, Nmesh = [nc, nc, nc])
print('Read in catalogs')
cencat = BigFileCatalog(project + sim + '/fastpm_%0.4f/cencat'%aa)
satcat = BigFileCatalog(project + sim + '/fastpm_%0.4f/satcat'%aa+suff)
cpos, spos = cencat['Position'], satcat['Position']
cmass, smass = cencat['Mass'], satcat['Mass']
pos = np.concatenate((cpos, spos), axis=0)
dm = BigFileMesh(project + sim + '/fastpm_%0.4f/'%aa + '/dmesh_N%04d'%nc, '1').paint()
pkm = FFTPower(dm/dm.cmean(), mode='1d').power
k, pkm = pkm['k'], pkm['power']
b1, b1sq = np.zeros((k.size, len(mcfv))), np.zeros((k.size, len(mcfv)))
for imc, mcf in enumerate(mcfv):
print(mcf)
ch1mass = HI_masscutfiddle(cmass, aa, mcutf=mcf)
sh1mass = HI_masscutfiddle(smass, aa, mcutf=mcf)
h1mass = np.concatenate((ch1mass, sh1mass), axis=0)
#
h1mesh = pm.paint(pos, mass=h1mass)
pkh1 = FFTPower(h1mesh/h1mesh.cmean(), mode='1d').power['power']
pkh1m = FFTPower(h1mesh/h1mesh.cmean(), second=dm/dm.cmean(), mode='1d').power['power']
#Bias
b1[:, imc] = pkh1m/pkm
b1sq[:, imc] = pkh1/pkm
np.savetxt(ofolder+bname+'auto'+suff+'.txt', np.concatenate((k.reshape(-1, 1), b1sq**0.5), axis=1),
header='mcut factors = %s\nk, pkh1xm/pkm, pkh1/pkm^0.5'%mcfv)
np.savetxt(ofolder+bname+'cross'+suff+'.txt', np.concatenate((k.reshape(-1, 1), b1), axis=1),
header='mcut factors = %s\nk, pkh1xm/pkm, pkh1mx/pkm'%mcfv)
return k, b1, b1sq
if __name__=="__main__":
#bs, nc, sim are set in the global parameters at the top
print('Starting')
for satsuff in ['-m1_5p0min-alpha_0p9', '-m1_8p0min-alpha_0p9']:
fig, ax = plt.subplots(3, 3, figsize=(12, 12))
for ia, aa in enumerate(aafiles):
mcfv = [0.5, 0.8, 1.0, 1.2, 1.5, 2]
print('Redshift = %0.2f'%(zzfiles[ia]))
ofolder = project + '/%s/fastpm_%0.4f/'%(sim, aa)
k, b1, b1sq = fiddlebiasgal(aa, mcfv=mcfv, nc=256, suff=satsuff, ofolder=ofolder, saveb=False)
axis=ax.flatten()[ia]
axis.plot(mcfv, b1[1:5].mean(axis=0), 'C%do'%0, label='cross')
axis.plot(mcfv, (b1sq**0.5)[1:5].mean(axis=0), 'C%d*'%0, label='auto')
axis.set_title('z = %0.2f'%zzfiles[ia])
axis.grid(which='both', lw=0.3)
ax[0, 0].legend()
        for axis in ax[:, 0]: axis.set_ylabel('b$_1$')
        for axis in ax[-1, :]: axis.set_xlabel('$M_{cut}$ factor')
fig.savefig('./figs/biasmcf-%s.png'%(satsuff))
#
# fig, ax = plt.subplots(1, 2, figsize = (9, 4))
# for i, aa in enumerate(aafiles):
# ofolder = project + '/%s/fastpm_%0.4f/'%(sim, aa)
# print('Redshift = %0.2f'%(1/aa-1))
# k, b1, b1sq = fiddlebiasgal(aa, nc=256, suff=satsuff, ofolder=ofolder, saveb=False)
# ax[0].plot(k, b1, 'C%d'%i, lw=1.5)
# ax[0].plot(k, b1sq**0.5, 'C%d--'%i, lw=2)
# ax[1].plot(1/aa-1, b1[1:5].mean(), 'C%do'%i, label='%0.2f'%(1/aa-1))
# ax[1].plot(1/aa-1, (b1sq**0.5)[1:5].mean(), 'C%d*'%i)
#
# for axis in ax:
# axis.legend()
# ax[0].set_xscale('log')
# axis.set_ylim(1, 5)
# #axis.grid(which='both', lw=0.3)
# fig.savefig('./figs/bias%s.png'%satsuff
| 1.773438
| 2
|
error404detector/crawlertest.py
|
hayj/404Detector
| 0
|
12779317
|
from webcrawler.crawler import *
def crawlingCallback(data, browser=None):
print(data)
crawler = Crawler\
(
["https://github.com/hayj/WebCrawler"],
crawlingCallback=crawlingCallback,
browsersDriverType=DRIVER_TYPE.chrome,
proxies=getAllProxies(),
browserCount=10,
stopCrawlerAfterSeconds=10000000,
banditRoundDuration=10000000,
browserParams=\
{
"chromeDriverPath": "/home/hayj/Programs/browserdrivers/chromedriver",
"phantomjsPath": "/home/hayj/Programs/headlessbrowsers/phantomjs-2.1.1-linux-x86_64/bin/phantomjs",
},
)
crawler.start()
| 2.453125
| 2
|
ftprelayer/__init__.py
|
meteogrid/FTPRelayer
| 0
|
12779318
|
<reponame>meteogrid/FTPRelayer<filename>ftprelayer/__init__.py
import re
import sys
import datetime
import os
import logging
import shutil
from threading import Thread, Event
from fnmatch import fnmatchcase
import zipfile
from logging import Formatter
try:
import queue
from io import BytesIO
except ImportError:
# support python < 3
import Queue as queue
from cStringIO import StringIO as BytesIO
import validate
from configobj import ConfigObj
from ftputil import FTPHost
from pkg_resources import resource_filename
import pyinotify
import davclient
from .util import import_string
log = logging.getLogger(__name__)
LOG_FORMAT = "%(asctime)s %(process)d %(levelname)-5.5s [%(name)s] %(message)s"
SYSLOG_FORMAT = "%(name)s [%(process)d]: %(levelname)-5.5s %(message)s"
class Application(object):
_watch_mask = (pyinotify.IN_CLOSE_WRITE | pyinotify.IN_MOVED_TO)
configspec_filename = resource_filename(__name__, 'configspec.ini')
error_subdir = 'failed'
now = datetime.datetime.now # To mock in tests
def __init__(self, archive_dir=None):
self._relayers = []
self._processors = {}
self._wm = pyinotify.WatchManager()
self._notifier = pyinotify.ThreadedNotifier(self._wm)
self._queue_processor = Thread(target=self._process_queue)
self._queue = queue.Queue()
self._stopping = Event()
self._archive_dir = archive_dir
@classmethod
def from_config(cls, configfile):
spec = ConfigObj(cls.configspec_filename, list_values=False,
_inspec=True)
config = ConfigObj(configfile, configspec=spec)
if config.validate(validate.Validator()) is not True:
raise AssertionError("Config is not valid")
cls._setup_logging(config['logging'])
self = cls(**dict(config['main']))
for r in self._relayers_from_config(config['relayers']):
self.add_relayer(r)
return self
@classmethod
def _setup_logging(cls, config):
logging.basicConfig(
level = getattr(logging, config['level']),
filename = config['filename'],
mode = 'a+',
stream = sys.stderr if not config['filename'] else None,
format = LOG_FORMAT,
)
if config['syslog']['address']:
from logging.handlers import SysLogHandler
cfg = dict(config['syslog'])
            if ':' in cfg['address']:
                host, port = cfg['address'].split(':')
                cfg['address'] = (host, int(port))
handler = logging.handlers.SysLogHandler(**cfg)
handler.setFormatter(Formatter(SYSLOG_FORMAT))
logging.root.addHandler(handler)
def _relayers_from_config(self, section):
for name in section.sections:
yield Relayer.from_config(name, section[name])
def start(self, block=False):
self._notifier.start()
self._queue_processor.start()
if block:
while True:
self._stopping.wait(1)
def stop(self):
self._stopping.set()
self._notifier.stop()
self._queue_processor.join()
def add_relayer(self, relayer):
self._relayers.append(relayer)
for p in relayer.paths:
self._add_watch(relayer, p)
def _add_watch(self, relayer, path):
dir = os.path.dirname(path)
processor = self._get_or_make_processor(dir)
processor.add_relayer(relayer)
def _get_or_make_processor(self, dir):
processor = self._processors.get(dir)
if processor is None:
processor = self._processors[dir] = _EventProcessor(self._queue)
self._wm.add_watch(dir, self._watch_mask,
proc_fun=processor)
return processor
def _process_queue(self):
while not self._stopping.isSet():
try:
relayer, path = self._queue.get(True, .5)
except queue.Empty:
pass
else:
try:
relayer.process(path)
self._archive(relayer, path)
except Exception as e:
try:
log.exception("When processing %r, %r, %r", relayer.name, path, e)
self._archive(relayer, path, has_error=True)
except:
pass
def _archive(self, relayer, path, has_error=False):
if self._archive_dir is None:
return
dest = self._archive_path(relayer, path, has_error=has_error)
destdir = os.path.dirname(dest)
if not os.path.exists(destdir):
os.makedirs(destdir)
log.info("Archiving %s -> %s", path, dest)
shutil.move(path, dest)
_serial_re = re.compile(r'^(.*?)\.(\d+)$')
def _archive_path(self, relayer, path, no_clobber=True, has_error=False):
dir = self._archive_dir
if has_error:
dir = os.path.join(dir, self.error_subdir)
subdir = os.path.join(dir, relayer.name, self.now().strftime('%Y/%m/%d'))
ret = os.path.join(subdir, relayer.relpathto(path))
while no_clobber and os.path.exists(ret):
m = self._serial_re.match(ret)
if m:
serial = int(m.group(2))
ret = m.group(1) + ('.%d'%(serial+1))
else:
ret += '.1'
return ret
class _EventProcessor(pyinotify.ProcessEvent):
def __init__(self, queue):
self.queue = queue
self.relayers = []
super(_EventProcessor, self).__init__()
def _process(self, event):
log.debug("got event: %r", event)
for r in self.relayers:
if r.path_matches(event.pathname):
self.queue.put((r, event.pathname))
process_IN_CLOSE_WRITE = _process
process_IN_MOVED_TO = _process
def add_relayer(self, relayer):
self.relayers.append(relayer)
class Relayer(object):
def __init__(self, name, uploader, paths, processor=None):
self.name = name
self.uploader = uploader if uploader is not None else _NullUploader()
self.paths = paths
self.processor = processor
@classmethod
def from_config(cls, name, section):
uploader = Uploader.from_config(section['uploader'])
processor = cls._make_processor(section['processor'])
return cls(name=name,
paths=section['paths'],
uploader=uploader,
processor=processor)
@classmethod
def _make_processor(cls, section):
cls_or_func = import_string(section['use'])
if section.extra_values:
args = dict((k, section[k]) for k in section.extra_values)
return cls_or_func(**args)
else:
return cls_or_func
def path_matches(self, path):
return any(fnmatchcase(path, p) for p in self.paths)
def relpathto(self, path):
base = os.path.commonprefix(self.paths+[path])
return os.path.relpath(path, base)
def process(self, path):
log.info("Relayer '%s' processing '%s'", self.name, path)
if self.processor is not None:
self._process_with_processor(path)
else:
self._process_without_processor(path)
def _process_with_processor(self, path):
for filename, data in self.processor(path):
self.uploader.upload(filename, data)
def _process_without_processor(self, path):
with open(path) as f:
self.uploader.upload(os.path.basename(path), f.read())
class Uploader(object):
__uploaders__ = {}
def __init__(self, host, username, password=<PASSWORD>, dir='/'):
self.host = host
self.username = username
self.password = password
self.dir = dir
@classmethod
def register(cls, key):
def wrap(subcls):
cls.__uploaders__[key] = subcls
return subcls
return wrap
@classmethod
def from_config(cls, section):
if section['use'] and ':' in section['use']:
subcls = import_string(section['use'])
else:
subcls = cls.__uploaders__[section['use']]
if cls is subcls:
raise AssertionError("%r must override from_config()"%subcls)
return subcls.from_config(section)
def upload(self, filename, data):
raise NotImplementedError("Abstract method must be overriden")
@Uploader.register(None)
class _NullUploader(object):
def upload(self, filename, data):
pass
@classmethod
def from_config(cls, section):
return cls()
@Uploader.register('composite')
class CompositeUploader(Uploader):
@classmethod
def from_config(cls, section):
build = Uploader.from_config
uploaders = [build(section[name]) for name in sorted(section.sections)]
return cls(uploaders)
def __init__(self, uploaders):
self.uploaders = uploaders
def upload(self, filename, data):
for uploader in self.uploaders:
try:
uploader.upload(filename, data)
except:
log.exception("executing %r, %r", uploader, filename)
@Uploader.register('ftp')
class FTPUploader(Uploader):
    FTPHost = FTPHost # for mock injection in tests
@classmethod
def from_config(cls, section):
return cls(section['host'], section['username'],
section.get('password'), section.get('dir','/'))
def upload(self, filename, data):
with self.FTPHost(self.host, self.username, self.password) as ftp:
dir = self.dir.rstrip('/') + '/'
ftp.makedirs(dir)
destname = dir + filename
dest = ftp.file(destname, 'wb')
log.info("FTPUploader.upload: %s -> %s", filename, destname)
ftp.copyfileobj(BytesIO(data), dest)
dest.close()
@Uploader.register('dav')
class DAVUploader(Uploader):
    DAVClient = davclient.DAVClient # for mock injection in tests
@classmethod
def from_config(cls, section):
return cls(section['host'], section['username'],
section.get('password'))
def upload(self, filename, data):
client = self.DAVClient(self.host)
client.set_basic_auth(self.username, self.password)
destname = self.host + filename
log.info("DAVUploader.upload: %s -> %s", filename, destname)
client.put(destname, data)
assert 200 <= client.response.status < 300, client.response.reason
@Uploader.register('scp')
class SCPUploader(FTPUploader):
def upload(self, filename, data):
raise NotImplementedError("TODO")
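
# Hedged sketch (not part of the original module): how an additional backend
# could hook into the Uploader.register mechanism above. The 'local' key, the
# LocalCopyUploader name, and the config fields read here are illustrative only.
@Uploader.register('local')
class LocalCopyUploader(Uploader):
    @classmethod
    def from_config(cls, section):
        return cls(section.get('host', ''), section.get('username', ''),
                   section.get('password'), section.get('dir', '/'))

    def upload(self, filename, data):
        # Write the payload into self.dir on the local filesystem.
        dest = os.path.join(self.dir, filename)
        log.info("LocalCopyUploader.upload: %s -> %s", filename, dest)
        with open(dest, 'wb') as f:
            f.write(data)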
class add_prefix(object):
def __init__(self, prefix):
self.prefix = prefix
def __call__(self, path):
new_name = self.prefix + os.path.basename(path)
with open(path) as f:
yield new_name, f.read()
class add_prefix_to_zip_contents(object):
def __init__(self, prefix):
self.prefix = prefix
def __call__(self, path):
buff = BytesIO()
source = zipfile.ZipFile(path, 'r')
target = zipfile.ZipFile(buff, 'w', zipfile.ZIP_DEFLATED)
for zi in source.filelist:
new_name = self.prefix + zi.filename
target.writestr(new_name, source.read(zi))
source.close()
target.close()
yield os.path.basename(path), buff.getvalue()
class add_date_prefix(object):
"""
Adds current date as a prefix
>>> processor = add_date_prefix('%Y_%m_')
>>> processor.now = lambda: datetime.datetime(2007,3,1)
>>> processor._new_name('foo')
'2007_03_foo'
"""
now = datetime.datetime.now # To mock in tests
def __init__(self, format='%Y%m%d'):
self.format = format
def __call__(self, path):
with open(path) as f:
yield self._new_name(path), f.read()
def _new_name(self, path):
return self.now().strftime(self.format) + os.path.basename(path)
def main(args=sys.argv):
if len(args)<2:
        sys.stderr.write("Usage %s <configfile>\n" % args[0])
return -1
app = Application.from_config(args[1])
try:
log.info("Starting app")
app.start(True)
except (KeyboardInterrupt, SystemExit):
log.info("Stopping app")
app.stop()
else:
app.stop()
| 1.867188
| 2
|
bayleef/utils.py
|
Kelvinrr/bayleef
| 1
|
12779319
|
<filename>bayleef/utils.py
import json
import os
import re
import subprocess
import sys
from datetime import datetime
from functools import partial, reduce
from glob import glob
from subprocess import PIPE, Popen
import yaml
import gdal
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plio
import pvl
import wget
import osr
from plio.io.io_gdal import GeoDataset
import autocnet
from autocnet import CandidateGraph
from autocnet.graph.edge import Edge
from autocnet.matcher import suppression_funcs
from autocnet.matcher.subpixel import (clip_roi, subpixel_phase,
subpixel_template)
from pysis.exceptions import ProcessError
from pysis.isis import (cam2map, campt, footprintinit, jigsaw, map2map,
pointreg, spiceinit)
sys.path.insert(0, os.path.abspath('..'))
import logging
logger = logging.getLogger('Bayleef')
from bayleef import config
def get_path(response, root, dataset):
"""
"""
if isinstance(response, dict):
response = [response]
try:
for data in response:
scene_id = data['entityId']
s = data['summary']
date = datetime.strptime(data['acquisitionDate'], '%Y-%m-%d')
pathstr, rowstr = re.findall(r'Path: \b\d+\b|Row: \b\d+\b', s)
path = pathstr.split(' ')[1]
row = rowstr.split(' ')[1]
return os.path.join(root, dataset, str(date.year), row, path, scene_id)
except Exception as e:
logger.error('Failed to process request: {}'.format(e))
def keys_to_lower(dictionary):
"""
"""
for key in dictionary.keys():
if isinstance(dictionary[key], dict):
keys_to_lower(dictionary[key])
dictionary[key.lower()] = dictionary.pop(key)
def apply_dict(dictionary, func, *args, **kwargs):
"""
"""
for key in dictionary.keys():
if isinstance(dictionary[key], dict):
apply_dict(dictionary[key], func)
dictionary[key] = func(dictionary[key], *args, **kwargs)
def geolocate(infile, outfile, lats, lons, dstSRS="EPSG:4326", format="GTiff", woptions={}, toptions={}):
"""
"""
image = gdal.Open(infile, gdal.GA_Update)
geoloc= {
'X_DATASET' : lons,
'X_BAND' : '1',
'Y_DATASET' : lats,
'Y_BAND' : '1',
'PIXEL_OFFSET' : '0',
'LINE_OFFSET' : '0',
'PIXEL_STEP' : '1',
'LINE_STEP' : '1'
}
image.SetMetadata(geoloc, 'GEOLOCATION')
    # explicitly close the image
del image
gdal.Warp(outfile, infile, format=format, dstSRS=dstSRS)
return GeoDataset(outfile)
def master_isvalid(file):
if len(gdal.Open(file).GetSubDatasets()) != 17:
return False
calibrated_image = GeoDataset('HDF4_SDS:UNKNOWN:"{}":37'.format(file))
lats = GeoDataset('HDF4_SDS:UNKNOWN:"{}":30'.format(file))
lons = GeoDataset('HDF4_SDS:UNKNOWN:"{}":31'.format(file))
res = []
for ds in [calibrated_image, lats, lons]:
arr = ds.read_array()
test = np.empty(arr.shape)
test[:] = ds.metadata['_FillValue']
res.append(not (test == arr).all())
return all(res)
def run_davinci(script, infile=None, outfile=None, bin_dir=config.davinci_bin, args=[]):
    '''
    Run a davinci script from bin_dir with from=/to= arguments plus any extra
    positional args; returns (stdout, stderr) and raises on a non-zero exit code.
    '''
command = ['davinci', '-f', os.path.join(bin_dir, script), 'from={}'.format(infile), 'to={}'.format(outfile)]
# add additional positional args
if args:
command.extend(args)
logger.info(' '.join(command))
p = Popen(command, stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = p.communicate(b"input data that is passed to subprocess' stdin")
rc = p.returncode
if rc != 0:
raise Exception('Davinci returned non-zero error code {} : \n{}\n{}'.format(rc, err.decode('utf-8'), output.decode('utf-8') ))
return output.decode('utf-8'), err.decode('utf-8')
def init(outfile, additional_kernels={}):
try:
logger.info("Running Spiceinit on {}".format(outfile))
spiceinit(from_=outfile, spksmithed=True, **additional_kernels)
except ProcessError as e:
logger.error('file: {}'.format(outfile))
logger.error("STDOUT: {}".format(e.stdout.decode('utf-8')))
logger.error("STDERR: {}".format(e.stderr.decode('utf-8')))
raise Exception('Spice Init Error')
try:
logger.info("Running Footprintinit on {}".format(outfile))
footprintinit(from_=outfile)
except ProcessError as e:
logger.error('file: {}'.format(outfile))
logger.error("STDOUT: {}".format(e.stdout.decode('utf-8')))
logger.error("STDERR: {}".format(e.stderr.decode('utf-8')))
raise Exception('Footprint Init Error')
def thm_crop(infile, outfile, minlat, maxlat):
run_davinci('thm_crop_lat.dv', infile, outfile, args=['minlat={}'.format(str(minlat)), 'maxlat={}'.format(maxlat)])
def match_pair(img1_path, img2_path, figpath=None):
src_points = point_grid(GeoDataset(img1_path), step=50)
f = open('temp.txt', 'w+')
f.write('\n'.join('{}, {}'.format(int(x),int(y)) for x,y in src_points))
del f
label = pvl.loads(campt(from_=img1_path, coordlist='temp.txt', coordtype='image'))
points = []
for group in label:
try:
lat = group[1]['PlanetocentricLatitude'].value
lon = group[1]['PositiveEast360Longitude'].value
points.append([lat, lon])
except Exception as e:
continue
logger.info("{} points from image1 successfully reprojected to image2, rejected {}".format(str(len(points)), str(len(src_points)-len(points))))
if len(points) == 0:
raise Exception("No valid points were found for pair {} {}".format(img1_path, img2_path))
f = open('temp.txt', 'w+')
f.write('\n'.join('{}, {}'.format(x,y) for x,y in points))
del f
img2label = pvl.loads(campt(from_=img2_path, coordlist='temp.txt', coordtype='ground', allowoutside=False))
dst_lookup = {}
for i,group in enumerate(img2label):
if not group[1]['Error']:
line = group[1]['Line']
sample = group[1]['Sample']
dst_lookup[i] = [sample, line]
filelist = [img1_path, img2_path]
cg = CandidateGraph.from_filelist(filelist)
edge = cg[0][1]['data']
img1 = GeoDataset(img1_path)
img2 = GeoDataset(img2_path)
src_keypoints = pd.DataFrame(data=src_points, columns=['x', 'y'])
src_keypoints['response'] = 0
src_keypoints['angle'] = 0
src_keypoints['octave'] = 0
src_keypoints['layer'] = 0
src_keypoints
edge.source._keypoints = src_keypoints
results = []
dst_keypoints = []
dst_index = 0
distances = []
arr1 = img1.read_array()
arr2 = img2.read_array()
del img1
del img2
for keypoint in edge.source.keypoints.iterrows():
index, row = keypoint
sx, sy = row['x'], row['y']
try:
dx, dy = dst_lookup[index]
except KeyError:
continue
try:
ret = refine_subpixel(sx, sy, dx, dy, arr1, arr2, size=50, reduction=10, convergence_threshold=1)
except Exception as ex:
continue
if ret is not None:
x,y,metrics = ret
else:
continue
dist = np.linalg.norm([x-dx, y-dy])
results.append([0, index, 1, dst_index, dist])
dst_keypoints.append([x,y, 0,0,0,0,0])
dst_index += 1
matches = pd.DataFrame(data=results, columns=['source_image', 'source_idx',
'destination_image', 'destination_idx',
'distance'])
if matches.empty:
logger.error("After matching points, matches dataframe returned empty.")
dst_keypoints = pd.DataFrame(data=dst_keypoints, columns=['x', 'y', 'response', 'size', 'angle', 'octave', 'layer'])
edge.destination._keypoints = dst_keypoints
edge._matches = matches
edge.compute_fundamental_matrix()
distance_check(edge, clean_keys=['fundamental'])
if figpath:
plt.figure(figsize=(10,25))
cg[0][1]['data'].plot(clean_keys=['fundamental', 'distance'], nodata=-32768.0)
plt.savefig(figpath)
plt.close()
return cg
def refine_subpixel(sx, sy, dx, dy, s_img, d_img, size=100, reduction=25, convergence_threshold=.5):
"""
    Iteratively apply a subpixel phase matcher to source (s_img) and destination (d_img)
images. The size parameter is used to set the initial search space. The algorithm
is recursively applied to reduce the total search space by reduction until the convergence criteria
are met. Convergence is defined as the point at which the computed shifts (x_shift,y_shift) are
    less than the convergence_threshold. If the size is reduced to less than
    1 pixel, the algorithm terminates and returns None.
Parameters
----------
sx : numeric
The x position of the center of the template to be matched to
sy : numeric
The y position of the center of the template to be matched to
dx : numeric
The x position of the center of the search to be matched from
dy : numeric
The y position of the center of the search to be matched to
s_img : object
A plio geodata object from which the template is extracted
d_img : object
A plio geodata object from which the search is extracted
size : int
        One half of the total size of the template, so e.g. a size of 251 results in a 502 pixel search space
reduction : int
With each recursive call to this func, the size is reduced by this amount
convergence_threshold : float
The value under which the result can shift in the x and y directions to force a break
Returns
-------
dx : float
The new x value for the match in the destination (d) image
dy : float
The new y value for the match in the destination (d) image
metrics : tuple
        A tuple of metrics. In the case of the phase matcher these are the difference
and RMSE in the phase dimension.
"""
s_template, _, _ = clip_roi(s_img, sx, sy,
size_x=size, size_y=size)
d_search, dxr, dyr = clip_roi(d_img, dx, dy,
size_x=size, size_y=size)
if s_template.shape != d_search.shape:
s_size = s_template.shape
d_size = d_search.shape
updated_size = int(min(s_size + d_size) / 2)
s_template, _, _ = clip_roi(s_img, sx, sy,
size_x=updated_size, size_y=updated_size)
d_search, dxr, dyr = clip_roi(d_img, dx, dy,
size_x=updated_size, size_y=updated_size)
# Apply the phase matcher
shift_x, shift_y, metrics = subpixel_phase(s_template, d_search, upsample_factor=100)
# Apply the shift to d_search and compute the new correspondence location
dx += (shift_x + dxr)
dy += (shift_y + dyr)
# Break if the solution has converged
if abs(shift_x) < convergence_threshold and abs(shift_y) < convergence_threshold:
return dx, dy, metrics
else:
size -= reduction
if size < 1:
return
return refine_subpixel(sx, sy, dx, dy, s_img, d_img, size)
def normalize_image_res(image1, image2, image2out, image1out, out_type='ISIS3', nodata=-32768.0):
width = max(image1.pixel_width, image2.pixel_width)
f1 = gdal.Warp('/vsimem/temp1.out', image1.file_name, targetAlignedPixels=True, xRes = width, yRes = width, format = out_type)
f2 = gdal.Warp('/vsimem/temp2.out', image2.file_name, targetAlignedPixels=True, xRes = width, yRes = width, format = out_type)
del(f1, f2)
temp1 = GeoDataset('/vsimem/temp1.out')
temp2 = GeoDataset('/vsimem/temp2.out')
minx = 0
miny = 0
maxx = max(temp1.read_array().shape[1], temp2.read_array().shape[1])
maxy = max(temp1.read_array().shape[0], temp2.read_array().shape[0])
fp1 = gdal.Translate(image1out, '/vsimem/temp1.out', srcWin = [minx, miny, maxx - minx, maxy - miny], noData=nodata)
fp2 = gdal.Translate(image2out, '/vsimem/temp2.out', srcWin = [minx, miny, maxx - minx, maxy - miny], noData=nodata)
del(fp1, fp2)
def preprocess(thm_id, outdir, day=True, validate=False, projected_images=True, map_file=config.themis.map_file, originals=True, gtiffs=False, meta=True, index=True):
'''
Downloads Themis file by ID and runs it through spice init and
footprint init.
'''
original = os.path.join(outdir, 'original')
images = os.path.join(outdir, 'images')
ogcube = os.path.join(original, 'l1.cub')
projcube = os.path.join(original, 'l2.cub')
metafile = os.path.join(outdir, 'meta.json')
indexfile = os.path.join(outdir, 'index.json')
os.makedirs(original, exist_ok=True)
os.makedirs(images, exist_ok=True)
kerns = get_controlled_kernels(thm_id)
if os.path.exists(outdir) and os.path.exists(original) and os.path.exists(metafile) and os.path.exists(indexfile) :
logger.info("File {} Exists, skipping redownload.".format(outdir))
return bool(kerns)
if originals:
if day:
out, err = run_davinci('thm_pre_process.dv', infile=thm_id, outfile=ogcube)
else:
out, err = run_davinci('thm_pre_process_night.dv', infile=thm_id, outfile=ogcube)
if validate:
try:
init(ogcube, additional_kernels=kerns)
label = pvl.loads(campt(from_=ogcube))
except ProcessError as e:
logger.info('campt Error')
                logger.info('file: {}'.format(ogcube))
logger.error("STDOUT: {}".format(e.stdout.decode('utf-8')))
logger.error("STDERR: {}".format(e.stderr.decode('utf-8')))
incidence_angle = label['GroundPoint']['Incidence'].value
if day and incidence_angle > 90:
logger.info("incidence angle suggests night, but {} was proccessed for day, reprocessing".format(thm_id))
out, err = run_davinci('thm_pre_process_night.dv', infile=thm_id, outfile=ogcube)
init(ogcube, additional_kernels=kerns)
elif not day and incidence_angle <= 90:
logger.info("incidence angle suggests day, but {} was proccessed for night, reprocessing".format(thm_id))
out, err = run_davinci('thm_pre_process.dv', infile=thm_id, outfile=ogcube)
init(ogcube, additional_kernels=kerns)
else:
init(ogcube, additional_kernels=kerns)
if projected_images:
project(ogcube, projcube, map_file)
img = GeoDataset(ogcube)
if meta:
meta = json.loads(json.dumps(img.metadata, default = lambda o:str(o) if isinstance(o, datetime) else o))
try:
meta['map_file'] = str(pvl.load(map_file))
except Exception as e:
logger.error("Failed to load map file {}:\n{}".format(map_file, e))
raise Exception("Invalid map file.")
        if kerns:
            logger.info('Used Controlled Kernels')
            meta['used_control_kernels'] = True
        json.dump(meta, open(metafile, 'w+'))
if index:
date = img.metadata['IsisCube']['Instrument']['StartTime']
index_meta = {}
index_meta['geom'] = img.footprint.ExportToWkt()
index_meta['id'] = thm_id
index_meta['time'] = {}
index_meta['time']['year'] = date.year
index_meta['time']['month'] = date.month
index_meta['time']['day'] = date.day
index_meta['time']['hour'] = date.hour
        json.dump(index_meta, open(indexfile, 'w+'))
    nbands = img.nbands
    del img
if gtiffs:
for band in range(1,nbands+1):
tiffpath = os.path.join(images, 'b{}.tiff'.format(band))
logger.info('Writing: {}'.format(tiffpath))
gdal.Translate(tiffpath, ogcube, bandList=[band], format='GTiff')
return bool(kerns)
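# Hedged usage sketch (not in the original source): download and preprocess a
# single THEMIS observation into an output directory, writing the metadata and
# index JSON files. The observation ID and output path are placeholders.
def _example_preprocess():
    used_controlled = preprocess('I01001002', '/tmp/I01001002', day=True,
                                 validate=False, gtiffs=False)
    logger.info('controlled kernels used: %s', used_controlled)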
def date_converter(o):
if isinstance(o, np.ndarray):
return o.tolist()
if isinstance(o, datetime):
return o.isoformat()
def print_dict(d):
print(str(yaml.dump(json.loads(json.dumps(d, default=date_converter)), default_flow_style=False )))
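# Hedged usage sketch (not in the original source): print_dict round-trips a
# dict through JSON (date_converter handles datetimes and numpy arrays) and
# renders it as YAML for readable logging. The values are placeholders.
def _example_print_dict():
    print_dict({'id': 'I01001002', 'start': datetime.utcnow(), 'shape': np.zeros(3)})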
def point_grid(img, nodata=-32768.0, step=50):
arr = img.read_array()
    xs = np.linspace(0, arr.shape[1]-1, num=arr.shape[1]//step)
    ys = np.linspace(0, arr.shape[0]-1, num=arr.shape[0]//step)
# array of x,y pairs
points = np.transpose([np.tile(xs, len(ys)), np.repeat(ys, len(xs))])
points = [p for p in points if arr[int(p[1])][int(p[0])] != nodata]
return points
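# Hedged usage sketch (not in the original source): build a sparse grid of
# valid (x, y) sample points over an image, skipping nodata pixels. The path
# is a placeholder.
def _example_point_grid(path='example.cub'):
    img = GeoDataset(path)
    pts = point_grid(img, step=100)
    logger.info('generated %d grid points', len(pts))
    return pts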
def distance_check(e, clean_keys=[]):
matches, mask = e.clean(clean_keys)
thresh = np.percentile(matches['distance'], 90)
mask = np.ones(mask.shape[0], dtype=bool)
mask[e.matches['distance'] >= thresh] = False
e.masks['distance'] = mask
def project(img, to, mapfile, matchmap=False):
params = {
'from_' : img,
'map' : mapfile,
'to' : to,
'matchmap': matchmap
}
if GeoDataset(img).metadata['IsisCube'].get('Mapping', False):
try:
params['interp'] = 'NEARESTNEIGHBOR'
logger.info('Running map2map on {} with params {}'.format(img, params))
map2map(**params)
except ProcessError as e:
logger.info('map2map Error')
logger.error("STDOUT: {}".format(e.stdout.decode('utf-8')))
logger.error("STDERR: {}".format(e.stderr.decode('utf-8')))
else:
try:
logger.info('Running cam2map on {}'.format(img))
cam2map(**params)
except ProcessError as e:
logger.info('cam2map Error')
logger.error("STDOUT: {}".format(e.stdout.decode('utf-8')))
logger.error("STDERR: {}".format(e.stderr.decode('utf-8')))
def get_controlled_kernels(thmid, kernel_dir=config.themis.controlled_kernels, day=True):
if not kernel_dir:
return {}
found = False
if day:
kernels = os.path.join(kernel_dir, 'DIR')
else:
kernels = os.path.join(kernel_dir, 'NIR')
files = glob(os.path.join(kernels, '*.txt'))
for f in files:
contents = open(f, 'r').read()
if thmid in contents:
found = True
break
return {
'ck' : glob(os.path.join(kernels, '*_ck.bc'))[0],
'spk' : glob(os.path.join(kernels, '*_spk.bsp'))[0]
} if found else {}
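# Hedged usage sketch (not in the original source): look up controlled ck/spk
# kernels for a THEMIS ID so they can be passed to spiceinit via
# init(..., additional_kernels=kerns). The ID is a placeholder.
def _example_controlled_kernels(thm_id='I01001002'):
    kerns = get_controlled_kernels(thm_id, day=True)
    if kerns:
        logger.info('found controlled kernels for %s: %s', thm_id, sorted(kerns))
    return kerns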
def array2raster(rasterfn, array, newRasterfn):
"""
Writes an array to a GeoDataset using another dataset as reference. Borrowed
from: https://pcjericks.github.io/py-gdalogr-cookbook/raster_layers.html
Parameters
----------
rasterfn : str, GeoDataset
Dataset or path to the dataset to use as a reference. Geotransform
and spatial reference information is copied into the new image.
array : np.array
Array to write
newRasterfn : str
Filename for new raster image
Returns
-------
: GeoDataset
File handle for the new raster file
"""
naxis = len(array.shape)
assert naxis == 2 or naxis == 3
if naxis == 2:
        # expand the third dimension
array = array[:,:,None]
nbands = array.shape[2]
if isinstance(rasterfn, GeoDataset):
rasterfn = rasterfn.file_name
raster = gdal.Open(rasterfn)
geotransform = raster.GetGeoTransform()
originX = geotransform[0]
originY = geotransform[3]
pixelWidth = geotransform[1]
pixelHeight = geotransform[5]
cols = array.shape[1]
rows = array.shape[0]
driver = gdal.GetDriverByName('GTiff')
outRaster = driver.Create(newRasterfn, cols, rows, nbands, gdal.GDT_Float32)
outRaster.SetGeoTransform((originX, pixelWidth, 0, originY, 0, pixelHeight))
for band in range(1,nbands+1):
outband = outRaster.GetRasterBand(band)
# Bands use indexing starting at 1
outband.WriteArray(array[:,:,band-1])
outband.FlushCache()
outRasterSRS = osr.SpatialReference()
outRasterSRS.ImportFromWkt(raster.GetProjectionRef())
outRaster.SetProjection(outRasterSRS.ExportToWkt())
outRaster = None
return GeoDataset(newRasterfn)
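# Hedged usage sketch (not in the original source): write a derived array back
# to disk, borrowing georeferencing from an existing raster. The file names
# are placeholders.
def _example_array2raster(reference='reference.tif'):
    ref = GeoDataset(reference)
    derived = ref.read_array().astype(np.float32) * 2.0
    return array2raster(ref, derived, 'derived.tif')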
| 1.976563
| 2
|
graph-db/extractor/src/ncbi/taxonomy_LMDB_annotation.py
|
SBRG/lifelike
| 8
|
12779320
|
from ncbi.ncbi_taxonomy_parser import TaxonomyParser, Taxonomy
from common.database import *
from common.utils import get_data_dir
import os
# default strain for their species for organism searching
LMDB_SPECIES_MAPPING_STRAIN = ['367830','511145', '272563', '208964', '559292']
DATA_SOURCE = 'NCBI Taxonomy'
def write_LMDB_annotation_file(database, base_dir, excluded_names=['environmental sample']):
'''
    Export species-level taxonomy nodes and their children for use in LMDB annotation.
:param excluded_names: list of tax names that should not be used for annotation
:return:
'''
# find the strains that are used to replace its parent species for annotation
query = """
match p=(n:Taxonomy)-[:HAS_PARENT*0..]->(:Taxonomy {rank: 'species'})
where n.{PROP_ID} in $tax_ids
with nodes(p) as nodes, n
unwind nodes as p
with n, p where n <> p
return n.{PROP_ID} as tax_id, p.{PROP_ID} as parent_id, p.name as parent_name
"""
df = database.get_data(query, {'tax_ids': LMDB_SPECIES_MAPPING_STRAIN})
replace_id_map = {}
for index, row in df.iterrows():
replace_id_map[row['parent_id']] = row['tax_id']
parser = TaxonomyParser(base_dir)
nodes = parser.parse_files()
outfile = os.path.join(parser.output_dir, 'species_for_LMDB.tsv')
with open(outfile, 'w') as f:
f.write('tax_id\trank\tcategory\tname\tname_class\torig_tax_id\tdata_source\n')
for node in nodes.values():
if node.top_category and node.rank == 'species':
_write_node_names(node, f, excluded_names, replace_id_map)
def _write_node_names(tax, file, exclude_node_names=[], replace_id_map=None):
"""
recursively write node names and children names
:param tax: tax node
:param file: outfile
:param exclude_node_names:
:param replace_id_map:
:return:
"""
if exclude_node_names:
# if tax name contains the exclude_node_name, return without writing
for name in tax.names.keys():
for exclude in exclude_node_names:
if exclude in name:
return
if replace_id_map and tax.tax_id in replace_id_map:
tax.orig_id = tax.tax_id
tax.tax_id = replace_id_map[tax.orig_id]
lines = ''
for name, name_class in tax.names.items():
lines = lines + '\t'.join([tax.tax_id, tax.rank, tax.top_category, name, name_class, tax.orig_id, DATA_SOURCE]) + '\n'
file.write(lines)
for child in tax.children:
_write_node_names(child, file, exclude_node_names, replace_id_map)
def main():
database = get_database()
# pass the write base_data_dir for the parser
write_LMDB_annotation_file(database, get_data_dir())
database.close()
if __name__ == "__main__":
main()
| 2.21875
| 2
|
parse/parse_uniprot_header.py
|
Hua-CM/HuaSmallTools
| 21
|
12779321
|
# -*- coding: utf-8 -*-
# @Time : 2020/11/15 13:49
# @Author : <NAME>
# @FileName: parse_uniprot_header.py
# @Usage:
# @Note:
# @E-mail: <EMAIL>
import pandas as pd
import re
class UniprotParse:
def __init__(self, _input_fasta):
self.input = _input_fasta
self.output = None
def parse(self):
with open(self.input, 'r', encoding='utf-8') as f:
_header_list = [line.strip('>') for line in f if line.startswith('>')]
_header_parse_list = []
for _header in _header_list:
_ele_dict = {'ID': _header.split()[0].split('|')[1],
'Entry': _header.split()[0].split('|')[2]}
_description_list = []
for _ele in _header.split()[1:]:
pre_fix = re.match('OX=|OS=|GN=|PE=|SV=', _ele)
if pre_fix:
_ele_dict[pre_fix.group().strip("=")] = _ele.split('=')[1]
else:
_description_list.append(_ele)
_ele_dict['Description'] = ' '.join(_description_list)
_header_parse_list.append(_ele_dict)
self.output = pd.DataFrame(_header_parse_list)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description="This is the script to get Uniprot fasta header information"
"and use it intrepret BLAST results")
sub_parser = parser.add_subparsers(title='', dest='interpret/parse')
sub_parser.required = True
parse_parser = sub_parser.add_parser(
'parse', help='Parse Uniprot fasta headers to a table')
parse_parser.add_argument('-i', '--input_file', required=True,
help='<filepath> The uniprot fasta')
parse_parser.add_argument('-o', '--output_file', required=True,
help='<filepath> The output path')
parse_parser.set_defaults(subcmd="parse")
interpret_parser = sub_parser.add_parser(
'interpret', help='Interpret BLAST results')
interpret_parser.add_argument('-i', '--input_file', required=True,
help='<filepath> The BLAST result, only format six is acceptable')
interpret_parser.add_argument('-u', '--uniprot', required=True,
                                  help='<filepath> The Uniprot fasta header information generated by the "parse" function')
interpret_parser.add_argument('-c', '--column', required=True, type=int,
help='<int> Specify which column in BLAST result contains the identifier of Uniprot')
interpret_parser.add_argument('-o', '--output_file', required=True,
help='<filepath> The output path')
interpret_parser.set_defaults(subcmd="interpret")
args = parser.parse_args()
if args.subcmd == "parse":
uni = UniprotParse(args.input_file)
uni.parse()
uni.output.to_csv(args.output_file, index=False, sep='\t')
if args.subcmd == "interpret":
blast_result = pd.read_table(args.input_file, header=None)
uniprot_info = pd.read_table(args.uniprot)
blast_result[args.column-1] = blast_result[args.column-1].apply(lambda x: x.split('|')[1])
result = pd.merge(blast_result, uniprot_info[['ID', 'GN', 'Description']],
left_on=args.column-1,
right_on='ID',
how='left')
result.drop('ID', axis=1, inplace=True)
result.to_csv(args.output_file, header=False, index=False, sep='\t', float_format='%.3g')
| 3.015625
| 3
|
migrations/versions/da6f10c8ebf4_add_site_airtable.py
|
bwlynch/FbScraper
| 10
|
12779322
|
"""add site airtable
Revision ID: da6f10c8ebf4
Revises: aaae4ae18288
Create Date: 2019-11-29 07:48:18.074193
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "da6f10c8ebf4"
down_revision = "aaae4ae18288"
branch_labels = None
depends_on = None
def upgrade():
op.add_column(
"Site", sa.Column("airtable_id", sa.String(256), nullable=True, unique=True)
)
try:
upgrade_data()
op.alter_column(
"Site", "airtable_id", nullable=False, existing_type=sa.String(256)
)
except Exception as e:
op.drop_column("Site", "airtable_id")
raise e
def downgrade():
op.drop_column("Site", "airtable_id")
sites = sa.sql.table(
"Site",
sa.Column("site_id", sa.Integer),
sa.Column("url", sa.String(1024)),
sa.Column("airtable_id", sa.String(256)),
)
def upgrade_data():
for site_id, url in [
(111, "https://www.youtube.com/channel/UCLZBXiS9ZrIXgKBs_SMfGBQ"),
(58, "https://www.facebook.com/blesseverydayaroundyou/"),
]:
op.execute(
sites.delete().where(sites.c.site_id == site_id and sites.c.url == url)
)
for url, airtable_id in airtable_id_map.items():
op.execute(
sites.update()
.where(sites.c.url == url)
.values({"airtable_id": airtable_id})
)
airtable_id_map = {
## XXX duplicated
# "https://www.youtube.com/channel/UCLZBXiS9ZrIXgKBs_SMfGBQ":"rec8rzS7SqKqnQuio",
"https://www.youtube.com/channel/UCmgDmqjxbkqIXu4rNbrKodA": "rectV3bxAU2YrpWQW",
"https://www.youtube.com/channel/UCLZBXiS9ZrIXgKBs_SMfGBQ": "rec6OsbxedCXaW1j1",
"https://www.youtube.com/channel/UCpu3bemTQwAU8PqM4kJdoEQ": "recYUDT5JflPA2OoF",
"https://www.youtube.com/channel/UCN2e8dLY9KnH-B15LyBM7Fg": "recQWxWEaVUWUcWYX",
"https://www.ptt.cc/bbs/HatePolitics/index.html": "recsh7wPi68vLDNWk",
"https://www.ptt.cc/bbs/Gossiping/index.html": "recLVrfhLyQDCzDA8",
"https://taronews.tw/": "recJFqGr5a1xfaf8o",
"https://www.cna.com.tw/": "recQUiciCUROnBe4A",
"http://news.ltn.com.tw/": "recMqu8b2B0fjCWIF",
"https://udn.com/": "reci0cxTv83iSeHl8",
"https://tw.appledaily.com/new/realtime": "recgBO5TsaGP8MLbg",
"https://tw.appledaily.com/": "recW0Y3DQ3DaeRQ7Y",
"https://www.ettoday.net/": "recJ9pSXGsxE4kmn9",
"https://news.ebc.net.tw/": "recBW5P0o0fKX2T1L",
"https://www.chinatimes.com/": "recslfJAoVKDbdh24",
"https://www.eatnews.net/": "rec3Wrnrb3GTcDivT",
"https://www.taiwanmazu.org/": "recB4NpLrTvUwWovp",
"http://tailian.taiwan.cn/": "recG8g1JoHti4T8fO",
"https://www.toutiao.com/": "recirH5ayaKXA633m",
"http://www.itaiwannews.cn/": "reczA8cBEGIcvwo1B",
"http://nooho.net": "recXoBEAH8TRdhZYj",
"http://taiwan-madnews.com": "recXa7wpjdcrWT8X7",
"http://taiwan-politicalnews.com": "recPnWuwH01QTAZPX",
"http://hssszn.com": "recBpcl1dLZQpY2Q5",
"http://fafa01.com": "recGN46B3LnnA8LbF",
"http://qiqi.today": "recRl8ORrU0IKWkBZ",
"http://defense.rocks": "recgFCKXWH8hBt6Rw",
"http://cnba.live": "rec3HARifvZvpwmzE",
"http://i77.today": "recSV8S0hvZY3ZTuA",
"http://77s.today": "recgTV83ZY5NWWnGT",
"http://www.qiqi.world/": "reca6qh8fo3mCqfCh",
"http://www.mission-tw.com/mission": "recJjdr5Jb4fGe9Os",
"http://www.taiwan.cn/": "recuC7NzKlui3dcd6",
"https://www.facebook.com/eatnews/": "recQshOYa9lZin1AU",
"https://www.facebook.com/search/top/?q=%E6%96%87%E5%B1%B1%E4%BC%AF&epa=SEARCH_BOX": "rec7xrqokEMg3s5L9",
"https://www.facebook.com/znk168/": "recIjToauNtBJdyQu",
"https://www.facebook.com/almondbrother/": "receHukPdyaKCtBMj",
"https://www.facebook.com/Colorlessrise/": "recMSPrWl8AuExQMk",
"https://www.facebook.com/pg/KSMissLin/groups/?referrer=pages_groups_card_cta&ref=page_internal": "rech9IRKLxxB0kx2w",
"https://www.facebook.com/%E5%BC%B7%E5%BC%B7%E6%BB%BE%E5%A4%A7%E5%93%A5-%E9%98%BF%E8%AA%8C-1088027454701943/?__tn__=%2Cd%2CP-R&eid=ARBiDxJohZf5_icvMw2BXVNG2nHG4VR9b_ArA_Tc6PfA98MtdnGw1xVKWvIdE-X1wfSteOnhr6PxVDUX": "recx88UIQkLjJ10wU",
"https://www.facebook.com/twherohan/": "recAY2H12zcSbCfhv",
"https://www.facebook.com/%E8%A8%B1%E6%B7%91%E8%8F%AF-130771133668155/": "recvnH2Lot8ypWNrl",
"https://www.facebook.com/hsiweiC/": "recVBlckyMtFmlh82",
"https://www.facebook.com/groups/260112827997606/": "recIttcUl3HPUoqzj",
"https://www.facebook.com/groups/391568921608431/": "recpKtnBclXwY4aqG",
"https://www.facebook.com/groups/2072874742809044/": "recDffdw3nHCyVE3j",
"https://www.facebook.com/groups/488305761638330/": "recm3eGGXPkOtfFLr",
"https://www.facebook.com/groups/389408621877306/": "recdvH8v3SJX5TpRZ",
"https://www.facebook.com/groups/768267203515635/": "recvKtQ84sirCdMzD",
"https://www.facebook.com/straitstoday/": "recLmQSJ5BUyrpKE6",
"https://www.facebook.com/youth86/": "recH6lOgxmwbsfu6N",
"https://www.facebook.com/groups/1148919035153224/": "recw8GIqZ6a4HXzR4",
"https://www.facebook.com/knowledge.practice.studio/": "rec7YTWk5wIUlQ25Z",
"https://www.facebook.com/Templelivenetwork/": "recwAHI4ZH36ZOeeb",
"https://www.facebook.com/%E4%B8%80%E8%B5%B7%E8%BF%BD%E5%8A%87%E5%90%A7-2407159445968433/": "recBYOI6sd8UPLnsm",
"https://www.facebook.com/KMTTCC/": "reciCaICxxil0pnSj",
"https://www.facebook.com/Quotations.life168/": "recGSreihqP7XX1C0",
"https://www.facebook.com/ZhongHuaYRM/": "recfLM0dY6CKhVNuR",
"https://www.facebook.com/happyworld88": "recMx7tumAkDqZulR",
"https://www.facebook.com/traveltheworld168/": "recSdwgOnLSFlZajU",
"https://www.facebook.com/yifanfengshun888/": "recQTMyEWf2xsCelK",
"https://www.facebook.com/world.tw/": "rec5cEt7NvB3TcI79",
"https://www.facebook.com/HaterIsHere/": "recTMSPJmmQXBfcDO",
"https://www.facebook.com/jesusSavesF13/": "rechrvzObklDq6Xcj",
"https://www.facebook.com/TaiwanNeutra/": "recANFv93ormFlTiT",
"https://www.facebook.com/%E9%9F%93%E5%AE%B6%E8%BB%8D%E9%90%B5%E7%B2%89%E8%81%AF%E7%9B%9F-837868789721803/": "recc9xwpmhaoLMgzx",
"https://www.facebook.com/%E7%B5%B1%E4%B8%80%E4%B8%AD%E5%9C%8B%E4%B8%AD%E5%9C%8B%E7%B5%B1%E4%B8%80-%E7%BB%9F%E4%B8%80%E4%B8%AD%E5%9B%BD%E4%B8%AD%E5%9B%BD%E7%BB%9F%E4%B8%80-1403317033298680/": "recmv1QvbaruPxERN",
"https://www.facebook.com/%E5%8F%8D%E8%94%A1%E8%8B%B1%E6%96%87%E7%B2%89%E7%B5%B2%E5%9C%98-257062087822640/": "recLTcnCQdOyMgZX4",
"https://www.facebook.com/CTTATTACK/": "recuhN7EituL81XfD",
"https://www.facebook.com/Gyhappyboard/": "recUfUuenCqEXY13X",
"https://www.facebook.com/%E8%A9%B1%E8%AA%AA%E9%82%A3%E4%BA%9B%E7%B7%A8%E9%80%A0%E7%9A%84%E4%BA%8B-304688810020434/": "rec4z05fcic3vlQyq",
## XXX duplicated
# "https://www.facebook.com/blesseverydayaroundyou/":"recUUs0ITu6PrpVIo",
"https://www.facebook.com/%E8%94%A1%E8%8B%B1%E6%96%87%E4%B8%8B%E5%8F%B0%E7%BD%AA%E7%8B%80%E9%9B%86%E7%B5%90%E7%B8%BD%E9%83%A8-121570255108696/": "reclAN9s2yWASp9A8",
"https://www.facebook.com/CapricornStory4U/": "recLduxn9D5XD2w3p",
"https://www.facebook.com/blesseverydayaroundyou/": "recVQ6iGSGFFAuK3I",
"https://www.facebook.com/inability.dpp/": "recojKVhcsmrUQVrV",
"https://www.facebook.com/%E8%97%8D%E8%89%B2%E6%AD%A3%E7%BE%A9%E5%8A%9B%E9%87%8F-1100522356652838/": "recm0Qil3pdQRPJq3",
"https://www.facebook.com/LIKEHISTORYWORLD/": "recaSQDs9KIuUZL3g",
"https://www.facebook.com/GCAironbloodarmy/": "recxjVgJQ4QA7vnP2",
"https://www.facebook.com/globalchinesenewsunion/": "recS0IahdjcUZ2uV5",
"https://www.facebook.com/GlobalChineselove/": "recXvfkeYIWRS1yDG",
"https://www.facebook.com/cbcarmy/": "rec0GLO9KrkL26Hl9",
"https://www.facebook.com/Islandofghost/": "recaxv1mbJzhBUmvh",
"https://www.facebook.com/GhostIslandNews/": "recnfmS6KQq8ADPdq",
"https://www.facebook.com/lovebakinglovehealthy/": "recqDcHtzstSEYuEN",
"https://www.facebook.com/getoutdpp/": "recGhjG3J67YawoV3",
"https://www.facebook.com/%E7%BD%B7%E5%85%8D%E6%B0%91%E9%80%B2%E9%BB%A8-2129370967290567/": "rec3rJ5tNg2otD5qz",
"https://www.facebook.com/johncelayo/": "rec8n4wKSsbOAyq1J",
"https://www.facebook.com/grumbledpp/": "rec64LvmyaPlP4kBP",
"https://www.facebook.com/%E6%96%87%E9%9D%92%E5%B7%A5%E4%BD%9C%E6%9C%83-510052339062419/": "rec8Z1YuT8hWKYbG2",
"https://www.facebook.com/%E9%9D%A0%E5%8C%97%E6%B0%91%E9%80%B2%E9%BB%A8-454656575008713/": "recwLUUVEocoCeT8g",
"https://www.facebook.com/bigchinalove/": "recPUgrixj8HPlVUp",
"https://www.facebook.com/shengser/": "rec63fhQeP0MU3357",
"https://www.facebook.com/%E8%A8%8E%E5%8E%AD%E6%B0%91%E9%80%B2%E9%BB%A8-504021696772145/": "rec7l2nBPLFj4sOmr",
"https://www.facebook.com/%E9%9D%A0%E5%8C%97%E6%99%82%E4%BA%8B-165534787282102/": "recGCFPh0DWZ6MG4i",
"https://www.facebook.com/taiwan1314520/": "rec9BS2RnG7Bi773d",
"https://www.facebook.com/fuqidao168/": "recVbbS2hFI2S39z7",
"https://www.facebook.com/GlobalChineseAlliance/": "recEvRHB5bqjxS6ES",
"https://www.facebook.com/%E5%A4%A9%E5%8D%97%E5%9C%B0%E5%8C%97-1063653903655415/": "recdWAeftdXBwOLIX",
"https://www.facebook.com/kmtdppisshit/": "rec6s2d1TXlmUI2nG",
"https://www.facebook.com/catssssssssssssss/": "recpu60Ei5EqoEXxn",
"https://www.facebook.com/qiqi.news/": "recOpNLBJ4R2mmCqM",
"https://www.facebook.com/dogcat101300/": "recXy5Rkxp0PhMpCs",
"https://www.facebook.com/travelmoviemusic/": "recw9FN2e3jZFJwqX",
"https://www.facebook.com/imangonews/": "recVrU412hfv2dChw",
"https://www.facebook.com/%E4%BA%BA%E7%94%9F%E6%AD%A3%E8%83%BD%E9%87%8F-1349834938455966/": "reccVfkXwa6u8R4o3",
"https://www.facebook.com/%E4%BA%BA%E7%94%9F%E7%AC%91%E8%91%97%E8%B5%B0-1958092751106755/": "recEnSF53PkWENrhs",
"https://www.facebook.com/thumbsuplifenews/": "recqbh2I61V2JArRi",
"https://www.facebook.com/hssszn/": "recODAxW73l6JpJJ7",
"https://www.facebook.com/aroundtheworld01/": "recjrgKJKwH1ru67m",
"https://www.facebook.com/%E5%8F%8D%E8%94%A1%E8%8B%B1%E6%96%87%E8%81%AF%E7%9B%9F%E5%85%A8%E5%9C%8B%E6%B0%91%E6%80%A8%E5%97%86%E8%94%A1%E7%B8%BD%E9%83%A8-1566024720346478/": "rectYderJ2wfojGfN",
"https://www.facebook.com/%E9%9D%92%E5%A4%A9%E7%99%BD%E6%97%A5%E6%AD%A3%E7%BE%A9%E5%8A%9B%E9%87%8F-1006889099430655/": "recjnR3SPoTTEUT15",
"https://www.guancha.cn/": "recE5pFRI2dRUdsBB",
"https://news.163.com": "recqZh8SLNtPITFo9",
"https://kknews.cc/": "recKJwOC1QvSQJgKB",
"http://www.readthis.one/": "recxMOjlGZDoUbLWc",
"https://www.coco01.today/": "recjILSLlLRmkgP5I",
"https://www.ptt01.cc/": "recj4kR6ExZgXdzOk",
"https://www.xuehua.us/": "recbEZkJV8k2Fg91E",
"https://www.orgs.one/": "recJVUAsWSsbKz9N0",
"http://www.how01.com/": "rec03ujV04yeDHeAu",
"https://read01.com/zh-tw/": "recwO0vYEkxI4JbBl",
"https://www.youtube.com/channel/UCgkHTZsCdH8P9z7lazTXN3g": "recnUmD0TFC1UPMPH",
"https://www.youtube.com/channel/UCJHq28mKJowPCGQ0WDIDU9A": "recjh6Rzp8iCarxF3",
"https://www.youtube.com/channel/UCMcDqLHgIuXWtWsqPEkqnWA": "recyUFTVMNsGGuCAV",
}
| 1.773438
| 2
|
xrspatial/tests/test_curvature.py
|
brendancol/xarray-spatial
| 0
|
12779323
|
import pytest
import numpy as np
import xarray as xr
import dask.array as da
from xrspatial import curvature
from xrspatial.utils import doesnt_have_cuda
from xrspatial.tests.general_checks import general_output_checks
elevation = np.asarray([
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
[1584.8767, 1584.8767, 1585.0546, 1585.2324, 1585.2324, 1585.2324],
[1585.0546, 1585.0546, 1585.2324, 1585.588, 1585.588, 1585.588],
[1585.2324, 1585.4102, 1585.588, 1585.588, 1585.588, 1585.588],
[1585.588, 1585.588, 1585.7659, 1585.7659, 1585.7659, 1585.7659],
[1585.7659, 1585.9437, 1585.7659, 1585.7659, 1585.7659, 1585.7659],
[1585.9437, 1585.9437, 1585.9437, 1585.7659, 1585.7659, 1585.7659]],
dtype=np.float32
)
def test_curvature_on_flat_surface():
# flat surface
test_arr1 = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
expected_results = np.array([
[np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, 0, 0, 0, np.nan],
[np.nan, 0, 0, 0, np.nan],
[np.nan, 0, 0, 0, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan]
])
test_raster1 = xr.DataArray(test_arr1, attrs={'res': (1, 1)})
curv = curvature(test_raster1)
general_output_checks(test_raster1, curv, expected_results)
def test_curvature_on_convex_surface():
# convex
test_arr2 = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, -1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
expected_results = np.asarray([
[np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, 0., 100., 0., np.nan],
[np.nan, 100., -400., 100., np.nan],
[np.nan, 0., 100., 0., np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan]
])
test_raster2 = xr.DataArray(test_arr2, attrs={'res': (1, 1)})
curv = curvature(test_raster2)
general_output_checks(test_raster2, curv, expected_results)
def test_curvature_on_concave_surface():
# concave
test_arr3 = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
expected_results = np.asarray([
[np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, 0., -100., 0., np.nan],
[np.nan, -100., 400., -100., np.nan],
[np.nan, 0., -100., 0., np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan]
])
test_raster3 = xr.DataArray(test_arr3, attrs={'res': (1, 1)})
curv = curvature(test_raster3)
general_output_checks(test_raster3, curv, expected_results)
@pytest.mark.skipif(doesnt_have_cuda(), reason="CUDA Device not Available")
def test_curvature_gpu_equals_cpu():
import cupy
agg_numpy = xr.DataArray(elevation, attrs={'res': (10.0, 10.0)})
cpu = curvature(agg_numpy, name='numpy_result')
agg_cupy = xr.DataArray(
cupy.asarray(elevation), attrs={'res': (10.0, 10.0)}
)
gpu = curvature(agg_cupy, name='cupy_result')
general_output_checks(agg_cupy, gpu)
np.testing.assert_allclose(cpu.data, gpu.data.get(), equal_nan=True)
# NOTE: Dask + GPU code paths don't currently work because of
# dask casting cupy arrays to numpy arrays during
# https://github.com/dask/dask/issues/4842
def test_curvature_numpy_equals_dask():
agg_numpy = xr.DataArray(elevation, attrs={'res': (10.0, 10.0)})
numpy_curvature = curvature(agg_numpy, name='numpy_curvature')
agg_dask = xr.DataArray(
da.from_array(elevation, chunks=(3, 3)), attrs={'res': (10.0, 10.0)}
)
dask_curvature = curvature(agg_dask, name='dask_curvature')
general_output_checks(agg_dask, dask_curvature)
# both produce same results
np.testing.assert_allclose(
numpy_curvature.data, dask_curvature.data.compute(), equal_nan=True)
| 2.125
| 2
|
rfapi/auth.py
|
cestrada-rf/rfapi-python
| 32
|
12779324
|
# Copyright 2016 Recorded Future, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Auth provider for RF tokens stored in environment."""
import os
import email
import hashlib
import hmac
import requests
from .error import MissingAuthError
# pylint: disable=too-few-public-methods
class RFTokenAuth(requests.auth.AuthBase):
"""Authenticate using a token stored in an environment variable.
The class will look for tokens in RF_TOKEN and RECFUT_TOKEN (legacy).
"""
def __init__(self, token, api_version=1):
"""Initialize the class. Provide a valid token."""
self.token = self._find_token() if token == 'auto' else token
self._api_version = api_version
def __call__(self, req):
"""Add the authentication header when class is called."""
# If we still haven't a token we need to bail.
if not self.token:
raise MissingAuthError
if self._api_version == 1:
req.headers['Authorization'] = "RF-TOKEN token=%s" % self.token
else:
req.headers['X-RFToken'] = self.token
return req
@staticmethod
def _find_token():
if 'RF_TOKEN' in os.environ:
return os.environ['RF_TOKEN']
if 'RECFUT_TOKEN' in os.environ:
return os.environ['RECFUT_TOKEN']
raise MissingAuthError('Auth method auto selected but no token '
'found in environment (RF_TOKEN or '
'RECFUT_TOKEN).')
class SignatureHashAuth(requests.auth.AuthBase):
"""Authenticate using signed queries."""
def __init__(self, username, userkey):
"""Initialize. Provide a valid username and key."""
self.username = username
self.userkey = userkey
def __call__(self, req):
"""Add the auth headers to a request."""
# pylint: disable=no-member
        timestamp = email.utils.formatdate()
split = req.path_url.split("?")
path_params = split[1] if len(split) > 1 else ""
body = req.body if req.body else ""
if "v2" in req.path_url:
v2_url = req.path_url.replace("/rfq", "")
hash_text = v2_url + body + timestamp
else:
hash_text = "?" + path_params + body + timestamp
hmac_hash = hmac.new(self.userkey,
hash_text,
hashlib.sha256).hexdigest()
req.headers['Date'] = timestamp
req.headers['Authorization'] = 'RF-HS256 user=%s, hash=%s' % (
self.username, hmac_hash
)
return req
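# Hedged usage sketch (not part of the original module): attach RFTokenAuth to
# a requests call. The URL is a placeholder endpoint; with 'auto' the token is
# read from RF_TOKEN / RECFUT_TOKEN in the environment.
def _example_token_auth():
    auth = RFTokenAuth('auto', api_version=2)
    return requests.get('https://api.example.com/v2/some/endpoint',
                        auth=auth, timeout=30)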
| 2.328125
| 2
|
genoplot/genoplot.py
|
ddnewell/genoplot
| 0
|
12779325
|
<reponame>ddnewell/genoplot
# Copyright (c) 2016 by Welded Anvil Technologies (<NAME>). All Rights Reserved.
# This software is the confidential and proprietary information of
# Welded Anvil Technologies (<NAME>) ("Confidential Information").
# You shall not disclose such Confidential Information and shall use it
# only in accordance with the terms of the license agreement you entered
# into with Welded Anvil Technologies (<NAME>).
# @author <EMAIL>
import logging, itertools, svgwrite, time
import networkx as nx
from .family import Family
from .familygraph import FamilyGraph
from .pedigree import Pedigree
from .utils import calculate_text_size
logger = logging.getLogger("genoplot")
class GenoPlot(object):
def __init__(self,
name,
gedcom_file,
output_file=None,
font_size=10,
hmargin=20,
symbol_size=25,
page_margin=100
):
"""
GenoPlot - defines a pedigree plot based on specified gedcom file
:param name: Plot name/title
:type name: str
:param gedcom_file: GEDCOM file path
:type gedcom_file: str
"""
logger.info("Creating GenoPlot named '%s' from GEDCOM '%s'", name, gedcom_file)
self.name = name
self._pedigree = Pedigree(name, gedcom_file, font_size=font_size, hmargin=hmargin)
if output_file is None:
self._output_file = "{0}.svg".format(self.name)
else:
self._output_file = output_file
if ".svg" not in self._output_file:
self._output_file += ".svg"
self._graph = None
self._layout = None
self._font_size = font_size
self._symbol_size = symbol_size
self._hmargin = hmargin
self._node_height = self._symbol_size*2#*6
self._page_margin = page_margin
self._connectors = []
self._image_layers = {
"-1:duplicates": [],
"0:connectors": [],
"1:individuals": [],
"2:text": [],
"3:textextent": []
}
def draw(self):
"""Draws pedigree plot based on specified parameters"""
logger.info("Starting plot draw")
draw_start = time.time()
self._graph = FamilyGraph(self._pedigree,
font_size=self._font_size,
hmargin=self._hmargin,
node_height=self._node_height,
page_margin=self._page_margin)
extremes = self._graph.extremes()
self._svg = svgwrite.Drawing(filename=self._output_file,
size=(extremes[1]+self._page_margin*2, extremes[3]*1.2+self._page_margin*2))
# for vid, loc in self._layout.items():
for vid, d in self._graph.items():
if vid[0] == "F":
# Draw family
family = d["el"]
# father = family.father()
# if not father is None:
# fx = father.x
# fy = father.y
# else:
# mother = family.mother()
# fx = mother.x
# fy = mother.y
self._draw_family(family.id, family.x, family.y)
# self._draw_family(int(vid[1:]), *loc)
else:
# Draw individual
individual = d["el"]
self._draw_individual(individual.id, individual.x, individual.y)
# self._draw_individual(int(vid[1:]), *loc)
# for vid, loc in self._layout.items():
for vid, d in self._graph.items():
if vid[0] == "F":
# family = self._pedigree.family(int(vid[1:]))
family = d["el"]
father = family.father()
parent = father
if father is None:
fwidth = self._hmargin
parent = family.mother()
else:
fwidth = calculate_text_size(father.output_text(), self._font_size)[0]
if parent is None:
logger.critical("Parent is none for family %s; father: %s; parent: %s; cannot continue drawing", vid, father, parent)
# Draw child connectors
midpoint_x = parent.x+fwidth/2+self._hmargin+self._symbol_size/2
midpoint_y = parent.y+self._symbol_size/2
# midpoint_x = loc[0]+fwidth/2+self._hmargin+self._symbol_size/2
# midpoint_y = loc[1]+self._symbol_size/2
                # Collect child coordinates
start = (midpoint_x, midpoint_y)
targets = []
# Draw edges to children, if the edges exist in branched graph
for child in family.children():
nid = "P{0}".format(child.id)
# if nid in self._branched_graph[vid] and nid in self._layout:
if self._graph.has_edge(vid, nid):
if not child.x is None and not child.y is None:
child_x = child.x
child_y = child.y
else:
logger.warn("Coordinates not persisted to %i", child.id)
# child_x, child_y = self._layout["P{0}".format(child.id)]
targets.append((child_x+self._symbol_size/2, child_y))
elif child.is_parent():
for fam in self._pedigree.individual_families(child.id, role="parent"):
fid = "F{0}".format(fam.id)
# if fid in self._branched_graph[vid]:
if self._graph.has_edge(vid, fid):
if not child.x is None and not child.y is None:
child_x = child.x
child_y = child.y
else:
logger.warn("Position not found for %i - %s, using family %s position", child.id, child.name, fid)
logger.warn("Coordinates not persisted to %i", child.id)
# child_x, child_y = self._layout[fid]
targets.append((child_x+self._symbol_size/2, child_y))
# Draw elbow connectors
if len(targets) > 0:
self._draw_connector_to_multiple(start, targets)
# Draw duplicate people connectors
[self._draw_duplicate_person_link(individual) for individual in self._graph.duplicate_individuals()]
# Draw connectors between added duplicate nodes
for (nid1, nid2) in self._graph.branch_links():
individual = self._pedigree.individual(nid1)
if not individual.x is None and not individual.y is None:
start = (individual.x, individual.y)
else:
logger.warn("Coordinates not persisted to %i", individual.id)
# start = self._layout["P{0}".format(individual.id)]
duplicate = self._pedigree.individual(nid2)
if not duplicate.x is None and not duplicate.y is None:
end = (duplicate.x, duplicate.y)
else:
logger.warn("Coordinates not persisted to %i", duplicate.id)
# end = self._layout["P{0}".format(duplicate.id)]
logger.debug("Drawing added duplicate node connector: %s %s", start, end)
self._draw_duplicate_connector(individual.sex, start, end)
# Add cached drawing items to image
[self._svg.add(item) for layer in sorted(self._image_layers) for item in self._image_layers[layer]]
# Save image
self._svg.save()
logger.info("Plot draw complete, took %.2fs", time.time() - draw_start)
def _draw_family(self, fid, x, y):
"""Draws family on drawing"""
logger.debug("Drawing family %s at (%.1f, %.1f)", fid, x, y)
family = self._pedigree.family(fid)
# family.set_coordinates(x, y)
father = family.father()
mother = family.mother()
if father is None:
# Draw virtual father
self._draw_virtual_individual("M", x, y)
else:
self._draw_individual(father.id, father.x, father.y)
if mother is None:
# Draw virtual mother
if father is None:
logger.warn("Family %s has no parents: drawing both virtual mother and father", fid)
fwidth = self._hmargin
else:
fwidth = calculate_text_size(father.output_text(), self._font_size)[0]
mwidth = self._symbol_size
mx = x + self._hmargin*2+fwidth/2+mwidth/2
self._draw_virtual_individual("F", mx, y)
end = (mx+self._symbol_size/2, y+self._symbol_size/2)
else:
self._draw_individual(mother.id, mother.x, mother.y)
end = (mother.x+self._symbol_size/2, y+self._symbol_size/2)
# Draw connector between parents
start = (x+self._symbol_size, y+self._symbol_size/2)
self._draw_connector(start, end)
def _draw_virtual_individual(self, sex, x, y):
"""Draws individual on drawing"""
if sex == "M":
self._image_layers["1:individuals"].append(
self._svg.rect(
(x, y),
(self._symbol_size, self._symbol_size),
fill="white",
stroke="#555555",
style="stroke-dasharray: 4,5;"
)
)
else:
self._image_layers["1:individuals"].append(
self._svg.ellipse(
(x+self._symbol_size/2, y+self._symbol_size/2),
(self._symbol_size/2, self._symbol_size/2),
fill="white",
stroke="#555555",
style="stroke-dasharray: 4,5;"
)
)
def _draw_individual(self, pid, x, y):
"""Draws individual on drawing"""
individual = self._pedigree.individual(pid)
individual.set_coordinates(x, y)
if individual.sex == "M":
self._image_layers["1:individuals"].append(
self._svg.rect(
(x, y),
(self._symbol_size, self._symbol_size),
fill=individual.color(),
stroke="black"
)
)
else:
self._image_layers["1:individuals"].append(
self._svg.ellipse(
(x+self._symbol_size/2, y+self._symbol_size/2),
(self._symbol_size/2, self._symbol_size/2),
fill=individual.color(),
stroke="black"
)
)
text_y = y + 1.6*self._symbol_size
for text in individual.output_text():
self._image_layers["2:text"].append(
self._svg.text(
text,
insert=(x+self._symbol_size/2, text_y),
color="black",
style="font-size: {0}px; text-anchor: middle; font-family: 'Helvetica Neue';".format(self._font_size)
)
)
### Temporary
text_width, text_height = calculate_text_size(text, self._font_size)
self._image_layers["3:textextent"].append(
self._svg.rect(
(x+self._symbol_size/2-text_width/2, text_y-text_height),
(text_width, text_height),
fill="none",
stroke="blue",
style="stroke-dasharray: 1,2;"
)
)
logger.debug("Text %s has width %.2f and height %.2f", text, text_width, text_height)
text_y += text_height
def _detect_straight_connector_overlap(self, x1, y1, x2, y2, fid=None):
"""Returns whether there is an overlapping straight line connector"""
if y1 == y2:
for cxn in self._connectors:
if cxn[0][1] == y1 and cxn[1][1] == y2 and (
x1 <= cxn[0][0] <= x2 or
x1 <= cxn[1][0] <= x2 or
cxn[0][0] <= x1 <= cxn[1][0] or
cxn[0][0] <= x2 <= cxn[1][0]
):
return True
# If no overlaps detected, return non-overlapping
return False
def _find_nonoverlapping_y(self, x1, x2, y):
"""Returns non-overlapping y value for connector"""
i = 0
while self._detect_straight_connector_overlap(x1, y, x2, y):
y -= 8
i += 1
if i > 100:
logger.error("Drawing overlapping connector. Iterated 100 times and could not find open space on layout. X1: %i Y1: %i X2: %i Y2: %i", x1, y1, x2, y2)
break
if i > 0:
logger.debug("Detected overlapping connector. Iterating %i times", i)
return y
def _draw_connector_to_multiple(self, start, targets):
"""Draws connector from start coordinate to one or more targets"""
start_x, start_y = start
max_x = start_x
min_x = start_x
max_y = start_y
min_y = start_y
for x, y in targets:
if x > max_x:
max_x = x
if x < min_x:
min_x = x
if y > max_y:
max_y = y
if y < min_y:
min_y = y
middle_y = self._find_nonoverlapping_y(min_x, max_x, max_y - self._symbol_size)
logger.debug("Drawing connector to multiple targets (%i). Max_X: %i Min_X: %i Max_Y: %i Min_Y: %i Middle_Y: %i", len(targets), max_x, min_x, max_y, min_y, middle_y)
# Draw vertical section from start
self._draw_connector(start, (start_x, middle_y))
# Draw horizontal section
self._draw_connector((min_x, middle_y), (max_x, middle_y))
# Draw vertical sections to targets
for tgt in targets:
self._draw_connector((tgt[0], middle_y), tgt)
def _draw_connector(self, start, end):
"""Draws connector between specified coordinates"""
x1, y1 = start
x2, y2 = end
if y1 == y2 or x1 == x2:
# Straight line connector
if (start, end) not in self._connectors:
self._image_layers["0:connectors"].append(
self._svg.line(
start=start,
end=end,
stroke="black"
)
)
self._connectors.append((
start, end
))
else:
# Elbow connector
middle_y = self._find_nonoverlapping_y(x1, x2, y2 - self._symbol_size)
if (start, (x1, middle_y)) not in self._connectors:
self._image_layers["0:connectors"].append(
self._svg.line(
start=start,
end=(x1, middle_y),
stroke="black"
)
)
self._connectors.append((
start, (x1, middle_y)
))
if ((x1, middle_y), (x2, middle_y)) not in self._connectors:
self._image_layers["0:connectors"].append(
self._svg.line(
start=(x1, middle_y),
end=(x2, middle_y),
stroke="black"
)
)
self._connectors.append((
(x1, middle_y), (x2, middle_y)
))
if ((x2, middle_y), end) not in self._connectors:
self._image_layers["0:connectors"].append(
self._svg.line(
start=(x2, middle_y),
end=end,
stroke="black"
)
)
self._connectors.append((
(x2, middle_y), end
))
def _draw_duplicate_person_link(self, individual):
"""Draws connectors for duplicate person"""
coords = individual.coordinate_history()
if len(coords) < 2:
logger.warn("Individual %i - %s marked as duplicate but only has %i coordinates", individual.id, individual.name, len(coords))
return
elif len(coords) > 2:
            coords = list(itertools.combinations(coords, 2))
else:
coords = [coords]
logger.debug("Drawing duplicate person link for %s - coords: %s", individual.name, ", ".join(repr(c) for c in coords))
[self._draw_duplicate_connector(individual.sex, start, end) for (start, end) in coords]
def _draw_duplicate_connector(self, sex, start, end):
"""Draws connector between specified coordinates"""
x1 = start[0] + self._symbol_size/2
x2 = end[0] + self._symbol_size/2
y1 = start[1] + self._symbol_size/2
y2 = end[1] + self._symbol_size/2
sx, sy = start
ex, ey = end
# curve1_x = (x1 - x2) * 0.2 + x1
# curve1_y = (y1 - y2) * 0.3 + y1
# curve2_x = (x2 - x1) * 0.2 + x2
# curve2_y = (y2 - y1) * 0.3 + y2
# path = "M{0} {1} C {2} {3}, {4} {5}, {6} {7}".format(x1, y1, curve1_x, curve1_y, curve2_x, curve2_y, x2, y2)
curve_dist = 100
if y1 == y2:
curve1_x = (x1 + x2) / 2
curve1_y = y1 - curve_dist
elif x1 == x2:
curve1_x = x1 - curve_dist if sex == "M" else x1 + curve_dist
curve1_y = (y1 + y2) / 2
else:
dx = abs(x2 - x1)
dy = abs(y2 - y1)
if dy > dx:
curve1_x = min(x1, x2) + dx * 0.2
curve1_y = min(y1, y2) + dy * 0.2
elif dy < dx:
curve1_x = min(x1, x2) + dx * 0.1
curve1_y = max(y1, y2) - dy * 0.3
else:
curve1_x = min(x1, x2)
curve1_y = min(y1, y2)
path = "M{0} {1} Q {2} {3}, {4} {5}".format(x1, y1, curve1_x, curve1_y, x2, y2)
self._image_layers["-1:duplicates"].append(
self._svg.path(
d=path,
stroke="#BAFFD2",
fill="none"
)
)
if sex == "M":
self._image_layers["-1:duplicates"].append(
self._svg.rect(
(sx - self._symbol_size*0.2, sy - self._symbol_size*0.2),
(self._symbol_size*1.4, self._symbol_size*1.4),
fill="white",
stroke="#BAFFD2"
)
)
self._image_layers["-1:duplicates"].append(
self._svg.rect(
(ex - self._symbol_size*0.2, ey - self._symbol_size*0.2),
(self._symbol_size*1.4, self._symbol_size*1.4),
fill="white",
stroke="#BAFFD2"
)
)
else:
self._image_layers["-1:duplicates"].append(
self._svg.ellipse(
(sx + self._symbol_size/2, sy + self._symbol_size/2),
(self._symbol_size*1.4/2, self._symbol_size*1.4/2),
fill="white",
stroke="#BAFFD2"
)
)
self._image_layers["-1:duplicates"].append(
self._svg.ellipse(
(ex + self._symbol_size/2, ey + self._symbol_size/2),
(self._symbol_size*1.4/2, self._symbol_size*1.4/2),
fill="white",
stroke="#BAFFD2"
)
)
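# Hedged usage sketch (not part of the original module): build a plot from a
# GEDCOM file and render it to SVG. The file names are placeholders.
def _example_genoplot():
    plot = GenoPlot('Example family', 'example.ged', output_file='example.svg')
    plot.draw()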
| 2.546875
| 3
|
examples/notebooks/test_notebooks.py
|
mjc87/SHTOOLS
| 251
|
12779326
|
#!/usr/bin/env python3
"""
This script will run all jupyter notebooks in order to test for errors.
"""
import sys
import os
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
if os.path.dirname(sys.argv[0]) != '':
os.chdir(os.path.dirname(sys.argv[0]))
notebooks = ('grids-and-coefficients.ipynb',
'localized-spectral-analysis.ipynb',
'gravity-and-magnetic-fields.ipynb',
'plotting-maps.ipynb',
'low-level-spherical-harmonic-analyses.ipynb',
'advanced-localized-spectral-analysis.ipynb',
'advanced-shcoeffs-and-shgrid-usage.ipynb',
'spherical-harmonic-normalizations.ipynb',
'advanced-shwindow-usage.ipynb',
'3d-plots.ipynb')
if sys.version_info.major == 3:
kname = 'python3'
else:
    raise RuntimeError('Python version {:d} not supported.'.format(sys.version_info.major))
for i in range(len(notebooks)):
with open(notebooks[i]) as f:
nb = nbformat.read(f, as_version=4)
ep = ExecutePreprocessor(timeout=240, kernel_name=kname)
print('Processing file {:s}'.format(notebooks[i]))
ep.preprocess(nb, {'metadata': {'path': '.'}})
| 2.4375
| 2
|
test/testDenseFactorization.py
|
TtheBC01/pEigen
| 0
|
12779327
|
import unittest
import sys
sys.path.append('/pEigen/src/peigen')
import libpeigen as peigen
class DenseFactorizationTest(unittest.TestCase):
def setUp(self):
self.rows = 1000
self.cols = 1000
self.dense_matrix = peigen.denseMatrixDouble(self.rows, self.cols)
self.dense_matrix.setRandom(1)
self.factorizer = peigen.denseDecomposition(self.dense_matrix)
def test_thin_svd(self):
self.factorizer.BDCSVD()
S = self.factorizer.getSingularValues()
norm_greater_than_0 = (S.diagonal(0).norm() > 0)
U = self.factorizer.getU()
UtU = U.transpose()*U
trace = UtU.trace()
residual = (trace - UtU.rows())**2/(UtU.rows()**2)
res_less_than_eps = (residual < 1e-9)
self.assertEqual(res_less_than_eps, True)
self.assertEqual(norm_greater_than_0, True)
def test_qr_decomp(self):
self.factorizer.HouseholderQR()
Q = self.factorizer.getQ()
QtQ = Q.transpose()*Q
trace = QtQ.trace()
residual = (trace - QtQ.rows())**2/(QtQ.rows()**2)
res_less_than_eps = (residual < 1e-9)
self.assertEqual(res_less_than_eps, True)
if __name__ == '__main__':
unittest.main()
| 2.671875
| 3
|
tests/unit/asn/test_brcreateasncommand.py
|
ivcmartello/registrobrepp
| 0
|
12779328
|
<gh_stars>0
import pytest
from eppy.doc import EppResponse
from lxml import etree
from registrobrepp.asn.brcreateasncommand import BrEppCreateAsnCommand
from registrobrepp.asn.contactasn import ContactAsn
class TestBrCreateAsnCommand:
@pytest.fixture
def createasncommand(self):
number = 12345
organization = 'BR-ABCD-LACNIC'
contacts = [ContactAsn.build('fan', routing=True), ContactAsn.build('hkk')]
asIn = ['from AS2 10 accept AS1 A2']
asOut = ['to AS2 announce AS3 AS4']
command = BrEppCreateAsnCommand(number, organization, contacts, asIn, asOut)
command.add_clTRID('ABC-12345')
return command
def test_create_asn_command(self, createasncommand, asnxmlschema, createasncommandxmlexpected):
xml = createasncommand.to_xml(force_prefix=True).decode()
assert asnxmlschema.validate(etree.fromstring(xml))
assert xml == createasncommandxmlexpected
def test_create_asn_response(self, asnxmlschema, responseasncommandxmlexpected):
response = EppResponse.from_xml(responseasncommandxmlexpected,
extra_nsmap={'asn': 'urn:ietf:params:xml:ns:asn-1.0'})
xml = response.to_xml(force_prefix=True).decode()
data = response['epp']['response']['resData']['asn:creData']
assert data.number == '64500'
assert data.roid == '64500-REP'
assert data.crDate == '1999-04-03T22:00:00.0Z'
assert response['epp']['response']['trID']['clTRID'] == 'ABC-12345'
assert response['epp']['response']['trID']['svTRID'] == '54321-XYZ'
assert asnxmlschema.validate(etree.fromstring(xml))
| 1.960938
| 2
|
main.py
|
FDMZ17/log4j-test
| 0
|
12779329
|
<reponame>FDMZ17/log4j-test<gh_stars>0
from flask import Flask
app = Flask(__name__)
@app.route("/exploit.java")
def exploit():
return open("exploit.java").read()
app.run(port=8080)
| 1.953125
| 2
|
date_sorter/main.py
|
ImTheSquid/Image-Dupicate-Detector
| 2
|
12779330
|
<gh_stars>1-10
import os
import shutil
from datetime import datetime
from os.path import basename, join
from pathlib import Path
from PIL import Image, ExifTags
from PyQt5.QtCore import pyqtSignal, QThreadPool, Qt
from PyQt5.QtWidgets import QWidget, QGroupBox, QVBoxLayout, QProgressBar, QLabel, QHBoxLayout, QLineEdit, \
QFileDialog, QRadioButton, QPushButton, QMessageBox
from worker import Worker
# Checks if a directory exists, then moves the specified file to that directory
def check_exists(new_dir, file):
# Makes directories and moves files
os.makedirs(new_dir, exist_ok=True)
shutil.move(file, join(new_dir, basename(file)))
def convert_to_month(mon):
return {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August',
9: 'September', 10: 'October', 11: 'November', 12: 'December'}[mon]
class DateSorter(QWidget):
progress_signal = pyqtSignal(tuple)
files = []
def __init__(self):
super().__init__()
self.choose_dest_dir = QPushButton('Choose...')
self.start = QPushButton('Start')
self.days = QRadioButton('Years, Months, and Days')
self.months = QRadioButton('Years and Months')
self.years = QRadioButton('Years')
self.sorted_text = QLineEdit()
self.choose_dir = QPushButton('Choose...')
self.read_text = QLineEdit()
# Thread stuff
self.progress_signal.connect(self.update_progress)
self.thread_pool = QThreadPool()
self.thread_worker = Worker(self.sort_photos)
self.thread_worker.setAutoDelete(False)
self.thread_worker.signals.progress.connect(self.progress_signal)
self.thread_worker.signals.finished.connect(self.update_after_completion)
layout = QVBoxLayout()
options = QGroupBox('Options')
options.setLayout(self.setup_options())
progress = QGroupBox('Progress')
progress_layout = QVBoxLayout()
self.progress_bar = QProgressBar()
self.progress_bar.setValue(0)
self.progress_bar.setFormat('Waiting (%p%)')
progress_layout.addWidget(self.progress_bar)
progress.setLayout(progress_layout)
layout.addWidget(options)
layout.addWidget(progress)
self.setLayout(layout)
def setup_options(self):
options = QVBoxLayout()
read_dir_label = QLabel('Read Directory')
options.addWidget(read_dir_label)
read_box = QHBoxLayout()
self.read_text.textEdited.connect(self.can_start_sort)
read_box.addWidget(self.read_text)
self.choose_dir.clicked.connect(self.open_chooser)
read_box.addWidget(self.choose_dir)
options.addLayout(read_box)
dest_dir_label = QLabel('Destination Directory')
options.addWidget(dest_dir_label)
dest_box = QHBoxLayout()
self.sorted_text.textEdited.connect(self.can_start_sort)
dest_box.addWidget(self.sorted_text)
self.choose_dest_dir.clicked.connect(self.open_dest_chooser)
dest_box.addWidget(self.choose_dest_dir)
options.addLayout(dest_box)
radios = QVBoxLayout()
sort_type_label = QLabel('Sort Type')
radios.addWidget(sort_type_label)
self.years.setChecked(True)
radios.addWidget(self.years)
radios.addWidget(self.months)
radios.addWidget(self.days)
options.addLayout(radios)
options.addStretch()
warning = QLabel('WARNING: There is no check for file permissions.\n'
'If you do not have permissions to access the selected directories the program will crash.')
warning.setStyleSheet('color:#FF0000')
warning.setAlignment(Qt.AlignCenter)
self.start.setEnabled(False)
self.start.clicked.connect(self.start_sorter)
options.addWidget(warning)
options.addWidget(self.start)
return options
def can_start_sort(self):
if os.path.isdir(self.read_text.text()) and os.path.isdir(self.sorted_text.text()):
self.start.setEnabled(True)
self.progress_bar.setFormat('Ready (%p%)')
else:
self.start.setEnabled(False)
self.progress_bar.setFormat('Waiting (%p%)')
def open_chooser(self):
dialog = QFileDialog.getExistingDirectory(self, 'Open Directory', '/home')
if dialog:
self.read_text.setText(dialog)
self.can_start_sort()
def open_dest_chooser(self):
dialog = QFileDialog.getExistingDirectory(self, 'Open Directory', '/home')
if dialog:
self.sorted_text.setText(dialog)
self.can_start_sort()
def update_progress(self, val):
self.progress_bar.setValue(val[0])
def start_sorter(self):
self.read_text.setEnabled(False)
self.sorted_text.setEnabled(False)
self.start.setEnabled(False)
self.choose_dir.setEnabled(False)
self.choose_dest_dir.setEnabled(False)
self.years.setEnabled(False)
self.months.setEnabled(False)
self.files.clear()
self.find_photos()
self.thread_pool.start(self.thread_worker)
def sort_photos(self, update):
for f in range(0, len(self.files)):
file = self.files[f]
with Image.open(file) as img:
if file.lower().endswith('.jpeg'):
exif = {ExifTags.TAGS[k]: v for k, v in img.getexif().items() if k in ExifTags.TAGS}
elif file.lower().endswith('.png'):
exif = img.text
else:
exif = img._getexif()
if exif is not None and 36867 in exif and not exif[36867][0] == '{':
date = datetime.strptime(exif[36867], '%Y:%m:%d %H:%M:%S')
self.find_dir(file, date)
else:
self.find_dir(file, None)
update.emit((f,))
    def find_dir(self, file, date):
        if date is None:
            check_exists(self.sorted_text.text() + '/Not_Sortable/', file)
        else:
            check_exists(os.path.join(self.sorted_text.text(), str(date.year),
                                      (convert_to_month(date.month)
                                       if self.months.isChecked() or self.days.isChecked() else ''),
                                      (str(date.day) if self.days.isChecked() else '')), file)
def update_after_completion(self):
self.progress_bar.setValue(0)
self.progress_bar.setFormat('Waiting (%p%)')
self.read_text.setEnabled(True)
self.sorted_text.setEnabled(True)
self.start.setEnabled(True)
self.choose_dir.setEnabled(True)
self.choose_dest_dir.setEnabled(True)
self.years.setEnabled(True)
self.months.setEnabled(True)
        self.files.clear()
        QMessageBox.information(self, 'Date Sorter', 'Sort completed successfully.')
def find_photos(self):
self.progress_bar.setFormat('Sorting (%p%)')
for filename in Path(self.read_text.text()).rglob('**/*.*'):
if filename.as_posix().lower().endswith(('.png', '.jpg', '.jpeg')):
self.files.append(filename.as_posix())
self.progress_bar.setMaximum(len(self.files))
| 2.46875
| 2
|
forms/file_metadata_form.py
|
dvrpc/tmc-uploader
| 0
|
12779331
|
<gh_stars>0
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextField, IntegerField, FloatField
class UpdateMetadataForm(FlaskForm):
title = StringField("Title")
model_id = StringField("model ID")
lat = FloatField("lat")
lng = FloatField("lng")
legs = TextField("legs")
leg_names = TextField("leg names")
movements = TextField("movements")
modes = TextField("modes")
| 2.4375
| 2
|
efb/terminal.py
|
dorpvom/efb
| 0
|
12779332
|
from typing import Optional
from prompt_toolkit import PromptSession
from prompt_toolkit import print_formatted_text as print_
from efb.validator import YesNoValidator
SESSION = PromptSession()
def make_decision(question: str, default: Optional[bool] = None) -> bool:
default_string = f'(default {"y" if default else "n"})' if default is not None else ''
while True:
answer = SESSION.prompt(f'{question} [y/n] {default_string}: ', validator=YesNoValidator())
if answer == 'y':
return True
if answer == 'n':
return False
if not answer and default is not None:
return default
        print_(f'Please state your decision as y or n (not {answer})')
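# Hedged usage sketch (not part of the original module): prompt until the user
# answers y/n, falling back to the default on an empty answer.
def _example_make_decision():
    if make_decision('Overwrite existing config?', default=False):
        print_('Overwriting...')
    else:
        print_('Keeping existing config.')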
| 3.0625
| 3
|
pynrc/psfs.py
|
kammerje/pynrc
| 1
|
12779333
|
<reponame>kammerje/pynrc
from __future__ import absolute_import, division, print_function, unicode_literals
# The six library is useful for Python 2 and 3 compatibility
import six, os
# Import libraries
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import datetime, time
import sys, platform
import multiprocessing as mp
import traceback
import scipy
from scipy.interpolate import griddata, RegularGridInterpolator, interp1d
from numpy.polynomial import legendre
from astropy.io import fits, ascii
import astropy.units as u
import logging
_log = logging.getLogger('pynrc')
from . import conf
from .logging_utils import setup_logging
from .nrc_utils import read_filter, S, grism_res
from .opds import opd_default, OPDFile_to_HDUList
from .maths.image_manip import frebin, pad_or_cut_to_size
from .maths.fast_poly import jl_poly_fit, jl_poly
from .maths.coords import Tel2Sci_info, NIRCam_V2V3_limits, dist_image
# Program bar
from tqdm.auto import trange, tqdm
__epsilon = np.finfo(float).eps
###########################################################################
#
# WebbPSF Stuff
#
###########################################################################
try:
import webbpsf
except ImportError:
raise ImportError('WebbPSF is not installed. pyNRC depends on its inclusion.')
# Check that minimum required version meets requirements
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if not on_rtd:
_webbpsf_version_min = (0,9,0)
_ = webbpsf.utils.get_webbpsf_data_path(_webbpsf_version_min)
from webbpsf.opds import OTE_Linear_Model_WSS
# Link to WebbPSF's instance of poppy
from webbpsf.webbpsf_core import poppy
# Set up some poppy and webbpsf defaults
# Turn off multiprocessing, which is faster now due to improved
# underlying vectorization of poppy and numpy. Swapping large
# amount of data between processes is now the bottleneck for mp (5/18/2020).
poppy.conf.use_multiprocessing = False
# Only use this if you have the FFTW C library installed
# In general, numpy fft is actually pretty fast now, so default use_fftw=False
# It also doesn't play well with multiprocessing
poppy.conf.use_fftw = False
# Make sure we can use multiprocessing!
# Apple's Accelerate framework in 2.7 doesn't work with mp
d = np.__config__.blas_opt_info
accel_bool = ('extra_link_args' in d.keys() and ('-Wl,Accelerate' in d['extra_link_args']))
if (sys.version_info < (3,4,0)) and (platform.system()=='Darwin') and accel_bool:
poppy.conf.use_multiprocessing = False
# If the machine has 2 or less CPU cores, then no mp
if mp.cpu_count()<3:
poppy.conf.use_multiprocessing = False
# n_processes will be considered the max number of processors we use for multiprocessing
poppy.conf.n_processes = int(0.75 * mp.cpu_count()) #if poppy.conf.use_multiprocessing else 1
webbpsf.conf.default_output_mode = u'detector'
###########################################################################
#
# Create WebbPSF Coefficients and Images
#
###########################################################################
# Subclass of the WebbPSF NIRCam class to modify aberrations
from webbpsf import NIRCam as webbpsf_NIRCam
class webbpsf_NIRCam_mod(webbpsf_NIRCam):
def __init__(self):
webbpsf_NIRCam.__init__(self)
# Remove limits for detector position
# Values outside of [0,2047] will get transformed to the correct V2/V3 location
@webbpsf_NIRCam.detector_position.setter
def detector_position(self, position):
try:
x, y = map(int, position)
except ValueError:
raise ValueError("Detector pixel coordinates must be a pair of numbers, not {}".format(position))
# if x < 0 or y < 0:
# raise ValueError("Detector pixel coordinates must be nonnegative integers")
# if x > self._detector_npixels - 1 or y > self._detector_npixels - 1:
# raise ValueError("The maximum allowed detector pixel coordinate value is {}".format(
# self._detector_npixels - 1))
self._detector_position = (int(position[0]), int(position[1]))
def nproc_use(fov_pix, oversample, nwavelengths, coron=False):
"""Estimate Number of Processors
Attempt to estimate a reasonable number of processors to use
for a multi-wavelength calculation. One really does not want
to end up swapping to disk with huge arrays.
NOTE: Requires ``psutil`` package. Otherwise defaults to ``mp.cpu_count() / 2``
Parameters
-----------
fov_pix : int
Square size in detector-sampled pixels of final PSF image.
    oversample : int
        Oversampling factor of the PSF relative to detector-sampled pixels.
nwavelengths : int
Number of wavelengths.
coron : bool
Is the nproc recommendation for coronagraphic imaging?
If so, the total RAM usage is different than for direct imaging.
"""
try:
import psutil
except ImportError:
nproc = int(mp.cpu_count() // 2)
if nproc < 1: nproc = 1
_log.info("No psutil package available, cannot estimate optimal nprocesses.")
_log.info("Returning nproc=ncpu/2={}.".format(nproc))
return nproc
mem = psutil.virtual_memory()
avail_GB = mem.available / 1024**3
# Leave 10% for other things
avail_GB *= 0.9
fov_pix_over = fov_pix * oversample
# For multiprocessing, memory accumulates into the main process
# so we have to subtract the total from the available amount
reserve_GB = nwavelengths * fov_pix_over**2 * 8 / 1024**3
# If not enough available memory, then just return nproc=1
if avail_GB < reserve_GB:
_log.warn('Not enough available memory ({} GB) to hold resulting PSF info ({} GB)!'.format(avail_GB, reserve_GB))
return 1
avail_GB -= reserve_GB
# Memory formulas are based on fits to memory usage stats for:
# fov_arr = np.array([16,32,128,160,256,320,512,640,1024,2048])
# os_arr = np.array([1,2,4,8])
if coron: # Coronagraphic Imaging (in MB)
mem_total = (oversample*1024*2.4)**2 * 16 / (1024**2) + 500
if fov_pix > 1024: mem_total *= 1.6
else: # Direct Imaging (also spectral imaging)
mem_total = 5*(fov_pix_over)**2 * 8 / (1024**2) + 300.
# Convert to GB
mem_total /= 1024
# How many processors to split into?
nproc = int(avail_GB / mem_total)
nproc = np.min([nproc, mp.cpu_count(), poppy.conf.n_processes])
# Each PSF calculation will constantly use multiple processors
# when not oversampled, so let's divide by 2 for some time
# and memory savings on those large calculations
if oversample==1:
nproc = np.ceil(nproc / 2)
_log.debug('avail mem {}; mem tot: {}; nproc_init: {:.0f}'.\
format(avail_GB, mem_total, nproc))
nproc = np.min([nproc, nwavelengths])
# Resource optimization:
# Split iterations evenly over processors to free up minimally used processors.
# For example, if there are 5 processes only doing 1 iteration, but a single
# processor doing 2 iterations, those 5 processors (and their memory) will not
# get freed until the final processor is finished. So, to minimize the number
# of idle resources, take the total iterations and divide by two (round up),
# and that should be the final number of processors to use.
np_max = np.ceil(nwavelengths / nproc)
nproc = int(np.ceil(nwavelengths / np_max))
if nproc < 1: nproc = 1
# Multiprocessing can only swap up to 2GB of data from the child
# process to the master process. Return nproc=1 if too much data.
im_size = (fov_pix_over)**2 * 8 / (1024**3)
nproc = 1 if (im_size * np_max) >=2 else nproc
_log.debug('avail mem {}; mem tot: {}; nproc_fin: {:.0f}'.\
format(avail_GB, mem_total, nproc))
return int(nproc)
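# Illustrative usage sketch (not part of the original module); the fov_pix,
# oversample, and nwavelengths values below are arbitrary placeholders.
def _example_nproc_use():
    """Query the processor estimate before a multi-wavelength calculation."""
    nproc_direct = nproc_use(fov_pix=128, oversample=4, nwavelengths=40)
    nproc_coron = nproc_use(fov_pix=320, oversample=2, nwavelengths=40, coron=True)
    _log.info('Suggested processes (direct): {}'.format(nproc_direct))
    _log.info('Suggested processes (coron): {}'.format(nproc_coron))
    return nproc_direct, nproc_coron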
def _wrap_coeff_for_mp(args):
"""
Internal helper routine for parallelizing computations across multiple processors
for multiple WebbPSF monochromatic calculations.
args => (inst,w,fov_pix,oversample)
"""
# No multiprocessing for monochromatic wavelengths
mp_prev = poppy.conf.use_multiprocessing
poppy.conf.use_multiprocessing = False
inst,w,fov_pix,oversample = args
# fov_pix_orig = fov_pix # Does calc_psf change fov_pix??
try:
hdu_list = inst.calc_psf(fov_pixels=fov_pix, oversample=oversample, monochromatic=w*1e-6,
add_distortion=False, crop_psf=True)
# Distortions are ignored here. It's preferred to perform these later.
# See the WebbPSF functions in webbpsf.distortion
except Exception as e:
print('Caught exception in worker thread (w = {}):'.format(w))
# This prints the type, value, and stack trace of the
# current exception being handled.
traceback.print_exc()
print('')
#raise e
poppy.conf.use_multiprocessing = mp_prev
return None
# Return to previous setting
poppy.conf.use_multiprocessing = mp_prev
# return pad_or_cut_to_size(hdu_list[2].data, fov_pix_orig*oversample)
return hdu_list[0]
def gen_psf_coeff(filter_or_bp, pupil=None, mask=None, module='A',
fov_pix=11, oversample=None, npsf=None, ndeg=None, nproc=None,
offset_r=None, offset_theta=None, jitter=None, jitter_sigma=0.007,
tel_pupil=None, opd=None, wfe_drift=None, include_si_wfe=False,
detector=None, detector_position=None, apname=None, bar_offset=None,
force=False, save=True, save_name=None, return_save_name=False,
quick=False, return_webbpsf=False, add_distortion=False, crop_psf=True,
use_legendre=True, pynrc_mod=True, **kwargs):
"""Generate PSF coefficients
Creates a set of coefficients that will generate a simulated PSF at any
arbitrary wavelength. This function first uses ``WebbPSF`` to simulate
a number of evenly spaced PSFs throughout some specified bandpass.
An nth-degree polynomial is then fit to each oversampled pixel using
a linear least-squares fitting routine. The final set of coefficients
for each pixel is returned as an image cube. The returned set of
coefficients can be used to produce a set of PSFs by:
>>> psfs = pynrc.nrc_utils.jl_poly(waves, coeffs)
where 'waves' can be a scalar, ndarray, list, or tuple. All wavelengths
are in microns.
Distortions should be applied after creation of an image scene. For
NIRCam, this involves first rotating the focal plane and then
applying the distortions (see `webbpsf.distortion`).
>>> psf_rotated = distortion.apply_rotation(psf, crop=True) # apply rotation
>>> psf_distorted = distortion.apply_distortion(psf_rotated) # apply siaf distortion
Parameters
----------
filter_or_bp : str, :mod:`pysynphot.obsbandpass`
Either the name of a filter or a Pysynphot bandpass.
pupil : str, None
NIRCam pupil elements such as grisms or lyot stops.
mask : str, None
Specify the coronagraphic occulter (spots or bar).
module : str
Module 'A' or 'B'.
fov_pix : int
Size of the FoV in pixels (real SW or LW pixels)
oversample : int
Factor to oversample pixels (in one dimension).
The resulting coefficients will have x/y dimensions of
fov_pix*oversample. Default 2 for coronagraphy and 4 otherwise.
npsf : int
Number of evenly-spaced (with wavelength) monochromatic PSFs to
generate with webbPSF. If not specified, then the default is to
produce 20 PSFs/um. The wavelength range is determined by
choosing those wavelengths where throughput is >0.001.
There's a minimum of 5 monochromatic PSFs calculated over
the bandpass.
ndeg : int
Polynomial degree for PSF fitting.
Default = 10 (7 if quick=True).
offset_r : float
Radial offset from the center in arcsec.
offset_theta : float
Position angle for radial offset, in degrees CCW.
bar_offset : float
Offset along coronagraphic bar (wedge) occulter, in arcseconds.
Keeps the resulting PSF at zero tip/tilt, unlike `offset_r` and `offset_theta`.
Defaults to 0 offset. Use :func:`offset_bar` for filter-dependent offsets.
opd : str, tuple, HDUList
OPD specifications. If a tuple, then it should contain two elements
(filename, slice index). Can also specify just the filename, which
will default to the first image slice. Can also pass an HDUList
where the OPD data is stored at HDUList[0].data.
wfe_drift : float
Wavefront error drift amplitude in nm.
include_si_wfe : bool
Include SI WFE measurements? Default=False.
detector : str, None
Name of detector [NRCA1, ..., NRCA5, NRCB1, ..., NRCB5].
detector_position : tuple, None
The pixel position in (X, Y) on the detector ("science" coordinates)
tel_pupil : str, HDUList, None
Telescope entrance pupil mask.
Should either be a filename string or HDUList.
If None, then default: jwst_pupil_RevW_npix1024.fits.gz.
jitter : str or None
Currently either 'gaussian' or None.
jitter_sigma : float
If ``jitter = 'gaussian'``, then this is the size of the blurring effect.
force : bool
Forces a recalculation of PSF even if saved PSF exists. (default: False)
save : bool
Save the resulting PSF coefficients to a file? (default: True)
save_name : str, None
Full path name of FITS file to save/load coefficients.
If None, then a name is automatically generated.
quick : bool
Only perform a fit over the filter bandpass with a smaller default
polynomial degree fit. Auto filename will have filter name appended.
return_save_name : bool
Return only the name for saving.
return_webbpsf : bool
Return the WebbPSF generated PSF rather than coefficients.
add_distortion : bool
Add instrument distortions to the PSF. Includes 2 new extensions to
the PSF HDUlist object.
crop_psf : bool
Crop distorted PSF to match undistorted pixel shape.
use_legendre : bool
Use Legendre polynomials for coefficient fitting.
"""
from .version import __version__
grism_obs = (pupil is not None) and ('GRISM' in pupil)
dhs_obs = (pupil is not None) and ('DHS' in pupil)
coron_obs = (pupil is not None) and ('LYOT' in pupil)
if oversample is None:
oversample = 2 if coron_obs else 4
if opd is None: # Default OPD
opd = opd_default
elif isinstance(opd, six.string_types):
opd = (opd, 0)
# Default WFE drift
wfe_drift = 0 if wfe_drift is None else wfe_drift
assert wfe_drift >= 0, "wfe_drift must not be negative"
# Update module in case detector is specific
if detector is not None:
module = 'A' if 'A' in detector else 'B'
# Get filter throughput and create bandpass
if isinstance(filter_or_bp, six.string_types):
filter = filter_or_bp
bp = read_filter(filter, pupil=pupil, mask=mask, module=module, **kwargs)
else:
bp = filter_or_bp
filter = bp.name
chan_str = 'SW' if bp.avgwave() < 24000 else 'LW'
# Change log levels to WARNING for pyNRC, WebbPSF, and POPPY
log_prev = conf.logging_level
setup_logging('WARN', verbose=False)
# Create a simulated PSF with WebbPSF
inst = webbpsf_NIRCam_mod() if pynrc_mod else webbpsf.NIRCam()
# inst = webbpsf_NIRCam_mod()
#inst.options['output_mode'] = 'oversampled'
# The fov_pix keyword overrides parity
#inst.options['parity'] = 'odd'
inst.filter = filter
# Check if mask and pupil names exist in WebbPSF lists.
# We don't want to pass values that WebbPSF does not recognize
# but are otherwise completely valid in the pynrc framework.
if mask in list(inst.image_mask_list): inst.image_mask = mask
if pupil in list(inst.pupil_mask_list): inst.pupil_mask = pupil
# If WLP4 is used, always using a narrow band, so turn on quick flag
wl4_list = ['WEAK LENS +12 (=4+8)', 'WEAK LENS -4 (=4-8)', 'WEAK LENS +4',
'WLP12', 'WLM8', 'WLP4']
if (pupil in wl4_list):
quick = True
# Should we include field-dependent aberrations?
inst.include_si_wfe = include_si_wfe
# Set the SIAF aperture name
if apname is not None:
inst.auto_aperturename = False
inst.aperturename = apname
# Detector position
# define defaults
det_switch = {'SWA': 'A1', 'SWB':'B1', 'LWA':'A5', 'LWB':'B5'}
detpos_switch = {'SW':(1024,1024), 'LW':(1024,1024)}
if (detector is None) and (detector_position is None):
inst.detector = 'NRC' + det_switch.get(chan_str+module)
inst.detector_position = detpos_switch.get(chan_str)
elif detector is None:
inst.detector = 'NRC' + det_switch.get(chan_str+module)
inst.detector_position = detector_position
elif detector_position is None:
inst.detector_position = detpos_switch.get(chan_str)
inst.detector = detector
else:
inst.detector = detector
inst.detector_position = detector_position
# Print aperture and detector info
_log.debug(inst.aperturename, inst.detector, inst.detector_position)
# Telescope Pupil
if tel_pupil is not None:
inst.pupil = tel_pupil
mtemp = 'NONE' if mask is None else mask
ptemp = 'CLEAR' if pupil is None else pupil
# Get source offset positions
# 1. Round masks - Always assume theta=0 due to symmetry.
# 2. Bar Masks - PSF positioning is different depending on r and theta.
# 3. All other imaging - Just perform nominal r=theta=0.
# Any PSF movement is more quickly applied with sub-pixel shifting routines.
# NB: Implementation of field-dependent OPD maps may change these settings.
if offset_r is None: offset_r = 0
if offset_theta is None: offset_theta = 0
rtemp, ttemp = (offset_r, offset_theta)
inst.options['source_offset_r'] = rtemp
inst.options['source_offset_theta'] = ttemp
# Bar offsets (auto_offset not supported)
# If observing with bar mask, default to 0 offset
if 'B' in mtemp:
bar_offset = 0 if bar_offset is None else bar_offset
else:
# Set to None if not observing with bar mask
bar_offset = None
bar_str = '' if bar_offset is None else '_bar{:.1f}'.format(bar_offset)
inst.options['bar_offset'] = bar_offset
jitter_sigma = 0 if jitter is None else jitter_sigma
inst.options['jitter'] = jitter
inst.options['jitter_sigma'] = jitter_sigma
setup_logging(log_prev, verbose=False)
# Deal with OPD file name
#print(opd)
if isinstance(opd, tuple):
if not len(opd)==2:
raise ValueError("opd passed as tuple must have length of 2.")
# Filename info
opd_name = opd[0] # OPD file name
opd_num = opd[1] # OPD slice
rev = [s for s in opd_name.split('_') if "Rev" in s]
rev = '' if len(rev)==0 else rev[0]
otemp = '{}slice{:.0f}'.format(rev,opd_num)
opd = OPDFile_to_HDUList(opd_name, opd_num)
elif isinstance(opd, fits.HDUList):
# A custom OPD is passed. Consider using force=True.
otemp = 'OPDcustom'
opd_name = 'OPD from FITS HDUlist'
opd_num = 0
elif isinstance(opd, poppy.OpticalElement):
# OTE Linear Model
# No need to do anything
pass
else:
raise ValueError("OPD must be a string, tuple, or HDUList.")
if wfe_drift>0:
otemp = '{}-{:.0f}nm'.format(otemp,wfe_drift)
if save_name is None:
# Name to save array of oversampled coefficients
save_dir = conf.PYNRC_PATH + 'psf_coeffs/'
# Create directory if it doesn't already exist
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
# Build final filename to save coeff
# Append filter name if using quick keyword
fstr = '_{}'.format(filter) if quick else ''
# fname = '{}{}{}_{}_{}'.format(chan_str,module,fstr,ptemp,mtemp)
# fname = fname + '_pix{}_os{}'.format(fov_pix,oversample)
# fname = fname + '_jsig{:.0f}_r{:.1f}_th{:.1f}'.format(jitter_sigma*1000,rtemp,ttemp)
# fname = fname + '{}_{}'.format(bar_str,otemp)
fname = '{}{}{}_{}_{}_pix{}_os{}_jsig{:.0f}_r{:.2f}_th{:+.1f}{}_{}'.\
format(chan_str,module,fstr, ptemp,mtemp,fov_pix,oversample,\
jitter_sigma*1000,rtemp,ttemp,bar_str,otemp)
# Add SI WFE tag if included
if inst.include_si_wfe:
fname = fname + '_siwfe'
if use_legendre:
fname = fname + '_legendre'
fname = fname + '.fits'
save_name = save_dir + fname
if return_save_name:
return save_name
# Load data from already saved FITS file
if os.path.exists(save_name) and (not force) and (not return_webbpsf):
#return np.load(save_name)
# return fits.getdata(save_name)
hdul = fits.open(save_name)
data = hdul[0].data.astype(float)  # np.float alias removed in newer numpy
header = hdul[0].header
hdul.close()
return data, header
if return_webbpsf:
_log.info('Generating and returning WebbPSF HDUList')
else:
temp_str = 'and saving' if save else 'but not saving'
_log.info('Generating {} new PSF coefficient'.format(temp_str))
# If there is wfe_drift, create a OTE Linear Model
if (wfe_drift > 0):
_log.debug('Performing WFE drift of {}nm'.format(wfe_drift))
# OPD should already be an HDUList or OTE LM by now
# If we want more realistic time evolution, then need to use
# procedure in dev_utils/WebbPSF_OTE_LM.ipynb to create a time
# series of OPDs then pass those OPDs directly to create unique PSFs
if isinstance(opd, fits.HDUList):
hdul = opd
header = hdul[0].header
header['ORIGINAL'] = (opd_name, "Original OPD source")
header['SLICE'] = (opd_num, "Slice index of original OPD")
header['WFEDRIFT'] = (wfe_drift, "WFE drift amount [nm]")
name = 'Modified from ' + opd_name
opd = OTE_Linear_Model_WSS(name=name, opd=hdul, opd_index=opd_num, transmission=inst.pupil)
# Apply WFE drift to OTE Linear Model (Amplitude of frill drift)
inst.pupilopd = opd
inst.pupil = opd
# Split WFE drift amplitude between three processes
# 1) IEC Heaters; 2) Frill tensioning; 3) OTE Thermal perturbations
# Give IEC heaters 1 nm
wfe_iec = 1 if np.abs(wfe_drift) > 2 else 0
# Split remainder evenly between frill and OTE thermal slew
wfe_remain_var = wfe_drift**2 - wfe_iec**2
wfe_frill = np.sqrt(0.8*wfe_remain_var)
wfe_therm = np.sqrt(0.2*wfe_remain_var)
# wfe_th_frill = np.sqrt((wfe_drift**2 - wfe_iec**2) / 2)
# Negate amplitude if supplying negative wfe_drift
if wfe_drift < 0:
wfe_frill *= -1
wfe_therm *= -1
wfe_iec *= -1
# Apply IEC
opd.apply_iec_drift(wfe_iec, delay_update=True)
# Apply frill
opd.apply_frill_drift(wfe_frill, delay_update=True)
# Apply OTE thermal slew amplitude
# This is slightly different due to how thermal slews are specified
import astropy.units as u
delta_time = 14*24*60 * u.min
wfe_scale = (wfe_therm / 24)
if wfe_scale == 0:
delta_time = 0
opd.thermal_slew(delta_time, case='BOL', scaling=wfe_scale)
else:
inst.pupilopd = opd
# By default, WebbPSF has wavelength limits depending on the channel
# which can interfere with pynrc calculations, so set these to low/high values
inst.SHORT_WAVELENGTH_MIN = inst.LONG_WAVELENGTH_MIN = 1e-7
inst.SHORT_WAVELENGTH_MAX = inst.LONG_WAVELENGTH_MAX = 10e-6
# Select which wavelengths to use
# If doing a "quick" PSF, only fit the filter wavelength range.
# Otherwise, we fit the full channel wavelength range.
if quick:
w1 = bp.wave.min() / 1e4
w2 = bp.wave.max() / 1e4
else:
w1, w2 = (0.5,2.5) if 'SW' in chan_str else (2.4,5.1)
# First determine polynomial fit degree
# Must ensure npsf>ndeg
if ndeg is None:
if use_legendre:
# TODO: Quantify these better
ndeg = 7 if quick else 9
else:
# TODO: Quantify these better
ndeg = 7 if quick else 9
# Create set of monochromatic PSFs to fit.
if npsf is None:
dn = 20 # 20 PSF simulations per um
npsf = np.ceil(dn * (w2-w1))
npsf = 5 if npsf<5 else int(npsf)
npsf = ndeg+1 if npsf<=ndeg else int(npsf)
waves = np.linspace(w1, w2, npsf)
# Change log levels to WARNING for pyNRC, WebbPSF, and POPPY
if return_webbpsf:
setup_logging('WARN', verbose=False)
if 'sp_norm' in list(kwargs.keys()):
sp_norm = kwargs['sp_norm']
else:
waveset = waves * 1e4
sp_flat = S.ArraySpectrum(waveset, 0*waveset + 10.)
sp_flat.name = 'Flat spectrum in flam'
# Bandpass unit response is the flux (in flam) of a star that
# produces a response of one count per second in that bandpass
sp_norm = sp_flat.renorm(bp.unit_response(), 'flam', bp)
t0 = time.time()
hdu_list = inst.calc_psf(source=sp_norm, fov_pixels=fov_pix, oversample=oversample,
add_distortion=add_distortion, crop_psf=crop_psf)
t1 = time.time()
setup_logging(log_prev, verbose=False)
time_string = 'Took {:.2f} seconds to generate WebbPSF images'.format(t1-t0)
_log.info(time_string)
# Take into account reduced beam factor for grism data
# Account for the circular pupil that does not allow all grism grooves to have their
# full length illuminated (Erickson & Rabanus 2000), effectively broadening the FWHM.
# It's actually a hexagonal pupil, so the factor is 1.07, not 1.15.
# We want to stretch the PSF in the dispersion direction
if grism_obs:
wfact = 1.07
scale = (1,wfact) if 'GRISM0' in pupil else (wfact,1)
for hdu in hdu_list:
im_scale = frebin(hdu.data, scale=scale)
hdu.data = pad_or_cut_to_size(im_scale, hdu.data.shape)
return hdu_list
# How many processors to split into?
if nproc is None:
nproc = nproc_use(fov_pix, oversample, npsf)
_log.debug('nprocessors: {}; npsf: {}'.format(nproc, npsf))
setup_logging('WARN', verbose=False)
t0 = time.time()
# Setup the multiprocessing pool and arguments to pass to each pool
worker_arguments = [(inst, wlen, fov_pix, oversample) for wlen in waves]
if nproc > 1:
pool = mp.Pool(nproc)
# Pass arguments to the helper function
try:
hdu_arr = pool.map(_wrap_coeff_for_mp, worker_arguments)
if hdu_arr[0] is None:
raise RuntimeError('Returned None values. Issue with multiprocess or WebbPSF??')
except Exception as e:
_log.error('Caught an exception during multiprocess.')
_log.error('Closing multiprocess pool.')
pool.terminate()
pool.close()
raise e
else:
_log.debug('Closing multiprocess pool.')
pool.close()
else:
# Pass arguments to the helper function
hdu_arr = []
for wa in worker_arguments:
hdu = _wrap_coeff_for_mp(wa)
if hdu is None:
raise RuntimeError('Returned None values. Issue with WebbPSF??')
hdu_arr.append(hdu)
t1 = time.time()
# Reset to original log levels
setup_logging(log_prev, verbose=False)
time_string = 'Took {:.2f} seconds to generate WebbPSF images'.format(t1-t0)
_log.info(time_string)
# Extract image data from HDU array
images = []
for hdu in hdu_arr:
images.append(hdu.data)
# Take into account reduced beam factor for grism data
# Account for the circular pupil that does not allow all grism grooves to have their
# full length illuminated (Erickson & Rabanus 2000), effectively broadening the FWHM.
# It's actually a hexagonal pupil, so the factor is 1.07, not 1.15.
# We want to stretch the PSF in the dispersion direction
if grism_obs:
wfact = 1.07
scale = (1,wfact) if 'GRISM0' in pupil else (wfact,1)
for i,im in enumerate(images):
im_scale = frebin(im, scale=scale)
images[i] = pad_or_cut_to_size(im_scale, im.shape)
# Turn results into an numpy array (npsf,ny,nx)
images = np.array(images)
# Simultaneous polynomial fits to all pixels using linear least squares
coeff_all = jl_poly_fit(waves, images, deg=ndeg, use_legendre=use_legendre, lxmap=[w1,w2])
hdu = fits.PrimaryHDU(coeff_all)
hdr = hdu.header
head_temp = hdu_arr[0].header
hdr['DESCR'] = ('PSF Coefficients', 'File Description')
hdr['NWAVES'] = (npsf, 'Number of wavelengths used in calculation')
hdr['PUPILOPD'] = (opd_name, 'Pupil wavefront OPD source')
hdr['OPDSLICE'] = (opd_num, 'OPD slice index')
copy_keys = ['PUPILINT', 'EXTNAME', 'OVERSAMP', 'DET_SAMP',
'PIXELSCL', 'FOV', 'JITRTYPE', 'JITRSIGM',
'INSTRUME', 'CHANNEL', 'DET_NAME', 'TEL_WFE',
'DET_X', 'DET_Y', 'DET_V2', 'DET_V3',
'DATE', 'AUTHOR', 'VERSION', 'DATAVERS']
for key in copy_keys:
try:
hdr[key] = (head_temp[key], head_temp.comments[key])
except (AttributeError, KeyError):
hdr[key] = ('none', 'No key found')
# gen_psf_coeff() Keyword Values
hdr['FILTER'] = (filter, 'Filter Input')
hdr['PUPIL'] = (ptemp, 'Pupil Setting')
hdr['MASK'] = (mtemp, 'Coronagraphic Mask Setting')
hdr['MODULE'] = (module, 'NIRCam Module A or B')
hdr['FOVPIX'] = (fov_pix, 'WebbPSF pixel FoV')
hdr['OSAMP'] = (oversample, 'WebbPSF pixel oversample')
hdr['NPSF'] = (npsf, 'Number of wavelengths to calc')
hdr['NDEG'] = (ndeg, 'Polynomial fit degree')
hdr['WAVE1'] = (w1, 'First wavelength in calc')
hdr['WAVE2'] = (w2, 'Last wavelength in calc')
hdr['LEGNDR'] = (use_legendre, 'Legendre polynomial fit?')
if tel_pupil is None:
hdr['TELPUP'] = ('None', 'Telescope pupil')
elif isinstance(tel_pupil, fits.HDUList):
hdr['TELPUP'] = ('HDUList', 'Telescope pupil')
elif isinstance(tel_pupil, six.string_types):
hdr['TELPUP'] = (tel_pupil, 'Telescope pupil')
else:
hdr['TELPUP'] = ('UNKNOWN', 'Telescope pupil')
hdr['OFFR'] = (offset_r, 'Radial offset')
hdr['OFFTH'] = (offset_theta, 'Position angle OFFR (CCW)')
if jitter is None:
hdr['JITTER'] = ('None', 'Jitter type')
else:
hdr['JITTER'] = (jitter, 'Jitter type')
hdr['JITSIG'] = (jitter_sigma, 'Jitter sigma')
if opd is None:
hdr['OPD'] = ('None', 'Telescope OPD')
elif isinstance(opd, fits.HDUList):
hdr['OPD'] = ('HDUList', 'Telescope OPD')
elif isinstance(opd, six.string_types):
hdr['OPD'] = (opd, 'Telescope OPD')
elif isinstance(opd, poppy.OpticalElement):
hdr['OPD'] = ('OTE Linear Model', 'Telescope OPD')
else:
hdr['OPD'] = ('UNKNOWN', 'Telescope OPD')
hdr['WFEDRIFT'] = (wfe_drift, "WFE drift amount [nm]")
hdr['SIWFE'] = (include_si_wfe, "Was SI WFE included?")
hdr['FORCE'] = (force, "Forced calculations?")
hdr['SAVE'] = (save, "Save file?")
hdr['FILENAME'] = (os.path.basename(save_name), "File save name")
hdr['PYNRCVER'] = (__version__, "pyNRC version")
hdr.insert('DATAVERS', '', after=True)
hdr.insert('DATAVERS', ('','psf_coeff() Keyword Values'), after=True)
hdr.insert('DATAVERS', '', after=True)
hdr.add_history(time_string)
if save:
#np.save(save_name, coeff_all)
hdu.writeto(save_name, overwrite=True)
return coeff_all, hdr
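# Illustrative usage sketch (not part of the original module). The filter,
# fov_pix, and oversample values are placeholders, and WebbPSF data files
# are assumed to be installed.
def _example_gen_psf_coeff():
    """Generate PSF coefficients and evaluate them at a few wavelengths."""
    coeff, hdr = gen_psf_coeff('F210M', fov_pix=33, oversample=4)
    # Wavelengths (um) at which to evaluate the polynomial fit
    waves = np.array([2.0, 2.1, 2.2])
    use_legendre = True if hdr['LEGNDR'] else False
    lxmap = [hdr['WAVE1'], hdr['WAVE2']]
    # Returns a PSF cube of shape (nwave, ny, nx)
    psfs = jl_poly(waves, coeff, use_legendre=use_legendre, lxmap=lxmap)
    return psfs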
def gen_webbpsf_psf(filter_or_bp, pupil=None, mask=None, module='A',
fov_pix=11, oversample=None, tel_pupil=None, opd=None,
wfe_drift=None, drift_file=None, include_si_wfe=False,
offset_r=None, offset_theta=None, jitter=None, jitter_sigma=0.007,
detector=None, detector_position=None, apname=None, bar_offset=None,
add_distortion=False, crop_psf=True, pynrc_mod=True, **kwargs):
"""Create WebbPSF PSF
Kind of clunky way of generating a single PSF directly from WebbPSF
by passing all the different options through keyword arguments.
"""
return gen_psf_coeff(filter_or_bp, pupil=pupil, mask=mask, module=module,
fov_pix=fov_pix, oversample=oversample, tel_pupil=tel_pupil, opd=opd,
wfe_drift=wfe_drift, drift_file=drift_file, include_si_wfe=include_si_wfe,
offset_r=offset_r, offset_theta=offset_theta, jitter=jitter, jitter_sigma=jitter_sigma,
detector=detector, detector_position=detector_position, apname=apname, bar_offset=bar_offset,
add_distortion=add_distortion, crop_psf=crop_psf, pynrc_mod=pynrc_mod,
return_webbpsf=True, **kwargs)
def gen_webbpsf_siwfe(filter_or_bp, coords, pynrc_mod=True, **kwargs):
""" Generate Location-specific PSF from WebbPSF
Parameters
----------
filter_or_bp : str, :mod:`pysynphot.obsbandpass`
Either the name of a filter or a Pysynphot bandpass.
coords : tuple
(V2,V3) coordinates in (arcmin)
Keyword Args
------------
pynrc_mod : bool
Use `webbpsf_NIRCam_mod` instead of `webbpsf.NIRCam`.
This is a slightly modified version of NIRCam to fix
minor coordinate issues.
"""
# Get filter throughput and create bandpass
if isinstance(filter_or_bp, six.string_types):
filter = filter_or_bp
bp = read_filter(filter, **kwargs)
else:
bp = filter_or_bp
filter = bp.name
chan_str = 'SW' if bp.avgwave() < 24000 else 'LW'
coords_asec = np.array(coords)*60
detector, detector_position, apname = Tel2Sci_info(chan_str, coords_asec, output='sci', return_apname=True, **kwargs)
print(detector, detector_position, apname)
kwargs['include_si_wfe'] = True
kwargs['apname'] = apname
kwargs['detector'] = detector
kwargs['detector_position'] = detector_position
kwargs['add_distortion'] = False
return gen_webbpsf_psf(filter, pynrc_mod=pynrc_mod, **kwargs)
def _wrap_wfed_coeff_for_mp(arg):
args, kwargs = arg
wfe = kwargs['wfe_drift']
# print('WFE Drift: {} nm'.format(wfe))
cf, _ = gen_psf_coeff(*args, **kwargs)
return cf
def wfed_coeff(filter_or_bp, force=False, save=True, save_name=None, nsplit=None, **kwargs):
"""PSF Coefficient Mod for WFE Drift
This function finds a relationship between PSF coefficients
in the presence of WFE drift. For a series of WFE drift values,
we generate corresponding PSF coefficients and fit a polynomial
relationship to the residual values. This allows us to quickly
modify a nominal set of PSF image coefficients to generate a
new PSF where the WFE has drifted by some amplitude.
Keyword Arguments match those in :func:`gen_psf_coeff`.
Parameters
----------
filter_or_bp : str
Name of a filter.
force : bool
Forces a recalculation of coefficients even if saved
PSF already exists. (default: False)
save : bool
Save the resulting WFE drift coefficients to a file?
(default: True)
save_name : str, None
Full path name of save file (.npy) to save/load.
If None, then a name is automatically generated,
matching the :func:`gen_psf_coeff` function.
nsplit : int
Number of processors to split over. There are checks to
make sure you're not requesting more processors than the
current machine has available.
Example
-------
Generate PSF coefficient, WFE drift modifications, then
create an undrifted and drifted PSF. (pseudo-code)
>>> fpix, osamp = (128, 4)
>>> coeff, _ = gen_psf_coeff('F210M', fov_pix=fpix, oversample=osamp)
>>> wfe_cf = wfed_coeff('F210M', fov_pix=fpix, oversample=osamp)
>>> psf0 = gen_image_coeff('F210M', coeff=coeff, fov_pix=fpix, oversample=osamp)
>>> # Drift the coefficients
>>> wfe_drift = 5 # nm
>>> cf_fit = wfe_cf.reshape([wfe_cf.shape[0], -1])
>>> cf_mod = jl_poly(np.array([wfe_drift]), cf_fit).reshape(coeff.shape)
>>> cf_new = coeff + cf_mod
>>> psf5nm = gen_image_coeff('F210M', coeff=cf_new, fov_pix=fpix, oversample=osamp)
"""
kwargs['force'] = True
kwargs['save'] = False
kwargs['save_name'] = None
#kwargs['opd'] = opd_default
# Get filter throughput and create bandpass
if isinstance(filter_or_bp, six.string_types):
filter = filter_or_bp
bp = read_filter(filter, **kwargs)
else:
bp = filter_or_bp
filter = bp.name
# defaults
fov_pix = kwargs['fov_pix'] if 'fov_pix' in list(kwargs.keys()) else 33
oversample = kwargs['oversample'] if 'oversample' in list(kwargs.keys()) else 4
# Final filename to save coeff
if save_name is None:
# Name to save array of oversampled coefficients
save_dir = conf.PYNRC_PATH + 'psf_coeffs/'
# Create directory if it doesn't already exist
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
# Final filename to save coeff
save_name = gen_psf_coeff(bp, return_save_name=True, **kwargs)
save_name = os.path.splitext(save_name)[0] + '_wfedrift.npz'
# Load file if it already exists
if (not force) and os.path.exists(save_name):
out = np.load(save_name)
return out['arr_0'], out['arr_1']
_log.warn('Generating WFE Drift coefficients. This may take some time...')
# _log.warn('{}'.format(save_name))
# Cycle through WFE drifts for fitting
wfe_list = np.array([0,1,2,5,10,20,40])
npos = len(wfe_list)
# Split over multiple processors?
nsplit_max = nproc_use(fov_pix, oversample, npos)#, coron=coron_obs)
if nsplit is None:
pupil = kwargs['pupil'] if 'pupil' in list(kwargs.keys()) else None
coron_obs = (pupil is not None) and ('LYOT' in pupil)
nsplit = nproc_use(fov_pix, oversample, npos)#, coron=coron_obs)
# Compare to number of PSFs
if ('quick' in list(kwargs.keys())) and (kwargs['quick']==True):
w1 = bp.wave.min() / 1e4
w2 = bp.wave.max() / 1e4
dw = w2 - w1
else:
dw = 2.5
npsf = np.ceil(20 * dw)
npsf = 5 if npsf<5 else int(npsf)
nsplit_psf = nproc_use(fov_pix, oversample, npsf)#, coron=coron_obs)
if nsplit_psf > nsplit:
nsplit = 1
# Double check we're not requesting too many processors
nsplit = nsplit_max if nsplit > nsplit_max else nsplit
# Create worker arguments with kwargs as an argument input
worker_args = []
args = [bp]
for wfe in wfe_list:
kw = kwargs.copy()
kw['wfe_drift'] = wfe
worker_args.append((args, kw))
if nsplit>1:
# If splitting, then cannot split in subprocess for each PSF (ie., across wavelengths)
poppy_nproc_prev = poppy.conf.n_processes
poppy.conf.n_processes = 1
cf_wfe = []
# pool = mp.Pool(nsplit)
try:
# cf_wfe = pool.map(_wrap_wfed_coeff_for_mp, worker_args)
with mp.Pool(nsplit) as pool:
for res in tqdm(pool.imap(_wrap_wfed_coeff_for_mp, worker_args), total=npos):  # ordered so results stay aligned with wfe_list
cf_wfe.append(res)
pool.close()
if cf_wfe[0] is None:
raise RuntimeError('Returned None values. Issue with multiprocess or WebbPSF??')
except Exception as e:
_log.error('Caught an exception during multiprocess.')
_log.error('Closing multiprocess pool.')
pool.terminate()
pool.close()
poppy.conf.n_processes = poppy_nproc_prev
raise e
else:
_log.debug('Closing multiprocess pool.')
# pool.close()
poppy.conf.n_processes = poppy_nproc_prev
else:
# No multiprocessor
cf_wfe = []
for wa in tqdm(worker_args):
cf = _wrap_wfed_coeff_for_mp(wa)
cf_wfe.append(cf)
# cf_wfe = [_wrap_wfed_coeff_for_mp(wa) for wa in worker_args]
# Get residuals
cf_wfe = np.array(cf_wfe) - cf_wfe[0]
# Fit each pixel with a polynomial and save the coefficient
cf_shape = cf_wfe.shape[1:]
cf_wfe = cf_wfe.reshape([npos, -1])
lxmap = np.array([np.min(wfe_list), np.max(wfe_list)])
cf_fit = jl_poly_fit(wfe_list, cf_wfe, deg=4, use_legendre=True, lxmap=lxmap)
cf_fit = cf_fit.reshape([-1, cf_shape[0], cf_shape[1], cf_shape[2]])
if save:
np.savez(save_name, cf_fit, lxmap)
_log.info('Done.')
return cf_fit, lxmap
def _wrap_field_coeff_for_mp(arg):
args, kwargs = arg
apname = kwargs['apname']
det = kwargs['detector']
det_pos = kwargs['detector_position']
v2, v3 = kwargs['coords']
_log.info('V2/V3 Coordinates and det pixel (sci) on {}/{}: ({:.2f}, {:.2f}), ({:.1f}, {:.1f})'
.format(det, apname, v2/60, v3/60, det_pos[0], det_pos[1]))
cf, _ = gen_psf_coeff(*args, **kwargs)
return cf
def field_coeff_resid(filter_or_bp, coeff0, force=False, save=True, save_name=None,
return_raw=False, nsplit=None, **kwargs):
"""PSF Coefficient Residuals w.r.t. Field Position
Keyword Arguments match those in :func:`gen_psf_coeff`.
Parameters
----------
filter_or_bp : str
Name of a filter.
coeff0 : ndarray
Nominal PSF coefficients against which residuals are computed.
force : bool
Forces a recalculation of coefficients even if saved
PSF already exists. (default: False)
save : bool
Save the resulting field-dependent coefficients to a file?
(default: True)
save_name : str, None
Full path name of save file (.npy) to save/load.
If None, then a name is automatically generated,
matching the :func:`gen_psf_coeff` function.
nsplit : int
Number of processors to split over. There are checks to
make sure you're not requesting more processors than the
current machine has available.
return_raw : bool
Return PSF coefficients of unevenly sampled V2/V3 grid
along with the V2/V3 coordinates (cf_resid, v2_all, v3_all).
Example
-------
Generate PSF coefficient, field position modifications, then
create a PSF at some (V2,V3) location. (pseudo-code)
>>> fpix, osamp = (128, 4)
>>> coeff, _ = gen_psf_coeff('F210M', fov_pix=fpix, oversample=osamp)
>>> cf_resid = field_coeff_resid('F210M', coeff, fov_pix=fpix, oversample=osamp)
>>> # Some (V2,V3) location (arcmin)
>>> v2, v3 = (1.2, -7)
>>> cf_mod = field_model(v2, v3, cf_resid)
>>> cf_new = coeff + cf_mod
>>> psf = gen_image_coeff('F210M', coeff=cf_new, fov_pix=fpix, oversample=osamp)
"""
from astropy.table import Table
kwargs['force'] = True
kwargs['save'] = False
kwargs['save_name'] = None
if return_raw:
save = False
force = True
_log.warn("return_raw=True; Setting 'save=False' and 'force=True'")
# Get filter throughput and create bandpass
if isinstance(filter_or_bp, six.string_types):
filter = filter_or_bp
bp = read_filter(filter, **kwargs)
else:
bp = filter_or_bp
filter = bp.name
channel = 'SW' if bp.avgwave() < 24000 else 'LW'
# Set a default fov_pix and oversample
fov_pix = kwargs['fov_pix'] if 'fov_pix' in list(kwargs.keys()) else 33
oversample = kwargs['oversample'] if 'oversample' in list(kwargs.keys()) else 4
# Cycle through a list of field points
# These are the measured CV3 field positions
module = kwargs.get('module', 'A') # If not specified, choose 'A'
kwargs['module'] = module
# Check if coronagraphy
pupil = kwargs.get('pupil', 'CLEAR') # If not specified, choose 'CLEAR'
kwargs['pupil'] = pupil
# Read in measured SI Zernike data
if (pupil is not None) and ('LYOT' in pupil):
zfile = 'si_zernikes_coron_wfe.fits'
if module=='B':
raise NotImplementedError("There are no Full Frame SIAF apertures defined for Mod B coronagraphy")
else:
zfile = 'si_zernikes_isim_cv3.fits'
data_dir = webbpsf.utils.get_webbpsf_data_path() + '/'
zernike_file = data_dir + zfile
ztable_full = Table.read(zernike_file)
mod = channel + module
ind_nrc = ['NIRCam'+mod in row['instrument'] for row in ztable_full]
ind_nrc = np.where(ind_nrc)
v2_all = np.array(ztable_full[ind_nrc]['V2'].tolist())
v3_all = np.array(ztable_full[ind_nrc]['V3'].tolist())
# Add detector corners
v2_min, v2_max, v3_min, v3_max = NIRCam_V2V3_limits(module, channel=channel, pupil=pupil, rederive=True, border=1)
igood = v3_all > v3_min
v2_all = np.append(v2_all[igood], [v2_min, v2_max, v2_min, v2_max])
v3_all = np.append(v3_all[igood], [v3_min, v3_min, v3_max, v3_max])
npos = len(v2_all)
# First is default value
#kwargs['detector'] = None
#kwargs['detector_position'] = None
#kwargs['include_si_wfe'] = False
#cf0 = gen_psf_coeff(filter, **kwargs)
kwargs['include_si_wfe'] = True
# Final filename to save coeff
if save_name is None:
# Name to save array of oversampled coefficients
save_dir = conf.PYNRC_PATH + 'psf_coeffs/'
# Create directory if it doesn't already exist
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
# Final filename to save coeff
save_name = gen_psf_coeff(bp, return_save_name=True, **kwargs)
save_name = os.path.splitext(save_name)[0] + '_cffields.npz'
# Load file if it already exists
if (not force) and os.path.exists(save_name):
out = np.load(save_name)
return out['arr_0'], out['arr_1'], out['arr_2']
_log.warn('Generating field-dependent coefficients. This may take some time...')
# Split over multiple processors?
nsplit_max = nproc_use(fov_pix, oversample, npos)#, coron=coron_obs)
if nsplit is None:
pupil = kwargs['pupil'] if 'pupil' in list(kwargs.keys()) else None
coron_obs = (pupil is not None) and ('LYOT' in pupil)
nsplit = nproc_use(fov_pix, oversample, npos)#, coron=coron_obs)
# Compare to number of PSFs
if ('quick' in list(kwargs.keys())) and (kwargs['quick']==True):
w1 = bp.wave.min() / 1e4
w2 = bp.wave.max() / 1e4
dw = w2 - w1
else:
dw = 2.5
npsf = np.ceil(20 * dw)
npsf = 5 if npsf<5 else int(npsf)
nsplit_psf = nproc_use(fov_pix, oversample, npsf)#, coron=coron_obs)
if (nsplit <= 2) and (nsplit_psf > nsplit):
nsplit = 1
# Double check we're not requesting too many processors
nsplit = nsplit_max if nsplit > nsplit_max else nsplit
# Create worker arguments with kwargs as an input dict
worker_args = []
args = [filter]
for (v2, v3) in zip(v2_all, v3_all):
# Get the detector and pixel position
coords = (v2*60, v3*60) # in arcsec
det, det_pos, apname = Tel2Sci_info(channel, coords, pupil=pupil, output="sci", return_apname=True)
kw = kwargs.copy()
kw['apname'] = apname
kw['detector'] = det
kw['detector_position'] = det_pos
kw['coords'] = coords
worker_args.append((args, kw))
# Multiprocessing?
if nsplit > 1:
# If splitting, then cannot split in subprocess for each PSF (ie., across wavelengths)
poppy_nproc_prev = poppy.conf.n_processes
poppy.conf.n_processes = 1
cf_fields = []
ntot = len(worker_args)
# pool = mp.Pool(nsplit)
try:
with mp.Pool(nsplit) as pool:
for res in tqdm(pool.imap(_wrap_field_coeff_for_mp, worker_args), total=ntot):  # ordered so results stay aligned with v2_all/v3_all
cf_fields.append(res)
pool.close()
# cf_fields = pool.map(_wrap_field_coeff_for_mp, worker_args)
if cf_fields[0] is None:
raise RuntimeError('Returned None values. Issue with multiprocess or WebbPSF??')
except Exception as e:
_log.error('Caught an exception during multiprocess.')
_log.error('Closing multiprocess pool.')
pool.terminate()
pool.close()
poppy.conf.n_processes = poppy_nproc_prev
raise e
else:
_log.debug('Closing multiprocess pool.')
# pool.close()
poppy.conf.n_processes = poppy_nproc_prev
else: # No multiprocessor
cf_fields = []
for wa in tqdm(worker_args):
cf = _wrap_field_coeff_for_mp(wa)
cf_fields.append(cf)
# cf_fields = [_wrap_field_coeff_for_mp(wa) for wa in worker_args]
# Get residuals
cf_fields_resid = np.array(cf_fields) - coeff0
if return_raw:
return cf_fields_resid, v2_all, v3_all
# Create an evenly spaced grid of V2/V3 coordinates
nv23 = 8
v2grid = np.linspace(v2_min, v2_max, num=nv23)
v3grid = np.linspace(v3_min, v3_max, num=nv23)
# Interpolate onto an evenly space grid
res = make_coeff_resid_grid(v2_all, v3_all, cf_fields_resid, v2grid, v3grid)
if save:
np.savez(save_name, *res)
_log.warn('Done.')
return res
def make_coeff_resid_grid(xin, yin, cf_resid, xgrid, ygrid):
# Create 2D grid arrays of coordinates
xnew, ynew = np.meshgrid(xgrid,ygrid)
nx, ny = len(xgrid), len(ygrid)
_log.warn("Interpolating coefficient residuals onto regular grid...")
sh = cf_resid.shape
cf_resid_grid = np.zeros([ny,nx,sh[1],sh[2],sh[3]])
# Cycle through each coefficient to interpolate onto V2/V3 grid
for i in range(sh[1]):
cf_resid_grid[:,:,i,:,:] = griddata((xin, yin), cf_resid[:,i,:,:], (xnew, ynew), method='cubic')
return (cf_resid_grid, xgrid, ygrid)
def field_coeff_func(v2grid, v3grid, cf_fields, v2_new, v3_new):
"""Interpolation function for PSF coefficient residuals
Uses RegularGridInterpolator to quickly determine new coefficient
residuals at specified points.
Parameters
----------
v2grid : ndarray
V2 values corresponding to `cf_fields`.
v3grid : ndarray
V3 values corresponding to `cf_fields`.
cf_fields : ndarray
Coefficient residuals at different field points
Shape is (nV3, nV2, ncoeff, ypix, xpix)
v2_new : ndarray
New V2 point(s) to interpolate on. Same units as v2grid.
v3_new : ndarray
New V3 point(s) to interpolate on. Same units as v3grid.
"""
func = RegularGridInterpolator((v3grid, v2grid), cf_fields, method='linear',
bounds_error=False, fill_value=None)
pts = np.array([v3_new,v2_new]).transpose()
# If only 1 point, remove first axes
res = func(pts)
res = res.squeeze() if res.shape[0]==1 else res
return res
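# Illustrative usage sketch (not part of the original module). Assumes the
# gridded residuals come from field_coeff_resid / make_coeff_resid_grid;
# the default (V2, V3) point is an arbitrary placeholder in arcmin.
def _example_field_coeff_func(coeff, cf_resid, v2grid, v3grid, v2_new=1.2, v3_new=-7.0):
    """Interpolate field-dependent residuals and apply them to nominal coefficients."""
    cf_mod = field_coeff_func(v2grid, v3grid, cf_resid, v2_new, v3_new)
    return coeff + cf_mod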
def wedge_coeff(filter, pupil, mask, force=False, save=True, save_name=None, **kwargs):
"""PSF Coefficient Mod w.r.t. Wedge Coronagraph Location
Keyword Arguments match those in :func:`gen_psf_coeff`.
Parameters
----------
filter : str
Name of a filter.
force : bool
Forces a recalculation of coefficients even if saved
PSF already exists. (default: False)
save : bool
Save the resulting wedge-offset coefficients to a file?
(default: True)
save_name : str, None
Full path name of save file (.npy) to save/load.
If None, then a name is automatically generated,
matching the :func:`gen_psf_coeff` function.
Example
-------
Generate PSF coefficient at bar_offset=0, generate position modifications,
then use these results to create a PSF at some arbitrary offset location.
(pseudo-code)
>>> fpix, osamp = (320, 2)
>>> filt, pupil, mask = ('F430M', 'WEDGELYOT', 'MASKLWB')
>>> coeff = gen_psf_coeff(filt, pupil, mask, fov_pix=fpix, oversample=osamp)
>>> cf_resid = wedge_coeff(filt, pupil, mask, fov_pix=fpix, oversample=osamp)
>>> # The narrow location (arcsec)
>>> bar_offset = 8
>>> cf_fit = cf_resid.reshape([cf_resid.shape[0], -1])
>>> cf_mod = jl_poly(np.array([bar_offset]), cf_fit).reshape(coeff.shape)
>>> cf_new = coeff + cf_mod
>>> psf = gen_image_coeff(filt, pupil, mask, coeff=cf_new, fov_pix=fpix, oversample=osamp)
"""
kwargs['force'] = True
kwargs['save'] = False
kwargs['save_name'] = None
kwargs['pupil'] = pupil
kwargs['mask'] = mask
module = kwargs.get('module', 'A')
kwargs['module'] = module
# Get filter throughput and create bandpass
bp = read_filter(filter)
channel = 'SW' if bp.avgwave() < 24000 else 'LW'
# Final filename to save coeff
if save_name is None:
# Name to save array of oversampled coefficients
save_dir = conf.PYNRC_PATH + 'psf_coeffs/'
# Create directory if it doesn't already exist
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
# Final filename to save coeff
save_name = gen_psf_coeff(filter, return_save_name=True, **kwargs)
save_name = os.path.splitext(save_name)[0] + '_wedge.npy'
# Load file if it already exists
if (not force) and os.path.exists(save_name):
return np.load(save_name)
_log.warn('Generating wedge field-dependent coefficients. This may take some time...')
# Cycle through a list of bar offset locations
values = np.arange(-8,8,1)
nvals = len(values)
# First is default value
kwargs['include_si_wfe'] = False
cf0, _ = gen_psf_coeff(filter, bar_offset=0, **kwargs)
cf_offset = []
for val in tqdm(values):
_log.debug('Bar Offset: {:.1f} arcsec'.format(val))
kwargs['bar_offset'] = val
cf, _ = gen_psf_coeff(filter, **kwargs)
cf_offset.append(cf)
cf_offset = np.array(cf_offset)
# Get residuals
cf_offset -= cf0
# Fit each pixel with a polynomial and save the coefficient
cf_offset = cf_offset.reshape([nvals, -1])
cf_fit = jl_poly_fit(values, cf_offset, 4)
cf_fit = cf_fit.reshape([-1, cf.shape[0], cf.shape[1], cf.shape[2]])
if save:
np.save(save_name, cf_fit)
_log.warn('Done.')
return cf_fit
def gen_image_from_coeff(coeff, coeff_hdr, **kwargs):
"""Generate PSF from coefficient
Wrapper for :func:`gen_image_coeff` that uses information in the header to
populate certain input parameters (filter, mask, pupil, fov_pix, oversample)
so as to avoid any confusion.
"""
kwargs['pupil'] = None if coeff_hdr['PUPIL'] in ('NONE', 'CLEAR') else coeff_hdr['PUPIL']
kwargs['mask'] = None if coeff_hdr['MASK'] == 'NONE' else coeff_hdr['MASK']
kwargs['module'] = coeff_hdr['MODULE']
kwargs['fov_pix'] = coeff_hdr['FOVPIX']
kwargs['oversample'] = coeff_hdr['OSAMP']
kwargs['coeff'] = coeff
kwargs['coeff_hdr'] = coeff_hdr
return gen_image_coeff(coeff_hdr['FILTER'], **kwargs)
def gen_image_coeff(filter_or_bp, pupil=None, mask=None, module='A',
coeff=None, coeff_hdr=None, sp_norm=None, nwaves=None,
fov_pix=11, oversample=4, return_oversample=False, use_sp_waveset=False,
**kwargs):
"""Generate PSF
Create an image (direct, coronagraphic, grism, or DHS) based on a set of
instrument parameters and PSF coefficients. The image is noiseless and
doesn't take into account any non-linearity or saturation effects, but is
convolved with the instrument throughput. Pixel values are in counts/sec.
The result is effectively an idealized slope image.
If no spectral dispersers (grisms or DHS), then this returns a single
image or list of images if sp_norm is a list of spectra.
Parameters
----------
filter_or_bp : str, :mod:`pysynphot.obsbandpass`
Either the name of a filter or a Pysynphot bandpass.
pupil : str, None
NIRCam pupil elements such as grisms or lyot stops.
mask : str, None
Specify the coronagraphic occulter (spots or bar).
module : str
Module 'A' or 'B'.
sp_norm : :mod:`pysynphot.spectrum`
A normalized Pysynphot spectrum to generate image. If not specified,
the default is flat in phot lam (equal number of photons per spectral bin).
The default is normalized to produce 1 count/sec within that bandpass,
assuming the telescope collecting area. Coronagraphic PSFs will further
decrease this flux.
coeff : ndarray
A cube of polynomial coefficients for generating PSFs. This is
generally oversampled with a shape (fov_pix*oversamp, fov_pix*oversamp, deg).
If not set, this will be calculated using the :func:`gen_psf_coeff` function.
coeff_hdr : FITS header
Header information saved while generating coefficients.
nwaves : int
Option to specify the number of evenly spaced wavelength bins to
generate and sum over to make final PSF. Useful for wide band filters
with large PSFs over continuum source.
use_sp_waveset : bool
Set this option to use `sp_norm` waveset instead of bandpass waveset.
Useful if user inputs a high-resolution spectrum with line emissions,
so may want to keep a grism PSF (for instance) at native resolution
rather than blurred with the bandpass waveset. TODO: Test.
fov_pix : int
Number of detector pixels in the image coefficient and PSF.
oversample : int
Factor of oversampling of detector pixels.
return_oversample: bool
If True, then also returns the oversampled version of the PSF.
Keyword Args
------------
grism_order : int
Grism spectral order (default=1).
npsf : int
Number of evenly-spaced (with wavelength) monochromatic PSFs to
generate with webbPSF. If not specified, then the default is to
produce 20 PSFs/um. The wavelength range is determined by
choosing those wavelengths where throughput is >0.001.
ndeg : int
Polynomial degree for PSF fitting.
Passed on to :func:`read_filter`:
ND_acq : bool
ND acquisition square in coronagraphic mask.
"""
is_grism = ((pupil is not None) and ('GRISM' in pupil))
is_dhs = ((pupil is not None) and ('DHS' in pupil))
if is_dhs:
raise NotImplementedError('DHS has yet to be fully included')
t0 = time.time()
# Get filter throughput and create bandpass
if isinstance(filter_or_bp, six.string_types):
filter = filter_or_bp
bp = read_filter(filter, pupil=pupil, mask=mask, module=module, **kwargs)
else:
bp = filter_or_bp
filter = bp.name
if (coeff is not None) and (coeff_hdr is not None):
fov_pix = coeff_hdr['FOVPIX']
oversample = coeff_hdr['OSAMP']
module = coeff_hdr['MODULE']
elif (coeff is None) and (coeff_hdr is not None):
raise AttributeError("`coeff_hdr` parameter set, but `coeff` is None")
elif ((coeff is not None) and (coeff_hdr is None)):
raise AttributeError("`coeff` parameter set, but `coeff_hdr` is None")
else:
coeff, coeff_hdr = gen_psf_coeff(bp, pupil=pupil, mask=mask, module=module,
fov_pix=fov_pix, oversample=oversample, **kwargs)
t1 = time.time()
waveset = np.copy(bp.wave)
if nwaves is not None:
# Evenly spaced waves
waveset = np.linspace(waveset.min(), waveset.max(), nwaves)
elif not (is_grism or is_dhs):
# For generating the PSF, let's save some time and memory by not using
# every single wavelength in the bandpass.
# Do NOT do this for dispersed modes.
binsize = 1
if coeff.shape[-1]>2000:
binsize = 7
elif coeff.shape[-1]>1000:
binsize = 5
elif coeff.shape[-1]>700:
binsize = 3
if binsize>1:
excess = waveset.size % binsize
waveset = waveset[:waveset.size-excess]
waveset = waveset.reshape(-1,binsize) # Reshape
waveset = waveset[:,binsize//2] # Use the middle values
waveset = np.concatenate(([bp.wave[0]],waveset,[bp.wave[-1]]))
wgood = waveset / 1e4
w1 = wgood.min()
w2 = wgood.max()
wrange = w2 - w1
# print('nwaves: {}'.format(len(wgood)))
t2 = time.time()
# Flat spectrum with equal photon flux in each spectral bin
if sp_norm is None:
sp_flat = S.ArraySpectrum(waveset, 0*waveset + 10.)
sp_flat.name = 'Flat spectrum in flam'
# Bandpass unit response is the flux (in flam) of a star that
# produces a response of one count per second in that bandpass
sp_norm = sp_flat.renorm(bp.unit_response(), 'flam', bp)
# Make sp_norm a list of spectral objects if it already isn't
if not isinstance(sp_norm, list):
sp_norm = [sp_norm]
nspec = len(sp_norm)
t3 = time.time()
# Set up an observation of the spectrum using the specified bandpass
if use_sp_waveset:
if nspec>1:
raise AttributeError("Only 1 spectrum allowed when use_sp_waveset=True.")
# Modify waveset if use_sp_waveset=True
obs_list = []
for sp in sp_norm:
# Select only wavelengths within bandpass
waveset = sp.wave
waveset = waveset[(waveset>=w1*1e4) & (waveset<=w2*1e4)]
obs_list.append(S.Observation(sp, bp, binset=waveset))
# Update wgood
wgood = waveset / 1e4
w1 = wgood.min()
w2 = wgood.max()
wrange = w2 - w1
else:
# Use the bandpass wavelength set to bin the fluxes
obs_list = [S.Observation(sp, bp, binset=waveset) for sp in sp_norm]
# Convert to count rate
for obs in obs_list:
obs.convert('counts')
t4 = time.time()
# Create a PSF for each wgood wavelength
use_legendre = True if coeff_hdr['LEGNDR'] else False
lxmap = [coeff_hdr['WAVE1'], coeff_hdr['WAVE2']]
psf_fit = jl_poly(wgood, coeff, dim_reorder=False, use_legendre=use_legendre, lxmap=lxmap)
# Just in case weird coeff gives negative values
# psf_fit[psf_fit<=0] = np.min(psf_fit[psf_fit>0]) / 10
t5 = time.time()
# Multiply each monochromatic PSFs by the binned e/sec at each wavelength
# Array broadcasting: [nx,ny,nwave] x [1,1,nwave]
# Do this for each spectrum/observation
if nspec==1:
psf_fit *= obs_list[0].binflux.reshape([-1,1,1])
psf_list = [psf_fit]
else:
psf_list = [psf_fit*obs.binflux.reshape([-1,1,1]) for obs in obs_list]
del psf_fit
# The number of pixels to span spatially
fov_pix = int(fov_pix)
oversample = int(oversample)
fov_pix_over = int(fov_pix * oversample)
t6 = time.time()
# Grism spectroscopy
if is_grism:
# spectral resolution in um/pixel
# res is in pixels per um and dw is inverse
grism_order = kwargs['grism_order'] if ('grism_order' in kwargs.keys()) else 1
res, dw = grism_res(pupil, module, grism_order)
# Number of real pixels that spectra will span
npix_spec = int(wrange // dw + 1 + fov_pix)
npix_spec_over = int(npix_spec * oversample)
spec_list = []
spec_list_over = []
for psf_fit in psf_list:
# If GRISM90 (along columns) rotate image by 90 deg CW
if 'GRISM90' in pupil:
psf_fit = np.rot90(psf_fit, k=1)
elif module=='B': # Flip right to left to disperse in correct orientation
psf_fit = psf_fit[:,:,::-1]
# Create oversampled spectral image
spec_over = np.zeros([fov_pix_over, npix_spec_over])
# Place each PSF at its dispersed location
for i, w in enumerate(wgood):
# Separate shift into an integer and fractional shift
delx = oversample * (w-w1) / dw # Number of oversampled pixels to shift
intx = int(delx)
fracx = delx - intx
if fracx < 0:
fracx = fracx + 1
intx = intx - 1
# spec_over[:,intx:intx+fov_pix_over] += fshift(psf_fit[i], fracx)
im = psf_fit[i]
spec_over[:,intx:intx+fov_pix_over] += im*(1.-fracx) + np.roll(im,1,axis=1)*fracx
spec_over[spec_over<__epsilon] = 0 #__epsilon
# Rotate spectrum to its V2/V3 coordinates
spec_bin = poppy.utils.krebin(spec_over, (fov_pix,npix_spec))
if 'GRISM90' in pupil: # Rotate image 90 deg CCW
spec_over = np.rot90(spec_over, k=-1)
spec_bin = np.rot90(spec_bin, k=-1)
elif module=='B': # Flip right to left for sci coords
spec_over = spec_over[:,::-1]
spec_bin = spec_bin[:,::-1]
# Rebin oversampled spectral image to real pixels
spec_list.append(spec_bin)
spec_list_over.append(spec_over)
# Wavelength solutions
dw_over = dw/oversample
w1_spec = w1 - dw_over*fov_pix_over/2
wspec_over = np.arange(npix_spec_over)*dw_over + w1_spec
wspec = wspec_over.reshape((npix_spec,-1)).mean(axis=1)
if ('GRISM0' in pupil) and (module=='B'): # Flip for sci coords
wspec = wspec[::-1]
if nspec == 1:
spec_list = spec_list[0]
spec_list_over = spec_list_over[0]
# Return list of wavelengths for each horizontal pixel
# as well as spectral image
t7 = time.time()
_log.debug('jl_poly: {:.2f} sec; binflux: {:.2f} sec; disperse: {:.2f} sec'.format(t5-t4, t6-t5, t7-t6))
if return_oversample:
return (wspec, spec_list), (wspec_over, spec_list_over)
else:
return (wspec, spec_list)
# DHS spectroscopy
elif is_dhs:
raise NotImplementedError('DHS has yet to be fully included')
# Imaging
else:
# Create source image slopes (no noise)
data_list = []
data_list_over = []
for psf_fit in psf_list:
data_over = psf_fit.sum(axis=0)
data_over[data_over<=__epsilon] = data_over[data_over>__epsilon].min() / 10
data_list_over.append(data_over)
data_list.append(poppy.utils.krebin(data_over, (fov_pix,fov_pix)))
if nspec == 1:
data_list = data_list[0]
data_list_over = data_list_over[0]
t7 = time.time()
_log.debug('jl_poly: {:.2f} sec; binflux: {:.2f} sec; PSF sum: {:.2f} sec'.format(t5-t4, t6-t5, t7-t6))
if return_oversample:
return data_list, data_list_over
else:
return data_list
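# Illustrative usage sketch (not part of the original module). The filter,
# fov_pix, and oversample values are placeholders, and WebbPSF data files
# are assumed to be installed.
def _example_gen_image_coeff():
    """Render a noiseless direct-imaging slope image from PSF coefficients."""
    fpix, osamp = (33, 4)
    coeff, hdr = gen_psf_coeff('F210M', fov_pix=fpix, oversample=osamp)
    # Detector-sampled image plus its oversampled counterpart
    im_det, im_over = gen_image_coeff('F210M', coeff=coeff, coeff_hdr=hdr,
                                      fov_pix=fpix, oversample=osamp,
                                      return_oversample=True)
    return im_det, im_over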
| 1.765625
| 2
|
madgrad/mirror_madgrad.py
|
haresh121/madgrad
| 0
|
12779334
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.optim
from typing import TYPE_CHECKING, Any, Callable, Optional
if TYPE_CHECKING:
from torch.optim.optimizer import _params_t
else:
_params_t = Any
class MirrorMADGRAD(torch.optim.Optimizer):
"""
Mirror MADGRAD_: A Momentumized, Adaptive, Dual Averaged Gradient Method for Stochastic
Optimization.
.. _MADGRAD: https://arxiv.org/abs/2101.11075
Mirror MADGRAD uses the weighting and momentum of MADGRAD but uses mirror descent
rather than dual averaging as the base method. In general, the mirror variant works
better than standard MADGRAD on problems where the generalization gap is not an issue,
such as large Transformer model training. On CIFAR-10/ImageNet and smaller NLP models
the standard variant should be preferred. The Mirror variant is more numerically stable
which may help with large model training.
Currently does not support sparse gradients.
Arguments:
params (iterable):
Iterable of parameters to optimize or dicts defining parameter groups.
lr (float):
Learning rate (default: 1e-2).
momentum (float):
Momentum value in the range [0,1) (default: 0.9).
weight_decay (float):
Weight decay, i.e. a L2 penalty (default: 0).
eps (float):
Term added to the denominator outside of the root operation to improve numerical stability. (default: 0).
This parameter is less important in MADGRAD than in Adam. A value of 0 will likely give the best results.
decouple_decay (bool):
Apply AdamW style decoupled weight decay (EXPERIMENTAL).
Application of decay occurs before the step.
"""
def __init__(
self, params: _params_t, lr: float = 1e-2, momentum: float = 0.9,
weight_decay: float = 0, eps: float = 0, decouple_decay=False,
):
if momentum < 0 or momentum >= 1:
raise ValueError(f"Momentum {momentum} must be in the range [0,1)")
if lr <= 0:
raise ValueError(f"Learning rate {lr} must be positive")
if weight_decay < 0:
raise ValueError(f"Weight decay {weight_decay} must be non-negative")
if eps < 0:
raise ValueError(f"Eps {eps} must be non-negative")
defaults = dict(lr=lr, eps=eps, momentum=momentum,
weight_decay=weight_decay, decouple_decay=decouple_decay)
super().__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self) -> bool:
return True
@property
def supports_flat_params(self) -> bool:
return True
def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
# step counter must be stored in state to ensure correct behavior under
# optimizer sharding
if 'k' not in self.state:
self.state['k'] = torch.tensor([0], dtype=torch.long)
k = self.state['k'].item()
update_ratio = math.pow(k/(k+1), 1/2)
lamb = math.pow(k+1, 1/3)
for group in self.param_groups:
eps = group["eps"]
lr = group["lr"] + eps
decay = group["weight_decay"]
momentum = group["momentum"]
decouple_decay = group["decouple_decay"]
ck = 1 - momentum
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.dtype in {torch.float16, torch.bfloat16}:
grad = grad.float()
state = self.state[p]
p_data_fp32 = p.data
if p.data.dtype in {torch.float16, torch.bfloat16}:
p_data_fp32 = p_data_fp32.float()
if "grad_sum_sq" not in state:
state["grad_sum_sq"] = torch.zeros_like(p_data_fp32).detach()
state["z"] = torch.clone(p_data_fp32).detach()
if momentum != 0.0 and grad.is_sparse:
raise RuntimeError("momentum != 0 is not compatible with sparse gradients")
grad_sum_sq = state["grad_sum_sq"]
z = state["z"]
# Apply weight decay
if decay != 0:
if grad.is_sparse:
raise RuntimeError("weight_decay option is not compatible with sparse gradients")
if decouple_decay:
z.data.add_(z.data, alpha=-lr*decay)
else:
grad.add_(p_data_fp32, alpha=decay)
grad_sum_sq.mul_(update_ratio)
# Accumulate second moments
grad_sum_sq.addcmul_(grad, grad, value=1)
rms = grad_sum_sq.pow(1 / 3).add_(eps)
if eps == 0:
rms[rms == 0] = float('inf')
# Update z
z.data.addcdiv_(grad, rms, value=-lr*lamb)
# Step
p_data_fp32.mul_(1 - ck).add_(z, alpha=ck)
if p.data.dtype in {torch.float16, torch.bfloat16}:
p.data.copy_(p_data_fp32)
self.state['k'] += 1
return loss
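

# Hedged usage sketch (not part of the original file): a single training step with
# an optimizer of this family. The optimizer class is passed in as `optimizer_cls`
# so the example does not assume the exact class name defined above; the model and
# batch are caller-supplied placeholders.
def _example_training_step(optimizer_cls, model, batch):
    """Run one forward/backward/step cycle with `optimizer_cls`."""
    optimizer = optimizer_cls(model.parameters(), lr=1e-2, momentum=0.9)
    inputs, targets = batch
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(inputs), targets)
    loss.backward()
    optimizer.step()
    return loss.item()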
| 2.28125
| 2
|
src/video_popup.py
|
OdatNurd/OdatNurdTestPackage
| 1
|
12779335
|
import sublime
import sublime_plugin
###----------------------------------------------------------------------------
_video_popup = """
<body id="youtubeeditor-video-details">
<style>
body {{
font-family: system;
margin: 0.5rem 1rem;
width: 40em;
}}
h1 {{
width: 33em;
font-size: 1.2rem;
font-weight: bold;
margin: 0;
padding: 0;
border-bottom: 2px solid var(--bluish);
color: var(--bluish);
}}
h1 span {{
font-size: 0.80rem;
position: relative;
left: 0;
}}
.private {{
color: var(--redish);
}}
.public {{
color: var(--greenish);
}}
.unlisted {{
color: var(--yellowish);
}}
.statistics {{
font-size: 0.8rem;
line-height: 0.8rem;
margin-top: -0.8rem;
position: relative;
}}
.viewcount {{
color: var(--purplish);
}}
.likes {{
color: var(--greenish);
}}
.dislikes {{
color: var(--redish);
}}
.description {{
color: color(var(--foreground) alpha(0.70));
}}
.tags {{
width: 40rem;
margin: 0;
padding: 0;
border-top: 1px solid var(--greenish);
color: color(var(--greenish) alpha(0.7));
font-size: 0.9rem;
}}
.commands {{
width: 40rem;
margin: 0;
padding: 0;
border-top: 1px solid var(--greenish);
color: color(var(--greenish) alpha(0.7));
font-size: 0.9rem;
}}
</style>
{body}
</body>
"""
_body = """
<h1>{title} <span class="{vis_class}">({visibility})</span></h1>
<div class="statistics">
<span class="viewcount">{views} views</span>
<span class="likes">✔:{likes}</span>
<span class="dislikes">✘:{dislikes}</span>
</div>
<p class="description">{description}</p>
<div class="tags">{tags}</div>
<div class="commands">
[ <a href="subl:youtube_editor_view_video_link {{"video_id":"{video_id}"}}">Watch</a> ]
[ <a href="subl:youtube_editor_get_video_link {{"video_id":"{video_id}"}}">Get Link</a> ]
[ <a href="subl:youtube_editor_edit_video_details {{"video_id":"{video_id}"}}">Edit</a> ]
[ <a href="subl:youtube_editor_edit_in_studio {{"video_id":"{video_id}"}}">Edit in Studio</a> ]
</div>
"""
###----------------------------------------------------------------------------
def show_video_popup(view, point, video):
"""
At the given point in the given view, display a hover popup for the video
whose information is provided.
    The hover popup contains the key information for the video, along with
    links that trigger commands that act on that video.
"""
content = _video_popup.format(
body=_body.format(
title=video['snippet.title'],
vis_class=video['status.privacyStatus'],
visibility=video['status.privacyStatus'].title(),
views=video['statistics.viewCount'],
likes=video['statistics.likeCount'],
dislikes=video['statistics.dislikeCount'],
description=video['snippet.description'].split('\n', 1)[0],
tags=", ".join(video.get("snippet.tags", [])),
video_id=video['id']
)
)
view.show_popup(content,
flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY,
location=point,
max_width=1024,
max_height=1024)
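

# Hedged usage sketch (not part of the original file): one way show_video_popup()
# could be driven from Sublime's hover event. The listener class and the
# _video_for_view() lookup helper below are assumptions, not package API.
def _video_for_view(view, point):
    """Hypothetical lookup helper: a real plugin would map the text under `point`
    to a stored video record; this placeholder returns None so the listener stays
    inert."""
    return None


class VideoHoverListenerExample(sublime_plugin.EventListener):
    """Sketch of wiring show_video_popup() to the hover event."""

    def on_hover(self, view, point, hover_zone):
        if hover_zone != sublime.HOVER_TEXT:
            return
        video = _video_for_view(view, point)
        if video is not None:
            show_video_popup(view, point, video)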
###----------------------------------------------------------------------------
| 2.203125
| 2
|
tests/flow/pipeline/conftest.py
|
mpearmain/forml
| 0
|
12779336
|
<gh_stars>0
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Flow unit tests fixtures.
"""
# pylint: disable=no-self-use
import pytest
from forml.flow import pipeline
from forml.flow import task
from forml.flow.graph import node
from forml.flow.pipeline import topology
@pytest.fixture(scope='function')
def operator(spec: task.Spec) -> topology.Operator:
"""Operator fixture."""
class Operator(topology.Operator):
"""Operator mock."""
def compose(self, left: topology.Composable) -> pipeline.Segment:
"""Dummy composition."""
track = left.expand()
trainer = node.Worker(spec, 1, 1)
applier = trainer.fork()
extractor = node.Worker(spec, 1, 1)
trainer.train(track.train.publisher, extractor[0])
return track.use(label=track.train.extend(extractor)).extend(applier)
return Operator()
@pytest.fixture(scope='function')
def origin(spec: task.Spec) -> topology.Operator:
"""Origin operator fixture."""
class Operator(topology.Operator):
"""Operator mock."""
def compose(self, left: topology.Composable) -> pipeline.Segment:
"""Dummy composition."""
trainer = node.Worker(spec, 1, 1)
applier = trainer.fork()
return pipeline.Segment(applier, trainer)
return Operator()
| 1.84375
| 2
|
app_blog/controllers/article_controller.py
|
DubstepWar/flask-blog-test
| 0
|
12779337
|
<gh_stars>0
from flask import Blueprint, jsonify, request
from flask_jwt import jwt_required
from marshmallow import ValidationError
from app_blog.extensions import db
from app_blog.models import Article
from app_blog.models.article import article_schema, articles_schema
from app_blog.services.article_service import article_service
articles_blueprint = Blueprint("articles", __name__, url_prefix="/articles")
@articles_blueprint.route("", methods=["GET"])
def get_articles():
articles = article_service.get_articles()
return jsonify(articles_schema.dump(articles))
@articles_blueprint.route("", methods=["POST"])
@jwt_required()
def create_article():
data = request.get_json()
if not data:
return (
{"status": "error", "success": False, "message": "No input data provided"},
400,
)
    # Validate the incoming data
    try:
        data = article_schema.load(data)
    except ValidationError as err:
        return (
            jsonify({"status": "error", "errors": err.messages, "success": False}),
            422,
        )
article = article_service.create_article(data)
return (
jsonify(
{"status": "ok", "success": True, "result": article_schema.dump(article)}
),
201,
)
@articles_blueprint.route("/<slug>", methods=["GET"])
def get_article(slug):
article = Article.query.filter_by(slug=slug).first()
if not article:
        return jsonify({"status": "error", "message": "Not found", "success": False}), 404
return jsonify(article_schema.dump(article))
@articles_blueprint.route("/<slug>", methods=["PUT"])
@jwt_required()
def update_article(slug):
article = Article.query.filter_by(slug=slug).first()
if not article:
        return jsonify({"status": "error", "message": "Not found", "success": False}), 404
data = request.get_json()
if not data:
return (
{"status": "error", "success": False, "message": "No input data provided"},
400,
)
    # Validate the incoming data
    try:
        data = article_schema.load(data)
    except ValidationError as err:
        return (
            jsonify({"status": "error", "errors": err.messages, "success": False}),
            422,
        )
article_service.update_article(article, data)
return jsonify(
{"success": True, "status": "Ok", "result": article_schema.dump(article)}
)
@articles_blueprint.route("/<slug>", methods=["DELETE"])
@jwt_required()
def delete_article(slug):
article = Article.query.filter_by(slug=slug).first()
if not article:
        return jsonify({"status": "error", "message": "Not found", "success": False}), 404
db.session.delete(article)
db.session.commit()
return jsonify({"success": True, "status": "Ok"})
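

# Hedged wiring sketch (not part of the original module): how this blueprint might
# be registered in an application factory. A real factory would also initialise the
# db and JWT extensions; this bare version only shows the registration call.
def create_example_app():
    from flask import Flask  # local import keeps the sketch self-contained
    app = Flask(__name__)
    app.register_blueprint(articles_blueprint)
    return app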
| 2.390625
| 2
|
src/networks/main.py
|
LukasKratochvila/Deep-SVDD-PyTorch
| 0
|
12779338
|
from .mnist_LeNet import MNIST_LeNet, MNIST_LeNet_Autoencoder
from .cifar10_LeNet import CIFAR10_LeNet, CIFAR10_LeNet_Autoencoder
from .cifar10_LeNet_elu import CIFAR10_LeNet_ELU, CIFAR10_LeNet_ELU_Autoencoder
from .my_LeNet import MY_LeNet, MY_LeNet_Autoencoder
from .my_LeNet_480x480 import MY_LeNet_480, MY_LeNet_480_Autoencoder
from .my_LeNet_NN import MY_LeNet_NN, MY_LeNet_NN_Autoencoder
def build_network(net_name):
"""Builds the neural network."""
implemented_networks = ('mnist_LeNet', 'cifar10_LeNet', 'cifar10_LeNet_ELU', 'my_LeNet', 'my_LeNet_480', 'my_LeNet_NN')
assert net_name in implemented_networks
net = None
if net_name == 'mnist_LeNet':
net = MNIST_LeNet()
if net_name == 'cifar10_LeNet':
net = CIFAR10_LeNet()
if net_name == 'cifar10_LeNet_ELU':
net = CIFAR10_LeNet_ELU()
if net_name == 'my_LeNet':
net = MY_LeNet()
if net_name == 'my_LeNet_480':
net = MY_LeNet_480()
if net_name == 'my_LeNet_NN':
net = MY_LeNet_NN()
return net
def build_autoencoder(net_name):
"""Builds the corresponding autoencoder network."""
implemented_networks = ('mnist_LeNet', 'cifar10_LeNet', 'cifar10_LeNet_ELU', 'my_LeNet', 'my_LeNet_480', 'my_LeNet_NN')
assert net_name in implemented_networks
ae_net = None
if net_name == 'mnist_LeNet':
ae_net = MNIST_LeNet_Autoencoder()
if net_name == 'cifar10_LeNet':
ae_net = CIFAR10_LeNet_Autoencoder()
if net_name == 'cifar10_LeNet_ELU':
ae_net = CIFAR10_LeNet_ELU_Autoencoder()
if net_name == 'my_LeNet':
ae_net = MY_LeNet_Autoencoder()
if net_name == 'my_LeNet_480':
ae_net = MY_LeNet_480_Autoencoder()
if net_name == 'my_LeNet_NN':
ae_net = MY_LeNet_NN_Autoencoder()
return ae_net
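

# Hedged convenience sketch (not part of the original module): build a network and
# its matching autoencoder in one call, using only the two factories defined above.
def build_pair(net_name):
    """Return (network, autoencoder) for an implemented `net_name`."""
    return build_network(net_name), build_autoencoder(net_name)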
| 2.546875
| 3
|
3.Object.Oriented.Programming/7.magicMethods.py
|
bhattvishal/programming-learning-python
| 1
|
12779339
|
class Book:
def __init__(self, title, price, author):
super().__init__()
self.title = title
self.price = price
self.author = author
# Magic Methods
    # use __str__ to return a user-friendly string representation
def __str__(self):
return f"{self.title} is written by {self.author} and costs {self.price}"
    # use __repr__ to return the object representation; it is mainly used to make debugging easier
def __repr__(self):
return f"title: {self.title}, author: {self.author}, cost: {self.price}"
def main():
b1 = Book("Hit Refresh", 500, "Satya Nadella")
print(str(b1))
print(repr(b1))
if __name__ == "__main__":
main()
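

# Follow-up sketch (not part of the original example): containers fall back to
# __repr__ when printed, which is the main practical reason to define it. The
# second book below is an illustrative placeholder.
def demo_repr_in_containers():
    shelf = [Book("Hit Refresh", 500, "Satya Nadella"),
             Book("Clean Code", 400, "Robert C. Martin")]
    print(shelf)  # each element is rendered via Book.__repr__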
| 3.828125
| 4
|
test.py
|
Mirorrn/Spline-Lane-Detection
| 1
|
12779340
|
import os
path = os.getcwd()
from cu__grid_cell.data_gen import data_gen
from cu__grid_cell.preparation import preparation
import numpy as np
from cu__grid_cell.Validation.validation_utils import plot_image, grid_based_eval_with_iou, plot_image3d, nms, concatenate_cells
import matplotlib.pyplot as plt
import cv2
def sigmoid(x):
return 1. / (1. + np.exp(-x))
batch = 2
model_obj = preparation(testing = True)
config = model_obj.config
a = data_gen(dataset=config.CU_test6_curve_hdf5_path, batchsize=batch, config=config, augment=False)
generator = a.batch_gen(test=True)
x_img, y, gt_image, gt_lanes = next(generator)
y = y[0]
concatenate_cells(y[0], config)
prediction = model_obj.predict(x_img)
scale_size_y = (1640 -1) / config.img_w
scale_size_x = (590 -1) /config.img_h
M = np.array([[scale_size_y, 0, 0],
[0, scale_size_x, 0],
[0, 0, 1.]])
M=M[0:2]
if config.splitted:
lok = prediction[-2]
conf = prediction[-1]
prediction = np.concatenate([lok, conf], axis=-1)
#elif config.staged:
# prediction = prediction[-1]
for i, s in enumerate(prediction):
s = nms(s, config)
plt.figure(1)
#f, axarr = plt.subplots(1, 2)
#axarr[0].imshow(gt_image[i,:,:,::-1].astype(np.uint8))
    #axarr[0].set_title('Ground Truth', color='0.7')
for a in gt_lanes[i]:
gt_image[i] = cv2.polylines(gt_image[i], np.int32([a]), isClosed=0, color=(0, 255, 0), thickness=10)
lanes_pred = concatenate_cells(s, config, prediction=True)
original_points = lanes_pred
for j, o in enumerate(original_points):
o = np.array(o).T
ones = np.ones_like(o[:, 0])
ones = ones[..., None]
        original_points[j] = np.concatenate((o, ones),
                                            axis=1)  # we reuse the 3rd column in a completely different way here; it is a hack for the matmul with M
        original_points[j] = np.matmul(M, original_points[j].T).T  # transpose for multiplication
lanes = original_points # take only coords!
for a in lanes:
gt_image[i] = cv2.polylines(gt_image[i], np.int32([a]), isClosed=0,color=(0,0,255), thickness=10)
#pred_img = plot_image(s, config, with_print=True, plot_image =x_img[i,:,:])
plt.imshow(gt_image[i,:,:,::-1].astype(np.uint8))
# plt.set_title('Predicted', color='0.7')
# now 3d plot
plot_image3d(s, config, True, with_print=False)
# plot_image3d(y[i], config, False, with_print=False)
plt.show()
test = 0
| 2.171875
| 2
|
app/models/attribute.py
|
Maxcutex/personal_ecommerce
| 0
|
12779341
|
"""module of attribute model class"""
from .base_model import BaseModel, db
class Attribute(BaseModel):
"""attribute Model class"""
__tablename__ = 'attribute'
attribute_id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(100), nullable=False)
def __str__(self):
return '<Attribute: {}>'.format(self.name)
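

# Hedged usage sketch (not part of the original model): creating a row with the
# session of the `db` object imported above. Assumes a Flask-SQLAlchemy setup and
# an active application context; the attribute name is a placeholder.
def create_attribute_example(name='Color'):
    attribute = Attribute(name=name)
    db.session.add(attribute)
    db.session.commit()
    return attribute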
| 2.90625
| 3
|
db_logic.py
|
apie/seriesbot
| 0
|
12779342
|
<filename>db_logic.py
#!/usr/bin/env python3
import os
from pydblite import Base
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
show_db = Base(os.path.join(SCRIPT_DIR, 'show.db'))
show_db.create('show_id', 'name', 'latest_ep_id', mode="open")
ep_db = Base(os.path.join(SCRIPT_DIR, 'episode.db'))
ep_db.create('ep_id', 'show_id', 'season', 'number', 'name', 'airdate', 'downloaded', mode="open")
def get_shows_from_db():
return {
rec['show_id']: {
'name': rec.get('name'),
'latest_ep': rec.get('latest_ep_id'),
} for rec in show_db
}
def save_shows_in_db(shows):
db = show_db
for show_id, value in shows.items():
rec = db("show_id") == show_id
if not rec:
rec_id = db.insert(show_id=show_id)
db.commit()
rec = db[rec_id]
db.update(rec, name=value.get('name'), latest_ep_id=value.get('latest_ep'))
db.commit()
def get_new_eps_from_db():
return {
rec['ep_id']: {
'show_id': rec.get('show_id'),
'season': rec.get('season'),
'number': rec.get('number'),
'name': rec.get('name'),
'airdate': rec.get('airdate'),
'downloaded': rec.get('downloaded'),
} for rec in ep_db if not rec.get('downloaded')
}
def save_eps_in_db(eps):
db = ep_db
for ep_id, value in eps.items():
rec = db("ep_id") == ep_id
if not rec:
rec_id = db.insert(ep_id=ep_id)
db.commit()
rec = db[rec_id]
db.update(rec,
show_id=value.get('show_id'),
season=value.get('season'),
number=value.get('number'),
name=value.get('name'),
airdate=value.get('airdate'),
downloaded=value.get('downloaded')
)
db.commit()
def get_rec(ep_id):
db = ep_db
rec = db("ep_id") == ep_id
if not rec:
rec_id = db.insert(ep_id=ep_id)
db.commit()
rec = db[rec_id]
return rec
def mark_ep_as_downloaded(ep_id, downloaded=True):
rec = get_rec(ep_id)
db = ep_db
db.update(rec, downloaded=downloaded)
db.commit()
def list_show_db():
db = show_db
for rec in db:
print(rec)
def list_ep_db():
db = ep_db
for rec in db:
print(rec)
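

# Hedged round-trip sketch (not part of the original module): persist one show and
# read it back. The id, name and episode number below are placeholders.
def _demo_round_trip():
    save_shows_in_db({1: {'name': 'Example Show', 'latest_ep': 101}})
    return get_shows_from_db()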
| 2.328125
| 2
|
cloudkittyclient/v1/info_cli.py
|
NeCTAR-RC/python-cloudkittyclient
| 19
|
12779343
|
<reponame>NeCTAR-RC/python-cloudkittyclient
# -*- coding: utf-8 -*-
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from cliff import lister
from cloudkittyclient import utils
class CliInfoMetricGet(lister.Lister):
    """Get information about a single metric."""
info_columns = [
('metric_id', 'Metric'),
('unit', 'Unit'),
('metadata', 'Metadata'),
]
def take_action(self, parsed_args):
resp = utils.get_client_from_osc(self).info.get_metric(
metric_name=parsed_args.metric_name,
)
values = utils.list_to_cols([resp], self.info_columns)
return [col[1] for col in self.info_columns], values
def get_parser(self, prog_name):
parser = super(CliInfoMetricGet, self).get_parser(prog_name)
parser.add_argument('metric_name',
type=str, default='', help='Metric name')
return parser
class CliInfoMetricList(lister.Lister):
    """Get information about current metrics."""
info_columns = [
('metric_id', 'Metric'),
('unit', 'Unit'),
('metadata', 'Metadata'),
]
def take_action(self, parsed_args):
resp = utils.get_client_from_osc(self).info.get_metric()
values = utils.list_to_cols(resp['metrics'], self.info_columns)
return [col[1] for col in self.info_columns], values
class CliInfoConfigGet(lister.Lister):
"""Get information about the current configuration."""
def take_action(self, parsed_args):
resp = utils.get_client_from_osc(self).info.get_config()
values = [(key, value) for key, value in resp.items()]
return ('Section', 'Value'), values
| 2.3125
| 2
|
account/exceptions.py
|
KimSoungRyoul/drf_unitteset_study_project
| 0
|
12779344
|
from rest_framework.exceptions import NotAuthenticated as DRFNotAuthenticated
class NotAuthentication(DRFNotAuthenticated):
    default_detail = '인증 되지 않은 사용자입니다.'  # i.e. "This user is not authenticated."
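

# Hedged usage sketch (not part of the original module): a small guard that raises
# the custom exception so DRF's standard exception handler returns its detail
# message. The helper name is an assumption, not project API.
def require_authentication(request):
    user = getattr(request, 'user', None)
    if user is None or not user.is_authenticated:
        raise NotAuthentication()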
| 1.890625
| 2
|
backend/api/resources/pedidos_resource.py
|
jklemm/menu-fullstack-challenge
| 0
|
12779345
|
import json
import falcon
from api.resources import BaseResource
from core.pedidos.exceptions import PedidoNotFoundException
from core.pedidos.gateway import PedidoGateway
class PedidosResource(BaseResource):
def on_get(self, req, resp, pedido_id=None):
pedido_gateway = PedidoGateway(self.db.session)
if pedido_id:
try:
pedidos = pedido_gateway.get_one(int(pedido_id))
content = pedidos.as_dict
except PedidoNotFoundException as exc:
resp.status = falcon.HTTP_404
resp.body = json.dumps({"erro": str(exc)})
return resp
else:
pedidos = pedido_gateway.get_all()
content = [pedido.as_dict for pedido in pedidos]
resp.status = falcon.HTTP_200
resp.body = json.dumps(content)
def on_post(self, req, resp):
pedido_gateway = PedidoGateway(self.db.session)
body = req.bounded_stream.read().decode()
if not body:
resp.status = falcon.HTTP_PRECONDITION_FAILED
resp.body = json.dumps({"erro": "POST precisa conter um body."})
return resp
raw_json = json.loads(body)
data = raw_json["data"]
cliente_id = raw_json["cliente_id"]
valor = raw_json["valor"]
pedido_gateway.create(data, cliente_id, valor)
resp.status = falcon.HTTP_201
def on_put(self, req, resp, pedido_id=None):
pedido_gateway = PedidoGateway(self.db.session)
if not pedido_id:
resp.status = falcon.HTTP_412
resp.body = json.dumps({"erro": "Metodo PUT requer o campo 'pedido_id' na URL"})
return resp
resp.status = falcon.HTTP_200
raw_json = json.loads(req.bounded_stream.read().decode())
data = raw_json.get("data", None)
cliente_id = raw_json.get("cliente_id", None)
valor = raw_json.get("valor", None)
try:
pedido_gateway.update(pedido_id, data, cliente_id, valor)
except PedidoNotFoundException as exc:
resp.status = falcon.HTTP_404
resp.body = json.dumps({"erro": str(exc)})
return resp
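

# Hedged wiring sketch (not part of the original module): attaching the resource to
# a Falcon app. `falcon.App` assumes Falcon 3.x (older releases used `falcon.API`),
# and the bare PedidosResource() constructor call is an assumption about how
# BaseResource is initialised in this project.
def create_example_app():
    resource = PedidosResource()
    app = falcon.App()
    app.add_route('/pedidos', resource)
    app.add_route('/pedidos/{pedido_id}', resource)
    return app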
| 2.234375
| 2
|
edge/wiotp/netspeed2wiotp/mqtt_pub.py
|
alexzinovyev-intensivate/examples
| 34
|
12779346
|
import time
import string
import json
import sys
import paho.mqtt.publish as publish
import paho.mqtt.client as mqtt
from workload_config import * # Read configuration
import utils # Utilities file in this dir (utils.py)
def post_networkdata_single_wiotp(jsonpayload, event_id, heart_beat=False):
"""Tries once to send network data in json format to WIoTP via mqtt.
Returns 1 if successful, 0 if not, -1 if failed because not registered.
"""
try:
retain = True
qos = 2 # since speed data is sent so infrequently we can afford to make sure it gets there exactly once
if debug_flag:
utils.print_("mqtt_pub.py: Sending data to mqtt... \
mqtt_topic=%s, mqtt_broker=%s, client_id=%s" % (mqtt_topic, mqtt_broker, mqtt_client_id))
# Publish to MQTT
publish.single(topic=mqtt_topic, payload=jsonpayload, qos=qos, hostname=mqtt_broker,
protocol=mqtt.MQTTv311, client_id=mqtt_client_id, port=mqtt_port, #auth=mqtt_auth,
tls=mqtt_tls, retain=retain)
if debug_flag: utils.print_('mqtt_pub.py: Send to mqtt successful')
return 1
except:
e = sys.exc_info()[1]
if 'not authori' in str(e).lower() or 'bad user name or password' in str(e).lower():
# The data send failed because we are not successfully registered
return -1
else:
utils.print_('Send to mqtt failed: %s' % e)
return 0
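

# Hedged usage sketch (not part of the original module): publish one made-up
# speed-test reading. The payload keys and event id are placeholders; the real
# schema comes from the surrounding workload configuration.
def _demo_publish():
    payload = json.dumps({'d': {'download_mbps': 42.1, 'upload_mbps': 5.3}})
    return post_networkdata_single_wiotp(payload, event_id='netspeed')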
| 2.640625
| 3
|
myprodigy/urls.py
|
acdh-oeaw/nerdpool
| 0
|
12779347
|
# generated by appcreator
from django.conf.urls import url
from . import views, api_views
app_name = 'myprodigy'
urlpatterns = [
url(
r'^annotator/detail/(?P<pk>[0-9]+)$',
views.UserDetailView.as_view(),
name='user_detail'
),
url(
r'^nerdataset/$',
views.NerDataSetListView.as_view(),
name='nerdataset_browse'
),
url(
r'^servers/$',
views.ServerListView.as_view(),
name='server_browse'
),
url(
r'^nerdataset/detail/(?P<pk>[0-9]+)$',
views.NerDataSetDetailView.as_view(),
name='nerdataset_detail'
),
url(
r'^nerdataset/create/$',
views.NerDataSetCreate.as_view(),
name='nerdataset_create'
),
url(
r'^nerdataset/edit/(?P<pk>[0-9]+)$',
views.NerDataSetUpdate.as_view(),
name='nerdataset_edit'
),
url(
r'^nerdataset/delete/(?P<pk>[0-9]+)$',
views.NerDataSetDelete.as_view(),
name='nerdataset_delete'),
url(
r'^nersample/$',
views.NerSampleListView.as_view(),
name='nersample_browse'
),
url(
r'^nersample/detail/(?P<pk>[0-9]+)$',
views.NerSampleDetailView.as_view(),
name='nersample_detail'
),
url(
r'^nersample/create/$',
views.NerSampleCreate.as_view(),
name='nersample_create'
),
url(
r'^nersample/edit/(?P<pk>[0-9]+)$',
views.NerSampleUpdate.as_view(),
name='nersample_edit'
),
url(
r'^nersample/delete/(?P<pk>[0-9]+)$',
views.NerSampleDelete.as_view(),
name='nersample_delete'),
url(
r'^spawnprodigy/$',
api_views.ProdigyServers.as_view(),
name="api_prodigy_servers"
)
]
| 1.929688
| 2
|
python/shared/research_pacs/shared/util.py
|
aws-samples/research-pacs-on-aws
| 13
|
12779348
|
<filename>python/shared/research_pacs/shared/util.py<gh_stars>10-100
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import json
import os
import re
import logging
import signal
from threading import Event
import boto3
import yaml
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def load_file(location, aws_region, content_type='str', s3_credentials=None):
"""
Load and return a file either from Amazon S3 or from a local (or locally mounted) file system.
Args:
location (str): File location in the format `s3://bucket/key` if the file is stored in Amazon
S3
aws_region (str): AWS region where the S3 bucket resides
content_type (str): Can be `str` to return a string, `bytes` to return encoded bytes, `json` to
parse a JSON document and return a dic, `yaml` to parse a YAML document and return a dict
s3_credentials (dict): Optional S3 credentials passed to the boto3 S3 client. If no
credentials are provided, we use the EC2 role or task role
"""
try:
logger.debug(f'Load the file "{location}" as "{content_type}"')
    match = re.search(r'^s3://([^/]+)/(.+)$', location)
    # Load the file from S3 if the location matches the S3 pattern
if match != None:
if s3_credentials != None:
s3 = boto3.client('s3', region_name=aws_region, **s3_credentials)
else:
s3 = boto3.client('s3', region_name=aws_region)
s3_response = s3.get_object(Bucket=match.group(1), Key=match.group(2))
content_bytes = s3_response['Body'].read()
# Otherwise, load the file from the local system, or locally-mounted file system
else:
with open(location, 'rb') as f:
content_bytes = f.read()
if content_type == 'bytes':
return content_bytes
elif content_type == 'str':
return content_bytes.decode()
elif content_type == 'json':
return json.loads(content_bytes.decode())
elif content_type == 'yaml':
return yaml.safe_load(content_bytes.decode())
except Exception as e:
msg_err = f'Failed to load the file {location} as "{content_type}" - {e}'
logger.debug(msg_err)
raise Exception(msg_err)
def write_file(content, location, aws_region, content_type='str', s3_credentials=None):
"""
Write a file either to Amazon S3 or to a local (or locally mounted) file system.
Args:
    content: Can be a bytes object, a str, or a JSON-serializable object
location (str): File location in the format `s3://bucket/key` if the file is stored in Amazon
S3
aws_region (str): AWS region where the S3 bucket resides
content_type (str): Type of `content` (`str`, `bytes` or `json`)
s3_credentials (dict): Optional S3 credentials passed to the boto3 S3 client. If no
credentials are provided, we use the EC2 role or task role
"""
logger.debug(f'Write the file "{location}" as "{content_type}"')
try:
if content_type == 'bytes':
content_bytes = content
elif content_type == 'str':
      content_bytes = content.encode()
elif content_type == 'json':
content_bytes = json.dumps(content, indent=4, sort_keys=True).encode()
    match = re.search(r'^s3://([^/]+)/(.+)$', location)
# Save to S3 if the location matches the S3 pattern
if match != None:
if s3_credentials != None:
s3 = boto3.client('s3', region_name=aws_region, **s3_credentials)
else:
s3 = boto3.client('s3', region_name=aws_region)
s3_response = s3.put_object(Body=content_bytes, Bucket=match.group(1), Key=match.group(2))
# Otherwise, save it to the local system
else:
with open(location, 'wb') as f:
f.write(content_bytes)
except Exception as e:
msg_err = f'Failed to write the "{content_type}" input to {location} - {e}'
logger.debug(msg_err)
raise Exception(msg_err)
class EnvVarList:
"""
Retrieve and store environment variables. Environment variable values are accessible as
attributes of the object.
"""
def __init__(self):
self.region = None
def add(self, attr_name, var_name, cast=None, default=None, password=False):
"""
Add a class attribute that contains the value of an environment variable. The application
exits if the environment variable is unset, unless a default value is provided.
Args:
attr_name (str): Attribute name
var_name (str): Name of the environment variable
cast (type, Optional): Type to cast the environment variable value
default (Optional): Default value if the environment variable is unset
password (bool, Optional): Hide the value when displaying debug logs
"""
value = os.getenv(var_name)
if value is None and default != None:
value = default
# Cast the value
if cast != None and value != None:
try:
value = cast(value)
except Exception as e:
msg_err = f'Unable to cast {var_name} to {cast}'
logger.debug(msg_err)
raise Exception(msg_err)
    # Store the value as an attribute, or raise an exception if it is missing
if value is None:
msg_err = f'Missing value for environment variable {var_name}'
logger.debug(msg_err)
raise Exception(msg_err)
else:
if password is True:
logger.debug(f'{var_name} = *******')
else:
logger.debug(f'{var_name} = {value}')
setattr(self, attr_name, value)
class ClientList:
"""
Store variables, notably client objects, and make them accessible as attributes of a class.
"""
def add(self, attr_name, client):
setattr(self, attr_name, client)
class GracefulKiller:
"""
Intercept SIGINT and SIGTERM and enable the program to exit gracefully. Use the `sleep` function
instead of `time.sleep` to interrupt the sleep function when a signal is intercepted.
"""
def __init__(self):
self.kill_now = False
self.exit = Event()
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, sig_num, *args):
self.exit.set()
self.kill_now = True
def sleep(self, seconds):
self.exit.wait(seconds)
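

# Hedged usage sketch (not part of the original module): read required settings from
# the environment, then load a YAML configuration file with the helpers above. The
# variable names and the S3 location are placeholders.
def _example_bootstrap():
  env = EnvVarList()
  env.add('region', 'AWS_REGION', default='us-east-1')
  env.add('config_location', 'RPACS_CONFIG_LOCATION', default='s3://my-bucket/config.yaml')
  return load_file(env.config_location, env.region, content_type='yaml')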
| 2.28125
| 2
|
zerver/tests/test_management_commands.py
|
Frouk/zulip
| 0
|
12779349
|
<reponame>Frouk/zulip
# -*- coding: utf-8 -*-
import os
from mock import patch
from django.test import TestCase
from django.conf import settings
from django.core.management import call_command
class TestSendWebhookFixtureMessage(TestCase):
COMMAND_NAME = 'send_webhook_fixture_message'
def setUp(self):
self.fixture_path = os.path.join('some', 'fake', 'path.json')
self.url = '/some/url/with/hook'
@patch('zerver.management.commands.send_webhook_fixture_message.Command.print_help')
def test_check_if_command_exits_when_fixture_param_is_empty(self, print_help_mock):
with self.assertRaises(SystemExit):
call_command(self.COMMAND_NAME, url=self.url)
print_help_mock.assert_any_call('python manage.py', self.COMMAND_NAME)
@patch('zerver.management.commands.send_webhook_fixture_message.Command.print_help')
def test_check_if_command_exits_when_url_param_is_empty(self, print_help_mock):
with self.assertRaises(SystemExit):
call_command(self.COMMAND_NAME, fixture=self.fixture_path)
print_help_mock.assert_any_call('python manage.py', self.COMMAND_NAME)
@patch('zerver.management.commands.send_webhook_fixture_message.os.path.exists')
def test_check_if_command_exits_when_fixture_path_does_not_exist(self, os_path_exists_mock):
os_path_exists_mock.return_value = False
with self.assertRaises(SystemExit):
call_command(self.COMMAND_NAME, fixture=self.fixture_path, url=self.url)
os_path_exists_mock.assert_any_call(os.path.join(settings.DEPLOY_ROOT, self.fixture_path))
@patch('zerver.management.commands.send_webhook_fixture_message.os.path.exists')
@patch('zerver.management.commands.send_webhook_fixture_message.Client')
@patch('zerver.management.commands.send_webhook_fixture_message.ujson')
@patch("zerver.management.commands.send_webhook_fixture_message.open", create=True)
def test_check_if_command_post_request_to_url_with_fixture(self,
open_mock,
ujson_mock,
client_mock,
os_path_exists_mock):
ujson_mock.loads.return_value = '{}'
ujson_mock.dumps.return_value = {}
os_path_exists_mock.return_value = True
client = client_mock()
call_command(self.COMMAND_NAME, fixture=self.fixture_path, url=self.url)
self.assertTrue(ujson_mock.dumps.called)
self.assertTrue(ujson_mock.loads.called)
self.assertTrue(open_mock.called)
client.post.assert_called_once_with(self.url, {}, content_type="application/json")
| 2.140625
| 2
|
Report2/Code/getDAWNData.py
|
cvanoort/USDrugUseAnalysis
| 0
|
12779350
|
import csv
import gc
# A small function for grabbing data from a list of dictionaries based on a shared key
def countKey(key,listDataDicts):
outDict = {}
for row in listDataDicts:
try:
outDict[row[key]] += 1
except KeyError:
outDict[row[key]] = 1
return outDict
alcData = [0,0,0,0,0,0,0,0]
nonAlcIllicitData = [0,0,0,0,0,0,0,0]
nonMedPharmaData = [0,0,0,0,0,0,0,0]
filePath1 = "data/DAWN_20"
filePath2 = [ "04", "05", "06", "07", "08", "09", "10", "11" ]
filePath3 = "/DS0001/"
filePath4 = ["33041","33042","33221","32861","31264","31921","34083","34565"]
filePath5 = "-0001-Data.tsv"
for i in range(8):
    # Simple progress tracking
    print("Beginning loop iteration %d \n" % i)
# Create a path to a data set
path = filePath1 + filePath2[i] + filePath3 + filePath4[i] + filePath5
# Read that data set into a python object
listDataDicts = []
with open(path, 'r') as tsvFile:
tsvReader = csv.DictReader(tsvFile,delimiter='\t')
for row in tsvReader:
if (row['ALCOHOL'] == "1"):
alcData[i] += 1
if (row['NONALCILL'] == "1"):
nonAlcIllicitData[i] += 1
if (row['NONMEDPHARMA'] == "1"):
nonMedPharmaData[i] += 1
    # Clean up the data objects from the previous iteration in order to decrease memory usage
gc.collect()
    # Simple progress tracking
    print("Loop iteration %d complete! \n" % i)
    print(alcData, "\n", nonAlcIllicitData, "\n", nonMedPharmaData, "\n")
# Write the data to a smaller tsv file for later use
with open("DAWN_data.tsv", 'w') as outFile:
csvWriter = csv.writer(outFile, delimiter='\t')
years = [x for x in range(2004, 2012)]
drugData = [alcData, nonAlcIllicitData, nonMedPharmaData]
csvWriter.writerow( years )
for i in drugData:
csvWriter.writerow( i )
| 2.953125
| 3
|