Dataset schema (column: type, observed range/values):
- seq_id: string, length 7–11
- text: string, length 156–1.7M
- repo_name: string, length 7–125
- sub_path: string, length 4–132
- file_name: string, length 4–77
- file_ext: string, 6 classes
- file_size_in_byte: int64, 156–1.7M
- program_lang: string, 1 value
- lang: string, 38 values
- doc_type: string, 1 value
- stars: int64, 0–24.2k (may be null)
- dataset: string, 1 value
- pt: string, 1 value
4269069182
|
# Statement for enabling the development environment
DEBUG = True
# Define the application directory
import os
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# Application threads
# A common rule of thumb is to use 2 threads per available core:
# one to handle incoming requests,
# the other to perform background operations
THREADS_PER_PAGE = 2
# Enable protection against *Cross-site Request Forgery (CSRF)*
CSRF_ENABLED = True
# Use a secure and unique secret key for signing the data
CSRF_SESSION_KEY = 'secret'
# Secret key for signing cookies
SECRET_KEY = 'secret'
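# Usage sketch (an assumption, not part of this file): in a typical Flask app
# this module is loaded with `app.config.from_object('config')`, which picks up
# the uppercase attributes above. In production, SECRET_KEY and
# CSRF_SESSION_KEY should come from the environment rather than being
# hard-coded placeholders like 'secret'.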
|
swapnil085/web_portal
|
src/config.py
|
config.py
|
py
| 586
|
python
|
en
|
code
| 0
|
github-code
|
6
|
20489742276
|
import numpy as np
import scipy.linalg
from scipy.special import logsumexp
from sklearn.cluster import KMeans
from sklearn.cluster import SpectralClustering
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, SVR
from ucsl.sinkhornknopp_utils import *
def one_hot_encode(y, n_classes=None):
    ''' Utility function to turn a label vector into a one-hot encoded matrix '''
if n_classes is None:
n_classes = np.max(y) + 1
y_one_hot = np.copy(y)
return np.eye(n_classes)[y_one_hot]
def sigmoid(x, lambda_=5):
return 1 / (1 + np.exp(-lambda_ * x))
def py_softmax(x, axis=None):
"""stable softmax"""
return np.exp(x - logsumexp(x, axis=axis, keepdims=True))
def consensus_clustering(clustering_results, n_clusters, index_positives):
S = np.ones((clustering_results.shape[0], n_clusters)) / n_clusters
co_occurrence_matrix = np.zeros((clustering_results.shape[0], clustering_results.shape[0]))
for i in range(clustering_results.shape[0] - 1):
for j in range(i + 1, clustering_results.shape[0]):
co_occurrence_matrix[i, j] = sum(clustering_results[i, :] == clustering_results[j, :])
co_occurrence_matrix = np.add(co_occurrence_matrix, co_occurrence_matrix.transpose())
# here is to compute the Laplacian matrix
Laplacian = np.subtract(np.diag(np.sum(co_occurrence_matrix, axis=1)), co_occurrence_matrix)
Laplacian_norm = np.subtract(np.eye(clustering_results.shape[0]), np.matmul(
np.matmul(np.diag(1 / np.sqrt(np.sum(co_occurrence_matrix, axis=1))), co_occurrence_matrix),
np.diag(1 / np.sqrt(np.sum(co_occurrence_matrix, axis=1)))))
# replace the nan with 0
Laplacian_norm = np.nan_to_num(Laplacian_norm)
    # check whether the normalized Laplacian is symmetric; MATLAB's eig checks this automatically, but numpy/scipy do not
e_value, e_vector = scipy.linalg.eigh(Laplacian_norm)
# check if the eigen vector is complex
if np.any(np.iscomplex(e_vector)):
e_value, e_vector = scipy.linalg.eigh(Laplacian)
# train Spectral Clustering algorithm and make predictions
spectral_features = e_vector.real[:, :n_clusters]
# apply clustering method
k_means = KMeans(n_clusters=n_clusters).fit(spectral_features[index_positives])
    S[index_positives] = one_hot_encode(k_means.labels_.astype(int), n_classes=n_clusters)
return S
def compute_similarity_matrix(consensus_assignment, clustering_assignments_to_pred=None):
    # compute inter-sample positive/negative co-occurrence matrix
similarity_matrix = np.zeros((len(consensus_assignment), len(clustering_assignments_to_pred)))
for i, p_assignment in enumerate(consensus_assignment):
for j, new_point_assignment in enumerate(clustering_assignments_to_pred):
similarity_matrix[i, j] = np.sum(p_assignment == new_point_assignment)
similarity_matrix += 1e-3
similarity_matrix /= np.max(similarity_matrix)
return similarity_matrix
def compute_spectral_clustering_consensus(clustering_results, n_clusters):
    # compute positive samples co-occurrence matrix
n_positives = len(clustering_results)
similarity_matrix = np.zeros((n_positives, n_positives))
for i in range(n_positives - 1):
for j in range(i + 1, n_positives):
similarity_matrix[i, j] = sum(clustering_results[i, :] == clustering_results[j, :])
similarity_matrix = np.add(similarity_matrix, similarity_matrix.transpose())
similarity_matrix += 1e-3
similarity_matrix /= np.max(similarity_matrix)
# initialize spectral clustering method
spectral_clustering_method = SpectralClustering(n_clusters=n_clusters, affinity='precomputed')
spectral_clustering_method.fit(similarity_matrix)
return spectral_clustering_method.labels_
def launch_svc(X, y, sample_weight=None, kernel='linear', C=1):
"""Fit the classification SVMs according to the given training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,)
Training sample weights.
kernel : string,
kernel used for SVM.
C : float,
SVM hyperparameter C
Returns
-------
SVM_coefficient : array-like, shape (1, n_features)
The coefficient of the resulting SVM.
SVM_intercept : array-like, shape (1,)
The intercept of the resulting SVM.
"""
# fit the different SVM/hyperplanes
SVM_classifier = SVC(kernel=kernel, C=C)
SVM_classifier.fit(X, y, sample_weight=sample_weight)
# get SVM intercept value
SVM_intercept = SVM_classifier.intercept_
# get SVM hyperplane coefficient
SVM_coefficient = SVM_classifier.coef_
return SVM_coefficient, SVM_intercept
def launch_svr(X, y, sample_weight=None, kernel='linear', C=1):
"""Fit the classification SVMs according to the given training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,)
Training sample weights.
kernel : string,
kernel used for SVM.
C : float,
SVM hyperparameter C
Returns
-------
SVM_coefficient : array-like, shape (1, n_features)
The coefficient of the resulting SVM.
SVM_intercept : array-like, shape (1,)
The intercept of the resulting SVM.
"""
# fit the different SVM/hyperplanes
SVM_regressor = SVR(kernel=kernel, C=C)
SVM_regressor.fit(X, y, sample_weight=sample_weight)
# get SVM intercept value
SVM_intercept = SVM_regressor.intercept_
# get SVM hyperplane coefficient
SVM_coefficient = SVM_regressor.coef_
return SVM_coefficient, SVM_intercept
def launch_logistic(X, y, sample_weight=None):
"""Fit the logistic regressions according to the given training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,)
Training sample weights.
Returns
-------
logistic_coefficient : array-like, shape (1, n_features)
The coefficient of the resulting logistic regression.
"""
# fit the different logistic classifier
logistic = LogisticRegression(max_iter=200)
logistic.fit(X, y, sample_weight=sample_weight)
# get logistic coefficient and intercept
logistic_coefficient = logistic.coef_
logistic_intercept = logistic.intercept_
return logistic_coefficient, logistic_intercept
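# Minimal usage sketch for the helpers above (assumed inputs, not from the
# original file):
#
#   >>> one_hot_encode(np.array([0, 2, 1]))
#   array([[1., 0., 0.],
#          [0., 0., 1.],
#          [0., 1., 0.]])
#   >>> py_softmax(np.array([1.0, 2.0, 3.0]))  # sums to 1 up to floating point
#
# launch_svc / launch_svr / launch_logistic all return a (coefficient,
# intercept) pair. Note that scikit-learn only defines `coef_` for linear
# kernels, so the SVM helpers require kernel='linear' (their default).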
|
rlouiset/py_ucsl
|
ucsl/utils.py
|
utils.py
|
py
| 6,772
|
python
|
en
|
code
| 1
|
github-code
|
6
|
18361449182
|
from rest_framework import serializers
from .models import *
class UserSerializer(serializers.ModelSerializer):
password = serializers.CharField(write_only=True)
def create(self, validated_data):
user = User.objects.create(
username=validated_data['username'],
email=validated_data['email']
)
user.set_password(validated_data['password'])
user.save()
return user
class Meta:
model = User
fields = ['id', 'username', 'password', 'email']
class SemesterSerializer(serializers.ModelSerializer):
class Meta:
model = Semester
fields = ['id', 'start_date', 'end_date', 'year']
class CourseSerializer(serializers.ModelSerializer):
semesters = SemesterSerializer(many=True, read_only=True)
class Meta:
model = Course
fields = ['id', 'title', 'code', 'description', 'semesters']
class LecturerSerializer(serializers.ModelSerializer):
user = UserSerializer(write_only=True)
class Meta:
model = Lecturer
fields = ['id', 'user', 'firstname', 'lastname', 'email', 'course', 'DOB']
def create(self, validated_data):
user_data = validated_data.pop('user')
user = UserSerializer(data=user_data)
user.is_valid(raise_exception=True)
user_instance = user.save()
lecturer = Lecturer.objects.create(user=user_instance, **validated_data)
return lecturer
class StudentSerializer(serializers.ModelSerializer):
    user = UserSerializer(write_only=True)
    # display-only field: expose the username from the related user model
    username = serializers.ReadOnlyField(source='user.username')
    class Meta:
        model = Student
        fields = ['id', 'user', 'username', 'firstname', 'lastname', 'email', 'DOB']
def create(self, validated_data):
user_data = validated_data.pop('user')
user = UserSerializer(data=user_data)
user.is_valid(raise_exception=True)
user_instance = user.save()
student = Student.objects.create(user=user_instance, **validated_data)
return student
class ClassSerializer(serializers.ModelSerializer):
lecturer = LecturerSerializer(read_only=True)
class Meta:
model = Class
fields = ['id', 'number', 'semester', 'course', 'lecturer']
class EnrolmentSerializer(serializers.ModelSerializer):
enrolled_student = StudentSerializer(read_only=True)
enrolled_class = ClassSerializer(read_only=True)
class Meta:
model = Enrolment
fields = ['id', 'enrolled_student', 'enrolled_class', 'enrollment_date', 'grade_date', 'grade']
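# Usage sketch (an assumption; the standard DRF flow, not part of this file):
#
#   serializer = StudentSerializer(data={
#       "user": {"username": "jdoe", "email": "j@example.com", "password": "..."},
#       "firstname": "John", "lastname": "Doe",
#       "email": "j@example.com", "DOB": "2000-01-01",
#   })
#   serializer.is_valid(raise_exception=True)
#   student = serializer.save()  # dispatches to StudentSerializer.create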
|
raulfanc/gradebook_backend
|
gradebook_app/serializers.py
|
serializers.py
|
py
| 2,626
|
python
|
en
|
code
| 0
|
github-code
|
6
|
14127553386
|
import tkinter as tk
import datetime
class App:
def __init__(self,parent):
self.parent = parent
self.hour_label = tk.Label(self.parent,text="H",background='plum1',font=('verdana',12,'bold'))
self.tick1 = tk.Label(self.parent,text=':',background='plum1',font=('verdana',12,'bold'))
self.minute_label = tk.Label(self.parent,text="M",background='plum1',font=('verdana',12,'bold'))
self.tick2 = tk.Label(self.parent,text=':',background='plum1',font=('verdana',12,'bold'))
self.second_label = tk.Label(self.parent,text="S",background='plum1',font=('verdana',12,'bold'))
self.hour_label.place(x=150,y=130)
self.tick1.place(x=180,y=130)
self.minute_label.place(x=190,y=130)
self.tick2.place(x=220,y=130)
self.second_label.place(x=230,y=130)
def time_update():
self.my_label = tk.Label(self.parent,text="Digital Watch",background='yellow',
font=('times new roman',14,'bold'),borderwidth=4,relief='groove')
self.my_label.place(x=140,y=50)
hours = datetime.datetime.now().strftime("%H")
minutes = datetime.datetime.now().strftime("%M")
seconds = datetime.datetime.now().strftime("%S")
a = self.hour_label.cget(key="text")
b = self.minute_label.cget(key="text")
c = self.second_label.cget(key="text")
d = self.tick1.cget(key="text")
e = self.tick2.cget(key="text")
if a != hours:
self.hour_label.config(text=hours)
if b != minutes:
self.minute_label.config(text=minutes)
if c != seconds:
self.second_label.config(text=seconds)
if d == ':':
self.tick1.config(text=' ')
elif d == ' ':
self.tick1.config(text=':')
if e == ':':
self.tick2.config(text=' ')
elif e == ' ':
self.tick2.config(text=':')
self.my_label.after(500,time_update)
time_update()
def main():
root = tk.Tk()
root.title('Digital Watch!!!')
root.geometry('390x300+450+100')
root.config(bg='plum1')
root.resizable(0,0)
obj = App(root)
root.mainloop()
main()
|
vishalnain-10/Digital-Watch-using-tkinter
|
digi_clock.py
|
digi_clock.py
|
py
| 2,437
|
python
|
en
|
code
| 0
|
github-code
|
6
|
70281057148
|
from typing import List, Iterable
from torch.utils import data
from allennlp.common.registrable import Registrable
from allennlp.data.samplers.bucket_batch_sampler import BucketBatchSampler
from allennlp.data.samplers.max_tokens_batch_sampler import MaxTokensBatchSampler
"""
Duplicates of the pytorch Sampler classes. Broadly, these only exist
so that we can add type hints, meaning we can construct them from configuration
files. You can use these directly from Python code, but they are identical to the
pytorch ones.
"""
class PyTorchSampler(Registrable):
"""
A copy of the pytorch [Sampler](https://pytorch.org/docs/stable/_modules/torch/utils/data/sampler.html)
which allows us to register it with `Registrable.`
"""
def __iter__(self) -> Iterable[int]:
raise NotImplementedError
class PyTorchBatchSampler(Registrable):
"""
A copy of the pytorch
[BatchSampler](https://pytorch.org/docs/stable/data.html#torch.utils.data.BatchSampler)
which allows us to register it with `Registrable.`
"""
def __iter__(self) -> Iterable[List[int]]:
raise NotImplementedError
@PyTorchSampler.register("sequential")
class PyTorchSequentialSampler(data.SequentialSampler, PyTorchSampler):
"""
A registrable version of pytorch's
[SequentialSampler](https://pytorch.org/docs/stable/data.html#torch.utils.data.SequentialSampler).
Registered as a `PyTorchSampler` with name "sequential".
    In a typical AllenNLP configuration file, the `data_source` parameter does not get an entry
    under "sampler"; it gets constructed separately.
"""
def __init__(self, data_source: data.Dataset):
super().__init__(data_source)
@PyTorchSampler.register("random")
class PyTorchRandomSampler(data.RandomSampler, PyTorchSampler):
"""
A registrable version of pytorch's
[RandomSampler](https://pytorch.org/docs/stable/data.html#torch.utils.data.RandomSampler).
    Samples elements randomly. If without replacement, then sample from a shuffled dataset.
    If with replacement, then the user can specify `num_samples` to draw.
Registered as a `PyTorchSampler` with name "random".
# Parameters
data_source: `Dataset`, required
The dataset to sample from.
    In a typical AllenNLP configuration file, this parameter does not get an entry under
    "sampler"; it gets constructed separately.
replacement : `bool`, optional (default = `False`)
Samples are drawn with replacement if `True`.
num_samples: `int` (default = `len(dataset)`)
The number of samples to draw. This argument
is supposed to be specified only when `replacement` is ``True``.
"""
def __init__(
self, data_source: data.Dataset, replacement: bool = False, num_samples: int = None
):
super().__init__(data_source, replacement, num_samples)
@PyTorchSampler.register("subset_random")
class PyTorchSubsetRandomSampler(data.SubsetRandomSampler, PyTorchSampler):
"""
A registrable version of pytorch's
[SubsetRandomSampler](https://pytorch.org/docs/stable/data.html#torch.utils.data.SubsetRandomSampler).
Samples elements randomly from a given list of indices, without replacement.
Registered as a `PyTorchSampler` with name "subset_random".
# Parameters
indices: `List[int]`
a sequence of indices to sample from.
"""
def __init__(self, indices: List[int]):
super().__init__(indices)
@PyTorchSampler.register("weighted_random")
class PyTorchWeightedRandomSampler(data.WeightedRandomSampler, PyTorchSampler):
"""
A registrable version of pytorch's
[WeightedRandomSampler](https://pytorch.org/docs/stable/data.html#torch.utils.data.WeightedRandomSampler).
Samples elements from `[0,...,len(weights)-1]` with given probabilities (weights).
Registered as a `PyTorchSampler` with name "weighted_random".
# Parameters:
weights : `List[float]`
        A sequence of weights, not necessarily summing up to one.
num_samples : `int`
The number of samples to draw.
replacement : `bool`
If ``True``, samples are drawn with replacement.
If not, they are drawn without replacement, which means that when a
sample index is drawn for a row, it cannot be drawn again for that row.
# Examples
```python
>>> list(WeightedRandomSampler([0.1, 0.9, 0.4, 0.7, 3.0, 0.6], 5, replacement=True))
[0, 0, 0, 1, 0]
>>> list(WeightedRandomSampler([0.9, 0.4, 0.05, 0.2, 0.3, 0.1], 5, replacement=False))
[0, 1, 4, 3, 2]
```
"""
def __init__(self, weights: List[float], num_samples: int, replacement: bool = True):
super().__init__(weights, num_samples, replacement)
@PyTorchBatchSampler.register("basic")
class PyTorchBasicBatchSampler(data.BatchSampler, PyTorchBatchSampler):
"""
A registrable version of pytorch's
[BatchSampler](https://pytorch.org/docs/stable/data.html#torch.utils.data.BatchSampler).
Wraps another sampler to yield a mini-batch of indices.
Registered as a `PyTorchBatchSampler` with name "basic".
# Parameters
sampler: `PyTorchSampler`
The base sampler.
batch_size : `int`
The size of the batch.
drop_last : `bool`
        If `True`, the sampler will drop the last batch if
        its size would be less than `batch_size`.
# Examples
```python
list(PyTorchBatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=False))
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
list(PyTorchBatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=True))
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
```
"""
def __init__(self, sampler: PyTorchSampler, batch_size: int, drop_last: bool):
super().__init__(sampler, batch_size, drop_last)
@PyTorchBatchSampler.register("bucket")
class PyTorchBucketBatchSampler(PyTorchBatchSampler):
"""
A PyTorch-compatible version of `BucketBatchSampler`.
"""
def __init__(
self,
data_source: data.Dataset,
batch_size: int,
sorting_keys: List[str] = None,
padding_noise: float = 0.1,
drop_last: bool = False,
) -> None:
self.base_sampler = BucketBatchSampler(
batch_size, sorting_keys=sorting_keys, padding_noise=padding_noise, drop_last=drop_last
)
self.data_source = data_source
def __iter__(self) -> Iterable[List[int]]:
return self.base_sampler.get_batch_indices(self.data_source)
def __len__(self):
return self.base_sampler.get_num_batches(self.data_source)
@PyTorchBatchSampler.register("max_tokens_sampler")
class PyTorchMaxTokensBatchSampler(PyTorchBatchSampler):
"""
A PyTorch-compatible version of `MaxTokensBatchSampler`.
"""
def __init__(
self,
data_source: data.Dataset,
max_tokens: int,
sorting_keys: List[str] = None,
padding_noise: float = 0.1,
) -> None:
self.base_sampler = MaxTokensBatchSampler(
max_tokens, sorting_keys=sorting_keys, padding_noise=padding_noise
)
self.data_source = data_source
def __iter__(self) -> Iterable[List[int]]:
return self.base_sampler.get_batch_indices(self.data_source)
def __len__(self):
return self.base_sampler.get_num_batches(self.data_source)
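# Usage sketch (an assumption, not part of this file): because these classes
# are `Registrable`, they can be looked up by their registered name and built
# from config-style parameters, e.g.:
#
#   sampler_cls = PyTorchSampler.by_name("sequential")
#   sampler = sampler_cls(data_source=my_dataset)  # my_dataset is hypothetical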
|
esteng/ambiguous_vqa
|
models/allennlp/data/samplers/pytorch_samplers.py
|
pytorch_samplers.py
|
py
| 7,387
|
python
|
en
|
code
| 5
|
github-code
|
6
|
37079042149
|
import sys
group_num = 1
while True:
n = int(sys.stdin.readline())
if n == 0:
break
name_list = []
pn_list = [[] for i in range(n)]
for j in range(n):
name_pn = sys.stdin.readline().split()
name_list.append(name_pn[0])
pn_list[j] = name_pn[1:]
check = True
print(f'Group {group_num}')
pn_order = 0
for k in pn_list:
n_order = 0
for l in k:
if l == 'N':
check = False
print(f'{name_list[pn_order - (n_order + 1)]} was nasty about {name_list[pn_order]}')
n_order += 1
pn_order += 1
if check:
print('Nobody was nasty')
print()
group_num += 1
|
JeonggonCho/algorithm
|
백준/Silver/1384. 메시지/메시지.py
|
메시지.py
|
py
| 752
|
python
|
en
|
code
| 0
|
github-code
|
6
|
10422855793
|
import dataclasses
import io
import logging
import math
import shutil
import tempfile
from pathlib import Path
import discord
import graphviz
import PIL.Image
from discord import Embed
from discord.ext.commands import Context, Converter
from PIL import ImageDraw
from randovania.game_description import default_database, pretty_print
from randovania.game_description.db.area import Area
from randovania.game_description.db.node import Node, NodeLocation
from randovania.game_description.db.region import Region
from randovania.game_description.game_description import GameDescription
from randovania.games.game import RandovaniaGame
from randovania.lib import enum_lib
from randovania.server.discord.bot import RandovaniaBot
from randovania.server.discord.randovania_cog import RandovaniaCog
@dataclasses.dataclass()
class AreaWidget:
area: Area
command_id: str
view: discord.ui.View = None
@dataclasses.dataclass()
class SplitRegion:
region: Region
name: str
areas: list[AreaWidget]
command_id: str
view: discord.ui.View = None
def render_area_with_graphviz(area: Area) -> io.BytesIO | None:
dot = graphviz.Digraph(comment=area.name)
for node in area.nodes:
if node.is_derived_node:
continue
dot.node(node.name)
known_edges = set()
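    # track drawn edges so a two-way connection is rendered once with dir="both"
    # instead of as two separate arrows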
for source, target in area.connections.items():
if source.is_derived_node:
continue
for target_node, requirement in target.items():
if target_node.is_derived_node:
continue
direction = None
            if source in area.connections.get(target_node, {}):
direction = "both"
known_edges.add((target_node.name, source.name))
if (source.name, target_node.name) not in known_edges:
dot.edge(source.name, target_node.name, dir=direction)
known_edges.add((source.name, target_node.name))
output_dir = tempfile.mkdtemp()
try:
p = Path(dot.render(directory=output_dir, format="png", cleanup=True))
return io.BytesIO(p.read_bytes())
except graphviz.backend.ExecutableNotFound as e:
logging.info("Unable to render graph for %s: %s", area.name, str(e))
return None
finally:
shutil.rmtree(output_dir)
def render_area_with_pillow(area: Area, data_path: Path) -> io.BytesIO | None:
image_path = data_path.joinpath("assets", "maps", f"{area.map_name}.png")
if not image_path.exists():
return None
with PIL.Image.open(image_path) as im:
assert isinstance(im, PIL.Image.Image)
def location_to_pos(loc: NodeLocation):
return loc.x, im.height - loc.y
draw = ImageDraw.Draw(im)
def draw_connections_from(source_node: Node):
for target_node, requirement in area.connections[source_node].items():
if target_node.is_derived_node:
continue
source = location_to_pos(source_node.location)
target = location_to_pos(target_node.location)
if sum((a - b) ** 2 for a, b in zip(source, target)) < 4:
continue
draw.line(source + target, width=2, fill=(255, 255, 255, 255))
draw.line(source + target, width=1, fill=(0, 0, 0, 255))
for node in area.nodes:
if not node.is_derived_node:
draw_connections_from(node)
for node in area.nodes:
if node.location is None or node.is_derived_node:
continue
p = location_to_pos(node.location)
draw.ellipse((p[0] - 5, p[1] - 5, p[0] + 5, p[1] + 5), fill=(255, 255, 255, 255), width=5)
draw.ellipse((p[0] - 5, p[1] - 5, p[0] + 5, p[1] + 5), fill=(0, 0, 0, 255), width=4)
draw.text(
(p[0] - draw.textlength(node.name) / 2, p[1] + 15),
node.name,
stroke_width=2,
stroke_fill=(255, 255, 255, 255),
)
draw.text(
(p[0] - draw.textlength(node.name) / 2, p[1] + 15),
node.name,
stroke_width=1,
stroke_fill=(0, 0, 0, 255),
)
result = io.BytesIO()
im.save(result, "PNG")
result.seek(0)
return result
async def create_split_regions(db: GameDescription) -> list[SplitRegion]:
region_options = []
def create_id():
return f"{db.game.value}_region_{len(region_options)}"
def create_areas(a):
return [AreaWidget(it, f"{create_id()}_area_{i}") for i, it in enumerate(a)]
for region in db.region_list.regions:
for use_dark_name in [False, True]:
if use_dark_name and region.dark_name is None:
continue
areas = sorted(
(area for area in region.areas if area.in_dark_aether == use_dark_name and area.nodes),
key=lambda it: it.name,
)
name = region.correct_name(use_dark_name)
if len(areas) > 25:
per_part = math.ceil(len(areas) / math.ceil(len(areas) / 25))
while areas:
areas_part = areas[:per_part]
del areas[:per_part]
region_options.append(
SplitRegion(
region,
f"{name} ({areas_part[0].name[:2]}-{areas_part[-1].name[:2]})",
create_areas(areas_part),
create_id(),
)
)
else:
region_options.append(SplitRegion(region, name, create_areas(areas), create_id()))
region_options.sort(key=lambda it: it.name)
return region_options
class EnumConverter(Converter):
async def convert(self, ctx: Context, argument: str):
return RandovaniaGame(argument)
_GameChoices = discord.Option(
str,
description="The game's database to check.",
choices=[
discord.OptionChoice(name=game.long_name, value=game.value) for game in enum_lib.iterate_enum(RandovaniaGame)
],
)
_GameChoices.converter = EnumConverter()
_GameChoices._raw_type = RandovaniaGame
class SelectNodesItem(discord.ui.Select):
def __init__(self, game: RandovaniaGame, area: AreaWidget):
self.game = game
self.area = area
options = [discord.SelectOption(label=node.name) for node in self.valid_nodes()]
super().__init__(
custom_id=area.command_id,
placeholder="Choose the nodes",
options=options,
max_values=min(10, len(options)),
)
def valid_nodes(self):
return [node for node in sorted(self.area.area.nodes, key=lambda it: it.name) if not node.is_derived_node]
def _describe_selected_connections(self, original_content: str):
db = default_database.game_description_for(self.game)
def snipped_message(n: str) -> str:
return f"\n{n}: *Skipped*\n"
body_by_node = {}
embeds = []
for node in sorted(self.area.area.nodes, key=lambda it: it.name):
if node.name not in self.values:
continue
name = node.name
if node.heal:
name += " (Heals)"
if self.area.area.default_node == node.name:
name += "; Default Node"
body = pretty_print.pretty_print_node_type(node, db.region_list) + "\n"
node_bodies = []
for target_node, requirement in db.region_list.area_connections_from(node):
if target_node.is_derived_node:
continue
extra_lines = []
for level, text in pretty_print.pretty_print_requirement(requirement.simplify()):
extra_lines.append("{}{}".format(" " * level, text))
inner = "\n".join(extra_lines)
new_entry = f"\n{target_node.name}:\n```\n{inner}\n```"
node_bodies.append([target_node.name, new_entry])
space_left = 4096 - len(body)
for node_name, _ in node_bodies:
space_left -= len(snipped_message(node_name))
node_bodies.sort(key=lambda it: len(it[1]))
for node_i, (node_name, node_entry) in enumerate(node_bodies):
snipped = snipped_message(node_name)
space_left += len(snipped)
if len(node_entry) <= space_left:
space_left -= len(node_entry)
else:
node_bodies[node_i][1] = snipped
node_bodies.sort(key=lambda it: it[0])
for _, node_entry in node_bodies:
body += node_entry
body_by_node[name] = body
snipped = "*(message too long, skipped)*"
space_left = 6000 - len(original_content)
for name, body in body_by_node.items():
space_left -= len(name) + len(snipped)
for name, body in sorted(body_by_node.items(), key=lambda it: len(it[1])):
space_left += len(snipped)
if len(body) <= space_left:
space_left -= len(body)
else:
body_by_node[name] = snipped
for name, body in body_by_node.items():
embeds.append(
discord.Embed(
title=name,
description=body,
)
)
return embeds
async def callback(self, interaction: discord.Interaction):
r = interaction.response
assert isinstance(r, discord.InteractionResponse)
await r.defer()
original_response = await interaction.original_response()
try:
embeds = self._describe_selected_connections(original_response.content)
except Exception as e:
logging.exception("Error updating visible nodes of %s: %s", self.area.area.name, str(self.values))
embeds = [
discord.Embed(
title="Error describing area",
description=str(e),
)
]
logging.info("Updating visible nodes of %s: %s", self.area.area.name, str(self.values))
await original_response.edit(
embeds=embeds,
)
class SelectAreaItem(discord.ui.Select):
def __init__(self, game: RandovaniaGame, split_region: SplitRegion):
self.game = game
self.split_region = split_region
options = [
discord.SelectOption(
label=area.area.name,
value=area.command_id,
)
for area in split_region.areas
]
super().__init__(
custom_id=split_region.command_id,
placeholder="Choose the room",
options=options,
)
async def callback(self, interaction: discord.Interaction):
r = interaction.response
assert isinstance(r, discord.InteractionResponse)
option_selected = self.values[0]
valid_items = [area for area in self.split_region.areas if area.command_id == option_selected]
if not valid_items:
await r.defer()
return await interaction.edit_original_response(
view=None,
embeds=[],
content=f"Invalid selected option, unable to find given db subset '{option_selected}'.",
)
area = valid_items[0]
db = default_database.game_description_for(self.game)
title = f"{self.game.long_name}: {db.region_list.area_name(area.area)}"
files = []
area_image = render_area_with_pillow(area.area, self.game.data_path)
if area_image is not None:
files.append(discord.File(area_image, filename=f"{area.area.name}_image.png"))
area_graph = render_area_with_graphviz(area.area)
if area_graph is not None:
files.append(discord.File(area_graph, filename=f"{area.area.name}_graph.png"))
logging.info("Responding to area for %s with %d attachments.", area.area.name, len(files))
await r.send_message(
content=f"**{title}**\nRequested by {interaction.user.display_name}.",
files=files,
view=area.view,
)
class SelectSplitRegionItem(discord.ui.Select):
def __init__(self, game: RandovaniaGame, split_regions: list[SplitRegion]):
self.game = game
self.split_regions = split_regions
options = [
discord.SelectOption(label=split_region.name, value=split_region.command_id)
for split_region in split_regions
]
super().__init__(
custom_id=f"{game.value}_region",
placeholder="Choose your region",
options=options,
)
async def callback(self, interaction: discord.Interaction):
r = interaction.response
assert isinstance(r, discord.InteractionResponse)
option_selected = self.values[0]
valid_items = [it for it in self.split_regions if it.command_id == option_selected]
if not valid_items:
return await r.send_message(
view=None,
embeds=[],
ephemeral=True,
content=f"Invalid selected option, unable to find given db subset '{option_selected}'.",
)
split_region = valid_items[0]
embed = Embed(
title=f"{self.game.long_name} Database", description=f"Choose the room in {split_region.name} to visualize."
)
logging.info(
"Responding to area selection for section %s with %d options.", split_region.name, len(split_region.areas)
)
return await r.send_message(
embed=embed,
view=split_region.view,
ephemeral=True,
)
class DatabaseCommandCog(RandovaniaCog):
_split_regions: dict[RandovaniaGame, list[SplitRegion]]
_select_split_region_view: dict[RandovaniaGame, discord.ui.View]
def __init__(self, configuration: dict, bot: RandovaniaBot):
self.configuration = configuration
self.bot = bot
self._split_regions = {}
self._select_split_region_view = {}
self._on_database_component_listener = {}
@discord.commands.slash_command(name="database")
async def database_inspect(self, context: discord.ApplicationContext, game: _GameChoices):
"""Consult the Randovania's logic database for one specific room."""
assert isinstance(game, RandovaniaGame)
embed = Embed(title=f"{game.long_name} Database", description="Choose the db subset to visualize.")
view = self._select_split_region_view[game]
logging.info("Responding requesting list of regions for game %s.", game.long_name)
await context.respond(embed=embed, view=view, ephemeral=True)
async def add_commands(self):
for game in enum_lib.iterate_enum(RandovaniaGame):
db = default_database.game_description_for(game)
region_options = await create_split_regions(db)
self._split_regions[game] = region_options
view = discord.ui.View(
SelectSplitRegionItem(game, region_options),
timeout=None,
)
self.bot.add_view(view)
self._select_split_region_view[game] = view
for split_region in region_options:
split_region.view = discord.ui.View(
SelectAreaItem(game, split_region),
timeout=None,
)
self.bot.add_view(split_region.view)
for area in split_region.areas:
area.view = discord.ui.View(
SelectNodesItem(game, area),
timeout=None,
)
self.bot.add_view(area.view)
def setup(bot: RandovaniaBot):
bot.add_cog(DatabaseCommandCog(bot.configuration, bot))
|
randovania/randovania
|
randovania/server/discord/database_command.py
|
database_command.py
|
py
| 16,089
|
python
|
en
|
code
| 165
|
github-code
|
6
|
8276036736
|
import subprocess
import sys
import os
from ronto import verbose
from ronto.model.builder import InteractiveBuilder, TargetBuilder
from ronto.model.docker import docker_context
from .fetch import process as fetch_process
from .init import init_process
@docker_context()
def build_process(args):
verbose("Process build command")
if args.interactive and not args.list_targets:
builder = InteractiveBuilder()
else:
builder = TargetBuilder()
if args.list_targets:
builder.list_targets()
else:
builder.build()
def process(args):
if args.fetch:
args.force = True
fetch_process(args)
if args.init:
args.rebuild_conf = False
args.clean_build = False
args.clean_conf = True
init_process(args)
build_process(args)
def add_command(subparser):
parser = subparser.add_parser(
"build",
help="""
        Actually build something:
        source the environment and use bitbake.
""",
)
parser.add_argument(
"--fetch",
help="Run fetch command (with forced option) before build",
action="store_true",
)
parser.add_argument(
"-l", "--list-targets",
help="List all targes that are subject to 'batch' build" \
" - it overwrites --interactive option",
action="store_true",
)
parser.add_argument(
"--init",
help="Run init command (with clean_conf option) before build",
action="store_true",
)
parser.add_argument(
"-i",
"--interactive",
help="Ignore targets and operate interactively",
action="store_true",
)
parser.set_defaults(func=process)
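# CLI sketch (an assumption: a top-level `ronto` entry point wiring in
# add_command; the command name is hypothetical):
#   ronto build --fetch --init   # fetch + re-init, then build all targets
#   ronto build -l               # list batch-build targets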
|
almedso/ronto
|
src/ronto/cli/build.py
|
build.py
|
py
| 1,736
|
python
|
en
|
code
| 1
|
github-code
|
6
|
72330666747
|
# WRITE YOUR SOLUTION HERE:
import pygame
pygame.init()
naytto = pygame.display.set_mode((640, 480))
robo = pygame.image.load("robo.png")
leveys, korkeus = 640, 480
x = 0
y = 0
suunta = 1
kello = pygame.time.Clock()
while True:
for tapahtuma in pygame.event.get():
if tapahtuma.type == pygame.QUIT:
exit()
naytto.fill((0, 0, 0))
naytto.blit(robo, (x, y))
pygame.display.flip()
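    # directions: 1 = right along the top edge, 2 = down the right edge,
    # 3 = left along the bottom edge, 4 = up the left edge (a clockwise loop)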
if suunta == 1:
x += 1
if x+robo.get_width() == leveys:
suunta = 2
elif suunta == 2:
y += 1
if y+robo.get_height() == korkeus:
suunta = 3
elif suunta == 3:
x -= 1
if x == 0:
suunta = 4
elif suunta == 4:
y -= 1
if y == 0:
suunta = 1
kello.tick(60)
|
jevgenix/Python_OOP
|
osa13-06_reunan_kierto/src/main.py
|
main.py
|
py
| 796
|
python
|
fi
|
code
| 4
|
github-code
|
6
|
40734405251
|
from tkinter import Tk, Label, Button, HORIZONTAL
from tkinter import ttk
import time
from tkinter import messagebox
import sys
def main(name):
'''main function for studying'''
# instantiating a class object for the tkinter GUI
study_window = Tk()
study_window.title("Studying App")
study_window.geometry("600x400")
def study():
'''Studying function'''
# reading and getting the saved information about how an individual wants to study
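        # assumed line format in students.txt, inferred from the parsing below:
        #   name | subject1,subject2,... | num_sessions | session_length | break_length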
students_register = open("students.txt", "r")
for line in students_register:
if line.split("|")[0].strip() == name:
subjects = line.split("|")[1].strip().split(",")
num_sessions = int(line.split("|")[2].strip())
session = int(line.split("|")[3].strip())
break_time = int(line.split("|")[4].strip())
name_label.config(text=f"Go, Go, {name.split('@')[0]}",font=("courier", 10, "bold"), fg="green", bg="yellow")
# session and time tracker function
def session_counter(num_sessions, subjects, session, break_time):
            subjects_list = subjects*num_sessions # repeat the subjects list num_sessions times so indexing by session number never goes out of range
count_1 = session
count_2 = break_time
# Base case
if num_sessions == 0:
messagebox.showinfo("done", "Study is Over")
return None
# Sessions
messagebox.showinfo("study", "Session {}: You are studying {}".format(num_sessions, subjects_list[num_sessions])) # popup messagebox
while count_1 > 0:
count_1 -= 1
progress["value"] += ((1/session)*100) # updating the value of the progress widget
                study_window.update_idletasks() # update the screen before control returns to the mainloop so the student can see the time left
time.sleep(0.5) #time in seconds
progress["value"] = 100
# Breaks
while count_2 > 0:
if count_2 == break_time:
messagebox.showinfo("break", "On break...!")
count_2 -= 1
progress["value"] -= ((1/break_time)*100)
study_window.update_idletasks()
time.sleep(0.5)
# terminating window
option = messagebox.askquestion("option", "Continue studying")
if option == "no":
sys.exit()
# recursive statement
session_counter(num_sessions - 1 , subjects, session, break_time)
session_counter(num_sessions, subjects, session, break_time)
# progress widget that updates with time in this case
progress = ttk.Progressbar(study_window, orient=HORIZONTAL, length=300, mode='determinate')
progress.grid(row=1, column=0, pady=20, padx=150, columnspan=3)
start_button = Button(study_window, text="Start", command = study, font=("courier", 10, "bold"), fg="green", bg="yellow")
start_button.grid(row=2, column=1, padx=150, pady=10)
name_label = Label(study_window, text="")
name_label.grid(row=0, column=1, padx=150, pady=10)
study_window.mainloop()
|
tinotenda-alfaneti/pomodoro-study-app
|
studying.py
|
studying.py
|
py
| 3,374
|
python
|
en
|
code
| 0
|
github-code
|
6
|
17315144162
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: zk
@contact: kun.zhang@nuance.com
@file: data_format.py
@time: 4/19/2019 10:08 AM
@desc:
"""
def add_label(data, topic, sep='\t'):
"""
add label for data
:param data:
:param topic:
:param sep:
:return:
"""
res = []
for item in data:
new_item = topic + sep + item
res.append(new_item)
return res
def tag_mask(data, mask, to_mask):
import re
for i in range(len(data)):
data[i] = re.sub(to_mask, mask, data[i])
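# Usage sketch (assumed inputs, not from the original file):
#
#   >>> add_label(['hello world'], 'greeting')
#   ['greeting\thello world']
#
#   >>> data = ['call 123']
#   >>> tag_mask(data, '<NUM>', r'\d+')  # mutates `data` in place, returns None
#   >>> data
#   ['call <NUM>']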
|
zhiyou720/tools
|
data_format.py
|
data_format.py
|
py
| 537
|
python
|
en
|
code
| 0
|
github-code
|
6
|
21568994484
|
import torch
import torchvision.datasets
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
# relative paths are resolved against the current project directory
dataset = torchvision.datasets.CIFAR10("./PyTorch/data", train=True, transform=torchvision.transforms.ToTensor(), download=True, )
dataloader = DataLoader(dataset, 64)
# print the number of images
print(f'size of dataset:{len(dataset)}')
# instantiate the SummaryWriter object
writer = SummaryWriter("./PyTorch/logs/tensorboard")
step = 0
for data in dataloader:
img, target = data
    # pitfall here: note the difference between add_images and add_image
writer.add_images("input", img, step)
step += 1
writer.close()
# on the command line, run: tensorboard --logdir=/PyTorch/logs
# then open the corresponding link in a browser to view the results
|
puzhiyuan/AutonomousDriving
|
PyTorch/src/TensorBoard.py
|
TensorBoard.py
|
py
| 761
|
python
|
en
|
code
| 1
|
github-code
|
6
|
72833441787
|
import numpy as np
import matplotlib.pyplot as plt
# Set size of squares and circles.
d = 31  # 61
r = int(d/2)
# Set linear dimension of square canvas.
res = 64  # 128
pad = 2 # boundary number of pixels
# Set number of data examples to be generated.
num_examples = 1000
def gen():
# Create square stamp.
square = np.zeros((d,d,3))
square[:,:,0:1] = 1
# Create circle stamp.
circle = np.zeros((d,d,3))
for y in range(np.shape(circle)[0]):
for x in range(np.shape(circle)[1]):
if np.sqrt((x-r)*(x-r) + (r-y)*(r-y)) < r:
circle[y,x,0:1] = 1
# Create triangle stamp.
triangle = np.zeros((d,d,3))
for y in range(np.shape(triangle)[0]):
for x in range(np.shape(triangle)[1]):
if ((d//2)-y) < -(x-(d//2)) and (x-(d//2)) > ((d//2)-y):
triangle[y,x,0:1] = 1
# Create data array of blank input canvases and blank labels.
X_data = np.zeros((num_examples, res, res, 3))
y_data = np.zeros((num_examples, 2))
'''
X_data = np.zeros((num_examples, res, res, 3))
y_data = np.zeros((num_examples, 2))
'''
# Randomly stamp a circle or square somewhere on each canvas and label appropriately.
for i in range(num_examples):
rand_o = np.random.randint(0 + pad, res - d - pad, 2)
if np.random.randint(0, 2) == 0:
X_data[i, rand_o[0]:rand_o[0]+d, rand_o[1]:rand_o[1]+d, :] = triangle
y_data[i, :] = np.array([1, 0])
'''
X_data[i, :, :, :] = circle
y_data[i,:] = np.array([1,0])
'''
else:
X_data[i, rand_o[0]:rand_o[0] + d, rand_o[1]:rand_o[1] + d, :] = square
y_data[i, :] = np.array([0, 1])
'''
X_data[i, :, :, :] = square
y_data[i,:] = np.array([0,1])
'''
# Split data and label arrays into training and test sets.
X_train = X_data[0:int(0.8*num_examples)]
y_train = y_data[0:int(0.8*num_examples)]
X_test = X_data[int(0.8*num_examples):]
y_test = y_data[int(0.8*num_examples):]
return X_train, y_train, X_test, y_test
#imgplot = plt.imshow(example)
#plt.axis('off')
#plt.show(imgplot)
|
alexgilbert747/thesis
|
generate_data.py
|
generate_data.py
|
py
| 2,290
|
python
|
en
|
code
| 0
|
github-code
|
6
|
2083516856
|
import csv
import json
from collections import OrderedDict
from pathlib import Path
from typing import Any, List, Optional, Union
import torch
from pytorch_lightning import LightningModule, Trainer
from src.utils import pylogger
log = pylogger.get_pylogger(__name__)
def process_state_dict(
state_dict: Union[OrderedDict, dict],
symbols: int = 0,
exceptions: Optional[Union[str, List[str]]] = None,
) -> OrderedDict:
"""Filter and map model state dict keys.
Args:
state_dict (Union[OrderedDict, dict]): State dict.
symbols (int): Determines how many symbols should be cut in the
beginning of state dict keys. Default to 0.
exceptions (Union[str, List[str]], optional): Determines exceptions,
i.e. substrings, which keys should not contain.
Returns:
OrderedDict: Filtered state dict.
"""
new_state_dict = OrderedDict()
if exceptions:
if isinstance(exceptions, str):
exceptions = [exceptions]
for key, value in state_dict.items():
is_exception = False
if exceptions:
for exception in exceptions:
if key.startswith(exception):
is_exception = True
if not is_exception:
new_state_dict[key[symbols:]] = value
return new_state_dict
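# Example (assumed input, not from the original file): with symbols=6 the
# Lightning "model." prefix is cut from every key, and keys matching an
# exception prefix are dropped:
#
#   >>> sd = OrderedDict({"model.layer.weight": 1, "loss.weight": 2})
#   >>> dict(process_state_dict(sd, symbols=6, exceptions="loss"))
#   {'layer.weight': 1}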
def save_state_dicts(
trainer: Trainer,
model: LightningModule,
dirname: str,
symbols: int = 6,
exceptions: Optional[Union[str, List[str]]] = None,
) -> None:
"""Save model state dicts for last and best checkpoints.
Args:
trainer (Trainer): Lightning trainer.
model (LightningModule): Lightning model.
dirname (str): Saving directory.
symbols (int): Determines how many symbols should be cut in the
beginning of state dict keys. Default to 6 for cutting
Lightning name prefix.
exceptions (Union[str, List[str]], optional): Determines exceptions,
i.e. substrings, which keys should not contain. Default to [loss].
"""
# save state dict for last checkpoint
mapped_state_dict = process_state_dict(
model.state_dict(), symbols=symbols, exceptions=exceptions
)
path = f"{dirname}/last_ckpt.pth"
torch.save(mapped_state_dict, path)
log.info(f"Last ckpt state dict saved to: {path}")
# save state dict for best checkpoint
best_ckpt_path = trainer.checkpoint_callback.best_model_path
if best_ckpt_path == "":
log.warning("Best ckpt not found! Skipping...")
return
best_ckpt_score = trainer.checkpoint_callback.best_model_score
if best_ckpt_score is not None:
prefix = str(best_ckpt_score.detach().cpu().item())
prefix = prefix.replace(".", "_")
else:
log.warning("Best ckpt score not found! Use prefix <unknown>!")
prefix = "unknown"
model = model.load_from_checkpoint(best_ckpt_path)
mapped_state_dict = process_state_dict(
model.state_dict(), symbols=symbols, exceptions=exceptions
)
path = f"{dirname}/best_ckpt_{prefix}.pth"
torch.save(mapped_state_dict, path)
log.info(f"Best ckpt state dict saved to: {path}")
def save_predictions_from_dataloader(
predictions: List[Any], path: Path
) -> None:
"""Save predictions returned by `Trainer.predict` method for single
dataloader.
Args:
predictions (List[Any]): Predictions returned by `Trainer.predict` method.
path (Path): Path to predictions.
"""
if path.suffix == ".csv":
with open(path, "w") as csv_file:
writer = csv.writer(csv_file)
for batch in predictions:
keys = list(batch.keys())
batch_size = len(batch[keys[0]])
                for i in range(batch_size):
                    # write the values (not the dict itself) so each row holds this sample's predictions
                    row = [batch[key][i].tolist() for key in keys]
                    writer.writerow(row)
elif path.suffix == ".json":
processed_predictions = {}
for batch in predictions:
keys = [key for key in batch.keys() if key != "names"]
batch_size = len(batch[keys[0]])
for i in range(batch_size):
item = {key: batch[key][i].tolist() for key in keys}
if "names" in batch.keys():
processed_predictions[batch["names"][i]] = item
else:
processed_predictions[len(processed_predictions)] = item
with open(path, "w") as json_file:
json.dump(processed_predictions, json_file, ensure_ascii=False)
else:
raise NotImplementedError(f"{path.suffix} is not implemented!")
def save_predictions(
predictions: List[Any], dirname: str, output_format: str = "json"
) -> None:
"""Save predictions returned by `Trainer.predict` method.
    Since `LightningDataModule.predict_dataloader` can return either a
    DataLoader or List[DataLoader], `Trainer.predict` can return either a list
    of dictionaries, one for each provided batch containing their respective
    predictions, or a list of lists, one for each provided dataloader
    containing their respective predictions, where each list contains dictionaries.
Args:
predictions (List[Any]): Predictions returned by `Trainer.predict` method.
dirname (str): Dirname for predictions.
output_format (str): Output file format. It could be `json` or `csv`.
Default to `json`.
"""
if not predictions:
log.warning("Predictions is empty! Saving was cancelled ...")
return
if output_format not in ("json", "csv"):
raise NotImplementedError(
f"{output_format} is not implemented! Use `json` or `csv`."
"Or change `src.utils.saving.save_predictions` func logic."
)
path = Path(dirname) / "predictions"
path.mkdir(parents=True, exist_ok=True)
if isinstance(predictions[0], dict):
target_path = path / f"predictions.{output_format}"
save_predictions_from_dataloader(predictions, target_path)
log.info(f"Saved predictions to: {str(target_path)}")
return
elif isinstance(predictions[0], list):
for idx, predictions_idx in enumerate(predictions):
if not predictions_idx:
log.warning(
f"Predictions for DataLoader #{idx} is empty! Skipping..."
)
continue
target_path = path / f"predictions_{idx}.{output_format}"
save_predictions_from_dataloader(predictions_idx, target_path)
log.info(
f"Saved predictions for DataLoader #{idx} to: "
f"{str(target_path)}"
)
return
raise Exception(
"Passed predictions format is not supported by default!\n"
"Make sure that it is formed correctly! It requires as List[Dict[str, Any]] type"
"in case of predict_dataloader returns DataLoader or List[List[Dict[str, Any]]]"
"type in case of predict_dataloader returns List[DataLoader]!\n"
"Or change `src.utils.saving.save_predictions` function logic."
)
|
gorodnitskiy/yet-another-lightning-hydra-template
|
src/utils/saving_utils.py
|
saving_utils.py
|
py
| 7,154
|
python
|
en
|
code
| 128
|
github-code
|
6
|
38857299042
|
from django.db.utils import IntegrityError
from spyne.error import ResourceNotFoundError, ResourceAlreadyExistsError
from spyne.model.complex import Iterable
from spyne.model.primitive import Integer
from spyne.protocol.soap import Soap11
from spyne.application import Application
from spyne.decorator import rpc
from spyne.util.django import DjangoService
from spyne.model.complex import Array
import json
from .models import Submission as SubmissionModel
from .serializers import Submission
class SortService(DjangoService):
"""
    Service for handling sort submissions
"""
@rpc(Array(Integer), _returns=Array(Integer))
def sort(ctx, array):
input_data = json.dumps(array)
sub = SubmissionModel.objects.create(input=input_data)
output_data = sorted(array)
sub.output = json.dumps(output_data)
sub.save()
return output_data
@rpc(Integer, Integer, _returns=Iterable(Submission))
def list_submissions(ctx, limit, offset):
limit, offset = limit or 20, offset or 0 # default
return SubmissionModel.objects.all()[offset : offset + limit]
@rpc(Integer, _returns=Submission)
def get_submission(ctx, pk):
try:
return SubmissionModel.objects.get(pk=pk)
except SubmissionModel.DoesNotExist:
raise ResourceNotFoundError("submission")
@rpc(Submission, _returns=Submission)
def create_submission(ctx, submission):
try:
return SubmissionModel.objects.create(**submission.as_dict())
except IntegrityError:
raise ResourceAlreadyExistsError("Submission")
@rpc(Submission, _returns=Submission)
def update_submission(ctx, submission):
return SubmissionModel.objects.filter(pk=submission.pk).update(
**submission.as_dict()
)
app = Application(
[
SortService,
],
"soap.app.soap",
in_protocol=Soap11(validator="lxml"),
out_protocol=Soap11(),
)
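# Client-side usage sketch (an assumption; `zeep` and the URL are not part of
# this file). With the app served behind Django/WSGI, the generated WSDL could
# be consumed like:
#
#   from zeep import Client
#   client = Client("http://localhost:8000/soap/?wsdl")  # hypothetical URL
#   print(client.service.sort([3, 1, 2]))                # -> [1, 2, 3]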
|
Vixx-X/SOAP-REST-MICROSERVICES
|
SOAP/soap/soap/apps/soap/views.py
|
views.py
|
py
| 1,975
|
python
|
en
|
code
| 0
|
github-code
|
6
|
22077450147
|
n = int(input())
while n > 0:
n = n - 1
l = []
s = 0
m = int(input())
i = 1
while i <= m:
if s + i <= m:
s += i
l.append(i)
i *= 2
if s < m:
l.append(m - s)
l1 = sorted(l)
print(len(l1) - 1)
for j in range(1, len(l1)):
print(l1[j] - l1[j - 1], end=" ")
print()
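# The loop above greedily takes powers of two (1, 2, 4, ...) while their sum
# stays <= m, then appends one remainder term so the parts sum to exactly m;
# the printed values are the consecutive differences of the sorted parts.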
|
yunusergul/codeforcesExamples
|
Codeforces Round #638 (Div. 2)/D. Phoenix and Science.py
|
D. Phoenix and Science.py
|
py
| 361
|
python
|
en
|
code
| 0
|
github-code
|
6
|
4680383558
|
from fabric.api import *
# reconsider changing the execution model
env.user = 'root'
env.colorize_errors = True
for line in open('servers_list', 'r'):
    env.hosts.append(line.replace('\n', ''))
def install_key():
"""copy ssh public key"""
run('rm -rf ~/.ssh ; mkdir -p ~/.ssh')
put('fabric/concat.pub', '~/.ssh/authorized_keys')
put('fabric/id_rsa.pub', '~/.ssh')
put('fabric/id_rsa', '~/.ssh')
run('chmod 600 ~/.ssh/id_rsa')
@parallel
def search(arg):
"""search any file you want across servers"""
run('updatedb')
with settings(warn_only=True):
run('locate -i %s' % arg)
@parallel
def load():
"""check the load across servers"""
run('vnstat -d|tail -n7')
@parallel
def update_system():
run('apt-get update -y')
run('apt-get dist-upgrade -y --force-yes')
def update_git():
run('git pull')
@parallel
def update_lighttpd():
run('systemctl stop lighttpd')
run('mkdir /backup;mv /usr/local/lighttpd/{lighttpd.conf,logs,rutorrent_passwd} /backup/')
run('./autodl-setup --lighttpd')
run('systemctl stop lighttpd')
run('rm -r /usr/local/lighttpd/logs;mv /backup/* /usr/local/lighttpd/;rm -r /backup')
run('pkill lighttpd')
run('systemctl start lighttpd')
@parallel
def update_rutorrent():
with cd('/var/rutorrent/rutorrent'):
run('git pull')
@parallel
def update_rtorrent():
run('./autodl-setup --rtorrent')
def install(*args):
run('aptitude -y install %s' % ' '.join(args))
def install_base():
# modify /etc/proftpd.conf file to jail user
update_system()
# two times acceptance for proftpd
install('htop', 'iotop', 'nethogs', 'vnstat', 'git', 'mc', 'proftpd',
'smartmontools', 'deborphan', 'mlocate', 'hdparm', 'vim', 'screen',
'unattended-upgrades')
#run("if [ -d ~/.ssh ] ; then rm -rf ~/.ssh/* ; else mkdir ~/.ssh ; fi")
install_key()
#run('chmod 600 ~/.ssh/id_rsa')
# get rid of this yes prompt (| yes - probably a fix)
run('yes | git clone https://dolohow@bitbucket.org/dolohow/server-file-configuration.git foo')
run('mv foo/* foo/.git* .')
run('rm -rf foo')
run('dpkg-reconfigure -plow unattended-upgrades')
def install_autodl_irssi():
# never tested if works
install('libarchive-zip-perl', 'libnet-ssleay-perl', 'libhtml-parser-perl',
'libxml-libxml-perl', 'libjson-perl', 'libjson-xs-perl',
'libxml-libxslt-perl')
with cd('/var/rutorrent/rutorrent/plugins'):
run('svn co https://autodl-irssi.svn.sourceforge.net/svnroot/autodl-irssi/trunk/rutorrent/autodl-irssi')
run('mv autodl-irssi/_conf.php autodl-irssi/conf.php')
run('chown -R lighttpd:lighttpd autodl-irssi')
run('chmod 640 autodl-irssi/conf.php')
run('useradd --create-home irssi')
run('su irssi')
run('mkdir -p ~/.irssi/scripts/autorun')
run('cd ~/.irssi/scripts')
run('wget https://sourceforge.net/projects/autodl-irssi/files/autodl-irssi-v1.10.zip')
run('unzip -o autodl-irssi-v*.zip')
run('rm autodl-irssi-v*.zip')
run('cp autodl-irssi.pl autorun/')
run('mkdir -p ~/.autodl')
run('touch ~/.autodl/autodl.cfg')
def install_vnc():
update_system()
run('wget http://installer.jdownloader.org/JD2Setup_x64.sh')
install('openjdk-7-jre', 'tightvncserver', 'lxde-core', 'leafpad',
'lxterminal', 'filezilla', 'iceweasel', 'chromium-browser',
'xarchiver', 'rar', 'gpicview', 'autocutsel')
def install_rtorrent():
# additional deps:
# ffmpeg, mediainfo
with cd('~/'):
run('./autodl-setup -w --lighttpd --rtorrent --rutorrent')
def install_deluge(version):
install('python', 'python-twisted', 'python-twisted-web',
'python-openssl', 'python-simplejson', 'python-setuptools',
'intltool', 'python-xdg', 'python-chardet', 'geoip-database',
'python-libtorrent', 'python-notify', 'python-pygame',
'python-glade2', 'librsvg2-common', 'xdg-utils', 'python-mako')
with cd('~/'):
run('wget http://download.deluge-torrent.org/source/deluge-%s.tar.bz2' % version)
run('tar xvjf deluge-%s.tar.bz2' % version)
run('rm deluge-%s.tar.bz2' % version)
with cd('deluge-%s' % version):
run('python setup.py clean -a')
run('python setup.py build')
run('python setup.py install')
run('python setup.py install_data')
run('rm -r deluge-%s' % version)
def install_transmission():
install('transmission-daemon')
def install_quota(arg, version):
# 1.4.13
# TODO: Edit fstab file
install('quota')
run('/etc/init.d/quota stop')
if arg == 'op' or arg == 's4u':
run('mount -o remount /')
if arg == 'ovh':
run('mount -o remount /home')
if arg == 's4u':
run('mount -o remount /home2')
run('quotacheck -avugm')
run('/etc/init.d/quota start')
run('wget https://github.com/ekenberg/quotatool/archive/v%s.zip' % version)
run('unzip v%s.zip' % version)
with cd('quotatool-%s' % version), settings(warn_only=True):
run('./configure')
run('make -j2')
run('make install')
run('rm -r *%s*' % version)
def install_openvpn():
install('openvpn', 'easy-rsa')
run('mkdir /etc/openvpn/easy-rsa')
run('cp -r /usr/share/doc/openvpn/examples/easy-rsa/2.0/* \
/etc/openvpn/easy-rsa')
with cd('/etc/openvpn/easy-rsa'):
run('source vars')
run('./clean-all')
run('./build-ca')
run('./build-key-server server')
run('./build-dh')
# with cd('/etc/openvpn/easy-rsa/keys'):
# run('cp server.key server.crt ca.crt dh1024.pem /etc/openvpn')
run('iptables -t nat -A POSTROUTING -s 10.8.0.0/24 -o eth0 \
-j MASQUERADE')
# edit /etc/sysctl.conf: net.ipv4.ip_forward = 1
run('echo 1 > /proc/sys/net/ipv4/ip_forward')
put('openvpn/server.conf', '/etc/openvpn/')
run('/etc/init.d/openvpn start')
def install_wine():
run('dpkg --add-architecture i386')
update_system()
install('wine-bin:i386')
@parallel
def set_protection():
run('chmod 770 /usr/bin/htop /usr/bin/top /bin/ps')
with settings(warn_only=True):
run('chmod 711 /home /home2')
def set_locale():
run('echo LANG="en_GB.UTF-8" > /etc/default/locale')
run('echo en_GB.UTF-8 UTF-8 > /etc/locale.gen')
run('locale-gen')
def set_cron():
run('echo -e "\n" >> /etc/crontab')
run('echo "*/5 * * * * root /root/startrt start all > /dev/null" >> /etc/crontab')
|
dolohow/torrents-shell-utilities
|
fabfile.py
|
fabfile.py
|
py
| 6,558
|
python
|
en
|
code
| 0
|
github-code
|
6
|
24325824848
|
import cv2
import numpy as np
from scipy.ndimage.measurements import label
from code.features import FeatureExtractor
from collections import deque
HEAT_INCREMENT = 10
class VehicleDetector:
def __init__(self, svc, scaler, n_rows, n_cols, config, buffer_size = 8):
self.svc = svc
self.scaler = scaler
self.n_rows = n_rows
self.n_cols = n_cols
#self.orientations = config["orientations"]
self.pix_per_cell = config["pix_per_cell"]
self.cell_per_block = config["cell_per_block"]
self.spatial_size = config["spatial_size"]
self.histogram_bins = config["histogram_bins"]
self.window = config["window"]
n_rows_min = int(n_rows / 1.8)
n_cols_min = 100
self.search_parameters = [(n_rows_min, (n_rows_min + 200), n_cols // 2, n_cols, 1.5, 2),
(n_rows_min, (n_rows_min + 250), n_cols_min, n_cols, 2, 1)]
self.config = config
self.heatmap_buffer = deque(maxlen = buffer_size)
self.feature_extractor = FeatureExtractor(config)
def _image_region_search(self, image_region, v_min, h_min, scale, cells_per_step, cpu_pool = None):
if scale != 1.0:
if scale > 1.0:
interpolation = cv2.INTER_AREA
else:
interpolation = cv2.INTER_LINEAR
image_region = cv2.resize(image_region, (np.int(image_region.shape[1] / scale), np.int(image_region.shape[0] / scale)), interpolation = interpolation)
n_hblocks = (image_region.shape[1] // self.pix_per_cell) - self.cell_per_block + 1
n_vblocks = (image_region.shape[0] // self.pix_per_cell) - self.cell_per_block + 1
n_blocks_per_window = (self.window // self.pix_per_cell) - self.cell_per_block + 1
h_steps = (n_hblocks - n_blocks_per_window) // cells_per_step + 1
v_steps = (n_vblocks - n_blocks_per_window) // cells_per_step + 1
windows = []
predictions = []
for h_step in range(h_steps):
for v_step in range(v_steps):
h_pos = h_step * cells_per_step
v_pos = v_step * cells_per_step
window_min_h = h_pos * self.pix_per_cell
window_min_v = v_pos * self.pix_per_cell
image_window = image_region[window_min_v:window_min_v + self.window , window_min_h:window_min_h + self.window]
if (image_window.shape[0] < self.window) or (image_window.shape[1] < self.window):
                    image_window = cv2.resize(image_window, (self.window, self.window), interpolation = cv2.INTER_LINEAR)
features = self.feature_extractor.extract_image_features(image_window, cpu_pool = cpu_pool)
features = self.scaler.transform(features.reshape(1, -1))
prediction = self.svc.predict(features)[0]
window_scale = np.int(self.window * scale)
top_left = (np.int(window_min_h * scale) + h_min, np.int(window_min_v * scale) + v_min)
bottom_right = (top_left[0] + window_scale, top_left[1] + window_scale)
windows.append((top_left, bottom_right))
predictions.append(prediction)
return windows, predictions
def _image_search(self, image, search_parameters, cpu_pool = None):
windows = []
predictions = []
for v_min, v_max, h_min, h_max, scale, cells_per_step in search_parameters:
image_region = image[v_min:v_max, h_min:h_max, :]
_windows, _predictions = self._image_region_search(image_region, v_min, h_min, scale, cells_per_step, cpu_pool = cpu_pool)
windows.append(_windows)
predictions.append(_predictions)
# Flatten lists
windows = [item for sublist in windows for item in sublist]
predictions = [item for sublist in predictions for item in sublist]
return windows, predictions
def _make_heatmap(self, windows, predictions):
        heatmap = np.zeros((self.n_rows, self.n_cols), dtype = float)
n_samples = len(windows)
for i in range(n_samples):
if predictions[i] == 1:
window = windows[i]
heatmap[window[0][1]:window[1][1], window[0][0]:window[1][0]] += HEAT_INCREMENT
return heatmap
def _bounding_boxes(self, heatmap, min_width, min_height):
labels = label(heatmap)
bounding_boxes = []
for car_n in range(1, labels[1] + 1):
tmp = (labels[0] == car_n).nonzero()
nonzero_x = np.array(tmp[1])
nonzero_y = np.array(tmp[0])
top_left = (np.min(nonzero_x), np.min(nonzero_y))
bottom_right = (np.max(nonzero_x), np.max(nonzero_y))
width = bottom_right[0] - top_left[0]
height = bottom_right[1] - top_left[1]
if (width >= min_width) and (height >= min_height):
bounding_boxes.append((top_left, bottom_right))
return bounding_boxes
def detect(self, image, cpu_pool = None):
windows, predictions = self._image_search(image, self.search_parameters, cpu_pool = cpu_pool)
heatmap = self._make_heatmap(windows, predictions)
self.heatmap_buffer.append(heatmap)
if len(self.heatmap_buffer) > 1:
heatmap = np.average(self.heatmap_buffer, axis = 0)
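        # Suppress weak responses: a pixel must be covered by roughly three
        # positive windows (3 * HEAT_INCREMENT) to survive thresholding.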
heatmap[heatmap < 3 * HEAT_INCREMENT] = 0
heatmap = np.clip(heatmap, 0, 255)
bounding_boxes = self._bounding_boxes(heatmap, (0.8 * self.window), (0.5 * self.window))
return bounding_boxes
|
olasson/SDCND-T1-P5-VehicleDetection
|
code/detect.py
|
detect.py
|
py
| 5,703
|
python
|
en
|
code
| 0
|
github-code
|
6
|
13279743396
|
import datetime
import matplotlib.pyplot as plt
import matplotlib.dates
import pandas as pd
import numpy as np
import os.path
class Graphs:
def __init__(self, data_base):
self.data_base = data_base
def overall_results_per(self):
correct = self.data_base["correct"].sum() / len(self.data_base)
wrong = (self.data_base["correct"]==False).sum() / len(self.data_base)
results = [correct, wrong]
outcome = (0.30, 0.50)
fig, ax = plt.subplots()
ax.grid(axis="y", zorder=0, color="#9698A1")
width = 0.15
bars = ax.bar(outcome, results, width, zorder=3)
bars[0].set_color('lightseagreen')
bars[1].set_color('coral')
ax.set_xticks((0.375, 0.575))
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticklabels(('Right','Wrong'))
fig.suptitle("Percentage Right/Wrong", fontsize=15)
#plt.show()
def results_per_gen(self):
grouped = (self.data_base["correct"]==True).groupby(self.data_base["article"])
correct = grouped.mean()
incorrect = 1 - grouped.mean()
        gender_track = pd.concat([correct, incorrect])
fig, ax = plt.subplots()
ind_der = (1, 1.25)
ind_die = (1.75, 2)
ind_das = (2.50, 2.75)
width = 0.25
ax.grid(axis="y", zorder=0, color="#9698A1")
bars_der = ax.bar(ind_der, gender_track["der"], width)
bars_der[0].set_color('lightseagreen')
bars_der[1].set_color('coral')
bars_die = ax.bar(ind_die, gender_track["die"], width)
bars_die[0].set_color('lightseagreen')
bars_die[1].set_color('coral')
bars_das = ax.bar(ind_das, gender_track["das"], width)
bars_das[0].set_color('lightseagreen')
bars_das[1].set_color('coral')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks((1.25, 2, 2.75))
ax.set_xticklabels(('der','die', 'das'))
ax.set_xlim([0.9, 3.1])
fig.suptitle("Percentage Right/Wrong by Gender", fontsize=15)
# plt.show()
def results_per_date(self):
grouped = (self.data_base["correct"]==True).groupby(self.data_base["date"])
correct = grouped.mean()
wrong = 1 - grouped.mean()
        data_date = pd.concat([correct, wrong])
fig, ax = plt.subplots()
x_loc = np.array([0, 0.15])
ax.grid(axis="y", zorder=0, color="#9698A1")
ticks = []
dates_played = []
for date in pd.unique(data_date.index):
width = 0.15
x_loc += 0.75
bars_date = ax.bar(x_loc, data_date[date], width, zorder=3)
bars_date[0].set_color('lightseagreen')
bars_date[1].set_color('coral')
ticks.append((width+x_loc)[0])
dates_played.append(datetime.datetime.strptime(date, '%d-%m-%Y' ).strftime("%d %b"))
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks(ticks)
ax.set_xticklabels(dates_played)
fig.suptitle("Right/Wrong by date", fontsize=15)
ax.set_xlim([ax.get_xlim()[0], ax.get_xlim()[1]+0.15])
# plt.show()
def wrong_rank(self):
wrong_words = self.data_base[self.data_base["correct"]==False]
wrong_by_word = wrong_words.groupby(wrong_words["wort"])
func = {"correct": "count", "article": lambda x: x.iloc[0]} ## "article": Getting the first value of entry
wrong_by_word = wrong_by_word.agg(func).rename(columns = {"correct": "wrong"})
wrong_by_word = wrong_by_word.reset_index()
wrong_ones_sort = wrong_by_word.sort_values(['wrong', 'wort'], ascending = [0, 0])
first_twenty_wrong = wrong_ones_sort[0:20]
col_num = np.arange(first_twenty_wrong.shape[0])
fig, ax = plt.subplots(figsize=(8, 6))
width = 0.65
        bars_wrong = ax.bar(col_num, first_twenty_wrong["wrong"], width,
                            color='powderblue', linewidth=0.5)
ax.set_xlim([-0.5, ax.get_xlim()[1]])
ax.set_ylim([0, ax.get_ylim()[1] +1])
ax.set_title('Ranking of wrong words', fontsize=15)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.grid(axis="y", zorder=0, color="#9698A1")
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.set_yticks(np.arange(first_twenty_wrong.iloc[-1]["wrong"], first_twenty_wrong.iloc[0]["wrong"]+2, 1.0))
ax.set_xticks(col_num + 0.325)
ax.set_xticklabels(list(first_twenty_wrong['wort']))
plt.setp(ax.get_xticklabels(), rotation=90)
for article in range(len(list(first_twenty_wrong['article']))):
ax.text(article + 0.4, 0.15 , list(first_twenty_wrong['article'])[article], rotation= 90,
horizontalalignment='center',
verticalalignment='center', fontsize=12)
# plt.tight_layout()
# plt.show()
def daily_stats(self):
"""Overall right/wrong percent by date"""
grouped = (self.data_base["correct"]==True).groupby(self.data_base["date"])
correct = grouped.mean().reset_index()
correct["wrong"] = 1 - correct["correct"]
correct["date"] = pd.to_datetime(correct["date"], dayfirst=True)
correct = correct.sort_values("date")
fig, ax = plt.subplots()
left_limit = (datetime.datetime.strptime(pd.unique(self.data_base["date"])[0], '%d-%m-%Y' ) -
datetime.timedelta(days= 1)).date()
right_limit = (datetime.datetime.strptime(pd.unique(self.data_base["date"])[-1], '%d-%m-%Y' )
+ datetime.timedelta(days=1)).date()
ax.plot(correct['date'], correct['correct'], marker = '.', color='lightseagreen',
ms = 15, lw = 2, linestyle= '-' , label="correct" )
ax.plot(correct['date'], correct['wrong'], color='coral', marker = '.',
ms = 15, lw = 2, linestyle= '-', label="wrong" )
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.grid(axis="y", zorder=0, color="#9698A1")
ax.set_xticks((correct['date'].values))
ax.set_xlim([left_limit, right_limit])
ax.set_ylim([0., 1.])
ax.legend(loc='upper right').get_frame().set_alpha(0.3)
ax.set_title('Daily Stats', fontsize=15)
ax.set_xticklabels(correct['date'].map(lambda x: x.strftime("%d %b")))
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%d \n %b'))
def daily_stats_per_gender(self):
"""Overall right/wrong percent by date and gender"""
grouped = (self.data_base["correct"]==True).groupby(
[self.data_base["date"], self.data_base["article"]])
correct_gender = grouped.mean().reset_index()
correct_gender["wrong"] = 1 - correct_gender["correct"]
correct_gender = correct_gender.set_index(["date", "article"]).unstack().reset_index()
correct_gender["date"] = pd.to_datetime(correct_gender["date"], dayfirst=True)
correct_gender = correct_gender.sort_values("date")
left_limit = (datetime.datetime.strptime(pd.unique(self.data_base["date"])[0], '%d-%m-%Y' ) -
datetime.timedelta(days= 1)).date()
right_limit = (datetime.datetime.strptime(pd.unique(self.data_base["date"])[-1], '%d-%m-%Y' )
+ datetime.timedelta(days=1)).date()
fig= plt.figure()
ax = fig.add_axes([0.1, 0.2, 0.85, 0.70])
lab_desc, lab_chars = [], []
gen_spec = {"der":"#6191C5" , "die":"#D56054" , "das": "#69B17D"}
for gender, color_gen in gen_spec.items():
ax.plot(correct_gender["date"], correct_gender["correct"][gender], color= color_gen, marker = '.',
ms = 12, lw = 2, linestyle= '-' )
ax.plot(correct_gender["date"], correct_gender["wrong"][gender], color= color_gen, marker = '.',
ms = 12, lw = 2, linestyle= '--')
leg_char_corr = plt.Line2D((0,1),(0,0), color=color_gen, marker='.', linestyle='-')
leg_char_wrong = plt.Line2D((0,1),(0,0), color=color_gen, marker='.', linestyle='--')
lab_chars.extend([leg_char_corr, leg_char_wrong])
lab_desc.extend([gender + " right", gender + " wrong"])
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.grid(axis="y", zorder=0, color="#9698A1")
ax.set_xticks((correct_gender['date'].values))
ax.set_xlim([left_limit, right_limit])
ax.set_ylim([0., 1.1])
ax.set_title('Daily Stats per gender', fontsize=15)
ax.set_xticklabels(correct_gender['date'].map(lambda x: x.strftime("%d %b")))
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%d \n %b'))
ax.legend(lab_chars, lab_desc, loc='upper center',
bbox_to_anchor=(0.5, -0.15), fancybox=True, shadow=True, ncol=3)
def generate_report(self):
if not os.path.exists("img"):
os.makedirs("img")
self.overall_results_per()
plt.savefig('img/overall_results_per', bbox_inches='tight')
self.results_per_gen()
plt.savefig('img/results_per_gen', bbox_inches='tight')
self.results_per_date()
plt.savefig('img/results_per_date', bbox_inches='tight')
self.wrong_rank()
plt.savefig('img/wrong_rank', bbox_inches='tight')
self.daily_stats()
plt.savefig('img/daily_stats', bbox_inches='tight')
self.daily_stats_per_gender()
plt.savefig('img/daily_stats_per_gender', bbox_inches='tight')
|
jlcoto/der_die_das
|
grapher_gen_results.py
|
grapher_gen_results.py
|
py
| 10,304
|
python
|
en
|
code
| 0
|
github-code
|
6
|
8103653348
|
#
# @lc app=leetcode id=88 lang=python3
#
# [88] Merge Sorted Array
#
# @lc code=start
class Solution:
def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
"""
Do not return anything, modify nums1 in-place instead.
"""
'''
for j in range(n):
nums1[m+j] = nums2[j]
nums1.sort()
'''
i = m - 1
j = n - 1
        k = m + n - 1
while j >= 0:
if i >= 0 and nums1[i] > nums2[j]:
nums1[k] = nums1[i]
i -= 1
else:
nums1[k] = nums2[j]
j -= 1
k -= 1
# @lc code=end
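# Example trace: nums1 = [1,2,3,0,0,0], m = 3, nums2 = [2,5,6], n = 3.
# Filling from the back (k = 5) places 6, 5, 3, 2, 2 and leaves the original
# leading 1 untouched, so nums1 becomes [1,2,2,3,5,6].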
|
HongyuZhu999/LeetCode
|
88.merge-sorted-array.py
|
88.merge-sorted-array.py
|
py
| 683
|
python
|
en
|
code
| 0
|
github-code
|
6
|
41389643477
|
from ciscoconfparse import CiscoConfParse
import hashlib
import difflib
class audit:
def configuration_must_have(self,input_parent,input_config,show_command_output,show_command):
audit = False
actual_child_config = "No Configuration Found: {}".format(input_config)
parent_config = ""
actual_parent_config = ""
self.parse = CiscoConfParse(show_command_output.splitlines())
parent_obj = self.parse.find_objects(r"{}".format(input_parent))
for obj in parent_obj:
audit = obj.has_child_with(r"{}".format(input_config))
if audit == True:
actual_parent_config = obj.text
actual_child_config = " ".join([ x.text.strip() for x in obj.re_search_children(r"{}".format(input_config))])
break
expect_configuration = "{}\n{}".format(input_parent,input_config)
actual_configuration = "{}\n{}".format(actual_parent_config,actual_child_config)
if audit != True:
raise AssertionError("\n\nExpected Output: \n\n{}\n\nActual Output: \n\n{}\n\n".format(expect_configuration,actual_configuration))
else:
print("\n\nExpected Output: \n\n{}\n\nActual Output: \n\n{}\n\n".format(expect_configuration,actual_configuration))
    @staticmethod
    def sanitize(inputstring):
output = ""
for x in inputstring.split("\n"):
output = output + x.strip() + "\n"
return output
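    # e.g. audit.sanitize("  a\n b ") -> "a\nb\n": each line is stripped and
    # re-terminated with a newline.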
def hash_diff(self,expected_configuration,actual_configuration):
actual_configuration = "\n".join(actual_configuration.split("\n")[2:-2]).strip()
actual_configuration = audit.sanitize(actual_configuration)
expected_configuration = audit.sanitize(expected_configuration)
expected_hash = hashlib.sha224(expected_configuration.encode()).hexdigest()
actual_hash = hashlib.sha224(actual_configuration.encode()).hexdigest()
d = difflib.Differ()
diff = d.compare(actual_configuration.split("\n"), expected_configuration.split("\n"))
diff_output = '\n'.join(diff)
if expected_hash == actual_hash:
print("\n\nExpected Output: \n\n{}\n\nActual Output: \n\n{}\n\n".format(expected_configuration,actual_configuration))
else:
raise AssertionError("\n\nExpected Output: \n\n{}\n\nActual Output: \n\n{}\n\nDifference: \n\n{}\n\n".format(expected_configuration,actual_configuration,diff_output))
|
johnanthonyraluta/Port_Mapping
|
Resources/cisco/ios_xr/audit.py
|
audit.py
|
py
| 2,484
|
python
|
en
|
code
| 0
|
github-code
|
6
|
11964989637
|
from django.urls import path
from .views import *
app_name = 'blog'
urlpatterns = [
path('', posts_list, name='post_list'),
path('tag/<slug:tag_slug>', posts_list, name='post_list_by_tag'),
# path('', PostsList.as_view(), name='post_list'),
path('p/<slug:slug>/<int:year>/<int:month>/<int:day>/', post_details, name='post_details'),
path('p/share/<int:post_id>/', share_post, name='share_post'),
path('search/', search_posts, name='search_posts'),
]
|
black15/django-blog-v2
|
blog/urls.py
|
urls.py
|
py
| 500
|
python
|
en
|
code
| 2
|
github-code
|
6
|
10713095984
|
from typing import Set
from pychu.tlogic.tcards import Card
from pychu.tpattern.multiples import TMultiFinder
from pychu.tpattern.tpatternfinder import TPatternFinder
def find_bombs(cards):
buffer = []
out = []
first = True
for card in cards:
if first:
first = False
buffer.append(card)
else:
            if lastCard.height == card.height:
buffer.append(card)
else:
if len(buffer) == 4:
out.append(buffer)
buffer = [card,]
lastCard = card
    # a four-of-a-kind at the very end of the hand must also be flushed
    if len(buffer) == 4:
        out.append(buffer)
    return out
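# Usage sketch (not part of the original module; assumes Card exposes a numeric
# `height` and that the input is sorted by height, since the scan relies on
# equal heights being adjacent):
#   bombs = find_bombs(sorted(hand, key=lambda c: c.height))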
class PBombs(TPatternFinder):
    def recognize(self, cards: Set[Card], phoenix=False) -> list:
if not phoenix:
return find_bombs(cards)
else:
mr = TMultiFinder(4, phoenix)
pass
|
akkurat/tichu
|
python/src/pychu/tpattern/bombs.py
|
bombs.py
|
py
| 830
|
python
|
en
|
code
| 0
|
github-code
|
6
|
4333793200
|
# ruff: noqa: FBT001
# note: only used items are defined here, with used typing
import sys
from typing import Iterator, Optional, Sequence, TypeVar
from xml.etree.ElementTree import Element, ParseError
if sys.version_info < (3, 8): # pragma: no cover
from typing_extensions import Protocol
else: # pragma: no cover
from typing import Protocol
_T_co = TypeVar("_T_co", covariant=True)
class _SupportsRead(Protocol[_T_co]):
def read(self, size: Optional[int] = None) -> _T_co: ...
def iterparse(
source: _SupportsRead[bytes],
events: Sequence[str] | None = None,
forbid_dtd: bool = False,
forbid_entities: bool = True,
forbid_external: bool = False,
) -> Iterator[tuple[str, Element]]: ...
class DefusedXmlException(ValueError): ... # noqa: N818
__all__ = ("DefusedXmlException", "Element", "iterparse", "ParseError")
|
Rogdham/bigxml
|
stubs/defusedxml/ElementTree.pyi
|
ElementTree.pyi
|
pyi
| 858
|
python
|
en
|
code
| 17
|
github-code
|
6
|
26040212886
|
from __future__ import annotations
import dataclasses
import logging
import os
from abc import ABCMeta
from dataclasses import dataclass
from typing import Any, ClassVar, Generic, Iterable, Mapping, Type, TypeVar
import yaml
from typing_extensions import final
from pants.backend.helm.subsystems.helm import HelmSubsystem
from pants.backend.helm.utils.yaml import snake_case_attr_dict
from pants.base.glob_match_error_behavior import GlobMatchErrorBehavior
from pants.core.util_rules import external_tool
from pants.core.util_rules.external_tool import (
DownloadedExternalTool,
ExternalToolRequest,
TemplatedExternalTool,
)
from pants.engine import process
from pants.engine.collection import Collection
from pants.engine.engine_aware import EngineAwareParameter, EngineAwareReturnType
from pants.engine.env_vars import EnvironmentVars, EnvironmentVarsRequest
from pants.engine.environment import EnvironmentName
from pants.engine.fs import (
EMPTY_DIGEST,
AddPrefix,
CreateDigest,
Digest,
DigestContents,
DigestSubset,
Directory,
FileDigest,
MergeDigests,
PathGlobs,
RemovePrefix,
Snapshot,
)
from pants.engine.platform import Platform
from pants.engine.process import Process, ProcessCacheScope
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.unions import UnionMembership, union
from pants.option.subsystem import Subsystem
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.strutil import bullet_list, pluralize
logger = logging.getLogger(__name__)
_HELM_CACHE_NAME = "helm"
_HELM_CACHE_DIR = "__cache"
_HELM_CONFIG_DIR = "__config"
_HELM_DATA_DIR = "__data"
# ---------------------------------------------
# Helm Plugins Support
# ---------------------------------------------
class HelmPluginMetadataFileNotFound(Exception):
def __init__(self, plugin_name: str) -> None:
super().__init__(f"Helm plugin `{plugin_name}` is missing the `plugin.yaml` metadata file.")
class HelmPluginMissingCommand(ValueError):
def __init__(self, plugin_name: str) -> None:
super().__init__(
f"Helm plugin `{plugin_name}` is missing either `platformCommand` entries or a single `command` entry."
)
class HelmPluginSubsystem(Subsystem, metaclass=ABCMeta):
"""Base class for any kind of Helm plugin."""
plugin_name: ClassVar[str]
class ExternalHelmPlugin(HelmPluginSubsystem, TemplatedExternalTool, metaclass=ABCMeta):
"""Represents the subsystem for a Helm plugin that needs to be downloaded from an external
source.
    For declaring an external Helm plugin, extend this class providing a value
    for the `plugin_name` class attribute and implement the rest of it as you
    would any other `TemplatedExternalTool`.
This class is meant to be used in combination with `ExternalHelmPluginBinding`, as
in the following example:
class MyHelmPluginSubsystem(ExternalHelmPlugin):
plugin_name = "myplugin"
options_scope = "my_plugin"
help = "..."
...
class MyPluginBinding(ExternalHelmPluginBinding[MyPluginSubsystem]):
plugin_subsystem_cls = MyHelmPluginSubsystem
With that class structure, then define a `UnionRule` so Pants can find this plugin and
use it in the Helm setup:
@rule
def download_myplugin_plugin_request(
_: MyPluginBinding, subsystem: MyHelmPluginSubsystem
) -> ExternalHelmPluginRequest:
return ExternalHelmPluginRequest.from_subsystem(subsystem, platform)
def rules():
return [
*collect_rules(),
UnionRule(ExternalHelmPluginBinding, MyPluginBinding),
]
"""
@dataclass(frozen=True)
class HelmPluginPlatformCommand:
os: str
arch: str
command: str
@classmethod
def from_dict(cls, d: dict[str, Any]) -> HelmPluginPlatformCommand:
return cls(**snake_case_attr_dict(d))
@dataclass(frozen=True)
class HelmPluginInfo:
name: str
version: str
usage: str | None = None
description: str | None = None
ignore_flags: bool | None = None
command: str | None = None
platform_command: tuple[HelmPluginPlatformCommand, ...] = dataclasses.field(
default_factory=tuple
)
hooks: FrozenDict[str, str] = dataclasses.field(default_factory=FrozenDict)
@classmethod
def from_dict(cls, d: dict[str, Any]) -> HelmPluginInfo:
platform_command = [
HelmPluginPlatformCommand.from_dict(d) for d in d.pop("platformCommand", [])
]
hooks = d.pop("hooks", {})
attrs = snake_case_attr_dict(d)
return cls(platform_command=tuple(platform_command), hooks=FrozenDict(hooks), **attrs)
@classmethod
def from_bytes(cls, content: bytes) -> HelmPluginInfo:
return HelmPluginInfo.from_dict(yaml.safe_load(content))
_ExternalHelmPlugin = TypeVar("_ExternalHelmPlugin", bound=ExternalHelmPlugin)
_EHPB = TypeVar("_EHPB", bound="ExternalHelmPluginBinding")
@union(in_scope_types=[EnvironmentName])
@dataclass(frozen=True)
class ExternalHelmPluginBinding(Generic[_ExternalHelmPlugin], metaclass=ABCMeta):
"""Union type allowing Pants to discover global external Helm plugins."""
plugin_subsystem_cls: ClassVar[Type[ExternalHelmPlugin]]
name: str
@final
@classmethod
def create(cls: Type[_EHPB]) -> _EHPB:
return cls(name=cls.plugin_subsystem_cls.plugin_name)
@dataclass(frozen=True)
class ExternalHelmPluginRequest(EngineAwareParameter):
"""Helper class to create a download request for an external Helm plugin."""
plugin_name: str
platform: Platform
_tool_request: ExternalToolRequest
@classmethod
def from_subsystem(
cls, subsystem: ExternalHelmPlugin, platform: Platform
) -> ExternalHelmPluginRequest:
return cls(
plugin_name=subsystem.plugin_name,
platform=platform,
_tool_request=subsystem.get_request(platform),
)
def debug_hint(self) -> str | None:
return self.plugin_name
def metadata(self) -> dict[str, Any] | None:
return {"platform": self.platform, "url": self._tool_request.download_file_request.url}
@dataclass(frozen=True)
class HelmPlugin(EngineAwareReturnType):
info: HelmPluginInfo
platform: Platform
snapshot: Snapshot
@property
def name(self) -> str:
return self.info.name
@property
def version(self) -> str:
return self.info.version
def level(self) -> LogLevel | None:
return LogLevel.DEBUG
def message(self) -> str | None:
return f"Materialized Helm plugin {self.name} with version {self.version} for {self.platform} platform."
def metadata(self) -> dict[str, Any] | None:
return {"name": self.name, "version": self.version, "platform": self.platform}
def artifacts(self) -> dict[str, FileDigest | Snapshot] | None:
return {"content": self.snapshot}
def cacheable(self) -> bool:
return True
class HelmPlugins(Collection[HelmPlugin]):
pass
@rule
async def all_helm_plugins(union_membership: UnionMembership) -> HelmPlugins:
bindings = union_membership.get(ExternalHelmPluginBinding)
external_plugins = await MultiGet(
Get(HelmPlugin, ExternalHelmPluginBinding, binding.create()) for binding in bindings
)
if logger.isEnabledFor(LogLevel.DEBUG.level):
plugins_desc = [f"{p.name}, version: {p.version}" for p in external_plugins]
logger.debug(
f"Downloaded {pluralize(len(external_plugins), 'external Helm plugin')}:\n{bullet_list(plugins_desc)}"
)
return HelmPlugins(external_plugins)
@rule(desc="Download external Helm plugin", level=LogLevel.DEBUG)
async def download_external_helm_plugin(request: ExternalHelmPluginRequest) -> HelmPlugin:
downloaded_tool = await Get(DownloadedExternalTool, ExternalToolRequest, request._tool_request)
plugin_info_file = await Get(
Digest,
DigestSubset(
downloaded_tool.digest,
PathGlobs(
["plugin.yaml", "plugin.yml"],
glob_match_error_behavior=GlobMatchErrorBehavior.error,
description_of_origin=request.plugin_name,
),
),
)
plugin_info_contents = await Get(DigestContents, Digest, plugin_info_file)
if len(plugin_info_contents) == 0:
raise HelmPluginMetadataFileNotFound(request.plugin_name)
plugin_info = HelmPluginInfo.from_bytes(plugin_info_contents[0].content)
if not plugin_info.command and not plugin_info.platform_command:
raise HelmPluginMissingCommand(request.plugin_name)
plugin_snapshot = await Get(Snapshot, Digest, downloaded_tool.digest)
return HelmPlugin(info=plugin_info, platform=request.platform, snapshot=plugin_snapshot)
# ---------------------------------------------
# Helm Binary setup
# ---------------------------------------------
@dataclass(frozen=True)
class HelmBinary:
path: str
env: FrozenDict[str, str]
immutable_input_digests: FrozenDict[str, Digest]
def __init__(
self,
path: str,
*,
helm_env: Mapping[str, str],
local_env: Mapping[str, str],
immutable_input_digests: Mapping[str, Digest],
) -> None:
object.__setattr__(self, "path", path)
object.__setattr__(self, "immutable_input_digests", FrozenDict(immutable_input_digests))
object.__setattr__(self, "env", FrozenDict({**helm_env, **local_env}))
@property
def config_digest(self) -> Digest:
return self.immutable_input_digests[_HELM_CONFIG_DIR]
@property
def data_digest(self) -> Digest:
return self.immutable_input_digests[_HELM_DATA_DIR]
@property
def append_only_caches(self) -> dict[str, str]:
return {_HELM_CACHE_NAME: _HELM_CACHE_DIR}
@dataclass(frozen=True)
class HelmProcess:
argv: tuple[str, ...]
input_digest: Digest
description: str = dataclasses.field(compare=False)
level: LogLevel
extra_env: FrozenDict[str, str]
extra_immutable_input_digests: FrozenDict[str, Digest]
extra_append_only_caches: FrozenDict[str, str]
cache_scope: ProcessCacheScope | None
timeout_seconds: int | None
output_directories: tuple[str, ...]
output_files: tuple[str, ...]
def __init__(
self,
argv: Iterable[str],
*,
description: str,
input_digest: Digest = EMPTY_DIGEST,
level: LogLevel = LogLevel.INFO,
output_directories: Iterable[str] | None = None,
output_files: Iterable[str] | None = None,
extra_env: Mapping[str, str] | None = None,
extra_immutable_input_digests: Mapping[str, Digest] | None = None,
extra_append_only_caches: Mapping[str, str] | None = None,
cache_scope: ProcessCacheScope | None = None,
timeout_seconds: int | None = None,
):
object.__setattr__(self, "argv", tuple(argv))
object.__setattr__(self, "input_digest", input_digest)
object.__setattr__(self, "description", description)
object.__setattr__(self, "level", level)
object.__setattr__(self, "output_directories", tuple(output_directories or ()))
object.__setattr__(self, "output_files", tuple(output_files or ()))
object.__setattr__(self, "extra_env", FrozenDict(extra_env or {}))
object.__setattr__(
self, "extra_immutable_input_digests", FrozenDict(extra_immutable_input_digests or {})
)
object.__setattr__(
self, "extra_append_only_caches", FrozenDict(extra_append_only_caches or {})
)
object.__setattr__(self, "cache_scope", cache_scope)
object.__setattr__(self, "timeout_seconds", timeout_seconds)
@rule(desc="Download and configure Helm", level=LogLevel.DEBUG)
async def setup_helm(
    helm_subsystem: HelmSubsystem, global_plugins: HelmPlugins, platform: Platform
) -> HelmBinary:
downloaded_binary, empty_dirs_digest = await MultiGet(
        Get(DownloadedExternalTool, ExternalToolRequest, helm_subsystem.get_request(platform)),
Get(
Digest,
CreateDigest(
[
Directory(_HELM_CONFIG_DIR),
Directory(_HELM_DATA_DIR),
]
),
),
)
tool_relpath = "__helm"
immutable_input_digests = {tool_relpath: downloaded_binary.digest}
helm_path = os.path.join(tool_relpath, downloaded_binary.exe)
helm_env = {
"HELM_CACHE_HOME": _HELM_CACHE_DIR,
"HELM_CONFIG_HOME": _HELM_CONFIG_DIR,
"HELM_DATA_HOME": _HELM_DATA_DIR,
}
# Create a digest that will get mutated during the setup process
mutable_input_digest = empty_dirs_digest
# Install all global Helm plugins
if global_plugins:
logger.debug(f"Installing {pluralize(len(global_plugins), 'global Helm plugin')}.")
prefixed_plugins_digests = await MultiGet(
Get(
Digest,
AddPrefix(
plugin.snapshot.digest, os.path.join(_HELM_DATA_DIR, "plugins", plugin.name)
),
)
for plugin in global_plugins
)
mutable_input_digest = await Get(
Digest, MergeDigests([mutable_input_digest, *prefixed_plugins_digests])
)
updated_config_digest, updated_data_digest = await MultiGet(
Get(
Digest,
DigestSubset(mutable_input_digest, PathGlobs([os.path.join(_HELM_CONFIG_DIR, "**")])),
),
Get(
Digest,
DigestSubset(mutable_input_digest, PathGlobs([os.path.join(_HELM_DATA_DIR, "**")])),
),
)
config_subset_digest, data_subset_digest = await MultiGet(
Get(Digest, RemovePrefix(updated_config_digest, _HELM_CONFIG_DIR)),
Get(Digest, RemovePrefix(updated_data_digest, _HELM_DATA_DIR)),
)
setup_immutable_digests = {
**immutable_input_digests,
_HELM_CONFIG_DIR: config_subset_digest,
_HELM_DATA_DIR: data_subset_digest,
}
local_env = await Get(EnvironmentVars, EnvironmentVarsRequest(["HOME", "PATH"]))
return HelmBinary(
path=helm_path,
helm_env=helm_env,
local_env=local_env,
immutable_input_digests=setup_immutable_digests,
)
@rule
async def helm_process(
request: HelmProcess,
helm_binary: HelmBinary,
helm_subsystem: HelmSubsystem,
) -> Process:
global_extra_env = await Get(
EnvironmentVars, EnvironmentVarsRequest(helm_subsystem.extra_env_vars)
)
# Helm binary's setup parameters go last to prevent end users overriding any of its values.
env = {**global_extra_env, **request.extra_env, **helm_binary.env}
immutable_input_digests = {
**request.extra_immutable_input_digests,
**helm_binary.immutable_input_digests,
}
append_only_caches = {**request.extra_append_only_caches, **helm_binary.append_only_caches}
argv = [helm_binary.path, *request.argv]
# A special case for "--debug".
# This ensures that it is applied to all operations in the chain,
# not just the final one.
# For example, we want this applied to the call to `template`, not just the call to `install`
# Also, we can be helpful and automatically forward a request to debug Pants to also debug Helm
debug_requested = "--debug" in helm_subsystem.valid_args() or (
0 < logger.getEffectiveLevel() <= LogLevel.DEBUG.level
)
if debug_requested and "--debug" not in request.argv:
argv.append("--debug")
return Process(
argv,
input_digest=request.input_digest,
immutable_input_digests=immutable_input_digests,
env=env,
description=request.description,
level=request.level,
append_only_caches=append_only_caches,
output_directories=request.output_directories,
output_files=request.output_files,
cache_scope=request.cache_scope or ProcessCacheScope.SUCCESSFUL,
timeout_seconds=request.timeout_seconds,
)
def rules():
return [*collect_rules(), *external_tool.rules(), *process.rules()]
|
pantsbuild/pants
|
src/python/pants/backend/helm/util_rules/tool.py
|
tool.py
|
py
| 16,274
|
python
|
en
|
code
| 2,896
|
github-code
|
6
|
41957058631
|
import unittest
from unittest.mock import Mock
from fhirbug.config import utils
from .resources import sample_settings
utils.default_settings = sample_settings
class TestLazySettings(unittest.TestCase):
def test_config_from_defaults(self):
lazy_setting = utils.LazySettings()
settings = lazy_setting._configure_from_defaults()
self.assertIsInstance(settings, utils.FhirSettings)
        self.assertEqual(vars(settings), {"DEBUG": True, "TESTING": True})
def test_is_configured(self):
lazy_setting = utils.LazySettings()
self.assertFalse(lazy_setting.is_configured())
lazy_setting._configure_from_defaults()
self.assertTrue(lazy_setting.is_configured())
def test_configure(self):
class Mocked(utils.LazySettings):
_configure_from_dict = Mock()
_configure_from_path = Mock()
_configure_from_defaults = Mock()
lazy_setting = Mocked()
test_settings = {"TEST": 5, "SOME": "OTHER", "ignored": True}
lazy_setting.configure(test_settings)
lazy_setting._configure_from_dict.assert_called_once_with(test_settings)
lazy_setting._configure_from_path.assert_not_called()
lazy_setting._configure_from_defaults.assert_not_called()
|
zensoup/fhirbug
|
tests/test_config.py
|
test_config.py
|
py
| 1,284
|
python
|
en
|
code
| 14
|
github-code
|
6
|
22177410667
|
import numpy as np
import pandas as pd
from .weight_base import WeightBase
class WeiAge(WeightBase):
"""
Calculates weights which will make
the age distribution uniform
"""
name = 'wei_age'
def __init__(self,
data: pd.DataFrame):
super(WeiAge, self).__init__(
data,
)
def calculate(self) -> np.ndarray:
values = self._data.age
ages, counts = np.unique(
values,
return_counts=True
)
n_rows = len(self._data)
n_categories = len(ages)
wei = np.zeros(len(self._data))
        for age, count in zip(ages, counts):
            wei[values == age] = n_rows / (count * n_categories)
return wei
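# Sanity sketch (hypothetical data, not part of the original module): for ages
# [20, 20, 30], n_rows = 3 and n_categories = 2, so the weights come out as
# [0.75, 0.75, 1.5]; each age then carries the same total weight of 1.5.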
|
michal-racko/medical-cost
|
src/data_processing/weights/wei_age.py
|
wei_age.py
|
py
| 809
|
python
|
en
|
code
| 0
|
github-code
|
6
|
74538689467
|
"Sample code to get hardware information."
from ctypes import (
c_short,
c_char_p,
byref,
)
from thorlabs_kinesis import benchtop_stepper_motor as bsm
if __name__ == "__main__":
serial_no = c_char_p(bytes("40875459", "utf-8"))
channel = c_short(1)
if bsm.SBC_Open(serial_no) == 0:
print("Is channel valid ", bsm.SBC_IsChannelValid(serial_no, channel))
hw_info = bsm.TLI_HardwareInformation() # container for hw info
err = bsm.SBC_GetHardwareInfoBlock(serial_no, channel, byref(hw_info))
if err == 0:
print("Serial No: ", hw_info.serialNumber)
print("Model No: ", hw_info.modelNumber)
print("Firmware Version: ", hw_info.firmwareVersion)
print("Number of Channels: ", hw_info.numChannels)
print("Type: ", hw_info.type)
else:
print(f"Error getting HW Info Block. Error Code: {err}")
bsm.SBC_Close(serial_no)
|
ekarademir/thorlabs-kinesis
|
examples/ex4_hwinfo_bsc.py
|
ex4_hwinfo_bsc.py
|
py
| 987
|
python
|
en
|
code
| 31
|
github-code
|
6
|
70322594108
|
"""remove_product_rating_score
Revision ID: 177cf327b079
Revises: acf2fa2bcf67
Create Date: 2023-05-26 14:43:55.049871
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "177cf327b079"
down_revision = "acf2fa2bcf67"
branch_labels = None
depends_on = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("product_rating", "score")
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"product_rating",
sa.Column("score", sa.INTEGER(), autoincrement=False, nullable=False),
)
# ### end Alembic commands ###
|
ttq186/DressUp
|
alembic/versions/2023-05-26_remove_product_rating_score.py
|
2023-05-26_remove_product_rating_score.py
|
py
| 741
|
python
|
en
|
code
| 0
|
github-code
|
6
|
43625098034
|
class Solution(object):
# O(n**2)
def shortestPalindrome(self, s):
"""
:type s: str
:rtype: str
"""
i, j = 0, len(s)-1
while j >= 0:
if s[i] == s[j]:
i += 1
j -= 1
if i == len(s):
return s
mid = s[:i]
suffix = s[i:]
return suffix[::-1]+self.shortestPalindrome(mid)+suffix
    # KMP failure-function approach, O(n)
def shortestPalindrome_kmp(self, s):
tmp = s + '#' + s[::-1]
tbl = self.getTable(tmp)
return s[tbl[-1]:][::-1] + s
def getTable(self, s):
tbl = [0]*len(s)
idx = 0
for i in range(1, len(s)):
if s[idx] == s[i]:
tbl[i] = tbl[i-1]+1
idx += 1
else:
idx = tbl[i-1]
while idx > 0 and s[idx] != s[i]:
idx = tbl[idx-1]
if s[idx] == s[i]:
idx += 1
tbl[i] = idx
return tbl
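    # e.g. for s = 'ab', tmp = 'ab#ba' yields tbl = [0, 0, 0, 0, 1]; tbl[-1] = 1
    # means the longest palindromic prefix has length 1, so the answer is
    # 'ab'[1:][::-1] + 'ab' = 'bab'.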
def test(self):
testCases = [
'aacecaaa',
'abcd',
'aacecabccaa',
]
for s in testCases:
print('s: %s' % (s))
result = self.shortestPalindrome(s)
res = self.shortestPalindrome_kmp(s)
print('result: %s' % result)
print('res: %s' % res)
print('-='*20+'-')
if __name__ == '__main__':
Solution().test()
|
MichaelTQ/LeetcodePythonProject
|
solutions/leetcode_0201_0250/LeetCode214_ShortestPalindrome.py
|
LeetCode214_ShortestPalindrome.py
|
py
| 1,464
|
python
|
en
|
code
| 0
|
github-code
|
6
|
6993423096
|
from __future__ import annotations
import math
from typing import TYPE_CHECKING
from adapter.path_adapter import PathAdapter, PathAttributeID
from adapter.straight_adapter import StraightAdapter
from common.image_manager import ImageID
from entity_base.image.image_state import ImageState
from models.path_models.path_segment_state.abstract_segment_state import AbstractSegmentState, SerializedSegmentStateState
from models.path_models.path_segment_state.arc_segment_state import ArcSegmentState
from models.path_models.path_segment_state.bezier_segment_state import BezierSegmentState
from models.path_models.path_segment_state.segment_type import SegmentType
from models.path_models.path_segment_state.straight_segment_state import StraightSegmentState
from models.path_models.segment_direction import SegmentDirection
from entities.root_container.field_container.segment.arc_segment_entity import ArcSegmentEntity
from entities.root_container.field_container.segment.bezier_segment_entity import BezierSegmentEntity
from entities.root_container.field_container.segment.straight_segment_entity import StraightSegmentEntity
from models.project_history_interface import ProjectHistoryInterface
from services.constraint_solver_service import ConstraintSolver
from utility.format_functions import formatDegrees, formatInches
from utility.math_functions import distanceTuples, thetaFromPoints
if TYPE_CHECKING:
from entities.root_container.field_container.field_entity import FieldEntity
from models.path_models.path_node_model import PathNodeModel
from models.path_models.path_model import PathModel
from entity_base.entity import Entity
from models.path_models.path_element_model import PathElementModel, SerializedPathElementState
class SerializedPathSegmentState(SerializedPathElementState):
def __init__(self, direction: SegmentDirection, states: dict[SegmentType, SerializedSegmentStateState], current: SegmentType):
self.direction = direction
self.states = states
self.current = current
def _deserialize(self, pathModel: PathModel) -> 'PathNodeModel':
segment = PathSegmentModel(pathModel)
segment.direction = self.direction
segment.states = {}
for key, value in self.states.items():
segment.states[key] = value.deserialize(segment)
segment.currentStateType = self.current
segment.generateUI()
return segment
def makeAdapterDeserialized(self):
for state in self.states.values():
state.adapter.makeDeserialized()
class PathSegmentModel(PathElementModel):
def makeAdapterSerialized(self):
for state in self.states.values():
state.adapter.makeSerialized()
def _serialize(self) -> SerializedPathSegmentState:
sStates: dict[SegmentType, SerializedSegmentStateState] = {}
for key, value in self.states.items():
sStates[key] = value.serialize()
return SerializedPathSegmentState(self.direction, sStates, self.currentStateType)
def __init__(self, pathModel: PathModel):
super().__init__(pathModel)
self.direction = SegmentDirection.FORWARD
self.states: dict[SegmentType, AbstractSegmentState] = {
SegmentType.STRAIGHT: StraightSegmentState(self),
SegmentType.ARC: ArcSegmentState(self),
SegmentType.BEZIER: BezierSegmentState(self)
}
self.currentStateType = SegmentType.STRAIGHT
self.bConstraints = None # before theta constraint solver
self.aConstraints = None # after theta constraint solver
self.generateUI()
"""
UPDATE methods that update values based on model state
"""
# update thetas based on segment states and segment direction
def updateThetas(self):
# need to recompute state thetas first
self.getState().onUpdate()
self.START_THETA = self.getState().getStartTheta()
self.END_THETA = self.getState().getEndTheta()
# if segment is reversed, flip thetas
if self.getDirection() == SegmentDirection.REVERSE:
self.START_THETA = (self.START_THETA + math.pi) % (2 * math.pi)
self.END_THETA = (self.END_THETA + math.pi) % (2 * math.pi)
self.getAdapter().set(PathAttributeID.THETA1, self.START_THETA, formatDegrees(self.START_THETA))
self.getAdapter().set(PathAttributeID.THETA2, self.END_THETA, formatDegrees(self.END_THETA))
# called when the distance of the segment is changed
def updateDistance(self):
self.DISTANCE = distanceTuples(self.getPrevious().getPosition(), self.getNext().getPosition())
# if segment is reversed, negate distance
if self.getDirection() == SegmentDirection.REVERSE:
self.DISTANCE *= -1
self.getAdapter().set(PathAttributeID.DISTANCE, self.DISTANCE, formatInches(self.DISTANCE))
# Update adapter for endpoint position
def updateEndpointPosition(self, node: PathNodeModel):
# new endpoint position
pos = node.getPosition()
# update the correct endpoint
if self.getPrevious() == node:
x, y = PathAttributeID.X1, PathAttributeID.Y1
else:
x, y = PathAttributeID.X2, PathAttributeID.Y2
# set adapter
self.getAdapter().set(x, pos[0], formatInches(pos[0]))
self.getAdapter().set(y, pos[1], formatInches(pos[1]))
"""
CALLBACK METHODS FOR WHEN THINGS NEED TO BE UPDATED
"""
def onInit(self):
self.updateThetas()
self.updateDistance()
self.updateEndpointPosition(self.getPrevious())
self.updateEndpointPosition(self.getNext())
self.getPrevious().onThetaChange()
self.getNext().onThetaChange()
def onInitSegmentOnly(self):
self.updateThetas()
self.updateDistance()
self.updateEndpointPosition(self.getPrevious())
self.updateEndpointPosition(self.getNext())
# called when a node attached to segment is moved
def onNodePositionChange(self, node: PathNodeModel = None):
# assert that node is attached to segment
assert(node is None or node == self.getPrevious() or node == self.getNext())
# update segment start/end thetas
self.updateThetas()
self.getPrevious().onThetaChange()
self.getNext().onThetaChange()
# update segment distance
self.updateDistance()
if node is None:
self.updateEndpointPosition(self.getPrevious())
self.updateEndpointPosition(self.getNext())
self.getPrevious().onThetaChange()
self.getNext().onThetaChange()
else:
# update endpoint that changed
self.updateEndpointPosition(node)
# update the other endpoint's angle
self.getOther(node).onThetaChange()
# redraw segment ui. No need to update segment model as
# segment endpoint positions are just refs to node models
self.recomputeUI()
"""
SETTER METHODS THAT MODIFY MODEL AND THEN SEND NOTIF TO UPDATE UI
"""
def setState(self, type: SegmentType):
assert(type in self.states)
self.currentStateType = type
# callback for state change
self.getState().onSwitchToState()
self.onInit()
# regenerate ui with new state
self.generateUI()
self.recomputeUI()
command = self.path.getCommandFromPath(self)
command.setNewAdapter(self.getAdapter())
command.rebuild()
command.ui.recomputeEntity()
# absolutely atrocious code to dig through interactor shit to
# sustain menu across changing segment entity
self.ui.interactor.removeAllEntities()
self.ui.interactor.addEntity(self.ui)
def toggleDirection(self):
if self.direction == SegmentDirection.FORWARD:
self.direction = SegmentDirection.REVERSE
else:
self.direction = SegmentDirection.FORWARD
self.getState()._updateIcon()
self.updateThetas()
self.getPrevious().onThetaChange()
self.getNext().onThetaChange()
"""
GETTER METHODS THAT READ FROM MODEL. DO NOT MODIFY MODEL OR SEND NOTIFICATIONS
"""
def getState(self) -> AbstractSegmentState:
return self.states[self.currentStateType]
def getStraightState(self) -> StraightSegmentState:
return self.states[SegmentType.STRAIGHT]
def getArcState(self) -> ArcSegmentState:
return self.states[SegmentType.ARC]
def getBezierState(self) -> BezierSegmentState:
return self.states[SegmentType.BEZIER]
def getAdapter(self) -> PathAdapter:
return self.getState().getAdapter()
def getPrevious(self) -> PathNodeModel:
return super().getPrevious()
def getNext(self) -> PathNodeModel:
return super().getNext()
def getBeforePos(self) -> tuple:
return self.getPrevious().getPosition()
def getAfterPos(self) -> tuple:
return self.getNext().getPosition()
def getStartTheta(self) -> float:
return self.START_THETA
def getEndTheta(self) -> float:
return self.END_THETA
def getDirection(self) -> SegmentDirection:
return self.direction
def getType(self) -> SegmentType:
return self.getState().getType()
def getCenterInches(self) -> tuple:
return self.getState()._defineCenterInches()
def getOther(self, node: PathNodeModel) -> PathNodeModel:
if node == self.getPrevious():
return self.getNext()
elif node == self.getNext():
return self.getPrevious()
else:
raise Exception("Node not attached to segment")
# initialize constraint solver for snapping to before node with angle (arc/bezier)
def initBeforeThetaConstraints(self):
self.bConstraints = ConstraintSolver(self.field)
prevNode = self.getPrevious()
# snap to cardinal directions for itself
self.bConstraints.addCardinalConstraints(prevNode)
# if segment before prevNode exists, snap to segment end angle
prevSegment = prevNode.getPrevious()
if prevSegment is not None:
prevAngle = prevSegment.getEndTheta()
self.bConstraints.addAngleConstraint(prevNode, prevAngle)
# initialize constraint solver for snapping to after node with angle (arc/bezier)
def initAfterThetaConstraints(self):
self.aConstraints = ConstraintSolver(self.field)
prevNode = self.getPrevious()
# snap to cardinal directions for itself
self.aConstraints.addCardinalConstraints(prevNode)
# if segment before prevNode exists, snap to segment end angle
prevSegment = prevNode.getPrevious()
if prevSegment is not None:
prevAngle = prevSegment.getEndTheta()
self.aConstraints.addAngleConstraint(prevNode, prevAngle)
# given a hypothetical start theta, return the "snapped" version if close enough
# return None if no snapping
def getConstrainedStartTheta(self, startTheta: float) -> float | None:
snappedTheta = self.bConstraints.constrainAngle(startTheta)
if snappedTheta is None:
# nothing to snap
return None
else:
# can snap
return snappedTheta
# given a hypothetical start theta, return the "snapped" version if close enough
# return None if no snapping
def getConstrainedEndTheta(self, endTheta: float) -> float | None:
        snappedTheta = self.aConstraints.constrainAngle(endTheta)
if snappedTheta is None:
# nothing to snap
return None
else:
# can snap
return snappedTheta
"""
PRIVATE METHODS
"""
def _generateUI(self, fieldEntity: FieldEntity) -> Entity:
if self.getType() == SegmentType.STRAIGHT:
return StraightSegmentEntity(fieldEntity, self)
elif self.getType() == SegmentType.ARC:
return ArcSegmentEntity(fieldEntity, self)
elif self.getType() == SegmentType.BEZIER:
return BezierSegmentEntity(fieldEntity, self)
else:
raise Exception("Invalid segment type")
def __str__(self) -> str:
return f"PathSegmentModel"
|
AnselChang/Pathogen4
|
models/path_models/path_segment_model.py
|
path_segment_model.py
|
py
| 12,457
|
python
|
en
|
code
| 11
|
github-code
|
6
|
21686337671
|
import argparse
import re
import mmread_utils as mmu
import pickle as pkl
def parse_annots(fn):
"""
Columns are:
* 0 - run_barcode
* 1 - cell_ontology_class
* 17 - tissue
"""
annots_dict = {}
for i, line in enumerate(open(fn)):
if i == 0: continue
line = line.split(',')
if len(line[1]) == 0:
continue
## Combine cell types with spaces in them
elif line[1][0] == '"':
annot = [line[1]]
idx = 2
while line[idx][-1] != '"':
annot.append(line[idx])
idx += 1
annot.append(line[idx])
annot = ';'.join(annot)
annot = annot[1:-1]
else:
annot = line[1]
idx = 1
re_rep = r"\s+"
cell_type = (f'{re.subn(re_rep, "_", line[idx+16])[0]}:'
f'{re.subn(re_rep, "_", annot)[0]}')
annots_dict[line[0]] = cell_type
return(annots_dict)
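# Note on the quoted-field handling above: a row like
#   BC1,"type a, type b",...
# is reassembled into the single annotation 'type a; type b' (the comma that
# the csv split consumed becomes ';') before whitespace is underscored.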
def load_sj_genes_num(gd):
"""
Return dictionary structure:
sj (chrom, start_pos, end_pos, strand):
{gene_ids}
"""
sj_list = f'{gd}/sjdbList.fromGTF.out.tab'
sj_genes_dict = {}
for line in open(sj_list, 'r'):
line = line.strip().split('\t')
# Fix chromosome
line[0] = line[0].strip('chr')
if line[0] == 'X':
line[0] = '23'
elif line[0] == 'Y':
line[0] = '24'
elif line[0][0] in {'M', 'm'}:
line[0] = '25'
elif not re.match(r'^[0-9]+$', line[0]):
continue
# Fix sign
line[3] = '1' if line[3] == '+' else '2'
idx = tuple(map(int, line[:4]))
sj_genes_dict[idx] = set(map(int, line[4].split(',')))
return(sj_genes_dict)
################################################################################
def get_args():
parser = argparse.ArgumentParser(description='')
parser.add_argument('-i')
parser.add_argument('-o')
parser.add_argument('-gd')
parser.add_argument('-annots')
return(parser.parse_args())
def main():
args = get_args()
annots_dict = parse_annots(args.annots)
## Load gene counts
all_counts_gene_orig, mtx_fns_gene = mmu.load_all_gene_counts(args.i)
## Load SJ counts
sj_annots = load_sj_genes_num(args.gd)
all_sjs, sj_lists = mmu.get_all_sjs(args.i, sj_annots)
all_counts_sj, mtx_fns_sj = mmu.load_all_sj_counts(args.i, all_sjs,
sj_lists)
## Make sure both mats are in CSR format
all_counts_sj = all_counts_sj.tocsr()
all_counts_gene_orig = all_counts_gene_orig.tocsr()
## Need to add on to the mats so there's 1:1 sj:gene
## (some SJs have multiple genes)
all_counts_sj, all_counts_gene, sj_ord, g_idx = mmu.adjust_mats(
all_counts_sj, all_counts_gene_orig, all_sjs, sj_annots)
## Split up gene and SJ counts
genes_dict, _ = mmu.split_counts_mat(all_counts_gene, mtx_fns_gene,
annots_dict)
sj_dict, _ = mmu.split_counts_mat(all_counts_sj, mtx_fns_sj, annots_dict)
genes_dict_orig, cell_bcs = mmu.split_counts_mat(all_counts_gene_orig, mtx_fns_gene,
annots_dict)
pkl.dump([sj_dict, genes_dict, sj_ord, genes_dict_orig, g_idx, cell_bcs],
open(args.o, 'wb'))
if __name__ == '__main__':
main()
|
dobinlab/STARsoloManuscript
|
splicing/make_mmu_pkl.py
|
make_mmu_pkl.py
|
py
| 3,304
|
python
|
en
|
code
| 8
|
github-code
|
6
|
71843767229
|
class Produto:
def __init__(self, nome, valor):
self.nome = nome
self.valor = valor
class CarrinhoCompras:
    def __init__(self):
self.produtos = []
def inserir_produto(self, produto):
self.produtos.append(produto)
def lista_produtos(self):
for produto in self.produtos:
print(f'{produto.nome}: R${produto.valor}')
def soma_total(self):
total = 0
for produto in self.produtos:
total += produto.valor
return f'R${total}'
carrinho = CarrinhoCompras()
p1 = Produto('Boné', 50)
p2 = Produto('Tênis', 100)
p3 = Produto('Camiseta', 39)
carrinho.inserir_produto(p1)
carrinho.inserir_produto(p2)
carrinho.inserir_produto(p3)
carrinho.lista_produtos()
print(carrinho.soma_total())
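# Expected output:
#   Boné: R$50
#   Tênis: R$100
#   Camiseta: R$39
#   R$189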
|
gittil/SoulOn-Python2
|
Modulo_1/agregacao.py
|
agregacao.py
|
py
| 794
|
python
|
pt
|
code
| 1
|
github-code
|
6
|
27009828768
|
from sklearn.linear_model import LassoCV
def run(x_train, y_train, x_test, y_test, eps, n_alphas, alphas, fit_intercept, normalize, precompute, max_iter, tol, copy_X, cv, verbose, n_jobs,
positive, random_state, selection):
reg = LassoCV(eps=eps,
n_alphas=n_alphas,
alphas=alphas,
fit_intercept=fit_intercept,
normalize=normalize,
precompute=precompute,
max_iter=max_iter,
tol=tol,
copy_X=copy_X,
cv=cv,
verbose=verbose,
n_jobs=n_jobs,
positive=positive,
random_state=random_state,
selection=selection).fit(x_train, y_train)
return {'train_predict': reg.predict(x_train).tolist(),
'test_predict': reg.predict(x_test).tolist(),
'train_score': reg.score(x_train, y_train),
'test_score': reg.score(x_test, y_test),
'alpha_': reg.alpha_,
'coef_': reg.coef_.tolist(),
'intercept_': reg.intercept_,
'mse_path_': reg.mse_path_.tolist(),
'alphas_': reg.alphas_.tolist(),
'dual_gap_': reg.dual_gap_.tolist(),
'n_iter_': reg.n_iter_}
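# Usage sketch (not part of the original module; synthetic data, keyword values
# mirroring scikit-learn's LassoCV defaults):
#   import numpy as np
#   rng = np.random.RandomState(0)
#   X = rng.randn(100, 5); y = X @ rng.randn(5)
#   out = run(X[:80], y[:80], X[80:], y[80:], eps=1e-3, n_alphas=100,
#             alphas=None, fit_intercept=True, normalize=False,
#             precompute='auto', max_iter=1000, tol=1e-4, copy_X=True, cv=5,
#             verbose=False, n_jobs=None, positive=False, random_state=0,
#             selection='cyclic')
#   print(out['test_score'])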
|
lisunshine1234/mlp-algorithm-python
|
machine_learning/regression/linear_models/lassoCV/run.py
|
run.py
|
py
| 1,314
|
python
|
en
|
code
| 0
|
github-code
|
6
|
74626753147
|
#%%
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
import matplotlib.ticker as ticker
from matplotlib import rcParams
import numpy as np
from highlight_text import fig_text
import pandas as pd
from PIL import Image
import urllib
import os
df = pd.read_csv("success_rate_2022_2023.csv", index_col = 0)
df = (
df
.sort_values(by = ["variable", "value"], ascending = True)
.reset_index(drop = True)
)
fig = plt.figure(figsize=(6.5, 10), dpi = 200, facecolor="#EFE9E6")
ax = plt.subplot(111, facecolor = "#EFE9E6")
# Adjust spines
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.grid(True, color = "lightgrey", ls = ":")
# Define the series
teams = list(df["team_id"].unique())
Y = np.arange(len(teams))
X_xg = df[df["variable"] == "2022_success_rate"]["value"]
X_goals = df[df["variable"] == "2023_success_rate"]["value"]
# Fix axes limits
ax.set_ylim(-.5, len(teams) - .5)
ax.set_xlim(
min(X_goals.min(), X_xg.min(), 35),
max(X_goals.max(), X_xg.max(), 55)
)
# Scatter plots
ax.scatter(X_xg, Y, color = "#74959A", s = 200, alpha = 1, zorder = 3)
ax.scatter(X_goals, Y, color = "#495371", s = 200, alpha = 1, zorder = 3)
ax.scatter(X_xg, Y, color = "none", ec = "#74959A", s = 180, lw = 2.5, zorder = 3)
ax.scatter(X_goals, Y, color = "none", ec = "#495371", s = 180, lw = 2.5, zorder = 3)
# Add line chart between points and difference annotation
for index in Y:
difference = X_xg.iloc[index] - X_goals.iloc[index]
if difference > 0:
color = "#74959A"
x_adj = -1.75
anot_position = X_xg.iloc[index]
anot_aux_sign = "-"
else:
color = "#495371"
x_adj = 1.75
anot_position = X_goals.iloc[index]
anot_aux_sign = "+"
ax.annotate(
xy = (anot_position, index),
text = f"{anot_aux_sign} {abs(difference):.1f}",
xytext = (13, -2),
textcoords = "offset points",
size = 8,
color = color,
weight = "bold"
)
    # abs() can never be negative, so only the small-difference skip is reachable
    if abs(difference) < 1.3:
        continue
ax.plot(
[X_xg.iloc[index] + x_adj, X_goals.iloc[index] + x_adj*(-1)],
[index, index],
lw = 1,
color = color,
zorder = 2
)
DC_to_FC = ax.transData.transform
FC_to_NFC = fig.transFigure.inverted().transform
# Native data to normalized data coordinates
DC_to_NFC = lambda x: FC_to_NFC(DC_to_FC(x))
logos_folder = "nfl_logos/"
# Modify the loop to fetch logos from the local folder
for index, team_id in enumerate(teams):
ax_coords = DC_to_NFC([33, index - 0.55])
logo_ax = fig.add_axes([ax_coords[0], ax_coords[1], 0.04, 0.04], anchor="C")
# Use the local path to the logos folder
logo_path = f"{logos_folder}{team_id:.0f}.png"
try:
# Check if the file exists before opening
with Image.open(logo_path) as club_icon:
logo_ax.imshow(club_icon.convert("LA"))
logo_ax.axis("off")
except FileNotFoundError:
print(f"Logo not found for team ID {team_id}")
# Remove tick labels
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
false_ticks = ax.set_yticklabels([])
fig_text(
x = 0.15, y = .9,
s = "Through 10 weeks, only 3 NFL Teams\nhave outperformed their <2022> \noffensive success rate in <2023>",
highlight_textprops = [
{"color":"#74959A"},
{"color": "#495371"}
],
va = "bottom", ha = "left",
fontsize = 14, color = "black", weight = "bold"
)
fig_text(
x = 0.15, y = .885,
s = "Source: rbsdm.com | Viz by Ray Carpenter | inspired by a viz by @sonofacorner",
va = "bottom", ha = "left",
fontsize = 8, color = "#4E616C"
)
# # ---- The League's logo
league_icon = Image.open("nfl_logos/NFL.png")
league_ax = fig.add_axes([0.055, 0.89, 0.065, 0.065], zorder=1)
league_ax.imshow(league_icon)
league_ax.axis("off")
plt.savefig(
"06202022_bundelsiga_xg.png",
dpi = 500,
facecolor = "#EFE9E6",
bbox_inches="tight",
edgecolor="none",
transparent = False
)
plt.savefig(
"06202022_bundelsiga_xg_tr.png",
dpi = 500,
facecolor = "none",
bbox_inches="tight",
edgecolor="none",
transparent = True
)
|
array-carpenter/14thstreetanalytics
|
success_rate_comparison/success_rate_comparison.py
|
success_rate_comparison.py
|
py
| 4,223
|
python
|
en
|
code
| 0
|
github-code
|
6
|
24333749632
|
#!/bin/python3
import os
import sys
#
# Complete the gradingStudents function below.
#
def gradingStudents(grades):
l=[]
for i in grades:
if i<38:
l.append(i)
elif i%5==1 or i%5==2 or i%5==0:
l.append(i)
elif i%5==3:
l.append(i+2)
elif i%5==4:
l.append(i+1)
return l
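# Example: gradingStudents([73, 67, 38, 33]) -> [75, 67, 40, 33]
# (73 -> 75 since 75 - 73 < 3; 38 -> 40; 67 and 33 are unchanged).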
if __name__ == '__main__':
f = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
grades = []
for _ in range(n):
grades_item = int(input())
grades.append(grades_item)
result = gradingStudents(grades)
f.write('\n'.join(map(str, result)))
f.write('\n')
f.close()
|
nami-h/Python
|
multiple conditions in if else.py
|
multiple conditions in if else.py
|
py
| 680
|
python
|
en
|
code
| 0
|
github-code
|
6
|
37020505173
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from astropy import units as u
from astropy import coordinates
from astropy.coordinates import Angle, ICRS, SkyCoord
from astropy.io import fits
from astropy.table import Table, Column
from astropy.utils.data import download_file
from astroquery.irsa_dust import IrsaDust
import matplotlib.pyplot as plt
import math
import os.path
import sys
import requests
def timeFix(s,m,h): #fixes time to ensure it stays within normal range (0-60)
if(s>=60 or m>=60):
while(s>=60 or m>=60):
if s >= 60:
m+=1
s-=60
if m >= 60:
h+=1
m-=60
elif(s<0 or m<0):
while(s<0 or m<0):
if s < 0:
m-=1
s+=60
if m < 0:
h-=1
m+=60
return s,m,h;
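# e.g. timeFix(65, 59, 10) -> (5, 0, 11): the 65 s overflow carries into the
# minutes, whose own overflow then carries into the hours.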
def fourCoord(dam,ra,dec,coord):
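    # 1 arcminute of sky = 4 seconds of right ascension; the cos(dec) division
    # below widens the RA offset so the on-sky east-west distance stays `dam`
    # arcminutes at higher declinations.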
ds = dam*4
    # east offset
ds/=math.cos(math.radians(dec.degree))
h = ra.hms.h
m = ra.hms.m
s = ra.hms.s+ds
(s,m,h) = timeFix(s,m,h)
rad = Angle((h,m,s), unit = u.hour)
rad = Angle(rad.to_string(unit=u.hour),u.hour)
coord[1] = rad.to_string()+" "+dec.to_string()
    # north offset
decli = dec.arcminute+dam
decl = Angle(decli,u.arcminute)
decl = Angle(decl.to_string(unit=u.degree),u.degree)
coord[0] = ra.to_string()+" "+decl.to_string()
    # west offset
    ds = ds*(-1)  # flip the already cos(dec)-corrected eastward offset to point west
h = ra.hms.h
m = ra.hms.m
s = ra.hms.s+ds
(s,m,h) = timeFix(s,m,h)
rad = Angle((h,m,s), unit = u.hour)
rad = Angle(rad.to_string(unit=u.hour),u.hour)
coord[3] = rad.to_string()+" "+dec.to_string()
#s
decli = dec.arcminute-dam
decl = Angle(decli,u.arcminute)
decl = Angle(decl.to_string(unit=u.degree),u.degree)
coord[2] = ra.to_string()+" "+decl.to_string()
#print(coord)
return coord; #performs transformation of initial coord into cardinal coordinates
def tableFill(dam, ra, dec, appender,nme):
t = Table(None)
Am = Column(name = 'Arcminute')
North = Column(name = 'North')
East = Column(name = 'East')
South = Column(name = 'South')
West = Column(name = 'West')
t.add_columns([Am,North, East, South, West])
tA_v = []
curVal = [None] *4 #n = 0, e = 1, s = 2, w = 3
coord = [None] *4 #n = 0, e = 1, s = 2, w = 3
#get values for each arcminute
for j in range(0,dam+1):
fourCoord(j, ra, dec, coord)
t.add_row()
t[j][0]=j
for i in range(0,4):
C = coordinates.SkyCoord(coord[i])
table = IrsaDust.get_extinction_table(C.fk5, show_progress = False)
curVal[i] = (table['A_SandF'][2])
t[j][i+1] = curVal[i]
        tA_v.append(curVal[:])  # store a copy so the next pass's writes don't clobber saved rows
t.add_row()
for i in range(0,5): #this adds a blank line to the table to separate queries
t[j+1][i] = None
n = [nme]
    namesTable = Table([n], names=('n',))
final_name = namesTable.to_pandas()
final_vals = t.to_pandas()
from pandas import ExcelWriter
with open('A_v Values.csv', appender) as f:
final_name.to_csv(f, header =False, index = False)
appender = 'a'
with open('A_v Values.csv', appender) as f:
final_vals.to_csv(f, header =True, index = False, sep = ',')
return(tA_v)#gets the data from IRSA database and stores A_v in array
def grabImage(ra,dec):
imagelist = IrsaDust.get_image_list(SkyCoord(ra,dec).fk5, image_type="100um", radius=2*u.degree)
image_file = download_file(imagelist[0],cache=True)
image_data.append(fits.getdata(image_file, ext=0)) #gets image from IRSA database
def PicSaver(image_data,gals,goPics):
go = int(goPics)
iend = go
sz1 = (int(len(gals))//go)+1
sz2 = (int(len(gals))-(go*(sz1-1)))
if go==1:
sz1 = sz1-1
for j in range(0,sz1):
if go == 1:
plt.figure(1)
plt.title(gals[j])
plt.imshow(image_data[j],cmap='gray')
plt.colorbar()
plt.savefig(os.path.join('Pictures',(gals[j]+".png")))
plt.clf()
else:
if j==sz1-1: #if last set
iend = sz2
if iend == 1: #if only one plot remains
plt.figure(1)
plt.title(gals[len(gals)-1])
plt.imshow(image_data[len(image_data)-1],cmap='gray')
plt.colorbar()
plt.savefig(os.path.join('Pictures',(gals[len(gals)-1]+".png")))
plt.clf()
elif iend == 0:
plt.clf()
return
else:
f, axarr = plt.subplots(1,iend)
for i in range(go*(j),((j)*go)+iend):
im = axarr[i-(go*j)].imshow(image_data[i],cmap='gray')
axarr[i-(go*(j))].set_title(gals[i])
f.subplots_adjust(right=0.8)
cbar_ax = f.add_axes([0.85, 0.15, 0.05, 0.7])
f.colorbar(im,cax = cbar_ax)
f.savefig(os.path.join('Pictures',(gals[go*(j)]+".png")))
plt.clf()
#-----Saves Graphs and Data To The Directory-----# #saves all images in .png files
def GraphMaker(A_v,gals,majAxis,goGraphs):
go = int(goGraphs)
iend = go
sz1 = (int(len(gals))//go)+1
sz2 = (int(len(gals))-(go*(sz1-1)))
if go==1:
sz1 = sz1-1
for j in range(0,sz1):
if go == 1:
plt.clf()
plt.figure(1)
plt.plot(x,A_v[j][:,0], color = "blue", marker = ".", label = "North")
plt.plot(x, A_v[j][:,1], color = "red", marker = ".", label = "East")
plt.plot(x, A_v[j][:,2], color = "green", marker = ".", label = "South")
plt.plot(x, A_v[j][:,3], color = "black", marker = ".", label = "West")
#plt.axvline(x=majAxis[j])
plt.xlabel("Arcminutes from Center of Galaxy")
plt.ylabel("A_v Value")
plt.legend(loc='center right', shadow=True)
plt.suptitle("A_v Values by Arcminute")
plt.title(gals[j])
plt.savefig(os.path.join('Graphs',(gals[j]+" Graph.png")))
plt.clf()
else:
if j==sz1-1:
iend = sz2
if iend == 1:
plt.clf()
plt.figure(1)
plt.plot(x,A_v[len(gals)-1][:,0], color = "blue", marker = ".", label = "North")
plt.plot(x, A_v[len(gals)-1][:,1], color = "red", marker = ".", label = "East")
plt.plot(x, A_v[len(gals)-1][:,2], color = "green", marker = ".", label = "South")
plt.plot(x, A_v[len(gals)-1][:,3], color = "black", marker = ".", label = "West")
#plt.axvline(x=majAxis[j])
plt.xlabel("Arcminutes from Center of Galaxy")
plt.ylabel("A_v Value")
plt.legend(loc='center right', shadow=True)
plt.suptitle("A_v Values by Arcminute")
plt.title(gals[len(gals)-1])
plt.savefig(os.path.join('Graphs',(gals[len(gals)-1]+" Graph.png")))
plt.clf()
elif iend == 0:
plt.clf()
return
else:
f, axarr = plt.subplots(nrows = 1,ncols = iend, sharey = True, sharex = True,figsize = (20,10))
f.text(.5,.04, 'Arcminutes From Center of Galaxy',ha='center',fontsize = 20)
f.text(.08,.5, 'A_V Value',va='center', rotation='vertical',fontsize = 20)
for i in range(go*(j),((j)*go)+iend):
no, = axarr[i-(go*j)].plot(x, A_v[i][:,0], color = "blue", marker = ".", label = "North")
ea, = axarr[i-(go*j)].plot(x, A_v[i][:,1], color = "red", marker = ".", label = "East")
so, = axarr[i-(go*j)].plot(x, A_v[i][:,2], color = "green", marker = ".", label = "South")
we, = axarr[i-(go*j)].plot(x, A_v[i][:,3], color = "black", marker = ".", label = "West")
#axarr[i-(go*j)].axvline(x=majAxis[i])
axarr[i-(go*(j))].set_title(gals[i], fontsize = 20)
plt.figlegend((no,ea,so,we),("North","East","South","West"),loc='center right', shadow=True, prop={'size':20})
plt.suptitle("A_v Values by Arcminute", fontsize = 20)
f.savefig(os.path.join('Graphs',(gals[go*(j)]+" Graph.png")))
plt.clf()
#-----Saves Graphs and Data To The Directory-----# #saves all images in .png files
# def getAxis(name,link,majAxis,minAxis,ra, dec):
# inputs = {'objname': name,
# 'extend': 'no',
# 'hconst': '73',
# 'omegam': '0.27',
# 'omegav': '0.73',
# 'corr_z': '1',
# 'out_csys': 'Equatorial',
# 'out_equinox': 'J2000.0',
# 'obj_sort': "RA or Longitude",
# 'of': 'pre_text',
# 'zv_breaker': '30000.0',
# 'list_limit': '5',
# 'img_stamp': 'YES'}
# page = requests.get(link, params = inputs)
# from bs4 import BeautifulSoup
# soup = BeautifulSoup(page.content, 'html.parser')
# #-------Get Velocities-----#
# velocities = soup.find_all('pre')[5]
# Helio = list(velocities.children)[2]
# VGS = list(velocities.children)[16]
# Helio = Helio.lstrip('\n')
# VGS = VGS.lstrip('\n')
# Hvals = [int(s) for s in Helio.split() if s.isdigit()]
# VGSVals = [int(s) for s in VGS.split() if s.isdigit()]
# #-----End Get Velocities-----#
# #-----Get Diameters-----#
# diameters = soup.find_all('table')[22]
# diameters = diameters.find_all('tr')[2]
# major = diameters.find_all('td')[1].get_text()
# minor = diameters.find_all('td')[2].get_text()
# #-----End Get Diameters-----#
# write_file = 'Data.csv'
# fix = False
# fix2 = False
# if(major != "..."):
# major = float(major)/60
# majAxis.append(major)
# else:
# major = "None"
# fix = True
# majAxis.append(None)
# if(minor != "..."):
# minor = float(minor)/60
# minAxis.append(minor)
# else:
# minor = "None"
# fix2 = True
# minAxis.append(None)
# with open(write_file, 'a') as output:
# if(fix and fix2):
# output.write(name + ',' + ra.to_string() + ',' + dec.to_string() + ',' + str(Hvals[0]) + ',' + str(Hvals[1]) + ',' + str(VGSVals[0]) + ',' + str(VGSVals[1]) + ',' + major + ',' + minor + '\n')
# elif(fix == True and fix2 == False):
# output.write(name + ',' + ra.to_string() + ',' + dec.to_string() + ',' + str(Hvals[0]) + ',' + str(Hvals[1]) + ',' + str(VGSVals[0]) + ',' + str(VGSVals[1]) + ',' + major + ',' + '%.3f' %minor + '\n')
# elif(fix == False and fix2 == True):
# output.write(name + ',' + ra.to_string() + ',' + dec.to_string() + ',' + str(Hvals[0]) + ',' + str(Hvals[1]) + ',' + str(VGSVals[0]) + ',' + str(VGSVals[1]) + ',' + '%.3f' %major + ',' + minor + '\n')
# else: output.write(name + ',' + ra.to_string() + ',' + dec.to_string() + ',' + str(Hvals[0]) + ',' + str(Hvals[1]) + ',' + str(VGSVals[0]) + ',' + str(VGSVals[1]) + ',' + '%.3f' %major + ',' + '%.3f' %minor + '\n')
from astropy.coordinates import name_resolve
#-----SETUP-----#
c3 = None
link = "https://ned.ipac.caltech.edu/cgi-bin/objsearch?"
gals = []
start_coord = []
print("\nWelcome to A_v Calculator!\n")
print("Created by: Tate Walker for Dr. Peter Brown at Texas A&M University\n")
can_read = False
while can_read == False:
choice = input("Enter [1] to enter galaxies by hand. Enter [2] to import a .txt file of names. Enter [q] to quit.\n")
if choice == '1':
galaxies = input("Enter galaxies separated by commas: Ex. M82, M83\n")
for x in galaxies.split(','):
gals.append(x.strip())
can_read = True
elif choice == '2':
file = input("What is the name of the file? Ex. galaxies.txt\n")
with open(file) as inp:
gals = inp.read().splitlines()
can_read = True
elif choice == 'q':sys.exit()
else: print("Please enter either [1], [2], or [q]\n\n")
c1 = input("Do you want to get A_v values? [y] [n] \n") #option to get values
if c1 == 'y':
c3 = input("Do you want to graph these values? [y] [n] \n") #option to graph
if c3 == 'y': goGraphs = input("How many graphs per file?\n")
c2 = input("Do you want to get pictures of each galaxy? [y] [n] \n") #option to get pictures
if c2 == 'y':goPics = input("How many pictures per file?\n")
if c1 == 'y':
dam = input("How many arcminutes?\n") #dam = delta arcminutes
dam = int(dam)
elif c1 == 'n' and c2 == 'n':
print("Those are all the options, please come back if you change your mind.")
sys.exit()
for i in range(0,len(gals)):
tcoord=SkyCoord.from_name(gals[i],frame ='icrs') #gets coordinate from name given and stores in temporary SkyCoord
start_coord.append(tcoord) #puts temporary SkyCoord in a list
from astropy.table import Table
from astropy.table import Column
#-----SETUP-----#
#------MAIN FUNCTION-----#
from astropy.utils.data import download_file
from astropy.io import fits
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
import numpy as np
image_data = []
A_v = []
majAxis = []
minAxis = []
ra = Angle(start_coord[0].ra.hour,unit = u.hour) # right ascension of the 1st coordinate as an Angle in hours (the string formatting below expects this)
dec = start_coord[0].dec # declination works directly as returned by SkyCoord
appender = 'w' #lets us overwrite a file or make a new one
nme = gals[0]
import itertools
import threading
import time
import sys
done = False
# console loading spinner shown while the queries run
def animate():
for c in itertools.cycle(['|', '/', '-', '\\']):
if done:
break
sys.stdout.write('\rloading ' + c)
sys.stdout.flush()
time.sleep(0.1)
        print(chr(27) + "[2J") # ANSI escape: clear the terminal
sys.stdout.write('\rDone!')
print(chr(27) + "[2J")
threader = threading.Thread(target=animate)
threader.start()
if c1 == 'y':A_v.append(tableFill(dam,ra,dec,appender,nme)) #runs the main functionality and returns the data of a galaxy
if c2 == 'y':grabImage(ra,dec) #gets image data and stores in list
appender = 'a' #lets us append a file instead of overwriting
for i in range(1,len(start_coord)):
nme = gals[i]
ra = Angle(start_coord[i].ra.hour,unit = u.hour)
dec = start_coord[i].dec
if c1 == 'y':A_v.append(tableFill(dam,ra,dec,appender,nme))
if c2 == 'y':grabImage(ra,dec)
if c2 == 'y':PicSaver(image_data,gals,goPics) #saves all images in .png files, don't mess with this
if c3 == 'y':
x = np.arange(dam+1) #creates array of size dam+1 to store values
A_v = np.array(A_v) #numpy array needed to graph
# write_file = 'Data.csv'
# with open(write_file, 'w') as output:
# output.write("Name, Ra, Dec, Heliocentric Velocity (km/s), Uncertainty (km/s), VGS Velocity (km/s), Uncertainty (km/s), Apparent Major Axis (arcmin), Apparent Minor Axis (arcmin)\n")
# for i in range(0,len(gals)):
# nme = gals[i]
# ra = Angle(start_coord[i].ra.hour,unit = u.hour)
# dec = start_coord[i].dec
# getAxis(nme,link,majAxis,minAxis,ra,dec)
GraphMaker(A_v,gals,majAxis,goGraphs)
done = True
time.sleep(3)
print(chr(27) + "[2J")
print("All pictures and graphs are saved in the project directory if selected. The A_v values are stored in the .csv file if calculated\n")
print("Thank you for using this program!\n")
|
TateWalker/Galactic-Data
|
irsa.py
|
irsa.py
|
py
| 14,152
|
python
|
en
|
code
| 0
|
github-code
|
6
|
4072532441
|
from __future__ import division
import os
import sys
import subprocess
import threading
import json
import numpy as np
import ast
import tempfile
# Assumes spice.jar is in the same directory as spice.py. Change as needed.
SPICE_JAR = 'SPICE-1.0/spice-1.0.jar'
TEMP_DIR = 'tmp'
CACHE_DIR = 'cache'
class Spice:
"""
Main Class to compute the SPICE metric
"""
    def float_convert(self, obj):
        try:
            return float(obj)
        except (TypeError, ValueError):
            return np.nan
def compute_score(self, gts, res):
assert(sorted(gts.keys()) == sorted(res.keys()))
imgIds = sorted(gts.keys())
# Prepare temp input file for the SPICE scorer
input_data = []
for id in imgIds:
hypo = res[id]
ref = gts[id]
# Sanity check.
assert(type(hypo) is list)
assert(len(hypo) == 1)
assert(type(ref) is list)
assert(len(ref) >= 1)
input_data.append({
"image_id" : id,
"test" : hypo[0],
"refs" : ref
})
cwd = os.path.dirname(os.path.abspath("__file__"))
temp_dir=os.path.join(cwd, TEMP_DIR)
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
in_file = tempfile.NamedTemporaryFile(delete=False, dir=temp_dir, mode='w')
json.dump(input_data, in_file, indent=2)
in_file.close()
# Start job
out_file = tempfile.NamedTemporaryFile(delete=False, dir=temp_dir, mode='w')
out_file.close()
cache_dir=os.path.join(cwd, CACHE_DIR)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
spice_cmd = ['java', '-jar', '-Xmx8G', SPICE_JAR, in_file.name,
'-cache', cache_dir,
'-out', out_file.name,
'-subset',
'-silent'
]
subprocess.check_call(spice_cmd,
cwd=os.path.dirname(os.path.abspath("__file__")))
# Read and process results
with open(out_file.name) as data_file:
results = json.load(data_file)
os.remove(in_file.name)
os.remove(out_file.name)
imgId_to_scores = {}
spice_scores = []
for item in results:
imgId_to_scores[item['image_id']] = item['scores']
spice_scores.append(self.float_convert(item['scores']['All']['f']))
average_score = np.mean(np.array(spice_scores))
scores = []
for image_id in imgIds:
# Convert none to NaN before saving scores over subcategories
score_set = {}
for category,score_tuple in imgId_to_scores[image_id].items():
score_set[category] = {k: self.float_convert(v) for k, v in score_tuple.items()}
scores.append(score_set)
return average_score, scores
def method(self):
return "SPICE"
#!/usr/bin/env python
#
# File Name : ptbtokenizer.py
#
# Description : Do the PTB Tokenization and remove punctuations.
#
# Creation Date : 29-12-2014
# Last Modified : Thu Mar 19 09:53:35 2015
# Authors : Hao Fang <hfang@uw.edu> and Tsung-Yi Lin <tl483@cornell.edu>
import os
import sys
import subprocess
import tempfile
import itertools
# path to the stanford corenlp jar
STANFORD_CORENLP_3_4_1_JAR = 'stanford-corenlp-3.4.1.jar'
# punctuations to be removed from the sentences
PUNCTUATIONS = ["''", "'", "``", "`", "-LRB-", "-RRB-", "-LCB-", "-RCB-", \
".", "?", "!", ",", ":", "-", "--", "...", ";"]
class PTBTokenizer:
"""Python wrapper of Stanford PTBTokenizer"""
def tokenize(self, captions_for_image):
cmd = ['java', '-cp', STANFORD_CORENLP_3_4_1_JAR, \
'edu.stanford.nlp.process.PTBTokenizer', \
'-preserveLines', '-lowerCase']
# ======================================================
# prepare data for PTB Tokenizer
# ======================================================
final_tokenized_captions_for_image = {}
image_id = [k for k, v in captions_for_image.items() for _ in range(len(v))]
sentences = '\n'.join([c['caption'].replace('\n', ' ') for k, v in captions_for_image.items() for c in v])
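        # Flatten {image_id: [caption dicts]} into two parallel structures:
        # one image_id per caption, and every caption on its own line, so a
        # single tokenizer process can consume the whole batch at once.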
# ======================================================
# save sentences to temporary file
# ======================================================
path_to_jar_dirname=os.path.dirname(os.path.abspath("__file__"))
tmp_file = tempfile.NamedTemporaryFile(delete=False, dir=path_to_jar_dirname, mode='w')
tmp_file.write(sentences)
tmp_file.close()
# ======================================================
# tokenize sentence
# ======================================================
cmd.append(os.path.basename(tmp_file.name))
        p_tokenizer = subprocess.Popen(cmd, cwd=path_to_jar_dirname, \
                stdout=subprocess.PIPE)
        token_lines = p_tokenizer.communicate()[0]  # the tokenizer reads the temp file named on the command line
lines = token_lines.decode().split('\n')
# remove temp file
os.remove(tmp_file.name)
# ======================================================
# create dictionary for tokenized captions
# ======================================================
for k, line in zip(image_id, lines):
            if k not in final_tokenized_captions_for_image:
final_tokenized_captions_for_image[k] = []
tokenized_caption = ' '.join([w for w in line.rstrip().split(' ') \
if w not in PUNCTUATIONS])
final_tokenized_captions_for_image[k].append(tokenized_caption)
return final_tokenized_captions_for_image
|
SamarthGVasist/Image-Captioning-using-Deep-Learning-Models-CDSAML-
|
SPICE.py
|
SPICE.py
|
py
| 5,716
|
python
|
en
|
code
| 0
|
github-code
|
6
|
12195520060
|
# 875. Koko Eating Bananas
# source: https://leetcode.com/problems/koko-eating-bananas/
class Solution(object):
    def minEatingSpeed(self, piles, H):
        left, right = 1, max(piles)  # the eating speed is at least 1 banana/hour
        while left < right:  # binary search over the speed: O(N log max(piles)) overall
            mid = (left + right) // 2
            if self.whether(piles, H, mid):  # each feasibility check is O(N)
                right = mid
            else:
                left = mid + 1
        return left
    def whether(self, piles, H, K):
        # Can Koko finish every pile within H hours at speed K?
        total = [(bananas + K - 1) // K for bananas in piles]  # ceil(bananas / K) hours per pile
        return sum(total) <= H
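# Sanity check with the classic example: piles = [3, 6, 7, 11], H = 8.
# At speed 4, the hours needed are ceil(3/4)+ceil(6/4)+ceil(7/4)+ceil(11/4)
# = 1+2+2+3 = 8 <= 8, while speed 3 needs 10 hours, so:
# print(Solution().minEatingSpeed([3, 6, 7, 11], 8))  # -> 4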
|
ClaytonStudent/leetcode-
|
LeetCodePython/875_Koko_Eating_Bananas.py
|
875_Koko_Eating_Bananas.py
|
py
| 651
|
python
|
en
|
code
| 0
|
github-code
|
6
|
8952803072
|
# Create your views here.
from bet_data_fetcher.models import *
from django.conf import settings
from django.core.cache import cache
from exceptions import *
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def _fetch_data_from_sites():
""" Fetches data from site classes in bet_sites.
Supposed to iterate through to fetch
"""
from bet_sites import StanJamer
data = StanJamer().get_data()
    if any(data.values()):
logger.info("Fetched data from StanJamer")
return data
else:
#TODO: try other class
return data
def _map_data_to_obj(data):
def match_bet_values(bet_value_arr, bet_obj):
bet_val_objs = BetValue.objects.filter(bet=bet_obj)
logger.debug("Found %d bet values objects for %s bet" % (len(bet_val_objs), str(bet_obj)))
for bet_val_dict in bet_value_arr:
for bet_val_obj in bet_val_objs:
if bet_val_obj.name == bet_val_dict['name']:
bet_val_dict['bet_val_obj'] = bet_val_obj
break
if not bet_val_dict.has_key("bet_val_obj"):
logger.info('Bet value %s does not exist for %s bet. Creating one.' % (bet_val_dict['name'], str(bet_obj)))
bet_val_obj = BetValue.objects.create(name=bet_val_dict['name'], bet=bet_obj)
bet_val_dict['bet_val_obj'] = bet_val_obj
if data['tournament_data']['bets']:
tourn_bet_obj = Bet.objects.exclude(tournament = None)
if tourn_bet_obj:
logger.debug('Tournament bet - %s found' % str(tourn_bet_obj))
tourn_bet_obj = tourn_bet_obj[0]
else:
logger.info('No Tournament obj exists. Creating one.')
categ, created = BetCategory.objects.get_or_create(name="Tournament Winner")
if created:
logger.info("New Bet Category %s created" % str(categ))
tourn_bet_obj = Bet.objects.create(category=categ, match=None, tournament=Tournament.objects.all()[0])
data['tournament_data']['bets'][0]['tourn_obj'] = tourn_bet_obj
match_bet_values(data['tournament_data']['bets'][0]['values'], tourn_bet_obj)
for matches_data in [data['live_data'], data['upcoming_data']]:
for match_data in matches_data:
logger.debug("Got match from stie - %s, %s" % (str(match_data['date'].date()), match_data['name']))
match_data['match_obj'] = match_obj = Match.objects.get(match_date=match_data['date'].date(), name_lower=match_data['name'].lower())
match_bet_objs = Bet.objects.filter(match=match_obj)
logger.debug("Found %d bets for match - %s" % (len(match_bet_objs), str(match_obj)))
for bet in match_data['bets']:
for match_bet_obj in match_bet_objs:
if match_bet_obj.category.name == bet['name']:
bet['bet_obj'] = match_bet_obj
break
if not bet.has_key('bet_obj'):
logger.info("Bet %s doesn't exists for match %s. Create one" % (bet['name'], str(match_obj)))
categ, created = BetCategory.objects.get_or_create(name=bet['name'])
if created:
logger.info("New Bet Category %s created" % str(categ))
match_bet_obj = Bet.objects.create(match=match_obj,category=categ)
bet['bet_obj'] = match_bet_obj
match_bet_values(bet['values'], match_bet_obj)
def get_all_bet_data():
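    # Cache-aside: serve the cached payload while it is fresh; otherwise fetch
    # from the betting sites, map onto ORM objects, and cache for REFRESH_TIME.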
data = cache.get('bet_data')
if not data:
data = _fetch_data_from_sites()
_map_data_to_obj(data)
cache.set('bet_data', data, settings.REFRESH_TIME)
return data
def get_match_bet_data(match_id):
if match_id is None:
return
data = get_all_bet_data()
for match_bet_data in data['live_data'] + data['upcoming_data']:
if match_bet_data['match_obj'].id == match_id:
return match_bet_data
return None
def get_tournament_bet_data():
data = get_all_bet_data()
return data['tournament_data']
def get_bet_value_data(bet_value_id):
if bet_value_id is None:
return
data = get_all_bet_data()
for bet_val_data in [i for match in (data['upcoming_data'] + data['live_data'] + [data['tournament_data']]) for bet in match['bets'] for i in bet['values']]:
if bet_val_data['bet_val_obj'].id == bet_value_id:
return bet_val_data
return None
|
rahul342/betapp
|
bet_data_fetcher/views.py
|
views.py
|
py
| 4,634
|
python
|
en
|
code
| 0
|
github-code
|
6
|
25416563401
|
# Task 2. Write a program that finds the products of paired list elements. A pair is the first and last element, the second and second-to-last, and so on.
#************************
def fillList(n,l,r):
import random
resultList = []
for i in range(n):
resultList.append(random.randint(l, r))
return resultList
#************************
def listcomposition(lst):
    # For odd lengths the middle element is paired with itself.
    if len(lst) % 2 != 0:
        l = len(lst)//2 + 1
    else: l = len(lst)//2
lstcomp = []
for i in range(l):
lstcomp.append(lst[i]*lst[len(lst)-i-1])
return lstcomp
#************************
import os
os.system('cls')
print("Программа задает список из нескольких чисел и находит произведение пар чисел списка (первый и последний элемент, второй и предпоследний и т.д.)")
listN = int(input("Введите размерность списка: "))
listL = int(input("Введите начало диапазона чисел для заполнения списка: "))
listR = int(input("Введите окончание диапазона чисел для заполнения списка: "))
lst = fillList(listN, listL, listR)
lstcomp = listcomposition(lst)
print(f"Из списка {lst} путем умножения элементов получен: {lstcomp}")
|
PavlovaAlena/Python3
|
Seminar3_#2/Program.py
|
Program.py
|
py
| 1,522
|
python
|
ru
|
code
| 0
|
github-code
|
6
|
27734565992
|
import RPi.GPIO as GPIO
import time
buttonPin = 26
# Set buttonPin to INPUT mode with pull-up resistor
def setup():
GPIO.setmode(GPIO.BCM)
GPIO.setup(buttonPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# If button pressed return 1, else return 0
def detect():
return 1 if GPIO.input(buttonPin) == GPIO.LOW else 0
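# Wiring note (assuming the button shorts the pin to ground): with the internal
# pull-up enabled, an open button reads HIGH and a pressed one reads LOW, which
# is why LOW maps to 1 ("pressed") above.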
# Poll detect() once per second and print the result
def loop():
while(True):
print(detect())
time.sleep(1)
# Program entrance
if __name__ == '__main__':
print ('Program is starting...')
setup()
try:
loop()
except KeyboardInterrupt: # Press CTRL-C to end the program
GPIO.cleanup() # Release GPIO resources
|
kendrick010/ece140a-lab6
|
hardware/button.py
|
button.py
|
py
| 700
|
python
|
en
|
code
| 0
|
github-code
|
6
|
70128173308
|
""" Main Qt widget and file as well. """
import sys
from PySide6.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout
from PySide6.QtCore import QTimer
import globals
from main_gl_widget import MainGlWidget
# Important:
# You need to run the following command to generate the ui_form.py file
# pyside6-uic form.ui -o ui_form.py, or
# pyside2-uic form.ui -o ui_form.py
from ui_form import Ui_Widget
class Widget(QWidget):
""" Program main Qt Widget. """
def __init__(self, parent=None):
super().__init__(parent)
self.ui = Ui_Widget()
self.ui.setupUi(self)
self.setWindowTitle("Python based Conway's Game of Life")
self.main_gl_widget = MainGlWidget(self)
self.ui.horizontalLayout.insertWidget(0, self.main_gl_widget, 7)
self.ui.horizontalLayout.setStretch(1, 4)
self.__connect_signals()
# Set refresh rate.
timer = QTimer(self)
        timer.setInterval(int(1000 / globals.FPS))  # QTimer takes whole milliseconds
timer.timeout.connect(self.main_gl_widget.repaint)
timer.start()
def __connect_signals(self) -> None:
self.ui.toggleButton.clicked.connect(self.__toggle_button_clicked)
self.ui.rewindButton.clicked.connect(
lambda: self.main_gl_widget.restart_game(globals.TEST_RLE)
)
        self.ui.frequencySlider.valueChanged.connect(
            self.__frequency_slider_val_changed
        )
def __toggle_button_clicked(self) -> None:
        glyphs = {"⏵": "⏸", "⏸": "⏵"}  # swap between the play and pause glyphs
        self.ui.toggleButton.setText(glyphs[self.ui.toggleButton.text()])
self.main_gl_widget.toggle_game()
    def __frequency_slider_val_changed(self, freq: int) -> None:
self.main_gl_widget.game.updater.set_frequency(freq)
self.ui.frequencyLabel.setText(f"{freq}/s")
if __name__ == "__main__":
app = QApplication(sys.argv)
widget = Widget()
widget.show()
sys.exit(app.exec())
|
Iosiv42/GoL
|
src/widget.py
|
widget.py
|
py
| 1,938
|
python
|
en
|
code
| 0
|
github-code
|
6
|
18003891687
|
from unittest.mock import patch
from django.urls import reverse
from rest_framework import status
from django.contrib import messages
from comment.conf import settings
from comment.models import Comment
from comment.tests.base import BaseCommentTest, BaseCommentFlagTest, BaseCommentViewTest
from comment.tests.test_utils import BaseAnonymousCommentTest, AnonymousUser, timezone, signing
class CommentViewTestCase(BaseCommentViewTest):
def setUp(self):
super().setUp()
self.all_comments = Comment.objects.all().count()
self.parent_comments = Comment.objects.all_parents().count()
self.data = {
'content': 'comment body',
'app_name': 'post',
'model_name': 'post',
'parent_id': '',
'model_id': self.post_1.id
}
def increase_count(self, parent=False):
if parent:
self.parent_comments += 1
self.all_comments += 1
@staticmethod
def get_create_url():
return reverse('comment:create')
def comment_count_test(self):
self.assertEqual(Comment.objects.all_parents().count(), self.parent_comments)
self.assertEqual(Comment.objects.all().count(), self.all_comments)
def test_create_parent_and_child_comment(self):
self.assertEqual(self.all_comments, 0)
self.assertEqual(self.parent_comments, 0)
# parent comment
response = self.client.post(self.get_create_url(), data=self.data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'comment/comments/base.html')
parent_comment = Comment.objects.get(object_id=self.post_1.id, parent=None)
self.assertEqual(response.context.get('comment').id, parent_comment.id)
self.assertTrue(response.context.get('comment').is_parent)
self.increase_count(parent=True)
self.comment_count_test()
# child comment
data = self.data.copy()
data['parent_id'] = parent_comment.id
response = self.client.post(self.get_create_url(), data=data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'comment/comments/child_comment.html')
child_comment = Comment.objects.get(object_id=self.post_1.id, parent=parent_comment)
self.assertEqual(response.context.get('comment').id, child_comment.id)
self.assertFalse(response.context.get('comment').is_parent)
self.increase_count()
self.comment_count_test()
def test_create_comment_non_ajax_request(self):
response = self.client_non_ajax.post(self.get_create_url(), data=self.data)
self.assertEqual(response.status_code, 400)
def test_create_anonymous_comment(self):
self.client.logout()
settings.COMMENT_ALLOW_ANONYMOUS = True
data = self.data.copy()
data['email'] = 'a@a.com'
response = self.client.post(self.get_create_url(), data=data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTemplateUsed(response, 'comment/comments/base.html')
response_messages = response.context['messages']
for r in response_messages:
self.assertEqual(r.level, messages.INFO)
self.assertEqual(r.message, (
'We have have sent a verification link to your email. The comment will be '
'displayed after it is verified.'
)
)
# no change in comment count
self.comment_count_test()
class TestEditComment(BaseCommentViewTest):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.comment = cls.create_comment(cls.content_object_1)
cls.init_content = cls.comment.content
def test_edit_comment(self):
comment = self.create_comment(self.content_object_1)
self.client.force_login(comment.user)
self.assertEqual(Comment.objects.all().count(), 2)
data = {
'content': 'parent comment was edited',
'app_name': 'post',
'model_name': 'post',
'model_id': self.post_1.id
}
get_url = self.get_url('comment:edit', comment.id, data)
self.assertEqual(comment.content, 'comment 2')
response = self.client.get(get_url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed('comment/comments/update_comment.html')
self.assertEqual(response.context['comment_form'].instance.id, comment.id)
post_url = self.get_url('comment:edit', comment.id)
response = self.client.post(post_url, data=data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed('comment/comments/comment_content.html')
comment.refresh_from_db()
self.assertEqual(comment.content, data['content'])
data['content'] = ''
with self.assertRaises(ValueError) as error:
self.client.post(
self.get_url('comment:edit', comment.id), data=data, HTTP_X_REQUESTED_WITH='XMLHttpRequest'
)
self.assertIsInstance(error.exception, ValueError)
def test_cannot_edit_comment_by_different_user(self):
comment = self.comment
self.client.force_login(self.user_2)
data = {
'content': 'parent comment was edited',
'app_name': 'post',
'model_name': 'post',
'model_id': self.post_1.id
}
self.assertEqual(comment.user.username, self.user_1.username)
response = self.client.get(self.get_url('comment:edit', comment.id), data=data)
self.assertEqual(response.status_code, 403)
self.assertEqual(response.reason_phrase, 'Forbidden')
response = self.client.post(self.get_url('comment:edit', comment.id), data=data)
self.assertEqual(response.status_code, 403)
self.assertEqual(response.reason_phrase, 'Forbidden')
class TestDeleteComment(BaseCommentViewTest):
def response_fails_test(self, response):
self.assertEqual(response.status_code, 403)
self.assertEqual(response.reason_phrase, 'Forbidden')
def test_delete_comment(self):
comment = self.create_comment(self.content_object_1)
self.client.force_login(comment.user)
init_count = Comment.objects.all().count()
self.assertEqual(init_count, 1)
get_url = self.get_url('comment:delete', comment.id, self.data)
response = self.client.get(get_url, data=self.data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'comment/comments/comment_modal.html')
self.assertContains(response, 'html_form')
response = self.client.post(self.get_url('comment:delete', comment.id), data=self.data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'comment/comments/base.html')
self.assertNotContains(response, 'html_form')
self.assertRaises(Comment.DoesNotExist, Comment.objects.get, id=comment.id)
self.assertEqual(Comment.objects.all().count(), init_count-1)
def test_delete_comment_by_moderator(self):
comment = self.create_comment(self.content_object_1)
self.client.force_login(self.moderator)
self.assertEqual(int(self.client.session['_auth_user_id']), self.moderator.id)
self.assertTrue(self.moderator.has_perm('comment.delete_flagged_comment'))
self.assertEqual(comment.user, self.user_1)
init_count = Comment.objects.count()
self.assertEqual(init_count, 1)
# moderator cannot delete un-flagged comment
response = self.client.post(self.get_url('comment:delete', comment.id), data=self.data)
self.assertEqual(response.status_code, 403)
# moderator can delete flagged comment
settings.COMMENT_FLAGS_ALLOWED = 1
self.create_flag_instance(self.user_1, comment)
self.create_flag_instance(self.user_2, comment)
response = self.client.post(self.get_url('comment:delete', comment.id), data=self.data)
self.assertEqual(response.status_code, 200)
self.assertRaises(Comment.DoesNotExist, Comment.objects.get, id=comment.id)
def test_delete_comment_by_admin(self):
comment = self.create_comment(self.content_object_1)
self.client.force_login(self.admin)
self.assertEqual(int(self.client.session['_auth_user_id']), self.admin.id)
self.assertTrue(self.admin.groups.filter(name='comment_admin').exists())
self.assertEqual(comment.user, self.user_1)
init_count = Comment.objects.count()
self.assertEqual(init_count, 1)
# admin can delete any comment
response = self.client.post(self.get_url('comment:delete', comment.id), data=self.data)
self.assertEqual(response.status_code, 200)
self.assertEqual(Comment.objects.count(), init_count - 1)
def test_cannot_delete_comment_by_different_user(self):
comment = self.create_comment(self.content_object_1)
self.client.force_login(self.user_2)
self.assertEqual(comment.content, 'comment 1')
self.assertEqual(comment.user.username, self.user_1.username)
init_count = Comment.objects.all().count()
self.assertEqual(init_count, 1)
# test GET request
response = self.client.get(self.get_url('comment:delete', comment.id), data=self.data)
self.response_fails_test(response)
# test POST request
response = self.client.post(self.get_url('comment:delete', comment.id), data=self.data)
self.response_fails_test(response)
class SetReactionViewTest(BaseCommentViewTest):
def setUp(self):
super().setUp()
self.comment = self.create_comment(self.content_object_1)
@staticmethod
def get_reaction_url(obj_id, action):
return reverse('comment:react', kwargs={
'pk': obj_id,
'reaction': action
})
def test_set_reaction_for_authenticated_users(self):
"""Test whether users can create/change reactions using view"""
_url = self.get_reaction_url(self.comment.id, 'like')
response = self.client.post(_url)
data = {
'status': 0,
'likes': 1,
'dislikes': 0,
'msg': 'Your reaction has been updated successfully'
}
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertDictEqual(response.json(), data)
def test_set_reaction_for_old_comments(self):
"""Test backward compatibility for this update"""
_url = self.get_reaction_url(self.comment.id, 'like')
# delete the reaction object
self.comment.reaction.delete()
response = self.client.post(_url)
data = {
'status': 0,
'likes': 1,
'dislikes': 0,
'msg': 'Your reaction has been updated successfully'
}
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertDictEqual(response.json(), data)
def test_set_reaction_for_unauthenticated_users(self):
"""Test whether unauthenticated users can create/change reactions using view"""
_url = self.get_reaction_url(self.comment.id, 'dislike')
self.client.logout()
response = self.client.post(_url)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response.url, '{}?next={}'.format(settings.LOGIN_URL, _url))
def test_get_request(self):
"""Test whether GET requests are allowed or not"""
_url = self.get_reaction_url(self.comment.id, 'like')
response = self.client.get(_url)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_non_ajax_requests(self):
"""Test response if non AJAX requests are sent"""
_url = self.get_reaction_url(self.comment.id, 'like')
response = self.client_non_ajax.post(_url)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_incorrect_comment_id(self):
"""Test response when an incorrect comment id is passed"""
_url = self.get_reaction_url(102_876, 'like')
response = self.client.post(_url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_incorrect_reaction(self):
"""Test response when incorrect reaction is passed"""
_url = self.get_reaction_url(self.comment.id, 'likes')
response = self.client.post(_url)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
# test incorrect type
_url = self.get_reaction_url(self.comment.id, 1)
response = self.client.post(_url)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class SetFlagViewTest(BaseCommentFlagTest):
def setUp(self):
super().setUp()
self.flag_data.update({
'info': ''
})
self.response_data = {
'status': 1
}
def test_set_flag_for_flagging(self):
_url = self.get_url('comment:flag', self.comment.id)
self.flag_data['reason'] = 1
response = self.client.post(_url, data=self.flag_data)
response_data = {
'status': 0,
'flag': 1,
'msg': 'Comment flagged'
}
self.assertEqual(response.status_code, 200)
self.assertDictEqual(response.json(), response_data)
@patch.object(settings, 'COMMENT_FLAGS_ALLOWED', 0)
def test_set_flag_when_flagging_not_enabled(self):
settings.COMMENT_FLAGS_ALLOWED = 0
_url = self.get_url('comment:flag', self.comment.id)
self.flag_data['reason'] = 1
response = self.client.post(_url, data=self.flag_data)
self.assertEqual(response.status_code, 403)
def test_set_flag_for_flagging_old_comments(self):
"""Test backward compatibility for this update"""
_url = self.get_url('comment:flag', self.comment.id)
data = self.flag_data
# delete the flag object
self.comment.flag.delete()
response = self.client.post(_url, data=data)
response_data = {
'status': 0,
'flag': 1,
'msg': 'Comment flagged'
}
self.assertEqual(response.status_code, 200)
self.assertDictEqual(response.json(), response_data)
def test_set_flag_for_unflagging(self):
# un-flag => no reason is passed and the comment must be already flagged by the user
_url = self.get_url('comment:flag', self.comment_2.id)
data = {}
response = self.client.post(_url, data=data)
response_data = {
'status': 0,
'msg': 'Comment flag removed'
}
self.assertEqual(response.status_code, 200)
self.assertDictEqual(response.json(), response_data)
def test_set_flag_for_unauthenticated_user(self):
"""Test whether unauthenticated user can create/delete flag using view"""
url = self.get_url('comment:flag', self.comment.id).replace('?', '')
self.client.logout()
response = self.client.post(url, data=self.flag_data)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response.url, '{}?next={}'.format(settings.LOGIN_URL, url))
def test_get_request(self):
"""Test whether GET requests are allowed or not"""
url = self.get_url('comment:flag', self.comment.id)
response = self.client.get(url, data=self.flag_data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_non_ajax_requests(self):
"""Test response if non AJAX requests are sent"""
url = self.get_url('comment:flag', self.comment.id)
response = self.client_non_ajax.post(url, data=self.flag_data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_incorrect_comment_id(self):
"""Test response when an incorrect comment id is passed"""
url = self.get_url('comment:flag', 102_876)
response = self.client.post(url, data=self.flag_data)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_incorrect_reason(self):
"""Test response when incorrect reason is passed"""
url = self.get_url('comment:flag', self.comment.id)
data = self.flag_data
reason = -1
data.update({'reason': reason})
response = self.client.post(url, data=data)
self.assertEqual(response.status_code, 200)
class ChangeFlagStateViewTest(BaseCommentFlagTest):
@patch.object(settings, 'COMMENT_FLAGS_ALLOWED', 1)
def setUp(self):
super().setUp()
self.data = {
'state': self.comment.flag.REJECTED
}
self.create_flag_instance(self.user_1, self.comment, **self.flag_data)
self.create_flag_instance(self.user_2, self.comment, **self.flag_data)
def test_change_flag_state_for_unflagged_comment(self):
self.comment.flag.toggle_flagged_state()
self.assertFalse(self.comment.is_flagged)
self.client.force_login(self.moderator)
self.assertEqual(int(self.client.session['_auth_user_id']), self.moderator.id)
response = self.client.post(self.get_url('comment:flag-change-state', self.comment.id), data=self.data)
self.assertEqual(response.status_code, 400)
def test_change_flag_state_by_not_permitted_user(self):
self.assertTrue(self.comment.is_flagged)
self.client.force_login(self.user_1)
self.assertEqual(int(self.client.session['_auth_user_id']), self.user_1.id)
response = self.client.post(self.get_url('comment:flag-change-state', self.comment.id), data=self.data)
self.assertEqual(response.status_code, 403)
def test_change_flag_state_with_wrong_state_value(self):
self.assertTrue(self.comment.is_flagged)
self.client.force_login(self.moderator)
self.assertEqual(int(self.client.session['_auth_user_id']), self.moderator.id)
self.assertEqual(self.comment.flag.state, self.comment.flag.FLAGGED)
# valid state is REJECTED and RESOLVED
self.data['state'] = self.comment.flag.UNFLAGGED
response = self.client.post(self.get_url('comment:flag-change-state', self.comment.id), data=self.data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()['state'], 0)
self.assertEqual(self.comment.flag.state, self.comment.flag.FLAGGED)
def test_change_flag_state_success(self):
self.assertTrue(self.comment.is_flagged)
self.client.force_login(self.moderator)
self.assertEqual(int(self.client.session['_auth_user_id']), self.moderator.id)
self.assertEqual(self.comment.flag.state, self.comment.flag.FLAGGED)
# valid state is REJECTED and RESOLVED
self.data['state'] = self.comment.flag.REJECTED
response = self.client.post(self.get_url('comment:flag-change-state', self.comment.id), data=self.data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()['state'], self.comment.flag.REJECTED)
self.comment.flag.refresh_from_db()
self.assertEqual(self.comment.flag.moderator, self.moderator)
self.assertEqual(self.comment.flag.state, self.comment.flag.REJECTED)
class ConfirmCommentViewTest(BaseAnonymousCommentTest, BaseCommentTest):
def setUp(self):
super().setUp()
# although this will work for authenticated users as well.
self.client.logout()
self.request.user = AnonymousUser()
self.init_count = Comment.objects.all().count()
self.template_1 = 'comment/anonymous/discarded.html'
self.template_2 = 'comment/comments/messages.html'
self.template_3 = 'comment/bootstrap.html'
def get_url(self, key=None):
if not key:
key = self.key
return reverse('comment:confirm-comment', args=[key])
def template_used_test(self, response):
self.assertTemplateUsed(response, self.template_1)
self.assertTemplateUsed(response, self.template_2)
self.assertTemplateUsed(response, self.template_3)
def test_bad_signature(self):
key = self.key + 'invalid'
response = self.client.get(self.get_url(key))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(Comment.objects.all().count(), self.init_count)
self.template_used_test(response)
response_messages = response.context['messages']
self.assertEqual(len(response_messages), 1)
for r in response_messages:
self.assertEqual(r.level, messages.ERROR)
self.assertEqual(r.message, 'The link seems to be broken.')
def test_comment_exists(self):
comment_dict = self.comment_obj.to_dict().copy()
comment = self.create_anonymous_comment(posted=timezone.now(), email='a@a.com')
init_count = self.init_count + 1
comment_dict.update({
'posted': str(comment.posted),
'email': comment.email
})
key = signing.dumps(comment_dict)
response = self.client.get(self.get_url(key))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(Comment.objects.all().count(), init_count)
self.template_used_test(response)
response_messages = response.context['messages']
self.assertEqual(len(response_messages), 1)
for r in response_messages:
self.assertEqual(r.level, messages.WARNING)
self.assertEqual(r.message, 'The comment has already been verified.')
def test_success(self):
response = self.client.get(self.get_url())
comment = Comment.objects.get(email=self.comment_obj.email, posted=self.time_posted)
self.assertEqual(Comment.objects.all().count(), self.init_count + 1)
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response.url, comment.get_url(self.request))
|
Shirhussain/RTL-Blog-with-multiple-user
|
comment/tests/test_views.py
|
test_views.py
|
py
| 22,289
|
python
|
en
|
code
| 2
|
github-code
|
6
|
7937970870
|
from PIL import Image, ImageDraw, ImageFont, ImageColor, ImageEnhance
import requests
import io
import json
from rembg import remove, new_session
import uvicorn
from fastapi import FastAPI, Request
from fastapi.responses import FileResponse, Response
app = FastAPI()
def ReduceOpacity(im, opacity):
"""
Returns an image with reduced opacity.
Taken from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/362879
"""
assert opacity >= 0 and opacity <= 1
if im.mode != 'RGBA':
im = im.convert('RGBA')
else:
im = im.copy()
alpha = im.split()[3]
alpha = ImageEnhance.Brightness(alpha).enhance(opacity)
im.putalpha(alpha)
return im
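# Illustrative usage: ReduceOpacity(img, 0.5) returns an RGBA copy whose alpha
# channel is scaled to 50%; the input image itself is left untouched.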
class Template:
def __init__(self, template):
path = f'templates/{template}.json'
self.template = json.load(open(path))
self.width = self.template['size']['width']
self.height = self.template['size']['height']
def calcXY(self, element):
x = element['position']['x']
y = element['position']['y']
width = element['size']['width']
height = element['size']['height']
if x == 'center':
x = (self.width - width) / 2
if y == 'center':
y = (self.height - height) / 2
return x, y
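    # e.g. centering a 200x100 element on an 800x600 template resolves to
    # (x, y) = (300.0, 250.0); note the result may be a float.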
def calcXYText(self, element, content, font):
x = element['position']['x']
y = element['position']['y']
image = Image.new(mode="RGB", size=(self.width, self.height))
box = ImageDraw.Draw(image)
_, _, w, d = box.textbbox((0, 0), content, font=font)
if x == 'center':
x = (self.width - w) / 2
if y == 'center':
y = (self.height - d) / 2
return x, y, w, d
    def render(self, kwargs) -> Image:
        background = None
        if self.template['name'] not in kwargs:
            raise Exception(f"Missing {self.template['name']}")
        if self.template['type'] == 'image':
            background = Image.open(io.BytesIO(requests.get(kwargs[self.template['name']]).content)).convert("RGBA")
            background = background.resize((self.template['size']['width'], self.template['size']['height']))
        if self.template['type'] == 'solid':
            background = Image.new(mode='RGBA', size=(self.template['size']['width'], self.template['size']['height']), color=ImageColor.getrgb(f"#{kwargs[self.template['name']]}"))
        for element in self.template['elements']:
            if element['name'] not in kwargs:
                raise Exception(f"Missing {element['name']}")
            # Render text
            element['opacity'] = element.get('opacity', 1)
            if element['type'] == 'text':
                img_text = Image.new("RGBA", background.size, (255,255,255,0))
                if f'{element["name"]}_color' in kwargs:
                    element['font']['color'] = '#' + kwargs[f'{element["name"]}_color']
                content: str = kwargs[element['name']]
font = ImageFont.truetype(font='fonts/' + element['font']['family'], size=element['font']['size'])
draw = ImageDraw.Draw(img_text)
x, y, w, d = self.calcXYText(element=element, content=content, font=font)
if w > self.width:
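                    # Greedy two-line wrap: peel words off the tail until the
                    # first line fits, then draw the remainder on a second line.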
contents = content.split(" ")
index : int = -1
while True:
left = " ".join(contents[0:index])
x, y, w, d = self.calcXYText(element=element, content=left, font=font)
if w > self.width:
index -= 1
else:
right = " ".join(contents[index:])
draw.text(xy=(x, y), text=left, font=font, fill=ImageColor.getrgb(element['font']['color']) + (int(element['opacity'] * 255),))
draw.text(xy=(x, y + d), text=right, font=font, fill=ImageColor.getrgb(element['font']['color']) + (int(element['opacity'] * 255),))
background = Image.alpha_composite(background, img_text)
break
else:
draw.text(xy=(x, y), text=content, font=font, fill=ImageColor.getrgb(element['font']['color']) + (int(element['opacity'] * 255),))
background = Image.alpha_composite(background, img_text)
# Render image
if element['type'] == 'image':
                url = kwargs[element['name']]
image = Image.open(io.BytesIO(requests.get(url).content)).convert("RGBA")
image = image.resize((element['size']['width'], element['size']['height']))
x, y = self.calcXY(element=element)
# new_image = Image.new("RGBA", image.size)
# opacity = int(255 * element['opacity'])
overlay_image = Image.new("RGBA", background.size)
                overlay_image.paste(image, (int(x), int(y)))  # PIL paste needs integer coordinates
overlay_opacity = element['opacity'] # Adjust this value as needed for the desired overlay opacity
# Create a copy of the overlay image with adjusted opacity
overlay_with_opacity = Image.new("RGBA", overlay_image.size)
for x in range(overlay_image.width):
for y in range(overlay_image.height):
r, g, b, a = overlay_image.getpixel((x, y))
overlay_with_opacity.putpixel((x, y), (r, g, b, int(a * overlay_opacity)))
# temp_image = temp_image.resize(temp_image.size, Image.ANTIALIAS)
# # Create a new alpha channel with the specified opacity
# alpha = Image.new("L", image.size, opacity)
# image.putalpha(int(element['opacity'] * 255))
# image = ReduceOpacity(image, element['opacity'])
# print(int(element['opacity'] * 255))
# Combine the color channels of the original image with the new alpha channel
# image = Image.alpha_composite(new_image, Image.merge("RGBA", image.split()[:-1] + (alpha,)))
# background.paste(image, (x, y))
background = Image.alpha_composite(background, overlay_with_opacity)
# Render image with remove background
if element['type'] == 'remove-background':
                url = kwargs[element['name']]
image = Image.open(io.BytesIO(requests.get(url).content)).convert("RGBA")
image = image.resize((element['size']['width'], element['size']['height']))
remove = RemoveBackground('u2net')
remove.load_session()
x, y = self.calcXY(element=element)
image = remove.remove(image=image)
overlay_image = Image.new("RGBA", background.size)
                overlay_image.paste(image, (int(x), int(y)))  # PIL paste needs integer coordinates
overlay_opacity = element['opacity'] # Adjust this value as needed for the desired overlay opacity
# Create a copy of the overlay image with adjusted opacity
overlay_with_opacity = Image.new("RGBA", overlay_image.size)
for x in range(overlay_image.width):
for y in range(overlay_image.height):
r, g, b, a = overlay_image.getpixel((x, y))
overlay_with_opacity.putpixel((x, y), (r, g, b, int(a * overlay_opacity)))
background = Image.alpha_composite(background, overlay_with_opacity)
return background
class RemoveBackground:
def __init__(self, model: str) -> None:
self.model = model
def load_session(self) -> None:
self.session = new_session(self.model)
def remove(self, image: Image) -> Image:
return remove(data=image, session=self.session, alpha_matting=True, post_process_mask=True, alpha_matting_foreground_threshold=270 ,alpha_matting_background_threshold=20, alpha_matting_erode_size=11)
"""
if __name__ == '__main__':
test = Template('templates/template.json')
title = 'Logo car'
product = 'https://joytoyfigure.com/wp-content/uploads/2022/09/Warhammer-40K.jpg'
logo = "https://cdn.joytoyfigure.com/wp-content/uploads/2023/01/joytoy-figure-New-logo.png"
background = 'https://www.ledr.com/colours/white.jpg'
image = test.render(background=background, title=title, logo=logo, product=product)
image.save('a.png')
"""
@app.get('/image.webp')
def get(request: Request):
try:
params = request.query_params
template = Template(params['template'])
image = template.render(params)
imageByte = io.BytesIO()
image.save(imageByte, format='WebP', quality=85, lossless=False)
headers = {"Content-Disposition": f'''inline; filename="{params['name'] if 'name' in params else "image"}.webp"'''}
# image.save(imageByte, format='PNG')
return Response(imageByte.getvalue(), headers=headers)
except Exception as e:
return Response(str(e))
if __name__ == '__main__':
uvicorn.run("main:app", host='0.0.0.0', port=80, reload=True)
# def main(url: str, title_text: str | None = '') -> None:
# background = Image.open(io.BytesIO(requests.get(url=url, stream=True).content))
# title_size = 1
# title_font = ImageFont.truetype(font='Montserrat-Black.ttf', size=title_size)
# draw = ImageDraw.Draw(background)
# while True:
# _, _, w, _ = draw.textbbox((0, 0), text=title_text, font=title_font)
# if w < 0.5 * background.width:
# title_size += 1
# title_font = ImageFont.truetype(font='Montserrat-Black.ttf', size=title_size)
# else:
# break
# draw.text(xy=((background.width - w)/ 2 + 4, 1), text=title_text, font=title_font, fill=ImageColor.getrgb("#9E9E9E"))
# draw.text(xy=((background.width - w)/ 2, 1), text=title_text, font=title_font, fill=ImageColor.getrgb("#8C0000"))
# print(f"Text width: {w}")
# print(f"Height: {background.height}")
# print(f"Width: {background.width}")
# background.save('test.png')
# if __name__ == '__main__':
# main(url='https://img.freepik.com/premium-photo/abstract-rainbow-colorful-bright-feather-closeup-up-macro-view-background-plumage-texture-with-dew-drops_753134-644.jpg', title_text="aaaaaaaaaaaaa")
|
kimtrietvan/Image-text-template
|
main.py
|
main.py
|
py
| 10,449
|
python
|
en
|
code
| 0
|
github-code
|
6
|
36294713470
|
# January 1, 2016 was a Friday. Given two numbers a and b, complete the function solution so it returns the day of the week for 2016-a-b. The day names, Sunday through Saturday, are SUN, MON, TUE, WED, THU, FRI, SAT.
# For example, if a=5 and b=24, May 24 is a Tuesday, so return the string "TUE".
# Constraints
# 2016 is a leap year.
# The date 2016-a-b actually exists (inputs like month 13 day 26 or February 45 are never given).
# jan = 31
# feb = 29
# march = 31
# apr = 30
# may = 31
# june = 30
# july = 31
# august = 31
# september = 30
# october = 31
# november = 30
# december = 31
# Approach 1
# def solution(a, b):
# day = ['FRI', 'SAT', 'SUN', 'MON', 'TUE', 'WED', 'THU']
# date = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
# return day[(sum(date[:a-1]) + b) % 7 - 1]
# Approach 2, adapted from other people's solutions:
# use the datetime library
# Step 1
# print(datetime.datetime(2016, a, b))
# Step 2
# print(datetime.datetime(2016, a, b).weekday())
import datetime
def solution(a, b):
day = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
return day[datetime.datetime(2016, a, b).weekday()]
a = 5
b = 24
print(solution(a, b))  # expected output: TUE
|
minkimhere/algorithm_python
|
13_2016.py
|
13_2016.py
|
py
| 1,324
|
python
|
ko
|
code
| 0
|
github-code
|
6
|
6634251613
|
def valid(strs):
    # Return 1 if every bracket in strs is properly matched and nested, else 0.
    if strs is None or len(strs) == 0:
        return 1
    pairs = {")": "(", "]": "[", "}": "{"}
    hstack = []
    for ch in strs:
        if ch in ("(", "[", "{"):
            hstack.append(ch)
        elif ch in pairs:
            # A closer must match the most recent unmatched opener.
            if not hstack or hstack.pop() != pairs[ch]:
                return 0
    # Any leftover openers mean the string is unbalanced.
    return 1 if not hstack else 0
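# Quick illustrative checks: valid("()[]{}") -> 1, valid("(]") -> 0, valid("()(") -> 0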
s = input()
print(valid(s))
|
rh01/gofiles
|
lcode1-99/ex65/main.py
|
main.py
|
py
| 727
|
python
|
en
|
code
| 0
|
github-code
|
6
|
11472746542
|
import json
from typing import List, Dict, Any
def extract_json(string: str) -> List[Dict[str, Any]]:
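    # Scan for balanced top-level {...} spans by counting braces, then keep only
    # the spans that parse as JSON. Caveat: braces inside JSON string literals
    # are not accounted for by this simple counter.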
json_strings = []
json_objects = []
open_brackets = 0
start_index = None
for index, char in enumerate(string):
if char == '{':
open_brackets += 1
if open_brackets == 1:
start_index = index
elif char == '}':
open_brackets -= 1
if open_brackets == 0:
json_strings.append(string[start_index : index + 1])
for json_str in json_strings:
try:
json_object = json.loads(json_str)
json_objects.append(json_object)
        except json.JSONDecodeError:
            pass  # skip candidate spans that are not valid JSON
return json_objects
def loads_or_none(s: str):
    # Parse a JSON string, returning None instead of raising on failure.
    try:
        return json.loads(s)
    except (json.JSONDecodeError, TypeError):
        return None
# input_string = "博士:兔兔你好。{\"reply\": \"你好博士。\", \"mental\": \"\", \"activity\": \"\"}"
# extracted_json = extract_json(input_string)
# print(extracted_json)
|
hsyhhssyy/amiyabot-arknights-hsyhhssyy-maa
|
utils/string_operation.py
|
string_operation.py
|
py
| 1,033
|
python
|
en
|
code
| 3
|
github-code
|
6
|
35141470185
|
#!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst") as history_file:
history = history_file.read()
with open("requirements.txt") as requirements_file:
requirements = requirements_file.read().splitlines()
setup_requirements = [
"pytest-runner",
]
test_requirements = [
"pytest>=3",
]
setup(
author="Harsh Parekh",
author_email="harsh_parekh@outlook.com",
python_requires=">=3.6",
classifiers=[
"Framework :: Flask",
"Development Status :: 2 - Pre-Alpha",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Software Development :: Libraries :: Python Modules",
],
description="Rest API on Flask made a little too easy.",
install_requires=requirements,
license="MIT license",
long_description=readme + "\n\n" + history,
include_package_data=True,
keywords="flask_easyapi",
name="flask_easyapi",
packages=find_packages(include=["flask_easyapi", "flask_easyapi.*"]),
setup_requires=setup_requirements,
test_suite="tests",
tests_require=test_requirements,
url="https://github.com/hXtreme/flask-easyapi",
version="0.1.1",
zip_safe=False,
)
|
hXtreme/flask-easyapi
|
setup.py
|
setup.py
|
py
| 1,725
|
python
|
en
|
code
| 2
|
github-code
|
6
|
3288249384
|
import tkinter as tk
from PIL import Image, ImageTk
from .constants import *
from utility.statistics import StatisticsKeys
class LoadStep:
def __init__(self, gui):
self.gui = gui
self.loading_steps_frame = tk.Frame(self.gui.root, bg='#d9ddff')
self.next_state_button = tk.Button(self.loading_steps_frame, text='Next', bg='grey', command=self.button_next)
self.previous_state_button = tk.Button(self.loading_steps_frame, text='Previous', bg='grey', state=tk.DISABLED)
self.loading_steps_title = tk.Label(self.loading_steps_frame, text="Load step", bg=TITLE_COLOR, justify="center", font=TITLE_FONT)
# button utils
def button_previous(self):
self.gui.production_engine.previous()
self.gui.print_current_state()
self.place()
def button_next(self):
self.gui.production_engine.next()
self.gui.print_current_state()
self.place()
    def update_buttons(self):
        # Reconfigure the existing button rather than creating a new widget on
        # every call, which would leave orphaned tkinter widgets behind.
        if self.gui.production_engine.current_index() <= 0:
            self.previous_state_button.config(state=tk.DISABLED)
        else:
            self.previous_state_button.config(state=tk.NORMAL, command=self.button_previous)
        self.place()
def place(self):
self.loading_steps_frame.place(relx=1 - MAIN_GRAPH_OFFSET, rely=MAIN_GRAPH_OFFSET, relwidth=LOADING_STEPS_WIDTH, relheight=LOADING_STEPS_HEIGHT, anchor='ne')
self.loading_steps_title.place(relwidth=1, relheight=0.4)
self.next_state_button.place(relx = 0.5, rely=0.4, relwidth=0.5, relheight=0.6)
self.previous_state_button.place(rely=0.4, relwidth=0.5, relheight=0.6)
|
kn2282/TIAG_Project_Python
|
src/gui/load_step.py
|
load_step.py
|
py
| 1,742
|
python
|
en
|
code
| 0
|
github-code
|
6
|
4759197935
|
"""Test the gambit.sigs.convert module."""
import pytest
import numpy as np
from gambit.sigs.convert import dense_to_sparse, sparse_to_dense, can_convert, \
check_can_convert, convert_dense, convert_sparse
from gambit.kmers import KmerSpec
from gambit.test import random_seq
def test_dense_sparse_conversion():
"""Test conversion between dense and sparse representations of k-mer coordinates."""
for k in range(1, 10):
kspec = KmerSpec(k, 'ATGAC')
# Create dense signature with every 3rd k-mer
vec = np.zeros(kspec.nkmers, dtype=bool)
vec[np.arange(vec.size) % 3 == 0] = True
# Convert to sparse
sig = dense_to_sparse(vec)
assert len(sig) == vec.sum()
for index in sig:
assert vec[index]
# Check sorted
assert np.all(np.diff(sig) > 0)
# Check converting back
assert np.array_equal(vec, sparse_to_dense(kspec, sig))
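# For intuition (illustrative, inferred from the assertions above): dense_to_sparse
# turns a boolean presence vector into the sorted array of its True indices, e.g.
# dense_to_sparse(np.array([True, False, True])) -> [0, 2].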
class TestKmerSpecConversion:
"""Test converting signatures from one KmerSpec to another."""
def test_can_convert(self):
from_kspec = KmerSpec(11, 'ATGAC')
compatible = [
KmerSpec(11, 'ATGAC'),
KmerSpec(8, 'ATGAC'),
KmerSpec(10, 'ATGACA'),
KmerSpec(8, 'ATGACA'),
]
for to_kspec in compatible:
assert can_convert(from_kspec, to_kspec)
check_can_convert(from_kspec, to_kspec)
incompatible = [
KmerSpec(11, 'CAGTA'),
KmerSpec(12, 'ATGAC'),
KmerSpec(11, 'ATGA'),
KmerSpec(11, 'ATGACT'),
]
for to_kspec in incompatible:
assert not can_convert(from_kspec, to_kspec)
with pytest.raises(ValueError):
check_can_convert(from_kspec, to_kspec)
@pytest.fixture(scope='class')
def seqs(self):
np.random.seed(0)
return [random_seq(100_000) for _ in range(100)]
@pytest.mark.parametrize('to_kspec', [
KmerSpec(10, 'ATGAC'), # Reduce k
KmerSpec(8, 'ATGAC'), # Reduce k
KmerSpec(9, 'ATGACGT'), # Extend prefix
KmerSpec(7, 'ATGACGT'), # Extend prefix and reduce k further
])
def test_convert(self, seqs, to_kspec):
from gambit.sigs.calc import calc_signature
from_kspec = KmerSpec(11, 'ATGAC')
for seq in seqs:
from_sig = calc_signature(from_kspec, seq)
from_vec = sparse_to_dense(from_kspec.k, from_sig)
to_vec = convert_dense(from_kspec, to_kspec, from_vec)
to_sig = convert_sparse(from_kspec, to_kspec, from_sig)
found_sig = calc_signature(to_kspec, seq)
assert np.array_equal(to_sig, found_sig)
assert np.array_equal(to_vec, sparse_to_dense(to_kspec.k, found_sig))
|
jlumpe/gambit
|
tests/sigs/test_sigs_convert.py
|
test_sigs_convert.py
|
py
| 2,427
|
python
|
en
|
code
| 16
|
github-code
|
6
|
5469193778
|
import os
import glob
primary=[
r"path1",
r"path2",
r"path3",
r"path4",
r"path5"
]
secondary=[
r"path6",
r"path7"
]
pl=len(primary)
sl=len(secondary)
for q in range(pl):
primary[q]+="\\"
#print(primary[q])
for q in range(sl):
secondary[q]+="\\"
#print(secondary[q])
flag=0
xlsx=".xlsx"
xls=".xls"
def primary_search(pl, xlsx):
    for i in range(pl):
        os.chdir(primary[i])
        tempfilename = primary[i] + "**\\" + filename + xlsx
        l = glob.glob(tempfilename, recursive=True)
        if l:
            print(l)
        else:
            print("No matches found")
def secondary_search(sl, xlsx):
    for i in range(sl):
        os.chdir(secondary[i])
        tempfilename = secondary[i] + "**\\" + filename + xlsx
        l = glob.glob(tempfilename, recursive=True)
        if l:
            print(l)
        else:
            print("No matches found")
#def search(path,len,filetype):
# for i in range(len):
# os.chdir(path[i])
# tempfilename=path[i]+"**\\"+filename+filetype
# l=glob.glob(tempfilename, recursive=True)
# if bool(l)==True:
# print(l)
# if bool(l)==False:
# print("該当なし")
# #ex)search(primary,pl,xls) search(secondary,sl,xlsx)
while flag == 0:
    filename = input("file name?\n")
    print("Searching the primary paths")
    print("Searching for xlsx")
    primary_search(pl, xlsx)
    print("Searching for xls")
    primary_search(pl, xls)
    print("\n")
    exitflag = int(input("Also search older paths: 0, exit: 1\n"))
    if exitflag == 1:
        break
    else:
        print("Searching for xlsx")
        secondary_search(sl, xlsx)
        print("Searching for xls")
        secondary_search(sl, xls)
        print("\n")
    flag = int(input("0:continue\n1:exit\n"))
input("end:enter")
|
kobayu0902art/work_snippets
|
find.py
|
find.py
|
py
| 2,051
|
python
|
en
|
code
| 0
|
github-code
|
6
|
75188751546
|
import sys
sys.stdin = open("inputs/문자열비교.txt", "r")
def match(p, t):
    i = 0  # index into the full text t
    j = 0  # index into the pattern p
    while j < len(p) and i < len(t):
        if t[i] != p[j]:  # on a mismatch
            i = i-j  # move i back to one past where this comparison started
            j = -1  # and restart the pattern comparison from index 0
        i = i+1
        j = j+1
    # Case 1: on a full match, j climbs to len(p) (the whole pattern matched).
    # Case 2: with no match, i eventually climbs to len(t).
    # Either way the while loop terminates.
    # Case 1
    if j == len(p):
        return 1
    # Case 2
    else:
        return 0
T = int(input())
for test_case in range(1, T+1):
p = input()
t = input()
ans = match(p, t)
print(f'#{test_case} {ans}')
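# Added sanity checks for the brute-force matcher (worst case O(len(p) * len(t))):
assert match("abc", "zzabczz") == 1
assert match("abd", "zzabczz") == 0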
|
zacinthepark/Problem-Solving-Notes
|
swea/0816_문자열비교.py
|
0816_문자열비교.py
|
py
| 917
|
python
|
ko
|
code
| 0
|
github-code
|
6
|
36767879089
|
'''
Author : knight_byte
File : A_Patrick_and_Shopping.py
Created on : 2021-04-19 14:15:35
'''
def main():
d = sorted(list(map(int, input().split())))
mi = min(2*(d[0]+d[1]), sum(d))
print(mi)
if __name__ == '__main__':
main()
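# Illustrative trace: for distances "10 20 30", sorted -> [10, 20, 30],
# and the answer is min(2*(10+20), 10+20+30) = 60.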
|
arbkm22/Codeforces-Problemset-Solution
|
Python/A_Patrick_and_Shopping.py
|
A_Patrick_and_Shopping.py
|
py
| 257
|
python
|
en
|
code
| 0
|
github-code
|
6
|
74977722427
|
import logging
from ontobio.io.gafparser import GafParser
from dipper.sources.Source import Source
from dipper.models.assoc.Association import Assoc
from dipper.models.Model import Model
from dipper.models.Reference import Reference
__author__ = 'timputman'
LOG = logging.getLogger(__name__)
class RGD(Source):
"""
Ingest of Rat Genome Database gene to mammalian phenotype gaf file
"""
RGD_BASE = \
'ftp://ftp.rgd.mcw.edu/pub/data_release/annotated_rgd_objects_by_ontology/'
files = {
'rat_gene2mammalian_phenotype': {
'file': 'rattus_genes_mp',
'url': RGD_BASE + 'rattus_genes_mp',
'columns': [ # expected. GAF v2.1
'DB',
'DB Object ID',
'DB Object Symbol',
'Qualifier',
'GO ID',
'DB:Reference (|DB:Reference)',
'Evidence Code',
'With (or) From',
'Aspect',
'DB Object Name',
'DB Object Synonym (|Synonym)',
'DB Object Type',
'Taxon(|taxon)',
'Date',
'Assigned By',
'Annotation Extension',
'Gene Product Form ID',
]
},
}
def __init__(self,
graph_type,
are_bnodes_skolemized,
data_release_version=None):
super().__init__(
graph_type=graph_type,
are_bnodes_skized=are_bnodes_skolemized,
data_release_version=data_release_version,
name='rgd',
ingest_title='Rat Genome Database',
ingest_url='http://rgd.mcw.edu/',
ingest_logo='source-rgd.png',
license_url=None,
data_rights='https://rgd.mcw.edu/wg/disclaimer/',
# file_handle=None
)
self.dataset.set_citation('https://rgd.mcw.edu/wg/citing-rgd/')
def fetch(self, is_dl_forced=False):
"""
Override Source.fetch()
Fetches resources from rat_genome_database via the rat_genome_database ftp site
Args:
:param is_dl_forced (bool): Force download
Returns:
:return None
"""
self.get_files(is_dl_forced)
return
def parse(self, limit=None):
"""
Override Source.parse()
Args:
:param limit (int, optional) limit the number of rows processed
Returns:
:return None
"""
if limit is not None:
LOG.info("Only parsing first %d rows", limit)
rgd_file = '/'.join(
(self.rawdir, self.files['rat_gene2mammalian_phenotype']['file']))
# ontobio gafparser implemented here
p = GafParser()
assocs = p.parse(open(rgd_file, "r"))
for i, assoc in enumerate(assocs):
if 'relation' in assoc.keys():
self.make_association(assoc)
if limit is not None and i > limit:
break
return
def make_association(self, record):
"""
        construct the association
:param record:
:return: modeled association of genotype to mammalian phenotype
"""
model = Model(self.graph)
record['relation']['id'] = self.resolve("has phenotype")
# define the triple
gene = record['subject']['id']
relation = record['relation']['id']
phenotype = record['object']['id']
# instantiate the association
g2p_assoc = Assoc(self.graph, self.name, sub=gene, obj=phenotype, pred=relation)
# add the references
references = record['evidence']['has_supporting_reference']
# created RGDRef prefix in curie map to route to proper reference URL in RGD
references = [
x.replace('RGD', 'RGDRef') if 'PMID' not in x else x for x in references]
if len(references) > 0:
# make first ref in list the source
g2p_assoc.add_source(identifier=references[0])
ref_model = Reference(
self.graph, references[0],
self.globaltt['publication']
)
ref_model.addRefToGraph()
if len(references) > 1:
# create equivalent source for any other refs in list
# This seems to be specific to this source and
# there could be non-equivalent references in this list
for ref in references[1:]:
model.addSameIndividual(sub=references[0], obj=ref)
# add the date created on
g2p_assoc.add_date(date=record['date'])
g2p_assoc.add_evidence(self.resolve(record['evidence']['type'])) # ?set where?
g2p_assoc.add_association_to_graph()
return
|
monarch-initiative/dipper
|
dipper/sources/RGD.py
|
RGD.py
|
py
| 4,818
|
python
|
en
|
code
| 53
|
github-code
|
6
|
4246502703
|
from Model.Animal import animalModel
from Controller.Animal import animalController
from fastapi import APIRouter, Depends, HTTPException
router = APIRouter(
prefix="/animals",
tags=["animals"],
responses={404: {"description": "Not found"}},
)
_animalCon = animalController.animalController()
@router.get("/")
async def root():
return _animalCon.getAllAnimal()
@router.get("/{animalname}")
async def root(animalname : str):
return _animalCon.getAnimalbyName(animalname)
@router.put("/")
async def root(item : animalModel.animal):
return _animalCon.insertAnimal(item)
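# Minimal wiring sketch (assumes the repo's package layout makes this module importable):
if __name__ == "__main__":
    from fastapi import FastAPI
    app = FastAPI()
    app.include_router(router)
    # GET /animals/ lists animals, GET /animals/{animalname} looks one up,
    # PUT /animals/ inserts the posted animal model.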
|
aziznaufal/Mainan
|
Python/PercobaanFastAPI/Route/Animal/AnimalApi.py
|
AnimalApi.py
|
py
| 596
|
python
|
en
|
code
| 0
|
github-code
|
6
|
14489820952
|
import numpy as np
import pandas as pd
import string
def csv():
#Verify .csv files in folder
possible_csvs = ["ByTeacher_DetailData.csv"]
for i in range(1,10):
possible_csvs.append("ByTeacher_DetailData ({}).csv".format(i))
csvs = possible_csvs.copy()
for i in possible_csvs:
try:
pd.read_csv(i, encoding='utf-16')
except FileNotFoundError:
csvs.remove(i)
#Select a .csv file
if len(csvs) == 1: answer = csvs[0]
elif len(csvs) == 0: answer = 0
else:
print("Found {} .csv files:".format(len(csvs)))
print(*csvs, sep='\n')
        while True:
            answer = input("Please select one (by name or number) or type exit\n")
            if answer in csvs: break
            elif answer in ("e", "exit", "Exit"): break
            elif answer.isdigit() and int(answer) in range(1, len(csvs) + 1):
                # Guard with isdigit() so non-numeric input no longer raises ValueError.
                answer = csvs[int(answer) - 1]
                break
            else:
                print("Invalid answer")
                continue
return answer
try:
#Choose which columns to use
usecols = ["Reservation Teacher", "Group ID", "%Attendance MarkedOnTime",
"TeachingTime (ACH)", "%TeacherTaskCompletion", "%PT App Use",
"%SkillTestCompleted", "%Teacher-Led Skills Completion",
"%BS CanDo Completion"]
names = {"Reservation Teacher": "Reservation\nTeacher",
"Group ID": "Group ID",
"%Attendance MarkedOnTime": "Attendance\nMarked On Time (%)",
"TeachingTime (ACH)": "Teaching\nTime (ACH)",
"%TeacherTaskCompletion": "Teacher Task\nCompletion (%)",
"%PT App Use": "PT App\nUse (%)",
"%SkillTestCompleted": "Skill Test\nCompleted (%)",
"%Teacher-Led Skills Completion": "Teacher-Led Skills\nCompletion (%)",
"%BS CanDo Completion": "BS Can Do\nCompletion (%)"}
#Read data into pandas data frames
df = pd.read_csv(csv(), sep='\t', usecols=usecols,
index_col=0, encoding='utf-16')
df.rename(columns=names, inplace=True)
df.fillna('0.0', inplace=True)
df = df[df["Group ID"] == "Total"]
df = df.drop("Group ID", axis=1)
for i in list(df):
if isinstance(df[i][0], str):
df[i] = df[i].str.rstrip("%").astype(float)
writer = pd.ExcelWriter("TD Report.xlsx", engine='xlsxwriter')
df.style.set_properties(**{'text-align': 'center'}).to_excel(writer, sheet_name="Teacher Detail")
#df.style.applymap(lambda _:'text-align: center').to_excel(writer, sheet_name="Teacher Detail")
workbook, worksheet = writer.book, writer.sheets["Teacher Detail"]
workbook.add_format().set_text_wrap()
for column in df:
#column_length = max(df[column].astype(str).max(), len(column))
column_index = df.columns.get_loc(column)
worksheet.set_column(column_index, column_index, len(column))
worksheet.conditional_format('B2:C19', {'type': '3_color_scale',
'min_value': '0',
'max_value': '124.3',
'min_color': '#FF0F0F',
'mid_color': '#FFFF00',
'max_color': '#00F000'})
worksheet.conditional_format('D2:I19', {'type': '3_color_scale',
'min_value': '0',
'mid_value': '50',
'max_value': '100',
'min_color': '#FF0F0F',
'mid_color': '#FFFF00',
'max_color': '#00F000'})
writer.save()
#except (FileNotFoundError, ValueError):
# print("No .csv files found. Exiting program")
except NameError:
print("Exiting program")
|
cameronfantham/EF-KPI-Data-Pre-processing
|
KPI Report Generator/kpi_all (old).py
|
kpi_all (old).py
|
py
| 3,971
|
python
|
en
|
code
| 0
|
github-code
|
6
|
42084381756
|
import logging
import json
import re
from typing import Any
from concurrent.futures import ThreadPoolExecutor
from pymongo import MongoClient, database
from bson.json_util import dumps, loads
from bson.objectid import ObjectId
from backend.conf.confload import config
from backend.models.system import ResponseBasic
from backend.util.webhook_runner import exec_hook_func
import time
log = logging.getLogger(__name__)
class CMDBOSS_db:
def __init__(self):
self.server = config.mongo_server_ip
self.port = config.mongo_server_port
self.username = config.mongo_user
self.password = config.mongo_password
if self.username:
self.raw_connection = MongoClient(
host=self.server,
port=self.port,
username=self.username,
password=self.password,
)
else:
self.raw_connection = MongoClient(
host=self.server,
port=self.port,
)
self.base_connection = self.raw_connection.cmdboss
def run_hooks(self, operation: str, model: str, data: Any):
filter = {
"events.model": model,
"events.operation": operation
}
hooks = self.query(
payload=filter,
model="hooks"
)
if len(hooks["result"]) >= 1:
with ThreadPoolExecutor(config.num_thread_workers) as worker_pool:
executions = []
for hook in hooks["result"]:
if len(hook["events"]) >= 1:
for event in hook["events"]:
if event["operation"] == operation:
log.info(f"run_hooks: Webhook Executing on {operation} model {model}")
send_payload = {
"base64_payload": hook["base64_payload"],
"payload": data
}
execution = worker_pool.submit(exec_hook_func, send_payload)
executions.append(execution)
def get_model_name(self, path):
path_array = path.split("/")
url_parser = {
r"\/table/.*/": -2,
r"\/hooks/.*": -2,
r"\/hooks": -1,
r"\/models/.*": -2,
}
for key in url_parser:
if re.search(key, path):
return path_array[url_parser[key]]
return path_array[-1]
def ingress_parse_object_id(self, data: dict):
if data.get("object_id", False):
data["_id"] = ObjectId(data["object_id"])
del data["object_id"]
return data
def egress_parse_object_id(self, data: list):
if len(data) >= 1:
for idx, obj in enumerate(data, start=0):
obj_id = obj["_id"]["$oid"]
data[idx]["object_id"] = obj_id
del data[idx]["_id"]
return data
def insert_one(self, model, payload: dict):
ret = self.base_connection[model].insert_one(payload)
result = ResponseBasic(status="success", result=[{"object_id": f"{ret.inserted_id}"}]).dict()
return result
def insert_many(self, model, payload: list):
ret = self.base_connection[model].insert_many(payload)
resp_arr = []
for obj in ret.inserted_ids:
resp_arr.append({"object_id": f"{obj}"})
result = ResponseBasic(status="success", result=resp_arr).dict()
return result
def insert(self, model_instance_data: Any, path: str):
""" wrapper for both insert_one and insert_many"""
model_name = self.get_model_name(path)
if isinstance(model_instance_data, list):
req_data = []
for item in model_instance_data:
req_data.append(item.dict())
result = self.insert_many(model=model_name, payload=req_data)
else:
req_data = model_instance_data.dict()
result = self.insert_one(model=model_name, payload=req_data)
self.run_hooks(operation="create", model=model_name, data=result)
return result
def query(self, model: str, payload: dict):
""" wrapper for find with filtering """
cleaned_data = self.ingress_parse_object_id(payload)
ret = self.base_connection[model].find(cleaned_data)
temp_json_result = dumps(ret)
loaded_result = json.loads(temp_json_result)
final_result = self.egress_parse_object_id(loaded_result)
if final_result is None or len(loaded_result) < 1:
final_result = []
result = ResponseBasic(status="success", result=final_result).dict()
return result
def find(self, model: str):
ret = self.base_connection[model].find()
temp_json_result = dumps(ret)
loaded_result = json.loads(temp_json_result)
final_result = self.egress_parse_object_id(loaded_result)
if final_result is None:
final_result = []
result = ResponseBasic(status="success", result=final_result).dict()
return result
def retrieve(self, query_obj: dict, object_id: str, path: str):
""" wrapper for both find and query"""
model_name = self.get_model_name(path)
if query_obj.get("filter", False):
result = self.query(model=model_name, payload=query_obj["filter"])
elif object_id:
query_obj["filter"] = {}
query_obj["filter"]["object_id"] = object_id
result = self.query(model=model_name, payload=query_obj["filter"])
elif object_id is None:
result = self.find(model=model_name)
self.run_hooks(operation="retrieve", model=model_name, data=result)
return result
def delete(self, query: dict, object_id: str, path: str):
final_result = []
model_name = self.get_model_name(path)
if query.get("filter", False):
cleaned_data = self.ingress_parse_object_id(query["filter"])
ret = self.base_connection[model_name].delete_many(cleaned_data)
if ret.deleted_count >= 1:
final_result = [{"deleted_object_count": ret.deleted_count}]
if object_id:
query["filter"] = {}
query["filter"]["object_id"] = object_id
cleaned_data = self.ingress_parse_object_id(query["filter"])
ret = self.base_connection[model_name].delete_many(cleaned_data)
if ret.deleted_count >= 1:
final_result = [{"deleted_object_count": ret.deleted_count}]
result = ResponseBasic(status="success", result=final_result).dict()
self.run_hooks(operation="delete", model=model_name, data=result)
return result
def update(self, model_instance_data: dict, object_id: str, path: str):
final_result = []
model_name = self.get_model_name(path)
set_data = model_instance_data.dict()
new_data = { "$set": set_data }
query = {}
query["filter"] = {}
if object_id:
query["filter"]["object_id"] = object_id
cleaned_data = self.ingress_parse_object_id(query["filter"])
ret = self.base_connection[model_name].update_many(cleaned_data, new_data)
if ret.modified_count >= 1:
final_result = [{"updated_object_count": ret.modified_count}]
result = ResponseBasic(status="success", result=final_result).dict()
self.run_hooks(operation="update", model=model_name, data=result)
return result
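# Illustrative usage (hypothetical model/field names; needs a reachable MongoDB
# and the backend.conf settings this class reads at init):
if __name__ == "__main__":
    db = CMDBOSS_db()
    created = db.insert_one(model="devices", payload={"hostname": "sw1", "site": "lab"})
    found = db.query(model="devices", payload={"hostname": "sw1"})
    print(created, found)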
|
tbotnz/cmdboss
|
backend/cmdboss_db/cmdboss_db.py
|
cmdboss_db.py
|
py
| 7,900
|
python
|
en
|
code
| 22
|
github-code
|
6
|
30783796663
|
"""
https://adventofcode.com/2019/day/8
"""
from typing import List, Tuple, NamedTuple, Iterator
import itertools
import math
Layer = Tuple[Tuple[int]]
class LayerMetrics(NamedTuple):
name: str
count: int
layer_nums: Layer
def grouper(iterable: List[int], size:int) -> Iterator[Tuple[int]]:
iterable = iter(iterable)
while True:
tup = tuple(itertools.islice(iterable, 0, size))
if tup:
yield tup
else:
break
def get_layer(inputs: List[int], width:int, height:int, dig_count: int) -> Iterator[Layer]:
batches = grouper(inputs, width)
layers = grouper(batches, height)
for i, layer in enumerate(layers,1):
yield LayerMetrics(name = f'Layer_{i}',
count = count_digits(layer, dig_count),
layer_nums= layer)
def result_part1(inputs: List[int], width:int, height:int, dig_count:int) -> int:
min_result = math.inf
result = None
for layer in get_layer(inputs, width, height, dig_count):
if layer.count < min_result:
min_result = layer.count
result = mult_digits(layer.layer_nums, 1, 2) # hardcoded for now
return result
def count_digits(layer: Layer, digit:int)-> int:
count = 0
for l in layer:
for d in l:
if d == digit:
count += 1
return count
def mult_digits(layer: Layer, digit1:int, digit2: int)-> int:
count_1 = count_digits(layer, digit1)
count_2 = count_digits(layer, digit2)
return count_1 * count_2
def problem_prep(text_input:str)-> List[int]:
return [int(s) for s in text_input.strip()]
TEST1 = [0,0,3,4,5,6,1,8,9,0,1,2]
assert result_part1(TEST1, width=3, height=2, dig_count=0) == 2
# TEST1 = ((1, 8, 9), (0, 1, 2))
# print(count_digits(TEST1, digit=0))
# print(mult_digits(TEST1, digit1=1, digit2=2))
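# grouper() chunks a flat pixel stream; for example:
assert list(grouper([1, 2, 3, 4, 5, 6], 3)) == [(1, 2, 3), (4, 5, 6)]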
if __name__ == "__main__":
with open("day08_input.txt", 'r') as file:
inputs = problem_prep(file.read())
part1 = result_part1(inputs, width=25, height=6, dig_count=0)
print("1s multiply the 2s in fewest 0s layer", part1)
|
giordafrancis/adventofcode2019
|
day08_image.py
|
day08_image.py
|
py
| 2,140
|
python
|
en
|
code
| 0
|
github-code
|
6
|
23178241186
|
'''
Created by Jianyuan Su
Date: Jan 4, 2019
'''
import re
from tkinter import *
from tkinter import filedialog, messagebox
from GUI import main_page
class StartPage(Frame):
def __init__(self, master, controller):
super().__init__(master)
self.master = master
self.controller = controller
controller.title("InverseBach")
# variables
self.save = IntVar()
self.save.set(0)
self.ly_file = StringVar()
self.ly_file.set("<replace me>")
self.audio_file = StringVar()
self.audio_file.set("<replace me>")
# add directory selection frames
self.ly_sframe = SelectionFrame(self, text="Select .ly score file generating location")
self.ly_sframe.pack()
self.ly_file_label = Label(self, text="Enter name of .ly score file")
self.ly_file_label.pack()
self.ly_file_entry = Entry(self, textvariable=self.ly_file)
self.ly_file_entry.pack()
self.score_sframe = SelectionFrame(self, text="Select music score file generating location")
self.score_sframe.pack()
self.audio_sframe = SelectionFrame(self, text="Select audio file generating location")
self.audio_sframe.pack()
self.audio_file_label = Label(self, text="Enter name of .mid audio file")
self.audio_file_label.pack()
self.audio_file_entry = Entry(self, textvariable=self.audio_file)
self.audio_file_entry.pack()
self.load_settings()
self.save_check = Checkbutton(self, text="save directory settings", variable=self.save,
onvalue=1, offvalue=0)
self.save_check.pack()
self.next_button = Button(self, text="Next", command=self.next_item)
self.next_button.pack()
self.master.bind('n', self.next_item)
self.master.bind('q', quit)
    def next_item(self, event=None):
        # event is supplied by the 'n' key binding and is unused here.
error_entries = self.get_error_entries()
if self.display_error(error_entries) == 1:
return
self.controller.ly_file = self.ly_file.get() + '.ly'
self.controller.audio_file = self.audio_file.get() + '.mid'
self.ly_file_entry.config(bg='white')
self.audio_file_entry.config(bg='white')
self.save_settings()
self.controller.show_frame(main_page.MainPage)
def display_error(self, error_entries):
if len(error_entries) == 0:
return 0
err_m = "The contents in these entries are invalid\n"
entry_name = {self.ly_file_entry: ".ly music score file name",
self.audio_file_entry: ".mid audio score file name",
self.ly_sframe.entry: ".ly music score directory",
self.score_sframe.entry: ".pdf music score directory",
self.audio_sframe.entry: ".mid audio file directory"
}
for entry in error_entries:
entry.configure(bg='red')
entry.delete(0, 'end')
err_m += entry_name[entry] + '\n'
messagebox.showerror("Error", err_m)
return 1
    def load_settings(self):
        try:
            with open('settings/start.setting', 'r') as fi:
                self.ly_sframe.set_dir(fi.readline()[0:-1])
                self.score_sframe.set_dir(fi.readline()[0:-1])
                self.audio_sframe.set_dir(fi.readline()[0:-1])
                self.ly_file.set(fi.readline()[0:-1])
                self.audio_file.set(fi.readline()[0:-1])
        except FileNotFoundError:
            return
    def save_settings(self):
        if self.save.get() == 1:
            with open('settings/start.setting', 'w') as fo:
                s = f'{self.ly_sframe.get_dir()}\n{self.score_sframe.get_dir()}\n' \
                    f'{self.audio_sframe.get_dir()}\n{self.ly_file.get()}\n{self.audio_file.get()}\n'
                fo.write(s)
def get_error_entries(self):
ret = []
if self.ly_sframe.get_dir() == '':
ret.append(self.ly_sframe.entry)
if self.score_sframe.get_dir() == '':
ret.append(self.score_sframe.entry)
if self.audio_sframe.get_dir() == '':
ret.append(self.audio_sframe.entry)
if not self.entry_check(self.ly_file.get()):
ret.append(self.ly_file_entry)
if not self.entry_check(self.audio_file.get()):
ret.append(self.audio_file_entry)
return ret
@staticmethod
def entry_check(var):
if len(var) == 0 or re.search(r'[^A-Za-z0-9_\-\\]', var):
return False
return True
def reset_bgcolor(self, entry):
if entry.cget('bg') == 'red':
entry.configure(bg='white')
class SelectionFrame(Frame):
def __init__(self, master, text="", *args, **kwargs):
self.master = master
super().__init__(master, *args, **kwargs)
self.create_widgets(text)
def create_widgets(self, text):
self.dir = StringVar()
self.tmp_dir = ''
self.label = Label(self, text=text)
self.label.pack()
self.frame = Frame(self)
self.frame.pack()
self.entry = Entry(self.frame, textvariable=self.dir, state='disabled')
self.entry.grid(row=0, column=0)
self.button = Button(self.frame, text="Select Directory",
command=self.select_directory)
self.button.grid(row=0, column=1)
def select_directory(self):
name = filedialog.askdirectory()
self.dir.set(name)
def set_dir(self, d):
self.dir.set(d)
def get_dir(self):
return self.dir.get()
|
haohao1331/InverseBach
|
GUI/start_page.py
|
start_page.py
|
py
| 5,589
|
python
|
en
|
code
| 0
|
github-code
|
6
|
74788996347
|
# Task 1
# my_string = str(input("Enter the string:\n"))
# change_character = my_string[0].lower()
# new_string = my_string[0] + my_string[1:].lower().replace(change_character, "$")
# print(new_string)
# Task 2
"""
2) Add 'ing' at the end of a given string (length should be at least 3).
If the given string already ends with 'ing' then add 'ly' instead.
If the string length of the given string is less than 3, leave it unchanged.
"""
my_string = str(input("Enter the string:\n"))
if len(my_string) < 3:
print(my_string)
exit(0)
if my_string[-3:].find("ing") >= 0:
print(my_string + "ly")
exit(0)
print(my_string + "ing")
|
imigivanov/hillel-python-course
|
practice/practice_day_3/main.py
|
main.py
|
py
| 648
|
python
|
en
|
code
| 0
|
github-code
|
6
|
37182141846
|
from PyQt5.QtWidgets import QLabel, QWidget
class DescriptionLabel(QLabel):
def __init__(self, text: str, parent: QWidget):
super().__init__(text, parent)
self.setMaximumWidth(300)
self.setWordWrap(True)
self.setStyleSheet("QLabel{"
"font-size: 8pt;}")
|
brankomilovanovic/Database-Handler
|
handlers/rel/mysql/Connection/UI/DescriptionLabel.py
|
DescriptionLabel.py
|
py
| 318
|
python
|
en
|
code
| 0
|
github-code
|
6
|
19265213860
|
from kubernetes import client, config
from datetime import datetime
import threading
import time
from settings import CPU_THRESHOLD ,SUSPEND_AFTER_DRAIN
def time_now():
return datetime.now().strftime("%H:%M:%S")
class Drainy:
"""
This Drainy class is to auto-drain a faulty node as self-healing solution
"""
def __init__(self):
config.load_incluster_config()
self.session = client.CoreV1Api()
self.drained = dict()
def node_cpu_capacity(self, node_name):
return int(self.session.read_node_status(node_name).status.capacity['cpu'])
def show_node(self, node_name):
return self.session.read_node_status(node_name)
def is_cp_node(self, node_name):
if 'node-role.kubernetes.io/master' in self.session.read_node_status(node_name).metadata.labels:
return True
else:
return False
def delete_pod(self, name, namespace):
try:
delete_options = client.V1DeleteOptions()
api_response = self.session.delete_namespaced_pod(
name=name,
namespace=namespace,
body=delete_options)
# print(api_response)
except Exception as e:
print("{} Exception: Drainy:delete_pod {}".format(time_now(),e))
def cordon_node(self, node_name):
try:
body = {
"spec": {
"unschedulable": True,
},
}
self.session.patch_node(node_name, body)
print("{} {} has been cordoned!".format(time_now(), node_name))
        except Exception as e:
            print("{} Exception: Drainy:cordon_node {}".format(time_now(), e))
def uncordon_node(self, node_name):
try:
body = {
"spec": {
"unschedulable": False,
},
}
self.session.patch_node(node_name, body)
print("{} {} has been uncordoned!".format(time_now(), node_name))
except Exception as e:
print("{} Exception: Drainy:uncordon_node {}".format(time_now(), e))
def drain_node(self, node_name):
try:
self.cordon_node(node_name)
# field selectors are a string, you need to parse the fields from the pods here
field_selector = 'spec.nodeName='+node_name
pods = self.session.list_pod_for_all_namespaces(watch=False, field_selector=field_selector)
for i in pods.items:
print("{} Going to delete pod {}\t{}\t{}".format(time_now(), i.status.pod_ip, i.metadata.namespace, i.metadata.name))
self.delete_pod(name=i.metadata.name, namespace=i.metadata.namespace)
print("{} {} has been drained!".format(time_now(), node_name))
except Exception as e:
print("{} Exception: Drainy:drain_node {}".format(time_now(),e))
def is_drained(self, node):
if node in self.drained and self.drained[node]:
return True
else:
return False
def ttl_key_remove(self, key, ttl):
try:
time.sleep(ttl)
if key in self.drained:
self.drained.pop(key)
except Exception as e:
print("{} Exception: Drainy:ttl_key_remove {}".format(time_now(), e))
def drain_high_cpu_node(self):
try:
api = client.CustomObjectsApi()
k8s_nodes = api.list_cluster_custom_object("metrics.k8s.io", "v1beta1", "nodes")
for stats in k8s_nodes['items']:
node_name = stats['metadata']['name']
core_num = self.node_cpu_capacity(node_name)
cpu_usage = float(stats['usage']['cpu'][:-1]) / (1000000000.0 * core_num) * 100
# If this is a worker node which CPU usage higher than the threshold then drain it
if cpu_usage > CPU_THRESHOLD and not self.is_cp_node(node_name):
print("{} {} cpu usage is way too high!".format(time_now(), node_name))
if not self.is_drained(node_name):
# will only drain a node if it has not been drained
self.drained[node_name] = True
# After 10min(600 sec), remove node_name from self.drained dict
# This can avoid frequent drain to one particular node in 10 mins
t = threading.Thread(target=self.ttl_key_remove, args=(node_name, SUSPEND_AFTER_DRAIN))
t.start()
self.drain_node(node_name)
except Exception as e:
print("{} Exception: Drainy:drain_high_cpu_node {}".format(time_now(), e))
def main():
try:
d = Drainy()
while True:
d.drain_high_cpu_node()
time.sleep(60)
except Exception as e:
print("{} Exception: main {}".format(time_now(), e))
if __name__ == '__main__':
main()
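# Deployment note (assumption): this loop expects to run in-cluster with RBAC that
# allows listing/patching nodes, deleting pods, and reading metrics.k8s.io; a worker
# staying above CPU_THRESHOLD is drained, then left alone for SUSPEND_AFTER_DRAIN seconds.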
|
netmanyys/drainy
|
main.py
|
main.py
|
py
| 5,104
|
python
|
en
|
code
| 0
|
github-code
|
6
|
74147628668
|
# coding: utf-8
"""
Pydici billing views. Http request are processed here.
@author: Sébastien Renard (sebastien.renard@digitalfox.org)
@license: AGPL v3 or newer (http://www.gnu.org/licenses/agpl-3.0.html)
"""
from datetime import date, timedelta
import mimetypes
import json
from io import BytesIO
import os
import subprocess
import tempfile
from os.path import basename
from django.shortcuts import render
from django.urls import reverse, reverse_lazy
from django.utils.translation import gettext as _
from django.utils import translation
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.db.models import Sum, Q, F, Min, Max, Count
from django.db.models.functions import TruncMonth
from django.views.generic import TemplateView
from django.views.decorators.cache import cache_page
from django.forms.models import inlineformset_factory
from django.forms.utils import ValidationError
from django.contrib import messages
from django.utils.decorators import method_decorator
from django.template.loader import get_template
from django_weasyprint.views import WeasyTemplateResponse, WeasyTemplateView
from pypdf import PdfMerger, PdfReader
import facturx
from billing.utils import get_billing_info, update_client_bill_from_timesheet, update_client_bill_from_proportion, \
bill_pdf_filename, get_client_billing_control_pivotable_data, generate_bill_pdf
from billing.models import ClientBill, SupplierBill, BillDetail, BillExpense
from leads.models import Lead
from people.models import Consultant
from people.utils import get_team_scopes
from staffing.models import Timesheet, Mission
from staffing.views import MissionTimesheetReportPdf
from crm.models import Subsidiary
from crm.utils import get_subsidiary_from_session
from core.utils import get_fiscal_years_from_qs, get_parameter, user_has_feature
from core.utils import COLORS, nextMonth, previousMonth
from core.decorator import pydici_non_public, PydiciNonPublicdMixin, pydici_feature, PydiciFeatureMixin
from billing.forms import BillDetailInlineFormset, BillExpenseFormSetHelper, BillExpenseInlineFormset, BillExpenseForm
from billing.forms import ClientBillForm, BillDetailForm, BillDetailFormSetHelper, SupplierBillForm
@pydici_non_public
@pydici_feature("reports")
def bill_review(request):
"""Review of clients bills: bills overdue, due soon, or to be created"""
today = date.today()
wait_warning = timedelta(15) # wait in days used to warn that a bill is due soon
subsidiary = get_subsidiary_from_session(request)
# Get bills overdue, due soon, litigious and recently paid
overdue_bills = ClientBill.objects.filter(state="1_SENT", due_date__lte=today)
overdue_bills = overdue_bills.prefetch_related("lead__responsible", "lead__subsidiary").select_related("lead__client__contact", "lead__client__organisation__company")
soondue_bills = ClientBill.objects.filter(state="1_SENT", due_date__gt=today, due_date__lte=(today + wait_warning))
soondue_bills = soondue_bills.prefetch_related("lead__responsible", "lead__subsidiary").select_related("lead__client__contact", "lead__client__organisation__company")
recent_bills = ClientBill.objects.filter(state="2_PAID").order_by("-payment_date")
recent_bills = recent_bills.prefetch_related("lead__responsible", "lead__subsidiary").select_related("lead__client__contact", "lead__client__organisation__company")
litigious_bills = ClientBill.objects.filter(state="3_LITIGIOUS").select_related()
# Filter bills on subsidiary if defined
if subsidiary:
overdue_bills = overdue_bills.filter(lead__subsidiary=subsidiary)
soondue_bills = soondue_bills.filter(lead__subsidiary=subsidiary)
recent_bills = recent_bills.filter(lead__subsidiary=subsidiary)
litigious_bills = litigious_bills.filter(lead__subsidiary=subsidiary)
# Limit recent bill to last 20 ones
recent_bills = recent_bills[: 20]
# Compute totals
soondue_bills_total = soondue_bills.aggregate(Sum("amount"))["amount__sum"]
overdue_bills_total = overdue_bills.aggregate(Sum("amount"))["amount__sum"]
litigious_bills_total = litigious_bills.aggregate(Sum("amount"))["amount__sum"]
soondue_bills_total_with_vat = sum([bill.amount_with_vat for bill in soondue_bills if bill.amount_with_vat])
overdue_bills_total_with_vat = sum([bill.amount_with_vat for bill in overdue_bills if bill.amount_with_vat])
litigious_bills_total_with_vat = sum([bill.amount_with_vat for bill in litigious_bills if bill.amount_with_vat])
# Get leads with done timesheet in past three month that don't have bill yet
leads_without_bill = Lead.objects.filter(state="WON", mission__timesheet__working_date__gte=(date.today() - timedelta(90)))
leads_without_bill = leads_without_bill.annotate(Count("clientbill")).filter(clientbill__count=0)
if subsidiary:
leads_without_bill = leads_without_bill.filter(subsidiary=subsidiary)
return render(request, "billing/bill_review.html",
{"overdue_bills": overdue_bills,
"soondue_bills": soondue_bills,
"recent_bills": recent_bills,
"litigious_bills": litigious_bills,
"soondue_bills_total": soondue_bills_total,
"overdue_bills_total": overdue_bills_total,
"litigious_bills_total": litigious_bills_total,
"soondue_bills_total_with_vat": soondue_bills_total_with_vat,
"overdue_bills_total_with_vat": overdue_bills_total_with_vat,
"litigious_bills_total_with_vat": litigious_bills_total_with_vat,
"leads_without_bill": leads_without_bill,
"billing_management": user_has_feature(request.user, "billing_management"),
"consultant": Consultant.objects.filter(trigramme__iexact=request.user.username).first(),
"user": request.user})
@pydici_non_public
@pydici_feature("billing_request")
def supplier_bills_validation(request):
"""Review and validate suppliers bills"""
today = date.today()
subsidiary = get_subsidiary_from_session(request)
supplier_overdue_bills = SupplierBill.objects.filter(state__in=("1_RECEIVED", "1_VALIDATED"), due_date__lte=today)
supplier_overdue_bills = supplier_overdue_bills.prefetch_related("lead").select_related()
supplier_soondue_bills = SupplierBill.objects.filter(state__in=("1_RECEIVED", "1_VALIDATED"), due_date__gt=today)
supplier_soondue_bills = supplier_soondue_bills.prefetch_related("lead").select_related()
# Filter bills on subsidiary if defined
if subsidiary:
supplier_overdue_bills = supplier_overdue_bills.filter(lead__subsidiary=subsidiary)
supplier_soondue_bills = supplier_soondue_bills.filter(lead__subsidiary=subsidiary)
return render(request, "billing/supplier_bills_validation.html",
{"supplier_soondue_bills": supplier_soondue_bills,
"supplier_overdue_bills": supplier_overdue_bills,
"billing_management": user_has_feature(request.user, "billing_management"),
"consultant": Consultant.objects.filter(trigramme__iexact=request.user.username).first(),
"user": request.user})
@pydici_non_public
@pydici_feature("reports")
@cache_page(60 * 60 * 24)
def bill_delay(request):
"""Report on client bill creation and payment delay"""
data = []
subsidiary = get_subsidiary_from_session(request)
bills = ClientBill.objects.filter(creation_date__gt=(date.today() - timedelta(2*365)), state__in=("1_SENT", "2_PAID"),
amount__gt=0)
if subsidiary:
bills = bills.filter(lead__subsidiary=subsidiary)
bills = bills.select_related("lead__responsible", "lead__subsidiary", "lead__client__organisation__company",
"lead__paying_authority__company", "lead__paying_authority__contact")
bills = bills.prefetch_related("billdetail_set__mission")
for bill in bills:
data.append(
{_("Lead"): bill.lead.deal_id,
_("Responsible"): bill.lead.responsible.name,
_("Subsidiary"): bill.lead.subsidiary.name,
_("client company"): bill.lead.client.organisation.company.name,
_("Paying authority"): str(bill.lead.paying_authority or "null"),
_("Billing mode"): ",".join(list(set([d.mission.get_billing_mode_display() or "NA" for d in bill.billdetail_set.all()] or ["NA"]))),
_("creation lag"): bill.creation_lag() or "null",
_("payment delay"): bill.payment_delay(),
_("payment wait"): bill.payment_wait(),
_("creation date"): bill.creation_date.replace(day=1).isoformat()}
)
return render(request, "billing/payment_delay.html",
{"data": data,
"user": request.user},)
class BillingRequestMixin(PydiciFeatureMixin):
pydici_feature = "billing_request"
@pydici_non_public
@pydici_feature("billing_management")
def mark_bill_paid(request, bill_id):
"""Mark the given bill as paid"""
bill = ClientBill.objects.get(id=bill_id)
bill.state = "2_PAID"
bill.save()
return HttpResponseRedirect(reverse("billing:bill_review"))
@pydici_non_public
@pydici_feature("management")
def validate_supplier_bill(request, bill_id):
"""Mark the given supplier bill as validated"""
consultant = Consultant.objects.filter(trigramme__iexact=request.user.username).first()
bill = SupplierBill.objects.get(id=bill_id)
if consultant == bill.lead.responsible and bill.state == "1_RECEIVED":
bill.state = "1_VALIDATED"
bill.save()
return HttpResponseRedirect(reverse("billing:supplier_bills_validation"))
else:
return HttpResponseRedirect(reverse("core:forbidden"))
@pydici_non_public
@pydici_feature("billing_management")
def mark_supplierbill_paid(request, bill_id):
"""Mark the given supplier bill as paid"""
bill = SupplierBill.objects.get(id=bill_id)
bill.state = "2_PAID"
bill.save()
return HttpResponseRedirect(reverse("billing:supplier_bills_validation"))
@pydici_non_public
@pydici_feature("management")
def bill_file(request, bill_id=0, nature="client"):
"""Returns bill file"""
response = HttpResponse()
try:
if nature == "client":
bill = ClientBill.objects.get(id=bill_id)
else:
bill = SupplierBill.objects.get(id=bill_id)
if bill.bill_file:
response["Content-Type"] = mimetypes.guess_type(bill.bill_file.name)[0] or "application/stream"
response["Content-Disposition"] = 'attachment; filename="%s"' % basename(bill.bill_file.name)
for chunk in bill.bill_file.chunks():
response.write(chunk)
except (ClientBill.DoesNotExist, SupplierBill.DoesNotExist, OSError):
pass
return response
class Bill(PydiciNonPublicdMixin, TemplateView):
template_name = 'billing/bill.html'
def get_context_data(self, **kwargs):
context = super(Bill, self).get_context_data(**kwargs)
try:
bill = ClientBill.objects.get(id=kwargs.get("bill_id"))
context["bill"] = bill
context["expenses_image_receipt"] = []
for expenseDetail in bill.billexpense_set.all():
if expenseDetail.expense and expenseDetail.expense.receipt_content_type() != "application/pdf":
context["expenses_image_receipt"].append(expenseDetail.expense.receipt_data())
except ClientBill.DoesNotExist:
bill = None
return context
@method_decorator(pydici_feature("billing_request"))
def dispatch(self, *args, **kwargs):
return super(Bill, self).dispatch(*args, **kwargs)
class BillAnnexPDFTemplateResponse(WeasyTemplateResponse):
"""TemplateResponse override to merge """
@property
def rendered_content(self):
old_lang = translation.get_language()
try:
target = BytesIO()
bill = self.context_data["bill"]
translation.activate(bill.lang)
bill_pdf = super(BillAnnexPDFTemplateResponse, self).rendered_content
merger = PdfMerger()
merger.append(PdfReader(BytesIO(bill_pdf)))
# Add expense receipt
for billExpense in bill.billexpense_set.all():
if billExpense.expense and billExpense.expense.receipt_content_type() == "application/pdf":
merger.append(PdfReader(billExpense.expense.receipt.file))
# Add timesheet
if bill.include_timesheet:
fake_http_request = self._request
fake_http_request.method = "GET"
for mission in Mission.objects.filter(billdetail__bill=bill).annotate(Min("billdetail__month"), Max("billdetail__month")).distinct():
response = MissionTimesheetReportPdf.as_view()(fake_http_request, mission=mission,
start=mission.billdetail__month__min,
end=mission.billdetail__month__max)
merger.append(BytesIO(response.rendered_content))
merger.write(target)
target.seek(0) # Be kind, rewind
# Make it PDF/A-3B compliant
cmd = "gs -q -dPDFA=3 -dBATCH -dNOPAUSE -sColorConversionStrategy=UseDeviceIndependentColor -sDEVICE=pdfwrite -dPDFACompatibilityPolicy=1 -sOutputFile=- -"
try:
gs_in = tempfile.TemporaryFile()
gs_out = tempfile.TemporaryFile()
gs_in.write(target.getvalue())
target.close()
gs_in.seek(0)
subprocess.run(cmd.split(), stdin=gs_in, stdout=gs_out)
gs_out.seek(0)
# Add factur-x information
if bill.add_facturx_data:
facturx_xml = get_template("billing/invoice-factur-x.xml").render({"bill": bill})
facturx_xml = facturx_xml.encode("utf-8")
pdf_metadata = {
"author": "enioka",
"keywords": "Factur-X, Invoice, pydici",
"title": "enioka Invoice %s" % bill.bill_id,
"subject": "Factur-X invoice %s dated %s issued by enioka" % (bill.bill_id, bill.creation_date),
}
pdf = facturx.generate_from_binary(gs_out.read(), facturx_xml, pdf_metadata=pdf_metadata, lang=bill.lang)
else:
pdf = gs_out.read()
finally:
gs_out.close()
gs_in.close()
finally:
translation.activate(old_lang)
return pdf
class BillPdf(Bill, WeasyTemplateView):
response_class = BillAnnexPDFTemplateResponse
def get_filename(self):
bill = self.get_context_data(**self.kwargs)["bill"]
return bill_pdf_filename(bill)
@pydici_non_public
@pydici_feature("billing_request")
def client_bill(request, bill_id=None):
"""Add or edit client bill"""
billDetailFormSet = None
billExpenseFormSet = None
billing_management_feature = "billing_management"
wip_status = ("0_DRAFT", "0_PROPOSED")
forbidden = HttpResponseRedirect(reverse("core:forbidden"))
if bill_id:
try:
bill = ClientBill.objects.get(id=bill_id)
have_expenses = bill.lead.expense_set.filter(chargeable=True, billexpense__isnull=True).exists()
except ClientBill.DoesNotExist:
raise Http404
else:
bill = None
have_expenses = False
BillDetailFormSet = inlineformset_factory(ClientBill, BillDetail, formset=BillDetailInlineFormset, form=BillDetailForm, fields="__all__")
BillExpenseFormSet = inlineformset_factory(ClientBill, BillExpense, formset=BillExpenseInlineFormset, form=BillExpenseForm, fields="__all__")
if request.POST:
form = ClientBillForm(request.POST, request.FILES, instance=bill)
# First, ensure user is allowed to manipulate the bill
if bill and bill.state not in wip_status and not user_has_feature(request.user, billing_management_feature):
return forbidden
if form.data["state"] not in wip_status and not user_has_feature(request.user, billing_management_feature):
return forbidden
# Now, process form
if bill and bill.state in wip_status:
billDetailFormSet = BillDetailFormSet(request.POST, instance=bill)
billExpenseFormSet = BillExpenseFormSet(request.POST, instance=bill)
if form.data["state"] not in wip_status and (billDetailFormSet.has_changed() or billExpenseFormSet.has_changed()):
form.add_error("state", ValidationError(_("You can't modify bill details in that state")))
if form.is_valid() and (billDetailFormSet is None or billDetailFormSet.is_valid()) and (billExpenseFormSet is None or billExpenseFormSet.is_valid()):
bill = form.save()
if billDetailFormSet:
billDetailFormSet.save()
if billExpenseFormSet:
billExpenseFormSet.save()
bill.save() # Again, to take into account modified details.
if bill.state in wip_status:
success_url = reverse_lazy("billing:client_bill", args=[bill.id, ])
# User want to add chargeable expenses ?
if "Submit-expenses" in request.POST:
# compute again because user may add expenses during submit
expenses = bill.lead.expense_set.filter(chargeable=True, billexpense__isnull=True)
for expense in expenses:
BillExpense(bill=bill, expense=expense).save()
else:
success_url = request.GET.get('return_to', False) or reverse_lazy("billing:client_bill_detail", args=[bill.id, ])
if bill.bill_file:
if form.changed_data == ["state"] and billDetailFormSet is None and billExpenseFormSet is None:
# only state has change. No need to regenerate bill file.
messages.add_message(request, messages.INFO, _("Bill state has been updated"))
elif "bill_file" in form.changed_data:
# a file has been provided by user himself. We must not generate a file and overwrite it.
messages.add_message(request, messages.WARNING, _("Using custom user file to replace current bill"))
elif bill.billexpense_set.exists() or bill.billdetail_set.exists():
# bill file exist but authorized admin change information and do not provide custom file. Let's generate again bill file
messages.add_message(request, messages.WARNING, _("A new bill is generated and replace the previous one"))
if os.path.exists(bill.bill_file.path):
os.remove(bill.bill_file.path)
generate_bill_pdf(bill, request)
else:
# Bill file still not exist. Let's create it
messages.add_message(request, messages.INFO, _("A new bill file has been generated"))
generate_bill_pdf(bill, request)
return HttpResponseRedirect(success_url)
else:
if bill:
# Create a form to edit the given bill
form = ClientBillForm(instance=bill)
if bill.state in wip_status:
billDetailFormSet = BillDetailFormSet(instance=bill)
billExpenseFormSet = BillExpenseFormSet(instance=bill)
else:
# Still no bill, let's create it with its detail if at least mission or lead has been provided
missions = []
if request.GET.get("lead"):
lead = Lead.objects.get(id=request.GET.get("lead"))
missions = lead.mission_set.all() # take all missions
if request.GET.get("mission"):
missions = [Mission.objects.get(id=request.GET.get("mission"))]
if missions:
bill = ClientBill(lead=missions[0].lead)
bill.save()
for mission in missions:
if mission.billing_mode == "TIME_SPENT":
if request.GET.get("start_date") and request.GET.get("end_date"):
start_date = date(int(request.GET.get("start_date")[0:4]), int(request.GET.get("start_date")[4:6]), 1)
end_date = date(int(request.GET.get("end_date")[0:4]), int(request.GET.get("end_date")[4:6]), 1)
else:
start_date = previousMonth(date.today())
end_date = date.today().replace(day=1)
update_client_bill_from_timesheet(bill, mission, start_date, end_date)
else: # FIXED_PRICE mission
proportion = request.GET.get("proportion", 0.30)
bill = update_client_bill_from_proportion(bill, mission, proportion=proportion)
if bill:
form = ClientBillForm(instance=bill)
billDetailFormSet = BillDetailFormSet(instance=bill)
billExpenseFormSet = BillExpenseFormSet(instance=bill)
else:
# Simple virgin new form
form = ClientBillForm()
return render(request, "billing/client_bill_form.html",
{"bill_form": form,
"detail_formset": billDetailFormSet,
"detail_formset_helper": BillDetailFormSetHelper(),
"expense_formset": billExpenseFormSet,
"expense_formset_helper": BillExpenseFormSetHelper(),
"bill_id": bill.id if bill else None,
"can_delete": bill.state in wip_status if bill else False,
"can_preview": bill.state in wip_status if bill else False,
"have_expenses": have_expenses,
"user": request.user})
@pydici_non_public
@pydici_feature("billing_request")
def client_bill_detail(request, bill_id):
"""Display detailed bill information, metadata and bill pdf"""
bill = ClientBill.objects.get(id=bill_id)
return render(request, "billing/client_bill_detail.html",
{"bill": bill})
@pydici_non_public
@pydici_feature("billing_request")
def clientbill_delete(request, bill_id):
"""Delete client bill in early stage"""
redirect_url = reverse("billing:client_bills_in_creation")
try:
bill = ClientBill.objects.get(id=bill_id)
if bill.state in ("0_DRAFT", "0_PROPOSED"):
bill.delete()
messages.add_message(request, messages.INFO, _("Bill removed successfully"))
else:
messages.add_message(request, messages.WARNING, _("Can't remove a bill that have been sent. You may cancel it"))
redirect_url = reverse_lazy("billing:client_bill", args=[bill.id, ])
except Exception as e:
messages.add_message(request, messages.WARNING, _("Can't find bill %s" % bill_id))
return HttpResponseRedirect(redirect_url)
@pydici_non_public
@pydici_feature("billing_management")
def supplier_bill(request, bill_id=None):
"""Add or edit supplier bill"""
if bill_id:
try:
bill = SupplierBill.objects.get(id=bill_id)
except SupplierBill.DoesNotExist:
raise Http404
else:
bill = None
lead_id =request.GET.get("lead")
if request.POST:
form = SupplierBillForm(request.POST, request.FILES, instance=bill)
if form.is_valid():
bill = form.save()
return HttpResponseRedirect(reverse_lazy("billing:supplier_bills_archive"))
else:
if bill:
form = SupplierBillForm(instance=bill)
elif lead_id:
form = SupplierBillForm(initial={"lead": lead_id})
else:
form = SupplierBillForm()
return render(request, "billing/supplier_bill_form.html",
{"bill_form": form,
"bill_id": bill.id if bill else None,
"can_delete": bill.state == "1_RECEIVED" if bill else False,
"user": request.user})
@pydici_non_public
@pydici_feature("billing_management")
def supplierbill_delete(request, bill_id):
"""Delete supplier in early stage"""
redirect_url = reverse("billing:supplier_bills_archive")
try:
bill = SupplierBill.objects.get(id=bill_id)
if bill.state == "1_RECEIVED":
bill.delete()
messages.add_message(request, messages.INFO, _("Bill removed successfully"))
else:
messages.add_message(request, messages.WARNING, _("Can't remove a bill in state %s. You may cancel it" % bill.get_state_display()))
redirect_url = reverse_lazy("billing:supplier_bill", args=[bill.id, ])
except Exception as e:
messages.add_message(request, messages.WARNING, _("Can't find bill %s" % bill_id))
return HttpResponseRedirect(redirect_url)
@pydici_non_public
@pydici_feature("billing_request")
def pre_billing(request, start_date=None, end_date=None, mine=False):
"""Pre billing page: help to identify bills to send"""
subsidiary = get_subsidiary_from_session(request)
team = None
team_consultants = None
if end_date is None:
end_date = date.today().replace(day=1)
else:
end_date = date(int(end_date[0:4]), int(end_date[4:6]), 1)
if start_date is None:
start_date = previousMonth(date.today())
else:
start_date = date(int(start_date[0:4]), int(start_date[4:6]), 1)
if end_date - start_date > timedelta(180):
        # Cap the window at 180 days; a larger one is useless and could enable denial of service
start_date = (end_date - timedelta(180)).replace(day=1)
if end_date < start_date:
end_date = nextMonth(start_date)
if "team_id" in request.GET:
team = Consultant.objects.get(id=int(request.GET["team_id"]))
team_consultants = Consultant.objects.filter(staffing_manager=team)
mine = False
timeSpentBilling = {} # Key is lead, value is total and dict of mission(total, Mission billingData)
rates = {} # Key is mission, value is Consultant rates dict
internalBilling = {} # Same structure as timeSpentBilling but for billing between internal subsidiaries
try:
billing_consultant = Consultant.objects.get(trigramme__iexact=request.user.username)
except Consultant.DoesNotExist:
billing_consultant = None
mine = False
fixedPriceMissions = Mission.objects.filter(nature="PROD", billing_mode="FIXED_PRICE",
timesheet__working_date__gte=start_date,
timesheet__working_date__lt=end_date)
undefinedBillingModeMissions = Mission.objects.filter(nature="PROD", billing_mode=None,
timesheet__working_date__gte=start_date,
timesheet__working_date__lt=end_date)
timespent_timesheets = Timesheet.objects.filter(working_date__gte=start_date, working_date__lt=end_date,
mission__nature="PROD", mission__billing_mode="TIME_SPENT")
internalBillingTimesheets = Timesheet.objects.filter(working_date__gte=start_date, working_date__lt=end_date,
mission__nature="PROD")
internalBillingTimesheets = internalBillingTimesheets.exclude(Q(consultant__company=F("mission__subsidiary")) & Q(consultant__company=F("mission__lead__subsidiary")))
#TODO: handle fixed price mission fully delegated to a subsidiary
if mine: # Filter on consultant mission/lead as responsible
fixedPriceMissions = fixedPriceMissions.filter(Q(lead__responsible=billing_consultant) | Q(responsible=billing_consultant))
undefinedBillingModeMissions = undefinedBillingModeMissions.filter(Q(lead__responsible=billing_consultant) | Q(responsible=billing_consultant))
timespent_timesheets = timespent_timesheets.filter(Q(mission__lead__responsible=billing_consultant) | Q(mission__responsible=billing_consultant))
internalBillingTimesheets = internalBillingTimesheets.filter(Q(mission__lead__responsible=billing_consultant) | Q(mission__responsible=billing_consultant))
elif team: # Filter on team
fixedPriceMissions = fixedPriceMissions.filter(
Q(lead__responsible__in=team_consultants) | Q(responsible__in=team_consultants))
undefinedBillingModeMissions = undefinedBillingModeMissions.filter(
Q(lead__responsible__in=team_consultants) | Q(responsible__in=team_consultants))
timespent_timesheets = timespent_timesheets.filter(
Q(mission__lead__responsible__in=team_consultants) | Q(mission__responsible__in=team_consultants))
internalBillingTimesheets = internalBillingTimesheets.filter(
Q(mission__lead__responsible__in=team_consultants) | Q(mission__responsible__in=team_consultants))
fixedPriceMissions = fixedPriceMissions.order_by("lead").distinct()
undefinedBillingModeMissions = undefinedBillingModeMissions.order_by("lead").distinct()
if subsidiary: # filter on subsidiary
fixedPriceMissions = fixedPriceMissions.filter(subsidiary=subsidiary)
timespent_timesheets = timespent_timesheets.filter(mission__subsidiary=subsidiary)
undefinedBillingModeMissions = undefinedBillingModeMissions.filter(subsidiary=subsidiary)
timesheet_data = timespent_timesheets.order_by("mission__lead", "consultant").values_list("mission", "consultant").annotate(Sum("charge"))
timeSpentBilling = get_billing_info(timesheet_data)
for internal_subsidiary in Subsidiary.objects.all():
subsidiary_timesheet_data = internalBillingTimesheets.filter(consultant__company=internal_subsidiary)
for target_subsidiary in Subsidiary.objects.exclude(pk=internal_subsidiary.id):
timesheet_data = subsidiary_timesheet_data.filter(mission__lead__subsidiary=target_subsidiary)
            timesheet_data = timesheet_data.order_by("mission__lead", "consultant").values_list("mission", "consultant").annotate(Sum("charge"))
billing_info = get_billing_info(timesheet_data)
if billing_info:
                internalBilling[(internal_subsidiary, target_subsidiary)] = billing_info
scopes, team_current_filter, team_current_url_filter = get_team_scopes(subsidiary, team)
if team:
team_name = _("team %(manager_name)s") % {"manager_name": team}
else:
team_name = None
return render(request, "billing/pre_billing.html",
{"time_spent_billing": timeSpentBilling,
"fixed_price_missions": fixedPriceMissions,
"undefined_billing_mode_missions": undefinedBillingModeMissions,
"internal_billing": internalBilling,
"start_date": start_date,
"end_date": end_date,
"mine": mine,
"scope": team_name or subsidiary or _("Everybody"),
"team_current_filter": team_current_filter,
"team_current_url_filter": team_current_url_filter,
"scopes": scopes,
"user": request.user})
@pydici_non_public
@pydici_feature("billing_request")
def client_bills_in_creation(request):
"""Review client bill in preparation"""
return render(request, "billing/client_bills_in_creation.html",
{"data_url": reverse('billing:client_bills_in_creation_DT'),
"datatable_options": ''' "order": [[4, "desc"]], "columnDefs": [{ "orderable": false, "targets": [1, 3] }] ''',
"user": request.user})
@pydici_non_public
@pydici_feature("billing_request")
def client_bills_archive(request):
"""Review all client bill """
return render(request, "billing/client_bills_archive.html",
{"data_url": reverse('billing:client_bills_archive_DT'),
"datatable_options": ''' "lengthMenu": [ 10, 25, 50, 100, 500 ], "order": [[4, "desc"]], "columnDefs": [{ "orderable": false, "targets": [1, 2, 10] }] ''',
"user": request.user})
@pydici_non_public
@pydici_feature("billing_request")
def supplier_bills_archive(request):
"""Review all supplier bill """
return render(request, "billing/supplier_bills_archive.html",
{"data_url": reverse('billing:supplier_bills_archive_DT'),
"datatable_options": ''' "order": [[4, "desc"]], "columnDefs": [{ "orderable": false, "targets": [2, 10] }] ''',
"user": request.user})
@pydici_non_public
@pydici_feature("reports")
def lead_billing(request, lead_id):
"""lead / mission billing tab that display billing control and client/supplier bill list"""
lead = Lead.objects.get(id=lead_id)
return render(request, "billing/_lead_billing.html",
{"lead": lead,
"show_supplier_bills": True})
@pydici_non_public
@pydici_feature("reports")
def client_billing_control_pivotable(request, filter_on_subsidiary=None, filter_on_company=None, filter_on_lead=None):
"""Check lead/mission billing."""
subsidiary = get_subsidiary_from_session(request)
month_to_exc_from_my_leads = [date.today().replace(day=1)]
for i in range(6):
month_to_exc_from_my_leads.append(nextMonth(month_to_exc_from_my_leads[-1]))
month_to_exc_from_my_leads = [m.isoformat() for m in month_to_exc_from_my_leads]
data = get_client_billing_control_pivotable_data(filter_on_subsidiary=filter_on_subsidiary or subsidiary,
filter_on_company=filter_on_company,
filter_on_lead=filter_on_lead,
only_active=True)
return render(request, "billing/client_billing_control_pivotable.html",
{"data": data,
"consultant": Consultant.objects.filter(trigramme__iexact=request.user.username).first(),
"month_to_exc_from_my_leads": month_to_exc_from_my_leads,
"derivedAttributes": "{}"})
@pydici_non_public
@pydici_feature("reports")
@cache_page(60 * 60)
def graph_billing(request):
"""Bar graph of client bills by status"""
subsidiary = get_subsidiary_from_session(request)
bills = ClientBill.objects.filter(creation_date__gt=(date.today() - timedelta(3*365)), state__in=("1_SENT", "2_PAID"))
if subsidiary:
bills = bills.filter(lead__subsidiary=subsidiary)
if bills.count() == 0:
return HttpResponse()
bills = bills.annotate(month=TruncMonth("creation_date")).values("month")
bills = bills.annotate(amount_paid=Sum("amount", filter=Q(state="2_PAID")),
amount_sent=Sum("amount", filter=Q(state="1_SENT")))
bills = bills.values("month", "amount_paid", "amount_sent").order_by()
bills = [{"month": b["month"].isoformat(), "amount_paid": float(b["amount_paid"] or 0)/1000, "amount_sent": float(b["amount_sent"] or 0)/1000} for b in bills]
return render(request, "billing/graph_billing.html",
{"graph_data": json.dumps(bills),
"user": request.user})
@pydici_non_public
@pydici_feature("reports")
@cache_page(60 * 10)
def graph_yearly_billing(request):
"""Fiscal year billing per subsidiary"""
bills = ClientBill.objects.filter(state__in=("1_SENT", "2_PAID"))
years = get_fiscal_years_from_qs(bills, "creation_date")
month = int(get_parameter("FISCAL_YEAR_MONTH"))
data = {}
graph_data = []
labels = []
growth = []
subsidiary = get_subsidiary_from_session(request)
if subsidiary:
subsidiaries = [subsidiary,]
else:
subsidiaries = Subsidiary.objects.all()
for subsidiary in subsidiaries:
data[subsidiary.name] = []
for year in years:
turnover = {}
for subsidiary_name, amount in bills.filter(creation_date__gte=date(year, month, 1), creation_date__lt=date(year + 1, month, 1)).values_list("lead__subsidiary__name").annotate(Sum("amount")):
turnover[subsidiary_name] = float(amount)
for subsidiary in subsidiaries:
data[subsidiary.name].append(turnover.get(subsidiary.name, 0))
last_turnover = 0
for current_turnover in [sum(i) for i in zip(*list(data.values()))]: # Total per year
if last_turnover > 0:
growth.append(round(100 * (current_turnover - last_turnover) / last_turnover, 1))
else:
growth.append(None)
last_turnover = current_turnover
if years[-1] == date.today().year:
        growth.pop()  # don't compute growth for the ongoing year
graph_data.append(["x"] + years) # X (years) axis
# Add turnover per subsidiary
for key, value in list(data.items()):
if sum(value) == 0:
continue
value.insert(0, key)
graph_data.append(value)
labels.append(key)
# Add growth
graph_data.append([_("growth")] + growth)
labels.append(_("growth"))
return render(request, "billing/graph_yearly_billing.html",
{"graph_data": json.dumps(graph_data),
"years": years,
"subsidiaries_names" : json.dumps(labels),
"series_colors": COLORS,
"user": request.user})
@pydici_non_public
@pydici_feature("reports")
@cache_page(60 * 60 * 4)
def graph_outstanding_billing(request):
"""Graph outstanding billing, including overdue clients bills"""
end = nextMonth(date.today() + timedelta(45))
current = (end - timedelta(30) * 24).replace(day=1)
today = date.today()
months = []
outstanding = []
outstanding_overdue = []
graph_data = []
subsidiary = get_subsidiary_from_session(request)
while current < end:
months.append(current.isoformat())
next_month = nextMonth(current)
bills = ClientBill.objects.filter(due_date__lte=next_month, state__in=("1_SENT", "2_PAID")).exclude(payment_date__lt=current)
if subsidiary:
bills = bills.filter(lead__subsidiary=subsidiary)
overdue_bills = bills.exclude(payment_date__lte=F("due_date")).exclude(payment_date__gt=next_month).exclude(due_date__gt=today)
outstanding.append(float(bills.aggregate(Sum("amount"))["amount__sum"] or 0))
outstanding_overdue.append(float(overdue_bills.aggregate(Sum("amount"))["amount__sum"] or 0))
current = next_month
graph_data.append(["x"] + months)
graph_data.append([_("billing outstanding")] + outstanding)
graph_data.append([_("billing outstanding overdue")] + outstanding_overdue)
return render(request, "billing/graph_outstanding_billing.html",
{"graph_data": json.dumps(graph_data),
"series_colors": COLORS,
"user": request.user})
|
digitalfox/pydici
|
billing/views.py
|
views.py
|
py
| 39,484
|
python
|
en
|
code
| 122
|
github-code
|
6
|
6442868869
|
import torch
import torch.nn as nn
from constants import IMG_SIZE
class LinearVAE(nn.Module):
def __init__(self, num_features):
super(LinearVAE, self).__init__()
self.num_features = num_features
self.encoder = nn.Sequential(
nn.Linear(in_features=IMG_SIZE ** 2, out_features=512),
nn.ReLU(),
nn.Linear(in_features=512, out_features=num_features * 2),
)
self.decoder = nn.Sequential(
nn.Linear(in_features=num_features, out_features=512),
nn.ReLU(),
nn.Linear(in_features=512, out_features=784),
nn.Sigmoid(),
)
def reparameterize(self, mu, log_var):
"""
        Samples a latent vector from N(mu, sigma) using the reparameterization trick.
"""
standard_deviation = torch.exp(0.5 * log_var)
        # epsilon is sampled from a standard normal distribution
epsilon = torch.randn_like(standard_deviation)
return mu + (epsilon * standard_deviation)
def forward(self, x):
# compute encoding space distribution parameters
parameters = self.encoder(x).view(-1, 2, self.num_features)
# retrieve the mean `mu` and the log variance `log_var`
mu = parameters[:, 0, :]
log_var = parameters[:, 1, :]
# sample the latent vector through the reparameterization trick
sampled_latent_vector = self.reparameterize(mu, log_var)
# decoding
reconstruction = self.decoder(sampled_latent_vector)
return reconstruction, mu, log_var
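# --- Editor's sketch (not part of the original file) ---
# A typical training objective for this VAE pairs binary cross-entropy
# reconstruction with the analytic KL divergence of N(mu, sigma^2) against
# the standard normal prior. Note the decoder above is hard-coded to 784
# outputs, so this sketch assumes IMG_SIZE == 28 (28 * 28 == 784).
def vae_loss(reconstruction, x, mu, log_var):
    bce = nn.functional.binary_cross_entropy(reconstruction, x, reduction="sum")
    kld = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())
    return bce + kld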
|
antoineeudes/MLFlowPlayground
|
src/model.py
|
model.py
|
py
| 1,564
|
python
|
en
|
code
| 0
|
github-code
|
6
|
42964990913
|
import math


# def draw_line(px_context, x1: int, y1: int, x2: int, y2: int, color = (0, 0, 0)) -> None:
# """Brezenhem - algorithm drawing line"""
# lx = abs(x1 - x2) # define lengths
# ly = abs(y1 - y2)
#
# dy = 1 if y1 < y2 else -1 # define direct line
# dx = 1 if x1 < x2 else -1
#
# def calculate_brezenhem_cycle(a : int, b : int, da : int, db : int, la : int, lb : int,
# aend : int, aeqlx : bool) -> None:
# err = 0 # error drawing
# derr = lb + 1 # value incline line
# while a != aend:
# px_context[(a, b) if aeqlx else (b, a)] = color # draw pixel
# a += da # go to next point on coordinate a
# err += derr # increment value error drawing
# if err >= la + 1: # if error greater (a0 - a1) + 1
# err -= la + 1 # decrement value error
# b += db # go to next point on coordinate b
#
# if lx > ly:
# calculate_brezenhem_cycle(x1, y1, dx, dy, lx, ly, x2, True) # angle is belongs [315; 45] and [135; 225]
# else:
# calculate_brezenhem_cycle(y1, x1, dy, dx, ly, lx, y2, False) # angle is belongs (45; 135) and (225; 315)
def draw_line(px_context, x1: int, y1: int, x2: int, y2: int, color = (0, 0, 0)):
"""Brezenhem - algorithm drawing line"""
lx = abs(x1 - x2) # define lengths
ly = abs(y1 - y2)
swap = ly >= lx
if swap:
x1, y1 = y1, x1
x2, y2 = y2, x2
lx, ly = ly, lx
dy = 1 if y1 < y2 else -1 # define direct line
dx = 1 if x1 < x2 else -1
err = 0
derr = ly + 1
while x1 != x2:
px_context[(y1, x1) if swap else (x1, y1)] = color
x1 += dx
err += derr
if err >= lx + 1:
err -= lx + 1
y1 += dy
def draw_fill_delta(px_context, p1, p2, p3, fill_color=(0, 0, 0)):
"""algorithm rasterize delta"""
# ================== sort points ======================
if p2[1] < p1[1]:
p1, p2 = p2, p1
if p3[1] < p1[1]:
p1, p3 = p3, p1
if p3[1] < p2[1]:
p2, p3 = p3, p2
# ======================================================
dx13 = dx12 = dx23 = 0
# ================== calculate dxs =====================
if p1[1] != p3[1]:
dx13 = (p3[0] - p1[0]) / (p3[1] - p1[1])
if p1[1] != p2[1]:
dx12 = (p2[0] - p1[0]) / (p2[1] - p1[1])
if p2[1] != p3[1]:
dx23 = (p3[0] - p2[0]) / (p3[1] - p2[1])
# ======================================================
_dx13 = dx13
x1 = x2 = p1[0] # work points in up point
if dx13 > dx12:
dx13, dx12 = dx12, dx13
# ================== case up delta =====================
for y in range(int(p1[1]), int(p2[1])):
for x in range(int(x1), int(x2) + 1):
px_context[x, y] = fill_color # draw pixel
x1 += dx13
x2 += dx12
# ======================================================
dx13 = _dx13
if p1[1] == p2[1]:
x1 = p1[0]
x2 = p2[0]
if dx23 > dx13:
dx13, dx23 = dx23, dx13
# ================== case down delta ===================
for y in range(int(p2[1]), int(p3[1]) + 1):
for x in range(int(x1), int(x2) + 1):
px_context[x, y] = fill_color # draw pixel
x1 += dx13
x2 += dx23
# ======================================================
def point_in_field(x, y, field_size):
"""check point in field"""
return 0 <= x < field_size[0] and 0 <= y < field_size[1]
def get4neighbours(x, y, field_size):
"""search 4 point's neighbours"""
p1 = [x, y - 1]
p2 = [x, y + 1]
p3 = [x - 1, y]
p4 = [x + 1, y]
ans = []
if point_in_field(*p1, field_size):
ans += [p1]
if point_in_field(*p2, field_size):
ans += [p2]
if point_in_field(*p3, field_size):
ans += [p3]
if point_in_field(*p4, field_size):
ans += [p4]
return ans
def fill(px_context, x: int, y: int, size, fill_color = (0, 0, 0)):
"""fill field"""
queue = [] # queue points
curr_point = [x, y] # event pixel
main_color = px_context[x, y] # field's color
queue.append(curr_point)
while len(queue) > 0:
curr_point = queue.pop() # get point
neighbours = get4neighbours(*curr_point, size) # get her neighbours
for neighbour in neighbours:
if px_context[neighbour[0], neighbour[1]] == main_color: # check color neighbour-point
queue.append(neighbour) # add unfilled point
px_context[curr_point[0], curr_point[1]] = fill_color # fill this point
def draw_polygon(px_context, size, xc: int, yc: int, r: int, n: int, fill_color=(0, 0, 0)):
if n < 3:
raise ValueError("too little angles!!!")
d = math.pi / 180
a = 90
da = (360 / n)
points = []
x = xc
y = yc
for i in range(n):
x = xc + r * math.cos(a * d)
y = yc + r * math.sin(a * d)
a += da
points += [[int(x), int(y)]]
for i in range(n):
try:
draw_line(px_context, *points[i], *points[(i + 1) % n])
except IndexError:
print("image index out of range!!!")
fill(px_context, xc + 1, yc + 1, size, fill_color)
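# --- Editor's usage sketch (not part of the original file) ---
# These helpers operate on a PIL pixel-access object; the image, its size and
# the shapes drawn below are illustrative assumptions. Axis-aligned edges are
# used so the 4-connected flood fill cannot leak through diagonal gaps.
if __name__ == "__main__":
    from PIL import Image
    img = Image.new("RGB", (200, 200), (255, 255, 255))
    px = img.load()
    draw_line(px, 40, 40, 160, 40)    # top edge
    draw_line(px, 160, 40, 160, 160)  # right edge
    draw_line(px, 160, 160, 40, 160)  # bottom edge
    draw_line(px, 40, 160, 40, 40)    # left edge
    fill(px, 100, 100, img.size, fill_color=(0, 128, 0))
    img.save("demo.png")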
|
DnineO/Computer-Graphics
|
lab 14/algorithms_drawing_with_pillow.py
|
algorithms_drawing_with_pillow.py
|
py
| 5,772
|
python
|
en
|
code
| 0
|
github-code
|
6
|
30528663499
|
from PIL import Image, ImageDraw
def board(num, size):
col = (255, 255, 255)
picture = Image.new("RGB", (num * size, num * size), col)
x = size * num
y = x
draw = ImageDraw.Draw(picture)
for i in range(0, x, size):
if i % (size * 2) == 0:
for j in range(0, y, size):
if j % (size * 2) == 0:
draw.rectangle(
[i, j, i + size - 1, j + size - 1], fill='black')
else:
for j in range(size, y, size):
if j % (size * 2) != 0:
draw.rectangle(
[i, j, i + size - 1, j + size - 1], fill='black')
picture.save('res.png', "PNG")
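# --- Editor's usage sketch (not part of the original file) ---
# Render a standard 8x8 board with 50-pixel squares; the output file name
# res.png is hard-coded inside board() above.
if __name__ == "__main__":
    board(8, 50)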
|
OrAnge-Lime/Python-Practice-3
|
26.2.py
|
26.2.py
|
py
| 704
|
python
|
en
|
code
| 0
|
github-code
|
6
|
1989910236
|
import os
from os import system, name
from time import sleep
from var import *
from verifpiece import *
from time import sleep
from verifmove import *
#from chess import *
def clear():
os.system('cls' if os.name == 'nt' else 'clear')
def platePrint(plate):
clear()
# for i in range(8):
# print("|", 8 - i, "||", end=" ")
# for v in range(8):
# print(plate[i][v], end=" | ")
# print("")
# if not i == 7:
# print("--------------------------------------")
# print("--------------------------------------")
#print("| 0 || a | b | c | d | e | f | g | h |")
def PlateUpdate(chesspi, plate, alpha):
holdPlate = []
newPlate = []
holdPlate = list(plate[8 - int(chesspi[0][1])])
holdPlate[chesspi[0][0]] = "#"
newPlate = list(plate[8 - int(chesspi[1][1])])
newPlate[chesspi[1][0]] = chesspi[2]
plate[8 - int(chesspi[0][1])] = holdPlate
plate[8 - int(chesspi[1][1])] = newPlate
platePrint(plate)
return False
def getMove(turns):
print("ex: a1,a3,T: move tower a1 to a3")
if not error("get") == 0:
print("Error : ", allError)
print(turns)
def verif(chesspi, piece, pieceAdv, turns):
    chesspi.append(plate[8 - int(chesspi[0][1])][chesspi[0][0]])
if not chesspi[2] in piece:
if chesspi[2] in pieceAdv:
error('pieceIsNotYour')
else:
error('pieceNotExist')
if not plate[8 - (int(chesspi[0][1]))][chesspi[0][0]] in piece:
        error('pieceIsNotFind')
    if not (1 <= int(chesspi[1][1]) <= 8 and 1 <= chesspi[1][0] <= 8):
error('localisationNotInPlate')
if error("get") == 0:
pieceMove(chesspi, turns)
def error(errors):
if errors == "get":
return len(allError)
elif errors == "clear":
allError.clear()
else:
print(errors)
allError.append(errors)
def pieceMove(chesspi, turns):
y = int(chesspi[0][1])
x = chesspi[0][0]
ny = int(chesspi[1][1])
nx = chesspi[1][0]
zy = ny - y
zx = nx - x
piMo = chesspi[2]
try:
if piMo == "r"or piMo == "R":
if turns == "Black":
piece = pieceN
elif turns == "White":
piece = pieceB
Roi(y, x, ny, nx, zy, zx, piece, plate)
except IndexError:
error('moveIsImpossible')
try:
if piMo == "p"or piMo == "P":
Pion(turns, y, x, ny, nx, plate, pieceN, pieceB)
except IndexError:
error("moveIsImpossible")
try:
if piMo == "d"or piMo == "D":
Dame(y, x, ny, nx, zy, zx)
except IndexError:
error('moveIsImpossible')
try:
if piMo == "f"or piMo == "F":
Fou(y, x, ny, nx, zy, zx)
except IndexError:
error('moveIsImpossible')
try:
if piMo == "c"or piMo == "C":
if turns == "Black":
piece = pieceN
elif turns == "White":
piece = pieceB
Cavalier(y, x, ny, nx, plate, piece)
except IndexError:
error('moveIsImpossible')
try:
if piMo == "t"or piMo == "T":
Tour(y, x, ny, nx, zy, zx)
except IndexError:
error('moveIsImpossible')
if error("get") == 0:
moveobs(piMo, zx, zy, x, y, nx, ny, plate, pieceB, pieceN, outEquation, chesspi, turns)
def moveobs(piMo, zx, zy, x, y, nx, ny, plate, pieceB, pieceN, outEquation, chesspi, turns):
erreur = True
if turns == "Black":
piece = pieceN
pieceAdv = pieceB
elif turns == "White":
piece = pieceB
pieceAdv = pieceN
if piMo == "D" or piMo == "d":
erreur = DameMoveVerif(zx, zy, x, y, nx, ny, plate, piece, pieceAdv, outEquation)
if piMo == "T" or piMo == "t":
erreur = TourMoveVerif(zx, zy, x, y, nx, ny, plate, piece, pieceAdv, outEquation)
if piMo == "F" or piMo == "f":
erreur = FouMoveVerif(zx, zy, x, y, nx, ny, plate, piece, pieceAdv)
print(erreur)
if erreur == False:
error('CantMove(obstacle)')
if error("get") == 0:
PlateUpdate(chesspi, plate, alpha)
|
MaximCosta/PythonChess
|
verif.py
|
verif.py
|
py
| 3,995
|
python
|
en
|
code
| 1
|
github-code
|
6
|
1336928761
|
# Find the three keys with the highest values in the dictionary
# my_dict = {'a':500, 'b':5874, 'c': 560,'d':400, 'e':5874, 'f': 20}.
my_dict = {'a':500, 'b':5874, 'c': 560,'d':400, 'e':5874, 'f': 20}
# Solution -- behold this crude code, without using sorting operators!!!)
list_1 = tuple(my_dict.items())  # transform the dictionary into a tuple
list_2 = []  # empty list that will collect the dictionary values for sorting
for varies in list_1:
    x = varies[0]
    y = varies[1]
    list_2.append(y)  # separate the numeric values from the keys and collect them
list_2.sort(reverse=True)  # sort in descending order
a = list_2[0]  # store the three largest values in variables
b = list_2[1]
c = list_2[2]
for varies in list_1:
    x = varies[0]
    y = varies[1]
    if y == a or y == b or y == c:  # if a value matches, print the key's letter
        print(x)
##### HA HA
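# --- Editor's sketch (not part of the original file) ---
# A shorter, idiomatic alternative: heapq.nlargest picks the three keys with
# the highest values directly from the dictionary.
import heapq
print(heapq.nlargest(3, my_dict, key=my_dict.get))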
|
capeman1/Examples
|
example#5.py
|
example#5.py
|
py
| 1,400
|
python
|
ru
|
code
| 0
|
github-code
|
6
|
70396795707
|
import chex
import jax
import jax.numpy as jnp
import shinrl as srl
dS = 10
dA = 5
obs_shape = (2,)
act_shape = (3,)
init_probs = jnp.array([0.2, 0.8, 0, 0, 0, 0, 0, 0, 0, 0])
discount = 0.99
def tran_fn(state, action):
next_state = jnp.array([state, (state + action) % 10], dtype=int)
prob = jnp.array([0.2, 0.8], dtype=float)
return next_state, prob
def rew_fn(state, action):
return jnp.array(state + action, dtype=float)
def obs_fn(state):
return jnp.array([state, state + 5], dtype=float)
def act_fn(act):
return jnp.array([act, act + 2, act + 3], dtype=float)
def test_mdp():
obs_mat = srl.MDP.make_obs_mat(obs_fn, dS, obs_shape)
chex.assert_shape(obs_mat, (dS, *obs_shape))
tran_mat = srl.MDP.make_tran_mat(tran_fn, dS, dA)
key = jax.random.PRNGKey(0)
dmat = jax.random.uniform(key, shape=(dS * dA, 11))
res = srl.sp_mul(tran_mat, dmat, (dS * dA, dS))
chex.assert_shape(res, (dS * dA, 11))
rew_mat = srl.MDP.make_rew_mat(rew_fn, dS, dA)
chex.assert_shape(rew_mat, (dS, dA))
act_mat = srl.MDP.make_act_mat(act_fn, dA, act_shape)
chex.assert_shape(act_mat, (dA, *act_shape))
mdp = srl.MDP(
dS=dS,
dA=dA,
        obs_shape=obs_shape,
obs_mat=obs_mat,
rew_mat=rew_mat,
tran_mat=tran_mat,
init_probs=init_probs,
discount=discount,
        act_shape=act_shape,
act_mat=act_mat,
)
srl.MDP.is_valid_mdp(mdp)
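# --- Editor's note (sketch, not part of the original file) ---
# The test is normally collected by pytest; this guard also allows running the
# file directly, assuming the shinrl/jax/chex stack is installed.
if __name__ == "__main__":
    test_mdp()
    print("test_mdp passed")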
|
omron-sinicx/ShinRL
|
tests/envs/base/mdp_test.py
|
mdp_test.py
|
py
| 1,458
|
python
|
en
|
code
| 42
|
github-code
|
6
|
37273225181
|
from cc3d.core.PySteppables import *
from cc3d.CompuCellSetup import persistent_globals as pg
class ConstraintInitializerSteppable(SteppableBasePy):
def __init__(self,frequency=1):
SteppableBasePy.__init__(self,frequency)
def start(self):
for cell in self.cell_list:
cell.targetVolume = 25
cell.lambdaVolume = 20.0
#self.plot_win = self.add_new_plot_window(title='Tolerant and sensitive cells',
# x_axis_title='MonteCarlo Step (MCS)',
# y_axis_title='total population', x_scale_type='linear', y_scale_type='linear',
# grid=False)
#self.plot_win.add_plot("sen", style='Dots', color='red', size=3)
#self.plot_win.add_plot("tol", style='Dots', color='green', size=3)
# def step(self,mcs):
# field=CompuCell.getConcentrationField(self.simulator,"nutrient")
# comPt=CompuCell.Point3D()
# for cell in self.cellList:
# comPt.x=int(round(cell.xCOM))
# comPt.y=int(round(cell.yCOM))
# comPt.z=int(round(cell.zCOM))
# #Condensing cell
# if cell.type==self.SENCELL:
# concentration=field.get(comPt) # get concentration at comPt
# cell.targetVolume+=0.1*concentration # increase cell's target volume
class GrowthSteppable(SteppableBasePy):
def __init__(self,frequency=1):
SteppableBasePy.__init__(self,frequency)
def start(self):
# self.plot_win = self.add_new_plot_window(title='Tolerant and sensitive cells',
# x_axis_title='MonteCarlo Step (MCS)',
# y_axis_title='total population', x_scale_type='linear', y_scale_type='linear',
# grid=False)
#self.plot_win.add_plot("sen", style='Dots', color='red', size=3)
#self.plot_win.add_plot("tol", style='Dots', color='green', size=3)
self.initnumtolcell=len(self.cell_list_by_type(self.TOLCELL))
self.initnumsencell=len(self.cell_list_by_type(self.SENCELL))
numiter=int(pg.input_object[9])
self.sencellfc=np.zeros((numiter+1))
self.tolcellfc=np.zeros((numiter+1))
self.timstp=0
self.simtype = pg.input_object[0]
self.senHcoef = pg.input_object[1]
self.senMinGrwthNut = pg.input_object[2]
self.senStrsCoef = pg.input_object[3]
self.tolHcoef = pg.input_object[4]
self.tolLCoef1 = pg.input_object[5]
self.tolLCoef2 = pg.input_object[6]
self.tolLCoef3 = pg.input_object[7]
self.tolMinGrwthNut = pg.input_object[8]
print("starting time steps .... ")
def step(self, mcs):
# # alternatively if you want to make growth a function of chemical concentration uncomment lines below and comment lines above
field1 = self.field.NUTRIENT
field2 = self.field.STRESS
for cell in self.cell_list:
ntrntAtCOM = field1[int(cell.xCOM), int(cell.yCOM), int(cell.zCOM)]
strssAtCOM = field2[int(cell.xCOM), int(cell.yCOM), int(cell.zCOM)]
if cell.type == self.SENCELL:
#cell.targetVolume += 0.4 * (ntrntAtCOM) - 0.2*strssAtCOM-0.05 #- 0.01*(6-ntrntAtCOM)
#cell.targetVolume += 0.13 * (ntrntAtCOM) - 0.025*strssAtCOM-0.05
#cell.targetVolume += -0.2*np.log((1-ntrntAtCOM/5.51)/(80.5*ntrntAtCOM/5.51))- 0.34*max(0,-np.log((10.0-strssAtCOM)/(50.5*ntrntAtCOM)))
#cell.targetVolume += -0.12*np.log((1-ntrntAtCOM/5.51)/(80*ntrntAtCOM/5.51))- 0.2*max(0,-np.log((10.0-strssAtCOM)/(40.5*strssAtCOM)))
#cell.targetVolume += 0.68*ntrntAtCOM/(ntrntAtCOM+2.5)*(ntrntAtCOM-0.15) - 0.08*strssAtCOM/(strssAtCOM+2.5)*(strssAtCOM-0.1)
#health= 0.14*ntrntAtCOM # - 1.1/(1+np.exp(np.log(19)*(1-2*((strssAtCOM-0.2)/0.6 ))))
#cell.targetVolume += 1*((health))*(ntrntAtCOM-0.1) - 0.17*strssAtCOM*strssAtCOM
health= self.senHcoef*ntrntAtCOM
cell.targetVolume += 1*((health))*(ntrntAtCOM-self.senMinGrwthNut) - self.senStrsCoef*strssAtCOM*strssAtCOM
#health= 0.09*ntrntAtCOM
#cell.targetVolume += 1*((health))*(ntrntAtCOM-0.03) - 0.18*strssAtCOM*(strssAtCOM)
#cell.targetVolume += (0.35-2/(1+np.exp(np.log(19)*(1-2*((strssAtCOM-0.3)/4)))))*(ntrntAtCOM)-0.1
numsencell=len(self.cell_list_by_type(self.SENCELL))
self.sencellfc[self.timstp]=numsencell/self.initnumsencell
#self.plot_win.add_data_point("sen", mcs, numsencell/self.initnumsencell)
#cell.targetVolume += 0.34* 10.0/( 1+70*np.power( 2,-( ntrntAtCOM-3.5 ) ) ) - 0.34* 10.0/( 1+70*np.power( 2,-( strssAtCOM-3.5 ) ) )
#cell.targetVolume += -0.32*np.log((1000.0-ntrntAtCOM/strssAtCOM)/(800*ntrntAtCOM/strssAtCOM))
elif cell.type == self.TOLCELL:
#cell.targetVolume += 0.2 * (ntrntAtCOM) - 0.22*strssAtCOM- 0.05
#cell.targetVolume += -0.12*np.log((1-ntrntAtCOM/5.51)/(55*ntrntAtCOM/5.51)) - 0.20*max(0,-np.log((10.0-strssAtCOM)/(40.5*ntrntAtCOM)))
#cell.targetVolume += -0.04*np.log((1-ntrntAtCOM/5.51)/(55*ntrntAtCOM/5.51)) - 0.20*max(0,-np.log((10.0-strssAtCOM)/(40.5*strssAtCOM)))
#health= 0.22*ntrntAtCOM - 3/(1+np.exp((1-2*((strssAtCOM-1.5)/3.5))))
#health= 0.175 *ntrntAtCOM - 1.1/(1+np.exp(np.log(19)*(1-2*((strssAtCOM-0.5)/0.4 ))))
#cell.targetVolume += 1*((health))*(ntrntAtCOM-0.1) #- 0.04*strssAtCOM*strssAtCOM
health= self.tolHcoef *ntrntAtCOM - self.tolLCoef1/(1+np.exp(np.log(19)*(1-2*((strssAtCOM-self.tolLCoef2 )/self.tolLCoef3 ))))
cell.targetVolume += 1*((health))*(ntrntAtCOM-self.tolMinGrwthNut) #- 0.04*strssAtCOM*strssAtCOM
#health= 0.072 *ntrntAtCOM - 0.25/(1+np.exp(np.log(19)*(1-2*((strssAtCOM-0.2 )/0.4 ))))#(strssAtCOM**1.5)/(0.3+strssAtCOM**1.5)
#cell.targetVolume += 1*((health))*(ntrntAtCOM-0.01)
#cell.targetVolume += (0.26-2/(1+np.exp(np.log(19)*(1-2*((strssAtCOM-1.0)/2.7)))))*(ntrntAtCOM)-0.1
#cell.targetVolume += -0.1+0.12*25/(1+np.exp(np.log(19)*(1-2*((ntrntAtCOM-0.2)/5.0)))) - 0.035*max(0, 9/(1+np.exp(np.log(19)*(1-2*((strssAtCOM-0.3)/10.4)))))
numtolcell=len(self.cell_list_by_type(self.TOLCELL))
self.tolcellfc[self.timstp]=numtolcell/self.initnumtolcell
#self.plot_win.add_data_point("tol", mcs, numtolcell/self.initnumtolcell)
#cell.targetVolume += -0.4*np.log((1-ntrntAtCOM/5.51)/(55*ntrntAtCOM/5.51)) - 0.38*max(0,-np.log((10.0-strssAtCOM)/(99.5*ntrntAtCOM)))
self.timstp+=1
print("+", end="",flush=True)
#print("time step: ", self.timstp)
# arguments are (name of the data series, x, y)
def finish(self):
pg.return_object = [self.tolcellfc, self.sencellfc]
print("\n ending the time stepping")
# if (self.timstp==600):
# output = np.column_stack((self.tolcellfc.flatten(),self.sencellfc.flatten()))
# np.savetxt("exp.csv", output, delimiter=",")
class MitosisSteppable(MitosisSteppableBase):
def __init__(self,frequency=1):
MitosisSteppableBase.__init__(self,frequency)
def step(self, mcs):
cells_to_divide=[]
for cell in self.cell_list:
# ntrntAtCOM = field1[int(cell.xCOM), int(cell.yCOM), int(cell.zCOM)]
if cell.volume>50:
cells_to_divide.append(cell)
# if ntrntAtCOM > 0.2
# ntrntAtCOM -= 0.2
# field1[int(cell.xCOM), int(cell.yCOM), int(cell.zCOM)] = ntrntAtCOM
for cell in cells_to_divide:
self.divide_cell_random_orientation(cell)
# Other valid options
# self.divide_cell_orientation_vector_based(cell,1,1,0)
# self.divide_cell_along_major_axis(cell)
# self.divide_cell_along_minor_axis(cell)
def update_attributes(self):
# reducing parent target volume
self.parent_cell.targetVolume /= 2.0
self.clone_parent_2_child()
# for more control of what gets copied from parent to child use cloneAttributes function
# self.clone_attributes(source_cell=self.parent_cell, target_cell=self.child_cell, no_clone_key_dict_list=[attrib1, attrib2])
if self.parent_cell.type==1:
self.child_cell.type=1
elif self.parent_cell.type==2:
self.child_cell.type=2
class DeathSteppable(SteppableBasePy):
def __init__(self, frequency=1):
SteppableBasePy.__init__(self, frequency)
# def step(self, mcs):
# if mcs == 1000:
# for cell in self.cell_list:
# if cell.type==1:
# cell.targetVolume=0
# cell.lambdaVolume=100
# if mcs == 2000:
# for cell in self.cell_list:
# if cell.type==2:
# cell.targetVolume=0
# cell.lambdaVolume=100
|
SouKot/cc3d-scipyOptimize
|
Simulation/nutrient_stress_mitosisSteppables.py
|
nutrient_stress_mitosisSteppables.py
|
py
| 9,639
|
python
|
en
|
code
| 0
|
github-code
|
6
|
26625314906
|
from django.contrib.auth.decorators import user_passes_test
from django.core import urlresolvers
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext as _
from product.forms import VariationManagerForm, InventoryForm, ProductExportForm, ProductImportForm
from product.models import Product
from product.modules.configurable.models import ConfigurableProduct
from satchmo_utils.views import bad_or_missing
import logging
log = logging.getLogger('product.views.adminviews')
def edit_inventory(request):
"""A quick inventory price, qty update form"""
if request.method == "POST":
new_data = request.POST.copy()
form = InventoryForm(new_data)
if form.is_valid():
form.save(request)
url = urlresolvers.reverse('satchmo_admin_edit_inventory')
return HttpResponseRedirect(url)
else:
form = InventoryForm()
ctx = RequestContext(request, {
'title' : _('Inventory Editor'),
'form' : form
})
return render_to_response('product/admin/inventory_form.html',
context_instance=ctx)
edit_inventory = user_passes_test(lambda u: u.is_authenticated() and u.is_staff, login_url='/accounts/login/')(edit_inventory)
def export_products(request, template='product/admin/product_export_form.html'):
"""A product export tool"""
if request.method == 'POST':
new_data = request.POST.copy()
form = ProductExportForm(new_data)
if form.is_valid():
return form.export(request)
else:
form = ProductExportForm()
fileform = ProductImportForm()
ctx = RequestContext(request, {
'title' : _('Product Import/Export'),
'form' : form,
'importform': fileform
})
return render_to_response(template, context_instance=ctx)
export_products = user_passes_test(lambda u: u.is_authenticated() and u.is_staff, login_url='/accounts/login/')(export_products)
def import_products(request, maxsize=10000000):
"""
    Imports products from an uploaded file.
"""
if request.method == 'POST':
errors = []
results = []
if 'upload' in request.FILES:
infile = request.FILES['upload']
form = ProductImportForm()
results, errors = form.import_from(infile, maxsize=maxsize)
else:
errors.append('File: %s' % request.FILES.keys())
errors.append(_('No upload file found'))
ctx = RequestContext(request, {
'errors' : errors,
'results' : results
})
return render_to_response("product/admin/product_import_result.html",
context_instance=ctx)
else:
url = urlresolvers.reverse('satchmo_admin_product_export')
return HttpResponseRedirect(url)
import_products = user_passes_test(lambda u: u.is_authenticated() and u.is_staff, login_url='/accounts/login/')(import_products)
# def product_active_report(request):
#
# products = Product.objects.filter(active=True)
# products = [p for p in products.all() if 'productvariation' not in p.get_subtypes]
# ctx = RequestContext(Request, {title: 'Active Product Report', 'products' : products })
# return render_to_response('product/admin/active_product_report.html', ctx)
#
# product_active_report = user_passes_test(lambda u: u.is_authenticated() and u.is_staff, login_url='/accounts/login/')(product_active_report)
def variation_list(request):
    products = Product.objects.filter(configurableproduct__in=ConfigurableProduct.objects.all())
ctx = RequestContext(request, {
'products' : products,
})
return render_to_response('product/admin/variation_manager_list.html',
context_instance=ctx)
def variation_manager(request, product_id = ""):
try:
product = Product.objects.get(id=product_id)
subtypes = product.get_subtypes()
if 'ProductVariation' in subtypes:
# got a variation, we want to work with its parent
product = product.productvariation.parent.product
if 'ConfigurableProduct' in product.get_subtypes():
url = urlresolvers.reverse("satchmo_admin_variation_manager",
kwargs = {'product_id' : product.id})
return HttpResponseRedirect(url)
if 'ConfigurableProduct' not in subtypes:
return bad_or_missing(request, _('The product you have requested is not a Configurable Product.'))
except Product.DoesNotExist:
return bad_or_missing(request, _('The product you have requested does not exist.'))
if request.method == 'POST':
new_data = request.POST.copy()
form = VariationManagerForm(new_data, product=product)
if form.is_valid():
log.debug("Saving form")
form.save(request)
# rebuild the form
form = VariationManagerForm(product=product)
else:
log.debug('errors on form')
else:
form = VariationManagerForm(product=product)
ctx = RequestContext(request, {
'product' : product,
'form' : form,
})
return render_to_response('product/admin/variation_manager.html',
context_instance=ctx)
variation_manager = user_passes_test(lambda u: u.is_authenticated() and u.is_staff, login_url='/accounts/login/')(variation_manager)
|
dokterbob/satchmo
|
satchmo/apps/product/views/adminviews.py
|
adminviews.py
|
py
| 5,586
|
python
|
en
|
code
| 30
|
github-code
|
6
|
25785346221
|
import numpy as np
import matplotlib.pyplot as plt
import xlrd
from config import SHEET_INDEX, ROW_START, ROW_END, COLUMN_START, COLUMN_END,FILE_NAME
from model import Unit
plt.rcParams['font.family']='Calibri'
def read_data():
workbook = xlrd.open_workbook(FILE_NAME)
sheet = workbook.sheets()[0]
result = []
for row_idx in range(ROW_START,ROW_END+1):
row = sheet.row_values(row_idx)[COLUMN_START:COLUMN_END+1]
result.append(Unit(row[0], float(row[1]), float(row[2]), row[3]))
return result
def _print(data):
for item in data:
print(item.name,item.yct,item.pei, item.color)
def _bottom(data, current_idx, is_yct=True):
pass
def stack_values(data1, data2):
result1 = [0.0]
result2 = [0.0]
for item1,item2 in zip(data1, data2):
result1.append(result1[-1]+item1.yct)
result2.append(result2[-1]+item2.pei)
return result1, result2
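# Editor's note (sketch): stack_values turns per-item values into cumulative
# "bottom" offsets for the stacked bars drawn below; e.g. yct values
# [30, 20, 10] yield [0.0, 30.0, 50.0, 60.0], where entry i is the bottom of
# bar i and the final entry (the grand total) is unused.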
def draw():
row_data = read_data()
data_by_yct = sorted(row_data, key=lambda unit: unit.yct, reverse=True)
data_by_pei = sorted(row_data, key=lambda unit: unit.pei, reverse=True)
stack1, stack2 = stack_values(data_by_yct, data_by_pei)
index = 0.5
bw = 0.5
plt.axis([0,3,0,100])
plt.plot([0,4],[2,2],'-')
for idx,item in enumerate(data_by_yct):
plt.bar(index, np.array([item.yct]), bw, color=item.color, edgecolor='None',
label=item.name, bottom=stack1[idx])
for idx,item in enumerate(data_by_pei):
plt.bar(index +1, np.array([item.pei]), bw, color=item.color, edgecolor='None',
bottom=stack2[idx])
plt.legend(loc='right', ncol=1, fontsize=5.5, frameon=False)
#decorate
x_ticks = [0.75, 1.75]
x_label = [r"$D_{YCT}$", r"$D_{PEI}$"]
y_ticks = np.arange(0,101,10)
y_labels = np.array([str(item) for item in y_ticks])
plt.ylabel('Percentage (%)')
plt.xticks(x_ticks, x_label, fontsize=12)
plt.yticks(y_ticks, y_labels)
gca = plt.gca()
gca.xaxis.set_ticks_position('bottom')
    gca.yaxis.set_ticks_position('left')
gca.spines['right'].set_color('none')
plt.show()
if __name__ == '__main__':
draw()
|
gaufung/CodeBase
|
PDA/Figures/app.py
|
app.py
|
py
| 2,205
|
python
|
en
|
code
| 0
|
github-code
|
6
|
1414104927
|
import logging
import coloredlogs
import sys
import os
import yaml
import argparse
log = logging.getLogger(__name__)
class Workspace:
BACK_CONFIG_VERSION = "0.03"
CONFIG_VERSION = "0.05"
DEFAULT_WORKSPACE_DIR = os.path.join(os.path.expanduser("~"),
".tng-workspace")
DEFAULT_SCHEMAS_DIR = os.path.join(os.path.expanduser("~"), ".tng-schema")
__descriptor_name__ = "workspace.yml"
def __init__(self, ws_root, config=None, ws_name=None, log_level=None):
self._ws_root = ws_root
self._ws_config = dict()
if config:
self._ws_config = config
else:
self.load_default_config()
if ws_name:
self.config['name'] = ws_name
if log_level:
self.config['log_level'] = log_level
# coloredlogs.install(level=self.config['log_level'])
@property
def workspace_root(self):
return self._ws_root
@property
def workspace_name(self):
return self.config['name']
@property
def default_descriptor_extension(self):
return self.config['default_descriptor_extension']
@property
def log_level(self):
return self.config['log_level']
@log_level.setter
def log_level(self, value):
self.config['log_level'] = value
coloredlogs.install(level=value)
@property
def schemas_local_master(self):
return self.config['schemas_local_master']
@property
def schemas_remote_master(self):
return self.config['schemas_remote_master']
@property
def validate_watchers(self):
return self.config['validate_watchers']
@property
def config(self):
return self._ws_config
@property
def catalogues_dir(self):
        return self.config['catalogues_dir']
@property
def configuration_dir(self):
return self.config['configuration_dir']
@property
def platforms_dir(self):
return self.config['platforms_dir']
@property
def projects_dir(self):
return self.config['projects_dir']
@property
def ns_catalogue_dir(self):
return os.path.join(self.catalogues_dir, 'ns_catalogue')
@property
def vnf_catalogue_dir(self):
return os.path.join(self.catalogues_dir, 'vnf_catalogue')
def load_default_config(self):
self.config['version'] = self.CONFIG_VERSION
self.config['name'] = '5GTANGO Workspace'
self.config['log_level'] = 'INFO'
self.config['catalogues_dir'] = 'catalogues'
self.config['configuration_dir'] = 'configuration'
self.config['platforms_dir'] = 'platforms'
self.config['projects_dir'] = 'projects'
self.config['projects_config'] = os.path.join(self.workspace_root,
'projects', 'config.yml')
self.config['schemas_local_master'] = Workspace.DEFAULT_SCHEMAS_DIR
self.config['schemas_remote_master'] = \
"https://github.com/sonata-nfv/tng-schema"
self.config['default_descriptor_extension'] = 'yml'
self.config['service_platforms'] = \
{'sp1':
{'url': 'http://sp.int3.sonata-nfv.eu:32001',
'credentials': {'username': 'sonata',
'password': 's0n@t@',
'token_file': 'token.txt',
'signature': {'pub_key': '',
'prv_key': '',
'cert': ''
}
}
}
}
self.config['default_service_platform'] = 'sp1'
self.config['validate_watchers'] = \
{os.path.join(self.workspace_root, self.config['projects_dir']):
{'type': 'project',
'syntax': True,
'integrity': True,
'topology': True}
}
def create_dirs(self):
"""
Create the base directory structure for the workspace
Invoked upon workspace creation.
:return:
"""
log.info('Creating workspace at %s', self.workspace_root)
os.makedirs(self.workspace_root, exist_ok=False)
dirs = [self.config['catalogues_dir'],
self.config['configuration_dir'],
self.config['platforms_dir'],
self.config['projects_dir'],
]
for d in dirs:
path = os.path.join(self.workspace_root, d)
os.makedirs(path, exist_ok=True)
def create_files(self):
"""
Creates a workspace configuration file descriptor.
This is triggered by workspace creation and configuration changes.
:return:
"""
cfg_d = self.config.copy()
ws_file_path = os.path.join(self.workspace_root,
Workspace.__descriptor_name__)
        with open(ws_file_path, 'w') as ws_file:
            yaml.dump(cfg_d, ws_file, default_flow_style=False)
# write project config (schema-MIME mapping)
mapping = {
'application/vnd.5gtango.vnfd':
'https://raw.githubusercontent.com/sonata-nfv/tng-schema/master/function-descriptor/vnfd-schema.yml',
'application/vnd.5gtango.nsd':
'https://raw.githubusercontent.com/sonata-nfv/tng-schema/master/service-descriptor/nsd-schema.yml',
'application/vnd.5gtango.tstd':
'https://raw.githubusercontent.com/sonata-nfv/tng-schema/master/test-descriptor/testdescriptor'
'-schema.yml',
'application/vnd.5gtango.slad':
'https://raw.githubusercontent.com/sonata-nfv/tng-schema/master/sla-template-descriptor/sla-template'
'-schema.yml',
'application/vnd.5gtango.rpd':
'https://raw.githubusercontent.com/sonata-nfv/tng-schema/master/policy-descriptor/policy-schema.yml',
'application/vnd.5gtango.nstd':
'https://raw.githubusercontent.com/sonata-nfv/tng-schema/master/slice-descriptor/nst-schema.yml'
}
# add reverse mapping (type to schema) for translation of old SONATA descriptors
reverse_mapping = {v: k for k, v in mapping.items()}
mapping.update(reverse_mapping)
conf_path = os.path.join(self.workspace_root, 'projects', 'config.yml')
        with open(conf_path, 'w') as conf_file:
            yaml.dump(mapping, conf_file, default_flow_style=False)
return cfg_d
def check_ws_exists(self):
ws_file = os.path.join(self.workspace_root,
Workspace.__descriptor_name__)
print(ws_file)
return os.path.exists(ws_file) or os.path.exists(self.workspace_root)
@staticmethod
def load_workspace(ws_root):
"""
Creates a Workspace object based on a configuration descriptor
:param ws_root: base path of the workspace
:return: Workspace object
"""
if ws_root is None:
ws_root = Workspace.DEFAULT_WORKSPACE_DIR
ws_filename = os.path.join(ws_root, Workspace.__descriptor_name__)
if not os.path.isdir(ws_root) or not os.path.isfile(ws_filename):
if ws_root == Workspace.DEFAULT_WORKSPACE_DIR:
# if we tried the default WS, and cannot find it,
# assume first execution of tng-sdk-project and just
# create the default WS
create_workspace(ws_root)
else:
log.error("Unable to load workspace descriptor '{}'. "
"Create workspace with tng-wks and specify "
+ "location with -w".format(ws_filename))
return None
ws_file = open(ws_filename, 'r')
try:
ws_config = yaml.load(ws_file, Loader=yaml.FullLoader)
except yaml.YAMLError as exc:
log.error("Error parsing descriptor file '{0}': {1}"
.format(ws_filename, exc))
return
if not ws_config:
log.error("Couldn't read descriptor file: '{0}'"
.format(ws_filename))
return
if not ws_config['version'] == Workspace.CONFIG_VERSION:
if ws_config['version'] < Workspace.BACK_CONFIG_VERSION:
log.error("Workspace configuration version '{0}' is no longer "
"supported (<{1})"
.format(ws_config['version'],
Workspace.BACK_CONFIG_VERSION))
return
if ws_config['version'] > \
Workspace.CONFIG_VERSION:
log.error("Workspace configuration version '{0}' is ahead of "
"the current supported version (={1})"
.format(ws_config['version'],
Workspace.CONFIG_VERSION))
return
ws = Workspace(ws_root, config=ws_config)
# Make adjustments to support backwards compatibility
# 0.03
if ws_config['version'] == "0.03":
ws.config['validate_watchers'] = \
{os.path.join(ws.workspace_root, ws.config['projects_dir']):
{'type': 'project',
'syntax': True,
'integrity': True,
'topology': True}
}
# 0.04
if ws_config['version'] <= "0.04":
sps = ws.config['service_platforms']
for sp_key, sp in sps.items():
sp['credentials']['signature'] = dict()
sp['credentials']['signature']['pub_key'] = ''
sp['credentials']['signature']['prv_key'] = ''
sp['credentials']['signature']['cert'] = ''
if ws_config['version'] < Workspace.CONFIG_VERSION:
log.warning("Loading workspace with an old configuration "
"version ({0}). Updated configuration: {1}"
.format(ws_config['version'], ws.config))
return ws
@property
def default_service_platform(self):
return self.config['default_service_platform']
@default_service_platform.setter
def default_service_platform(self, sp_id):
self.config['default_service_platform'] = sp_id
@property
def service_platforms(self):
return self.config['service_platforms']
@service_platforms.setter
def service_platforms(self, sps):
self.config['service_platforms'] = sps
def get_service_platform(self, sp_id):
if sp_id not in self.service_platforms.keys():
return
return self.service_platforms[sp_id]
def add_service_platform(self, sp_id):
if sp_id in self.service_platforms.keys():
return
self.service_platforms[sp_id] = {'url': '',
'credentials': {'username': '',
'password': '',
'token_file': '',
'signature': {
'pub_key': '',
'prv_key': '',
'cert': ''
}}
}
def config_service_platform(self, sp_id, default=None, url=None,
username=None, password=None, token=None,
pub_key=None, prv_key=None, cert=None):
if sp_id not in self.service_platforms.keys():
return
sp = self.service_platforms[sp_id]
if url:
sp['url'] = url
if username:
sp['credentials']['username'] = username
if password:
sp['credentials']['password'] = password
if token:
sp['credentials']['token_file'] = token
if pub_key:
sp['credentials']['signature']['pub_key'] = pub_key
if prv_key:
sp['credentials']['signature']['prv_key'] = prv_key
if cert:
sp['credentials']['signature']['cert'] = cert
if default:
self.default_service_platform = sp_id
# update workspace config descriptor
self.create_files()
def __eq__(self, other):
"""
Function to assert if two workspaces have the
same configuration. It overrides the super method
as is only the need to compare configurations.
"""
return isinstance(other, type(self)) \
and self.workspace_name == other.workspace_name \
and self.workspace_root == other.workspace_root \
and self.config == other.config
def parse_args_workspace(input_args=None):
parser = argparse.ArgumentParser(description="Create a new workspace")
parser.add_argument(
"-w", "--workspace",
help="location of new workspace. If not specified will assume '{}'"
.format(Workspace.DEFAULT_WORKSPACE_DIR),
required=False)
parser.add_argument(
"--debug",
help="increases logging level to debug",
required=False,
action="store_true")
if input_args is None:
input_args = sys.argv[1:]
return parser.parse_args(input_args)
# for entry point tng-workspace; was as "tng-project --init" before
def init_workspace(args=None):
if args is None:
args = parse_args_workspace()
log_level = "INFO"
if args.debug:
log_level = "DEBUG"
coloredlogs.install(level=log_level)
# If workspace arg is not given, create a workspace in user home
if not args.workspace:
ws_root = Workspace.DEFAULT_WORKSPACE_DIR
# If a workspace already exists at user home, throw an error and quit
if os.path.isdir(ws_root):
log.warning("A workspace already exists in {}. Please specify a different location.".format(ws_root))
return
else:
ws_root = os.path.expanduser(args.workspace)
create_workspace(ws_root, log_level)
def create_workspace(ws_root, log_level="INFO"):
# create a new workspace
ws = Workspace(ws_root, log_level=log_level)
if ws.check_ws_exists():
log.warning("A workspace already exists at the specified location")
exit(1)
log.debug("Attempting to create a new workspace")
cwd = os.getcwd()
ws.create_dirs()
ws.create_files()
os.chdir(cwd)
log.debug("Workspace created.")
|
sonata-nfv/tng-sdk-project
|
src/tngsdk/project/workspace.py
|
workspace.py
|
py
| 14,885
|
python
|
en
|
code
| 5
|
github-code
|
6
|
4729016187
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 13 00:32:57 2018
@author: pablosanchez
"""
import tensorflow as tf
import utils.constants as const
from networks.dense_net import DenseNet
class ConvNet(object):
def __init__(self, hidden_dim, output_dim, reuse, transfer_fct=tf.nn.relu,
act_out=tf.nn.sigmoid, drop_rate=0., kinit=tf.contrib.layers.xavier_initializer(),
bias_init=tf.constant_initializer(0.0)):
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.transfer_fct = transfer_fct
self.act_out = act_out
self.reuse = reuse
self.drop_rate = drop_rate
self.kinit= kinit
self.bias_init = bias_init
def build(self, input_):
raise NotImplementedError
    # filters == num_channels
    # padding is either 'SAME' or 'VALID'
def conv_layer(self, input_, filters, k_size, stride, padding, name, act_func=tf.nn.relu):
conv = tf.layers.conv2d(inputs=input_,
filters=filters,
kernel_size=k_size,
strides=stride,
padding=padding,
activation=act_func,
kernel_initializer=self.kinit,
bias_initializer=self.bias_init,
name=name,
reuse=self.reuse)
print('[*] Layer (',conv.name, ') output shape:', conv.get_shape().as_list())
# with tf.variable_scope(name, reuse=True):
# variable_summary(tf.get_variable('kernel'), 'kernel')
# variable_summary(tf.get_variable('bias'), 'bias')
return conv
    def max_pool(self, input_, pool_size, stride, name):
        # max-pooling layer, e.g. [batch_size, 28, 28, 32] -> [batch_size, 14, 14, 32]
        pool = tf.layers.max_pooling2d(inputs=input_,
                                       pool_size=pool_size,
                                       strides=stride,
                                       name=name)
print('[*] Layer (',pool.name, ') output shape:', pool.get_shape().as_list())
return pool
class ConvNet3(ConvNet):
def __init__(self, input_, hidden_dim, output_dim, reuse, transfer_fct=tf.nn.relu,
act_out=tf.nn.sigmoid, drop_rate=0., kinit=tf.contrib.layers.xavier_initializer(),
bias_init=tf.constant_initializer(0.0)):
        super().__init__(hidden_dim, output_dim, reuse, transfer_fct, act_out, drop_rate, kinit, bias_init)
self.output = self.build(input_)
def build(self, input_):
x = self.conv_layer(input_=input_,
filters=32,
k_size=4, #[4, 4]
stride=2,
padding='SAME',
name='conv_1',
act_func=self.transfer_fct)
x = self.conv_layer(input_=x,
filters=64,
k_size=4, #[4, 4]
stride=2,
padding='SAME',
name='conv_2',
act_func=self.transfer_fct)
x = self.conv_layer(input_=x,
filters=64,
k_size=4, #[4, 4]
stride=2,
padding='SAME',
name='conv_3',
act_func=self.transfer_fct)
x = tf.contrib.layers.flatten(x)
dense = DenseNet(input_=x,
hidden_dim=self.hidden_dim,
output_dim=self.output_dim,
num_layers=2,
transfer_fct=self.transfer_fct,
act_out=self.act_out,
reuse=self.reuse,
kinit=self.kinit,
bias_init=self.bias_init,
drop_rate=self.drop_rate)
x = dense.output
return x
class ConvNet3Gauss(ConvNet):
def __init__(self, input_, hidden_dim, output_dim, reuse, transfer_fct=tf.nn.relu,
act_out_mean=None,act_out_var=tf.nn.softplus, drop_rate=0., kinit=tf.contrib.layers.xavier_initializer(),
bias_init=tf.constant_initializer(0.0)):
super().__init__(hidden_dim, output_dim, reuse, transfer_fct, act_out_mean, drop_rate, kinit, bias_init)
self.act_out_mean = act_out_mean
self.act_out_var = act_out_var
self.mean, self.var = self.build(input_)
def build(self, input_):
x = self.conv_layer(input_=input_,
filters=32,
k_size=4, #[4, 4]
stride=2,
padding='SAME',
name='conv_1',
act_func=self.transfer_fct)
x = self.conv_layer(input_=x,
filters=64,
k_size=4, #[4, 4]
stride=2,
padding='SAME',
name='conv_2',
act_func=self.transfer_fct)
x = self.conv_layer(input_=x,
filters=64,
k_size=4, #[4, 4]
stride=2,
padding='SAME',
name='conv_3',
act_func=self.transfer_fct)
x = tf.contrib.layers.flatten(x)
dense = DenseNet(input_=x,
hidden_dim=self.hidden_dim,
output_dim=self.hidden_dim,
num_layers=1,
transfer_fct=self.transfer_fct,
act_out=self.transfer_fct,
reuse=self.reuse,
kinit=self.kinit,
bias_init=self.bias_init,
drop_rate=self.drop_rate)
x = dense.output
with tf.variable_scope('mean', reuse=self.reuse):
dense_mean = DenseNet(input_=x,
hidden_dim=self.hidden_dim,
output_dim=self.output_dim,
num_layers=1,
transfer_fct=self.transfer_fct,
act_out=self.act_out_mean,
reuse=self.reuse,
kinit=self.kinit,
bias_init=self.bias_init,
drop_rate=self.drop_rate)
with tf.variable_scope('var', reuse=self.reuse):
dense_var = DenseNet(input_=x,
hidden_dim=self.hidden_dim,
output_dim=self.output_dim,
num_layers=1,
transfer_fct=self.transfer_fct,
act_out=self.act_out_var,
reuse=self.reuse,
kinit=self.kinit,
bias_init=self.bias_init,
drop_rate=self.drop_rate)
return dense_mean.output, dense_var.output
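# --- Editor's sketch (not part of the original file) ---
# Builds ConvNet3 on a TF 1.x placeholder; the 64x64x3 input shape and the
# hidden/output sizes are illustrative, and running this directly assumes the
# repository root (providing the networks package) is on the Python path.
if __name__ == "__main__":
    images = tf.placeholder(tf.float32, [None, 64, 64, 3])
    net = ConvNet3(images, hidden_dim=256, output_dim=10, reuse=False)
    print(net.output)  # Tensor of shape [None, 10]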
|
psanch21/VAE-GMVAE
|
networks/conv_net.py
|
conv_net.py
|
py
| 7,684
|
python
|
en
|
code
| 197
|
github-code
|
6
|
3695969994
|
"""Split-Attention"""
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
from .dropblock import DropBlock2D
__all__ = ['SplAtConv2d']
class SplAtConv2d(nn.Module):
"""Split-Attention Conv2d
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=(1, 1), padding=(0, 0),
dilation=(1, 1), groups=1, bias=True, radix=2, reduction_factor=4,
rectify=False, rectify_avg=False, act_layer=nn.ReLU, norm_layer=None, drop_block=None, **kwargs):
super(SplAtConv2d, self).__init__()
padding = _pair(padding)
rectify = rectify and (padding[0] > 0 or padding[1] > 0)
inter_channels = max(in_channels * radix // reduction_factor, 32)
self.radix = radix
self.drop_block = drop_block
if rectify:
from rfconv import RFConv2d
self.conv = RFConv2d(in_channels, out_channels * radix, kernel_size, stride, padding, dilation,
groups=groups * radix, bias=bias, average_mode=rectify_avg, **kwargs)
else:
self.conv = nn.Conv2d(in_channels, out_channels * radix, kernel_size, stride, padding, dilation,
groups=groups * radix, bias=bias, **kwargs)
self.bn0 = norm_layer(out_channels * radix) if norm_layer is not None else None
self.act0 = act_layer(inplace=True)
self.fc1 = nn.Conv2d(out_channels, inter_channels, 1, groups=groups)
self.bn1 = norm_layer(inter_channels) if norm_layer is not None else None
self.act1 = act_layer(inplace=True)
self.fc2 = nn.Conv2d(inter_channels, out_channels * radix, 1, groups=groups)
self.rsoftmax = RadixSoftmax(radix, groups)
def forward(self, x):
x = self.conv(x)
if self.bn0 is not None:
x = self.bn0(x)
if self.drop_block is not None:
x = self.drop_block(x)
x = self.act0(x)
batch, rchannel = x.shape[:2]
if self.radix > 1:
if torch.__version__ < '1.5':
splited = torch.split(x, int(rchannel // self.radix), dim=1)
else:
splited = torch.split(x, rchannel // self.radix, dim=1)
gap = sum(splited)
else:
gap = x
gap = F.adaptive_avg_pool2d(gap, 1)
gap = self.fc1(gap)
if self.bn1 is not None:
gap = self.bn1(gap)
gap = self.act1(gap)
atten = self.fc2(gap)
atten = self.rsoftmax(atten).view(batch, -1, 1, 1)
if self.radix > 1:
if torch.__version__ < '1.5':
attens = torch.split(atten, int(rchannel // self.radix), dim=1)
else:
attens = torch.split(atten, rchannel // self.radix, dim=1)
out = sum([att * split for (att, split) in zip(attens, splited)])
else:
out = atten * x
return out.contiguous()
class RadixSoftmax(nn.Module):
def __init__(self, radix, cardinality):
super().__init__()
self.radix = radix
self.cardinality = cardinality
def forward(self, x):
batch = x.size(0)
if self.radix > 1:
x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2)
x = F.softmax(x, dim=1)
x = x.reshape(batch, -1)
else:
x = torch.sigmoid(x)
return x
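# --- Editor's sketch (not part of the original file) ---
# Minimal forward pass; channel counts and input size are illustrative. Because
# of the relative import above, run it as a module from the repository root,
# e.g.: python -m cvmodels.models.layers.splat
if __name__ == "__main__":
    layer = SplAtConv2d(64, 128, kernel_size=3, padding=1,
                        norm_layer=nn.BatchNorm2d)
    x = torch.randn(2, 64, 32, 32)
    print(layer(x).shape)  # torch.Size([2, 128, 32, 32])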
|
welkin-feng/ComputerVision
|
cvmodels/models/layers/splat.py
|
splat.py
|
py
| 3,449
|
python
|
en
|
code
| 2
|
github-code
|
6
|
13138139061
|
from tkinter import *
def miles_to_km():
miles = float(miles_input.get())
km = miles * 1.609
kilometer_result_label.config(text=f"{km}")
mero_app = Tk()
mero_app.title("Miles to Kilometer Converter")
mero_app.config(padx=20, pady=20)
miles_input = Entry(width=7)
miles_input.grid(column=1, row=0)
miles_labels = Label(text="Miles")
miles_labels.grid(column=2, row=0)
is_equal_label = Label(text="is equal to")
is_equal_label.grid(column=0, row=1)
kilometer_result_label = Label(text="0")
kilometer_result_label.grid(column=1, row=1)
kilometer_label = Label(text="Km")
kilometer_label.grid(column=2, row=1)
calculate_button = Button(text="Calculate", command=miles_to_km)
calculate_button.grid(column=1, row=2)
mero_app.mainloop()
|
callmebhawesh/100-Days-Of-Code
|
Day 27/project/main.py
|
main.py
|
py
| 753
|
python
|
en
|
code
| 3
|
github-code
|
6
|
69997908667
|
import requests
try:
import BeautifulSoup #python2
version = 2
except ImportError:
import bs4 as BeautifulSoup #python3
version = 3
def get_people_by_url(url, recurse=False):
response = requests.get(url)
html = response.content
if version == 2:
soup = BeautifulSoup.BeautifulSoup(html)
if version == 3:
soup = BeautifulSoup.BeautifulSoup(html, "html.parser")
payload = soup.find("pre")
output = []
# case 1: no matches
if "No matches to your query." in payload.text:
return output
# case 2: multiple matches
if isinstance(payload.contents[0], BeautifulSoup.Tag):
if payload.contents[0].name == "a":
for a in payload.findAll("a", href=True):
if recurse:
person_url = "http://web.mit.edu" + a["href"]
details = get_people_by_url(person_url)
output.extend(details)
else:
output.append({"name": a.contents[0]})
return output
# case 3: one match
user_dict = {}
field = u""
val = u""
for content in payload.contents:
if isinstance(content, BeautifulSoup.Tag):
val = content.contents[0]
user_dict[field] = val.strip()
else:
for field_val in map(lambda s: s.strip(), content.split("\n")):
if field_val != "":
split_field_val = field_val.split(': ', 1)
field = split_field_val[0]
if field[-1] == ":":
field = field[:-1]
if len(split_field_val) > 1:
val = split_field_val[1]
user_dict[field] = val.strip()
output.append(user_dict)
return output
def get_people(query, options="general", recurse=False):
"""Accesses the MIT People Directory
Args:
query (str): The name, email, or phone number to search for
options (str, optional): The type of search to do. Should be one of
"general" -- name/email
"phone" -- reverse lookup (10 digits)
"lastnamesx" -- lastname sounds like
recurse (bool, optional): Whether or not additional requests should
be made to retrieve all fields when multiple results are found.
Defaults to False.
Returns:
list: The people found.
Each element is a dictionary containing the person's details.
The dictionary will at least contain a "name" field.
"""
url = "http://web.mit.edu/bin/cgicso?options="+options+"&query="+query
return get_people_by_url(url, recurse)
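# --- Editor's usage sketch (not part of the original file) ---
# Requires network access to web.mit.edu; the query string is illustrative.
if __name__ == "__main__":
    for person in get_people("smith"):
        print(person.get("name"))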
|
barryam3/MITpeople
|
MITpeople.py
|
MITpeople.py
|
py
| 2,696
|
python
|
en
|
code
| 0
|
github-code
|
6
|
27812524833
|
from collections import deque
class Queue:
def __init__(self):
self.queue = deque()
def enqueue(self, item):
self.queue.append(item)
def dequeue(self):
if self.is_empty():
raise IndexError("Queue is empty")
return self.queue.popleft()
def is_empty(self):
return len(self.queue) == 0
def size(self):
return len(self.queue)
# Example usage
q = Queue()
q.enqueue(5)
q.enqueue(10)
q.enqueue(15)
print("Dequeue:", q.dequeue())
print("Dequeue:", q.dequeue())
print("Is empty:", q.is_empty())
print("Size:", q.size())
#################################
import queue
# Create a new queue
q = queue.Queue()
# Enqueue elements
q.put(5)
q.put(10)
q.put(15)
# Dequeue elements
item1 = q.get()
item2 = q.get()
print("Dequeue:", item1)
print("Dequeue:", item2)
print("Is empty:", q.empty())
print("Size:", q.qsize())
|
aaryam-dev/Python_DSA
|
Data Structures/2_Queue/Queue.py
|
Queue.py
|
py
| 897
|
python
|
en
|
code
| 0
|
github-code
|
6
|
27537060768
|
import requests
import json
BASE_URL = 'http://localhost:9000'
def test_health():
response = requests.get(BASE_URL)
assert response.status_code == 200
assert response.text == "Welcome to our Spring-Boot Application!"
def test_get_logs():
response = requests.get(BASE_URL + "/logs")
logs = response.json()
expected_logs = json.dumps(logs, separators=(
',', ':')) # Converting the json to string
assert response.status_code == 200
assert response.text == expected_logs
def test_transaction():
response = requests.get(BASE_URL + "/transaction")
assert response.status_code == 200
assert response.text == "New transaction have been made!"
|
GalDavid6/GaMosh.DevOps
|
tests/test_http.py
|
test_http.py
|
py
| 696
|
python
|
en
|
code
| 0
|
github-code
|
6
|
71859300669
|
'''
Plotting Histograms and Scatter Plots for analysis
Also makes and prints dataframes sorted by various
statistics (see per_million)
Makes a final csv file with relevant contribution stats
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
import warnings
warnings.filterwarnings('ignore')
def hist_stat(df):
''' Function: hist_stat
Parameters: df (dataframe)
Returns: none
Does: creates histogram
'''
# for loop iterates by column, creates histogram, and saves to pdf
for col in df.columns[4:]:
df.hist(column = col, bins = 25, grid = False, figsize = (9,6),
color = '#2FB7D7', rwidth = 0.9)
plt.savefig('hist_{}.pdf'.format(col))
plt.show()
def scatter_stat(df, win):
''' Function: scatter_stat
Parameters: df (dataframe), win(string)
Returns: none
Does: creates scatterplot
'''
# creates empty lists coef and coef_weights
coef = []
coef_weights = []
# for loop iterates by column
for col in df.columns[:-1]:
# for linear regression data
x_stat = df[col]
y_stat = df[win]
x_stat = sm.add_constant(x_stat)
model = sm.OLS(y_stat, x_stat).fit()
print('For ', col, ':', '\n', model.summary(), '\n', sep = '')
# grabbing coefficients from data
model_coef = list(model.params)
model_coef.append(col)
coef.append(model_coef)
# for making linear regression line
x_pred = np.linspace(x_stat.min(), x_stat.max(), 50)
x_pred2 = sm.add_constant(x_pred)
y_pred = model.predict(x_pred2)
x_pred = np.delete(x_pred, np.s_[:1], 1)
# plotting scatter plot with regression line
df.plot.scatter(x = col, y = win)
plt.plot(x_pred, y_pred, color = 'red')
plt.savefig('scatter_{}.pdf'.format(col))
plt.show()
def scatter_players(df):
''' Function: scatter_players
Parameters: df (dataframe)
Returns: params
Does: creates scatterplot
'''
# specifies columns
cols = ['eff', 'PER', 'Cont']
# empty dictionary of params
params = {}
# for loop iterates by specified columns
for col in cols:
# making linear regression line
x_stat = df['Salary/$mil']
y_stat = df[col]
model = sm.OLS(y_stat, x_stat).fit()
x_pred = np.linspace(x_stat.min(), x_stat.max(), 50)
y_pred = model.predict(x_pred)
# adding to dictionary the coefficients of linear regression
params[col] = float(model.params)
# print top players by stat
top_df = df.sort_values(by = col, ascending = False)
print('Top/Bottom 10 in {}:\n\n'.format(col),
top_df.head(10), '\n\n', top_df.tail(10), '\n\n')
# creates scatterplots and saves to pdf
fig, ax = plt.subplots()
df.plot.scatter(x = 'Salary/$mil', y = col, c = 'green', zorder = 2, ax = ax)
# annotates the top 3 points in the stat
for index, column in top_df.head(3).iterrows():
ax.annotate(index, (column['Salary/$mil'] + .5, column[col]))
plt.plot(x_pred, y_pred, linestyle = '--', color = 'black')
plt.grid(linestyle = ':')
plt.savefig('salary_vs_{}.pdf'.format(col))
plt.show()
return params
def per_million(df):
    ''' Function: per_million
Parameters: df (dataframe)
Returns: df (dataframe)
Does: prints dataframes to view players sorted by how much
they are over/underpaid based on linear regression line
Also adds to dataframe.
'''
# calls above function and returns the coefficient from linear regression
params = scatter_players(df)
# specifies columns
cols = ['eff', 'PER', 'Cont']
cols_dollar = ['eff/$mil', 'PER/$mil', 'Cont/$mil']
for col in cols:
# adding per million to dataframe
df['{}/$mil'.format(col)] = (df[col] / (df['Salary/$mil']))
# calculating difference between how much should be paid
# and how much they are actually paid based on linear regression
df['Net Diff. $mil({})'.format(col)] = ((df[col] / params[col]) -
df['Salary/$mil'])
# sorting by above metric
top_df = df.sort_values(by = 'Net Diff. $mil({})'.format(col),
ascending = False)
# moving column to view in idle
col_name = top_df.pop(col)
top_df.insert(1, col_name.name, col_name)
print('Top/Bottom 10 in Net Diff. $mil({}):\n'.format(col),
top_df.head(10), '\n', top_df.tail(10), '\n\n')
return df
def main():
# reads csv, calls hist_stat
nba_df = pd.read_csv('statistics.csv', index_col = 0)
hist_stat(nba_df)
# reads csv, makes dataframe
team_nba_df = pd.read_csv('team_stats(2018)v2.csv', index_col = 1)
team_nba_df = team_nba_df.drop(columns = ['Rk', '2P', 'FTA', 'FGA'])
# calls scatter_stat
scatter_stat(team_nba_df, 'Wins')
# modifies nba_df to more suitable dataframe for function
nba_df_indexed = nba_df.set_index('Names')
mod_nba = nba_df_indexed[['Salary', 'eff', 'PER', 'Cont']]
mod_nba['Salary'] = mod_nba['Salary'].div(1000000)
mod_nba.rename(columns = {'Salary': 'Salary/$mil'}, inplace = True)
# calls per_million
final_stats = per_million(mod_nba)
final_stats.to_csv('contribution_stats.csv')
main()
|
fzyang1227/ds2000proj
|
graphs.py
|
graphs.py
|
py
| 5,649
|
python
|
en
|
code
| 0
|
github-code
|
6
|
16593446793
|
from __future__ import absolute_import, print_function, unicode_literals
del absolute_import, print_function, unicode_literals
import gpg
import support
support.init_gpgme(gpg.constants.protocol.OpenPGP)
c = gpg.Context()
def dump_item(item):
print("l={} k={} t={} o={} v={} u={}".format(
item.level, item.keyid, item.type, item.owner_trust,
item.validity, item.name))
c.op_trustlist_start("alice", 0)
while True:
item = c.op_trustlist_next()
if not item:
break
dump_item(item)
c.op_trustlist_end()
for item in c.op_trustlist_all("alice", 0):
dump_item(item)
|
Discreete-Linux/gpgme
|
lang/python/tests/t-trustlist.py
|
t-trustlist.py
|
py
| 608
|
python
|
en
|
code
| 1
|
github-code
|
6
|
24623314639
|
import tkinter as tk
import webbrowser
def open_pdf(event):
webbrowser.open_new("Os.pdf")
def open_pdf2(event):
webbrowser.open_new("idea.pdf")
root = tk.Tk()
# Creating four frames for the top row
frame1 = tk.Frame(root, width=400, height=300, bg="white")
frame1.grid(row=0, column=0, padx=0, pady=10)
frame2 = tk.Frame(root, width=400, height=300, bg="white")
frame2.grid(row=0, column=1, padx=0, pady=10)
frame3 = tk.Frame(root, width=300, height=300, bg="orange")
frame3.grid(row=0, column=2, padx=10, pady=10)
frame4 = tk.Frame(root, width=300, height=300, bg="red")
frame4.grid(row=0, column=3, padx=10, pady=10)
frame5 = tk.Frame(root, width=280, height=300, bg="blue")
frame5.grid(row=0, column=4, padx=10, pady=10)
image = tk.PhotoImage(file="pdf-1.png").subsample(3)
# Creating frames for second row
frame6 = tk.Frame(root, width=300, height=200, bg="white")
frame6.grid(row=1, column=0, padx=20, pady=50)
label6 = tk.Label(frame6, image=image)
label6.pack()
label6.bind("<Button-1>", open_pdf)
label_text6 = tk.Label(frame6, text="Image 1")
label_text6.pack()
frame7 = tk.Frame(root, width=300, height=200, bg="white")
frame7.grid(row=1, column=1, padx=20, pady=10)
label7 = tk.Label(frame7, image=image)
label7.pack()
label7.bind("<Button-1>", open_pdf2)
label_text7 = tk.Label(frame7, text="Image 2")
label_text7.pack()
frame8 = tk.Frame(root, width=300, height=200, bg="white")
frame8.grid(row=1, column=2, padx=20, pady=10)
label8 = tk.Label(frame8, image=image)
label8.pack()
label_text8 = tk.Label(frame8, text="Image 3", font=("Helvetica", 16, "bold"))
label_text8.pack()
frame9 = tk.Frame(root, width=300, height=200, bg="white")
frame9.grid(row=1, column=3, padx=20, pady=10)
label9 = tk.Label(frame9, image=image)
label9.pack()
label_text9 = tk.Label(frame9, text="Image 4")
label_text9.pack()
frame10 = tk.Frame(root, width=280, height=200, bg="white")
frame10.grid(row=1, column=4, padx=20, pady=10)
label10 = tk.Label(frame10, image=image)
label10.pack()
label_text10 = tk.Label(frame10, text="Image 5")
label_text10.pack()
# Creating frames for third row
frame11 = tk.Frame(root, width=300, height=200, bg="white")
frame11.grid(row=2, column=0, padx=20, pady=10)
label11 = tk.Label(frame11, image=image)
label11.pack()
label_text11 = tk.Label(frame11, text="Image 6")
label_text11.pack()
frame12 = tk.Frame(root, width=300, height=200, bg="white")
frame12.grid(row=2, column=1, padx=20, pady=10)
label12 = tk.Label(frame12, image=image)
label12.pack()
frame13 = tk.Frame(root, width=300, height=200, bg="white")
frame13.grid(row=2, column=2, padx=20, pady=10)
label13 = tk.Label(frame13, image=image)
label13.pack()
frame14 = tk.Frame(root, width=300, height=200, bg="white")
frame14.grid(row=2, column=3, padx=20, pady=10)
label14 = tk.Label(frame14, image=image)
label14.pack()
frame15 = tk.Frame(root, width=280, height=200, bg="white")
frame15.grid(row=2, column=4, padx=20, pady=10)
# Create a label within the frame and set the image
label15 = tk.Label(frame15, image=image)
label15.pack()
label_text15=tk.Label(frame15,text="OPEN THIS PDF")
label_text15.pack()
root.mainloop()
|
TanishDahiya/WebPort
|
guibox.py
|
guibox.py
|
py
| 3,288
|
python
|
en
|
code
| 0
|
github-code
|
6
|
24199878977
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 11 20:11:22 2019
@author: Administrator
"""
from typing import List
class Solution:
def minPathSum(self, grid: List[List[int]]) -> int:
m = len(grid)
if not m:
return
n = len(grid[0])
memo = [[None for _ in range(n)] for _ in range(m)]
memo[m - 1][n - 1] = grid[m - 1][n - 1]
# take memo[m-1] done
for i in range(n - 2, -1, -1):
memo[m - 1][i] = grid[m - 1][i] + memo[m - 1][i + 1]
# take memo[:][n - 1] done
for i in range(m - 2, -1, -1):
memo[i][n - 1] = grid[i][n - 1] + memo[i + 1][n - 1]
for i in range(m - 2, -1, -1):
for j in range(n - 2, -1, -1):
memo[i][j] = grid[i][j] + min(memo[i + 1][j], memo[i][j + 1])
return memo[0][0]
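# Added quick check (illustrative): for the classic 3x3 example grid the
# minimum path sum is 1 + 3 + 1 + 1 + 1 = 7.
if __name__ == "__main__":
    grid = [[1, 3, 1],
            [1, 5, 1],
            [4, 2, 1]]
    print(Solution().minPathSum(grid))  # expected: 7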
|
AiZhanghan/Leetcode
|
code/64. Minimum Path Sum.py
|
64. Minimum Path Sum.py
|
py
| 851
|
python
|
en
|
code
| 0
|
github-code
|
6
|
32909453379
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import m3u8
import os
import glob
import sys
from Crypto.Cipher import AES
from multiprocessing import Pool
import multiprocessing
import requests
import logging
import threading
import queue
import aiohttp
import asyncio
import time
from functools import wraps
from concurrent.futures import ThreadPoolExecutor
_logger = logging.getLogger(__name__)
# semaphore (caps concurrent coroutines)
sem = asyncio.Semaphore(30)
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36'
}
def logger_cost_time(func):
@wraps(func)
def wrapper(*args, **kwargs):
start_time = time.time()
res = func(*args, **kwargs)
_logger.info({
'cost time: ': time.time() - start_time
})
print('cost: ', time.time() - start_time)
return res
return wrapper
class M3u8Download(object):
def __init__(self, m3u8_url):
self.m3u8_url = m3u8_url
self.m3u8_obj = self.get_m3u8_obj()
self._base_path = self.mkdir_m3u8_file()
self.m3u8_key = self.get_m3u8_key_iv()[0]
self.m3u8_iv = self.get_m3u8_key_iv()[1]
# TODO: focus on diff resolution
def get_m3u8_obj(self):
"""
m3u8
playlists: bandwidth and resolution
:return: m3u8
"""
m3u8_obj = m3u8.load(self.m3u8_url, headers=headers)
if m3u8_obj.data.get('playlists', False):
# TODO: for now, only one resolution
for playlist_uri in m3u8_obj.data.get('playlists'):
_logger.info({
'resolution': playlist_uri
})
url = m3u8_obj.base_uri + playlist_uri.get('uri')
m3u8_obj = m3u8.load(url, headers=headers)
return m3u8_obj
def get_m3u8_key_iv(self):
"""
        Fetch the m3u8 key
        :return: the key and the iv
"""
        try:
            if self.m3u8_obj.keys[0].uri.startswith('http'):
                key_context, iv_context = requests.get(self.m3u8_obj.keys[0].uri).content, self.m3u8_obj.keys[0].iv
            else:
                key_context, iv_context = requests.get(self.m3u8_obj.base_uri + self.m3u8_obj.keys[0].uri).content, \
                                          self.m3u8_obj.keys[0].iv
            # keep the key as raw bytes: AES.new() rejects str keys
        except Exception as e:
            _logger.info({
                'error': e
            })
            key_context, iv_context = False, False
        if iv_context:
            # the playlist IV is a hex string such as '0x1234...'; str.decode('hex') is Python 2 only
            iv_context = bytes.fromhex(iv_context[2:] if iv_context.startswith('0x') else iv_context)
        return key_context, iv_context
def decrypt_ts_file(self, data):
"""
        Decrypt a ts segment
        :param data: encrypted bytes
        :return: data decrypted with AES.MODE_CBC
"""
        if self.m3u8_iv:
            decrypt_obj = AES.new(self.m3u8_key, AES.MODE_CBC, iv=self.m3u8_iv)
        else:
            # no IV in the playlist, fall back to using the key bytes as the IV
            decrypt_obj = AES.new(self.m3u8_key, AES.MODE_CBC, self.m3u8_key)
        return decrypt_obj.decrypt(data)
def mkdir_m3u8_file(self):
"""
        Create the download directory
        :return: directory path
"""
base_uri = self.m3u8_obj.base_uri
base_path = base_uri.replace('/', '').replace(':', '')
if not os.path.exists(base_path):
os.mkdir(base_path)
return base_path + '/'
    def merge_ts_2_mp4(self):
        """
        Merge the ts segments into an mp4,
        in natural sort order (ls -1v)
        """
        os.chdir(self.base_path)
        if glob.glob('*.ts'):  # os.path.exists does not expand wildcards
            # ls -1v sorts by name
            merge_file = 'for ts_id in `ls -1v *.ts`; do cat $ts_id >> all.mp4; done'
            os.system(merge_file)
            os.system('rm *.ts')
def download_m3u8_ts_file(self, m3u8_obj_segments):
"""
        Download one ts segment
        :param m3u8_obj_segments: an m3u8 ts segment
"""
if m3u8_obj_segments.uri.startswith('http'):
ts_link, ts_name = m3u8_obj_segments.uri, m3u8_obj_segments.uri
else:
ts_link, ts_name = m3u8_obj_segments.base_uri + m3u8_obj_segments.uri, m3u8_obj_segments.uri
ts_name = ts_name.replace('/', '_')
print('ts_link, ts_name', ts_link, ts_name)
with open(self.base_path + ts_name, 'wb') as f:
tmp = requests.get(ts_link, headers=headers)
tmp_data = tmp.content
            # decrypt when a key is present
if self.m3u8_key:
decrypt_data = self.decrypt_ts_file(tmp_data)
else:
decrypt_data = tmp_data
_logger.info({
'download file: ': ts_name
})
f.write(decrypt_data)
@property
def base_path(self):
return self._base_path
@base_path.setter
def base_path(self, base_path):
self._base_path = base_path
class MultiProcessM3u8Download(M3u8Download):
"""
    Multi-process download
"""
def __init__(self, m3u8_url):
super(MultiProcessM3u8Download, self).__init__(m3u8_url)
@logger_cost_time
def download_m3u8_multi_process(self):
"""
        Multi-process download with Pool(processes=multiprocessing.cpu_count())
"""
pool = Pool(processes=multiprocessing.cpu_count())
for m3u8_obj_segments in self.m3u8_obj.segments:
pool.apply_async(self.download_m3u8_ts_file, args=(m3u8_obj_segments,))
pool.close()
pool.join()
class MultiThreadingM3u8Download(M3u8Download):
"""
    Multi-threaded download
"""
def __init__(self, m3u8_url):
super(MultiThreadingM3u8Download, self).__init__(m3u8_url)
@logger_cost_time
def download_m3u8_multi_threading(self):
# use ThreadPoolExecutor in case too many threader
with ThreadPoolExecutor(max_workers=30) as executor:
executor.map(self.download_m3u8_ts_file, self.m3u8_obj.segments)
# thread_ids = []
# for m3u8_obj_segments in self.m3u8_obj.segments:
# t = threading.Thread(target=self.download_m3u8_ts_file, args=(m3u8_obj_segments,))
# t.start()
# thread_ids.append(t)
# for x in thread_ids:
# x.join()
class MultiThreadingQueueM3u8Download(M3u8Download):
"""
    Multi-threaded queue download
"""
def __init__(self, m3u8_url):
super(MultiThreadingQueueM3u8Download, self).__init__(m3u8_url)
self.queue = queue.Queue()
@logger_cost_time
def download_m3u8_multi_threading_queue(self):
"""
        Multi-threaded download using a queue
:return:
"""
thread_ids = []
for m3u8_obj_segments in self.m3u8_obj.segments:
t = threading.Thread(target=self.download_m3u8_ts_file_queue, args=(m3u8_obj_segments,))
t.start()
thread_ids.append(t)
for x in thread_ids:
x.join()
result = []
while not self.queue.empty():
result.append(self.queue.get())
print('result: ', result)
def download_m3u8_ts_file_queue(self, m3u8_obj_segments):
ts_link, ts_name = m3u8_obj_segments.base_uri + m3u8_obj_segments.uri, m3u8_obj_segments.uri
with open(self.base_path + ts_name, 'wb') as f:
tmp = requests.get(ts_link, headers=headers)
f.write(tmp.content)
self.queue.put(ts_link)
class AsyncM3u8Download(M3u8Download):
"""
    Asynchronous download
"""
def __init__(self, m3u8_url):
super(AsyncM3u8Download, self).__init__(m3u8_url)
    # FIXME: the understanding of async here may be incorrect
async def download_m3u8_ts_file_async(self, ts_link, ts_name):
"""
        Asynchronous download
        :param ts_link: url of the ts segment to download
        :param ts_name: name of the output file
"""
async with aiohttp.ClientSession() as session:
async with session.request('GET', ts_link, headers=headers) as resp:
with open(self.base_path + ts_name + '.ts', 'wb') as f:
tmp = await resp.read()
f.write(tmp)
@logger_cost_time
def main_download_m3u8(self):
"""
        Entry point for the download
"""
tasks = []
loop = asyncio.get_event_loop()
for m3u8_obj_segments in self.m3u8_obj.segments:
ts_link, ts_name = m3u8_obj_segments.base_uri + m3u8_obj_segments.uri, m3u8_obj_segments.uri
tasks.append(self.download_m3u8_ts_file_async(ts_link, ts_name))
loop.run_until_complete(asyncio.wait(tasks))
loop.close()
if __name__ == '__main__':
try:
m3u8_link = sys.argv[1]
except IndexError:
m3u8_link = 'https://bk.andisk.com/data/3048aa1f-b2fb-4fb7-b452-3ebc96c76374/res/' \
'f1826fdb-def2-4dba-a7a1-4afbf5d17491.m3u8'
# m3u8_link = 'https://v.zdubo.com/20200115/FkoHzMWM/index.m3u8'
    # # multi-process download
    # downloader = MultiProcessM3u8Download(m3u8_link)
    # downloader.download_m3u8_multi_process()
    # downloader.merge_ts_2_mp4()
    # # multi-threaded download
downloader = MultiThreadingM3u8Download(m3u8_link)
downloader.download_m3u8_multi_threading()
downloader.merge_ts_2_mp4()
    # # multi-threaded download via queue
    # downloader = MultiThreadingQueueM3u8Download(m3u8_link)
    # downloader.download_m3u8_multi_threading_queue()
    # downloader.merge_ts_2_mp4()
    # # asynchronous download
    # downloader = AsyncM3u8Download(m3u8_link)
    # downloader.main_download_m3u8()
    # downloader.merge_ts_2_mp4()
|
id10tttt/tools
|
m3u8_donwload.py
|
m3u8_donwload.py
|
py
| 9,424
|
python
|
en
|
code
| 1
|
github-code
|
6
|
137753727
|
###############################################################################
# Pareto-Optimal Cuts for Benders Decomposition #
# Article: Accelerating Benders Decomposition: Algorithmic Enhancement and #
# Model Selection Criteria #
# Authors: T. L. MAGNANTI, R. T. WONG (1981) #
# Coding: Lan Peng #
###############################################################################
# This script compares the performance of traditional Benders Decomposition
# and Benders Decomposition with Pareto-optimal cuts for Facility Location
# Problem. The formulation is as follows
#
#  v = \min \sum_{i = 1}^n \sum_{j = 1}^m c_{ij} x_{ij} + \sum_{j = 1}^m d_j y_j
#  s.t. \sum_{j = 1}^m x_{ij} \ge 1, \quad \forall i \in \{1, 2, \ldots, n\}
#       x_{ij} \le y_j, \quad \forall i \in \{1, 2, \ldots, n\}, j \in \{1, 2, \ldots, m\}
#       x_{ij} \ge 0, \quad \forall i \in \{1, 2, \ldots, n\}, j \in \{1, 2, \ldots, m\}
#       y_{j} \in \{0, 1\}, \quad \forall j \in \{1, 2, \ldots, m\}
#
# Note that Gurobi's own branch-and-cut is extremely strong: this script does
# not beat solving the model directly with Gurobi, even with Pareto-optimal cuts.
import gurobipy as grb
import random
def generateInstance(
n: "Number of CUSTOMERS",
m: "Number of FACILITIES",
instancePath: "Export path for instances" = "test.dat"
) -> "Generate a facility location problem instance":
# Cost for assigning customer i to facility j =============================
c = {}
for i in range(n):
for j in range(m):
c[i, j] = random.randint(50, 200)
# Cost for opening facility j =============================================
d = {}
for j in range(m):
d[j] = random.randint(1000, 3000)
# Save to a local file ====================================================
f = open(instancePath, "w")
# Meta info
f.write(str(m) + "\t" + str(n) + "\n")
# Save d[j]
for j in range(m):
        f.write(str(d[j]) + "\n")
# Save c[i, j]
for j in range(m):
s = ""
for i in range(n):
s += str(c[i, j]) + "\t"
f.write(s + "\n")
f.close()
return {
'n': n,
'm': m,
'c': c,
'd': d
}
def readInstance(
instancePath: "Import path for instance"
) -> "Read an instance file from local":
# Initialize ==============================================================
c = {}
d = {}
# Read file ===============================================================
f = open(instancePath, "r")
meta = f.readline().split("\t")
m = int(meta[0])
n = int(meta[1])
d = {}
for i in range(m):
d[i] = int(f.readline())
c = {}
for j in range(m):
ci = f.readline().split("\t")
for i in range(n):
c[i, j] = int(ci[i])
f.close()
return {
'n': n,
'm': m,
'c': c,
'd': d
}
def directSolveProblem(
instance: "Facility location problem, with n, m, c, d"
) -> "Use gurobi to find the optimal ofv of the instance without Benders Decomposition":
# Read instance ===========================================================
n = instance['n']
m = instance['m']
c = instance['c']
d = instance['d']
# Directly use Gurobi =====================================================
FL = grb.Model("Facility Location")
x = {}
for i in range(n):
for j in range(m):
x[i, j] = FL.addVar(vtype=grb.GRB.CONTINUOUS, obj=c[i, j])
y = {}
for j in range(m):
y[j] = FL.addVar(vtype=grb.GRB.BINARY, obj=d[j])
for i in range(n):
FL.addConstr(grb.quicksum(x[i, j] for j in range(m)) >= 1)
for i in range(n):
for j in range(m):
FL.addConstr(x[i, j] <= y[j])
FL.update()
FL.modelSense = grb.GRB.MINIMIZE
FL.optimize()
ofv = None
masterY = []
if (FL.status == grb.GRB.status.OPTIMAL):
ofv = FL.getObjective().getValue()
for j in y:
if (y[j].x >= 0.9):
masterY.append(j)
return {
'ofv': ofv,
'y': masterY
}
def masterProblem(
instance: "Facility location problem, with n, m, c, d",
paretoFlag: "True if enable Pareto-optimal cuts, False otherwise" = False,
) -> "Use Benders Decomposition to solve Facility Location Problem":
# Read instance ===========================================================
n = instance['n']
m = instance['m']
c = instance['c']
d = instance['d']
# Initialize model ========================================================
master = grb.Model("Master-IP")
x = {}
for i in range(n):
for j in range(m):
x[i, j] = master.addVar(vtype=grb.GRB.CONTINUOUS, obj=c[i, j])
y = {}
for j in range(m):
y[j] = master.addVar(vtype=grb.GRB.BINARY, obj=d[j])
# Integer part, no constraint =============================================
pass
# Call back to add cuts ===================================================
master._y = y
def addCuts(model, where):
if (where == grb.GRB.Callback.MIPSOL):
y_sol = model.cbGetSolution(model._y)
subY = {}
for i in range(m):
if (y_sol[i] >= 0.9):
subY[i] = 1
else:
subY[i] = 0
findCut = dualSubproblem(instance, subY, paretoFlag)
if (findCut['type'] == "infeasible"):
model.terminate()
elif (findCut['type'] == "optimality"):
print("Add optimality cut")
subLam = findCut['lam']
subPi = findCut['pi']
model.cbLazy(grb.quicksum(c[i, j] * x[i, j] for i in range(n) for j in range(m))
>= grb.quicksum(subLam[i] for i in range(n)) - grb.quicksum(subPi[i, j] * y[j] for i in range(n) for j in range(m)))
elif (findCut['type'] == "feasibility"):
print("Add feasibility cut")
subLam = findCut['lam']
subPi = findCut['pi']
model.cbLazy(grb.quicksum(subLam[i] for i in range(n)) - grb.quicksum(subPi[i, j] * y[j] for i in range(n) for j in range(m)) <= 0)
return
master.Params.lazyConstraints = 1
master.setParam('OutputFlag', 0)
master.optimize(addCuts)
ofv = None
masterY = []
if (master.status == grb.GRB.status.OPTIMAL):
ofv = master.getObjective().getValue()
for j in y:
if (y[j].x >= 0.9):
masterY.append(j)
return {
'ofv': ofv,
'y': masterY
}
def dualSubproblem(
instance: "Facility location problem, with n, m, c, d",
y: "Given integer solution",
paretoFlag: "True if enable Pareto-optimal cuts, False otherwise" = False
) -> "Calculate the dual of subproblem, which is a LP":
# Read instance ===========================================================
n = instance['n']
m = instance['m']
c = instance['c']
d = instance['d']
# Dual subproblem initialize ==============================================
sub = grb.Model("Dual-LP")
# fTy =====================================================================
fTy = 0
for j in range(m):
fTy += d[j] * y[j]
# Decision variables ======================================================
lam = {}
for i in range(n):
lam[i] = sub.addVar(vtype=grb.GRB.CONTINUOUS)
pi = {}
for i in range(n):
for j in range(m):
pi[i, j] = sub.addVar(vtype=grb.GRB.CONTINUOUS)
# Constraints =============================================================
for i in range(n):
for j in range(m):
sub.addConstr(lam[i] - pi[i, j] <= c[i, j])
sub.update()
# Objective ===============================================================
sub.setObjective(grb.quicksum(lam[i] - grb.quicksum(pi[i, j] * y[j] for j in range(m)) for i in range(n)) + fTy)
# Pareto-optimal cut generation ===========================================
def paretoOptimal():
# Generate a core point
y0 = {}
for j in range(m):
y0[j] = random.random()
# First, define set O and set C for opened/closed facility
O = []
C = []
for j in range(m):
if (y[j] == 1):
O.append(j)
else:
C.append(j)
poLam = {}
poPi = {}
# Calculate for each customer i
for i in range(n):
# Get index where cij is minimum
minIndex = None
cMin = None
for j in O:
if (minIndex == None or c[i, j] <= cMin):
cMin = c[i, j]
minIndex = j
# Calculate Li
Li = None
for j in O:
if (j != minIndex and (Li == None or c[i, j] <= Li)):
Li = c[i, j]
if (Li == None):
poLam[i] = cMin
for j in O:
poPi[i, j] = 0
for j in C:
poPi[i, j] = max(0, poLam[i] - c[i, j])
else:
# Step 1: Start with initial lambda i
poLam[i] = cMin
improveFlag = True
while(improveFlag):
improveFlag = False
# Step 2: T, s
T = []
for j in C:
if (c[i, j] <= poLam[i]):
T.append(j)
s = y[minIndex] - y0[minIndex]
for j in T:
s += y[j] - y0[j]
if (s <= 0):
# s <= 0, stop, poLam[i] is optimal
pass
elif(s > 0 and len(T) == len(C)):
poLam[i] = Li
else:
ck = None
for j in C:
if (j not in T and (ck == None or c[i, j] <= ck)):
ck = c[i, j]
if (Li <= ck):
poLam[i] = Li
else:
poLam[i] = ck
improveFlag = True
for j in range(m):
if (j in O and j != minIndex):
poPi[i, j] = 0
elif (j == minIndex):
poPi[i, j] = poLam[i] - cMin
else:
poPi[i, j] = max(0, poLam[i] - c[i, j])
return {
'lam': poLam,
'pi': poPi
}
# Interpret result ========================================================
sub.modelSense = grb.GRB.MAXIMIZE
sub.setParam('OutputFlag', 0)
sub.setParam("InfUnbdInfo", 1)
sub.optimize()
# If bounded, return Optimality cuts
if (sub.status == grb.GRB.status.OPTIMAL):
# Enable Pareto Optimality cuts
if (paretoFlag):
paretoCut = paretoOptimal()
poLam = paretoCut['lam']
poPi = paretoCut['pi']
return {
'type': "optimality",
'lam': poLam,
'pi': poPi
}
        # Without Pareto-optimal cuts
else:
subLam = {}
subPi = {}
for i in range(n):
subLam[i] = lam[i].x
for i in range(n):
for j in range(m):
subPi[i, j] = pi[i, j].x
return {
'type': "optimality",
'lam': subLam,
'pi': subPi
}
# If unbounded, return Feasibility cut
elif (sub.status == grb.GRB.status.UNBOUNDED):
subLam = {}
subPi = {}
ray = sub.UnbdRay
for i in range(n):
subLam[i] = ray[i]
for i in range(n):
for j in range(m):
subPi[i, j] = ray[n + i * m + j]
return {
'type': "feasibility",
'lam': subLam,
'pi': subPi
}
    elif (sub.status == grb.GRB.status.INFEASIBLE):
        # no dual solution exists here, so return empty multipliers
        return {
            'type': "infeasible",
            'lam': {},
            'pi': {}
        }
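# Illustrative driver (an added sketch, not part of the original script): build a
# small random instance and compare a direct Gurobi solve against Benders with
# and without Pareto-optimal cuts. The instance sizes here are arbitrary choices.
if __name__ == '__main__':
    instance = generateInstance(n=20, m=10, instancePath="test.dat")
    print("Direct solve ofv:", directSolveProblem(instance)['ofv'])
    print("Benders ofv:", masterProblem(instance, paretoFlag=False)['ofv'])
    print("Benders + Pareto ofv:", masterProblem(instance, paretoFlag=True)['ofv'])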
|
isaac0821/BendersDecomposition
|
benders.py
|
benders.py
|
py
| 10,604
|
python
|
en
|
code
| 15
|
github-code
|
6
|
5634782182
|
#encoding:utf-8
# import matplotlib.pyplot and numpy
import numpy as np
import matplotlib.pyplot as plt
# apply a theme style
# plt.style.use('mystyle')
# set the figure size and add a subplot
fig = plt.figure(figsize=(5,5))
ax = fig.add_subplot(111)
# plot sin, cos, tan
x = np.arange(-np.pi, np.pi, np.pi / 100)
y1 = np.sin(x)
y2 = np.cos(x)
y3 = np.tan(x)
sin, = ax.plot(x, y1, color='red', label='sin')
cos, = ax.plot(x, y2, color='blue', label='cos')
tan, = ax.plot(x, y3, color='black', label='tan')  # was plotting y2, so 'tan' actually showed cos
ax.set_ylim([-1.2, 1.2])
# second approach: split the legend into separate artists
sin_legend = ax.legend(handles=[sin,tan], loc='upper right')
ax.add_artist(sin_legend)
ax.legend(handles=[cos], loc='lower right')
plt.show()
# import numpy as np
# import matplotlib.pyplot as plt
# apply a theme style
# plt.style.use('mystyle')
# set the figure size and add a subplot
# fig = plt.figure(figsize=(5,5))
# ax = fig.add_subplot(111)
# for color in ['red', 'green']:
#     n = 750
#     x, y = np.random.rand(2, n)
#     scale = 200.0 * np.random.rand(n)
#     ax.scatter(x, y, c=color, s=scale,
#                label=color, alpha=0.3,
#                edgecolors='none')
# ax.legend()
# ax.grid(True)
# plt.show()
|
peipei1109/P_04_CodeCategory
|
visualize/pltmultiLegend.py
|
pltmultiLegend.py
|
py
| 1,227
|
python
|
en
|
code
| 0
|
github-code
|
6
|
39253570970
|
from datetime import datetime
from urllib.parse import urljoin
import os
import responses
from django.conf import settings
from django.test import TestCase
from mangaki.models import (Work, WorkTitle, RelatedWork, Category, Genre,
Language, ExtLanguage, Artist, Staff, Studio)
from mangaki.wrappers.anilist import (fuzzydate_to_python_datetime, to_anime_season, read_graphql_query,
AniList, AniListException, AniListStatus, AniListWorkType, AniListSeason,
AniListMediaFormat, AniListRelation, AniListRelationType,
insert_works_into_database_from_anilist, insert_work_into_database_from_anilist)
class AniListTest(TestCase):
@staticmethod
def read_fixture(filename):
with open(os.path.join(settings.TEST_DATA_DIR, filename), 'r', encoding='utf-8') as f:
return f.read()
def setUp(self):
self.anilist = AniList()
def test_fuzzydate_to_python_datetime(self):
self.assertEqual(fuzzydate_to_python_datetime({'year': 2017, 'month': 12, 'day': 25}), datetime(2017, 12, 25, 0, 0))
self.assertEqual(fuzzydate_to_python_datetime({'year': 2017, 'month': 12, 'day': None}), datetime(2017, 12, 1, 0, 0))
self.assertEqual(fuzzydate_to_python_datetime({'year': 2017, 'month': None, 'day': None}), datetime(2017, 1, 1, 0, 0))
self.assertIsNone(fuzzydate_to_python_datetime({'year': None, 'month': None, 'day': 25}))
self.assertIsNone(fuzzydate_to_python_datetime({'year': None, 'month': 12, 'day': 25}))
self.assertIsNone(fuzzydate_to_python_datetime({'year': None, 'month': 12, 'day': None}))
self.assertIsNone(fuzzydate_to_python_datetime({'year': None, 'month': None, 'day': None}))
def test_to_anime_season(self):
self.assertEqual(to_anime_season(datetime(2017, 1, 1, 0, 0)), AniListSeason.WINTER)
self.assertEqual(to_anime_season(datetime(2017, 4, 1, 0, 0)), AniListSeason.SPRING)
self.assertEqual(to_anime_season(datetime(2017, 7, 1, 0, 0)), AniListSeason.SUMMER)
self.assertEqual(to_anime_season(datetime(2017, 10, 1, 0, 0)), AniListSeason.FALL)
@responses.activate
def test_api_errors(self):
responses.add(
responses.POST,
self.anilist.BASE_URL,
body='{ "data": { "Media": null }, "errors": [ { "message": "Not Found.", "status": 404, "locations": [{"line": 2, "column": 3}] } ] }',
status=404,
content_type='application/json'
)
with self.assertRaisesRegex(AniListException, 'Error 404 : Not Found.'):
self.anilist._request(
query=read_graphql_query('work-info'),
variables={'id': 0}
)
@responses.activate
def test_get_work(self):
responses.add(
responses.POST,
self.anilist.BASE_URL,
body=self.read_fixture('anilist/hibike_euphonium.json'),
status=200,
content_type='application/json'
)
hibike_by_id = self.anilist.get_work(search_id=20912)
hibike_by_title = self.anilist.get_work(search_title='Hibike')
hibike_by_id_and_title = self.anilist.get_work(search_id=20912, search_title='Hibike')
hibike = hibike_by_id_and_title
self.assertEqual(hibike, hibike_by_id)
self.assertEqual(hibike, hibike_by_title)
@responses.activate
def test_work_properties(self):
responses.add(
responses.POST,
self.anilist.BASE_URL,
body=self.read_fixture('anilist/hibike_euphonium.json'),
status=200,
content_type='application/json'
)
hibike = self.anilist.get_work(search_id=20912)
self.assertEqual(hibike.anilist_id, 20912)
self.assertEqual(hibike.anilist_url, 'https://anilist.co/anime/20912')
self.assertEqual(hibike.media_format, AniListMediaFormat.TV)
self.assertEqual(hibike.title, 'Hibike! Euphonium')
self.assertEqual(hibike.english_title, 'Sound! Euphonium')
self.assertEqual(hibike.japanese_title, '響け!ユーフォニアム')
self.assertCountEqual(hibike.synonyms, [])
self.assertEqual(hibike.start_date, datetime(2015, 4, 8, 0, 0))
self.assertEqual(hibike.end_date, datetime(2015, 7, 1, 0, 0))
self.assertEqual(hibike.season, AniListSeason.SPRING)
self.assertEqual(hibike.description, 'The anime begins when Kumiko Oumae, a girl who was in the brass band club in junior high school, visits her high school\'s brass band club as a first year. Kumiko\'s classmates Hazuki and Sapphire decide to join the club, but Kumiko sees her old classmate Reina there and hesitates. She remembers an incident she had with Reina at a brass band club contest in junior high school...<br>\n<br>\n(Source: ANN)')
self.assertCountEqual(hibike.genres, ['Music', 'Slice of Life', 'Drama'])
self.assertFalse(hibike.is_nsfw)
self.assertEqual(hibike.poster_url, 'https://cdn.anilist.co/img/dir/anime/reg/20912-vpZDPyqs22Rz.jpg')
self.assertEqual(hibike.nb_episodes, 13)
self.assertEqual(hibike.episode_length, 24)
self.assertIsNone(hibike.nb_chapters)
self.assertEqual(hibike.status, AniListStatus.FINISHED)
self.assertEqual(hibike.studio, 'Kyoto Animation')
self.assertCountEqual(hibike.external_links, {
'Official Site': 'http://anime-eupho.com/',
'Crunchyroll': 'http://www.crunchyroll.com/sound-euphonium',
'Twitter': 'https://twitter.com/anime_eupho'
})
self.assertCountEqual(hibike.tags, [
{ 'anilist_tag_id': 110, 'name': 'Band', 'spoiler': False, 'votes': 100 },
{ 'anilist_tag_id': 46, 'name': 'School', 'spoiler': False, 'votes': 79 },
{ 'anilist_tag_id': 98, 'name': 'Female Protagonist', 'spoiler': False, 'votes': 79 },
{ 'anilist_tag_id': 84, 'name': 'School Club', 'spoiler': False, 'votes': 73 },
{ 'anilist_tag_id': 50, 'name': 'Seinen', 'spoiler': False, 'votes': 33 }
])
self.assertEqual(len(hibike.staff), 13)
self.assertCountEqual(hibike.relations, [
AniListRelation(related_id=86133, relation_type=AniListRelationType.ADAPTATION),
AniListRelation(related_id=21255, relation_type=AniListRelationType.SIDE_STORY),
AniListRelation(related_id=21376, relation_type=AniListRelationType.SIDE_STORY),
AniListRelation(related_id=21460, relation_type=AniListRelationType.SEQUEL),
AniListRelation(related_id=21638, relation_type=AniListRelationType.SUMMARY),
AniListRelation(related_id=100178, relation_type=AniListRelationType.SIDE_STORY)
])
@responses.activate
def test_get_seasonal_anime(self):
responses.add(
responses.POST,
self.anilist.BASE_URL,
body=self.read_fixture('anilist/airing_fall_2017.json'),
status=200,
content_type='application/json'
)
airing_animes = list(self.anilist.list_seasonal_animes(year=2017, season=AniListSeason.SUMMER))
self.assertEqual(len(airing_animes), 36)
@responses.activate
def test_get_animelist(self):
responses.add(
responses.POST,
self.anilist.BASE_URL,
body=self.read_fixture('anilist/mrsalixor_anilist_animelist.json'),
status=200,
content_type='application/json'
)
anime_list = list(self.anilist.get_user_list(AniListWorkType.ANIME, 'mrsalixor'))
self.assertEqual(len(anime_list), 450)
@responses.activate
def test_get_mangalist(self):
responses.add(
responses.POST,
self.anilist.BASE_URL,
body=self.read_fixture('anilist/mrsalixor_anilist_mangalist.json'),
status=200,
content_type='application/json'
)
anime_list = list(self.anilist.get_user_list(AniListWorkType.MANGA, 'mrsalixor'))
self.assertEqual(len(anime_list), 100)
@responses.activate
def test_insert_into_database(self):
        Artist(name='Ishihara Tatsuya').save()  # .save() returns None, so don't keep the result
# Test insert AniListEntry into database
responses.add(
responses.POST,
self.anilist.BASE_URL,
body=self.read_fixture('anilist/hibike_euphonium.json'),
status=200,
content_type='application/json'
)
hibike_entry = self.anilist.get_work(search_id=20912)
hibike = insert_work_into_database_from_anilist(hibike_entry, build_related=False)
titles_hibike = WorkTitle.objects.filter(work=hibike).values_list('title', flat=True)
genres_hibike = hibike.genre.values_list('title', flat=True)
related_hibike = RelatedWork.objects.filter(parent_work=hibike)
staff_hibike = Work.objects.get(pk=hibike.pk).staff_set.all().values_list('artist__name', flat=True)
self.assertEqual(hibike.studio.title, 'Kyoto Animation')
self.assertCountEqual(titles_hibike, ['Hibike! Euphonium', 'Sound! Euphonium', '響け!ユーフォニアム'])
self.assertCountEqual(genres_hibike, ['Slice of Life', 'Music', 'Drama'])
self.assertCountEqual(staff_hibike, ['Ishihara Tatsuya', 'Matsuda Akito', 'Takeda Ayano'])
# Check for no artist duplication
artist = Artist.objects.filter(name='Ishihara Tatsuya')
self.assertEqual(artist.count(), 1)
self.assertEqual(artist.first().anilist_creator_id, 100055)
# Try adding this work to the DB again
hibike_again = insert_work_into_database_from_anilist(hibike_entry, build_related=False)
self.assertEqual(hibike, hibike_again)
@responses.activate
def test_update_work(self):
fake_studio = Studio.objects.create(title='Fake Studio')
hibike_outdated = Work.objects.create(
category=Category.objects.get(slug='anime'),
title='Sound! Euphonium',
studio=fake_studio
)
hibike_outdated.genre.add(Genre.objects.create(title='Fake genre'))
responses.add(
responses.POST,
self.anilist.BASE_URL,
body=self.read_fixture('anilist/hibike_euphonium.json'),
status=200,
content_type='application/json'
)
hibike_entry = self.anilist.get_work(search_id=20912)
# FIXME: properly mock the insertion of related works
insert_work_into_database_from_anilist(hibike_entry, build_related=False)
hibike_updated = Work.objects.get(title='Hibike! Euphonium')
titles_hibike = WorkTitle.objects.filter(work=hibike_updated).values_list('title', flat=True)
genres_hibike = hibike_updated.genre.values_list('title', flat=True)
related_hibike = RelatedWork.objects.filter(parent_work=hibike_updated)
staff_hibike = Work.objects.get(pk=hibike_updated.pk).staff_set.all().values_list('artist__name', flat=True)
self.assertEqual(hibike_updated.studio.title, 'Kyoto Animation')
self.assertCountEqual(titles_hibike, ['Hibike! Euphonium', 'Sound! Euphonium', '響け!ユーフォニアム'])
self.assertCountEqual(genres_hibike, ['Slice of Life', 'Music', 'Drama'])
self.assertCountEqual(staff_hibike, ['Ishihara Tatsuya', 'Matsuda Akito', 'Takeda Ayano'])
|
mangaki/mangaki
|
mangaki/mangaki/tests/test_anilist.py
|
test_anilist.py
|
py
| 11,533
|
python
|
en
|
code
| 137
|
github-code
|
6
|
5309111950
|
import bisect
def search(nums, target, start, end):
    while start <= end:
        mid = (start+end) // 2
        if nums[mid] == target:
            return mid
        elif nums[mid] < target:
            start = mid + 1  # the +1/-1 steps avoid an infinite loop when the range stops shrinking
        else:
            end = mid - 1
    return -1
def search2(nums, target, start, end):
    if start <= end:
        mid = (start+end)//2
        if nums[mid] < target:
            return search2(nums, target, mid+1, end)  # return the recursive result
        elif nums[mid] > target:
            return search2(nums, target, start, mid-1)
        else:
            return mid
    else:
        return -1
def search3(nums, target, start, end):
idx = bisect.bisect_left(nums, target)
    # when using bisect_left, it is important to check that the index is within range!
if idx < len(nums) and nums[idx] == target:
return idx
return -1
def search4(nums, target):
try:
return nums.index(target)
except ValueError:
return -1
nums = [1, 3, 5, 6, 7, 8, 9, 14]
target = 8
print(search(nums, target, 0, len(nums)-1))
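# Added cross-check (illustrative): with the fixed implementations above, every
# variant should return index 5 for target 8 in this list.
print(search2(nums, target, 0, len(nums)-1))
print(search3(nums, target, 0, len(nums)-1))
print(search4(nums, target))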
|
louisuss/Algorithms-Code-Upload
|
Python/Tips/BinarySearch/binary_search.py
|
binary_search.py
|
py
| 1,022
|
python
|
en
|
code
| 0
|
github-code
|
6
|
72780266429
|
import onnx
import numpy as np
import onnxruntime as ort
import tvm
from tvm.contrib import graph_executor
import tvm.auto_scheduler as auto_scheduler
from tvm import relay, autotvm
import tvm.relay.testing
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
model_encoder = "/home/xinyuwang/adehome/tvm_latest/tvm_example/model/pts_voxel_encoder_centerpoint.onnx"
model_head = "/home/xinyuwang/adehome/tvm_latest/tvm_example/model/pts_backbone_neck_head_centerpoint.onnx"
output_model_path = "/home/xinyuwang/adehome/tvm_latest/tvm_example/deploy_lib.so"
output_graph_path = "/home/xinyuwang/adehome/tvm_latest/tvm_example/deploy_graph.json"
output_param_path = "/home/xinyuwang/adehome/tvm_latest/tvm_example/deploy_param.params"
# onnx_ = onnx.load(model_encoder)
# x = np.ones((40000,32,9), dtype=np.float32)
# input_name = "input_features"
onnx_ = onnx.load(model_head)
x = np.ones((1,32,560,560), dtype=np.float32)
input_name = "spatial_features"
target = "cuda"
shape_dict = {input_name: x.shape}
mod, params = relay.frontend.from_onnx(onnx_, shape_dict)
hardware_params = auto_scheduler.HardwareParams(
max_shared_memory_per_block=49152,
max_threads_per_block=1024,
# The value `max_local_memory_per_block` is not used in AutoScheduler,
# but is required by the API.
max_local_memory_per_block=12345678,
max_vthread_extent=8,
warp_size=32,
target=target,
)
network = "header"
log_file = "%s.json" % network
runner = auto_scheduler.LocalRunner(
timeout=10,
number=3,
repeat=2,
min_repeat_ms=100,
cooldown_interval=0.0,
enable_cpu_cache_flush=False,
device=0,
)
tasks, task_weights = auto_scheduler.extract_tasks(
mod["main"],
params,
target=target,
hardware_params=hardware_params,
)
for idx, (task, task_weight) in enumerate(zip(tasks, task_weights)):
print(
f"==== Task {idx}: {task.desc} "
f"(weight {task_weight} key: {task.workload_key}) ====="
)
print(task.compute_dag)
tuner = auto_scheduler.TaskScheduler(tasks, task_weights)
tuner.tune(
auto_scheduler.TuningOptions(
num_measure_trials=6000,
verbose=1,
runner=runner,
measure_callbacks=[
auto_scheduler.RecordToFile(log_file),
],
),
adaptive_training=False,
)
relay_build = {"graph": relay.build, "vm": relay.vm.compile}["graph"]
with auto_scheduler.ApplyHistoryBest(log_file):
with tvm.transform.PassContext(
opt_level=3,
config={"relay.backend.use_auto_scheduler": True},
):
graph, lib, params = relay_build(mod, target=target, params=params)
lib.export_library(output_model_path)
with open(output_graph_path, 'w', encoding='utf-8') as graph_file:
graph_file.write(graph)
with open(output_param_path, 'wb') as param_file:
param_file.write(relay.save_param_dict(params))
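# Added illustrative sketch (assumes the files above were written successfully):
# reload the tuned artifacts and run inference with the TVM graph executor.
# Kept commented out because it requires a CUDA device at runtime.
# lib = tvm.runtime.load_module(output_model_path)
# dev = tvm.cuda(0)
# module = graph_executor.create(open(output_graph_path).read(), lib, dev)
# module.load_params(open(output_param_path, 'rb').read())
# module.set_input(input_name, x)
# module.run()
# print(module.get_output(0).shape)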
|
angry-crab/tvm_example
|
python/ansor.py
|
ansor.py
|
py
| 2,954
|
python
|
en
|
code
| 0
|
github-code
|
6
|
15757203807
|
from django.http import HttpRequest
from django.test import TestCase
from django.urls import resolve
from snippets.views import top, snippet_new, snippet_edit, snippet_detail
# Create your tests here.
class CreateSnippetTest(TestCase):
def test_should_resolve_snippet_new(self):
found = resolve("/snippets/new/")
self.assertEqual(snippet_new, found.func)
class SnippetDetailTest(TestCase):
def test_should_resolve_snippet_detail(self):
found = resolve("/snippets/1/")
self.assertEqual(snippet_detail, found.func)
class EditSnippetTest(TestCase):
def test_should_resolve_snippet_edit(self):
found = resolve("/snippets/1/edit/")
self.assertEqual(snippet_edit, found.func)
|
KentaKamikokuryo/DjangoWebApplication
|
djangosnippets/snippets/tests.py
|
tests.py
|
py
| 771
|
python
|
en
|
code
| 0
|
github-code
|
6
|
386804619
|
"""
Created on Tue Nov 10
This program finds the solution to the system Ax = b and the LU factorization of A
using the Doolittle method.
Parameters
----------
A : Invertible matrix
b : Constant vector
Returns
-------
x : Solution
L : Factorization matrix L
U : Factorization matriz U
@author: Cesar Andres Garcia Posada
"""
import sympy as sm
import math
import sys
import json
import base64
import numpy as np
import matrix_function
import copy
np.set_printoptions(precision=7)
def SteppedPartialPivot(matrix):
matrix = np.array(matrix)
dic = {}
auxiliary_matrix = np.array(matrix)
#matrixDic = matrix.tolist()
#dic[0] = copy.deepcopy(matrix)
dic[0] = np.array(matrix)
temporal_array = []
for i in range(matrix.shape[0]-1):
pivot_number = auxiliary_matrix[0][0]
if i == 0:
for j in auxiliary_matrix:
pivot_column = np.abs(j[:-1])
temporal_maxpivot = np.max(pivot_column)
temporal_array.append(temporal_maxpivot)
sub_matrix = auxiliary_matrix.T[0]
division_colum = np.abs(sub_matrix)/temporal_array[i:]
posmax_pivot = np.where(division_colum == np.max(division_colum))[0][0]
if(posmax_pivot != 0):
pivot_number = auxiliary_matrix[posmax_pivot][0]
temporal_matrix = np.array(auxiliary_matrix[0])
auxiliary_matrix[0] = np.array(auxiliary_matrix[posmax_pivot])
auxiliary_matrix[posmax_pivot] = temporal_matrix
temporal_matrix = np.array(matrix[i])
matrix[i]=np.array(matrix[i+posmax_pivot])
matrix[i+posmax_pivot] = temporal_matrix
if (pivot_number==0 and i == matrix.shape[0]-2):
print ("Error the last pivot number is cero so the matrix doesn't have a solution")
fj = auxiliary_matrix[0] # Fj
column_vector = np.reshape(auxiliary_matrix.T[0][1:], (auxiliary_matrix.T[0][1:].shape[0], 1))
multiplier = column_vector/pivot_number
fi = auxiliary_matrix[1:]
fi = fi - (multiplier*fj)
if(i == 0):
matrix[i+1:] = fi
else:
axiliary_fi = fi
while (axiliary_fi.shape[1]+1 <matrix[i+1:].shape[1]):
axiliary_fi = np.insert(axiliary_fi, 0, np.zeros(1), axis=1)
matrix[i+1:] = np.insert(axiliary_fi, 0, np.zeros(1), axis=1)
auxiliary_matrix = fi.T[1:].T
matrix = np.array(matrix)
dic[i+1] = matrix
#matrixDic = matrix.tolist()
#dic[i+1] = copy.deepcopy(matrixDic)
a = np.delete(matrix, matrix.shape[1]-1, axis=1)
b = matrix.T[matrix.shape[1]-1]
return a,b,dic
def initialData(A,b):
results = {}
try:
A,b,matrix2 = matrix_function.mix_matrix(A,b)
if (matrix_function.determinant(A) == True):
matrix2 = np.array(matrix2)
A,B,dic = SteppedPartialPivot(matrix2)
dic = matrix_function.rebuild_matrix(dic)
print(json.dumps(dic))
x = matrix_function.soltion(A,B)
x = x.tolist()
xSolution = {}
xSolution[0] = x
print(json.dumps(xSolution))
else:
results[0] = "Error the matrix determinant is 0"
print(json.dumps(results))
except BaseException as e:
results[0] = "Error" + str(e)
print(json.dumps(results))
#A = "[[4,-1,0,3],[1,15.5,3,8],[0,-1.3,-4,1.1],[14,5,-2,30]]"
#b = "[1,1,1,1]"
A = sys.argv[1]
b = sys.argv[2]
initialData(A,b)
|
jsperezsalazar2001/SADA_ANALYTICS
|
SADA_ANALYTICS/public/python/stepped.py
|
stepped.py
|
py
| 3,549
|
python
|
en
|
code
| 1
|
github-code
|
6
|
35776081465
|
from django.urls import path
from . import consumers
websocket_urlpatterns = [
path('ws/machine/<int:pk>/last/', consumers.MachineLastRunConsumer),
path('ws/machine/<int:pk>/runs/', consumers.MachineRunsStatusConsumer),
path('ws/machine/status/', consumers.MachinesStatusConsumer),
]
|
TobKed/system_test_progress_tracking
|
system_test_progress_tracking/progress_tracking/routing.py
|
routing.py
|
py
| 299
|
python
|
en
|
code
| 0
|
github-code
|
6
|
23520366148
|
import pygame
from animation_folder import import_folder
class Door(pygame.sprite.Sprite):
def __init__(self, pos):
super().__init__()
# Door attributes
self.frames = 0
self.animation_speed = 0.15
self.animations = import_folder('./graphics/pain_character/door')
self.image = self.animations[int(self.frames)]
self.rect = self.image.get_rect(topleft = pos)
def animation(self):
# changing between images
self.frames += self.animation_speed
# continuous loop of images
if self.frames >= len(self.animations):
self.frames = 0
self.image = self.animations[int(self.frames)]
def update(self, shift):
self.animation()
self.rect.x += shift
|
lekan2410/Naruto-Platformer-V2
|
Code/door.py
|
door.py
|
py
| 825
|
python
|
en
|
code
| 0
|
github-code
|
6
|
9174273420
|
load(":common/python/semantics.bzl", "TOOLS_REPO")
_CcInfo = _builtins.toplevel.CcInfo
# NOTE: This is copied to PyRuntimeInfo.java
DEFAULT_STUB_SHEBANG = "#!/usr/bin/env python3"
# NOTE: This is copied to PyRuntimeInfo.java
DEFAULT_BOOTSTRAP_TEMPLATE = "@" + TOOLS_REPO + "//tools/python:python_bootstrap_template.txt"
_PYTHON_VERSION_VALUES = ["PY2", "PY3"]
def _PyRuntimeInfo_init(
*,
interpreter_path = None,
interpreter = None,
files = None,
coverage_tool = None,
coverage_files = None,
python_version,
stub_shebang = None,
bootstrap_template = None):
if (interpreter_path and interpreter) or (not interpreter_path and not interpreter):
fail("exactly one of interpreter or interpreter_path must be specified")
if interpreter_path and files != None:
fail("cannot specify 'files' if 'interpreter_path' is given")
if (coverage_tool and not coverage_files) or (not coverage_tool and coverage_files):
fail(
"coverage_tool and coverage_files must both be set or neither must be set, " +
"got coverage_tool={}, coverage_files={}".format(
coverage_tool,
coverage_files,
),
)
if python_version not in _PYTHON_VERSION_VALUES:
fail("invalid python_version: '{}'; must be one of {}".format(
python_version,
_PYTHON_VERSION_VALUES,
))
if files != None and type(files) != type(depset()):
fail("invalid files: got value of type {}, want depset".format(type(files)))
if interpreter:
if files == None:
files = depset()
else:
files = None
if coverage_files == None:
coverage_files = depset()
if not stub_shebang:
stub_shebang = DEFAULT_STUB_SHEBANG
return {
"interpreter_path": interpreter_path,
"interpreter": interpreter,
"files": files,
"coverage_tool": coverage_tool,
"coverage_files": coverage_files,
"python_version": python_version,
"stub_shebang": stub_shebang,
"bootstrap_template": bootstrap_template,
}
# TODO(#15897): Rename this to PyRuntimeInfo when we're ready to replace the Java
# implemented provider with the Starlark one.
PyRuntimeInfo, _unused_raw_py_runtime_info_ctor = provider(
doc = """Contains information about a Python runtime, as returned by the `py_runtime`
rule.
A Python runtime describes either a *platform runtime* or an *in-build runtime*.
A platform runtime accesses a system-installed interpreter at a known path,
whereas an in-build runtime points to a `File` that acts as the interpreter. In
both cases, an "interpreter" is really any executable binary or wrapper script
that is capable of running a Python script passed on the command line, following
the same conventions as the standard CPython interpreter.
""",
init = _PyRuntimeInfo_init,
fields = {
"interpreter_path": (
"If this is a platform runtime, this field is the absolute " +
"filesystem path to the interpreter on the target platform. " +
"Otherwise, this is `None`."
),
"interpreter": (
"If this is an in-build runtime, this field is a `File` representing " +
"the interpreter. Otherwise, this is `None`. Note that an in-build " +
"runtime can use either a prebuilt, checked-in interpreter or an " +
"interpreter built from source."
),
"files": (
"If this is an in-build runtime, this field is a `depset` of `File`s" +
"that need to be added to the runfiles of an executable target that " +
"uses this runtime (in particular, files needed by `interpreter`). " +
"The value of `interpreter` need not be included in this field. If " +
"this is a platform runtime then this field is `None`."
),
"coverage_tool": (
"If set, this field is a `File` representing tool used for collecting code coverage information from python tests. Otherwise, this is `None`."
),
"coverage_files": (
"The files required at runtime for using `coverage_tool`. " +
"Will be `None` if no `coverage_tool` was provided."
),
"python_version": (
"Indicates whether this runtime uses Python major version 2 or 3. " +
"Valid values are (only) `\"PY2\"` and " +
"`\"PY3\"`."
),
"stub_shebang": (
"\"Shebang\" expression prepended to the bootstrapping Python stub " +
"script used when executing `py_binary` targets. Does not " +
"apply to Windows."
),
"bootstrap_template": (
"See py_runtime_rule.bzl%py_runtime.bootstrap_template for docs."
),
},
)
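# Added illustrative sketch (not part of the original file): constructing an
# in-build PyRuntimeInfo from a hypothetical rule implementation. An in-build
# runtime passes `interpreter` plus `files`; a platform runtime would pass
# `interpreter_path` instead. Kept commented out since it only makes sense
# inside a rule definition.
# def _my_py_runtime_impl(ctx):
#     return [PyRuntimeInfo(
#         interpreter = ctx.file.interpreter,
#         files = depset(ctx.files.files),
#         python_version = "PY3",
#     )]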
def _check_arg_type(name, required_type, value):
value_type = type(value)
if value_type != required_type:
fail("parameter '{}' got value of type '{}', want '{}'".format(
name,
value_type,
required_type,
))
def _PyInfo_init(
*,
transitive_sources,
uses_shared_libraries = False,
imports = depset(),
has_py2_only_sources = False,
has_py3_only_sources = False):
_check_arg_type("transitive_sources", "depset", transitive_sources)
# Verify it's postorder compatible, but retain is original ordering.
depset(transitive = [transitive_sources], order = "postorder")
_check_arg_type("uses_shared_libraries", "bool", uses_shared_libraries)
_check_arg_type("imports", "depset", imports)
_check_arg_type("has_py2_only_sources", "bool", has_py2_only_sources)
_check_arg_type("has_py3_only_sources", "bool", has_py3_only_sources)
return {
"transitive_sources": transitive_sources,
"imports": imports,
"uses_shared_libraries": uses_shared_libraries,
"has_py2_only_sources": has_py2_only_sources,
"has_py3_only_sources": has_py2_only_sources,
}
PyInfo, _unused_raw_py_info_ctor = provider(
"Encapsulates information provided by the Python rules.",
init = _PyInfo_init,
fields = {
"transitive_sources": """\
A (`postorder`-compatible) depset of `.py` files appearing in the target's
`srcs` and the `srcs` of the target's transitive `deps`.
""",
"uses_shared_libraries": """
Whether any of this target's transitive `deps` has a shared library file (such
as a `.so` file).
This field is currently unused in Bazel and may go away in the future.
""",
"imports": """\
A depset of import path strings to be added to the `PYTHONPATH` of executable
Python targets. These are accumulated from the transitive `deps`.
The order of the depset is not guaranteed and may be changed in the future. It
is recommended to use `default` order (the default).
""",
"has_py2_only_sources": "Whether any of this target's transitive sources requires a Python 2 runtime.",
"has_py3_only_sources": "Whether any of this target's transitive sources requires a Python 3 runtime.",
},
)
def _PyCcLinkParamsProvider_init(cc_info):
return {
"cc_info": _CcInfo(linking_context = cc_info.linking_context),
}
# buildifier: disable=name-conventions
PyCcLinkParamsProvider, _unused_raw_py_cc_link_params_provider_ctor = provider(
doc = ("Python-wrapper to forward CcInfo.linking_context. This is to " +
"allow Python targets to propagate C++ linking information, but " +
"without the Python target appearing to be a valid C++ rule dependency"),
init = _PyCcLinkParamsProvider_init,
fields = {
"cc_info": "A CcInfo instance; it has only linking_context set",
},
)
|
bazelbuild/bazel
|
src/main/starlark/builtins_bzl/common/python/providers.bzl
|
providers.bzl
|
bzl
| 7,822
|
python
|
en
|
code
| 21,632
|
github-code
|
6
|
9309515906
|
from sklearn import svm
import numpy as np
import cv2
from autoCanny import auto_canny
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
train =[]
label =[]
for i in range(0,10):
for j in range(1,3795):
path = "hog/" + str(i) + "/" + "img (" + str(j) + ").jpg"
img = cv2.imread(path)
img = cv2.resize(img, (40,40))
        # cv2.imshow(str(i)+"img", img)  # debug display; without a waitKey per image it never renders and only slows loading
img = img.reshape(-1,1)[0,:]
train.append(img)
label.append(i)
train=np.array(train)
X_train, X_test, y_train, y_test = train_test_split(train, label, test_size=0.2)
# svm canny edge
clf = svm.SVC(gamma='scale',kernel='rbf')
clf.fit(X_train,y_train)
print(clf.get_params())
y_pred = clf.predict(X_test)
score = accuracy_score(y_test, y_pred)
print("svm dataset")
print(score)
# print(clf.score(X_test,y_test))
#cross validation
from sklearn.model_selection import cross_val_score
scores = cross_val_score(clf, train, label, cv=2)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
cv2.waitKey()
cv2.destroyAllWindows()
|
mervedadas/Optical-Character-Recognition
|
model.py
|
model.py
|
py
| 1,275
|
python
|
en
|
code
| 0
|
github-code
|
6
|
42166292211
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import numpy as np
import cv2
import sys
# In[ ]:
def view_img(filepath):
    img = cv2.imread(filepath)
    # cv2.namedWindow returns None; windows are referenced by their name string
    cv2.namedWindow("Image Viewer", cv2.WINDOW_NORMAL)
    img = cv2.resize(img, (1080, 720))
    cv2.imshow("Image Viewer", img)
    cv2.waitKey()
    print("image at "+filepath+" shown successfully.")
# In[ ]:
if __name__== "__main__":
if(len(sys.argv) != 2):
print("\nCheck correct usage: 'view_img <filepath>' ")
sys.exit()
filename = sys.argv[1]
view_img(filename)
cv2.destroyAllWindows()
# In[ ]:
|
ANSHAY/OpenCV
|
practice/scripts/view_img.py
|
view_img.py
|
py
| 605
|
python
|
en
|
code
| 0
|
github-code
|
6
|
3475871616
|
#!/bin/env python3
import requests
import time
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
IP = "127.0.0.1"
PORT = ":12345"
url = "http://" + IP + PORT + "/cpu"
response = requests.get(url)
if response.ok:
print ("%s" % (response.content))
else:
response.raise_for_status()
json = response.json()
fig = plt.figure()
ax = plt.axes(xlim=(0, 1), ylim=(0, 3500))
line, = ax.plot([], [], lw=2)
def init():
line.set_data([], [])
return line,
def animate(i):
    response = requests.get(url)
    json = response.json()
    x = np.linspace(0, 2, 1000)
    # freq_mhz is a single number; broadcast it across x so set_data gets equal-length arrays
    y = np.full_like(x, json["core"][0]["freq_mhz"])
    line.set_data(x, y)
    return line,
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=2, interval=20, blit=True)
plt.show()
|
Antonito/cpp_gkrellm
|
scripts/cpu_load.py
|
cpu_load.py
|
py
| 829
|
python
|
en
|
code
| 0
|
github-code
|
6
|
22904964205
|
import argparse
from os.path import join
from pathlib import Path
import cv2
import numpy as np
parser = argparse.ArgumentParser(description='This script creates points.txt and clusters.txt files for a given image.')
parser.add_argument('--src_img', type=str, help='Path to the source image.')
parser.add_argument('--dst_folder', type=str, help='Directory in which points.txt and clusters.txt will be saved.')
parser.add_argument('--k_init_centroids', type=int, help='How many initial uniformly sampled centroids to generate.',
default=10)
def nparray_to_str(X):
to_save = '\n'.join([' '.join(str(X[i])[1:-1].split()) for i in range(len(X))])
return to_save
def main(src_img, dst_folder, k):
# files to be created
points_path = join(dst_folder, 'points.txt')
clusters_path = join(dst_folder, 'clusters.txt')
# create directory
Path(dst_folder).mkdir(parents=True, exist_ok=True)
# load and write points
img = cv2.imread(src_img).reshape((-1, 3)).astype(np.float32)
with open(points_path, 'w') as f:
f.write(nparray_to_str(img))
print(f'Points saved in: {points_path}')
# generate and save uniformly sampled centroids
s = np.random.uniform(low=img.min(), high=img.max(), size=(k, 3))
tmp_labels = np.arange(1, k + 1).reshape((k, 1))
clusters = np.hstack((tmp_labels, s))
with open(clusters_path, 'w') as f:
f.write(nparray_to_str(clusters))
print(f'Centroids saved in: {clusters_path}')
if __name__ == '__main__':
args = parser.parse_args()
main(args.src_img, args.dst_folder, args.k_init_centroids)
|
markomih/kmeans_mapreduce
|
data_prep_scripts/data_prep.py
|
data_prep.py
|
py
| 1,652
|
python
|
en
|
code
| 41
|
github-code
|
6
|
38452601952
|
mod = int(1e9)
L = [0,1]
n = int(input())
for i in range(abs(n)-1):
L.append((L[i]+L[i+1])%mod)
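# Sign handling below follows the negafibonacci identity F(-n) = (-1)**(n+1) * F(n):
# print the sign on the first line, then |F(n)| mod 1e9 on the second.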
if n<0 and n%2 == 0:
print(-1)
elif n == 0:
print(0)
else:
print(1)
print(L[abs(n)])
|
LightPotato99/baekjoon
|
dynamic/fibonacci/fiboExpansion.py
|
fiboExpansion.py
|
py
| 197
|
python
|
en
|
code
| 0
|
github-code
|
6
|