max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
checkov/terraform/checks/resource/digitalocean/DropletSSHKeys.py | pmalkki/checkov | 1 | 12769351 | from checkov.common.models.enums import CheckCategories
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
from typing import Any
from checkov.common.models.consts import ANY_VALUE
class DropletSSHKeys(BaseResourceValueCheck):
    """Checkov check: a DigitalOcean droplet must configure at least one SSH key."""

    def __init__(self):
        # Register the check directly with its metadata; any value for the
        # inspected key satisfies it.
        super().__init__(
            name="Ensure the droplet specifies an SSH key",
            id="CKV_DIO_2",
            categories=[CheckCategories.GENERAL_SECURITY],
            supported_resources=['digitalocean_droplet'],
        )

    def get_inspected_key(self):
        """Terraform attribute inspected on the resource."""
        return "ssh_keys"

    def get_expected_value(self) -> Any:
        """Any non-empty value for ``ssh_keys`` passes the check."""
        return ANY_VALUE


check = DropletSSHKeys()
| 2.203125 | 2 |
code/segment_cells.py | a9w/Fat2_polarizes_WAVE | 0 | 12769352 | <reponame>a9w/Fat2_polarizes_WAVE
"""
For all base images in a directory, segment the cells from a 2D image or image
stack. Batch segmentation does not allow hand-adjustment of tissue segmentation
parameters by image, which is necessary for some images.
Input is a directory with .tif files to be segmented, either a single image with
a cell edge label or a multichannel image in which one of the channels has a
cell edge label. The names of files to be segmented should have the format
condition_samplenumber.tif and no other tifs in the directory should end in a
number.
Output is labeled tifs of the segmented cells with name
condition_samplenumber_seg.tif.
"""
from os import listdir
from imageio import imread, volread, imwrite
# Internal functions
from functions.segment import (epithelium_watershed,
largest_object_mask)
# Set location of directory with images to segment, output location
DATA_DIR = ('./data/Sra1GFP_level_polarity/')
OUT_DIR = ('./data/Sra1GFP_level_polarity/')
# Set total number of channels in each image
# and the index of the one to be used for cell segmentation
CHANNELS_TOTAL = 3
SEG_CHANNEL = 2
# Get the image file names and their basenames
file_names = sorted(listdir(DATA_DIR))
file_names_tif = [file for file in file_names if '.tif' in file]
base_image_files = []
basenames = []
for file in file_names_tif:
name = file.split('.tif')[0]
samplenumber = name.split('_')[-1]
if samplenumber.isdigit():
base_image_files.append(file)
condition = name.split('_' + samplenumber)[0]
basenames.append(condition + '_' + samplenumber)
# Import and segment each image, output segmented cells as tif
for i in range(len(base_image_files)):
if CHANNELS_TOTAL > 1:
ims = volread(DATA_DIR + base_image_files[i])
im = ims[SEG_CHANNEL]
else:
im = imread(DATA_DIR + base_image_files[i])
tissue_mask = largest_object_mask(im)
im_seg = epithelium_watershed(im, tissue_mask)
imwrite(OUT_DIR + basenames[i] + '_seg.tif', im_seg)
| 2.734375 | 3 |
sudan-art/django-backend/sudan_art/migrations/0004_alter_artwork_tags.py | osintalex/sudan-art | 1 | 12769353 | # Generated by Django 4.0 on 2022-04-16 00:55
import sudan_art.models
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-declare ``Artwork.tags`` as a CharField(max_length=250) validated by
    ``sudan_art.models.validate_tags``."""

    dependencies = [
        ("sudan_art", "0003_alter_artwork_tags"),
    ]

    operations = [
        migrations.AlterField(
            model_name="artwork",
            name="tags",
            field=models.CharField(
                max_length=250, validators=[sudan_art.models.validate_tags]
            ),
        ),
    ]
| 1.484375 | 1 |
examples/s3client.py | JohnVinyard/annotate-api | 1 | 12769354 | <gh_stars>1-10
import boto3
from botocore.exceptions import ClientError
from botocore.config import Config
from botocore import UNSIGNED
class ObjectStorageClient(object):
    """S3-compatible object-storage helper.

    Wraps two boto3 clients: a credentialed client for bucket/object
    operations against ``endpoint``, and an UNSIGNED client used only to
    generate URLs (objects are uploaded with a public-read ACL, so the
    URLs need no signature).
    """

    def __init__(self, endpoint, region, access_key, secret, bucket):
        # Connection settings for the signed client.
        self.bucket = bucket
        self.secret = secret
        self.access_key = access_key
        self.region = region
        self.endpoint = endpoint

        # Unsigned config: URLs built with this client carry no auth
        # query parameters.
        config = Config()
        config.signature_version = UNSIGNED

        self.s3 = boto3.client(
            service_name='s3',
            endpoint_url=self.endpoint,
            region_name=self.region,
            aws_access_key_id=self.access_key,
            aws_secret_access_key=self.secret)
        self.url_generator = boto3.client('s3', config=config)

    def ensure_bucket_exists(self):
        """Create the bucket (public-read, GET-from-anywhere CORS) if it does
        not already exist."""
        try:
            self.s3.head_bucket(Bucket=self.bucket)
        except ClientError:
            # head_bucket failed; assume the bucket is missing and create it.
            # NOTE(review): ClientError also covers permission errors — in
            # that case create_bucket will fail too; confirm acceptable.
            self.s3.create_bucket(
                ACL='public-read',
                Bucket=self.bucket)
            self.s3.put_bucket_cors(
                Bucket=self.bucket,
                CORSConfiguration={
                    'CORSRules': [
                        {
                            'AllowedMethods': ['GET'],
                            'AllowedOrigins': ['*'],
                            'MaxAgeSeconds': 3000
                        }
                    ]
                }
            )

    def put_object(self, key, body, content_type):
        """Upload ``body`` under ``key`` with a public-read ACL.

        Returns a publicly reachable URL for the object.
        """
        self.s3.put_object(
            Bucket=self.bucket,
            Body=body,
            Key=key,
            ACL='public-read',
            ContentType=content_type)

        # KLUDGE: It would be nice if this would work correctly with boto3
        # and fake s3, but for local dev environments, it seems that uris must
        # be built by hand
        if self.endpoint:
            return f'{self.endpoint}/{self.bucket}/{key}'
        else:
            return self.url_generator.generate_presigned_url(
                'get_object',
                ExpiresIn=0,
                Params={'Bucket': self.bucket, 'Key': key})
| 1.992188 | 2 |
spatial_correlation.py | Binxu-Stack/Statistics | 0 | 12769355 | <reponame>Binxu-Stack/Statistics
#!/usr/bin/env ovitos
"""Spatial autocorrelation of atomic non-affine moduli.

For each radial bin up to ``max_r``, accumulates a normalized correlation
(rho) of per-atom modulus fluctuations over all neighbour pairs found by an
OVITO cutoff neighbour finder, then prints (r, rho) pairs.
"""
import numpy as np
from ovito.io import import_file
from ovito.data import CutoffNeighborFinder

filename = "min.cfg"
max_r = 5.0   # neighbour-search cutoff radius
nbins = 10    # number of radial bins
dr = max_r / float(nbins)

node = import_file(filename)
data = node.source
finder = CutoffNeighborFinder(max_r, data)

# Per-atom non-affine moduli, one value per line, same order as particles.
atomic_modulus = []
with open("./atomic_nonaffine_modulus.dat", 'r') as infile:
    for line in infile:
        atomic_modulus.append(float(line))
atomic_modulus = np.array(atomic_modulus)
mean_atomic_modulus = atomic_modulus.mean()

corr = np.zeros(nbins)
sigma_i = np.zeros(nbins)
sigma_j = np.zeros(nbins)
rho = np.zeros(nbins)

for index in range(data.number_of_particles):
    delta_i = atomic_modulus[index] - mean_atomic_modulus
    for neigh in finder.find(index):
        ibin = int(neigh.distance / dr)
        # BUG FIX: the out-of-range guard was hard-coded as "ibin >= 10";
        # use nbins so the guard stays correct if the binning changes.
        if ibin >= nbins:
            continue
        delta_j = atomic_modulus[neigh.index] - mean_atomic_modulus
        corr[ibin] += delta_i * delta_j
        sigma_i[ibin] += delta_i * delta_i
        sigma_j[ibin] += delta_j * delta_j

# Normalize per bin and print the bin-centre radius with its correlation.
for ibin in range(nbins):
    rho[ibin] = corr[ibin] / np.sqrt(sigma_i[ibin]) / np.sqrt(sigma_j[ibin])
    print((ibin + 0.5) * dr, rho[ibin])
| 2.125 | 2 |
src/integral_timber_joints/rhino/debug_movements.py | gramaziokohler/integral_timber_joints | 3 | 12769356 | import Rhino # type: ignore
import rhinoscriptsyntax as rs
import scriptcontext
import re
from integral_timber_joints.rhino.load import get_process, get_process_artist, process_is_none
from integral_timber_joints.geometry import JointHalfLap
from integral_timber_joints.rhino.assembly_artist import AssemblyNurbsArtist
from integral_timber_joints.rhino.process_artist import ProcessArtist, RobotClampAssemblyProcess, Assembly
from integral_timber_joints.process import RoboticMovement, ObjectState
from compas_rhino.utilities import clear_layer, delete_objects, draw_mesh
from compas_rhino.utilities import draw_polylines
from compas.geometry import Frame, Transformation, Cylinder, Point, transform_points, transpose_matrix, multiply_matrices
from compas.datastructures import Mesh, mesh_weld
try:
from typing import Any, Dict, List, Optional, Tuple, Type
except:
pass
if __name__ == '__main__':
process = get_process()
artist = get_process_artist()
| 1.65625 | 2 |
rubberband/handlers/fe/file.py | ambros-gleixner/rubberband | 4 | 12769357 | """Contains FileView."""
from tornado.web import HTTPError
from rubberband.models import TestSet, Result
from rubberband.constants import EXPORT_DATA_FORMATS, EXPORT_FILE_TYPES
from .base import BaseHandler
class FileView(BaseHandler):
    """Request handler handling the view or download of log files or log file contents."""

    def get(self, file_id):
        """
        Answer to GET requests.

        Displays file or part of file as different file types, formats, for download or plain view.
        Options available via query string parameters:
        ``instance`` (Result id), ``download`` (truthy => raw response),
        ``format`` (one of EXPORT_DATA_FORMATS), ``ftype`` (one of EXPORT_FILE_TYPES).

        Parameters
        ----------
        file_id : str
            file to be viewed.

        Renders `file.html`.
        """
        instance_id = self.get_argument("instance", default=None)
        for_download = self.get_argument("download", default=False)
        fformat = self.get_argument("format", default="raw")
        ftype = self.get_argument("ftype", default=".out")

        # perform some validation on the query params
        if fformat not in EXPORT_DATA_FORMATS:
            raise HTTPError(404)
        if ftype not in EXPORT_FILE_TYPES:
            raise HTTPError(404)

        # load the appropriate object: a single Result (routed under its
        # TestSet) when an instance id is given, otherwise the whole TestSet
        if instance_id:
            obj = Result.get(id=instance_id, routing=file_id)
        else:
            obj = TestSet.get(id=file_id)

        # the format name doubles as the exporter method name,
        # e.g. `result.json(ftype=".set")`
        file_contents = getattr(obj, fformat)(ftype=ftype)
        if file_contents is None:
            raise HTTPError(404)

        if for_download:
            # write the raw contents so the browser downloads them
            self.write(file_contents)
        else:
            self.render("file.html", contents=file_contents)
| 2.71875 | 3 |
src/adventofcode2020/solutions/day11.py | RoelAdriaans/adventofcode2020 | 2 | 12769358 | from abc import abstractmethod
from collections import defaultdict, deque
from typing import Deque, Dict
from adventofcode2020.utils.abstract import FileReaderSolution
class Day11:
    """Shared logic for AoC 2020 day 11 (seating cellular automaton)."""

    # Threshold of occupied neighbours at which an occupied seat empties;
    # set by the concrete parts (4 in part A, 5 in part B).
    count_seats: int

    @abstractmethod
    def count_next_to_it(self, grid, row, col) -> int:
        """Count the number of seats next to it/ Depends on the part"""

    def str_to_map(self, input_data) -> Dict[int, Dict]:
        """Parse the puzzle text into a nested dict grid[row][col] -> char."""
        grid = {}
        for row, line in enumerate(input_data.splitlines()):
            grid[row] = {k: v for k, v in enumerate(line)}
        return grid

    def generation(self, grid) -> Dict[int, Dict]:
        """Apply one round of the seating rules, returning a new grid."""
        # First we will compute the numbers, and then assign it to a new dict
        # This works, but it not really performance proof..
        new_grid: Dict[int, Dict] = defaultdict(dict)
        for row in range(0, len(grid)):
            for col in range(0, len(grid[row])):
                if grid[row][col] == ".":
                    # Floor never changes.
                    new_grid[row][col] = "."
                    continue
                count = self.count_next_to_it(grid, row, col)
                if count == 0:
                    # No occupied neighbours: the seat becomes/stays occupied.
                    new_grid[row][col] = "#"
                elif grid[row][col] == "#" and count >= self.count_seats:
                    # If a seat is occupied (#) and four or more seats adjacent to it
                    # are also occupied, the seat becomes empty
                    new_grid[row][col] = "L"
                elif grid[row][col] == "#" and count < self.count_seats:
                    # Occupied but other then 4, it stays the same
                    new_grid[row][col] = grid[row][col]
                else:
                    new_grid[row][col] = grid[row][col]
        return new_grid

    def count_filled(self, grid, char="#") -> int:
        """Count cells equal to ``char`` over the whole grid."""
        rij = []
        for row, value in grid.items():
            rij += value.values()
        return rij.count(char)

    def print_grid(self, grid):
        """Debug helper: clear the terminal and render the grid."""
        from time import sleep

        sleep(0.1)
        print(chr(27) + "[2J")  # ANSI clear-screen escape
        print("\n")
        for row, value in grid.items():
            print("".join(value.values()))
        print("\n")

    def run_day(self, input_data):
        """Run generations until the occupied count is stable, return it.

        Stability is detected when the count seen 5 generations earlier
        equals the current count.
        """
        average: Deque[int] = deque()
        grid = self.str_to_map(input_data)
        while True:
            grid = self.generation(grid)
            # Filled seats
            filled = self.count_filled(grid)
            average.append(filled)
            # self.print_grid(map)
            if len(average) >= 5:
                popped = average.popleft()
                if popped == filled:
                    return filled
class Day11PartA(Day11, FileReaderSolution):
    """Part A: only the eight directly adjacent cells count as neighbours."""

    count_seats = 4

    def count_next_to_it(self, grid, row, col) -> int:
        """Return how many of the eight adjacent cells hold an occupied seat (#)."""
        offsets = (
            (0, -1),   # left
            (0, +1),   # right
            (-1, 0),   # above
            (+1, 0),   # below
            (-1, -1),  # diagonals
            (-1, +1),
            (+1, -1),
            (+1, +1),
        )
        # Missing rows/columns fall back to 0, which never equals "#".
        return sum(
            grid.get(row + drow, {}).get(col + dcol, 0) == "#"
            for drow, dcol in offsets
        )

    def solve(self, input_data: str) -> int:
        return self.run_day(input_data)
class Day11PartB(Day11, FileReaderSolution):
    """Part B: neighbours are the first *visible* seats in the 8 directions."""

    count_seats = 5

    def find_recursive(self, grid, row, col, drow, dcol, offset=1) -> bool:
        """
        Find if a spot is taken.
        If a spot is the floor, it will look further.
        If the seat is taken, it will return True, else it will return False
        """
        # Step `offset` cells in direction (drow, dcol) from (row, col).
        spot = grid.get(row + (drow * offset), {}).get(col + (dcol * offset), None)
        if spot == ".":
            # Floor: keep looking one step further in the same direction.
            return self.find_recursive(grid, row, col, drow, dcol, offset + 1)
        elif spot == "#":
            return True
        elif spot == "L":
            return False
        elif spot is None:
            # Off the grid
            return False
        else:
            raise ValueError(f"Unknown char at {spot}")

    def count_next_to_it(self, grid, row, col) -> int:
        """
        Return how many of the eight lines of sight from (row, col) end at
        an occupied seat; floor tiles are looked through.
        """
        res = [
            self.find_recursive(grid, row, col, drow, dcol)
            for drow, dcol in (
                (0, -1),  # Left
                (0, +1),  # Right
                (-1, 0),  # Top
                (+1, 0),  # Bottom
                (-1, -1),  # Bottom left
                (-1, +1),  # Bottom right
                (+1, -1),  # Top left
                (+1, +1),  # Top right
            )
        ]
        # booleans sum as 0/1
        return sum(res)

    def solve(self, input_data: str) -> int:
        return self.run_day(input_data)
| 3.765625 | 4 |
src/spaczz/customattrs.py | JonasHablitzel/spaczz | 153 | 12769359 | <gh_stars>100-1000
"""Custom spaCy attributes for spaczz."""
from __future__ import annotations
from typing import Iterable, Optional, Set, Tuple, Type
import warnings
from spacy.tokens import Doc, Span, Token
from .exceptions import AttrOverwriteWarning, SpaczzSpanDeprecation
class SpaczzAttrs:
    """Adds spaczz custom attributes to spacy."""

    _initialized = False

    @classmethod
    def initialize(cls: Type[SpaczzAttrs]) -> None:
        """Initializes and registers custom attributes.

        Idempotent: a second call is a no-op. If any extension name is
        already taken, ALL extensions are re-registered with ``force=True``
        and an ``AttrOverwriteWarning`` is emitted.
        """
        if cls._initialized:
            return
        try:
            Token.set_extension("spaczz_token", default=False)
            Token.set_extension("spaczz_type", default=None)
            Token.set_extension("spaczz_ratio", default=None)
            Token.set_extension("spaczz_counts", default=None)
            Token.set_extension("spaczz_details", default=None)

            Span.set_extension("spaczz_span", getter=cls.get_spaczz_span)
            Span.set_extension("spaczz_ent", getter=cls.get_spaczz_ent)
            Span.set_extension("spaczz_type", getter=cls.get_span_type)
            Span.set_extension("spaczz_types", getter=cls.get_span_types)
            Span.set_extension("spaczz_ratio", getter=cls.get_ratio)
            Span.set_extension("spaczz_counts", getter=cls.get_counts)
            Span.set_extension("spaczz_details", getter=cls.get_details)

            Doc.set_extension("spaczz_doc", getter=cls.get_spaczz_doc)
            Doc.set_extension("spaczz_types", getter=cls.get_doc_types)
            cls._initialized = True
        except ValueError:
            warnings.warn(
                """One or more spaczz custom extensions has already been registered.
                These are being force overwritten. Please avoid defining personal,
                custom extensions prepended with "spaczz_".
                """,
                AttrOverwriteWarning,
            )
            Token.set_extension("spaczz_token", default=False, force=True)
            Token.set_extension("spaczz_type", default=None, force=True)
            Token.set_extension("spaczz_ratio", default=None, force=True)
            Token.set_extension("spaczz_counts", default=None, force=True)
            # BUG FIX: "spaczz_details" was previously never force-registered.
            Token.set_extension("spaczz_details", default=None, force=True)

            Span.set_extension(
                "spaczz_span", getter=cls.get_spaczz_span, force=True
            )
            # BUG FIX: "spaczz_ent" was previously never force-registered.
            Span.set_extension("spaczz_ent", getter=cls.get_spaczz_ent, force=True)
            Span.set_extension("spaczz_type", getter=cls.get_span_type, force=True)
            Span.set_extension(
                "spaczz_types", getter=cls.get_span_types, force=True
            )
            Span.set_extension("spaczz_ratio", getter=cls.get_ratio, force=True)
            Span.set_extension("spaczz_counts", getter=cls.get_counts, force=True)
            # BUG FIX: "spaczz_details" was previously never force-registered.
            Span.set_extension("spaczz_details", getter=cls.get_details, force=True)

            Doc.set_extension("spaczz_doc", getter=cls.get_spaczz_doc, force=True)
            Doc.set_extension("spaczz_types", getter=cls.get_doc_types, force=True)
            # BUG FIX: mark initialization complete on this path too, so a
            # later call does not re-register and warn a second time.
            cls._initialized = True

    @staticmethod
    def get_spaczz_span(span: Span) -> bool:
        """Getter for spaczz_span `Span` attribute (deprecated)."""
        warnings.warn(
            """spaczz_span is deprecated.
            Use spaczz_ent instead.""",
            SpaczzSpanDeprecation,
        )
        return all([token._.spaczz_token for token in span])

    @staticmethod
    def get_spaczz_ent(span: Span) -> bool:
        """Getter for spaczz_ent `Span` attribute."""
        return all([token._.spaczz_token for token in span])

    @classmethod
    def get_span_type(cls: Type[SpaczzAttrs], span: Span) -> Optional[str]:
        """Getter for spaczz_type `Span` attribute.

        Returns the shared type only when every token agrees, else None.
        """
        if cls._all_equal([token._.spaczz_type for token in span]):
            return span[0]._.spaczz_type
        else:
            return None

    @staticmethod
    def get_span_types(span: Span) -> Set[str]:
        """Getter for spaczz_types `Span` attribute."""
        types = [token._.spaczz_type for token in span if token._.spaczz_type]
        return set(types)

    @classmethod
    def get_ratio(cls: Type[SpaczzAttrs], span: Span) -> Optional[int]:
        """Getter for spaczz_ratio `Span` attribute."""
        if cls._all_equal([token._.spaczz_ratio for token in span]):
            return span[0]._.spaczz_ratio
        else:
            return None

    @classmethod
    def get_counts(
        cls: Type[SpaczzAttrs], span: Span
    ) -> Optional[Tuple[int, int, int]]:
        """Getter for spaczz_counts `Span` attribute."""
        if cls._all_equal([token._.spaczz_counts for token in span]):
            return span[0]._.spaczz_counts
        else:
            return None

    @classmethod
    def get_details(cls: Type[SpaczzAttrs], span: Span) -> Optional[int]:
        """Getter for current placeholder spaczz_details `Span` attribute."""
        if cls._all_equal([token._.spaczz_details for token in span]):
            return span[0]._.spaczz_details
        else:
            return None

    @staticmethod
    def get_spaczz_doc(doc: Doc) -> bool:
        """Getter for spaczz_doc `Doc` attribute."""
        return any([token._.spaczz_token for token in doc])

    @staticmethod
    def get_doc_types(doc: Doc) -> Set[str]:
        """Getter for spaczz_types `Doc` attribute."""
        types = [token._.spaczz_type for token in doc if token._.spaczz_type]
        return set(types)

    @staticmethod
    def _all_equal(iterable: Iterable) -> bool:
        """Tests if all elements of iterable are equal (vacuously True when empty)."""
        iterator = iter(iterable)
        try:
            first = next(iterator)
        except StopIteration:
            return True
        return all(first == rest for rest in iterator)
| 2.3125 | 2 |
src/pybel/resources/definitions/definitions.py | tehw0lf/pybel | 0 | 12769360 | # -*- coding: utf-8 -*-
import logging
import os
import requests.exceptions
import six
from configparser import ConfigParser
from ..exc import EmptyResourceError, InvalidResourceError, MissingResourceError
from ..utils import download, is_url
__all__ = [
'parse_bel_resource',
'get_lines',
'get_bel_resource',
]
log = logging.getLogger(__name__)
def _get_bel_resource_kvp(line, delimiter):
"""
:param str line:
:param str delimiter:
:rtype: tuple[str,str]
"""
split_line = line.rsplit(delimiter, 1)
key = split_line[0].strip()
value = split_line[1].strip() if 2 == len(split_line) else None
return key, value
def parse_bel_resource(lines):
    """Parses a BEL config (BELNS, BELANNO, or BELEQ) file from the given line iterator over the file

    :param iter[str] lines: An iterable over the lines in a BEL config file
    :return: A config-style dictionary representing the BEL config file
    :rtype: dict
    """
    lines = list(lines)

    # Everything up to (and including) the last '[Values]' marker is
    # config-style metadata; everything after it is the value table.
    value_start = 1 + max(
        idx for idx, entry in enumerate(lines) if entry.strip() == '[Values]'
    )

    parser = ConfigParser(strict=False)
    parser.optionxform = lambda option: option  # keep option names case-sensitive
    parser.read_file(lines[:value_start])

    delimiter = parser['Processing']['DelimiterString']

    result = {section: dict(content) for section, content in parser.items()}
    result['Values'] = dict(
        _get_bel_resource_kvp(entry, delimiter) for entry in lines[value_start:]
    )
    return result
def get_lines(location):
    """Gets the lines from a location

    :param str location: The URL location to download or a file path to open. File path expands user.
    :return: list[str]
    :raises: requests.exceptions.HTTPError
    """
    if is_url(location):
        # Remote resource: decode each line as UTF-8, dropping undecodable
        # bytes, and strip surrounding whitespace.
        res = download(location)
        return list(line.decode('utf-8', errors='ignore').strip() for line in res.iter_lines())
    else:
        # Local file: '~' is expanded.
        # NOTE(review): unlike the URL branch, these lines keep their
        # trailing newlines — confirm downstream parsing tolerates both.
        with open(os.path.expanduser(location)) as f:
            return list(f)
def get_bel_resource(location):
    """Loads/downloads and parses a config file from the given url or file path

    :param str location: The URL or file path to a BELNS, BELANNO, or BELEQ file to download and parse
    :return: A config-style dictionary representing the BEL config file
    :rtype: dict
    :raises: pybel.resources.exc.ResourceError
    """
    log.debug('getting resource: %s', location)

    try:
        lines = get_lines(location)
    except requests.exceptions.HTTPError as e:
        # Download failed: the resource is unreachable/missing.
        six.raise_from(MissingResourceError(location), e)

    try:
        result = parse_bel_resource(lines)
    except ValueError as e:
        # e.g. no '[Values]' marker (max() over an empty sequence raises
        # ValueError) — the file is not a valid BEL resource.
        six.raise_from(InvalidResourceError(location), e)

    if not result['Values']:
        raise EmptyResourceError(location)

    return result
| 2.5 | 2 |
usersapp/migrations/0002_auto_20200309_1308.py | AlpsRunner/money_tranfer_page | 0 | 12769361 | # Generated by Django 2.2.3 on 2020-03-09 08:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Order users by username and add DB indexes on ``balance`` and ``inn``."""

    dependencies = [
        ('usersapp', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='customuser',
            options={'ordering': ['username']},
        ),
        migrations.AlterField(
            model_name='customuser',
            name='balance',
            field=models.DecimalField(db_index=True, decimal_places=2, default=0, max_digits=12),
        ),
        migrations.AlterField(
            model_name='customuser',
            name='inn',
            field=models.CharField(db_index=True, max_length=12),
        ),
    ]
| 1.671875 | 2 |
dataset_creation/find_answer.py | AseelAlshorafa/SOQAL | 109 | 12769362 | <filename>dataset_creation/find_answer.py
import sys
import os
from scipy import spatial
import numpy as np
def editDistance(str1, str2, m, n):
    """Levenshtein (edit) distance between ``str1[:m]`` and ``str2[:n]``.

    Iterative DP with two rolling rows: O(m*n) time, O(n) space (the
    previous version kept the full (m+1) x (n+1) table, and its comment
    mislabelled it as recursive). Allowed operations are insert, remove
    and replace, each costing 1.

    :param str1: first string
    :param str2: second string
    :param m: number of leading characters of ``str1`` to compare
    :param n: number of leading characters of ``str2`` to compare
    :return: the edit distance as an int
    """
    # prev[j] = distance between the current prefix of str1 and str2[:j].
    prev = list(range(n + 1))  # distances from the empty str1 prefix
    for i in range(1, m + 1):
        curr = [i] + [0] * n   # curr[0]: i deletions to reach empty str2
        for j in range(1, n + 1):
            if str1[i - 1] == str2[j - 1]:
                curr[j] = prev[j - 1]           # match: no extra cost
            else:
                curr[j] = 1 + min(curr[j - 1],  # insert
                                  prev[j],      # remove
                                  prev[j - 1])  # replace
        prev = curr
    return prev[n]
def concatenateString(paragraph, start, length):
    """Join ``length`` consecutive words of ``paragraph`` starting at ``start``.

    Words are joined with single spaces. Uses ``str.join`` over a slice
    instead of the old manual concatenation loop; as a side benefit,
    ``length == 0`` now yields ``""`` instead of indexing ``paragraph[start]``.

    :param paragraph: list of words
    :param start: index of the first word to include
    :param length: number of words to include
    :return: the space-joined substring
    """
    return " ".join(paragraph[start:start + length])
def find_answer(paragraph, answer):
    """Locate the span of ``paragraph`` closest to ``answer``.

    Scans every window of 1..14 consecutive words; an exact match returns
    immediately, otherwise the candidate with the smallest edit distance
    to ``answer`` wins. Returns ``(text, char_offset)`` where the offset
    is the candidate's position in ``paragraph``.
    """
    best_candidate = ""
    best_score = 1000000  # sentinel larger than any realistic distance
    words = paragraph.split()
    for start in range(len(words)):
        # Cap window width at 15 words for efficiency.
        for width in range(1, min(15, len(words) - start + 1)):
            candidate = concatenateString(words, start, width)
            if candidate == answer:
                return answer, paragraph.find(answer)
            score = editDistance(answer, candidate, len(answer), len(candidate))
            if score < best_score:
                best_score = score
                best_candidate = candidate
    return best_candidate, paragraph.find(best_candidate)
def test_find_answer():
    # Smoke test: a truncated answer should be matched back to the closest
    # word span of the (Arabic) paragraph; result is printed, not asserted.
    p = "أصبحت بلاكبول وبلاكبيرن مع داروين سلطات وحدوية مستقلة "
    a = "بلاكبو"
    print(find_answer(p, a))
| 3.03125 | 3 |
scripts/djvuPageLabels.py | Noitaenola/ScanManual | 2 | 12769363 | <gh_stars>1-10
"""
Generate page labels for the djvu and insert them.
"""
import os
SCAN = "../djv/SCAN_INS_OCR_BOOK.djvu"
SCAN_PAG = "../djv/SCAN_INS_OCR_BOOK_PAG.djvu"
djvuPageLabels = "djvuPageLabels.dsed"
# Delete old files (if they exist).
if os.path.isfile(SCAN_PAG):
os.remove(SCAN_PAG)
if os.path.isfile(djvuPageLabels):
os.remove(djvuPageLabels)
# Define the page labels for the different parts.
frontMatter = ["Cover", "FFEP"] # , 'FFEP2', 'FFEP3']
romans = ["i", "ii", "iii", "iv", "v", "vi", "vii", "viii", "ix", "x"]
backMatter = ["RFEP", '"Back cover"', "Spine"]
# Total number of pages in the book.
total = 461
# Calculate some indices.
fmlen = len(frontMatter) + len(romans)
pPages = total - fmlen - len(backMatter)
lastpPage = total - len(backMatter)
# Generate the script and save it.
with open(djvuPageLabels, "w") as outputFile:
for p, title in enumerate(frontMatter + romans, start=1):
print("select", p, file=outputFile)
print("set-page-title", title, file=outputFile)
for p in range(1, pPages + 1):
print("select", p + fmlen, file=outputFile)
print("set-page-title", p, file=outputFile)
for p, title in enumerate(backMatter, start=lastpPage + 1):
print("select", p, file=outputFile)
print("set-page-title", title, file=outputFile)
print("save-bundled " + SCAN_PAG, file=outputFile)
# Call djvused on the created script.
os.system("djvused " + SCAN + " -f " + djvuPageLabels)
| 3.015625 | 3 |
scripts/print.py | DavideMammarella/USI-MSDE-Thesis-Code | 4 | 12769364 | <reponame>DavideMammarella/USI-MSDE-Thesis-Code<gh_stars>1-10
# Copyright 2021 Testing Automated @ Università della Svizzera italiana (USI)
# All rights reserved.
# This file is part of the project SelfOracle, a misbehaviour predictor for autonomous vehicles,
# developed within the ERC project PRECRIME
# and is released under the "MIT License Agreement". Please see the LICENSE
# file that should have been included as part of this package.
import tensorflow
from scipy.stats import ttest_rel
from tqdm import tqdm

import utils
from config import Config
from utils import *
from selforacle.vae import normalize_and_reshape, VAE

# Suffix selecting which trained encoder/decoder pair to load from sao/.
WHAT = '-latent16-centerimg-nocrop'

if __name__ == '__main__':
    # Run from the project root, not scripts/.
    os.chdir(os.getcwd().replace('scripts', ''))
    print(os.getcwd())

    cfg = Config()
    cfg.from_pyfile("config_my.py")

    drive = utils.get_driving_styles(cfg)

    # Nominal (in-distribution) driving images.
    cfg.SIMULATION_NAME = 'gauss-journal-track3-nominal'
    data_nominal = load_all_images(cfg)

    # VAE trained with a plain MSE reconstruction loss.
    encoder_mse = tensorflow.keras.models.load_model('sao/encoder-track3-MSEloss' + WHAT + '-CI-RETRAINED-2X-UNC')
    decoder_mse = tensorflow.keras.models.load_model('sao/decoder-track3-MSEloss' + WHAT + '-CI-RETRAINED-2X-UNC')
    vae_mse = VAE(model_name="encoder_mse",
                  loss="MSE",
                  latent_dim=cfg.SAO_LATENT_DIM,
                  encoder=encoder_mse,
                  decoder=decoder_mse)
    vae_mse.compile(optimizer=tensorflow.keras.optimizers.Adam(learning_rate=0.0001))

    # VAE trained with the full "VAE" loss.
    encoder_vae = tensorflow.keras.models.load_model('sao/encoder-track3-VAEloss' + WHAT + '-CI-RETRAINED-2X-UNC')
    decoder_vae = tensorflow.keras.models.load_model('sao/decoder-track3-VAEloss' + WHAT + '-CI-RETRAINED-2X-UNC')
    vae_vae = VAE(model_name="encoder_vae",
                  loss="VAE",
                  latent_dim=cfg.SAO_LATENT_DIM,
                  encoder=encoder_vae,
                  decoder=decoder_vae)
    vae_vae.compile(optimizer=tensorflow.keras.optimizers.Adam(learning_rate=0.0001))

    # Reconstruct every nominal image with both models.
    i = 0
    list_original = []
    list_reconstructed_mse = []
    list_reconstructed_vae = []

    for x in tqdm(data_nominal):
        i += 1
        x = utils.resize(x)
        x = normalize_and_reshape(x)
        list_original.append(x.reshape(RESIZED_IMAGE_HEIGHT, RESIZED_IMAGE_WIDTH, IMAGE_CHANNELS))
        reconstructed_mse = vae_mse.predict(x)
        reconstructed_vae = vae_vae.predict(x)
        list_reconstructed_mse.append(
            reconstructed_mse.reshape(RESIZED_IMAGE_HEIGHT, RESIZED_IMAGE_WIDTH, IMAGE_CHANNELS))
        list_reconstructed_vae.append(
            reconstructed_vae.reshape(RESIZED_IMAGE_HEIGHT, RESIZED_IMAGE_WIDTH, IMAGE_CHANNELS))

    del data_nominal

    # Unseen (out-of-distribution) driving images: same track, rain.
    cfg.SIMULATION_NAME = 'gauss-journal-track3-rain'
    data_unseen = load_all_images(cfg)

    i = 0
    list_original_unseen = []
    list_reconstructed_mse_unseen = []
    list_reconstructed_vae_unseen = []
    for x in tqdm(data_unseen):
        i += 1
        x = utils.resize(x)
        x = normalize_and_reshape(x)
        list_original_unseen.append(x.reshape(RESIZED_IMAGE_HEIGHT, RESIZED_IMAGE_WIDTH, IMAGE_CHANNELS))
        reconstructed_mse = vae_mse.predict(x)
        reconstructed_vae = vae_vae.predict(x)
        list_reconstructed_mse_unseen.append(
            reconstructed_mse.reshape(RESIZED_IMAGE_HEIGHT, RESIZED_IMAGE_WIDTH, IMAGE_CHANNELS))
        list_reconstructed_vae_unseen.append(
            reconstructed_vae.reshape(RESIZED_IMAGE_HEIGHT, RESIZED_IMAGE_WIDTH, IMAGE_CHANNELS))

    # Compare sharpness (Laplacian variance) of the nominal originals and
    # the two models' nominal reconstructions; paired t-test MSE vs VAE.
    lpl_original = laplacian_variance(list_original)
    lpl_var_mse = laplacian_variance(list_reconstructed_mse)
    lpl_var_vae = laplacian_variance(list_reconstructed_vae)

    res = ttest_rel(lpl_var_mse, lpl_var_vae)
    print(f'T-score = {res.statistic:.2f}, p-value = {res.pvalue:.2f}')

    plt.hist(lpl_original, bins=50, alpha=0.2, label='nominal images')
    plt.hist(lpl_var_mse, bins=50, alpha=0.2, label='unseen images generated by VAE (MSE loss)')
    plt.hist(lpl_var_vae, bins=50, alpha=0.2, label='unseen images generated by VAE (VAE loss)')
    plt.legend(loc='upper right')
    plt.xlabel('Laplacian variance')
    plt.title(f'T-score = {res.statistic:.2f}, p-value = {res.pvalue:.2f}')
    # NOTE(review): both histograms are saved to the same path and so the
    # second run overwrites the first — confirm whether intended.
    plt.savefig("plots/laplacian-" + WHAT + ".png")
    plt.show()

    # Same comparison for the reconstructions of the unseen (rain) set.
    lpl_original = laplacian_variance(list_original)
    lpl_var_mse = laplacian_variance(list_reconstructed_mse_unseen)
    lpl_var_vae = laplacian_variance(list_reconstructed_vae_unseen)

    res = ttest_rel(lpl_var_mse, lpl_var_vae)
    print(f'T-score = {res.statistic:.2f}, p-value = {res.pvalue:.2f}')

    plt.hist(lpl_original, bins=50, alpha=0.2, label='nominal images')
    plt.hist(lpl_var_mse, bins=50, alpha=0.2, label='unseen images generated by VAE (MSE loss)')
    plt.hist(lpl_var_vae, bins=50, alpha=0.2, label='unseen images generated by VAE (VAE loss)')
    plt.legend(loc='upper right')
    plt.xlabel('Laplacian variance')
    plt.title(f'T-score = {res.statistic:.2f}, p-value = {res.pvalue:.2f}')
    plt.savefig("plots/laplacian-" + WHAT + ".png")
    plt.show()
| 1.84375 | 2 |
mesa.py | jadexter/astr3400 | 0 | 12769365 |
import numpy as np
import os
from astropy.io import ascii
class mesa:
    """Reader for MESA stellar-evolution profile files (astropy.io.ascii)."""

    def init(self):
        # ###### parameters ####################
        self.path = '/Users/Jason/code/mesa_progenitors/'  # file path
        # self.type = 'solar' # file type
        self.name = 'profileXX.data'  # file name

    def __init__(self, **kwargs):
        # Set the defaults above, then let keyword arguments override any
        # attribute (e.g. mesa(name='profile1.data')).
        self.init()
        self.__dict__.update(kwargs)

    # sample lab 1 solution
    def read_mesa_profile(self):
        """Return (header, data) tables read from ``self.name``.

        Header row indices follow the MESA profile layout: the global
        header occupies lines 2-3, the data table starts at line 5
        (astropy's 0-based header_start/data_start convention).
        """
        h = ascii.read(self.name, header_start=1, data_start=2, data_end=3)
        f = ascii.read(self.name, header_start=4, data_start=5)
        return h, f

    def read_mesa_star(self):
        """Read the profile and cache it on ``self.header`` / ``self.data``."""
        h, f = self.read_mesa_profile()
        self.data = f
        self.header = h
| 2.890625 | 3 |
data_preparation/get_density_map_gaussian.py | ZhengPeng7/Multi_column_CNN_in_Keras_for_crowd_counting | 5 | 12769366 | import cv2
import math
import numpy as np
def get_density_map_gaussian(im, points):
    """Build a density map the same shape as ``im`` from point annotations.

    Each row of ``points`` is an (x, y) location; every point deposits a
    15x15 Gaussian kernel (sigma=4) onto an initially-zero float64 map,
    clipped at the image borders. Special cases: ``points is None`` returns
    an all-zero map; a single point marks its pixel with 255 instead of a
    kernel.

    NOTE(review): assumes ``im`` is 2-D (grayscale) — the shape unpacking
    below requires it.
    """
    im_density = np.zeros_like(im, dtype=np.float64)
    h, w = im_density.shape
    if points is None:
        return im_density
    if points.shape[0] == 1:
        # Single annotation: clamp to the image and set the pixel directly.
        x1 = max(0, min(w-1, round(points[0, 0])))
        y1 = max(0, min(h-1, round(points[0, 1])))
        im_density[y1, x1] = 255
        return im_density
    for j in range(points.shape[0]):
        f_sz = 15     # kernel size (pixels)
        sigma = 4.0   # kernel standard deviation
        # Separable 2-D Gaussian: outer product of two 1-D kernels.
        H = np.multiply(cv2.getGaussianKernel(f_sz, sigma), (cv2.getGaussianKernel(f_sz, sigma)).T)
        # Clamp the (floored) point coordinates inside the image.
        x = min(w-1, max(0, abs(int(math.floor(points[j, 0])))))
        y = min(h-1, max(0, abs(int(math.floor(points[j, 1])))))
        if x >= w or y >= h:
            continue
        # Kernel footprint in image coordinates (half-open on the right).
        x1 = x - f_sz//2 + 0
        y1 = y - f_sz//2 + 0
        x2 = x + f_sz//2 + 1
        y2 = y + f_sz//2 + 1
        dfx1, dfy1, dfx2, dfy2 = 0, 0, 0, 0
        change_H = False
        # Clip the footprint at the borders, remembering how much was cut
        # on each side so the kernel can be rebuilt at the clipped size.
        if x1 < 0:
            dfx1 = abs(x1) + 0
            x1 = 0
            change_H = True
        if y1 < 0:
            dfy1 = abs(y1) + 0
            y1 = 0
            change_H = True
        if x2 > w:
            dfx2 = x2 - w
            x2 = w
            change_H = True
        if y2 > h:
            dfy2 = y2 - h
            y2 = h
            change_H = True
        x1h, y1h, x2h, y2h = 1 + dfx1, 1 + dfy1, f_sz - dfx2, f_sz - dfy2
        if change_H is True:
            # Rebuild a smaller Gaussian matching the clipped footprint.
            H = np.multiply(cv2.getGaussianKernel(y2h-y1h+1, sigma), (cv2.getGaussianKernel(x2h-x1h+1, sigma)).T)
        im_density[y1:y2, x1:x2] += H
    return im_density
| 2.578125 | 3 |
src/analisador_sintatico/Node.py | matheusfbonfim/BahTche-Language-Compiler | 1 | 12769367 | class Node:
def __init__(self, name=None, terminal=False):
self._name = name
self._children = []
self._level = None
self._terminal = terminal
# ====================
# GETTERS E SETTERS
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def terminal(self):
return self._terminal
@terminal.setter
def terminal(self, value):
self._terminal = value
@property
def level(self):
return self._level
@level.setter
def level(self, value):
self._level = value
@property
def children(self):
return self._children
@children.setter
def children(self, child):
self._children.append(child)
| 3.3125 | 3 |
page_objects/temperature_main_page.py | NikhilPrakashrao/evaluationtask | 0 | 12769368 | """
This class models the main Temperature page.
"""
from .Base_Page import Base_Page
from .temperature_object import Temperature_Object
from utils.Wrapit import Wrapit
class Temperature_Main_Page(Base_Page, Temperature_Object):
    "Page object modelling the main temperature page"

    def start(self):
        "Open the page; the empty relative URL resolves to the base URL"
        relative_url = ''
        self.open(relative_url)
| 3.09375 | 3 |
def get_all_includes(comp_args, dst_includes):
    """Collect include-related flags from a compiler command line.

    Scans ``comp_args`` and appends to ``dst_includes`` (deduplicated, in
    first-seen order):

    * ``-isystem <dir>``  -> recorded as ``-I<dir>``
    * ``-include <file>`` -> recorded as ``-include <file>``
      (entries mentioning ``dhd_sec_feature.h`` are skipped)
    * ``-I<dir>``         -> kept as-is, except paths containing ``drivers``
      or ``sound`` (kernel-tree noise filtered for this analysis)

    ``dst_includes`` is mutated in place; nothing is returned.  The value
    token following ``-isystem``/``-include`` is not skipped, matching the
    original behaviour (a bare directory/file token matches no pattern and
    is therefore harmless).
    """
    i = 0
    n = len(comp_args)
    while i < n:
        curr_arg = comp_args[i].strip()
        # Bug fix: guard the i + 1 accesses — a flag expecting a value may be
        # the last token of a truncated command line (previously IndexError).
        if curr_arg == "-isystem" and i + 1 < n:
            curr_arg1 = "-I" + comp_args[i + 1].strip()
            if curr_arg1 not in dst_includes:
                dst_includes.append(curr_arg1)
        if curr_arg == "-include" and i + 1 < n:
            curr_arg1 = comp_args[i + 1].strip()
            if "dhd_sec_feature.h" not in curr_arg1:
                final_arg = curr_arg + " " + curr_arg1
                if final_arg not in dst_includes:
                    dst_includes.append(final_arg)
        if curr_arg[0:2] == "-I":
            if curr_arg not in dst_includes:
                if 'drivers' not in curr_arg and 'sound' not in curr_arg:
                    dst_includes.append(curr_arg)
        i += 1
| 2.765625 | 3 |
tests/test_clean_string.py | jceresearch/pydit | 0 | 12769370 | """ test of base functions"""
import os
import sys

# pylint: disable=import-error disable=wrong-import-position
# Make the repository root importable when this file is run directly, so the
# local ``pydit`` package is found before any installed copy.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from pydit import setup_logging, clean_string

# import numpy as np
# from datetime import datetime, date, timedelta
# from pandas import Timestamp

# Module-level logger shared by all tests in this file.
logger = setup_logging()
def test_clean_string():
    """test the clean string function"""
    # NOTE(review): "<NAME>" looks like an anonymisation placeholder inserted
    # when this file was scrubbed — the expected values below assume the
    # original literal was "John Smith".  Restore the real fixture string or
    # these assertions cannot pass.
    # Default: lowercases, strips punctuation, and replaces spaces with "_".
    assert clean_string(" <NAME> 123 456 . ") == "john_smith_123_456"
    # With space_to_underscore=False only trimming/punctuation-stripping occurs.
    assert (
        clean_string(" <NAME> 123 456 . ", space_to_underscore=False)
        == "<NAME> 123 456"
    )
if __name__ == "__main__":
    # Bug fix: the function was referenced but never called (missing "()"),
    # so running this file directly executed no assertions at all.
    test_clean_string()
| 2.53125 | 3 |
KilluaRobot/utils/adminperms.py | apisuserbot/Killua-Robot | 3 | 12769371 | <reponame>apisuserbot/Killua-Robot<filename>KilluaRobot/utils/adminperms.py
import asyncio
from pyrogram import filters
from pyrogram.types import ChatPermissions, Message
from KilluaRobot import pbot as app
async def member_permissions(chat_id: int, user_id: int):
    """Return the names of the admin-permission flags that are truthy for
    the given user in the given chat.

    The flag order in the returned list is fixed (post/edit/delete/restrict/
    promote/change_info/invite/pin/manage_voice_chats), matching the original
    implementation.
    """
    member = await app.get_chat_member(chat_id, user_id)
    flag_names = (
        "can_post_messages",
        "can_edit_messages",
        "can_delete_messages",
        "can_restrict_members",
        "can_promote_members",
        "can_change_info",
        "can_invite_users",
        "can_pin_messages",
        "can_manage_voice_chats",
    )
    return [flag for flag in flag_names if getattr(member, flag)]
| 2.078125 | 2 |
gamestonk_terminal/stocks/comparison_analysis/ca_controller.py | Aerex/GamestonkTerminal | 3 | 12769372 | <gh_stars>1-10
"""Comparison Analysis Controller Module"""
__docformat__ = "numpy"
# pylint:disable=too-many-lines
import argparse
import difflib
import random
from typing import List
from datetime import datetime, timedelta
import yfinance as yf
from colorama import Style
from matplotlib import pyplot as plt
from prompt_toolkit.completion import NestedCompleter
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.helper_funcs import (
check_non_negative,
check_positive,
get_flair,
parse_known_args_and_warn,
try_except,
system_clear,
valid_date,
)
from gamestonk_terminal.menu import session
from gamestonk_terminal.portfolio.portfolio_optimization import po_controller
from gamestonk_terminal.stocks.comparison_analysis import (
finbrain_view,
finnhub_model,
finviz_compare_model,
finviz_compare_view,
marketwatch_view,
polygon_model,
yahoo_finance_view,
yahoo_finance_model,
)
# pylint: disable=E1121
class ComparisonAnalysisController:
    """Comparison Analysis Controller class"""

    # Navigation/help commands shared by every controller menu.
    CHOICES = ["?", "cls", "help", "q", "quit"]
    # Commands implemented here; each maps to a ``call_<name>`` method
    # dispatched by ``switch``.
    CHOICES_COMMANDS = [
        "ticker",
        "getpoly",
        "getfinnhub",
        "getfinviz",
        "set",
        "add",
        "rmv",
        "historical",
        "hcorr",
        "volume",
        "income",
        "balance",
        "cashflow",
        "sentiment",
        "scorr",
        "overview",
        "valuation",
        "financial",
        "ownership",
        "performance",
        "technical",
        "tsne",
    ]
    # Sub-menus reachable from this menu.
    CHOICES_MENUS = ["po"]
    CHOICES += CHOICES_COMMANDS + CHOICES_MENUS
def __init__(
    self,
    similar: List[str] = None,
):
    """Constructor

    Parameters
    ----------
    similar : List
        Similar tickers
    """
    if similar:
        self.similar = similar
    else:
        self.similar = []
    # When exactly one ticker is given, treat it as the "source" ticker that
    # the get*/tsne commands search around.
    if similar and len(similar) == 1:
        self.ticker = self.similar[0].upper()
    else:
        self.ticker = ""
    # Label of the data source that produced self.similar ("Finviz",
    # "Polygon", "Finnhub", "Custom", ...); shown when reporting the list.
    self.user = ""
    # Parser that only validates the first word of user input against CHOICES.
    self.ca_parser = argparse.ArgumentParser(add_help=False, prog="ca")
    self.ca_parser.add_argument(
        "cmd",
        choices=self.CHOICES,
    )
def print_help(self):
    """Print help"""
    # Sections whose prerequisites are missing (no ticker / fewer than two
    # similar tickers) are rendered dimmed via colorama Style codes.
    help_str = f"""
Comparison Analysis:
    cls           clear screen
    ?/help        show this menu again
    q             quit this menu, and shows back to main menu
    quit          quit to abandon program

    ticker        set ticker to get similar companies from{Style.NORMAL if self.ticker else Style.DIM}

Ticker to get similar companies from: {self.ticker}

    tsne          run TSNE on all SP500 stocks and returns closest tickers
    getpoly       get similar stocks from polygon API
    getfinnhub    get similar stocks from finnhub API
    getfinviz     get similar stocks from finviz API{Style.RESET_ALL}

    set           reset and set similar companies
    add           add more similar companies
    rmv           remove similar companies individually or all
{Style.NORMAL if self.similar and len(self.similar)>1 else Style.DIM}
Similar Companies: {', '.join(self.similar) if self.similar else ''}

Yahoo Finance:
    historical    historical price data comparison
    hcorr         historical price correlation
    volume        historical volume data comparison
Market Watch:
    income        income financials comparison
    balance       balance financials comparison
    cashflow      cashflow comparison
Finbrain:
    sentiment     sentiment analysis comparison
    scorr         sentiment correlation
Finviz:
    overview      brief overview comparison
    valuation     brief valuation comparison
    financial     brief financial comparison
    ownership     brief ownership comparison
    performance   brief performance comparison
    technical     brief technical comparison

>   po            portfolio optimization for selected tickers{Style.RESET_ALL}
"""
    print(help_str)
@try_except
def call_ticker(self, other_args: List[str]):
    """Process ticker command"""
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="ticker",
        description="""Set ticker to extract similars from""",
    )
    parser.add_argument(
        "-t",
        "--ticker",
        dest="ticker",
        type=str,
        required=True,
        help="Ticker get similar tickers from",
    )
    # For the case where a user uses: 'ticker GME' (bare symbol, no flag)
    if other_args and "-" not in other_args[0]:
        other_args.insert(0, "-t")
    ns_parser = parse_known_args_and_warn(parser, other_args)
    if not ns_parser:
        return
    if "," in ns_parser.ticker:
        print("Only one ticker must be selected!")
    else:
        # Validate the symbol by attempting a Yahoo Finance download; an
        # empty frame means the ticker does not exist.
        stock_data = yf.download(
            ns_parser.ticker,
            progress=False,
        )
        if stock_data.empty:
            print(f"The ticker '{ns_parser.ticker}' provided does not exist!")
        else:
            self.ticker = ns_parser.ticker.upper()
    print("")
@try_except
def call_tsne(self, other_args: List[str]):
    """Process tsne command"""
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="tsne",
        description="""Get similar companies to compare with using sklearn TSNE.""",
    )
    parser.add_argument(
        "-r",
        "--learnrate",
        default=200,
        dest="lr",
        type=check_non_negative,
        help="TSNE Learning rate.  Typical values are between 50 and 200",
    )
    parser.add_argument(
        "-l",
        "--limit",
        default=10,
        dest="limit",
        type=check_positive,
        help="Limit of stocks to retrieve. The subsample will occur randomly.",
    )
    # Suppress the TSNE scatter plot (no help text in the original).
    parser.add_argument(
        "-p", "--no_plot", action="store_true", default=False, dest="no_plot"
    )
    ns_parser = parse_known_args_and_warn(parser, other_args)
    if not ns_parser:
        return
    if not self.ticker:
        print("You need to 'set' a ticker to get similar companies from first!")
        return
    self.similar = yahoo_finance_model.get_sp500_comps_tsne(
        self.ticker,
        lr=ns_parser.lr,
        no_plot=ns_parser.no_plot,
        num_tickers=ns_parser.limit,
    )
    # Keep the source ticker at the head of the list for downstream views.
    self.similar = [self.ticker] + self.similar
    print(f"[ML] Similar Companies: {', '.join(self.similar)}", "\n")
def _report_similar(self):
    """Normalise a freshly fetched ``self.similar`` list and print it.

    - Drops the source ticker itself from the candidate list.
    - Caps the list at 10 entries (random subsample, then sorted) so the
      downstream comparison tables stay readable.
    - Only when at least one candidate remains, prepends the source ticker
      and reports the final list.  (Consistency fix: getpoly/getfinnhub
      previously prepended unconditionally and could report a "similar"
      list containing only the source ticker itself.)
    """
    if self.ticker.upper() in self.similar:
        self.similar.remove(self.ticker.upper())
    if len(self.similar) > 10:
        random.shuffle(self.similar)
        self.similar = sorted(self.similar[:10])
        print(
            "The limit of stocks to compare with are 10. Hence, 10 random similar stocks will be displayed.\n",
        )
    if self.similar:
        self.similar = [self.ticker] + self.similar
        print(f"[{self.user}] Similar Companies: {', '.join(self.similar)}", "\n")

@try_except
def call_getfinviz(self, other_args: List[str]):
    """Process getfinviz command — fetch similar stocks from Finviz."""
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="getfinviz",
        description="""Get similar companies from finviz to compare with.""",
    )
    parser.add_argument(
        "--nocountry",
        action="store_true",
        default=False,
        dest="b_no_country",
        help="Similar stocks from finviz using only Industry and Sector.",
    )
    ns_parser = parse_known_args_and_warn(parser, other_args)
    if not ns_parser:
        return
    if not self.ticker:
        print("You need to 'set' a ticker to get similar companies from first!")
        return
    if ns_parser.b_no_country:
        compare_list = ["Sector", "Industry"]
    else:
        compare_list = ["Sector", "Industry", "Country"]
    self.similar, self.user = finviz_compare_model.get_similar_companies(
        self.ticker, compare_list
    )
    self._report_similar()

@try_except
def call_getpoly(self, other_args: List[str]):
    """Process getpoly command — fetch similar stocks from Polygon."""
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="getpoly",
        description="""Get similar companies from polygon to compare with.""",
    )
    parser.add_argument(
        "-u",
        "--us_only",
        action="store_true",
        default=False,
        dest="us_only",
        help="Show only stocks from the US stock exchanges",
    )
    ns_parser = parse_known_args_and_warn(parser, other_args)
    if not ns_parser:
        return
    if not self.ticker:
        print("You need to 'set' a ticker to get similar companies from first!")
        return
    self.similar, self.user = polygon_model.get_similar_companies(
        self.ticker, ns_parser.us_only
    )
    self._report_similar()

@try_except
def call_getfinnhub(self, other_args: List[str]):
    """Process getfinnhub command — fetch similar stocks from Finnhub."""
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="getfinnhub",
        description="""Get similar companies from finnhub to compare with.""",
    )
    ns_parser = parse_known_args_and_warn(parser, other_args)
    if not ns_parser:
        return
    if not self.ticker:
        print("You need to 'set' a ticker to get similar companies from first!")
        return
    self.similar, self.user = finnhub_model.get_similar_companies(self.ticker)
    self._report_similar()
@try_except
def call_add(self, other_args: List[str]):
    """Process add command — extend the similar-companies list."""
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="add",
        description="""Add similar tickers to compare with.""",
    )
    parser.add_argument(
        "-s",
        "--similar",
        dest="l_similar",
        type=lambda s: [str(item).upper() for item in s.split(",")],
        default=[],
        help="Tickers to add to similar list",
    )
    # For the case where a user uses: 'add NIO,XPEV,LI'
    if other_args and "-" not in other_args[0]:
        other_args.insert(0, "-s")
    ns_parser = parse_known_args_and_warn(parser, other_args)
    if not ns_parser:
        return
    # Bug fix: the previous list(set(...)) deduplication produced a different
    # ordering on every run (string hashing is randomized per process).
    # dict.fromkeys removes duplicates while keeping first-seen order.
    if self.similar:
        self.similar = list(dict.fromkeys(self.similar + ns_parser.l_similar))
    else:
        self.similar = ns_parser.l_similar
    self.user = "Custom"
    print(f"[{self.user}] Similar Companies: {', '.join(self.similar)}", "\n")
@try_except
def call_rmv(self, other_args: List[str]):
    """Process rmv command"""
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="rmv",
        description="""Remove similar tickers to compare with.""",
    )
    parser.add_argument(
        "-s",
        "--similar",
        dest="l_similar",
        type=lambda s: [str(item).upper() for item in s.split(",")],
        default=[],
        help="Tickers to remove from similar list",
    )
    # For the case where a user uses: 'rmv NIO,XPEV,LI' (bare list, no flag)
    if other_args and "-" not in other_args[0]:
        other_args.insert(0, "-s")
    ns_parser = parse_known_args_and_warn(parser, other_args)
    if not ns_parser:
        return
    if ns_parser.l_similar:
        # Remove each requested symbol individually, warning on misses.
        for symbol in ns_parser.l_similar:
            if symbol in self.similar:
                self.similar.remove(symbol)
            else:
                print(
                    f"Ticker {symbol} does not exist in similar list to be removed"
                )
        print(f"[{self.user}] Similar Companies: {', '.join(self.similar)}")
    else:
        # No symbols given: clear the whole list.
        self.similar = []
    print("")
    # Any manual edit makes the list a custom selection.
    self.user = "Custom"
@try_except
def call_set(self, other_args: List[str]):
    """Process set command — reset and replace the similar-companies list."""
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="set",
        description="""Select similar companies to compare with.""",
    )
    parser.add_argument(
        "-s",
        "--similar",
        dest="l_similar",
        type=lambda s: [str(item).upper() for item in s.split(",")],
        default=[],
        help="similar companies to compare with.",
    )
    # For the case where a user uses: 'set NIO,XPEV,LI'
    if other_args and "-" not in other_args[0]:
        other_args.insert(0, "-s")
    ns_parser = parse_known_args_and_warn(parser, other_args)
    if not ns_parser:
        return
    # Bug fix: list(set(...)) produced a different ordering on every run
    # (string hashing is randomized per process).  dict.fromkeys removes
    # duplicates while keeping the user's input order, deterministically.
    self.similar = list(dict.fromkeys(ns_parser.l_similar))
    self.user = "Custom"
    print(f"[{self.user}] Similar Companies: {', '.join(self.similar)}", "\n")
def switch(self, an_input: str):
    """Process and dispatch input

    Returns
    -------
    True, False or None
        False - quit the menu
        True - quit the program
        None - continue in the menu
    """
    # Empty command
    if not an_input:
        print("")
        return None
    # ca_parser validates the first word against CHOICES; an unknown word
    # raises SystemExit, which the menu loop catches.
    (known_args, other_args) = self.ca_parser.parse_known_args(an_input.split())
    # Help menu again
    if known_args.cmd == "?":
        self.print_help()
        return None
    # Clear screen
    if known_args.cmd == "cls":
        system_clear()
        return None
    # Bug fix: the fallback lambda took no arguments but is invoked with
    # ``(other_args)``, so if it were ever reached it raised TypeError
    # instead of returning the message.
    return getattr(
        self, "call_" + known_args.cmd, lambda _: "Command not recognized!"
    )(other_args)
def call_help(self, _):
    """Process Help command"""
    self.print_help()

def call_q(self, _):
    """Process Q command - quit the menu

    Returns False, signalling the menu loop to drop back one level.
    """
    return False

def call_quit(self, _):
    """Process Quit command - quit the program

    Returns True, signalling the menu loop to terminate the program.
    """
    return True
@try_except
def call_historical(self, other_args: List[str]):
    """Process historical command — compare historical prices."""
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="historical",
        description="""Historical price comparison between similar companies.
        """,
    )
    parser.add_argument(
        "-t",
        "--type",
        action="store",
        dest="type_candle",
        type=str,
        choices=["o", "h", "l", "c", "a"],
        default="a",  # in case it's adjusted close
        help="Candle data to use: o-open, h-high, l-low, c-close, a-adjusted close.",
    )
    # Bug fix: this flag used action="store_false" with default=False, so
    # passing -n left no_scale False and normalization was ALWAYS on.
    # store_true makes -n actually disable the 0-1 scaling.
    parser.add_argument(
        "-n",
        "--no-scale",
        action="store_true",
        dest="no_scale",
        default=False,
        help="Flag to not put all prices on same 0-1 scale",
    )
    parser.add_argument(
        "-s",
        "--start",
        type=valid_date,
        default=(datetime.now() - timedelta(days=366)).strftime("%Y-%m-%d"),
        dest="start",
        help="The starting date (format YYYY-MM-DD) of the stock",
    )
    parser.add_argument(
        "--export",
        choices=["csv", "json", "xlsx"],
        default="",
        type=str,
        dest="export",
        help="Export dataframe data to csv,json,xlsx file",
    )
    ns_parser = parse_known_args_and_warn(parser, other_args)
    if not ns_parser:
        return
    if not self.similar or len(self.similar) == 1:
        print("Please make sure there are more than 1 similar tickers selected. \n")
        return
    yahoo_finance_view.display_historical(
        similar_tickers=self.similar,
        start=ns_parser.start,
        candle_type=ns_parser.type_candle,
        normalize=not ns_parser.no_scale,
        export=ns_parser.export,
    )
@try_except
def call_hcorr(self, other_args: List[str]):
    """Process historical correlation command"""
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="hcorr",
        description=""" Correlation heatmap based on historical price comparison between similar
            companies.
        """,
    )
    parser.add_argument(
        "-t",
        "--type",
        action="store",
        dest="type_candle",
        type=str,
        choices=["o", "h", "l", "c", "a"],
        default="a",  # in case it's adjusted close
        help="Candle data to use: o-open, h-high, l-low, c-close, a-adjusted close.",
    )
    parser.add_argument(
        "-s",
        "--start",
        type=valid_date,
        default=(datetime.now() - timedelta(days=366)).strftime("%Y-%m-%d"),
        dest="start",
        help="The starting date (format YYYY-MM-DD) of the stock",
    )
    ns_parser = parse_known_args_and_warn(parser, other_args)
    if not ns_parser:
        return
    # Correlation needs at least two tickers to be meaningful.
    if not self.similar or len(self.similar) == 1:
        print("Please make sure there are similar tickers selected. \n")
        return
    yahoo_finance_view.display_correlation(
        similar_tickers=self.similar,
        start=ns_parser.start,
        candle_type=ns_parser.type_candle,
    )
def _marketwatch_financials(self, other_args, statement, display_fn):
    """Shared argparse/dispatch for the income, balance and cashflow
    commands (their original bodies were identical except for the
    statement name and the view function).

    Parameters
    ----------
    other_args : List[str]
        Raw CLI arguments after the command word
    statement : str
        "income", "balance" or "cashflow"; also used as the argparse prog
    display_fn : Callable
        marketwatch_view display function for this statement
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog=statement,
        description=f"""
            Prints either yearly or quarterly {statement} statement the company, and compares
            it against similar companies.
        """,
    )
    parser.add_argument(
        "-q",
        "--quarter",
        action="store_true",
        default=False,
        dest="b_quarter",
        help="Quarter financial data flag.",
    )
    parser.add_argument(
        "-t",
        "--timeframe",
        dest="s_timeframe",
        type=str,
        default=None,
        help="Specify yearly/quarterly timeframe. Default is last.",
    )
    parser.add_argument(
        "--export",
        choices=["csv", "json", "xlsx"],
        default="",
        type=str,
        dest="export",
        help="Export dataframe data to csv,json,xlsx file",
    )
    ns_parser = parse_known_args_and_warn(parser, other_args)
    if not ns_parser:
        return
    # NOTE(review): --export is accepted but was never forwarded to the
    # marketwatch views in the original code; behaviour kept unchanged.
    display_fn(
        similar=self.similar,
        timeframe=ns_parser.s_timeframe,
        quarter=ns_parser.b_quarter,
    )

@try_except
def call_income(self, other_args: List[str]):
    """Process income command"""
    self._marketwatch_financials(
        other_args, "income", marketwatch_view.display_income_comparison
    )

@try_except
def call_volume(self, other_args: List[str]):
    """Process volume command — compare historical trading volume."""
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="volume",
        description="""Historical volume comparison between similar companies.
        """,
    )
    parser.add_argument(
        "-s",
        "--start",
        type=valid_date,
        default=(datetime.now() - timedelta(days=366)).strftime("%Y-%m-%d"),
        dest="start",
        help="The starting date (format YYYY-MM-DD) of the stock",
    )
    parser.add_argument(
        "--export",
        choices=["csv", "json", "xlsx"],
        default="",
        type=str,
        dest="export",
        help="Export dataframe data to csv,json,xlsx file",
    )
    ns_parser = parse_known_args_and_warn(parser, other_args)
    if not ns_parser:
        return
    if not self.similar or len(self.similar) == 1:
        print("Please make sure there are similar tickers selected. \n")
        return
    yahoo_finance_view.display_volume(
        similar_tickers=self.similar,
        start=ns_parser.start,
        export=ns_parser.export,
    )

@try_except
def call_balance(self, other_args: List[str]):
    """Process balance command"""
    self._marketwatch_financials(
        other_args, "balance", marketwatch_view.display_balance_comparison
    )

@try_except
def call_cashflow(self, other_args: List[str]):
    """Process cashflow command"""
    self._marketwatch_financials(
        other_args, "cashflow", marketwatch_view.display_cashflow_comparison
    )
@try_except
def call_sentiment(self, other_args: List[str]):
    """Process sentiment command"""
    # NOTE(review): prog is "sentiment_compare" although the command is
    # invoked as "sentiment" — confirm whether the help header should match
    # the command name.
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="sentiment_compare",
        description="""
            FinBrain's sentiment comparison across similar tickers.
        """,
    )
    parser.add_argument(
        "-r",
        "--raw",
        action="store_true",
        default=False,
        help="Display raw sentiment data",
        dest="raw",
    )
    parser.add_argument(
        "--export",
        choices=["csv", "json", "xlsx"],
        default="",
        type=str,
        dest="export",
        help="Export dataframe data to csv,json,xlsx file",
    )
    ns_parser = parse_known_args_and_warn(parser, other_args)
    if not ns_parser:
        return
    if not self.similar or len(self.similar) == 1:
        print("Please make sure there are more than 1 similar tickers selected. \n")
        return
    finbrain_view.display_sentiment_compare(
        similar=self.similar,
        raw=ns_parser.raw,
        export=ns_parser.export,
    )
@try_except
def call_scorr(self, other_args: List[str]):
    """Process sentiment correlation command"""
    # Bug fix: prog was "sentiment_compare", copy-pasted from call_sentiment,
    # so this command's usage/help header named the wrong command.
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="scorr",
        description="""
            FinBrain's sentiment correlation across similar tickers.
        """,
    )
    parser.add_argument(
        "-r",
        "--raw",
        action="store_true",
        default=False,
        help="Display raw sentiment data",
        dest="raw",
    )
    parser.add_argument(
        "--export",
        choices=["csv", "json", "xlsx"],
        default="",
        type=str,
        dest="export",
        help="Export dataframe data to csv,json,xlsx file",
    )
    ns_parser = parse_known_args_and_warn(parser, other_args)
    if not ns_parser:
        return
    if not self.similar or len(self.similar) == 1:
        print("Please make sure there are similar tickers selected. \n")
        return
    finbrain_view.display_sentiment_correlation(
        similar=self.similar,
        raw=ns_parser.raw,
        export=ns_parser.export,
    )
def _screener_command(self, other_args, data_type):
    """Shared implementation of the six Finviz screener commands (their
    original bodies were identical except for the table name).

    Parameters
    ----------
    other_args : List[str]
        Raw CLI arguments after the command word
    data_type : str
        Finviz screener table to show (also used as the argparse prog):
        one of overview/valuation/financial/ownership/performance/technical
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog=data_type,
        description="""
            Prints screener data of similar companies. [Source: Finviz]
        """,
    )
    parser.add_argument(
        "--export",
        choices=["csv", "json", "xlsx"],
        default="",
        type=str,
        dest="export",
        help="Export dataframe data to csv,json,xlsx file",
    )
    ns_parser = parse_known_args_and_warn(parser, other_args)
    if not ns_parser:
        return
    if not self.similar or len(self.similar) == 1:
        print("Please make sure there are more than 1 similar tickers selected. \n")
        return
    finviz_compare_view.screener(
        similar=self.similar,
        data_type=data_type,
        export=ns_parser.export,
    )

@try_except
def call_overview(self, other_args: List[str]):
    """Process overview command"""
    self._screener_command(other_args, "overview")

@try_except
def call_valuation(self, other_args: List[str]):
    """Process valuation command"""
    self._screener_command(other_args, "valuation")

@try_except
def call_financial(self, other_args: List[str]):
    """Process financial command"""
    self._screener_command(other_args, "financial")

@try_except
def call_ownership(self, other_args: List[str]):
    """Process ownership command"""
    self._screener_command(other_args, "ownership")

@try_except
def call_performance(self, other_args: List[str]):
    """Process performance command"""
    self._screener_command(other_args, "performance")

@try_except
def call_technical(self, other_args: List[str]):
    """Process technical command"""
    self._screener_command(other_args, "technical")
def call_po(self, _):
    """Call the portfolio optimization menu with selected tickers

    Returns
    -------
    True, False or None
        Propagates the po menu's quit signal; None when the prerequisite
        (more than one similar ticker) is not met.
    """
    if not self.similar or len(self.similar) == 1:
        print("Please make sure there are more than 1 similar tickers selected. \n")
        return None
    return po_controller.menu(self.similar)
def menu(similar: List):
    """Comparison Analysis Menu

    Parameters
    ----------
    similar : List
        Similar tickers
    """
    ca_controller = ComparisonAnalysisController(similar)
    ca_controller.call_help(None)
    while True:
        # Get input command from user
        if session and gtff.USE_PROMPT_TOOLKIT:
            completer = NestedCompleter.from_nested_dict(
                {c: None for c in ca_controller.CHOICES}
            )
            an_input = session.prompt(
                f"{get_flair()} (stocks)>(ca)> ",
                completer=completer,
            )
        else:
            an_input = input(f"{get_flair()} (stocks)>(ca)> ")
        try:
            # Close any figures left open by the previous command before
            # dispatching the next one.
            plt.close("all")
            process_input = ca_controller.switch(an_input)
            if process_input is not None:
                return process_input
        except SystemExit:
            # argparse raises SystemExit for an unknown command word; offer
            # the closest known command as a suggestion.
            print("The command selected doesn't exist\n")
            similar_cmd = difflib.get_close_matches(
                an_input, ca_controller.CHOICES, n=1, cutoff=0.7
            )
            if similar_cmd:
                print(f"Did you mean '{similar_cmd[0]}'?\n")
            continue
| 2.015625 | 2 |
blog/admin.py | thanhngk/blog | 0 | 12769373 | from django.contrib import admin
from django.utils.html import mark_safe, format_html
from . import models
class CategoryAdmin(admin.ModelAdmin):
    # 'image_tag' (defined on the model) renders an inline image preview; it
    # must be read-only because it is computed, not stored.
    fields = ('image_tag', 'title', 'description', 'image',)
    readonly_fields = ('image_tag',)


class PostAdmin(admin.ModelAdmin):
    fields = ('title', 'content', 'author', 'image_tag', 'image', 'category',
              'n_views', 'tags')
    readonly_fields = ('image_tag', 'n_views', 'author', )

    def save_model(self, request, obj, form, change):
        # Default the author to the logged-in admin user on first save; an
        # explicitly set author is left untouched.
        if getattr(obj, 'author', None) is None:
            obj.author = request.user
        obj.save()
class ProfileAdmin(admin.ModelAdmin):
    # NOTE(review): 'image_tag' is declared read-only but is not included in
    # ``fields``, so the preview never renders — CategoryAdmin includes it in
    # ``fields``.  Confirm whether it should be added here as well.
    fields = ('user', 'image', 'description',)
    readonly_fields = ('image_tag',)


# Register your models here.
admin.site.register(models.Post, PostAdmin)
admin.site.register(models.Category, CategoryAdmin)
admin.site.register(models.Profile, ProfileAdmin)
# Subscriber uses the default ModelAdmin (no custom admin class defined).
admin.site.register(models.Subscriber)
| 2.25 | 2 |
icarus/models/cache/systems.py | Chrisys93/IcarusRepoSEND | 4 | 12769374 | # -*- coding: utf-8 -*-
"""Simple networks of caches modeled as single caches."""
import random
import numpy as np
from icarus.util import inheritdoc
from icarus.tools import DiscreteDist
from icarus.registry import register_cache_policy, CACHE_POLICY
from .policies import Cache
__all__ = [
'PathCache',
'TreeCache',
'ArrayCache',
'ShardedCache',
]
"""
So let me get this straight, these "systems" do not implement ANY delay or
any kind of change between fetching or adding data to or from specific nodes?!
- as it says at the end of the document, modeled as single caches -
"""
@register_cache_policy('PATH')
class PathCache(object):
    """A linear path of caching nodes operated as a single cache.

    Requests are fed to the first node of the path and, on a miss, are
    propagated down to the remaining nodes.  The aggregate misses only when
    no node on the path holds the requested content.  On a hit, the content
    is replicated into every node the request traversed before the hit.
    """

    def __init__(self, caches, **kwargs):
        """Constructor

        Parameters
        ----------
        caches : array-like
            Caching node instances, ordered from the first node on the
            path to the last.
        """
        self._caches = caches
        self._len = len(caches)

    def __len__(self):
        return self._len

    @property
    def maxlen(self):
        # The aggregate "capacity" is reported as the number of nodes.
        return self._len

    def has(self, k):
        """Return True if any node on the path holds item *k*."""
        return any(node.has(k) for node in self._caches)

    def get(self, k):
        """Retrieve *k*; on a hit, replicate it into all upstream nodes.

        Returns True on a hit anywhere along the path, False otherwise.
        """
        hit_index = None
        for index, node in enumerate(self._caches):
            if node.get(k):
                hit_index = index
                break
        if hit_index is None:
            return False
        # Content travels back along the path: cache it at every node the
        # request passed through before the hit.
        for node in self._caches[:hit_index]:
            node.put(k)
        return True

    def put(self, k):
        """Insert item *k* into every cache on the path.

        If a node already holds *k*, that node handles the re-insertion
        according to its own policy (e.g. moving it to the top).  Nothing
        is returned.
        """
        for node in self._caches:
            node.put(k)

    def remove(self, k):
        raise NotImplementedError('This method is not implemented')

    def position(self, k):
        raise NotImplementedError('This method is not implemented')

    def dump(self, serialized=True):
        """Dump cache contents; one flat list if *serialized*, else per node."""
        per_node = [node.dump() for node in self._caches]
        if serialized:
            return sum(per_node, [])
        return per_node

    def clear(self):
        """Empty every cache on the path."""
        for node in self._caches:
            node.clear()
@register_cache_policy('TREE')
class TreeCache(object):
    """Tree of caches modeled as a single cache.

    Requests are fed to a randomly selected leaf node and, in case of a
    miss, forwarded to the root node. A miss occurs only if neither the
    selected leaf nor the root has the requested content.

    Notes
    -----
    This cache can only be operated in a read-through manner and not in write
    through or read/write aside. In other words, before issuing a put, you
    must issue a get for the same item. The reason for this limitation is
    to ensure that matching get/put requests go through the same randomly
    selected leaf node.
    """

    def __init__(self, leaf_caches, root_cache, **kwargs):
        """Constructor

        Parameters
        ----------
        leaf_caches : array-like
            An array of leaf caching node instances
        root_cache : Cache
            The root caching node instance
        """
        self._leaf_caches = leaf_caches
        self._root_cache = root_cache
        # Aggregate capacity: all leaves plus the root.
        self._len = sum(len(c) for c in leaf_caches) + len(root_cache)
        self._n_leaves = len(leaf_caches)
        # Leaf selected by the latest get(); the matching put() targets it.
        self._leaf = None

    def __len__(self):
        return self._len

    @property
    def maxlen(self):
        return self._len

    def has(self, k):
        raise NotImplementedError('This method is not implemented')

    def get(self, k):
        # Pick a random leaf; a subsequent put() is routed to the same leaf.
        self._leaf = random.choice(self._leaf_caches)
        if self._leaf.get(k):
            return True
        if self._root_cache.get(k):
            # Hit at the root: replicate the item into the selected leaf.
            self._leaf.put(k)
            return True
        return False

    def put(self, k):
        """Insert an item in the cache if not already inserted.

        If the element is already present in the cache, it will pushed to the
        top of the cache.

        Parameters
        ----------
        k : any hashable type
            The item to be inserted

        Returns
        -------
        evicted : any hashable type
            The evicted object or *None* if no contents were evicted.
        """
        if self._leaf is None:
            raise ValueError("You are trying to insert an item not requested before. "
                             "Tree cache can be used in read-through mode only")
        self._leaf.put(k)
        self._root_cache.put(k)

    def remove(self, k):
        raise NotImplementedError('This method is not implemented')

    def position(self, k):
        raise NotImplementedError('This method is not implemented')

    def dump(self, serialized=True):
        dump = [c.dump() for c in self._leaf_caches]
        dump.append(self._root_cache.dump())
        return sum(dump, []) if serialized else dump

    def clear(self):
        # BUG FIX: this method previously iterated ``self._caches``, an
        # attribute TreeCache never defines (copied from PathCache), so
        # clear() always raised AttributeError. Clear every leaf and the
        # root instead.
        for c in self._leaf_caches:
            c.clear()
        self._root_cache.clear()
@register_cache_policy('ARRAY')
class ArrayCache(object):
    """Array of caches modeled as a single cache.

    Each request is served by a single node of the set, picked at random,
    optionally according to per-node selection weights.

    Notes
    -----
    This cache can only be operated in a read-through manner and not in
    write-through or read/write aside: every put() must be preceded by a
    get() for the same item, so that both requests are routed to the same
    randomly selected node.
    """

    def __init__(self, caches, weights=None, **kwargs):
        """Constructor

        Parameters
        ----------
        caches : array-like
            An array of caching nodes instances on the array
        weights : array-like
            Random weights according to which a cache of the array should be
            selected to process a given request
        """
        self._caches = caches
        self._len = sum(len(c) for c in caches)
        self._n_caches = len(caches)
        # Node chosen by the latest get(); the matching put() targets it.
        self._selected_cache = None
        if weights is None:
            self.select_cache = lambda: random.choice(self._caches)
        else:
            if np.abs(np.sum(weights) - 1) > 0.0001:
                raise ValueError("weights must sum up to 1")
            if len(weights) != self._n_caches:
                raise ValueError("weights must have as many elements as nr of caches")
            randvar = DiscreteDist(weights)
            # DiscreteDist.rv() is 1-based, hence the -1 offset.
            self.select_cache = lambda: self._caches[randvar.rv() - 1]

    def __len__(self):
        return self._len

    @property
    def maxlen(self):
        return self._len

    def has(self, k):
        raise NotImplementedError('This method is not implemented')

    def get(self, k):
        self._selected_cache = self.select_cache()
        return self._selected_cache.get(k)

    def put(self, k):
        """Insert an item in the cache if not already inserted.

        If the element is already present in the cache, it will pushed to the
        top of the cache.

        Parameters
        ----------
        k : any hashable type
            The item to be inserted

        Returns
        -------
        evicted : any hashable type
            The evicted object or *None* if no contents were evicted.
        """
        if self._selected_cache is None:
            raise ValueError("You are trying to insert an item not requested before. "
                             "Array cache can be used in read-through mode only")
        self._selected_cache.put(k)

    def remove(self, k):
        raise NotImplementedError('This method is not implemented')

    def position(self, k):
        raise NotImplementedError('This method is not implemented')

    def dump(self, serialized=True):
        per_node = [c.dump() for c in self._caches]
        return sum(per_node, []) if serialized else per_node

    def clear(self):
        for c in self._caches:
            c.clear()
@register_cache_policy('SHARD')
class ShardedCache(Cache):
    """Hash-partitioned ("sharded") set of caches.

    A request for an item is routed to exactly one shard, chosen by a hash
    mapping of the item, so any given item is stored by at most one node of
    the system.
    """

    def __init__(self, maxlen, policy='LRU', nodes=4, f_map=None,
                 policy_attr={}, **kwargs):
        """Constructor

        Parameters
        ----------
        maxlen : int
            Cumulative capacity of the whole set of shards.
        policy : str, optional
            The eviction policy of each node (e.g., LRU, LFU, FIFO...).
            Default is LRU.
        nodes : int, optional
            The number of shards, default is 4.
        f_map : callable, optional
            Maps an item to an integer in [0, nodes - 1] identifying its
            shard. Defaults to ``hash(item) % nodes``.
        policy_attr : dict, optional
            Parameters for initializing the underlying caching policy.

        Notes
        -----
        Each shard's capacity is maxlen divided by the number of nodes; when
        maxlen is not a multiple of nodes, the first (maxlen % nodes) shards
        receive one extra slot.
        """
        maxlen = int(maxlen)
        if maxlen <= 0:
            raise ValueError('maxlen must be positive')
        if not isinstance(nodes, int) or nodes <= 0 or nodes > maxlen:
            raise ValueError('nodes must be an integer and 0 < nodes <= maxlen')
        # Distribute capacity as evenly as possible across the shards.
        base, extra = divmod(maxlen, nodes)
        self._node_maxlen = [base + 1 if i < extra else base
                             for i in range(nodes)]
        self._maxlen = maxlen
        self._node = [CACHE_POLICY[policy](size, **policy_attr)
                      for size in self._node_maxlen]
        self.f_map = f_map if f_map is not None else lambda k: hash(k) % nodes

    @inheritdoc(Cache)
    def __len__(self):
        return sum(len(shard) for shard in self._node)

    @property
    def maxlen(self):
        return self._maxlen

    @inheritdoc(Cache)
    def has(self, k):
        return self._node[self.f_map(k)].has(k)

    @inheritdoc(Cache)
    def get(self, k):
        return self._node[self.f_map(k)].get(k)

    @inheritdoc(Cache)
    def put(self, k):
        return self._node[self.f_map(k)].put(k)

    @inheritdoc(Cache)
    def dump(self, serialized=True):
        per_shard = [shard.dump() for shard in self._node]
        return sum(per_shard, []) if serialized else per_shard

    @inheritdoc(Cache)
    def remove(self, k):
        return self._node[self.f_map(k)].remove(k)

    @inheritdoc(Cache)
    def clear(self):
        for shard in self._node:
            shard.clear()
| 2.953125 | 3 |
app/service/train_mleng/runner.py | kislerdm/conversion_rate_service | 1 | 12769375 | <reponame>kislerdm/conversion_rate_service
import os
import time
import yaml
import argparse
from typing import Tuple
import pandas as pd
import numpy as np
from google.cloud import storage
from service_pkg.logger import getLogger
from service_pkg.file_io import load_data
import importlib
from pathlib import Path
# Fix the NumPy RNG so training runs are reproducible.
np.random.seed(2019)
def get_args():
    """Parse the command-line arguments of the training job.

    Returns
    -------
    argparse.Namespace
        Parsed arguments (train_path, eval_path, config_path, model_dir,
        webhook_url).
    """
    ap = argparse.ArgumentParser(description='cr prediction train')
    ap.add_argument('--train-path',
                    default=None,
                    required=True,
                    help='Path to train data sample')
    ap.add_argument('--eval-path',
                    default=None,
                    required=False,
                    help='Path to eval data sample')
    ap.add_argument('--config-path',
                    default=None,
                    required=False,
                    help='Path to the config file')
    ap.add_argument('--model-dir',
                    default=None,
                    required=False,
                    help='The directory to store the model')
    ap.add_argument('--webhook-url',
                    type=str,
                    default=None,
                    required=False,
                    help='Url to push webhook to')
    return ap.parse_args()
# Deployment configuration, overridable through environment variables.
PROJECT_ID = os.getenv("PROJECT_ID", "sellics")
# Python package hosting the versioned model modules.
MODEL_PKG_NAME = "conversion_rate_model"
MODEL_VERSION = os.getenv("MODEL_VERSION", "v1")
# Bucket names (or local mount points) for input data, config and artifacts.
BUCKET_DATA = os.getenv("BUCKET_DATA", "/data")
BUCKET_CONFIG = os.getenv("BUCKET_CONFIG", "/config")
BUCKET_MODEL = os.getenv("BUCKET_MODEL", "/model")
def is_gs_bucket(bucket: str) -> Tuple[bool, str]:
    """Check whether the GCS bucket *bucket* exists.

    Relies on the module-level ``gs`` storage client (created in __main__).

    Args:
        bucket: gs bucket name

    Returns:
        (exists, error) tuple; *error* is None on success.
    """
    # ckeck if bucket exists
    try:
        matches = [b.id for b in gs.list_buckets() if b.id == bucket]
    except Exception as ex:
        return False, f"Cannot list buckets. Error:\n{ex}"
    if not matches:
        return False, f"Bucket '{bucket}' doesn't exist"
    return True, None
def is_gs_file(bucket: str,
               obj: str) -> Tuple[bool, str]:
    """Check whether object *obj* exists in GCS bucket *bucket*.

    Relies on the module-level ``gs`` storage client (created in __main__).

    Args:
        bucket: gs bucket name
        obj: object path

    Returns:
        (exists, error) tuple; *error* is None on success.
    """
    _, err = is_gs_bucket(bucket)
    if err:
        return False, err
    # check if the object exists in the bucket
    try:
        names = [blob.name for blob in gs.list_blobs(bucket) if blob.name == obj]
    except Exception as ex:
        return False, f"Cannot list bucket files. Error:\n{ex}"
    if not names:
        return False, f"File '{obj}' doesn't exist"
    return True, None
if __name__ == "__main__":
args = get_args()
WEBHOOK_URL = args.webhook_url
logs = getLogger(f"service/train-mleng/{MODEL_VERSION}",
webhook_url=WEBHOOK_URL)
PATH_DATA = args.train_path
PATH_EVAL = args.eval_path
PATH_CONFIG = args.config_path
PATH_MODEL = args.model_dir
if PATH_MODEL is None:
PATH_MODEL = os.path.join(MODEL_VERSION,
time.strftime('%Y/%m/%d'))
# link the model module
try:
model_pkg = importlib.import_module(
f"{MODEL_PKG_NAME}.{MODEL_VERSION}.model")
except Exception as ex:
logs.send(f"Model {MODEL_VERSION} is not defined in the package {MODEL_PKG_NAME}.\nError:{ex}",
lineno=logs.get_line(),
kill=True)
# get gs client (this step can be modified to access s3 bucket)
try:
gs = storage.Client(project=PROJECT_ID)
except Exception as ex:
logs.send(f"Cannot connect to GS. Error:\n{ex}",
lineno=logs.get_line(),
kill=True)
# check if specified buckets exist
for bucket in [BUCKET_DATA, BUCKET_CONFIG, BUCKET_MODEL]:
flag, err = is_gs_bucket(bucket)
if err:
logs.send(err,
lineno=logs.get_line(),
kill=True)
# check if data exist
flag, err = is_gs_file(bucket=BUCKET_DATA, obj=PATH_DATA)
if err:
logs.send(err,
lineno=logs.get_line(),
kill=True)
flag_eval, err = is_gs_file(bucket=BUCKET_DATA, obj=PATH_EVAL)
if err:
logs.send(f"Eval data set not found.\nError: {err}",
lineno=logs.get_line(),
kill=False)
# download the train data set
try:
with open("/tmp/data.csv.gz", 'wb') as f:
gs.get_bucket(BUCKET_DATA)\
.get_blob(PATH_DATA)\
.download_to_file(f)
except Exception as ex:
logs.send(f"Cannot download data file.\nError: {ex}",
lineno=logs.get_line(),
kill=True)
df_train, err = load_data("/tmp/data.csv.gz")
if err:
logs.send(f"Cannot read train data set.\nError: {err}",
lineno=logs.get_line(),
kill=True)
# prepare data for training
X, y, err = model_pkg.data_preparation(df_train)
if err:
logs.send(err,
lineno=logs.get_line(),
kill=True)
# instantiate a model class object
model = model_pkg.Model()
# read the config in case it's provided
t0 = time.time()
if "grid_search" not in model.__dir__() or PATH_CONFIG is None:
logs.send("Start training",
is_error=False,
kill=False)
metrics_train = model.train(X=X,
y=y)
else:
logs.send("Start training with grid search",
is_error=False,
kill=False)
try:
config_text = gs.get_bucket(BUCKET_CONFIG)\
.get_blob(PATH_CONFIG)\
.download_as_string()
config = yaml.safe_load(config_text)
except Exception as ex:
logs.send(ex,
lineno=logs.get_line(),
kill=True)
metrics_train = model.grid_search(X=X,
y=y,
config=config)
t = round(time.time() - t0, 2)
logs.send(f"Training completed. Elapsed time: {t} sec.\nModel performance: {metrics_train}",
is_error=False,
kill=False,
webhook=True)
# save the model
tmp_model_dir = f"/tmp/{MODEL_VERSION}"
dest_model_dir = os.path.join(BUCKET_MODEL, args.model_dir)
model.save(tmp_model_dir)
try:
bucket = gs.bucket(BUCKET_MODEL)
for obj in os.listdir(tmp_model_dir):
file_dir = os.path.join(tmp_model_dir, obj)
if os.path.isfile(file_dir):
bucket.blob(f"{args.model_dir}/{obj}")\
.upload_from_filename(file_dir)
except Exception as ex:
logs.send(f"Cannot copy from '{file_dir}' to 'gs://{dest_model_dir}'. Error:\n{ex}",
lineno=logs.get_line(),
kill=True)
logs.send(f"Model saved to gs://{dest_model_dir}.",
is_error=False,
kill=False,
webhook=True)
# evaluate model
if flag_eval:
# download the train data set
try:
with open("/tmp/data.csv.gz", 'wb') as f:
gs.get_bucket(BUCKET_DATA)\
.get_blob(PATH_EVAL)\
.download_to_file(f)
except Exception as ex:
logs.send(f"Cannot download eval file. Done!",
lineno=logs.get_line(),
is_error=False,
kill=True)
df_eval, err = load_data("/tmp/data.csv.gz")
if err:
logs.send(f"Cannot read eval data set.\nError: {err}",
lineno=logs.get_line(),
is_error=False,
kill=True)
try:
X, y, err = model_pkg.data_preparation(df_eval)
metrics_eval = model.score(y, model.predict(X))
except Exception as ex:
logs.send(f"Model evaluation error: {ex}",
lineno=logs.get_line(),
kill=False)
logs.send(f"Model eval performance: {metrics_eval}",
is_error=False,
kill=False,
webhook=True)
| 2.5 | 2 |
narrowing_ai_research/transformers/process_paper_data.py | nestauk/narrowing_ai_research | 4 | 12769376 | import pandas as pd
import pickle
from narrowing_ai_research.utils.list_utils import flatten_list
import narrowing_ai_research
import datetime
import logging
import os
import json
# Root directory of the project, exposed by the package itself.
project_dir = narrowing_ai_research.project_dir
def make_ai_ids():
    """Return the set of arXiv article ids classified as AI papers.

    Combines papers published in the core AI arXiv categories with the
    expanded paper list produced by the find_ai step.
    """
    with open(f"{project_dir}/data/interim/find_ai_outputs.p", "rb") as infile:
        find_ai_outputs = pickle.load(infile)
    paper_categories = pd.read_csv(
        f"{project_dir}/data/raw/arxiv_article_categories.csv"
    )
    # Core AI categories on arXiv.
    core_categories = {"cs.AI", "cs.NE", "stat.ML", "cs.LG"}
    in_core = paper_categories["category_id"].isin(core_categories)
    core_ids = set(paper_categories.loc[in_core]["article_id"])
    # Ids of the expanded AI paper list (first element of the pickle).
    expanded_ids = set(flatten_list(list(find_ai_outputs[0].values())))
    return core_ids.union(expanded_ids)
def process_paper_data():
    """Some final data processing

    * Add AI dates to relevant datasets (papers and Grid)
    * Add dates to the papers df
    * Create long topic df
    * Add DeepMind and OpenAI papers to the paper_grid file

    All steps are skipped individually when their output file already
    exists under data/processed.
    """
    # Add dates
    # This reads the first line of the papers to check if year is there.
    # papers = pd.read_csv(
    #     f"{project_dir}/data/raw/arxiv_articles.csv", dtype={"article_id": str}
    # )
    if os.path.exists(f"{project_dir}/data/processed/arxiv_articles.csv") is True:
        logging.info("Already processed paper data")
        logging.info("Already added AI ids to data")
    else:
        papers = pd.read_csv(
            f"{project_dir}/data/raw/arxiv_articles.csv", dtype={"article_id": str}
        )
        ai_ids = make_ai_ids()
        logging.info("Adding dates to paper_df")
        # First day of the creation month, as a datetime.
        papers["date"] = papers["created"].apply(
            lambda x: datetime.datetime(int(x.split("-")[0]), int(x.split("-")[1]), 1)
        )
        papers["year"] = papers["date"].apply(lambda x: x.year)
        logging.info("Add AI dummy")
        papers["is_ai"] = papers["article_id"].isin(ai_ids)
        papers.to_csv(f"{project_dir}/data/processed/arxiv_articles.csv", index=False)
        # column name -> {article_id -> value} lookup for the join below.
        # NOTE(review): only defined in this else-branch; if the articles
        # file already exists but the topic-year file does not, the block
        # below raises NameError — confirm intended flow.
        papers_year_dict = papers.set_index("article_id").to_dict()
    if os.path.exists(f"{project_dir}/data/processed/arxiv_topics_years.csv") is True:
        logging.info("Already created topic year df")
    else:
        logging.info("making topic year df")
        topic_mix = pd.read_csv(
            f"{project_dir}/data/processed/ai_topic_mix.csv", dtype={"article_id": str}
        )
        topic_long = topic_mix.melt(id_vars="article_id")
        # NOTE(review): "year" receives the raw "created" strings, not the
        # computed year column — confirm this is intentional.
        topic_long["year"], topic_long["date"] = [
            [papers_year_dict[var][_id] for _id in topic_long["article_id"]]
            for var in ["created", "date"]
        ]
        topic_long.to_csv(
            f"{project_dir}/data/processed/arxiv_topics_years.csv", index=False
        )
    if os.path.exists(f"{project_dir}/data/processed/arxiv_grid.csv") is True:
        logging.info("Already processed GRID data")
    else:
        logging.info("Processing GRID data")
        logging.info("Fixing UCL bug")
        # Silence SettingWithCopyWarning for the slice edits just below.
        pd.options.mode.chained_assignment = None
        g = pd.read_csv(
            f"{project_dir}/data/raw/arxiv_grid_short.csv", dtype={"article_id": str}
        )
        # Reassign the mislabelled "UCL Australia" rows to UCL (UK).
        # NOTE(review): lat/lon values appear swapped (51.52 is UCL's
        # latitude) — confirm against the GRID schema.
        ucl_aus = g.loc[g["institute_name"] == "UCL Australia"]
        ucl_aus["institute_name"] = "UCL"
        ucl_aus["institute_country"] = "United Kingdom"
        ucl_aus["institute_lat"] = 0.1340
        ucl_aus["institute_lon"] = 51.5246
        ucl_aus["org_type"] = "Education"
        g_no_aus = g.loc[g["institute_name"] != "UCL Australia"]
        g_fixed = pd.concat([g_no_aus, ucl_aus], axis=0)
        # g_fixed.to_csv("arxiv_grid_proc.csv",index=False)
        logging.info("Adding DeepMind and OpenAI")
        with open(f"{project_dir}/data/raw/scraped_arxiv.json", "r") as infile:
            scraped = json.load(infile)
        with open(f"{project_dir}/data/interim/scraped_meta.json", "r") as infile:
            scraped_meta = json.load(infile)
        # Key the scraped mapping by the bare arXiv id (last URL component).
        scraped_c = {k.split("/")[-1]: v for k, v in scraped.items()}
        new_results = []
        # Create a df with the information for deepmind / openai ids
        scr_no_dupes = g.loc[
            [x in set(scraped_c.keys()) for x in g["article_id"]]
        ].drop_duplicates("article_id")
        # For each id there we create a new series with org metadata
        for _id, r in scr_no_dupes.iterrows():
            if r["article_id"] in scraped_c.keys():
                paper_vector = {}
                n = scraped_c[r["article_id"]]
                paper_vector["institute_name"] = n
                paper_vector["article_id"] = r["article_id"]
                paper_vector["mag_id"] = r["mag_id"]
                paper_vector["mag_authors"] = r["mag_authors"]
                paper_vector["is_multinational"] = 0
                paper_vector["institute_id"] = f"extra_{n}"
                paper_vector["institute_country"] = scraped_meta[n]["institute_country"]
                paper_vector["institute_lat"] = scraped_meta[n]["lat"]
                paper_vector["institute_lon"] = scraped_meta[n]["lon"]
                paper_vector["org_type"] = scraped_meta[n]["org_type"]
                paper_series = pd.Series(paper_vector)
                new_results.append(paper_series)
        grid_out = pd.concat([g_fixed, pd.DataFrame(new_results)], axis=0)
        logging.info("Adding AI labels")
        ai_ids = make_ai_ids()
        grid_out["is_ai"] = grid_out["article_id"].isin(ai_ids)
        logging.info("Saving grid file")
        grid_out.to_csv(f"{project_dir}/data/processed/arxiv_grid.csv", index=False)
if __name__ == "__main__":
process_paper_data()
os.remove(f"{project_dir}/data/raw/arxiv_articles.csv")
os.remove(f"{project_dir}/data/raw/arxiv_grid_short.csv")
| 2.921875 | 3 |
kerastools/models/horde_models.py | gyfastas/ICCV2019-Horde | 84 | 12769377 | #!/usr/bin/env python
# coding: utf-8
from tensorflow.keras.models import Model
from tensorflow.keras.layers import GlobalAveragePooling2D, Conv2D, Dense, Concatenate, Flatten, Input
from .abe_models import ABE
from ..layers import L2Normalisation
from ..layers.horde_layers import CKOP, PKOB
from .extractors import get_extractor, get_preprocess_method
def KOrderModel(extractor_name,
                embedding_sizes,
                high_order_dims,
                ho_trainable=False,
                end_layer=None):
    """Build a K-order HORDE model on top of a backbone extractor.

    One CKOP branch is added per entry of *high_order_dims* (orders k >= 2);
    each branch (and the first-order features) is global-average pooled,
    optionally embedded with a bias-free Dense layer, and L2-normalised.
    Returns the multi-output Keras model and the backbone's preprocessing
    function.
    """
    backbone = get_extractor(extractor_name, end_layer=end_layer)
    inputs = backbone.input
    features = backbone.output
    max_order = len(high_order_dims)
    outputs = [features]
    # One compact K-order pooling (CKOP) approximation per high order:
    for k, order_dim in enumerate(high_order_dims, start=2):
        approx = CKOP(output_dim=order_dim, name='CKOP_' + str(k), ho_trainable=ho_trainable)([features] * k)
        outputs.append(approx)
    # Pooling + (optional) embedding + L2 normalisation for every order:
    for k, tensor in enumerate(outputs):
        tensor = GlobalAveragePooling2D(name='GAP_' + extractor_name + '_O' + str(k + 1))(tensor)
        if embedding_sizes[k] > 0:
            tensor = Dense(embedding_sizes[k], use_bias=False)(tensor)
        outputs[k] = L2Normalisation(name='L2_' + extractor_name + '_O' + str(k + 1))(tensor)
    return Model(inputs=inputs, outputs=outputs, name=extractor_name + '_O' + str(max_order)), get_preprocess_method(extractor_name)
def CascadedKOrder(extractor_name,
                   embedding_sizes,
                   high_order_dims,
                   ho_trainable=True,
                   end_layer=None):
    """Build a cascaded K-order HORDE model on top of a backbone extractor.

    Each order k >= 2 is approximated by a PKOB block fed with the previous
    order's map and the raw backbone features; every order is then pooled,
    optionally projected, and L2-normalised. Returns the multi-output Keras
    model and the backbone's preprocessing function.
    """
    backbone = get_extractor(extractor_name, end_layer=end_layer)
    inputs = backbone.input
    features = backbone.output
    max_order = len(high_order_dims)
    outputs = [features]
    # Cascade: order k is built from the (k-1)-order map and the features.
    for k, order_dim in enumerate(high_order_dims, start=2):
        approx = PKOB(order_dim,
                      only_project_second=(k != 2),
                      ho_trainable=ho_trainable)([outputs[-1], features])
        outputs.append(approx)
    for k, tensor in enumerate(outputs):
        tensor = GlobalAveragePooling2D(name='GAP_' + extractor_name + '_O' + str(k + 1))(tensor)
        # A projection is added for every order when the high orders are
        # trainable, otherwise only for the first-order branch.
        if ho_trainable or k == 0:
            tensor = Dense(embedding_sizes[k],
                           use_bias=False,
                           name='Proj_' + extractor_name + '_O' + str(k + 1))(tensor)
        outputs[k] = L2Normalisation(name='L2_' + extractor_name + '_O' + str(k + 1))(tensor)
    return Model(inputs=inputs, outputs=outputs, name=extractor_name + '_O' + str(max_order)), get_preprocess_method(extractor_name)
def CascadedABE(embedding_size,
                high_order_dims,
                features_reduction=256,
                ho_trainable=True,
                n_head=8):
    """Build a cascaded high-order model on top of an ABE backbone.

    Parameters
    ----------
    embedding_size : sequence of int
        Embedding dimension per order (index 0 is the first-order ABE output).
    high_order_dims : sequence of int
        Output dimension of each high-order (k >= 2) approximation.
    features_reduction : int or None
        If not None, a 1x1 convolution reduces the concatenated head
        features to this many channels.
    ho_trainable : bool
        Whether the high-order PKOB/projection blocks are trainable.
    n_head : int
        Number of attention heads of the ABE backbone.

    Returns
    -------
    (keras.Model, callable)
        The multi-output model and the input preprocessing function.
    """
    # BUG FIX: n_head was hard-coded to 8 here, silently ignoring the
    # parameter; pass it through so values other than 8 actually work.
    model, preprocess_method = ABE(embedding_size[0], n_head=n_head)
    inp = model.input
    # One feature map per attention head, concatenated channel-wise.
    multi_head_out = [model.get_layer(name='inception_5b/output').get_output_at(k) for k in range(n_head)]
    concat = Concatenate()(multi_head_out)  # N x H x W x n_head*1024
    if features_reduction is not None:
        concat = Conv2D(filters=features_reduction,
                        kernel_size=(1, 1),
                        use_bias=False)(concat)
    output_list = [concat]
    # Add all high-order approximation layers:
    for k, order_dim in enumerate(high_order_dims, start=2):
        only_project_second = False if k == 2 else True
        x_ho = PKOB(order_dim,
                    only_project_second=only_project_second,
                    ho_trainable=ho_trainable)([output_list[-1], concat])
        output_list.append(x_ho)
    # Add pooling and embedding layers (index 0 is replaced below):
    for k in range(1, len(output_list)):
        output_list[k] = GlobalAveragePooling2D(name='GAP_O' + str(k + 1))(output_list[k])
        if ho_trainable:
            output_list[k] = Dense(embedding_size[k],
                                   use_bias=False,
                                   name='Proj_O' + str(k + 1))(output_list[k])
        output_list[k] = L2Normalisation(name='L2_O' + str(k + 1))(output_list[k])
    # Finally we replace the first order by the true ABE embedding output:
    output_list[0] = model.get_layer(name='ABE' + str(n_head)).output
    return Model(inp, output_list, name='ABE' + str(n_head) + '_O' + str(len(embedding_size))), preprocess_method
| 2.28125 | 2 |
EventHandler/repo/push.py | AdityaTelange/bitbucket-telegram-bot-webhook | 2 | 12769378 | <reponame>AdityaTelange/bitbucket-telegram-bot-webhook<gh_stars>1-10
def push(data_json):
    """Build a Markdown notification message for a Bitbucket repo:push event.

    Parameters
    ----------
    data_json : dict
        Parsed webhook payload with the standard Bitbucket ``repository``,
        ``actor`` and ``push`` sections.

    Returns
    -------
    str
        Markdown summary: who pushed, to which repository, a compare link,
        and the list of commits.
    """
    repository = data_json['repository']
    repository_name = repository['name']
    repository_link = repository['links']['html']['href']

    actor = data_json['actor']
    actor_name = actor['display_name']
    actor_profile = actor['links']['html']['href']

    # Bitbucket may batch several changes; this handler reports the first.
    pushh = data_json['push']['changes'][0]
    push_commits_all = pushh['commits']

    commits_str = ""
    # enumerate() instead of list.index(commit): index() was O(n) per commit
    # and returned the position of the *first* equal dict, so duplicate
    # commit payloads would all be numbered the same.
    for commit_no, commit in enumerate(push_commits_all):
        commit_hash = commit['hash']
        if len(commit_hash) > 7:
            commit_hash = commit_hash[:7]  # short hash, like the Bitbucket UI
        commit_link = commit['links']['html']['href']
        commit_message = commit['message']
        commits_str += '\n\t#{} [{}]({}) \n=> _{}_'.format(commit_no,
                                                           commit_hash, commit_link,
                                                           commit_message)

    push_link = pushh['links']['html']['href']

    message = "[{}]({}) pushed to [{}]({})" \
              " \nCompare: [compare]({})" \
              " \nCommits: \n{}".format(actor_name,
                                        actor_profile,
                                        repository_name,
                                        repository_link,
                                        push_link,
                                        commits_str
                                        )
    return message
| 2.5625 | 3 |
django_dicom/models/managers/messages.py | ZviBaratz/django-dicom | 8 | 12769379 | <gh_stars>1-10
"""
Messages for the :mod:`~django_dicom.models.managers` module.
"""
# Failure while building a DataElement instance from a raw element.
DATA_ELEMENT_CREATION_FAILURE = (
    "Failed to create DataElement instance for:\n{data_element}\n{exception}"
)
# Failure while parsing DICOM header information.
HEADER_CREATION_FAILURE = "Failed to read header information!\n{exception}"
# Generic import-pipeline failure for a single file path.
IMPORT_ERROR = (
    "Failed to import {path}\nThe following exception was raised: {exception}"
)
# Raised when an image's patient UID disagrees with the database record.
PATIENT_UID_MISMATCH = "Patient UID mismatch! Image {image_uid} is associated with patient {db_value} in the database, but the provided header shows {patient_uid}"
# Announces startup of the DICOM storage service class providers.
SERVER_START = "Starting {n_servers} DICOM storage service class providers..."
# flake8: noqa: E501
| 2.015625 | 2 |
.sublime/Packages/CTags/ranking/parse.py | teedoo/dotfiles | 1 | 12769380 | <reponame>teedoo/dotfiles<filename>.sublime/Packages/CTags/ranking/parse.py
import re
from helpers.common import *
#import spdb
# spdb.start()
class Parser:
    """
    Parses tag references and tag definitions. Used for ranking.
    """
    @staticmethod
    def extract_member_exp(line_to_symbol, source):
        """
        Extract the receiver member expression of a symbol — e.g. the
        ``receiver`` part of ``receiver.mtd()`` — by scanning
        *line_to_symbol* backwards from the symbol while stripping brackets
        and operators.

        line_to_symbol -- source-line text up to (excluding) the symbol
        source -- file source/extension used to look up language settings

        Returns a list of member-expression parts; falls back to
        ``[line_to_symbol]`` when no usable language settings exist.

        TODO:HIGH: Add base lang defs + Python/Ruby/C++/Java/C#/PHP overrides (should be very similar)
        TODO: comment and string support (eat as may contain brackets. add them to context - js['prop1']['prop-of-prop1'])
        """
        lang = get_lang_setting(source)
        if not lang:
            return [line_to_symbol]
        # Get per-language syntax regex of brackets, splitters etc.
        mbr_exp = lang.get('member_exp')
        if mbr_exp is None:
            return [line_to_symbol]
        # "stop" regexes terminate the backward scan (typically whitespace).
        lstStop = mbr_exp.get('stop', [])
        if (not lstStop):
            print('warning!: language has member_exp setting but it is ineffective: Must have "stop" key with array of regex to stop search backward from identifier')
            return [line_to_symbol]
        # Per-language regexes for closing/opening brackets and tokens to skip.
        lstClose = mbr_exp.get('close', [])
        reClose = concat_re(lstClose)
        lstOpen = mbr_exp.get('open', [])
        reOpen = concat_re(lstOpen)
        lstIgnore = mbr_exp.get('ignore', [])
        reIgnore = concat_re(lstIgnore)
        if len(lstOpen) != len(lstClose):
            print('warning!: extract_member_exp: settings lstOpen must match lstClose')
        # Maps each opening bracket to its expected closing counterpart.
        matchOpenClose = dict(zip(lstOpen, lstClose))
        # Construct one alternation regex of all open/close/ignore/stop
        # tokens; the capture group makes re.split keep the delimiters.
        splex = concat_re(lstOpen + lstClose + lstIgnore + lstStop)
        reStop = concat_re(lstStop)
        splex = "({0}|{1})".format(splex, reIgnore)
        splat = re.split(splex, line_to_symbol)
        #print('splat=%s' % splat)
        # Scan the tokens backwards with a bracket stack, so balanced
        # sub-expressions (e.g. getSlow(a && b).mtd()) are skipped while an
        # unbalanced '(' (e.g. func(obj.yyy) terminates the expression.
        stack = []
        lstMbr = []
        insideExp = False
        for cur in reversed(splat):
            # Scan backwards from the symbol: If alpha-numeric - keep it. If
            # Closing bracket e.g ] or ) or } --> push into stack
            if re.match(reClose, cur):
                stack.append(cur)
                insideExp = True
            # If opening bracket --> match it from top-of-stack: If stack empty
            # - stop else If match pop-and-continue else stop scanning +
            # warning
            elif re.match(reOpen, cur):
                # '(' with no matching ')' --> func(obj.yyy case --> return obj.yyy
                if len(stack) == 0:
                    break
                tokClose = stack.pop()
                tokCloseCur = matchOpenClose.get(cur)
                if tokClose != tokCloseCur:
                    print(
                        'non-matching brackets at the same nesting level: %s %s' %
                        (tokCloseCur, tokClose))
                    break
                insideExp = False
            # If white space --> stop. Do not stop for whitespace inside
            # open-close brackets nested expression
            elif re.match(reStop, cur):
                if not insideExp:
                    break
            elif re.match(reIgnore, cur):
                pass
            else:
                # Plain token: prepend its characters (scan is backwards).
                lstMbr[0:0] = cur
        strMbrExp = "".join(lstMbr)
        lstSplit = mbr_exp.get('splitters', [])
        reSplit = concat_re(lstSplit)
        # Split member deref per-lang (-> and :: in PHP and C++) - use base if
        # not found
        arrMbrParts = list(filter(None, re.split(reSplit, strMbrExp)))
        # print('arrMbrParts=%s' % arrMbrParts)
        return arrMbrParts
| 2.1875 | 2 |
mysite/urls.py | AmanRiat1/uOttaHack | 0 | 12769381 | <filename>mysite/urls.py
from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from . import views
# Route the site root to the main view; append static-file serving from
# STATIC_ROOT (effective in development when DEBUG serving is enabled).
urlpatterns = [
    url(r'^$', views.basic, name='output'),
]+ static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 1.71875 | 2 |
lib/local.py | sakyra01/CatPaw_OSINT | 0 | 12769382 | <gh_stars>0
import sys
import time
import collections
import requests
nu = "\033[0m"
re = "\033[1;31m"
gr = "\033[0;32m"
cy = "\033[0;36m"
wh = "\033[0;37m"
ye = "\033[0;34m"
su = f"\033[1;31m[\033[1;36m+\033[1;31m]{nu}"
fa = f"\033[1;31m[\033[1;31m!\033[1;31m]{nu}"
er = f"\033[1;31m[\033[1;34m?\033[1;31m]{nu}"
def urlshortner(url):
    """Shorten *url* via the TinyURL API and return the short URL text.

    Parameters
    ----------
    url : str
        The URL to shorten.

    Returns
    -------
    str
        The response body from TinyURL (the shortened URL on success).
    """
    # Pass the URL through `params` so requests percent-encodes it; the old
    # string concatenation broke for URLs containing '&', '#' or spaces.
    data = requests.get("http://tinyurl.com/api-create.php", params={"url": url})
    return data.text
def write(stri):
    """Print *stri* one character at a time with a 0.1 s delay between
    characters (typewriter effect). Returns None."""
    for ch in stri:
        time.sleep(0.1)
        print(ch, end="", flush=True)
def sort_list(xlist):
    """Count element occurrences in *xlist* and return a dict mapping each
    element to its count, ordered by descending count.

    Counter.most_common() already sorts items by count in descending order
    (ties keep first-seen order), so the previous hand-rolled
    sorted(..., reverse=True) pass was redundant.
    """
    return dict(collections.Counter(xlist).most_common())
# Pool of browser User-Agent strings to rotate through.
# BUG FIX: two list separators were missing, so adjacent string literals
# were implicitly concatenated and the list held only 5 (partly merged)
# entries instead of the intended 7.
useragent = ['Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36',
             'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393',
             'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
             'Mozilla/5.0 (iPad; CPU OS 8_4_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12H321 Safari/600.1.4',
             'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/602.2.14 (KHTML, like Gecko) Version/10.0.1 Safari/602.2.14',
             'Mozilla/5.0 (Linux; U; Android-4.0.3; en-us; Galaxy Nexus Build/IML74K) AppleWebKit/535.7 (KHTML, like Gecko) CrMo/16.0.912.75 Mobile Safari/535.7',
             'Mozilla/5.0 (iPhone; CPU iPhone OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148']
def banner_i():
    """Print the CatPaw ASCII-art startup banner to stdout."""
    banner = """
          -**-                     ......
          :@-      -#-  :+        .#  :#:
           =+      @-   *+        .#  .#.
           -# .%%++@+ -%@#%%-     .# .#.     .%%++@+ .#.    %%    .#.
            *% .+.  ++  *+     .#*****=@+ .+.  ++  *=    :+=:    +*
            :@ :=%%%@+  *+        .#      :=%%%@+   @-  @..@.  .@
            @: *-  *%   *+  *+    .#      *-  *%    -%  **  *+  %-
           .%+ += +*    -#+ *=    .#      += +*      =:@      @:=
          .*@@@@*. -%@@+..+  *@@*  .=  -%@@+..+       .=-      -%.
                                               ###:        .###
   Version 0.1.1                              .#####      +####*
   OSINT                                    -+:=###=  -####..+*
   Social Media Tool                      -####.  -+:.     %###=
   ~AmaSus01~                            .####%  .#####:  :####*
                                          .=#%.-#######+    *#@-
                                             -###########+
                                             %############
                                             .@####%@####:
    """
    print(banner)
| 2.390625 | 2 |
angio2020/generate_poly_masks.py | dhaulagiri0/Mask_RCNN | 0 | 12769383 | <filename>angio2020/generate_poly_masks.py<gh_stars>0
from sklearn.metrics import f1_score
from sklearn.metrics import confusion_matrix
from pathlib import Path
import cv2
import numpy as np
import imageio
import os, json
from shutil import copyfile
def getPolyImage(points_dict, shape=(512, 512)):
    """Rasterise one polygon annotation into a binary mask image.

    Args:
        points_dict: iterable of vertex dicts with 'x' and 'y' keys.
        shape: (height, width) of the output mask.

    Returns:
        A float array of *shape*, 0 outside the polygon, 255 inside.
    """
    vertices = np.array([[p['x'], p['y']] for p in points_dict]).reshape((-1, 1, 2))
    mask = np.zeros(shape)
    cv2.fillPoly(mask, np.int32([vertices]), (255))
    return mask
# Walk every VoTT-style polygon JSON, rasterise each tagged region into a
# per-artery binary mask, and copy the matching original frame alongside it.
pathString = 'A:/poly_json/'
path = Path(pathString)

for jf in path.iterdir():
    image_id = jf.name.split('.')[0]
    sample_dir = 'A:/segmented_manual/' + image_id.split('_')[0] + '/'
    keyframe_folder = sample_dir + image_id + '/'

    with open(jf) as json_file:
        data = json.load(json_file)

    for region in data['regions']:
        artery = region['tags'][0]
        mask = getPolyImage(region['points'])

        # Create sample and keyframe directories on first use.
        if not os.path.exists(sample_dir):
            os.mkdir(sample_dir)
        if not os.path.exists(keyframe_folder):
            os.mkdir(keyframe_folder)

        # Write the mask under both historical naming conventions.
        imageio.imwrite(keyframe_folder + image_id + '_' + artery + '_segmented_threshold_binary.png', mask)
        imageio.imwrite(keyframe_folder + image_id + '_' + artery + '_bin_mask.png', mask)

    # Pull the original frame from whichever split (test/val) contains it.
    if os.path.exists('A:/test/png/' + image_id + '.png'):
        copyfile('A:/test/png/' + image_id + '.png', keyframe_folder + image_id + '_original.png')
    elif os.path.exists('A:/val/png/' + image_id + '.png'):
        copyfile('A:/val/png/' + image_id + '.png', keyframe_folder + image_id + '_original.png')
| 2.28125 | 2 |
push_db_to_stage.py | vanpattenmedia/vpmframe-module-scripts | 0 | 12769384 | #!/usr/bin/env python
#
#
# Push the current database to stage
#
# This script pushes the content in the site database for the current
# stage to another stage, for example, development -> staging.
#
# It automatically handles the rewriting of key WordPress options siteurl,
# home and upload_path, as well as rewriting content URLs.
#
#
# Requires PyYAML for DB credentials import and Python-MySQL for string escaping
#
# Assumes Puppet, or deploy:setup has already created the DBs and populated the priv tables with the username/passwords
# specified in database.yml.
#
#
import yaml
import os
import sys
from sys import exit
import subprocess
from subprocess import Popen, PIPE, STDOUT
import _mysql
from pprint import pprint
import argparse
from pipes import quote
from datetime import datetime
import re
# Per-stage settings, keyed by stage name. Populated below from project.yml
# (hosts, SSH, paths) and database.yml (credentials, table prefixes).
# NOTE(review): `passwords` is never populated or read anywhere below —
# confirm whether it is dead state left over from a refactor.
ips = {}
ssh_ports = {}
users = {}
passwords = {}
siteurls = {}
homes = {}
upload_paths = {}
upload_url_paths = {}
tbl_prefixes = {}
# argument parsing
parser = argparse.ArgumentParser(description='Command line arguments')
parser.add_argument('-d', '--database-config', default='config/database.yml', help='The path to the database.yml file. (Default: %(default)s)')
parser.add_argument('-p', '--project-config', default='config/project.yml', help='The path to the project.yml file. (Default: %(default)s)')
parser.add_argument('--ignore-upload-paths', action='store_true', help='Do not change the upload_path, upload_url_path, siteurl or home after the database is synced. (Default: %(default)s)')
parser.add_argument('--update-site-paths', action='store_true', help='Update the siteurl and home paths in the database, after it is synced. (Default %(default)s)')
parser.add_argument('-f', '--from', help='The stage from which to download the database.')
parser.add_argument('-t', '--to', help='The stage whose database should be replaced.', action='append')
parser.add_argument('--days', default=0, help='Transfer only the last n days of posts and related content. (Default: %(default)s, where 0 transfers all posts.)')
parser.add_argument('--source-prefix-override', default=None, help='Force the source database table prefix to the specified prefix. Useful with WP Multisite')
arguments = parser.parse_args()
db_config_path = arguments.database_config
proj_config_path = arguments.project_config
ignore_upload_paths = arguments.ignore_upload_paths
update_site_paths = arguments.update_site_paths
should_ignore_upload_paths = {}
source_stage = vars(arguments)["from"] # hack, because 'from' is reserved, so we can't access it via the Namespace
# --to is action='append', so dest_stage is a list of stage names.
dest_stage = arguments.to
if not source_stage or not dest_stage or len(source_stage) < 1 or len(dest_stage) < 1:
    parser.print_help()
    exit(2)
try:
    days = int(arguments.days)
except ValueError as e:
    parser.print_help()
    exit(2)
if days < 0:
    print "--days must be greater than or equal to 0."
    print
    parser.print_help()
    exit(2)
# Unique token (pid + timestamp) used to name every temporary file this run
# creates locally and on the remote hosts, so concurrent runs don't collide.
pid = os.getpid()
pid_str = str(pid) + '_' + datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
# bring in WordPress and stage settings from project.yml
try:
    proj_config_file = open(proj_config_path, 'r')
except IOError as e:
    print "Could not open project configuration file from " + proj_config_path + "."
    print "I/O Error({0}): {1}".format(e.errno, e.strerror)
    print "Cannot continue."
    exit(1)
try:
    proj_config = yaml.safe_load(proj_config_file)
except yaml.YAMLError as e:
    print "Unable to parse project configuration file."
    print "YAMLError({0}): {1}".format(e.errno, e.strerror)
    exit(1)
if not 'stage' in proj_config:
    print "The YAML file did not seem to have a 'stage' section."
    exit(1)
if not source_stage in proj_config['stage']:
    print "The '" + source_stage + "' stage was not specified in the project configuration file's 'stage' section."
    exit(1)
for this_dest in dest_stage:
    if not this_dest in proj_config['stage']:
        print "The '" + this_dest + "' stage was not specified in the project configuration file's 'stage' section."
        exit(1)
# Validate and cache connection/path settings for the source and every
# destination stage in one pass.
all_stages = [source_stage] + dest_stage
for stage in all_stages:
    # sanity check the stage in the YAML
    if not 'ip' in proj_config['stage'][stage] or not 'ssh_port' in proj_config['stage'][stage] or not 'user' in proj_config['stage'][stage]:
        print "The '" + stage + "' stage in the 'stage' section of the project configuration file does not have one or more of the required 'ip', 'ssh_port' or 'user' entries."
        exit(1)
    # load the YAML vars into our internal vars
    ips[stage] = proj_config['stage'][stage]['ip']
    ssh_ports[stage] = str(proj_config['stage'][stage]['ssh_port'])
    users[stage] = proj_config['stage'][stage]['user']
    # prepare some possible prefixes for the paths in case we want them
    # (production is served from www.<domain>; other stages from <stage>.<domain>)
    if stage == 'production':
        url_prefix = 'www'
    else:
        url_prefix = stage
    if stage == 'dev':
        upload_url_path_prefix = 'uploads'
    else:
        upload_url_path_prefix = 'static'
    upload_path_prefix = 'uploads'
    if not ignore_upload_paths:
        # check for presence of upload paths for this stage in the project.yml
        if 'upload_path' in proj_config['stage'][stage] and 'upload_url_path' in proj_config['stage'][stage]:
            if proj_config['stage'][stage]['upload_path'] is None or proj_config['stage'][stage]['upload_url_path'] is None or len(proj_config['stage'][stage]['upload_path']) == 0 or len(proj_config['stage'][stage]['upload_url_path']) == 0:
                # A blank entry disables upload-path rewriting for ALL stages
                # (ignore_upload_paths is a global flag), not just this one.
                ignore_upload_paths = True
                print "WARNING: The '" + stage + "' stage had an upload_path and upload_url_path, but at least one was blank."
                print "The upload paths in the restored database on '" + stage + "' will be left alone."
                print ""
                should_ignore_upload_paths[stage] = True
            else:
                upload_paths[stage] = proj_config['stage'][stage]['upload_path']
                upload_url_paths[stage] = proj_config['stage'][stage]['upload_url_path']
                should_ignore_upload_paths[stage] = False
        else:
            # not set, so infer some WordPress-y type things from YAML and our sensible defaults
            print "INFO: The '" + stage + "' stage did not specify an upload_path and upload_url_path. We will assume some sensible wpframe defaults."
            upload_paths[stage] = '../../../../../' + upload_path_prefix + '.' + proj_config['domain'] + '/content/uploads'
            upload_url_paths[stage] = 'http://' + upload_url_path_prefix + '.' + proj_config['domain'] + '/content/uploads'
    else:
        should_ignore_upload_paths[stage] = True
    if update_site_paths:
        homes[stage] = 'http://' + url_prefix + '.' + proj_config['domain'] + '/'
        siteurls[stage] = 'http://' + url_prefix + '.' + proj_config['domain'] + '/wp'
# bring in database credentials from YAML
try:
    db_config_file = open(db_config_path, 'r')
except IOError as e:
    print "Could not open database configuration file from " + db_config_path + "."
    print "I/O Error({0}): {1}".format(e.errno, e.strerror)
    print "Cannot continue."
    exit(1)
try:
    db_config = yaml.safe_load(db_config_file)
except yaml.YAMLError as e:
    print "Unable to parse database configuration file."
    print "YAMLError({0}): {1}".format(e.errno, e.strerror)
    exit(1)
# sanity checking of config YAML
if not source_stage in db_config:
    print "The '" + source_stage + "' stage was not found in the database config YAML file."
    exit(1)
for this_dest in dest_stage:
    if not this_dest in db_config:
        print "The '" + this_dest + "' stage was not found in the database config YAML file."
        exit(1)
for stage in all_stages:
    if not 'name' in db_config[stage] or not 'user' in db_config[stage] or not 'password' in db_config[stage] or not 'host' in db_config[stage] or not 'grant_to' in db_config[stage] or not 'tbl_prefix' in db_config[stage] or not 'host' in db_config[stage]:
        print "The '" + stage + "' stage does not have all of the required YAML attributes in the config file."
        print "Does it have the 'tbl_prefix' and 'host' in addition to the previously required attributes?"
        exit(1)
    tbl_prefixes[stage] = db_config[stage]['tbl_prefix']
if arguments.source_prefix_override is not None:
    tbl_prefixes[source_stage] = arguments.source_prefix_override
    print "INFO: Source table prefix is overriden to '" + arguments.source_prefix_override + "'. Ensure this includes a trailing underscore if appropriate!"
    print
# NOTE(review): these two single-letter prefixes are computed but never used
# below — confirm whether they are leftovers from an earlier naming scheme.
source_db_prefix = source_stage[0] + "_"
dest_db_prefix = dest_stage[0] + "_"
# get confirmation from the user
confirm = raw_input("Are you sure you want to download the '" + source_stage + "' database and push it to '" + ", ".join(dest_stage) + "'? (y/n): ")
if not confirm == 'y' and not confirm == 'Y':
    print "Exiting as requesting."
    exit(1)
# connect to source
# NOTE(review): the MySQL password is interpolated into the remote shell
# command line (-p<pass>), so it is visible in the remote host's process
# list while the dump runs — confirm this is acceptable for these hosts.
source_db = _mysql.escape_string(db_config[source_stage]['name'])
source_user = _mysql.escape_string(db_config[source_stage]['user'])
source_pass = quote(db_config[source_stage]['password'])
source_host = _mysql.escape_string(db_config[source_stage]['host'])
# mysqldump the source
# simple method for days=0
if days == 0:
    print "Running a simple mysqldump on the source (" + source_stage + ") database..."
    sdump = Popen(['ssh', '-p', ssh_ports[source_stage], '-l', users[source_stage], ips[source_stage], 'mysqldump -h ' + source_host + ' -u ' + source_user + ' -p' + source_pass + ' ' + source_db + ' > ~/push_db_to_stage_' + pid_str + '_src_tmp.sql'], universal_newlines=True)
    sdump.communicate()
else:
    # complex method for selective dumping
    print "Running a complex dump for " + str(days) + "..."
    print
    print "Determining if this is a WPMU install..."
    # before anything else, determine if this is WPMU and we should loop over prefixes
    listtablescmd = Popen(['ssh', '-p', ssh_ports[source_stage], '-l', users[source_stage], ips[source_stage], 'mysql -Bh ' + source_host + ' -u ' + source_user + ' -p' + source_pass + ' -se "SHOW TABLES" ' + source_db + ' > ~/push_db_to_stage_' + pid_str + '_' + source_db + '_list.txt'], universal_newlines=True)
    listtablescmd.communicate()
    # pull down that list
    listtablesdump = Popen(['scp', '-C', '-P', ssh_ports[source_stage], users[source_stage] + '@' + ips[source_stage] + ':~/push_db_to_stage_' + pid_str + '_' + source_db + '_list.txt', '.'], universal_newlines=True)
    listtablesdump.communicate()
    print
    # determine if any prefix_n tables exist (e.g. wp_2_*, wp_3_*)
    # Each extra multisite blog gets its own prefix; we dump each separately.
    dumpable_prefixes = [ tbl_prefixes[source_stage] ]
    lt = open('./push_db_to_stage_' + pid_str + '_' + source_db + '_list.txt', 'r')
    for line in lt:
        dumpable_prefix = line[: (len(tbl_prefixes[source_stage])+2)]
        #print "this dumpable prefix is " + dumpable_prefix + " from " + line
        if re.match(tbl_prefixes[source_stage] + r"([0-9]+)_", line) and dumpable_prefix not in dumpable_prefixes:
            print "INFO: Will dump the prefix " + dumpable_prefix
            dumpable_prefixes.append( dumpable_prefix )
    lt.close()
    # remove local temporary file...
    os.remove('./push_db_to_stage_' + pid_str + '_' + source_db + '_list.txt')
    # remove from source...
    print "Removing the temporary file from the " + source_stage + " server..."
    removesource = Popen(['ssh', '-p', ssh_ports[source_stage], '-l', users[source_stage], ips[source_stage], 'rm -fv -- ~/push_db_to_stage_' + pid_str + '_' + source_db + '_list.txt'], universal_newlines=True)
    removesource.communicate()
    print
    # determine number of posts that will be pulled from each prefix
    for this_prefix in dumpable_prefixes:
        print
        print "INFO: This dump of " + str(days) + " days of " + this_prefix + "* will have the following number of posts (includes revisions, drafts):"
        postnumcmd = Popen(['ssh', '-qp', ssh_ports[source_stage], '-l', users[source_stage], ips[source_stage], 'mysql -t -Bh ' + source_host + ' -u ' + source_user + ' -p' + source_pass + ' ' + source_db + ' -e "SELECT COUNT(ID) AS posts_subset_count FROM ' + this_prefix + 'posts WHERE post_modified_gmt > (NOW() - INTERVAL ' + str(days) + ' DAY);" '], universal_newlines=True)
        postnumcmd.communicate()
    # get confirmation from the user
    confirm = raw_input("Are you sure you want to replace the data on '" + ", ".join(dest_stage) + "' with this subset of posts? (y/n): ")
    if not confirm == 'y' and not confirm == 'Y':
        print "Exiting as requesting."
        exit(1)
    # Dump, per prefix: the content tables filtered to the last `days` days,
    # then every remaining table for that prefix in full.
    for this_prefix in dumpable_prefixes:
        # selectively dump the wp_posts table
        print "Dumping " + this_prefix + "posts..."
        wpposts = Popen(['ssh', '-p', ssh_ports[source_stage], '-l', users[source_stage], ips[source_stage], 'mysqldump --single-transaction -h ' + source_host + ' -u ' + source_user + ' -p' + source_pass + ' ' + source_db + ' ' + this_prefix + 'posts --where="post_modified_gmt > ( NOW() - INTERVAL ' + str(days) + ' DAY)" > ~/push_db_to_stage_' + pid_str + '_' + this_prefix + '_posts_src_tmp.sql'], universal_newlines=True)
        wpposts.communicate()
        # selectively dump wp_postmeta
        print "Dumping " + this_prefix + "postmeta..."
        wppostmeta = Popen(['ssh', '-p', ssh_ports[source_stage], '-l', users[source_stage], ips[source_stage], 'mysqldump --single-transaction -h ' + source_host + ' -u ' + source_user + ' -p' + source_pass + ' ' + source_db + ' ' + this_prefix + 'postmeta --where="post_id IN (SELECT ID FROM ' + this_prefix + 'posts WHERE post_modified_gmt > ( NOW() - INTERVAL ' + str(days) + ' DAY))" > ~/push_db_to_stage_' + pid_str + '_' + this_prefix + '_postmeta_src_tmp.sql'], universal_newlines=True)
        wppostmeta.communicate()
        # selectively dump wp_comments
        print "Dumping " + this_prefix + "comments..."
        wpcomments = Popen(['ssh', '-p', ssh_ports[source_stage], '-l', users[source_stage], ips[source_stage], 'mysqldump --single-transaction -h ' + source_host + ' -u ' + source_user + ' -p' + source_pass + ' ' + source_db + ' ' + this_prefix + 'comments --where="comment_post_id IN (SELECT ID FROM ' + this_prefix + 'posts WHERE post_modified_gmt > ( NOW() - INTERVAL ' + str(days) + ' DAY))" > ~/push_db_to_stage_' + pid_str + '_' + this_prefix + '_comments_src_tmp.sql'], universal_newlines=True)
        wpcomments.communicate()
        # selectively dump wp_commentmeta
        print "Dumping " + this_prefix + "commentmeta..."
        wpcommentmeta = Popen(['ssh', '-p', ssh_ports[source_stage], '-l', users[source_stage], ips[source_stage], 'mysqldump --single-transaction -h ' + source_host + ' -u ' + source_user + ' -p' + source_pass + ' ' + source_db + ' ' + this_prefix + 'commentmeta --where="comment_id IN (SELECT comment_post_ID FROM ' + this_prefix + 'comments AS wpc INNER JOIN ' + this_prefix + 'posts AS wpp ON wpp.ID = wpc.comment_post_ID WHERE wpp.post_modified_gmt > ( NOW() - INTERVAL ' + str(days) + ' DAY))" > ~/push_db_to_stage_' + pid_str + '_' + this_prefix + '_commentmeta_src_tmp.sql'], universal_newlines=True)
        wpcommentmeta.communicate()
        # selectively dump wp_term_relationships
        print "Dumping " + this_prefix + "term_relationships..."
        wpterm_relationships = Popen(['ssh', '-p', ssh_ports[source_stage], '-l', users[source_stage], ips[source_stage], 'mysqldump --single-transaction -h ' + source_host + ' -u ' + source_user + ' -p' + source_pass + ' ' + source_db + ' ' + this_prefix + 'term_relationships --where="object_id IN (SELECT ID FROM ' + this_prefix + 'posts AS wpp WHERE wpp.post_modified_gmt > ( NOW() - INTERVAL ' + str(days) + ' DAY))" > ~/push_db_to_stage_' + pid_str + '_' + this_prefix + '_term_relationships_src_tmp.sql'], universal_newlines=True)
        wpterm_relationships.communicate()
        print
        print "INFO: It is safe to ignore warnings about " + this_prefix + "cfs_values being missing if CFS is not installed in this site."
        print "Dumping " + this_prefix + "cfs_values..."
        # selectively dump wp_cfs_values
        wpcfs_values = Popen(['ssh', '-p', ssh_ports[source_stage], '-l', users[source_stage], ips[source_stage], 'mysqldump --single-transaction -h ' + source_host + ' -u ' + source_user + ' -p' + source_pass + ' ' + source_db + ' ' + this_prefix + 'cfs_values --where="post_id IN (SELECT ID FROM ' + this_prefix + 'posts AS wpp WHERE wpp.post_modified_gmt > ( NOW() - INTERVAL ' + str(days) + ' DAY))" > ~/push_db_to_stage_' + pid_str + '_' + this_prefix + '_cfs_values_src_tmp.sql'], universal_newlines=True)
        wpcfs_values.communicate()
        # dump other tables
        # tables dump subquery for listing tables to dump -- we exclude the separate ones we have done
        ignore_tables = [ 'posts', 'postmeta', 'comments', 'commentmeta', 'term_relationships', 'cfs_values' ]
        ignore_tables_formatted = ''
        for table in ignore_tables:
            ignore_tables_formatted += '\'' + this_prefix + table + '\','
        ignore_tables_formatted = ignore_tables_formatted[:-1] # cut off last comma
        # our tables dump subquery must exclude any tables that match another prefix (wp_* matches the wp_n_* 'other' tables, which is not desired)
        if tbl_prefixes[source_stage] == this_prefix:
            other_prefixes = list(dumpable_prefixes)
            # remove this prefix from other_prefixes list
            if this_prefix in other_prefixes:
                other_prefixes.remove(this_prefix)
            other_prefixes_formatted = ' AND TABLE_NAME NOT LIKE '
            for other_prefix in other_prefixes:
                other_prefixes_formatted += '\'' + other_prefix + '%\' AND TABLE_NAME NOT LIKE '
            other_prefixes_formatted = other_prefixes_formatted[:-25] # cut off last 'AND' to finish statement
        else:
            other_prefixes_formatted = ''
        # this subquery selects which tables specifically for mysqldump to dump below
        tables_subquery = 'mysql -h ' + source_host + ' -u ' + source_user + ' -p' + source_pass + ' -Bse "SELECT TABLE_NAME FROM information_schema.TABLES WHERE TABLE_SCHEMA=\'' + source_db + '\' AND TABLE_NAME LIKE \'' + this_prefix + '%\' AND TABLE_NAME NOT IN (' + ignore_tables_formatted + ')' + other_prefixes_formatted + '"'
        #print tables_subquery
        # actually do the 'other' tables dump
        print "Dumping " + this_prefix + "'s other tables..."
        sdump = Popen(['ssh', '-p', ssh_ports[source_stage], '-l', users[source_stage], ips[source_stage], 'mysqldump -h ' + source_host + ' -u ' + source_user + ' -p' + source_pass + ' ' + source_db + ' --tables $(' + tables_subquery + ') > ~/push_db_to_stage_' + pid_str + '_' + this_prefix + '_other_tmp.sql'], universal_newlines=True)
        sdump.communicate()
        print "Completed processing " + this_prefix
        print "--------------------------"
        print
    print "Merging dumps..."
    # merge dumps
    mergedumps = Popen(['ssh', '-p', ssh_ports[source_stage], '-l', users[source_stage], ips[source_stage], 'cat ~/push_db_to_stage_' + pid_str + '*.sql > ~/push_db_to_stage_' + pid_str + '_src_tmp.sql'], universal_newlines=True)
    mergedumps.communicate()
    print "Done."
    print
# download the file
print "Downloading the dump..."
sdump = Popen(['scp', '-C', '-P', ssh_ports[source_stage], users[source_stage] + '@' + ips[source_stage] + ':~/push_db_to_stage_' + pid_str + '_src_tmp.sql', '.'], universal_newlines=True)
sdump.communicate()
print "Done."
print
# remove from source
print "Removing the temporary file from the " + source_stage + " server..."
sdump = Popen(['ssh', '-p', ssh_ports[source_stage], '-l', users[source_stage], ips[source_stage], 'rm -fv -- ~/push_db_to_stage_' + pid_str + '*.sql'], universal_newlines=True)
sdump.communicate()
print "Done."
print
# For each destination stage: confirm, upload the dump, apply it, then
# rewrite stage-specific options/URLs inside the restored database.
for this_dest in dest_stage:
    # get confirmation from the user
    confirm = raw_input("This is your final stop before the push of the database to '" + this_dest + "'. Do you want to go ahead? (y/n): ")
    if not confirm == 'y' and not confirm == 'Y':
        print "Exiting as requesting."
        exit(1)
    print "Uploading the source (" + source_stage + ") database dump to " + this_dest + "..."
    print
    ddump = Popen(['scp', '-C', '-P', ssh_ports[this_dest], './push_db_to_stage_' + pid_str + '_src_tmp.sql', users[this_dest] + '@' + ips[this_dest] + ':~/push_db_to_stage_' + pid_str + '_dest_tmp.sql'], universal_newlines=True)
    ddump.communicate()
    print "Done."
    print
    print "Applying the (" + source_stage + ") database dump to the database on " + this_dest + "..."
    print
    # execute against the destination
    dest_db = _mysql.escape_string(db_config[this_dest]['name'])
    dest_user = _mysql.escape_string(db_config[this_dest]['user'])
    dest_pass = quote(db_config[this_dest]['password'])
    dest_host = _mysql.escape_string(db_config[this_dest]['host'])
    dexec = Popen(['ssh', '-p', ssh_ports[this_dest], '-l', users[this_dest], ips[this_dest], 'mysql -h ' + dest_host + ' -u ' + dest_user + ' -p' + dest_pass + ' ' + dest_db + ' < ~/push_db_to_stage_' + pid_str + '_dest_tmp.sql'], universal_newlines=True)
    dexec.communicate()
    print "Done."
    print
    # remove from source
    print "Removing the temporary file from the " + this_dest + " server..."
    ddump = Popen(['ssh', '-p', ssh_ports[this_dest], '-l', users[this_dest], ips[this_dest], 'rm -fv -- ~/push_db_to_stage_' + pid_str + '_dest_tmp.sql'], universal_newlines=True)
    ddump.communicate()
    print "Done."
    print
    # now, change the variables that need changing
    # get the variables ready
    # NOTE(review): the old_* values below are computed but never used in the
    # generated SQL — confirm whether replacement of old URLs in post_content
    # was ever meant to use old_upload_url_path instead of the source value.
    sql = ""
    old_tblprefix = _mysql.escape_string(tbl_prefixes[source_stage])
    new_tblprefix = _mysql.escape_string(tbl_prefixes[this_dest])
    if not should_ignore_upload_paths[this_dest]:
        old_upload_url_path = _mysql.escape_string(upload_url_paths[source_stage])
        new_upload_url_path = _mysql.escape_string(upload_url_paths[this_dest])
        old_upload_path = _mysql.escape_string(upload_paths[source_stage])
        new_upload_path = _mysql.escape_string(upload_paths[this_dest])
        sql = sql + "UPDATE `" + new_tblprefix + "posts` SET post_content = REPLACE(post_content, '" + old_upload_url_path + "', '" + new_upload_url_path + "');\n\
UPDATE `" + new_tblprefix + "options` SET option_value = '" + new_upload_path + "' WHERE option_name = 'upload_path';\n\
UPDATE `" + new_tblprefix + "options` SET option_value = '" + new_upload_url_path + "' WHERE option_name = 'upload_url_path';\n"
    if update_site_paths:
        old_siteurl = _mysql.escape_string(siteurls[source_stage])
        new_siteurl = _mysql.escape_string(siteurls[this_dest])
        old_home = _mysql.escape_string(homes[source_stage])
        new_home = _mysql.escape_string(homes[this_dest])
        # sub into the MySQL commands
        sql = sql + "UPDATE `" + new_tblprefix + "options` SET option_value = '" + new_siteurl + "' WHERE option_name = 'siteurl';\n\
UPDATE `" + new_tblprefix + "options` SET option_value = '" + new_home + "' WHERE option_name = 'home';\n\
"
    if len(sql) > 0:
        # write this to a file, which we will then upload and execute against the destination database
        sql_file = open('./push_db_to_stage_' + pid_str + '_update_tmp.sql', 'w')
        sql_file.write(sql)
        sql_file.close()
        # upload the SQL commands for update
        print "Uploading the SQL commands for updating static content URLs..."
        sqdump = Popen(['scp', '-C', '-P', ssh_ports[this_dest], './push_db_to_stage_' + pid_str + '_update_tmp.sql', users[this_dest] + '@' + ips[this_dest] + ':~/push_db_to_stage_' + pid_str + '_update_tmp.sql'], universal_newlines=True)
        sqdump.communicate()
        print "Done."
        print
        # execute those commands
        print "Executing the update..."
        uexec = Popen(['ssh', '-p', ssh_ports[this_dest], '-l', users[this_dest], ips[this_dest], 'mysql -h ' + dest_host + ' -u ' + dest_user + ' -p' + dest_pass + ' ' + dest_db + ' < ~/push_db_to_stage_' + pid_str + '_update_tmp.sql'], universal_newlines=True)
        update_result = uexec.communicate()
        print "Done."
        print
        # remove from source
        print "Removing the temporary file from the " + this_dest + " server..."
        udump = Popen(['ssh', '-p', ssh_ports[this_dest], '-l', users[this_dest], ips[this_dest], 'rm -fv -- ~/push_db_to_stage_' + pid_str + '_update_tmp.sql'], universal_newlines=True)
        udump.communicate()
        print "Done."
        print
# remove local temporary files
# NOTE(review): `sql` here holds the value from the *last* loop iteration,
# and the local update_tmp file is overwritten per destination — verify the
# single removal below is intentional.
print "Removing local temporary files..."
if len(sql) > 0:
    os.remove('./push_db_to_stage_' + pid_str + '_update_tmp.sql')
os.remove('./push_db_to_stage_' + pid_str + '_src_tmp.sql')
print
print "All done!"
| 2.203125 | 2 |
scripts/un/energy/process_test.py | padma-g/data | 0 | 12769385 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test UN energy data processing
"""
from un.energy import process
import filecmp
import os
import sys
import unittest
from absl import app
# Allows the following module imports to work when running as a script
# NOTE(review): this sys.path tweak executes *after* the
# `from un.energy import process` import above, so it cannot help that
# import resolve — verify whether the append should precede the import.
sys.path.append(
    os.path.dirname(os.path.dirname(os.path.dirname(
        os.path.abspath(__file__)))))

# module_dir_ is the path to where this test is running from.
module_dir_ = os.path.dirname(__file__)
class TestUNEnergyProcess(unittest.TestCase):

    def test_un_energy_process(self):
        """Run process() over the test fixture and compare all outputs.

        Generates output files for the test_data input into a tmp directory,
        checks the returned counters (work done, no errors), and compares
        each generated file to its expected counterpart.
        """
        input_csv = os.path.join(module_dir_, 'test_data/un_energy_input.csv')
        # Ensure a scratch directory exists for the generated files.
        tmp_dir = os.path.join(module_dir_, 'tmp')
        if not os.path.exists(tmp_dir):
            os.mkdir(tmp_dir)
        actual_prefix = os.path.join(tmp_dir, 'un_energy_test_output')
        expected_prefix = os.path.join(module_dir_,
                                       'test_data/un_energy_output')
        print(f'test file path: {input_csv}, output: {actual_prefix}')
        counters = process.process([input_csv], actual_prefix, 10000)
        # Sanity: the run actually consumed input and produced output.
        self.assertTrue(counters['input_files'] > 0)
        self.assertTrue(counters['inputs_processed'] > 0)
        self.assertTrue(counters['output_csv_rows'] > 0)
        self.assertTrue(counters['output_stat_vars'] > 0)
        # No error counter may be non-zero.
        total_errors = sum(counters[name] for name in counters if 'error' in name)
        self.assertEqual(total_errors, 0)
        # Byte-compare each generated artifact with the golden file.
        for suffix in ['.csv', '.mcf', '.tmcf']:
            self.assertTrue(
                filecmp.cmp(actual_prefix + suffix, expected_prefix + suffix))
if __name__ == '__main__':
    # BUG FIX: the previous `app.run()` call was wrong — absl's app.run()
    # requires a `main` callable (calling it bare raises TypeError) and it
    # exits the process, so the following unittest.main() was unreachable.
    # Run the unittest entry point directly.
    unittest.main()
| 2.296875 | 2 |
Visitor_behaviour_sequence.py | daiweiLin/unity_simulator | 0 | 12769386 | <reponame>daiweiLin/unity_simulator<gh_stars>0
"""
This is the class of visitor which are attracted to a sequence of actions
Attract sequence: Low-High, High-Low
"""
import numpy as np
from collections import deque
from itertools import groupby
class Visitor_behaviour_sequence:
def __init__(self, num_visitors, epsilon=0):
self.num_visitors = num_visitors
self.epsilon = epsilon
# self.visitor_stay_time = stay_time # seconds
self.node_number = None
self.dist_matrix = None
self.buffer_size = 50
self.seq_interval_length = 12
self.observation_buffer = deque(maxlen=self.buffer_size)
# self.visitor_start_ts = np.zeros(self.num_visitors, dtype=np.float64)
self.visitor_prev_dest = [None]*self.num_visitors
self.visitor_arrived_at_node = [False]*self.num_visitors
def setup(self, coordinates):
self.node_number = int(len(coordinates) / 2)
self.dist_matrix = self._cal_node_distance(coordinates, normalize=True)
@staticmethod
def _cal_node_distance(coordinates, normalize=True):
num_nodes = int(len(coordinates)/2)
dis_matrix = np.zeros((num_nodes, num_nodes), dtype=np.float64)
for i in range(num_nodes):
start_point = np.array([coordinates[i*2], coordinates[i*2+1]])
for j in range(i, num_nodes):
end_point = np.array([coordinates[j*2], coordinates[j*2+1]])
dis_matrix[i, j] = np.linalg.norm(end_point-start_point)
dis_matrix[j, i] = dis_matrix[i, j] # because of symmetry
if normalize:
# normalize distance so that the minimal distance is 1.
# This is to ensure center nodes are more important than surrounding nodes
dis_sort = np.sort(dis_matrix.flatten())
min_dis = 0.0
for d in dis_sort:
if d != 0:
min_dis = d
break
assert min_dis != 0.0, "Minimum distance between nodes is 0.0. Double check distance matrix."
if min_dis < 1:
dis_matrix = dis_matrix / min_dis
# Make diagonal element equal to 1. This is to avoid numerical error in _find_hot_spot()
for i in range(num_nodes):
dis_matrix[i,i] = 1
# print(dis_matrix)
return dis_matrix
@staticmethod
def _cal_node_visitor_distance(v_coordinates, n_coordinates):
distance = np.zeros((int(len(n_coordinates)/2),))
for idx in range(len(distance)):
d = np.linalg.norm(np.array(v_coordinates) - np.array(n_coordinates[idx*2:idx*2+2]))
# Avoid numerical error
if d == 0.0:
d = 1e-6
distance[idx] = d
return distance
def _find_sequence(self, observation):
seperation = observation > 0.5
g = groupby(seperation)
interval = []
for k,v in g:
interval.append(len(list(v)))
sequence_cnt = 0
for i in range(len(interval)-1):
if interval[i] > self.seq_interval_length and interval[i+1] > self.seq_interval_length:
sequence_cnt += 1
return sequence_cnt
    def _find_hot_spot(self, v_coordinates, n_coordinates, timeout=False, prev_dest=None):
        """
        Pick the node whose neighbourhood shows the most detected sequences.

        Each node's "heat" is every node's sequence count weighted by the
        reciprocal of inter-node distance; nodes with no detected sequence of
        their own are masked out, and the remainder is divided by the
        visitor-to-node distance so the nearest interesting node wins.

        :param v_coordinates: visitor (x, y) position.
        :param n_coordinates: flat [x1, y1, x2, y2, ...] node coordinates.
        :param timeout: if True, avoid returning ``prev_dest`` again.
        :param prev_dest: destination to skip when ``timeout`` is set.
        :return: index of selected spot if exists, return None o.w.
        """
        # Buffered history as (frames, nodes); count sequences per node.
        observation = np.array(self.observation_buffer)
        sequence_count = np.zeros(self.node_number)
        for i in range(self.node_number):
            # print("observation = {}".format(observation[:,i]))
            sequence_count[i] = self._find_sequence(observation[:,i])
        # print(sequence_count)
        # Heat: distance-weighted sum of all nodes' sequence counts.
        heat = np.zeros(self.node_number)
        for i in range(self.node_number):
            distance = self.dist_matrix[i, :]
            #distance[i] = 1 # change this to avoid numerical error (moved into _cal_node_distance())
            heat[i] = np.sum(sequence_count * np.reciprocal(distance))
        '''
        Only consider those nodes with sequence detected.
        Then divide Heat by the distance between visitor and each node to select the nearest interesting spot.
        '''
        heat = heat * (sequence_count > 0)
        #print("filtered heat=\n{}".format(heat))
        v_n_distance = self._cal_node_visitor_distance(v_coordinates, n_coordinates)
        heat = heat / v_n_distance
        #==============================#
        # Select spot w/ highest score #
        #==============================#
        # At least one hot spot
        if np.sum(heat > 0) > 0:
            if not timeout:
                return heat.argmax()
            else:
                #assert prev_dest is not None, "Must provide previous destination if visitor times out."
                # On timeout, walk candidates from hottest to coldest and
                # return the best one that is not the stale destination.
                sorted_dest = np.argsort(heat)
                for i in range(self.node_number - 1, -1, -1):
                    if prev_dest != sorted_dest[i]:
                        return sorted_dest[i]
                # Falls through (returns None) only when every candidate
                # equals prev_dest, i.e. a single-node layout.
        # No hot spot
        else:
            return None
def step(self, observation):
    """
    Compute one action (a destination) per visitor from the flat observation.

    NOTICE: is_timeout_x ONLY exist in multi-visitor case
    :param observation: [LED1, LED2, ... LEDn,
                        x1, y1, x2, y2, ... xn, yn,
                        is_arrived_1, is_timeout_1, is_arrived_2, is_timeout_2,... is_arrived_m,is_timeout_m,
                        v_x1, v_y1, ...v_xm, v_ym
                        ingame_time]
    ============================================================================================
    LEDn                      : LED intensity
    xn, yn                    : LED's (x,y) coordinates,
    is_arrived_m, is_timeout_m: whether visitor arrives at destination and whether visitor time
                                out.
    v_xm, v_ym                : visitor current coordinates
    ============================================================================================
    3*n + 4*m + 1 elements in total.
    n = number of nodes;
    m = number of visitors;
    :return: [visitor1.x, visitor1.y, visitor2.x, visitor2.y, ... visitorN.x, visitorN.y]
    """
    visitor_actions = list()
    obs = observation[0 : self.node_number]
    self.observation_buffer.append(obs)
    # print("visitor observation :{}".format(obs))
    node_positions = observation[self.node_number:self.node_number*3]
    # The per-visitor flags are interleaved: [arrived_1, timeout_1, arrived_2, ...]
    is_arrv = observation[self.node_number*3:self.node_number*3 + self.num_visitors*2][::2]
    is_timeout = observation[self.node_number*3:self.node_number*3 + self.num_visitors*2][1::2]
    # if np.sum(is_timeout) > 0:
    #     print("Timeout:{}".format(is_timeout))
    visitor_coords = observation[self.node_number*3 + self.num_visitors*2:self.node_number*3 + self.num_visitors*4]
    for v in range(self.num_visitors):
        dest = None
        if is_timeout[v] == 1:
            # Time out: the visitor has spent too much time trying to get to destination
            dest = self._find_hot_spot(v_coordinates=visitor_coords[v*2:v*2+2], n_coordinates=node_positions,
                                       timeout=True, prev_dest=self.visitor_prev_dest[v])
            self.visitor_prev_dest[v] = dest
        elif is_arrv[v]:
            prev_dest = self.visitor_prev_dest[v]
            # Epsilon-greedy exploration: with probability epsilon the visitor
            # abandons hot-spot selection and wanders to a random position.
            if np.random.random_sample() < self.epsilon:
                dest = None
                self.visitor_prev_dest[v] = None
            else:
                if prev_dest is None:
                    # Visitor just arrived at a random position, so he/she moves to next location
                    dest = self._find_hot_spot(v_coordinates=visitor_coords[v*2:v*2+2],
                                               n_coordinates=node_positions)
                    self.visitor_prev_dest[v] = dest
                else:
                    # Visitor just arrived at a node, so he/she wants to stay as long as the light is ON
                    # When light is turned OFF, the visitor select a new destination
                    if obs[prev_dest] <= 0:
                        dest = self._find_hot_spot(v_coordinates=visitor_coords[v*2:v*2+2],
                                                   n_coordinates=node_positions)
                        self.visitor_prev_dest[v] = dest
                    else:
                        dest = self.visitor_prev_dest[v]
        else:
            # Still travelling: keep heading for the previous destination.
            dest = self.visitor_prev_dest[v]
        # Convert dest(int) into coordinates
        if dest is not None:
            # Found a destination
            # print("Destination = Node{}".format(dest))
            visitor_actions.append(node_positions[2 * dest])
            visitor_actions.append(node_positions[2 * dest + 1])
        else:
            # Random select a position in space
            # NOTE(review): [10.0, 6.5] looks like the arena half-extents — confirm.
            random_dest = np.random.uniform(low=-1, high=1, size=2) * np.array([10.0, 6.5])
            visitor_actions = visitor_actions + random_dest.tolist()
            # print("Destination = Random {}".format(random_dest))
    return visitor_actions
if __name__ == "__main__":
# This section is for testing Visitor_behaviour Class
visitor_bh = Visitor_behaviour_sequence(num_visitors=1)
visitor_bh.setup(coordinates=[0, 0, 0, 1.5, 1.5, 0, 1.5, 1.5])
print("\ndistance matrix:")
print(visitor_bh.dist_matrix)
print("\nDistance between visitor and lights:")
print(visitor_bh._cal_node_visitor_distance(v_coordinates=[0, 0], n_coordinates=[0, 0, 0, 1.5, 1.5, 0, 1.5, 1.5]))
for i in range(15):
visitor_bh.observation_buffer.append([0, 0, 1, 1])
for i in range(15):
visitor_bh.observation_buffer.append([1, 1, 1, 1])
for i in range(15):
visitor_bh.observation_buffer.append([1, 0, 0, 0])
print("\nHot spot:")
print(visitor_bh._find_hot_spot(v_coordinates=[0.75, 0.75], n_coordinates=[0, 0, 0, 1.5, 1.5, 0, 1.5, 1.5]))
| 2.96875 | 3 |
src/ui/ui_cfg_flexspinand.py | paotien833/NXP-MCUBootUtility | 0 | 12769387 | #! /usr/bin/env python
import wx
import sys
import os
import uivar
import uidef
sys.path.append(os.path.abspath(".."))
from win import bootDeviceWin_FlexspiNand
class secBootUiFlexspiNand(bootDeviceWin_FlexspiNand.bootDeviceWin_FlexspiNand):
    """Configuration dialog for the FlexSPI NAND boot device.

    Each ``_get*`` helper reads one widget selection and packs it into a bit
    field of the four option words (``flexspiNandOpt``, ``flexspiNandFcbOpt``,
    ``flexspiNandImageInfo``, ``flexspiNandKeyBlob``).  Apply persists the
    words via ``uivar``.  The numeric encodings below reproduce the original
    if/elif chains exactly.
    """

    # Widget text -> encoded field value tables.
    _FREQ_MAP = {'30MHz': 0x1, '50MHz': 0x2, '60MHz': 0x3,
                 '75MHz': 0x4, '80MHz': 0x5, '100MHz': 0x6}
    _PAGE_SIZE_MAP = {'2KB': 0x2, '4KB': 0x4}
    _PAGES_PER_BLOCK_MAP = {'64': 0x0, '128': 0x1, '256': 0x2, '32': 0x3}
    _FLASH_SIZE_MAP = {'512M': 0x0, '1GB': 0x1, '2GB': 0x2, '4GB': 0x4}
    _PLANES_MAP = {'1 plane': 0x0, '2 planes': 0x1}
    _ADDRESS_TYPE_MAP = {'byte address': 0x0, 'block address': 0x1}
    _SEARCH_STRIDE_MAP = {'64 pages': 0x0, '128 pages': 0x1,
                          '256 pages': 0x2, '32 pages': 0x3}
    _SEARCH_COUNT_MAP = {'1': 0x1, '2': 0x2, '3': 0x3, '4': 0x4}
    # NOTE(review): '10' encodes as 0x10 (decimal 16) while '3'..'9' encode as
    # their decimal value; this reproduces the original mapping — confirm with
    # the boot ROM FCB definition whether 0xA was intended instead.
    _FCB_SIZE_MAP = {'3': 0x3, '4': 0x4, '5': 0x5, '6': 0x6,
                     '7': 0x7, '8': 0x8, '9': 0x9, '10': 0x10}
    _TYPE_MAP = {'Update': 0x0, 'Program': 0x1}
    _DEK_SIZE_MAP = {'128bits': 0x0}

    def __init__(self, parent):
        bootDeviceWin_FlexspiNand.bootDeviceWin_FlexspiNand.__init__(self, parent)
        # Start from the last saved configuration words.
        flexspiNandOpt, flexspiNandFcbOpt, flexspiNandImageInfo, flexspiNandKeyBlob = \
            uivar.getBootDeviceConfiguration(uidef.kBootDevice_FlexspiNand)
        self.flexspiNandOpt = flexspiNandOpt
        self.flexspiNandFcbOpt = flexspiNandFcbOpt
        self.flexspiNandImageInfo = flexspiNandImageInfo
        self.flexspiNandKeyBlob = flexspiNandKeyBlob

    @staticmethod
    def _choiceText(choice):
        """Return the currently selected string of a wx.Choice widget."""
        return choice.GetString(choice.GetSelection())

    @staticmethod
    def _insertField(word, mask, shift, value):
        """Return ``word`` with ``value`` inserted at ``shift``; ``mask``
        selects the bits of ``word`` that are kept."""
        return (word & mask) | (value << shift)

    @staticmethod
    def _intChoice(txt, maximum=15):
        """Parse a small decimal choice string; None if invalid/out of range.

        The original if/elif chains fell through silently (and then crashed on
        an unbound ``val``) for unexpected text; returning None lets callers
        skip the update instead.
        """
        try:
            value = int(txt)
        except ValueError:
            return None
        return value if 0 <= value <= maximum else None

    def _getFrequence(self):
        """flexspiNandOpt[3:0] <- maximum FlexSPI frequency."""
        val = self._FREQ_MAP.get(self._choiceText(self.m_choice_Max_Freq))
        if val is not None:
            self.flexspiNandOpt = self._insertField(self.flexspiNandOpt, 0xFFFFFFF0, 0, val)

    def _getPageSize(self):
        """flexspiNandOpt[7:4] <- page size in KB."""
        val = self._PAGE_SIZE_MAP.get(self._choiceText(self.m_choice_Page_Size))
        if val is not None:
            self.flexspiNandOpt = self._insertField(self.flexspiNandOpt, 0xFFFFFF0F, 4, val)

    def _getPagePerBlock(self):
        """flexspiNandOpt[11:8] <- pages per block."""
        val = self._PAGES_PER_BLOCK_MAP.get(self._choiceText(self.m_choice_Pages))
        if val is not None:
            self.flexspiNandOpt = self._insertField(self.flexspiNandOpt, 0xFFFFF0FF, 8, val)

    def _getFlashSize(self):
        """flexspiNandOpt[19:16] <- total flash size."""
        val = self._FLASH_SIZE_MAP.get(self._choiceText(self.m_choice_Flash_size))
        if val is not None:
            self.flexspiNandOpt = self._insertField(self.flexspiNandOpt, 0xFFF0FFFF, 16, val)

    def _getMultiplane(self):
        """flexspiNandOpt[15:12] <- plane count."""
        val = self._PLANES_MAP.get(self._choiceText(self.m_choice_planes))
        if val is not None:
            self.flexspiNandOpt = self._insertField(self.flexspiNandOpt, 0xFFFF0FFF, 12, val)

    def _getOptionSize(self):
        """flexspiNandOpt[27:24] <- option block size (0-15)."""
        val = self._intChoice(self._choiceText(self.m_choice_Option_size))
        if val is not None:
            self.flexspiNandOpt = self._insertField(self.flexspiNandOpt, 0xF0FFFFFF, 24, val)

    def _getFCBSize(self):
        """flexspiNandFcbOpt[3:0] <- FCB size (see _FCB_SIZE_MAP note)."""
        val = self._FCB_SIZE_MAP.get(self._choiceText(self.m_choice_Size))
        if val is not None:
            self.flexspiNandFcbOpt = self._insertField(self.flexspiNandFcbOpt, 0xFFFFFFF0, 0, val)

    def _getAddressType(self):
        """flexspiNandFcbOpt[11:8] <- address type (byte/block)."""
        val = self._ADDRESS_TYPE_MAP.get(self._choiceText(self.m_choice_address_type))
        if val is not None:
            self.flexspiNandFcbOpt = self._insertField(self.flexspiNandFcbOpt, 0xFFFFF0FF, 8, val)

    def _getSearchStride(self):
        """flexspiNandFcbOpt[23:20] <- FCB search stride in pages."""
        val = self._SEARCH_STRIDE_MAP.get(self._choiceText(self.m_choice_search_stride))
        if val is not None:
            self.flexspiNandFcbOpt = self._insertField(self.flexspiNandFcbOpt, 0xFF0FFFFF, 20, val)

    def _getSearchCount(self):
        """flexspiNandFcbOpt[27:24] <- FCB search count."""
        val = self._SEARCH_COUNT_MAP.get(self._choiceText(self.m_choice_search_count))
        if val is not None:
            self.flexspiNandFcbOpt = self._insertField(self.flexspiNandFcbOpt, 0xF0FFFFFF, 24, val)

    def _getBlockCountandID(self):
        """flexspiNandImageInfo[15:0] <- block id, [31:16] <- block count.

        NOTE(review): like the original, the values are written even after a
        warning dialog was shown — confirm whether invalid input should abort.
        """
        val_block_count = int(self.m_textCtrl_block_count.GetLineText(0))
        val_block_id = int(self.m_textCtrl_block_id.GetLineText(0))
        if val_block_id > val_block_count:
            wx.MessageBox('Block ID Error', 'Confirm', wx.OK)
        if val_block_count > 8:
            wx.MessageBox('Max Block Number Error', 'Confirm', wx.OK)
        self.flexspiNandImageInfo = self._insertField(self.flexspiNandImageInfo, 0xFFFF0000, 0, val_block_id)
        self.flexspiNandImageInfo = self._insertField(self.flexspiNandImageInfo, 0x0000FFFF, 16, val_block_count)

    def _getImageIndex(self):
        """flexspiNandKeyBlob[3:0] <- image index (only when Type = Program)."""
        val = self._intChoice(self._choiceText(self.m_choice_image_index))
        if val is not None and (self.flexspiNandKeyBlob & 0x0F000000) == 0x01000000:
            self.flexspiNandKeyBlob = self._insertField(self.flexspiNandKeyBlob, 0xFFFFFFF0, 0, val)

    def _getDekSize(self):
        """flexspiNandKeyBlob[7:4] <- DEK size (only when Type = Update)."""
        val = self._DEK_SIZE_MAP.get(self._choiceText(self.m_choice_dek_size))
        if val is not None and (self.flexspiNandKeyBlob & 0x0F000000) == 0x00000000:
            self.flexspiNandKeyBlob = self._insertField(self.flexspiNandKeyBlob, 0xFFFFFF0F, 4, val)

    def _getKeyBlobInfoSize(self):
        """flexspiNandKeyBlob[23:20] <- keyblob info size (Update type only).

        For Type = Update the only accepted value is 3; anything else pops a
        warning dialog and leaves the field unchanged (original behavior).
        """
        txt = self._choiceText(self.m_choice_keyblob_infosize)
        val = self._intChoice(txt)
        if (self.flexspiNandKeyBlob & 0x0F000000) == 0x00000000:
            if txt != '3':
                wx.MessageBox('keyblob_info size must equal to 3 if Type = Update', 'Confirm', wx.OK )
            elif val is not None:
                self.flexspiNandKeyBlob = self._insertField(self.flexspiNandKeyBlob, 0xFF0FFFFF, 20, val)

    def _getType(self):
        """flexspiNandKeyBlob[27:24] <- keyblob operation type."""
        val = self._TYPE_MAP.get(self._choiceText(self.m_choice_type))
        if val is not None:
            self.flexspiNandKeyBlob = self._insertField(self.flexspiNandKeyBlob, 0xF0FFFFFF, 24, val)

    def cancel_of_FLEXSPI_NAND(self, event):
        """Discard the dialog without persisting anything."""
        self.Show(False)

    def apply_of_FLEXSPI_NAND(self, event):
        """Collect every widget selection into the option words and persist.

        Note: the original called ``_getPageSize()`` twice; once is enough.
        """
        self._getFrequence()
        self._getPageSize()
        self._getPagePerBlock()
        self._getFlashSize()
        self._getMultiplane()
        self._getOptionSize()
        self._getFCBSize()
        self._getAddressType()
        self._getSearchStride()
        self._getSearchCount()
        self._getBlockCountandID()
        self._getType()
        self._getImageIndex()
        self._getDekSize()
        self._getKeyBlobInfoSize()
        uivar.setBootDeviceConfiguration(uidef.kBootDevice_FlexspiNand, self.flexspiNandOpt, self.flexspiNandFcbOpt, self.flexspiNandImageInfo, self.flexspiNandKeyBlob)
        self.Show(False)

    def OnClose_FLEXSPI_NAND(self, event):
        """Ask for confirmation, then hide the dialog."""
        ret = wx.MessageBox('Do you really want to leave?', 'Confirm', wx.OK | wx.CANCEL)
        if ret == wx.OK:
            self.Show(False)
src/ml_tooling/storage/base.py | thomasfrederikhoeck/ml_tooling | 7 | 12769388 | <filename>src/ml_tooling/storage/base.py
import pathlib
from abc import ABCMeta, abstractmethod
from typing import List
from ml_tooling.utils import Pathlike, Estimator
class Storage(metaclass=ABCMeta):
    """
    Abstract base class describing the estimator storage interface.
    """

    @abstractmethod
    def load(self, file_path: Pathlike) -> Estimator:
        """
        Load an estimator from the storage backend.

        Concrete subclasses define how the pickled estimator at
        ``file_path`` is read back.

        Returns
        -------
        Estimator
            The unpickled estimator object.
        """
        raise NotImplementedError

    @abstractmethod
    def save(
        self, estimator: Estimator, file_path: Pathlike, prod: bool = False
    ) -> Pathlike:
        """
        Persist an estimator to the storage backend.

        Concrete subclasses define how ``estimator`` is pickled to
        ``file_path``, optionally marking it as a production artifact.

        Returns
        -------
        Pathlike
            Path where the pickled estimator was written.
        """
        raise NotImplementedError

    @abstractmethod
    def get_list(self) -> List[pathlib.Path]:
        """
        Enumerate the estimators available in the storage backend.

        Returns
        -------
        List[Path]
            Paths to the stored estimators, sorted lexically.
        """
        raise NotImplementedError
637_average_of_levels_in_binary_tree.py | Sanster/LeetCode | 2 | 12769389 | from utils import TreeNode
class Solution:
    def averageOfLevels(self, root: TreeNode):
        """
        BFS (breadth-first search): walk the tree level by level and record
        the average node value of each level.
        """
        averages = []
        current = [root] if root else []
        while current:
            # Average the values of the current level, then collect the
            # children that form the next level (empty at the last level).
            averages.append(sum(node.val for node in current) / len(current))
            next_level = []
            for node in current:
                if node.left:
                    next_level.append(node.left)
                if node.right:
                    next_level.append(node.right)
            current = next_level
        return averages
| 3.46875 | 3 |
public_21CMvFAST_MC/Programs/CosmoHammer_21CMMC/sampler/MpiCosmoHammerSampler.py | NNSSA/21cmvFAST | 5 | 12769390 | from .CosmoHammerSampler import CosmoHammerSampler
from collections import namedtuple
from cosmoHammer.util.SampleFileUtil import SampleFileUtil
from mpi4py import MPI
import emcee
import itertools
class MpiCosmoHammerSampler(CosmoHammerSampler):
    """
    A sampler implementation extending the regular sampler in order to allow for distributing
    the computation with MPI.

    :param kwargs:
        key word arguments passed to the CosmoHammerSampler
    """

    def __init__(self, **kwargs):
        """
        CosmoHammer sampler implementation.
        """
        self._rank = MPI.COMM_WORLD.Get_rank()

        super(MpiCosmoHammerSampler, self).__init__(**kwargs)
        self.M = self._getMapFunction()

    def _getMapFunction(self):
        """
        Return the map function used to evaluate a block of walker positions.

        Under Python 3 the builtin ``map`` returns a lazy iterator, which is
        not picklable and therefore cannot travel through
        ``MPI.COMM_WORLD.allgather``; wrap it so a concrete list is produced.
        """
        return lambda func, items: list(map(func, items))

    def createSampleFileUtil(self):
        """
        Returns a new instance of a File Util; only the master rank writes.
        """
        return SampleFileUtil(self.filePrefix, self.isMaster(), reuseBurnin=self.reuseBurnin)

    def sampleBurnin(self, p0):
        """
        Starts the burn-in sampling. The initial positions are broadcast so
        every rank starts identically; the master node (mpi rank = 0)
        persists the result to the disk.
        """
        p0 = self.mpiBCast(p0)

        self.log("MPI Process rank " + str(self._rank) + " starts sampling")
        return super(MpiCosmoHammerSampler, self).sampleBurnin(p0)

    def sample(self, burninPos, burninProb, burninRstate, datas):
        """
        Starts the sampling process. Burn-in results are broadcast first so
        all ranks agree; the master node (mpi rank = 0) persists the result
        to the disk.
        """
        burninPos = self.mpiBCast(burninPos)
        burninProb = self.mpiBCast(burninProb)
        burninRstate = self.mpiBCast(burninRstate)

        self.log("MPI Process rank " + str(self._rank) + " starts sampling")
        super(MpiCosmoHammerSampler, self).sample(burninPos, burninProb, burninRstate, datas)

    def loadBurnin(self):
        """
        Loads the burn-in from the file system.

        Only the master rank reads the files; the results are then broadcast
        to every other rank.
        """
        if self.isMaster():
            pos, prob, rstate = super(MpiCosmoHammerSampler, self).loadBurnin()
        else:
            # Fixed: the original ``pos, prob, rstate = []`` raised a
            # ValueError (unpacking an empty list) on every non-master rank.
            # These placeholders are overwritten by the broadcasts below.
            pos, prob, rstate = None, None, None

        pos = self.mpiBCast(pos)
        prob = self.mpiBCast(prob)
        rstate = self.mpiBCast(rstate)

        self.log("loading done")
        return pos, prob, rstate

    def createEmceeSampler(self, callable):
        """
        Factory method to create the emcee sampler.
        """
        self.log("Using emcee " + str(emcee.__version__))
        # Create a tuple to emulate a pool's map function using our
        # self.mpiParallelizedMap.
        pool = namedtuple('pool', ['map'])(self.mpiParallelizedMap)
        return emcee.EnsembleSampler(self.nwalkers, self.paramCount, callable,
                                     threads=self.threadCount, pool=pool)

    def createInitPos(self):
        """
        Factory method to create initial positions.
        """
        # Broadcast the positions to ensure that all mpi nodes start at the
        # same position.
        return self.mpiBCast(super(MpiCosmoHammerSampler, self).createInitPos())

    # MPI sync routines

    def mpiBCast(self, value):
        """
        Mpi bcasts the value and returns the value from the master (rank = 0).
        """
        return MPI.COMM_WORLD.bcast(value)

    def mpiParallelizedMap(self, function, items):
        """
        Emulates a pool map function using MPI.

        Every rank evaluates its own slice of the walker positions; the
        results are gathered on all ranks and merged back into one list.
        """
        (rank, size) = (MPI.COMM_WORLD.Get_rank(), MPI.COMM_WORLD.Get_size())
        # Sync: make sure all ranks work on the identical list.
        items = self.mpiBCast(items)
        # Split, process and merge the list.
        return self.mergeList(MPI.COMM_WORLD.allgather(self.M(function, self.splitList(items, size)[rank])))

    def splitList(self, items, n):
        """
        Splits ``items`` into ``n`` blocks of (approximately) equal size.
        (Parameter renamed from ``list`` to avoid shadowing the builtin.)
        """
        blockLen = len(items) / float(n)
        return [items[int(round(blockLen * i)): int(round(blockLen * (i + 1)))] for i in range(n)]

    def mergeList(self, lists):
        """
        Merges the per-rank result lists into one single list.
        """
        return list(itertools.chain(*lists))

    def isMaster(self):
        """
        Returns true if the rank is 0.
        """
        return (self._rank == 0)
objects/multigraph.py | cklb/jsongraph.py | 10 | 12769391 | from .graph import Graph
import json
import unittest
class Multigraph:
    """A named collection of Graph objects, serializable to the JSON Graph
    Format (a dict with optional 'type'/'label'/'metadata' and a 'graphs'
    list)."""

    GRAPHS = 'graphs'
    TYPE = 'type'
    LABEL = 'label'
    METADATA = 'metadata'

    def __init__(self, graphs=None, type=None, label=None, metadata=None):
        """Constructor of the Multigraph class.

        Arguments:
        graphs -- [Graph] list of Graph objects that are part of the multigraph (default None, treated as empty)
        type -- string (optionally) the typename of the multigraph (default None)
        label -- string (optionally) the label of the multigraph (default None)
        metadata -- dictionary (optionally) a dictionary representing the metadata that belongs to the multigraph (default None)
        """
        # Fixed: the original used the mutable default ``graphs=[]``.
        self._graphs = []
        self.set_graphs(graphs if graphs is not None else [])
        self._type = None
        if type is not None:
            self.set_type(type)
        self._label = None
        if label is not None:
            self.set_label(label)
        self._metadata = None
        if metadata is not None:
            self.set_metadata(metadata)

    def _isJsonSerializable(self, value):
        """Return True if ``value`` can be serialized by the json module."""
        try:
            json.dumps(value)
            return True
        except (TypeError, ValueError):
            return False

    def add_graph(self, graph):
        """Method to add a graph to the multigraph.

        Arguments:
        graph -- Graph the graph to add (None is silently ignored)
        """
        if graph is None:
            return
        if isinstance(graph, Graph):
            self._graphs.append(graph)
        else:
            raise TypeError("Adding graph to Multigraph failed: graph must of type Graph")

    def set_graphs(self, graphs):
        """Method to add a list of graphs.

        Arguments:
        graphs -- [Graph] the list of graphs that need to be added
        """
        for graph in graphs:
            self.add_graph(graph)

    def set_type(self, type):
        """Method to set the type of the multigraph.

        Arguments:
        type -- string the typename of the multigraph to set (string-castable values are converted)
        """
        if type is None:
            self._type = None
        elif isinstance(type, str):
            self._type = type
        else:
            try:
                self._type = str(type)
            except Exception as exception:
                # Fixed: the original bound the exception as ``excecption`` but
                # referenced ``exception``, raising a NameError instead of the
                # intended TypeError.
                raise TypeError("Type of type in Multigraph object needs to be a string (or string castable): " + str(exception))

    def set_label(self, label):
        """Method to set the label of the multigraph.

        Arguments:
        label -- string the labelname of the multigraph to set (string-castable values are converted)
        """
        if label is None:
            self._label = None
        elif isinstance(label, str):
            self._label = label
        else:
            try:
                self._label = str(label)
            except Exception as exception:
                # Fixed: same ``excecption``/``exception`` NameError as set_type.
                raise TypeError("Type of label in Multigraph object needs to be a string (or string castable): " + str(exception))

    def set_metadata(self, metadata):
        """Method to set the metadata of the multigraph.

        Arguments:
        metadata -- dictionary the metadata to set on the multigraph (must be json serializable)
        """
        if metadata is None:
            self._metadata = None
        elif isinstance(metadata, dict) and self._isJsonSerializable(metadata):
            self._metadata = metadata
        else:
            raise TypeError("metadata in Multigraph object needs to be json serializable")

    def get_graphs(self):
        """Method to get a list of all graphs in the multigraph.

        Returns:
        [Graph] list of graphs present in the multigraph
        """
        return self._graphs

    def get_type(self):
        """Method to get the type of the multigraph.

        Returns:
        string the typename of the multigraph if set, else None
        """
        return self._type

    def get_label(self):
        """Method to get the label of the multigraph.

        Returns:
        string the label of the multigraph if set, else None
        """
        return self._label

    def get_metadata(self):
        """Get the metadata of the multigraph.

        Returns:
        dictionary the metadata of the multigraph if set, else None
        """
        return self._metadata

    def to_JSON(self, asString=False):
        """Convert the multigraph to JSON.

        Creates a dictionary object of the multigraph conforming to the JSON
        Graph Format.

        Arguments:
        asString -- bool if set to True the method returns the JSON as string

        Returns:
        dictionary the multigraph as dictionary ready to serialize (or its
        string serialization when asString is True)
        """
        result = {}
        if self._label is not None:
            result[Multigraph.LABEL] = self._label
        if self._type is not None:
            result[Multigraph.TYPE] = self._type
        if self._metadata is not None:
            result[Multigraph.METADATA] = self._metadata
        result[Multigraph.GRAPHS] = [graph.to_JSON() for graph in self._graphs]
        if asString:
            return json.dumps(result)
        return result
class TestMultigraphClass(unittest.TestCase):
    """Unit tests for Multigraph construction, setters and serialization."""

    def test_base(self):
        """Constructor arguments must be reflected by the getters."""
        graph = Graph([], [], 'graphType', 'graphLabel', True, {'metaNumber': 11, 'metaString': 'hello world'})
        mgraph = Multigraph([graph], 'multigraphType', 'multigraphLabel', {'metaNumber': 11, 'metaString': 'hello world'})
        self.assertEqual(mgraph.get_type(), 'multigraphType')
        self.assertEqual(mgraph.get_label(), 'multigraphLabel')
        self.assertEqual(mgraph.get_metadata()['metaNumber'], 11)
        self.assertEqual(mgraph.get_metadata()['metaString'], 'hello world')
        self.assertEqual(mgraph.get_graphs()[0], graph)

    def test_setters(self):
        """Setters must overwrite the values supplied at construction."""
        graph = Graph([], [], 'graphType', 'graphLabel', True, {'metaNumber': 11, 'metaString': 'hello world'})
        mgraph = Multigraph([], 'multigraphType', 'multigraphLabel', {'metaNumber': 11, 'metaString': 'hello world'})
        mgraph.set_label('new_multigraphLabel')
        mgraph.set_type('new_multigraphType')
        mgraph.set_metadata({'new_metaNumber': 13, 'new_metaString': 'world hello'})
        mgraph.set_graphs([graph])
        self.assertEqual(mgraph.get_type(), 'new_multigraphType')
        self.assertEqual(mgraph.get_label(), 'new_multigraphLabel')
        self.assertEqual(mgraph.get_metadata()['new_metaNumber'], 13)
        self.assertEqual(mgraph.get_metadata()['new_metaString'], 'world hello')
        self.assertEqual(mgraph.get_graphs()[0], graph)

    #TODO make unit test complete
    def test_to_JSON(self):
        """Placeholder: serialization is not actually asserted yet."""
        self.assertEqual("TODO", "TODO")
    #TODO unittest json result
if __name__ == '__main__':
    # Run the unit tests when this module is executed as a script.
    unittest.main()
utils.py | hyperionfalling/final_assignment | 1 | 12769392 | """
-*- coding:utf-8 -*-
@Time :2019/11/27 下午4:54
@Author :wts
@File :utils.py
@Version:1.0
"""
import torch
import torch.utils.data.sampler as Sampler
import torch.nn as nn
import torch.nn.functional as F
class Mish(nn.Module):
    """Mish activation: ``x * tanh(softplus(x))``."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # softplus keeps the inner term positive and smooth; tanh bounds it.
        return x * torch.tanh(F.softplus(x))
class MseLoss(nn.Module):
    """Mean-squared-error loss module.

    The original class defined no ``forward``, so calling the module raised
    ``NotImplementedError``; this implements the standard MSE
    (mean of the squared element-wise differences).
    """

    def __init__(self):
        super(MseLoss, self).__init__()

    def forward(self, prediction, target):
        """Return ``mean((prediction - target) ** 2)`` over all elements."""
        return F.mse_loss(prediction, target)
test_mindfuck.py | pradeepchhetri/mindfuck | 1 | 12769393 | #!/usr/bin/env python
import os
import pytest
import mindfuck
def load_from_file(brainfuck_code):
    """Read and return the contents of ``data/<brainfuck_code>`` next to
    this test module."""
    module_dir = os.path.dirname(__file__)
    data_path = os.path.join(module_dir, 'data/%s' % brainfuck_code)
    with open(data_path, 'r') as handle:
        return handle.read()
def test_hello_world(capfd):
    """Run helloworld.bf through the interpreter and check its stdout."""
    code = load_from_file("helloworld.bf")
    mindfuck.eval(code)
    # capfd captures the interpreter's writes to stdout/stderr.
    output, error = capfd.readouterr()
    assert output=="Hello World!\n"
def test_foobar(capfd):
    """Run foobar.bf through the interpreter and check its stdout.

    NOTE(review): the assertion checks the output is *not* equal to
    "foobar" — confirm the inequality is intentional and not a typo
    for ``==``.
    """
    code = load_from_file("foobar.bf")
    mindfuck.eval(code)
    output, error = capfd.readouterr()
    assert output!="foobar"
conbench/tests/api/test_auth.py | jonkeane/conbench | 48 | 12769394 | <reponame>jonkeane/conbench<filename>conbench/tests/api/test_auth.py
import copy
from ...tests.api import _asserts
class TestLoginPost(_asserts.PostEnforcer):
    """API tests for POST /api/login/: session creation, re-login and the
    various bad-credential cases."""

    url = "/api/login/"
    required_fields = ["email", "password"]
    valid_payload = {"email": "<EMAIL>", "password": "<PASSWORD>"}

    def test_unauthenticated(self, client):
        """Logging in grants access to an authenticated-only endpoint."""
        self._create_fixture_user()

        # cannot get users before login
        response = client.get("/api/users/")
        self.assert_401_unauthorized(response)

        # login
        response = client.post("/api/login/", json=self.valid_payload)
        self.assert_204_no_content(response)

        # can get users after login
        response = client.get("/api/users/")
        self.assert_200_ok(response)

    def test_already_authenticated_good_credentials(self, client):
        """Re-logging in with good credentials keeps the session valid."""
        # already logged in
        self.authenticate(client)

        # can get users before re-login
        response = client.get("/api/users/")
        self.assert_200_ok(response)

        # login
        response = client.post("/api/login/", json=self.valid_payload)
        self.assert_204_no_content(response)

        # can get users after re-login
        response = client.get("/api/users/")
        self.assert_200_ok(response)

    def test_already_authenticated_bad_credentials(self, client):
        """A failed re-login invalidates the existing session."""
        # already logged in
        self.authenticate(client)

        # can get users before re-login
        response = client.get("/api/users/")
        self.assert_200_ok(response)

        # login (with bad credentials)
        data = copy.deepcopy(self.valid_payload)
        data["password"] = "<PASSWORD>"
        response = client.post("/api/login/", json=data)
        message = {"_errors": ["Invalid email or password."]}
        self.assert_400_bad_request(response, message)

        # cannot get users after bad re-login
        response = client.get("/api/users/")
        self.assert_401_unauthorized(response)

    def test_bad_credentials(self, client):
        """A wrong password yields a generic 400 error."""
        data = self.valid_payload.copy()
        data["password"] = "<PASSWORD>"
        response = client.post("/api/login/", json=data)
        message = {"_errors": ["Invalid email or password."]}
        self.assert_400_bad_request(response, message)

    def test_unknown_email(self, client):
        """An unknown email yields the same generic 400 error (no user
        enumeration)."""
        data = self.valid_payload.copy()
        data["email"] = "<EMAIL>"
        response = client.post("/api/login/", json=data)
        message = {"_errors": ["Invalid email or password."]}
        self.assert_400_bad_request(response, message)

    def test_invalid_email_address(self, client):
        """A syntactically invalid email is rejected by schema validation."""
        data = self.valid_payload.copy()
        data["email"] = "not-an-email-address"
        response = client.post("/api/login/", json=data)
        message = {"email": ["Not a valid email address."]}
        self.assert_400_bad_request(response, message)
class TestLogoutGet(_asserts.ApiEndpointTest):
    """API tests for GET /api/logout/: it always succeeds (204) and ends any
    active session."""

    def test_authenticated(self, client):
        """Logging out revokes access for an authenticated client."""
        self.authenticate(client)

        # can get users before logout
        response = client.get("/api/users/")
        self.assert_200_ok(response)

        # logout
        response = client.get("/api/logout/")
        self.assert_204_no_content(response)

        # cannot get users after logout
        response = client.get("/api/users/")
        self.assert_401_unauthorized(response)

    def test_unauthenticated(self, client):
        """Logout is a no-op (but still 204) for an unauthenticated client."""
        # cannot get users before logout
        response = client.get("/api/users/")
        self.assert_401_unauthorized(response)

        # logout
        response = client.get("/api/logout/")
        self.assert_204_no_content(response)

        # cannot get users after logout
        response = client.get("/api/users/")
        self.assert_401_unauthorized(response)
tests/test_zonecontrolhumidistat.py | marcelosalles/pyidf | 19 | 12769395 | <filename>tests/test_zonecontrolhumidistat.py
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.zone_hvac_controls_and_thermostats import ZoneControlHumidistat
log = logging.getLogger(__name__)
class TestZoneControlHumidistat(unittest.TestCase):
    """Round-trip test: write a ZoneControl:Humidistat object to an IDF file
    and verify every field survives re-parsing."""

    def setUp(self):
        # Temporary file that receives the generated IDF.
        self.fd, self.path = tempfile.mkstemp()

    def tearDown(self):
        os.remove(self.path)

    def test_create_zonecontrolhumidistat(self):
        """Create, save and reload a humidistat; all fields must match."""
        pyidf.validation_level = ValidationLevel.error

        obj = ZoneControlHumidistat()
        # alpha
        var_name = "Name"
        obj.name = var_name
        # object-list
        var_zone_name = "object-list|Zone Name"
        obj.zone_name = var_zone_name
        # object-list
        var_humidifying_relative_humidity_setpoint_schedule_name = "object-list|Humidifying Relative Humidity Setpoint Schedule Name"
        obj.humidifying_relative_humidity_setpoint_schedule_name = var_humidifying_relative_humidity_setpoint_schedule_name
        # object-list
        var_dehumidifying_relative_humidity_setpoint_schedule_name = "object-list|Dehumidifying Relative Humidity Setpoint Schedule Name"
        obj.dehumidifying_relative_humidity_setpoint_schedule_name = var_dehumidifying_relative_humidity_setpoint_schedule_name

        idf = IDF()
        idf.add(obj)
        idf.save(self.path, check=False)

        with open(self.path, mode='r') as f:
            for line in f:
                log.debug(line.strip())

        # Reload from disk and compare every field against what was written.
        idf2 = IDF(self.path)
        self.assertEqual(idf2.zonecontrolhumidistats[0].name, var_name)
        self.assertEqual(idf2.zonecontrolhumidistats[0].zone_name, var_zone_name)
        self.assertEqual(idf2.zonecontrolhumidistats[0].humidifying_relative_humidity_setpoint_schedule_name, var_humidifying_relative_humidity_setpoint_schedule_name)
        self.assertEqual(idf2.zonecontrolhumidistats[0].dehumidifying_relative_humidity_setpoint_schedule_name, var_dehumidifying_relative_humidity_setpoint_schedule_name)
exploration-scripts/error_distribution.py | alecokas/BiLatticeRNN-data-processing | 5 | 12769396 | <gh_stars>1-10
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
import re
from scipy import stats
import sys
def read_pickle(file_name):
    """Deserialize and return the object stored in ``file_name``.

    NOTE(review): pickle.load on untrusted files can execute arbitrary
    code — only use on files this pipeline produced.
    """
    with open(file_name, "rb") as pickle_file:
        return pickle.load(pickle_file)
def difference_error_fn(x1, x2):
    """Signed difference ``float(x1) - float(x2)``."""
    return float(x1) - float(x2)


def curate_error_data(lattice_start_time, lattice_stop_time, cn_start_time, cn_stop_time):
    """Return ``[start_time_error, stop_time_error, duration_error]``.

    Each error is confusion-network time minus lattice time, so the columns
    of the resulting array are the start-time, stop-time and duration
    errors.
    """
    start_time_error = difference_error_fn(cn_start_time, lattice_start_time)
    stop_time_error = difference_error_fn(cn_stop_time, lattice_stop_time)
    duration_error = difference_error_fn(
        float(cn_stop_time) - float(cn_start_time),
        float(lattice_stop_time) - float(lattice_start_time),
    )
    return [start_time_error, stop_time_error, duration_error]


def get_errors(input_file):
    """Parse a matcher log and return an (n, 3) array of timing errors.

    Every line containing 'Lattice start time' must be immediately followed
    by a 'Confnet start time:' line; the two decimal floats on each line are
    the start and stop times.

    Fixed: the original pattern ``[0-9]+.[0-9]+`` used an unescaped '.' and
    therefore also matched strings such as '12x34'.
    """
    float_pattern = re.compile(r'[0-9]+\.[0-9]+')
    errors = []
    with open(input_file, 'r') as log_file:
        lat_marker = 'Lattice start time'
        cn_marker = 'Confnet start time:'
        for line in log_file:
            if lat_marker in line:
                lattice_start_time, lattice_stop_time = float_pattern.findall(line)
                # Use a default so a truncated file raises the format error
                # below instead of an opaque StopIteration.
                cn_line = next(log_file, '')
                if not cn_marker in cn_line:
                    raise Exception('Unexpected format. A lattice line should always be followed by a confnet line')
                cn_start_time, cn_stop_time = float_pattern.findall(cn_line)
                errors.append(curate_error_data(lattice_start_time, lattice_stop_time, cn_start_time, cn_stop_time))
    return np.array(errors)
def save_statistics(error_array, target_file_name):
    """Compute per-error-type descriptive statistics and pickle them.

    ``error_array`` rows are the start-time, end-time and duration error
    series; exact matches (zeros) are dropped before computing statistics.
    The result dict is written to ``target_file_name + '.pickle'``.
    """
    pickle_path = target_file_name + '.pickle'
    # Remove a stale output file if present.  (The original removed
    # ``target_file_name`` WITHOUT the '.pickle' suffix, which never matched
    # the file actually written below.)
    try:
        os.remove(pickle_path)
    except OSError:
        pass

    stats_dict = {}
    for errors, label in zip(error_array, ('Start Time', 'End Time', 'Duration')):
        non_zero_errors = [e for e in errors if e != 0]
        stats_dict[label] = stats.describe(non_zero_errors)

    # Write
    with open(pickle_path, 'wb') as tgt_file:
        pickle.dump(stats_dict, tgt_file, protocol=pickle.HIGHEST_PROTOCOL)
def remove_outliers(error_list):
    """Return the values of ``error_list`` lying strictly inside
    mean ± 2·std (bounds computed from the full input list)."""
    values = np.array(error_list)
    center = np.mean(values, axis=0)
    spread = np.std(values, axis=0)
    lower, upper = center - 2 * spread, center + 2 * spread
    return [value for value in error_list if lower < value < upper]
def plot_distributions(error_array, directory):
    """Plot and save the empirical probability mass distribution of the
    start, end, and duration time errors.

    One histogram per error type is written to
    ``directory/distribution-<i>``.  Fixes vs. the original: ``file_name``
    was computed but never used (only ``plt.show()`` was called, so nothing
    was saved); ``plt.ylim(ymax=...)`` uses a keyword removed in
    matplotlib >= 3.0 (now ``top=``); figures were never closed and
    accumulated in memory.
    """
    for i, (errors, label) in enumerate(zip(error_array, ('Start Time', 'End Time', 'Duration'))):
        # Drop exact matches (zeros), then clip outliers for a readable plot.
        errors = list(filter(lambda a: a != 0, errors))
        errors = remove_outliers(errors)
        fig = plt.figure()
        n, bins, patches = plt.hist(x=errors, bins='auto', color='#0504aa',
                                    alpha=0.7, rwidth=0.85, density=True)
        plt.grid(axis='y', alpha=0.75)
        plt.xlabel('Time Difference (s)', fontsize=14)
        plt.ylabel('Normalised Probability Mass', fontsize=14)
        plt.ylim(top=np.max(n))
        file_name = os.path.join(directory, 'distribution-{}'.format(i))
        fig.savefig(file_name)
        plt.show()
        plt.close(fig)  # release the figure to avoid accumulating memory
def main(args):
    """Entry point: parse the log, plot the error distributions and save the
    descriptive statistics to the output directory."""
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    error_array = get_errors(args.input_file)
    # After transposing, rows are the start/end/duration error series.
    plot_distributions(np.transpose(error_array), args.output_dir)
    save_statistics(np.transpose(error_array), target_file_name=os.path.join(args.output_dir,'error-stats'))
def parse_arguments(args_to_parse):
    """ Parse the command line arguments.
    """
    parser = argparse.ArgumentParser(
        description="Determine statistics on the arc matching errors")
    parser.add_argument(
        '-o', '--output-dir', type=str, required=True,
        help='The directory to save the error distribution information.'
    )
    parser.add_argument(
        '-i', '--input-file', type=str, required=True,
        help='The path to the log file from which to extract the statistics'
    )
    return parser.parse_args(args_to_parse)
if __name__ == '__main__':
    # Entry point: parse CLI arguments (program name excluded) and run.
    args = parse_arguments(sys.argv[1:])
    main(args)
thenewboston_node/business_logic/tests/test_blockchain_base/test_validate_root_account_file.py | nishp77/thenewboston-node | 30 | 12769397 | import pytest
from thenewboston_node.business_logic.exceptions import ValidationError
from thenewboston_node.business_logic.tests.baker_factories import (
make_account_state, make_blockchain_state, make_genesis_blockchain_state
)
from thenewboston_node.business_logic.tests.mocks.utils import patch_blockchain_states, patch_blocks
def test_blockchain_blockchain_genesis_state_is_validated(blockchain_base):
    """A blockchain containing only the genesis state passes full validation."""
    blockchain_genesis_state = make_genesis_blockchain_state()
    with patch_blockchain_states(blockchain_base, [blockchain_genesis_state]):
        blockchain_base.validate_blockchain_states(is_partial_allowed=False)
def test_blockchain_without_blockchain_genesis_state_is_validated(blockchain_base):
    """Partial validation accepts a blockchain starting at a non-initial state."""
    non_initial_blockchain_state = make_blockchain_state()
    with patch_blockchain_states(blockchain_base, [non_initial_blockchain_state]):
        blockchain_base.validate_blockchain_states(is_partial_allowed=True)
def test_blockchain_must_have_at_least_blockchain_genesis_state(blockchain_base):
    """An empty list of blockchain states is rejected."""
    with patch_blockchain_states(blockchain_base, []):
        with pytest.raises(ValidationError, match='Blockchain must contain at least one blockchain state'):
            blockchain_base.validate_blockchain_states()
def test_blockchain_must_start_with_blockchain_genesis_state(blockchain_base):
    """Full (non-partial) validation rejects a first state that is not the genesis state."""
    non_initial_blockchain_state = make_blockchain_state()
    with patch_blockchain_states(blockchain_base, [non_initial_blockchain_state]):
        with pytest.raises(ValidationError, match='Blockchain must start with initial blockchain state'):
            blockchain_base.validate_blockchain_states(is_partial_allowed=False)
def test_validate_blockchain_state_points_to_non_existing_block(blockchain_base, blockchain_genesis_state, block_0):
    """A blockchain state whose last_block_number references a missing block fails validation."""
    with patch_blocks(blockchain_base, [block_0]):
        blockchain_state_5 = blockchain_base.generate_blockchain_state()
        # Point past the only existing block (block 0) to trigger the error.
        blockchain_state_5.last_block_number = 5
        with patch_blockchain_states(blockchain_base, [blockchain_genesis_state, blockchain_state_5]):
            with pytest.raises(
                ValidationError, match='Blockchain state last_block_number points to non-existing block'
            ):
                blockchain_base.validate_blockchain_states(is_partial_allowed=True)
def test_validate_blockchain_state_last_block_identifier_mismatch(blockchain_base, blockchain_genesis_state, block_0):
    """A blockchain state with a corrupted last_block_identifier fails validation."""
    with patch_blocks(blockchain_base, [block_0]):
        blockchain_state_0 = blockchain_base.generate_blockchain_state()
        blockchain_state_0.last_block_identifier = 'wrong-identifier'
        with patch_blockchain_states(blockchain_base, [blockchain_genesis_state, blockchain_state_0]):
            with pytest.raises(
                ValidationError, match='Blockchain state last_block_identifier does not match block_identifier'
            ):
                blockchain_base.validate_blockchain_states(is_partial_allowed=True)
def test_validate_blockchain_state_next_block_identifier_mismatch(blockchain_base, blockchain_genesis_state, block_0):
    """A blockchain state with a corrupted next_block_identifier fails validation."""
    with patch_blocks(blockchain_base, [block_0]):
        blockchain_state_0 = blockchain_base.generate_blockchain_state()
        blockchain_state_0.next_block_identifier = 'wrong-identifier'
        with patch_blockchain_states(blockchain_base, [blockchain_genesis_state, blockchain_state_0]):
            with pytest.raises(
                ValidationError,
                match='Blockchain state next_block_identifier does not match last_block_number message hash'
            ):
                blockchain_base.validate_blockchain_states(is_partial_allowed=True)
def test_validate_node_is_declared_if_pv_schedule_is_set(blockchain_base):
    """An account state lacking its node declaration fails validation."""
    account_state = make_account_state()
    account_state.node = None
    blockchain_genesis_state = make_genesis_blockchain_state(message__account_states={'00000': account_state})
    with patch_blockchain_states(blockchain_base, [blockchain_genesis_state]):
        with pytest.raises(ValidationError, match='Account state node must be set'):
            blockchain_base.validate_blockchain_states(is_partial_allowed=False)
| 2.25 | 2 |
octopus_deploy_swagger_client/models/package_notes_result.py | cvent/octopus-deploy-api-client | 0 | 12769398 | <gh_stars>0
# coding: utf-8
"""
Octopus Server API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2019.6.7+Branch.tags-2019.6.7.Sha.aa18dc6809953218c66f57eff7d26481d9b23d6a
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PackageNotesResult(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    # Auto-generated swagger model: keep hand edits limited to comments,
    # as regeneration will overwrite this file.
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'succeeded': 'bool',
        'notes': 'str',
        'failure_reason': 'str',
        'display_message': 'str'
    }

    attribute_map = {
        'succeeded': 'Succeeded',
        'notes': 'Notes',
        'failure_reason': 'FailureReason',
        'display_message': 'DisplayMessage'
    }

    def __init__(self, succeeded=None, notes=None, failure_reason=None, display_message=None):  # noqa: E501
        """PackageNotesResult - a model defined in Swagger"""  # noqa: E501

        self._succeeded = None
        self._notes = None
        self._failure_reason = None
        self._display_message = None
        self.discriminator = None

        # Assign only the attributes explicitly provided by the caller.
        if succeeded is not None:
            self.succeeded = succeeded
        if notes is not None:
            self.notes = notes
        if failure_reason is not None:
            self.failure_reason = failure_reason
        if display_message is not None:
            self.display_message = display_message

    @property
    def succeeded(self):
        """Gets the succeeded of this PackageNotesResult.  # noqa: E501


        :return: The succeeded of this PackageNotesResult.  # noqa: E501
        :rtype: bool
        """
        return self._succeeded

    @succeeded.setter
    def succeeded(self, succeeded):
        """Sets the succeeded of this PackageNotesResult.


        :param succeeded: The succeeded of this PackageNotesResult.  # noqa: E501
        :type: bool
        """

        self._succeeded = succeeded

    @property
    def notes(self):
        """Gets the notes of this PackageNotesResult.  # noqa: E501


        :return: The notes of this PackageNotesResult.  # noqa: E501
        :rtype: str
        """
        return self._notes

    @notes.setter
    def notes(self, notes):
        """Sets the notes of this PackageNotesResult.


        :param notes: The notes of this PackageNotesResult.  # noqa: E501
        :type: str
        """

        self._notes = notes

    @property
    def failure_reason(self):
        """Gets the failure_reason of this PackageNotesResult.  # noqa: E501


        :return: The failure_reason of this PackageNotesResult.  # noqa: E501
        :rtype: str
        """
        return self._failure_reason

    @failure_reason.setter
    def failure_reason(self, failure_reason):
        """Sets the failure_reason of this PackageNotesResult.


        :param failure_reason: The failure_reason of this PackageNotesResult.  # noqa: E501
        :type: str
        """

        self._failure_reason = failure_reason

    @property
    def display_message(self):
        """Gets the display_message of this PackageNotesResult.  # noqa: E501


        :return: The display_message of this PackageNotesResult.  # noqa: E501
        :rtype: str
        """
        return self._display_message

    @display_message.setter
    def display_message(self, display_message):
        """Sets the display_message of this PackageNotesResult.


        :param display_message: The display_message of this PackageNotesResult.  # noqa: E501
        :type: str
        """

        self._display_message = display_message

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated guard: include raw items when the model subclasses dict.
        if issubclass(PackageNotesResult, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PackageNotesResult):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 1.875 | 2 |
bin/disco_vpc_ui.py | Angakkuq/asiaq-aws | 0 | 12769399 | <reponame>Angakkuq/asiaq-aws
#!/usr/bin/env python
"""
Command line tool for creating and destroying VPC's
"""
from __future__ import print_function
import logging
import argparse
import sys
from disco_aws_automation import DiscoVPC
from disco_aws_automation.disco_aws_util import run_gracefully
from disco_aws_automation.disco_logging import configure_logging
def parse_arguments():
    """Read in options passed in over command line"""
    parser = argparse.ArgumentParser(description='AWS VPC automation')
    parser.add_argument('--debug', dest='debug', action='store_const',
                        const=True, default=False, help='Log in debug level.')
    subparsers = parser.add_subparsers(help='Sub-command help')

    # "create": build a new VPC-based environment from a configured type.
    parser_create = subparsers.add_parser('create', help='Create new VPC based environmnet')
    parser_create.set_defaults(mode="create")
    parser_create.add_argument('--name', dest='vpc_name', required=True,
                               help='What to call the new environment.')
    parser_create.add_argument('--type', dest='vpc_type', required=True,
                               help='What type of environment to create (as defined in config).')

    # "destroy": tear down an environment, identified by name XOR vpc-id.
    parser_destroy = subparsers.add_parser(
        'destroy', help='Delete environment releasing all non-persistent resources.')
    parser_destroy.set_defaults(mode='destroy')
    parser_destroy_group = parser_destroy.add_mutually_exclusive_group(required=True)
    parser_destroy_group.add_argument('--name', dest='vpc_name', default=None,
                                      help="The name of the environment that ought to be destroyed.")
    parser_destroy_group.add_argument('--vpc-id', dest='vpc_id', default=None,
                                      help="The VPC ID of the environment that ought to be destroyed.")

    # "list": enumerate existing VPCs, optionally with their env type.
    parser_list = subparsers.add_parser('list', help='List all current VPCs')
    parser_list.set_defaults(mode="list")
    parser_list.add_argument('--type', dest='env_type', action='store_const',
                             const=True, default=False, help='Print env type')

    # "peerings": create, delete or list VPC peering connections.
    parser_peerings = subparsers.add_parser('peerings', help='operation on vpc peerings')
    parser_peerings.set_defaults(mode="peerings")
    parser_peerings.add_argument(
        '--create', dest='create_peerings', action='store_const',
        const=True, default=False,
        help='Create peerings between the VPCs that currently exist, as configured in disco_vpc.ini')
    parser_peerings.add_argument('--delete', dest='delete_peerings', action='store_const',
                                 const=True, default=False,
                                 help='Delete all existing VPC peerings')
    parser_peerings.add_argument('--list', dest='list_peerings', action='store_const',
                                 const=True, default=False,
                                 help='List all VPC peerings')
    parser_peerings.add_argument('--name', dest='vpc_name', required=False, default=None,
                                 help='The VPC Name of the environment for VPC peering operation')
    parser_peerings.add_argument('--vpc-id', dest='vpc_id', required=False, default=None,
                                 help="The VPC ID of the environment for VPC peering operation")

    return parser.parse_args()
def create_vpc_command(args):
    """ handle vpc create command actions"""
    # Refuse to create a duplicate environment; exit code 1 on name clash.
    if DiscoVPC.fetch_environment(environment_name=args.vpc_name):
        logging.error("VPC with same name already exists.")
        sys.exit(1)
    else:
        vpc = DiscoVPC(args.vpc_name, args.vpc_type)
        logging.info("VPC %s(%s) has been created", args.vpc_name, vpc.vpc.id)
def destroy_vpc_command(args):
    """ handle vpc destroy command actions"""
    # The argparse group guarantees exactly one of vpc_name / vpc_id is set.
    if args.vpc_name:
        vpc = DiscoVPC.fetch_environment(environment_name=args.vpc_name)
    else:
        vpc = DiscoVPC.fetch_environment(vpc_id=args.vpc_id)

    if vpc:
        vpc.destroy()
    else:
        # Exit code 2 signals "environment not found" to calling scripts.
        logging.error("No matching VPC found")
        sys.exit(2)
def list_vpc_command(args):
    """ handle list vpcs command actions """
    # One line per VPC: id, name, and (optionally) the environment type tag.
    for vpc_env in DiscoVPC.list_vpcs():
        line = u"{0}\t{1:<15}".format(vpc_env.id, vpc_env.tags.get("Name", "-"))
        if args.env_type:
            line += u"\t{0}".format(vpc_env.tags.get("type", "-"))
        print(line)
def proxy_peerings_command(args):
    """ handle peerings command actions"""
    # --name and --vpc-id are mutually exclusive ways to scope the operation;
    # with neither, the operation applies across all VPCs (vpc_id=None).
    if args.vpc_name and args.vpc_id:
        logging.error("Don't use vpc_name and vpc_id at the same time.")
        sys.exit(2)

    if args.vpc_name:
        vpc_id = DiscoVPC.find_vpc_id_by_name(args.vpc_name)
    elif args.vpc_id:
        vpc_id = args.vpc_id
    else:
        vpc_id = None

    if args.list_peerings:
        # Sort peerings by the accepter VPC's Name tag for stable output.
        vpc_map = {vpc.id: vpc for vpc in DiscoVPC.list_vpcs()}
        peerings = sorted(
            DiscoVPC.list_peerings(vpc_id, include_failed=True),
            key=lambda p: vpc_map.get(p.accepter_vpc_info.vpc_id).tags.get("Name"))
        for peering in peerings:
            vpc1 = vpc_map.get(peering.accepter_vpc_info.vpc_id)
            vpc2 = vpc_map.get(peering.requester_vpc_info.vpc_id)
            line = u"{0:<14} {1:<8} {2:<20} {3:<21}".format(
                peering.id, peering.status_code, "{}<->{}".format(
                    vpc1.tags.get("Name"), vpc2.tags.get("Name")),
                "{}<->{}".format(
                    peering.accepter_vpc_info.cidr_block,
                    peering.requester_vpc_info.cidr_block))
            print(line)
    elif args.delete_peerings:
        DiscoVPC.delete_peerings(vpc_id)
    elif args.create_peerings:
        peering_configs = DiscoVPC.parse_peerings_config(vpc_id)
        DiscoVPC.create_peering_connections(peering_configs)
def run():
    """Parses command line and dispatches the commands"""
    args = parse_arguments()
    configure_logging(args.debug)
    # Map each sub-command mode to its handler; unknown modes are a no-op,
    # matching the original if/elif chain falling through.
    handlers = {
        "create": create_vpc_command,
        "destroy": destroy_vpc_command,
        "list": list_vpc_command,
        "peerings": proxy_peerings_command,
    }
    handler = handlers.get(args.mode)
    if handler is not None:
        handler(args)
if __name__ == "__main__":
    # Delegate to run_gracefully (presumably wraps `run` with signal/error
    # handling -- see disco_aws_util).
    run_gracefully(run)
| 2.59375 | 3 |
src/litmos/api.py | alexeiser/python-litmos-api | 0 | 12769400 | <filename>src/litmos/api.py
import html
import json
import time
import requests
class API(object):
    """Class-level client for the Litmos REST API (v1).

    Set ``API.api_key`` and ``API.app_name`` before calling any method.
    All payloads are JSON; list endpoints are paginated in chunks of
    ``PAGINATION_OFFSET`` items.
    """
    ROOT_URL = 'https://api.litmos.com/v1.svc'
    PAGINATION_OFFSET = 200

    api_key = None
    app_name = None

    @classmethod
    def _base_url(cls, resource, **kwargs):
        """Build the full request URL for *resource*.

        Recognised kwargs: resource_id, sub_resource, sub_resource_id,
        search_param, limit, start.
        """
        # NOTE(review): query values are concatenated without URL-encoding;
        # confirm callers never pass values containing '&', '=' or spaces.
        return cls.ROOT_URL + "/" + \
            resource + \
            ("/" + kwargs['resource_id'] if kwargs.get('resource_id', None) else "") + \
            ("/" + kwargs['sub_resource'] if kwargs.get('sub_resource', None) else "") + \
            ("/" + kwargs['sub_resource_id'] if kwargs.get('sub_resource_id', None) else "") + \
            '?source=' + cls.app_name + \
            '&format=json' + \
            ("&search=" + str(kwargs['search_param']) if kwargs.get('search_param', None) else "") + \
            ("&limit=" + str(kwargs['limit']) if kwargs.get('limit', None) else "") + \
            ("&start=" + str(kwargs['start']) if kwargs.get('start', None) else "")

    @classmethod
    def _perform_request(cls, method, url, **kwargs):
        """Send an authenticated HTTP request, retrying once on 503.

        Raises requests.HTTPError for any non-success status.
        """
        kwargs['headers'] = {'apikey': cls.api_key}

        response = requests.request(method, url, **kwargs)
        if response.status_code == 503:  # request rate limit exceeded
            time.sleep(60)
            response = requests.request(method, url, **kwargs)

        response.raise_for_status()
        return response

    @staticmethod
    def _parse_response(response):
        """Decode a JSON response body, unescaping HTML entities first."""
        return json.loads(html.unescape(response.text))

    @classmethod
    def find(cls, resource, resource_id):
        """Fetch a single resource by id."""
        response = cls._perform_request(
            'GET',
            cls._base_url(resource, resource_id=resource_id)
        )

        return cls._parse_response(response)

    @classmethod
    def delete(cls, resource, resource_id):
        """Delete a single resource by id.  Returns True on success."""
        cls._perform_request(
            'DELETE',
            cls._base_url(resource,
                          resource_id=resource_id
                          )
        )

        return True

    @classmethod
    def create(cls, resource, attributes):
        """Create a resource from an attribute dict and return the parsed reply."""
        response = cls._perform_request(
            'POST',
            cls._base_url(resource),
            json=attributes
        )

        return cls._parse_response(response)

    @classmethod
    def update(cls, resource, resource_id, attributes):
        """Update a resource; returns the parsed reply, or {} for empty bodies."""
        response = cls._perform_request(
            'PUT',
            cls._base_url(resource, resource_id=resource_id),
            json=attributes
        )

        if response.text:
            return cls._parse_response(response)

        return {}

    @classmethod
    def search(cls, resource, search_param):
        """Search a resource collection by the API's `search` parameter."""
        response = cls._perform_request(
            'GET',
            cls._base_url(resource, search_param=search_param)
        )

        return cls._parse_response(response)

    @classmethod
    def _get_all(cls, resource, results, start_pos):
        """Accumulate every page of *resource* into *results*, starting at
        *start_pos*.

        Implemented iteratively: the previous recursive version hit Python's
        recursion limit on collections with many pages.
        """
        while True:
            response = cls._perform_request(
                'GET',
                cls._base_url(resource, limit=cls.PAGINATION_OFFSET, start=start_pos)
            )

            response_list = cls._parse_response(response)
            # An empty page marks the end of the collection.
            if not response_list:
                return results

            results += response_list
            start_pos += cls.PAGINATION_OFFSET

    @classmethod
    def all(cls, resource):
        """Fetch every item of a paginated collection."""
        return cls._get_all(resource, [], 0)

    @classmethod
    def get_children(cls, resource, resource_id):
        """Fetch the nested collection of the same resource type (e.g. sub-teams)."""
        response = cls._perform_request(
            'GET',
            cls._base_url(resource, resource_id=resource_id, sub_resource=resource)
        )

        return cls._parse_response(response)

    @classmethod
    def get_sub_resource(cls, resource, resource_id, sub_resource):
        """Fetch a named sub-resource collection of one resource."""
        response = cls._perform_request(
            'GET',
            cls._base_url(
                resource,
                resource_id=resource_id,
                sub_resource=sub_resource
            )
        )

        return cls._parse_response(response)

    @classmethod
    def add_sub_resource(cls, resource, resource_id, sub_resource, attributes):
        """POST *attributes* to a sub-resource; returns parsed reply or True."""
        response = cls._perform_request(
            'POST',
            cls._base_url(
                resource,
                resource_id=resource_id,
                sub_resource=sub_resource
            ),
            json=attributes
        )

        if response.text:
            return cls._parse_response(response)

        return True

    @classmethod
    def update_sub_resource(cls, resource, resource_id, sub_resource, sub_resource_id, attributes=None):
        """PUT *attributes* to one sub-resource item; returns parsed reply or True."""
        response = cls._perform_request(
            'PUT',
            cls._base_url(
                resource,
                resource_id=resource_id,
                sub_resource=sub_resource,
                sub_resource_id=sub_resource_id
            ),
            json=attributes
        )

        if response.text:
            return cls._parse_response(response)

        return True

    @classmethod
    def remove_sub_resource(cls, resource, resource_id, sub_resource, sub_resource_id):
        """DELETE one sub-resource item.  Returns True on success."""
        cls._perform_request(
            'DELETE',
            cls._base_url(
                resource,
                resource_id=resource_id,
                sub_resource=sub_resource,
                sub_resource_id=sub_resource_id)
        )

        return True

    @classmethod
    def remove_sub_resources(cls, resource, resource_id, sub_resource, attributes):
        """Bulk-DELETE sub-resource items described by *attributes*."""
        response = cls._perform_request(
            'DELETE',
            cls._base_url(
                resource,
                resource_id=resource_id,
                sub_resource=sub_resource
            ),
            json=attributes
        )

        if response.text:
            return cls._parse_response(response)

        return True
| 2.640625 | 3 |
NSGA2/individual_selector.py | hirokuramoto/Optimizer | 0 | 12769401 | # 混雑度トーナメント選択により新たな探索母集団Qt+1を生成
import numpy as np
import random
import copy
class Tournament(object):
    """Binary tournament selection on rank and crowding distance.

    Column layout of the archive array: column 2 holds the Pareto rank
    (lower is better), column 3 the crowding distance (higher is better).
    """

    def __init__(self, archive_set):
        # Keep a private deep copy so the caller's array is never mutated.
        self._archive_set = copy.deepcopy(archive_set)

    def tournament(self):
        """Build a search population of the same size as the archive."""
        population_size = int(self._archive_set.shape[0])
        selected_rows = []
        for _ in range(population_size):
            first = random.randrange(population_size)
            second = random.randrange(population_size)
            row_a = self._archive_set[first, :]
            row_b = self._archive_set[second, :]
            # Rank decides first; ties fall through to crowding distance,
            # where the larger value wins (second contestant wins exact ties).
            if row_a[2] < row_b[2]:
                winner = row_a
            elif row_a[2] > row_b[2]:
                winner = row_b
            elif row_a[3] > row_b[3]:
                winner = row_a
            else:
                winner = row_b
            selected_rows.append(winner)

        return np.array(selected_rows, dtype=np.float64).reshape(population_size, -1)
| 3.34375 | 3 |
backend/api/services/authorServices.py | CMPUT404-wi21-project/CMPUT404-project-socialdistribution | 1 | 12769402 | from ..models.author import Author
from ..models.node import Node
from ..serializers import AuthorSerializer
from rest_framework import status
from rest_framework.response import Response
from django.conf import settings
from django.forms import URLField
from django.core.exceptions import ValidationError
from django.contrib.auth.hashers import make_password
# Helper
from urllib.parse import urlparse
'''
Getting author api format json by author id from local db
input:
str:id
return:
json or 404
'''
def getAuthorJsonById(id):
    """Return the serialized Author with primary key *id*, or HTTP 404."""
    try:
        a = Author.objects.get(id=id)
    except:
        # NOTE(review): bare except also hides programming errors; consider
        # catching Author.DoesNotExist specifically.
        return Response(status=404)
    serializer = AuthorSerializer(a)
    return Response(serializer.data)
# Checks the author table for a author with a specific URL
def getRemoteAuthor(request, remote_author_create_func):
    """Return the local shadow Author for the remote requestor.

    Looks up the Author by the URL in the X-Request-User header; if none
    exists, creates one via createRemoteAuthor.
    """
    requestor_url = request.headers.get('X-Request-User')
    #author = Author.objects.get(url=requestor_url)
    author = Author.objects.filter(url=requestor_url)
    if author.exists():
        return author[0]
    # At this point the author doesnt exist so we must create
    else:
        author = createRemoteAuthor(request, remote_author_create_func)
        return author
# Create a Remote author by requesting information for a remote author and creating a corresponding author
def createRemoteAuthor(request, remote_author_create_func):
    """Create a local Author record mirroring a remote author.

    Verifies the requesting Origin belongs to a known Node, asks the remote
    server for the author's profile, and persists a local shadow account.
    Returns the new Author, or None if the host is unknown or the remote
    lookup fails.
    """
    parsed_host = urlparse(request.headers.get('Origin'))
    host = f"{parsed_host.scheme}://{parsed_host.netloc}"

    #===Test Block please Remove for production====
    if host == "http://localhost:8000":
        host = "http://172.28.0.3:8000"
    elif host == "http://localhost:8080":
        host = "http://172.28.0.5:8080"
    #===============================================

    # Ensure that the provided host is a registered node.  Use
    # filter().first() rather than get(): get() raises DoesNotExist instead
    # of returning None, so the original None-check was unreachable.
    node = Node.objects.filter(host_url=host).first()
    if node is None:
        print("We could not find the node trying to talk")
        return None

    # Host is an accepted node so we will create a user for them
    response = remote_author_create_func(request, request.headers.get('X-Request-User'), ignore_user=True)
    if response.status_code != 200:
        return None

    # Create a local author mirroring the remote profile.
    remote_author = response.json()
    new_author = Author(
        displayName=remote_author['displayName'],
        host=remote_author['host'],
        github=remote_author['github'],
        username=remote_author['id'],
        # TODO(review): the original credential literal was redacted; shadow
        # accounts receive a hashed placeholder and cannot log in directly.
        password=make_password('<remote-shadow-account>')
    )
    new_author.url = remote_author['id']
    new_author.save()
    return new_author
'''
handle profile request
input:
request
str:id
return:
json, status code
'''
def processProfileApi(request, id):
    """Handle GET (read) and POST (update) of an author profile.

    Only the author themself or a superuser may update; others get HTTP 403.
    Unknown ids yield HTTP 404; invalid payloads HTTP 400.
    """
    try:
        a = Author.objects.get(id=id)
    except:
        return Response({"detail": "author not found"}, status=404)

    if request.method == 'GET':
        serializer = AuthorSerializer(a)
        return Response(serializer.data)
    elif request.method == 'POST':
        # check author identity: author self or admin
        if request.user.id == a.id or request.user.is_superuser:
            serializer = AuthorSerializer(a, data=request.data)
            if serializer.is_valid():
                serializer.save()
                return Response(serializer.data)
            return Response(serializer.errors, status=400)
        else: #no auth
            return Response({"detail": "You can't edit other author's profile."}, status=403)
'''
check if host is our backend host
input:
str: host url
return:
true or false
'''
def isHostLocal(host):
    """True if *host*'s netloc matches this backend's HEROKU_HOST netloc."""
    return host.split('/')[2]==settings.HEROKU_HOST.split('/')[2]
'''
check if host is our frontend host
input:
str: host url
return:
true or false
'''
def isHostFront(host):
    """True if *host*'s netloc matches this project's FRONTEND_HOST netloc."""
    return host.split('/')[2]==settings.FRONTEND_HOST.split('/')[2]
'''
validate url format
input:
str: url
return:
true or false
'''
def validateUrl(url):
    """Validate *url* using Django's URLField; return True if well-formed."""
    url_form_field = URLField()
    try:
        url = url_form_field.clean(url)
    except ValidationError:
        return False
    return True
'''
validate if url is in correct author format
input:
str: url
return:
true or false
'''
def isInAuthorFormat(url):
    """Check whether *url* looks like scheme://host/author/... -- i.e. the
    fourth slash-separated segment is the literal 'author'."""
    segments = url.split('/')
    return segments[3] == 'author'
'''
get host url from whole url
input:
str: url
return:
str: host
'''
def getHostFromUrl(url):
    """Return the 'scheme://netloc/' prefix of *url* (simple '/' splitting)."""
    scheme, _, netloc = url.split('/')[:3]
    return '{}//{}/'.format(scheme, netloc)
'''
get destination host url from request
input:
request
return:
host url
'''
def getDestHostFromRequest(request):
    """Build 'scheme://host/' for the request's destination server."""
    return '{}://{}/'.format(request.scheme, request.get_host())
| 2.484375 | 2 |
Diena_6_lists/uzd3_g1.py | edzya/Python_RTU_08_20 | 8 | 12769403 | <filename>Diena_6_lists/uzd3_g1.py
# Māras
# Read a sentence and print it with every word reversed in place
# (word order preserved).
text = input("ievadi teikumu")
words = text.split()
reversed_words = [w[::-1] for w in words]
# join() avoids a trailing separator after the last word.
print(" ".join(reversed_words))
| 3.578125 | 4 |
movimento/migrations/0001_initial.py | pontual/sistema-v1 | 1 | 12769404 | # Generated by Django 2.0.1 on 2018-01-30 23:36
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations; schema changes belong in a
    # new migration rather than edits here.

    initial = True

    dependencies = [
        ('registros', '0001_initial'),
    ]

    operations = [
        # Line item of a transaction: product, quantity and unit price.
        migrations.CreateModel(
            name='ItemDeLinha',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('qtde', models.IntegerField(default=1)),
                ('preco_unitario', models.IntegerField(default=0)),
                ('moeda', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='registros.Moeda')),
                ('produto', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='registros.Produto')),
            ],
        ),
        # A buyer/seller transaction dated at record time.
        migrations.CreateModel(
            name='Transacao',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_recorded', models.DateField(default=datetime.date.today)),
                ('comprador', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='buyer_company', to='registros.Empresa')),
                ('vendedor', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='seller_company', to='registros.Empresa')),
            ],
            options={
                'verbose_name_plural': 'Transações',
            },
        ),
        # Line items are deleted together with their parent transaction.
        migrations.AddField(
            model_name='itemdelinha',
            name='transacao',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='movimento.Transacao'),
        ),
    ]
| 1.851563 | 2 |
facenet.py | KomissarYarrik/face_recognition | 0 | 12769405 | <filename>facenet.py
# coding: utf-8
import tensorflow as tf
import keras
from keras import applications
import os
from keras.models import load_model,model_from_json
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import inline
import cv2
import dlib
from sklearn.svm import SVC
from sklearn.preprocessing import LabelEncoder
import imutils
from imutils.face_utils import FaceAligner
# Default float type constant (defined but not referenced below).
_FLOATX = 'float32'
# Load the pre-trained FaceNet model and its weights.
model=load_model(os.path.join(r'keras\model',r'facenet_keras.h5'))
model.load_weights(os.path.join(r'keras\weights',r'facenet_keras_weights.h5'))
model.summary()
# Root directory of the training face images (one sub-folder per identity).
base_dir=r'dataset'
def Standardize(inp):
    """Zero-mean / unit-variance normalisation.

    A 3-D array (one image) is normalised over the whole array; a 4-D
    array (a batch) is normalised per sample.
    """
    if inp.ndim == 3:
        reduce_axes = (0, 1, 2)
    elif inp.ndim == 4:
        reduce_axes = (1, 2, 3)
    sample_mean = np.mean(inp, axis=reduce_axes, keepdims=True)
    sample_std = np.std(inp, axis=reduce_axes, keepdims=True)
    return (inp - sample_mean) / sample_std
def L2_Norm(inp):
    """Normalise each row of *inp* to unit Euclidean length, in place,
    and return the (mutated) array."""
    for idx, row in enumerate(inp):
        inp[idx] = row / np.sqrt(np.sum(np.square(row)))
    return inp
def Euclied_dist(inp1, inp2):
    """Euclidean distance between two equally-shaped arrays."""
    diff = inp1 - inp2
    return np.sqrt((diff ** 2).sum())
# Face detector, 68-point landmark predictor and face aligner setup.
detector=dlib.get_frontal_face_detector()
shape_predictor=dlib.shape_predictor(os.path.join(r'dlib',r'shape_predictor_68_face_landmarks.dat'))
face_aligner=FaceAligner(shape_predictor)
# Build the dataset: detect, align and resize faces, cache to FImages.npy.
if os.path.exists('data\FImages.npy') == False:
    images=[]
    for i in os.listdir(base_dir):
        for j in os.listdir(os.path.join(base_dir,i)):
            img=cv2.imread(os.path.join(base_dir,i,j))
            print(i,j)
            faces=detector(img,1)
            # Only the first detected face of each image is used.
            f=faces[0]
            print(len(faces))
            x,y,w,h=f.left(),f.top(),(f.right()-f.left()),(f.bottom()-f.top())
            temp=face_aligner.align(img,cv2.cvtColor(img,cv2.COLOR_BGR2GRAY),f)
            temp=cv2.resize(cv2.cvtColor(temp,cv2.COLOR_BGR2GRAY),(160,160))
            # Round-trip through a temp JPEG to obtain a 3-channel image.
            cv2.imwrite(os.path.join(base_dir,r'temp.jpg'),temp)
            temp=cv2.imread(os.path.join(base_dir,r'temp.jpg'))
            os.remove(os.path.join(base_dir,r'temp.jpg'))
            images.append(temp)
    images=Standardize(np.array(images))
    np.save(os.path.join(r'data',r'FImages'),images)
    images=np.load(os.path.join(r'data',r'FImages.npy'))
else:
    images=np.load(os.path.join(r'data',r'FImages.npy'))
# Build one label per face image and integer-encode them.
labels=[]
for i in os.listdir(base_dir):
    labels.extend([i]*len(os.listdir(os.path.join(base_dir,i))))
le=LabelEncoder().fit(labels)
y=le.transform(labels)
# Generate an L2-normalised embedding for every training face.
embs=[]
for i in range(len(images)):
    t=L2_Norm(np.array(model.predict(images[i:i+1])))
    embs.append(t)
embs=np.reshape(np.array(embs),(len(embs),128))
# Compute the mean embedding of each identity.
labels_emb={}
i=0
c=0
for j in os.listdir(base_dir):
    l=len(os.listdir(os.path.join(base_dir,j)))
    print(l)
    # Average all embeddings belonging to this identity.
    labels_emb[c]=np.mean(embs[i:i+l],axis=0)
    c+=1
    i+=l
# Define an SVC classifier and fit it on the embeddings.
Smodel=SVC(kernel='linear',probability=True,decision_function_shape='ovo')
clf=Smodel.fit(embs,y)
# Print embedding-to-centroid distances used to pick a match threshold.
for i in range(len(embs)):
    for j in range(len(labels_emb)):
        print("{} and {} :{}".format(i,j,Euclied_dist(embs[i],labels_emb[j])))
| 2.59375 | 3 |
spin/prophecy.py | cloudRoutine/curveship | 1 | 12769406 | spin = {
'time': 'before'}
| 1.34375 | 1 |
postreise/analyze/generation/tests/test_emissions.py | lanesmith/PostREISE | 0 | 12769407 | import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_almost_equal
from powersimdata.tests.mock_grid import MockGrid
from powersimdata.tests.mock_scenario import MockScenario
from postreise.analyze.generation.emissions import (
generate_emissions_stats,
summarize_emissions_by_bus,
)
@pytest.fixture
def mock_plant():
    """Five plants (solar, wind, ng, coal, dfo) keyed by plant_id."""
    # plant_id is the index
    return {
        "plant_id": [101, 102, 103, 104, 105],
        "bus_id": [1001, 1002, 1003, 1004, 1005],
        "type": ["solar", "wind", "ng", "coal", "dfo"],
        "GenFuelCost": [0, 0, 3.3, 4.4, 5.5],
    }
@pytest.fixture
def mock_gencost():
    """Quadratic cost curves (c2, c1, c0) for each mock plant."""
    # plant_id is the index
    return {
        "plant_id": [101, 102, 103, 104, 105],
        "type": [2] * 5,
        "startup": [0] * 5,
        "shutdown": [0] * 5,
        "n": [3] * 5,
        "c2": [1, 2, 3, 4, 5],
        "c1": [10, 20, 30, 40, 50],
        "c0": [100, 200, 300, 400, 500],
        "interconnect": ["Western"] * 5,
    }
@pytest.fixture
def mock_pg(mock_plant):
    """Hourly generation ramping linearly per plant over four timestamps."""
    return pd.DataFrame(
        {
            plant_id: [(i + 1) * p for p in range(4)]
            for i, plant_id in enumerate(mock_plant["plant_id"])
        },
        index=pd.date_range("2019-01-01", periods=4, freq="H"),
    )
@pytest.fixture
def scenario(mock_plant, mock_gencost, mock_pg):
    """MockScenario combining the plant, gencost and PG fixtures."""
    return MockScenario(
        grid_attrs={"plant": mock_plant, "gencost_before": mock_gencost},
        pg=mock_pg,
    )
def _test_emissions_structure(emissions, mock_plant, pg):
    """Shared assertions: emissions frame mirrors pg's shape and is sane."""
    plant = pd.DataFrame(mock_plant)
    plant.set_index("plant_id", inplace=True)

    # check data frame structure
    err_msg = "generate_emissions_stats should return a data frame"
    assert isinstance(emissions, pd.DataFrame), err_msg
    for a, b in zip(pg.index.to_numpy(), emissions.index.to_numpy()):
        assert a == b, "emissions and pg should have same index"
    for a, b in zip(pg.columns.to_numpy(), emissions.columns.to_numpy()):
        assert a == b, "emissions and pg should have same columns"

    # sanity check values
    emissions_from_wind = plant[plant.type == "wind"].index.values
    err_msg = "Wind farm does not emit emissions"
    assert emissions[emissions_from_wind[0]].sum() == 0, err_msg
    emissions_from_solar = plant[plant.type == "solar"].index.values
    err_msg = "Solar plant does not emit emissions"
    assert emissions[emissions_from_solar[0]].sum() == 0, err_msg
    negative_emissions_count = np.sum((emissions < 0).to_numpy().ravel())
    assert negative_emissions_count == 0, "No plant should emit negative emissions"
class TestEmissionStatsArguments:
    """Argument validation of generate_emissions_stats."""

    def test_pollutant_value(self, scenario):
        # Pollutant names are lower-case; "CO2" must be rejected.
        with pytest.raises(ValueError) as excinfo:
            generate_emissions_stats(scenario, pollutant="CO2")
        assert "Unknown pollutant for generate_emissions_stats()" in str(excinfo.value)

    def test_method_type(self, scenario):
        with pytest.raises(TypeError) as excinfo:
            generate_emissions_stats(scenario, method=1)
        assert "method must be a str" in str(excinfo.value)

    def test_method_value(self, scenario):
        with pytest.raises(ValueError) as excinfo:
            generate_emissions_stats(scenario, pollutant="nox", method="always-off")
        assert "method for nox must be one of: {'simple'}" in str(excinfo.value)
class TestCarbonCalculation:
    """Carbon emissions for the three supported calculation methods."""

    def test_carbon_calc_always_on(self, scenario, mock_plant):
        carbon = generate_emissions_stats(scenario, method="always-on")
        _test_emissions_structure(carbon, mock_plant, scenario.state.get_pg())

        # check specific values
        expected_values = np.array(
            [
                [0, 0, 4.82, 8.683333, 6.77],
                [0, 0, 6.6998, 13.546000, 11.8475],
                [0, 0, 9.4472, 21.1873333, 20.3100],
                [0, 0, 13.0622, 31.6073333, 32.1575],
            ]
        )
        assert_array_almost_equal(
            expected_values, carbon.to_numpy(), err_msg="Values do not match expected"
        )

    def test_carbon_calc_decommit(self, scenario, mock_plant):
        # Decommit zeroes the first hour (plants off at zero generation).
        carbon = generate_emissions_stats(scenario, method="decommit")
        _test_emissions_structure(carbon, mock_plant, scenario.state.get_pg())

        # check specific values
        expected_values = np.array(
            [
                [0, 0, 0, 0, 0],
                [0, 0, 6.6998, 13.546000, 11.8475],
                [0, 0, 9.4472, 21.1873333, 20.3100],
                [0, 0, 13.0622, 31.6073333, 32.1575],
            ]
        )
        assert_array_almost_equal(
            expected_values, carbon.to_numpy(), err_msg="Values do not match expected"
        )

    def test_carbon_calc_simple(self, scenario, mock_plant):
        carbon = generate_emissions_stats(scenario, method="simple")
        _test_emissions_structure(carbon, mock_plant, scenario.state.get_pg())

        # check specific values
        expected_values = np.array(
            [
                [0, 0, 0, 0, 0],
                [0, 0, 1.407, 4.004, 4.2],
                [0, 0, 2.814, 8.008, 8.4],
                [0, 0, 4.221, 12.012, 12.6],
            ]
        )
        assert_array_almost_equal(
            expected_values, carbon.to_numpy(), err_msg="Values do not match expected"
        )
class TestNOxCalculation:
    """NOx emission values and method restrictions."""
    def test_calculate_nox_simple(self, scenario):
        """'simple' method matches precomputed per-plant NOx values."""
        expected_values = np.array(
            [
                [0, 0, 0, 0, 0],
                [0, 0, 0.000537, 0.002632, 0.007685],
                [0, 0, 0.001074, 0.005264, 0.015370],
                [0, 0, 0.001611, 0.007896, 0.023055],
            ]
        )
        nox = generate_emissions_stats(scenario, pollutant="nox", method="simple")
        assert_array_almost_equal(
            expected_values, nox.to_numpy(), err_msg="Values do not match expected"
        )
    def test_calculate_nox_disallowed_method(self, scenario):
        """Only 'simple' is supported for NOx; other methods raise ValueError."""
        with pytest.raises(ValueError):
            generate_emissions_stats(scenario, pollutant="nox", method="decommit")
class TestSO2Calculation:
    """SO2 emission values and method restrictions."""
    def test_calculate_so2_simple(self, scenario):
        """'simple' method matches precomputed per-plant SO2 values."""
        expected_values = np.array(
            [
                [0, 0, 0, 0, 0],
                [0, 0, 3.0000e-05, 3.8600e-03, 1.0945e-02],
                [0, 0, 6.0000e-05, 7.7200e-03, 2.1890e-02],
                [0, 0, 9.0000e-05, 1.1580e-02, 3.2835e-02],
            ]
        )
        # Renamed from the misleading `nox`: this frame holds the SO2 result.
        so2 = generate_emissions_stats(scenario, pollutant="so2", method="simple")
        assert_array_almost_equal(
            expected_values, so2.to_numpy(), err_msg="Values do not match expected"
        )
    def test_calculate_so2_disallowed_method(self, scenario):
        """Only 'simple' is supported for SO2; other methods raise ValueError."""
        with pytest.raises(ValueError):
            generate_emissions_stats(scenario, pollutant="so2", method="always-on")
class TestEmissionsSummarization:
    """Behavior of summarize_emissions_by_bus."""
    def test_emissions_is_non_negative(self, scenario):
        """Negative emissions input must be rejected with ValueError."""
        carbon = generate_emissions_stats(scenario)
        with pytest.raises(ValueError):
            # NOTE(review): `mock_plant` here resolves at module scope, not via a
            # fixture parameter of this test — confirm a module-level dict with
            # that name exists (otherwise this passes a fixture function object).
            summarize_emissions_by_bus(
                -1 * carbon, MockGrid(grid_attrs={"plant": mock_plant})
            )
    def test_emissions_summarization(self, mock_pg, mock_plant):
        """Per-plant carbon is aggregated per fuel type and bus id."""
        # setup
        pg = pd.DataFrame(mock_pg).iloc[:3, :]
        plant = pd.DataFrame(mock_plant)
        plant.set_index("plant_id", inplace=True)
        input_carbon_values = [
            [0, 0, 6.6998, 13.546000, 11.8475],
            [0, 0, 9.4472, 21.1873333, 20.3100],
            [0, 0, 13.0622, 31.6073333, 32.1575],
        ]
        input_carbon = pd.DataFrame(
            input_carbon_values, index=pg.index, columns=pg.columns
        )
        # Expected: {fuel_type: {bus_id: column sum of that plant's carbon}}.
        expected_sum = {
            "coal": {1004: 66.3406666},
            "ng": {1003: 29.2092},
            "dfo": {1005: 64.315},
        }
        # calculation
        summation = summarize_emissions_by_bus(
            input_carbon, MockGrid(grid_attrs={"plant": mock_plant})
        )
        # checks
        err_msg = "summarize_emissions_by_bus didn't return a dict"
        assert isinstance(summation, dict), err_msg
        err_msg = "summarize_emissions_by_bus didn't return the right dict keys"
        assert set(summation.keys()) == expected_sum.keys(), err_msg
        for k in expected_sum.keys():
            err_msg = "summation not correct for fuel " + k
            assert expected_sum[k].keys() == summation[k].keys(), err_msg
            for bus in expected_sum[k]:
                err_msg = "summation not correct for bus " + str(bus)
                assert expected_sum[k][bus] == pytest.approx(summation[k][bus]), err_msg
| 2.15625 | 2 |
modules/msa/msa/contrib/uniqauth/static.py | haoyutan/MSA-Framework | 2 | 12769408 | EMPTY_MD5 = "d41d8cd98f00b204e9800998ecf8427e"
| 0.972656 | 1 |
physicoModule/physico_style.py | sseungyong/physico_project | 0 | 12769409 | <gh_stars>0
import openpyxl
from openpyxl.styles import PatternFill
from openpyxl.styles import Border, Side
from openpyxl.styles import Alignment, Font
# Shared openpyxl cell styles used when rendering workbooks.
boldFont = Font(bold=True)
# NOTE(review): name contains a typo ("centet" -> "center") but is kept as-is
# because other modules may import it by this name.
centetAlignment = Alignment(horizontal='center', vertical='center')
# Thin black border on every edge (and diagonals) of a cell.
normal_box = Border(
    left=Side(border_style='thin', color='FF000000'),
    right=Side(border_style='thin', color='FF000000'),
    top=Side(border_style='thin', color='FF000000'),
    bottom=Side(border_style='thin', color='FF000000'),
    diagonal=Side(border_style='thin', color='FF000000'),
    diagonal_direction=0,
    outline=Side(border_style='thin', color='FF000000'),
    vertical=Side(border_style='thin', color='FF000000'),
    horizontal=Side(border_style='thin', color='FF000000')
)
# Double underline only, used for name/header cells.
name_box = Border(
    bottom=Side(border_style='double', color='FF000000'),
)
# Solid background fills (ARGB-less hex colors).
topFill = PatternFill(patternType='solid', fgColor='fff200')
frontFill = PatternFill(patternType='solid', fgColor='dfe4ea')
bottomFill = PatternFill(patternType='solid', fgColor='ff6b6b')
reserveFill = PatternFill(patternType='solid', fgColor='b2bec3')
| 2.5625 | 3 |
setup.py | antiproblemist/django-accountk | 15 | 12769410 | <gh_stars>10-100
from setuptools import setup
from os import path
import io
# Read the long description from the README so PyPI can render it.
this_directory = path.abspath(path.dirname(__file__))
# io.open pins the encoding explicitly (the platform default may not be UTF-8,
# e.g. cp1252 on Windows) and, unlike open(..., encoding=...), also works on
# Python 2.7, which the classifiers below still advertise.
with io.open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setup(name='django-accountkit',
      version='0.2.3',
      description='Facebook accountkit support for Django',
      long_description=long_description,
      long_description_content_type='text/markdown',
      author='<NAME>',
      author_email='<EMAIL>',
      url='https://github.com/antiproblemist/django-accountkit',
      packages=['accountkitlogin', 'accountkitlogin.templatetags'],
      include_package_data = True,
      package_data={
          '': ['LICENSE', 'README.md'],
      },
      license='MIT',
      classifiers=[
          'Development Status :: 4 - Beta',
          'Environment :: Console',
          'Environment :: Web Environment',
          'Intended Audience :: Developers',
          'Intended Audience :: System Administrators',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
          'Topic :: Software Development',
      ],
      install_requires=[
          "Django >= 1.11",
          "requests >= 2.18.4",
      ],
      )
socket_rlbot_test/hive.py | ViliamVadocz/Hivemind | 2 | 12769411 | import socket
from messages import send_packet, receive_packet, INPUT_MESSAGE
# Base bot id; the hivemind controls ids [ID, ID + NUM_BOTS).
ID = 0
# Number of cars this hivemind drives (must match the match config).
NUM_BOTS = 4
# Notice: multiplicity == 4, telling RLBot I want to control bots.
# Controlled IDs are [id, id + 1, id + 2, id + 3].
# There must be enough cars in the match config file to support this.
HIVE_READY_MESSAGE = {
    "type": "Ready",
    "name": "Hivemind",
    "team": 0,
    "id": ID,
    "multiplicity": NUM_BOTS
}
class SocketHivemind:
    """Controls several RLBot drones over one TCP socket.

    Connects during construction, announces itself with HIVE_READY_MESSAGE,
    then loops in run(): receive a packet, answer "Update" messages with one
    INPUT_MESSAGE per drone.
    """
    def log(self, statement):
        """Print *statement* with a [HIVEMIND] prefix when debug is on."""
        if self.debug:
            print(f"[HIVEMIND] {statement}")
    def __init__(self, port, debug=False):
        """Connect to localhost:*port* and send the READY handshake.

        Note: the constructor performs network I/O (connect + first send).
        """
        self.debug = debug
        self.id = ID
        self.drone_ids = [ID + i for i in range(NUM_BOTS)]
        self.initialize_hive()
        self.log("Loaded")
        # Create socket.
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.log("Attempting to connect")
        self.socket.connect(("localhost", port))
        self.log("Connected")
        self.log("Sending READY message")
        send_packet(self.socket, HIVE_READY_MESSAGE)
    def run(self):
        """Receive/handle packets until any exception, then close the socket."""
        try:
            # Packet loop.
            while True:
                self.log("Receiving packet")
                packet = receive_packet(self.socket)
                self.parse_packet(packet)
        # Broad catch is the loop's only exit path: log, then clean up.
        except Exception as e:
            print(e)
            self.log("Closing")
            self.socket.close()
    def parse_packet(self, packet):
        """Dispatch each message in *packet*; only "Update" is handled."""
        # Packet is a list of messages.
        for message in packet:
            if message["type"] == "Update":
                self.log("Received UPDATE message")
                output = self.get_output(message)
                self.log("Sending INPUT messages")
                send_packet(self.socket, output)
            else:
                # TODO Other kinds of messages
                continue
    def initialize_hive(self):
        """Hook for subclass/one-time setup; intentionally empty here."""
        # Space for an init func.
        pass
    def get_output(self, message):
        """Return one controls entry per drone for the given update *message*."""
        # NOTE(review): every list entry aliases the same INPUT_MESSAGE object —
        # confirm downstream never mutates entries independently.
        controls = [INPUT_MESSAGE for drone in self.drone_ids]
        return controls
if __name__ == "__main__":
hivemind = SocketHivemind(23234, debug=True)
hivemind.run() | 2.8125 | 3 |
Max/Max_0011_20200131.py | Morek999/OMSCS_Taiwan_Leetcode | 1 | 12769412 | <filename>Max/Max_0011_20200131.py
"""
11. Container With Most Water
https://leetcode.com/problems/container-with-most-water/
Time complexity: O(n) - a single two-pointer pass over the heights.
Space complexity: O(1) - only pointer and running-maximum variables.
Solution: start pointers at both ends; record the area, then move the
pointer at the shorter line inward, since moving the taller one can
never increase the area.
"""
from typing import List
class Solution:
    """Two-pointer solution for LeetCode 11 (Container With Most Water)."""

    def maxArea(self, height: List[int]) -> int:
        """Return the maximum water area enclosed by two lines of *height*."""
        left, right = 0, len(height) - 1
        best = 0
        # Shrink the window from whichever side is shorter: moving the taller
        # side inward can never increase the area.
        while left < right:
            width = right - left
            depth = height[left] if height[left] < height[right] else height[right]
            area = width * depth
            if area > best:
                best = area
            if height[left] < height[right]:
                left += 1
            else:
                right -= 1
        return best
# Ad-hoc smoke test: print the result for each sample input.
ans = [
    [1,8,6,2,5,4,8,3,7] # output = 49
]
for trails in ans:
    print(Solution().maxArea(trails))
| 3.953125 | 4 |
account/migrations/0010_auto_20200929_1634.py | cuiliang0302/myblog | 5 | 12769413 | <filename>account/migrations/0010_auto_20200929_1634.py<gh_stars>1-10
# Generated by Django 3.1.1 on 2020-09-29 16:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Reorder leavemessage records and add an optional avatar image field."""
    dependencies = [
        ('account', '0009_auto_20200929_1109'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='leavemessage',
            # verbose_name strings are Chinese display names ("message log").
            options={'ordering': ('root_id', 'level', '-time'), 'verbose_name': '留言记录表', 'verbose_name_plural': '留言记录表'},
        ),
        migrations.AddField(
            model_name='leavemessage',
            name='comment_img',
            # verbose_name '头像' means "avatar".
            field=models.ImageField(blank=True, null=True, upload_to='comment/', verbose_name='头像'),
        ),
    ]
| 1.421875 | 1 |
services/ingest-file/ingestors/support/convert.py | bakrale/aleph | 0 | 12769414 | import math
import logging
import requests
from itertools import count
from requests import RequestException, HTTPError
from servicelayer.util import backoff
from followthemoney.helpers import entity_filename
from ingestors.settings import CONVERT_URL, CONVERT_TIMEOUT
from ingestors.support.cache import CacheSupport
from ingestors.support.temp import TempFileSupport
from ingestors.exc import ProcessingException
log = logging.getLogger(__name__)
class DocumentConvertSupport(CacheSupport, TempFileSupport):
    """Provides helpers for UNO document conversion via HTTP."""
    def document_to_pdf(self, file_path, entity):
        """Return a PDF version of *entity*, using the content-hash cache.

        On a cache hit the stored PDF is loaded and its hash recorded on the
        entity; otherwise the document is converted, stored, and cached.
        """
        key = self.cache_key('pdf', entity.first('contentHash'))
        pdf_hash = self.tags.get(key)
        if pdf_hash is not None:
            file_name = entity_filename(entity, extension='pdf')
            path = self.manager.load(pdf_hash, file_name=file_name)
            if path is not None:
                log.info("Using PDF cache: %s", file_name)
                entity.set('pdfHash', pdf_hash)
                return path
        pdf_file = self._document_to_pdf(file_path, entity)
        if pdf_file is not None:
            content_hash = self.manager.store(pdf_file)
            entity.set('pdfHash', content_hash)
            self.tags.set(key, content_hash)
        return pdf_file
    def _document_to_pdf(self, file_path, entity):
        """Converts an office document to PDF.

        Posts the file to CONVERT_URL and streams the response into a work
        file. Retries indefinitely with square-root backoff while the
        converter is unavailable; raises ProcessingException on a definitive
        conversion failure.
        """
        file_name = entity_filename(entity)
        mime_type = entity.first('mimeType')
        log.info('Converting [%s] to PDF...', file_name)
        for attempt in count(1):
            try:
                with open(file_path, 'rb') as fh:
                    files = {'file': (file_name, fh, mime_type)}
                    res = requests.post(CONVERT_URL,
                                        params={'timeout': CONVERT_TIMEOUT},
                                        files=files,
                                        timeout=CONVERT_TIMEOUT + 10,
                                        stream=True)
                res.raise_for_status()
                out_path = self.make_work_file('out.pdf')
                with open(out_path, 'wb') as fh:
                    bytes_written = 0
                    for chunk in res.iter_content(chunk_size=None):
                        bytes_written += len(chunk)
                        fh.write(chunk)
                # Treat a tiny response body as a failed conversion; this
                # ProcessingException is not caught below and so propagates.
                if bytes_written > 50:
                    return out_path
                raise ProcessingException("Could not be converted to PDF.")
            except HTTPError as exc:
                if exc.response.status_code in (400, 500):
                    # For error 500, this might also be a temporary error
                    # in the conversion service. But all attempts to divy
                    # these phenomena apart have failed so far.
                    raise ProcessingException(res.text)
                msg = "Converter not available: %s (attempt: %s)"
                log.info(msg, exc, attempt)
                backoff(failures=math.sqrt(attempt))
            except RequestException as exc:
                msg = "Converter not available: %s (attempt: %s)"
                log.error(msg, exc, attempt)
                backoff(failures=math.sqrt(attempt))
| 2.234375 | 2 |
src/mrsimulator/methods/tests/test_MQVAS.py | pjgrandinetti/mrsimulator | 14 | 12769415 | # -*- coding: utf-8 -*-
import numpy as np
import pytest
from mrsimulator.method.query import TransitionQuery
from mrsimulator.methods import FiveQ_VAS
from mrsimulator.methods import SevenQ_VAS
from mrsimulator.methods import ThreeQ_VAS
__author__ = "<NAME>"
__email__ = "<EMAIL>"
# Method classes under test and their expected serialized names, in
# increasing multiple-quantum order (3Q, 5Q, 7Q).
methods = [ThreeQ_VAS, FiveQ_VAS, SevenQ_VAS]
names = ["ThreeQ_VAS", "FiveQ_VAS", "SevenQ_VAS"]
def sample_test_output(n):
    """Expected serialized payload for an MQ-VAS method of symmetry order *n*.

    Both spectral dimensions are identical except for the transition query's
    P value: the first uses *n*, the second the single-quantum -1.
    """
    def _dimension(p):
        return {
            "count": 1024,
            "spectral_width": "25000.0 Hz",
            "events": [{"transition_query": [{"ch1": {"P": [p], "D": [0]}}]}],
        }

    return {
        "magnetic_flux_density": "9.4 T",
        "rotor_angle": "0.9553166181245 rad",
        "rotor_frequency": "1000000000000.0 Hz",
        "spectral_dimensions": [_dimension(n), _dimension(-1)],
    }
def test_MQ_VAS_rotor_freq():
    """Overriding the fixed rotor_frequency of a 2D method must raise."""
    e = "`rotor_frequency=1e12 Hz` is fixed for 2D Methods and cannot be modified."
    isotopes = ["87Rb", "27Al", "51V"]
    for iso, method in zip(isotopes, methods):
        with pytest.raises(ValueError, match=f".*{e}.*"):
            method(channels=[iso], rotor_frequency=10, spectral_dimensions=[{}, {}])
def test_MQ_VAS_affine():
    """Affine matrix of each MQ-VAS method follows the MQ-MAS k ratios."""
    sites = ["87Rb", "27Al", "51V"]
    spins = [1.5, 2.5, 3.5]
    # k_MQ_MAS[mq_order][spin]: shearing ratio used in the affine transform.
    k_MQ_MAS = {
        3: {1.5: 21 / 27, 2.5: 114 / 72, 3.5: 303 / 135, 4.5: 546 / 216},
        5: {2.5: 150 / 72, 3.5: 165 / 135, 4.5: 570 / 216},
        7: {3.5: 483 / 135, 4.5: 84 / 216},
        9: {4.5: 1116 / 216},
    }
    for j, method in enumerate(methods):
        for i, isotope in zip(spins[j:], sites[j:]):
            meth = method(channels=[isotope])
            k = k_MQ_MAS[3 + 2 * j][i]
            assert meth.spectral_dimensions[0].events[0].fraction == 1
            assert meth.spectral_dimensions[1].events[0].fraction == 1
            # Expected affine row: [1/(k+1), k/(k+1), 0, 1].
            assert np.allclose(meth.affine_matrix, [1 / (k + 1), k / (k + 1), 0, 1])
def test_3Q_VAS_general():
    """3Q-VAS method test"""
    mth = ThreeQ_VAS(channels=["87Rb"], spectral_dimensions=[{}, {}])
    assert mth.name == "ThreeQ_VAS"
    assert mth.description == "Simulate a 3Q variable-angle spinning spectrum."
    assert mth.spectral_dimensions[0].events[0].transition_query == [
        TransitionQuery(ch1={"P": [-3], "D": [0]})
    ]
    assert mth.spectral_dimensions[1].events[0].transition_query == [
        TransitionQuery(ch1={"P": [-1], "D": [0]})
    ]
    # Round-trip: serialization must parse back to an equal method.
    assert ThreeQ_VAS.parse_dict_with_units(mth.json()) == mth
    assert np.allclose(mth.affine_matrix, [0.5625, 0.4375, 0.0, 1.0])
    serialize = mth.json()
    _ = serialize.pop("affine_matrix")
    assert serialize == {
        "channels": ["87Rb"],
        "description": "Simulate a 3Q variable-angle spinning spectrum.",
        "name": "ThreeQ_VAS",
        **sample_test_output(-3),
    }
def test_5Q_VAS_general():
    """5Q-VAS method test"""
    mth = FiveQ_VAS(channels=["17O"], spectral_dimensions=[{}, {}])
    assert mth.name == "FiveQ_VAS"
    assert mth.description == "Simulate a 5Q variable-angle spinning spectrum."
    assert mth.spectral_dimensions[0].events[0].transition_query == [
        TransitionQuery(ch1={"P": [-5], "D": [0]})
    ]
    assert mth.spectral_dimensions[1].events[0].transition_query == [
        TransitionQuery(ch1={"P": [-1], "D": [0]})
    ]
    # Round-trip: serialization must parse back to an equal method.
    assert FiveQ_VAS.parse_dict_with_units(mth.json()) == mth
    assert np.allclose(
        mth.affine_matrix, [0.3243243243243243, 0.6756756756756757, 0.0, 1.0]
    )
    serialize = mth.json()
    _ = serialize.pop("affine_matrix")
    assert serialize == {
        "channels": ["17O"],
        "description": "Simulate a 5Q variable-angle spinning spectrum.",
        "name": "FiveQ_VAS",
        **sample_test_output(-5),
    }
def test_7Q_VAS_general():
    """7Q-VAS method test"""
    mth = SevenQ_VAS(channels=["51V"], spectral_dimensions=[{}, {}])
    assert mth.name == "SevenQ_VAS"
    assert mth.description == "Simulate a 7Q variable-angle spinning spectrum."
    assert mth.spectral_dimensions[0].events[0].transition_query == [
        TransitionQuery(ch1={"P": [-7], "D": [0]})
    ]
    assert mth.spectral_dimensions[1].events[0].transition_query == [
        TransitionQuery(ch1={"P": [-1], "D": [0]})
    ]
    # Round-trip: serialization must parse back to an equal method.
    assert SevenQ_VAS.parse_dict_with_units(mth.json()) == mth
    assert np.allclose(mth.affine_matrix, [0.2184466, 0.7815534, 0.0, 1.0])
    serialize = mth.json()
    _ = serialize.pop("affine_matrix")
    assert serialize == {
        "channels": ["51V"],
        "description": "Simulate a 7Q variable-angle spinning spectrum.",
        "name": "SevenQ_VAS",
        **sample_test_output(-7),
    }
| 1.929688 | 2 |
distsys/actions/status.py | austinlostinboston/distsys | 0 | 12769416 | <reponame>austinlostinboston/distsys
#!/usr/bin/python
## Import builtins
import sys
from multiprocessing import Pool
## Import distsys
from distsys.services import services
from distsys.statistics import online
def client_status(client):
    # Print one line reporting whether `client` responds to the online() probe,
    # colored green (ON) or red (OFF) with ANSI escape codes.
    # NOTE: this file is Python 2 (print statements, see shebang).
    if online(client):
        print "Client: " + str(client) + "\t" + "\033[92m" + "ON" + "\033[0m"
    else:
        print "Client: " + str(client) + "\t" + "\033[91m" + "OFF" + "\033[0m"
if __name__ == "__main__":
    s = services(localhost=False)
    num_clients = len(s.clients)
    print "Determining online status of clients..."
    # Probe all clients concurrently, one worker per client.
    pool = Pool(processes=num_clients)
    pool.map(client_status, s.clients)
| 2.75 | 3 |
ucf_sub_embedded_ros/ucf_sub/src/sub_sensors/src/InternalEnvironment.py | RoboticsClubatUCF/RoboSub | 0 | 12769417 | #!/usr/bin/env python3
import HIH6130
import rospy
from sensor_msgs.msg import Temperature, RelativeHumidity
def publish():
    """Publish internal temperature and humidity from an HIH6130 sensor.

    Runs as the ROS node 'InternalEnvironment', publishing at 5 Hz on the
    'InternalTemperature' and 'InternalHumidity' topics until shutdown.
    """
    tempPub = rospy.Publisher('InternalTemperature', Temperature, queue_size=1)
    humidPub = rospy.Publisher('InternalHumidity', RelativeHumidity, queue_size=1)
    rospy.init_node('InternalEnvironment')
    # HIH6130 combined temperature/humidity sensor on I2C bus 1.
    sensor = HIH6130.HIH6130(bus=1)
    temp = Temperature()
    temp.header.frame_id = "base_link"
    humid = RelativeHumidity()
    humid.header.frame_id = "base_link"
    freq = rospy.Rate(5)
    while not rospy.is_shutdown():
        sensor.read()
        temp.temperature = sensor.t
        humid.relative_humidity = sensor.rh
        tempPub.publish(temp)
        humidPub.publish(humid)
        freq.sleep()
if __name__ == '__main__':
    try:
        publish()
    # Exit quietly when ROS interrupts the node (normal shutdown path).
    except rospy.ROSInterruptException:
        pass
| 2.4375 | 2 |
src/transformers/models/herbert/__init__.py | timpal0l/transformers | 2 | 12769418 | <reponame>timpal0l/transformers<gh_stars>1-10
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
from ...file_utils import is_tokenizers_available
from .tokenization_herbert import HerbertTokenizer
if is_tokenizers_available():
from .tokenization_herbert_fast import HerbertTokenizerFast
| 1.3125 | 1 |
src/main.py | Saztroz/Top-100-Movies | 0 | 12769419 | <filename>src/main.py
from bs4 import BeautifulSoup
import requests
import spotipy
from spotipy.oauth2 import SpotifyOAuth
# Scrape the Billboard Hot 100 song titles for the requested chart date.
date = input("Which year do you want to travel to? Type the date in this format YYYY-MM-DD: ")
URL = f"https://www.billboard.com/charts/hot-100/{date}"
response = requests.get(URL)
soup = BeautifulSoup(response.text, "html.parser")
all_songs = soup.find_all(name="span", class_="chart-element__information__song text--truncate color--primary")
song_names = [song.getText() for song in all_songs]
# Authenticate with Spotify (OAuth token is cached in token.txt).
# NOTE(review): client_id/client_secret are placeholders; fill in real
# application credentials before running.
sp = spotipy.Spotify(
    auth_manager=SpotifyOAuth(
        scope="playlist-modify-private",
        redirect_uri="http://example.com",
        client_id="your client ID",
        client_secret="your client SECRET",
        show_dialog=True,
        cache_path="token.txt"
    )
)
user_id = sp.current_user()["id"]
# Search each song by title, restricted to the chart's year; skip misses.
song_uris = []
year = date.split("-")[0]
for song in song_names:
    result = sp.search(q=f"track:{song} year:{year}",type="track")
    try:
        uri=result["tracks"]["items"][0]["uri"]
        song_uris.append(uri)
    except IndexError:
        print(f"{song} doesn't exist in Spotify. Skipped")
# Create a private playlist named after the chart date.
playlist = sp.user_playlist_create(user=user_id, name=f"{date} Billboard 100", public=False)
print(playlist)
# Add the found songs to the playlist.
sp.playlist_add_items(playlist_id=playlist["id"], items=song_uris) | 3.671875 | 4 |
fuel_agent/fuel_agent/tests/test_utils.py | Zipfer/fuel-web | 0 | 12769420 | <gh_stars>0
# Copyright 2011 <NAME>
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
import mock
import stevedore
from fuel_agent import errors
from fuel_agent.utils import utils
class ExecuteTestCase(testtools.TestCase):
    """This class is partly based on the same class in openstack/ironic."""
    def setUp(self):
        """Create a stevedore test DriverManager wrapping a fake extension."""
        super(ExecuteTestCase, self).setUp()
        fake_driver = stevedore.extension.Extension('fake_driver', None, None,
                                                    'fake_obj')
        self.drv_manager = stevedore.driver.DriverManager.make_test_instance(
            fake_driver)
    def test_parse_unit(self):
        """parse_unit honors ceil flag and rejects mismatched unit suffixes."""
        self.assertEqual(utils.parse_unit('1.00m', 'm', ceil=True), 1)
        self.assertEqual(utils.parse_unit('1.00m', 'm', ceil=False), 1)
        self.assertEqual(utils.parse_unit('1.49m', 'm', ceil=True), 2)
        self.assertEqual(utils.parse_unit('1.49m', 'm', ceil=False), 1)
        self.assertEqual(utils.parse_unit('1.51m', 'm', ceil=True), 2)
        self.assertEqual(utils.parse_unit('1.51m', 'm', ceil=False), 1)
        self.assertRaises(ValueError, utils.parse_unit, '1.00m', 'MiB')
        self.assertRaises(ValueError, utils.parse_unit, '', 'MiB')
    def test_B2MiB(self):
        """Bytes-to-MiB conversion around the 1 MiB (1048576) boundary."""
        self.assertEqual(utils.B2MiB(1048575, ceil=False), 0)
        self.assertEqual(utils.B2MiB(1048576, ceil=False), 1)
        self.assertEqual(utils.B2MiB(1048575, ceil=True), 1)
        self.assertEqual(utils.B2MiB(1048576, ceil=True), 1)
        self.assertEqual(utils.B2MiB(1048577, ceil=True), 2)
    def test_check_exit_code_boolean(self):
        """check_exit_code=False swallows failures; True raises."""
        utils.execute('/usr/bin/env', 'false', check_exit_code=False)
        self.assertRaises(errors.ProcessExecutionError,
                          utils.execute,
                          '/usr/bin/env', 'false', check_exit_code=True)
    @mock.patch('stevedore.driver.DriverManager')
    def test_get_driver(self, mock_drv_manager):
        """get_driver returns the driver object from the DriverManager."""
        mock_drv_manager.return_value = self.drv_manager
        self.assertEqual('fake_obj', utils.get_driver('fake_driver'))
    @mock.patch('jinja2.Environment')
    @mock.patch('jinja2.FileSystemLoader')
    @mock.patch('six.moves.builtins.open')
    def test_render_and_save_fail(self, mock_open, mock_j_lo, mock_j_env):
        """A failing open() is wrapped into TemplateWriteError."""
        mock_open.side_effect = Exception('foo')
        self.assertRaises(errors.TemplateWriteError, utils.render_and_save,
                          'fake_dir', 'fake_tmpl_name', 'fake_data',
                          'fake_file_name')
    @mock.patch('jinja2.Environment')
    @mock.patch('jinja2.FileSystemLoader')
    @mock.patch('six.moves.builtins.open')
    def test_render_and_save_ok(self, mock_open, mock_j_lo, mock_j_env):
        """Happy path: the rendered template is written to the target file."""
        mock_render = mock.Mock()
        mock_render.render.return_value = 'fake_data'
        mock_j_env.get_template.return_value = mock_render
        utils.render_and_save('fake_dir', 'fake_tmpl_name', 'fake_data',
                              'fake_file_name')
        mock_open.assert_called_once_with('fake_file_name', 'w')
| 2.078125 | 2 |
src/python/analysis.py | yigitozgumus/AAPP_Project | 0 | 12769421 | <filename>src/python/analysis.py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import argparse
from os import listdir
from os.path import isfile, join
# visualization library
import seaborn as sns
# Global seaborn theme: sans fonts, large labels, husl color palette.
sns.set( color_codes=True)
sns.set_context(rc={"font.family":'sans',"font.size":24,"axes.titlesize":18,"axes.labelsize":14})
sns.set_palette("husl")
def main():
    """Parse CLI arguments, load the experiment CSV, and plot comparisons.

    The CSV is given directly via the positional ``csv`` argument, or
    selected by position from the data directory with ``--index`` (which
    was previously parsed but silently ignored).
    """
    parser = argparse.ArgumentParser(description='Visualize the experiment results')
    parser.add_argument("csv", metavar='C', help="The Target Location of the experiment csv file")
    parser.add_argument("--index", metavar='I', help="Optional file selection variable")
    args = parser.parse_args()
    if args.index is not None:
        # --index picks the n-th CSV discovered under the data directory.
        df = get_data(int(args.index))
    else:
        df = get_data_from_file(args.csv)
    new_dfs = seperate_dataframes(df, df.shape[0])
    plot_comparisons(new_dfs)
def print_available_cvs():
    """List the experiment CSV files found in the data directory.

    NOTE(review): scans ``../data`` but prints paths prefixed ``data/`` —
    assumes a specific working directory; confirm against how the script is
    launched. (Function name keeps its historical "cvs" typo.)
    """
    data = "../data"
    experiment_csvs = [("data/" + f) for f in listdir(data)
                       if (isfile(join(data, f)) and f[-3:] == 'csv')]
    print("The current experiment list is:")
    for f in range(len(experiment_csvs)):
        print(str(f) + "-) " + experiment_csvs[f])
def get_data_from_file(filename):
    """Load the experiment CSV ``data/<filename>`` into a DataFrame.

    Column names are assigned explicitly because the CSV has no header row:
    id, Vertex, Edge, then a time ('T-') and storage ('S-') column per
    algorithm variant.
    """
    algorithms = [
        "Tarjan",
        "Nuutila Original",
        "Nuutila Version1",
        "Nuutila Version2",
        "Pearson Version1",
        "Pearson Version2",
        "Pearson Version3",
    ]
    columns = ["id", "Vertex", "Edge"]
    for algorithm in algorithms:
        columns.append("T-" + algorithm)
        columns.append("S-" + algorithm)
    return pd.read_csv("data/" + filename, sep=',', names=columns)
def get_data(index):
    """Load the *index*-th experiment CSV found under ``../data``.

    NOTE(review): discovery order follows listdir() and is therefore
    filesystem-dependent; the same index may pick different files on
    different machines.
    """
    data = "../data"
    experiment_csvs = [("data/" + f) for f in listdir(data)
                       if (isfile(join(data, f)) and f[-3:] == 'csv')]
    # Fixed column layout of the headerless experiment CSVs.
    names = ["id", "Vertex", "Edge",
             'T-Tarjan', 'S-Tarjan',
             'T-Nuutila Original', 'S-Nuutila Original',
             'T-Nuutila Version1', 'S-Nuutila Version1',
             'T-Nuutila Version2', 'S-Nuutila Version2',
             'T-Pearson Version1', 'S-Pearson Version1',
             'T-Pearson Version2', 'S-Pearson Version2',
             'T-Pearson Version3', 'S-Pearson Version3']
    file = "../" + experiment_csvs[index]
    print("\n The file that is used to create the dataframe is : " + file)
    df = pd.read_csv(file, sep=',', names=names)
    return df
def seperate_dataframes(df, maxIndex):
    """Split the raw experiment frame into the four views used for plotting.

    Returns a list of DataFrames, each sorted by 'Vertex' and truncated to
    the first *maxIndex* rows: [edge counts, Nuutila storage, Pearson
    storage, all storage columns].
    """
    storage_columns = ['Vertex', 'S-Tarjan',
                       'S-Nuutila Original',
                       'S-Nuutila Version1',
                       'S-Nuutila Version2',
                       'S-Pearson Version1',
                       'S-Pearson Version2',
                       'S-Pearson Version3']
    df_storage_all = df[storage_columns].sort_values(by=['Vertex'])
    df_edges = df[['Vertex', 'Edge']].sort_values(by=['Vertex'])
    df_s_n = df_storage_all[['Vertex',
                             'S-Nuutila Original',
                             'S-Nuutila Version1',
                             'S-Nuutila Version2', ]]
    df_s_p = df_storage_all[['Vertex',
                             'S-Pearson Version1',
                             'S-Pearson Version2',
                             'S-Pearson Version3']]
    # An unused Tarjan-only frame was previously built here; it was never part
    # of the returned bundle and has been removed.
    bundle = [df_edges[:maxIndex], df_s_n[:maxIndex],
              df_s_p[:maxIndex], df_storage_all[:maxIndex], ]
    return bundle
def plot_storage(df, title, axe=None):
    """Plot every non-'Vertex' column of *df* against 'Vertex'.

    Draws on the Axes *axe* when given, otherwise on a new standalone pyplot
    figure. The y-label is 'Edge Count' when the frame carries an 'Edge'
    column, 'Kilobytes' otherwise (storage frames).
    """
    if axe is not None:
        for i in range(1, len(df.columns)):
            axe.plot(df['Vertex'], df[df.columns[i]])
        if "Edge" in df.columns:
            axe.set_ylabel('Edge Count')
        else:
            axe.set_ylabel('Kilobytes')
        axe.legend(loc='best')
        axe.set_xlabel('Vertex Count')
        axe.set_title(title)
    else:
        plt.figure(figsize=(12, 12))
        for i in range(1, len(df.columns)):
            plt.plot(df['Vertex'], df[df.columns[i]])
        if "Edge" in df.columns:
            # BUG FIX: pyplot has no `set_ylabel`; the previous
            # `plt.set_ylabel(...)` raised AttributeError whenever an edge
            # frame was plotted without an Axes.
            plt.ylabel('Edge Count')
        else:
            plt.ylabel('Kilobytes')
        plt.legend(loc='best')
        plt.xlabel('Vertex Count')
        plt.title(title)
def plot_comparisons(bundle):
    """Render the four frames from seperate_dataframes in a 2x2 grid and show it.

    *bundle* order: edges, Nuutila storage, Pearson storage, all storage.
    """
    f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(14, 14))
    titles = ["Edge Count With Respect to Vertices",
              "Storage Performance of Nuutila Implementations",
              "Storage Performance of Pearce Implementations",
              "Storage Performance of All Algorithms"]
    axs = [ax1, ax2, ax3, ax4]
    for i in range(len(bundle)):
        plot_storage(bundle[i], titles[i], axs[i])
    plt.show()
# Script entry point.
if __name__ == "__main__":
    main()
| 2.84375 | 3 |
script_data_transformation/LifeCycle.py | mbeviere/mbeviere.github.io | 0 | 12769422 | <reponame>mbeviere/mbeviere.github.io<filename>script_data_transformation/LifeCycle.py
class LifeCycle:
    """A life-cycle phase with an identifier, display label, and the list of
    elements it is composed of."""

    def __init__(self, id, label, composed_of):
        # `id` shadows the builtin but is kept for caller compatibility.
        self.id = id
        self.label = label
        self.composed_of = composed_of

    def __repr__(self):
        # Debug-friendly representation; instances previously printed as
        # bare object addresses.
        return (f"LifeCycle(id={self.id!r}, label={self.label!r}, "
                f"composed_of={self.composed_of!r})")
| 1.632813 | 2 |
sdk/python/pulumi_alicloud/ros/get_stacks.py | pulumi/pulumi-alicloud | 42 | 12769423 | <gh_stars>10-100
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetStacksResult',
'AwaitableGetStacksResult',
'get_stacks',
]
@pulumi.output_type
class GetStacksResult:
"""
A collection of values returned by getStacks.
"""
def __init__(__self__, enable_details=None, id=None, ids=None, name_regex=None, names=None, output_file=None, parent_stack_id=None, show_nested_stack=None, stack_name=None, stacks=None, status=None, tags=None):
if enable_details and not isinstance(enable_details, bool):
raise TypeError("Expected argument 'enable_details' to be a bool")
pulumi.set(__self__, "enable_details", enable_details)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ids and not isinstance(ids, list):
raise TypeError("Expected argument 'ids' to be a list")
pulumi.set(__self__, "ids", ids)
if name_regex and not isinstance(name_regex, str):
raise TypeError("Expected argument 'name_regex' to be a str")
pulumi.set(__self__, "name_regex", name_regex)
if names and not isinstance(names, list):
raise TypeError("Expected argument 'names' to be a list")
pulumi.set(__self__, "names", names)
if output_file and not isinstance(output_file, str):
raise TypeError("Expected argument 'output_file' to be a str")
pulumi.set(__self__, "output_file", output_file)
if parent_stack_id and not isinstance(parent_stack_id, str):
raise TypeError("Expected argument 'parent_stack_id' to be a str")
pulumi.set(__self__, "parent_stack_id", parent_stack_id)
if show_nested_stack and not isinstance(show_nested_stack, bool):
raise TypeError("Expected argument 'show_nested_stack' to be a bool")
pulumi.set(__self__, "show_nested_stack", show_nested_stack)
if stack_name and not isinstance(stack_name, str):
raise TypeError("Expected argument 'stack_name' to be a str")
pulumi.set(__self__, "stack_name", stack_name)
if stacks and not isinstance(stacks, list):
raise TypeError("Expected argument 'stacks' to be a list")
pulumi.set(__self__, "stacks", stacks)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="enableDetails")
def enable_details(self) -> Optional[bool]:
return pulumi.get(self, "enable_details")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def ids(self) -> Sequence[str]:
return pulumi.get(self, "ids")
@property
@pulumi.getter(name="nameRegex")
def name_regex(self) -> Optional[str]:
return pulumi.get(self, "name_regex")
@property
@pulumi.getter
def names(self) -> Sequence[str]:
return pulumi.get(self, "names")
@property
@pulumi.getter(name="outputFile")
def output_file(self) -> Optional[str]:
return pulumi.get(self, "output_file")
@property
@pulumi.getter(name="parentStackId")
def parent_stack_id(self) -> Optional[str]:
return pulumi.get(self, "parent_stack_id")
@property
@pulumi.getter(name="showNestedStack")
def show_nested_stack(self) -> Optional[bool]:
return pulumi.get(self, "show_nested_stack")
@property
@pulumi.getter(name="stackName")
def stack_name(self) -> Optional[str]:
return pulumi.get(self, "stack_name")
@property
@pulumi.getter
def stacks(self) -> Sequence['outputs.GetStacksStackResult']:
return pulumi.get(self, "stacks")
@property
@pulumi.getter
def status(self) -> Optional[str]:
return pulumi.get(self, "status")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, Any]]:
return pulumi.get(self, "tags")
class AwaitableGetStacksResult(GetStacksResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetStacksResult(
enable_details=self.enable_details,
id=self.id,
ids=self.ids,
name_regex=self.name_regex,
names=self.names,
output_file=self.output_file,
parent_stack_id=self.parent_stack_id,
show_nested_stack=self.show_nested_stack,
stack_name=self.stack_name,
stacks=self.stacks,
status=self.status,
tags=self.tags)
def get_stacks(enable_details: Optional[bool] = None,
               ids: Optional[Sequence[str]] = None,
               name_regex: Optional[str] = None,
               output_file: Optional[str] = None,
               parent_stack_id: Optional[str] = None,
               show_nested_stack: Optional[bool] = None,
               stack_name: Optional[str] = None,
               status: Optional[str] = None,
               tags: Optional[Mapping[str, Any]] = None,
               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetStacksResult:
    """
    This data source provides the Ros Stacks of the current Alibaba Cloud user.

    > **NOTE:** Available in v1.106.0+.

    ## Example Usage

    Basic Usage

    ```python
    import pulumi
    import pulumi_alicloud as alicloud

    example = alicloud.ros.get_stacks(ids=["example_value"],
        name_regex="the_resource_name")
    pulumi.export("firstRosStackId", example.stacks[0].id)
    ```

    :param bool enable_details: Default to `false`. Set it to `true` can output more details about resource attributes.
    :param Sequence[str] ids: A list of Stack IDs.
    :param str name_regex: A regex string to filter results by Stack name.
    :param str parent_stack_id: Parent Stack Id.
    :param bool show_nested_stack: The show nested stack.
    :param str stack_name: Stack Name.
    :param str status: The status of Stack. Valid Values: `CREATE_COMPLETE`, `CREATE_FAILED`, `CREATE_IN_PROGRESS`, `DELETE_COMPLETE`, `DELETE_FAILED`, `DELETE_IN_PROGRESS`, `ROLLBACK_COMPLETE`, `ROLLBACK_FAILED`, `ROLLBACK_IN_PROGRESS`.
    :param Mapping[str, Any] tags: Query the instance bound to the tag. The format of the incoming value is `json` string, including `TagKey` and `TagValue`. `TagKey` cannot be null, and `TagValue` can be empty. Format example `{"key1":"value1"}`.
    """
    # Map the pythonic keyword arguments onto the camelCase names the
    # provider's invoke endpoint expects.
    invoke_args = {
        'enableDetails': enable_details,
        'ids': ids,
        'nameRegex': name_regex,
        'outputFile': output_file,
        'parentStackId': parent_stack_id,
        'showNestedStack': show_nested_stack,
        'stackName': stack_name,
        'status': status,
        'tags': tags,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    reply = pulumi.runtime.invoke('alicloud:ros/getStacks:getStacks', invoke_args, opts=opts, typ=GetStacksResult).value
    # Re-wrap the plain result in its awaitable subclass.
    return AwaitableGetStacksResult(
        enable_details=reply.enable_details,
        id=reply.id,
        ids=reply.ids,
        name_regex=reply.name_regex,
        names=reply.names,
        output_file=reply.output_file,
        parent_stack_id=reply.parent_stack_id,
        show_nested_stack=reply.show_nested_stack,
        stack_name=reply.stack_name,
        stacks=reply.stacks,
        status=reply.status,
        tags=reply.tags)
| 1.8125 | 2 |
analysis/utils/mouse_player.py | chrelli/3DDD_social_mouse_tracker | 1 | 12769424 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# from IPython import get_ipython
import time, os, sys, shutil
# from utils.fitting_utils import *
# for math and plotting
import pandas as pd
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # <--- This is important for 3d plotting
import sys, os, pickle
# import cv2
# from colour import Color
import h5py
import glob
import itertools
# and pytorch
import torch
# In[2]:
import ipywidgets as widgets
from ipywidgets import HBox, VBox
from IPython.display import display
# %matplotlib inline
get_ipython().run_line_magic('matplotlib', 'widget')
# In[ ]:
# In[3]:
# def unpack_from_jagged(jagged_line):
# ''' THE REVESER SO HERE IT UNPACKS AGAIN SO THE DATA CAN BE SAVED
# AS A JAGGED H5PY DATASET
# FROM OTHER: Takes the NX3, N, Mx3, M, M shapes and packs to a single float16
# We ravel the position, ravel the keyp, stack everything and
# - importantly - we also save M, the number of keypoints'''
# n_keyp = int(jagged_line[-1])
# keyp_idx2 = jagged_line[-(1+n_keyp):-1].astype('int')
# pkeyp2 = jagged_line[-(1+2*n_keyp):-(1+n_keyp)]
# keyp2 = jagged_line[-(1+5*n_keyp):-(1+2*n_keyp)].reshape((n_keyp,3))
# block2 = jagged_line[:-(1+5*n_keyp)].reshape((-1,4))
# pos2,pos_weights2 = block2[:,:3], block2[:,3]
# # HACK to cut the floor
# floor_logic = pos2[:,2] > .012
# pos2 = pos2[floor_logic,:]
# pos_weights2 = pos_weights2[floor_logic]
# return pos2, pos_weights2, keyp2, pkeyp2, keyp_idx2
from utils.analysis_tools import unpack_from_jagged
from utils.analysis_tools import particles_to_body_supports_cuda
class data_storage(object):
    """Holds the raw 3D point-cloud recording plus the fitted tracking result,
    and owns the matplotlib 3D figure that the player widgets draw into.

    All artist handles (scatter / line / wireframe collections) are stored on
    the instance so that the `update_*` methods can mutate them in place
    instead of re-plotting each frame.
    """
    def __init__(self):
        # TODO update all this properly
        self.data_path = None      # path to the pre-processed hdf5 recording
        self.tracking_path = None  # path to the pickled tracking result
        self.jagged_lines = None   # raw per-frame data, filled by load_jagged()
        self.has_implant = True    # overwritten by load_tracking()
        self.is_running = False
    def load_jagged(self):
        """Load the per-frame 'jagged' point-cloud dataset from the hdf5 file
        at self.data_path into self.jagged_lines."""
        with h5py.File(self.data_path, mode='r') as hdf5_file:
            print("Loading jagged lines from " + self.data_path + "...")
            # print(hdf5_file.keys())
            # print(len(hdf5_file['dataset']))
            self.jagged_lines = hdf5_file['dataset'][...]
            print("Loaded {} jagged lines.".format(len(self.jagged_lines)) )
    def load_tracking(self):
        """Load the pickled tracking result from self.tracking_path and
        precompute per-frame body supports (anchor points + rotations) for
        both animals on the GPU.

        NOTE(review): only the `*_raw` fits are populated here; the
        `*_smooth` attributes referenced by the `fit='smooth'` code paths
        below are commented out, so those paths would raise AttributeError
        until smoothing is restored.
        """
        with open(self.tracking_path, 'rb') as f:
            tracked_behavior = pickle.load(f)
        print(tracked_behavior.keys())
        self.tracked_behavior = tracked_behavior
        self.has_implant = tracked_behavior['has_implant']
        self.start_frame = tracked_behavior['start_frame']
        self.end_frame = tracked_behavior['end_frame']
        # get the raw tracking data!
        part = self.tracked_behavior['tracking_holder']
        # unpack all the 3D coordinates!
        part = torch.from_numpy(part).float().cuda()
        part = torch.transpose(part,0,1)
        # particle layout: 9 values per animal with an implant, 8 without;
        # column 2 of each animal's slice is the spine-length scale s.
        if self.has_implant:
            body_support_0 = particles_to_body_supports_cuda(part[:,:9],implant = True)
            body_support_1 = particles_to_body_supports_cuda(part[:,9:],implant = False)
            # and the spine length
            s_0 = part[:,2].cpu().numpy()
            s_1 = part[:,2+9].cpu().numpy()
        else:
            body_support_0 = particles_to_body_supports_cuda(part[:,:8],implant = False)
            body_support_1 = particles_to_body_supports_cuda(part[:,8:],implant = False)
            # and the spine length
            s_0 = part[:,2].cpu().numpy()
            s_1 = part[:,2+8].cpu().numpy()
        # add the raw and smoothed coordinates as numpy arrays
        self.body_support_0_raw = [i.cpu().numpy().squeeze() for i in body_support_0]
        # self.body_support_0_smooth = body_support_0_smooth
        self.s_0_raw = s_0
        # self.s_0_smooth = s_0_smooth
        self.body_support_1_raw = [i.cpu().numpy().squeeze() for i in body_support_1]
        # self.body_support_1_smooth = body_support_1_smooth
        self.s_1_raw = s_1
        # self.s_1_smooth = s_1_smooth
    def make_3d_axis(self):
        """Create the 3D figure/axes that all plotting methods draw into."""
        # 3D plot of the
        fig = plt.figure(figsize = (4.5,4.5))
        ax = fig.add_subplot(111, projection='3d')
        # add to self for use later
        self.fig = fig
        self.ax = ax
    def add_raw_data(self,frame):
        """Scatter-plot the raw point cloud and the four keypoint classes for
        one frame, creating the artists that update_raw_data() mutates later.
        Also records the data's bounding box for axis scaling."""
        # unpack the raw data in a plottable format
        pos, pos_weights, keyp, pkeyp, ikeyp = unpack_from_jagged(self.jagged_lines[frame])
        X, Y, Z = pos[:,0],pos[:,1],pos[:,2]
        # add to axis 3D plot of Sphere
        self.h_pc = self.ax.scatter(X, Y, Z, zdir='z', s=2, c='k', alpha = .05,rasterized=False)
        body_colors = ['dodgerblue','red','lime','orange']
        body_indices = [0,1,2,3]
        # loop over the types of body, and make emptyscatter plots
        self.h_kp_list = []
        for body in body_indices:
            h_kp = self.ax.scatter([],[],[], zdir='z', s=25, c=body_colors[body],rasterized=False)
            self.h_kp_list.append(h_kp)
        # THEN set the 3d values to be what the shoud be
        for body in body_indices:
            self.h_kp_list[body]._offsets3d = (keyp[ikeyp==body,0], keyp[ikeyp==body,1], keyp[ikeyp==body,2])
        # for axis adjustment
        self.max_range = np.array([X.max()-X.min(), Y.max()-Y.min(), Z.max()-Z.min()]).max() / 2.0
        self.mid_x = (X.max()+X.min()) * 0.5
        self.mid_y = (Y.max()+Y.min()) * 0.5
        self.mid_z = (Z.max()+Z.min()) * 0.5
    def update_raw_data(self,frame):
        """Move the existing point-cloud / keypoint artists to `frame` by
        rewriting their (private) _offsets3d data in place."""
        # get new raw data!
        pos, pos_weights, keyp, pkeyp, ikeyp = unpack_from_jagged(self.jagged_lines[frame])
        X, Y, Z = pos[:,0],pos[:,1],pos[:,2]
        # update the pointcloud
        self.h_pc._offsets3d = (X,Y,Z)
        # and update the keypoints
        for body in range(4):
            self.h_kp_list[body]._offsets3d = (keyp[ikeyp==body,0], keyp[ikeyp==body,1], keyp[ikeyp==body,2])
    def plot_skeleton(self,body_support,color = 'k',body_idx = 0,has_implant = False):
        """Draw one animal's skeleton: its anchor points and the line segments
        between them, appending the handles to self.h_bp_list[body_idx] and
        self.h_skel_list[body_idx] (in the same order update_skeleton expects)."""
        # unpack
        c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body,R_head,R_nose = body_support
        #print("c_hip is {}".format(c_hip))
        if has_implant:
            p_skel = [c_hip,c_mid,c_nose,c_ass,c_tip,c_impl]
            p_line = [c_nose,c_nose,c_mid,c_impl,c_impl]
            q_line = [c_mid,c_tip,c_ass,c_nose,c_tip]
        else:
            p_skel = [c_hip,c_mid,c_nose,c_ass,c_tip]
            p_line = [c_nose,c_nose,c_mid]
            q_line = [c_mid,c_tip,c_ass]
        # add the body points
        for p in p_skel:
            h_bp = self.ax.scatter(p[0],p[1],p[2],zdir='z', s=50, alpha = 1 , c=color,rasterized=False)
            self.h_bp_list[body_idx].append(h_bp)
        # and the lines between body parts
        for p,q in zip(p_line,q_line):
            h_skel = self.ax.plot([p[0],q[0]],[p[1],q[1]],[p[2],q[2]],c=color,lw = 4)
            self.h_skel_list[body_idx].append(h_skel)
    def add_skel_fit(self,frame,fit='raw',plot_ellipsoids = True):
        """Draw both animals' skeletons for one frame of the chosen fit
        ('raw' or 'smooth'; any other value is a silent no-op).

        NOTE(review): the `plot_ellipsoids` argument is unused here and
        shadows the method of the same name — confirm before removing.
        The s_0/s_1 spine scales are fetched but not used by the skeleton.
        """
        # frame index
        i_frame = frame-self.start_frame
        if fit =='raw':
            body_support_0 = [ d[i_frame,...] for d in self.body_support_0_raw]
            body_support_1 = [ d[i_frame,...] for d in self.body_support_1_raw]
            s_0 = self.s_0_raw[i_frame]
            s_1 = self.s_1_raw[i_frame]
        elif fit =='smooth':
            body_support_0 = [ d[i_frame,...] for d in self.body_support_0_smooth]
            body_support_1 = [ d[i_frame,...] for d in self.body_support_1_smooth]
            s_0 = self.s_0_smooth[i_frame]
            s_1 = self.s_1_smooth[i_frame]
        else:
            return
        # and plot!
        self.h_skel_list = [[],[]]
        self.h_bp_list = [[],[]]
        self.plot_skeleton(body_support_0,color = 'k',body_idx = 0,has_implant = self.has_implant)
        self.plot_skeleton(body_support_1,color = 'peru',body_idx = 1,has_implant = False)
    def update_skeleton(self,body_support,body_idx = 0, has_implant = False):
        """Move one animal's existing skeleton artists (created by
        plot_skeleton, same ordering) to a new body support."""
        # unpack
        c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body,R_head,R_nose = body_support
        if has_implant :
            p_skel = [c_hip,c_mid,c_nose,c_ass,c_tip,c_impl]
            p_line = [c_nose,c_nose,c_mid,c_impl,c_impl]
            q_line = [c_mid,c_tip,c_ass,c_nose,c_tip]
        else:
            p_skel = [c_hip,c_mid,c_nose,c_ass,c_tip]
            p_line = [c_nose,c_nose,c_mid]
            q_line = [c_mid,c_tip,c_ass]
        # update the body points
        for j,p in enumerate(p_skel):
            self.h_bp_list[body_idx][j]._offsets3d = ([p[0]],[p[1]],[p[2]])
        # update the lines between body parts
        for j,(p,q) in enumerate(zip(p_line,q_line)):
            # # lines are an extra level deep for some stupid matplotlib reason
            # self.h_skel_list[body_idx][j][0].set_xdata([p[0],q[0]])
            # self.h_skel_list[body_idx][j][0].set_ydata([p[1],q[1]])
            # self.h_skel_list[body_idx][j][0].set_3d_properties([p[2],q[2]])
            # new matplotlilb has changed how this is done:
            self.h_skel_list[body_idx][j][0].set_data_3d([p[0],q[0]],[p[1],q[1]],[p[2],q[2]])
    def update_skel_fit(self,frame,fit='raw'):
        """Update both animals' skeleton artists to `frame` of the chosen fit
        ('raw' or 'smooth'; any other value is a silent no-op)."""
        # get the data out frame index
        i_frame = frame-self.start_frame
        # speed up this list nonsense
        if fit =='raw':
            body_support_0 = [ d[i_frame,...] for d in self.body_support_0_raw]
            body_support_1 = [ d[i_frame,...] for d in self.body_support_1_raw]
            s_0 = self.s_0_raw[i_frame]
            s_1 = self.s_1_raw[i_frame]
        elif fit =='smooth':
            body_support_0 = [ d[i_frame,...] for d in self.body_support_0_smooth]
            body_support_1 = [ d[i_frame,...] for d in self.body_support_1_smooth]
            s_0 = self.s_0_smooth[i_frame]
            s_1 = self.s_1_smooth[i_frame]
        else:
            return
        self.update_skeleton(body_support_0,body_idx = 0, has_implant = self.has_implant)
        self.update_skeleton(body_support_1,body_idx = 1, has_implant = False)
    def add_ellip_fit(self,frame,fit='raw',plot_ellipsoids = True):
        """Draw both animals' body ellipsoids (wireframes) for one frame of
        the chosen fit. NOTE(review): `plot_ellipsoids` is unused here and
        shadows the method of the same name — confirm before removing."""
        # frame index
        i_frame = frame-self.start_frame
        if fit =='raw':
            body_support_0 = [ d[i_frame,...] for d in self.body_support_0_raw]
            body_support_1 = [ d[i_frame,...] for d in self.body_support_1_raw]
            s_0 = self.s_0_raw[i_frame]
            s_1 = self.s_1_raw[i_frame]
        elif fit =='smooth':
            body_support_0 = [ d[i_frame,...] for d in self.body_support_0_smooth]
            body_support_1 = [ d[i_frame,...] for d in self.body_support_1_smooth]
            s_0 = self.s_0_smooth[i_frame]
            s_1 = self.s_1_smooth[i_frame]
        else:
            return
        self.h_hip_list = [[],[]]
        self.plot_ellipsoids(body_support_0,s_0,color = 'k',body_idx = 0,has_implant=self.has_implant)
        self.plot_ellipsoids(body_support_1,s_1,color = 'peru',body_idx = 1,has_implant=False)
    def add_wireframe_to_axis(self,ax,R_body,c_hip, a_nose,b_nose,a_hip,b_hip,r_impl,style='hip',this_color='k',this_alpha=.4):
        """Plot one ellipsoid/sphere wireframe (hip ellipsoid, nose ellipsoid
        or implant sphere, selected by `style`) rotated by R_body and centred
        at c_hip, and return the Line3DCollection handle.

        NOTE(review): largely duplicates calculate_wireframe_points(); only
        the semi-axes matching `style` are used per call.
        """
        # FIRST PLOT THE ELLIPSE, which is the hip
        # generate points on a sphere
        u, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:10j]
        # get the mesh, by using the equation of an ellipsoid
        if style == 'hip':
            x=np.cos(u)*a_hip
            y=np.sin(u)*np.sin(v)*b_hip
            z=np.sin(u)*np.cos(v)*b_hip
            this_color = 'grey'
        if style == 'nose':
            x=np.cos(u)*a_nose
            y=np.sin(u)*np.sin(v)*b_nose
            z=np.sin(u)*np.cos(v)*b_nose
        if style == 'impl':
            x=np.cos(u)*r_impl
            y=np.sin(u)*np.sin(v)*r_impl
            z=np.sin(u)*np.cos(v)*r_impl
        # pack to matrix of positions
        posi = np.vstack((x.ravel(),y.ravel(),z.ravel()))
        # apply the rotatation and unpack
        # posi_rotated = ((R_body @ (posi.T + c_hip).T ).T + t_body).T
        # REMEBRE BODY SUPPORTS ARE [c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body,R_head,R_nose]
        posi_rotated = np.einsum('ij,ja->ia',R_body,posi) + c_hip[:,np.newaxis]
        x = posi_rotated[0,:]
        y = posi_rotated[1,:]
        z = posi_rotated[2,:]
        # reshape for wireframe
        x = np.reshape(x, (u.shape) )
        y = np.reshape(y, (u.shape) )
        z = np.reshape(z, (u.shape) )
        h_hip = ax.plot_wireframe(x, y, z, color=this_color,alpha = this_alpha)
        return h_hip
    def plot_ellipsoids(self,body_support,s,color = 'k',body_idx = 0,has_implant=False):
        """Draw the hip ellipsoid, nose ellipsoid and (optionally) implant
        sphere for one animal, interpolating the hip semi-axes from the
        spine scale s, and store the handles in self.h_hip_list[body_idx]."""
        # unpack
        c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body,R_head,R_nose = body_support
        # this is not so elegant, hm hm
        _, a_hip_min,a_hip_max,b_hip_min,b_hip_max,a_nose,b_nose,d_nose,x_impl,z_impl,r_impl= self.tracked_behavior['body_constants']
        # hip axes stretch with s: long axis grows, short axis shrinks
        a_hip_delta = a_hip_max - a_hip_min
        b_hip_delta = b_hip_max - b_hip_min
        a_hip_0 = a_hip_min
        b_hip_0 = b_hip_min
        a_hip = a_hip_0 + a_hip_delta * s
        b_hip = b_hip_0 + b_hip_delta * (1.-s)
        d_hip = .75 * a_hip
        if has_implant:
            RRs,ccs,styles = [R_body,R_nose,R_nose],[c_hip,c_nose,c_impl],['hip','nose','impl']
        else:
            RRs,ccs,styles = [R_body,R_nose],[c_hip,c_nose],['hip','nose']
        for RR,cc,style in zip(RRs,ccs,styles):
            h_hip = self.add_wireframe_to_axis(self.ax,RR,
                                               cc,
                                               a_nose,
                                               b_nose,
                                               a_hip,
                                               b_hip,
                                               r_impl,
                                               style=style,this_color=color)
            self.h_hip_list[body_idx].append(h_hip)
    def update_wireframe_lines(self,h_hip,X,Y,Z):
        """Rewrite an existing wireframe's line segments in place so an
        ellipsoid can be moved without re-plotting."""
        # h_hip is the handle to the lines3dcollection
        # much of the code is taken from the source of the marplotlib wireframe plotting
        X, Y, Z = np.broadcast_arrays(X, Y, Z)
        rows, cols = Z.shape
        rstride = 1
        cstride = 1
        # We want two sets of lines, one running along the "rows" of
        # Z and another set of lines running along the "columns" of Z.
        # This transpose will make it easy to obtain the columns.
        tX, tY, tZ = np.transpose(X), np.transpose(Y), np.transpose(Z)
        if rstride:
            rii = list(range(0, rows, rstride))
            # Add the last index only if needed
            if rows > 0 and rii[-1] != (rows - 1):
                rii += [rows-1]
        else:
            rii = []
        if cstride:
            cii = list(range(0, cols, cstride))
            # Add the last index only if needed
            if cols > 0 and cii[-1] != (cols - 1):
                cii += [cols-1]
        else:
            cii = []
        xlines = [X[i] for i in rii]
        ylines = [Y[i] for i in rii]
        zlines = [Z[i] for i in rii]
        txlines = [tX[i] for i in cii]
        tylines = [tY[i] for i in cii]
        tzlines = [tZ[i] for i in cii]
        lines = ([list(zip(xl, yl, zl))
                  for xl, yl, zl in zip(xlines, ylines, zlines)]
                 + [list(zip(xl, yl, zl))
                    for xl, yl, zl in zip(txlines, tylines, tzlines)])
        h_hip.set_segments(lines)
    def calculate_wireframe_points(self,R_body,c_hip,a_nose,b_nose,a_hip,b_hip,r_impl,style='hip'):
        """Return the (x, y, z) wireframe mesh for one ellipsoid/sphere,
        rotated by R_body and centred at c_hip. Same geometry as
        add_wireframe_to_axis(), but without plotting."""
        # FIRST PLOT THE ELLIPSE, which is the hip
        # generate points on a sphere
        u, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:10j]
        # get the mesh, by using the equation of an ellipsoid
        if style == 'hip':
            x=np.cos(u)*a_hip
            y=np.sin(u)*np.sin(v)*b_hip
            z=np.sin(u)*np.cos(v)*b_hip
        if style == 'nose':
            x=np.cos(u)*a_nose
            y=np.sin(u)*np.sin(v)*b_nose
            z=np.sin(u)*np.cos(v)*b_nose
        if style == 'impl':
            x=np.cos(u)*r_impl
            y=np.sin(u)*np.sin(v)*r_impl
            z=np.sin(u)*np.cos(v)*r_impl
        # pack to matrix of positions
        posi = np.vstack((x.ravel(),y.ravel(),z.ravel()))
        # apply the rotatation and unpack
        # posi_rotated = ((R_body @ (posi.T + c_hip).T ).T + t_body).T
        # REMEBRE BODY SUPPORTS ARE [c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body,R_head,R_nose]
        posi_rotated = np.einsum('ij,ja->ia',R_body,posi) + c_hip[:,np.newaxis]
        x = posi_rotated[0,:]
        y = posi_rotated[1,:]
        z = posi_rotated[2,:]
        # reshape for wireframe
        x = np.reshape(x, (u.shape) )
        y = np.reshape(y, (u.shape) )
        z = np.reshape(z, (u.shape) )
        return x,y,z
    def update_ellipsoids(self,body_support,s,body_idx = 0, has_implant = False):
        """Move one animal's existing ellipsoid wireframes (created by
        plot_ellipsoids, same ordering) to a new body support and spine
        scale s."""
        # unpack
        c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body,R_head,R_nose = body_support
        # this is not so elegant, hm hm
        # this is STILL not so elegant, hm hm
        _, a_hip_min,a_hip_max,b_hip_min,b_hip_max,a_nose,b_nose,d_nose,x_impl,z_impl,r_impl= self.tracked_behavior['body_constants']
        a_hip_delta = a_hip_max - a_hip_min
        b_hip_delta = b_hip_max - b_hip_min
        a_hip_0 = a_hip_min
        b_hip_0 = b_hip_min
        a_hip = a_hip_0 + a_hip_delta * s
        b_hip = b_hip_0 + b_hip_delta * (1.-s)
        d_hip = .75 * a_hip
        if has_implant:
            RRs,ccs,styles = [R_body,R_nose,R_nose],[c_hip,c_nose,c_impl],['hip','nose','impl']
        else:
            RRs,ccs,styles = [R_body,R_nose],[c_hip,c_nose],['hip','nose']
        for jj, (RR,cc,style) in enumerate(zip(RRs,ccs,styles)):
            X,Y,Z = self.calculate_wireframe_points(RR,
                                                    cc,
                                                    a_nose,
                                                    b_nose,
                                                    a_hip,
                                                    b_hip,
                                                    r_impl,
                                                    style=style)
            h_hip = self.h_hip_list[body_idx][jj]
            self.update_wireframe_lines(h_hip,X,Y,Z)
    def update_ellip_fit(self,frame,fit = 'raw'):
        """Update both animals' ellipsoid wireframes to `frame` of the chosen
        fit ('raw' or 'smooth'; any other value is a silent no-op)."""
        # get the data out frame index
        i_frame = frame-self.start_frame
        # speed up this list nonsense
        if fit =='raw':
            body_support_0 = [ d[i_frame,...] for d in self.body_support_0_raw]
            body_support_1 = [ d[i_frame,...] for d in self.body_support_1_raw]
            s_0 = self.s_0_raw[i_frame]
            s_1 = self.s_1_raw[i_frame]
        elif fit =='smooth':
            body_support_0 = [ d[i_frame,...] for d in self.body_support_0_smooth]
            body_support_1 = [ d[i_frame,...] for d in self.body_support_1_smooth]
            s_0 = self.s_0_smooth[i_frame]
            s_1 = self.s_1_smooth[i_frame]
        else:
            return
        self.update_ellipsoids(body_support_0,s_0,body_idx = 0,has_implant = self.has_implant)
        self.update_ellipsoids(body_support_1,s_1,body_idx = 1,has_implant = False)
    def unpack_trace(self,body_support,trace_indices,body_idx = 0,what_type=['hip'],color='k'):
        """Plot a 3D trajectory trace for each requested body-point type and
        store the line handles in self.h_trace_list[body_idx].

        NOTE(review): `what_type=['hip']` is a mutable default argument; it
        is only read here, so this is harmless but worth cleaning up.
        """
        c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body,R_head,R_nose = body_support
        type_list = np.array(['hip','ass','mid','nose','tip','impl'])
        c_list = [c_hip,c_ass,c_mid,c_nose,c_tip,c_impl]
        ii_c_list = np.arange(len(type_list))
        # TODO make the decay work!
        for ttt in what_type:
            # this is also not so elegant
            selecta = np.arange(len(type_list))[type_list == ttt]
            dat = c_list[selecta[0]].squeeze()
            X,Y,Z = dat[trace_indices,0],dat[trace_indices,1],dat[trace_indices,2]
            h_trace = self.ax.plot(X,Y,Z,lw=2,c=color,alpha = .65)
            self.h_trace_list[body_idx][ii_c_list[type_list == ttt][0]] = h_trace
    def add_trace(self,frame,trace='raw',trace_length=90,trace_clip = None,decay_factor=.9, type_list = ['nose']):
        """Create trajectory traces for both animals, covering up to
        `trace_length` frames ending at `frame` (optionally clipped at
        `trace_clip`). `decay_factor` is currently unused (see TODO in
        unpack_trace).

        NOTE(review): the holder has 5 slots per animal, but there are 6
        trace types (index 5 = 'impl'), so an 'impl' trace would raise
        IndexError — confirm intended trace types.
        """
        # get the particle, convert to torch tensor, calculate body supports
        i_frame = frame-self.start_frame
        # make a holder for the lines
        self.h_trace_list = [[None]*5,[None]*5]
        if trace_clip is not None:
            i_clip = trace_clip-self.start_frame
            i_trace_start = np.max([i_clip, i_frame-trace_length])
        else:
            i_trace_start = np.max([0, i_frame-trace_length])
        #print("i_trace_start is {} and i_frame is {}".format(i_trace_start,i_frame))
        trace_indices = np.arange(i_trace_start,i_frame)
        if trace == 'raw':
            self.unpack_trace(self.body_support_0_raw,trace_indices, body_idx = 0,what_type=type_list,color='black')
            self.unpack_trace(self.body_support_1_raw,trace_indices, body_idx = 1,what_type=type_list,color='peru')
        if trace == 'smooth':
            self.unpack_trace(self.body_support_0_smooth,trace_indices, body_idx = 0,what_type=type_list,color='black')
            self.unpack_trace(self.body_support_1_smooth,trace_indices, body_idx = 1,what_type=type_list,color='peru')
    def update_trace_3dlines(self,body_support,trace_indices,body_idx=0,what_type=['hip']):
        """Move one animal's existing trace lines to a new index window.

        NOTE(review): the handle lookup compares `type_list == what_type`
        (the whole list) instead of `== ttt` as unpack_trace does; this only
        behaves as expected for a single-element `what_type` — confirm.
        """
        c_hip,c_ass,c_mid,c_nose,c_tip,c_impl,R_body,R_head,R_nose = body_support
        type_list = np.array(['hip','ass','mid','nose','tip','impl'])
        c_list = [c_hip,c_ass,c_mid,c_nose,c_tip,c_impl]
        ii_c_list = np.arange(len(type_list))
        # TODO make the decay work!
        for ttt in what_type:
            # this is also not so elegant
            selecta = np.arange(len(type_list))[type_list == ttt]
            dat = c_list[selecta[0]].squeeze()
            X,Y,Z = dat[trace_indices,0],dat[trace_indices,1],dat[trace_indices,2]
            # self.h_trace_list[body_idx][ii_c_list[type_list == what_type][0]][0].set_xdata(X)
            # self.h_trace_list[body_idx][ii_c_list[type_list == what_type][0]][0].set_ydata(Y)
            # self.h_trace_list[body_idx][ii_c_list[type_list == what_type][0]][0].set_3d_properties(Z)
            # Ugh matplotlib changed the api, the new way makes much more sense though, so fine..
            self.h_trace_list[body_idx][ii_c_list[type_list == what_type][0]][0].set_data_3d(X,Y,Z)
    def update_trace_fit(self,frame,trace='raw',trace_length=90,trace_clip = None,decay_factor=.9, type_list = None):
        """Update both animals' trace lines to the window ending at `frame`
        ('raw' or 'smooth'; other values, or an empty window, are no-ops)."""
        # get the particle, convert to torch tensor, calculate body supports
        i_frame = frame-self.start_frame
        if trace_clip is not None:
            i_clip = trace_clip-self.start_frame
            i_trace_start = np.max([i_clip, i_frame-trace_length])
        else:
            i_trace_start = np.max([0, i_frame-trace_length])
        # these are the indices to plot
        trace_indices = np.arange(i_trace_start,i_frame)
        if trace =='raw':
            body_support_0 = self.body_support_0_raw
            body_support_1 = self.body_support_1_raw
        elif trace =='smooth':
            body_support_0 = self.body_support_0_smooth
            body_support_1 = self.body_support_1_smooth
        else:
            return
        if len(trace_indices)== 0:
            # just skip if there is no trace
            return
        self.update_trace_3dlines(body_support_0,trace_indices,body_idx=0,what_type = type_list)
        self.update_trace_3dlines(body_support_1,trace_indices,body_idx=1,what_type = type_list)
    def finish_3d_axis(self,view_style = 'ex', zoom = False, dump = False):
        """Set equal-aspect axis limits (from the bounds recorded by
        add_raw_data), hide tick labels and choose the camera angle.

        NOTE(review): `zoom` and `dump` are unused; an unknown `view_style`
        would leave az/el unbound (NameError); and there are two
        `view_style == 'ex'` branches — the second (az=-46, el=23) wins.
        """
        # finish the labeling, plot adjustments, dump and show
        ax = self.ax
        if self.max_range is not None:
            ax.set_xlim(self.mid_x - self.max_range, self.mid_x + self.max_range)
            ax.set_ylim(self.mid_y - self.max_range, self.mid_y + self.max_range)
            ax.set_zlim(0, 2*self.max_range)
        ax.xaxis.set_ticklabels([])
        ax.yaxis.set_ticklabels([])
        ax.zaxis.set_ticklabels([])
        if view_style == 'top':
            az = -30
            el = 90
        if view_style == 'side':
            az = -15
            el = 9
        if view_style == 'mix':
            az = 150
            el = 50
        if view_style == 'ex':
            az = -14
            el = 46
        if view_style == 'ex':
            az = -46
            el = 23
        ax.view_init(elev=el, azim=az)
# Module-level singleton holding the data and the 3D figure; all widget
# callbacks below operate on it.
storage = data_storage()
# In[4]:
# Playback transport: the Play widget advances 10 frames every 100 ms and is
# mirrored by an IntSlider (linked client-side via jslink further down).
play = widgets.Play(
    value=0,
    min=0,
    max=10000,
    step=10,
    interval=100,
    description="Press play",
    disabled=False
)
slider = widgets.IntSlider(value=0,
                           min=0,
                           max=10000)
def on_value_change(change):
    """Redraw the 3D scene whenever the playback position changes.

    `change` is the ipywidgets observer payload; `change['new']` carries the
    new slider value.
    """
    # Cast once and reuse — the original computed `frame` but then passed the
    # raw (un-cast) value to update_raw_data while re-casting for the others.
    frame = int(change['new'])
    storage.update_raw_data(frame)
    storage.update_skel_fit(frame)
    storage.update_ellip_fit(frame)
    # storage.update_trace_fit(frame)  # trace updating currently disabled
    storage.fig.canvas.draw()
# Redraw on every slider move, and keep play/slider values in lockstep in the
# browser (jslink avoids a server round-trip).
slider.observe(on_value_change, 'value')
widgets.jslink((play, 'value'),(slider, 'value'))
# In[5]:
# Text inputs holding the paths of the raw recording and the tracking result,
# plus the buttons that trigger loading them (handlers attached below).
data_path_textbox = widgets.Text(
    value='/media/chrelli/SSD4TB/Data0_backup/recording_20201110-105540/pre_processed_frames.hdf5',
    description='Path:'
)
tracking_path_textbox = widgets.Text(
    value='/media/chrelli/SSD4TB/Data0_backup/recording_20201110-105540/tracked_behavior_in_progress.pkl',
    description='Path:'
)
load_button = widgets.Button(
    description='Load data',
)
load_behavior_button = widgets.Button(
    description='Load tracking',
)
# In[6]:
@load_button.on_click
def plot_on_click(b):
    """Load the raw point-cloud recording and draw the initial 3D scene."""
    storage.data_path = data_path_textbox.value
    storage.load_jagged()
    # draw the currently selected frame and finish the axis cosmetics
    storage.add_raw_data(int(play.value))
    storage.finish_3d_axis()
    storage.fig.canvas.draw()
    # playback now covers every loaded frame
    n_frames = len(storage.jagged_lines)
    for transport in (play, slider):
        transport.min = 0
        transport.max = n_frames
@load_behavior_button.on_click
def plot_on_click2(b):
    """Load the tracking result and overlay skeletons and ellipsoids."""
    storage.tracking_path = tracking_path_textbox.value
    storage.load_tracking()
    frame = int(play.value)
    storage.add_skel_fit(frame)
    storage.add_ellip_fit(frame)
    # storage.add_trace(frame)  # trace drawing currently disabled
    # restrict playback to the tracked frame range
    start = storage.tracked_behavior['start_frame']
    end = storage.tracked_behavior['end_frame']
    for transport in (play, slider):
        transport.min = start
        transport.max = end
    storage.fig.canvas.draw()
# In[7]:
# Frame-number jump controls (handler attached below).
frame_textbox = widgets.BoundedIntText(
    value=0,
    min = 0,
    max = 10000,
    description='Frame #:'
)
jump_frame_button = widgets.Button(
    description='Jump to frame',
)
# In[8]:
@jump_frame_button.on_click
def jump_to_frame(b):
    """Jump playback to the frame number typed in the frame textbox.

    Renamed from `update_frame`: that name was reused by four other button
    callbacks below, silently shadowing this one at module level.
    """
    play.value = frame_textbox.value
    # The linked slider observer triggers the redraw; no explicit draw needed.
# In[9]:
# Time-based jump controls; fps converts seconds to frame numbers (the max of
# 10000/60 s mirrors the 10000-frame transport range).
fps = 60
time_textbox = widgets.BoundedFloatText(
    value=0,
    min = 0,
    max = 10000/60,
    description='Time [s]:'
)
jump_time_button = widgets.Button(
    description='Jump to time',
)
# In[10]:
@jump_time_button.on_click
def update_time(b):
    """Jump playback to the time (in seconds) typed in the time textbox."""
    target_frame = int(time_textbox.value * fps)
    play.value = target_frame
    # The linked slider observer triggers the redraw; no explicit draw needed.
# In[ ]:
# In[11]:
# widgets.jslink((play, 'value'),(frame_textbox, 'value'))
# In[12]:
# Load-status indicators shown in the dashboard.
# NOTE(review): both start as True and are never updated by the visible code —
# confirm they are wired up elsewhere.
raw_ok =widgets.Valid(
    value=True,
    indent = True,
    description='Raw data',
)
track_ok = widgets.Valid(
    value=True,
    description='Tracking'
)
# In[13]:
# Display-layer toggles. NOTE(review): these are declared and shown in the
# dashboard but not consulted by any visible callback (see the TODO comments
# near the end of the file).
check_raw = widgets.Checkbox(
    value=True,
    description='Display raw data',
    disabled=False,
    indent=True
)
check_skel = widgets.Checkbox(
    value=True,
    description='Display skeleton',
    disabled=False,
    indent=False
)
check_ellip = widgets.Checkbox(
    value=True,
    description='Display ellipsoids',
    disabled=False,
    indent=True
)
check_trace = widgets.Checkbox(
    value=False,
    description='Display trace',
    disabled=False,
    indent=False
)
# In[14]:
# Jog buttons for stepping the playback position backwards/forwards by 5 or
# 10 frames (handlers attached below).
sub10_button = widgets.Button(
    description='<< 10',
)
sub5_button = widgets.Button(
    description='< 5',
)
add10_button = widgets.Button(
    description='10 >>',
)
add5_button = widgets.Button(
    description='5 >',
)
@sub10_button.on_click
def step_back_10(b):
    """Step playback back 10 frames (renamed from `update_frame`, which was
    reused by four sibling callbacks and shadowed them at module level)."""
    play.value -= 10
@sub5_button.on_click
def step_back_5(b):
    """Step playback back 5 frames (renamed from `update_frame`, which was
    reused by four sibling callbacks and shadowed them at module level)."""
    play.value -= 5
@add5_button.on_click
def step_forward_5(b):
    """Step playback forward 5 frames (renamed from `update_frame`, which was
    reused by four sibling callbacks and shadowed them at module level)."""
    play.value += 5
@add10_button.on_click
def step_forward_10(b):
    """Step playback forward 10 frames (renamed from `update_frame`, which was
    reused by four sibling callbacks and shadowed them at module level)."""
    play.value += 10
# In[15]:
from ipywidgets import AppLayout, GridspecLayout
item_layout = widgets.Layout(margin='0 0 10px 10px')
# Right-hand control panel: data loading, status, transport, jog, jump and
# display-toggle rows.
dashboard = VBox([
    HBox([data_path_textbox, load_button], layout = item_layout) ,
    HBox([tracking_path_textbox, load_behavior_button], layout = item_layout) ,
    HBox([track_ok, raw_ok], layout = item_layout) ,
    HBox([play, slider], layout = item_layout) ,
    HBox([sub10_button,sub5_button,add5_button,add10_button]) ,
    HBox([frame_textbox,jump_frame_button], layout = item_layout) ,
    HBox([time_textbox,jump_time_button] , layout = item_layout) ,
    HBox([check_raw,check_skel]),
    HBox([check_ellip,check_trace])
])
# Capture the matplotlib canvas inside an Output widget so it can be embedded
# in the app layout.
output = widgets.Output()
with output:
    storage.make_3d_axis()
    storage.fig.canvas.toolbar_position = 'bottom'
# In[ ]:
# In[16]:
from ipywidgets import AppLayout
from ipywidgets import HTML, Layout, Dropdown, Output, Textarea, VBox, Label, Text
from ipywidgets import Label, Layout, HBox
from IPython.display import display
# header = HTML("<h1><center><\"(__)~~.. MousePlayer <\"(__)~~....</center></h1>")
# header = HTML("<h1><center><\"(__)~~.. ʍօʊֆɛ քʟǟʏɛʀ <\"(__)~~....</center></h1>")
header = HTML("<h1><center>🐭 ʍօʊֆɛ քʟǟʏɛʀ 🐭</center></h1>")
# board = VBox( [header, HBox([output,dashboard]) ], layout=Layout(justify_content = 'center') )
# Figure output in the centre, dashboard on the right, title banner above.
board = AppLayout(header=None,
                  left_sidebar=None,
                  center=output,
                  right_sidebar=dashboard,
                  footer=None,
                  pane_widths=[0,2, 2])
app = VBox( [header, board ], layout=Layout(justify_content = 'center') )
# In[ ]:
# In[17]:
# In[ ]:
# In[ ]:
# In[18]:
# TODO toggles to show trace, ellipsoids, skeleton, raw data,
# Labeles showing if data is loaded or tracking is loaded
# Tracking without the raw data (get the xy limits from the xy data)
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
| 2.828125 | 3 |
bayesapi/resources/anomalies.py | vishalbelsare/bayesrest | 3 | 12769425 | import falcon
import history
from bayesapi.resources import BaseResource
from bayesapi.validation import validate
class AnomaliesResource(BaseResource):
    """Falcon resource that scores per-row anomaly probabilities for a target
    column, conditioned on a set of context columns."""
    def on_post(self, req, resp):
        """Handle POST: validate the payload, run the anomaly query inside a
        bayesdb savepoint, persist the call to history, and respond with
        `[{'row-id': ..., 'probability': ...}, ...]` (HTTP 200).
        """
        # Validate the request body against this endpoint's API definition.
        req_vars = validate(self.api_def, 'post', req)
        target_column = req_vars['target-column']
        context_columns = req_vars['context-columns']
        # Wrap column names in double quotes for the query.
        # NOTE(review): embedded double quotes in a column name are not
        # escaped here — confirm upstream validation rules this out.
        quoted_tgt_column = '"{}"'.format(target_column)
        quoted_ctx_columns = ['"{}"'.format(c) for c in context_columns]
        with self.bdb.savepoint():
            query = self.queries.find_anomalies(
                population = self.cfg.population_name,
                target_column = quoted_tgt_column,
                context_columns = quoted_ctx_columns
            )
            self.logger.info(query)
            cursor = self.execute(query)
            # Each cursor row is (row-id, probability).
            cols = ['row-id','probability']
            result = [dict(zip(cols, row)) for row in cursor]
        # NOTE(review): the result is written to history twice with different
        # signatures (a full 'anomalies' record, then an "anomaly"-tagged
        # record) — confirm both writes are intentional.
        history.save(self.cfg.history,
                     {'type': 'anomalies',
                      'query': query,
                      'result': result,
                      'target_column': target_column,
                      'context_columns': context_columns})
        history.save(self.cfg.history,
                     { 'result': result },
                     "anomaly" )
        resp.media = result
        resp.status = falcon.HTTP_200
| 2.203125 | 2 |
kmc_paper_data/make_inputs/make_inputs_105.py | j-m-dean/Overscreening_and_Underscreening | 1 | 12769426 | import json
import numpy as np
import os
# Base directory of the project; note this depends on the working directory
# the script is launched from.
home = os.getcwd() + "/"

# Averaged charge distributions and their standard errors, keyed by
# permittivity ("distribution_<perm>" / "standard_errors_<perm>").
with open(home + "averaged_distributions/105_charges_distributions_errors.json") as infile:
    data = json.load(infile)

# Radial grid in metres: bins 4..37 on a 2.5e-10 m spacing.
x = list(np.array(range(4, 38)) * 2.5e-10)

# Relative permittivities for which input files are generated.
permittivities = [1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 50, 75, 100, 2, 65, 85]

os.chdir(home + "charges_105")
for perm in permittivities:
    # NOTE(review): the chdir calls look redundant (all paths below are
    # absolute) but are kept to preserve the script's working-directory
    # side effects.
    os.chdir(home + "charges_105/permittivity_{}".format(perm))
    # Slice [3:] drops the first three entries, presumably aligning the
    # stored arrays with the x grid starting at bin 4 — TODO confirm.
    y = data["distribution_{}".format(perm)][3:]
    yerr = data["standard_errors_{}".format(perm)][3:]
    # Renamed from `input`, which shadowed the builtin of the same name.
    payload = {"x": x, "y": y, "yerr": yerr}
    with open(home + "charges_105/permittivity_{}/inputs.json".format(perm), 'w') as outfile:
        json.dump(payload, outfile)
    os.chdir(home + "charges_105")
| 2.515625 | 3 |
edk2toolext/capsule/signtool_signer.py | joschock/edk2-pytool-extensions | 32 | 12769427 | # @file signtool_signer.py
# This module contains the abstracted signing interface for Windows Signtool.
# This interface abstraction takes in the signature_options and signer_options
# dictionaries that are used by capsule_tool and capsule_helper.
#
# Will attempt to locate a valid installation of Windows Signtool using the
# utility_functions provided by edk2toollib.
#
##
# Copyright (C) Microsoft Corporation
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import os
import tempfile
import warnings
from edk2toollib.windows import locate_tools
from edk2toollib.utility_functions import RunCmd
# Lazily-populated cache for the located signtool.exe path; filled in by
# get_signtool_path() on first use.
GLOBAL_SIGNTOOL_PATH = None
# For each supported signature type, the set of allowed 'type_options' values.
SUPPORTED_SIGNATURE_TYPE_OPTIONS = {
    'pkcs7': {'detachedSignedData', 'embedded', 'pkcs7DetachedSignedData'}
}
def get_signtool_path():
    '''
    Locate a valid installation of Windows Signtool and return its path.

    The lookup via the Windows SDK can be lengthy, so the first successful
    result is cached in GLOBAL_SIGNTOOL_PATH and reused on later calls.
    '''
    global GLOBAL_SIGNTOOL_PATH
    if GLOBAL_SIGNTOOL_PATH is not None:
        return GLOBAL_SIGNTOOL_PATH
    GLOBAL_SIGNTOOL_PATH = locate_tools.FindToolInWinSdk('signtool.exe')
    return GLOBAL_SIGNTOOL_PATH
def sign(data: bytes, signature_options: dict, signer_options: dict) -> bytes:
    '''
    Primary signing interface. Takes in the signature_options and signer_options
    dictionaries that are used by capsule_tool and capsule_helper, signs the
    supplied data with Windows Signtool, and returns the resulting PKCS7
    signature bytes.
    '''
    # NOTE: Currently, we only support the necessary options for capsules & Windows Firmware Policies

    # The following _if_ clause handles the deprecated signature_option 'sign_alg' for backwards compatibility
    # when the deprecated option is supplied, this code adds the new, required options based on prior code behavior
    if 'sign_alg' in signature_options:
        warnings.warn('Signature_option "sign_alg" is deprecated, use "type"', DeprecationWarning)
        if signature_options['sign_alg'] == 'pkcs12':
            # map legacy behavior to new options and backwards-compatible values
            signature_options['type'] = 'pkcs7'
            signature_options['type_options'] = {'detachedSignedData'}
            signature_options['encoding'] = 'DER'
            signer_options['key_file_format'] = 'pkcs12'
        else:
            raise ValueError(f"Unsupported signature algorithm: {signature_options['sign_alg']}!")

    if signature_options['type'] != 'pkcs7':
        raise ValueError(f"Unsupported signature type: {signature_options['type']}!")
    for opt in signature_options['type_options']:
        if opt not in SUPPORTED_SIGNATURE_TYPE_OPTIONS[signature_options['type']]:
            # BUGFIX: corrected the "provied" typo in the error message.
            raise ValueError(f"Unsupported type option: {opt}! Ensure you have provided a set")

    # Exactly one of the mutually exclusive p7ce modes may be requested.
    mutually_exclusive_options = ('embedded', 'detachedSignedData', 'pkcs7DetachedSignedData')
    option_found = None
    for option in mutually_exclusive_options:
        if option in signature_options['type_options']:
            if option_found is None:
                option_found = option
            else:
                raise ValueError("type_options '%s' and '%s' are mutually exclusive" % (option_found, option))

    if signature_options['encoding'] != 'DER':
        # BUGFIX: the message previously interpolated signature_options['type']
        # instead of the offending encoding value.
        raise ValueError(f"Unsupported signature encoding: {signature_options['encoding']}!")
    if signature_options['hash_alg'] != 'sha256':
        raise ValueError(f"Unsupported hashing algorithm: {signature_options['hash_alg']}!")
    if 'key_file' not in signer_options:
        raise ValueError("Must supply a key_file in signer_options for Signtool!")
    if signer_options['key_file_format'] != 'pkcs12':
        raise ValueError(f"Unsupported key file format: {signer_options['key_file_format']}!")

    # Set up a temp directory to hold input and output files.
    temp_folder = tempfile.mkdtemp()
    in_file_path = os.path.join(temp_folder, "data_to_sign.bin")

    # Create the input file for Signtool; the context manager guarantees the
    # handle is closed before Signtool reads the file.
    with open(in_file_path, 'wb') as in_file:
        in_file.write(data)

    # Start building the parameters for the call.
    signtool_params = ['sign']
    signtool_params += ['/fd', signature_options['hash_alg']]
    if 'detachedSignedData' in signature_options['type_options']:
        signtool_params += ['/p7ce', 'DetachedSignedData']
    elif 'pkcs7DetachedSignedData' in signature_options['type_options']:
        signtool_params += ['/p7ce', 'PKCS7DetachedSignedData']
    elif 'embedded' in signature_options['type_options']:
        signtool_params += ['/p7ce', 'Embedded']
    else:
        raise ValueError("For pkcs7, type_options must include either embedded or detachedSignedData")
    signtool_params += ['/p7', f'"{temp_folder}"']
    signtool_params += ['/f', f"\"{signer_options['key_file']}\""]
    if 'oid' in signer_options:
        signtool_params += ['/p7co', signer_options['oid']]
    if 'eku' in signer_options:
        signtool_params += ['/u', signer_options['eku']]
    if 'key_pass' in signer_options:
        signtool_params += ['/p', signer_options['key_pass']]
    # Add basic options.
    signtool_params += ['/debug', '/v', f'"{in_file_path}"']

    # Make the call to Signtool.
    ret = RunCmd(get_signtool_path(), " ".join(signtool_params))
    if ret != 0:
        raise RuntimeError(f"Signtool.exe returned with error: {ret}!")

    # Load the data from the output file and return it.
    out_file_path = os.path.join(temp_folder, "data_to_sign.bin.p7")
    with open(out_file_path, 'rb') as out_file:
        out_data = out_file.read()
    return out_data
def sign_in_place(sign_file_path, signature_options, signer_options):
    '''
    Alternate module-specific signing interface that signs the file at
    sign_file_path in place.  Used for particular signatures associated with
    Windows capsule files (e.g. CAT files); takes the same signature_options
    and signer_options dictionaries used by capsule_tool and capsule_helper.
    '''
    # NOTE: Currently, we only support the necessary algorithms for capsules.
    if signature_options['sign_alg'] != 'pkcs12':
        raise ValueError(f"Unsupported signature algorithm: {signature_options['sign_alg']}!")
    if signature_options['hash_alg'] != 'sha256':
        raise ValueError(f"Unsupported hashing algorithm: {signature_options['hash_alg']}!")
    if 'key_file' not in signer_options:
        raise ValueError("Must supply a key_file in signer_options for Signtool!")

    # Assemble the Signtool command line.
    # NOTE(review): unlike sign(), the '/p7co' (oid) and '/u' (eku) options
    # are intentionally left disabled here.
    signtool_params = ['sign', '/a',
                       '/fd', signature_options['hash_alg'],
                       '/f', f"\"{signer_options['key_file']}\""]
    if 'key_pass' in signer_options:
        signtool_params += ['/p', signer_options['key_pass']]
    signtool_params += ['/debug', '/v', f'"{sign_file_path}"']

    # Invoke Signtool and surface a non-zero exit code as an error.
    ret = RunCmd(get_signtool_path(), " ".join(signtool_params))
    if ret != 0:
        raise RuntimeError(f"Signtool.exe returned with error: {ret}!")
| 1.984375 | 2 |
tests/argo_workflow_tools/dsl/test_workflow_template.py | DiagnosticRobotics/argo-workflow-tools | 15 | 12769428 | import argo_workflow_tools.models.io.argoproj.workflow.v1alpha1 as argo
from argo_workflow_tools import dsl, WorkflowTemplate
@dsl.Task(image="python:3.10")
def say_hello(name: str):
    """Task: build and return the greeting for *name*."""
    message = f"hello {name}"
    return message
@dsl.DAG()
def command_hello(name):
    """DAG entrypoint: delegate the greeting to the say_hello task."""
    greeting = say_hello(name)
    return greeting
def test_workflow_template():
    """The rendered model for a WorkflowTemplate must carry the right kind."""
    wf = WorkflowTemplate(
        name="hello-world", entrypoint=command_hello, arguments={"name": "Brian"}
    )
    assert wf.to_model().kind == "WorkflowTemplate"
def test_workflow_template_labels_and_annotations():
    """Labels/annotations must land on both the template metadata and the
    workflow_metadata of the spec."""
    wf = WorkflowTemplate(
        name="hello-world",
        entrypoint=command_hello,
        arguments={"name": "Brian"},
        labels={'key1': 'val1'},
        workflow_labels={'key2': 'val2'},
        annotations={'key1': 'val1'},
        workflow_annotations={'key2': 'val2'},
    )
    model = wf.to_model()
    template_meta = model.metadata
    workflow_meta = model.spec.workflow_metadata
    assert template_meta.labels['key1'] == 'val1'
    assert workflow_meta.labels['key2'] == 'val2'
    assert template_meta.annotations['key1'] == 'val1'
    assert workflow_meta.annotations['key2'] == 'val2'
def test_workflow_template_arguments():
    """Parameter objects (including the enum field) must pass through to the
    rendered model unchanged."""
    params = [argo.Parameter(name="name", value="Brian", enum=["Brian", "Joe"])]
    wf = WorkflowTemplate(
        name="hello-world",
        entrypoint=command_hello,
        arguments=params,
    )
    rendered = wf.to_model()
    assert rendered.spec.arguments.parameters[0].enum == ["Brian", "Joe"]
| 2.3125 | 2 |
fedhf/api/logger/logger.py | beiyuouo/fedhf | 2 | 12769429 | <reponame>beiyuouo/fedhf
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : fedhf\api\logger\logger.py
# @Time : 2022-05-03 15:58:38
# @Author : <NAME>
# @Email : <EMAIL>
# @License : Apache License 2.0
import os
import logging
import sys
import wandb
import time
from .base_logger import BaseLogger, logger_map
logging.basicConfig(stream=sys.stdout,
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
class Logger(BaseLogger):
    """Singleton wrapper around the stdlib logging module with optional
    Weights & Biases integration.

    All constructions share one inner ``__Logger`` object, configured from
    the first ``args`` namespace passed in; later constructions return the
    same instance (``__new__`` hands back the inner logger directly).
    """

    class __Logger(BaseLogger):
        def __init__(self, args):
            """Build the underlying logger from the ``args`` namespace.

            Expects: log_level, log_name, log_file, use_wandb and (when
            use_wandb is truthy) project_name, wandb_reinit, name.

            Raises
            ------
            ValueError
                when ``args.log_level`` is not a key of ``logger_map``.
            """
            if args.log_level in logger_map:
                self.log_level = logger_map[args.log_level]
            else:
                # BUGFIX: raising a plain string is a TypeError on Python 3.
                raise ValueError("No such log level!")

            if args.log_name is not None:
                self.log_name = args.log_name
            else:
                self.log_name = "root"

            self.logger = logging.getLogger(self.log_name)
            self.logger.setLevel(self.log_level)

            formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

            if args.log_file is None:
                os.makedirs(os.path.join('log'), exist_ok=True)
                args.log_file = os.path.join('log', f'log_{int(time.time())}.log')
            # BUGFIX: remember the log file path; log() previously referenced
            # a nonexistent global ``args`` and crashed with a NameError.
            self.log_file = args.log_file

            file_handler = logging.FileHandler(args.log_file, mode='w')
            file_handler.setLevel(level=self.log_level)
            file_handler.setFormatter(formatter)
            self.logger.addHandler(file_handler)

            # use streamHandler to print to stdout
            stream_handler = logging.StreamHandler(sys.stdout)
            stream_handler.setLevel(level=self.log_level)
            stream_handler.setFormatter(formatter)
            self.logger.addHandler(stream_handler)

            # BUGFIX: always define use_wandb; it was previously only set
            # when truthy, so log()/to_wandb() raised AttributeError when
            # wandb logging was disabled.
            self.use_wandb = args.use_wandb
            if args.use_wandb:
                wandb.init(project=args.project_name,
                           config=args,
                           reinit=args.wandb_reinit,
                           name=args.name)

        def debug(self, log_str: str) -> None:
            self.logger.debug(log_str)

        def info(self, log_str: str) -> None:
            self.logger.info(log_str)

        def warning(self, log_str: str) -> None:
            self.logger.warning(log_str)

        def error(self, log_str: str) -> None:
            self.logger.error(log_str)

        def log(self, log_dict: dict, *args, **kwargs) -> None:
            # Append one record line to the log file.
            # BUGFIX: use the path stored at construction time instead of the
            # undefined global ``args`` (shadowed here by the *args tuple).
            with open(self.log_file, 'a') as f:
                f.write(str(log_dict) + '\n')

            if self.use_wandb:
                self.to_wandb(log_dict, *args, **kwargs)

        def to_wandb(self, log_dict: dict, *args, **kwargs) -> None:
            wandb.log(log_dict, *args, **kwargs)

    __instance = None

    def __new__(cls, args):
        # Lazily create the shared inner logger on first construction.
        if not cls.__instance:
            cls.__instance = Logger.__Logger(args)
        return cls.__instance

    def debug(self, log_str: str) -> None:
        self.__instance.debug(log_str)

    def info(self, log_str: str) -> None:
        self.__instance.info(log_str)

    def warning(self, log_str: str) -> None:
        self.__instance.warning(log_str)

    def error(self, log_str: str) -> None:
        self.__instance.error(log_str)

    def log(self, log_dict: dict, *args, **kwargs) -> None:
        # BUGFIX: forward *args/**kwargs unpacked; the original passed the
        # tuple and dict as two positional arguments.
        self.__instance.log(log_dict, *args, **kwargs)

    def to_wandb(self, log_dict: dict, *args, **kwargs) -> None:
        if self.__instance.use_wandb:
            self.__instance.to_wandb(log_dict, *args, **kwargs)
| 2.484375 | 2 |
configloader.py | metecyu/shadowsocksr | 0 | 12769430 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import importloader
g_config = None
def load_config():
    """(Re)load the user/api config module into the module-level g_config,
    preferring 'userapiconfig' over 'apiconfig'."""
    global g_config
    g_config = importloader.loads(['userapiconfig', 'apiconfig'])
def get_config():
    """Return the currently loaded configuration module (None until
    load_config() has run)."""
    return g_config
load_config()
| 2.015625 | 2 |
basic/Day3/Day11.py | ductnn/Python-tu | 1 | 12769431 | <reponame>ductnn/Python-tu
# Hourglass-sum puzzle (HackerRank "2D Array - DS"): read a 6x6 integer grid
# from stdin and print the maximum sum over all 4x4 = 16 hourglass shapes.
grid = []
for _ in range(6):
    grid.append([int(x) for x in input().split(" ")])

# BUGFIX: the original initialized the maximum to 0, which is wrong when
# every hourglass sum is negative; start undefined instead.
maximum = None
for i in range(4):
    for j in range(4):
        total = (grid[i][j] + grid[i][j + 1] + grid[i][j + 2]
                 + grid[i + 1][j + 1]
                 + grid[i + 2][j] + grid[i + 2][j + 1] + grid[i + 2][j + 2])
        if maximum is None or total > maximum:
            maximum = total
print(maximum)
zahlwort2num/command_line.py | spatialbitz/zahlwort2num | 5 | 12769432 | from _ctypes import ArgumentError
import zahlwort2num as w2n
import sys
def main():
    """CLI entry point: convert the German number word supplied as the first
    command-line argument and print the numeric result.

    Raises
    ------
    ArgumentError
        when no argument (or an empty one) was supplied.
    """
    # BUGFIX: check the argument count first — indexing sys.argv[1] directly
    # raised IndexError before the intended ArgumentError could be raised.
    if len(sys.argv) > 1 and sys.argv[1]:
        print(w2n.convert(sys.argv[1]))
    else:
        raise ArgumentError('No parameter given!')
Vanilla/src/assistant.py | allepicondor/SnakePython | 0 | 12769433 | <reponame>allepicondor/SnakePython<gh_stars>0
def NumpyInList(array, l1):
    """Return True if any element of l1 matches `array` element-wise on its
    first two components (indices 0 and 1), otherwise False."""
    return any(item[0] == array[0] and item[1] == array[1] for item in l1)
| 2.890625 | 3 |
src/the_tale/the_tale/accounts/exceptions.py | devapromix/the-tale | 1 | 12769434 |
import smart_imports
smart_imports.all()
# Exception hierarchy for the accounts app; every error derives from the
# project-wide TheTaleError via AccountError.  MSG is the %-style message
# template used by the base class.
class AccountError(utils_exceptions.TheTaleError):
    MSG = 'account error'
# NOTE(review): class name carries a typo ("Unkwnown"); kept as-is because
# callers reference it by name.
class UnkwnownAchievementTypeError(AccountError):
    MSG = 'unknown achievement type: %(achievement_type)r'
class EmailAndPasswordError(AccountError):
    MSG = 'email & password must be specified or not specified together'
class BotIsFastError(AccountError):
    MSG = 'can not cant fast account for bot'
# Base for errors raised while changing account credentials.
class ChangeCredentialsError(AccountError):
    MSG = 'change credentials error'
class MailNotSpecifiedForFastAccountError(ChangeCredentialsError):
    MSG = 'new_email must be specified for fast account'
class PasswordNotSpecifiedForFastAccountError(ChangeCredentialsError):
    MSG = 'password must be specified for fast account'
class NickNotSpecifiedForFastAccountError(ChangeCredentialsError):
    MSG = 'nick must be specified for fast account'
class NewEmailNotSpecifiedError(ChangeCredentialsError):
    MSG = 'email not specified'
| 2.40625 | 2 |
environments/Gazebo/controller/__init__.py | zal/simenvbenchmark | 9 | 12769435 | from .robot_env import RobotEnv_gazebo
from .nnn_env import nnnEnv_gazebo
| 1.054688 | 1 |
Extracting Faces/main.py | rudrabarad/OpenCV-Basics | 1 | 12769436 | from PIL import Image # importing packages
import face_recognition
# Detect every face in the group photo and save each one as its own file.
image = face_recognition.load_image_file('group.jpg')  # reading group photo
face_locations = face_recognition.face_locations(image)

# BUGFIX: filenames were keyed on the face's `top` coordinate alone, so two
# faces sharing the same top row overwrote each other; an enumerate index
# makes every filename unique.
for idx, face_location in enumerate(face_locations):
    top, right, bottom, left = face_location
    face_image = image[top:bottom, left:right]
    pil_image = Image.fromarray(face_image)
    # pil_image.show()  # uncomment to preview each face interactively
    pil_image.save(f'faces/{idx}_{top}.jpg')  # to save faces
| 3.21875 | 3 |
goods/migrations/0007_alter_pharmproduct_unique_together.py | dcopm999/pharmcrm2-goods | 0 | 12769437 | # Generated by Django 3.2.7 on 2021-10-12 09:20
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: enforce uniqueness of PharmProduct over
    (trade_name, maker, original_packing, dosage_packing)."""
    dependencies = [
        ('goods', '0006_auto_20211012_0812'),
    ]
    operations = [
        migrations.AlterUniqueTogether(
            name='pharmproduct',
            unique_together={('trade_name', 'maker', 'original_packing', 'dosage_packing')},
        ),
    ]
| 1.585938 | 2 |
src/coder.py | CedMaire/pdc-project | 1 | 12769438 | <reponame>CedMaire/pdc-project
import unireedsolomon as ReedSalomon
import lib as Lib
import mpmath as BigNumbers
import numpy as np
class Coder:
    """Encoder/decoder for the transmission pipeline.

    Encode: Reed-Solomon ECC -> byte randomization (via Lib's lookup maps)
    -> 8-bit vectors -> CHUNK_SIZE-bit vectors.  Decode runs the reverse.
    All sizing constants and maps come from the Lib module.
    """

    # Constructor, initializes the Reed-Solomon coder and the receive buffer.
    def __init__(self):
        self.RSCoder = ReedSalomon.rs.RSCoder(
            Lib.CODE_WORD_LENGTH, Lib.MESSAGE_LENGTH)
        self.receivedData = list()

    # Buffer an incoming vector and attempt a decode once enough arrived.
    def newVectorReceived(self, vector):
        self.receivedData.append(vector)
        # BUGFIX: the original captured len() once before the while loop, so
        # the condition never changed and an over-full buffer looped forever
        # (eventually raising IndexError on pop from an empty list).
        while self.dataReceivedIsTooLong(len(self.receivedData)):
            self.receivedData.pop(0)
        if self.dataReceivedHasRightLength(len(self.receivedData)):
            return self.decode(self.receivedData)
        else:
            print(Lib.DECODING_NOT_READY)
            return (False, Lib.DECODING_NOT_READY)

    # Checks if the received data is too long.
    def dataReceivedIsTooLong(self, length):
        return length > Lib.NEEDED_AMOUNT_OF_VECTORS

    # Checks if the received data has the correct length.
    def dataReceivedHasRightLength(self, length):
        return length == Lib.NEEDED_AMOUNT_OF_VECTORS

    # Encodes the string so that it can be sent as k bits at a time.
    def encode(self, string):
        rsEncodedString = self.applyEcc(string)
        byteVectorList = self.stringToListOfByteVectors(rsEncodedString)
        return self.chunk(byteVectorList)

    # Decodes k-bit vectors to get back a readable string.
    # Returns (isDecoded, output) where output is the recovered string or
    # Lib.DECODING_FAILED.
    def decode(self, tupleList):
        output = Lib.DECODING_FAILED
        isDecoded = False
        try:
            assembledVectorsList = self.assemble(tupleList)
            stringReceived = self.listOfByteVectorsToString(
                assembledVectorsList)
            output = self.recoverEcc(stringReceived)
            isDecoded = True
        # BUGFIX: narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit) to Exception.
        except Exception:
            print(Lib.DECODING_FAILED)
        return (isDecoded, output)

    # Chunks the 8-bit vectors into smaller CHUNK_SIZE-bit vectors.
    def chunk(self, vectorList):
        # Chunk every byte vector, then flatten the nested result.
        chuncked = list(map(lambda vector: [vector[i:i + Lib.CHUNK_SIZE] for i in range(0, len(vector), Lib.CHUNK_SIZE)],
                            vectorList))
        outputList = list()
        for byteChunks in chuncked:
            outputList.extend(byteChunks)
        return outputList

    # Assembles the k-bit vectors to have 8-bit vectors again.
    def assemble(self, vectorList):
        output = list()
        step = int(Lib.BYTE_BIT_SIZE / Lib.CHUNK_SIZE)
        for i in range(0, len(vectorList), step):
            tempList = list()
            for j in range(0, step):
                tempList.append(vectorList[i + j])
            output.append([bit for vector in tempList for bit in vector])
        return output

    # Applies an error correcting encoding (Reed-Solomon ECC).
    def applyEcc(self, string):
        return self.RSCoder.encode(string)

    # Tries to recover the original string from a received Reed-Solomon ECC.
    def recoverEcc(self, string):
        return self.RSCoder.decode(string)[0]

    # Randomizes the bytes so that P(0)=P(1)=1/2, allowing an ML rule.
    # (Dead commented-out mpmath implementation removed; the lookup map in
    # Lib performs the same permutation.)
    def randomizeBytes(self, byteString):
        output = map(lambda x: Lib.BYTE_RANDOMIZE_MAP.get(int.from_bytes(
            [x], byteorder=Lib.BYTE_ENDIANESS)), byteString)
        return bytes(output)

    # Recovers the original bytes that have been randomized.
    def recoverBytes(self, byteString):
        output = map(lambda x: Lib.BYTE_RECOVER_MAP.get(int.from_bytes(
            [x], byteorder=Lib.BYTE_ENDIANESS)), byteString)
        return bytes(output)

    # Converts a regular string into a list of 8-bit vectors.
    # "He" -> [[0, 1, 0, 0, 1, 0, 0, 0], [0, 1, 1, 0, 0, 1, 0, 1]]
    def stringToListOfByteVectors(self, string):
        tmp = list(
            map(lambda x: bin(x)[2:], self.randomizeBytes(string.encode(Lib.UTF_8))))
        output = list()
        for bitString in tmp:
            output.append([int(bit) for bit in bitString])
        # Left-pad every vector with zeros up to a full byte.
        for vector in output:
            while len(vector) < 8:
                vector.insert(0, 0)
        return output

    # Converts a list of 8-bit vectors to a regular string (with accents).
    # [[0, 1, 0, 0, 1, 0, 0, 0], [0, 1, 1, 0, 0, 1, 0, 1]] -> "He"
    def listOfByteVectorsToString(self, byteVectors):
        return self.recoverBytes(bytes(list(
            map(lambda bitString: int(bitString, 2),
                map(lambda vector: "".join(
                    map(lambda x: repr(x), vector)), byteVectors))))).decode(
            Lib.UNICODE_ESCAPE).encode(Lib.LATIN_1).decode(Lib.UTF_8)

    # Creates a deterministic pseudo-random array from a seed.
    # NOTE(review): this reseeds numpy's *global* RNG as a side effect.
    def createRandomArray(self, array_size, seed, max_value):
        np.random.seed(seed)
        return np.random.randint(max_value, size=(array_size))
| 2.859375 | 3 |
tpfd/compat.py | erinxocon/tpfd | 106 | 12769439 | import sys
"""
This module handles import compatibility issues between Python 2 and
Python 3.
"""
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
if is_py2:
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
elif is_py3:
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float) | 2.8125 | 3 |
viberbot/api/user_profile.py | wowkin2/viber-bot-python | 0 | 12769440 | from future.utils import python_2_unicode_compatible
class UserProfile(object):
    """Read-only holder for a Viber user's profile fields: display name,
    avatar URL, user id, country code, language code and minimal supported
    API version.  All fields default to None."""
    def __init__(self, name=None, avatar=None, user_id=None, country=None, language=None, api_version=None):
        self._name = name
        self._avatar = avatar
        self._id = user_id
        self._country = country
        self._language = language
        self._api_version = api_version
    @property
    def name(self):
        """The user's display name (or None)."""
        return self._name
    @property
    def avatar(self):
        """URL of the user's avatar image (or None)."""
        return self._avatar
    @property
    def id(self):
        """The user id (or None)."""
        return self._id
    @property
    def country(self):
        """Country code reported for the user (or None)."""
        return self._country
    @property
    def language(self):
        """Language code reported for the user (or None)."""
        return self._language
    @property
    def api_version(self):
        """Minimal API version supported by the user's client (or None)."""
        return self._api_version
    def from_dict(self, user_dict):
        """Populate fields from a raw API dict; only keys present in the
        dict overwrite current values.  Returns self for chaining."""
        if 'name' in user_dict:
            self._name = user_dict['name']
        if 'avatar' in user_dict:
            self._avatar = user_dict['avatar']
        if 'id' in user_dict:
            self._id = user_dict['id']
        if 'country' in user_dict:
            self._country = user_dict['country']
        if 'language' in user_dict:
            self._language = user_dict['language']
        if 'api_version' in user_dict:
            self._api_version = user_dict['api_version']
        return self
    # NOTE(review): python_2_unicode_compatible is normally a *class*
    # decorator; applied to a method it appears to be a no-op on Python 3 —
    # confirm the intent.  Also the format string lacks a closing ']'.
    @python_2_unicode_compatible
    def __str__(self):
        return u"UserProfile[name={0}, avatar={1}, id={2}, country={3}, language={4}, api_version={5}".format(
            self._name,
            self._avatar,
            self._id,
            self._country,
            self._language,
            self._api_version
        )
| 2.59375 | 3 |
src/sadie/airr/igblast/igblast.py | jwillis0720/pybody | 0 | 12769441 | """Low level module for IgBLAST api calls
You probably only want to interact with this module through airr as the database files are extremely tricky to get right
"""
from __future__ import annotations
import glob
import logging
import os
import subprocess
import tempfile
import warnings
import semantic_version
from multiprocessing import cpu_count
from pathlib import Path
from typing import Any, List, Union
# Third party
import pandas as pd
# package/module level
from sadie.utility.util import is_tool
from sadie.airr.airrtable.constants import IGBLAST_AIRR
from sadie.airr.exceptions import (
BadIgBLASTArgument,
BadIgBLASTExe,
BadIgDATA,
MissingIgBLASTArgument,
EmtpyFileError,
IgBLASTRunTimeError,
)
# get logger in global scope
logger = logging.getLogger("IgBLAST")
def ensure_prefix_to(path: Union[str, Path]) -> Union[Path, bool]:
    """Validate that *path* is a usable blast-database prefix.

    BLAST databases are addressed by a file-name prefix (e.g.
    ``/path/to/blast/human_V``) that is not itself a file; the actual files
    are ``human_V.nod``, ``human_V.nsq`` and so on.  This helper checks that
    the prefix's directory exists and that at least one file matches the
    glob ``<prefix>*``.

    Parameters
    ----------
    path : Union[str, Path]
        candidate database prefix (a file-path glob, not a real file)

    Returns
    -------
    Union[Path, bool]
        the prefix rebuilt on the absolute directory as a Path, or False
        when the directory is missing or nothing matches the glob
    """
    prefix = str(path)
    parent = os.path.dirname(prefix)
    if not os.path.exists(parent):
        return False
    # resolve to an absolute directory before re-attaching the prefix stem
    parent = os.path.abspath(parent)
    stem = os.path.basename(prefix)
    if not glob.glob(os.path.join(parent, stem) + "*"):
        return False
    return Path(os.path.join(parent, stem))
class IgBLASTArgument:
    """Descriptor for a single IgBLAST command-line argument.

    Couples an internal name, the blast flag (``key``, without the leading
    dash), the value to pass on the command line, and a ``required`` flag.
    """

    def __init__(self, name: str, arg_key: str, arg_value: Union[str, int, bool, Path], required: bool):
        """Create an argument descriptor.

        Parameters
        ----------
        name : str
            the internal name for the computer
        arg_key : str
            the argument key, ex. -germline_db_V
        arg_value : Union[str, int, bool, Path]
            the value for the argument, ex. /path/germline/db/V
        required : bool
            whether the argument must be supplied
        """
        self.name = name
        self.key = arg_key
        self.value = arg_value
        self.required = required

    @property
    def name(self) -> str:
        """Internal name of the argument."""
        return self._name

    @name.setter
    def name(self, new_name: str) -> None:
        self._name = new_name

    @property
    def key(self) -> str:
        """The blast command key, e.g. ``germline_db_V``."""
        return self._key

    @key.setter
    def key(self, new_key: str) -> None:
        self._key = new_key

    @property
    def value(self) -> Union[str, int, bool, Path]:
        """The value passed for this argument, e.g. ``/path/to/database``."""
        return self._value

    @value.setter
    def value(self, new_value: Union[str, int, bool]) -> None:
        self._value = new_value

    @property
    def required(self) -> bool:
        """Whether this argument is required."""
        return self._required

    @required.setter
    def required(self, flag: bool) -> None:
        self._required = flag

    def get_formatted_blast_arg(self) -> List[str]:
        """Render the argument as a command-line fragment.

        Boolean values render as ``['-key']`` when True and ``[]`` when
        False; any other value renders as ``['-key', 'value']`` when set and
        ``[]`` when unset/empty.
        """
        if isinstance(self._value, bool):
            return ["-" + self._key] if self._value else []
        if self._value:
            return ["-" + str(self._key), str(self._value)]
        return []

    def __str__(self) -> str:
        return "{}-{}".format(self.name, self.key)
ArgumentType = Union[IgBLASTArgument, int, str, Path]
class IgBLASTN:
"""IgBLASTN
IgBLASTN class. A tool for immunoglobulin (IG) and T cell receptor (TR) V domain sequences from nucletodies.
This is a lower level class and you should probably use sadie.airr to interact
Examples
--------
>>> ig_blast = igblast.IgBLASTN('igblastn')
>>> germline_ref = "reference/germlines/"
>>> db_ref = "reference/germlines/blastdb/Ig"
>>> aux_path = "reference/germlines/aux_data)
# Set data
>>> ig_blast.igdata = germline_ref
>>> query = "fasta_inputs/PG9_H.fasta"
>>> ig_blast.germline_db_v = os.path.join(db_ref, "human/human_V")
>>> ig_blast.germline_db_d = os.path.join(db_ref, "human/human_D")
>>> ig_blast.germline_db_j = os.path.join(db_ref, "human/human_J")
>>> ig_blast.aux_path = os.path.join(aux_path, "human_gl.aux")
>>> ig_blast.organism = "human"
>>> csv_dataframe = ig_blast.run(query)
"""
# Only allow these attributes
__slots__ = [
"_executable",
"_version",
"_min_d_match",
"_num_v",
"_num_d",
"_num_j",
"_outfmt",
"_receptor",
"_word_size",
"_nomenclature",
"_gap_open",
"_gap_extend",
"_num_threads",
"_extend_5",
"_extend_3",
"_j_penalty",
"_v_penalty",
"_d_penalty",
"_organism",
"_germline_db_v",
"_germline_db_d",
"_germline_db_j",
"_aux_path",
"_igdata",
"_temp_dir",
"_allow_vdj_overlap",
]
    def __init__(self, executable: Union[Path, str] = "igblastn"):
        """Construct an IgBLASTN wrapper around the given executable.

        Only sane defaults are set here; the databases, aux path and
        organism must be assigned through the setters before run().
        """
        # set the executable dynamically; resolving it also probes -version
        self.executable = Path(executable)
        self._version = self._get_version()
        # setup all the default values if we don't add them
        self.min_d_match = 5
        self.num_v = 3
        self.num_d = 3
        self.num_j = 3
        self.outfmt = 19
        self.receptor = "Ig"
        self.word_size = 5
        self.nomenclature = "imgt"
        self.gap_open = 5
        self.gap_extend = 2
        self.num_threads = cpu_count()
        self.extend_5 = True
        self.extend_3 = True
        self.j_penalty = -2
        self.v_penalty = -1
        self.d_penalty = -1
        self.allow_vdj_overlap = False
        # Make these blank, if they are not set by the caller, then we will complain during runtime. They must be set dynamically
        self._organism = IgBLASTArgument("organism", "organism", "", True)
        self._germline_db_v = IgBLASTArgument("germline_db_v", "germline_db_V", "", True)
        self._germline_db_d = IgBLASTArgument("germline_db_d", "germline_db_D", "", True)
        self._germline_db_j = IgBLASTArgument("germline_db_j", "germline_db_J", "", True)
        self._aux_path = IgBLASTArgument("aux_path", "auxiliary_data", "", True)
        # Igdata is not an official blast argument, it is an environment variable
        self._igdata = Path(".")
        self.temp_dir = Path(".")
    def _get_version(self) -> semantic_version.Version:
        """Private method to parse ``igblastn -version`` into a semantic version.

        Runs the executable, takes the version token from the first stdout
        line, and parses it.

        Returns
        -------
        semantic_version.Version
            the igblast version

        Raises
        ------
        BadIgBLASTExe
            when the subprocess writes to stderr or the version token does
            not parse as a semantic version.
        """
        process = subprocess.run([self.executable, "-version"], capture_output=True)
        stdout = process.stdout.decode("utf-8")
        if process.stderr:
            logger.error(
                f"{self.executable}, has no returned and error when checking version,. Tried igblastn -version: {process.stderr.decode('utf-8')}"
            )
            raise BadIgBLASTExe(self.executable, process.stderr.decode("utf-8"))
        # first line looks like "...: <version>"; take the trailing token
        version = stdout.split("\n")[0].split(":")[-1].strip()
        try:
            version = semantic_version.Version(version)
        except ValueError:
            raise BadIgBLASTExe(self.executable, f"semantic version can't parse {version}")
        return version
    @property
    def version(self) -> semantic_version.Version:
        """The igblastn version parsed at construction time."""
        return self._version
    @property
    def executable(self) -> Path:
        """The igblastn executable path."""
        return self._executable
    @executable.setter
    def executable(self, exe: Path) -> None:
        # accept plain strings for convenience
        if isinstance(exe, str):
            exe = Path(exe)
        self._executable = exe
    @property
    def temp_dir(self) -> Path:
        """The temp-data directory used when splitting igblast work.

        Returns
        -------
        Path
            A valid, writable temporary directory path.
        """
        return self._temp_dir
    @temp_dir.setter
    def temp_dir(self, data: Union[Path, str]) -> None:
        if isinstance(data, str):
            data = Path(data)
        self._temp_dir = data.absolute()
        # fail fast if we cannot write intermediate files there
        if not os.access(self._temp_dir, os.W_OK):
            raise IOError(self._temp_dir, "Unable to write to temp dir")
    @property
    def igdata(self) -> Path:
        """The IGDATA path containing the internal_data needed for
        recombination annotation.

        Returns
        -------
        Path
            A valid IGDATA directory path.
        """
        return Path(self._igdata)
    @igdata.setter
    def igdata(self, data: Union[Path, str]) -> None:
        if isinstance(data, str):
            data = Path(data)
        # IGDATA must be an existing directory
        if not data.exists() or not data.is_dir():
            raise BadIgDATA(data)
        self._igdata = Path(data.absolute())
@property
def min_d_match(self) -> ArgumentType:
"""Required minimal consecutive nucleotide base matches for D genes
Returns
-------
IgBLASTArgument
"""
return self._min_d_match
@min_d_match.setter
def min_d_match(self, d: int) -> None:
if not isinstance(d, int) and d < 5:
raise BadIgBLASTArgument(d, ">5")
self._min_d_match = IgBLASTArgument("min_d_match", "min_D_match", d, False)
    @property
    def num_v(self) -> ArgumentType:
        """
        Number of germline V-gene sequences to show alignments for.

        Returns
        -------
        IgBLASTArgument
        """
        return self._num_v
    @num_v.setter
    def num_v(self, v: int) -> None:
        if not isinstance(v, int):
            raise BadIgBLASTArgument(v, int)
        self._num_v = IgBLASTArgument("num_v", "num_alignments_V", v, False)
    @property
    def num_d(self) -> ArgumentType:
        """
        Number of germline D-gene sequences to show alignments for.

        Returns
        -------
        IgBLASTArgument
        """
        return self._num_d
    @num_d.setter
    def num_d(self, d: int) -> None:
        if not isinstance(d, int):
            raise BadIgBLASTArgument(d, int)
        self._num_d = IgBLASTArgument("num_d", "num_alignments_D", d, False)
    @property
    def num_j(self) -> ArgumentType:
        """
        Number of germline J-gene sequences to show alignments for.

        Returns
        -------
        IgBLASTArgument
        """
        return self._num_j
    @num_j.setter
    def num_j(self, j: int) -> None:
        if not isinstance(j, int):
            raise BadIgBLASTArgument(j, int)
        self._num_j = IgBLASTArgument("num_j", "num_alignments_J", j, False)
    @property
    def organism(self) -> ArgumentType:
        """The organism for your query sequence.

        Returns
        -------
        IgBLASTArgument
        """
        return self._organism
    @organism.setter
    def organism(self, o: str) -> None:
        """Organism

        Parameters
        ----------
        o : str
            an organism string

        Raises
        ------
        BadIgBLASTArgument
            if the organism is not a str
        """
        # I don't want to hardcode in the organisms here.
        # I will handle that logic at a higher level,
        # this is because blast has no preset organims and it's all about the v,d,j blast paths which are set dynamically
        if not isinstance(o, str):
            raise BadIgBLASTArgument(o, str)
        self._organism = IgBLASTArgument("organism", "organism", o, True)
    @property
    def outfmt(self) -> ArgumentType:
        """alignment view options:
        3 = Flat query-anchored, show identities,
        4 = Flat query-anchored, no identities,
        7 = Tabular with comment lines
        19 = Rearrangement summary report (AIRR format)

        Returns
        -------
        IgBLASTArgument
        """
        return self._outfmt
    @outfmt.setter
    def outfmt(self, fmt: int) -> None:
        # only accept 19 (AIRR rearrangement format) for now
        if fmt != 19:
            raise BadIgBLASTArgument(fmt, 19)
        self._outfmt = IgBLASTArgument("outfmt", "outfmt", fmt, True)
@property
def receptor(self) -> ArgumentType:
"""
Specify Ig or T cell receptor sequence
Returns
-------
IgBLASTArgument
"""
return self._receptor
@receptor.setter
def receptor(self, r: str) -> None:
if not isinstance(r, str) and not (r in ["Ig", "TCR"]):
raise BadIgBLASTArgument(r, ["Ig", "TCR"])
self._receptor = IgBLASTArgument("receptor", "ig_seqtype", r, True)
@property
def nomenclature(self) -> ArgumentType:
"""Domain system to be used for segment annotation
Returns
-------
IgBLASTArgument
"""
return self._nomenclature
@nomenclature.setter
def nomenclature(self, system: str) -> None:
if system.lower() not in ["imgt", "kabat"]:
raise BadIgBLASTArgument(system, "['imgt','kaba']")
self._nomenclature = IgBLASTArgument("nomenclature", "domain_system", system, True)
@property
def aux_path(self) -> ArgumentType:
"""Auxilary data path. This is needed to lookup the J genes and tell them when the CDR3 stops.
Returns
-------
IgBLASTArgument
"""
return self._aux_path
@aux_path.setter
def aux_path(self, aux_path: Path | str) -> None:
if isinstance(aux_path, str):
aux_path = Path(aux_path)
if not aux_path.exists():
raise BadIgBLASTArgument(aux_path, "valid path to Auxilary database")
self._aux_path = IgBLASTArgument("aux_path", "auxiliary_data", aux_path.absolute(), True)
@property
def germline_db_v(self) -> ArgumentType:
"""Path to V gene database prefix
Returns
-------
IgBLASTArgument
"""
return self._germline_db_v
@germline_db_v.setter
def germline_db_v(self, path: str | Path) -> None:
abs_path = ensure_prefix_to(path)
if not abs_path:
raise BadIgBLASTArgument(path, "Valid path to V Database")
self._germline_db_v = IgBLASTArgument("germline_db_v", "germline_db_V", path, True)
    @property
    def germline_db_d(self) -> ArgumentType:
        """Path to D gene database prefix
        Returns
        -------
        IgBLASTArgument
        """
        return self._germline_db_d
    @germline_db_d.setter
    def germline_db_d(self, path: str | Path) -> None:
        abs_path = ensure_prefix_to(path)
        if not abs_path:
            # The D database is optional (e.g. light chains): warn and run
            # with an empty, non-required argument instead of raising.
            warnings.warn(f"{path} is not found, No D gene segment", UserWarning)
            # raise BadIgBLASTArgument(path, "Valid path to D Database")
            self._germline_db_d = IgBLASTArgument("germline_db_d", "germline_db_D", "", False)
        else:
            self._germline_db_d = IgBLASTArgument("germline_db_d", "germline_db_D", path, True)
    @property
    def germline_db_j(self) -> ArgumentType:
        """Path to J gene database prefix
        Returns
        -------
        IgBLASTArgument
        """
        return self._germline_db_j
    @germline_db_j.setter
    def germline_db_j(self, path: str | Path) -> None:
        # Unlike the D database, the J database is mandatory.
        abs_path = ensure_prefix_to(path)
        if not abs_path:
            raise BadIgBLASTArgument(path, "Valid path to J Database")
        # NOTE(review): stores the raw `path`, not abs_path — see germline_db_v.
        self._germline_db_j = IgBLASTArgument("germline_db_j", "germline_db_J", path, True)
@property
def word_size(self) -> ArgumentType:
"""Word size for wordfinder algorithm (length of best perfect match)
Returns
-------
IgBLASTArugment
"""
return self._word_size
@word_size.setter
def word_size(self, word_size: int) -> None:
if not isinstance(word_size, int) and word_size < 4:
raise BadIgBLASTArgument(word_size, ">4")
self._word_size = IgBLASTArgument("word_size", "word_size", word_size, False)
@property
def gap_open(self) -> ArgumentType:
"""Cost to open a gap
Returns
-------
IgBLASTArgument
"""
return self._gap_open
@gap_open.setter
def gap_open(self, go: int) -> None:
if not isinstance(go, int) and go > 0:
raise BadIgBLASTArgument(go, ">0")
self._gap_open = IgBLASTArgument("gap_open", "gapopen", go, False)
@property
def gap_extend(self) -> ArgumentType:
"""Cost to extend a gap
Returns
-------
IgBLASTArgument
"""
return self._gap_extend
@gap_extend.setter
def gap_extend(self, ge: int) -> None:
if not isinstance(ge, int) and ge > 0:
raise BadIgBLASTArgument(ge, ">0")
self._gap_extend = IgBLASTArgument("gap_open", "gapextend", ge, False)
@property
def num_threads(self) -> ArgumentType:
"""
Number of threads (CPUs) to use in the BLAST search
Returns
-------
IgBLASTArgument
"""
return self._num_threads
@num_threads.setter
def num_threads(self, num_threads: int) -> None:
if num_threads > cpu_count():
raise BadIgBLASTArgument(num_threads, "<" + str(cpu_count()))
self._num_threads = IgBLASTArgument("number_threds", "num_threads", num_threads, False)
    @property
    def extend_5(self) -> ArgumentType:
        """Extend V gene alignment at 5' end
        Returns
        -------
        IgBLASTArgument
        """
        return self._extend_5
    @extend_5.setter
    def extend_5(self, extend_5: bool) -> None:
        # Boolean flag mapped to igblastn's -extend_align5end option.
        self._extend_5 = IgBLASTArgument("extend_5", "extend_align5end", extend_5, False)
    @property
    def extend_3(self) -> ArgumentType:
        """Extend V gene alignment at 3' end
        Returns
        -------
        IgBLASTArgument
        """
        return self._extend_3
    @extend_3.setter
    def extend_3(self, extend_3: bool) -> None:
        # Boolean flag mapped to igblastn's -extend_align3end option.
        self._extend_3 = IgBLASTArgument("extend_3", "extend_align3end", extend_3, False)
@property
def allow_vdj_overlap(self) -> Any:
"""Allow the VDJ overlap
This option is active only when D_penalty
and J_penalty are set to -4 and -3, respectively
Returns
-------
IgBLASTArgument
"""
return self._allow_vdj_overlap # type: ignore[has-type]
@allow_vdj_overlap.setter
def allow_vdj_overlap(self, allow: bool) -> None:
j_penalty: IgBLASTArgument = self.j_penalty # type: ignore[assignment]
d_penalty: IgBLASTArgument = self.d_penalty # type: ignore[assignment]
if j_penalty.value != -3 and d_penalty.value != -4 and allow:
warnings.warn(
f"Allows vdj overlap set but j penalty and d penalty need to be -3 and -4, now are {self.j_penalty}, {self.d_penalty}",
UserWarning,
)
self._allow_vdj_overlap = IgBLASTArgument("allow_vdj_overlap", "allow_vdj_overlap", allow, False)
    @property
    def d_penalty(self) -> ArgumentType:
        """What is the D gene penalty
        Returns
        -------
        IgBLASTArgument
        """
        return self._d_penalty
    @d_penalty.setter
    def d_penalty(self, penalty: int) -> None:
        # Valid range is -4..0 (chained comparison: -5 < penalty < 1).
        if not -5 < penalty < 1:
            raise BadIgBLASTArgument(penalty, "must be less than 0 and greater than -5")
        self._d_penalty = IgBLASTArgument("d_penalty", "D_penalty", penalty, True)
    @property
    def j_penalty(self) -> ArgumentType:
        """What is the J gene penalty
        Returns
        -------
        IgBLASTArgument
        """
        return self._j_penalty
    @j_penalty.setter
    def j_penalty(self, penalty: int) -> None:
        # Valid range is -3..0 (chained comparison: -4 < penalty < 1).
        if not -4 < penalty < 1:
            raise BadIgBLASTArgument(penalty, "must be less than 0 and greater than -4")
        self._j_penalty = IgBLASTArgument("j_penalty", "J_penalty", penalty, True)
    @property
    def v_penalty(self) -> ArgumentType:
        """What is the V gene penalty
        Returns
        -------
        IgBLASTArgument
        """
        return self._v_penalty
    @v_penalty.setter
    def v_penalty(self, penalty: int) -> None:
        # Valid range is -4..0 (chained comparison: -5 < penalty < 1).
        if not -5 < penalty < 1:
            raise BadIgBLASTArgument(penalty, "must be less than 0 and greater than -5")
        self._v_penalty = IgBLASTArgument("v_penalty", "V_penalty", penalty, True)
    @property
    def arguments(self) -> List[IgBLASTArgument]:
        """Return the list of IgBLASTArguments used to build the command line.

        The order here defines the order of flags in ``cmd``.
        Returns
        -------
        List[IgBLASTArguments]
        """
        # lots of type ignores since these are IgBLASTArguments set in the setter, but are read from the property
        return [
            self.min_d_match,  # type: ignore
            self.num_v,  # type: ignore
            self.num_j,  # type: ignore
            self.num_d,  # type: ignore
            self.organism,  # type: ignore
            self.receptor,  # type: ignore
            self.germline_db_v,  # type: ignore
            self.germline_db_d,  # type: ignore
            self.germline_db_j,  # type: ignore
            self.aux_path,  # type: ignore
            self.outfmt,  # type: ignore
            self.nomenclature,  # type: ignore
            self.word_size,  # type: ignore
            self.gap_open,  # type: ignore
            self.gap_extend,  # type: ignore
            self.j_penalty,  # type: ignore
            self.v_penalty,  # type: ignore
            self.d_penalty,  # type: ignore
            self.num_threads,  # type: ignore
            self.extend_5,  # type: ignore
            self.extend_3,  # type: ignore
            self.allow_vdj_overlap,
        ]
    @property
    def cmd(self) -> List[str]:
        """Return the blast cmd that will be run by subprocess"""
        _cmd = [str(self.executable)]
        for blast_arg in self.arguments:
            kv = blast_arg.get_formatted_blast_arg()  # can return None if the arg should be skipped
            if kv:  # boolean flags are only emitted when they are true
                _cmd += kv
        return _cmd
    def pre_check(self) -> None:
        """Ensures we have set everything right
        Raises
        ------
        MissingIgBLASTArg
            We have set the IGDATA field
        BadIgBLASTExe
            Correct IGblast executable
        BadIgDATA
            If any of the fields are not set properly
        """
        # Ensure required arguments were set
        for blast_arg in self.arguments:
            if blast_arg.required and not (blast_arg.value):
                raise MissingIgBLASTArgument(f"Missing Blast argument. Need to set IgBLASTN.{blast_arg.name}")
        # Check the executable is actually runnable (on PATH or absolute)
        if not is_tool(str(self.executable)):
            raise BadIgBLASTExe(self.executable, "Is not an executable tool")
        # IGDATA must be set and must point at an existing directory
        if not self.igdata:
            raise BadIgDATA("No IGDATA set, set with IgBLASTN.igdata")
        else:
            if not os.path.exists(self.igdata):
                raise BadIgDATA(self.igdata)
    # Run methods
    def run_file(self, file: Union[Path, str]) -> pd.DataFrame:
        """Run IgBlast on a file
        Parameters
        ----------
        file : Path
            the fasta file path
        Returns
        -------
        pd.DataFrame
            A dataframe with the IgBLAST results
        Raises
        ------
        EmtpyFileError
            if the fasta file is empty
        IgBLASTRunTimeError
            for any given runtime error for igblastn
        """
        # because igblast uses IGDATA as the internal file structure, we should pass the enviroment to the subprocess
        local_env = os.environ.copy()
        local_env["IGDATA"] = str(self.igdata)
        # we want to ensure they actually passed a file with stuff in it
        if os.path.getsize(file) == 0:
            raise EmtpyFileError(file)
        # take the cmd and finally add the query file
        cmd = self.cmd
        cmd += ["-query", str(file)]
        # run a precheck to make sure everything passed was working
        self.pre_check()
        # while we can certainly do this as an output stream on stdout,
        # It's probably best to take advantage of IGblast output and tempfile
        with tempfile.NamedTemporaryFile(dir=self.temp_dir, suffix="_igblast.tsv") as tmpfile:
            cmd += ["-out", tmpfile.name]
            process = subprocess.run(cmd, env=local_env, capture_output=True)
            if process.stderr:
                raise IgBLASTRunTimeError(process.stderr)
            # we read the dataframe from the tempfile, it should always be in .TSV.
            # We can also cast it to IGBLAST_AIRR dtypes to save memory
            df = pd.read_csv(tmpfile.name, sep="\t", dtype=IGBLAST_AIRR)
            # NOTE(review): this unlink happens *inside* the `with` block, so
            # NamedTemporaryFile's own cleanup will try to delete the file a
            # second time on exit; also on Windows igblastn may not be able to
            # open the file while Python holds it — confirm on the target OS.
            if Path(tmpfile.name).exists():
                logger.debug(f"{tmpfile.name} was not deleted after it exited scope")
                Path(tmpfile.name).unlink()
        # IgBLAST reports identities as percentages; normalize to fractions.
        df["v_identity"] = df["v_identity"] / 100
        df["d_identity"] = df["d_identity"] / 100
        df["j_identity"] = df["j_identity"] / 100
        return df
def __repr__(self) -> str:
return "IgBLAST: env IGDATA={} {}".format(str(self.igdata), " ".join(self.cmd))
def __str__(self) -> str:
return self.__repr__()
if __name__ == "__main__":
    # Smoke test: constructing the runner exercises the default-argument setup.
    ig_blast = IgBLASTN()
| 2.171875 | 2 |
py/sonetos.py | ceciliakuhnvanin/meusoneto.github.io | 1 | 12769442 | <gh_stars>1-10
import random
def estrofe1():
    """Build the first stanza: four random verses joined by "<br>" tags.

    The fixed verse this randomized stanza replaces (kept for reference) was:
        Amo-te tanto, meu amor... nao canto
        O humano coracao com mais verdade...
        Amo-te como amigo e como amante
        Numa sempre diversa realidade
    """
    # Bug fix: removed the unreachable second `return` (dead code after the
    # first return); its text is preserved in the docstring above.
    return verso() + "<br>" + verso() + "<br>" + verso() + "<br>" + verso() + "<br> <br>"
def estrofe2():
    """Build the second stanza: four random verses joined by "<br>" tags.

    The fixed verse this randomized stanza replaces (kept for reference) was:
        Amo-te afim, de um calmo amor prestante,
        E te amo alem, presente na saudade.
        Amo-te, enfim, com grande liberdade
        Dentro da eternidade e a cada instante.
    """
    # Bug fix: removed the unreachable second `return` (dead code).
    return verso() + "<br>" + verso() + "<br>" + verso() + "<br>" + verso() + "<br> <br>"
def estrofe3():
    """Build the third stanza: three random verses joined by "<br>" tags.

    The fixed verse this randomized stanza replaces (kept for reference) was:
        Amo-te como um bicho, simplesmente,
        De um amor sem misterio e sem virtude
        Com um desejo macico e permanente.
    """
    # Bug fix: removed the unreachable second `return` (dead code).
    return verso() + "<br>" + verso() + "<br>" + verso() + "<br> <br>"
def estrofe4():
    """Build the fourth stanza: three random verses joined by "<br>" tags.

    The fixed verse this randomized stanza replaces (kept for reference) was:
        E de te amar assim muito amiude,
        que um dia em teu corpo de repente
        Hei de morrer de amar mais do que pude.
    """
    # Bug fix: removed the unreachable second `return` (dead code).
    return verso() + "<br>" + verso() + "<br>" + verso() + "<br> <br>"
def verboInfinitivo():
    """Return a random infinitive verb for a verse."""
    opcoes = ("amar", "beijar", "ver", "viver", "contar", "dancar", "voar", "sonhar", "buscar")
    return random.choice(opcoes)
def verboConjugado():
    """Return a random conjugated verb phrase to open a verse."""
    opcoes = ("quero", "vou", "preciso", "desejo", "espero", "nasci para")
    return random.choice(opcoes)
def substantivo():
    """Return a random noun phrase to end a verse."""
    opcoes = ("pudim", "aipim", "ceu", "no beleleu", "pastel",
              "irmao do jorel", "doguinho", "passarinho", "carinho", "baixinho")
    return random.choice(opcoes)
def verso():
    """Build one random verse: conjugated verb + infinitive + noun, newline-terminated."""
    palavras = [verboConjugado(), verboInfinitivo(), substantivo()]
    return " ".join(palavras) + "\n"
def imprime():
    # Print the soneto wrapped in a minimal HTML document.
    # NOTE(review): this uses the Python 2 `print` statement, so this module
    # only runs under Python 2; `getSoneto()` returns a tuple, which is
    # printed as part of the comma-separated print arguments.
    inicio = "<html> <head> </head> <body>"
    fim = "</body> </html>"
    print inicio, getSoneto(), fim
def getSoneto():
    # Returns a 7-tuple: the four stanzas interleaved with "\n" separators
    # (callers print the tuple rather than joining it into one string).
    return estrofe1(), "\n", estrofe2(), "\n", estrofe3(), "\n", estrofe4()
def getSonetoJson():
    # Returns the four stanzas as a JSON-serializable dict keyed by stanza name.
    return {'estrofe1': estrofe1(), 'estrofe2': estrofe2(), 'estrofe3': estrofe3(), 'estrofe4': estrofe4()}
core/admin.py | monitorme/monitorme | 1 | 12769443 | from django.contrib import admin
from core.models import User, TrackerGroup, TrackerGroupInstance, Question, Answer, Response, TrackerGroupInstance
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
    # Django admin configuration for the User model: columns shown in the
    # changelist view.
    list_display = (
        'username',
        'name',
        'email',
        'slug',
    )
@admin.register(TrackerGroup)
class TrackerGroup(admin.ModelAdmin):
    # Admin configuration for the TrackerGroup model.
    # NOTE(review): this class reuses the model's name, shadowing the imported
    # `TrackerGroup` model from this point on in the module. The decorator is
    # evaluated before the rebinding, so registration still targets the model,
    # but renaming the class (e.g. TrackerGroupAdmin) would be clearer.
    list_display = (
        'name',
        'user',
        'created_at',
        'id',
    )
@admin.register(TrackerGroupInstance)
class TrackerGroupInstanceAdmin(admin.ModelAdmin):
    # Admin configuration for TrackerGroupInstance; shows both the related
    # tracker and its raw id column.
    list_display = (
        'tracker',
        'started_at',
        'created_by',
        'tracker_id',
    )
@admin.register(Question)
class Question(admin.ModelAdmin):
    # Admin configuration for the Question model.
    # NOTE(review): the class name shadows the imported `Question` model —
    # registration still works (decorator runs first), but consider renaming.
    list_display = (
        'current_question',
        'tracker',
        'created_at',
        'created_by',
    )
@admin.register(Answer)
class Answer(admin.ModelAdmin):
    # Admin configuration for the Answer model.
    # NOTE(review): the class name shadows the imported `Answer` model —
    # registration still works (decorator runs first), but consider renaming.
    list_display = (
        'current_answer',
        'question',
        'created_at',
        'created_by',
    )
@admin.register(Response)
class Response(admin.ModelAdmin):
    # Admin configuration for the Response model.
    # NOTE(review): the class name shadows the imported `Response` model; also,
    # `display_answers` must exist as a model field or admin/model method —
    # confirm against the Response model definition.
    list_display = (
        'tracker',
        'tracker_instance',
        'display_answers',
        'question',
        'user',
        'created_at',
    )
Classes/Androxus.py | devRMA/Androxus | 8 | 12769444 | <reponame>devRMA/Androxus
# -*- coding: utf-8 -*-
# Androxus bot
# Androxus.py
__author__ = 'Rafael'
import re
from datetime import datetime
from itertools import cycle
from json import loads
from os import listdir
from string import ascii_letters
from sys import version
from traceback import format_exc
import discord
from asyncpg.pool import Pool
from discord.ext import commands, tasks
from requests import get
from EmbedModels.embedHelpCategory import embed_help_category
from database.Factories.ConnectionFactory import ConnectionFactory
from database.Repositories.BlacklistRepository import BlacklistRepository
from database.Repositories.ComandoDesativadoRepository import ComandoDesativadoRepository
from database.Repositories.ComandoPersonalizadoRepository import ComandoPersonalizadoRepository
from database.Repositories.ServidorRepository import ServidorRepository
from utils import permissions
from utils.Utils import get_configs, prettify_number, get_path_from_file, pegar_o_prefixo, get_emojis_json
from utils.Utils import string_similarity, get_most_similar_item
def _warn(frase):
"""
Args:
frase (str): A frase que vai ser printada, com a cor amarela
Returns:
None
"""
print('\033[1;33m')
print(frase)
print('\033[0;0m')
def _load_cogs(bot):
    # Load every command/event extension from the cmds/ and events/ folders.
    # Replaces the default help command (a custom one lives in cmds/) and
    # pulls in the jishaku debugging cog first.
    bot.remove_command('help')
    bot.load_extension('jishaku')
    path_cmds = get_path_from_file('cmds/')
    path_events = get_path_from_file('events/')
    for filename in listdir(path_cmds):
        if filename.endswith('.py'):
            try:
                bot.load_extension(f'cmds.{filename[:-3]}')  # register the cog with the bot
            except commands.NoEntryPointError:
                # module has no `def setup(bot)` entry point — skip with a warning
                print(f'⚠ - Módulo {filename[:-3]} ignorado! "def setup" não encontrado!!')
            except:
                print(f'⚠ - Módulo {filename[:-3]} deu erro na hora de carregar!\n{format_exc()}')
    for filename in listdir(path_events):  # list every file inside the "events" folder
        if filename.endswith('.py'):
            try:
                bot.load_extension(f'events.{filename[:-3]}')
            except commands.NoEntryPointError:
                pass  # events without a `def setup` are silently ignored
            except:
                print(f'⚠ - Módulo {filename[:-3]} não foi carregado!\n{format_exc()}')
class Androxus(commands.Bot):
    """Main bot class: a discord.py Bot with database-backed prefixes,
    per-guild custom/disabled commands and a rotating status loop."""
    __version__ = '2.3'
    # Parsed configuration from the .json config file.
    configs: dict = get_configs()
    # Set on the first on_ready; used by uptime commands.
    uptime: datetime = None
    # Whether the status-rotation loop should keep changing the presence.
    mudar_status: bool = True
    # Channel used to log guild joins/leaves (resolved in on_ready).
    server_log: discord.TextChannel = None
    # When True, only the owners' messages are processed.
    maintenance_mode: bool = False
    # asyncpg connection pool, created lazily in on_ready.
    db_connection: Pool = None
    def __init__(self, *args, **kwargs):
        # discord.py 1.5.0 gateway intents (request everything)
        intents = discord.Intents.all()
        # settings loaded from the .json config file
        configs = get_configs()
        async def _prefix_or_mention(bot, message):
            # dynamic prefix: the guild's configured prefix, or a bot mention
            prefix = await pegar_o_prefixo(bot, message)
            return commands.when_mentioned_or(prefix)(bot, message)
        kwargs['command_prefix'] = _prefix_or_mention
        kwargs['owner_id'] = configs['owners'] if len(configs['owners']) > 1 else configs['owners'][0]
        kwargs['case_insensitive'] = True
        kwargs['intents'] = intents
        # initialise the underlying commands.Bot
        super().__init__(*args, **kwargs)
        _load_cogs(self)
        # check whether this copy of the bot is running the latest version
        url = 'https://api.github.com/repositories/294764564/commits'  # endpoint listing every commit of the bot
        html = get(url).text
        json = loads(html)
        # commit messages always look like:
        # Versão x.x.x.x
        # - changes
        # so take only the version token from the first line of the latest commit
        version_github = json[0]['commit']['message'].splitlines()[0].split(' ')[-1]
        # and compare it with the running version
        if version_github != self.__version__:
            _warn('========== ATENÇÃO! ==========\n'
                  'Já você está usando uma versão desatualizada do Androxus!\n'
                  'Isso não vai impedir que o bot inicie, porém a sua versão pode\n'
                  'estar com algum bug que já foi resolvido ou algum comando a menos!\n'
                  'Acesse o repositório original, e baixe a nova versão!\n'
                  'Link do repositório original:\nhttps://github.com/devRMA/Androxus\n'
                  f'Nova versão: {version_github}\n'
                  f'Versão que você está usando: {self.__version__}')
    async def on_ready(self):
        # First ready only: open the DB pool, resolve the log channel and
        # print startup diagnostics. on_ready can fire again on reconnects.
        if self.db_connection is None:
            self.uptime = datetime.utcnow()
            self.server_log = self.get_channel(self.configs['channels_log']['servers'])
            self.db_connection = await ConnectionFactory.get_connection()
            print(('-=' * 10) + 'Androxus Online!' + ('-=' * 10))
            print(f'Logado em {self.user}')
            print(f'ID: {self.user.id}')
            print(f'{len(self.get_all_commands())} comandos!')
            print(f'{len(set(self.get_all_members()))} usuários!')
            print(f'{len(self.guilds)} servidores!')
            print(f'Versão do discord.py: {discord.__version__}')
            print(f'Versão do python: {version[0:5]}')
            print(f'Versão do bot: {self.__version__}')
        try:
            self._change_status.start()  # start the status-rotation loop
        except RuntimeError:
            # loop already running (reconnect) — ignore
            pass
    async def on_message(self, message):
        # Central message handler: filters banned/system/bot messages,
        # answers bare mentions, honours disabled and custom per-guild
        # commands, and suggests similar commands on typos.
        if (not self.is_ready()) or (self.db_connection is None):
            return
        ctx = await self.get_context(message)
        banido = (await BlacklistRepository().get_pessoa(self.db_connection, ctx.author.id))[0]
        if message.is_system() or not permissions.can_send(ctx) or ctx.author.bot or banido:
            return
        try:
            permissions.is_owner(ctx)
        except discord.ext.commands.errors.NotOwner:
            # if the author is not an owner and the bot is in maintenance
            # mode, simply ignore the message
            if self.maintenance_mode:
                return
        servidor = await ServidorRepository().get_servidor(self.db_connection, ctx.guild.id) if \
            ctx.guild is not None else None
        prefixo = await pegar_o_prefixo(self, message)
        # a bare mention of the bot replies with a pointer to the cmds command
        if (f'<@{self.user.id}>' == message.content) or (f'<@!{self.user.id}>' == message.content):
            await ctx.reply(f'Use o comando ``{prefixo}cmds`` para obter todos os meus comandos!',
                            mention_author=False)
            if permissions.can_use_external_emojis(ctx):
                await ctx.send(self.emoji("hello"))
            return
        # block commands that a guild administrator disabled
        if (servidor is not None) and ctx.valid:
            for comando_desativado_obj in await ComandoDesativadoRepository().get_commands(self.db_connection,
                                                                                          servidor):
                comando_desativado = self.get_command(comando_desativado_obj.comando.lower())
                if comando_desativado.name == ctx.command.name:
                    return await ctx.reply(f'{self.emoji("no_no")} Este comando '
                                           'foi desativado por um administrador do servidor!', delete_after=10,
                                           mention_author=False)
        channel = message.channel
        if (servidor is not None) and (ctx.command is None):
            # check whether the message triggers a guild custom command
            for comando_personalizado in await ComandoPersonalizadoRepository().get_commands(self.db_connection,
                                                                                            servidor):
                if comando_personalizado.comando.lower() in message.content.lower():
                    enviar_mensagem = True
                    # if the command is not an "in text" trigger, the whole
                    # message must match exactly
                    if not comando_personalizado.in_text and (not message.content.lower() ==
                                                              comando_personalizado.comando.lower()):
                        enviar_mensagem = False
                    if enviar_mensagem:
                        resposta = comando_personalizado.resposta
                        # placeholder variables the response may contain
                        variaveis = {
                            '{author_mention}': message.author.mention,
                            '{author_name}': message.author.name,
                            '{author_nametag}': str(message.author),
                            '{author_nick}': message.author.display_name,
                            '{author_id}': message.author.id,
                            '{channel_mention}': message.channel.mention,
                            '{channel_name}': message.channel.name
                        }
                        for key_value in variaveis.items():
                            # replace() is used instead of .format() on purpose:
                            # if the user wrote e.g. {abc} in the response,
                            # .format() would raise because no value for abc
                            # is supplied
                            resposta = resposta.replace(key_value[0], str(key_value[-1]))
                        return await channel.send(resposta)
        if not ctx.valid:
            # message starts with the prefix but matches no command:
            # maybe it is a category name, or a typo we can suggest a fix for
            if isinstance(message.channel, discord.DMChannel):
                return
            if not message.content.startswith(prefixo):
                return
            # take everything after the prefix
            comando = message.content.lower()[len(prefixo):]
            if len(comando) == 0:
                return
            # ignore if the first character is not a letter
            if comando[0] not in ascii_letters:
                return
            comando = comando.split(' ')[0]
            mostrar_erro = servidor.sugestao_de_comando
            commands = []
            for command in self.get_all_commands():
                # the "command" may actually be a category name: show its help
                if comando.lower() == command.category:
                    e = await embed_help_category(self, ctx, comando)
                    return await ctx.reply(embed=e, mention_author=False)
                commands.append(command.name)
                commands.append(command.category)
                for alias in command.aliases:
                    commands.append(alias)
            if mostrar_erro:
                msg = f'{self.emoji("sad")} eu não achei consegui ' \
                      f'achar o comando "{comando}".'
                sugestao = get_most_similar_item(comando, commands)
                if sugestao is not None:
                    # only suggest if it is at least 40% similar to the input
                    if string_similarity(comando, sugestao) >= 0.4:
                        msg += f'\nVocê quis dizer ``{sugestao}``?'
                    msg += f'\nPara desativar esta mensagem, use o comando ``desativar_sugestão``'
                return await ctx.reply(msg, delete_after=10, mention_author=False)
        await self.process_commands(message)
    @tasks.loop(minutes=1)
    async def _change_status(self):  # loop that keeps rotating the bot's status
        if self.mudar_status:
            # the rotation of status messages (placeholders filled below)
            status = cycle(['Para me adicionar em um servidor, basta enviar a mensagem "invite" no meu privado!',
                            'Eu estou divertindo {servers} servidores!',
                            'Estou divertindo {pessoas} pessoas',
                            'Estou ouvindo {channels} chats!',
                            'Caso você precise de ajuda, basta me mencionar!',
                            '🤔 como que eu estou "jogando" se eu sou um bot?',
                            'Caso você queira saber mais detalhes sobre mim, use o comando "botinfo"!',
                            'Caso você queira ver meu código fonte, use o comando "source"!',
                            'Para saber todos os meus comandos, digite "cmds"!',
                            'Para obter mais informações sobre um comando, use o comando "help comando"!'
                            ])
            status_escolhido = next(status)  # pick the next status
            status_escolhido = status_escolhido.format(servers=prettify_number(len(self.guilds)),
                                                       pessoas=prettify_number(len(self.users)),
                                                       channels=prettify_number(len(set(self.get_all_channels())))
                                                       )
            await self.change_presence(activity=discord.Game(name=status_escolhido))
    async def is_owner(self, user: discord.User):
        # Owners from the config file count in addition to discord.py's own
        # owner resolution.
        if user.id in self.configs['owners']:
            return True
        return await super().is_owner(user)
    @staticmethod
    def get_all_categories():
        """Return the sorted, de-duplicated category names from the emoji json."""
        categories = [c[0] for c in get_emojis_json()['categories'].items()]
        # a copy of the list is returned just to be safe
        return sorted(list(set(categories)).copy())
    def is_category(self, argument):
        # Case-insensitive check of whether *argument* names a known category.
        for category in self.get_all_categories():
            if argument.lower() == category.lower():
                return True
        return False
    def get_commands_from_category(self, category):
        # Collect every visible command that declares this category,
        # sorted by name.
        commands_from_category = []
        if self.is_category(category):
            for cog in self.cogs:
                for command in self.get_cog(cog).get_commands():
                    if hasattr(command, 'category') and (command.category == category) and (not command.hidden):
                        commands_from_category.append(command)
        return sorted(commands_from_category.copy(), key=lambda c: c.name)
    def get_emoji_from_category(self, category):
        # Emoji associated with a category in the json; '' when unknown.
        category = category.lower()
        if self.is_category(category):
            try:
                return get_emojis_json()['categories'][category]
            except KeyError:
                return ''
        return ''
    @staticmethod
    def emoji(emoji_name):
        """
        Args:
            emoji_name (str): The emoji's name in the .json file
        Returns:
            str: Whatever was found in the json, or None.
        """
        # Look the name up at the top level, then in the 'dances' and
        # 'categories' sub-sections.
        dict_emojis = get_emojis_json()
        try:
            return dict_emojis[emoji_name]
        except KeyError:
            try:
                return dict_emojis['dances'][emoji_name]
            except KeyError:
                try:
                    return dict_emojis['categories'][emoji_name]
                except KeyError:
                    return None
    def get_emoji(self, args):
        # Extended lookup that accepts an emoji id,
        # a name from the configs .json,
        # or a full emoji usage like <:name:1234>
        args = str(args).lower()
        if args.isdigit():
            return super().get_emoji(int(args))
        emoji = self.emoji(args)
        if emoji is None:
            emoji = args
        emoji_regex = re.compile(r'<a?:.+?:([0-9]{15,21})>')
        regex_match = emoji_regex.match(emoji)
        if regex_match is not None:
            emoji_id = int(regex_match.group(1))
            return super().get_emoji(emoji_id)
        return None
    def get_all_commands(self):
        # Every visible command that declares a category, sorted by name.
        all_commands = []
        for cog in self.cogs:
            for command in self.get_cog(cog).get_commands():
                if (not command.hidden) and (hasattr(command, 'category')):
                    all_commands.append(command)
        return sorted(all_commands.copy(), key=lambda c: c.name)
    async def send_help(self, ctx):
        # Invoke the custom help command in the given context.
        await self.get_command('help')(ctx)
    async def language(self, ctx):
        """Placeholder for per-guild language resolution (not implemented).

        Args:
            ctx (discord.ext.commands.context.Context): Context used to resolve the prefix/guild.
        Returns:
        """
        pass
class _BaseComando(commands.Command):
    # Command subclass that carries the extra metadata (category, parameter
    # and example lists, required permissions) used by the custom help system.
    def __init__(self, func, **kwargs):
        super().__init__(func, **kwargs)
        self.category = kwargs.get('category', 'outros')    # help category, defaults to "outros" (others)
        self.parameters = kwargs.get('parameters', [])      # parameter descriptions shown in help
        self.examples = kwargs.get('examples', [])          # usage examples shown in help
        self.perm_user = kwargs.get('perm_user', None)      # permission the invoking user needs
        self.perm_bot = kwargs.get('perm_bot', None)        # permission the bot itself needs
def comando(*args, **kwargs):
    """Decorator factory: like ``commands.command`` but producing _BaseComando.

    Example:
        @Androxus.comando(name='comando',
                          aliases=['alias1', 'alias2'],
                          description='Description of the command',
                          parameters=['<required parameter>', '[optional parameter]'],
                          examples=['`{prefix}comando` `example of using this command`',
                                    '`{prefix}alias1` `another example`'],
                          # f'Você precisa ter permissão de `{perm_user}` para usar este comando!'
                          perm_user='permission the user must have',
                          # f'Eu preciso ter permissão de `{perm_bot}` para realizar este comando!'
                          perm_bot='permission the bot must have to run the command',
                          category='Command category',
                          hidden=False)
        async def _comando(self, ctx, required_parameter, optional_parameter='default value')
            pass
    """
    return commands.command(*args, **kwargs, cls=_BaseComando)
| 1.976563 | 2 |
utility/Team.py | KilometersFan/SciOlyScheduler | 1 | 12769445 | from Coach import Coach
class Team:
    """A Science Olympiad team: a numbered, named group of coaches."""

    def __init__(self, number, name):
        self.number = number
        self.name = name
        self.coaches = []

    def add_coach(self, coach):
        """Append *coach* if it is a Coach instance; otherwise print an error."""
        if not isinstance(coach, Coach):
            print("Invalid argument passed. Looking for Coach!")
            return
        self.coaches.append(coach)

    def get_teammate(self, coach):
        """Return the other coaches on the team, or None if *coach* is not on it."""
        if coach not in self.coaches:
            return None
        return [teammate for teammate in self.coaches if teammate != coach]

    def get_coaches(self):
        return self.coaches

    def get_number(self):
        return self.number

    def get_name(self):
        return self.name

    def print_info(self):
        """Print the team header followed by each coach's info (or 'No Coaches')."""
        print("Team", self.get_number(), ":", self.get_name())
        print("Coaches:")
        coaches = self.get_coaches()
        if not coaches:
            print("No Coaches")
            print()
            return
        for coach in coaches:
            coach.print_info()
            print()
if __name__ == "__main__":
    # No standalone behaviour; this module is only imported.
    pass
ker/api/ssh.py | csvwolf/ker.py | 5 | 12769446 | """
ssh module
"""
from ker.utils import request
from .list import API_LIST
class SSH:
    """
    Client for the ker SSH-key API endpoints.
    """

    def __init__(self, email, token):
        self.email = email
        self.token = token

    def _credentials(self):
        # Base payload every endpoint requires.
        return {'email': self.email, 'token': self.token}

    def list(self):
        """
        list ssh keys
        """
        return request(API_LIST.SSH_LIST.value, self._credentials())

    def create(self, name, key):
        """
        create ssh key
        """
        payload = self._credentials()
        payload['name'] = name
        payload['key'] = key
        return request(API_LIST.SSH_CREATE.value, payload)

    def delete(self):
        """
        delete ssh key
        """
        return request(API_LIST.SSH_DELETE.value, self._credentials())
| 2.703125 | 3 |
ampligraph/latent_features/__init__.py | wayne9qiu/AmpliGraph | 0 | 12769447 | # Copyright 2019-2020 The AmpliGraph Authors. All Rights Reserved.
#
# This file is Licensed under the Apache License, Version 2.0.
# A copy of the Licence is available in LICENCE, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
r"""This module includes neural graph embedding models and support functions.
Knowledge graph embedding models are neural architectures that encode concepts from a knowledge graph
(i.e. entities :math:`\mathcal{E}` and relation types :math:`\mathcal{R}`) into low-dimensional, continuous vectors
:math:`\in \mathcal{R}^k`. Such *knowledge graph embeddings* have applications in knowledge graph completion,
entity resolution, and link-based clustering, just to cite a few :cite:`nickel2016review`.
"""
from .models.EmbeddingModel import EmbeddingModel, MODEL_REGISTRY, set_entity_threshold, reset_entity_threshold
from .models.TransE import TransE
from .models.DistMult import DistMult
from .models.ComplEx import ComplEx
from .models.HolE import HolE
from .models.RandomBaseline import RandomBaseline
from .models.ConvKB import ConvKB
from .models.ConvE import ConvE
from .loss_functions import Loss, AbsoluteMarginLoss, SelfAdversarialLoss, NLLLoss, PairwiseLoss,\
NLLMulticlass, BCELoss, LOSS_REGISTRY
from .regularizers import Regularizer, LPRegularizer, REGULARIZER_REGISTRY
from .optimizers import Optimizer, AdagradOptimizer, AdamOptimizer, MomentumOptimizer, SGDOptimizer, OPTIMIZER_REGISTRY
from .initializers import Initializer, RandomNormal, RandomUniform, Xavier, Constant, INITIALIZER_REGISTRY
from .misc import get_entity_triples
from ..utils import save_model, restore_model
# Explicit public API of ampligraph.latent_features (controls `import *`
# and is used by the documentation tooling).
__all__ = ['LOSS_REGISTRY', 'REGULARIZER_REGISTRY', 'MODEL_REGISTRY', 'OPTIMIZER_REGISTRY', 'INITIALIZER_REGISTRY',
           'set_entity_threshold', 'reset_entity_threshold',
           'EmbeddingModel', 'TransE', 'DistMult', 'ComplEx', 'HolE', 'ConvKB', 'ConvE', 'RandomBaseline',
           'Loss', 'AbsoluteMarginLoss', 'SelfAdversarialLoss', 'NLLLoss', 'PairwiseLoss', 'BCELoss', 'NLLMulticlass',
           'Regularizer', 'LPRegularizer', 'Optimizer', 'AdagradOptimizer', 'AdamOptimizer', 'MomentumOptimizer',
           'SGDOptimizer', 'Initializer', 'RandomNormal', 'RandomUniform', 'Xavier', 'Constant',
           'get_entity_triples',
           'save_model', 'restore_model']
| 1.671875 | 2 |
exptools/viewing/__init__.py | Shiduo-zh/pybulletSim | 9 | 12769448 | ''' possible complement of viewing an experiment results
'''
__author__ = 'dementrock' | 1.015625 | 1 |
code/utils.py | yi-ding-cs/TSception | 9 | 12769449 | <filename>code/utils.py
import os
import time
import pprint
from networks import TSception
from eeg_dataset import *
from torch.utils.data import DataLoader
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score
def set_gpu(x):
    """Pin torch to one thread and restrict CUDA to the given device string.

    Args:
        x: value for CUDA_VISIBLE_DEVICES, e.g. '0' or '0,1'.
    """
    torch.set_num_threads(1)
    env = os.environ
    env["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    env['CUDA_VISIBLE_DEVICES'] = x
    print('using gpu:', x)
def seed_all(seed):
    """Seed the torch and numpy RNGs and make cuDNN deterministic."""
    torch.manual_seed(seed)
    np.random.seed(seed)
    torch.backends.cudnn.deterministic = True
def ensure_path(path):
    """Create the directory *path* (including parents) if it does not exist.

    No-op when the path already exists (idempotent).
    """
    # Idiom fix: the original `if exists: pass / else: makedirs` inverted the
    # condition just to reach an empty branch.
    if not os.path.exists(path):
        os.makedirs(path)
class Averager():
    """Incrementally maintains the arithmetic mean of the values added."""

    def __init__(self):
        self.n = 0  # number of values seen so far
        self.v = 0  # current running mean

    def add(self, x):
        """Fold *x* into the running mean."""
        total = self.v * self.n + x
        self.n += 1
        self.v = total / self.n

    def item(self):
        """Return the current mean (0 before any add)."""
        return self.v
def count_acc(logits, label):
    """Classification accuracy of *logits* against integer *label* targets.

    Args:
        logits: (batch, num_classes) score tensor.
        label: (batch,) integer class-index tensor on the same device.

    Returns:
        float: fraction of samples whose argmax matches the label.
    """
    pred = torch.argmax(logits, dim=1)
    # Bug fix: `.type(torch.cuda.FloatTensor)` forced the tensor onto CUDA and
    # crashed on CPU-only hosts; `.float()` yields the same value on any device.
    return (pred == label).float().mean().item()
class Timer():
    """Wall-clock stopwatch started at construction.

    ``measure(p)`` returns the elapsed time divided by *p*, formatted as
    seconds, whole minutes, or hours with one decimal.
    """

    def __init__(self):
        self.o = time.time()  # start timestamp (seconds since epoch)

    def measure(self, p=1):
        """Return the elapsed time / *p* as a short human-readable string."""
        elapsed = int((time.time() - self.o) / p)
        if elapsed >= 3600:
            return '{:.1f}h'.format(elapsed / 3600)
        if elapsed >= 60:
            return '{}m'.format(round(elapsed / 60))
        return '{}s'.format(elapsed)
# Shared PrettyPrinter instance, created once at import time.
_utils_pp = pprint.PrettyPrinter()
def pprint(x):
    # Pretty-print *x*; deliberately shadows the stdlib `pprint` module name
    # within this module's namespace.
    _utils_pp.pprint(x)
def get_model(args):
    # Build the requested network from the parsed CLI/config arguments.
    # NOTE(review): only 'TSception' is handled; any other args.model leaves
    # `model` unbound and raises UnboundLocalError at the return.
    if args.model == 'TSception':
        model = TSception(
            num_classes=args.num_class, input_size=args.input_shape,
            sampling_rate=args.sampling_rate, num_T=args.T, num_S=args.T,
            hidden=args.hidden, dropout_rate=args.dropout)
        # NOTE(review): num_S is fed from args.T (not a separate args.S) —
        # confirm this mirroring is intentional.
    return model
def get_dataloader(data, label, batch_size, shuffle=True):
    # Wrap (data, label) arrays in an eegDataset and return a pinned-memory
    # DataLoader over it.
    dataset = eegDataset(data, label)
    loader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=shuffle, pin_memory=True)
    return loader
def get_metrics(y_pred, y_true, classes=None):
    """Compute accuracy, binary F1 score and the confusion matrix.

    When *classes* is given it fixes the row/column order of the
    confusion matrix.
    """
    acc = accuracy_score(y_true, y_pred)
    f1 = f1_score(y_true, y_pred)
    if classes is None:
        cm = confusion_matrix(y_true, y_pred)
    else:
        cm = confusion_matrix(y_true, y_pred, labels=classes)
    return acc, f1, cm
def get_trainable_parameter_num(model):
    """Count the elements of all parameters with requires_grad=True."""
    trainable = (p for p in model.parameters() if p.requires_grad)
    return sum(p.numel() for p in trainable)
def L1Loss(model, Lambda):
    """L1 (lasso) penalty over every model parameter, scaled by Lambda."""
    flat = torch.cat([p.view(-1) for p in model.parameters()])
    return Lambda * flat.abs().sum()
def generate_TS_channel_order(original_order: list):
    """
    This function will generate the channel order for TSception
    Parameters
    ----------
    original_order: list of the channel names
    Returns
    -------
    TS: list of channel names which is for TSception
    """
    original_order_up = [item.upper() for item in original_order]
    # Split each 2- or 3-character name into its letter part (lobe) and its
    # trailing number/'Z' (hemisphere position).
    # NOTE(review): names with other lengths (e.g. 4-char like 'TP10') are
    # silently skipped, which would misalign the indices used below against
    # original_order — confirm all expected montages use 2-3 char names.
    chan_letter, chan_num = [], []
    for i, chan in enumerate(original_order_up):
        if len(chan)==2:
            chan_letter.append(chan[0])
            chan_num.append(chan[-1])
        elif len(chan)==3:
            chan_letter.append(chan[:2])
            chan_num.append(chan[-1])
    # Pair channels with the same letter part whose numbers differ by 1
    # (left/right hemisphere counterparts); midline 'Z' channels are excluded.
    idx_pair = []
    for i, chan in enumerate(chan_letter):
        for j, chan_ in enumerate(chan_letter):
            if i!=j:
                if chan == chan_ and chan_num[i]!= 'Z' and \
                        chan_num[j] != 'Z' and int(chan_num[i]) - int(chan_num[j]) == -1:
                    idx_pair.append([i, j])
    # Flatten pairs as [all left indices..., all right indices...] so the two
    # hemispheres form contiguous halves of the returned ordering.
    idx_pair = np.array(idx_pair)
    idx_pair_t = idx_pair.T
    idx_pair = np.concatenate(idx_pair_t, axis=0).astype(int)
    return [original_order[item] for item in idx_pair]
if __name__=="__main__":
    # example of using generate_TS_channel_order() with a standard 32-channel
    # 10-20 montage (DEAP-style ordering)
    original_order = ['Fp1', 'AF3', 'F3', 'F7', 'FC5', 'FC1', 'C3', 'T7', 'CP5', 'CP1', 'P3', 'P7', 'PO3',
                      'O1', 'Oz', 'Pz', 'Fp2', 'AF4', 'Fz', 'F4', 'F8', 'FC6', 'FC2', 'Cz', 'C4', 'T8', 'CP6',
                      'CP2', 'P4', 'P8', 'PO4', 'O2']
    TS = generate_TS_channel_order(original_order)
    print('done')
test/jinja_script.py | SecSamDev/cancamusa | 3 | 12769450 | from jinja2 import Template
import sys
import os
import argparse
sys.path.append(os.path.join(os.path.dirname(__file__), '../lib'))
from ad_struct import ADStructure
# Load the PowerShell provisioning template that this script renders with a
# demo Active Directory layout (OUs, groups, users) and prints to stdout.
with open(os.path.join(os.path.dirname(__file__), '../lib/scripter/templates/fill-ad.ps1.jinja'),'r') as fill_ad:
    template = Template(fill_ad.read())
# Build the LDAP distinguished-name suffix ("DC=cancamusa,DC=com") from the
# dotted domain name. NOTE(review): dc_path_string is never used below --
# presumably the template derives it itself; confirm before removing.
dc_domain = "cancamusa.com"
dc_path = dc_domain.split(".")
dc_path_string = "DC=" + (",DC=".join(dc_path))
# Example AD tree: nested organizational units ("ou") each carrying their own
# child OUs and security groups. This dict is the JSON form consumed by
# ADStructure.from_json below.
ad_structure = {
    "domain" : dc_domain,
    "ou" : {
        "IT-Services" : {
            "name" : "IT-Services",
            "ou" : {
                "SupportGroups" : {
                    "name" : "SupportGroups",
                    "ou" : {
                        "CostCenter" : {
                            "name" : "CostCenter",
                            "ou" : {

                            },
                            "groups" : {
                                "CostCenter-123" : {
                                    "name" : "CostCenter-123",
                                    "sam_account_name" : "CostCenter-123",
                                    "group_category" : "Security",
                                    "group_scope" : "Global",
                                    "display_name" : "CostCenter 123"
                                },
                                "CostCenter-125" : {
                                    "name" : "CostCenter-125",
                                    "sam_account_name" : "CostCenter-125",
                                    "group_category" : "Security",
                                    "group_scope" : "Global",
                                    "display_name" : "CostCenter 125"
                                }
                            }
                        }
                    },
                    "groups" : {
                        "SecurePrinting" : {
                            "name" : "SecurePrinting",
                            "sam_account_name" : "SecurePrinting",
                            "group_category" : "Security",
                            "group_scope" : "Global",
                            "display_name" : "Secure Printing Users"
                        }
                    }
                }
            },
            "groups" : {

            }
        },
        "Locations" : {
            "name" : "Locations",
            "ou" : {
                "HeadQuarter" : {
                    "name" : "HeadQuarter",
                    "ou" : {
                        "Users" : {
                            "name" : "Users",
                            "groups" : {},
                            "ou" : {}
                        }
                    },
                    "groups" : {}
                }
            },
            "groups" : {}
        }
    }
}
# Parse the dict into an ADStructure and flatten it into the group and OU
# lists the template iterates over.
struct = ADStructure.from_json(ad_structure)
ad_groups = struct.list_groups()
ad_ous = struct.list_child_ou()
# Demo user accounts to create; "ou" names the OU each user is placed in.
# Email and password values are placeholders.
user_list = [
    {
        "sam_account_name" : "samuel.garces",
        "UserPrincipalName" : "<EMAIL>",
        "Firstname" : "Samuel",
        "Lastname" : "Garces",
        "Department" : "CyberSecurity",
        "ou" : "HeadQuarter",
        "Password" : "<PASSWORD>"
    },
    {
        "sam_account_name" : "canca.musa",
        "UserPrincipalName" : "<EMAIL>",
        "Firstname" : "Canca",
        "Lastname" : "Musa",
        "Department" : "CyberSecurity",
        "ou" : "HeadQuarter",
        "Password" : "<PASSWORD>"
    }
]
# Render the provisioning script and emit it on stdout.
print(template.render(user_list=user_list, ad_groups=ad_groups, ad_ous= ad_ous))
| 1.929688 | 2 |