| max_stars_repo_path (string, length 3–269) | max_stars_repo_name (string, length 4–119) | max_stars_count (int64, 0–191k) | id (string, length 1–7) | content (string, length 6–1.05M) | score (float64, 0.23–5.13) | int_score (int64, 0–5) |
|---|---|---|---|---|---|---|
client/paddleflow/pipeline/dsl/io_types/parameter.py
|
HaozhengAN/PaddleFlow
| 0
|
12778751
|
<reponame>HaozhengAN/PaddleFlow
#!/usr/bin/env python3
"""
Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Parameter: Parameters are inputs to pipelines that are known before your pipeline is executed
from typing import Dict
from typing import Any
from paddleflow.pipeline.dsl.utils.util import validate_string_by_regex
from paddleflow.pipeline.dsl.utils.consts import VARIBLE_NAME_REGEX
from paddleflow.pipeline.dsl.utils.consts import PipelineDSLError
from paddleflow.common.exception.paddleflow_sdk_exception import PaddleFlowSDKException
TYPE_TO_STRING = {str: "string", int: "int", float: "float"}
STRING_TO_TYPE = {value: key for key, value in TYPE_TO_STRING.items()}
SUPPORT_TYPE = STRING_TO_TYPE.keys()
class Parameter(object):
""" Parameters are inputs to pipelines that are known before your pipeline is executed, Parameters let you change the behavior of a step, through configuration instead of code.
"""
def __init__(
self,
default: Any=None,
type: str=None,
):
""" create a new instance of Paramter
Args:
type (str): the type of Parameter, type check before execution
default (Any): the default value of Parameter
Raises:
            PaddleFlowSDKException: if both "type" and "default" are None, if "type" is not supported, or if "type" and "default" do not match
"""
if type and not isinstance(type, str):
self._type = self._trans_type_to_str(type)
else:
self._type = type
self._default = default
if all([self._type, self._default]):
self._default = self._validate_type(self._type, self._default)
        # if __ref is not None, it means another parameter is referenced
self.__ref = None
self.__step = None
self.__name = None
def _trans_type_to_str(self, t):
""" trans type to str
Args:
t (type): the type need to trans to str
Raises:
PaddleFlowSDKException: if t is not supported
"""
if t not in TYPE_TO_STRING:
raise PaddleFlowSDKException(PipelineDSLError, f"the type of Parameter only support {SUPPORT_TYPE}")
return TYPE_TO_STRING[t]
def _validate_type(self, t: str, value: Any=None):
""" validate the type of it and the value
"""
if not isinstance(value, STRING_TO_TYPE[t]):
try:
return STRING_TO_TYPE[t](value)
except Exception as e:
raise PaddleFlowSDKException(PipelineDSLError, "the type of Parameter is not match the default value of it")
return value
def set_base_info(self, name: str, step, ref: Any=None):
""" set the step that this paramter instances was belong to and set the name of it
Args:
            step (Step): the step that this parameter instance belongs to
            name (str): the name of the parameter
            ref (Any): the referenced parameter
Raises:
PaddleFlowSDKException: if the name is illegal
"""
self.__step = step
if not validate_string_by_regex(name, VARIBLE_NAME_REGEX):
raise PaddleFlowSDKException(PipelineDSLError, f"the name of parameter[{name}] for step[{step.name}]is illegal, " + \
f"the regex used for validation is {VARIBLE_NAME_REGEX}")
self.__name = name
self.__ref = ref
def compile(self):
""" trans to dict while be invoked at compile stage
Returns:
a dict/string which can describe it
Raises:
PaddleFlowSDKException: if the ref attribute and the [default, type] attribute exist at the same time
"""
if self.__ref and any([self.default, self.type]):
raise PaddleFlowSDKException(PipelineDSLError, f"the parameter[{self._name}] for step[{step.name}]" + \
"have both [ref] attribute and [default | type] attribute, please contact manager")
if isinstance(self.__ref, Parameter):
return self.__ref.to_template()
elif self.__ref:
return self.__ref
dicts = {}
if self._default:
dicts.update({"default": self._default})
if self._type:
dicts.update({"type": self._type})
if dicts:
return dicts
return ""
def to_template(self):
""" trans to template when downstream step ref this Parameter
Returns:
A string indicate the template of it
"""
return "{{" + self.__step.name + "." + self.__name + '}}'
@property
def step(self):
""" get the step step that this paramter instances was belong to
Returns:
a Step that it was belong to
"""
return self.__step
@property
def name(self):
""" get the name of it
Returns:
a string indicate the name of it
"""
return self.__name
@property
def type(self):
""" get the type of it
Returns:
a string indicate the type of it
"""
return self._type
@property
def default(self):
""" get the default value of it
Returns:
the default value of it
"""
return self._default
@default.setter
def default(self, value: Any):
""" set the default value of it
Args:
            value (Any): the value to set as the default
        Raises:
            PaddleFlowSDKException: if the value does not match the declared type
"""
if self._type:
self._default = self._validate_type(self._type, value)
else:
self._default = value
self.__ref = None
@property
def ref(self):
""" get refrence
Returns:
the refrence of this instance
"""
return self.__ref
def __deepcopy__(self, memo):
""" support copy.deepcopy
"""
param = Parameter(type=self.type, default=self.default)
if self.name:
param.set_base_info(step=self.step, name=self.name, ref=self.ref)
return param
def __eq__(self, other):
""" support == and !=
"""
return self.name == other.name and self.type == other.type and self.default == other.default and \
self.ref == other.ref and self.step == other.step
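if __name__ == "__main__":
    # A minimal usage sketch of the Parameter class above. It assumes the
    # paddleflow imports at the top of this module resolve; the step object
    # here is only a stand-in for a real pipeline Step with a ``name`` attribute.
    class _FakeStep:
        name = "preprocess"

    epochs = Parameter(default=10, type=int)
    epochs.set_base_info(name="epochs", step=_FakeStep())
    print(epochs.compile())      # -> {'default': 10, 'type': 'int'}
    print(epochs.to_template())  # -> {{preprocess.epochs}}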
| 2.859375
| 3
|
backend/api/models.py
|
jzrlza/plant-soil-buddy
| 0
|
12778752
|
from django.db import models
from rest_framework import serializers
from django.contrib import auth
from django.core.validators import MaxValueValidator, MinValueValidator
from datetime import datetime
class Message(models.Model):
subject = models.CharField(max_length=200)
body = models.TextField()
class MessageSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Message
fields = ('url', 'subject', 'body', 'pk')
class User(auth.models.User):
user_ptr = None
class PlantMoistLvl(models.Model):
VERY_LOW = 0
LOW = 1
MID = 2
HIGH = 3
VERY_HIGH = 4
LEVEL_CHOICES = (
(VERY_LOW, 'Very Low'),
(LOW, 'Low'),
(MID, 'Medium'),
(HIGH, 'High'),
(VERY_HIGH, 'Very High'),
)
plant_name = models.CharField(max_length=100, default='')
min_moist_lvl = models.IntegerField(
choices= LEVEL_CHOICES,
default= VERY_LOW
)
max_moist_lvl = models.IntegerField(
choices= LEVEL_CHOICES,
default= VERY_HIGH
)
def __str__(self):
lvl_choice_arr = ['Very Low', 'Low', 'Medium', 'High', 'Very High']
return self.plant_name +', Min: '+ lvl_choice_arr[self.min_moist_lvl]+', Max: '+ lvl_choice_arr[self.max_moist_lvl]
class PlantPh(models.Model):
plant_name = models.CharField(max_length=100, default='')
min_ph = models.DecimalField(max_digits=10, decimal_places=2, default=0,validators=[MaxValueValidator(14), MinValueValidator(0)])
max_ph = models.DecimalField(max_digits=10, decimal_places=2, default=14,validators=[MaxValueValidator(14), MinValueValidator(0)])
def __str__(self):
return self.plant_name +', Min: '+ str(self.min_ph)+', Max: '+ str(self.max_ph)
class PlantLifeCycle(models.Model):
ANNUAL = 0
BIENNIAL = 1
PERENNIAL = 2
CYCLE_CHOICES = (
(ANNUAL, 'Annual - life shorter than a year'),
(BIENNIAL, 'Biennial - life around a year to two years'),
        (PERENNIAL, 'Perennial - lives for more than two years')
)
plant_name = models.CharField(max_length=100, default='')
life_cycle = models.IntegerField(
choices= CYCLE_CHOICES,
default= ANNUAL
)
def __str__(self):
choice_arr = ['Annual', 'Biennial', 'Perennial']
return self.plant_name +', '+ choice_arr[self.life_cycle]+' Plant'
class Plant(models.Model):
moist_data = models.ForeignKey(PlantMoistLvl, on_delete=models.CASCADE, default=0)
ph_data = models.ForeignKey(PlantPh, on_delete=models.CASCADE, default=0)
lifecycle_data = models.ForeignKey(PlantLifeCycle, on_delete=models.CASCADE, default=0)
plant_name = models.CharField(max_length=100, default='')
def __str__(self):
return self.plant_name
class NpkPerPh(models.Model):
LOW = 1
MID = 2
HIGH = 3
LEVEL_CHOICES = (
(LOW, 'Low'),
(MID, 'Medium'),
(HIGH, 'High'),
)
min_ph = models.DecimalField(max_digits=10, decimal_places=2,validators=[MaxValueValidator(14), MinValueValidator(0)], default=0)
max_ph = models.DecimalField(max_digits=10, decimal_places=2,validators=[MaxValueValidator(14), MinValueValidator(0)], default=0)
n_lvl = models.IntegerField(
choices= LEVEL_CHOICES
)
p_lvl = models.IntegerField(
choices= LEVEL_CHOICES
)
k_lvl = models.IntegerField(
choices= LEVEL_CHOICES
)
def __str__(self):
return str(self.min_ph) +' - '+str(self.max_ph)
class SoilType(models.Model):
VERY_LOW = 0
LOW = 1
MID = 2
HIGH = 3
VERY_HIGH = 4
LEVEL_CHOICES = (
(VERY_LOW, 'Very Low'),
(LOW, 'Low'),
(MID, 'Medium'),
(HIGH, 'High'),
(VERY_HIGH, 'Very High'),
)
name = models.CharField(max_length=100)
good_for_min_moist_lvl = models.IntegerField(
choices= LEVEL_CHOICES
)
good_for_max_moist_lvl = models.IntegerField(
choices= LEVEL_CHOICES
)
def __str__(self):
return self.name
class SoilProfile(models.Model):
owner = models.ForeignKey(auth.models.User, on_delete=models.CASCADE)
name = models.CharField(max_length=100)
    location = models.CharField(max_length=256)
def __str__(self):
user = auth.models.User.objects.get(pk=self.owner.pk)
return user.username + " - " + self.name
class SensorRecord(models.Model):
soil_id = models.ForeignKey(SoilProfile, on_delete=models.CASCADE)
moist = models.DecimalField(max_digits=10, decimal_places=2, default=0)
ph = models.DecimalField(max_digits=10, decimal_places=2, default=7)
record_date = models.DateTimeField(default=datetime.now, null=True)
record_frequency_min = models.DecimalField(max_digits=10, decimal_places=2, validators=[MaxValueValidator(10080), MinValueValidator(0.1)], default=0.1)
def __str__(self):
soil = SoilProfile.objects.get(pk=self.soil_id.pk)
return soil.name + " - " + str(self.record_date)
class Recommendation(models.Model):
LOW = 1
MID = 2
HIGH = 3
LEVEL_CHOICES = (
(LOW, 'Low'),
(MID, 'Medium'),
(HIGH, 'High'),
)
soil_id = models.ForeignKey(SoilProfile, on_delete=models.CASCADE)
recco_time = models.DateTimeField(default=datetime.now, blank=True)
recco_n_lvl = models.IntegerField(
choices= LEVEL_CHOICES
)
recco_p_lvl = models.IntegerField(
choices= LEVEL_CHOICES
)
recco_k_lvl = models.IntegerField(
choices= LEVEL_CHOICES
)
def __str__(self):
soil = SoilProfile.objects.get(pk=self.soil_id.pk)
return soil.name
class RecommendedPlant(models.Model):
recco_id = models.ForeignKey(Recommendation, on_delete=models.CASCADE)
plant_name = models.CharField(max_length=100, default='')
soil_type_name = models.CharField(max_length=100, default='')
def __str__(self):
return self.plant_name + ", " + self.soil_type_name
| 2.0625
| 2
|
recipe/run_test.py
|
jschueller/suitesparse-feedstock
| 1
|
12778753
|
import os
import re
import sys
from subprocess import check_output
def check_install_name(name):
"""Verify that the install_name is correct on mac"""
libname = "lib" + name + ".dylib"
path = os.path.join(sys.prefix, "lib", libname)
otool = check_output(["otool", "-L", path]).decode("utf8")
self_line = otool.splitlines()[1]
install_name = self_line.strip().split()[0]
pat = "@rpath/lib{}\.\d+\.dylib".format(name)
assert re.match(pat, install_name), "{} != {}".format(install_name, pat)
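# For reference, ``otool -L`` output for a correctly built library looks roughly
# like this (illustrative only; exact paths and version numbers vary):
#
#   $PREFIX/lib/libamd.dylib:
#       @rpath/libamd.2.dylib (compatibility version ..., current version ...)
#       /usr/lib/libSystem.B.dylib (...)
#
# The second line is the library's own install name, which the assertion above
# matches against the expected @rpath pattern.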
if sys.platform == "darwin":
for lib in (
"amd",
"btf",
"camd",
"ccolamd",
"cholmod",
"colamd",
"cxsparse",
"klu",
"ldl",
"rbio",
"spqr",
"suitesparseconfig",
"umfpack",
):
check_install_name(lib)
| 2.375
| 2
|
neuralnetwork.py
|
entrepreneur07/seassignment
| 0
|
12778754
|
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD
def createModel(totalPlayers):
cp =[]
for i in range(totalPlayers):
model = Sequential()
model.add(Dense(input_dim=3,units=7))
model.add(Activation("sigmoid"))
model.add(Dense(units=1))
model.add(Activation("sigmoid"))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mse',optimizer=sgd, metrics=['accuracy'])
cp.append(model)
return cp
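if __name__ == "__main__":
    # A rough usage sketch with dummy data. Shapes follow the layer definitions
    # above (3 input features, 1 sigmoid output per player model); it assumes the
    # keras/numpy versions targeted by the imports at the top are installed.
    import numpy as np

    models = createModel(totalPlayers=2)
    X = np.random.rand(8, 3)
    y = np.random.randint(0, 2, size=(8, 1))
    models[0].fit(X, y, epochs=1, verbose=0)
    print(models[0].predict(X[:2]))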
| 2.65625
| 3
|
backend/histocat/api/analysis/controller.py
|
BodenmillerGroup/histocat-web
| 4
|
12778755
|
<reponame>BodenmillerGroup/histocat-web<gh_stars>1-10
import logging
import os
from typing import Sequence
import cv2
import numpy as np
import scanpy as sc
from fastapi import APIRouter, Depends, HTTPException
from fastapi.responses import ORJSONResponse
from imctools.io.ometiff.ometiffparser import OmeTiffParser
from skimage.measure import regionprops
from sklearn.ensemble import RandomForestClassifier
from sqlalchemy.orm import Session
from starlette import status
from starlette.requests import Request
from histocat.api.db import get_db
from histocat.api.security import get_active_member
from histocat.core.acquisition import service as acquisition_service
from histocat.core.analysis.dto import (
ClassifyCellsDto,
ClassifyCellsSubmissionDto,
RegionChannelStatsDto,
RegionStatsSubmissionDto,
)
from histocat.core.constants import ANNDATA_FILE_EXTENSION
from histocat.core.dataset import service as dataset_service
from histocat.core.member.models import MemberModel
from histocat.core.result import service as result_service
logger = logging.getLogger(__name__)
router = APIRouter()
@router.post("/groups/{group_id}/analysis/region", response_model=Sequence[RegionChannelStatsDto])
async def calculate_region_stats(
params: RegionStatsSubmissionDto,
request: Request,
member: MemberModel = Depends(get_active_member),
db: Session = Depends(get_db),
):
"""
Calculate region's statistics
"""
acquisition = acquisition_service.get_by_id(db, params.acquisition_id)
parser = OmeTiffParser(acquisition.location)
acq = parser.get_acquisition_data()
mask = None
contour = np.array(params.region_polygon).astype(int)
content = []
for metal in acq.channel_names:
channel_img = acq.get_image_by_name(metal)
if mask is None:
mask = np.zeros(channel_img.shape, np.uint8)
mask = cv2.drawContours(mask, [contour], 0, 255, -1)
props = regionprops(mask, intensity_image=channel_img, cache=True, coordinates=None)
props = props[0]
content.append(
{
"metal": metal,
"min": float("{0:.3f}".format(props.min_intensity)),
"max": float("{0:.3f}".format(props.max_intensity)),
"mean": float("{0:.3f}".format(props.mean_intensity)),
}
)
return ORJSONResponse(content)
@router.post("/groups/{group_id}/analysis/classify", response_model=ClassifyCellsDto)
async def classify_cells(
group_id: int,
params: ClassifyCellsSubmissionDto,
member: MemberModel = Depends(get_active_member),
db: Session = Depends(get_db),
):
"""Classify cells."""
if params.result_id:
result = result_service.get(db, id=params.result_id)
location = os.path.join(result.location, f"output{ANNDATA_FILE_EXTENSION}")
adata = sc.read_h5ad(location)
else:
dataset = dataset_service.get(db, id=params.dataset_id)
adata = sc.read_h5ad(dataset.cell_file_location())
cell_ids = []
cell_classes = []
for annotation in params.annotations:
ann_cell_ids = annotation["cellIds"]
cell_ids.extend(ann_cell_ids)
ann_cell_classes = [annotation["cellClass"]] * len(ann_cell_ids)
cell_classes.extend(ann_cell_classes)
# Convert cell ids to strings
cell_ids = list(map(str, cell_ids))
# Map cell ids to cell classes
cells = dict(zip(cell_ids, cell_classes))
df = adata.to_df()
df_train = df[df.index.isin(cell_ids)].copy()
if df_train.empty:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Train dataset is empty",
)
df_test = df[~df.index.isin(cell_ids)].copy()
if df_test.empty:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Test dataset is empty",
)
df_train["cellClass"] = df_train.index
df_train["cellClass"].replace(cells, inplace=True)
# Create a Gaussian Classifier
clf = RandomForestClassifier(n_estimators=params.n_estimators)
# Train the model using the training sets y_pred=clf.predict(X_test)
clf.fit(df_train[params.channels], df_train["cellClass"])
y_pred = clf.predict(df_test[params.channels])
y_pred_proba = clf.predict_proba(df_test[params.channels])
df_test["cellClass"] = y_pred
# Assign max class probability to a column
df_test["Prob"] = [max(prob) for prob in y_pred_proba]
df_test["Threshold"] = [params.thresholds[pred] for pred in y_pred]
# Filter cells by probability thresholds
df_test = df_test[df_test["Prob"] >= df_test["Threshold"]]
# Combine train and test dataframes together
result_df = df_test.append(df_train)
annotations = []
for cell_class in result_df["cellClass"].unique():
cellIds = result_df[result_df["cellClass"] == cell_class].index.to_list()
annotation = {"cellClass": cell_class, "visible": True, "cellIds": cellIds}
annotations.append(annotation)
content = {
"cellClasses": params.cell_classes,
"annotations": annotations,
}
return ORJSONResponse(content)
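if __name__ == "__main__":
    # Standalone sketch of the train/predict/threshold flow used in classify_cells,
    # run on a tiny made-up table. Column and class names are illustrative only,
    # and running this directly still requires the module's imports to resolve.
    import pandas as pd

    toy = pd.DataFrame(
        {"marker_a": [0.1, 0.2, 0.9, 0.8, 0.15, 0.85],
         "marker_b": [0.9, 0.8, 0.1, 0.2, 0.95, 0.05]},
        index=[str(i) for i in range(6)],
    )
    annotated = {"0": "immune", "1": "immune", "2": "tumor", "3": "tumor"}
    df_train = toy[toy.index.isin(list(annotated))].copy()
    df_test = toy[~toy.index.isin(list(annotated))].copy()
    df_train["cellClass"] = df_train.index.map(annotated)

    clf = RandomForestClassifier(n_estimators=10)
    clf.fit(df_train[["marker_a", "marker_b"]], df_train["cellClass"])
    preds = clf.predict(df_test[["marker_a", "marker_b"]])
    probas = clf.predict_proba(df_test[["marker_a", "marker_b"]])
    # Keep only predictions whose top class probability clears a threshold,
    # mirroring the per-class thresholds applied above.
    keep = [p.max() >= 0.6 for p in probas]
    print([(idx, pred) for idx, pred, ok in zip(df_test.index, preds, keep) if ok])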
| 2.171875
| 2
|
fabfile.py
|
stianjensen/wikipendium.no
| 19
|
12778756
|
<filename>fabfile.py
import time
import getpass
from fabric.api import *
from fabric.contrib.console import confirm
import subprocess
class Site(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def run(self, cmd):
with cd(self.dir):
sudo(cmd, user=self.user_id)
def backup(self):
self.run('venv/bin/python manage.py backup-to-s3')
def deploy(self):
self.git_pull()
self.update_packages()
self.run('venv/bin/python manage.py migrate')
self.run('venv/bin/python manage.py collectstatic --noinput')
self.restart()
def git_pull(self):
# .pyc files can create ghost behavior when .py files are deleted...
self.run("find . -name '*.pyc' -delete")
self.run("git fetch origin && git reset --hard origin/master")
def git_tag(self):
self.run("git tag | sort -g | tail -n 1 | sed s/$/+1/ | bc | xargs git tag")
self.run("git push --tags && git push")
def rebuild_index(self):
self.run("venv/bin/python manage.py rebuild_index")
def update_packages(self):
self.run("./venv/bin/pip install -r requirements.txt")
def restart(self):
#header("Running: Restart server script: %s" % self.gunicorn)
#run("sudo /etc/init.d/%s restart" % self.gunicorn)
self.run("touch reload")
PROD = Site(
dir='/home/wikipendium-web/wikipendium.no/',
user_id='wikipendium-web'
)
env.hosts = ['wikipendium.no']
@task
def clone_prod_data():
"""
Download production data (database and uploaded files) and insert locally
"""
env.user = prompt("Username on prod server:", default=getpass.getuser())
dump_file = str(time.time()) + ".json"
# Ignore errors on these next steps, so that we are sure we clean up no matter what
with settings(warn_only=True):
# Dump the database to a file...
PROD.run('source venv/bin/activate ' +
'&& nice python manage.py dumpdb > ' +
dump_file)
# clean password hashes
PROD.run('sed -i \'s/"password": "[^"]*"/"password": ""/\' ' + dump_file)
# Then download that file
get(PROD.dir + dump_file, dump_file)
# Replace this db with the contents of the dump
local('python manage.py restoredb < ' + dump_file)
# ... then cleanup the dump files
PROD.run('rm ' + dump_file)
local('rm ' + dump_file)
@task
def deploy():
"""
"""
# mac-only command, just for fun
try:
subprocess.call(['say', '"Ship! Ship! Ship!"'])
except:
pass
print "ship! ship! ship!"
env.user = prompt("Username on prod server:", default=getpass.getuser())
should_tag = tag()
PROD.backup()
PROD.deploy()
if should_tag:
PROD.git_tag()
def header(text):
print ("#" * 45) + "\n# %s\n" % text + ("#" * 45)
@task
def restart():
PROD.restart()
@task
def rebuild_index():
PROD.rebuild_index()
@task
def backup():
"""
Dump a full backup to S3.
"""
env.user = prompt("Username on prod server:", default=getpass.getuser())
PROD.backup()
def tag():
if not confirm("Give new tag for this deployment?"):
if confirm("Are you sure?", default=False):
return False
else:
            return tag()
else:
return True
| 2.234375
| 2
|
remote_image/example_forms.py
|
LucasCTN/django-remote-image
| 1
|
12778757
|
from django import forms
from django.forms import ModelForm
from .fields import RemoteImageField
class ExampleForm(forms.Form):
remote_image = RemoteImageField(required=True)
class ExampleWhitelistedPNGForm(forms.Form):
remote_image = RemoteImageField(required=True, ext_whitelist=['png'])
class ExampleBlacklistedPNGForm(forms.Form):
remote_image = RemoteImageField(required=True, ext_blacklist=['png'])
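# Rough usage sketch. The URL is a placeholder and the exact way RemoteImageField
# accepts its value is an assumption here, not documented API:
#
#   form = ExampleWhitelistedPNGForm(data={"remote_image": "https://example.com/logo.png"})
#   form.is_valid()  # fetches the image and checks it against the extension whitelist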
| 2.234375
| 2
|
test_db.py
|
timothyhalim/Render-Manager
| 0
|
12778758
|
import os
from db import Controller
db_path = os.path.join( __file__, "..", "RenderManager.db" )
Controller.init(db_path)
# Create Job
job = Controller.create_job(
r"J:\UCG\Episodes\Scenes\EP100\SH002.00A\UCG_EP100_SH002.00A_CMP.nk",
"WRITE_IMG",
r"J:\UCG\UCG_Nuke10.bat",
"renderNuke.py",
frames=[i for i in range(100)]
)
# Query Job
JobList = Controller.Job.select()
for job in JobList:
print("Job", job.code)
print("Status", job.status().name)
print("Left", len([frame.number for frame in job.frame_left()]))
print("Avg", job.avg_time())
print("Eta", job.eta())
print("Clients", job.clients())
print()
# for frame in job.frames():
# print("Frame", frame.number)
| 2.328125
| 2
|
bus/solution.py
|
thegilm/bcn-feb-2019-prework
| 0
|
12778759
|
<gh_stars>0
numberOfStops = 0
stops = [(10,0), (2,4), (5,2)]
for stop in stops:
numberOfStops += 1
print("There are "+str(numberOfStops)+" stops")
pagsPerStop = []
currentPassengers = 0
for stop in stops:
currentPassengers = currentPassengers + stop[0] - stop[1]
pagsPerStop.append(currentPassengers)
#print("Passenger count per stop "+pagsPerStop)
print(pagsPerStop)
currentMax = 0
for stop in pagsPerStop:
if stop > currentMax:
currentMax = stop
print("Max occupation was "+str(currentMax))
average = 0
for stop in pagsPerStop:
average += stop
print("Average accupation is "+str(average/len(pagsPerStop)))
| 3.421875
| 3
|
create_index_export_js.py
|
lyf-coder/nodejs-tool
| 0
|
12778760
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Walk the given directory for .js files and generate an index.js that re-exports them all;
# note that modules with duplicate base names will collide.
# collect the files under the directory
import os
file_name_list = []
file_rel_path_dict = {}
def handle_dir(path):
if os.path.isdir(path):
dir_files = os.listdir(path)
for dir_file in dir_files:
handle_dir(os.path.join(path, dir_file))
else:
        # record the file's relative path, keyed by its base name
        file_name = os.path.splitext(os.path.basename(path))[0]  # note: rstrip('.js') would strip characters, not the suffix
file_rel_path_dict[file_name] = os.path.relpath(path, os.getcwd())
        # record the base file name in the list
file_name_list.append(file_name)
write_lines = []
def create_index_js_file():
    # generate the require statements
for file_name in file_name_list:
write_lines.append('const {} = require(\'./{}\');\n'.format(file_name,file_rel_path_dict[file_name]))
write_lines.append('\n')
write_lines.append('module.exports = {\n')
    # generate the exports
write_lines.append(','.join(file_name_list))
write_lines.append('\n}')
fo = open(os.path.join(os.getcwd(), "index_new.js"), "w")
fo.writelines(write_lines)
    # close the file
fo.close()
dir_path = input('please input dir path: ')
# dir_path = './lib'
dir_path = os.path.abspath(dir_path)
if os.path.isdir(dir_path):
handle_dir(dir_path)
create_index_js_file()
else:
print('please input dir!')
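# Usage note: run this script, enter a directory (e.g. ./lib) at the prompt, and
# index_new.js is written to the current working directory.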
| 2.828125
| 3
|
shawty/shawtier/admin.py
|
SimeonAleksov/shawty
| 0
|
12778761
|
from django.contrib import admin
from .models import URL
admin.site.register(URL)
| 1.304688
| 1
|
pyradur/__init__.py
|
JPEWdev/pyradur
| 0
|
12778762
|
<reponame>JPEWdev/pyradur<gh_stars>0
# MIT License
#
# Copyright (c) 2018-2019 Garmin International or its subsidiaries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .client import Client
import logging
import os
import weakref
logger = logging.getLogger('pyradur')
class Dict(object):
_shared_clients = []
@classmethod
def _cleanup_client(cls, ref):
cls._shared_clients.remove(ref)
def __init__(self, sock_path, var, *, use_cache=True, share_connection=True):
if share_connection:
for client_ref in self._shared_clients:
client = client_ref()
try:
if client is not None and os.path.samefile(sock_path, client.sock_path):
logger.debug('Sharing existing client %s', id(client))
break
except FileNotFoundError:
pass
else:
client = Client(sock_path, use_cache)
self._shared_clients.append(weakref.ref(client, self._cleanup_client))
logger.debug('New shared client %s', id(client))
else:
client = Client(sock_path, use_cache)
logger.debug('New non-shared client %s', id(client))
self.client = client
self.var = var
self.client.validate_var(var)
def close(self):
self.client = None
def invalidate(self, key):
return self.client.invalidate(self.var, key)
def invalidate_all(self):
return self.client.invalidate_all()
def is_cached(self, key):
return self.client.is_cached(self.var, key)
def __getitem__(self, key):
return self.client.get(self.var, key)
def __setitem__(self, key, value):
self.client.set(self.var, key, value)
def __delitem__(self, key):
self.client.delete(self.var, key)
def __contains__(self, key):
return self.client.contains(self.var, key)
def get(self, key, default=None):
return self.client.getdefault(self.var, key, default)
def set(self, key, value):
return self.client.set(self.var, key, value)
def setdefault(self, key, default=None):
return self.client.setdefault(self.var, key, default)
def sync(self):
self.client.sync()
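if __name__ == "__main__":
    # Minimal usage sketch: Dict is a dict-like proxy over a pyradur Client. This
    # assumes a pyradur server is already listening on the given socket path and
    # that "myvar" is a variable it knows about (both are placeholders).
    d = Dict('/tmp/pyradur.sock', 'myvar')
    d['greeting'] = 'hello'
    print(d.get('greeting'))
    d.close()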
| 1.851563
| 2
|
store/models.py
|
mahinm20/techcastle
| 1
|
12778763
|
from django.db import models
from django.contrib.auth.models import User
class Customer(models.Model):
user = models.OneToOneField(User,null=True,blank=True,on_delete=models.CASCADE)
name= models.CharField(max_length=200,null=True)
email = models.CharField(max_length=200)
def __str__(self):
return self.name
class Mobile(models.Model):
name = models.CharField(max_length=200)
price = models.FloatField()
digital = models.BooleanField(default=False,null=True,blank=True)
image = models.ImageField(null=True, blank=True)
def __str__(self):
return self.name
@property
def imageURL(self):
try:
url = self.image.url
except:
url = ''
return url
# class Laptop(models.Model):
# name = models.CharField(max_length=200)
# price = models.FloatField()
# digital = models.BooleanField(default=False,null=True,blank=True)
# image = models.ImageField(null=True, blank=True)
# def __str__(self):
# return self.name
# @property
# def imageURL(self):
# try:
# url = self.image.url
# except:
# url = ''
# return url
# class Accessories(models.Model):
# name = models.CharField(max_length=200)
# price = models.FloatField()
# digital = models.BooleanField(default=False,null=True,blank=True)
# image = models.ImageField(null=True, blank=True)
# def __str__(self):
# return self.name
# @property
# def imageURL(self):
# try:
# url = self.image.url
# except:
# url = ''
# return url
class Order(models.Model):
customer = models.ForeignKey(Customer,on_delete=models.CASCADE,blank=True,null=True)
date_ordered = models.DateTimeField(auto_now_add=True)
completed = models.BooleanField(default=False,null=True,blank=False)
transaction_id = models.CharField(max_length=200)
def __str__(self):
return str(self.id)
@property
def get_cart_total(self):
        orderitems = self.orderitem_set.all()  # only the items that belong to this order
# total = sum([item.get_laptop_total for item in orderitems]) + sum([item.get_acc_total for item in orderitems])
total = sum([item.get_mobile_total for item in orderitems])
return total
@property
def get_cart_item(self):
        orderitems = self.orderitem_set.all()
total = sum([i.quantity for i in orderitems])
return total
@property
def shipping(self):
shipping = False
orderitems = self.orderitem_set.all()
for i in orderitems:
if i.product_m.digital == False:
shipping = True
return shipping
class OrderItem(models.Model):
product_m = models.ForeignKey(Mobile,on_delete=models.SET_NULL,null=True)
# product_l = models.ForeignKey(Laptop,on_delete=models.SET_NULL,null=True)
# product_a = models.ForeignKey(Accessories,on_delete=models.SET_NULL,null=True)
order = models.ForeignKey(Order,on_delete=models.SET_NULL,null=True,blank=True)
quantity = models.IntegerField(default=0,null=True,blank=True)
date_added = models.DateTimeField(auto_now_add=True)
# def __str__(self):
# if (self.product_l__name is not None):
# return str(self.product_l__name)
# elif (self.product_a__name is not None):
# return str(self.product_a__name)
# elif (self.product_m__name is not None):
# return str(self.product_m__name)
# @property
# def get_laptop_total(self):
# if self.product_l:
# total_l = self.product_l.price * self.quantity
# return total_l
# else:
# return 0
# @property
# def get_acc_total(self):
# if self.product_a:
# total_a = self.product_a.price * self.quantity
# return total_a
# else:
# return 0
@property
def get_mobile_total(self):
if self.product_m:
total_m = self.product_m.price * self.quantity
return total_m
else:
return 0
class ShippingAddress(models.Model):
customer = models.ForeignKey(Customer,on_delete=models.SET_NULL,null=True)
order = models.ForeignKey(Order,on_delete=models.SET_NULL,null=True)
address = models.CharField(max_length=200,null=False)
city = models.CharField(max_length=200,null=False)
zipcode = models.CharField(max_length=200,null=False)
state = models.CharField(max_length=200,null=False)
date_added = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.address
| 2.34375
| 2
|
MakeAutoContainTrials/TopoModels/model.py
|
kasmith/cbmm-project-christmas
| 0
|
12778764
|
<gh_stars>0
from constants import *
from config import *
from parse_walls import trial_segmentation
from scene_graph import SceneGraph
from physicsTable import *
from physicsTable.constants import *
import geometry
import numpy as np
# Helpers for drawing if this is allowed
if USE_PG:
import pygame as pg
from pygame.constants import KEYDOWN
def wait_4_kp(draw_fn, hz=20):
clk = pg.time.Clock()
while True:
draw_fn()
pg.display.flip()
for e in pg.event.get():
if e.type == KEYDOWN:
return
clk.tick(hz)
class TopologyModel(object):
# Initializes all of the decompositions and graphs
# Note: requires the goal in the container to be red!
def __init__(self, trial, use_all_triangulations=True, acd_convexity_by_ball_rad = [1., 2., 4.]):
# Load in the trial and break down the walls
self._trial = trial
self._brad = self._trial.ball[2]
self._bpos = self._trial.ball[0]
self._goal_ul = None
self._goal_lr = None
for g in self._trial.goals:
if g[2] == REDGOAL:
self._goal_ul = g[0]
self._goal_lr = g[1]
if self._goal_ul is None:
raise Exception("No appropriate goal found in the trial")
self._acd_convexities = acd_convexity_by_ball_rad
self._ohull, self._islands = trial_segmentation(self._trial)
# Ear clipping triangulation
self._tris, self._wound_hull = geometry.ear_clip_with_holes(self._ohull, self._islands)
self._has_all_tris = False
if use_all_triangulations:
self.make_all_triangulations()
else:
self._all_tris = [self._tris]
self._tri_graphs = [SceneGraph(tri) for tri in self._all_tris]
self._tri_geosizes = [self._get_max_geodesic(g) for g in self._tri_graphs]
self._tri_goaldists = [self._get_min_goaldist(g) for g in self._tri_graphs]
# Approximate convex decomposition
self._acd = [geometry.approximate_convex_decomposition([self._ohull] + self._islands, cvx*self._brad) \
for cvx in self._acd_convexities]
self._acd_graph = [SceneGraph(acd) for acd in self._acd]
self._acd_geosize = [self._get_max_geodesic(g) for g in self._acd_graph]
self._acd_goaldist = [self._get_min_goaldist(g) for g in self._acd_graph]
def make_all_triangulations(self):
self._all_tris = []
if not self._has_all_tris:
for i in range(len(self._wound_hull)):
newtri = geometry.ear_clip(self._wound_hull[i:] + self._wound_hull[:i])
self._all_tris.append(newtri)
self._has_all_tris = True
def _get_max_geodesic(self, graph):
return graph.maximum_geodesic_distance(self._bpos)
def _get_min_goaldist(self, graph):
# Find where the goal is in relation to graph parts
goal_pts = [self._goal_ul]
goal_ur = [self._goal_lr[0], self._goal_ul[1]]
goal_ll = [self._goal_ul[0], self._goal_lr[1]]
if not all([graph.same_segment(g, goal_ur) for g in goal_pts]):
goal_pts.append(goal_ur)
if not all([graph.same_segment(g, goal_ll) for g in goal_pts]):
goal_pts.append(goal_ll)
if not all([graph.same_segment(g, self._goal_lr) for g in goal_pts]):
goal_pts.append(self._goal_lr)
dists = [graph.point_2_point_distance(g, self._bpos) for g in goal_pts]
return min(dists)
def get_acd_stats(self):
rdict = {}
for cvx, gs, gd, dtg in zip(self._acd_convexities, self._acd_graph, self._acd_geosize, self._acd_goaldist):
sz = gs.size
rdict[cvx] = {'size': sz,
'max_geodesic_dist': gd,
'min_dist_to_goal': dtg}
return rdict
acd = property(get_acd_stats)
def get_tri_stats(self):
return {'size': np.mean([g.size for g in self._tri_graphs]),
'max_geodesic_dist': np.mean(self._tri_geosizes),
'min_dist_to_goal': np.mean(self._tri_goaldists)}
triangulation = property(get_tri_stats)
def _draw_edge_and_point(self, polylist, edge_col, pt_col):
assert USE_PG, "PyGame not loaded!"
tb = self._trial.makeTable()
s = tb.draw()
for poly in polylist:
pg.draw.polygon(s, edge_col, poly, 2)
for pt in poly:
pg.draw.circle(s, pt_col, pt, 4)
return s
def draw_acd(self, acd_idx = 0, edge_col = (0,255,0), pt_col = (255,0,0)):
wait_4_kp(lambda: self._draw_edge_and_point(self._acd[acd_idx], edge_col, pt_col))
def draw_triangles(self, tri_idx = 0, edge_col = (0,255,0), pt_col = (255,0,0)):
wait_4_kp(lambda: self._draw_edge_and_point(self._all_tris[tri_idx], edge_col, pt_col))
def draw_all_triangles(self, edge_col = (0,255,0), pt_col = (255,0,0)):
for i in range(len(self._all_tris)):
self.draw_triangles(i, edge_col, pt_col)
| 2.203125
| 2
|
torch_geometric/transforms/spherical.py
|
cysmnl/geometric_cognition
| 62
|
12778765
|
from math import pi as PI
import torch
class Spherical(object):
r"""Saves the globally normalized three-dimensional spatial relation of
linked nodes as spherical coordinates (mapped to the fixed interval
:math:`[0, 1]`) in its edge attributes.
Args:
cat (bool, optional): Concat pseudo-coordinates to edge attributes
instead of replacing them. (default: :obj:`True`)
.. testsetup::
import torch
from torch_geometric.data import Data
.. testcode::
from torch_geometric.transforms import Spherical
pos = torch.tensor([[0, 0, 0], [0, 1, 1]], dtype=torch.float)
edge_index = torch.tensor([[0, 1], [1, 0]])
data = Data(edge_index=edge_index, pos=pos)
data = Spherical()(data)
print(data.edge_attr)
.. testoutput::
tensor([[1.0000, 0.2500, 0.0000],
[1.0000, 0.7500, 1.0000]])
"""
def __init__(self, cat=True):
self.cat = cat
def __call__(self, data):
(row, col), pos, pseudo = data.edge_index, data.pos, data.edge_attr
assert pos.dim() == 2 and pos.size(1) == 3
cart = pos[col] - pos[row]
rho = torch.norm(cart, p=2, dim=-1)
rho = rho / rho.max()
theta = torch.atan2(cart[..., 1], cart[..., 0]) / (2 * PI)
theta += (theta < 0).type_as(theta)
phi = torch.acos(cart[..., 2] / rho) / PI
spher = torch.stack([rho, theta, phi], dim=1)
if pseudo is not None and self.cat:
pseudo = pseudo.view(-1, 1) if pseudo.dim() == 1 else pseudo
data.edge_attr = torch.cat([pseudo, spher.type_as(pos)], dim=-1)
else:
data.edge_attr = spher
return data
def __repr__(self):
return '{}(cat={})'.format(self.__class__.__name__, self.cat)
| 2.734375
| 3
|
Cartwheel/cartwheel-3d/Python/tests/test_ArmaProcess.py
|
MontyThibault/centre-of-mass-awareness
| 0
|
12778766
|
import unittest
from ArmaProcess import ArmaProcess
import time
class ArmaProcessTestCase(unittest.TestCase):
def testGenSamples(self):
params = [3.75162180e-04, 1.70361201e+00, -7.30441228e-01, -6.22795336e-01, 3.05330848e-01]
fps = 100
ap = ArmaProcess(params[0], params[1:3], params[3:5], fps)
print(ap.generate_n(20))
for _ in range(3):
time.sleep(0.04)
print(ap.generate_frame())
| 2.53125
| 3
|
tests/test_skiprows.py
|
timcera/tstoolbox
| 5
|
12778767
|
# -*- coding: utf-8 -*-
from unittest import TestCase
import pandas
from pandas.testing import assert_frame_equal
from tstoolbox import tstoolbox, tsutils
class TestRead(TestCase):
def setUp(self):
dr = pandas.date_range("2000-01-01", periods=2, freq="D")
ts = pandas.Series([4.5, 4.6], index=dr)
self.read_direct = pandas.DataFrame(ts, columns=["Value"])
self.read_direct.index.name = "Datetime"
self.read_direct = tsutils.memory_optimize(self.read_direct)
self.read_cli = b"""Datetime,Value
2000-01-01,4.5
2000-01-02,4.6
"""
dr = pandas.date_range("2000-01-01", periods=5, freq="D")
ts = pandas.Series([4.5, 4.6, 4.7, 4.8, 4.9], index=dr)
self.read_direct_sparse = pandas.DataFrame(ts, columns=["Value"])
self.read_direct_sparse.index.name = "Datetime"
self.read_direct_sparse = tsutils.memory_optimize(self.read_direct_sparse)
self.read_cli_sparse = b"""Datetime,Value
2000-01-01,4.5
2000-01-02,4.6
2000-01-03,4.7
2000-01-04,4.8
2000-01-05,4.9
"""
def test_read_direct(self):
"""Test read API for single column - daily."""
out = tstoolbox.read("tests/data_simple_extra_rows.csv", skiprows=2)
assert_frame_equal(out, self.read_direct)
def test_read_direct_sparse(self):
"""Test read API for single column - daily."""
out = tstoolbox.read("tests/data_simple_extra_rows_sparse.csv", skiprows=[4, 6])
assert_frame_equal(out, self.read_direct_sparse)
| 2.484375
| 2
|
TestBenchGenerator/tbgen.py
|
anuragnatoo/ELE301P
| 1
|
12778768
|
<reponame>anuragnatoo/ELE301P
import os
import re
import sys
def decimalToBinary(x, bits, binary):
for i in range(bits-1,-1,-1):
k=x>>i
if k&1:
binary.append("1")
else:
binary.append("0")
vinput=sys.argv[1]
vfilename = vinput
tbfilename=vfilename[:-2] + "_tb.v"
vcdfilename=vfilename[:-2] + ".vcd"
print(tbfilename)
vfile = open(vfilename, "r")
tbfile = open(tbfilename, "w+")
tbfile.write("`include \""+vfilename+"\"\n")
tbfile.write("module top;\n")
variableName = []
variableSize = []
for x in vfile:
if re.findall('^\s*input',x):
line=x
if not x[len(x)-2]==';':
while 1:
l=vfile.readline()
line=line+l
if line[len(line)-2]==';':
break
tbfile.write(line.replace("input", "reg"))
#print(re.findall('^\s*input \[(.*)\]',line))
print(line)
print(1)
invar=[]
line=line.replace('\n','')
print(line)
print(2)
line=line.replace(' ','')
print(line)
print(3)
line=line.replace(' ','')
print(line)
print(4)
if re.findall('^\s*input \[(.*)\]',line):
yin= re.findall('^\s*input \[(.*)\]',line)
zin= re.findall('^\s*input \[.*\] (.*);',line)
print(yin)
print(zin)
inbit = yin[0].split(':')
#print(inbit)
size = int(inbit[0])-int(inbit[1]) +1
invar = zin[0].split(',')
#print("Input variables-",invar)
for l in range(len(invar)):
variableName.append(invar[l])
variableSize.append(size)
if re.findall('^\s*input ([^\[])',line):
pin= re.findall('^\s*input (.*);*',line)
print(pin)
print(1)
pin=pin[0].replace(';','')
pin=pin.replace(' ','')
pin=pin.split(",")
print(pin)
print(2)
invar=pin
print(invar)
for i in range(len(pin)):
variableName.append(pin[i])
variableSize.append(1)
if re.findall('^\s*output',x):
line=x
print(line)
if not x[len(x)-2]==';':
while 1:
l=vfile.readline()
line=line+l
if line[len(line)-2]==';':
break
tbfile.write(line.replace("output","wire"))
if re.findall('^\s*module',x):
line=x
if not x[len(x)-2]==';':
while 1:
l=vfile.readline()
line=line+l
if line[len(line)-2]==';':
break
line=line.replace('\n','')
modname = line.replace("module ", "")
monitorvar = line.replace("module ","") #---------->
if re.findall('^.*\((.*)\);',monitorvar):
montestvar = re.findall('^.*\((.*)\);',monitorvar)
m=montestvar[0].split(',')
modname=modname.replace("(", " m0(")
tbfile.write(modname.replace(' ',''))
tbfile.write("\ninitial begin\n")
totalSize=0
for i in range(len(variableSize)):
totalSize=totalSize+variableSize[i]
# print(totalSize)
maxi = input("Enter maximum number of test cases")
if pow(2,totalSize) < int(maxi):
maxr= pow(2,totalSize)
else:
maxr= int(maxi)
for i in range(maxr):
index=0
if i==0:
tbfile.write("\t")
else:
tbfile.write("\t#5 ")
binaryArray=[]
binaryArray.clear()
decimalToBinary(i, totalSize, binaryArray)
#print(binaryArray)
for j in range(len(variableName)):
check=variableSize[j]
tbfile.write(variableName[j]+"="+str(variableSize[j])+"'b")
while check>0:
tbfile.write(binaryArray[index])
index=index+1
check=check-1
tbfile.write(" ; ")
tbfile.write("\n")
tbfile.write("end\n")
tbfile.write("\ninitial begin\n")
tbfile.write("$monitor ($time, \": ")
for i in range(len(m)):
tbfile.write(m[i]+"=%b ")
tbfile.write("\"")
for i in range(len(m)):
tbfile.write(","+m[i])
tbfile.write(");\n")
tbfile.write("$dumpfile(\""+vcdfilename+"\");\n$dumpvars;\n")
tbfile.write("end\n")
tbfile.write("\nendmodule")
vfile.close()
tbfile.close()
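# Usage note: ``python tbgen.py <design>.v`` writes <design>_tb.v next to the
# source file; the generated testbench dumps waveforms to <design>.vcd.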
| 2.609375
| 3
|
src/user.py
|
osuuster/Ward
| 0
|
12778769
|
class User:
"""User info"""
def __init__(self, email, first, last, device):
self.email = email
self.first = first.lower()
self.last = last.lower()
self.device = device.lower()
def fullname(self):
return ('{} {}'.format(self.first, self.last).title())
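# Usage sketch:
#   u = User("ada@example.com", "Ada", "Lovelace", "MacBook")
#   u.fullname()  # -> "Ada Lovelace"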
| 3.40625
| 3
|
app.py
|
janZub-AI/flask-video-streaming
| 0
|
12778770
|
<gh_stars>0
#!/usr/bin/env python
from importlib import import_module
import os
from flask import Flask, render_template, Response
import imagiz
import cv2
from EmotionDetection.face_detection import FaceClass
app = Flask(__name__)
server=imagiz.TCP_Server(8095)
server.start()
face_class = FaceClass()
@app.route('/')
def index():
"""Video streaming home page."""
return render_template('index.html')
def gen():
# import camera driver
# Raspberry Pi camera module (requires picamera package)
# from camera_pi import Camera
if os.environ.get('CAMERA'):
Camera = import_module('camera_' + os.environ['CAMERA']).Camera
else:
from camera import Camera
camera = Camera()
"""Video streaming generator function."""
while True:
frame = camera.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
def gen_streamed():
while True:
try:
message=server.receive()
            if message.image is not None:
frame = cv2.imdecode(message.image,cv2.IMREAD_UNCHANGED)
with_faces = face_class.find_faces(frame)
img_str = cv2.imencode('.jpg', with_faces)[1].tostring()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + img_str + b'\r\n')
except:
pass
@app.route('/video_feed')
def video_feed():
"""Video streaming route. Put this in the src attribute of an img tag."""
return Response(gen_streamed(),
mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
app.run(host='0.0.0.0', threaded=True)
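# Usage note: /video_feed returns a multipart MJPEG stream, so the page embeds it
# with an <img> tag, e.g. <img src="{{ url_for('video_feed') }}"> inside
# templates/index.html, as the video_feed docstring suggests.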
| 2.640625
| 3
|
openbb_terminal/dashboards/widget_helpers.py
|
tehcoderer/GamestonkTerminal
| 255
|
12778771
|
"""Widgets Helper Library.
A library of `ipywidgets` wrappers for notebook based reports and voila dashboards.
The library includes both python code and html/css/js elements that can be found in the
`./widgets` folder.
"""
import os
from jinja2 import Template
def stylesheet():
"""Load a default CSS stylesheet from file."""
with open(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "widgets", "style.css")
) as f:
style = f.read()
return style
def price_card(ticker: str, price: str, price_color: str = "neutral_color") -> str:
"""Prepare a styled HTML element of a 128 by 128 price card.
Parameters
----------
ticker : str
Instrument ticker for the price card
price : str
Instrument price as a string
price_color : str, optional
The color of the price. Accepts "up_color", "down_color" and default "neutral_color"
Returns
-------
str
HTML code as string
"""
with open(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "widgets", "card.j2")
) as f:
template = Template(f.read())
card = template.render(ticker=ticker, price=price, price_color=price_color)
return card
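if __name__ == "__main__":
    # Rough usage sketch for a notebook or voila report. The ticker and price are
    # made up; it assumes the bundled widgets/style.css and widgets/card.j2 files
    # are present next to this module.
    html = "<style>{}</style>{}".format(stylesheet(), price_card("AAPL", "172.50", "up_color"))
    print(html[:120])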
| 2.875
| 3
|
Schedule/Stats.py
|
mprego/NBA
| 0
|
12778772
|
import pandas as pd
import numpy as np
class Stats(object):
'''
Produces stats given a schedule
'''
def __init__(self, games, agg_method, date_col, h_col, a_col, outcome_col, seg_vars = []):
self.games = games
self.agg_method = agg_method
self.date_col = date_col
self.h_col = h_col
self.a_col = a_col
self.outcome_col = outcome_col
self.seg_vars = seg_vars
# Inputs: number of past games, team id, date of current game
# Output: list of most recent n games
def get_last_n_games(self, n, team_id, curr_dt):
#Filter to get past games
games = self.games[self.games[self.date_col]<curr_dt]
#Filters to get past home and away games
a_games = games[games[self.a_col]==team_id]
h_games = games[games[self.h_col] == team_id]
all_games = a_games.append(h_games)
all_games['temp_days'] = [(pd.to_datetime(curr_dt) - pd.to_datetime(x)).days for x in all_games[self.date_col]]
all_games = all_games[all_games['temp_days']<=30]
all_games = all_games.drop('temp_days', axis=1)
all_games = all_games.sort_values(by=self.date_col, ascending=False)
n_games = all_games.head(n)
return n_games
def get_avg(self, games, col, team_id, opp):
h_games = games[games[self.h_col] == team_id]
a_games = games[games[self.a_col] == team_id]
if opp == 0:
a_col = 'A_' + col
h_col = 'H_' + col
else:
a_col = 'H_' + col
h_col = 'A_' + col
h_sum = np.sum(h_games[h_col])
a_sum = np.sum(a_games[a_col])
if len(games) == 0:
return -1
avg = (h_sum + a_sum)*1.0 / (len(games))
return avg
def back_to_back(self, games, curr_dt):
if len(games)==0:
return 0
latest_game = games.sort_values(by=self.date_col, ascending=False).head(1).reset_index(drop=True)
latest_date = latest_game.ix[0,self.date_col]
if (pd.to_datetime(curr_dt) - pd.to_datetime(latest_date)).days == 1:
return 1
return 0
def get_lastn_stats(self, n):
stats = pd.DataFrame()
for index, game in self.games.iterrows():
stats.set_value(index, self.outcome_col, game[self.outcome_col])
a_team = game[self.a_col]
a_games = self.get_last_n_games(n, a_team, game[self.date_col])
h_team = game[self.h_col]
h_games = self.get_last_n_games(n, h_team, game[self.date_col])
poss_cols = self.games.columns.values
poss_cols = self.search_for_cols('H_', poss_cols)
for col in poss_cols:
base_col = col[2:]
stats.set_value(index, ('H_' + base_col + '_' + str(n)), self.get_avg(h_games, base_col, h_team, 0))
stats.set_value(index, ('H_O_' + base_col + '_' + str(n)), self.get_avg(h_games, base_col, h_team, 1))
stats.set_value(index, ('A_' + base_col + '_' + str(n)), self.get_avg(a_games, base_col, a_team, 0))
stats.set_value(index, ('A_O_' + base_col + '_' + str(n)), self.get_avg(a_games, base_col, a_team, 1))
stats.set_value(index, 'H_BTB', self.back_to_back(h_games, game[self.date_col]))
stats.set_value(index, 'A_BTB', self.back_to_back(a_games, game[self.date_col]))
stats.set_value(index, 'H_'+str(n)+'_games', len(h_games))
stats.set_value(index, 'A_'+str(n)+'_games', len(a_games))
for col in self.seg_vars:
stats.set_value(index, col, game[col])
return stats
def search_for_cols(self, pfx, cols):
new_cols = []
pfx_len = len(pfx)
for col in cols:
if col[0:pfx_len] == pfx:
#if col != self.outcome_col:
if col != self.h_col:
if col != self.a_col:
new_cols.append(col)
return new_cols
def get_correl(self, stats):
cor = pd.DataFrame()
for col in stats.columns.values:
if col != self.outcome_col:
cor.set_value(col, 'Correlation', np.corrcoef(x=stats[col], y=stats[self.outcome_col])[0,1])
return cor
| 3.171875
| 3
|
dataset_builder.py
|
philippspohn/TTS-dataset-tools
| 0
|
12778773
|
import math
from pydub import AudioSegment, silence
from pydub.utils import mediainfo
from dearpygui.core import *
import os
import csv
import re
import shutil
from google.cloud import storage
from google.cloud import speech_v1p1beta1 as speech
import config_helper
import time
import silence_cut
def to_millis(timestamp):
timestamp = str(timestamp)
hours, minutes, seconds = (["0", "0"] + timestamp.split(":"))[-3:]
hours = int(hours)
minutes = int(minutes)
seconds = float(seconds)
    milliseconds = int(3600000 * hours + 60000 * minutes + 1000 * seconds)
    return milliseconds
class Dataset_builder:
def __init__(self):
self.project_dir = None
self.speaker_text_path = None
self.wav_file_path = None
self.index_start = None
self.cut_length = None
self.split_method = None
self.contains_punc = None
self.google_cloud_credentials_path = None
self.transcription = None
def set_values(self, dataset_dir, speaker_text_path, wav_file_path, index_start, cut_length, split_method,
contains_punc, google_cloud_credentials_path, transcription=True):
self.project_dir = dataset_dir
self.speaker_text_path = speaker_text_path
self.wav_file_path = wav_file_path
self.index_start = index_start
if cut_length:
self.cut_length = float(cut_length)
self.split_method = split_method
self.contains_punc = contains_punc
self.google_cloud_credentials_path = google_cloud_credentials_path
self.transcription = transcription
def build_dataset(self):
print("running")
output_wavs_path = os.path.join(self.project_dir, "wavs")
if not os.path.exists(self.project_dir):
os.makedirs(self.project_dir)
if not os.path.exists(output_wavs_path):
os.mkdir(output_wavs_path)
if self.split_method == 0:
set_value("label_build_status", "Detecting silences. This may take several minutes...")
audio_name = self.wav_file_path
w = AudioSegment.from_wav(audio_name)
# s_len = 1000
#
# silence_cuts = silence.split_on_silence(w, min_silence_len=s_len, silence_thresh=-45, keep_silence=True)
#
# cuts = []
# final_cuts = []
#
# def split_wav(wav, l):
# if (wav.duration_seconds * 1000) < (self.cut_length * 1000):
# output = []
# output.append(wav)
# return output
#
# too_long = False
# while True:
# l -= 50
# if l == 0:
# print("Error, could not find small enough silence period for split, giving up")
# output = []
# output.append(wav)
# return output
#
# start = time.time_ns()
# splits = silence.split_on_silence(wav, min_silence_len=l, silence_thresh=-45, keep_silence=True)
# print("Splitting:", round((time.time_ns() - start) / 1000))
#
# start = time.time_ns()
# silence.detect_silence(wav, min_silence_len=l, silence_thresh=-45)
# print("Detecting:", round((time.time_ns() - start) / 1000))
#
# print(f"Trying resplit... (l={l})")
# for s in splits:
# if (s.duration_seconds * 1000) > (self.cut_length * 1000):
# too_long = True
# if too_long == True:
# too_long = False
# else:
# return splits
#
# # Keep splitting until all cuts are under max len
#
# for i, c in enumerate(silence_cuts):
# print(f"Checking phrase {i}/{len(silence_cuts)}...")
# c_splits = split_wav(c, 1000)
# for s in c_splits:
# cuts.append(s)
# # rebuild small cuts into larger, but below split len
# temp_cuts = AudioSegment.empty()
#
# for i, c in enumerate(cuts):
# prev_cuts = temp_cuts
# temp_cuts = temp_cuts + c
#
# if i == (len(cuts) - 1):
# #on final entry
# if (temp_cuts.duration_seconds * 1000) > (self.cut_length * 1000):
# final_cuts.append(prev_cuts)
# final_cuts.append(c)
# else:
# final_cuts.append(temp_cuts)
# else:
# if ((temp_cuts.duration_seconds * 1000) + (cuts[i+1].duration_seconds * 1000)) > (self.cut_length * 1000):
# # combine failed, too long, add what has already been concatenated
# final_cuts.append(temp_cuts)
# temp_cuts = AudioSegment.empty()
segment_size = 25
min_len = int(get_value("input_min_seg_length")) / segment_size
max_len = int(get_value("input_max_seg_length")) / segment_size
final_cuts = silence_cut.speed_slice(w, segment_size=25, min_segments_in_slice=int(min_len),
max_segments_in_slice=int(max_len),
padding_start=int(get_value("input_padding_start")),
padding_end=int(get_value("input_padding_end")))
for i, w in enumerate(final_cuts):
output_wav_file = os.path.join(output_wavs_path, str(i + 1) + ".wav")
w.export(output_wav_file, format="wav")
# Process each cut into google API and add result to csv
output_csv_file = os.path.join(self.project_dir, "output.csv")
print("writing to: " + output_csv_file)
with open(output_csv_file, 'w') as f:
bucket_name = get_value("input_storage_bucket")
newline = ''
for i, c in enumerate(final_cuts):
x = i + 1
if not self.transcription:
f.write("{}wavs/{}.wav|".format(newline, x))
newline = '\n'
continue
print(f"Transcribing entry {x}/{len(final_cuts)}")
self.upload_blob(bucket_name, os.path.join(output_wavs_path, str(x) + ".wav"), "temp_audio.wav",
google_cloud_credentials_path=self.google_cloud_credentials_path)
gcs_uri = "gs://{}/temp_audio.wav".format(bucket_name)
client = speech.SpeechClient.from_service_account_json(filename=self.google_cloud_credentials_path)
audio = speech.RecognitionAudio(uri=gcs_uri)
info = mediainfo(os.path.join(output_wavs_path, str(x) + ".wav"))
sample_rate = info['sample_rate']
if get_value("input_use_videomodel") == 1:
print("Using enchanced google model...")
config = speech.RecognitionConfig(
encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=int(sample_rate),
language_code=config_helper.cfg_get("transcription", "language_code"),
enable_automatic_punctuation=True,
enable_word_time_offsets=False,
enable_speaker_diarization=False,
# enhanced model for better performance?
use_enhanced=True,
model="video", # "phone_call or video"
)
else:
config = speech.RecognitionConfig(
encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=int(sample_rate),
language_code=config_helper.cfg_get("transcription", "language_code"),
enable_automatic_punctuation=True,
enable_word_time_offsets=False,
enable_speaker_diarization=False,
)
operation = client.long_running_recognize(config=config, audio=audio)
response = operation.result(timeout=28800)
for result in response.results:
text = result.alternatives[0].transcript
# replace some symbols and google API word choice
text = text.replace("%", " percent")
text = text.replace("cuz", "cause")
text = text.replace("-", " ")
text = text.replace("&", "and")
print(text)
set_value("label_build_status", text)
f.write("{}wavs/{}.wav|{}".format(newline, x, text))
newline = '\n'
print('\a') # system beep
set_value("label_build_status", "Done!")
print("Done running builder!")
else:
# Aeneas mode
if not get_value("label_speaker_text_path") or not get_value("label_wav_file_path"):
print("Error, please choose text and/or audio files.")
return
if not os.path.exists("aeneas_out"):
os.mkdir("aeneas_out")
else:
shutil.rmtree("aeneas_out")
os.mkdir("aeneas_out")
if not os.path.exists("aeneas_prepped"):
os.mkdir("aeneas_prepped")
else:
shutil.rmtree("aeneas_prepped")
os.mkdir("aeneas_prepped")
audio_name = self.wav_file_path
with open(self.speaker_text_path, 'r', encoding="utf8") as f:
text = f.read()
text = text.replace(';', '.')
text = text.replace(':', '.')
text = text.replace('-', ' ')
text = text.replace('”', '')
text = text.replace('“', '')
text = text.replace('"', '.')
text = text.replace('—', ' ')
text = text.replace('’', '\'')
text = text.replace(' –', '.')
text = text.strip('\n')
if self.contains_punc:
# remove any duplicate whitespace between words
text = " ".join(text.split())
phrase_splits = re.split(r'(?<=[\.\!\?])\s*',
text) # split on white space between sentences
phrase_splits = list(filter(None, phrase_splits)) # remove empty splits
else:
                # no punctuation from speech to text, so we must divide the text by word count
phrase_splits = []
temp_line = []
text_split = text.split()
word_count_limit = 16
while len(text_split) > 0:
while len(temp_line) < word_count_limit and len(text_split) > 0:
temp_line.append(text_split.pop(0))
phrase_splits.append(" ".join(temp_line))
temp_line = []
with open('aeneas_prepped/split_text', 'w') as f:
newline = ''
for s in phrase_splits:
if s:
stripped = s.strip() # remove whitespace
f.write(newline + stripped)
newline = '\n'
# os.system('python -m aeneas.tools.execute_task ' + audio_name + ' aeneas_prepped/split_text "task_adjust_boundary_percent_value=50|task_adjust_boundary_algorithm=percent|task_language=en|is_text_type=plain|os_task_file_format=csv" ' + 'aeneas_out/' + audio_name_no_ext + '.csv')
os.system(
'python -m aeneas.tools.execute_task ' + audio_name + ' aeneas_prepped/split_text "task_adjust_boundary_percent_value=50|task_adjust_boundary_algorithm=percent|task_language=en|is_text_type=plain|os_task_file_format=csv" ' + 'aeneas_out/' + os.path.basename(
self.project_dir) + '.csv')
output_exists = False
if os.path.exists("{}/output.csv".format(os.path.basename(self.project_dir))):
# if file exists then prepare for append
output_exists = True
new_csv_file = open("{}/output.csv".format(os.path.basename(self.project_dir)), 'a')
if output_exists:
new_csv_file.write("\n")
with open('aeneas_out/' + os.path.basename(self.project_dir) + '.csv', 'r') as csv_file:
index_count = int(self.index_start)
csv_reader = csv.reader(csv_file, delimiter=',')
csv_reader = list(csv_reader) # convert to list
row_count = len(csv_reader)
newline = ""
for row in csv_reader:
beginning_cut = float(row[1])
end_cut = float(row[2])
text_out = row[3]
text_out = text_out.strip()
print("{} {} {} ".format(beginning_cut, end_cut, text_out))
c_length = end_cut - beginning_cut
# if cut is longer than cut length then split it even more
cut_length = float(self.cut_length)
if c_length > cut_length:
more_cuts = open("aeneas_prepped/temp.csv", 'w')
# save the current cut wav file to run on aeneas again
w = AudioSegment.from_wav(audio_name)
wav_cut = w[(beginning_cut * 1000):(end_cut * 1000)]
wav_cut.export("aeneas_prepped/tempcut.wav", format="wav")
split_list = []
num_cuts = math.ceil(c_length / cut_length)
text_list = text_out.split()
text_list_len = len(text_list)
split_len = math.ceil(text_list_len / num_cuts)
print("too long, making extra {} cuts. with length {}".format(num_cuts, split_len))
for i in range(1, num_cuts + 1):
words = []
for j in range(0, split_len):
if not text_list:
break
words.append(text_list.pop(0))
split_list.append(" ".join(words))
print(split_list)
print()
newline_splits = ''
for phrase in split_list:
more_cuts.write(newline_splits + phrase)
newline_splits = '\n'
more_cuts.close()
os.system(
'python -m aeneas.tools.execute_task ' + "aeneas_prepped/tempcut.wav" + ' aeneas_prepped/temp.csv "task_adjust_boundary_percent_value=50|task_adjust_boundary_algorithm=percent|task_language=en|is_text_type=plain|os_task_file_format=csv" ' + 'aeneas_out/temp_out.csv')
csv_file_temp = open('aeneas_out/temp_out.csv', 'r')
csv_reader_temp = csv.reader(csv_file_temp, delimiter=',')
csv_reader_temp = list(csv_reader_temp) # convert to list
row_count = len(csv_reader_temp)
w = AudioSegment.from_wav("aeneas_prepped/tempcut.wav")
for row in csv_reader_temp:
beginning_cut = float(row[1])
end_cut = float(row[2])
text_out = row[3]
text_out = text_out.strip()
wav_cut = w[(beginning_cut * 1000):(end_cut * 1000)]
new_wav_filename = "wavs/" + str(index_count) + ".wav"
new_csv_file.write("{}{}|{}".format(newline, new_wav_filename, text_out))
wav_cut.export("{}/{}".format(os.path.basename(self.project_dir), new_wav_filename),
format="wav")
index_count += 1
newline = '\n'
csv_file_temp.close()
else:
w = AudioSegment.from_wav(audio_name)
wav_cut = w[(beginning_cut * 1000):(end_cut * 1000)]
new_wav_filename = "wavs/" + str(index_count) + ".wav"
new_csv_file.write("{}{}|{}".format(newline, new_wav_filename, text_out))
wav_cut.export("{}/{}".format(os.path.basename(self.project_dir), new_wav_filename),
format="wav")
index_count += 1
newline = '\n'
new_csv_file.close()
set_value("label_build_status", "Building dataset done!")
# Remove temporary directories
shutil.rmtree("aeneas_prepped")
shutil.rmtree("aeneas_out")
print('\a') # system beep
print("Done with Aeneas!")
def upload_blob(self, bucket_name, source_file_name, destination_blob_name, google_cloud_credentials_path=None):
if not google_cloud_credentials_path:
google_cloud_credentials_path = self.google_cloud_credentials_path
storage_client = storage.Client.from_service_account_json(json_credentials_path=google_cloud_credentials_path)
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(destination_blob_name)
blob.upload_from_filename(source_file_name)
# print("File {} uploaded to {}.".format(source_file_name, destination_blob_name))
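# A minimal usage sketch for upload_blob (the bucket name, local file and key
# path below are hypothetical, not values used elsewhere in this tool):
#
#   builder.upload_blob("my-audio-bucket", "session.wav", "temp_audio.wav",
#                       google_cloud_credentials_path="gcs-key.json")
#
# where `builder` stands for an instance of this class.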
def diarization(self, wavfile, bucket_name, project_dir, google_cloud_credentials_path, project_name=None):
if not os.path.exists(project_dir):
os.makedirs(project_dir)
if project_name:
dianame = "diarization-" + project_name + "-" + str(round(time.time_ns() / 1000))
else:
dianame = "diarization-" + os.path.basename(wavfile) + "-" + str(round(time.time_ns() / 1000))
output_dir = os.path.join(project_dir, dianame)
os.mkdir(output_dir)
print("Uploading {} to google cloud storage bucket".format(wavfile))
set_value("label_diarization_run_info", "Uploading file to cloud storage bucket...")
self.upload_blob(bucket_name, wavfile, "temp_audio.wav", google_cloud_credentials_path)
gcs_uri = "gs://{}/temp_audio.wav".format(bucket_name)
set_value("label_diarization_run_info", "Finished uploading.")
client = speech.SpeechClient.from_service_account_json(filename=google_cloud_credentials_path)
audio = speech.RecognitionAudio(uri=gcs_uri)
info = mediainfo(wavfile)
sample_rate = info['sample_rate']
print("Transcribing {} with audio rate {}".format(wavfile, sample_rate))
config = speech.RecognitionConfig(
encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=int(sample_rate),
language_code=config_helper.cfg_get("transcription", "language_code"),
enable_automatic_punctuation=True,
enable_word_time_offsets=True,
enable_speaker_diarization=True,
diarization_speaker_count=int(get_value("input_diarization_num")),
)
operation = client.long_running_recognize(config=config, audio=audio)
print("Waiting for operation to complete, this may take several minutes...")
set_value("label_diarization_run_info", "Waiting for operation to complete, this may take several minutes...")
response = operation.result(timeout=28800)
result = response.results[-1]
words = result.alternatives[0].words
active_speaker = 1
transcript = []
current_cut = 0
previous_cut = 0
speaker_wavs = []
for x in range(int(get_value("input_diarization_num"))):
speaker_wavs.append(AudioSegment.empty())
transcript.append("")
w = AudioSegment.from_wav(wavfile)
for word in words:
if word.speaker_tag == active_speaker:
end_time = word.end_time
current_cut = end_time.total_seconds() * 1e3
# print(current_cut)
transcript[active_speaker - 1] += word.word + ' '
else:
# speaker has changed
transcript[active_speaker - 1] += word.word + ' '
w_cut = w[(previous_cut):current_cut]
previous_cut = current_cut
speaker_wavs[active_speaker - 1] = speaker_wavs[active_speaker - 1] + w_cut
active_speaker = word.speaker_tag
# finish last wav cut
w_cut = w[previous_cut:current_cut]
speaker_wavs[active_speaker - 1] = speaker_wavs[active_speaker - 1] + w_cut
for i, wave in enumerate(speaker_wavs):
speaker_output = os.path.join(output_dir, "speaker_{}.wav".format(i + 1))
speaker_wavs[i].export(speaker_output, format="wav")
for i, text in enumerate(transcript):
speaker_output = os.path.join(output_dir, "speaker_{}.txt".format(i + 1))
f = open(speaker_output, 'w')
f.write(transcript[i])
f.close()
set_value("label_diarization_run_info", "Done!")
print("Done with diarization!")
print('\a') # system beep
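# A minimal call sketch for diarization (all names below are hypothetical):
#
#   builder.diarization("interview.wav", "my-audio-bucket", "projects/demo",
#                       "gcs-key.json", project_name="demo")
#
# Note that the speaker count is read from the "input_diarization_num" GUI
# field, so that value must be set before this method is called.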
| 2.515625
| 3
|
examples/explain_helper.py
|
pinjutien/DeepExplain
| 0
|
12778774
|
<gh_stars>0
import pandas as pd
import PIL
import tensorflow as tf
import numpy as np
from sklearn.neighbors import KernelDensity
from scipy.signal import argrelextrema
from scipy.stats import iqr
from utils import plot, plt
import glob
import sys, os
sys.path.insert(0, os.path.abspath('..'))
from deepexplain.tensorflow import DeepExplain
from get_labels_data import get_baseline_data, get_baseline_map
import time
def load_image(image_path, base_image_path, file_name, labels, num_class,
target_size=(256, 256, 3),
normalized_factor=255.0):
assert len(labels) == len(file_name), "length of labels and file_name should be the same."
image_array = []
base_image_array = []
for f in file_name:
# imag_path = "/Users/ptien/tfds-download/apple2orange/testA/" + f
# base_path = "/Users/ptien/tfds-download/apple2orange/experiment2-500000/generated_y/generated_from_" + f
imag_path = image_path + f
base_path = base_image_path + "generated_from_" + f
imag_temp = tf.keras.preprocessing.image.load_img(imag_path, target_size=target_size)
base_imag_temp = tf.keras.preprocessing.image.load_img(base_path, target_size=target_size)
input_np= tf.keras.preprocessing.image.img_to_array(imag_temp)/normalized_factor
base_input_np = tf.keras.preprocessing.image.img_to_array(base_imag_temp)/normalized_factor
image_array += [input_np]
base_image_array += [base_input_np]
imag = np.array(image_array, dtype=float)  # note: the np.float alias was removed in newer NumPy releases
base_imag = np.array(base_image_array, dtype=float)
y_label = labels # apple
return imag, y_label, base_imag
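# A minimal call sketch for load_image (paths and file names are hypothetical;
# the GAN baselines are expected at <base_image_path>/generated_from_<file>):
#
#   imag, y_label, base_imag = load_image(
#       "/data/testA/", "/data/generated_y/",
#       file_name=["img_001.jpg"], labels=[0], num_class=2)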
def explain_model(model_path,
imag,
y_label,
num_class,
base_imag,
explain_types,
steps=100,
stochastic_mask_flag=False,
data_type=None,
y_target=None):
tf.keras.backend.clear_session()
# tf.reset_default_graph()
model = tf.keras.models.load_model(model_path)
with DeepExplain(session=tf.keras.backend.get_session()) as de: # <-- init DeepExplain context
# Need to reconstruct the graph in DeepExplain context, using the same weights.
# With Keras this is very easy:
# 1. Get the input tensor to the original model
input_tensor = model.layers[0].input
# 2. We now target the output of the last dense layer (pre-softmax)
# To do so, create a new model sharing the same layers until the last dense (index -2)
# fModel = Model(inputs=input_tensor, outputs = model.layers[-2].output)
fModel = tf.keras.models.Model(inputs=model.input, outputs = model.output)
target_tensor = fModel(input_tensor) # fModel(input_tensor)
# xs = x_test[0:10]
# ys = y_test[0:10]
xs = imag
# ys = np.array([tf.keras.utils.to_categorical(y_label, 2)])
output = {}
for type_ in explain_types:
print("process {x}".format(x=type_))
t1 = time.time()
if type_ == "expected_intgrad":
baseline_map = get_baseline_map(data_type, y_label if y_target is None else y_label + y_target, subsample=steps)
y_label_categorical = tf.keras.utils.to_categorical(y_label, num_class)
y_target_categorical = tf.keras.utils.to_categorical(y_target, num_class) if y_target is not None else None
output[type_] = de.explain("expected_intgrad", target_tensor, input_tensor, xs, ys=y_label_categorical,
baseline_map=baseline_map, steps=steps, y_target=y_target_categorical)
else:
def get_attribution(y_):
ys = tf.keras.utils.to_categorical(y_, num_class)
if "_base" in type_:
assert base_imag is not None, "Please provide a non-trivial baseline for {x}.".format(x=type_)
return de.explain(type_.split("_base")[0], target_tensor, input_tensor, xs, ys=ys,
baseline=base_imag, steps=steps, stochastic_mask_flag=stochastic_mask_flag)
elif type_ == "occlusion":
return de.explain(type_, target_tensor, input_tensor, xs, ys=ys)
else:
return de.explain(type_, target_tensor, input_tensor, xs, ys=ys, baseline=None, steps=steps, stochastic_mask_flag=False)
if y_target is not None:
output[type_] = get_attribution(y_label) - get_attribution(y_target)
else:
output[type_] = get_attribution(y_label)
t2 = time.time()
print('run time:', t2 - t1)
# if "grad*input" in explain_types:
# attributions_gradin = de.explain('grad*input', target_tensor, input_tensor, xs, ys=ys)
# output["grad*input"] = attributions_gradin
# if "saliency" in explain_types:
# attributions_sal = de.explain('saliency', target_tensor, input_tensor, xs, ys=ys)
# output["saliency"] = attributions_sal
# if "intgrad" in explain_types:
# attributions_ig = de.explain('intgrad', target_tensor, input_tensor, xs, ys=ys)
# output["intgrad"] = attributions_ig
# if "intgrad_base" in explain_types:
# assert base_imag != None, "Please provide non-trivial baseline."
# attributions_ig_base_line = de.explain('intgrad', target_tensor, input_tensor, xs, ys=ys,baseline=base_imag)
# output["intgrad_base"] = attributions_ig_base_line
# if "deeplift" in explain_types:
# attributions_dl = de.explain('deeplift', target_tensor, input_tensor, xs, ys=ys)
# output["deeplift"] = attributions_dl
# if "deeplift_base" in explain_types:
# attributions_dl_base_line = de.explain('deeplift', target_tensor, input_tensor, xs, ys=ys,baseline=base_imag)
# output["deeplift_base"] = attributions_dl_base_line
# if "elrp" in explain_types:
# attributions_elrp = de.explain('elrp', target_tensor, input_tensor, xs, ys=ys)
# output["elrp"] = attributions_elrp
# attributions_gradin = de.explain('grad*input', target_tensor, input_tensor, xs, ys=ys)
#attributions_sal = de.explain('saliency', target_tensor, input_tensor, xs, ys=ys)
# attributions_ig = de.explain('intgrad', target_tensor, input_tensor, xs, ys=ys)
# attributions_ig_base_line = de.explain('intgrad', target_tensor, input_tensor, xs, ys=ys,baseline=base_imag)
# attributions_dl = de.explain('deeplift', target_tensor, input_tensor, xs, ys=ys)
# attributions_dl_base_line = de.explain('deeplift', target_tensor, input_tensor, xs, ys=ys,baseline=base_imag)
#attributions_elrp = de.explain('elrp', target_tensor, input_tensor, xs, ys=ys)
#attributions_occ = de.explain('occlusion', target_tensor, input_tensor, xs, ys=ys)
# Compare Gradient * Input with approximate Shapley Values
# Note1: Shapley Value sampling with 100 samples per feature (78400 runs) takes a couple of minutes on a GPU.
# Note2: 100 samples are not enough for convergence, the result might be affected by sampling variance
# attributions_sv = de.explain('shapley_sampling', target_tensor, input_tensor, xs, ys=ys, samples=100)
# return attributions_ig, attributions_ig_base_line, attributions_dl, attributions_dl_base_line
return output
def kernel_density(original_image, gan_image, file_name, bandwidth = 0.02, op="min",
filter_=True, custom_std=False, iqr_choice=False):
num_fig = original_image.shape[0]
assert num_fig == len(file_name), "Number of figure is not the same."
diff_image_base = original_image - gan_image
X_plot = np.linspace(-1, 1, 1000)[:, np.newaxis]
bins = np.linspace(-1, 1, 1000)
local_min_max = {}
comparison_op = {
"min": np.less,
"max": np.greater
}
print("compare operator: {x}".format(x=op))
kernel_arr = []
for i in range(num_fig):
X = diff_image_base[i].reshape(-1,1)
n = len(X)
if custom_std:
bandwidth = custom_std*X.std()
if iqr_choice:
# import pdb; pdb.set_trace()
iqr_num = iqr(X)
bandwidth = 0.9* min(X.std(), iqr_num/1.34)*pow(n, -0.2)
if filter_:
X = [ xx for xx in X if abs(xx) >= bandwidth]
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(X)
log_dens = kde.score_samples(X_plot)
kernel_arr += [log_dens]
compare_op = comparison_op[op]
kernel_y = np.exp(log_dens)
local_indexs = argrelextrema(kernel_y, compare_op)[0]
local_min_max[file_name[i]] = X_plot[local_indexs]
del X
return kernel_arr, local_min_max, X_plot
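# Bandwidth note: with iqr_choice=True the bandwidth above follows Silverman's
# rule of thumb, 0.9 * min(std, IQR/1.34) * n**(-1/5).
# A minimal call sketch (the random arrays below are illustrative only):
#
#   orig = np.random.rand(2, 256, 256, 3)
#   gan = np.random.rand(2, 256, 256, 3)
#   kernels, minima, grid = kernel_density(orig, gan, ["a.jpg", "b.jpg"],
#                                          bandwidth=0.02, op="min")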
def load_batch_images(image_paths, file_name):
res = []
for p in image_paths:
res_p = []
for f in file_name:
path_ = p + f
imag_temp = tf.keras.preprocessing.image.load_img(path_, target_size=(256, 256, 3))
input_np= tf.keras.preprocessing.image.img_to_array(imag_temp)/255.
res_p += [input_np]
res += [res_p]
res = np.array(res)
return res
def show_kd_plot(image_paths, file_name, filter_=False, output=None, custom_std=False, iqr_choice=False):
images_collection = load_batch_images(image_paths, file_name)
original_imag = images_collection[0]
gan_imag = images_collection[1]
kernel_arr, local_min_max, X_plot = kernel_density(original_imag, gan_imag, file_name,
bandwidth = 0.02, op="min", filter_=filter_,
custom_std=custom_std, iqr_choice=iqr_choice)
gan_imag2 = images_collection[2]
kernel_arr2, local_min_max2, X_plot2 = kernel_density(original_imag, gan_imag2, file_name,
bandwidth = 0.02, op="min", filter_=filter_,
custom_std=custom_std, iqr_choice=iqr_choice)
gan_imag3 = images_collection[3]
kernel_arr3, local_min_max3, X_plot3 = kernel_density(original_imag, gan_imag3, file_name,
bandwidth = 0.02, op="min", filter_=filter_,
custom_std=custom_std, iqr_choice=iqr_choice)
nrows = len(file_name)
ncols = 5
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(3*ncols, 3*nrows))
axes = axes.reshape(nrows, ncols)
y_min,y_max = 0, 10
for i in range(nrows):
# a4 = attributions_dl_base_line[i]
axes[i, 0].imshow(original_imag[i])# .set_title('Original')
axes[i, 1].imshow(gan_imag[i])# .set_title('gan-1')
axes[i, 2].imshow(gan_imag2[i])# .set_title('gan-2')
axes[i, 3].imshow(gan_imag3[i])# .set_title('gan-3')
log_dens = kernel_arr[i]
log_dens2 = kernel_arr2[i]
log_dens3 = kernel_arr3[i]
axes[i, 4].fill(X_plot[:, 0], np.exp(log_dens), fc='green', alpha=0.5, label="gan-1")
axes[i, 4].fill(X_plot2[:, 0], np.exp(log_dens2), fc='blue', alpha=0.3, label="gan-2")
axes[i, 4].fill(X_plot3[:, 0], np.exp(log_dens3), fc='red', alpha=0.5, label="gan-3")
axes[i, 4].axvline(0, y_min, y_max, c="gray", alpha=0.1)
# axes[i, 0].text(0, 5, str(i))
axes[i, 4].set_ylim([y_min,y_max])
if output:
fig.savefig(output)
if __name__ == '__main__':
# image_path = "/Users/ptien/tfds-download/apple2orange/testA/"
# gan_image_path_1 = "/Users/ptien/tfds-download/apple2orange/experiment2-500000/generated_y/generated_from_"
# gan_image_path_2 = "/Users/ptien/tfds-download/apple2orange/experiment-1000/generated_y/generated_from_"
# gan_image_path_3 = "/Users/ptien/tfds-download/apple2orange/experiment-0/generated_y/generated_from_"
image_path = "/Users/ptien/tfds-download/horse2zebra/testA/"
gan_image_path_1 = "/Users/ptien/tfds-download/horse2zebra/experiment-500000/generated_y/generated_from_"
gan_image_path_2 = "/Users/ptien/tfds-download/horse2zebra/experiment-1000/generated_y/generated_from_"
gan_image_path_3 = "/Users/ptien/tfds-download/horse2zebra/experiment-0/generated_y/generated_from_"
image_paths = [image_path, gan_image_path_1, gan_image_path_2, gan_image_path_3]
all_images = glob.glob(image_path + "*.jpg")
batch_size = 10
# file_name = ["n07740461_240.jpg", "n07740461_411.jpg", "n07740461_14960.jpg",
# "n07740461_40.jpg", "n07740461_1690.jpg", "n07740461_12921.jpg"]
# show_kd_plot(image_paths, file_name, filter_=False, output="gan_performance_a2o.jpg",
# custom_std=False, iqr_choice=True)
c = 0
total_images = len(all_images)
while len(all_images) > 0:
print(c, total_images, len(all_images))
batch_images = all_images[:batch_size]
batch_images = [ f.split("/")[-1] for f in batch_images]
show_kd_plot(image_paths, batch_images, filter_=False, output="gan_performance_h2z_batch_{c}.jpg".format(c=c),
custom_std=False, iqr_choice=True)
all_images = all_images[batch_size:]
c += 1
| 2.140625
| 2
|
tests/tagopsdb/database/test_connection.py
|
ifwe/tagopsdb
| 0
|
12778775
|
# Copyright 2016 Ifwe Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class TestConnections(unittest2.TestCase):
def setUp(self):
self.protocol = 'mysql+mysqldb'
self.db_user = 'testuser'
self.db_password = '<PASSWORD>'
self.hostname = 'opsdb.tagged.com'
self.db_name = 'TagOpsDB'
@unittest2.skip('not currently valid')
def test_create_dbconn_string(self):
from tagopsdb.database import create_dbconn_string
params = dict(hostname=self.hostname, db_name=self.db_name)
dbconn_string = create_dbconn_string(self.db_user, self.db_password,
**params)
expect_str = (
self.protocol + '://' + self.db_user + ':' +
self.db_password + '@' + self.hostname + '/' +
self.db_name
)
self.assertEquals(dbconn_string, expect_str)
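# With the fixture values from setUp(), expect_str evaluates to
# 'mysql+mysqldb://testuser:<PASSWORD>@opsdb.tagged.com/TagOpsDB'
# (the password placeholder comes from the redacted fixture above).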
| 2.546875
| 3
|
rocket_casadi_solution.py
|
bvermeulen/Rocket-and-gravity-turn
| 1
|
12778776
|
'''
Based on:
Gravity Turn Maneuver with direct multiple shooting using CVodes
(c) <NAME>
https://mintoc.de/index.php/Gravity_Turn_Maneuver_(Casadi)
https://github.com/zegkljan/kos-stuff/tree/master/non-kos-tools/gturn
----------------------------------------------------------------
'''
import sys
from pathlib import Path
import casadi as cs
import numpy as np
import pandas as pd
from rocket_input import read_rocket_config
# noinspection PyPep8Naming
def compute_gravity_turn(m0, m1, g0, r0, Isp0, Isp1, Fmax, cd, A, H, rho, h_obj,
v_obj, q_obj, N=300, vel_eps=1e-3):
'''
Computes gravity turn profile
:params:
m0: wet (launch) mass (kg or ton)
m1: dry mass (kg or ton)
g0: gravitational acceleration at zero altitude (m * s^-2 or km * s^-2)
r0: "orbit" radius at zero altitude (body radius) (m or km)
Isp0: specific impulse of the engine(s) at zero altitude (s)
Isp1: specific impulse of the engine(s) in vacuum (s)
Fmax: maximum thrust of the engine(s) (N or MN)
cd: drag coefficient
A: reference area of the vehicle (m^2)
H: scale height of the atmosphere (m or km)
rho: density of the atmosphere at zero altitude (kg * m^-3)
h_obj: target altitude (m or km)
v_obj: target velocity (m * s^-1 or km * s^-1)
q_obj: target angle to vertical (rad)
N: number of shooting intervals
vel_eps: initial velocity (must be nonzero, e.g. a very small number)
(m * s^-1 or km * s^-1)
:returns:
a dictionary with results
'''
# Create symbolic variables
x = cs.SX.sym('[m, v, q, h, d]') # Vehicle state
u = cs.SX.sym('u') # Vehicle controls
T = cs.SX.sym('T') # Time horizon (s)
# Introduce symbolic expressions for important composite terms
Fthrust = Fmax * u
Fdrag = 0.5 * A * cd * rho * cs.exp(-x[3] / H) * x[1] ** 2
r = x[3] + r0
g = g0 * (r0 / r) ** 2
vhor = x[1] * cs.sin(x[2])
vver = x[1] * cs.cos(x[2])
Isp = Isp1 + (Isp0 - Isp1) * cs.exp(-x[3] / H)
# Build symbolic expressions for ODE right hand side
mdot = -(Fthrust / (Isp * g0))
vdot = (Fthrust - Fdrag) / x[0] - g * cs.cos(x[2])
hdot = vver
ddot = vhor / r
qdot = g * cs.sin(x[2]) / x[1] - ddot
# Build the DAE function
ode = [mdot, vdot, qdot, hdot, ddot]
quad = u
dae = {'x': x, 'p': cs.vertcat(u, T), 'ode': T * cs.vertcat(*ode), 'quad': T * quad}
I = cs.integrator(
'I', 'cvodes', dae,
{'t0': 0.0, 'tf': 1.0 / N, 'nonlinear_solver_iteration': 'functional'}
)
# Specify upper and lower bounds as well as initial values for DAE
# parameters, states and controls
p_min = [0.0]
p_max = [600.0]
p_init = [300.0]
u_min = [0.0]
u_max = [1.0]
u_init = [0.5]
x0_min = [m0, vel_eps, 0.0, 0.0, 0.0]
x0_max = [m0, vel_eps, 0.5 * cs.pi, 0.0, 0.0]
x0_init = [m0, vel_eps, 0.05 * cs.pi, 0.0, 0.0]
xf_min = [m1, v_obj, q_obj, h_obj, 0.0]
xf_max = [m0, v_obj, q_obj, h_obj, cs.inf]
xf_init = [m1, v_obj, q_obj, h_obj, 0.0]
x_min = [m1, vel_eps, 0.0, 0.0, 0.0]
x_max = [m0, cs.inf, cs.pi, cs.inf, cs.inf]
x_init = [0.5 * (m0 + m1), 0.5 * v_obj, 0.5 * q_obj, 0.5 * h_obj, 0.0]
# Useful variable block sizes
npars = 1 # Number of parameters
nx = x.size1() # Number of states
nu = u.size1() # Number of controls
ns = nx + nu # Number of variables per shooting interval
# Introduce symbolic variables and disassemble them into blocks
V = cs.MX.sym('X', N * ns + nx + npars)
P = V[0]
X = [V[(npars + i * ns):(npars + i * ns + nx)] for i in range(0, N + 1)]
U = [V[(npars + i * ns + nx):(npars + (i + 1) * ns)] for i in range(0, N)]
# Nonlinear constraints and Lagrange objective
G = []
F = 0.0
# Build DMS structure
x0 = p_init + x0_init
for i in range(0, N):
Y = I(x0=X[i], p=cs.vertcat(U[i], P))
G += [Y['xf'] - X[i + 1]]
F = F + Y['qf']
frac = float(i + 1) / N
x0 = x0 + u_init + [x0_init[i] + frac * (xf_init[i] - x0_init[i])
for i in range(0, nx)]
# Lower and upper bounds for solver
lbg = 0.0
ubg = 0.0
lbx = p_min + x0_min + u_min + (N - 1) * (x_min + u_min) + xf_min
ubx = p_max + x0_max + u_max + (N - 1) * (x_max + u_max) + xf_max
# Solve the problem using IPOPT
nlp = {'x': V, 'f': (m0 - X[-1][0]) / (m0 - m1), 'g': cs.vertcat(*G)}
S = cs.nlpsol(
'S', 'ipopt', nlp, {'ipopt': {'tol': 1e-4, 'print_level': 5, 'max_iter': 500}}
)
r = S(x0=x0, lbx=lbx, ubx=ubx, lbg=lbg, ubg=ubg)
print('RESULT: {}'.format(S.stats()['return_status']))
if S.stats()['return_status'] in {'Invalid_Number_Detected'}:
return None
# Extract state sequences and parameters from result
x = r['x']
f = r['f']
T = float(x[0])
t = np.linspace(0, T, N + 1)
m = np.array(x[npars::ns]).squeeze()
v = np.array(x[npars + 1::ns]).squeeze()
q = np.array(x[npars + 2::ns]).squeeze()
h = np.array(x[npars + 3::ns]).squeeze()
d = np.array(x[npars + 4::ns]).squeeze()
u = np.concatenate((np.array(x[npars + nx::ns]).squeeze(), [0.0]))
return {
'time': t,
'mass': m,
'vel': v,
'alt': h,
'control': u,
'hor_angle': d,
'ver_angle': q
}
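# A minimal call sketch (every number below is an illustrative placeholder in
# km/ton/MN-style units, not a value from any real configuration file):
#
#   result = compute_gravity_turn(
#       m0=20.0, m1=6.0, g0=9.81e-3, r0=600.0, Isp0=250.0, Isp1=300.0,
#       Fmax=0.2, cd=0.2, A=1.0, H=5.6, rho=1.2,
#       h_obj=75.0, v_obj=2.3, q_obj=0.5 * 3.14159, N=100)
#   if result is not None:
#       print(result['time'][-1], result['mass'][-1])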
def main(config_file):
( rocket_params,
environment_params,
model_params,
io_params
) = read_rocket_config(config_file)
# Vehicle parameters
m0 = (rocket_params.fuel_mass +
rocket_params.dry_mass) # Launch mass (kg or ton)
m1 = rocket_params.dry_mass # Dry mass (kg or ton)
Isp0 = rocket_params.motor_isp0 # Specific impulse at zero altitude (s)
Isp1 = rocket_params.motor_isp1 # Specific impulse at vacuum (s)
A = rocket_params.rocket_area # Reference area (m^2)
Fmax = rocket_params.max_thrust # Maximum thrust (N or MN)
vel_eps = rocket_params.vel # Initial velocity (m/s or km/s)
# Environmental parameters
g0 = environment_params.gravity # Gravitational acceleration at altitude zero (m/s^2 or km/s^2)
r0 = environment_params.radius # Radius at altitude zero (m or km)
cd = environment_params.drag_coefficient # Drag coefficients
H = environment_params.scale_height # Scale height (m or km)
rho = environment_params.density # Density at altitude zero (x 1000)
# Model and target orbit parameters
N = model_params.N # Number of shooting intervals
h_obj = model_params.h_obj # Target altitude (m or km)
v_obj = model_params.v_obj # Target velocity (m/s or km/s)
q_obj = model_params.q_obj / 180 * cs.pi # Target angle to vertical (rad)
# output file
model_file = model_params.model_file
result = compute_gravity_turn(
m0, m1, g0, r0, Isp0, Isp1, Fmax,
cd, A, H, rho, h_obj,
v_obj, q_obj, N=N, vel_eps=vel_eps
)
result_df = pd.DataFrame(result)
result_df.to_excel(model_file, index=False)
print(result_df.head())
if __name__ == '__main__':
config_file_name = 'None'
if len(sys.argv) == 2:
config_file_name = sys.argv[1]
config_file_name = Path(config_file_name)
if not config_file_name.is_file():
print(f'incorrect config file: {config_file_name}')
exit()
main(config_file_name)
| 2.859375
| 3
|
src/GenomeBaser/genomebaser.py
|
mscook/GenomeBaser
| 3
|
12778777
|
<filename>src/GenomeBaser/genomebaser.py
#!/usr/bin/env python
"""
GenomeBaser is a tool to manage complete genomes from NCBI
"""
__title__ = 'GenomeBaser'
__version__ = '0.1.2'
__description__ = "GenomeBaser manages complete (bacterial) genomes from NCBI"
__author__ = '<NAME>'
__author_email__ = '<EMAIL>'
__url__ = 'http://github.com/mscook/GenomeBaser'
__license__ = 'ECL 2.0'
import os
import re
import glob
import sys
import subprocess
from Bio import SeqIO
import click
def check_for_deps():
"""
Check if 3rd party dependencies (non-python) exist
Requires:
* rsync
* prokka-genbank_to_fasta_db
* cd-hit
* makeblastdb
"""
reqs = ["rsync", "prokka-genbank_to_fasta_db", "cd-hit", "makeblastdb"]
for e in reqs:
output = subprocess.Popen(["which", e],
stdout=subprocess.PIPE).communicate()[0]
if output.split("/")[-1].strip() != e:
print "Missing %s. Please install. Exiting." % (e)
sys.exit()
def fetch_genomes(target_genus_species, db_base=None):
"""
Use rsync to manage periodic updates
Examples:
>>> fetch_genomes("Escherichia coli")
>>>
>>> fetch_genomes("Klebsiella pneumoniae", "/home/me/dbs/")
:param target_genus_species: the genus species as a string
(space delimited)
:returns: the database location
"""
working_dir = os.getcwd()
if db_base is not None:
os.chdir(db_base)
target_genus_species = target_genus_species.replace(" ", "_")
if not os.path.exists(target_genus_species):
os.mkdir(target_genus_species)
os.chdir(target_genus_species)
cmd = ("rsync -av ftp.ncbi.nlm.nih.gov::genomes/Bacteria/"
"%s_*/*.gbk .") % (target_genus_species)
db_loc = os.getcwd()
os.system(cmd)
os.chdir(working_dir)
return db_loc
def genbank_to_fasta(db_loc):
"""
Converts GenBank to FASTA, naming the output using the DEFINITION line
Examples:
>>> genbank_to_fasta("/home/mscook/dbs/Klebsiella_pneumoniae")
:param db_loc: the fullpath as a string to the database location (genus
species inclusive)
:returns: a list of the output fasta files
"""
fasta_files = []
tmp_file = "tmp.gbk"
working_dir = os.getcwd()
os.chdir(db_loc)
infs = glob.glob("*.gbk")
for inf in infs:
cmd = "grep -v 'CONTIG join' "+inf+" > "+tmp_file
os.system(cmd)
os.rename(tmp_file, inf)
for seq_record in SeqIO.parse(inf, "genbank"):
out_fa = re.sub(r'\W+', ' ', seq_record.description
).replace(' ', '-')
if out_fa.endswith('-'):
out_fa = out_fa[:-1]+".fa"
else:
out_fa = out_fa+".fa"
SeqIO.write(seq_record, out_fa, "fasta")
fasta_files.append(out_fa)
dest = out_fa.replace(".fa", ".gbk")
if not os.path.lexists(dest):
os.symlink(inf, dest)
if os.path.exists(tmp_file):
os.remove(tmp_file)
os.chdir(working_dir)
return fasta_files
def partition_genomes(db_loc, fasta_files):
"""
Separate complete genomes from plasmids
..warning:: this partitions on the assumption that the filename (taken
from the DEFINITION line) contains complete_sequence (plasmid) vs
complete_genome (genome)
:param db_loc: the fullpath as a string to the database location (genus
species inclusive)
:param fasta_files: a list of fasta files
:returns: a list of DEFINITION format named GenBank files
"""
plasmid, genome = [], []
working_dir = os.getcwd()
os.chdir(db_loc)
for e in fasta_files:
if e.find("complete-sequence") != -1:
plasmid.append(e)
elif e.find("complete-genome") != -1:
genome.append(e)
elif e.find("-genome") != -1:
genome.append(e)
else:
print "Could not classify %s" % (e)
print "Continuing..."
if not os.path.exists("plasmid"):
os.mkdir("plasmid")
os.chdir("plasmid")
for e in plasmid:
if not os.path.lexists(e):
os.symlink("../"+e, e)
os.chdir("../")
if not os.path.exists("genome"):
os.mkdir("genome")
os.chdir("genome")
for e in genome:
if not os.path.lexists(e):
os.symlink("../"+e, e)
os.chdir("../")
os.chdir(working_dir)
return genome
def make_prokka(db_loc, genbank_files, target_genus_species):
"""
Make a prokka database of the complete genomes
:param db_loc: the fullpath as a string to the database location (genus
species inclusive)
:param genbank_files: a list of GenBank files
:param target_genus_species: the genus species as a string
(space delimited)
"""
working_dir = os.getcwd()
os.chdir(db_loc)
target = target_genus_species.split(" ")[0]
if not os.path.exists("prokka"):
os.mkdir("prokka")
prokka_cmd = ("prokka-genbank_to_fasta_db %s --idtag=locus_tag "
"> prokka/%s.faa") % (' '.join(genbank_files), target)
os.system(prokka_cmd.replace(".fa", ".gbk"))
os.chdir("prokka")
cd_hit_cmd = ("cd-hit -i %s.faa -o %s -T 0 "
"-M 0 -g 1 -s 0.8 -c 0.9") % (target, target)
os.system(cd_hit_cmd)
blast_cmd = "makeblastdb -dbtype prot -in %s" % (target)
os.system(blast_cmd)
os.chdir("../")
os.chdir(working_dir)
@click.command()
@click.option('--check_deps/--no-check_deps', default=True,
help='Check that non-python dependencies exist')
@click.option('--all/--no-all', default=False,
help='Get complete, draft and assemblies')
@click.option('--mlst/--no-mlst', default=False,
help='Run MLST typing')
@click.argument("genus")
@click.argument("species")
@click.argument('out_database_location', type=click.Path(exists=True))
def main(check_deps, all, mlst, genus, species, out_database_location):  # 'all' and 'mlst' are accepted but not yet used
"""
GenomeBaser is a tool to manage complete (bacterial) genomes from the NCBI.
Example usage:
$ GenomeBaser.py Klebsiella pneumoniae ~/dbs
$ # (wait a few months)...
$ GenomeBaser Klebsiella pneumoniae ~/dbs
By <NAME> (<EMAIL>)
**More info at:** https://github.com/mscook/GenomeBaser
"""
if check_deps:
print "Checking for 3rd party dependencies"
check_for_deps()
genus = genus[0].upper()+genus[1:]
gs = genus+" "+species
loc = fetch_genomes(gs, out_database_location)
fas = genbank_to_fasta(loc)
genbanks = partition_genomes(loc, fas)
make_prokka(loc, genbanks, gs)
if __name__ == '__main__':
main()
| 2.46875
| 2
|
python_tc_api/setup.py
|
imec-ilabt/terms-cond-demo-site
| 0
|
12778778
|
<reponame>imec-ilabt/terms-cond-demo-site<gh_stars>0
from setuptools import setup, find_packages
import tcapi
setup(
name="T&C API",
version=tcapi.__version__,
description="Terms & Conditions Web API",
long_description="Terms & Conditions Web API",
url="https://github.com/imec-ilabt/terms-cond-demo-site",
packages=find_packages(exclude=["doc"]),
entry_points={
"console_scripts": [
"tcapi=tcapi.tc_api_app:main"
]
},
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
python_requires='>=3.6',
install_requires=["flask>=1.0.2", "python-dateutil>=2.7.3", "flask-cors", "pytz", "cryptography"],
dependency_links=[],
setup_requires=[],
tests_require=[]
)
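# Note: after `pip install .` the console_scripts entry point above exposes a
# `tcapi` command that dispatches to tcapi.tc_api_app:main.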
| 1.039063
| 1
|
Common/DataModel/Testing/Python/SelectionLoop.py
|
jasper-yeh/VtkDotNet
| 3
|
12778779
|
<reponame>jasper-yeh/VtkDotNet<filename>Common/DataModel/Testing/Python/SelectionLoop.py
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
def GetRGBColor(colorName):
'''
Return the red, green and blue components for a
color as doubles.
'''
rgb = [0.0, 0.0, 0.0] # black
vtk.vtkNamedColors().GetColorRGB(colorName, rgb)
return rgb
#
# Demonstrate the use of implicit selection loop as well as closest point
# connectivity
#
# create pipeline
#
sphere = vtk.vtkSphereSource()
sphere.SetRadius(1)
sphere.SetPhiResolution(100)
sphere.SetThetaResolution(100)
selectionPoints = vtk.vtkPoints()
selectionPoints.InsertPoint(0, 0.07325, 0.8417, 0.5612)
selectionPoints.InsertPoint(1, 0.07244, 0.6568, 0.7450)
selectionPoints.InsertPoint(2, 0.1727, 0.4597, 0.8850)
selectionPoints.InsertPoint(3, 0.3265, 0.6054, 0.7309)
selectionPoints.InsertPoint(4, 0.5722, 0.5848, 0.5927)
selectionPoints.InsertPoint(5, 0.4305, 0.8138, 0.4189)
loop = vtk.vtkImplicitSelectionLoop()
loop.SetLoop(selectionPoints)
extract = vtk.vtkExtractGeometry()
extract.SetInputConnection(sphere.GetOutputPort())
extract.SetImplicitFunction(loop)
connect = vtk.vtkConnectivityFilter()
connect.SetInputConnection(extract.GetOutputPort())
connect.SetExtractionModeToClosestPointRegion()
connect.SetClosestPoint(selectionPoints.GetPoint(0))
clipMapper = vtk.vtkDataSetMapper()
clipMapper.SetInputConnection(connect.GetOutputPort())
backProp = vtk.vtkProperty()
backProp.SetDiffuseColor(GetRGBColor('tomato'))
clipActor = vtk.vtkActor()
clipActor.SetMapper(clipMapper)
clipActor.GetProperty().SetColor(GetRGBColor('peacock'))
clipActor.SetBackfaceProperty(backProp)
# Create graphics stuff
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(clipActor)
ren1.SetBackground(1, 1, 1)
ren1.ResetCamera()
ren1.GetActiveCamera().Azimuth(30)
ren1.GetActiveCamera().Elevation(30)
ren1.GetActiveCamera().Dolly(1.2)
ren1.ResetCameraClippingRange()
renWin.SetSize(400, 400)
renWin.Render()
# render the image
#
#iren.Start()
| 2.03125
| 2
|
pythonCore/ch02/E15.py
|
Furzoom/learnpython
| 0
|
12778780
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
n1 = int(raw_input('No.1: '))
n2 = int(raw_input('No.2: '))
n3 = int(raw_input('No.3: '))
if n1 < n2:
if n1 < n3:
pass
else:
n1, n3 = n3, n1
elif n2 < n3:
n2, n1 = n1, n2
else:
n1, n3 = n3, n1
if n2 > n3:
n2, n3 = n3, n2
print n1, n2, n3
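# Example run: entering 3, 1 and 2 at the prompts prints "1 2 3";
# the swaps above leave the three inputs in ascending order.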
| 3.828125
| 4
|
Module 3/Chapter 4/ch4_21.py
|
PacktPublishing/Natural-Language-Processing-Python-and-NLTK
| 50
|
12778781
|
<filename>Module 3/Chapter 4/ch4_21.py
import nltk
from nltk.tag import BigramTagger, TrigramTagger
from nltk.corpus import treebank
testing = treebank.tagged_sents()[2000:]
training= treebank.tagged_sents()[:7000]
bigramtag = BigramTagger(training)
print(bigramtag.evaluate(testing))
trigramtag = TrigramTagger(training)
print(trigramtag.evaluate(testing))
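# Note: TaggerI.evaluate() returns the tagger's accuracy, i.e. the fraction of
# tokens in the held-out sentences that receive the correct tag.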
| 2.640625
| 3
|
src/infi/recipe/console_scripts/__init__.py
|
Infinidat/infi.recipe.console_scripts
| 1
|
12778782
|
__import__("pkg_resources").declare_namespace(__name__)
from contextlib import contextmanager
from .minimal_packages import MinimalPackagesWorkaround, MinimalPackagesMixin
from .windows import WindowsWorkaround, is_windows
from .virtualenv import VirtualenvWorkaround
from .egg import Scripts
class AbsoluteExecutablePathMixin(object):
def is_relative_paths_option_set(self):
relative_paths = self.options.get('relative-paths',
self.buildout.get('buildout').get('relative-paths', 'false'))
return relative_paths in [True, 'true']
def set_executable_path(self):
if is_windows and not self.is_relative_paths_option_set():
python_executable = self.buildout.get('buildout').get('executable')
self.options['executable'] = python_executable
class Scripts(Scripts, AbsoluteExecutablePathMixin, MinimalPackagesMixin):
def install(self):
self.set_executable_path()
installed_files = super(Scripts, self).install()
WindowsWorkaround.apply(self, False, installed_files)
MinimalPackagesWorkaround.apply(self, installed_files)
VirtualenvWorkaround.apply(self, installed_files)
return installed_files
update = install
@contextmanager
def patch(parent, name, value):
previous = getattr(parent, name, None)
setattr(parent, name, value)
try:
yield
finally:
setattr(parent, name, previous)
@contextmanager
def patch_get_entry_map_for_gui_scripts():
import pkg_resources
_get_entry_map = pkg_resources.get_entry_map
def get_entry_map(dist, group=None):
return _get_entry_map(dist, "gui_scripts")
with patch(pkg_resources, "get_entry_map", get_entry_map):
yield
@contextmanager
def patch_get_entry_info_for_gui_scripts():
import pkg_resources
def get_entry_info(self, group, name):
return self.get_entry_map("gui_scripts" if group == "console_scripts" else group).get(name)
with patch(pkg_resources.Distribution, "get_entry_info", get_entry_info):
yield
class GuiScripts(Scripts, AbsoluteExecutablePathMixin, MinimalPackagesMixin):
def install(self):
with patch_get_entry_map_for_gui_scripts():
with patch_get_entry_info_for_gui_scripts():
self.set_executable_path()
installed_files = super(GuiScripts, self).install()
WindowsWorkaround.apply(self, True, installed_files)
MinimalPackagesWorkaround.apply(self, installed_files)
return installed_files
update = install
# used as entry point to gui-script-test
def nothing():
pass
def patch_buildout_wheel():
import buildout.wheel
import glob
WheelInstaller = buildout.wheel.WheelInstaller
def wrapper(func):
def wrapper(basename):
return WheelInstaller((glob.glob('{}*'.format(basename)) + [basename])[0])
return wrapper
buildout.wheel.WheelInstaller = wrapper(buildout.wheel.WheelInstaller)
def _get_matching_dist_in_location(dist, location):
"""
Check if `locations` contain only the one intended dist.
Return the dist with metadata in the new location.
"""
# Getting the dist from the environment causes the
# distribution meta data to be read. Cloning isn't
# good enough.
import pkg_resources
env = pkg_resources.Environment([location])
dists = [ d for project_name in env for d in env[project_name] ]
dist_infos = [ (d.project_name, d.version) for d in dists ]
if dist_infos == [(dist.project_name, dist.version)]:
return dists.pop()
if dist_infos == [(dist.project_name.lower(), dist.version)]:
return dists.pop()
def patch_zc_buildout_easy_install():
import zc.buildout.easy_install
zc.buildout.easy_install._get_matching_dist_in_location = _get_matching_dist_in_location
# buildout.wheel on Windows is having problems installing non-lower-case wheels
try:
patch_buildout_wheel()
except ImportError:
pass
patch_zc_buildout_easy_install()
| 2.125
| 2
|
pib-crawler-type3/run_pdf_downloader.py
|
PromodhPinto/anuvaad-corpus-tools
| 6
|
12778783
|
<reponame>PromodhPinto/anuvaad-corpus-tools
###############################################################################
# AUTHOR : <NAME>
# AIM : Download the contents of the PIB website in PDF format for documents
# whose textual content is already available in the same directory
# USAGE : python3 ./run_pdf_downloader.py
###############################################################################
import glob
import pdfkit
# The block below can be skipped if this code is executed on a machine with a GUI
from pyvirtualdisplay import Display
display = Display()
display.start()
# check for existing text files
txtfiles = [f for f in glob.glob("*.txt")]
pdffiles = []
for txtfilename in txtfiles:
pdffiles.append(txtfilename.replace(".txt", ".pdf"))
downpdffiles = [f for f in glob.glob("*.pdf")]
# identify and download PDF files for the documents which are available in TXT format alone
for filename in pdffiles:
if filename not in downpdffiles:
try:
url="https://www.pib.gov.in/PressReleasePage.aspx?PRID="+str(filename).replace(".pdf", "")
pdfkit.from_url(url, filename)
print(filename, " : downloaded successfully as PDF")
# If processing fails midway, the affected IDs are saved to another file for later review
except Exception as e:
print(e)
print(filename, " : failed ")
file2 = open("PDF_Failedfiles_log.txt", "a")
file2.write("\n")
file2.write(str(filename).replace(".pdf", ""))
file2.close()
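# Note: pdfkit.from_url() shells out to the wkhtmltopdf binary, so wkhtmltopdf
# must be installed (and, on headless machines, the pyvirtualdisplay/Xvfb
# setup above must be active) for the downloads to succeed.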
| 2.734375
| 3
|
edm_web1/script/stat_log.py
|
zhouli121018/nodejsgm
| 0
|
12778784
|
#!/usr/local/pyenv/versions/edm_web/bin/python
# -*- coding: utf-8 -*-
#
"""
Runs every hour:
1. Tencent Exmail: collect addresses that failed delivery or do not exist.
2. Compute real delivery success-rate statistics.
3. Compute each customer's average delivery success rate over their last 10 tasks.
"""
from gevent import monkey
monkey.patch_all()
import gevent
import gevent.pool
import os
import re
import sys
import time
import datetime
import redis
import traceback
import logging
import logging.handlers
from collections import defaultdict
from email.parser import Parser
from newlib import common, filetools, pidfile, db_utils, parse_email, html2text, search_mail_status
from imapclient import IMAPClient
# Global variables
log = None
signal_stop = False
glb_count_task = None
glb_count_send = None
glb_p_date=None
glb_p_hour=None
glb_time_b=None
glb_time_e=None
glb_process_log_list=None
SCRIPT = os.path.realpath(os.path.join(os.path.split(__file__)[0]))
GLB_PG_LOG_DBNAME = 'pgsql_log'
GLB_MY_MS_DBNAME = 'mysql_ms'
COMMON_VAR_COUNT_HASH = 'common_var_count_hash'
# Recipient domain settings
GLB_QQ_DOMAIN = 'qq.com'
GLB_163_DOMAIN = ['163.com', '126.com']
# Domains to compute statistics for
GLB_DOMAINS = ['qq.com', '163.com', '*']
GLB_RE_PATTERN_1 = re.compile(ur'无法发送到\s+((\w|[-+=.])+@\w+([-.]\w+)*\.(\w+))')
GLB_RE_PATTERN_2 = re.compile(ur'不存在')
GLB_RE_PATTERN_3 = re.compile(ur'邮箱空间不足')
GLB_RE_PATTERN_4 = re.compile(ur'said:\s+(.*?\))')
GLB_IMAP_HOSTNAME = 'imap.exmail.qq.com'  # Tencent Exmail IMAP server hostname
redis_pool = redis.ConnectionPool(host="localhost", port=6379, db=0)
redis = redis.StrictRedis(connection_pool=redis_pool)
_loop_dict = lambda : defaultdict(_loop_dict)
############################################################
# Logging setup
def set_logger(log_file, is_screen=True):
global log
log = logging.getLogger('stat_log')
log.setLevel(logging.INFO)
format = logging.Formatter('%(asctime)-15s %(levelname)s %(message)s')
log_handler = logging.handlers.RotatingFileHandler(log_file, 'a', 5000000, 4)
log_handler.setFormatter(format)
log.addHandler(log_handler)
f = open(log_file, 'a')
sys.stdout = f
# sys.stderr = f
if is_screen:
log_handler = logging.StreamHandler()
log_handler.setFormatter(format)
log.addHandler(log_handler)
# Signal handling
def signal_handle(mode):
log.info(u"Catch signal: %s" % mode)
global signal_stop
signal_stop = True
############################################################
# Start / finish handling
# Initialization
def init():
global glb_p_date, glb_p_hour, glb_time_b, glb_time_e, glb_process_log_list, glb_count_task, glb_count_send
task_count = 10
send_count = 100
sql = "SELECT task_count, send_count, domain FROM stat_task_setting LIMIT 1;"
res = common.time_call(db_utils.query, GLB_MY_MS_DBNAME, sql)
if res:
task_count = res[0][0]
send_count = res[0][1]
stat_domain_T = res[0][2]
# Number of tasks to include in the statistics
glb_count_task = task_count
# Only count tasks whose send volume is at least this value
glb_count_send = send_count
t = time.localtime(time.time() - 60 * 60 * 1)
glb_p_date = time.strftime("%Y-%m-%d", t)
glb_p_hour = int(time.strftime("%H", t))
# Build the time range to process from the selected hour
glb_time_b = "%02d:00:00" % glb_p_hour
glb_time_e = "%02d:00:00" % (glb_p_hour + 1)
log.info('StatLog, date: %s, hour: %s, time range: %s - %s' % (glb_p_date, glb_p_hour, glb_time_b, glb_time_e))
glb_process_log_list = []
datfile = os.path.join(SCRIPT, 'data', 'logstat.dat')
if os.path.exists(datfile):
for line in file(datfile).read().strip().split("\n"):
glb_process_log_list.append(line)
# Finish
# Record the processed date/hour slot
def finishProcess():
datfile = os.path.join(SCRIPT, 'data', 'logstat.dat')
# Prepend the date/hour slot to the processed list
glb_process_log_list.insert(0, glb_p_date + ',' + str(glb_p_hour))
# Trim the list if it has more than 80 entries
c_pop = len(glb_process_log_list) - 80
if c_pop > 0:
for i in range(0, c_pop):
glb_process_log_list.pop()
# Save the data to the file
try:
logdata = "\n".join(glb_process_log_list)
file(datfile, 'w').write(logdata)
except Exception, e:
log.error(traceback.format_exc())
return
# Decide whether processing should be skipped
def exitProcess():
# Skip if the slot is the current date/hour
if glb_p_date == time.strftime("%Y-%m-%d") and glb_p_hour == int(time.strftime("%H")):
log.info('StatLog ignore ({},{}), it is the current time slot'.format(glb_p_date, glb_p_hour))
return 2
# Skip if this time slot has already been processed
if '{},{}'.format(glb_p_date, glb_p_hour) in glb_process_log_list:
log.info('StatLog ignore ({},{}), it has been processed'.format(glb_p_date, glb_p_hour))
return 2
if not checkTableExist():
log.info('StatLog: log data table not found, ignored')
return 2
return 1
# Check whether the daily log table exists
def checkTableExist():
tb_name = "maillog_%s" % (glb_p_date.replace('-', ''))
sql = "SELECT tablename FROM pg_tables WHERE tablename='%s';" % tb_name
res = db_utils.query(GLB_PG_LOG_DBNAME, sql)
if not res:
return False
return True
############################################################
# Some test users deliberately include non-existent addresses; mail sent to them through an SMTP account channel produces no error receipt.
# So we post-process the relay platform's SMTP-account delivery logs, making failed and non-existent addresses visible in both the customer and admin interfaces.
def getSmtpData():
tb_name = "maillog_%s" % (glb_p_date.replace('-', ''))
if glb_time_e == '24:00:00':
sql = """
SELECT log_id, smtp_account_id, mail_from, mail_to
FROM %s
WHERE send_time>='%s'
AND is_ok='t' AND return_code='250'
AND customer_id<>'0' AND smtp_account_id is not null;
""" % (tb_name, glb_time_b)
else:
sql = """
SELECT log_id, smtp_account_id, mail_from, mail_to
FROM %s
WHERE send_time>='%s' AND send_time<'%s'
AND is_ok='t' AND return_code='250'
AND customer_id<>'0' AND smtp_account_id is not null;
""" % (tb_name, glb_time_b, glb_time_e)
res = common.time_call(db_utils.query, GLB_PG_LOG_DBNAME, sql)
smtp_dict = {}
for log_id, smtp_account_id, mail_from, mail_to in res:
log_id = int(log_id)
smtp_account_id = int(smtp_account_id)
if smtp_account_id not in smtp_dict:
smtp_dict[smtp_account_id] = []
smtp_dict[smtp_account_id].append(
(log_id, mail_from, mail_to)
)
return smtp_dict
def do_email_text(text, mail_to):
if mail_to not in text:
return None, None
m = GLB_RE_PATTERN_1.search(text)
if m:
email = m.group(1)
if email != mail_to:
return None, None
m2 = GLB_RE_PATTERN_2.search(text)
if m2:
error_type = 1
return_said = 'said: 550 [%s]: Recipient address rejected: Access denied' % mail_to
else:
m3 = GLB_RE_PATTERN_3.search(text)
if m3:
error_type = 2
return_said = 'said: 552 Quota exceeded or service disabled.'
else:
error_type = 3
return_said = 'said: 550 Message was blocked by server'
m4 = GLB_RE_PATTERN_4.search(text)
if m4:
return_said = m4.group()
return error_type, return_said
return None, None
def do_email(msg_content, mail_to):
# Parse out the email message:
msg = Parser().parsestr(msg_content)
email_str = msg.as_string()
p = parse_email.ParseEmail(email_str)
m = p.parseMailTemplate()
try:
text = html2text.beautifulsoup_html2text(
m.get('html_text', '').decode(
m.get('html_charset', 'utf-8'), errors='ignore'
).encode('utf-8')
)
return do_email_text(text, mail_to)
except:
return None, None
def do_worker_imap_update(log_id, mail_to, return_said, error_type, mode='qq'):
return_code = 450 if error_type==1 else 550
tb_name = "maillog_%s" % (glb_p_date.replace('-', ''))
sql = u"UPDATE {} SET return_code=%s, return_said=%s, error_type=%s, is_ok='f' WHERE log_id=%s;".format(tb_name)
args = (return_code, return_said, error_type, log_id)
res = common.time_call(db_utils.do, GLB_PG_LOG_DBNAME, sql, args)
log.info(u'woker_imap {} log_id={}, mail_to={}, error_type={}, return_code={}, return_said={}'.format(mode, log_id, mail_to, error_type, return_code, return_said))
# less stat_log.log | grep 'woker_imap log_id'
return
def do_worker_qq(smtp_account, smtp_password, smtp_list):
# Connect to the mail server as follows; exceptions are not handled here, see the official documentation for details
server = IMAPClient(GLB_IMAP_HOSTNAME, ssl= True)
try:
server.login(smtp_account, smtp_password)
server.select_folder('INBOX')
year, mouth, day = glb_p_date.split('-')
date = datetime.date(int(year), int(mouth), int(day))
date2 = date - datetime.timedelta(days=1)
result = server.search(['UNSEEN', 'SINCE', date2])
msgdict = server.fetch(result, ['BODY.PEEK[]'] )
msgtuple=sorted(msgdict.items(), key=lambda e:e[0], reverse=True)
log_ids = []
for message_id, message in msgtuple:
msg_content = message['BODY[]']
for log_id, mail_from, mail_to in smtp_list:
if log_id in log_ids: continue
error_type, return_said = do_email(msg_content, mail_to)
if error_type is not None:
log_ids.append(log_id)
do_worker_imap_update(log_id, mail_to, return_said, error_type)
server.add_flags(message_id, '\\seen')
finally:
server.logout()
return
# ------------------------------------
def do_email_mailrelay(mail_to, mail_from, response_code, response_text):
if response_code==200:
for res in response_text['return_list']:
if ( res['state'] == 'fail_finished' or res['state'] == 'reject' ) and res['mail_from'] == mail_from and res['mail_to'] == mail_to:
if res['error_type'] == 2:
error_type = 1
elif res['error_type'] == 4:
error_type = 2
else:
error_type = 3
deliver_ip = res['deliver_ip'] if res['deliver_ip'] else ''
return_said = u'[{}]:{} {}'.format(deliver_ip, res['deliver_time'], res['return_message'])
return error_type, return_said
return None, None
def connect_mailrelay(url):
retry = 10
while retry:
try:
response_code, response_text = search_mail_status.Client(url).get_asset()
return response_code, response_text
except BaseException as e:
log.error('connect_mailrelay url={}, retry={}'.format(url, retry))
log.error(traceback.format_exc())
retry -= 1
gevent.sleep(3)
return None
def do_worker_mailrelay(smtp_list):
if smtp_list:
mail_to_list = ','.join([res[2] for res in smtp_list])
url = 'http://admin.mailrelay.cn/api_search/mail_status/?mail_to_list={}&search_date={}&search_hour={}&state={}'.format(
mail_to_list, glb_p_date, glb_p_hour, 'fail_finished,reject'
)
res = connect_mailrelay(url)
if res is None:
log.error(u'do_worker_mailrelay smtp_list={}'.format(smtp_list))
return
response_code, response_text = res
for log_id, mail_from, mail_to in smtp_list:
error_type, return_said = do_email_mailrelay(mail_to, mail_from, response_code, response_text)
if error_type is not None:
do_worker_imap_update(log_id, mail_to, return_said, error_type, mode='mailrelay')
return
def do_woker_imap(smtp_account_id, smtp_list):
sql = """
SELECT aa.account, aa.password, bb.smtp_server
FROM core_mss_account aa
INNER JOIN core_mss_server bb ON bb.type_id=aa.type_id
WHERE aa.id=%d AND bb.smtp_server IN ('smtp.exmail.qq.com', 'smtp.mailrelay.cn')
LIMIT 1;
""" % (smtp_account_id, )
res = common.time_call(db_utils.query, GLB_MY_MS_DBNAME, sql)
if not res: return
smtp_account, smtp_password, smtp_server = res[0]
if smtp_server == 'smtp.mailrelay.cn':
do_worker_mailrelay(smtp_list)
elif smtp_server == 'smtp.exmail.qq.com':
do_worker_qq(smtp_account, smtp_password, smtp_list)
return
def woker_imap():
smtp_dict = getSmtpData()
pool = gevent.pool.Pool(10)
for smtp_account_id in smtp_dict:
smtp_list = smtp_dict[smtp_account_id]
pool.spawn(do_woker_imap, smtp_account_id, smtp_list)
pool.join()
return
############################################################
# Processing functions
# Aggregate delivery-failure data for the given date
def statErrorCountByDate():
# Fetch the aggregated data from the database
tb_name = "maillog_%s" % (glb_p_date.replace('-', ''))
if glb_time_e == '24:00:00':
sql = """
WITH stat_domain_tmp AS(
SELECT customer_id, task_ident, recv_domain, error_type, is_ok, COUNT(*) as stat_count
FROM %s
WHERE send_time>='%s' AND customer_id<>'0' AND COALESCE(recv_domain, '') != ''
GROUP BY customer_id, task_ident, recv_domain, error_type, is_ok
),
stat_domain AS (
SELECT customer_id, task_ident, error_type, is_ok, stat_count,
CASE WHEN recv_domain ='%s' THEN 'qq.com'
WHEN recv_domain IN %s THEN '163.com'
ELSE '*'
END AS recv_domain
FROM stat_domain_tmp
)
SELECT customer_id, task_ident, error_type, is_ok, SUM(stat_count) AS stat_count, recv_domain
FROM stat_domain
GROUP BY customer_id, task_ident, error_type, is_ok, recv_domain
ORDER BY customer_id, task_ident
""" % (tb_name, glb_time_b, GLB_QQ_DOMAIN, str(tuple(GLB_163_DOMAIN)))
else:
sql = """
WITH stat_domain_tmp AS(
SELECT customer_id, task_ident, recv_domain, error_type, is_ok, COUNT(*) as stat_count
FROM %s
WHERE send_time>='%s' AND send_time<'%s' AND customer_id<>'0' AND COALESCE(recv_domain, '') != ''
GROUP BY customer_id, task_ident, recv_domain, error_type, is_ok
),
stat_domain AS (
SELECT customer_id, task_ident, error_type, is_ok, stat_count,
CASE WHEN recv_domain ='%s' THEN 'qq.com'
WHEN recv_domain IN %s THEN '163.com'
ELSE '*'
END AS recv_domain
FROM stat_domain_tmp
)
SELECT customer_id, task_ident, error_type, is_ok, SUM(stat_count) AS stat_count, recv_domain
FROM stat_domain
GROUP BY customer_id, task_ident, error_type, is_ok, recv_domain
ORDER BY customer_id, task_ident
""" % (tb_name, glb_time_b, glb_time_e, GLB_QQ_DOMAIN, str(tuple(GLB_163_DOMAIN)))
res = common.time_call(db_utils.query, GLB_PG_LOG_DBNAME, sql)
# Assemble the data
# stat_data = {}
user_list = []
stat_data = _loop_dict()
for row in res:
customer_id = int(row[0])
# Initialize per-domain statistics
t_ident = row[1]
if not t_ident: continue
recv_domain= row[5]
# Tally the counters
count_err_1, count_err_2, count_err_3, count_err_5, count_success = 0, 0, 0, 0, 0
if row[2] == 1:
count_err_1 = row[4]
if row[2] == 2:
count_err_2 = row[4]
if row[2] in [0, 3, 4, 6]:
count_err_3 = row[4]
if row[2] == 5:
count_err_5 = row[4]
if row[3]:
count_success = row[4]
_D = stat_data[customer_id][t_ident][recv_domain]
_D['count_err_1'] = _D.get('count_err_1', 0) + count_err_1
_D['count_err_2'] = _D.get('count_err_2', 0) + count_err_2
_D['count_err_3'] = _D.get('count_err_3', 0) + count_err_3
_D['count_err_5'] = _D.get('count_err_5', 0) + count_err_5
_D['count_success'] = _D.get('count_success', 0) + count_success
# Collect the customer list
if customer_id not in user_list:
user_list.append(customer_id)
return stat_data, user_list
############################################################
# Real delivery statistics
# Insert delivery-failure statistics into the MySQL database
def do_worker_stat(user_id, task_ident, recv_domain, detail_data):
count_success = detail_data['count_success']
count_error = detail_data['count_err_1'] + detail_data['count_err_2'] + detail_data['count_err_3'] + detail_data['count_err_5']
log.info(u'worker_stat user_id={}, task_ident={}, domain={}, sned={}, success={}, err_5={}'.format(
user_id, task_ident, recv_domain, count_success + count_error, count_success, detail_data['count_err_5']
))
t = time.localtime(time.time())
now = time.strftime("%Y-%m-%d %H:%M:%S", t)
sql = """
INSERT INTO stat_task_real (customer_id, task_ident, domain, count_send, count_error, count_err_1, count_err_2, count_err_3, count_err_5, created, updated)
VALUES (%d, '%s', '%s', %d, %d, %d, %d, %d, %d, '%s', '%s')
ON DUPLICATE KEY UPDATE
count_send=count_send + VALUES(count_send),
count_error=count_error + VALUES(count_error),
count_err_1=count_err_1 + VALUES(count_err_1),
count_err_2=count_err_2 + VALUES(count_err_2),
count_err_3=count_err_3 + VALUES(count_err_3),
count_err_5=count_err_5 + VALUES(count_err_5),
updated=VALUES(updated);
""" % (
user_id, task_ident, recv_domain,
count_success + count_error, count_error,
detail_data['count_err_1'], detail_data['count_err_2'],
detail_data['count_err_3'], detail_data['count_err_5'],
now, now
)
common.time_call(db_utils.do, GLB_MY_MS_DBNAME, sql)
return
def worker_stat(stat_data):
pool = gevent.pool.Pool(20)
for user_id in stat_data:
for task_ident in stat_data[user_id]:
for recv_domain in stat_data[user_id][task_ident]:
pool.spawn(do_worker_stat, user_id, task_ident, recv_domain, stat_data[user_id][task_ident][recv_domain])
gevent.sleep(0.01)
pool.join()
return
############################################################
# Compute each customer's average delivery success rate over their last 10 tasks
def do_worker_task_update(user_id, domain, count_succ, count_succ_rej, count_send, now):
avg_score = round( count_succ*100.00 / count_send, 2) if count_send else 0
avg_score_s = round( count_succ*100.00 / count_succ_rej, 2) if count_succ_rej else 0
sql = """
INSERT INTO core_customer_score (customer_id, domain, score, score_s, created, updated)
VALUES (%d, '%s', %.2f, %.2f, '%s', '%s')
ON DUPLICATE KEY UPDATE
score=VALUES(score),
score_s=VALUES(score_s),
updated=VALUES(updated);
""" % (
user_id, domain, avg_score, avg_score_s, now, now
)
common.time_call(db_utils.do, GLB_MY_MS_DBNAME, sql)
log.info(u'worker_task user_id={}, domain={}, avg_score={}, avg_score_s={}'.format(user_id, domain, avg_score, avg_score_s))
return
def do_worker_task(user_id):
log.info(u'worker_task user_id={}'.format(user_id))
t = time.localtime(time.time())
now = time.strftime("%Y-%m-%d %H:%M:%S", t)
sql = """
SELECT count_send, count_error, count_err_5, domain
FROM stat_task_real
WHERE customer_id=%d AND count_send > %d
ORDER BY updated DESC
LIMIT %d
""" % (user_id, glb_count_send, glb_count_task)
res = common.time_call(db_utils.query, GLB_MY_MS_DBNAME, sql)
    domain_score = defaultdict(list)
for count_send, count_error, count_err_5, domain in res:
        # Successful sends
count_succ = count_send - count_error
        # Excluding rejected sends
count_succ_rej = count_send - count_err_5
        domain_score[domain].append(
(count_succ, count_succ_rej, count_send)
)
count_send_all = 0
count_succ_all = 0
count_succ_rej_all = 0
    for domain, score_T in domain_score.iteritems():
count_succ_sum = float(sum( [i[0] for i in score_T] ))
count_succ_rej_sum = float(sum( [i[1] for i in score_T] ))
count_send_sum = float(sum( [i[2] for i in score_T] ))
count_succ_all += count_succ_sum
count_succ_rej_all += count_succ_rej_sum
count_send_all += count_send_sum
if domain == '*': continue
do_worker_task_update(user_id, domain, count_succ_sum, count_succ_rej_sum, count_send_sum, now)
    if domain_score:
domain = '*'
do_worker_task_update(user_id, domain, count_succ_all, count_succ_rej_all, count_send_all, now)
return
def worker_task(user_list):
pool = gevent.pool.Pool(5)
for user_id in user_list:
pool.spawn(do_worker_task, user_id)
gevent.sleep(0.01)
pool.join()
return
############################################################
# Redis service: count the number of customers in each score range
def do_redis_count_s(start, end, domain):
sql = "SELECT COUNT(*) FROM core_customer_score WHERE score_s>=%d AND score_s<%d AND domain='%s';" % ( start, end, domain )
res = common.time_call(db_utils.query, GLB_MY_MS_DBNAME, sql)
return res[0][0]
def do_redis_count(start, end, domain):
sql = "SELECT COUNT(*) FROM core_customer_score WHERE score>=%d AND score<%d AND domain='%s';" % ( start, end, domain )
res = common.time_call(db_utils.query, GLB_MY_MS_DBNAME, sql)
return res[0][0]
def do_worker_redis_count(domain, start, end):
score_key = 'score_{}_{}'.format(domain.replace('.', '_'), start)
score_s_key = 'score_s_{}_{}'.format(domain.replace('.', '_'), start)
count = do_redis_count(start, end, domain)
redis.hset( 'edm_web_core_customer_score', key=score_key, value='{}'.format(count) )
count_s = do_redis_count_s(start, end, domain)
redis.hset( 'edm_web_core_customer_score', key=score_s_key, value='{}'.format(count_s) )
log.info('worker_redis range: {}-{}, domain: {}, count: {}, count_s: {}'.format( start, end, domain, count, count_s))
return
def do_worker_redis_special(domain, start, end):
for start_T in range(start, end, 2):
end_T = start_T + 2
do_worker_redis_count(domain, start_T, end_T)
return
def do_worker_redis(domain, start, end):
if start == 90:
do_worker_redis_special(domain, start, end)
return
do_worker_redis_count(domain, start, end)
return
def worker_redis():
pool = gevent.pool.Pool(10)
for start in range(0, 100, 10):
for domain in GLB_DOMAINS:
end = start + 10
pool.spawn(do_worker_redis, domain, start, end)
pool.join()
return
############################################################
# Main function
def main():
init()
status = exitProcess()
if status == 2: return
log.info('start woker_imap.....')
woker_imap()
log.info('finish woker_imap.....')
gevent.sleep(0.01)
stat_data, user_list = statErrorCountByDate()
log.info('start worker_stat.....')
worker_stat(stat_data)
log.info('finish worker_stat.....')
gevent.sleep(0.01)
log.info('start worker_task.....')
worker_task(user_list)
log.info('finish worker_task.....')
gevent.sleep(0.01)
log.info('start worker_redis.....')
worker_redis()
log.info('finish worker_redis.....')
finishProcess()
return
if __name__ == "__main__":
log_dir = os.path.join(SCRIPT, 'log')
pid_dir = os.path.join(SCRIPT, 'pid')
data_dir = os.path.join(SCRIPT, 'data')
filetools.make_dir([log_dir, pid_dir, data_dir])
log_file = os.path.join(log_dir, 'stat_log.log')
set_logger(log_file)
pid_file = os.path.join(pid_dir, 'stat_log.pid')
pidfile.register_pidfile(pid_file)
log.info(u'program start...')
t1 = time.time()
EXIT_CODE = 0
try:
main()
except KeyboardInterrupt:
signal_handle('sigint')
except:
log.error(traceback.format_exc())
EXIT_CODE = 1
log.info(u"program spend total time: {}".format(time.time()-t1))
log.info(u"program quit...")
sys.exit(EXIT_CODE)
| 1.742188
| 2
|
chatbot_2/inference.py
|
gustasvs/AI
| 1
|
12778785
|
# https://github.com/tensorflow/examples/blob/master/community/en/transformer_chatbot.ipynb
import tensorflow as tf
# assert tf.__version__.startswith('2')
tf.random.set_seed(1234)
import tensorflow_datasets as tfds
import os
import re
import numpy as np
import matplotlib.pyplot as plt
import pickle
from functions import *
from hparams import *
with open('tokenizer/tokenizer.pickle', 'rb') as handle:
tokenizer = pickle.load(handle)
with open("tokenizer/START_TOKEN", "r") as f:
START_TOKEN = [int(f.read())]
with open("tokenizer/END_TOKEN", "r") as f:
END_TOKEN = [int(f.read())]
with open("tokenizer/VOCAB_SIZE", "r") as f:
VOCAB_SIZE = int(f.read())
def evaluate(sentence):
sentence = preprocess_sentence(sentence)
sentence = tf.expand_dims(
START_TOKEN + tokenizer.encode(sentence) + END_TOKEN, axis=0)
output = tf.expand_dims(START_TOKEN, 0)
for i in range(MAX_LENGTH):
predictions = model(inputs=[sentence, output], training=False)
# select the last word from the seq_len dimension
predictions = predictions[:, -1:, :]
predicted_id = tf.cast(tf.argmax(predictions, axis=-1), tf.int32)
# return the result if the predicted_id is equal to the end token
if tf.equal(predicted_id, END_TOKEN[0]):
break
        # concatenate the predicted_id to the output, which is given to the decoder
        # as its input
output = tf.concat([output, predicted_id], axis=-1)
return tf.squeeze(output, axis=0)
def predict(sentence):
prediction = evaluate(sentence)
predicted_sentence = tokenizer.decode(
[i for i in prediction if i < tokenizer.vocab_size])
return predicted_sentence
def create_model():
model = transformer(
vocab_size=VOCAB_SIZE,
num_layers=NUM_LAYERS,
units=UNITS,
d_model=D_MODEL,
num_heads=NUM_HEADS,
dropout=DROPOUT)
learning_rate = CustomSchedule(D_MODEL)
optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)
model.compile(optimizer=optimizer, loss=loss_function, metrics=[accuracy])
return model
model = create_model()
checkpoint_path = "model/cp.ckpt"
model.load_weights(checkpoint_path)
while True:
question = input("\n--> ")
    if not question:
continue
output = predict(question)
print(output)
| 2.9375
| 3
|
tests/test_cli_bulk.py
|
eyeseast/sqlite-utils
| 0
|
12778786
|
<filename>tests/test_cli_bulk.py
from click.testing import CliRunner
from sqlite_utils import cli, Database
import pathlib
import pytest
import subprocess
import sys
import time
@pytest.fixture
def test_db_and_path(tmpdir):
db_path = str(pathlib.Path(tmpdir) / "data.db")
db = Database(db_path)
db["example"].insert_all(
[
{"id": 1, "name": "One"},
{"id": 2, "name": "Two"},
],
pk="id",
)
return db, db_path
def test_cli_bulk(test_db_and_path):
db, db_path = test_db_and_path
result = CliRunner().invoke(
cli.cli,
[
"bulk",
db_path,
"insert into example (id, name) values (:id, :name)",
"-",
"--nl",
],
input='{"id": 3, "name": "Three"}\n{"id": 4, "name": "Four"}\n',
)
assert result.exit_code == 0, result.output
assert [
{"id": 1, "name": "One"},
{"id": 2, "name": "Two"},
{"id": 3, "name": "Three"},
{"id": 4, "name": "Four"},
] == list(db["example"].rows)
def test_cli_bulk_batch_size(test_db_and_path):
db, db_path = test_db_and_path
proc = subprocess.Popen(
[
sys.executable,
"-m",
"sqlite_utils",
"bulk",
db_path,
"insert into example (id, name) values (:id, :name)",
"-",
"--nl",
"--batch-size",
"2",
],
stdin=subprocess.PIPE,
stdout=sys.stdout,
)
# Writing one record should not commit
proc.stdin.write(b'{"id": 3, "name": "Three"}\n\n')
proc.stdin.flush()
time.sleep(1)
assert db["example"].count == 2
# Writing another should trigger a commit:
proc.stdin.write(b'{"id": 4, "name": "Four"}\n\n')
proc.stdin.flush()
time.sleep(1)
assert db["example"].count == 4
proc.stdin.close()
proc.wait()
assert proc.returncode == 0
def test_cli_bulk_error(test_db_and_path):
_, db_path = test_db_and_path
result = CliRunner().invoke(
cli.cli,
[
"bulk",
db_path,
"insert into example (id, name) value (:id, :name)",
"-",
"--nl",
],
input='{"id": 3, "name": "Three"}',
)
assert result.exit_code == 1
assert result.output == 'Error: near "value": syntax error\n'
| 2.28125
| 2
|
output/models/nist_data/atomic/positive_integer/schema_instance/nistschema_sv_iv_atomic_positive_integer_white_space_1_xsd/__init__.py
|
tefra/xsdata-w3c-tests
| 1
|
12778787
|
<gh_stars>1-10
from output.models.nist_data.atomic.positive_integer.schema_instance.nistschema_sv_iv_atomic_positive_integer_white_space_1_xsd.nistschema_sv_iv_atomic_positive_integer_white_space_1 import NistschemaSvIvAtomicPositiveIntegerWhiteSpace1
__all__ = [
"NistschemaSvIvAtomicPositiveIntegerWhiteSpace1",
]
| 1.148438
| 1
|
Part1_Classification_VectorSpaces/C1_W2_lecture_nb_01_visualizing_naive_bayes.py
|
picsag/NLP
| 0
|
12778788
|
#!/usr/bin/env python
# coding: utf-8
# # Visualizing Naive Bayes
#
# In this lab, we will cover an essential part of data analysis that has not been included in the lecture videos. As we stated in the previous module, data visualization gives insight into the expected performance of any model.
#
# In the following exercise, you are going to make a visual inspection of the tweets dataset using the Naïve Bayes features. We will see how we can understand the log-likelihood ratio explained in the videos as a pair of numerical features that can be fed into a machine learning algorithm.
#
# At the end of this lab, we will introduce the concept of __confidence ellipse__ as a tool for representing the Naïve Bayes model visually.
# In[1]:
import numpy as np # Library for linear algebra and math utils
import pandas as pd # Dataframe library
import matplotlib.pyplot as plt # Library for plots
from utils import confidence_ellipse # Function to add confidence ellipses to charts
# ## Calculate the likelihoods for each tweet
#
# For each tweet, we have calculated the likelihood of the tweet to be positive and the likelihood to be negative. We have calculated in different columns the numerator and denominator of the likelihood ratio introduced previously.
#
# $$log \frac{P(tweet|pos)}{P(tweet|neg)} = log(P(tweet|pos)) - log(P(tweet|neg)) $$
# $$positive = log(P(tweet|pos)) = \sum_{i=0}^{n}{log P(W_i|pos)}$$
# $$negative = log(P(tweet|neg)) = \sum_{i=0}^{n}{log P(W_i|neg)}$$
#
# We did not include the code because this is part of this week's assignment. The __'bayes_features.csv'__ file contains the final result of this process.
#
# The cell below loads the table in a dataframe. Dataframes are data structures that simplify the manipulation of data, allowing filtering, slicing, joining, and summarization.
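# As a quick, self-contained sketch (not part of the original lab, and using made-up
# log-probabilities rather than the ones computed in the assignment), the two features
# for a tokenized tweet could be accumulated like this:
log_p_w_pos = {'happi': -3.2, 'sad': -7.1}  # assumed values for log P(w|pos)
log_p_w_neg = {'happi': -6.5, 'sad': -2.9}  # assumed values for log P(w|neg)
def naive_bayes_features(tokens, unk=-10.0):
    """Return the (positive, negative) log-likelihood features for a tokenized tweet."""
    positive = sum(log_p_w_pos.get(w, unk) for w in tokens)
    negative = sum(log_p_w_neg.get(w, unk) for w in tokens)
    return positive, negative
naive_bayes_features(['happi', 'sad'])  # approximately (-10.3, -9.4) with the values above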
# In[2]:
data = pd.read_csv('data/bayes_features.csv') # Load the data from the csv file
data.head(5) # Print the first 5 tweets features. Each row represents a tweet
# In[3]:
# Plot the samples using columns 1 and 2 of the matrix
fig, ax = plt.subplots(figsize = (8, 8)) #Create a new figure with a custom size
colors = ['red', 'green'] # Define a color palette
sentiments = ['negative', 'positive']
index = data.index
# Color based on sentiment
for sentiment in data.sentiment.unique():
ix = index[data.sentiment == sentiment]
ax.scatter(data.iloc[ix].positive, data.iloc[ix].negative, c=colors[int(sentiment)], s=0.1, marker='*', label=sentiments[int(sentiment)])
ax.legend(loc='best')
# Custom limits for this chart
plt.xlim(-250,0)
plt.ylim(-250,0)
plt.xlabel("Positive") # x-axis label
plt.ylabel("Negative") # y-axis label
plt.show()
# # Using Confidence Ellipses to interpret Naïve Bayes
#
# In this section, we will use the [confidence ellipse]( https://matplotlib.org/3.1.1/gallery/statistics/confidence_ellipse.html#sphx-glr-gallery-statistics-confidence-ellipse-py) to give us an idea of what the Naïve Bayes model sees.
#
# A confidence ellipse is a way to visualize a 2D random variable. It is a better way than plotting the points over a Cartesian plane because, with big datasets, the points can overlap badly and hide the real distribution of the data. Confidence ellipses summarize the information of the dataset with only four parameters:
#
# * Center: The numerical mean of the attributes.
# * Height and width: Related to the variance of each attribute. The user must specify the desired number of standard deviations used to plot the ellipse.
# * Angle: Related to the covariance among attributes.
#
# The parameter __n_std__ stands for the number of standard deviations bounded by the ellipse. Remember that for normal random distributions:
#
# * About 68% of the area under the curve falls within 1 standard deviation around the mean.
# * About 95% of the area under the curve falls within 2 standard deviations around the mean.
# * About 99.7% of the area under the curve falls within 3 standard deviations around the mean.
#
# <img src=./images/std.jpg width="400" >
#
#
# In the next chart, we will plot the data and its corresponding confidence ellipses using 2 std and 3 std.
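# As a rough illustration (an assumption about how such an ellipse can be derived, not
# the actual code of the `confidence_ellipse` helper imported from utils), the four
# parameters can be read off the sample covariance matrix:
def ellipse_params(x, y, n_std=2.0):
    """Return (center, width, height, angle_deg) of an n_std confidence ellipse."""
    cov = np.cov(x, y)
    eigvals, eigvecs = np.linalg.eigh(cov)                # eigenvalues in ascending order
    width, height = 2 * n_std * np.sqrt(eigvals[::-1])    # major axis first
    angle = np.degrees(np.arctan2(*eigvecs[:, 1][::-1]))  # orientation of the major axis
    return (np.mean(x), np.mean(y)), width, height, angle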
# In[ ]:
# Plot the samples using columns 1 and 2 of the matrix
fig, ax = plt.subplots(figsize = (8, 8))
colors = ['red', 'green'] # Define a color palette
sentiments = ['negative', 'positive']
index = data.index
# Color based on sentiment
for sentiment in data.sentiment.unique():
ix = index[data.sentiment == sentiment]
ax.scatter(data.iloc[ix].positive, data.iloc[ix].negative, c=colors[int(sentiment)], s=0.1, marker='*', label=sentiments[int(sentiment)])
# Custom limits for this chart
plt.xlim(-200, 40)
plt.ylim(-200, 40)
plt.xlabel("Positive") # x-axis label
plt.ylabel("Negative") # y-axis label
data_pos = data[data.sentiment == 1] # Filter only the positive samples
data_neg = data[data.sentiment == 0] # Filter only the negative samples
# Print confidence ellipses of 2 std
confidence_ellipse(data_pos.positive, data_pos.negative, ax, n_std=2, edgecolor='black', label=r'$2\sigma$' )
confidence_ellipse(data_neg.positive, data_neg.negative, ax, n_std=2, edgecolor='orange')
# Print confidence ellipses of 3 std
confidence_ellipse(data_pos.positive, data_pos.negative, ax, n_std=3, edgecolor='black', linestyle=':', label=r'$3\sigma$')
confidence_ellipse(data_neg.positive, data_neg.negative, ax, n_std=3, edgecolor='orange', linestyle=':')
ax.legend(loc='lower right')
plt.show()
# In the next cell, we will modify the features of the samples with positive sentiment (1), in a way that the two distributions overlap. In this case, the Naïve Bayes method will produce a lower accuracy than with the original data.
# In[ ]:
data2 = data.copy() # Copy the whole data frame
# The following 2 lines only modify the entries in the data frame where sentiment == 1
data2.negative[data.sentiment == 1] = data2.negative * 1.5 + 50 # Modify the negative attribute
data2.positive[data.sentiment == 1] = data2.positive / 1.5 - 50 # Modify the positive attribute
# Now let us plot the two distributions and the confidence ellipses
# In[ ]:
# Plot the samples using columns 1 and 2 of the matrix
fig, ax = plt.subplots(figsize = (8, 8))
colors = ['red', 'green'] # Define a color palette
sentiments = ['negative', 'positive']
index = data2.index
# Color based on sentiment
for sentiment in data2.sentiment.unique():
ix = index[data2.sentiment == sentiment]
ax.scatter(data2.iloc[ix].positive, data2.iloc[ix].negative, c=colors[int(sentiment)], s=0.1, marker='*', label=sentiments[int(sentiment)])
#ax.scatter(data2.positive, data2.negative, c=[colors[int(k)] for k in data2.sentiment], s = 0.1, marker='*') # Plot a dot for tweet
# Custom limits for this chart
plt.xlim(-200,40)
plt.ylim(-200,40)
plt.xlabel("Positive") # x-axis label
plt.ylabel("Negative") # y-axis label
data_pos = data2[data2.sentiment == 1] # Filter only the positive samples
data_neg = data2[data2.sentiment == 0] # Filter only the negative samples
# Print confidence ellipses of 2 std
confidence_ellipse(data_pos.positive, data_pos.negative, ax, n_std=2, edgecolor='black', label=r'$2\sigma$' )
confidence_ellipse(data_neg.positive, data_neg.negative, ax, n_std=2, edgecolor='orange')
# Print confidence ellipses of 3 std
confidence_ellipse(data_pos.positive, data_pos.negative, ax, n_std=3, edgecolor='black', linestyle=':', label=r'$3\sigma$')
confidence_ellipse(data_neg.positive, data_neg.negative, ax, n_std=3, edgecolor='orange', linestyle=':')
ax.legend(loc='lower right')
plt.show()
# Takeaway: Understanding the data allows us to predict whether the method will perform well or not. Alternatively, it allows us to understand why it worked well or poorly.
| 4.34375
| 4
|
scripts/csv2json.py
|
C0deAi/parkfinder-backend
| 2
|
12778789
|
import sys
import csv
import json
def main(data_csv, outfile='out.json'):
with open(data_csv, 'r', encoding='utf-8-sig') as datafile:
reader = csv.DictReader(datafile)
output = {
'parks': [dict(row) for row in reader],
}
with open(outfile, 'w') as out:
json.dump(output, out)
if __name__ == '__main__':
infile, outfile = sys.argv[1], sys.argv[2]
print('Writing data from {} to {}'.format(infile, outfile))
main(sys.argv[1], sys.argv[2])
| 3.390625
| 3
|
src/301-350/P323.py
|
lord483/Project-Euler-Solutions
| 0
|
12778790
|
<reponame>lord483/Project-Euler-Solutions
import numpy as np
from time import time
from numba import jit
# @jit
def solve(times):
upper = 2**32 - 1
cnts = 0
m = 0
for _ in range(times):
cnt = 0
r = 0
random_block = np.random.randint(
low=0, high=upper, size=40, dtype="uint32")
while (r != upper):
r = r | random_block[cnt]
cnt += 1
m = cnt if cnt > m else m
cnts += cnt
print("Max : ", m, " steps: ", times)
return (cnts * 1.0) / times
if __name__ == "__main__":
t0 = time()
res = solve(times=10**7)
print("Final Result : {:.10f} . Time taken : {:.2f} secs.".format(
res, time() - t0))
| 3.109375
| 3
|
setup.py
|
thierry-tct/muteria
| 1
|
12778791
|
<reponame>thierry-tct/muteria
#
#> python3 -m pip install --user --upgrade setuptools wheel
#> python3 -m pip install --user --upgrade twine
#
#> python3 setup.py sdist bdist_wheel
#> python3 -m twine upload dist/muteria-<version>.tar.gz
#> rm -rf dist build muteria.egg-info __pycache__/
#
import os
from setuptools import setup, find_packages
thisdir = os.path.dirname(os.path.abspath(__file__))
# get __version__, _framework_name
exec(open(os.path.join(thisdir, 'muteria', '_version.py')).read())
def get_long_description():
with open(os.path.join(thisdir, "README.md"), "r") as fh:
long_description = fh.read()
return long_description
def get_requirements_list():
"""
requirements_list = []
with open(os.path.join(thisdir, "requirements.txt"), "r") as fh:
for line in fh:
req = line.strip().split()
if len(req) > 0 and not req[0].startswith('#'):
requirements_list.append(req[0])
"""
requirements_list = [
"numpy",
"pandas",
"scipy",
"matplotlib",
"networkx",
"gitpython",
"tqdm",
"joblib",
"jinja2",
#docker # https://docker-py.readthedocs.io/en/stable/index.html
#sh # easy subprocess creation
#enum #(python 2.7)
# SERVER
#"flask",
#"flask_socketio"
]
return requirements_list
setup(
# This is the name of your PyPI-package.
name=_framework_name,
python_requires='>3.3.0',
# Update the version number for new releases
version=__version__,
description='Software Analysis and Testing Framework',
long_description=get_long_description(),
long_description_content_type='text/markdown',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/muteria/muteria',
packages = ['muteria'],
#packages=find_packages(),
#py_modules = ['_version'],
include_package_data=True,
install_requires = get_requirements_list(),
# The name of your scipt, and also the command you'll be using for calling it
#scripts=['cli/muteria'],
entry_points={
'console_scripts': [
'muteria=muteria.cli.cli:main',
],
},
classifiers=[
'Development Status :: 3 - Alpha',
'Natural Language :: English',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Topic :: Software Development :: Testing',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Operating System :: MacOS',
'Operating System :: POSIX :: Linux',
'Operating System :: POSIX :: BSD',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
]
)
| 1.703125
| 2
|
attendance_generator.py
|
innovator-creator-maker/Face-Recognition-Attendance-System
| 0
|
12778792
|
# Recognise Faces using some classification algorithm - like Logistic, KNN, SVM etc.
# 1. load the training data (numpy arrays of all the persons)
# x- values are stored in the numpy arrays
# y-values we need to assign for each person
# 2. Read a video stream using opencv
# 3. extract faces out of it
# 4. use knn to find the prediction of face (int)
# 5. map the predicted id to name of the user
# 6. Display the predictions on the screen - bounding box and name
import cv2
import numpy as np
import os
from datetime import datetime
import time
########## KNN CODE ############
def distance(v1, v2):
    # Euclidean
return np.sqrt(((v1 - v2) ** 2).sum())
def markAttendence(name):
with open('present.csv', 'r+') as f:
total_student_in_class = f.readline()
print(total_student_in_class)
nameList = []
absstuds = []
for line in total_student_in_class:
entry = line.split(',')
nameList.append(entry[0])
if name not in nameList:
now = datetime.now()
dtString = now.strftime('%H:%M:%S')
f.writelines(f'\nthe present students are : \n{name},{dtString}')
def maarkattndnce(namees):
with open('absent.csv', 'r+') as f:
absstuds = []
for nam in total_student_in_class:
if nam not in class_total_present:
entry = nam.split(',')
absstuds.append(entry[0])
if namees not in absstuds:
f.writelines(f'\nabsent students are : \n{absstuds}')
def knn(train, test, k=5):
dist = []
for i in range(train.shape[0]):
# Get the vector and label
ix = train[i, :-1]
iy = train[i, -1]
# Compute the distance from test point
d = distance(test, ix)
dist.append([d, iy])
# Sort based on distance and get top k
dk = sorted(dist, key=lambda x: x[0])[:k]
# Retrieve only the labels
labels = np.array(dk)[:, -1]
# Get frequencies of each label
output = np.unique(labels, return_counts=True)
# Find max frequency and corresponding label
index = np.argmax(output[1])
return output[0][index]
################################
# Init Camera
cap = cv2.VideoCapture(0)
# Face Detection
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
skip = 0
dataset_path = "C:/Users/Samarth/Desktop/knn/data/"
face_data = []
number = []
labels = []
class_id = 0 # Labels for the given file
names = {} # Mapping btw id - name
# Data Preparation
for fx in os.listdir(dataset_path):
if fx.endswith('.npy'):
# Create a mapping btw class_id and name
names[class_id] = fx[:-4]
print("Loaded " + fx)
data_item = np.load(dataset_path + fx)
face_data.append(data_item)
# Create Labels for the class
target = class_id * np.ones((data_item.shape[0],))
class_id += 1
labels.append(target)
face_dataset = np.concatenate(face_data, axis=0)
face_labels = np.concatenate(labels, axis=0).reshape((-1, 1))
print(face_dataset.shape)
print(face_labels.shape)
trainset = np.concatenate((face_dataset, face_labels), axis=1)
print(trainset.shape)
# Testing
attn = []
appn = []
while True:
ret, frame = cap.read()
if ret == False:
continue
faces = face_cascade.detectMultiScale(frame, 1.3, 5)
if (len(faces) == 0):
continue
for face in faces:
x, y, w, h = face
# Get the face ROI
offset = 10
face_section = frame[y - offset:y + h + offset, x - offset:x + w + offset]
face_section = cv2.resize(face_section, (100, 100))
# Predicted Label (out)
out = knn(trainset, face_section.flatten())
# Display on the screen the name and rectangle around it
pred_name = names[int(out)]
cv2.putText(frame, pred_name, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)
if pred_name not in attn:
attn.append(pred_name)
else:
continue
markAttendence(pred_name)
cv2.imshow("Faces", frame)
path = "C:/Users/Samarth/Desktop/knn/data/"
images = [] # LIST CONTAINING ALL THE IMAGES
className = [] # LIST CONTAINING ALL THE CORRESPONDING CLASS Names
myList = os.listdir(path)
for cl in myList:
curImg = cv2.imread(f'{path}/{cl}')
images.append(curImg)
className.append(os.path.splitext(cl)[0])
    total_student_in_class = list(className)  # the total students in this class
print(total_student_in_class)
class_total_present = list(attn)
#print(attn)
res_list = []
for i in total_student_in_class:
if i not in class_total_present:
res_list.append(i)
print(res_list)
maarkattndnce(i)
# ai = tuple(total_student_in_class) #name of all the students as a tuple
#print(ai)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| 3.625
| 4
|
structuralglass/__init__.py
|
normanrichardson/StructGlassCalcs
| 9
|
12778793
|
import pint
from . import resources
try:
import importlib.resources as pkg_resources
except ImportError:
# Try backported to PY<37 `importlib_resources`.
import importlib_resources as pkg_resources
# Load the file stream for the units file
unit_file = pkg_resources.open_text(resources, "unit_def.txt")
# Setup pint for the package
ureg = pint.UnitRegistry()
Q_ = ureg.Quantity
ureg.load_definitions(unit_file)
| 2.4375
| 2
|
qiling/qiling/os/posix/stat.py
|
mrTavas/owasp-fstm-auto
| 2
|
12778794
|
<gh_stars>1-10
#!/usr/bin/env python3
#
# Cross Platform and Multi Architecture Advanced Binary Emulation Framework
#
import os
class StatBase:
def __init__(self):
self._stat_buf = None
# Never iterate this object!
def __getitem__(self, key):
if type(key) is not str:
raise TypeError
if not key.startswith("__") and key in dir(self._stat_buf):
return self._stat_buf.__getattribute__(key)
return 0
def __getattr__(self, key):
return self.__getitem__(key)
class Stat(StatBase):
def __init__(self, path):
super(Stat, self).__init__()
self._stat_buf = os.stat(path)
class Fstat(StatBase):
def __init__(self, fd):
super(Fstat, self).__init__()
self._stat_buf = os.fstat(fd)
class Lstat(StatBase):
def __init__(self, path):
super(Lstat, self).__init__()
self._stat_buf = os.lstat(path)
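# A small usage sketch (added here for illustration; not part of Qiling's API surface).
# Known fields resolve through os.stat_result, while unknown keys fall back to 0.
if __name__ == "__main__":
    st = Stat(__file__)
    print(st["st_size"], st.st_mode, st["no_such_field"])  # the last lookup prints 0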
| 2.515625
| 3
|
corehq/apps/sms/tests/opt_tests.py
|
akashkj/commcare-hq
| 471
|
12778795
|
from django.test import TestCase
from corehq.apps.accounting.models import SoftwarePlanEdition
from corehq.apps.accounting.tests.utils import DomainSubscriptionMixin
from corehq.apps.accounting.utils import clear_plan_version_cache
from corehq.apps.domain.models import Domain
from corehq.messaging.smsbackends.test.models import SQLTestSMSBackend
from corehq.apps.sms.api import incoming, send_sms_to_verified_number
from corehq.apps.sms.messages import MSG_OPTED_IN, MSG_OPTED_OUT, get_message
from corehq.apps.sms.models import SMS, PhoneBlacklist, PhoneNumber, SQLMobileBackendMapping, SQLMobileBackend
from corehq.apps.sms.tests.util import (
delete_domain_phone_numbers,
setup_default_sms_test_backend,
)
from corehq.form_processor.tests.utils import FormProcessorTestUtils
class OptTestCase(DomainSubscriptionMixin, TestCase):
@classmethod
def setUpClass(cls):
super(OptTestCase, cls).setUpClass()
cls.domain = 'opt-test'
cls.domain_obj = Domain(name=cls.domain)
cls.domain_obj.sms_case_registration_enabled = True
cls.domain_obj.save()
cls.setup_subscription(cls.domain, SoftwarePlanEdition.ADVANCED)
cls.backend, cls.backend_mapping = setup_default_sms_test_backend()
cls.custom_backend = SQLTestSMSBackend.objects.create(
name='MOBILE_BACKEND_CUSTOM_TEST',
is_global=True,
hq_api_id=SQLTestSMSBackend.get_api_id(),
opt_in_keywords=['RESTART'],
opt_out_keywords=['RESTOP']
)
cls.custom_backend_mapping = SQLMobileBackendMapping.objects.create(
is_global=True,
backend_type=SQLMobileBackend.SMS,
prefix='1',
backend=cls.custom_backend,
)
@classmethod
def tearDownClass(cls):
cls.backend_mapping.delete()
cls.backend.delete()
cls.custom_backend_mapping.delete()
cls.custom_backend.delete()
FormProcessorTestUtils.delete_all_cases(cls.domain)
cls.teardown_subscriptions()
cls.domain_obj.delete()
clear_plan_version_cache()
super(OptTestCase, cls).tearDownClass()
def tearDown(self):
PhoneBlacklist.objects.all().delete()
SMS.objects.filter(domain=self.domain).delete()
delete_domain_phone_numbers(self.domain)
def get_last_sms(self, phone_number):
return SMS.objects.filter(domain=self.domain, phone_number=phone_number).order_by('-date')[0]
def test_opt_out_and_opt_in(self):
self.assertEqual(PhoneBlacklist.objects.count(), 0)
incoming('99912345678', 'join opt-test', 'GVI')
v = PhoneNumber.get_two_way_number('99912345678')
self.assertIsNotNone(v)
incoming('99912345678', 'stop', 'GVI')
self.assertEqual(PhoneBlacklist.objects.count(), 1)
phone_number = PhoneBlacklist.objects.get(phone_number='99912345678')
self.assertFalse(phone_number.send_sms)
self.assertEqual(phone_number.domain, self.domain)
self.assertIsNotNone(phone_number.last_sms_opt_out_timestamp)
self.assertIsNone(phone_number.last_sms_opt_in_timestamp)
sms = self.get_last_sms('+99912345678')
self.assertEqual(sms.direction, 'O')
self.assertEqual(sms.text, get_message(MSG_OPTED_OUT, context=('START',)))
incoming('99912345678', 'start', 'GVI')
self.assertEqual(PhoneBlacklist.objects.count(), 1)
phone_number = PhoneBlacklist.objects.get(phone_number='99912345678')
self.assertTrue(phone_number.send_sms)
self.assertEqual(phone_number.domain, self.domain)
self.assertIsNotNone(phone_number.last_sms_opt_out_timestamp)
self.assertIsNotNone(phone_number.last_sms_opt_in_timestamp)
sms = self.get_last_sms('+99912345678')
self.assertEqual(sms.direction, 'O')
self.assertEqual(sms.text, get_message(MSG_OPTED_IN, context=('STOP',)))
def test_sending_to_opted_out_number(self):
self.assertEqual(PhoneBlacklist.objects.count(), 0)
incoming('99912345678', 'join opt-test', 'GVI')
v = PhoneNumber.get_two_way_number('99912345678')
self.assertIsNotNone(v)
send_sms_to_verified_number(v, 'hello')
sms = self.get_last_sms('+99912345678')
self.assertEqual(sms.direction, 'O')
self.assertEqual(sms.text, 'hello')
incoming('99912345678', 'stop', 'GVI')
self.assertEqual(PhoneBlacklist.objects.count(), 1)
phone_number = PhoneBlacklist.objects.get(phone_number='99912345678')
self.assertFalse(phone_number.send_sms)
send_sms_to_verified_number(v, 'hello')
sms = self.get_last_sms('+99912345678')
self.assertEqual(sms.direction, 'O')
self.assertEqual(sms.text, 'hello')
self.assertTrue(sms.error)
self.assertEqual(sms.system_error_message, SMS.ERROR_PHONE_NUMBER_OPTED_OUT)
incoming('99912345678', 'start', 'GVI')
self.assertEqual(PhoneBlacklist.objects.count(), 1)
phone_number = PhoneBlacklist.objects.get(phone_number='99912345678')
self.assertTrue(phone_number.send_sms)
send_sms_to_verified_number(v, 'hello')
sms = self.get_last_sms('+99912345678')
self.assertEqual(sms.direction, 'O')
self.assertEqual(sms.text, 'hello')
self.assertFalse(sms.error)
self.assertIsNone(sms.system_error_message)
def test_custom_opt_keywords(self):
self.assertEqual(PhoneBlacklist.objects.count(), 0)
incoming('19912345678', 'join opt-test', 'TEST')
v = PhoneNumber.get_two_way_number('19912345678')
self.assertIsNotNone(v)
send_sms_to_verified_number(v, 'hello')
sms = self.get_last_sms('+19912345678')
self.assertEqual(sms.direction, 'O')
self.assertEqual(sms.text, 'hello')
incoming('19912345678', 'restop', 'TEST')
self.assertEqual(PhoneBlacklist.objects.count(), 1)
phone_number = PhoneBlacklist.objects.get(phone_number='19912345678')
self.assertFalse(phone_number.send_sms)
send_sms_to_verified_number(v, 'hello')
sms = self.get_last_sms('+19912345678')
self.assertEqual(sms.direction, 'O')
self.assertEqual(sms.text, 'hello')
self.assertTrue(sms.error)
self.assertEqual(sms.system_error_message, SMS.ERROR_PHONE_NUMBER_OPTED_OUT)
incoming('19912345678', 'restart', 'TEST')
self.assertEqual(PhoneBlacklist.objects.count(), 1)
phone_number = PhoneBlacklist.objects.get(phone_number='19912345678')
self.assertTrue(phone_number.send_sms)
send_sms_to_verified_number(v, 'hello')
sms = self.get_last_sms('+19912345678')
self.assertEqual(sms.direction, 'O')
self.assertEqual(sms.text, 'hello')
self.assertFalse(sms.error)
self.assertIsNone(sms.system_error_message)
| 1.851563
| 2
|
torchir/regularization.py
|
BDdeVos/TorchIR
| 9
|
12778796
|
import torch
from torch import Tensor
from torchir.utils import identity_grid
def bending_energy_3d(
coord_grid: Tensor, vector_dim: int = -1, dvf_input: bool = False
) -> Tensor:
"""Calculates bending energy penalty for a 3D coordinate grid.
For further details regarding this regularization please read the work by `Rueckert 1999`_.
Args:
coord_grid: 3D coordinate grid, i.e. a 5D Tensor with standard dimensions
(n_samples, 3, z, y, x).
vector_dim: Specifies the location of the vector dimension. Default: -1
dvf_input: If ``True``, coord_grid is assumed a displacement vector field and
an identity_grid will be added. Default: ``False``
Returns:
Bending energy per instance in the batch.
.. _Rueckert 1999: https://ieeexplore.ieee.org/document/796284
"""
assert coord_grid.ndim == 5, "Input tensor should be 5D, i.e. 3D images."
if vector_dim != 1:
coord_grid = coord_grid.movedim(vector_dim, -1)
if dvf_input:
coord_grid = coord_grid + identity_grid(coord_grid.shape[2:], stackdim=0)
d_z = torch.diff(coord_grid, dim=1)
d_y = torch.diff(coord_grid, dim=2)
d_x = torch.diff(coord_grid, dim=3)
d_zz = torch.diff(d_z, dim=1)[:, :, :-2, :-2]
d_zy = torch.diff(d_z, dim=2)[:, :-1, :-1, :-2]
d_zx = torch.diff(d_z, dim=3)[:, :-1, :-2, :-1]
d_yy = torch.diff(d_y, dim=2)[:, :-2, :, :-2]
d_yx = torch.diff(d_y, dim=3)[:, :-2, :-1, :-1]
d_xx = torch.diff(d_x, dim=3)[:, :-2, :-2, :]
return torch.mean(
d_zz ** 2 + d_yy ** 2 + d_xx ** 2 + 2 * (d_zy ** 2 + d_zx ** 2 + d_yx ** 2),
axis=(1, 2, 3, 4),
)
def bending_energy_2d(
coord_grid: Tensor, vector_dim: int = -1, dvf_input: bool = False
) -> Tensor:
"""Calculates bending energy penalty for a 2D coordinate grid.
For further details regarding this regularization please read the work by `Rueckert 1999`_.
Args:
coord_grid: 2D coordinate grid, i.e. a 4D Tensor with standard dimensions
(n_samples, 2, y, x).
vector_dim: Specifies the location of the vector dimension. Default: -1
dvf_input: If ``True``, coord_grid is assumed a displacement vector field and
an identity_grid will be added. Default: ``False``
Returns:
Bending energy per instance in the batch.
.. _Rueckert 1999: https://ieeexplore.ieee.org/document/796284
"""
assert coord_grid.ndim == 4, "Input tensor should be 4D, i.e. 2D images."
if vector_dim != 1:
coord_grid = coord_grid.movedim(vector_dim, -1)
if dvf_input:
coord_grid = coord_grid + identity_grid(coord_grid.shape[2:], stackdim=0)
d_y = torch.diff(coord_grid, dim=1)
d_x = torch.diff(coord_grid, dim=2)
d_yy = torch.diff(d_y, dim=1)[:, :, :-2]
d_yx = torch.diff(d_y, dim=2)[:, :-1, :-1]
d_xx = torch.diff(d_x, dim=2)[:, :-2, :]
return torch.mean(d_yy ** 2 + d_xx ** 2 + 2 * d_yx ** 2, axis=(1, 2, 3))
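# A minimal usage sketch (an illustration added here, not part of the original module;
# the identity grid is built by hand so that only torch is required):
if __name__ == "__main__":
    ys = torch.arange(8.0).reshape(8, 1).expand(8, 8)
    xs = torch.arange(8.0).reshape(1, 8).expand(8, 8)
    grid = torch.stack((ys, xs), dim=-1).unsqueeze(0)  # (1, 8, 8, 2), vector dim last
    warped = grid + 0.05 * torch.randn_like(grid)      # mildly perturbed grid
    print(bending_energy_2d(grid))    # ~0 for the identity transform
    print(bending_energy_2d(warped))  # larger for the non-smooth perturbation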
| 2.90625
| 3
|
codeforces/600C_palindrom.py
|
snsokolov/contests
| 1
|
12778797
|
#!/usr/bin/env python3
# 600C_palindrom.py - Codeforces.com/problemset/problem/600/C by Sergey 2015
import unittest
import sys
###############################################################################
# Palindrom Class (Main Program)
###############################################################################
class Palindrom:
""" Palindrom representation """
def __init__(self, test_inputs=None):
""" Default constructor """
it = iter(test_inputs.split("\n")) if test_inputs else None
def uinput():
return next(it) if it else sys.stdin.readline().rstrip()
# Reading single elements
self.s = uinput()
self.cnt = {}
for c in self.s:
self.cnt[c] = self.cnt.get(c, 0) + 1
self.pcnt = dict(self.cnt)
for i in reversed(sorted(self.pcnt)):
if self.pcnt[i] % 2:
self.pcnt[i] -= 1
found = 0
for j in sorted(self.pcnt):
if self.pcnt[j] % 2:
self.pcnt[j] += 1
found = 1
break
if not found:
self.pcnt[i] += 1
def calculate(self):
""" Main calcualtion function of the class """
result = []
mid = []
for c in sorted(self.pcnt):
n = self.pcnt[c]
if n > 0:
for j in range(n // 2):
result.append(c)
if n % 2:
mid.append(c)
return "".join(result + mid + list(reversed(result)))
###############################################################################
# Unit Tests
###############################################################################
class unitTests(unittest.TestCase):
def test_single_test(self):
""" Palindrom class testing """
# Constructor test
test = "aabc"
d = Palindrom(test)
self.assertEqual(d.cnt["c"], 1)
self.assertEqual(d.pcnt["c"], 0)
# Sample test
self.assertEqual(Palindrom(test).calculate(), "abba")
# Sample test
test = "aabcd"
self.assertEqual(Palindrom(test).calculate(), "abcba")
# Sample test
test = "aabbcccdd"
self.assertEqual(Palindrom(test).calculate(), "abcdcdcba")
# My tests
test = ""
# self.assertEqual(Palindrom(test).calculate(), "0")
# Time limit test
# self.time_limit_test(5000)
def time_limit_test(self, nmax):
""" Timelimit testing """
import random
import timeit
# Random inputs
test = str(nmax) + " " + str(nmax) + "\n"
numnums = [str(i) + " " + str(i+1) for i in range(nmax)]
test += "\n".join(numnums) + "\n"
nums = [random.randint(1, 10000) for i in range(nmax)]
test += " ".join(map(str, nums)) + "\n"
# Run the test
start = timeit.default_timer()
d = Palindrom(test)
calc = timeit.default_timer()
d.calculate()
stop = timeit.default_timer()
print("\nTimelimit Test: " +
"{0:.3f}s (init {1:.3f}s calc {2:.3f}s)".
format(stop-start, calc-start, stop-calc))
if __name__ == "__main__":
    # Avoiding recursion limitations
sys.setrecursionlimit(100000)
if sys.argv[-1] == "-ut":
unittest.main(argv=[" "])
# Print the result string
sys.stdout.write(Palindrom().calculate())
| 3.671875
| 4
|
MnistClassifier/DataSetImageView.py
|
Ingener74/Nizaje
| 0
|
12778798
|
<reponame>Ingener74/Nizaje
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.figure import Figure
class DataSetImageView(FigureCanvas):
def __init__(self, parent=None, width=5, height=5, dpi=100):
self.figure = Figure((width, height), dpi)
self.axis = self.figure.add_subplot(111)
self.set_data(np.array([
[1, 0, 1],
[0, 1, 1],
[0, 1, 0]
], 'float32'))
super(DataSetImageView, self).__init__(self.figure)
self.setParent(parent)
def set_data(self, data: np.ndarray):
self.figure.clf()
self.axis = self.figure.add_subplot(111)
self.axis.imshow(data)
self.figure.canvas.draw()
| 2.609375
| 3
|
py2latex/markdown_parser/__init__.py
|
domdfcoding/py2latex
| 1
|
12778799
|
<gh_stars>1-10
#!/usr/bin/env python
#
# __init__.py
#
# Copyright © 2020 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
# Parts based on https://github.com/rufuspollock/markdown2latex
# BSD Licensed
# Authored by <NAME>: <http://www.rufuspollock.org/>
# Reworked by <NAME> (<EMAIL>) and
# <NAME> (<EMAIL>)
#
# stdlib
import os
import pathlib
import re
from typing import Union
# 3rd party
import markdown
import markdown.treeprocessors # type: ignore
import markdown.util # type: ignore
# this package
from py2latex.markdown_parser.images import ImageTextPostProcessor
from py2latex.markdown_parser.links import LinkTextPostProcessor
from py2latex.markdown_parser.maths import MathTextPostProcessor
from py2latex.markdown_parser.tables import TableTextPostProcessor
from py2latex.markdown_parser.utils import escape_latex_entities, unescape_html_entities
__all__ = [
"LaTeXExtension",
"LaTeXTreeProcessor",
"UnescapeHtmlTextPostProcessor",
"gls",
"load_markdown",
"parse_markdown"
]
def gls(name):
return rf"\gls{{{name}}}"
def load_markdown(filename: Union[str, pathlib.Path, os.PathLike]) -> str:
if not isinstance(filename, pathlib.Path):
filename = pathlib.Path(filename)
return parse_markdown(filename.read_text())
def parse_markdown(string):
out = md.convert(string)
out = re.sub(r"</?root>", '', out)
out = re.sub(r"gls{([^}]*)}", r"\\gls{\1}", out)
out = re.sub(r"citep{([^}]*)}", r"~\\citep{\1}", out)
out = re.sub(r"cite{([^}]*)}", r"~\\cite{\1}", out)
# text_md = re.sub(r"\*\*(.+)\*\*", r"\\textbf{\1}", text_md)
# text_md = re.sub(r"\*(.+)\*", r"\\textit{\1}", text_md)
out = re.sub(r"<sup>(.+)</sup>", r"\\textsuperscript{\1}", out)
out = re.sub(r"<sub>(.+)</sub>", r"\\textsubscript{\1}", out)
out = re.sub(r"<sub>(.+)</sub>", r"\\textsubscript{\1}", out)
return out
class LaTeXExtension(markdown.extensions.Extension):
def __init__(self, configs=None):
self.reset()
def extendMarkdown(self, md, md_globals):
self.md = md
# remove escape pattern -- \\(.*) -- as this messes up any embedded
# math and we don't need to escape stuff any more for html
# for key, pat in self.md.inlinePatterns.items():
# if pat.pattern == markdown.inlinepatterns.ESCAPE_RE:
# self.md.inlinePatterns.pop(key)
# break
# footnote_extension = FootnoteExtension()
# footnote_extension.extendMarkdown(md, md_globals)
latex_tp = LaTeXTreeProcessor()
math_pp = MathTextPostProcessor()
table_pp = TableTextPostProcessor()
image_pp = ImageTextPostProcessor()
link_pp = LinkTextPostProcessor()
unescape_html_pp = UnescapeHtmlTextPostProcessor()
md.treeprocessors["latex"] = latex_tp
md.postprocessors["unescape_html"] = unescape_html_pp
md.postprocessors["math"] = math_pp
md.postprocessors["image"] = image_pp
md.postprocessors["table"] = table_pp
md.postprocessors["link"] = link_pp
def reset(self):
pass
class LaTeXTreeProcessor(markdown.treeprocessors.Treeprocessor):
def run(self, doc):
"""Walk the dom converting relevant nodes to text nodes with relevant
content."""
latex_text = self.tolatex(doc)
doc.clear()
latex_node = markdown.util.etree.Element("root")
latex_node.text = latex_text
doc.append(latex_node)
def tolatex(self, ournode):
buffer = ''
subcontent = ''
if ournode.text:
subcontent += escape_latex_entities(ournode.text)
if ournode.getchildren():
for child in ournode.getchildren():
subcontent += self.tolatex(child)
if ournode.tag == "h1":
buffer += f"\n\\chapter{{{subcontent}}}"
buffer += f"'\n\\label{{chapter:{subcontent.lower().replace(' ', '_')}}}\n'"
elif ournode.tag == "h2":
buffer += f"\n\n\\section{{{subcontent}}}"
buffer += f"'\n\\label{{section:{subcontent.lower().replace(' ', '_')}}}\n'"
elif ournode.tag == "h3":
buffer += f"\n\n\\subsection{{{subcontent}}}"
buffer += f"'\n\\label{{subsection:{subcontent.lower().replace(' ', '_')}}}\n'"
elif ournode.tag == "h4":
buffer += f"\n\\subsubsection{{{subcontent}}}"
buffer += f"'\n\\label{{subsubsection:{subcontent.lower().replace(' ', '_')}}}\n'"
elif ournode.tag == "hr":
buffer += "\\noindent\\makebox[\\linewidth]{\\rule{\\linewidth}{0.4pt}}"
elif ournode.tag == "ul":
# no need for leading \n as one will be provided by li
buffer += f"""
\\begin{{itemize}}{subcontent}
\\end{{itemize}}
"""
elif ournode.tag == "ol":
# no need for leading \n as one will be provided by li
buffer += f"""
\\begin{{enumerate}}{subcontent}
\\end{{enumerate}}
"""
elif ournode.tag == "li":
buffer += f"""
\\item {subcontent.strip()}"""
elif ournode.tag == "blockquote":
# use quotation rather than quote as quotation can support multiple
# paragraphs
buffer += f"""
\\begin{{quotation}}
{subcontent.strip()}
\\end{{quotation}}
"""
# ignore 'code' when inside pre tags
# (mkdn produces <pre><code></code></pre>)
elif (
ournode.tag == "pre"
or (ournode.tag == "pre" and ournode.parentNode.tag != "pre") # TODO: Take a look here
):
buffer += f"""
\\begin{{verbatim}}
{subcontent.strip()}
\\end{{verbatim}}
"""
elif ournode.tag == 'q':
buffer += f"`{subcontent.strip()}'"
elif ournode.tag == 'p':
buffer += f"\n{subcontent.strip()}\n"
# Footnote processor inserts all of the footnote in a sup tag
elif ournode.tag == "sup":
buffer += f"\\footnote{{{subcontent.strip()}}}"
elif ournode.tag == "strong":
buffer += f"\\textbf{{{subcontent.strip()}}}"
elif ournode.tag == "em":
buffer += f"\\emph{{{subcontent.strip()}}}"
        # Keep the table structure. TableTextPostProcessor will take care of it.
elif ournode.tag == "table":
buffer += f"\n\n<table>{subcontent}</table>\n\n"
elif ournode.tag == "thead":
buffer += f"<thead>{subcontent}</thead>"
elif ournode.tag == "tbody":
buffer += f"<tbody>{subcontent}</tbody>"
elif ournode.tag == "tr":
buffer += f"<tr>{subcontent}</tr>"
elif ournode.tag == "th":
buffer += f"<th>{subcontent}</th>"
elif ournode.tag == "td":
buffer += f"<td>{subcontent}</td>"
elif ournode.tag == "img":
buffer += f'<img src=\"{ournode.get("src")}\" alt=\"{ournode.get("alt")}\" />'
elif ournode.tag == 'a':
buffer += f'<a href=\"{ournode.get("href")}\">{subcontent}</a>'
else:
buffer = subcontent
if ournode.tail:
buffer += escape_latex_entities(ournode.tail)
return buffer
class UnescapeHtmlTextPostProcessor(markdown.postprocessors.Postprocessor):
def run(self, text):
return unescape_html_entities(text)
md = markdown.Markdown()
latex_mdx = LaTeXExtension()
latex_mdx.extendMarkdown(md, markdown.__dict__)
| 1.507813
| 2
|
vesicashapi/__init__.py
|
vesicash/vesicash-python-sdk
| 0
|
12778800
|
import os
"""Script used to define constants"""
PRIVATE_KEY = os.getenv(
'VESICASH_PRIVATE_KEY',
'<KEY>'
)
HEADERS = {'V-Private-Key': PRIVATE_KEY}
api_url = ''
mode = os.getenv('VESICASH_MODE')
if(mode == 'sandbox'):
API_URL = 'https://sandbox.api.vesicash.com/v1/'
else:
API_URL = 'https://api.vesicash.com/v1/'
| 2.203125
| 2
|
projects/avatar_cropping/main.py
|
IDilettant/training-mini-projects
| 0
|
12778801
|
<gh_stars>0
from PIL import Image
def cropp_avatar():
image_monroe = Image.open('monro.jpg')
red_channel, green_channel, blue_channel = image_monroe.split()
cutting_width = 50
red_channel_1 = red_channel.crop((cutting_width, 0, red_channel.width, red_channel.height))
red_channel_2 = red_channel.crop((cutting_width / 2, 0, red_channel.width - cutting_width / 2, red_channel.height))
blended_red_channel = Image.blend(red_channel_1, red_channel_2, 0.5)
    blue_channel_1 = blue_channel.crop((0, 0, blue_channel.width - cutting_width, blue_channel.height))
blue_channel_2 = blue_channel.crop((cutting_width / 2, 0, blue_channel.width - cutting_width / 2, blue_channel.height))
blended_blue_channel = Image.blend(blue_channel_1, blue_channel_2, 0.5)
cropped_green_channel = green_channel.crop((cutting_width / 2, 0, green_channel.width - cutting_width / 2, green_channel.height))
blurry_image = Image.merge('RGB', (blended_red_channel, cropped_green_channel, blended_blue_channel))
blurry_image.save('blurry_image.jpg')
blurry_image.thumbnail((80, 80), Image.ANTIALIAS)
blurry_image.save('blurry_avatar.jpg')
if __name__ == '__main__':
cropp_avatar()
| 2.609375
| 3
|
tests/conftest.py
|
kostya-ten/iperon
| 1
|
12778802
|
<filename>tests/conftest.py
import asyncio
from http.cookies import SimpleCookie
import httpx
import pytest
from fastapi import Response
from httpx import AsyncClient
from tortoise import Tortoise
from iperon import services, store, typeof, redis
from iperon.app import app as iperon_app
from iperon.app import startup_event, shutdown_event
from iperon.settings import settings
from iperon.typeof import EmailAddress
@pytest.fixture
async def member(event_loop):
member: services.member.Create = services.member.create(
email=EmailAddress('<EMAIL>'),
password='password',
recaptcha='0'
)
member: services.member.Member = await member.save()
yield member
await member.remove()
@pytest.fixture
async def organization(member: services.member.Member):
organization_create: services.organization.Create = services.organization.create(
slug=typeof.Slug(' Test_Organization '),
friendly_name=typeof.FriendlyName('Test Organization'),
environment=store.organization.Environment.Development,
)
organization: services.organization.Organization = await organization_create.save(member=member)
yield organization
await organization.remove()
@pytest.fixture(scope='session')
def event_loop():
return asyncio.new_event_loop()
@pytest.fixture()
async def app(event_loop):
await iperon_app.on_event('startup')(startup_event())
async with redis.RedisClient() as client:
result_flushdb = await client.flushdb()
assert result_flushdb
await Tortoise.generate_schemas()
try:
yield iperon_app
finally:
try:
connection = Tortoise.get_connection('master')
except KeyError:
pass
else:
await connection.execute_query(f'DROP SCHEMA IF EXISTS {settings.db_schema} CASCADE')
await iperon_app.on_event('shutdown')(shutdown_event())
@pytest.fixture
async def client(app, member: services.member.Member):
response = Response()
await member.auth(response=response)
simple_cookie: SimpleCookie = SimpleCookie()
simple_cookie.load(response.headers.get('set-cookie'))
cookie = httpx.Cookies()
for key, morsel in simple_cookie.items():
if key == 'session':
cookie.set(name='session', value=morsel.value, domain='.localhost.local')
client = AsyncClient(app=app, base_url='http://localhost.local', cookies=cookie, headers={'x-real-ip': '127.0.0.1'})
yield client
await client.aclose()
| 1.8125
| 2
|
src/finance_stats/hedge_calculator/__init__.py
|
pralphv/hkportfolioanalysis-backend
| 0
|
12778803
|
from .api import calculate_hedge
| 1.085938
| 1
|
fastapi-alembic-sqlmodel-async/app/crud/crud_hero.py
|
jonra1993/fastapi-alembic-sqlmodel-async
| 15
|
12778804
|
from app.schemas.hero import IHeroCreate, IHeroUpdate
from app.crud.base_sqlmodel import CRUDBase
from app.models.hero import Hero
class CRUDHero(CRUDBase[Hero, IHeroCreate, IHeroUpdate]):
pass
hero = CRUDHero(Hero)
| 1.945313
| 2
|
ecobee_classes_creator/constants.py
|
sfanous/EcobeeClassesCreator
| 0
|
12778805
|
<filename>ecobee_classes_creator/constants.py<gh_stars>0
import os
import sys
if getattr(sys, 'frozen', False):
directory_containing_script = os.path.dirname(sys.executable)
else:
directory_containing_script = sys.path[0]
DEFAULT_LOGGING_CONFIGURATION = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'MultiLine': {
'format': '%(asctime)s %(name)-50s %(funcName)-40s %(levelname)-8s %(message)s',
'()': 'ecobee_classes_creator.formatters.MultiLineFormatter',
},
},
'handlers': {
'console': {
'level': 'INFO',
'formatter': 'MultiLine',
'class': 'logging.StreamHandler',
},
'rotating_file': {
'level': 'INFO',
'formatter': 'MultiLine',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(
os.path.join(directory_containing_script, 'logs'),
'ecobee_classes_creator.log',
),
'maxBytes': 1024 * 1024 * 10,
'backupCount': 10,
},
},
'loggers': {
'ecobee_classes_creator': {
'handlers': ['console', 'rotating_file'],
'level': 'INFO',
'propagate': True,
}
},
}
DEFAULT_LOG_DIRECTORY_PATH = os.path.join(directory_containing_script, 'logs')
DEFAULT_LOG_FILE_PATH = os.path.join(
DEFAULT_LOG_DIRECTORY_PATH, 'ecobee_classes_creator.log'
)
LOGGING_CONFIGURATION_FILE_PATH = os.path.join(
directory_containing_script, 'ecobee_classes_creator_logging_configuration.json'
)
VERSION = '1.0.2'
| 1.945313
| 2
|
nbprocess/maker.py
|
fastai/nbprocess
| 15
|
12778806
|
<filename>nbprocess/maker.py
# AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/01_maker.ipynb.
# %% auto 0
__all__ = ['find_var', 'read_var', 'update_var', 'ModuleMaker', 'retr_exports', 'make_code_cell', 'make_code_cells',
'relative_import', 'update_import', 'basic_export_nb2']
# %% ../nbs/01_maker.ipynb 3
from .read import *
from .imports import *
from fastcore.script import *
from fastcore.imports import *
import ast,contextlib
from collections import defaultdict
from pprint import pformat
from textwrap import TextWrapper
# %% ../nbs/01_maker.ipynb 7
def find_var(lines, varname):
"Find the line numbers where `varname` is defined in `lines`"
start = first(i for i,o in enumerate(lines) if o.startswith(varname))
if start is None: return None,None
empty = ' ','\t'
if start==len(lines)-1 or lines[start+1][:1] not in empty: return start,start+1
end = first(i for i,o in enumerate(lines[start+1:]) if o[:1] not in empty)
return start,len(lines) if end is None else (end+start+1)
# %% ../nbs/01_maker.ipynb 9
def read_var(code, varname):
"Eval and return the value of `varname` defined in `code`"
lines = code.splitlines()
start,end = find_var(lines, varname)
if start is None: return None
res = [lines[start].split('=')[-1].strip()]
res += lines[start+1:end]
try: return eval('\n'.join(res))
except SyntaxError: raise Exception('\n'.join(res)) from None
# %% ../nbs/01_maker.ipynb 11
def update_var(varname, func, fn=None, code=None):
"Update the definition of `varname` in file `fn`, by calling `func` with the current definition"
if fn:
fn = Path(fn)
code = fn.read_text()
lines = code.splitlines()
v = read_var(code, varname)
res = func(v)
start,end = find_var(lines, varname)
del(lines[start:end])
lines.insert(start, f"{varname} = {res}")
code = '\n'.join(lines)
if fn: fn.write_text(code)
else: return code
# %% ../nbs/01_maker.ipynb 14
class ModuleMaker:
"Helper class to create exported library from notebook source cells"
def __init__(self, dest, name, nb_path, is_new=True):
dest,nb_path = Path(dest),Path(nb_path)
store_attr()
self.fname = dest/(name.replace('.','/') + ".py")
if is_new: dest.mkdir(parents=True, exist_ok=True)
else: assert self.fname.exists(), f"{self.fname} does not exist"
self.dest2nb = nb_path.relpath(dest)
self.hdr = f"# %% {self.dest2nb}"
# %% ../nbs/01_maker.ipynb 17
_def_types = ast.FunctionDef,ast.AsyncFunctionDef,ast.ClassDef
_assign_types = ast.AnnAssign, ast.Assign, ast.AugAssign
def _val_or_id(it): return [getattr(o, 'value', getattr(o, 'id', None)) for o in it.value.elts]
def _all_targets(a): return L(getattr(a,'elts',a))
def _filt_dec(x): return getattr(x,'id','').startswith('patch')
def _wants(o): return isinstance(o,_def_types) and not any(L(o.decorator_list).filter(_filt_dec))
# %% ../nbs/01_maker.ipynb 18
def retr_exports(trees):
# include anything mentioned in "_all_", even if otherwise private
# NB: "_all_" can include strings (names), or symbols, so we look for "id" or "value"
assigns = trees.filter(risinstance(_assign_types))
all_assigns = assigns.filter(lambda o: getattr(o.targets[0],'id',None)=='_all_')
all_vals = all_assigns.map(_val_or_id).concat()
syms = trees.filter(_wants).attrgot('name')
# assignment targets (NB: can be multiple, e.g. "a=b=c", and/or destructuring e.g "a,b=(1,2)")
assign_targs = L(L(assn.targets).map(_all_targets).concat() for assn in assigns).concat()
exports = (assign_targs.attrgot('id')+syms).filter(lambda o: o and o[0]!='_')
return (exports+all_vals).unique()
# %% ../nbs/01_maker.ipynb 19
@patch
def make_all(self:ModuleMaker, cells):
"Create `__all__` with all exports in `cells`"
if cells is None: return ''
return retr_exports(cells.map(NbCell.parsed_).concat())
# %% ../nbs/01_maker.ipynb 20
def make_code_cell(code): return AttrDict(source=code, cell_type="code")
def make_code_cells(*ss): return dict2nb({'cells':L(ss).map(make_code_cell)}).cells
# %% ../nbs/01_maker.ipynb 23
def relative_import(name, fname, level=0):
"Convert a module `name` to a name relative to `fname`"
assert not level
sname = name.replace('.','/')
if not(os.path.commonpath([sname,fname])): return name
rel = os.path.relpath(sname, fname)
if rel==".": return "."
res = rel.replace(f"..{os.path.sep}", ".")
return "." + res.replace(os.path.sep, ".")
# %% ../nbs/01_maker.ipynb 25
def update_import(source, tree, libname, f=relative_import):
if not tree: return
imps = L(tree).filter(risinstance(ast.ImportFrom))
if not imps: return
src = source.splitlines(True)
for imp in imps:
nmod = f(imp.module, libname, imp.level)
lin = imp.lineno-1
sec = src[lin][imp.col_offset:imp.end_col_offset]
newsec = re.sub(f"(from +){'.'*imp.level}{imp.module}", fr"\1{nmod}", sec)
src[lin] = src[lin].replace(sec,newsec)
return src
@patch
def import2relative(cell:NbCell, libname):
src = update_import(cell.source, cell.parsed_(), libname)
if src: cell.set_source(src)
# %% ../nbs/01_maker.ipynb 27
@patch
def make(self:ModuleMaker, cells, all_cells=None, lib_name=None):
"Write module containing `cells` with `__all__` generated from `all_cells`"
if lib_name is None: lib_name = get_config().lib_name
if all_cells is None: all_cells = cells
for cell in all_cells: cell.import2relative(lib_name)
if not self.is_new: return self._make_exists(cells, all_cells)
self.fname.parent.mkdir(exist_ok=True, parents=True)
_all = self.make_all(all_cells)
trees = cells.map(NbCell.parsed_)
try: last_future = max(i for i,tree in enumerate(trees) if tree and any(
isinstance(t,ast.ImportFrom) and t.module=='__future__' for t in tree))+1
except ValueError: last_future=0
with self.fname.open('w') as f:
f.write(f"# AUTOGENERATED! DO NOT EDIT! File to edit: {self.dest2nb}.")
write_cells(cells[:last_future], self.hdr, f, 0)
tw = TextWrapper(width=120, initial_indent='', subsequent_indent=' '*11, break_long_words=False)
all_str = '\n'.join(tw.wrap(str(_all)))
f.write(f"\n\n# %% auto 0\n__all__ = {all_str}")
write_cells(cells[last_future:], self.hdr, f, 1)
f.write('\n')
# %% ../nbs/01_maker.ipynb 31
@patch
def _update_all(self:ModuleMaker, all_cells, alls):
return pformat(alls + self.make_all(all_cells), width=160)
@patch
def _make_exists(self:ModuleMaker, cells, all_cells=None):
"`make` for `is_new=False`"
if all_cells: update_var('__all__', partial(self._update_all, all_cells), fn=self.fname)
with self.fname.open('a') as f: write_cells(cells, self.hdr, f)
# %% ../nbs/01_maker.ipynb 37
def basic_export_nb2(fname, name, dest=None):
"A basic exporter to bootstrap nbprocess using `ModuleMaker`"
if dest is None: dest = get_config().path('lib_path')
cells = L(c for c in read_nb(fname).cells if re.match(r'#\s*export', c.source))
ModuleMaker(dest=dest, name=name, nb_path=fname).make(cells)
| 2.25
| 2
|
lib/cnn.py
|
zhenghuazx/toxic-comment
| 1
|
12778807
|
<filename>lib/cnn.py
'''
# Created by hua.zheng on 3/8/18.
'''
import tensorflow as tf
from keras.engine import Layer, InputSpec
from keras.models import Model, Sequential
from keras.utils import multi_gpu_model
from keras.layers import Dropout, Embedding, concatenate, add
from keras.layers import Conv1D, MaxPooling1D, Conv2D, MaxPooling2D, ZeroPadding1D, GlobalMaxPooling1D, GlobalAveragePooling1D, \
    AveragePooling1D, SpatialDropout1D
from keras.layers import Dense, Input, Flatten, BatchNormalization
from keras.layers import Concatenate, Dot, Multiply, RepeatVector
from keras.layers import Bidirectional, TimeDistributed
from keras.layers import SimpleRNN, LSTM, GRU, Lambda, Permute, PReLU
from keras.layers.core import Reshape, Activation
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard
from keras.constraints import maxnorm
from keras.regularizers import l2
class KMaxPooling(Layer):
"""
K-max pooling layer that extracts the k-highest activations from a sequence (2nd dimension).
TensorFlow backend.
"""
def __init__(self, k=1, axis=1, **kwargs):
super(KMaxPooling, self).__init__(**kwargs)
self.input_spec = InputSpec(ndim=3)
self.k = k
assert axis in [1,2], 'expected dimensions (samples, filters, convolved_values),\
cannot fold along samples dimension or axis not in list [1,2]'
self.axis = axis
        # need to swap the chosen axis with the last axis before the transpose,
        # since tf.nn.top_k selects the top k elements along the last axis
self.transpose_perm = [0,1,2] #default
self.transpose_perm[self.axis] = 2
self.transpose_perm[2] = self.axis
def compute_output_shape(self, input_shape):
input_shape_list = list(input_shape)
input_shape_list[self.axis] = self.k
return tuple(input_shape_list)
def call(self, x):
# swap sequence dimension to get top k elements along axis=1
transposed_for_topk = tf.transpose(x, perm=self.transpose_perm)
# extract top_k, returns two tensors [values, indices]
top_k = tf.nn.top_k(transposed_for_topk, k=self.k, sorted=True, name=None)[0]
# return back to normal dimension but now sequence dimension has only k elements
# performing another transpose will get the tensor back to its original shape
# but will have k as its axis_1 size
transposed_back = tf.transpose(top_k, perm=self.transpose_perm)
return transposed_back
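# Minimal usage sketch (assumption, not part of the original file): with axis=1 the
# layer keeps the k largest activations along the sequence dimension, e.g.
#   seq = Input(shape=(100, 300))
#   feats = Conv1D(64, 3, padding='same', activation='relu')(seq)
#   top4 = KMaxPooling(k=4, axis=1)(feats)   # output shape: (batch, 4, 64)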
class Folding(Layer):
def __init__(self, **kwargs):
super(Folding, self).__init__(**kwargs)
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
return (input_shape[0], input_shape[1], int(input_shape[2]/2))
def call(self, x):
input_shape = x.get_shape().as_list()
        # split the tensor along dimension 2 into dimension_axis_size/2 chunks,
        # each chunk holding a pair of adjacent feature columns
splits = tf.split(x, num_or_size_splits=int(input_shape[2]/2), axis=2)
# reduce sums of the pair of rows we have split onto
reduce_sums = [tf.reduce_sum(split, axis=2) for split in splits]
# stack them up along the same axis we have reduced
row_reduced = tf.stack(reduce_sums, axis=2)
return row_reduced
def vdnn(embedding_matrix, num_classes, max_seq_len, num_filters=2, filter_sizes=[64, 128, 256, 512], l2_weight_decay=0.0001, dropout_val=0.5,
dense_dim=32, add_sigmoid=True, train_embeds=False, auxiliary=True, gpus=0, n_cnn_layers=1, pool='max',
add_embeds=False):
#input_ = Input(shape=(max_seq_len,))
model = Sequential([
Embedding(embedding_matrix.shape[0],
embedding_matrix.shape[1],
weights=[embedding_matrix],
input_length=max_seq_len,
trainable=train_embeds),
Conv1D(embedding_matrix.shape[1], 3, padding="valid")
])
# 4 pairs of convolution blocks followed by pooling
for filter_size in filter_sizes:
# each iteration is a convolution block
for cb_i in range(num_filters):
model.add(Conv1D(filter_size, 3, padding="same"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Conv1D(filter_size, 3, padding="same"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPooling1D(pool_size=2, strides=3))
# model.add(KMaxPooling(k=2))
model.add(Flatten())
model.add(Dense(1024, activation="relu"))
model.add(Dense(256, activation="relu"))
if add_sigmoid:
model.add(Dense(num_classes, activation='sigmoid'))
if gpus > 0:
model = multi_gpu_model(model, gpus=gpus)
return model
def mvcnn(embedding_matrix1, embedding_matrix2, num_classes, max_seq_len, num_filters=2, filter_sizes=[3, 5], l2_weight_decay=0.0001, dropout_val=0.5,
dense_dim=32, add_sigmoid=True, train_embeds=False, auxiliary=True, gpus=0):
text_seq_input = Input(shape=(max_seq_len,), dtype='int32')
text_embedding1 = Embedding(embedding_matrix1.shape[0],
embedding_matrix1.shape[1],
weights=[embedding_matrix1],
input_length=max_seq_len,
trainable=train_embeds)(text_seq_input)
text_embedding2 = Embedding(embedding_matrix2.shape[0],
embedding_matrix2.shape[1],
weights=[embedding_matrix2],
input_length=max_seq_len,
trainable=train_embeds)(text_seq_input)
k_top = 4
layer_1 = []
for text_embedding in [text_embedding1, text_embedding2]:
conv_pools = []
for filter_size in filter_sizes:
l_zero = ZeroPadding1D((filter_size - 1, filter_size - 1))(text_embedding)
l_conv = Conv1D(filters=128, kernel_size=filter_size, padding='same', activation='tanh')(l_zero)
l_pool = KMaxPooling(k=30, axis=1)(l_conv)
conv_pools.append((filter_size, l_pool))
layer_1.append(conv_pools)
last_layer = []
for layer in layer_1: # no of embeddings used
for (filter_size, input_feature_maps) in layer:
l_zero = ZeroPadding1D((filter_size - 1, filter_size - 1))(input_feature_maps)
l_conv = Conv1D(filters=128, kernel_size=filter_size, padding='same', activation='tanh')(l_zero)
l_pool = KMaxPooling(k=k_top, axis=1)(l_conv)
last_layer.append(l_pool)
l_merge = Concatenate(axis=1)(last_layer)
l_flat = Flatten()(l_merge)
l_dense = Dense(128, activation='relu')(l_flat)
    if auxiliary:
        auxiliary_input = Input(shape=(5,), name='aux_input')
        l_dense = Concatenate()([l_dense, auxiliary_input])
    l_out = Dense(num_classes, activation='sigmoid')(l_dense)
if auxiliary:
model = Model(inputs=[text_seq_input, auxiliary_input], outputs=l_out)
else:
model = Model(inputs=text_seq_input, outputs=l_out)
if gpus > 0:
model = multi_gpu_model(model, gpus=gpus)
return model
def mgcnn(embedding_matrix1, embedding_matrix2,embedding_matrix3, num_classes, max_seq_len, num_filters=2, filter_sizes=[3,5], l2_weight_decay=0.0001, dropout_val=0.5,
dense_dim=32, add_sigmoid=True, train_embeds=False, auxiliary=True, gpus=0):
text_seq_input = Input(shape=(max_seq_len,), dtype='int32')
text_embedding1 = Embedding(embedding_matrix1.shape[0],
embedding_matrix1.shape[1],
weights=[embedding_matrix1],
input_length=max_seq_len,
trainable=train_embeds)(text_seq_input)
text_embedding2 = Embedding(embedding_matrix2.shape[0],
embedding_matrix2.shape[1],
weights=[embedding_matrix2],
input_length=max_seq_len,
trainable=train_embeds)(text_seq_input)
text_embedding3 = Embedding(embedding_matrix3.shape[0],
embedding_matrix3.shape[1],
weights=[embedding_matrix3],
input_length=max_seq_len,
trainable=train_embeds)(text_seq_input)
k_top = 4
conv_pools = []
for text_embedding in [text_embedding1, text_embedding2, text_embedding3]:
for filter_size in filter_sizes:
l_zero = ZeroPadding1D((filter_size - 1, filter_size - 1))(text_embedding)
l_conv = Conv1D(filters=64, kernel_size=filter_size, padding='same', activation='tanh')(l_zero)
l_pool = GlobalMaxPooling1D()(l_conv)
conv_pools.append(l_pool)
l_merge = Concatenate(axis=1)(conv_pools)
l_dense = Dense(128, activation='relu', kernel_regularizer=l2(0.01))(l_merge)
l_out = Dense(num_classes, activation='sigmoid')(l_dense)
model = Model(inputs=text_seq_input, outputs=l_out)
if gpus > 0:
model = multi_gpu_model(model, gpus=gpus)
return model
# can be an alternative to mvcnn
def mvcnn3(embedding_matrix1, embedding_matrix2, embedding_matrix3, num_classes, max_seq_len, num_filters=2, filter_sizes=[3, 5], l2_weight_decay=0.0001, dropout_val=0.5,
dense_dim=32, add_sigmoid=True, train_embeds=False, auxiliary=True, gpus=0):
text_seq_input = Input(shape=(max_seq_len,), dtype='int32')
text_embedding1 = Embedding(embedding_matrix1.shape[0],
embedding_matrix1.shape[1],
weights=[embedding_matrix1],
input_length=max_seq_len,
trainable=train_embeds)(text_seq_input)
text_embedding2 = Embedding(embedding_matrix2.shape[0],
embedding_matrix2.shape[1],
weights=[embedding_matrix2],
input_length=max_seq_len,
trainable=train_embeds)(text_seq_input)
text_embedding3 = Embedding(embedding_matrix3.shape[0],
embedding_matrix3.shape[1],
weights=[embedding_matrix3],
input_length=max_seq_len,
trainable=train_embeds)(text_seq_input)
k_top = 4
layer_1 = []
for text_embedding in [text_embedding1, text_embedding2, text_embedding3]:
conv_pools = []
for filter_size in filter_sizes:
l_zero = ZeroPadding1D((filter_size - 1, filter_size - 1))(text_embedding)
l_conv = Conv1D(filters=128, kernel_size=filter_size, padding='same', activation='tanh')(l_zero)
l_pool = KMaxPooling(k=30, axis=1)(l_conv)
conv_pools.append((filter_size, l_pool))
layer_1.append(conv_pools)
last_layer = []
for layer in layer_1: # no of embeddings used
for (filter_size, input_feature_maps) in layer:
l_zero = ZeroPadding1D((filter_size - 1, filter_size - 1))(input_feature_maps)
l_conv = Conv1D(filters=128, kernel_size=filter_size, padding='same', activation='tanh')(l_zero)
l_pool = KMaxPooling(k=k_top, axis=1)(l_conv)
last_layer.append(l_pool)
l_merge = Concatenate(axis=1)(last_layer)
l_flat = Flatten()(l_merge)
l_dense = Dense(128, activation='relu')(l_flat)
l_out = Dense(num_classes, activation='sigmoid')(l_dense)
model = Model(inputs=[text_seq_input], outputs=l_out)
if gpus > 0:
model = multi_gpu_model(model, gpus=gpus)
return model
def cnn2d(embedding_matrix, num_classes, max_seq_len, num_filters=64, filter_sizes=[1,2,3,5], l2_weight_decay=0.0001, dropout_val=0.25,
dense_dim=32, add_sigmoid=True, train_embeds=False, auxiliary=True, gpus=0, n_cnn_layers=1, pool='max',
add_embeds=False):
text_seq_input = Input(shape=(max_seq_len,))
embeds = Embedding(embedding_matrix.shape[0],
embedding_matrix.shape[1],
weights=[embedding_matrix],
#input_length=max_seq_len,
trainable=train_embeds)(text_seq_input)
x = SpatialDropout1D(0.3)(embeds)
x = Reshape((max_seq_len, embedding_matrix.shape[1], 1))(x)
pooled = []
for i in filter_sizes:
conv = Conv2D(num_filters, kernel_size=(i, embedding_matrix.shape[1]), kernel_initializer='normal', activation='elu')(x)
maxpool = MaxPooling2D(pool_size=(max_seq_len - i + 1, 1))(conv)
#avepool = AveragePooling2D(pool_size=(max_seq_len - i + 1, 1))(conv)
#globalmax = GlobalMaxPooling2D()(conv)
pooled.append(maxpool)
z = Concatenate(axis=1)(pooled)
z = Flatten()(z)
z = BatchNormalization()(z)
z = Dropout(dropout_val)(z)
if auxiliary:
auxiliary_input = Input(shape=(5,), name='aux_input')
z = Concatenate()([z, auxiliary_input])
output = Dense(num_classes, activation="sigmoid")(z)
if auxiliary:
model = Model(inputs=[text_seq_input, auxiliary_input], outputs=output)
else:
model = Model(inputs=text_seq_input, outputs=output)
if gpus > 0:
model = multi_gpu_model(model, gpus=gpus)
return model
#dpcnn http://ai.tencent.com/ailab/media/publications/ACL3-Brady.pdf
#https://github.com/neptune-ml/kaggle-toxic-starter/blob/master/best_configs/fasttext_dpcnn.yaml
def dpcnn(embedding_matrix, num_classes, max_seq_len, num_filters=64, filter_size=3, l2_weight_decay=0.0001, dropout_val=0.25,
dense_dim=256, add_sigmoid=True, train_embeds=False, auxiliary=True, gpus=0, n_cnn_layers=1, pool='max',
add_embeds=False):
max_pool_size = 3
max_pool_strides = 2
spatial_dropout = 0.25
dense_dropout = 0.5
comment = Input(shape=(max_seq_len,))
emb_comment = Embedding(embedding_matrix.shape[0], embedding_matrix.shape[1], weights=[embedding_matrix], trainable=train_embeds)(comment)
emb_comment = SpatialDropout1D(spatial_dropout)(emb_comment)
block1 = Conv1D(num_filters, kernel_size=filter_size, padding='same', activation='linear')(emb_comment)
block1 = BatchNormalization()(block1)
block1 = PReLU()(block1)
block1 = Conv1D(num_filters, kernel_size=filter_size, padding='same', activation='linear')(block1)
block1 = BatchNormalization()(block1)
block1 = PReLU()(block1)
# we pass embedded comment through conv1d with filter size 1 because it needs to have the same shape as block output
# if you choose filter_nr = embed_size (300 in this case) you don't have to do this part and can add emb_comment directly to block1_output
resize_emb = Conv1D(num_filters, kernel_size=1, padding='same', activation='linear')(emb_comment)
resize_emb = PReLU()(resize_emb)
block1_output = add([block1, resize_emb])
block1_output = MaxPooling1D(pool_size=max_pool_size, strides=max_pool_strides)(block1_output)
block2 = Conv1D(num_filters, kernel_size=filter_size, padding='same', activation='linear')(block1_output)
block2 = BatchNormalization()(block2)
block2 = PReLU()(block2)
block2 = Conv1D(num_filters, kernel_size=filter_size, padding='same', activation='linear')(block2)
block2 = BatchNormalization()(block2)
block2 = PReLU()(block2)
block2_output = add([block2, block1_output])
block2_output = MaxPooling1D(pool_size=max_pool_size, strides=max_pool_strides)(block2_output)
block3 = Conv1D(num_filters, kernel_size=filter_size, padding='same', activation='linear')(block2_output)
block3 = BatchNormalization()(block3)
block3 = PReLU()(block3)
block3 = Conv1D(num_filters, kernel_size=filter_size, padding='same', activation='linear')(block3)
block3 = BatchNormalization()(block3)
block3 = PReLU()(block3)
block3_output = add([block3, block2_output])
block3_output = MaxPooling1D(pool_size=max_pool_size, strides=max_pool_strides)(block3_output)
block4 = Conv1D(num_filters, kernel_size=filter_size, padding='same', activation='linear')(block3_output)
block4 = BatchNormalization()(block4)
block4 = PReLU()(block4)
block4 = Conv1D(num_filters, kernel_size=filter_size, padding='same', activation='linear')(block4)
block4 = BatchNormalization()(block4)
block4 = PReLU()(block4)
output = add([block4, block3_output])
output = GlobalMaxPooling1D()(output)
output = Dense(dense_dim, activation='linear')(output)
output = BatchNormalization()(output)
output = PReLU()(output)
output = Dropout(dense_dropout)(output)
    output = Dense(num_classes, activation='sigmoid')(output)
model = Model(comment, output)
return model
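# Smoke-test sketch (hedged; the embedding matrix and sizes below are made up):
#   import numpy as np
#   emb = np.random.rand(20000, 300).astype('float32')
#   model = dpcnn(emb, num_classes=6, max_seq_len=200)
#   model.compile(optimizer=Adam(1e-3), loss='binary_crossentropy')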
| 2.265625
| 2
|
main.py
|
ulianaami/tele_bot
| 0
|
12778808
|
import telebot
from settings import TOKEN
from telebot import types
import random
bot = telebot.TeleBot(TOKEN)
file = open('affirmations.txt', 'r', encoding='UTF-8')
affirmations = file.read().split('\n')
file.close()
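# Note on the Russian strings below (translations added for readability):
# 'Привет' = 'Hello', 'Аффирмация' = 'Affirmation',
# 'Получить позитивную аффирмацию' = 'Get a positive affirmation',
# 'Я тебя не понимаю. Напиши /help.' = "I don't understand you. Type /help."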
@bot.message_handler(content_types=['text'])
def get_text_messages(message):
username = message.from_user.username
if message.text == 'Привет' or message.text == 'привет':
bot.send_message(message.from_user.id, f'Привет, {username}\nНапиши: "Аффирмация"')
bot.register_next_step_handler(message, give_affirmation)
elif message.text == '/help':
bot.send_message(message.from_user.id, 'Напиши: "Привет"')
else:
bot.send_message(message.from_user.id, 'Я тебя не понимаю. Напиши /help.')
@bot.message_handler(func=lambda m: True)
def give_affirmation(message):
if message.text == 'аффирмация' or message.text == 'Аффирмация':
keyboard = types.InlineKeyboardMarkup()
key_affirmation = types.InlineKeyboardButton(text='Получить позитивную аффирмацию', callback_data='get_affirm')
keyboard.add(key_affirmation)
bot.send_message(message.from_user.id, text='Чтобы получить позитивную аффирмацию, нажми на кнопку: ',
reply_markup=keyboard)
@bot.callback_query_handler(func=lambda call: True)
def callback_worker(call):
if call.data == 'get_affirm':
bot.send_message(call.message.chat.id, random.choice(affirmations))
bot.infinity_polling()
| 2.5625
| 3
|
gears/geometry/arc.py
|
gfsmith/gears
| 1
|
12778809
|
from point import Point
from affinematrix import AffineMatrix
from math import pi, sin, cos, atan2, acos
from polyline import Polyline
# To do:
# create arcs in other ways (3 points?)
# blow arc into line segments?
# fix "area" function to work with direction
EPS = 1.0e-6
class Arc(object):
'''
an arc in a plane parallel to the X-Y plane
(start and end angle are not defined in other planes)
startAngle and endAngle are defined CCW from the X-axis, in radians
direction controls the wrap direction of the arc
'''
def __init__(self, center=Point(x=0.0,y=0.0,z=0.0), radius=1.0, startAngle=0.0, endAngle=90.0*pi/180.0, direction='CCW'):
self.center = center
self.radius = float(radius)
self.startAngle = float(startAngle)
self.endAngle = float(endAngle)
self.direction = str(direction)
self.__type__ = type('Arc')
def getEndpoints(self):
return (Point(self.center.x + self.radius*cos(self.startAngle), self.center.y + self.radius*sin(self.startAngle), self.center.z), Point(self.center.x + self.radius*cos(self.endAngle), self.center.y + self.radius*sin(self.endAngle), self.center.z))
def reverse(self):
(self.startAngle,self.endAngle) = (self.endAngle,self.startAngle)
if self.direction == 'CW':
self.direction = 'CCW'
elif self.direction == 'CCW':
self.direction = 'CW'
else:
raise ValueError('Arc direction is not "CW" or "CCW", undefined behavior!- cannot REVERSE')
    def __eq__(self,b):
if ( ( self.center == b.center ) and
( abs(self.radius - b.radius ) < EPS ) and
( abs(self.startAngle - b.startAngle) < EPS ) and
( abs(self.endAngle - b.endAngle) < EPS ) and
( self.direction == b.direction ) ):
return True
else:
return False
    def length(self):
        '''
        arc length
        '''
        # start angle and end angle are stored in radians, so arc length is simply radius * angle
        return ( self.radius * abs(self.startAngle - self.endAngle) )
# def area(self):
# '''
# pie slice area
# '''
# return (pi * self.radius * self.radius * ( abs(self.endAngle-self.startAngle)/(2.0*pi) ) )
def __str__(self):
return 'Arc(center=' + str(self.center) + ', radius=' + str(self.radius) + ', startAngle=' + str(self.startAngle) + ', endAngle=' + str(self.endAngle) + ', direction="' + str(self.direction) + '")'
def __repr__(self):
return str(self)
def dup(self):
# center=Point(x=0.0,y=0.0,z=0.0), radius=1.0, startAngle=0.0, endAngle=90.0*pi/180.0, direction='CW'):
return Arc(center=self.center.dup(),radius=self.radius,startAngle=self.startAngle,endAngle=self.endAngle,direction=self.direction)
def __rmul__(self,a):
if isinstance(a,AffineMatrix):
cp = a * self.center
spx1 = self.radius * cos(self.startAngle) + self.center.x
spy1 = self.radius * sin(self.startAngle) + self.center.y
sp = Point(spx1,spy1)
sp2 = a * sp
sa2 = atan2(sp2.y-cp.y,sp2.x-cp.x)
r2 = sp2.dist(cp)
epx1 = self.radius * cos(self.endAngle) + self.center.x
epy1 = self.radius * sin(self.endAngle) + self.center.y
ep = Point(epx1,epy1)
ep2 = a * ep
ea2 = atan2(ep2.y - cp.y, ep2.x - cp.x)
return Arc(center=cp,radius=r2,startAngle=sa2,endAngle=ea2,direction=self.direction)
else:
raise ValueError('Non-AffineMatrix in Arc __rmul__.')
def toPolyline(self,maxError=1.0e-5):
'''
converts an arc to a Polyline with enough segments so as not to exceed a maxError
'''
theta_step = 2.0 * acos( 1.0 - (maxError/self.radius) ) # the angular step needed to exactly meet maxError condition
theta = self.endAngle - self.startAngle
numSteps = int(abs(theta) / theta_step) + 1
theta_step = theta/numSteps
pList = []
for i in range(numSteps+1):
x = self.center.x + self.radius * cos(self.startAngle + theta_step * i)
y = self.center.y + self.radius * sin(self.startAngle + theta_step * i)
p = Point(x,y,0.0)
pList.append(p)
pl = Polyline(pList)
return pl
#if __name__ == "__main__":
#a=Arc()
#print "a = %s" % str(a)
#b=Arc()
#print "b = %s" % str(b)
#print "EPS = %f" % EPS
#print "a.center = %s, b.center = %s" % (str(a.center), str(b.center))
#print "a.center == b.center : %s" % str(a.center == b.center)
#print "abs(a.radius - b.radius) : %s" % str(abs(a.radius - b.radius))
#print "abs(a.radius - b.radius) < EPS : %s" % str(abs(a.radius-b.radius)<EPS)
#print "abs(a.startAngle - b.startAngle) < EPS : %s" % str(abs(a.startAngle-b.startAngle)<EPS)
#print "abs(a.endAngle - b.endAngle) < EPS : %s" % str(abs(a.endAngle-b.endAngle)<EPS)
#print "a.direction = %s" % str(a.direction)
#print "b.direction = %s" % str(b.direction)
#print "a.direction == b.direction : %s" % str(a.direction==b.direction)
#print "a==b : %s" % str(a==b)
| 3.6875
| 4
|
datasets/custom_data_loader.py
|
wymGAKKI/saps
| 0
|
12778810
|
<gh_stars>0
import torch.utils.data
def customDataloader(args):
args.log.printWrite("=> fetching img pairs in %s" % (args.data_dir))
datasets = __import__('datasets.' + args.dataset)
dataset_file = getattr(datasets, args.dataset)
train_set = getattr(dataset_file, args.dataset)(args, args.data_dir, 'train')
val_set = getattr(dataset_file, args.dataset)(args, args.data_dir, 'val')
if args.concat_data:
        args.log.printWrite('****** Using concat data ******')
args.log.printWrite("=> fetching img pairs in '{}'".format(args.data_dir2))
train_set2 = getattr(dataset_file, args.dataset)(args, args.data_dir2, 'train')
val_set2 = getattr(dataset_file, args.dataset)(args, args.data_dir2, 'val')
train_set = torch.utils.data.ConcatDataset([train_set, train_set2])
val_set = torch.utils.data.ConcatDataset([val_set, val_set2])
args.log.printWrite('Found Data:\t %d Train and %d Val' % (len(train_set), len(val_set)))
args.log.printWrite('\t Train Batch: %d, Val Batch: %d' % (args.batch, args.val_batch))
train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch,
num_workers=args.workers, pin_memory=args.cuda, shuffle=True)
test_loader = torch.utils.data.DataLoader(val_set , batch_size=args.val_batch,
num_workers=args.workers, pin_memory=args.cuda, shuffle=False)
return train_loader, test_loader
def benchmarkLoader(args):
args.log.printWrite("=> fetching img pairs in 'data/%s'" % (args.benchmark))
datasets = __import__('datasets.' + args.benchmark)
dataset_file = getattr(datasets, args.benchmark)
test_set = getattr(dataset_file, args.benchmark)(args, 'test')
args.log.printWrite('Found Benchmark Data: %d samples' % (len(test_set)))
args.log.printWrite('\t Test Batch %d' % (args.test_batch))
test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.test_batch,
num_workers=args.workers, pin_memory=args.cuda, shuffle=False)
return test_loader
def shadowDataloader(args):
args.log.printWrite("=> fetching img pairs in %s" % (args.mydata_dir))
datasets = __import__('datasets.' + args.shadowdataset)
dataset_file = getattr(datasets, args.shadowdataset)
train_set = getattr(dataset_file, args.shadowdataset)(args, args.mydata_dir, 'train')
val_set = getattr(dataset_file, args.shadowdataset)(args, args.mydata_dir, 'val')
args.log.printWrite('Found Data:\t %d Train and %d Val' % (len(train_set), len(val_set)))
args.log.printWrite('\t Train Batch: %d, Val Batch: %d' % (args.batch, args.val_batch))
train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch,
num_workers=args.workers, pin_memory=args.cuda, shuffle=True)
test_loader = torch.utils.data.DataLoader(val_set , batch_size=args.val_batch,
num_workers=args.workers, pin_memory=args.cuda, shuffle=False)
return train_loader, test_loader
def shadowTestDataloader(args):
args.log.printWrite("=> fetching img pairs in %s" % (args.mydata_dir))
datasets = __import__('datasets.' + args.shadowdataset)
dataset_file = getattr(datasets, args.shadowdataset)
#train_set = getattr(dataset_file, args.shadowdataset)(args, args.mydata_dir, 'train')
val_set = getattr(dataset_file, args.shadowdataset)(args, args.mydata_dir, 'val')
test_loader = torch.utils.data.DataLoader(val_set , batch_size=args.test_batch,
num_workers=args.workers, pin_memory=args.cuda, shuffle=False)
return test_loader
def reflectanceDataloader(args):
args.log.printWrite("=> fetching img pairs in %s" % (args.mydata_dir))
datasets = __import__('datasets.' + args.mydataset)
dataset_file = getattr(datasets, args.mydataset)
train_set = getattr(dataset_file, args.mydataset)(args, args.mydata_dir, 'train')
val_set = getattr(dataset_file, args.mydataset)(args, args.mydata_dir, 'val')
args.log.printWrite('Found Data:\t %d Train and %d Val' % (len(train_set), len(val_set)))
args.log.printWrite('\t Train Batch: %d, Val Batch: %d' % (args.batch, args.val_batch))
train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch,
num_workers=args.workers, pin_memory=args.cuda, shuffle=True)
test_loader = torch.utils.data.DataLoader(val_set , batch_size=args.val_batch,
num_workers=args.workers, pin_memory=args.cuda, shuffle=False)
return train_loader, test_loader
def myDataloader(args):
args.log.printWrite("=> fetching img pairs in %s" % (args.mydata_dir))
datasets = __import__('datasets.' + args.mydataset)
dataset_file = getattr(datasets, args.mydataset)
train_set = getattr(dataset_file, args.mydataset)(args, args.mydata_dir, 'train')
val_set = getattr(dataset_file, args.mydataset)(args, args.mydata_dir, 'val')
args.log.printWrite('Found Data:\t %d Train and %d Val' % (len(train_set), len(val_set)))
args.log.printWrite('\t Train Batch: %d, Val Batch: %d' % (args.batch, args.val_batch))
train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch,
num_workers=args.workers, pin_memory=args.cuda, shuffle=True)
test_loader = torch.utils.data.DataLoader(val_set , batch_size=args.val_batch,
num_workers=args.workers, pin_memory=args.cuda, shuffle=False)
return train_loader, test_loader
def pokemonDataloader(args):
args.log.printWrite("=> fetching img pairs in %s" % (args.mydata_dir))
datasets = __import__('datasets.' + args.pokemondataset)
dataset_file = getattr(datasets, args.pokemondataset)
train_set = getattr(dataset_file, args.pokemondataset)(args, args.pokemondata_dir, 'train')
val_set = getattr(dataset_file, args.pokemondataset)(args, args.pokemondata_dir, 'val')
args.log.printWrite('Found Data:\t %d Train and %d Val' % (len(train_set), len(val_set)))
args.log.printWrite('\t Train Batch: %d, Val Batch: %d' % (args.batch, args.val_batch))
train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch,
num_workers=args.workers, pin_memory=args.cuda, shuffle=True)
test_loader = torch.utils.data.DataLoader(val_set , batch_size=args.val_batch,
num_workers=args.workers, pin_memory=args.cuda, shuffle=False)
return train_loader, test_loader
| 2.453125
| 2
|
criteria_comparing_sets_pcs/jsd_calculator.py
|
VinAIResearch/PointSWD
| 4
|
12778811
|
<filename>criteria_comparing_sets_pcs/jsd_calculator.py
import os.path as osp
import sys
import torch
import torch.nn as nn
sys.path.append(osp.dirname(osp.dirname(osp.abspath(__file__))))
from metrics_from_point_flow.evaluation_metrics import jsd_between_point_cloud_sets
class JsdCalculator(nn.Module):
def __init__(self):
super(JsdCalculator, self).__init__()
@staticmethod
def forward(sample_pcs, ref_pcs, resolution=28, **kwargs):
sample_pcs = sample_pcs.detach().cpu().numpy()
ref_pcs = ref_pcs.detach().cpu().numpy()
return jsd_between_point_cloud_sets(sample_pcs, ref_pcs, resolution)
if __name__ == "__main__":
    sample_pcs = torch.empty(5, 2048, 3).uniform_(0, 1)
    ref_pcs = torch.empty(5, 2048, 3).uniform_(0, 1)
print(JsdCalculator.forward(sample_pcs, ref_pcs))
| 2.328125
| 2
|
Testing/publications/calendars/__init__.py
|
freder/PageBotExamples
| 5
|
12778812
|
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
# P A G E B O T
#
# Copyright (c) 2016+ <NAME> + <NAME>
# www.pagebot.io
# Licensed under MIT conditions
#
# Supporting DrawBot, www.drawbot.com
# Supporting Flat, xxyxyz.org/flat
# -----------------------------------------------------------------------------
#
# calendars/__init__.py
#
from pagebot.publications.calendars.photocalendar import PhotoCalendar
CALENDAR_CLASSES = {
    'Photo': PhotoCalendar, # Each month a photo and a table of month days
}
if __name__ == "__main__":
import doctest
import sys
sys.exit(doctest.testmod()[0])
| 1.945313
| 2
|
migrations/0001_initial.py
|
bm424/churchmanager
| 0
|
12778813
|
<filename>migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-05 22:59
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Church',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('short_description', models.CharField(max_length=128)),
('long_description', models.TextField()),
('photo', models.ImageField(blank=True, upload_to='')),
('address_line_1', models.CharField(max_length=64)),
('address_line_2', models.CharField(max_length=64)),
('postcode', models.CharField(max_length=10, validators=[django.core.validators.RegexValidator(message='Invalid postcode.', regex='^([a-zA-Z](([0-9][0-9]?)|([a-zA-Z][0-9][0-9]?)|([a-zA-Z]?[0-9][a-zA-Z])) ?[0-9][abd-hjlnp-uw-zABD-HJLNP-UW-Z]{2})$')])),
('email', models.EmailField(blank=True, max_length=254)),
('phone_number', models.CharField(blank=True, max_length=20)),
('website', models.URLField(blank=True)),
('slug', models.SlugField(editable=False, unique=True)),
],
options={
'verbose_name_plural': 'Churches',
},
),
]
| 1.882813
| 2
|
kokki/cookbooks/apache2/recipes/default.py
|
samuel/kokki
| 11
|
12778814
|
<gh_stars>10-100
from kokki import Package, Directory, Service, File, StaticFile, Execute, Template
PLATFORM_CONFIGS = dict(
centos = "httpd",
redhat = "httpd",
fedora = "httpd",
suse = "httpd",
debian = "apache2",
ubuntu = "apache2",
)
Package("apache2",
package_name = "httpd" if env.system.platform in ("centos", "redhat", "fedora", "suse") else "apache2")
Directory(env.config.apache.log_dir, mode = 0700, owner = env.config.apache.user, group = env.config.apache.user)
if env.system.platform in ("centos", "redhat", "fedora", "suse"):
Service("apache2",
service_name = "httpd",
restart_command = "/sbin/service httpd restart && sleep 1",
reload_command = "/sbin/service httpd reload && sleep 1",
supports_restart = True,
supports_reload = True,
supports_status = True)
File("/usr/local/bin/apache2_module_conf_generate.pl",
mode = 0755,
owner = "root",
group = "root",
content = StaticFile("apache2/files/apache2_module_conf_generate.pl"))
for d in ('sites-available', 'sites-enabled', 'mods-available', 'mods-enabled'):
Directory("%s/%s" % (env.config.apache.dir, d),
mode = 0755,
owner = "root",
group = "root")
libdir = "lib64" if env.system.architecture == "x86_64" else "lib"
Execute("generate-module-list",
command = "/usr/local/bin/apache2_module_conf_generate.pl /usr/%s/httpd/modules /etc/httpd/mods-available" % libdir)
# %w{a2ensite a2dissite a2enmod a2dismod}.each do |modscript|
# template "/usr/sbin/#{modscript}" do
# source "#{modscript}.erb"
# mode 0755
# owner "root"
# group "root"
# end
# end
#
# # installed by default on centos/rhel, remove in favour of mods-enabled
# file "#{node[:apache][:dir]}/conf.d/proxy_ajp.conf" do
# action :delete
# backup false
# end
# file "#{node[:apache][:dir]}/conf.d/README" do
# action :delete
# backup false
# end
#
# # welcome page moved to the default-site.rb temlate
# file "#{node[:apache][:dir]}/conf.d/welcome.conf" do
# action :delete
# backup false
# end
else: # debian, ubuntu
Service("apache2",
supports_restart = True,
supports_reload = True,
supports_status = True)
Directory("%s/ssl" % env.config.apache.dir,
mode = 0755,
owner = "root",
group = "root")
File("apache2.conf",
path = ("%s/conf/httpd.conf" if env.system.platform in ("centos", "redhat", "fedora") else "%s/apache2.conf") % env.config.apache.dir,
content = Template("apache2/apache2.conf.j2"),
owner = "root",
group = "root",
mode = 0644,
notifies = [("restart", env.resources["Service"]["apache2"])])
File("apache2-security",
path = "%s/conf.d/security" % env.config.apache.dir,
content = Template("apache2/security.j2"),
owner = "root",
group = "root",
mode = 0644,
notifies = [("restart", env.resources["Service"]["apache2"])])
File("apache2-charset",
path = "%s/conf.d/charset" % env.config.apache.dir,
content = Template("apache2/charset.j2"),
owner = "root",
group = "root",
mode = 0644,
notifies = [("restart", env.resources["Service"]["apache2"])])
File("apache2-ports.conf",
path = "%s/ports.conf" % env.config.apache.dir,
content = Template("apache2/ports.conf.j2"),
owner = "root",
group = "root",
mode = 0644,
notifies = [("restart", env.resources["Service"]["apache2"])])
# File("apache2-default",
# path = "%s/sites-available/default" % env.config.apache.dir,
# content = Template("apache2/default-site.j2"),
# owner = "root",
# group = "root",
# mode = 0644,
# noifies = [("restart", env.resources["Service"]["apache2"])])
File("apache2-default-000",
path = "%s/sites-enabled/000-default" % env.config.apache.dir,
action = "delete")
env.cookbooks.apache2.module("alias", conf=False)
# env.cookbooks.apache2.module("status", conf=True)
# include_recipe "apache2::mod_status"
# include_recipe "apache2::mod_alias"
# include_recipe "apache2::mod_auth_basic"
# include_recipe "apache2::mod_authn_file"
# include_recipe "apache2::mod_authz_default"
# include_recipe "apache2::mod_authz_groupfile"
# include_recipe "apache2::mod_authz_host"
# include_recipe "apache2::mod_authz_user"
# include_recipe "apache2::mod_autoindex"
# include_recipe "apache2::mod_dir"
# include_recipe "apache2::mod_env"
# include_recipe "apache2::mod_mime"
# include_recipe "apache2::mod_negotiation"
# include_recipe "apache2::mod_setenvif"
# include_recipe "apache2::mod_log_config" if platform?("centos", "redhat", "suse")
| 1.945313
| 2
|
core/forms.py
|
uktrade/directory-ui-supplier
| 2
|
12778815
|
from django.forms import Select
from django.utils import translation
from django.utils.translation import ugettext as _
from directory_components import forms, fields
from directory_constants import choices
class SearchForm(forms.Form):
term = fields.CharField(
max_length=255,
required=False,
)
industries = fields.ChoiceField(
required=False,
choices=(
(('', _('All industries')),) + choices.INDUSTRIES
),
widget=Select(attrs={'dir': 'ltr'})
)
def get_language_form_initial_data():
return {
'lang': translation.get_language()
}
| 2.0625
| 2
|
epi_judge_python/search_frequent_items.py
|
shobhitmishra/CodingProblems
| 0
|
12778816
|
<filename>epi_judge_python/search_frequent_items.py
from typing import Iterator, List
from test_framework import generic_test, test_utils
# Finds the candidates which may occur > n / k times.
def search_frequent_items(k: int, stream: Iterator[str]) -> List[str]:
# TODO - you fill in here.
return []
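# Illustrative sketch (not part of the original EPI stub, and not wired into the
# judge): one standard way to compute the candidates is the Misra-Gries
# heavy-hitters algorithm, which keeps at most k - 1 counters. The helper name
# below is hypothetical.
def misra_gries_candidates(k: int, stream: Iterator[str]) -> List[str]:
    counts = {}
    for item in stream:
        if item in counts:
            counts[item] += 1
        elif len(counts) < k - 1:
            counts[item] = 1
        else:
            # Decrement every counter and drop the ones that reach zero.
            for key in list(counts):
                counts[key] -= 1
                if counts[key] == 0:
                    del counts[key]
    # Any element occurring more than n / k times is guaranteed to survive here.
    return list(counts)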
def search_frequent_items_wrapper(k, stream):
return search_frequent_items(k, iter(stream))
if __name__ == '__main__':
exit(
generic_test.generic_test_main('search_frequent_items.py',
'search_frequent_items.tsv',
search_frequent_items_wrapper,
test_utils.unordered_compare))
| 2.75
| 3
|
Part_1_beginner/17_For_in_range_loop/solutions_for_in_range/exercise_1.py
|
Mikma03/InfoShareacademy_Python_Courses
| 0
|
12778817
|
<filename>Part_1_beginner/17_For_in_range_loop/solutions_for_in_range/exercise_1.py
# Poproś użytkownika o podanie numeru telefonu.
# Następnie wypisz informacje ile razy występuje w nim każda cyfra.
phone_number = input("Podaj numer telefonu: ")
for digit in range(10):
digit_times_in_number = phone_number.count(str(digit))
print(f"Cyfra {digit} występuje w Twoim numerze: {digit_times_in_number} razy")
| 4.1875
| 4
|
codegen_mat24.py
|
Martin-Seysen/mmgroup
| 14
|
12778818
|
r"""Generation of C code dealing with the Mathieu group Mat24
Generating the ``mmgroup.mat24`` extension
..........................................
Function ``mat24_make_c_code()`` generates C code for basic computations
in the Golay code, its cocode, and the Mathieu group Mat24. It also
generates code for computations in the Parker loop and in its
automorphism group.
The generated C modules are used in the python extension
``mmgroup.mat24``. The functions used by that extension are contained
in a shared library with name ``mmgroup_mat24.dll``. The reason for
creating such a shared library is that that these functions are also
called by C functions written for other python extensions.
We use the C code generation mechanism in class
``generate_c.TableGenerator``. Here a .c file and a .h file is
crreated from file ``mat24_functions.ske`` in subdirectory
``src/mmgroup/dev/mat24``. The .ske file is like a .c file, but
augmented with some code generation statements for entering tables
and automatically generated code into the .c file to be generated.
This .ske file may also have statements for automatically
generating a .h file declaring the exported functions.
We create an instance ``tg`` of class ``TableGenerator`` for
generating the .c files. The table generator ``tg`` takes two
dictionaries ``tables`` and ``directives`` as arguments. These
dictionaries provide user-defined tables and directives for the
code generator. Class ``Mat24`` in module
``mmgroup.dev.mat24.mat24_ref`` has methods ``tables()``
and ``directives()`` creating the required tables and directives.
Generating the ``mmgroup.generators`` extension
.............................................
Function ``generators_make_c_code`` generates C code for computing
the monomial part of the operation of the elements :math:`\xi` and
:math:`\xi^2` of the monster group. These C functions are used for
computing (rather large) tables required for the implementation of
the functions that compute the operation of :math:`\xi` and :math:`\xi^2`
on a representation of the monster.
The generation of the ``mmgroup.generators`` extension is similar
to the generation of the ``mmgroup.mat24`` extension. Here the
list of .ske file is given in the list GENERATORS_C_FILES.
For each file in that list a C file is created.
A common header with name given by H_GENERATORS_NAME is created
from all these .ske files, prepended by the header files in
the list GENERATORS_H_FILES. A .pxd file with name
PXD_GENERATORS_NAME is created from that header file. That .pxd
file will also contain the declarations in the string
PXD_DECLARATIONS.
All input files are read from the directory SKE_DIR.
Location of the output files
............................
The location of the generated output files is controlled by certain
variables in module config.py. Each of these variables specifies the
name of a directory.
Files with extension .c, .h go to the directory ``C_DIR``. Files with
extension .pxd, .pxi, .pyx go to the directory ``PXD_DIR``.
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import types
import sys
import re
import os
import subprocess
import shutil
from config import SRC_DIR, DEV_DIR, C_DIR, PXD_DIR
from config import REAL_SRC_DIR
sys.path.append(REAL_SRC_DIR)
from mmgroup.dev.mat24.mat24_ref import Mat24
from mmgroup.dev.generators.gen_xi_ref import GenXi
from mmgroup.generate_c import TableGenerator, make_doc
from mmgroup.generate_c import pxd_to_pyx
########################################################################
# Generate mat24_functions.c
########################################################################
pxd_declarations = """
from libc.stdint cimport uint32_t, uint16_t, uint8_t
"""
def mat24_make_c_code():
"""Create .c and .h file with the functionality of class Mat24
The input of this function is the file MAT24_C_FILE.ske that
contains a (much faster) C version of the functions in class
mmgroup.dev.mat24.mat24_ref.Mat24.
The functions in the .ske file make use of the tables that have
been generated for this module and also of some functions for
generating C code automatically. An example where generating
code automatically makes sense is the matrix multiplication with
a constant bit matrix.
The code generating process is described in class TableGenerator
in module make_c_tables.
"""
print("Creating C source from file mat24_functions.ske\n")
MAT24_C_FILE = "mat24_functions"
SKE_DIR = os.path.join(DEV_DIR, "mat24")
    # The following table can't easily be computed earlier
Mat24.tables["Mat24_doc_basis"] = Mat24.str_basis()
generator = TableGenerator(Mat24.tables, Mat24.directives)
f = os.path.join(SKE_DIR, MAT24_C_FILE)
path_ = os.path.join(C_DIR, MAT24_C_FILE)
#print("pwd", os.getcwd())
#print(os.path.realpath(path_ + ".c"))
generator.generate(f + ".ske", path_ + ".c", path_ + ".h")
## generator.export_tables(file_name = "mat24_export.py")
generator.generate_pxd(
os.path.join(PXD_DIR, MAT24_C_FILE + ".pxd"),
MAT24_C_FILE + ".h",
pxd_declarations
)
print("C files for extension mat24 have been created" )
########################################################################
# Generate c files for module 'generators'
########################################################################
SKE_DIR = os.path.join(DEV_DIR, "generators")
GENERATORS_C_FILES = [
"gen_xi_functions",
"mm_group_n",
"gen_leech",
"gen_leech3",
"gen_leech_reduce",
"gen_random",
]
GENERATORS_H_START = """
// %%GEN h
#ifndef MMGROUP_GENERATORS_H
#define MMGROUP_GENERATORS_H
// %%GEN c
"""
GENERATORS_H_END = """
// %%GEN h
#endif // ifndef MMGROUP_GENERATORS_H
// %%GEN c
"""
GENERATORS_H_FILES = [
GENERATORS_H_START,
"mmgroup_generators.h",
]
GENERATORS_TABLE_CLASSES = [
GenXi
]
H_GENERATORS_NAME = "mmgroup_generators.h"
PXD_GENERATORS_NAME = "generators.pxd"
PXI_GENERATORS_NAME = "generators.pxi"
PXD_DECLARATIONS = """
from libc.stdint cimport uint64_t, uint32_t, uint16_t, uint8_t
from libc.stdint cimport int64_t, int32_t
"""
def generators_make_c_code():
"""Create .c and .h file with the functionality of class Mat24Xi
"""
print("Creating C sources for the 'generators' extension\n")
    # Set up tables and directives for code generation
GenXi.tables["GenXi_doc"] = GenXi # can't do this earlier
tables = {}
directives = {}
for table_class in GENERATORS_TABLE_CLASSES:
table_instance = table_class()
tables.update(table_instance.tables)
directives.update(table_instance.directives)
print(tables.keys())
tg = TableGenerator(tables, directives)
# Generate c files
all_ske_files = [os.path.join(SKE_DIR, name)
for name in GENERATORS_H_FILES]
for name in GENERATORS_C_FILES:
ske_file = name + ".ske"
ske_path = os.path.join(SKE_DIR, ske_file)
c_file = name + ".c"
c_path = os.path.join(C_DIR, c_file)
print("Creating %s from %s" % (c_file, ske_file))
tg.generate(ske_path, c_path)
all_ske_files.append(ske_path)
# generate .h file
all_ske_files.append(GENERATORS_H_END)
h_file = H_GENERATORS_NAME
h_path = os.path.join(C_DIR, h_file)
pxd_file = PXD_GENERATORS_NAME
print("Creating %s from previous .ske files" % h_file)
tg.generate(all_ske_files, None, h_path)
# generate .pxd file
tg.generate_pxd(
os.path.join(PXD_DIR, PXD_GENERATORS_NAME),
h_file,
PXD_DECLARATIONS
)
print("C files for extension 'generators' have been created" )
# generate .pxi file
def pxi_comment(text, f):
print("\n" + "#"*70 + "\n### %s\n" % text + "#"*70 + "\n\n",
file=f
)
f_pxi = open(os.path.join(PXD_DIR, PXI_GENERATORS_NAME), "wt")
pxi_comment(
"Wrappers for C functions from file %s" % PXD_GENERATORS_NAME,
f_pxi
)
print(PXD_DECLARATIONS, file = f_pxi)
pxi_content = pxd_to_pyx(
os.path.join(PXD_DIR, PXD_GENERATORS_NAME),
os.path.split(PXD_GENERATORS_NAME)[0],
select = True
)
print(pxi_content, file = f_pxi)
f_pxi.close()
########################################################################
# Main program
########################################################################
if __name__ == "__main__":
mat24_make_c_code()
generators_make_c_code()
| 2.625
| 3
|
app/models.py
|
dzendjo/aimagicbot
| 6
|
12778819
|
<gh_stars>1-10
from motor.motor_asyncio import AsyncIOMotorClient
from umongo import Instance, Document, fields, Schema, ValidationError
import asyncio
from bson.objectid import ObjectId
import pymongo
import datetime
import pytz
from collections import OrderedDict
import os
import data
from pprint import pprint
db = AsyncIOMotorClient(data.DB_HOST, port=data.DB_PORT)[data.DB_NAME]
instance = Instance(db)
@instance.register
class User(Document):
class Meta:
collection_name = 'users'
indexes = []
id = fields.IntField(required=True, unique=True, attribute='_id')
process_flag = fields.BooleanField(default=False)
created = fields.DateTimeField(required=True)
visited = fields.DateTimeField(required=True)
username = fields.StrField(required=True, allow_none=True)
first_name = fields.StrField(required=True)
last_name = fields.StrField(required=True, allow_none=True, default=None)
language_code = fields.StrField(required=True, allow_none=True)
language = fields.StrField(required=True)
async def create_indexes():
await User.ensure_indexes()
if __name__ == '__main__':
pass
| 2.296875
| 2
|
src/grab/__init__.py
|
Boomatang/git-grab
| 2
|
12778820
|
<filename>src/grab/__init__.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""Top-level package for grab."""
__version__ = "0.4.0"
__releases__ = ["0.4.0", "0.3.0", "0.2.0", "0.1.2", "0.1.1"]
from .api import * # noqa
| 1.28125
| 1
|
algorithms/distribution_based/column_model.py
|
Soton-Song/valentine
| 0
|
12778821
|
<filename>algorithms/distribution_based/column_model.py
import numpy as np
import pickle
from data_loader.data_objects.column import Column
from utils.utils import convert_data_type
class CorrelationClusteringColumn(Column):
"""
A class used to represent a column of a table in the Correlation Clustering algorithm
Attributes
----------
data : list
The data contained in the column
quantiles : int
The number of quantiles used in the histogram creation
ranks : list
A list containing the ranks of the column
quantile_histogram : QuantileHistogram
The quantile histogram representation of the column using the sorted ranks of the data
Methods
-------
get_histogram()
Returns the quantile histogram of the column
get_original_data()
Returns the original data instances
"""
def __init__(self, name: str, data: list, table_name: str, dataset_name: str, quantiles: int):
"""
Parameters
----------
name : str
The name of the column
data : list
The data instances of the column
table_name : str
The name of the table
dataset_name : str
The name of the dataset
quantiles: int
The number of quantiles of the column's quantile histogram
"""
super().__init__(name, data, table_name)
self.quantiles = quantiles
self.dataset_name = dataset_name
self.ranks = self.get_global_ranks(self.data, self.dataset_name)
self.quantile_histogram = None
def get_histogram(self):
"""Returns the quantile histogram of the column"""
return self.quantile_histogram
def get_original_data(self):
"""Returns the original data instances"""
return self.data
@staticmethod
def get_global_ranks(column: list, dataset_name: str):
"""
Function that gets the column data, reads the pickled global ranks and produces a ndarray that contains the
ranks of the data .
Parameters
----------
column : list
The column data
dataset_name : str
The name of the dataset
Returns
-------
ndarray
The ndarray that contains the ranks of the data
"""
with open('cache/global_ranks/' + dataset_name + '.pkl', 'rb') as pkl_file:
global_ranks: dict = pickle.load(pkl_file)
ranks = np.array(sorted([global_ranks[convert_data_type(x)] for x in column]))
return ranks
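# Usage sketch (hedged; the column values and quantile count are made up): building a
# column requires the pickled global ranks at cache/global_ranks/<dataset_name>.pkl, e.g.
#   col = CorrelationClusteringColumn('price', ['1.0', '2.5'], 'products', 'my_dataset', quantiles=256)
#   hist = col.get_histogram()   # None until a QuantileHistogram is assigned elsewhere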
| 3.265625
| 3
|
moto/redshift/exceptions.py
|
EvaSDK/moto
| 1
|
12778822
|
from __future__ import unicode_literals
import json
from werkzeug.exceptions import BadRequest
class RedshiftClientError(BadRequest):
def __init__(self, code, message):
super(RedshiftClientError, self).__init__()
self.description = json.dumps({
"Error": {
"Code": code,
"Message": message,
'Type': 'Sender',
},
'RequestId': '6876f774-7273-11e4-85dc-39e55ca848d1',
})
class ClusterNotFoundError(RedshiftClientError):
def __init__(self, cluster_identifier):
super(ClusterNotFoundError, self).__init__(
'ClusterNotFound',
"Cluster {0} not found.".format(cluster_identifier))
class ClusterSubnetGroupNotFoundError(RedshiftClientError):
def __init__(self, subnet_identifier):
super(ClusterSubnetGroupNotFoundError, self).__init__(
'ClusterSubnetGroupNotFound',
"Subnet group {0} not found.".format(subnet_identifier))
class ClusterSecurityGroupNotFoundError(RedshiftClientError):
def __init__(self, group_identifier):
super(ClusterSecurityGroupNotFoundError, self).__init__(
'ClusterSecurityGroupNotFound',
"Security group {0} not found.".format(group_identifier))
class ClusterParameterGroupNotFoundError(RedshiftClientError):
def __init__(self, group_identifier):
super(ClusterParameterGroupNotFoundError, self).__init__(
'ClusterParameterGroupNotFound',
"Parameter group {0} not found.".format(group_identifier))
class InvalidSubnetError(RedshiftClientError):
def __init__(self, subnet_identifier):
super(InvalidSubnetError, self).__init__(
'InvalidSubnet',
"Subnet {0} not found.".format(subnet_identifier))
| 2.328125
| 2
|
scripts/run-nb-experiment.py
|
yzhan298/radossim
| 1
|
12778823
|
import argparse
import json
import papermill as pm
parser = argparse.ArgumentParser()
parser.add_argument("input", help="input Jupyter notebook")
parser.add_argument("output", help="output Jupyter notebook")
parser.add_argument("parameters", help="parameter file in JSON")
args = parser.parse_args()
parameters = json.load(open(args.parameters), parse_float=float)
pm.execute_notebook(args.input, args.output, parameters)
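# Example invocation (file names are placeholders):
#   python run-nb-experiment.py input.ipynb output.ipynb params.json
# where params.json holds a JSON object of notebook parameters.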
| 2.609375
| 3
|
subt/ros/robot/src/laserscan_to_pointcloud.py
|
robotika/osgar
| 12
|
12778824
|
<filename>subt/ros/robot/src/laserscan_to_pointcloud.py
#!/usr/bin/python
import rospy
from sensor_msgs.msg import PointCloud2, LaserScan
from laser_geometry import LaserProjection
class LaserScanToPointCloud:
def __init__(self):
self.laserProj = LaserProjection()
self.pointCloudPublisher = rospy.Publisher("/points", PointCloud2, queue_size = 1)
self.laserScanSubscriber = rospy.Subscriber("/scan", LaserScan, self.laserScanCallback)
def laserScanCallback(self, data):
self.pointCloudPublisher.publish(self.laserProj.projectLaser(data))
if __name__ == "__main__":
rospy.init_node("LaserScanToPointCloud")
laserScanToPointCloud = LaserScanToPointCloud()
rospy.spin()
| 2.265625
| 2
|
pcg_gazebo/parsers/urdf/inertial.py
|
TForce1/pcg_gazebo
| 40
|
12778825
|
# Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..types import XMLBase
from .mass import Mass
from .origin import Origin
from .inertia import Inertia
class Inertial(XMLBase):
_NAME = 'inertial'
_TYPE = 'urdf'
_CHILDREN_CREATORS = dict(
mass=dict(creator=Mass),
origin=dict(creator=Origin),
inertia=dict(creator=Inertia)
)
def __init__(self):
XMLBase.__init__(self)
self.reset()
@property
def mass(self):
return self._get_child_element('mass')
@mass.setter
def mass(self, value):
self._add_child_element('mass', value)
@property
def origin(self):
return self._get_child_element('origin')
@origin.setter
def origin(self, value):
self._add_child_element('origin', value)
@property
def inertia(self):
return self._get_child_element('inertia')
@inertia.setter
def inertia(self, value):
self._add_child_element('inertia', value)
def to_sdf(self):
from ..sdf import create_sdf_element
obj = create_sdf_element('inertial')
obj.pose = self.origin.to_sdf()
obj.inertia = self.inertia.to_sdf()
obj.mass = self.mass.to_sdf()
return obj
| 2.078125
| 2
|
event/example.py
|
BrickOzp/TrainController
| 3
|
12778826
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Copyright 2018 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import thread
from time import sleep
import datetime
import time
class ExampleEvent:
def __init__(self, train):
print("ExampleEvent init");
self.train = train
def sensorDetected(self, rId):
pass
def stopAndTurn(self, *args):
print("[{}] Detected ExampleEvent".format(datetime.datetime.now()));
speed = self.train.getSpeed()
self.train.onStop()
sleep(5)
self.train.toggleDirection()
self.train.setSpeed(speed, True)
def directionToggled(self, direction):
pass
| 2.765625
| 3
|
tfx/orchestration/kubeflow/utils.py
|
jolks/tfx
| 1
|
12778827
|
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility for Kubeflow-based orchestrator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from typing import Text
from kfp import dsl
from tfx.orchestration.experimental.runtime_parameter import runtime_string_parameter
def replace_placeholder(serialized_component: Text) -> Text:
"""Replaces the RuntimeParameter placeholders with kfp.dsl.PipelineParam."""
placeholders = re.findall(runtime_string_parameter.PARAMETER_PATTERN,
serialized_component)
for placeholder in placeholders:
parameter = runtime_string_parameter.RuntimeStringParameter.parse(
placeholder)
dsl_parameter = dsl.PipelineParam(name=parameter.name)
serialized_component = serialized_component.replace(placeholder,
str(dsl_parameter))
return serialized_component
| 1.882813
| 2
|
app/auth/__init__.py
|
bluethon/flasky-learn
| 1
|
12778828
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2016-04-25 23:12:05
# @Author : Bluethon (<EMAIL>)
# @Link : http://github.com/bluethon
from flask import Blueprint
auth = Blueprint('auth', __name__)
# noinspection PyUnresolvedReferences
from . import views
| 1.398438
| 1
|
app/tests/test_crawler.py
|
gontarz/image-crawler
| 0
|
12778829
|
# -*- coding: utf-8 -*-
"""
description:
"""
import unittest
import os
from bs4 import BeautifulSoup
from app.crawler import visible, extract_images_links, extract_text
from app.settings import BASE_DIR
class TestCrawler(unittest.TestCase):
def test_visible(self):
"""
in test_crawler.html all visible text contains 't' and non-visible 'f'
"""
with open(os.path.join(BASE_DIR, 'app/tests/test_html/test_crawler.html')) as html:
soup = BeautifulSoup(html, 'html.parser')
data = soup.findAll(text=True)
result = {text.strip() for text in filter(visible, data) if text.strip()}
self.assertEqual({'t'}, result)
result = [elem for elem in data if visible(elem)]
self.assertTrue(all(result))
class TestCrawlerAsync(unittest.IsolatedAsyncioTestCase):
async def test_extract_text(self):
with open(os.path.join(BASE_DIR, 'app/tests/test_html/example.html')) as html:
crawled = await extract_text(html)
expected = '''Example Domain
This domain is for use in illustrative examples in documents. You may use this
domain in literature without prior coordination or asking for permission.
More information...'''
self.assertEqual(expected, crawled)
async def test_extract_images_links(self):
with open(os.path.join(BASE_DIR, 'app/tests/test_html/test_crawler.html')) as html:
crawled = await extract_images_links(html)
expected = {'test1', 'test2', 'test3'}
self.assertEqual(expected, crawled)
if __name__ == '__main__':
unittest.main()
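# --- Editor's note: a hypothetical usage sketch, not part of the original test module. It
# assumes, as the tests above do, that extract_text / extract_images_links accept an open
# file handle; the HTML path is a placeholder. Kept commented out because unittest.main()
# above exits before any code placed after it would run.
#
# import asyncio
#
# async def crawl_local_file(path):
#     with open(path) as html:
#         text = await extract_text(html)
#     with open(path) as html:
#         links = await extract_images_links(html)
#     return text, links
#
# print(asyncio.run(crawl_local_file('app/tests/test_html/example.html')))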
| 2.703125
| 3
|
detectors/eighteen/ensemble.py
|
zhampel/FakeFinder
| 0
|
12778830
|
<reponame>zhampel/FakeFinder
import cv2
import numpy as np
import copy
import math
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision
from face_detect_lib.models.retinaface import RetinaFace
from face_detect_lib.layers.functions.prior_box import PriorBox
from face_detect_lib.utils.box_utils import decode_batch, decode_landm_batch, decode, decode_landm
from utils import *
from models import *
__all__ = ['Ensemble', 'pipeline_cfg']
cfg_mnet = {
'name': 'mobilenet0.25',
'min_sizes': [[16, 32], [64, 128], [256, 512]],
'steps': [8, 16, 32],
'variance': [0.1, 0.2],
'clip': False,
'loc_weight': 2.0,
'gpu_train': True,
'batch_size': 32,
'ngpu': 1,
'epoch': 250,
'decay1': 190,
'decay2': 220,
'image_size': 640,
'pretrain': True,
'return_layers': {'stage1': 1, 'stage2': 2, 'stage3': 3},
'in_channel': 32,
'out_channel': 64
}
class Config:
def __init__(self):
self.cuda = True
self.face_pretrained_path = './weights/mobilenetV1X0.25_pretrain.tar'
self.face_model_path = './weights/mobilenet0.25_Final.pth'
self.model_name = 'mobile0.25'
self.origin_size = False
self.confidence_threshold = 0.02
self.top_k = 5000
self.nms_threshold = 0.4
self.keep_top_k = 750
self.target_size = 400
self.max_size = 2150
self.model_cfg = cfg_mnet
self.vis_thres = 0.8
pipeline_cfg = Config()
def detect_face(img_list, detect_record):
im_shape = img_list[0].shape
detect_key = str(im_shape[0]) + '*' + str(im_shape[1])
if detect_key not in detect_record:
# print(detect_key + ' not in dict')
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
resize = float(pipeline_cfg.target_size) / float(im_size_min)
# prevent bigger axis from being more than max_size:
if np.round(resize * im_size_max) > pipeline_cfg.max_size:
resize = float(pipeline_cfg.max_size) / float(im_size_max)
im_height, im_width = int(
im_shape[0] * resize), int(im_shape[1] * resize)
detect_record[detect_key] = {
'resize': resize, 'resized_h': im_height, 'resized_w': im_width}
priorbox = PriorBox(pipeline_cfg.model_cfg,
image_size=(im_height, im_width))
priors = priorbox.forward()
priors = priors.to(pipeline_cfg.device)
detect_record[detect_key]['priors'] = priors
# detect face
detect_info = detect_record[detect_key]
resize = detect_info['resize']
resize_img_list = []
result_dets_list = []
batch_size = 8
detect_nms_time = 0
for img_idx, img in enumerate(img_list):
if detect_info['resize'] != 1:
img = cv2.resize(img, None, None, fx=detect_info['resize'], fy=detect_info['resize'],
interpolation=cv2.INTER_LINEAR)
img = np.float32(img)
else:
img = np.float32(img)
resize_img_list.append(img)
img_idx += 1
if img_idx % batch_size == 0 or img_idx == len(img_list):
im_height, im_width, _ = resize_img_list[0].shape
scale = torch.Tensor([resize_img_list[0].shape[1], resize_img_list[0].shape[0], resize_img_list[0].shape[1],
resize_img_list[0].shape[0]])
resize_img_list = np.stack(resize_img_list, axis=0) # [n,h,w,c]
resize_img_list -= (104, 117, 123)
resize_img_list = resize_img_list.transpose(0, 3, 1, 2)
resize_img_list = torch.from_numpy(resize_img_list)
resize_img_list = resize_img_list.to(pipeline_cfg.device)
scale = scale.to(pipeline_cfg.device)
loc, conf, landms = pipeline_cfg.net(resize_img_list)
priors = detect_info['priors']
prior_data = priors.data
boxes = decode_batch(loc.data, prior_data,
pipeline_cfg.model_cfg['variance'])
boxes = boxes * scale / resize # [batchsize, proposals, 4]
scores = conf[:, :, 1] # [batchsize, proposals]
detect_nms_begin = 0
for per_idx in range(boxes.shape[0]):
box, score = boxes[per_idx, :, :], scores[per_idx, :]
inds = torch.nonzero(
score > pipeline_cfg.confidence_threshold)[:, 0]
box, score = box[inds, :], score[inds]
dets = torch.cat((box, score[:, None]), dim=1)
keep = torchvision.ops.nms(
box, score, pipeline_cfg.nms_threshold)
dets = dets[keep, :]
dets = dets.data.cpu().numpy()
result_dets_list.append(dets)
resize_img_list = []
detect_nms_end = 0
detect_nms_time += detect_nms_end - detect_nms_begin
return result_dets_list
def init_face_detecor():
torch.set_grad_enabled(False)
pipeline_cfg.net = RetinaFace(
cfg=pipeline_cfg.model_cfg, model_path=pipeline_cfg.face_pretrained_path, phase='test')
pipeline_cfg.net = load_model(
pipeline_cfg.net, pipeline_cfg.face_model_path, pipeline_cfg.cuda)
pipeline_cfg.net.eval()
cudnn.benchmark = True
pipeline_cfg.device = torch.device("cuda" if pipeline_cfg.cuda else "cpu")
pipeline_cfg.net = pipeline_cfg.net.to(pipeline_cfg.device)
return pipeline_cfg.net
def get_image_score(face_scale, cls_model, softmax_func, aligned_faces, isRGB, mean, std, isScale, isFlip=False):
try:
# aligned_faces #[faces,frames,H,W,C] BGR
img_aligned_faces = aligned_faces.clone().detach()
img_aligned_faces = img_aligned_faces.permute(
[0, 1, 4, 2, 3]) # [faces,frames,c,h,w] BGR
if isRGB:
img_aligned_faces = img_aligned_faces[:, :, [2, 1, 0], :, :]
img_frames = 35
interval = max(1, math.ceil(img_aligned_faces.shape[1] / img_frames))
img_aligned_faces = img_aligned_faces[:, 0::interval, :, :, :]
img_frames = (img_aligned_faces.shape[1] // 5) * 5
img_aligned_faces = img_aligned_faces[:, :img_frames, :, :, :]
all_score, score = [], 0
for face_idx in range(img_aligned_faces.shape[0]):
one_face_aligned = img_aligned_faces[face_idx, :, :, :, :]
one_face_aligned_mean = (
one_face_aligned - mean) / std # [frames,c,h,w]
if isFlip:
one_face_aligned_mean_flip = torch.flip(
one_face_aligned_mean, dims=[3])
one_face_aligned_input = torch.cat(
(one_face_aligned_mean, one_face_aligned_mean_flip), dim=0)
output = cls_model(one_face_aligned_input)
output = (output[:img_frames, :] + output[img_frames:, :]) / 2
else:
output = cls_model(one_face_aligned_mean)
output = output.view(-1, 5, 2)
output = output.mean(1)
output = softmax_func(output)
output = output[:, 1].cpu().numpy() # [6,1]
if output[output > 0.85].shape[0] / output.shape[0] > 0.7:
score = output[output > 0.85].mean()
elif output[output < 0.15].shape[0] / output.shape[0] > 0.7:
score = output[output < 0.15].mean()
else:
score = output.mean()
all_score.append(score)
all_score = np.array(all_score)
score_max, score_min, score_avg = np.max(
all_score), np.min(all_score), np.mean(all_score)
if score_max > 0.9:
score = score_max
elif len(np.where(all_score > 0.6)[0]) == all_score.shape[0]:
score = score_max
elif len(np.where(all_score < 0.4)[0]) == all_score.shape[0]:
score = score_min
else:
score = score_avg
if isScale:
if score >= 0.98 or score <= 0.02:
score = (score - 0.5) * 0.96 + 0.5
except Exception as e:
print(e)
score = -1
return score
def get_sf_score(face_scale, cls_model, softmax_func, aligned_faces, isRGB, mean, std):
try:
# aligned_faces [faces,frames,H,W,C] BGR
sf_aligned_faces = aligned_faces.clone().detach()
sf_aligned_faces = sf_aligned_faces.permute(
[0, 4, 1, 2, 3]) # [faces,c,frames,h,w]
if isRGB:
sf_aligned_faces = sf_aligned_faces[:, [2, 1, 0], :, :, :]
sf_aligned_faces = (sf_aligned_faces - mean) / std
sf_output = cls_model(sf_aligned_faces)
sf_output = softmax_func(sf_output)
sf_output = sf_output[:, 1].cpu().numpy()
sf_max, sf_min, sf_avg = np.max(sf_output), np.min(
sf_output), np.mean(sf_output)
if sf_max > 0.9:
sf_score = sf_max
elif len(np.where(sf_output > 0.6)[0]) == sf_output.shape[0]:
sf_score = sf_max
elif len(np.where(sf_output < 0.4)[0]) == sf_output.shape[0]:
sf_score = sf_min
else:
sf_score = sf_avg
except Exception as e:
print(e)
sf_score = -1
return sf_score
def get_final_score(score_list, weight_list):
final_score = 0
assert len(score_list) == len(weight_list)
new_score_list, new_weight_list = [], []
for idx, score in enumerate(score_list):
if score != -1:
new_score_list.append(score)
new_weight_list.append(weight_list[idx])
new_scores, new_weights = np.array(
new_score_list), np.array(new_weight_list)
if len(new_weights) == 0:
return -1
# print('new_scores:', new_scores, 'new_weights',
# new_weights / np.sum(new_weights))
final_score = np.sum(new_scores * (new_weights / np.sum(new_weights)))
return final_score
def get_final_score_policy(score_list, weight_list, img_start_idx, sf_weight):
assert len(score_list) == len(weight_list)
sf_score_list, sf_weight_list = score_list[:
img_start_idx], weight_list[:img_start_idx]
img_score_list, img_weight_list = score_list[img_start_idx:], weight_list[img_start_idx:]
new_sf_score_list, new_sf_weight_list, new_img_score_list, new_img_weight_list = [], [], [], []
for idx, score in enumerate(sf_score_list):
if score != -1:
new_sf_score_list.append(score)
new_sf_weight_list.append(sf_weight_list[idx])
for idx, score in enumerate(img_score_list):
if score != -1:
new_img_score_list.append(score)
new_img_weight_list.append(img_weight_list[idx])
new_sf_scores, new_sf_weights = np.array(
new_sf_score_list), np.array(new_sf_weight_list)
new_img_scores, new_img_weights = np.array(
new_img_score_list), np.array(new_img_weight_list)
sf_success, img_success = True, True
# sf
if new_sf_scores.shape[0] != 0:
if len(np.where(new_sf_scores > 0.8)[0]) / new_sf_scores.shape[0] > 0.7:
new_sf_y_scores, new_sf_y_weights = new_sf_scores[new_sf_scores >
0.8], new_sf_weights[new_sf_scores > 0.8]
sf_score = np.sum(new_sf_y_scores *
(new_sf_y_weights / np.sum(new_sf_y_weights)))
elif len(np.where(new_sf_scores < 0.2)[0]) / new_sf_scores.shape[0] > 0.7:
new_sf_y_scores, new_sf_y_weights = new_sf_scores[new_sf_scores <
0.2], new_sf_weights[new_sf_scores < 0.2]
sf_score = np.sum(new_sf_y_scores *
(new_sf_y_weights / np.sum(new_sf_y_weights)))
else:
sf_score = np.sum(
new_sf_scores * (new_sf_weights / np.sum(new_sf_weights)))
else:
sf_success = False
# img
if new_img_scores.shape[0] != 0:
if len(np.where(new_img_scores > 0.8)[0]) / new_img_scores.shape[0] > 0.7:
new_img_y_scores, new_img_y_weights = new_img_scores[new_img_scores > 0.8], new_img_weights[
new_img_scores > 0.8]
img_score = np.sum(new_img_y_scores *
(new_img_y_weights / np.sum(new_img_y_weights)))
elif len(np.where(new_img_scores < 0.2)[0]) / new_img_scores.shape[0] > 0.7:
new_img_y_scores, new_img_y_weights = new_img_scores[new_img_scores < 0.2], new_img_weights[
new_img_scores < 0.2]
img_score = np.sum(new_img_y_scores *
(new_img_y_weights / np.sum(new_img_y_weights)))
else:
img_score = np.sum(
new_img_scores * (new_img_weights / np.sum(new_img_weights)))
else:
img_success = False
if sf_success and img_success:
final_score = sf_score * sf_weight + (1 - sf_weight) * img_score
elif sf_success and not img_success:
final_score = sf_score
elif img_success and not sf_success:
final_score = img_score
else:
final_score = -1
return final_score
def predict_batch(img_list, sf_model1, sf_model2, sf_model3, xcp_model, b3_model, res34_model, b1_model, b1long_model,
b1short_model, b0_model, sf_model4, softmax_func, detect_record):
# face det
aligned_faceses, noface_flag = detect_video_face(img_list, detect_record)
if noface_flag == -1:
return -1
sf1_score, sf2_score, sf3_score, sf4_score, xcp_score, b3_score, res34_score, b1_score, b1long_score, b1short_score, b0_score = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
scale_num = len(aligned_faceses)
# slowfast model infer
sf_mean = torch.from_numpy(
np.array([110.63666788 / 255, 103.16065604 / 255, 96.29023126 / 255], dtype=np.float32)).reshape(
[1, -1, 1, 1, 1]).cuda()
sf_std = torch.from_numpy(
np.array([38.7568578 / 255, 37.88248729 / 255, 40.02898126 / 255], dtype=np.float32)).reshape(
[1, -1, 1, 1, 1]).cuda()
xcp_mean = torch.from_numpy(
np.array([0.5, 0.5, 0.5], dtype=np.float32)).reshape([1, -1, 1, 1]).cuda()
xcp_std = torch.from_numpy(
np.array([0.5, 0.5, 0.5], dtype=np.float32)).reshape([1, -1, 1, 1]).cuda()
b3_mean = torch.from_numpy(np.array(
[0.485, 0.456, 0.406], dtype=np.float32)).reshape([1, -1, 1, 1]).cuda()
b3_std = torch.from_numpy(np.array(
[0.229, 0.224, 0.225], dtype=np.float32)).reshape([1, -1, 1, 1]).cuda()
for aligned_faces in aligned_faceses:
# print('aligned_faces shape:', aligned_faces.shape)
aligned_faces = np.float32(aligned_faces)
face_scale, face_num, align_frames = aligned_faces.shape[
3], aligned_faces.shape[0], aligned_faces.shape[1]
# init scale tensor
aligned_faces = torch.from_numpy(aligned_faces)
if pipeline_cfg.cuda:
aligned_faces = aligned_faces.cuda() # [faces,frames,H,W,C]
aligned_faces /= 255
# xcp inference
if face_scale == 299:
xcp_score = get_image_score(face_scale, xcp_model, softmax_func, aligned_faces, False, xcp_mean, xcp_std,
False, True)
b3_score = get_image_score(face_scale, b3_model, softmax_func, aligned_faces, True, b3_mean, b3_std, True,
False)
b1_score = get_image_score(face_scale, b1_model, softmax_func, aligned_faces, True, b3_mean, b3_std, False,
True)
b1long_score = get_image_score(face_scale, b1long_model, softmax_func, aligned_faces, True, b3_mean, b3_std,
False, False)
b1short_score = get_image_score(face_scale, b1short_model, softmax_func, aligned_faces, True, b3_mean,
b3_std, False, False)
b0_score = get_image_score(face_scale, b0_model, softmax_func, aligned_faces, True, b3_mean, b3_std, False,
True)
if face_scale == 256:
res34_score = get_image_score(face_scale, res34_model, softmax_func, aligned_faces, True, b3_mean, b3_std,
True, True)
sf1_score = get_sf_score(
face_scale, sf_model1, softmax_func, aligned_faces, True, sf_mean, sf_std)
sf2_score = get_sf_score(
face_scale, sf_model2, softmax_func, aligned_faces, True, sf_mean, sf_std)
sf3_score = get_sf_score(
face_scale, sf_model3, softmax_func, aligned_faces, True, sf_mean, sf_std)
sf4_score = get_sf_score(
face_scale, sf_model4, softmax_func, aligned_faces, True, sf_mean, sf_std)
score_list = [sf1_score, sf2_score, sf3_score, sf4_score, xcp_score, b3_score, res34_score, b1_score, b1long_score,
b1short_score, b0_score]
# print(score_list)
sf_weight_np, img_weight_np = np.array(
[10, 8, 4, 8]), np.array([10, 6, 4, 10, 8, 8, 7])
sf_weight_np = sf_weight_np / np.sum(sf_weight_np) * 0.4
img_weight_np = img_weight_np / np.sum(img_weight_np) * 0.6
weight_np = np.concatenate((sf_weight_np, img_weight_np))
weight_list = list(weight_np)
# print(weight_list)
final_score = get_final_score_policy(
score_list, weight_list, len(sf_weight_np), 0.4)
return final_score
def detect_video_face(img_list, detect_record):
num_frames = len(img_list)
num_faces = 0
face_count = {}
img_h, img_w = img_list[0].shape[0], img_list[0].shape[1]
face_list = []
dets_list = detect_face(img_list, detect_record)
# detect_face_time = detect_tmp_end - detect_tmp_begin
# global DETECT_FACE_TIME
# DETECT_FACE_TIME += detect_face_time
# print('detect face time:', detect_face_time)
for idx, img_raw in enumerate(img_list):
# preserve only faces with confidence above threshold
dets = dets_list[idx][np.where(dets_list[idx][:, 4] >= pipeline_cfg.vis_thres)][:, :4].astype(
np.int64) # [m,15]
face_list.append(dets)
if len(dets) not in face_count:
face_count[len(dets)] = 0
face_count[len(dets)] += 1
# vote for the number of faces that most frames agree on
max_count = 0
for num in face_count:
if face_count[num] > max_count:
num_faces = num
max_count = face_count[num]
if num_faces <= 0:
return None, -1
active_faces = None
face_tubes = []
for frame_idx in range(num_frames):
cur_faces = face_list[frame_idx] #
if len(cur_faces) <= 0:
continue
if active_faces is not None:
ious = vanilla_bbox_iou_overlaps(cur_faces, active_faces)
max_iou, max_idx = np.max(ious, axis=1), np.argmax(ious, axis=1)
mark = [False for _ in range(len(active_faces))]
else:
max_iou, max_idx = None, None
for face_idx in range(len(cur_faces)):
# IoU threshold 0.5 for determining whether is the same person
if max_iou is None or max_iou[face_idx] < 0.5:
face = copy.deepcopy(cur_faces[face_idx])
if active_faces is None:
active_faces = face[np.newaxis, :]
else:
active_faces = np.concatenate(
(active_faces, face[np.newaxis, :]), axis=0)
face_tubes.append([[frame_idx, face_idx]])
else:
correspond_idx = max_idx[face_idx]
# Each face tube can only add at most one face from a frame
if mark[correspond_idx]:
continue
mark[correspond_idx] = True
active_faces[correspond_idx] = cur_faces[face_idx]
face_tubes[correspond_idx].append([frame_idx, face_idx])
# Choose num_faces longest face_tubes as chosen faces
face_tubes.sort(key=lambda tube: len(tube), reverse=True)
if len(face_tubes) < num_faces:
num_faces = len(face_tubes)
num_faces = min(num_faces, 2)
face_tubes = face_tubes[:num_faces]
aligned_faces_img_256, aligned_faces_img_299, aligned_faces_img_320 = [], [], []
for face_idx in range(num_faces):
cur_face_list, source_frame_list = [], []
# record max crop_bbox size
tube_idx, max_size = 0, 0
for frame_idx in range(num_frames):
cur_face = face_tubes[face_idx][tube_idx]
next_face = None if tube_idx == len(
face_tubes[face_idx]) - 1 else face_tubes[face_idx][tube_idx + 1]
# find nearest frame inside face tube
if next_face is not None and abs(cur_face[0] - frame_idx) > abs(next_face[0] - frame_idx):
tube_idx += 1
cur_face = next_face
face = copy.deepcopy(face_list[cur_face[0]][cur_face[1]])
cur_face_list.append(face)
source_frame_list.append(cur_face[0])
_, _, size = get_boundingbox(face, img_w, img_h)
if size > max_size:
max_size = size
# align face size
max_size = max_size // 2 * 2
max_size = min(max_size, img_w, img_h)
# adjust to max face size and crop faces
cur_faces_img_256, cur_faces_img_299, cur_faces_img_320 = [], [], []
for frame_idx in range(num_frames):
x1, y1, size = adjust_boundingbox(
cur_face_list[frame_idx], img_w, img_h, max_size)
img = img_list[source_frame_list[frame_idx]
][y1:y1 + size, x1:x1 + size, :]
img_256 = cv2.resize(
img, (256, 256), interpolation=cv2.INTER_LINEAR)
cur_faces_img_256.append(img_256)
img_299 = cv2.resize(
img, (299, 299), interpolation=cv2.INTER_LINEAR)
cur_faces_img_299.append(img_299)
cur_faces_numpy_256 = np.stack(
cur_faces_img_256, axis=0) # [num_frames, h, w, c]
cur_faces_numpy_299 = np.stack(
cur_faces_img_299, axis=0) # [num_frames, h, w, c]
aligned_faces_img_256.append(cur_faces_numpy_256)
aligned_faces_img_299.append(cur_faces_numpy_299)
# [num_faces, num_frames, h, w, c]
aligned_faces_numpy_256 = np.stack(aligned_faces_img_256, axis=0)
# [num_faces, num_frames, h, w, c]
aligned_faces_numpy_299 = np.stack(aligned_faces_img_299, axis=0)
return [aligned_faces_numpy_256, aligned_faces_numpy_299], 1
class Ensemble:
def __init__(self, cls_model_ckpt: str, xcp_model_ckpt: str, slow_fast_2_ckpt: str,
slow_fast_3_ckpt: str, b3_model_ckpt: str, res34_model_ckpt: str, b1_model_ckpt: str,
b1long_model_ckpt: str, b1short_model_ckpt: str, b0_model_ckpt: str, slow_fast_4_ckpt: str,
frame_nums: int, cuda=True):
self.cls_model_ckpt = cls_model_ckpt
self.xcp_model_ckpt = xcp_model_ckpt
self.cls_model2_ckpt = slow_fast_2_ckpt
self.cls_model3_ckpt = slow_fast_3_ckpt
self.cls_model4_ckpt = slow_fast_4_ckpt
self.b3_model_ckpt = b3_model_ckpt
self.res34_model_ckpt = res34_model_ckpt
self.b1_model_ckpt = b1_model_ckpt
self.b1long_model_ckpt = b1long_model_ckpt
self.b1short_model_ckpt = b1short_model_ckpt
self.b0_model_ckpt = b0_model_ckpt
self.frame_nums = frame_nums
self.cuda = cuda
self.detect_record = {}
self.init_model()
def init_model(self):
self.face_det_model = init_face_detecor()
self.face_cls_model = init_slow_fast_model(
self.cls_model_ckpt, self.cuda)
self.face_cls_model2 = init_slow_fast_model(
self.cls_model2_ckpt, self.cuda)
self.face_cls_model3 = init_slow_fast_model(
self.cls_model3_ckpt, self.cuda)
self.face_cls_model4 = init_slow_fast_model(
self.cls_model4_ckpt, self.cuda)
self.xcp_cls_model = init_xception_cls_model(
self.xcp_model_ckpt, self.cuda)
self.b3_cls_model = init_b3_cls_model(self.b3_model_ckpt, self.cuda)
self.res34_cls_model = init_res34_cls_model(
self.res34_model_ckpt, self.cuda)
self.b1_cls_model = init_b1_cls_model(self.b1_model_ckpt, self.cuda)
self.b1long_cls_model = init_b1_cls_model(
self.b1long_model_ckpt, self.cuda)
self.b1short_cls_model = init_b1_cls_model(
self.b1short_model_ckpt, self.cuda)
self.b0_cls_model = init_b0_cls_model(self.b0_model_ckpt, self.cuda)
def inference(self, video_pth):
post_func = nn.Softmax(dim=1)
# init_begin = time.time()
# self.init_model()
# init_end = time.time()
# print('init model time:', init_end - init_begin)
# submission = pd.read_csv("./sample_submission.csv", dtype='unicode')
score = 0.5
try:
# print(video_pth)
if video_pth.split('.')[-1] != 'mp4':
return score
# extract image
# print(video_pth)
reader = cv2.VideoCapture(video_pth)
video_cnt = reader.get(cv2.CAP_PROP_FRAME_COUNT)
interval = max(1, math.ceil(video_cnt / self.frame_nums))
# print('video_cnt:', video_cnt, 'interval:', interval)
count, test_count = 0, 0
success = True
img_list = []
while success:
if count % interval == 0:
success, image = reader.read()
if success:
img_list.append(image)
else:
success = reader.grab()
count += 1
reader.release()
with torch.no_grad():
score = predict_batch(img_list, self.face_cls_model, self.face_cls_model2, self.face_cls_model3,
self.xcp_cls_model, self.b3_cls_model, self.res34_cls_model, self.b1_cls_model,
self.b1long_cls_model, self.b1short_cls_model, self.b0_cls_model,
self.face_cls_model4, post_func, self.detect_record)
except Exception as e:
print(e)
score = -1
# print('score:', score)
if score < 0 or score > 1:
score = 0.5
return score
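# --- Editor's note: the block below is an illustration added by the editor, not part of the
# original module. It shows the weight renormalisation done by get_final_score: models that
# returned -1 are dropped and the remaining weights are rescaled to sum to 1. The numbers
# are made up.
if __name__ == "__main__":
    example_scores = [0.92, -1, 0.88]   # the second model failed and is ignored
    example_weights = [0.5, 0.3, 0.2]
    # kept weights 0.5 and 0.2 renormalise to 5/7 and 2/7,
    # so the fused score is 0.92 * 5/7 + 0.88 * 2/7 ~= 0.909
    print("fused score:", get_final_score(example_scores, example_weights))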
| 1.734375
| 2
|
gists/python-threading-example/threading-example.py
|
Senzing/knowledge-base
| 1
|
12778831
|
#! /usr/bin/env python3
import random
import threading
import time
class TestThread(threading.Thread):
def run(self):
for loop_number in range(10):
print("{0} Loop: {1}".format(self.name, loop_number))
time.sleep(random.randint(1, 5))
# Construct threads.
threads = []
for thread_number in range(5):
thread = TestThread()
thread.name = "thread-{0}".format(thread_number)
threads.append(thread)
# Start threads.
for thread in threads:
thread.start()
# Wait for threads to stop.
for thread in threads:
thread.join()
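# --- Editor's note: an alternative sketch added by the editor (not in the original snippet).
# It avoids subclassing by handing the work function and its per-thread arguments straight
# to threading.Thread.
def looped_print(label, loops=3):
    for loop_number in range(loops):
        print("{0} Loop: {1}".format(label, loop_number))
        time.sleep(random.randint(1, 5))

alt_threads = [
    threading.Thread(target=looped_print, args=("alt-thread-{0}".format(n),))
    for n in range(5)
]
for alt_thread in alt_threads:
    alt_thread.start()
for alt_thread in alt_threads:
    alt_thread.join()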
| 3.65625
| 4
|
ytelapi/controllers/conference_controller.py
|
Ytel-Inc/YtelAPI-Python
| 0
|
12778832
|
# -*- coding: utf-8 -*-
"""
ytelapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
from .base_controller import BaseController
from ..api_helper import APIHelper
from ..configuration import Configuration
from ..http.auth.basic_auth import BasicAuth
class ConferenceController(BaseController):
"""A Controller to access Endpoints in the ytelapi API."""
def create_list_conferences(self,
page=None,
pagesize=None,
friendly_name=None,
date_created=None):
"""Does a POST request to /conferences/listconference.json.
Retrieve a list of conference objects.
Args:
page (int, optional): The page count to retrieve from the total
results in the collection. Page indexing starts at 1.
pagesize (int, optional): Number of individual resources listed in
the response per page
friendly_name (string, optional): Only return conferences with the
specified FriendlyName
date_created (string, optional): Conference created date
Returns:
string: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_query_builder = Configuration.base_uri
_query_builder += '/conferences/listconference.json'
_query_url = APIHelper.clean_url(_query_builder)
# Prepare form parameters
_form_parameters = {
'page': page,
'pagesize': pagesize,
'FriendlyName': friendly_name,
'DateCreated': date_created
}
# Prepare and execute request
_request = self.http_client.post(_query_url, parameters=_form_parameters)
BasicAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return _context.response.raw_body
def create_hangup_participant(self,
participant_sid,
conference_sid):
"""Does a POST request to /conferences/hangupParticipant.json.
Remove a participant from a conference.
Args:
participant_sid (string): The unique identifier for a participant
object.
conference_sid (string): The unique identifier for a conference
object.
Returns:
string: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_query_builder = Configuration.base_uri
_query_builder += '/conferences/hangupParticipant.json'
_query_parameters = {
'ParticipantSid': participant_sid
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare form parameters
_form_parameters = {
'ConferenceSid': conference_sid
}
# Prepare and execute request
_request = self.http_client.post(_query_url, parameters=_form_parameters)
BasicAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return _context.response.raw_body
def create_play_audio(self,
conference_sid,
participant_sid,
audio_url):
"""Does a POST request to /conferences/playAudio.json.
Play an audio file during a conference.
Args:
conference_sid (string): The unique identifier for a conference
object.
participant_sid (string): The unique identifier for a participant
object.
audio_url (AudioUrlEnum): The URL for the audio file that is to be
played during the conference. Multiple audio files can be
chained by using a semicolon.
Returns:
string: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_query_builder = Configuration.base_uri
_query_builder += '/conferences/playAudio.json'
_query_url = APIHelper.clean_url(_query_builder)
# Prepare form parameters
_form_parameters = {
'ConferenceSid': conference_sid,
'ParticipantSid': participant_sid,
'AudioUrl': audio_url
}
# Prepare and execute request
_request = self.http_client.post(_query_url, parameters=_form_parameters)
BasicAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return _context.response.raw_body
def create_list_participants(self,
conference_sid,
page=None,
pagesize=None,
muted=None,
deaf=None):
"""Does a POST request to /conferences/listParticipant.json.
Retrieve a list of participants for an in-progress conference.
Args:
conference_sid (string): The unique identifier for a conference.
page (int, optional): The page count to retrieve from the total
results in the collection. Page indexing starts at 1.
pagesize (int, optional): The count of objects to return per
page.
muted (bool, optional): Specifies if participant should be muted.
deaf (bool, optional): Specifies if the participant should hear
audio in the conference.
Returns:
string: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_query_builder = Configuration.base_uri
_query_builder += '/conferences/listParticipant.json'
_query_url = APIHelper.clean_url(_query_builder)
# Prepare form parameters
_form_parameters = {
'ConferenceSid': conference_sid,
'Page': page,
'Pagesize': pagesize,
'Muted': muted,
'Deaf': deaf
}
# Prepare and execute request
_request = self.http_client.post(_query_url, parameters=_form_parameters)
BasicAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return _context.response.raw_body
def create_conference(self,
url,
mfrom,
to,
method=None,
status_call_back_url=None,
status_call_back_method=None,
fallback_url=None,
fallback_method=None,
record=None,
record_call_back_url=None,
record_call_back_method=None,
schedule_time=None,
timeout=None):
"""Does a POST request to /conferences/createConference.json.
Here you can experiment with initiating a conference call through Ytel
and view the request response generated when doing so.
Args:
url (string): URL requested once the conference connects
mfrom (string): A valid 10-digit number (E.164 format) that will
be initiating the conference call.
to (string): A valid 10-digit number (E.164 format) that is to
receive the conference call.
method (string, optional): Specifies the HTTP method used to
request the required URL once call connects.
status_call_back_url (string, optional): URL that can be requested
to receive notification when call has ended. A set of default
parameters will be sent here once the conference is finished.
status_call_back_method (string, optional): Specifies the HTTP
method used to request the StatusCallBackUrl.
fallback_url (string, optional): URL requested if the initial Url
parameter fails or encounters an error
fallback_method (string, optional): Specifies the HTTP method used
to request the required FallbackUrl once call connects.
record (bool, optional): Specifies if the conference should be
recorded.
record_call_back_url (string, optional): Recording parameters will
be sent here upon completion.
record_call_back_method (string, optional): Specifies the HTTP
method used to request the required URL once conference
connects.
schedule_time (string, optional): Schedule conference in future.
Schedule time must be greater than current time
timeout (int, optional): The number of seconds the call stays on
the line while waiting for an answer. The maximum is 999 seconds
and the default is 60 seconds, but lower values can also be set.
Returns:
string: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_query_builder = Configuration.base_uri
_query_builder += '/conferences/createConference.json'
_query_parameters = {
'Url': url
}
_query_builder = APIHelper.append_url_with_query_parameters(_query_builder,
_query_parameters, Configuration.array_serialization)
_query_url = APIHelper.clean_url(_query_builder)
# Prepare form parameters
_form_parameters = {
'From': mfrom,
'To': to,
'Method': method,
'StatusCallBackUrl': status_call_back_url,
'StatusCallBackMethod': status_call_back_method,
'FallbackUrl': fallback_url,
'FallbackMethod': fallback_method,
'Record': record,
'RecordCallBackUrl': record_call_back_url,
'RecordCallBackMethod': record_call_back_method,
'ScheduleTime': schedule_time,
'Timeout': timeout
}
# Prepare and execute request
_request = self.http_client.post(_query_url, parameters=_form_parameters)
BasicAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return _context.response.raw_body
def create_view_participant(self,
conference_sid,
participant_sid):
"""Does a POST request to /conferences/viewParticipant.json.
Retrieve information about a participant by its ParticipantSid.
Args:
conference_sid (string): The unique identifier for a conference
object.
participant_sid (string): The unique identifier for a participant
object.
Returns:
string: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_query_builder = Configuration.base_uri
_query_builder += '/conferences/viewParticipant.json'
_query_url = APIHelper.clean_url(_query_builder)
# Prepare form parameters
_form_parameters = {
'ConferenceSid': conference_sid,
'ParticipantSid': participant_sid
}
# Prepare and execute request
_request = self.http_client.post(_query_url, parameters=_form_parameters)
BasicAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return _context.response.raw_body
def create_view_conference(self,
conference_sid):
"""Does a POST request to /conferences/viewconference.json.
Retrieve information about a conference by its ConferenceSid.
Args:
conference_sid (string): The unique identifier of each conference
resource
Returns:
string: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_query_builder = Configuration.base_uri
_query_builder += '/conferences/viewconference.json'
_query_url = APIHelper.clean_url(_query_builder)
# Prepare form parameters
_form_parameters = {
'ConferenceSid': conference_sid
}
# Prepare and execute request
_request = self.http_client.post(_query_url, parameters=_form_parameters)
BasicAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return _context.response.raw_body
def add_participant(self,
conference_sid,
participant_number,
muted=None,
deaf=None):
"""Does a POST request to /conferences/addParticipant.json.
Add a participant to a conference.
Args:
conference_sid (string): The unique identifier for a conference
object.
participant_number (string): The phone number of the participant
to be added.
muted (bool, optional): Specifies if participant should be muted.
deaf (bool, optional): Specifies if the participant should hear
audio in the conference.
Returns:
string: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_query_builder = Configuration.base_uri
_query_builder += '/conferences/addParticipant.json'
_query_url = APIHelper.clean_url(_query_builder)
# Prepare form parameters
_form_parameters = {
'ConferenceSid': conference_sid,
'ParticipantNumber': participant_number,
'Muted': muted,
'Deaf': deaf
}
# Prepare and execute request
_request = self.http_client.post(_query_url, parameters=_form_parameters)
BasicAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return _context.response.raw_body
def create_silence_participant(self,
conference_sid,
participant_sid,
muted=None,
deaf=None):
"""Does a POST request to /conferences/deafMuteParticipant.json.
Deafen and/or mute a participant in an active conference.
Args:
conference_sid (string): ID of the active conference
participant_sid (string): ID of an active participant
muted (bool, optional): Mute a participant
deaf (bool, optional): Make it so a participant can't hear the conference audio
Returns:
string: Response from the API.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# Prepare query URL
_query_builder = Configuration.base_uri
_query_builder += '/conferences/deafMuteParticipant.json'
_query_url = APIHelper.clean_url(_query_builder)
# Prepare form parameters
_form_parameters = {
'conferenceSid': conference_sid,
'ParticipantSid': participant_sid,
'Muted': muted,
'Deaf': deaf
}
# Prepare and execute request
_request = self.http_client.post(_query_url, parameters=_form_parameters)
BasicAuth.apply(_request)
_context = self.execute_request(_request)
self.validate_response(_context)
# Return appropriate type
return _context.response.raw_body
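# --- Editor's note: a hypothetical usage sketch, not part of the generated file. How the
# controller is constructed and authenticated depends on the rest of the generated SDK
# (Configuration, BaseController and BasicAuth), so it is kept commented out and the SID /
# phone-number values are placeholders.
#
# controller = ConferenceController()
# conferences_json = controller.create_list_conferences(page=1, pagesize=10)
# participant_json = controller.add_participant(
#     conference_sid='<conference-sid>',
#     participant_number='<participant-number>',
#     muted=False,
#     deaf=False)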
| 2.515625
| 3
|
app/tasks.py
|
tristan-c/FancyTest
| 0
|
12778833
|
from carotte import Carotte
from app.connector import *
my_app = Carotte()
@my_app.task
def refreshYoutube(author=None):
youtube = youtubeConnector(username="")
log = youtube.check()
return log
| 1.773438
| 2
|
sphinx/util/stemmer/__init__.py
|
danieleades/sphinx
| 0
|
12778834
|
"""Word stemming utilities for Sphinx."""
import warnings
import snowballstemmer
from sphinx.deprecation import RemovedInSphinx70Warning
class PorterStemmer:
def __init__(self):
warnings.warn(f"{self.__class__.__name__} is deprecated, use "
"snowballstemmer.stemmer('porter') instead.",
RemovedInSphinx70Warning, stacklevel=2)
self.stemmer = snowballstemmer.stemmer('porter')
def stem(self, p: str, i: int, j: int) -> str:
warnings.warn(f"{self.__class__.__name__}.stem() is deprecated, use "
"snowballstemmer.stemmer('porter').stemWord() instead.",
RemovedInSphinx70Warning, stacklevel=2)
return self.stemmer.stemWord(p)
class BaseStemmer:
def __init__(self):
warnings.warn(f"{self.__class__.__name__} is deprecated, use "
"snowballstemmer.stemmer('porter') instead.",
RemovedInSphinx70Warning, stacklevel=3)
def stem(self, word: str) -> str:
raise NotImplementedError
class PyStemmer(BaseStemmer):
def __init__(self): # NoQA
super().__init__()
self.stemmer = snowballstemmer.stemmer('porter')
def stem(self, word: str) -> str:
warnings.warn(f"{self.__class__.__name__}.stem() is deprecated, use "
"snowballstemmer.stemmer('porter').stemWord() instead.",
RemovedInSphinx70Warning, stacklevel=2)
return self.stemmer.stemWord(word)
class StandardStemmer(BaseStemmer):
def __init__(self): # NoQA
super().__init__()
self.stemmer = snowballstemmer.stemmer('porter')
def stem(self, word: str) -> str:
warnings.warn(f"{self.__class__.__name__}.stem() is deprecated, use "
"snowballstemmer.stemmer('porter').stemWord() instead.",
RemovedInSphinx70Warning, stacklevel=2)
return self.stemmer.stemWord(word)
def get_stemmer() -> BaseStemmer:
warnings.warn("get_stemmer() is deprecated, use "
"snowballstemmer.stemmer('porter') instead.",
RemovedInSphinx70Warning, stacklevel=2)
return PyStemmer()
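# --- Editor's note: a short sketch added by the editor (not part of the module) showing the
# replacement that the deprecation warnings above point to. It only uses the snowballstemmer
# API already imported here.
if __name__ == "__main__":
    porter = snowballstemmer.stemmer('porter')
    print(porter.stemWord('building'))            # expected: "build"
    print(porter.stemWords(['buildings', 'built']))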
| 2.609375
| 3
|
test/00_unit_test/TestHectareVhdlGen.py
|
MicroTCA-Tech-Lab/hectare
| 5
|
12778835
|
#! /usr/bin/env python3
"""
Copyright (c) 2020 Deutsches Elektronen-Synchrotron DESY
See LICENSE.txt for license details.
"""
import enum
import sys
import unittest
from systemrdl.rdltypes import AccessType
from hectare._hectare_types import Field, Register
from hectare._HectareVhdlGen import HectareVhdlGen
class TestHectareVhdlGen(unittest.TestCase):
DATA_W_BYTES = 4
def test_single_addr(self):
reg = Register("myreg", 8)
s = HectareVhdlGen._gen_single_addr(reg, self.DATA_W_BYTES)
self.assertEqual(s, "constant C_ADDR_MYREG : integer := 2;")
def test_single_field_range(self):
field = Field("myfield", 8, 15, AccessType.rw, AccessType.rw, swmod=False)
l = HectareVhdlGen._gen_single_field_range("myreg", field)
self.assertEqual(l[0], "constant C_FIELD_MYREG_MYFIELD_MSB : integer := 15;")
self.assertEqual(l[1], "constant C_FIELD_MYREG_MYFIELD_LSB : integer := 8;")
def test_gen_single_reg(self):
reg = Register("myreg", 8)
s = HectareVhdlGen._gen_single_reg(reg, self.DATA_W_BYTES)
self.assertEqual(s, "signal reg_myreg : std_logic_vector(32-1 downto 0);")
def test_gen_single_port(self):
field = Field("myfield", 8, 15, AccessType.rw, AccessType.rw, swmod=False)
l = HectareVhdlGen._gen_single_port("myreg", field)
self.assertEqual(l[0], "myreg_myfield_o : out std_logic_vector(7 downto 0);")
self.assertEqual(l[1], "myreg_myfield_i : in std_logic_vector(7 downto 0);")
def test_gen_single_port_onebit(self):
field = Field("myfield", 8, 8, AccessType.rw, AccessType.rw, swmod=False)
l = HectareVhdlGen._gen_single_port("myreg", field)
self.assertEqual(l[0], "myreg_myfield_o : out std_logic;")
self.assertEqual(l[1], "myreg_myfield_i : in std_logic;")
def test_gen_single_port_swmod(self):
field = Field("myfield", 8, 15, AccessType.r, AccessType.rw, swmod=True)
l = HectareVhdlGen._gen_single_port("myreg", field)
self.assertEqual(len(l), 2, "expect to generate _o and _swmod ports")
self.assertEqual(l[0], "myreg_myfield_o : out std_logic_vector(7 downto 0);")
self.assertEqual(l[1], "myreg_myfield_swmod : out std_logic;")
def test_gen_single_hw_access_reg(self):
field = Field("myfield", 8, 15, AccessType.rw, AccessType.rw, swmod=False)
l = HectareVhdlGen._gen_single_hw_access("myreg", field, in_reg=True)
self.assertEqual(l[0], "myreg_myfield_o <= reg_myreg(15 downto 8);")
self.assertEqual(
l[1], "reg_myreg(15 downto 8) <= myreg_myfield_i when rising_edge(clk);"
)
def test_gen_single_hw_access_no_reg(self):
field = Field("myfield", 8, 15, AccessType.rw, AccessType.rw, swmod=False)
l = HectareVhdlGen._gen_single_hw_access("myreg", field, in_reg=False)
self.assertEqual(l[0], "myreg_myfield_o <= reg_myreg(15 downto 8);")
self.assertEqual(l[1], "reg_myreg(15 downto 8) <= myreg_myfield_i;")
def test_gen_single_hw_access_no_reg_onebit(self):
field = Field("myfield", 8, 8, AccessType.rw, AccessType.rw, swmod=False)
l = HectareVhdlGen._gen_single_hw_access("myreg", field, in_reg=True)
self.assertEqual(l[0], "myreg_myfield_o <= reg_myreg(8);")
self.assertEqual(l[1], "reg_myreg(8) <= myreg_myfield_i when rising_edge(clk);")
def test_gen_single_hw_access_enum_out(self):
class ColorSel(enum.Enum):
RED = 0
GREEN = 1
BLUE = 2
field = Field(
"myfield", 0, 2, AccessType.r, AccessType.rw, swmod=False, encode=ColorSel
)
l = HectareVhdlGen._gen_single_hw_access("myreg", field, in_reg=True)
self.assertEqual(
l[0],
"myreg_myfield_o <= ColorSel_t'val(to_integer(unsigned(reg_myreg(2 downto 0))));",
)
def test_gen_single_hw_access_enum_in(self):
class ColorSel(enum.Enum):
RED = 0
GREEN = 1
BLUE = 2
field = Field(
"myfield", 0, 2, AccessType.w, AccessType.rw, swmod=False, encode=ColorSel
)
l = HectareVhdlGen._gen_single_hw_access("myreg", field, in_reg=True)
self.assertEqual(
l[0],
"reg_myreg(2 downto 0) <= std_logic_vector(to_unsigned(ColorSel_t'pos(myreg_myfield_i), 3)) when rising_edge(clk);",
)
def test_gen_single_reg_swmod_no_swmod(self):
reg = Register("myreg", 0)
reg.fields.append(
Field("myfield1", 0, 7, AccessType.rw, AccessType.rw, swmod=False)
)
reg.fields.append(
Field("myfield2", 8, 15, AccessType.rw, AccessType.rw, swmod=False)
)
swmod_reg = HectareVhdlGen._gen_single_reg_swmod(reg, self.DATA_W_BYTES)
self.assertIsNone(
swmod_reg, "if none of the fields has swmod, no swmod reg is generated"
)
def test_gen_single_reg_swmod_with_swmod(self):
reg = Register("myreg", 0)
reg.fields.append(
Field("myfield1", 0, 7, AccessType.rw, AccessType.rw, swmod=False)
)
reg.fields.append(
Field("myfield2", 8, 15, AccessType.rw, AccessType.rw, swmod=True)
)
swmod_reg = HectareVhdlGen._gen_single_reg_swmod(reg, self.DATA_W_BYTES)
self.assertEqual(
swmod_reg,
"signal reg_myreg_swmod : std_logic;",
"if at least one reg has swmod attribute set, reg is generated",
)
def test_gen_single_enum_type(self):
class ColorSel(enum.Enum):
RED = 0
GREEN = 1
BLUE = 2
field = Field(
"myfield", 8, 15, AccessType.rw, AccessType.rw, swmod=False, encode=ColorSel
)
lines = HectareVhdlGen._gen_single_enum_type(field)
self.assertEqual(
len(lines),
1 + 3 + 1,
"one line per each item, declaration and closing bracket",
)
self.assertTrue("RED" in lines[1])
self.assertTrue("GREEN" in lines[2])
self.assertTrue("BLUE" in lines[3])
def test_gen_single_enum_type_invalid(self):
""" generates un-supported encoding (inc != 1) and checks if generator raises expection """
class ColorSelInvalid(enum.Enum):
RED = 0
GREEN = 1
BLUE = 10 # <- this is not supported
field = Field(
"myfield",
8,
15,
AccessType.rw,
AccessType.rw,
swmod=False,
encode=ColorSelInvalid,
)
self.assertRaises(AssertionError, HectareVhdlGen._gen_single_enum_type, field)
def test_gen_single_reset_assignment(self):
RESET_VAL = 0x12
field = Field(
"myfield", 8, 15, AccessType.rw, AccessType.rw, swmod=False, reset=RESET_VAL
)
line = HectareVhdlGen._gen_single_reset_assignment("myreg", field)
assign_val = line.split("<=")[1].strip().replace(";", "")
self.assertEqual(assign_val, '"{0:08b}"'.format(RESET_VAL), "reset value")
self.assertEqual(len(assign_val), 8+2, "assign value must be of same size as the field")
if __name__ == "__main__":
unittest.main()
| 2.4375
| 2
|
tests/test_rfc9092.py
|
CBonnell/pyasn1-alt-modules
| 2
|
12778836
|
#
# This file is part of pyasn1-alt-modules software.
#
# Created by <NAME>
# Copyright (c) 2020-2022, Vigil Security, LLC
# License: http://vigilsec.com/pyasn1-alt-modules-license.txt
#
import sys
import unittest
from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder
from pyasn1.type import univ
from pyasn1_alt_modules import pem
from pyasn1_alt_modules import rfc5652
from pyasn1_alt_modules import rfc9092
class GeofeedCSVTestCase(unittest.TestCase):
pem_text = """\
<KEY>
"""
def setUp(self):
self.asn1Spec = rfc5652.ContentInfo()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.pem_text)
asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
self.assertEqual(rfc5652.id_signedData, asn1Object['contentType'])
sd, rest = der_decoder(asn1Object['content'],
asn1Spec=rfc5652.SignedData())
self.assertFalse(rest)
self.assertTrue(sd.prettyPrint())
self.assertEqual(asn1Object['content'], der_encoder(sd))
found = False
for sa in sd['signerInfos'][0]['signedAttrs']:
if sa['attrType'] == rfc5652.id_contentType:
ct, rest = der_decoder(sa['attrValues'][0],
asn1Spec=rfc5652.ContentType())
self.assertFalse(rest)
self.assertTrue(ct.prettyPrint())
self.assertEqual(sa['attrValues'][0], der_encoder(ct))
self.assertEqual(rfc9092.id_ct_geofeedCSVwithCRLF, ct)
found = True
self.assertTrue(found)
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
| 2.140625
| 2
|
cogs/fire.py
|
NieR1711/Fire
| 0
|
12778837
|
"""
MIT License
Copyright (c) 2019 GamingGeek
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from jishaku.paginators import PaginatorInterface, PaginatorEmbedInterface, WrappedPaginator
from fire.converters import Member
from discord.ext import commands
from aiotrello import Trello
from typing import Union
import discord
import datetime
import os
import platform
import json
import time
import psutil
import asyncio
import traceback
import humanfriendly
import inspect
import textwrap
import io
import copy
import aiohttp
import subprocess
import random
launchtime = datetime.datetime.utcnow()
process = psutil.Process(os.getpid())
print("fire.py has been loaded")
def config(path: str = None):
with open('config.json', 'r') as cfg:
config = json.load(cfg)
if path is not None:
return config[path]
else:
return config
config = config()
def isadmin(ctx):
"""Checks if the author is an admin"""
# config has been rebound to the loaded dict above, so use item access rather than a call
return str(ctx.author.id) in config['admins']
class firecog(commands.Cog, name="Main Commands"):
def __init__(self, bot):
self.bot = bot
self.trello = Trello(key=config['trellokey'], token=config['trellotoken'])
self.launchtime = launchtime
self._last_result = None
def cleanup_code(self, content):
if content.startswith('```') and content.endswith('```'):
return '\n'.join(content.split('\n')[1:-1])
return content.strip('` \n')
@commands.command(name="invite")
async def inviteme(self, ctx):
return await ctx.send("https://gaminggeek.dev/fire")
@commands.command(description="Shows you my ping to discord's servers")
async def ping(self, ctx):
"""PFXping"""
latency = round(self.bot.latency * 1000)
start = round(time.time()*1000)
msg = await ctx.send(content="Pinging...")
end = round(time.time()*1000)
elapsed = round(end - start)
color = ctx.author.color
embed = discord.Embed(title=f":ping_pong: {elapsed}ms.\n:heartpulse: {latency}ms.", colour=color, timestamp=datetime.datetime.utcnow())
await msg.edit(content="`Pong!`", embed=embed)
@commands.command(description="Suggest a feature")
@commands.cooldown(1, 300, commands.BucketType.user)
async def suggest(self, ctx, *, suggestion: str):
"""PFXsuggest <suggestion>"""
if suggestion == None:
await ctx.send("You can't suggest nothing!")
else:
board = await self.trello.get_board(lambda b: b.name == "Fire")
suggestions = await board.get_list(lambda l: l.name == "Suggestions")
card = await suggestions.create_card(suggestion, f"Suggested by {ctx.author.name} ({ctx.author.id})")
now = datetime.datetime.utcnow().strftime('%d/%m/%Y @ %I:%M:%S %p')
await card.add_comment(f"Suggested in channel {ctx.channel.name} ({ctx.channel.id}) in guild {ctx.guild.name} ({ctx.guild.id}) at {now} UTC")
await ctx.send(f"Thanks! Your suggestion was added to the Trello @ <{card.url}>. Any abuse will lead to being blacklisted from Fire!")
@commands.command(description="Shows you some stats about me.", aliases=['about'])
async def stats(self, ctx):
"""PFXstats"""
msg = await ctx.send('Gathering info...')
delta_uptime = datetime.datetime.utcnow() - launchtime
hours, remainder = divmod(int(delta_uptime.total_seconds()), 3600)
minutes, seconds = divmod(remainder, 60)
days, hours = divmod(hours, 24)
uptime = f"{days}d, {hours}h, {minutes}m, {seconds}s"
cpustats = psutil.cpu_percent()
ramuse = (process.memory_info().rss / 1024) / 1000
online = 0
idle = 0
dnd = 0
offline = 0
streaming = 0
members = self.bot.get_all_members()
for member in members:
if str(member.status) == 'online':
online = online + 1
if str(member.status) == 'idle':
idle = idle + 1
if str(member.status) == 'dnd':
dnd = dnd + 1
if str(member.status) == 'offline':
offline = offline + 1
try:
activity = member.activities[0]
if isinstance(member.activities[0], discord.activity.Streaming):
streaming = streaming + 1
except Exception:
pass
users = online + idle + dnd + offline
embed = discord.Embed(colour=ctx.author.color, timestamp=datetime.datetime.utcnow())
ownerboi = self.bot.get_user(287698408855044097)
embed.set_author(name=f"Bot made by {ownerboi}", url="https://gaminggeek.dev", icon_url=str(ownerboi.avatar_url))
embed.add_field(name="Runtime", value=f"{uptime}", inline=False)
embed.add_field(name="CPU", value=f"{round(cpustats)}%", inline=False)
embed.add_field(name="RAM", value=f"{ramuse} MB", inline=False)
embed.add_field(name="Version Info", value=f"discord.py {discord.__version__} | Python: 3.7.4", inline=False)
embed.add_field(name="Guilds", value=f"{len(self.bot.guilds)}", inline=True)
embed.add_field(name="Prefix", value=f"{ctx.prefix}", inline=True)
embed.add_field(name="Commands", value=len(self.bot.commands), inline=True)
embed.add_field(name="Members", value=f"{self.bot.get_emoji(313956277808005120)} {online:,d}\n{self.bot.get_emoji(313956277220802560)} {idle:,d}\n{self.bot.get_emoji(313956276893646850)} {dnd:,d}\n{self.bot.get_emoji(313956277132853248)} {streaming:,d}\n{self.bot.get_emoji(313956277237710868)} {offline:,d}\nTotal: {users:,d}\n ", inline=False)
await msg.edit(content=None, embed=embed)
@commands.command(description="Shows you all the guilds I'm in.")
async def listguilds(self, ctx):
"""PFXlistguilds"""
if not isadmin(ctx):
return
paginator = WrappedPaginator(prefix='```vbs', suffix='```', max_size=1500)
gcount = 1
for guild in self.bot.guilds:
if guild == ctx.guild:
current = ' (HERE)'
else:
current = ''
paginator.add_line(f'[{gcount}] {guild.name}{current} || {guild.owner} || {guild.member_count} Members')
gcount = gcount + 1
interface = PaginatorInterface(ctx.bot, paginator, owner=ctx.author)
await interface.send_to(ctx)
@commands.command(name='rpc', description='View someone\'s rich presence')
async def rpc(self, ctx, *, member: Member = None, MSG: discord.Message = None, ACT: int = 0):
"""PFXrpc [<member>]"""
if not member:
member = ctx.author
if ACT == -1:
return
try:
activity = member.activities[ACT]
except IndexError:
if ACT != 0:
return
activity = None
embed = None
if activity != None:
if activity.name == 'Spotify':
adict = activity.to_dict()
embed = discord.Embed(color=activity.color, timestamp=datetime.datetime.utcnow())
embed.set_author(name=f'{member}\'s Spotify Info', icon_url='https://cdn.discordapp.com/emojis/471412444716072960.png')
embed.add_field(name='Song', value=activity.title, inline=False)
embed.add_field(name='Artists', value=', '.join(activity.artists), inline=False)
embed.add_field(name='Album', value=activity.album, inline=False)
duration = humanfriendly.format_timespan(activity.duration)
now = datetime.datetime.utcnow()
elapsed = humanfriendly.format_timespan(now - activity.start)
left = humanfriendly.format_timespan(activity.end - now)
if 'day' in left:
left = '0:00:00'
embed.add_field(name='Times', value=f'Duration: {duration}\nElapsed: {elapsed}\nLeft: {left}', inline=False)
embed.add_field(name='Listen to this track', value=f'[{activity.title}](https://open.spotify.com/track/{activity.track_id})', inline=False)
embed.set_thumbnail(url=activity.album_cover_url)
elif type(activity) == discord.Streaming:
embed = discord.Embed(color=discord.Color.purple(), timestamp=datetime.datetime.utcnow())
embed.set_author(name=f'{member}\'s Stream Info', icon_url='https://cdn.discordapp.com/emojis/603188557242433539.png')
if member.bot:
embed.add_field(name='Title', value=activity.name, inline=False)
else:
embed.add_field(name='Title', value=activity.name, inline=False)
embed.add_field(name='Twitch Name', value=activity.twitch_name, inline=False)
if activity.details != None:
embed.add_field(name='Game', value=activity.details, inline=False)
embed.add_field(name='URL', value=f'[{activity.twitch_name}]({activity.url})', inline=False)
elif type(activity) == discord.Activity:
embed = discord.Embed(color=member.color, timestamp=datetime.datetime.utcnow())
if activity.small_image_url != None:
embed.set_author(name=f'{member}\'s Game Info', icon_url=activity.small_image_url)
else:
embed.set_author(name=f'{member}\'s Game Info')
embed.add_field(name='Game', value=activity.name, inline=False)
now = datetime.datetime.utcnow()
elapsed = None
if activity.start:
elapsed = humanfriendly.format_timespan(now - activity.start)
if activity.details != None and activity.state != None and elapsed != None:
embed.add_field(name='Details', value=f'{activity.details}\n{activity.state}\n{elapsed} elapsed', inline=False)
elif activity.state != None and elapsed != None:
embed.add_field(name='Details', value=f'{activity.state}\n{elapsed} elapsed', inline=False)
elif activity.details != None and elapsed != None:
embed.add_field(name='Details', value=f'{activity.details}\n{elapsed} elapsed', inline=False)
elif activity.details != None and activity.state !=None and elapsed == None:
embed.add_field(name='Details', value=f'{activity.details}\n{activity.state}', inline=False)
elif activity.state != None and elapsed == None:
embed.add_field(name='Details', value=f'{activity.state}', inline=False)
elif activity.details != None and elapsed == None:
embed.add_field(name='Details', value=f'{activity.details}', inline=False)
if activity.large_image_url != None:
embed.set_thumbnail(url=activity.large_image_url)
else:
pass
            if embed:
                if MSG:
                    await MSG.edit(embed=embed)
                else:
                    MSG = await ctx.send(embed=embed)
                    await MSG.add_reaction('⏹')
                    await MSG.add_reaction('◀')
                    await MSG.add_reaction('▶')
                def react_check(reaction, user):
                    return user.id == ctx.author.id
                try:
                    reaction, user = await self.bot.wait_for('reaction_add', check=react_check, timeout=120)
                except asyncio.TimeoutError:
                    return
                if reaction.emoji == '⏹':
                    await MSG.delete()
                elif reaction.emoji == '◀':
                    await MSG.remove_reaction('◀', ctx.author)
                    await ctx.invoke(self.bot.get_command('rpc'), member=member, MSG=MSG, ACT=ACT-1)
                elif reaction.emoji == '▶':
                    await MSG.remove_reaction('▶', ctx.author)
                    await ctx.invoke(self.bot.get_command('rpc'), member=member, MSG=MSG, ACT=ACT+1)
            else:
                await ctx.send(f'{discord.utils.escape_mentions(discord.utils.escape_markdown(str(member)))} doesn\'t seem to be playing something with rich presence integration...')
else:
await ctx.send(f'{discord.utils.escape_mentions(discord.utils.escape_markdown(str(member)))} doesn\'t seem to be playing something with rich presence integration...')
@commands.command(description="dab")
async def dab(self, ctx):
"""PFXdab"""
await ctx.send(f"{ctx.message.author.mention}, <o/")
@commands.command(description="idk")
async def warm(self, ctx, *, warm: str):
"""PFXwarm <item>"""
await ctx.send(f'🔥 Warming up {discord.utils.escape_mentions(discord.utils.escape_markdown(warm))}')
@commands.command(description='Cow goes moo')
async def cowsay(self, ctx, *, cow: str):
"""PFXcowsay <text>"""
async with aiohttp.ClientSession() as session:
            async with session.get('http://cowsay.morecode.org/say', params={'message': cow, 'format': 'json'}) as resp:
body = await resp.json()
cow = body['cow']
cow = discord.utils.escape_mentions(cow).replace('`', '')
await ctx.send(f'```{cow}```')
@commands.command(description='ascii text')
async def ascii(self, ctx, *, text: str):
"""PFXascii <text>"""
        async with aiohttp.ClientSession() as session:
            async with session.get('http://artii.herokuapp.com/make', params={'text': text}) as resp:
body = await resp.text()
try:
asciimsg = discord.utils.escape_mentions(body).replace('`', '')
await ctx.send(f'```{asciimsg}```')
except discord.HTTPException as e:
e = str(e)
if 'Must be 2000 or fewer in length.' in e:
return await ctx.send('That message is too long. Try a shorter one!')
@commands.command(name='👏', aliases=['clap'], description='Emphasize your message with claps')
async def clap(self, ctx, *, clappyboi: str = 'You need to provide a message for me to emphasize'):
'''PFXclap <message>'''
message = discord.utils.escape_mentions(clappyboi)
message = message.split(' ')
message = ' 👏 '.join(message)
await ctx.send(message + ' 👏')
@commands.command(name="8ball")
async def eightball(self, ctx, *, q: str = None):
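        """PFX8ball <question>"""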
if not q:
return await ctx.send(f'<a:fireFailed:603214400748257302> You need to ask a question!')
possible = ["It is certain.", "It is decidedly so.", "Without a doubt.", "Yes - definitely.", "You may rely on it.", "As I see it, yes.", "Most likely.", "Outlook good.", "Yes.", "Signs point to yes.",
"Reply hazy, try again.", "Ask again later.", "Better not tell you now.", "Cannot predict now.", "Concentrate and ask again.",
"Don't count on it.", "My reply is no.", "My sources say no.", "Outlook not so good.", "Very doubtful."]
answer = random.choice(possible)
await ctx.send(answer)
def setup(bot):
bot.add_cog(firecog(bot))
| 1.609375
| 2
|
test_sql.py
|
konstdimasik/python_code
| 0
|
12778838
|
<filename>test_sql.py<gh_stars>0
import sqlite3
from random import randint
global db
global sql
db = sqlite3.connect('test_server.db')
sql = db.cursor()
sql.execute("""CREATE TABLE IF NOT EXISTS users (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
login TEXT,
password TEXT,
cash BIGINT
)""")
db.commit()
def reg():
user_login = input('Login: ')
user_password = input('Password: ')
sql.execute(f"SELECT login FROM users WHERE login = '{user_login}'")
if sql.fetchone() is None:
sql.execute(f"INSERT INTO users VALUES (?, ?, ?)", (user_login, user_password, 0))
db.commit()
print('Зарегистрировано!')
else:
print('Такой логин уже есть')
for value in sql.execute("SELECT * FROM users"):
print(value)
def casino():
user_login = input('Log in: ')
number = randint(1, 2)
sql.execute(f"SELECT login FROM users WHERE login = '{user_login}'")
if sql.fetchone() is None:
print("Такого логина не существует. Зарегистрируйтесь")
reg()
else:
if number == 1:
sql.execute(f'UPDATE users SET cash = {1000} WHERE login = "{user_login}"')
db.commit()
else:
print('Вы проиграли!')
def enter():
for i in sql.execute('SELECT * FROM users'):
print(i)
def main():
casino()
enter()
main()
| 3.46875
| 3
|
openspeech/modules/add_normalization.py
|
CanYouImagine/openspeech
| 207
|
12778839
|
<filename>openspeech/modules/add_normalization.py
# MIT License
#
# Copyright (c) 2021 <NAME> and <NAME> and <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch.nn as nn
class AddNorm(nn.Module):
"""
Add & Normalization layer proposed in "Attention Is All You Need".
Transformer employ a residual connection around each of the two sub-layers,
(Multi-Head Attention & Feed-Forward) followed by layer normalization.
"""
def __init__(self, sublayer: nn.Module, d_model: int = 512) -> None:
super(AddNorm, self).__init__()
self.sublayer = sublayer
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, *args):
residual = args[0]
outputs = self.sublayer(*args)
if isinstance(outputs, tuple):
return self.layer_norm(outputs[0] + residual), outputs[1]
return self.layer_norm(outputs + residual)
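# A minimal usage sketch (not part of the original module): wrap an arbitrary sub-layer,
# here a stand-in feed-forward block, so that its output gets the residual connection
# and layer normalization applied.
#
#   import torch
#   ffn = nn.Sequential(nn.Linear(512, 2048), nn.ReLU(), nn.Linear(2048, 512))
#   add_norm = AddNorm(ffn, d_model=512)
#   out = add_norm(torch.randn(4, 100, 512))  # output keeps the input shape (4, 100, 512)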
| 2.140625
| 2
|
find_missing_files_in_sequence.py
|
neilrjones/DevOps-Python-tools
| 1
|
12778840
|
#!/usr/bin/env python
# coding=utf-8
# vim:ts=4:sts=4:sw=4:et
#
# Author: <NAME>
# Date: 2020-07-31 11:03:17 +0100 (Fri, 31 Jul 2020)
#
# https://github.com/harisekhon/pytools
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn
# and optionally send me feedback to help steer this or other code I publish
#
# https://www.linkedin.com/in/harisekhon
#
"""
Finds missing files by numeric sequence, assuming a uniformly numbered file naming convention across files
Files / directories are given as arguments or via standard input
Directories are recursed and their files examined for missing numbers before each one
Only supply files / directories that should be sharing a contiguously numbered file naming convention in each
single run of this tool
Accounts for zero padding in numbered files
Caveats:
- This is more complicated than you'd first think as there are so many file naming variations that no code could ever
be universally bulletproof and will likely require advanced regex tuning to match your use case and naming convention
- Won't detect missing files higher than the highest numbered file as there is no way to know how many there should be.
If you are looking for missing MP3 files, then you might be able to check the mp3 tag metadata using programs like
'mediainfo' to get the total number of tracks and see if the files go that high
- Returns globs by default instead of explicit missing filenames since suffixes can vary after numbers. If you have a
simple enough use case with a single fixed filename convention such as 'blah_01.txt' then you can find code to print
the missing files more explicitly, but in the general case you cannot account for suffix naming that isn't consistent,
such as chapters of audiobooks eg.
'blah 01 - chapter about X.mp3'
'blah 02 - chapter about Y.mp3'
so in the general case you cannot always infer suffixes, hence why it is left as globs. If you are sure that the
suffixes don't change then you can specify --fixed-suffix and it will infer each file's suffix as the basis for any
numerically missing files in the sequence, but if used where this is not the case, it'll generate a lot of false
positives that the default globbing mode would have handled
- Doesn't currently find entire missing CD / disks in the naming format, but you should be able to see those cases
easily by eye
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import glob
#import logging
import os
import re
import sys
import traceback
srcdir = os.path.abspath(os.path.dirname(__file__))
libdir = os.path.join(srcdir, 'pylib')
sys.path.append(libdir)
try:
from harisekhon.utils import log, log_option, validate_regex, isInt, UnknownError
from harisekhon import CLI
except ImportError as _:
print(traceback.format_exc(), end='')
sys.exit(4)
__author__ = '<NAME>'
__version__ = '0.3.2'
# pylint: disable=too-many-instance-attributes
class FindMissingFiles(CLI):
def __init__(self):
# Python 2.x
super(FindMissingFiles, self).__init__()
# Python 3.x
# super().__init__()
self.paths = []
self.regex_default = r'(?<!dis[ck]\s)' + \
r'(?<!CD\s)' + \
r'(?<!-)' + \
r'(?<!-\d)' + \
r'(?<!-\d\d)' + \
r'(?<!0)' + \
r'(?<!\d\.)' + \
r'(?<!\.mp)' + \
r'(\d+)' + \
r'(?![\w,@-])' + \
r'(?!\.\d)'
self.exclude_default = r'^\d+\s'
self.regex = None
self.include = None
self.exclude = None
self.fixed_suffix = False
self.missing_files = []
def add_options(self):
super(FindMissingFiles, self).add_options()
self.add_opt('-r', '--regex', metavar='REGEX', default=self.regex_default,
help='Regex capture of the portion of the filename to compare ' + \
'- must have capture brackets capturing an integer ' + \
'(default: "{}" )'\
.format(self.regex_default))
self.add_opt('-i', '--include', metavar='REGEX',
help=r"Include only paths that match the given case-insensitive regex (eg. '\.mp3$')")
self.add_opt('-e', '--exclude', metavar='REGEX', default=self.exclude_default,
help='Exclude paths that match the given case-insensitive regex (default: "{}" )'\
.format(self.exclude_default))
self.add_opt('-s', '--fixed-suffix', action='store_true',
help='Assume fixed suffixes and infer explicit filenames rather than globs. The reason this ' + \
'is not the default is that if this is not the case and there is some variation in ' + \
'suffixes, such as with audiobook chapters, then you will hit a lot of false positives ' + \
'that would have been caught by globbing')
def process_options(self):
super(FindMissingFiles, self).process_options()
self.regex = self.get_opt('regex')
self.include = self.get_opt('include')
self.exclude = self.get_opt('exclude')
self.fixed_suffix = self.get_opt('fixed_suffix')
validate_regex(self.regex)
self.regex = re.compile('(.*?)' + self.regex + '(.*)', re.I)
if self.include is not None:
validate_regex(self.include)
self.include = re.compile(self.include, re.I)
if self.exclude is not None:
validate_regex(self.exclude)
self.exclude = re.compile(self.exclude, re.I)
if self.args:
self.paths = self.args
else:
self.paths = sys.stdin.readlines()
log_option('paths', self.paths)
def is_included(self, path):
if not self.include:
return True
if self.include.search(path):
log.debug("including path: %s", path)
return True
return False
def is_excluded(self, path):
if not self.exclude:
return False
if self.exclude.search(path):
log.debug("excluding path: %s", path)
return True
return False
def run(self):
for path in self.paths:
if self.is_excluded(path):
continue
if not self.is_included(path):
continue
if not os.path.exists(path):
raise UnknownError('path not found: {}'.format(path))
if os.path.isdir(path):
self.process_directory(directory=path)
elif os.path.isfile(path):
self.check_file(filename=path)
def process_directory(self, directory):
for root, dirs, files in os.walk(directory, topdown=True):
for filename in files:
file_path = os.path.join(root, filename)
if not self.is_included(file_path):
continue
if self.is_excluded(file_path):
continue
self.check_file(filename=file_path)
for dirname in dirs:
dir_path = os.path.join(root, dirname)
if not self.is_included(dir_path):
continue
if self.is_excluded(dir_path):
continue
# massive depth directories will hit a recursion limit here but this is very rare in the real world
# and probably a sign the filesystem should be better structured
self.process_directory(directory=dir_path)
def check_file(self, filename):
log.debug('checking file \'%s\'', filename)
match = self.regex.search(os.path.basename(filename))
if not match:
log.debug('no numeric regex match for file, probably not a sequential file' + \
', skipping \'%s\'', filename)
return
# will error out here if you've supplied your own regex without capture brackets
# or if you've got pre-captures - let this bubble to user to fix their regex
file_prefix = os.path.join(os.path.dirname(filename), match.group(1))
file_number = match.group(2)
file_suffix = match.group(3)
if not isInt(file_number):
            raise UnknownError('regex captured a non-integer for filename: {}'.format(filename))
if file_prefix is None:
file_prefix = ''
if file_suffix is None:
file_suffix = ''
padding = len(file_number)
file_number = int(file_number)
while file_number > 1:
file_number = self.determine_missing_file_backfill(file_prefix, file_number, padding, file_suffix)
if self.missing_files:
print('\n'.join(reversed(self.missing_files)))
self.missing_files = []
def determine_missing_file_backfill(self, file_prefix, file_number, padding, file_suffix):
file_number -= 1
if self.fixed_suffix:
explicit_last_filename = '{}{:0>%(padding)s}{}' % {'padding': padding}
explicit_last_filename = explicit_last_filename.format(file_prefix, file_number, file_suffix)
if not os.path.isfile(explicit_last_filename):
self.missing_files.append(explicit_last_filename)
else:
file_number = -1
else:
expected_last_filename_glob = '{}{:0>%(padding)s}*' % locals()
expected_last_filename_glob = expected_last_filename_glob.format(file_prefix, file_number)
if not glob.glob(expected_last_filename_glob):
self.missing_files.append(expected_last_filename_glob)
else:
file_number = -1
return file_number
if __name__ == '__main__':
FindMissingFiles().main()
| 2.953125
| 3
|
start.py
|
aragaer/partner
| 1
|
12778841
|
#!/usr/bin/env python3
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
from dbus.mainloop.glib import DBusGMainLoop
from gi.repository import GObject
from partner import PartnerService
def main():
"""
Entry point.
"""
DBusGMainLoop(set_as_default=True)
PartnerService()
GObject.MainLoop().run()
if __name__ == '__main__':
main()
| 1.648438
| 2
|
tworaven_apps/api_docs/urls.py
|
TwoRavens/TwoRavens
| 20
|
12778842
|
from django.conf.urls import url
from tworaven_apps.api_docs import views, views_swagger
urlpatterns = (
url(r'^grpc-test-form$',
views.view_test_form,
name='view_test_form'),
#url(r'^v1/swagger.yml$',
# views_swagger.view_swagger_doc_v1,
# name='view_swagger_doc_v1'),
)
| 1.476563
| 1
|
processing.py
|
summa-platform/summa-deeptagger
| 0
|
12778843
|
<filename>processing.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import pickle
import json
import numpy as np
from nltk.tokenize import sent_tokenize, word_tokenize
from keras.utils.np_utils import to_categorical
import gzip
embeddings_folder = "word_vectors/"
def load_word_vectors(langs, emb='40'):
"""
Function to load pre-trained word vectors.
Output:
dictionary {
"french": np.array([w-fr_1,w-fr_2,...,w-fr_|V-fr|])
"german": np.array([w-de_1,w-de_2,...,w-de_|V-de|])
...
}
dictionary {
"french": [w-fr_1, ...]
"german": [w-de_1, ...]
}
"""
print("[*] Loading %s word vectors..." % ','.join(langs))
wvec, vocab = {}, {}
print(emb, embeddings_folder)
for fname in os.listdir(embeddings_folder):
if fname in langs:
with (gzip.open if fname.endswith('.gz') else open)(embeddings_folder+fname, 'rb') as f:
try:
lang_embeddings = pickle.load(f, encoding='latin1')
except TypeError:
f.seek(0) # reset
lang_embeddings = pickle.load(f)
lang = fname.split('.')[0]
wvec[lang] = lang_embeddings[1]
vocab[lang] = list(lang_embeddings[0])
sys.stdout.write("\t%s\r" % (fname) )
sys.stdout.flush()
return wvec, vocab
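# Expected call pattern (a sketch; it assumes the entries of `langs` match file names
# under word_vectors/ and that each file is a pickled (vocab_list, vector_matrix) pair,
# as unpacked above):
#
#   wvec, vocab = load_word_vectors(['english.pkl', 'german.pkl'])
#   wvec['english'].shape  # -> (vocabulary size, embedding dimension)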
def compute_label_indexes(x, y, labels):
Y_labs, X_labs, ids = [], [], []
for idx in range(len(y)):
y_labs = []
        for yidx in y[idx]:
            y_key = '_'.join(str(v) for v in yidx)
if len(yidx) > 0 and y_key in labels:
y_labs.append(labels.index(y_key))
if len(y_labs) > 0 and len(x[idx]) > 0:
Y_labs.append(y_labs)
X_labs.append(x[idx])
ids.append(idx)
return X_labs, Y_labs, ids
def load_yvec(wvec, y_idxs, wpad):
wdim = wvec[0].shape[0]
total = wpad
y_vec = np.zeros((wpad, wdim))
vecs = wvec[y_idxs[:wpad]]
y_vec[0:len(vecs)] = vecs
return y_vec
def load_xvec(wvec, x_idxs, wpad, spad):
wdim = wvec[0].shape[0]
total = spad*wpad
x_vec =[]
for j,x in enumerate(x_idxs):
vecs = wvec[x[:wpad]]
zeros = np.zeros((wpad, wdim))
zeros[0:len(vecs)] = vecs
if j == 0:
x_vec = zeros
else:
x_vec = np.vstack([x_vec, zeros])
if x_vec.shape[0] < total:
szeros = np.zeros((total - x_vec.shape[0], wdim))
x_vec = np.vstack([x_vec,szeros])
else:
x_vec = x_vec[:total,:]
return x_vec
def load_vecs_multilabel(wvec, labels, x_idxs, y_idxs, wpad, spad, simple_mode):
X_vecs, Y_labels = [], []
wdim = wvec[0].shape[0]
total = spad*wpad
for idx, x_idx_set in enumerate(x_idxs):
x_vec =[]
for j,x in enumerate(x_idx_set):
vecs = wvec[x[:wpad]]
zeros = np.zeros((wpad, wdim))
zeros[0:len(vecs)] = vecs
if j == 0:
x_vec = zeros
else:
x_vec = np.vstack([x_vec, zeros])
if x_vec.shape[0] < total:
szeros = np.zeros((total - x_vec.shape[0], wdim))
x_vec = np.vstack([x_vec,szeros])
else:
x_vec = x_vec[:total,:]
X_vecs.append(x_vec)
        try:
            y_cats = np.sum(to_categorical(y_idxs[idx], nb_classes=len(labels)), axis=0)
        except Exception:
            y_cats = np.array([])
y_cats[y_cats>1] = 1
Y_labels.append(y_cats)
return np.array(X_vecs), np.array(Y_labels)
def load_data(lang, ltype, vocab, path='', emb='512'):
print("[*] Loading %s documents..." % lang)
print("\t%s" % path)
doc_folder = path
docs = os.listdir(doc_folder)
X_idxs, Y_idxs, Y_cidxs, Y_ridxs, fnames = [],[],[],[], []
for i, fname in enumerate(docs):
if fname.find('.json') > -1:
doc = json.load(open(doc_folder+fname))
sys.stdout.write("\t%s (%d/%d)\r" % (lang, i+1, len(docs)) )
sys.stdout.flush()
title = doc["name"].lower()
if 'teaser' in doc:
teaser = doc["teaser"].lower()
else:
teaser = ""
if 'text' in doc:
body = doc['text'].lower()
else:
continue
refgroups, keywords = [], []
if "referenceGroups" in doc:
refgroups = doc["referenceGroups"]
for refgroup in refgroups:
if refgroup["type"] == "Keywords":
if lang == "arabic":
keywords = [w["name"].strip().lower() for w in refgroup['items']]
else:
keywords = [w["name"].strip().lower() for w in refgroup['items']]
category = doc["categoryName"].lower()
routes = doc["trackingInfo"]["customCriteria"]["x10"].lower().split("::")[2:]
sentences = [clean(title)]
sentences += sent_tokenize(clean(teaser))
sentences += sent_tokenize(clean(body))
            # Extract word ids and vectors per sentence
x, x_ids = [], []
for sentence in sentences:
vecs, vecs_ids = [], []
for word in word_tokenize(sentence):
try:
idx = vocab[lang].index(word)
vecs_ids.append(idx)
                    except ValueError:
continue
if len(vecs_ids) > 0:
x_ids.append(vecs_ids)
y_ids = extract_wordids(keywords, lang, vocab) #
y_cids = extract_wordids([category], lang, vocab)
y_rids = extract_wordids(routes, lang, vocab)
X_idxs.append(x_ids)
Y_idxs.append(y_ids)
Y_cidxs.append(y_cids)
Y_ridxs.append(y_rids)
fnames.append(fname)
h = {'X_idxs': X_idxs,
'Y_idxs': Y_idxs,
'Y_cidxs': Y_cidxs,
'Y_ridxs': Y_ridxs,
'fnames':fnames}
if ltype == "kw":
return X_idxs, Y_idxs, fnames
elif ltype == "cat":
return X_idxs, Y_cidxs, fnames
elif ltype == "rou":
return X_idxs, Y_ridxs, fnames
def extract_wordids(keywords, lang, vocab):
y_ids = []
for keyword in keywords:
keyword = keyword.strip()
vecs_ids = []
for word in keyword.split():
try:
idy = vocab[lang].index(word)
vecs_ids.append(idy)
            except ValueError:
continue
if len(vecs_ids) > 0:
y_ids.append(vecs_ids)
return y_ids
def clean(text):
text = text.replace('\n', ' ')
text = text.replace('\r', ' ')
text = text.replace('\t', ' ')
return text
| 2.578125
| 3
|
wordlist/cpf_tools.py
|
andradjp/tools
| 0
|
12778844
|
"""
Author: <NAME>
Date: 12/05/2018
"""
import random
def gera_cpf():  # Function to generate a random CPF number
    cpf = list(random.choices([0,1,2,3,4,5,6,7,8,9], k=9))  # Draw the nine base digits at random
    # Compute the first check digit
    pesos = [10, 9, 8, 7, 6, 5, 4, 3, 2]
primeiro_digito = []
for idx,i in enumerate(cpf):
primeiro_digito.append(i * pesos[idx])
primeiro_digito = sum(primeiro_digito)
if (primeiro_digito % 11) < 2:
cpf.append(0)
else:
cpf.append(11 - (primeiro_digito % 11))
    # Compute the second check digit
pesos = [11, 10, 9, 8, 7, 6, 5, 4, 3, 2]
segundo_digito = []
for idx,i in enumerate(cpf):
segundo_digito.append(i * pesos[idx])
segundo_digito = sum(segundo_digito)
if (segundo_digito % 11) < 2:
cpf.append(0)
else:
cpf.append(11 - (segundo_digito % 11))
return '{}{}{}.{}{}{}.{}{}{}-{}{}'.format(*cpf)
def verifica_cpf(cpf):  # Function to check whether a CPF is valid
cpf = cpf.replace('.','')
cpf = cpf.replace('-', '')
cpf = list(map(int, cpf))
cpf_temp = cpf[0:9]
pesos = [10, 9, 8, 7, 6, 5, 4, 3, 2]
primeiro_digito = []
for idx, i in enumerate(cpf_temp):
primeiro_digito.append(i * pesos[idx])
primeiro_digito = sum(primeiro_digito)
if (primeiro_digito % 11) < 2:
cpf_temp.append(0)
else:
cpf_temp.append(11 - (primeiro_digito % 11))
pesos = [11, 10, 9, 8, 7, 6, 5, 4, 3, 2]
segundo_digito = []
for idx,i in enumerate(cpf_temp):
segundo_digito.append(i * pesos[idx])
segundo_digito = sum(segundo_digito)
if (segundo_digito % 11) < 2:
cpf_temp.append(0)
else:
cpf_temp.append(11 - (segundo_digito % 11))
    if cpf == cpf_temp:
        return 'Valid CPF!'
    else:
        return 'Invalid CPF'
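# Quick round-trip sketch (not in the original script): every CPF produced by
# gera_cpf() should be accepted by the verifier.
#
#   print(verifica_cpf(gera_cpf()))  # -> 'Valid CPF!'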
for x in range(50):
print(gera_cpf())
| 3.546875
| 4
|
actions/cloudbolt_plugins/power_off_expired_servers/power_off_expired_servers.py
|
p6knewman/cloudbolt-forge
| 34
|
12778845
|
<reponame>p6knewman/cloudbolt-forge
from utilities import events
def run(job, logger=None, **kwargs):
"""
A post-expire hook that, given a list of expired servers, powers off any that are not off yet.
The expiration_date parameter is used to determine whether a server is expired.
Also updates their history with the event & an explanation.
"""
for server in job.server_set.all():
if server.power_status != "POWEROFF":
job.set_progress("Powering off %s." % server.hostname)
server.power_off()
msg = "Server powered off because it expired at %s." % (server.expiration_date)
events.add_server_event('MODIFICATION', server, msg, job=job)
return '', '', ''
| 2.90625
| 3
|
UI/Container.py
|
FearlessClock/RobotFactory
| 0
|
12778846
|
import pygame
from pygame.math import Vector2
from pygame.rect import Rect
from UI.Button import Button
class Container:
""" Position in screen space
menuSize in screen space"""
def __init__(self, position: Vector2, menuSize: Vector2):
self.size = menuSize
self.position = position
self.buttons = []
"""Buttons in the list have to be placed in relation to the container and not the screen"""
def addButton(self, button: Button):
self.buttons.append(button);
def drawContainer(self, surface, fontRenderer):
pygame.draw.rect(surface, (0, 255, 0), Rect(self.position.x, self.position.y, self.size.x, self.size.y))
        for button in self.buttons:
            button.draw(surface, self.position, fontRenderer)
def getButtonPressed(self, clickPos):
relativePos = clickPos - self.position
for button in self.buttons:
if button.rect.x < relativePos.x < button.rect.topright[0] and button.rect.y < relativePos.y < button.rect.bottomright[1]:
button.click()
return True
return False
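# A minimal usage sketch (not part of the original module; `screen`, `font` and
# `my_button` are placeholders for objects created elsewhere with pygame and UI.Button):
#
#   container = Container(Vector2(10, 10), Vector2(200, 300))
#   container.addButton(my_button)                  # button rect is relative to the container
#   container.drawContainer(screen, font)           # call inside the render loop
#   container.getButtonPressed(Vector2(*pygame.mouse.get_pos()))  # on mouse click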
| 3.625
| 4
|
app/migrations/0006_auto_20200530_1922.py
|
fluix-dev/cloak
| 3
|
12778847
|
# Generated by Django 3.0.6 on 2020-05-30 23:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0005_formfield_multiple_choices'),
]
operations = [
migrations.AlterModelOptions(
name='formfield',
options={'ordering': ['order']},
),
migrations.AddField(
model_name='formfield',
name='order',
field=models.PositiveIntegerField(default=0),
),
]
| 1.648438
| 2
|
db_storage/urls.py
|
jskopek/frame
| 0
|
12778848
|
from django.conf.urls import patterns, include, url
from db_storage.views import ImageView
urlpatterns = patterns('',
url(r'^(?P<file_name>[^/]+)$', ImageView.as_view(), name='db_storage_image'),
)
| 1.84375
| 2
|
readtagger/cli/findcluster.py
|
bardin-lab/read_tagger
| 3
|
12778849
|
<gh_stars>1-10
import logging
import click
from readtagger.findcluster import ClusterManager
from readtagger import VERSION
import multiprocessing_logging
@click.command()
@click.option('-i',
'--input_path',
help='Find cluster in this BAM file.',
type=click.Path(exists=True),
required=True)
@click.option('-r',
'--region',
help='Find clusters in this Region (Format is chrX:2000-1000).',
default=None,)
@click.option('--max_proper_pair_size',
help='Maximum proper pairs size. If not given will be inferred from the data.',
default=0,)
@click.option('--output_bam',
help='Write out BAM file with cluster information to this path. '
'Reads will have an additional "CD" tag to indicate the cluster number',
type=click.Path(exists=False))
@click.option('--output_gff',
help='Write out GFF file with cluster information to this path.',
type=click.Path(exists=False))
@click.option('--output_vcf',
help='Write out VCF file with cluster information to this path.',
type=click.Path(exists=False))
@click.option('--output_fasta',
help='Write out supporting evidence for clusters to this path.',
type=click.Path(exists=False))
@click.option('--sample_name',
default=None,
help='Sample name to use when writing out clusters in GFF file. '
'Default is to infer the name from the input filename.',
)
@click.option('--include_duplicates/--no-include_duplicates',
help='Include reads marked as duplicates when finding clusters.',
default=False)
@click.option('--transposon_reference_fasta',
type=click.Path(exists=True),
help=('Transposon fasta to align clipped reads to. '
'Not necessary if BWA index is provided.'),
default=None,
required=False)
@click.option('--transposon_bwa_index',
help='Transposon BWA index to align clipped reads to',
default=None,
required=False)
@click.option('--genome_reference_fasta',
type=click.Path(exists=True),
help=('Genome fasta to align clipped reads to. '
'Not necessary if BWA index is provided.'),
default=None,
required=False)
@click.option('--genome_bwa_index',
help='Genome BWA index to align clipped reads to',
default=None,
required=False)
@click.option('--min_mapq',
help="Only consider reads with MAPQ equal to or higher than this setting.",
default=4,
type=click.IntRange(0, 60))
@click.option('-t',
'--threads',
help='Threads to use for cap3 assembly step', default=1, type=click.IntRange(1, 100))
@click.option('--shm_dir',
envvar="SHM_DIR",
help='Path to shared memory folder', default=None, type=click.Path(exists=True))
@click.option('-v', '--verbosity', default='DEBUG', help="Set the default logging level.")
@click.option('-l',
'--log_to',
default=None,
help='Write logs to this file',
type=click.Path(exists=False))
@click.version_option(version=VERSION)
def findcluster(**kwds):
"""Find clusters of reads that support a TE insertion."""
logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s - %(message)s',
filename=kwds.pop('log_to'),
level=getattr(logging, kwds.pop('verbosity')))
multiprocessing_logging.install_mp_handler()
return ClusterManager(**kwds)
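# Example invocation (a sketch; the BAM/FASTA names are made up and the console entry
# point is assumed to be installed as 'findcluster'):
#
#   findcluster -i tagged.bam --output_gff clusters.gff --output_vcf clusters.vcf \
#       --transposon_reference_fasta transposons.fasta --threads 4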
| 2.34375
| 2
|
docs/api-references/conf.py
|
bayeshack2016/icon-service
| 52
|
12778850
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
import os
import sys
# -- Path setup --------------------------------------------------------------
from recommonmark.transform import AutoStructify
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'ICONService API References'
copyright = '2019, ICON Foundation'
author = 'ICON Foundation'
about = {}
path = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../../iconservice/__version__.py')
with open(path, 'r', encoding='utf-8') as f:
exec(f.read(), about)
version = about["__version__"]
release = ''
# -- General configuration ---------------------------------------------------
needs_sphinx = '1.8'
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'recommonmark'
]
source_suffix = {'.md': 'markdown'}
master_doc = 'index'
add_module_names = False
autodoc_mock_imports = [
"setproctitle",
"plyvel",
"earlgrey",
"iconcommons",
"coincurve",
]
# -- Options for HTML output -------------------------------------------------
html_theme = 'sphinx_rtd_theme'
html_show_sourcelink = False
# -- Options for HTMLHelp output ---------------------------------------------
htmlhelp_basename = 'ICONServicedoc'
# -- Options for manual page output ------------------------------------------
man_pages = [
(master_doc, 'iconservice', 'ICONService Documentation',
[author], 1)
]
# -- recommenmark configuration -------------------------------------------------
github_doc_root = 'https://github.com/rtfd/recommonmark/tree/master/doc/'
def setup(app):
app.add_config_value('recommonmark_config', {
'url_resolver': lambda url: github_doc_root + url,
'auto_toc_tree_section': 'Contents',
}, True)
app.add_transform(AutoStructify)
| 1.539063
| 2
|