max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
app/api.py | ethan-homan/f1-lineup-optimization-api | 0 | 12771251 | <gh_stars>0
import logging
from fastapi import APIRouter
from fastapi.encoders import jsonable_encoder
from app.types import UserModelSpec
from app.solver import solve_top_lineups
from app.model_spec import LineupOptimizationModelSpec
# Route application logging through gunicorn's error logger so messages
# emitted by the app show up in the server log at the server's level.
gunicorn_error_logger = logging.getLogger("gunicorn.error")
logging.root.handlers.extend(gunicorn_error_logger.handlers)
logging.root.setLevel(gunicorn_error_logger.level)

router = APIRouter()


@router.get("/health")
def health() -> dict:
    """Liveness probe: always responds with ``{"status": "ok"}``."""
    return jsonable_encoder(
        dict(status="ok")
    )


@router.post("/optimize-lineup/")
async def optimize_lineup(model: UserModelSpec):
    """Solve for the best lineups described by the user's model spec.

    The user-facing spec is converted into the internal optimization
    spec, then the solver returns the top 5 lineups found.
    """
    model = LineupOptimizationModelSpec(**dict(model))
    solutions = solve_top_lineups(
        5,  # number of top lineups to return
        players=model.player_scores,
        budget=model.budget,
        allow_teammates=model.allow_teammates,
        include_override=model.include_override,
        exclude_override=model.exclude_override,
        turbo_override=model.turbo_override,
        no_turbo_override=model.no_turbo_override,
    )
    return jsonable_encoder(
        solutions
    )
| 2.203125 | 2 |
deeplearning/layers/optimizers.py | cbschaff/nlimb | 12 | 12771252 | """
Defining standard tensorflow optimizers as modules.
"""
import tensorflow as tf
from deeplearning import module
from deeplearning import tf_util as U
class SGD(module.Optimizer):
    """Momentum SGD optimizer module with a runtime-adjustable learning rate."""
    ninputs = 1

    def __init__(self, name, loss, lr=1e-4, momentum=0.0, clip_norm=None):
        super().__init__(name, loss)
        self.lr = lr
        self.momentum = momentum
        self.clip_norm = clip_norm

    def _build(self, loss):
        # ops for updating the learning rate
        self._lr = tf.Variable(self.lr, name='lr', trainable=False)
        self._lr_placeholder = tf.placeholder(tf.float32, shape=(), name='lr_ph')
        self._update_lr = self._lr.assign(self._lr_placeholder)

        params = self.trainable_variables()
        self._flatgrad = U.flatgrad(loss, params, self.clip_norm)
        grads = tf.gradients(loss, params)
        if self.clip_norm is not None:
            grads, grad_norm = tf.clip_by_global_norm(grads, self.clip_norm)
        # BUGFIX: pass the in-graph variable self._lr (not the python float
        # self.lr) so that update_lr() actually changes the rate the
        # optimizer uses. Previously the assign op had no effect on training.
        opt = tf.train.MomentumOptimizer(self._lr, momentum=self.momentum)
        return grads, opt.apply_gradients(list(zip(grads, params)))

    def _add_run_args(self, outs, feed_dict, **flags):
        super()._add_run_args(outs, feed_dict, **flags)
        if flags.get('flatgrad'):
            outs['flatgrad'] = self._flatgrad

    def update_lr(self, new_lr):
        """Assign a new learning rate to the in-graph lr variable."""
        self.lr = new_lr
        sess = tf.get_default_session()
        sess.run(self._update_lr, feed_dict={self._lr_placeholder: self.lr})

    # convenience method
    def flatgrad(self, inputs, state=[]):
        """Return the flattened loss gradient for the given inputs.

        NOTE: `state=[]` mutable default kept for interface compatibility;
        it is only passed through, never mutated here.
        """
        return self.run(inputs, state, out=False, state_out=False, flatgrad=True)['flatgrad']
class Adam(module.Optimizer):
    """Adam optimizer module with a runtime-adjustable learning rate."""
    ninputs = 1

    def __init__(self, name, loss, lr=1e-4, beta1=0.9, beta2=0.999, clip_norm=None):
        super().__init__(name, loss)
        self.lr = lr
        self.beta1 = beta1
        self.beta2 = beta2
        self.clip_norm = clip_norm

    def _build(self, loss):
        # ops for updating the learning rate
        self._lr = tf.Variable(self.lr, name='lr', trainable=False)
        self._lr_placeholder = tf.placeholder(tf.float32, shape=(), name='lr_ph')
        self._update_lr = self._lr.assign(self._lr_placeholder)

        params = self.trainable_variables()
        self._flatgrad = U.flatgrad(loss, params, self.clip_norm)
        grads = tf.gradients(loss, params)
        if self.clip_norm is not None:
            grads, grad_norm = tf.clip_by_global_norm(grads, self.clip_norm)
        # BUGFIX: pass the in-graph variable self._lr (not the python float
        # self.lr) so that update_lr() actually changes the rate the
        # optimizer uses. Previously the assign op had no effect on training.
        opt = tf.train.AdamOptimizer(self._lr, beta1=self.beta1, beta2=self.beta2)
        return grads, opt.apply_gradients(list(zip(grads, params)))

    def _add_run_args(self, outs, feed_dict, **flags):
        super()._add_run_args(outs, feed_dict, **flags)
        if flags.get('flatgrad'):
            outs['flatgrad'] = self._flatgrad

    def update_lr(self, new_lr):
        """Assign a new learning rate to the in-graph lr variable."""
        self.lr = new_lr
        sess = tf.get_default_session()
        sess.run(self._update_lr, feed_dict={self._lr_placeholder: self.lr})

    # convenience method
    def flatgrad(self, inputs, state=[]):
        """Return the flattened loss gradient for the given inputs.

        NOTE: `state=[]` mutable default kept for interface compatibility;
        it is only passed through, never mutated here.
        """
        return self.run(inputs, state, out=False, state_out=False, flatgrad=True)['flatgrad']
| 2.59375 | 3 |
10_File_Handling/01_Files.py | AmanDhimanD/Python_CompleteCode | 2 | 12771253 | f=open('This.txt','r')
# this is Default read mode of file
f=open('This.txt') #open file
# data=f.read()
data=f.read(5) #Starting 5 characters from file
print(data)
f.close() | 3.65625 | 4 |
third/time_Arrow.py | gottaegbert/penter | 13 | 12771254 | # https://github.com/arrow-py/arrow
"""
>>> import arrow
>>> arrow.get('2013-05-11T21:23:58.970460+07:00')
<Arrow [2013-05-11T21:23:58.970460+07:00]>
>>> utc = arrow.utcnow()
>>> utc
<Arrow [2013-05-11T21:23:58.970460+00:00]>
>>> utc = utc.shift(hours=-1)
>>> utc
<Arrow [2013-05-11T20:23:58.970460+00:00]>
>>> local = utc.to('US/Pacific')
>>> local
<Arrow [2013-05-11T13:23:58.970460-07:00]>
>>> local.timestamp
1368303838
>>> local.format()
'2013-05-11 13:23:58 -07:00'
>>> local.format('YYYY-MM-DD HH:mm:ss ZZ')
'2013-05-11 13:23:58 -07:00'
>>> local.humanize()
'an hour ago'
>>> local.humanize(locale='ko_kr')
'1시간 전'
>>> arrow.utcnow()
<Arrow [2013-05-07T04:20:39.369271+00:00]>
>>> arrow.now()
<Arrow [2013-05-06T21:20:40.841085-07:00]>
>>> arrow.now('US/Pacific')
<Arrow [2013-05-06T21:20:44.761511-07:00]>
>>> arrow.get(1367900664)
<Arrow [2013-05-07T04:24:24+00:00]>
>>> arrow.get(1367900664.152325)
<Arrow [2013-05-07T04:24:24.152325+00:00]>
>>> arrow.get(datetime.utcnow())
<Arrow [2013-05-07T04:24:24.152325+00:00]>
>>> arrow.get(datetime(2013, 5, 5), 'US/Pacific')
<Arrow [2013-05-05T00:00:00-07:00]>
>>> from dateutil import tz
>>> arrow.get(datetime(2013, 5, 5), tz.gettz('US/Pacific'))
<Arrow [2013-05-05T00:00:00-07:00]>
>>> arrow.get(datetime.now(tz.gettz('US/Pacific')))
<Arrow [2013-05-06T21:24:49.552236-07:00]>
>>> arrow.get('2013-05-05 12:30:45', 'YYYY-MM-DD HH:mm:ss')
<Arrow [2013-05-05T12:30:45+00:00]>
>>> arrow.get('June was born in May 1980', 'MMMM YYYY')
<Arrow [1980-05-01T00:00:00+00:00]>
>>> arrow.get(2013, 5, 5)
<Arrow [2013-05-05T00:00:00+00:00]>
>>> past = arrow.utcnow().shift(hours=-1)
>>> past.humanize()
'an hour ago'
# 只需要时间间隔
>>> present = arrow.utcnow()
>>> future = present.shift(hours=2)
>>> future.humanize(present)
'in 2 hours'
>>> future.humanize(present, only_distance=True)
'2 hours'
# 不同的时间粒度
>>> present = arrow.utcnow()
>>> future = present.shift(minutes=66)
>>> future.humanize(present, granularity="minute")
'in 66 minutes'
>>> future.humanize(present, granularity=["hour", "minute"])
'in an hour and 6 minutes'
>>> present.humanize(future, granularity=["hour", "minute"])
'an hour and 6 minutes ago'
>>> future.humanize(present, only_distance=True, granularity=["hour", "minute"])
'an hour and 6 minutes'
>>> arrow.utcnow().span('hour')
(<Arrow [2013-05-07T05:00:00+00:00]>, <Arrow [2013-05-07T05:59:59.999999+00:00]>)
# 向下取整
>>> arrow.utcnow().floor('hour')
<Arrow [2013-05-07T05:00:00+00:00]>
# 向上取整
>>> arrow.utcnow().ceil('hour')
<Arrow [2013-05-07T05:59:59.999999+00:00]>
# 时间范围序列
>>> start = datetime(2013, 5, 5, 12, 30)
>>> end = datetime(2013, 5, 5, 17, 15)
>>> for r in arrow.Arrow.span_range('hour', start, end):
... print r
...
(<Arrow [2013-05-05T12:00:00+00:00]>, <Arrow [2013-05-05T12:59:59.999999+00:00]>)
(<Arrow [2013-05-05T13:00:00+00:00]>, <Arrow [2013-05-05T13:59:59.999999+00:00]>)
(<Arrow [2013-05-05T14:00:00+00:00]>, <Arrow [2013-05-05T14:59:59.999999+00:00]>)
(<Arrow [2013-05-05T15:00:00+00:00]>, <Arrow [2013-05-05T15:59:59.999999+00:00]>)
(<Arrow [2013-05-05T16:00:00+00:00]>, <Arrow [2013-05-05T16:59:59.999999+00:00]>)
""" | 2.78125 | 3 |
udemy-data-structures-and-algorithms/15-recursion/15.5_factorial_memoization.py | washimimizuku/python-data-structures-and-algorithms | 0 | 12771255 | <filename>udemy-data-structures-and-algorithms/15-recursion/15.5_factorial_memoization.py
# Create cache for known results
# Cache for known results (memoization table keyed by k)
factorial_memo = {}


def factorial(k):
    """Return k! using the module-level memo of previously computed values.

    Values below 2 (including 0 and 1, and negatives) return 1; larger
    values are computed recursively and cached in ``factorial_memo``.
    """
    if k < 2:
        return 1
    # `k not in` is the idiomatic membership test (was `not k in`).
    if k not in factorial_memo:
        factorial_memo[k] = k * factorial(k - 1)
    return factorial_memo[k]
# Demonstrate the memo filling in: empty before the first call,
# progressively populated after each factorial() call.
print(factorial_memo)
print(factorial(4))
print(factorial_memo)
print(factorial(5))
print(factorial_memo)
| 4 | 4 |
app/mod/mod_utils.py | jacklul/openvr_fsr_app | 0 | 12771256 | <filename>app/mod/mod_utils.py
from pathlib import Path, WindowsPath
from typing import Dict, Optional
import app.mod
from app.app_settings import AppSettings
from app.globals import get_data_dir
from app.mod import BaseModType
from app.util.utils import get_file_hash
def get_mod(manifest, mod_type):
    """ Get Mod Class Object by int type specifier

    Looks up the class name registered for ``mod_type`` in
    ``BaseModType.mod_types`` and instantiates it with the manifest.

    :param dict manifest:
    :param int mod_type:
    :rtype: app.mod.base_mod.BaseMod
    """
    mod_type_class = getattr(app.mod, BaseModType.mod_types.get(mod_type))
    return mod_type_class(manifest)
def get_available_mods(manifest):
    """Yield one instantiated mod object per registered mod type.

    :param dict manifest:
    :rtype: list[app.mod.base_mod.BaseMod]
    """
    yield from (get_mod(manifest, registered_type)
                for registered_type in BaseModType.mod_types)
def check_mod_data_dir(custom_data_dir: Path, mod_type: int) -> bool:
    """Return True if ``custom_data_dir`` exists and contains both the
    mod's DLL and its settings file.

    :param custom_data_dir: user-configured directory (may be None/empty)
    :param mod_type: int type specifier understood by :func:`get_mod`
    """
    if custom_data_dir and Path(custom_data_dir).exists():
        mod = get_mod(dict(), mod_type)
        base = Path(custom_data_dir)
        # `base / name` is already a Path; the original wrapped it in a
        # redundant extra Path() call.
        dll_exists = (base / mod.DLL_NAME).exists()
        cfg_exists = (base / mod.settings.CFG_FILE).exists()
        return dll_exists and cfg_exists
    return False
def update_mod_data_dirs() -> Dict[int, str]:
    """Resolve the data directory for every mod type.

    Each mod type defaults to its named folder under the app data dir;
    a user-configured directory from ``AppSettings`` wins when it
    actually contains the mod's DLL and config file.  The final mapping
    is written back to ``AppSettings.mod_data_dirs`` and returned.
    """
    mod_dirs = dict()
    for mod_type in BaseModType.mod_types.keys():
        # Default location: <data_dir>/<mod-specific dir name>
        data_dir_name = BaseModType.mod_data_dir_names[mod_type]
        mod_dirs[mod_type] = str(WindowsPath(get_data_dir() / data_dir_name))

        # Prefer a valid custom directory configured by the user
        custom_src_data_dir = AppSettings.mod_data_dirs.get(mod_type)
        if check_mod_data_dir(custom_src_data_dir, mod_type):
            mod_dirs[mod_type] = custom_src_data_dir

    AppSettings.mod_data_dirs.update(mod_dirs)
    return mod_dirs
def get_mod_version_from_dll(engine_dll: Path, mod_type: int) -> Optional[str]:
    """Reverse-look-up the mod version whose known file hash matches
    ``engine_dll``.

    Returns None when the file does not exist, the mod type is unknown,
    or no recorded hash matches.
    """
    if not engine_dll.exists():
        return None  # explicit (the original fell through a bare return)

    file_hash = get_file_hash(engine_dll.as_posix())

    # Map mod type -> {version: hash} table kept in AppSettings
    version_tables = {
        BaseModType.fsr: AppSettings.open_vr_fsr_versions,
        BaseModType.foveated: AppSettings.open_vr_foveated_versions,
        BaseModType.vrp: AppSettings.vrperfkit_versions,
    }
    version_dict = version_tables.get(mod_type, dict())

    for version, hash_str in version_dict.items():
        if file_hash == hash_str:
            return version
    return None
| 2.328125 | 2 |
tests/integrated/settings_test.py | BGASM/pyentrez | 0 | 12771257 | <reponame>BGASM/pyentrez<filename>tests/integrated/settings_test.py
import configargparse
import cmdline as cmd
def main() -> None:
    """Command line parsing to determine how pyEntrez should be run.

    Obtains the ArgParser from :mod:`cmdline`, parses the command line,
    and echoes every parsed option for inspection.

    Attributes:
        parser (configargparse.ArgParser): parser obtained from cmd.
        parsed (vars): dict view of all parsed arguments.
    """
    parser, path = cmd.get_parser()
    parsed = vars(parser.parse_args())
    for key in parsed:
        print(f'Check{key}:{parsed[key]}')


if __name__ == '__main__':
    main()
| 2.296875 | 2 |
mysocialauth/social_auth_app/apps.py | itssubas/django-social-auth | 0 | 12771258 | <reponame>itssubas/django-social-auth
from django.apps import AppConfig
class SocialAuthAppConfig(AppConfig):
    # Django app configuration: registers this package under the
    # app label 'social_auth_app'.
    name = 'social_auth_app'
| 1.226563 | 1 |
config.py | volltin/openranwen | 9 | 12771259 | <reponame>volltin/openranwen<filename>config.py
# Replace these values yourself before running.
BOTTOKEN="<KEY>"  # Telegram bot token (placeholder)
BOTNAME="ranwengamebot"
# PROXY="socks5h://localhost:1080"
PROXY=""
DEBUG=True
PY=True  # "py money" feature flag -- original comment was "py钱"; TODO confirm meaning
| 1.3125 | 1 |
fonty/models/font/font.py | jamesssooi/font-cli | 12 | 12771260 | '''font.py: Class to manage individual fonts.'''
import os
import codecs
from typing import Dict, Any, Optional
from fontTools.ttLib import TTFont
from fonty.lib.variants import FontAttribute
from fonty.lib.font_name_ids import FONT_NAMEID_FAMILY, FONT_NAMEID_FAMILY_PREFFERED, \
FONT_NAMEID_VARIANT, FONT_NAMEID_VARIANT_PREFFERED
from .font_format import FontFormat
class Font(object):
    '''Class to manage individual fonts.'''

    # Class Properties ------------------------------------------------------- #
    path_to_font: str
    family: str
    variant: FontAttribute
    name_table: Optional[Dict[Any, Any]] = None

    # Constructor ------------------------------------------------------------ #
    def __init__(
        self,
        path_to_font: str,
        family: str = None,
        variant: FontAttribute = None
    ) -> None:
        self.path_to_font = path_to_font

        # Get family name (parsed lazily from the font file when omitted)
        self.family = family if family else self.get_family_name()

        # Get variant
        self.variant = variant if variant else self.get_variant()

    # Class Methods ----------------------------------------------------------- #
    def install(self):
        '''Installs this font to the system.'''
        from fonty.lib.install import install_fonts

        # Install the font on to the system
        installed_font = install_fonts(self)

        return installed_font

    def generate_filename(self, ext: str = None) -> str:
        '''Generate a suitable filename from this font's name tables.'''
        family_name = self.get_family_name()
        variant = self.get_variant()

        if ext is None:
            _, ext = os.path.splitext(self.path_to_font)
            # BUGFIX: the original used `ext is not ''`, an identity check
            # that is not guaranteed for strings (SyntaxWarning on 3.8+);
            # compare by value instead.
            ext = ext if ext != '' else '.otf'  # Fallback to .otf

        return '{family}-{variant}{ext}'.format(
            family=family_name,
            variant=variant.print(long=True),
            ext=ext
        )

    def parse(self) -> 'Font':
        '''Parse the font's metadata from the font's name table.'''
        if not self.path_to_font or not os.path.isfile(self.path_to_font):
            # Same exception type as before, but with a diagnostic message.
            raise Exception('Font file not found: {}'.format(self.path_to_font))

        font = TTFont(file=self.path_to_font)
        if self.name_table is None:
            self.name_table = {}

        # Parse font file and retrieve family name and variant
        for record in font['name'].names:
            # Decode bytes.  NOTE(review): the elif and else branches are
            # identical; presumably the b'\xa9' case was meant to differ.
            if b'\x00' in record.string:
                data = record.string.decode('utf-16-be')
            elif b'\xa9' in record.string:
                data = codecs.decode(record.string, errors='ignore')
            else:
                data = codecs.decode(record.string, errors='ignore')
            self.name_table[str(record.nameID)] = data

        return self

    def get_name_data_from_id(self, name_id: str) -> str:
        '''Gets data from the font's name table via the name id.'''
        if self.name_table is None:
            self.parse()
        return self.name_table.get(name_id, None)

    def get_family_name(self) -> str:
        '''Get family name from the font's name tables, preferring the
        "preferred family" record when present.'''
        if self.name_table is None:
            self.parse()

        family_name = self.get_name_data_from_id(FONT_NAMEID_FAMILY)
        family_name_preferred = self.get_name_data_from_id(FONT_NAMEID_FAMILY_PREFFERED)

        return family_name_preferred if family_name_preferred else family_name

    def get_variant(self) -> FontAttribute:
        '''Get the font attributes from the font's name tables, preferring
        the "preferred subfamily" record when present.'''
        if self.name_table is None:
            self.parse()

        variant = self.get_name_data_from_id(FONT_NAMEID_VARIANT)
        variant_preferred = self.get_name_data_from_id(FONT_NAMEID_VARIANT_PREFFERED)
        variant = variant_preferred if variant_preferred else variant

        return FontAttribute.parse(variant)

    def convert(self, path: str, font_format: 'FontFormat' = None) -> str:
        '''Converts this font to either woff or woff2 formats.

        Returns the absolute path of the converted file.
        '''
        _, ext = os.path.splitext(os.path.basename(self.path_to_font))
        font = TTFont(file=self.path_to_font)

        # Get font flavor
        if font_format:
            if font_format == FontFormat.WOFF:
                font.flavor = 'woff'
                ext = '.woff'
            elif font_format == FontFormat.WOFF2:
                font.flavor = 'woff2'
                ext = '.woff2'
            else:
                # Same exception type as before, with a message.
                raise Exception('Only woff and woff2 are supported')

        # Create output directory if it doesn't exist
        path = os.path.abspath(path)
        if not os.path.exists(path):
            os.makedirs(path, exist_ok=True)
        if os.path.isdir(path):
            path = os.path.join(path, '')  # Append trailing slash

        # Generate output paths
        output_path = os.path.join(os.path.dirname(path), self.generate_filename(ext))

        # Convert and save
        font.save(file=output_path)

        return output_path
| 2.8125 | 3 |
jupyterlabpymolpysnips/Programming/synch.py | MooersLab/pymolpysnips | 0 | 12771261 | cmd.do('cmd.sync(timeout=1.0,poll=0.05);')
| 1.148438 | 1 |
FVC_Prediction.py | yyren/FVC | 11 | 12771262 | <filename>FVC_Prediction.py
import argparse
from FVC_utils import load_from_pickle, printx, time, path
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
def predictor_series(X_test, clfkit, normalizer=None):
    """Run a single-model classifier kit over the feature rows.

    ``X_test`` is scaled through ``normalizer`` when one is supplied.
    ``clfkit`` is a single-item dict mapping a model name to a fitted
    classifier.  Returns ``(predicted_labels, positive_class_scores)``.
    """
    if normalizer:
        X_test = normalizer.transform(X_test)

    model = list(clfkit)[0]
    clf = clfkit[model]
    printx('########### predicting using {} model ... ############'.format(model))

    start = time.time()
    y_pred = clf.predict(X_test)
    # Probability of the positive (last) class for each row.
    y_score = clf.predict_proba(X_test)[:, -1]
    printx('use {:.0f} s\n\n'.format(time.time() - start))

    return y_pred, y_score
# Command-line driver: load features, run the trained classifier, and write
# per-mutation predictions with probabilities.
parser = argparse.ArgumentParser(
    description='extract the complex region' )
parser.add_argument('--in_file', type=str,
                    help="snp/indel feature file")
parser.add_argument('--model', type=str, default="pretrain/gatk.model",
                    help="the model trained on your own data or the pretrain model")
parser.add_argument('--out_file', type=str, default="prediction.txt",
                    help="the filtered mutation (labeled by 0) and the propability that it is true mutation")
args = parser.parse_args()
in_file = args.in_file
model_file = args.model
result_file = args.out_file

# Feature matrix layout (space-separated, no header):
# col 0 = id, cols 1-5 = chr/loc0/loc1/ref/target, cols 6+ = model features.
mut_feat = pd.read_csv(in_file, header=None, sep=' ', low_memory=False).values
result = pd.DataFrame(mut_feat[:,1:6], columns=['chr','loc0', 'loc1','ref','target'])
X_test = mut_feat[:,6:]

# The pickled model file bundles (classifier kits, fitted normalizer).
clfkit, normalizer = load_from_pickle(model_file)
y_pred, y_score = predictor_series(X_test, clfkit[0], normalizer)
result['prediction'] = y_pred
result['probability'] = y_score
result.to_csv(result_file, index=False, sep='\t')
| 2.46875 | 2 |
app/controllers/vote.py | Axtell-io/Axtell | 15 | 12771263 | from flask import g, abort, redirect, url_for
from app.instances import db
from app.models.Post import Post
from app.models.Answer import Answer
from app.models.PostVote import PostVote
from app.models.AnswerVote import AnswerVote
# noinspection PyUnresolvedReferences
import app.routes.post
# noinspection PyUnresolvedReferences
import app.routes.user_settings
# noinspection PyUnresolvedReferences
import app.routes.auth
def get_post_vote_breakdown(post_id):
    """Return {"upvote": n, "downvote": m} for a post, or abort 404 if
    the post does not exist."""
    post = Post.query.filter_by(id=post_id).first()
    if post is None:
        return abort(404)

    # Comprehension instead of list(map(lambda ...)) -- same values.
    votes = [vote.vote for vote in PostVote.query.filter_by(post_id=post_id).all()]
    return {"upvote": votes.count(1), "downvote": votes.count(-1)}
def get_answer_vote_breakdown(answer_id):
    """Return {"upvote": n, "downvote": m} for an answer, or abort 404 if
    the answer does not exist."""
    answer = Answer.query.filter_by(id=answer_id).first()
    if answer is None:
        return abort(404)

    # Comprehension instead of list(map(lambda ...)) -- same values.
    votes = [vote.vote for vote in AnswerVote.query.filter_by(answer_id=answer_id).all()]
    return {"upvote": votes.count(1), "downvote": votes.count(-1)}
def get_post_vote(post_id):
    """Return the current user's vote (-1/0/1) on a post together with
    the overall vote breakdown.  Anonymous users always read as 0."""
    current_user = g.user
    if current_user is None:
        return {"vote": 0, "breakdown": get_post_vote_breakdown(post_id)}

    record = PostVote.query.filter_by(post_id=post_id, user_id=current_user.id).first()
    vote = 0 if record is None else record.vote
    return {"vote": vote, "breakdown": get_post_vote_breakdown(post_id)}
def get_answer_vote(answer_id):
    """Return the current user's vote (-1/0/1) on an answer together with
    the overall vote breakdown.  Anonymous users always read as 0."""
    current_user = g.user
    if current_user is None:
        return {"vote": 0, "breakdown": get_answer_vote_breakdown(answer_id)}

    record = AnswerVote.query.filter_by(answer_id=answer_id, user_id=current_user.id).first()
    vote = 0 if record is None else record.vote
    return {"vote": vote, "breakdown": get_answer_vote_breakdown(answer_id)}
def do_post_vote(post_id, vote):
    """Cast, change, or clear the current user's vote on a post.

    Aborts: 401 when not logged in, 400 for a non-integer or out-of-range
    vote, 404 when the post does not exist, 403 when voting on own content.
    Returns the new vote plus the updated breakdown.
    """
    current_user = g.user
    if current_user is None:
        return abort(401)

    # ensure that vote is a valid value
    try:
        vote = int(vote)
    except ValueError:
        return abort(400)

    if vote not in (-1, 0, 1):
        return abort(400)

    post = Post.query.filter_by(id=post_id).first()
    # BUGFIX: a missing post previously crashed with AttributeError on
    # post.user_id below; abort 404 like the breakdown helpers do.
    if post is None:
        return abort(404)

    # ensure that user is not voting on own content
    if post.user_id == g.user.id:
        return abort(403)

    # handle changing existing vote
    prev_vote = PostVote.query.filter_by(post_id=post_id, user_id=current_user.id).first()
    if prev_vote is not None:
        prev_vote.vote = vote
        db.session.commit()
    else:
        new_vote = PostVote(post_id=post_id, vote=vote, user_id=current_user.id)
        current_user.post_votes.append(new_vote)
        # Reuse the post fetched above (the original re-queried it here).
        post.votes.append(new_vote)
        db.session.add(new_vote)
        db.session.commit()

    return {"vote": vote, "breakdown": get_post_vote_breakdown(post_id)}
def do_answer_vote(answer_id, vote):
    """Cast, change, or clear the current user's vote on an answer.

    Aborts: 401 when not logged in, 400 for a non-integer or out-of-range
    vote, 404 when the answer does not exist, 403 when voting on own
    content.  Returns the new vote plus the updated breakdown.
    """
    current_user = g.user
    if current_user is None:
        return abort(401)

    # ensure that vote is a valid value
    try:
        vote = int(vote)
    except ValueError:
        return abort(400)

    if vote not in (-1, 0, 1):
        return abort(400)

    answer = Answer.query.filter_by(id=answer_id).first()
    # BUGFIX: a missing answer previously crashed with AttributeError on
    # answer.user_id below; abort 404 like the breakdown helpers do.
    if answer is None:
        return abort(404)

    # ensure that user is not voting on own content
    if answer.user_id == g.user.id:
        return abort(403)

    # handle changing existing vote
    prev_vote = AnswerVote.query.filter_by(answer_id=answer_id, user_id=current_user.id).first()
    if prev_vote is not None:
        prev_vote.vote = vote
        db.session.commit()
    else:
        new_vote = AnswerVote(answer_id=answer_id, vote=vote, user_id=current_user.id)
        current_user.answer_votes.append(new_vote)
        answer.votes.append(new_vote)
        db.session.add(new_vote)
        db.session.commit()

    return {"vote": vote, "breakdown": get_answer_vote_breakdown(answer_id)}
| 2.4375 | 2 |
official/vision/beta/projects/yolo/ops/kmeans_anchors_test.py | duncanriach-nvidia/tensorflow-models | 1 | 12771264 | <gh_stars>1-10
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""kmeans_test tests."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.vision.beta.projects.yolo.ops import kmeans_anchors
class KMeansTest(parameterized.TestCase, tf.test.TestCase):
  """Smoke test for the YOLO anchor k-means clustering op."""

  @parameterized.parameters((9, 3, 100))
  def test_kmeans(self, k, anchors_per_scale, samples):
    # Build `samples` fake groundtruth records with uniformly random boxes.
    sample_list = []
    for _ in range(samples):
      boxes = tf.convert_to_tensor(np.random.uniform(0, 1, [k * 100, 4]))
      sample_list.append({
          "groundtruth_boxes": boxes,
          "width": 10,
          "height": 10
      })

    kmeans = kmeans_anchors.AnchorKMeans()
    cl = kmeans(
        sample_list, k, anchors_per_scale, image_resolution=[512, 512, 3])
    cl = tf.convert_to_tensor(cl)
    # Expect one (width, height) anchor pair per cluster.
    self.assertAllEqual(tf.shape(cl).numpy(), [k, 2])


if __name__ == "__main__":
  tf.test.main()
| 2.171875 | 2 |
asynch/proto/progress.py | stdc105/asynch | 0 | 12771265 | from asynch.proto import constants
from asynch.proto.io import BufferedReader
class Progress:
    """Query-progress counters parsed from the server protocol stream."""

    def __init__(self, reader: BufferedReader):
        # Counters for the most recently read progress packet; increment()
        # folds another packet's values into these totals.
        self.rows = 0
        self.bytes = 0
        self.total_rows = 0
        self.written_rows = 0
        self.written_bytes = 0
        self.reader = reader

    async def read(
        self, server_revision,
    ):
        """Read one progress packet from the stream.

        Field order is fixed by the wire protocol; which trailing fields
        are present depends on the server protocol revision, so the
        varints below must be read in exactly this order.
        """
        self.rows = await self.reader.read_varint()
        self.bytes = await self.reader.read_varint()

        revision = server_revision
        if revision >= constants.DBMS_MIN_REVISION_WITH_TOTAL_ROWS_IN_PROGRESS:
            self.total_rows = await self.reader.read_varint()

        if revision >= constants.DBMS_MIN_REVISION_WITH_CLIENT_WRITE_INFO:
            self.written_rows = await self.reader.read_varint()
            self.written_bytes = await self.reader.read_varint()

    def increment(self, another_progress):
        """Accumulate the counters from another Progress instance."""
        self.rows += another_progress.rows
        self.bytes += another_progress.bytes
        self.total_rows += another_progress.total_rows
        self.written_rows += another_progress.written_rows
        self.written_bytes += another_progress.written_bytes
| 2.515625 | 3 |
generator.py | FrankWhoee/RS-10 | 0 | 12771266 | from keras.models import load_model
import numpy as np
from encoding import encode
from encoding import decode
model = load_model('Model-0.1.hf')
post_title = input("What do you want to know from u/rogersimon10? \n")
post_title = "What’s the worst thing you’ve eaten out of politeness?"
encoded_title = np.array(encode(post_title, padding=192))
encoded_title.reshape((-1))
print(encoded_title)
print(encoded_title.shape)
print(len(encoded_title))
encoded_answer = model.predict(encoded_title)
decoded_answer = decode(encoded_answer)
print(decoded_answer) | 3.296875 | 3 |
pyspedas/erg/satellite/erg/xep/xep.py | nickssl/pyspedas | 3 | 12771267 | import cdflib
import numpy as np
from pytplot import clip, options, store_data, ylim, zlim
from ..load import load
def xep(trange=['2017-06-01', '2017-06-02'],
        datatype='omniflux',
        level='l2',
        suffix='',
        get_support_data=False,
        varformat=None,
        varnames=[],
        downloadonly=False,
        notplot=False,
        no_update=False,
        uname=None,
        passwd=None,
        time_clip=False,
        ror=True):
    """
    This function loads data from the XEP-e experiment from the Arase mission

    Parameters:
        trange : list of str
            time range of interest [starttime, endtime] with the format
            'YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
            ['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']

        datatype: str
            Data type; Valid options:

        level: str
            Data level; Valid options:

        suffix: str
            The tplot variable names will be given this suffix. By default,
            no suffix is added.

        get_support_data: bool
            Data with an attribute "VAR_TYPE" with a value of "support_data"
            will be loaded into tplot. By default, only loads in data with a
            "VAR_TYPE" attribute of "data".

        varformat: str
            The file variable formats to load into tplot. Wildcard character
            "*" is accepted. By default, all variables are loaded in.

        varnames: list of str
            List of variable names to load (if not specified,
            all data variables are loaded)

        downloadonly: bool
            Set this flag to download the CDF files, but not load them into
            tplot variables

        notplot: bool
            Return the data in hash tables instead of creating tplot variables

        no_update: bool
            If set, only load data from your local cache

        time_clip: bool
            Time clip the variables to exactly the range specified in the trange keyword

        ror: bool
            If set, print PI info and rules of the road

    Returns:
        List of tplot variables created.

    """

    initial_notplot_flag = False
    if notplot:
        initial_notplot_flag = True

    if (datatype == 'omniflux') or (datatype == '2dflux'):
        # to avoid failure of creation Tplot variables (at store_data.py) of xep
        notplot = True

    file_res = 3600. * 24
    prefix = 'erg_xep_'+level+'_'
    pathformat = 'satellite/erg/xep/'+level+'/'+datatype + \
        '/%Y/%m/erg_xep_'+level+'_'+datatype+'_%Y%m%d_v??_??.cdf'

    loaded_data = load(pathformat=pathformat, trange=trange, level=level, datatype=datatype, file_res=file_res, prefix=prefix, suffix=suffix, get_support_data=get_support_data,
                       varformat=varformat, varnames=varnames, downloadonly=downloadonly, notplot=notplot, time_clip=time_clip, no_update=no_update, uname=uname, passwd=passwd)

    if (len(loaded_data) > 0) and ror:
        # Re-run with downloadonly=True just to obtain the local file paths
        # so the global attributes (PI info) can be read from the first CDF.
        # BUGFIX: the password argument contained an anonymizer placeholder
        # (`passwd=<PASSWORD>`), which is a syntax error; forward `passwd`.
        out_files = load(pathformat=pathformat, trange=trange, level=level, datatype=datatype, file_res=file_res, prefix=prefix, suffix=suffix, get_support_data=get_support_data,
                         varformat=varformat, varnames=varnames, downloadonly=True, notplot=notplot, time_clip=time_clip, no_update=True, uname=uname, passwd=passwd)
        cdf_file = cdflib.CDF(out_files[0])
        try:
            gatt = cdf_file.globalattsget()

            # --- print PI info and rules of the road
            print(' ')
            print(
                '**************************************************************************')
            print(gatt["LOGICAL_SOURCE_DESCRIPTION"])
            print('')
            print('Information about ERG XEP')
            print('')
            print('PI: ', gatt['PI_NAME'])
            print("Affiliation: "+gatt["PI_AFFILIATION"])
            print('')
            print('RoR of ERG project common: https://ergsc.isee.nagoya-u.ac.jp/data_info/rules_of_the_road.shtml.en')
            print('RoR of XEP: https://ergsc.isee.nagoya-u.ac.jp/mw/index.php/ErgSat/Xep')
            print('')
            print('Contact: erg_xep_info at isee.nagoya-u.ac.jp')
            print(
                '**************************************************************************')
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed.
            print('printing PI info and rules of the road was failed')

    if initial_notplot_flag or downloadonly:
        return loaded_data

    if isinstance(loaded_data, dict):

        if datatype == 'omniflux':
            tplot_variables = []
            if prefix + 'FEDO_SSD' + suffix in loaded_data:
                v_vars_min = loaded_data[prefix + 'FEDO_SSD' + suffix]['v'][0]
                v_vars_max = loaded_data[prefix + 'FEDO_SSD' + suffix]['v'][1]
                v_vars = np.sqrt(v_vars_min * v_vars_max)  # Geometric mean

                store_data(prefix + 'FEDO_SSD' + suffix, data={'x': loaded_data[prefix + 'FEDO_SSD' + suffix]['x'],
                                                               'y': loaded_data[prefix + 'FEDO_SSD' + suffix]['y'],
                                                               'v': v_vars})
                tplot_variables.append(prefix + 'FEDO_SSD' + suffix)

            if prefix + 'FEDO_SSD' + suffix in tplot_variables:
                # remove minus valuse of y array
                clip(prefix + 'FEDO_SSD' + suffix, 0., 5000.)
                # set spectrogram plot option
                options(prefix + 'FEDO_SSD' + suffix, 'Spec', 1)
                # set y axis to logscale
                options(prefix + 'FEDO_SSD' + suffix, 'ylog', 1)
                # set yrange
                options(prefix + 'FEDO_SSD' + suffix,
                        'yrange', [4.0e+02, 4.5e+03])
                # set z axis to logscale
                options(prefix + 'FEDO_SSD' + suffix, 'zlog', 1)
                # set zrange
                options(prefix + 'FEDO_SSD' + suffix,
                        'zrange', [1.0e-01, 1.0e+3])
                # change colormap option
                options(prefix + 'FEDO_SSD' + suffix, 'Colormap', 'jet')
                # set ztitle
                options(prefix + 'FEDO_SSD' + suffix,
                        'ztitle', '[/cm^{2}-str-s-keV]')
                # set ytitle
                options(prefix + 'FEDO_SSD' + suffix,
                        'ytitle', 'XEP\nomniflux\nLv2\nEnergy')
                # set ysubtitle
                options(prefix + 'FEDO_SSD' + suffix, 'ysubtitle', '[keV]')

                ylim(prefix + 'FEDO_SSD' + suffix, 4.0e+02, 4.5e+03)
                zlim(prefix + 'FEDO_SSD' + suffix, 1.0e-01, 1.0e+3)

            return tplot_variables

        if datatype == '2dflux':
            tplot_variables = []
            if prefix + 'FEDU_SSD' + suffix in loaded_data:
                store_data(prefix + 'FEDU_SSD' + suffix,
                           data={'x': loaded_data[prefix + 'FEDU_SSD' + suffix]['x'],
                                 'y': loaded_data[prefix + 'FEDU_SSD' + suffix]['y'],
                                 'v1': np.sqrt(loaded_data[prefix + 'FEDU_SSD' + suffix]['v'][:, 0]
                                               * loaded_data[prefix + 'FEDU_SSD' + suffix]['v'][:, 1]),  # Geometric mean of 'v'
                                 'v2': [i for i in range(16)]})  # [0, 1, 2, .., 15]
                tplot_variables.append(prefix + 'FEDU_SSD' + suffix)

            if prefix + 'FEDU_SSD' + suffix in tplot_variables:
                clip(prefix + 'FEDU_SSD' + suffix, -1.0e+10, 1.0e+10)

            return tplot_variables

    return loaded_data
| 2.765625 | 3 |
users/migrations/0008_remove_user_is_superuser.py | amado-developer/ReadHub-RestfulAPI | 0 | 12771268 | <gh_stars>0
# Generated by Django 3.0.6 on 2020-06-05 02:49
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: drops the redundant `is_superuser`
    # field from the custom user model.

    dependencies = [
        ('users', '0007_auto_20200605_0245'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='user',
            name='is_superuser',
        ),
    ]
| 1.40625 | 1 |
cogs/balance.py | Nadro-J/tipbot-v2 | 3 | 12771269 | <filename>cogs/balance.py
import discord
from discord.ext import commands
from utils import rpc_module, mysql_module, parsing, checks
#result_set = database response with parameters from query
#db_bal = nomenclature for result_set["balance"]
#snowflake = snowflake from message context, identical to user in database
#wallet_bal = nomenclature for wallet reponse
rpc = rpc_module.Rpc()
mysql = mysql_module.Mysql()
class Balance(commands.Cog):
    """Discord cog exposing the user's tipbot balance via PM embeds."""

    def __init__(self, bot):
        self.bot = bot
        config = parsing.parse_json('config.json')
        self.currency_symbol = config["currency_symbol"]
        self.stake_id = config["stake_bal"]
        self.donate = config["donation"]
        self.coin_name = config["currency_name"]
        self.bot_name = config["description"]
        # parse the embed section of the config file
        embed_config = parsing.parse_json('config.json')["embed_msg"]
        self.thumb_embed = embed_config["thumb_embed_url"]
        self.footer_text = embed_config["footer_msg_text"]
        self.embed_color = int(embed_config["color"], 16)

    @commands.command(pass_context=True)
    async def test(self, ctx):
        # NOTE(review): this title contains literal `{}` placeholders but
        # .format() is never applied, so the braces are sent verbatim --
        # presumably a command prefix and currency name should be filled in.
        embed = discord.Embed(title="You are now registered! :tada:\nUse {}deposit to view your {} address", color=self.embed_color)
        embed.set_footer(text=self.footer_text)
        await ctx.author.send(embed=embed)

    @commands.command(pass_context=True, aliases=['bal'])
    async def balance(self, ctx):
        """Display your balance"""
        snowflake = ctx.message.author.id

        # Check if user exists in db
        mysql.check_for_user(str(snowflake))
        balance = mysql.get_balance(str(snowflake), check_update=True)
        balance_unconfirmed = mysql.get_balance(str(snowflake), check_unconfirmed=True)
        # get the users staking rewards
        stakes = mysql.get_tip_amounts_from_id(self.stake_id, str(snowflake))
        # get the users donated amount
        donations = mysql.get_tip_amounts_from_id(str(snowflake), self.donate)

        # Simple embed for displaying username and balance
        embed = discord.Embed(title="You requested your **Balance**", color=self.embed_color)
        embed.set_author(name=self.bot_name)
        embed.add_field(name=":man_farmer: User", value=ctx.message.author.mention, inline=True)
        embed.add_field(name=":moneybag: Balance", value="{:.8f} {}".format(round(float(balance), 8), self.currency_symbol), inline=True)
        embed.set_thumbnail(url="http://{}".format(self.thumb_embed))
        if float(balance_unconfirmed) != 0.0:
            embed.add_field(name=":heavy_check_mark: Unconfirmed Deposits", value="{:.8f} {}".format(round(float(balance_unconfirmed), 8), self.currency_symbol), inline=False)
        if float(sum(stakes)) != 0.0:
            # BUGFIX: the original formatted round(float(stakes), 8) on the
            # list itself, which raises TypeError whenever there are staking
            # rewards; sum the amounts like the guard above does.
            embed.add_field(name=":cut_of_meat: Total Staking rewards", value="{:.8f} {}".format(round(float(sum(stakes)), 8), self.currency_symbol), inline=False)
        if float(sum(donations)) != 0.0:
            embed.add_field(name=":dollar: Total Donations sent", value="{:.8f} {}".format(round(float(sum(donations)), 8), self.currency_symbol), inline=False)
        embed.set_footer(text=self.footer_text)
        try:
            await ctx.author.send(embed=embed)
            if ctx.message.guild is not None:
                await ctx.message.delete()
                await ctx.send("{}, I PMed you your **Balance**! Make sure to double check that it is from me!".format(ctx.message.author.mention))
        except discord.HTTPException:
            await ctx.send("I need the `Embed links` permission to send this")
def setup(bot):
    # discord.py extension hook: called by bot.load_extension() to register this cog.
    bot.add_cog(Balance(bot))
| 2.671875 | 3 |
tests/test_library_matching.py | omigami/matchmsextras | 2 | 12771270 | import os
import sys
import numpy as np
import pytest
from matchms import Spectrum
from spec2vec import Spec2Vec
from spec2vec import SpectrumDocument
path_root = os.path.dirname(os.getcwd())
sys.path.insert(0, os.path.join(path_root, "matchmsextras"))
from matchmsextras.library_search import library_matching
def test_library_matching():
    """End-to-end check of library_matching on a tiny in-memory library.

    Three library spectra are pre-filtered by precursor m/z against one query
    spectrum; cosine and modified-cosine scores plus the number of matched
    peaks are compared with precomputed reference values.
    """
    spectrum_1 = Spectrum(mz=np.array([100, 150, 200.]),
                          intensities=np.array([0.7, 0.2, 0.1]),
                          metadata={'precursor_mz': 500.5})
    spectrum_2 = Spectrum(mz=np.array([100, 140, 190.]),
                          intensities=np.array([0.4, 0.2, 0.1]),
                          metadata={'precursor_mz': 500.11})
    spectrum_3 = Spectrum(mz=np.array([100, 140, 190.]),
                          intensities=np.array([0.3, 0.5, 0.2]),
                          metadata={'precursor_mz': 501.1})
    # Query spectrum: peaks shifted relative to the library spectra.
    spectrum_4 = Spectrum(mz=np.array([97.5, 137.5, 200.]),
                          intensities=np.array([0.8, 0.5, 0.4]),
                          metadata={'precursor_mz': 500.1})
    documents_library = [SpectrumDocument(s) for s in [spectrum_1, spectrum_2, spectrum_3]]
    documents_query = [SpectrumDocument(spectrum_4)]
    found_matches = library_matching(documents_query, documents_library,
                                     model=None,
                                     presearch_based_on=["precursor_mz"],
                                     include_scores=["cosine", "modcosine"],
                                     ignore_non_annotated=False,
                                     intensity_weighting_power=0.5,
                                     allowed_missing_percentage=5.0,
                                     cosine_tol=2.0,
                                     mass_tolerance=2.0,
                                     mass_tolerance_type="Dalton")
    scores_cosine = found_matches[0].values[:,0]
    expected_scores_cosine = np.array([0.05312127152597306, 0.0, 0.0])
    scores_modcos = found_matches[0].values[:,2]
    expected_scores_modcos = np.array([0.05312127152597306, 0.0, 0.7757282939050968])
    assert list(scores_cosine) == [pytest.approx(x, 1e-6) for x in expected_scores_cosine], \
        "Expected different scores."
    assert list(scores_modcos) == [pytest.approx(x, 1e-6) for x in expected_scores_modcos], \
        "Expected different mod. cosine scores."
    assert np.all(found_matches[0].values[:,3] == np.array([1, 0, 2])), \
        "Expected different number of matches"
    assert np.all(found_matches[0].values[:,4]), "Expected all mass matches to be True"
| 2.28125 | 2 |
communicator/services/ivy_service.py | sartography/uva-covid19-testing-communicator | 1 | 12771271 | import csv
import json
from datetime import datetime
from parser import ParserError
import globus_sdk
import pytz
import sentry_sdk
from dateutil import parser
from communicator import app, db
from communicator.errors import CommError
from communicator.models.ivy_file import IvyFile
from communicator.models.sample import Sample
from os import listdir, remove
from os.path import isfile, join
class IvyService(object):
    """Opens files uploaded to the server from IVY and imports them into the database. """

    def __init__(self):
        self.path = app.config['IVY_IMPORT_DIR']
        self.GLOBUS_CLIENT_ID = app.config['GLOBUS_CLIENT_ID']
        self.GLOBUS_TRANSFER_RT = app.config['GLOBUS_TRANSFER_RT']
        self.GLOBUS_TRANSFER_AT = app.config['GLOBUS_TRANSFER_AT']
        # NOTE(review): hard-coded expiry timestamp (2020-09-20); presumably a
        # leftover -- the RefreshTokenAuthorizer renews tokens anyway. Confirm.
        self.EXPIRES_AT = 1600601877
        self.GLOBUS_IVY_ENDPOINT = app.config['GLOBUS_IVY_ENDPOINT']
        self.GLOBUS_DTN_ENDPOINT = app.config['GLOBUS_DTN_ENDPOINT']
        self.GLOBUS_IVY_PATH = app.config['GLOBUS_IVY_PATH']
        self.GLOBUS_DTN_PATH = app.config['GLOBUS_DTN_PATH']
        # Cached Globus TransferClient plus its creation time (see get_transfer_client)
        self.transfer_client = None
        self.transfer_client_date = datetime.now()

    def load_directory(self):
        """Loads files from a local directory, returning a tuple containing the list
        of files, and the list of samples respectively"""
        onlyfiles = [f for f in listdir(self.path) if isfile(join(self.path, f))]
        app.logger.info(f'Loading directory {self.path}')
        samples = []
        files = []
        for file_name in onlyfiles:
            file_samples = IvyService.samples_from_ivy_file(self.path, file_name)
            ivy_file = db.session.query(IvyFile).filter(IvyFile.file_name == file_name).first()
            if not ivy_file:
                ivy_file = IvyFile(file_name=file_name, sample_count=len(file_samples))
            else:
                # Re-imported file: refresh its timestamp and sample count
                ivy_file.date_added = datetime.now()
                ivy_file.sample_count = len(file_samples)
            files.append(ivy_file)
            samples.extend(file_samples)
            app.logger.info(f'Loaded {len(file_samples)} samples from file {file_name}')
        app.logger.info(f'Loading a total of {len(samples)} samples from {len(files)} files')
        return files, samples

    @staticmethod
    def samples_from_ivy_file(path, file_name):
        """Parse one pipe-delimited IVY CSV file into a list of Sample records."""
        rows = []
        with open(join(path, file_name), 'r') as csv_file:
            reader = csv.DictReader(csv_file, delimiter='|')
            for row in reader:
                sample = IvyService.record_to_sample(row, file_name)
                rows.append(sample)
        return rows

    @staticmethod
    def record_to_sample(dictionary, file_name):
        """Creates a Test Result from a record read in from the IVY CSV File"""
        sample = Sample()
        try:
            try:
                sample.date = parser.parse(dictionary["Test Date Time"])
                tz = pytz.timezone("America/New_York")
                sample.date = tz.localize(sample.date)
            except Exception as pe:
                # Fall back to "now" rather than dropping the record; report upstream.
                sentry_sdk.capture_message(f"Failed to parse date for barcode '{dictionary['Test Bar Code']}', '{pe}'")
                sample.date = datetime.now()
            sample.barcode = dictionary['Test Bar Code']
            sample.student_id = dictionary["Student ID"]
            sample.phone = dictionary["Student Cellphone"]
            sample.email = dictionary["Student Email"]
            sample.location = dictionary["Test Kiosk Loc"]
            sample.result_code = dictionary["Test Result Code"]
            sample.ivy_file = file_name
            sample.in_ivy = True
            return sample
        except KeyError as e:
            raise CommError("100", f"Invalid CSV Record, missing column {e}")

    def get_transfer_client(self):
        """Return a Globus TransferClient, reusing a cached one for up to ~53 minutes."""
        # Cache the client so we don't create a new one for every call, but don't hold on to it for too long.
        if self.transfer_client is not None:
            # BUG FIX: the original computed (creation_time - now), which is always
            # negative, so the cached client was never refreshed.
            seconds = (datetime.now() - self.transfer_client_date).total_seconds()
            if seconds < 3200:
                return self.transfer_client

        self.client = globus_sdk.NativeAppAuthClient(self.GLOBUS_CLIENT_ID)
        self.client.oauth2_start_flow(refresh_tokens=True)

        # Refresh the token - so we don't get logged out.
        oauth_data = self.client.oauth2_refresh_token(self.GLOBUS_TRANSFER_RT)
        new_at = oauth_data.data['access_token']
        # BUG FIX: use the freshly refreshed access token ('new_at' was computed
        # but the original line contained a redacted '<PASSWORD>' placeholder).
        transfer_authorizer = globus_sdk.RefreshTokenAuthorizer(self.GLOBUS_TRANSFER_RT, self.client,
                                                                access_token=new_at,
                                                                expires_at=self.EXPIRES_AT)
        transfer_client = globus_sdk.TransferClient(authorizer=transfer_authorizer)

        # Be sure to activate both endpoints
        r = transfer_client.endpoint_autoactivate(self.GLOBUS_DTN_ENDPOINT, if_expires_in=3600)
        r2 = transfer_client.endpoint_autoactivate(self.GLOBUS_IVY_ENDPOINT, if_expires_in=3600)
        if r['code'] == 'AutoActivationFailed' or r2['code'] == 'AutoActivationFailed':
            app.logger.error('Endpoint({}) Not Active! Error! Source message: {}'.format(self.GLOBUS_CLIENT_ID, r['message']))
        elif r['code'] == 'AutoActivated.CachedCredential' or r2['code'] == 'AutoActivated.CachedCredential':
            app.logger.error('Endpoint({}) autoactivated using a cached credential.'.format(self.GLOBUS_CLIENT_ID))
        elif r['code'] == 'AutoActivated.GlobusOnlineCredential' or r2['code'] == 'AutoActivated.GlobusOnlineCredential':
            app.logger.error(('Endpoint({}) autoactivated using a built-in Globus credential.').format(self.GLOBUS_CLIENT_ID))
        elif r['code'] == 'AlreadyActivated' or r2['code'] == 'AlreadyActivated':
            app.logger.info('Endpoint({}) already active until at least {}'.format(self.GLOBUS_CLIENT_ID, 3600))
        self.transfer_client = transfer_client
        self.transfer_client_date = datetime.now()
        return self.transfer_client

    def request_transfer(self):
        """Submit a checksum-synced Globus transfer of the IVY directory to the DTN endpoint."""
        file_count = self.get_file_count_from_globus()
        app.logger.info(f"There are {file_count} files ready for transfer from Globus")
        if file_count > 0:
            app.logger.info("Located file(s) in Globus, requesting a transfer.")
            tc = self.get_transfer_client()
            tdata = globus_sdk.TransferData(tc, self.GLOBUS_IVY_ENDPOINT, self.GLOBUS_DTN_ENDPOINT, label="Transfer",
                                            sync_level="checksum")
            tdata.add_item(self.GLOBUS_IVY_PATH, self.GLOBUS_DTN_PATH, recursive=True)
            transfer_result = tc.submit_transfer(tdata)

    def get_file_count_from_globus(self):
        """Return the number of plain files currently in the IVY endpoint directory."""
        tc = self.get_transfer_client()
        response = tc.operation_ls(self.GLOBUS_IVY_ENDPOINT, path=self.GLOBUS_IVY_PATH)
        count = 0
        if "DATA" in response:
            for data_item in response["DATA"]:
                if data_item['DATA_TYPE'] == "file":
                    count += 1
        return count

    def delete_file(self, file_name):
        """Remove an imported file from the local import directory (best effort)."""
        try:
            remove(join(self.path, file_name))
        except OSError as e:  ## if failed, report it back to the user ##
            app.logger.error("Error Deleting File: %s - %s." % (e.filename, e.strerror))

        # tc = self.get_transfer_client()
        # ddata = globus_sdk.DeleteData(tc, self.GLOBUS_DTN_ENDPOINT, recursive=True)
        # file_path = f"{self.GLOBUS_DTN_PATH}/{file_name}"
        # ddata.add_item(file_path)
        # delete_result = tc.submit_delete(ddata)
        # app.logger.info("Requested deleting file: " + file_path)
        # app.logger.info("Deleted Covid-vpr file:" + str(delete_result))
        #
        # ddata = globus_sdk.DeleteData(tc, self.GLOBUS_IVY_ENDPOINT, recursive=True)
        # file_path = f"{self.GLOBUS_IVY_PATH}/{file_name}"
        # ddata.add_item(file_path)
        # delete_result = tc.submit_delete(ddata)
        # app.logger.info("Requested deleting file: " + file_path)
        # app.logger.info("Deleted ics file:" + str(delete_result))

    def get_access_token(self):
        """Purely for the command line, in the event we need to create a new access token,
        but this should be exceedingly rare, a good token can last a very very long time."""
        client = globus_sdk.NativeAppAuthClient(self.GLOBUS_CLIENT_ID)
        client.oauth2_start_flow(refresh_tokens=True)
        authorize_url = client.oauth2_get_authorize_url()
        print('Please go to this URL and login: {0}'.format(authorize_url))

        # this is to work on Python2 and Python3 -- you can just use raw_input() or
        # input() for your specific version
        get_input = getattr(__builtins__, 'raw_input', input)
        auth_code = get_input(
            'Please enter the code you get after login here: ').strip()
        token_response = client.oauth2_exchange_code_for_tokens(auth_code)

        globus_auth_data = token_response.by_resource_server['auth.globus.org']
        # let's get stuff for the Globus Transfer service
        globus_transfer_data = token_response.by_resource_server['transfer.api.globus.org']
        # the refresh token and access token, often abbr. as RT and AT
        transfer_rt = globus_transfer_data['refresh_token']
        transfer_at = globus_transfer_data['access_token']
        expires_at_s = globus_transfer_data['expires_at_seconds']

        # Now we've got the data we need, but what do we do?
        # That "GlobusAuthorizer" from before is about to come to the rescue
        authorizer = globus_sdk.RefreshTokenAuthorizer(
            transfer_rt, client, access_token=transfer_at)
        # and try using `tc` to make TransferClient calls. Everything should just
        # work -- for days and days, months and months, even years
        tc = globus_sdk.TransferClient(authorizer=authorizer)
        print("The Transfer Token is:" + transfer_rt)
        print("The Access Token: " + transfer_at)
        print("Expires At: " + str(expires_at_s))
| 2.234375 | 2 |
tests/test_dirichlet.py | BerenMillidge/inferactively | 0 | 12771272 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Unit Tests
__author__: <NAME>, <NAME>, <NAME>
"""
import os
import sys
import unittest
import numpy as np
from scipy.io import loadmat
sys.path.append(".")
from inferactively.distributions import Categorical, Dirichlet # nopep8
class TestDirichlet(unittest.TestCase):
    """Unit tests for the Dirichlet distribution wrapper."""

    def test_init_empty(self):
        """A default-constructed Dirichlet is two-dimensional."""
        d = Dirichlet()
        self.assertEqual(d.ndim, 2)

    def test_init_overload(self):
        """Passing both dims and values is rejected."""
        with self.assertRaises(ValueError):
            values = np.random.rand(3, 2)
            _ = Dirichlet(dims=2, values=values)

    def test_float_conversion(self):
        """Integer-valued input arrays are converted to float64."""
        values = np.array([2, 3])
        # BUG FIX: np.int was deprecated and removed in NumPy 1.24; check for
        # "any integer dtype" instead of comparing against the removed alias.
        self.assertTrue(np.issubdtype(values.dtype, np.integer))
        d = Dirichlet(values=values)
        self.assertEqual(d.values.dtype, np.float64)

    def test_init_dims_expand(self):
        """A 1-D dims list is expanded to a column shape."""
        d = Dirichlet(dims=[5])
        self.assertEqual(d.shape, (5, 1))

    def test_init_dims_int_expand(self):
        """A bare int dims argument is expanded to a column shape."""
        d = Dirichlet(dims=5)
        self.assertEqual(d.shape, (5, 1))

    def test_multi_factor_init_dims(self):
        """Nested dims create one array per factor."""
        d = Dirichlet(dims=[[5, 4], [4, 3]])
        self.assertEqual(d.shape, (2,))
        self.assertEqual(d[0].shape, (5, 4))
        self.assertEqual(d[1].shape, (4, 3))

    def test_multi_factor_init_values(self):
        """An object array of values creates one factor per element."""
        values_1 = np.random.rand(5, 4)
        values_2 = np.random.rand(4, 3)
        values = np.array([values_1, values_2])
        d = Dirichlet(values=values)
        self.assertEqual(d.shape, (2,))
        self.assertEqual(d[0].shape, (5, 4))
        self.assertEqual(d[1].shape, (4, 3))

    def test_multi_factor_init_values_expand(self):
        """1-D per-factor values are expanded to column shapes."""
        values_1 = np.random.rand(5)
        values_2 = np.random.rand(4)
        values = np.array([values_1, values_2])
        d = Dirichlet(values=values)
        self.assertEqual(d.shape, (2,))
        self.assertEqual(d[0].shape, (5, 1))
        self.assertEqual(d[1].shape, (4, 1))

    def test_normalize_multi_factor(self):
        """The mean of a multi-factor Dirichlet is a normalized Categorical."""
        values_1 = np.random.rand(5)
        values_2 = np.random.rand(4, 3)
        values = np.array([values_1, values_2])
        d = Dirichlet(values=values)
        normed = Categorical(values=d.mean(return_numpy=True))
        self.assertTrue(normed.is_normalized())

    def test_normalize_single_dim(self):
        """Uniform 1-D parameters yield a uniform mean."""
        values = np.array([1.0, 1.0])
        d = Dirichlet(values=values)
        expected_values = np.array([[0.5], [0.5]])
        self.assertTrue(np.array_equal(d.mean(return_numpy=True), expected_values))

    def test_normalize_two_dim(self):
        """Uniform 2-D parameters yield column-wise uniform means."""
        values = np.array([[1.0, 1.0], [1.0, 1.0]])
        d = Dirichlet(values=values)
        expected_values = np.array([[0.5, 0.5], [0.5, 0.5]])
        self.assertTrue(np.array_equal(d.mean(return_numpy=True), expected_values))

    def test_remove_zeros(self):
        """remove_zeros eliminates all exact-zero entries."""
        values = np.array([[1.0, 0.0], [1.0, 1.0]])
        d = Dirichlet(values=values)
        self.assertTrue((d.values == 0.0).any())
        d.remove_zeros()
        self.assertFalse((d.values == 0.0).any())

    def test_contains_zeros(self):
        """contains_zeros reports the presence of exact-zero entries."""
        values = np.array([[1.0, 0.0], [1.0, 1.0]])
        d = Dirichlet(values=values)
        self.assertTrue(d.contains_zeros())
        values = np.array([[1.0, 1.0], [1.0, 1.0]])
        d = Dirichlet(values=values)
        self.assertFalse(d.contains_zeros())

    """
    def test_entropy(self):
        values = np.random.rand(3, 2)
        entropy = -np.sum(values * np.log(values), 0)
        d = Dirichlet(values=values)
        self.assertTrue(np.array_equal(d.entropy(return_numpy=True), entropy))
    """

    def test_log(self):
        """log returns the element-wise natural logarithm of the parameters."""
        values = np.random.rand(3, 2)
        log_values = np.log(values)
        d = Dirichlet(values=values)
        self.assertTrue(np.array_equal(d.log(return_numpy=True), log_values))

    def test_copy(self):
        """copy produces an independent deep copy of the values."""
        values = np.random.rand(3, 2)
        d = Dirichlet(values=values)
        d_copy = d.copy()
        self.assertTrue(np.array_equal(d_copy.values, d.values))
        d_copy.values = d_copy.values * 2
        self.assertFalse(np.array_equal(d_copy.values, d.values))

    def test_ndim(self):
        """ndim mirrors the underlying array's ndim."""
        values = np.random.rand(3, 2)
        d = Dirichlet(values=values)
        self.assertEqual(d.ndim, d.values.ndim)

    def test_shape(self):
        """shape mirrors the underlying array's shape."""
        values = np.random.rand(3, 2)
        d = Dirichlet(values=values)
        self.assertEqual(d.shape, (3, 2))

    def test_expectation_single_factor(self):
        """ tests implementation of expect_log method against matlab version (single factor)
        """
        array_path = os.path.join(os.getcwd(), "tests/data/wnorm_a.mat")
        mat_contents = loadmat(file_name=array_path)
        result = mat_contents["result"]
        d = Dirichlet(values=mat_contents["A"])
        result_py = d.expectation_of_log(return_numpy=True)
        self.assertTrue(np.isclose(result, result_py).all())

    def test_expectation_multi_factor(self):
        """ tests implementation of expect_log method against matlab version (multi factor)
        """
        array_path = os.path.join(os.getcwd(), "tests/data/wnorm_b.mat")
        mat_contents = loadmat(file_name=array_path)
        result_1 = mat_contents["result_1"]
        result_2 = mat_contents["result_2"]
        d = Dirichlet(values=mat_contents["A"][0])
        result_py = d.expectation_of_log(return_numpy=True)
        self.assertTrue(
            np.isclose(result_1, result_py[0]).all() and np.isclose(result_2, result_py[1]).all()
        )
)
if __name__ == "__main__":
    # Allow executing this test module directly: python test_dirichlet.py
    unittest.main()
| 2.71875 | 3 |
mundo-2/condicoes/ex039.py | fln99/curso-python | 0 | 12771273 | <filename>mundo-2/condicoes/ex039.py
from datetime import date

# Current year is taken once, up front, like the original script.
current_year = date.today().year

# Banner
banner = '-+-' * 14
print(banner)
print('{:^40}'.format('Exército do Python'))
print('Consulte a situação de teu alistamento!')
print(banner)

# Age relative to the mandatory enlistment age of 18.
birth_year = int(input('Ano de seu nascimento: '))
age = current_year - birth_year

if age == 18:
    print('Você deve se alistar este ano! Não atrase ou será multado.')
elif age > 18:
    print('Você se alistou ou deveria ter se alistado a {} ano(s) atrás!'.format(age - 18))
else:
    print('Você deverá se alistar daqui {} ano(s)!'.format(18 - age))

print('Fique atento ao calendário!')
print('Deixe seu feedback no nosso site!')
tests/test_helpers.py | baurt/sqladmin | 319 | 12771274 | from sqladmin.helpers import secure_filename
def test_secure_filename(monkeypatch):
    """secure_filename must neutralize path tricks, unicode and special characters."""
    expectations = [
        ("My cool movie.mov", "My_cool_movie.mov"),
        ("../../../etc/passwd", "etc_passwd"),
        ("i contain cool \xfcml\xe4uts.txt", "i_contain_cool_umlauts.txt"),
        ("__filename__", "filename"),
        ("foo$&^*)bar", "foobar"),
    ]
    for dangerous, sanitized in expectations:
        assert secure_filename(dangerous) == sanitized
| 2.34375 | 2 |
test/test_log.py | S3DEV/utils3 | 0 | 12771275 | <gh_stars>0
"""------------------------------------------------------------------------------------------------
Program: test_log
Purpose: Unit test for utils.log
Dependents: utils.log
Developer: <NAME>
Email: <EMAIL>
Use: > cd /package_root/test
> python test_log.py
---------------------------------------------------------------------------------------------------
UPDATE LOG:
Date Programmer Version Update
05.03.18 <NAME> 0.0.1 Permanently branched for Python 3 from the Python 2.7
utils module.
05.03.18 <NAME> 0.0.2 BUG01: "ResourceWarning: unclosed file <_io.TextIOWrapper \
name='nul' mode='w' encoding='cp1252'>" thrown on:
text = [line.strip() for line in open(log_path, 'r')
.readlines()]
FIX01: Replaced statement with 'with' statement, so the
file is closed automatically on completion.
------------------------------------------------------------------------------------------------"""
import os
import re
import unittest
from utils3.log import Log
class TestLog(unittest.TestCase):
    """Unit test for utils3.log.Log."""
    def test_log(self):
        """Write a header, one auto-filled entry and a blank line; verify file content."""
        # VARIABLES
        log_path = 'c:/temp/utils_log_unittest.log'
        header = 'datetime,host,user,text'
        entry = 'this is a test'
        # Regex for an auto-filled entry: microsecond timestamp, host, user, then the text.
        pattern_entry = r'([0-9]{4}-[0-9]{2}-[0-9]{2}\s[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{6},' \
                        '[0-9a-zA-Z].*,' \
                        '[0-9a-zA-Z].*,' + entry + ')'
        exp = re.compile(pattern_entry)
        # INSTANTIATE LOG CLASS
        _log = Log(filepath=log_path, autofill=True, printheader=True,
                   headertext=header)
        # WRITE LOG FILE
        _log.write(text=entry)
        _log.write_blank_line()
        # READ IN LOG FILE
        with open(log_path, 'r') as f:
            text = [line.strip() for line in f.readlines()]
        # TEST HEADER
        self.assertTrue(text[0] == header)
        # TEST LOG ENTRY AGAINST REGEX
        self.assertTrue(len(exp.findall(text[1])) == 1)
        # TEST FOR BLANK LINE (WRITTEN BY write_blank_line() METHOD)
        self.assertTrue(text[2] == '')
        # DELETE THE LOG FILE
        if os.path.exists(log_path): os.remove(log_path)
# ----------------------------------------------------------------------
# MAIN PROGRAM CONTROLLER
def main():
    """Entry point: hand control to the unittest CLI runner."""
    unittest.main()


if __name__ == '__main__':
    main()
| 2.15625 | 2 |
dswizard/core/model.py | Ennosigaeon/dswizzard | 0 | 12771276 | <filename>dswizard/core/model.py
from __future__ import annotations
import re
from collections import namedtuple
from enum import Enum
from typing import Optional, List, TYPE_CHECKING, Tuple, Union, Any, Dict
import joblib
import numpy as np
import openml
from ConfigSpace import ConfigurationSpace
from ConfigSpace.configuration_space import Configuration
from ConfigSpace.read_and_write import json as config_json
from openml import OpenMLClassificationTask
from sklearn.base import BaseEstimator
import dswizard.components.util as comp_util
from dswizard.components.base import EstimatorComponent
from dswizard.components.meta_features import MetaFeatureFactory
from dswizard.util import util
if TYPE_CHECKING:
from dswizard.pipeline.pipeline import FlexiblePipeline
from dswizard.components.meta_features import MetaFeatures
class StatusType(Enum):
    """Class to define numbers for status types

    Outcome codes for candidate evaluations; serialized by *name* in
    Result.as_dict and parsed back via StatusType[name] in Result.from_dict.
    """
    SUCCESS = 1
    TIMEOUT = 2
    CRASHED = 3
    ABORT = 4
    MEMOUT = 5
    CAPPED = 6
    INEFFECTIVE = 7
    DUPLICATE = 8
# A lightweight (hash, idx) pair. A namedtuple -- rather than a class -- so
# instances can be pickled cheaply and shared between processes.
ConfigKey = namedtuple('ConfigKey', ['hash', 'idx'])
class MetaInformation:
    """Bookkeeping record for one optimization run.

    The constructor captures everything known before the run starts; the
    remaining attributes are filled in after the optimization has finished.
    """

    def __init__(self,
                 start_time: float,
                 metric: str,
                 openml_task: int,
                 openml_fold: int,
                 data_file: str,
                 config: Dict[str, Any]
                 ):
        # Information available before optimization
        self.start_time = start_time
        self.metric = metric
        # Sign convention: util.metric_sign(...) == 1 marks a minimization metric
        self.is_minimization = util.metric_sign(self.metric) == 1
        self.openml_task = openml_task
        self.openml_fold = openml_fold
        self.data_file = data_file
        self.config = config

        # Information available after optimization
        self.end_time: Optional[float] = None
        self.n_structures: Optional[int] = None
        self.n_configs: Optional[int] = None
        self.iterations: Optional[Dict] = None
        self.incumbent: Optional[float] = None

    def as_dict(self):
        """Serialize all fields to a plain, JSON-friendly dict."""
        return {
            'start_time': self.start_time,
            'metric': self.metric,
            'is_minimization': self.is_minimization,
            'openml_task': self.openml_task,
            'openml_fold': self.openml_fold,
            'end_time': self.end_time,
            'n_structures': self.n_structures,
            'n_configs': self.n_configs,
            'iterations': self.iterations,
            'data_file': self.data_file,
            'incumbent': self.incumbent,
            'config': self.config
        }
class CandidateId:
    """Unique identifier for a configuration.

    Conceptually the triplet ``(iteration, structure, config)``; ``config`` may
    be omitted (``None``) to address the structure itself, e.g. (x, x, 0) was
    sampled before (x, x, 1).
    """

    def __init__(self, iteration: int, structure: int, config: Union[int, str] = None):
        """
        :param iteration: iteration of the optimization algorithm, e.g. one round
            of Successive Halving for Hyperband
        :param structure: running index (>= 0) in sampling order
        :param config: index of the sampled configuration within the structure
        """
        self.iteration = iteration
        self.structure = structure
        self.config = config

    def as_tuple(self):
        """Return the identifier as a plain ``(iteration, structure, config)`` tuple."""
        return self.iteration, self.structure, self.config

    def with_config(self, config: Union[int, str]) -> 'CandidateId':
        """Derive a new id sharing iteration/structure but pointing at *config*."""
        return CandidateId(self.iteration, self.structure, config)

    def without_config(self) -> 'CandidateId':
        """Derive a new id addressing only the structure (config part stripped)."""
        return CandidateId(self.iteration, self.structure)

    @property
    def external_name(self):
        """Human-readable, zero-padded representation, e.g. ``'01:02:03'``."""
        parts = [self.iteration, self.structure]
        if self.config is not None:
            parts.append(self.config)
        return ':'.join('{:02d}'.format(part) for part in parts)

    def __repr__(self):
        return str(self)

    def __str__(self):
        return str(self.as_tuple())

    def __hash__(self):
        return hash(self.as_tuple())

    def __eq__(self, other):
        # Accept both CandidateId instances and raw tuples for comparison
        if isinstance(other, CandidateId):
            other = other.as_tuple()
        if isinstance(other, tuple):
            return self.as_tuple() == other
        return False

    def __lt__(self, other):
        return self.as_tuple() < other.as_tuple()

    @staticmethod
    def parse(cid: str) -> 'CandidateId':
        """Inverse of :attr:`external_name`: build an id from ``'II:SS[:CC]'``."""
        return CandidateId(*(int(token) for token in cid.split(':')))

    @staticmethod
    def from_model_file(name: str) -> 'CandidateId':
        """Extract an id from a model file name containing ``'<i>-<s>-<c>'``."""
        digits = re.search(r'(\d+-\d+-\d+)', name).group(1)
        return CandidateId(*(int(d) for d in digits.split('-')))
class Runtime:
    """Timing information recorded for a single evaluation."""

    def __init__(self, training_time: float, timestamp: float):
        self.training_time = training_time
        self.timestamp = timestamp

    def as_dict(self):
        """Serialize to a plain dict (inverse of :meth:`from_dict`)."""
        return {'training_time': self.training_time, 'timestamp': self.timestamp}

    @staticmethod
    def from_dict(raw: Dict) -> 'Optional[Runtime]':
        """Reconstruct a :class:`Runtime`; ``None`` input yields ``None``."""
        return None if raw is None else Runtime(**raw)
class Result:
    """Outcome of evaluating a single configuration."""

    def __init__(self,
                 cid: CandidateId,
                 status: Optional[StatusType] = None,
                 config: Configuration = None,
                 loss: Optional[float] = None,
                 structure_loss: Optional[float] = None,
                 runtime: Runtime = None,
                 partial_configs: Optional[List[PartialConfig]] = None,
                 transformed_X: np.ndarray = None):
        self.cid = cid
        self.status = status
        self.config = config

        # structure_loss can be used if a dedicated loss for structure search is necessary
        if structure_loss is None:
            structure_loss = loss

        self.loss = loss
        self.structure_loss = structure_loss
        self.runtime = runtime
        self.transformed_X = transformed_X

        if partial_configs is None:
            partial_configs = []
        self.partial_configs: List[PartialConfig] = partial_configs
        # Set externally once the fitted model has been persisted
        self.model_file: Optional[str] = None

    def as_dict(self, budget: float = None, loss_sign: float = 1):
        """Serialize to a plain dict; the stored loss is multiplied by *loss_sign* on export."""
        d = {
            'model_file': self.model_file,
            'id': self.cid.external_name,
            'status': self.status.name,
            'loss': self.loss * loss_sign,
            'structure_loss': self.structure_loss,  # by definition always a min. problem, no need to adjust sign
            'runtime': self.runtime.as_dict() if self.runtime is not None else None,
            'config': self.config.get_dictionary(),
            'origin': self.config.origin if self.config is not None else None,
        }
        if budget is not None:
            d['budget'] = budget
        return d

    @staticmethod
    def from_dict(raw: Dict, cs: ConfigurationSpace) -> 'Result':
        """Inverse of as_dict; *cs* is required to rebuild the Configuration."""
        config = Configuration(cs, raw['config'])
        config.origin = raw['origin']
        return Result(CandidateId.parse(raw['id']), StatusType[raw['status']], config,
                      raw['loss'], raw['structure_loss'], Runtime.from_dict(raw['runtime']))
class CandidateStructure:
    """A candidate pipeline structure together with all results evaluated for it."""

    def __init__(self,
                 configspace: Optional[ConfigurationSpace],
                 pipeline: FlexiblePipeline,
                 cfg_keys: List[ConfigKey],
                 budget: float = 1):
        self.configspace = configspace
        self.pipeline = pipeline
        self.cfg_keys = cfg_keys
        self.budget = budget

        # Set later (presumably by the optimizer); starts unassigned
        # noinspection PyTypeChecker
        self.cid: CandidateId = None
        self.status: str = 'QUEUED'
        self.results: List[Result] = []

    def get_incumbent(self) -> Optional[Result]:
        """Return the result with the lowest loss, or None if nothing was evaluated yet."""
        if len(self.results) == 0:
            return None
        return min(self.results, key=lambda res: res.loss)

    def add_result(self, result: Result):
        """Append an evaluation result for this structure."""
        self.results.append(result)

    def __hash__(self):
        # Identity is defined by the configuration space only
        return hash(self.configspace)

    def __eq__(self, other: object) -> bool:
        if isinstance(other, CandidateStructure):
            return self.configspace == other.configspace
        return False

    @property
    def steps(self):
        """Shortcut to the wrapped pipeline's steps."""
        return self.pipeline.steps

    def as_dict(self):
        """Serialize to a JSON-friendly dict (inverse of from_dict)."""
        return {
            'cid': self.cid.without_config().external_name,
            'pipeline': comp_util.serialize(self.pipeline),
            'cfg_keys': [(key.hash, key.idx) for key in self.cfg_keys],
            'budget': self.budget,
            'configspace': config_json.write(self.configspace) if self.configspace is not None else None,
        }

    def is_proxy(self):
        """True for placeholder instances created via proxy()."""
        return self.configspace is None and self.pipeline is None and self.cfg_keys is None

    @staticmethod
    def from_dict(raw: Dict) -> 'CandidateStructure':
        """Reconstruct a CandidateStructure from its as_dict representation."""
        # noinspection PyTypeChecker
        cs = CandidateStructure(config_json.read(raw['configspace']), None, raw['cfg_keys'], raw['budget'])
        cs.cid = CandidateId.parse(raw['cid'])
        cs.pipeline = comp_util.deserialize(**raw['pipeline'])
        cs.cfg_keys = [ConfigKey(*t) for t in raw['cfg_keys']]
        return cs

    @staticmethod
    def proxy() -> 'CandidateStructure':
        """Create an empty placeholder instance (see is_proxy)."""
        # noinspection PyTypeChecker
        return CandidateStructure(None, None, None)
class Job:
    """Base class for units of work.

    The timing attributes and the result start out as None; presumably they
    are filled in as the job progresses -- not visible from this file.
    """
    # noinspection PyTypeChecker
    def __init__(self, cid: CandidateId, cutoff: float = None):
        self.cid = cid
        self.time_submitted: float = None
        self.time_started: float = None
        self.time_finished: float = None
        self.result: Result = None
        # Optional evaluation cutoff; unit not evident here -- presumably seconds
        self.cutoff = cutoff
class EvaluationJob(Job):
    """Job that evaluates one concrete configuration of a structure or a single component."""

    def __init__(self,
                 ds: Dataset,
                 candidate_id: CandidateId,
                 cs: Union[CandidateStructure, EstimatorComponent],
                 cutoff: float = None,
                 config: Optional[Configuration] = None,
                 cfg_keys: Optional[List[ConfigKey]] = None):
        super().__init__(candidate_id, cutoff)
        self.ds: Dataset = ds
        self.cs: Union[CandidateStructure, EstimatorComponent] = cs
        self.config = config
        self.cfg_keys = cfg_keys

    # Decorator pattern only used for better readability
    @property
    def component(self) -> Union[BaseEstimator, FlexiblePipeline]:
        """The actual estimator: the structure's pipeline, or the bare component itself."""
        if isinstance(self.cs, CandidateStructure):
            return self.cs.pipeline
        else:
            return self.cs
class StructureJob(Job):
    """Job that evaluates a whole candidate pipeline structure on a dataset."""

    def __init__(self, ds: Dataset, cs: CandidateStructure, cutoff: float = None):
        # The job id addresses the structure only (config part stripped)
        super().__init__(cs.cid.without_config(), cutoff)
        self.ds = ds
        self.cs = cs
class Dataset:
    """Bundle of feature matrix, labels, evaluation metric and task meta data."""

    def __init__(self,
                 X: np.ndarray,
                 y: np.ndarray,
                 metric: str = 'f1',
                 cutoff: int = 120,
                 task: int = None,
                 fold: int = None,
                 feature_names: List[str] = None):
        """
        :param X: feature matrix
        :param y: target vector
        :param metric: metric name; must appear in util.valid_metrics
        :param cutoff: timeout bound, also used for the meta-feature calculation
        :param task: OpenML task id (optional)
        :param fold: OpenML fold index (optional)
        :param feature_names: column names of X (optional)
        :raises KeyError: if *metric* is unknown
        """
        self.X = X
        self.y = y
        if metric not in util.valid_metrics:
            raise KeyError(f'Unknown metric {metric}')
        self.metric = metric
        self.cutoff = cutoff
        # Meta features are computed eagerly; the cutoff bounds the calculation time
        self.mf_dict, self.meta_features = MetaFeatureFactory.calculate(X, y, timeout=self.cutoff)
        self.task = task
        self.fold = fold
        self.feature_names = feature_names

    def store(self, file_name: str):
        """Persist (X, y, feature_names) to *file_name* via joblib."""
        joblib.dump((self.X, self.y, self.feature_names), file_name)

    @staticmethod
    def from_openml(task: int, fold: int, metric: str):
        """Download an OpenML classification task and return a (train, test) Dataset pair."""
        # noinspection PyTypeChecker
        task: OpenMLClassificationTask = openml.tasks.get_task(task)
        train_indices, test_indices = task.get_train_test_split_indices(fold=fold)
        X, y = task.get_X_and_y(dataset_format='dataframe')

        X_train = X.values[train_indices, :]
        y_train = y.values[train_indices]
        X_test = X.values[test_indices, :]
        y_test = y.values[test_indices]
        feature_names = X.columns.tolist()

        ds = Dataset(X_train, y_train, metric=metric, task=task.task_id, fold=fold, feature_names=feature_names)
        ds_test = Dataset(X_test, y_test, metric=metric, task=task.task_id, fold=fold, feature_names=feature_names)
        return ds, ds_test
class PartialConfig:
    """Configuration of a single pipeline step plus the meta-features it was sampled for."""

    def __init__(self, cfg_key: Tuple[float, int],
                 configuration: Configuration,
                 name: str,
                 mf: Optional[MetaFeatures]):
        self.cfg_key = cfg_key
        self.config: Configuration = configuration
        self.name = name

        if mf is None:
            # Fall back to a 1x1 zero matrix when no meta features are available
            mf = np.zeros((1, 1))
        self.mf = mf

    def is_empty(self):
        """True if the underlying configuration space has no hyperparameters."""
        # noinspection PyUnresolvedReferences
        return len(self.config.configuration_space.get_hyperparameters()) == 0

    def as_dict(self):
        # meta features are serialized as plain nested lists (JSON friendly)
        # noinspection PyUnresolvedReferences
        return {
            'config': self.config.get_array().tolist(),
            'configspace': config_json.write(self.config.configuration_space),
            'cfg_key': self.cfg_key,
            'name': self.name,
            'mf': self.mf.tolist()
        }

    @staticmethod
    def from_dict(raw: Dict, origin: str) -> 'PartialConfig':
        """Inverse of as_dict; *origin* is recorded on the reconstructed Configuration."""
        config = Configuration(config_json.read(raw['configspace']), vector=np.array(raw['config']))
        config.origin = origin
        # noinspection PyTypeChecker
        return PartialConfig(raw['cfg_key'], config, raw['name'], np.array(raw['mf']))

    def __eq__(self, other):
        # Identity is based solely on the step name
        if isinstance(other, PartialConfig):
            return self.name == other.name
        else:
            return self.name == other

    def __hash__(self):
        return hash(self.name)
| 1.945313 | 2 |
modeler/api/__init__.py | yngtodd/modeler | 0 | 12771277 | <gh_stars>0
from .node import (
Node, HierarchicalNode
)
from .interaction import Interaction
from .scene import Scene
| 1.148438 | 1 |
erase-by-hist.py | todorokit/tensorflow_cnn_image_sample | 0 | 12771278 | import os, sys, time, re
import cv2
import deeptool
# Accumulated (duplicate_path, matched_path, score) triples found so far.
sameImages = []
# Per-directory cache: file path -> (histogram, numeric file id); None = empty.
cachedImages = None
def isSameImage(imghist, checkhist):
    """Decide whether two colour histograms belong to the same image.

    Returns (is_same, scores) where scores holds all four OpenCV
    histogram-comparison metrics (correlation, chi-square, intersection,
    Bhattacharyya) for diagnostics; only the first two drive the decision.
    """
    correlation = cv2.compareHist(imghist, checkhist, 0)
    chi_square = cv2.compareHist(imghist, checkhist, 1)
    intersection = cv2.compareHist(imghist, checkhist, 2)
    bhattacharyya = cv2.compareHist(imghist, checkhist, 3)
    same = correlation > 0.995 and chi_square < 300
    return same, (correlation, chi_square, intersection, bhattacharyya)
def eraseByHist(file):
    """Register `file`'s colour histogram in the global cache, recording it in
    `sameImages` when it matches a cached image with a nearby sequence number."""
    global sameImages, cachedImages
    img = cv2.imread(file)
    img = cv2.resize(img, (64, 64))
    # 8x8x8-bin BGR histogram of the downscaled image.
    imghist = cv2.calcHist([img], [0, 1, 2], None, [8, 8, 8], [0,256,0,256,0,256])
    reg = re.compile(r"(\d+).(png|jpe?g)$")
    sys.stdout.flush()
    # NOTE(review): if the file name has no trailing number before the
    # extension, `m` is None and the next line raises AttributeError.
    m = reg.search(file)
    fileId = int(m.group(1))
    if cachedImages is None:
        cachedImages = {file: (imghist, fileId)}
    else:
        appended = False
        score = (0, 0, 0, 0)
        for cachedfile in cachedImages:
            cachedImgHist, cachedFileId = cachedImages[cachedfile]
            isSame, score = isSameImage(imghist, cachedImgHist)
            # Only treat as duplicate when the sequence numbers are within
            # 210 of each other (frames captured close together).
            if isSame and fileId < cachedFileId + 210:
                sameImages.append((file, cachedfile, score))
                appended = True
                break
        if not appended:
            cachedImages[file] = (imghist, fileId)
def main(args):
    """Scan a directory tree for near-duplicate images and delete them.

    args: argv-style list; args[1] must be a directory. Images directly in
    the directory share one histogram cache; each subdirectory resets the
    cache. Duplicates collected in `sameImages` are unlinked at the end.
    """
    global sameImages, cachedImages
    if len(args) == 1:
        print(args[0]+" dirname")
        exit()
    cachedImages = None
    # Guard clause instead of a trailing else for the non-directory case.
    if not os.path.isdir(args[1]):
        print(args[0] + " dirname")
        exit()

    def iter_images(root):
        # Yield every image path; reset the duplicate cache per subdirectory.
        global cachedImages
        for entry in deeptool.listDir(root):
            if os.path.isdir(entry):
                print("--" + entry + "--")
                sys.stdout.flush()
                cachedImages = None
                for sub in deeptool.listDir(entry):
                    yield sub
            else:
                yield entry

    start = time.time()
    i = 0
    for path in iter_images(args[1]):
        eraseByHist(path)
        i = i + 1
        # Throughput report every 1000 images (previously duplicated
        # verbatim in both the file and subdirectory branches).
        if i % 1000 == 0:
            end = time.time()
            print(" %g data / sec" % (1000 / (end - start)))
            start = end

    print("--------------------------------")
    for file, matchfile, score in sameImages:
        os.unlink(file)
        print(file, matchfile, score)

main(sys.argv)
rasa_nlu/tokenizers/yaha_tokenizer.py | hetaomilk123/Rasa_NLU_Chi | 1,304 | 12771279 | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 23 14:54:35 2017
@author: user
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from typing import Any
from typing import Dict
from typing import List
from typing import Text
from rasa_nlu.config import RasaNLUConfig
from rasa_nlu.tokenizers import Tokenizer, Token
from rasa_nlu.components import Component
from rasa_nlu.training_data import Message
from rasa_nlu.training_data import TrainingData
import sys
from yaha import Cuttor
reload(sys)
sys.setdefaultencoding('utf-8')
class YahaTokenizer(Tokenizer, Component):
    """Rasa NLU tokenizer backed by the `yaha` Chinese word segmenter."""

    # Component name used in the Rasa pipeline configuration.
    name = "tokenizer_yaha"

    # This component attaches a "tokens" attribute to each message/example.
    provides = ["tokens"]

    # Shared segmenter instance, reused across messages.
    cuttor = Cuttor()

    def __init__(self):
        pass

    @classmethod
    def required_packages(cls):
        # type: () -> List[Text]
        """Python packages that must be installed for this component."""
        return ["yaha"]

    def train(self, training_data, config, **kwargs):
        # type: (TrainingData, RasaNLUConfig, **Any) -> None
        """Tokenize every training example; only valid for language 'zh'."""
        if config['language'] != 'zh':
            raise Exception("tokenizer_yaha is only used for Chinese. Check your configure json file.")

        for example in training_data.training_examples:
            example.set("tokens", self.tokenize(example.text))

    def process(self, message, **kwargs):
        # type: (Message, **Any) -> None
        """Tokenize an incoming message at prediction time."""
        message.set("tokens", self.tokenize(message.text))

    def tokenize(self, text):
        # type: (Text) -> List[Token]
        """Segment `text` with yaha (search mode) into Rasa Token objects."""
        # Python 2 code path: byte strings are decoded before segmentation.
        tokenized = self.cuttor.tokenize(text.decode('utf-8'), search=True)
        tokens = [Token(word, start) for (word, start, end) in tokenized]
        return tokens
| 2.53125 | 3 |
microcosm_pubsub/chain/__init__.py | Sinon/microcosm-pubsub | 5 | 12771280 | <reponame>Sinon/microcosm-pubsub<filename>microcosm_pubsub/chain/__init__.py
from microcosm_pubsub.chain.chain import Chain # noqa: F401
from microcosm_pubsub.chain.decorators import binds, extracts # noqa: F401
from microcosm_pubsub.chain.statements import ( # noqa: F401
assign,
assign_constant,
assign_function,
call,
extract,
for_each,
switch,
try_chain,
when,
)
| 1.382813 | 1 |
pyscripts/train_ae.py | udion/mscopy_AE | 0 | 12771281 | from models import *
from utils import *
from tensorboard_logger import configure, log_value
import os
# Ensure the TensorBoard log directory exists (ignore "already exists").
try:
    os.makedirs('../train_logs')
except OSError:
    pass

# Pretrained VGG19 feature extractor for the perceptual loss term, on GPU.
vgg19_exc = VGG19_extractor(torchvision.models.vgg19(pretrained=True))
vgg19_exc = vgg19_exc.cuda()
# Autoencoder: 10-residual-block encoder/decoder pair, moved to GPU.
E1 = Encoder(n_res_blocks=10)
D1 = Decoder(n_res_blocks=10)
A = AE(E1, D1)
A = A.cuda()
def train_ae(model, modelName, batchsz):
    """Train autoencoder `model` on ../sample_dataset with an L2 + VGG19
    perceptual reconstruction loss, logging epoch means to TensorBoard and
    saving weights to <modelName>.pth.

    :param model: the autoencoder (already on GPU)
    :param modelName: base name for the log directory and checkpoint file
    :param batchsz: mini-batch size for train and test loaders
    :return: loss_track (NOTE(review): never appended to; returned empty)
    """
    ########## logging stuff
    configure('../train_logs/'+modelName+'_bsz{}'.format(batchsz), flush_secs=5)
    print('I configured .. ')
    ########################

    def mynorm2(x):
        # Min-max normalise a tensor to [0, 1]; identity when the range is
        # nearly zero, to avoid dividing by ~0.
        m1 = torch.min(x)
        m2 = torch.max(x)
        if m2-m1 < 1e-6:
            return x
        else:
            return (x-m1)/(m2-m1)

    # Random 121x121 crops, converted to tensors and min-max normalised.
    mytransform2 = transforms.Compose(
        [transforms.RandomCrop((121,121)),
         transforms.ToTensor(),
         transforms.Lambda( lambda x : mynorm2(x) )])
    trainset = dsets.ImageFolder(root='../sample_dataset/train/',transform=mytransform2)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batchsz, shuffle=True, num_workers=2)
    testset = dsets.ImageFolder(root='../sample_dataset/test/',transform=mytransform2)
    testloader = torch.utils.data.DataLoader(testset, batch_size=batchsz, shuffle=True, num_workers=2)

    # Fixed test batch, used only by the eval_model helper below.
    testiter = iter(testloader)
    testX, _ = next(testiter)

    def eval_model(model):
        # Display one fixed test batch next to its reconstruction.
        X = testX
        print('input looks like ...')
        plt.figure()
        imshow(torchvision.utils.make_grid(X))
        X = Variable(X).cuda()
        Y = model(X)
        print('output looks like ...')
        plt.figure()
        imshow2(torchvision.utils.make_grid(Y.data.cpu()))

    nepoch = 500
    Criterion2 = nn.MSELoss()
    Criterion1 = nn.L1Loss()  # NOTE(review): unused below
    optimizer = optim.Adam(model.parameters(), lr=1e-5)
    loss_track = []
    for eph in range(nepoch):
        dataloader = iter(trainloader)
        print('starting epoch {} ...'.format(eph))
        mean_L2_term = 0
        mean_vl3_term = 0
        mean_total_loss = 0
        tot_count = 0
        for i, (X, _) in enumerate(dataloader):
            tot_count += X.size()[0]
            X = Variable(X).cuda()
            optimizer.zero_grad()
            reconX = model(X)
            # Pixel-space reconstruction loss.
            l2 = Criterion2(reconX, X)
            # Perceptual loss on the third VGG19 feature map; targets are
            # detached so gradients only flow through the reconstruction.
            t1, t2, t3 = vgg19_exc(X)
            rt1, rt2, rt3 = vgg19_exc(reconX)
            t3 = Variable(t3.data)
            rt3 = Variable(rt3.data)
            vl3 = Criterion2(rt3, t3)
            reconTerm = 10*l2 + vl3
            loss = reconTerm
            loss.backward()
            optimizer.step()
            mean_L2_term += l2.data[0]
            mean_vl3_term += vl3.data[0]
            mean_total_loss += loss.data[0]
        # NOTE(review): losses are batch means, so dividing the sum by the
        # sample count (not the batch count) rescales them — confirm intent.
        mean_L2_term /= tot_count
        mean_vl3_term /= tot_count
        mean_total_loss /= tot_count
        log_value('L2_term', mean_L2_term, eph)
        log_value('vl3_term', mean_vl3_term, eph)
        log_value('total_loss', mean_total_loss, eph)
        print('epoch:{}, mean_L2term:{}, mean_vl3: {}, mean_reconTerm: {}'.format(eph, mean_L2_term, mean_vl3_term, mean_total_loss))
        # Checkpoint after every epoch (overwrites the same file).
        save_model(model, modelName+'.pth')
    return loss_track
| 2.109375 | 2 |
mcts_a.py | balaz94/adaptive-rl | 0 | 12771282 | import torch
import math
class Node:
    """A search-tree node holding per-legal-action statistics for MCTS."""

    def __init__(self, state, probs, value, length, moves, terminal=False):
        self.state = state
        # Indices of legal actions, shape (num_legal, 1).
        self.moves = torch.nonzero(moves)
        # Prior over legal actions, renormalised to sum to 1.
        legal_priors = probs[self.moves].view(-1)
        self.P = legal_priors / legal_priors.sum()
        self.value = value
        self.length = length
        num_legal, _ = self.moves.shape
        self.size = num_legal
        # Per-action visit counts, value estimates and length estimates.
        self.N = torch.zeros(num_legal, dtype=torch.int32)
        self.Q = torch.zeros(num_legal)
        self.L = torch.zeros(num_legal)
        self.T = terminal
        self.children = {}

    def getProbs(self, size):
        """Visit-count distribution, scattered into a length-`size` vector."""
        visit_dist = self.N.float() / self.N.sum()
        full = torch.zeros(size)
        full[self.moves.view(-1)] = visit_dist
        return full
class MCTS:
    """Monte-Carlo tree search with a length-aware selection bonus.

    Selection blends the standard PUCT score with a length term weighted by
    `beta`; the blend factor `alpha` grows linearly with the current step
    relative to `max_steps`.
    """

    def __init__(self, cpuct, beta, max_steps):
        self.cpuct = cpuct        # exploration constant in the PUCT term
        self.beta = beta          # weight of the length bonus
        self.max_steps = max_steps * 1.0  # float, used as the alpha denominator
        self.nodes = {}
        self.root = None
        self.current_node = None
        # Path of (node, action_index) pairs from the last selection,
        # ordered leaf-to-root for backup.
        self.parents = []

    def set_parents(self, parents):
        self.parents = parents

    def selection(self, step):
        """Walk from the root to a leaf, storing the path in self.parents.

        Sets self.current_node to the reached terminal node, or None when a
        new child still has to be expanded.
        """
        # If the root has any unvisited action, select the first one directly.
        game_indicies = torch.nonzero(self.root.N == 0)
        for index in game_indicies:
            self.parents = [(self.root, index)]
            self.current_node = None
            return
        parents = []
        node = self.root
        # NOTE(review): best_player is never toggled in this loop, so the
        # first branch is always taken — confirm whether alternation was intended.
        best_player = True
        while True:
            if node.T == True:
                # Terminal node reached: hand back the reversed path.
                parents.reverse()
                self.parents = parents
                self.current_node = node
                return
            N_sum = node.N.sum().item()
            sq = math.sqrt(float(N_sum))
            if best_player:
                # Blend PUCT (b) with the length-bonus score (c) by alpha.
                alpha = step / self.max_steps
                if N_sum > 0:
                    b = node.Q + self.cpuct * node.P * sq / (1.0 + node.N)
                    c = node.Q + self.beta * node.L
                    u = alpha * b + (1 - alpha) * c
                    index = torch.argmax(u).item()
                else:
                    index = torch.argmax(node.P).item()
            else:
                # Plain PUCT selection for the opponent.
                if N_sum > 0:
                    u = node.Q + self.cpuct * node.P * sq / (1.0 + node.N)
                    index = torch.argmax(u).item()
                else:
                    index = torch.argmax(node.P).item()
            parents.append((node, index))
            if index in node.children:
                node = node.children[index]
            else:
                # Unexpanded child: stop here; caller will expand it.
                parents.reverse()
                self.parents = parents
                self.current_node = None
                return
            step += 1

    def backup(self, node, parents):
        """Propagate `node`'s value/length up the (leaf-to-root) path,
        negating the value at every level for the alternating player."""
        v = node.value
        l = node.length
        for parent, i in parents:
            v = - v
            count = parent.N[i] + 1
            # Running means of value (Q) and remaining length (L).
            parent.Q[i] = (parent.N[i] * parent.Q[i] + v) / count
            parent.L[i] = (parent.N[i] * parent.L[i] + l) / count
            parent.N[i] = count
            l -= 1
| 2.765625 | 3 |
CS1_BostonCollege/HW2_CS1/temperature.py | gonzalosc2/LearningPython | 0 | 12771283 | # author: <NAME>
# assigment: Homework #2
# description: contains three functions
# First function:
# Input: temperature value in degrees Centigrade
# Output: temperature value in degrees Fahrenheit
# Second function:
# Input: temperature value in degrees Fahrenheit
# Output: temperature value in degrees Centigrade
# Third function:
# Input: temperature value in degrees Fahrenheit and wind speed in mph
# Output: wind chill factor for those parameters
#Converts a Centigrade temperature to a Fahrenheit temperature
def centigrade_to_fahrenheit(T_c):
    """Convert a temperature in degrees Centigrade to degrees Fahrenheit."""
    fahrenheit = 9/5 * T_c + 32
    return fahrenheit
#Converts a Fahrenheit temperature to a Centigrade temperature
def fahrenheit_to_centigrade(T_f):
    """Convert a temperature in degrees Fahrenheit to degrees Centigrade."""
    centigrade = 5/9 * (T_f - 32)
    return centigrade
#Calculates a wind chill factor (the "old" one)
def wind_chill_factor(TEMPERATURE, WIND):
    """Wind chill (old formula) for TEMPERATURE in deg F and WIND in mph."""
    wind_term = 3.71 * WIND**0.5 + 5.81 - 0.25 * WIND
    return 0.0817 * wind_term * (TEMPERATURE - 91.4) + 91.4
| 4.1875 | 4 |
app/models/search.py | adrianruizmora/c-ster | 0 | 12771284 | # Third-party Libraries
from flask_restful import Resource, reqparse
import pyvo
from astropy.io.votable import parse
class Search(Resource):
    """REST resource exposing whitelisted Virtual Observatory TAP queries."""

    def __init__(self) -> None:
        super().__init__()
        # TAP endpoint of the Paris planetology virtual observatory.
        self.service = pyvo.dal.TAPService("http://voparis-tap-planeto.obspm.fr/tap")

    def get(self, database):
        """Dispatch GET to the handler registered for `database`.

        Returns the handler's result, or None for unknown databases.
        """
        # BUGFIX: the dict previously mapped to `self.exoplanet()` — calling
        # the handler eagerly while *building* the whitelist, which executed
        # the remote query even when `database` was not whitelisted. Map to
        # the bound method and only call it after the membership check.
        whitelist_databases = {
            "exoplanet.epn_core": self.exoplanet
        }
        handler = whitelist_databases.get(database)
        if handler is not None:
            return handler()

    def exoplanet(self):
        """Return {target_name: {star_name, star_distance}} for exoplanets at
        the star distance given by the `star_distance` query argument."""
        parser = reqparse.RequestParser()
        parser.add_argument("star_distance", required=True, location="args")
        args = parser.parse_args()
        star_distance = args["star_distance"]

        service = self.service
        # NOTE(review): `star_distance` comes straight from the request and
        # is interpolated into the ADQL string — validate it (e.g. float())
        # to avoid query injection.
        query = "SELECT * FROM exoplanet.epn_core WHERE star_distance = %s" % (star_distance,)
        results = service.search(query)

        response = {}
        for result in range(len(results)):
            target_name = results[result].get("target_name")
            star_name = results[result].get("star_name")
            star_distance = results[result].get("star_distance")
            response[target_name] = {
                "star_name": star_name,
                "star_distance": star_distance
            }
        return response
| 2.6875 | 3 |
plugin.video.rebirth/resources/lib/modules/cache.py | TheWardoctor/wardoctors-repo | 1 | 12771285 | # -*- coding: utf-8 -*-
################################################################################
# | #
# | ______________________________________________________________ #
# | :~8a.`~888a:::::::::::::::88......88:::::::::::::::;a8~".a88::| #
# | ::::~8a.`~888a::::::::::::88......88::::::::::::;a8~".a888~:::| #
# | :::::::~8a.`~888a:::::::::88......88:::::::::;a8~".a888~::::::| #
# | ::::::::::~8a.`~888a::::::88......88::::::;a8~".a888~:::::::::| #
# | :::::::::::::~8a.`~888a:::88......88:::;a8~".a888~::::::::::::| #
# | :::::::::::: :~8a.`~888a:88 .....88;a8~".a888~:::::::::::::::| #
# | :::::::::::::::::::~8a.`~888......88~".a888~::::::::::::::::::| #
# | 8888888888888888888888888888......8888888888888888888888888888| #
# | ..............................................................| #
# | ..............................................................| #
# | 8888888888888888888888888888......8888888888888888888888888888| #
# | ::::::::::::::::::a888~".a88......888a."~8;:::::::::::::::::::| #
# | :::::::::::::::a888~".a8~:88......88~888a."~8;::::::::::::::::| #
# | ::::::::::::a888~".a8~::::88......88:::~888a."~8;:::::::::::::| #
# | :::::::::a888~".a8~:::::::88......88::::::~888a."~8;::::::::::| #
# | ::::::a888~".a8~::::::::::88......88:::::::::~888a."~8;:::::::| #
# | :::a888~".a8~:::::::::::::88......88::::::::::::~888a."~8;::::| #
# | a888~".a8~::::::::::::::::88......88:::::::::::::::~888a."~8;:| #
# | #
# | Rebirth Addon #
# | Copyright (C) 2017 Cypher #
# | #
# | This program is free software: you can redistribute it and/or modify #
# | it under the terms of the GNU General Public License as published by #
# | the Free Software Foundation, either version 3 of the License, or #
# | (at your option) any later version. #
# | #
# | This program is distributed in the hope that it will be useful, #
# | but WITHOUT ANY WARRANTY; without even the implied warranty of #
# | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# | GNU General Public License for more details. #
# | #
################################################################################
import ast
import hashlib
import re
import time
from resources.lib.modules import control
try:
from sqlite3 import dbapi2 as db, OperationalError
except ImportError:
from pysqlite2 import dbapi2 as db, OperationalError
"""
This module is used to get/set cache for every action done in the system
"""
cache_table = 'cache'
def get(function, duration, *args):
    # type: (function, int, object) -> object or None
    """
    Gets cached value for provided function with optional arguments, or executes and stores the result
    :param function: Function to be executed
    :param duration: Duration of validity of cache in hours
    :param args: Optional arguments for the provided function
    """
    try:
        key = _hash_function(function, args)
        cache_result = cache_get(key)

        # Fresh enough: return the parsed cached value.
        if cache_result:
            if _is_cache_valid(cache_result['date'], duration):
                return ast.literal_eval(cache_result['value'].encode('utf-8'))

        fresh_result = repr(function(*args))

        # NOTE(review): repr() never yields an empty string, so this guard
        # effectively never fires; kept for safety.
        if not fresh_result:
            # BUGFIX: fall back to the *parsed* stale value, like a valid hit.
            # Previously the raw DB row dict was returned here, giving callers
            # a different type than every other code path.
            if cache_result:
                return ast.literal_eval(cache_result['value'].encode('utf-8'))
            return None

        cache_insert(key, fresh_result)
        return ast.literal_eval(fresh_result.encode('utf-8'))
    except Exception:
        return None
def timeout(function, *args):
    """Return the unix timestamp at which this call's cache entry was stored,
    or None when there is no entry."""
    try:
        entry = cache_get(_hash_function(function, args))
        return int(entry['date'])
    except Exception:
        return None
def cache_get(key):
    # type: (str) -> dict or None
    """Fetch the cache row for `key`; None when the table does not exist yet."""
    try:
        cur = _get_connection_cursor()
        cur.execute("SELECT * FROM %s WHERE key = ?" % cache_table, [key])
        return cur.fetchone()
    except OperationalError:
        return None
def cache_insert(key, value):
    # type: (str, str) -> None
    """Insert or update a cache row for `key`, stamping it with the current time."""
    cursor = _get_connection_cursor()
    now = int(time.time())
    cursor.execute(
        "CREATE TABLE IF NOT EXISTS %s (key TEXT, value TEXT, date INTEGER, UNIQUE(key))"
        % cache_table
    )
    update_result = cursor.execute(
        "UPDATE %s SET value=?,date=? WHERE key=?"
        % cache_table, (value, now, key))

    # BUGFIX: `rowcount is 0` compared identity with an int literal (works
    # only via CPython small-int interning); use equality.
    if update_result.rowcount == 0:
        cursor.execute(
            "INSERT INTO %s Values (?, ?, ?)"
            % cache_table, (key, value, now)
        )

    cursor.connection.commit()
def cache_clear():
    """Drop all cache tables and reclaim file space; any failure is ignored."""
    try:
        cursor = _get_connection_cursor()

        for t in [cache_table, 'rel_list', 'rel_lib']:
            try:
                cursor.execute("DROP TABLE IF EXISTS %s" % t)
                cursor.execute("VACUUM")
                # BUGFIX: sqlite3 cursors have no commit(); the old
                # `cursor.commit()` raised AttributeError (silently swallowed
                # below), so the drop was never committed. Commit on the
                # cursor's connection instead.
                cursor.connection.commit()
            except Exception:
                pass
    except Exception:
        pass
def _get_connection_cursor():
    """Open a fresh cursor on the cache database."""
    return _get_connection().cursor()
def _get_connection():
    """Open the cache database, creating the addon data directory if needed."""
    control.makeFile(control.dataPath)
    conn = db.connect(control.cacheFile)
    # Rows come back as {column_name: value} dicts via _dict_factory.
    conn.row_factory = _dict_factory
    return conn
def _dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
def _hash_function(function_instance, *args):
    """Cache key: the function's bare name plus an md5 digest of its arguments."""
    name = _get_function_name(function_instance)
    digest = _generate_md5(args)
    return name + digest
def _get_function_name(function_instance):
return re.sub('.+\smethod\s|.+function\s|\sat\s.+|\sof\s.+', '', repr(function_instance))
def _generate_md5(*args):
md5_hash = hashlib.md5()
[md5_hash.update(str(arg)) for arg in args]
return str(md5_hash.hexdigest())
def _is_cache_valid(cached_time, cache_timeout):
now = int(time.time())
diff = now - cached_time
return (cache_timeout * 3600) > diff
| 1.679688 | 2 |
utils/loglikelihood.py | MrHuff/DIF-NLDL | 0 | 12771286 | import torch
from utils.model_utils import *
#Class conditional loglikelihood!
def elbo_recon(prediction, target):
    """Per-sample squared reconstruction error, summed over all non-batch dims."""
    diff = (prediction - target).view(prediction.size(0), -1)
    return (diff ** 2).sum(dim=-1)
def calculate_ELBO(model, real_images):
    """Return the negative ELBO (reconstruction error + KL) per input sample."""
    with torch.no_grad():
        mu, logvar, _, reconstruction = model(real_images)
        reconstruction_error = elbo_recon(reconstruction, real_images)
        kl_term = model.kl_loss(mu, logvar)
        elbo = reconstruction_error + kl_term
        return -elbo.squeeze()
def estimate_loglikelihoods(dataloader_test, model, s=1000):
    """Monte-Carlo log-likelihood estimate per test sample.

    For each batch, draws `s` ELBO samples under autocast and combines them
    with log-mean-exp: logsumexp(ELBO_1..s) - log(s).

    :return: (per-sample log-likelihood estimates, all raw ELBO samples,
              concatenated class labels)
    """
    _loglikelihood_estimates = []
    _elbo_estimates = []
    _class = []
    tensor_s = torch.tensor(s).float()
    with torch.no_grad():
        for iteration, (batch, c) in enumerate(tqdm.tqdm(dataloader_test)):
            _elbo = []
            for i in tqdm.trange(s):
                with autocast():
                    ELBO = calculate_ELBO(model, batch.cuda())
                _elbo.append(ELBO)
                _elbo_estimates.append(ELBO)
            likelihood_est = torch.stack(_elbo, dim=1)
            # log( (1/s) * sum_i exp(ELBO_i) ) for every sample in the batch.
            _loglikelihood_estimates.append(torch.logsumexp(likelihood_est, dim=1).cpu() - torch.log(tensor_s))
            _class.append(c)
    _elbo_estimates = torch.cat(_elbo_estimates, dim=0)
    _class = torch.cat(_class, dim=0)
    _loglikelihood_estimates = torch.cat(_loglikelihood_estimates, dim=0)
    return _loglikelihood_estimates, _elbo_estimates, _class
def calculate_metrics(_loglikelihood_estimates, _elbo_estimates, _class):
    """Summarise log-likelihood and ELBO overall and per class.

    `_class` is a boolean tensor: False selects class A, True selects class B.
    Returns (ll, elbo, ll_A, ll_B, elbo_A, elbo_B) as Python floats.
    """
    with torch.no_grad():
        mask_a = ~_class
        mask_b = _class
        overall_ll = _loglikelihood_estimates.mean(0)
        overall_elbo = _elbo_estimates.mean()
        ll_a = _loglikelihood_estimates[mask_a].mean(0)
        ll_b = _loglikelihood_estimates[mask_b].mean(0)
        elbo_a = _elbo_estimates[mask_a].mean()
        elbo_b = _elbo_estimates[mask_b].mean()
        return (overall_ll.item(), overall_elbo.item(),
                ll_a.item(), ll_b.item(),
                elbo_a.item(), elbo_b.item())
| 2.21875 | 2 |
tests/docs/test_async_constructor.py | adriangb/di | 57 | 12771287 | import pytest
from docs_src.async_constructor import main
@pytest.mark.anyio("asyncio")
async def test_async_constructor() -> None:
    # Smoke test: the documented async-constructor example runs without raising.
    await main()
| 1.835938 | 2 |
xyz_to_zmat_orca.py | LIVazquezS/xyz_to_orcaint | 0 | 12771288 | <reponame>LIVazquezS/xyz_to_orcaint
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 28 19:08:50 2021
@author: <NAME>
"""
import argparse
from openbabel import pybel
import os
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser._action_groups.pop()
required = parser.add_argument_group("required arguments")
required.add_argument("-i", "--input", type=str, help="input xyz", required=True)
required.add_argument("-o", "--output", type=str, help="output name", required=True)
args = parser.parse_args()
print('Your input file:', args.input)
print('Your output file:', args.output)
def gen_interm_zmat(infile, outfile):
    """Convert an xyz file to Open Babel's "fh" (Fenske-Hall Z-matrix) format."""
    mol = next(pybel.readfile("xyz", infile))
    mol.write("fh", outfile)
def read_zmat(file):
    """Parse an intermediate Z-matrix file.

    Expected layout: line 0 title, line 1 atom count, lines 2-4 the first
    three atoms (label / label a1 r / label a1 r a2 angle), then one full
    `label a1 r a2 angle a3 dihedral` line per remaining atom.

    :return: (natom, labels, rs, angles, dhs, a1_index, a2_index, a3_index),
             each per-atom list of length natom (numeric entries are the raw
             string tokens; the leading placeholders are 0 / 0.0).
    """
    with open(file) as f:
        contents = f.read().splitlines()
    natom = int(contents[1])
    l1 = contents[2].split()
    l2 = contents[3].split()
    l3 = contents[4].split()
    rs = [0.0, l2[2], l3[2]]
    angles = [0.0, 0.0, l3[4]]
    labels = [l1[0], l2[0], l3[0]]
    # BUGFIX: dhs previously started with FOUR zeros while every sibling list
    # starts with three, shifting each appended dihedral one slot too far —
    # gen_orca_mat then emitted the previous atom's dihedral (0.0 for atom 4).
    dhs = [0.0, 0.0, 0.0]
    a1_index = [0, l2[1], l3[1]]
    a2_index = [0, 0, l3[3]]
    a3_index = [0, 0, 0]
    for i in range(natom - 3):
        label, a1, r, a2, a, a3, dh = contents[5 + i].split()
        labels.append(label)
        angles.append(a)
        rs.append(r)
        dhs.append(dh)
        a1_index.append(a1)
        a2_index.append(a2)
        a3_index.append(a3)
    return natom, labels, rs, angles, dhs, a1_index, a2_index, a3_index
def gen_orca_mat(natom, labels, rs, angles, dhs, a1_index, a2_index, a3_index, name):
    """Write the Z-matrix in ORCA internal-coordinate layout to `name`.zmat."""
    out_path = str(name) + '.zmat'
    rows = [
        '{:3s} {} {} {} {:15.10f} {:15.10f} {:15.10f}'.format(
            labels[k], a1_index[k], a2_index[k], a3_index[k],
            float(rs[k]), float(angles[k]), float(dhs[k]))
        for k in range(natom)
    ]
    with open(out_path, 'w', newline='') as out_file:
        for row in rows:
            out_file.write("%s\n" % row)
def main(infile, ofile):
    """Convert an xyz file to an ORCA internal-coordinate Z-matrix."""
    tmp_name = 'tmp.zmat'
    gen_interm_zmat(infile, tmp_name)
    parsed = read_zmat(tmp_name)
    gen_orca_mat(*parsed, ofile)
    os.remove(tmp_name)
    print('Conversion Done, Be happy')

main(args.input, args.output)
| 2.75 | 3 |
nltkma/test/unit/test_json2csv_corpus.py | aydtmiri/nltk-ma | 0 | 12771289 | <gh_stars>0
# Natural Language Toolkit: Twitter client
#
# Copyright (C) 2001-2021 NLTK Project
# Author: <NAME> <<EMAIL>>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Regression tests for `json2csv()` and `json2csv_entities()` in Twitter
package.
"""
from pathlib import Path
import pytest
from nltkma.corpus import twitter_samples
from nltkma.twitter.common import json2csv, json2csv_entities
def files_are_identical(pathA, pathB):
    """
    Compare two files, ignoring carriage returns,
    leading whitespace, and trailing whitespace
    """
    def normalised(path):
        return [line.strip() for line in path.read_bytes().splitlines()]
    return normalised(pathA) == normalised(pathB)
subdir = Path(__file__).parent / 'files'
@pytest.fixture
def infile():
    # First 100 lines (one JSON tweet per line) of the sample corpus file.
    with open(twitter_samples.abspath("tweets.20150430-223406.json")) as infile:
        return [next(infile) for x in range(100)]
def test_textoutput(tmp_path, infile):
    # Export only the tweet text column and compare against the reference CSV.
    ref_fn = subdir / 'tweets.20150430-223406.text.csv.ref'
    outfn = tmp_path / 'tweets.20150430-223406.text.csv'
    json2csv(infile, outfn, ['text'], gzip_compress=False)
    assert files_are_identical(outfn, ref_fn)
def test_tweet_metadata(tmp_path, infile):
    # Export the core per-tweet metadata columns and compare to the reference.
    ref_fn = subdir / 'tweets.20150430-223406.tweet.csv.ref'
    fields = [
        'created_at',
        'favorite_count',
        'id',
        'in_reply_to_status_id',
        'in_reply_to_user_id',
        'retweet_count',
        'retweeted',
        'text',
        'truncated',
        'user.id',
    ]
    outfn = tmp_path / 'tweets.20150430-223406.tweet.csv'
    json2csv(infile, outfn, fields, gzip_compress=False)
    assert files_are_identical(outfn, ref_fn)
def test_user_metadata(tmp_path, infile):
    # Dotted field paths ('user.*') reach into the nested user object.
    ref_fn = subdir / 'tweets.20150430-223406.user.csv.ref'
    fields = ['id', 'text', 'user.id', 'user.followers_count', 'user.friends_count']
    outfn = tmp_path / 'tweets.20150430-223406.user.csv'
    json2csv(infile, outfn, fields, gzip_compress=False)
    assert files_are_identical(outfn, ref_fn)
def test_tweet_hashtag(tmp_path, infile):
    # One CSV row per hashtag entity, keyed by tweet id and text.
    ref_fn = subdir / 'tweets.20150430-223406.hashtag.csv.ref'
    outfn = tmp_path / 'tweets.20150430-223406.hashtag.csv'
    json2csv_entities(
        infile,
        outfn,
        ['id', 'text'],
        'hashtags',
        ['text'],
        gzip_compress=False,
    )
    assert files_are_identical(outfn, ref_fn)
def test_tweet_usermention(tmp_path, infile):
    # One CSV row per user-mention entity (mentioned user's id and handle).
    ref_fn = subdir / 'tweets.20150430-223406.usermention.csv.ref'
    outfn = tmp_path / 'tweets.20150430-223406.usermention.csv'
    json2csv_entities(
        infile,
        outfn,
        ['id', 'text'],
        'user_mentions',
        ['id', 'screen_name'],
        gzip_compress=False,
    )
    assert files_are_identical(outfn, ref_fn)
def test_tweet_media(tmp_path, infile):
    # One CSV row per media entity (media URL and shortened URL).
    ref_fn = subdir / 'tweets.20150430-223406.media.csv.ref'
    outfn = tmp_path / 'tweets.20150430-223406.media.csv'
    json2csv_entities(
        infile,
        outfn,
        ['id'],
        'media',
        ['media_url', 'url'],
        gzip_compress=False,
    )
    assert files_are_identical(outfn, ref_fn)
def test_tweet_url(tmp_path, infile):
    # One CSV row per URL entity (short and expanded forms).
    ref_fn = subdir / 'tweets.20150430-223406.url.csv.ref'
    outfn = tmp_path / 'tweets.20150430-223406.url.csv'
    json2csv_entities(
        infile,
        outfn,
        ['id'],
        'urls',
        ['url', 'expanded_url'],
        gzip_compress=False,
    )
    assert files_are_identical(outfn, ref_fn)
def test_userurl(tmp_path, infile):
    # URL entities nested under the *user* object ('user.urls').
    ref_fn = subdir / 'tweets.20150430-223406.userurl.csv.ref'
    outfn = tmp_path / 'tweets.20150430-223406.userurl.csv'
    json2csv_entities(
        infile,
        outfn,
        ['id', 'screen_name'],
        'user.urls',
        ['url', 'expanded_url'],
        gzip_compress=False,
    )
    assert files_are_identical(outfn, ref_fn)
def test_tweet_place(tmp_path, infile):
    # Place entity: place name and country per tweet.
    ref_fn = subdir / 'tweets.20150430-223406.place.csv.ref'
    outfn = tmp_path / 'tweets.20150430-223406.place.csv'
    json2csv_entities(
        infile,
        outfn,
        ['id', 'text'],
        'place',
        ['name', 'country'],
        gzip_compress=False,
    )
    assert files_are_identical(outfn, ref_fn)
def test_tweet_place_boundingbox(tmp_path, infile):
    # Bounding-box coordinates of the place entity ('place.bounding_box').
    ref_fn = subdir / 'tweets.20150430-223406.placeboundingbox.csv.ref'
    outfn = tmp_path / 'tweets.20150430-223406.placeboundingbox.csv'
    json2csv_entities(
        infile,
        outfn,
        ['id', 'name'],
        'place.bounding_box',
        ['coordinates'],
        gzip_compress=False,
    )
    assert files_are_identical(outfn, ref_fn)
def test_retweet_original_tweet(tmp_path, infile):
    # For retweets: export the metadata of the *original* tweet
    # (nested under 'retweeted_status').
    ref_fn = subdir / 'tweets.20150430-223406.retweet.csv.ref'
    outfn = tmp_path / 'tweets.20150430-223406.retweet.csv'
    json2csv_entities(
        infile,
        outfn,
        ['id'],
        'retweeted_status',
        [
            'created_at',
            'favorite_count',
            'id',
            'in_reply_to_status_id',
            'in_reply_to_user_id',
            'retweet_count',
            'text',
            'truncated',
            'user.id',
        ],
        gzip_compress=False,
    )
    assert files_are_identical(outfn, ref_fn)
def test_file_is_wrong(tmp_path, infile):
    """
    Sanity check that file comparison is not giving false positives.
    """
    # Compare the text export against the *retweet* reference; must differ.
    ref_fn = subdir / 'tweets.20150430-223406.retweet.csv.ref'
    outfn = tmp_path / 'tweets.20150430-223406.text.csv'
    json2csv(infile, outfn, ['text'], gzip_compress=False)
    assert not files_are_identical(outfn, ref_fn)
| 2.5 | 2 |
src/data_mgmt/helpers/mqtt_pub.py | ammpio/stromm | 1 | 12771290 | <reponame>ammpio/stromm
import logging
from os import getenv, path
import json
import paho.mqtt.client as mqtt
from typing import Dict, List, Optional
import ssl
logger = logging.getLogger(__name__)
MQTT_CLEAN_SESSION = False
MQTT_QOS = 1
MQTT_RETAIN = False
MQTT_CONN_SUCCESS = 0
MQTT_PUB_SUCCESS = 0
# Attempt to send (including waiting for PUBACK) at most 2 message at a time
MAX_INFLIGHT_MESSAGES = 2
# Only use the internal MQTT queue minimally
# (note that 0 = unlimited queue size, so 1 is the minimum)
MAX_QUEUED_MESSAGES = 2
# Minimum delay before retrying a CONNECT
RECONNECT_MIN_DELAY = 1
# MAximum delay before retrying a CONNECT
RECONNECT_MAX_DELAY = 120
# Time period between retrying a PUBLISH that hasn't been acknowledged
MESSAGE_RETRY = 30
class MQTTPublisher():
    """Thin wrapper around a paho-mqtt client that publishes JSON payloads.

    Connects asynchronously over TLS at construction time and runs the
    network loop in a background thread; publishing is refused until the
    CONNACK callback marks the client as connected.
    """

    def __init__(self, node_id: str, access_key: str, config: Dict, client_id_suffix: Optional[str] = None) -> None:
        """Create and start an MQTT client for `node_id`.

        :param node_id: broker username, client-id base and topic prefix
        :param access_key: broker password
        :param config: must provide 'host', 'port' and 'cert' (CA file name)
        :param client_id_suffix: optional suffix to distinguish multiple clients
        """
        if client_id_suffix is None:
            client_id = node_id
        else:
            client_id = f'{node_id}-{client_id_suffix}'

        client = mqtt.Client(client_id=client_id, clean_session=MQTT_CLEAN_SESSION)
        client.enable_logger(logger)
        # NOTE(review): CERT_NONE disables server certificate verification.
        client.tls_set(
            ca_certs=path.join(getenv('SNAP', '.'), 'resources', 'certs', config['cert']),
            cert_reqs=ssl.CERT_NONE
        )
        client.username_pw_set(node_id, access_key)
        client.max_inflight_messages_set(MAX_INFLIGHT_MESSAGES)
        client.max_queued_messages_set(MAX_QUEUED_MESSAGES)
        client.reconnect_delay_set(min_delay=RECONNECT_MIN_DELAY, max_delay=RECONNECT_MAX_DELAY)
        client.message_retry_set(MESSAGE_RETRY)
        client.on_connect = self.__on_connect
        client.on_disconnect = self.__on_disconnect
        # Non-blocking connect; the background network loop finishes the handshake.
        client.connect_async(host=config['host'], port=config['port'])
        client.loop_start()

        self._client = client
        self._host = config['host']
        self._node_id = node_id
        self._connected = False

    def publish(self, payload: Dict, subtopic: Optional[str] = None) -> bool:
        """Publish `payload` as compact JSON to a/<node_id>/<subtopic>.

        :return: True when the message was queued successfully, False when
                 not connected or the client's internal queue rejected it.
        """
        if not self._connected:
            logger.warning("MQTT client not yet connected; not publishing")
            return False
        mqtt_topic = self.__get_topic(subtopic)
        rc = self._client.publish(
            mqtt_topic,
            self.__get_mqtt_payload(payload),
            qos=MQTT_QOS, retain=MQTT_RETAIN
        )
        logger.debug(f"PUSH [mqtt] Published with response code: {rc}")
        # TODO: Use an onpublish callback to ascertain whether the message
        # was actually published, rather than the "fire and forget" approach.
        # The latter only results in an error if the MQTT module's internal
        # queue is full (this is parameterized above)
        # rc is an MQTTMessageInfo; rc[0] relies on its tuple-style
        # (rc, mid) indexing to get the result code.
        if rc[0] == MQTT_PUB_SUCCESS:
            logger.debug("PUSH [mqtt] Successfully published")
            return True
        else:
            logger.debug("PUSH [mqtt] Error - Message not published")
            return False

    def __get_topic(self, subtopic: Optional[str]) -> str:
        # Topic layout: a/<node_id>/<subtopic>
        mqtt_topic = f"a/{self._node_id}/{subtopic}"
        return mqtt_topic

    @staticmethod
    def __get_mqtt_payload(payload: dict) -> str:
        # Compact separators keep the wire payload small.
        mqtt_payload = json.dumps(payload, separators=(',', ':'))
        return mqtt_payload

    def __on_connect(self, client: mqtt.Client, userdata, flags, rc: int) -> None:
        # Callback for when the client receives a CONNACK response from the server.
        if rc == MQTT_CONN_SUCCESS:
            logger.info(f"Successfully connected to MQTT host {self._host}")
            self._connected = True
        else:
            logger.error(f"Connection attempt to {self._host} yielded result code {rc}")

    def __on_disconnect(self, client: mqtt.Client, userdata, rc: int) -> None:
        # Callback for any disconnect (clean or unexpected).
        if rc == MQTT_CONN_SUCCESS:
            logger.info(f"Successfully disconnected to MQTT host {self._host}")
        else:
            logger.error(f"Disconnection from {self._host} with result code {rc}")
        self._connected = False
| 2.296875 | 2 |
data_analysis/visualization/eye_image_annotation.py | vedb/data_analysis | 0 | 12771291 | from glob import glob
import cv2
import os
import sys
import yaml
import matplotlib as mpl
import numpy as np
from skimage.io import imread
import matplotlib.pyplot as plt
from ellipses import LSqEllipse # The code is pulled from https://github.com/bdhammel/least-squares-ellipse-fitting
import time
# This annotation script is written by <NAME> and <NAME> inspired by DeepVog repo by <NAME>
def _get_annotation_path_from_image_path(image_file_name, path, eye_part):
# print('get txt file name: ', path + image_file_name + eye_part + '.txt')
return path + image_file_name + eye_part + '.txt'
def fit_pupil(image_path, saving_directory, curr_image_number, plot=False, write_annotation=False, eye_part='pupil'):
    """Interactively annotate one eye image and fit a model to the clicks.

    Shows the image, collects mouse clicks via ``plt.ginput`` and, depending
    on *eye_part*, fits either an ellipse (pupil/iris, via LSqEllipse) or a
    4th-degree polynomial (upper/lower eyelid) to the clicked points.

    Parameters:
        image_path: path to the eye image to annotate.
        saving_directory: directory where annotation .txt files and overlay
            images are written.
        curr_image_number: index shown in the figure title.
        plot: if True, draw the fitted shape and save an "_ellipse.png" overlay.
        write_annotation: if True, write the fit summary and the raw clicked
            points to text files in *saving_directory*.
        eye_part: substring-matched against 'pupil'/'iris'/'upper'/'lower'
            (call sites pass e.g. '_pupil').

    Returns:
        'success' after a confirmed annotation, 'proceed' if the user made no
        clicks (skip image), or 'quit' if the user typed 'q' to stop the tool.

    Side effects: writes files, opens/closes matplotlib figures, reads stdin.
    """
    # Mouse enumeration for development use only
    # Todo: Remove this after we're done with the design
    '''
    BACK = 8
    FORWARD = 9
    LEFT = 1
    MIDDLE = 2
    RIGHT = 3
    '''
    upper_color = 'purple'
    lower_color = 'green'
    # Pick the marker/fill colors for the part being annotated.
    if 'pupil' in eye_part:
        point_color = 'yellow'
        fill_color = 'orange'
    elif 'iris' in eye_part:
        point_color = 'blue'
        fill_color = 'cyan'
    elif 'upper' in eye_part:
        point_color = 'purple'
        fill_color = 'grey'
    elif 'lower' in eye_part:
        point_color = 'green'
        fill_color = 'white'
    base = os.path.basename(image_path)
    image_file_name = os.path.splitext(base)[0]
    result = 'success'
    # Loop until the user confirms (no confirmation click) or quits with 'q'.
    while True:
        plt.ion()
        fig, ax = plt.subplots(figsize=(15, 15))
        img = imread(image_path)
        ax.set_title('Annotating {} for ID:{}\n File Name:{}'.format(eye_part.replace('_',''),curr_image_number, os.path.basename(image_path)))
        ax.imshow(img, cmap='gray')
        ax.set_xlim(-20, 420)
        ax.set_ylim(-20, 420)
        # When annotating one eyelid, overlay the previously saved points of
        # the opposite eyelid (if present) as a visual reference.
        if 'upper' in eye_part or 'lower' in eye_part:
            if 'upper' in eye_part:
                annotated_text_file_name = _get_annotation_path_from_image_path(image_file_name, saving_directory,
                                                                                '_lower')
                my_color = lower_color
            elif 'lower' in eye_part:
                annotated_text_file_name = _get_annotation_path_from_image_path(image_file_name, saving_directory,
                                                                                '_upper')
                my_color = upper_color
            if os.path.exists(annotated_text_file_name):
                with open(annotated_text_file_name.replace(".txt", "_points.txt")) as f:
                    w, h = [x for x in next(f).split()]  # read first line
                    array = []
                    for line in f:  # read rest of lines
                        # NOTE(review): np.float is deprecated/removed in
                        # modern NumPy — confirm the pinned NumPy version.
                        array.append([np.float(x) for x in line.split(',')])
                previous_x = [np.float(x[0]) for x in array]
                previous_y = [np.float(x[1]) for x in array]
                ax.plot(previous_x, previous_y, c=my_color, marker='x')
        key_points = plt.ginput(-1, mouse_pop=2, mouse_stop=3,
                                timeout=-1)  # If negative, accumulate clicks until the input is terminated manually.
        points_x = [x[0] for x in key_points]
        points_y = [x[1] for x in key_points]
        # No clicks at all means "skip this image".
        if not key_points:
            plt.close()
            result = 'proceed'
            break
        if 'pupil' in eye_part or 'iris' in eye_part:
            # Least-squares ellipse fit to the clicked boundary points.
            fitted = LSqEllipse()
            fitted.fit([points_x, points_y])
            center_coord, width, height, angle = fitted.parameters()
            axes = np.array([width, height])
            angle = np.rad2deg(angle)
        elif 'upper' in eye_part or 'lower' in eye_part:
            # Eyelids are modeled as a 4th-degree polynomial y = p(x).
            poly = np.poly1d(np.polyfit(points_x, points_y, 4))
            print("\npoly calculated:", poly)
            print('\n')
        if write_annotation:
            annotated_text_file_name = _get_annotation_path_from_image_path(image_file_name, saving_directory,
                                                                            eye_part)
            with open(annotated_text_file_name, 'w+') as f:
                # if all([c <= 50 for c in center_coord]):
                #     points_str = '-1:-1'
                # else:
                if 'pupil' in eye_part or 'iris' in eye_part:
                    # Summary file holds the ellipse center only.
                    points_str = '{}, {}'.format(center_coord[0], center_coord[1])
                    f.write(points_str)
                elif 'upper' in eye_part or 'lower' in eye_part:
                    # Summary file holds the left-most and right-most clicks.
                    f.write('{}, {}\n'.format(min(points_x), points_y[np.argmin(points_x)]))
                    print("The left most eyelid point: {:.2f} {:.2f}".format(min(points_x), points_y[np.argmin(points_x)]))
                    f.write('{}, {}\n'.format(max(points_x), points_y[np.argmax(points_x)]))
                    print("The right most eyelid point: {:.2f} {:.2f}".format(max(points_x), points_y[np.argmax(points_x)]))
            with open(annotated_text_file_name.replace(".txt","_points.txt"), 'w+') as f:  # For detecting selected
                # Raw clicked points, one "x, y" pair per line.
                for point in key_points:
                    f.write('{}, {}\n'.format(point[0], point[1]))
        if plot:
            all_x = [x[0] for x in key_points]
            all_y = [x[1] for x in key_points]
            plt.scatter(x=all_x, y=all_y, c=point_color, marker='x')
            if 'pupil' in eye_part or 'iris' in eye_part:
                ell = mpl.patches.Ellipse(xy=center_coord, width=axes[0] * 2,
                                          height=axes[1] * 2, angle=angle, fill=True, color=fill_color, alpha=0.4)
                ax.add_artist(ell)
            elif 'upper' in eye_part or 'lower' in eye_part:
                # Sample the fitted polynomial at 1-px steps for display.
                for my_x in np.arange(min(points_x), max(points_x), 1):
                    my_y = poly(my_x)
                    plt.plot(my_x, my_y, c=point_color, marker='o')
            output_image_file = saving_directory + image_file_name + eye_part + "_ellipse.png"
            fig.savefig(output_image_file)
            print("saved: ", os.path.basename(output_image_file))
            print('\n')
            plt.show()
        # A single right-click confirms; no click within ginput means accept.
        confirmation_point = plt.ginput(1, timeout=-1, mouse_add=3, mouse_stop=3)
        plt.close()
        if len(confirmation_point) == 0:
            break
        # Hacky way to read the q press to quit the loop otherwise the QT doesn't let go of the thread
        # TODO: gracefully stop the tool
        time.sleep(.01)
        answer = input("")
        print('q pressed!!', answer)
        if answer == 'q':
            print("\n\nQuiting the Annotation tool!")
            result = 'quit'
            # plt.ioff()
            # plt.close()
            # sys.exit(0)
            break
    return result
def annotate(image_directory, saving_directory, eye_part='pupil'):
    """Run the annotation loop over every PNG in *image_directory*.

    Images whose annotation .txt file already exists in *saving_directory*
    are skipped; otherwise ``fit_pupil`` is launched for them. The loop stops
    early when the user quits the tool ('quit' result).
    """
    images_paths = sorted(glob(os.path.join(image_directory, '*png')))
    # imag_paths = sorted(glob(os.path.join(base_dir, '*jpg')))
    annotation_paths = glob(os.path.join(saving_directory, '*txt'))
    i = 0
    for image_path in images_paths:
        print("Running Annotation for: {} ID: {}/{}".format(os.path.basename(image_path), i, len(images_paths)))
        base = os.path.basename(image_path)
        image_file_name = os.path.splitext(base)[0]
        annotated_text_file_name = _get_annotation_path_from_image_path(image_file_name, saving_directory, eye_part)
        if annotated_text_file_name in annotation_paths:
            # Already annotated in a previous session — skip.
            print("Found the existing txt file for: ", os.path.basename(annotated_text_file_name))
        else:
            result = fit_pupil(image_path=image_path, saving_directory=saving_directory, curr_image_number=i, plot=True, write_annotation=True, eye_part=eye_part)
            if result == 'quit':
                print("\n\nQuit!!\n\n")
                break
        i = i + 1
def parse_pipeline_parameters(parameters_fpath):
    """Load the YAML annotation-parameter file.

    Parameters:
        parameters_fpath: path to the YAML configuration file (the main block
            below expects keys 'directory' and 'annotation').

    Returns:
        The parsed parameter dictionary (whatever ``yaml.safe_load`` yields).
    """
    # The previous ``param_dict = dict()`` initializer was dead code — it was
    # unconditionally overwritten by the safe_load result.
    with open(parameters_fpath, "r") as stream:
        return yaml.safe_load(stream)
if __name__ == '__main__':
    # Set up pyplot through the already-imported matplotlib module.
    plt = mpl.pyplot
    fig = plt.figure()
    # mpl.rcParams["savefig.directory"] = os.chdir(
    #     os.path.dirname('/home/kamran/Downloads/eye_image_annotation_results/'))
    # File Path for the yaml file
    parameters_fpath = os.getcwd() + "/annotation_parameters.yaml"
    param_dict = parse_pipeline_parameters(parameters_fpath)
    image_directory = param_dict['directory']['image_directory']
    saving_directory = param_dict['directory']['saving_directory']
    eye_part = param_dict['annotation']['eye_part']
    print(param_dict)
    print(eye_part)
    # Normalize the configured eye part to one of the four canonical names.
    if 'pupil' in eye_part:
        eye_part = 'pupil'
    elif 'iris' in eye_part:
        eye_part = 'iris'
    elif 'upper' in eye_part:
        eye_part = 'upper'
    elif 'lower' in eye_part:
        eye_part = 'lower'
    else:
        raise ValueError("Wrong Eye Part for Annotation!!!")
    # The leading underscore matches the file-name suffix convention used by
    # _get_annotation_path_from_image_path (e.g. "<stem>_pupil.txt").
    annotate(image_directory=image_directory, saving_directory=saving_directory, eye_part='_' + eye_part)
    sys.exit(0)
| 2.46875 | 2 |
invenio_app_ils/literature/covers_builder.py | NRodriguezcuellar/invenio-app-ils | 41 | 12771292 | <reponame>NRodriguezcuellar/invenio-app-ils<gh_stars>10-100
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018-2020 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Document configuration callbacks."""
from flask import url_for
def build_ils_demo_cover_urls(metadata):
    """Return cover URLs for a demo record, preferring ISBN-based covers."""
    isbn = metadata.get("cover_metadata", {}).get("ISBN", "")
    if not isbn:
        # No ISBN available — fall back to the bundled placeholder image.
        return build_placeholder_urls()
    return build_openlibrary_urls(isbn)
def build_openlibrary_urls(isbn):
    """Build Open Library cover urls (small/medium/large) for *isbn*."""
    template = "https://covers.openlibrary.org/b/isbn/{isbn}-{size}.jpg"
    urls = {"is_placeholder": False}
    for key, size in (("small", "S"), ("medium", "M"), ("large", "L")):
        urls[key] = template.format(isbn=isbn, size=size)
    return urls
def build_placeholder_urls():
    """Return urls pointing every cover size at the default placeholder image."""
    placeholder = url_for(
        "invenio_app_ils.static",
        filename="images/placeholder.png",
        _external=True,
    )
    # All three sizes share the same static placeholder asset.
    urls = {size: placeholder for size in ("small", "medium", "large")}
    urls["is_placeholder"] = True
    return urls
| 1.835938 | 2 |
test/unittests/test_AnimalN.py | mudkipmaster/gwlf-e | 0 | 12771293 | import numpy as np
from VariableUnittest import VariableUnitTest
from gwlfe.Output.AvAnimalNSum import AnimalN
class TestAnimalN(VariableUnitTest):
    """Regression test: AnimalN_f must match the reference AnimalN output."""

    def test_AnimalN(self):
        # ``self.z`` is the shared input-parameter fixture supplied by
        # VariableUnitTest; both implementations receive the identical
        # positional arguments and must agree to 7 decimal places.
        z = self.z
        np.testing.assert_array_almost_equal(
            AnimalN.AnimalN_f(z.NYrs, z.NGPctManApp, z.GrazingAnimal_0, z.NumAnimals, z.AvgAnimalWt, z.AnimalDailyN,
                              z.NGAppNRate, z.Prec, z.DaysMonth,
                              z.NGPctSoilIncRate, z.GRPctManApp, z.GRAppNRate, z.GRPctSoilIncRate, z.NGBarnNRate,
                              z.AWMSNgPct, z.NgAWMSCoeffN,
                              z.RunContPct, z.RunConCoeffN, z.PctGrazing, z.GRBarnNRate, z.AWMSGrPct, z.GrAWMSCoeffN,
                              z.PctStreams, z.GrazingNRate),
            AnimalN.AnimalN(z.NYrs, z.NGPctManApp, z.GrazingAnimal_0, z.NumAnimals, z.AvgAnimalWt, z.AnimalDailyN,
                            z.NGAppNRate, z.Prec, z.DaysMonth,
                            z.NGPctSoilIncRate, z.GRPctManApp, z.GRAppNRate, z.GRPctSoilIncRate, z.NGBarnNRate,
                            z.AWMSNgPct, z.NgAWMSCoeffN,
                            z.RunContPct, z.RunConCoeffN, z.PctGrazing, z.GRBarnNRate, z.AWMSGrPct, z.GrAWMSCoeffN,
                            z.PctStreams, z.GrazingNRate), decimal=7)
| 2.21875 | 2 |
doc/examples/2_seismics/plot_04_koenigsee.py | baender/gimli | 0 | 12771294 | <filename>doc/examples/2_seismics/plot_04_koenigsee.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. _ex:koenigsee:
Field data inversion ("Koenigsee")
==================================
This minimalistic example shows how the Refraction Manager can be used to invert
a field data set. Here, we consider the Koenigsee data set, which represents
classical refraction seismics data set with slightly heterogeneous overburden
and some high-velocity bedrock. The data file can be found in the `pyGIMLi
example data repository
<https://github.com/gimli-org/example-data/blob/master/traveltime/koenigsee.sgt>`_.
"""
# sphinx_gallery_thumbnail_number = 2
################################################################################
# We import pyGIMLi and the refraction manager.
import pygimli as pg
from pygimli.physics import TravelTimeManager
################################################################################
# The helper function `pg.getExampleFile` downloads the data set and saves it
# into a temporary location.
# Downloads the .sgt data file to a temporary location and loads it.
data = pg.getExampleFile("traveltime/koenigsee.sgt", load=True, verbose=True)
################################################################################
# We initialize the refraction manager.
mgr = TravelTimeManager()
################################################################################
# Let's have a look at the data in the form of traveltime curves and apparent
# velocity images.
mgr.showData(data)  # show first arrivals as curves (done later with response)
#TODO mgr.showVA(data)  # show data as apparent velocity image
################################################################################
# Finally, we call the `invert` method and plot the result.The mesh is created
# based on the sensor positions on-the-fly. Yes, it is really as simple as that.
# vTop/vBottom define the gradient starting model; zWeight < 1 favors
# horizontal layering in the regularization.
mgr.invert(data, secNodes=3, paraMaxCellSize=5.0,
           zWeight=0.2, vTop=500, vBottom=5000,
           verbose=1)
ax, cbar = mgr.showResult()
# Overlay the ray paths of the final model on the velocity section.
mgr.showRayPaths(ax=ax, color="w", lw=0.3, alpha=0.5)
################################################################################
# Show result and fit of measured data and model response.
mgr.showResultAndFit()
################################################################################
# You can play around with the gradient starting model (`vtop` and `vbottom`
# arguments) and the regularization strength `lam`. You can also customize the
# mesh.
| 2.59375 | 3 |
TomographyRNJGeneral.py | GKovanis/Thesis-Project | 0 | 12771295 | import numpy as np
from math import log10
from math import sqrt
import time
import networkx as nx
import matplotlib.pyplot as plt
import pydot
import csv
class Graph(object):
    """Multicast tree being reconstructed by the RNJ algorithm."""

    def __init__(self):
        # Multicast source node; assigned once the root Node is built.
        self.root = None
        # All nodes currently attached to the tree (source + internal + leaves).
        self.nodes = []
        # Destination nodes not yet joined to a parent (the working set D).
        self.leaves = []
class Node(object):
    """A single tree node: parent link, id, probe data and children."""

    def __init__(self):
        # Parent Node in the inferred tree (None until attached).
        self.father = None
        # Integer node identifier (0 is reserved for the source).
        self.id = None
        # Probe records for end nodes; internal nodes keep this empty.
        self.data = []
        # Child Nodes in the inferred tree.
        self.children = []
def GetLinkLossDumps():
    """Parse tcpdump text files and fill node ``data`` lists with packet IDs.

    Reads 'dumps/n1.txt' for the source (stored on the global ``graph.root``)
    and 'dumps/n<id>.txt' for every leaf. Only lines containing 'tos 0x7'
    (the marker used for the tomography UDP probes) are kept, and only the
    packet ID (8th whitespace field, trailing comma stripped) is stored.
    Mutates the module-level ``graph``.
    """
    #Tcpdumps of source node stored at root.data
    tcpfile = [line.rstrip('\n') for line in open('dumps/n1.txt')] #opens the tcpdump as a list of strings, we suppose that source connects only to one router to the rest of tree
    for line in range(len(tcpfile)):
        if "tos 0x7" in tcpfile[line]: # tos 0x7 is the characteristic I chose to distinguish my UDP packets used for tomography
            temp = tcpfile[line].split()
            graph.root.data.append(int(temp[7].replace(",",""))) #We keep only the packet ID
    #tcpdump of every leave/destination node stored at node.data
    for i in range(len(graph.leaves)):
        filename = "dumps/n%d.txt" % (graph.leaves[i].id) #example tcpdump file path "thesisdumps/1/n1" if node 1 is a leaf
        tcpfile = [line.rstrip('\n') for line in open(filename)]
        for line in range(len(tcpfile)):
            if "tos 0x7" in tcpfile[line]: # tos 0x7 is the characteristic I chose to distinguish my UDP packets used for tomography
                temp = tcpfile[line].split()
                graph.leaves[i].data.append(int(temp[7].replace(",",""))) #We keep only the packet ID
#In Link Delay and Utilization, we need both the packet ID and the timestamp of the packet
def GetLinkDelayDumps():
    """Parse tcpdumps keeping [timestamp, packet ID], then convert to delays.

    Same file layout and 'tos 0x7' filter as GetLinkLossDumps, but each
    record stores the capture timestamp alongside the packet ID. Afterwards
    every leaf's timestamps are replaced by path delays relative to the
    source (TimestampsIntoDelay), and the source's own "delay" entries are
    zeroed. Mutates the module-level ``graph``.
    """
    #Tcpdumps of source node stored at root.data
    tcpfile = [line.rstrip('\n') for line in open('dumps/n1.txt')] #opens the tcpdump as a list of strings, we suppose that source connects only to one router to the rest of tree
    for line in range(len(tcpfile)):
        if "tos 0x7" in tcpfile[line]: # tos 0x7 is the characteristic I chose to distinguish my UDP packets used for tomography
            temp = tcpfile[line].split()
            graph.root.data.append([temp[0], int(temp[7].replace(",",""))]) #We keep the timestamp and the packet ID
    #tcpdump of every leave/destination node stored at node.data
    for i in range(len(graph.leaves)):
        filename = "dumps/n%d.txt" % (graph.leaves[i].id) #example tcpdump file path "thesisdumps/1/n1" if node 1 is a leaf
        tcpfile = [line.rstrip('\n') for line in open(filename)]
        for line in range(len(tcpfile)):
            if "tos 0x7" in tcpfile[line]: # tos 0x7 is the characteristic I chose to distinguish my UDP packets used for tomography
                temp = tcpfile[line].split()
                graph.leaves[i].data.append([temp[0], int(temp[7].replace(",",""))]) #We keep the timestamp and the packet ID
    for node in range(len(graph.leaves)):
        TimestampsIntoDelay(graph.root.data,graph.leaves[node].data,node) #we need to turn each timestamp into path delay for each packet
    #root's delay is 0 in all packets (starting point)
    for k in range(len(graph.root.data)):
        graph.root.data[k][0] = float(0)
#Function that measures path Delay from a timestamp, in our algorithm turns every initial leaf's timestamps into delays (difference between start and finish)
def TimestampsIntoDelay(dump1,dump2,node):
    """Replace leaf timestamps with per-packet path delays (in seconds).

    For each packet in *dump2* (end node), find the record with the same
    packet ID in *dump1* (start node) and store end_time - start_time back
    into ``graph.leaves[node].data[packet][0]``.

    Timestamps are 'HH:MM:SS.ffffff' strings, split into whole seconds and a
    fractional part. NOTE(review): delays spanning midnight would come out
    negative — assumes capture does not cross a day boundary; confirm.
    """
    startingpackets=len(dump1) #tcpdump of start node
    endingpackets = len(dump2) #tcpdump of end node
    for packet in range(endingpackets):
        i = 0 # if we are sure that the packets will arive in order, i = packet for faster runtime
        #find packets with same ID
        while (dump1[i][1] != dump2[packet][1]):
            i += 1
        #measure delay for each packet
        #seconds difference
        timestamp1 = dump1[i][0]
        timestamp2 = dump2[packet][0]
        secondsdiff = (int(timestamp2[0:2])*3600+int(timestamp2[3:5])*60+int(timestamp2[6:8]))-(int(timestamp1[0:2])*3600+int(timestamp1[3:5])*60+int(timestamp1[6:8]))
        #fractions of second
        fraction1 = float("0"+timestamp1[8:15])
        fraction2 = float("0"+timestamp2[8:15])
        #delay, rounded through a 10-decimal string to tame float noise
        packetdelay=float("{0:.10f}".format(float(secondsdiff)+fraction2-fraction1))
        graph.leaves[node].data[packet][0] = packetdelay #change timestamp with delay
# Function that estimates the distances based on the link loss parameter
def EstimateDistancesLoss():
    """Fill the global EstDistMatrix with the loss-based distance metric.

    For each pair (i, j) of nodes currently in ``graph.nodes``:
    d(i,j) = log10(Xi*Xj / XiXj^2), where Xi is node i's packet-receipt
    fraction and XiXj the fraction of probes both nodes received. Only the
    upper triangle is computed; the matrix is mirrored for symmetry.
    Uses the module-level ``graph``, ``TotalProbes`` and ``EstDistMatrix``.
    """
    # At this point, graph.nodes = U (= source + destination nodes)
    NumberOfNodes = len(graph.nodes)
    # Matrix is symmetric -> We only need to traverse through upper triangular and then complete the symmetrical elements
    # Also, diagonal of the Matrix will be zero (by definition d(i,i) == 0)
    for i in range(NumberOfNodes):
        Xi = len(graph.nodes[i].data)/TotalProbes
        for j in range(i+1,NumberOfNodes):
            # How the distance metric is calculated can be seen in the provided documentation
            Xj = len(graph.nodes[j].data)/TotalProbes
            XiXj = len(set(graph.nodes[i].data)&set(graph.nodes[j].data))/TotalProbes
            distance = log10(Xi*Xj/XiXj**2)
            #Symmetric matrix
            EstDistMatrix[graph.nodes[i].id][graph.nodes[j].id] = distance
            EstDistMatrix[graph.nodes[j].id][graph.nodes[i].id] = distance
# Function that estimates the distances based on the link delay variance parameter
def EstimateDistancesDelayVar():
    """Fill the global EstDistMatrix with the delay-variance distance metric.

    For each pair (i, j): d(i,j) = Var(Ti) + Var(Tj) - 2*Cov(Ti, Tj), using
    sample variances (n-1 denominator) of the per-packet path delays and the
    covariance over the packets both nodes received. Only the upper triangle
    is computed, then mirrored. Uses the module-level ``graph`` and
    ``EstDistMatrix``.
    """
    # At this point, graph.nodes = U (= source + destination nodes)
    NumberOfNodes = len(graph.nodes)
    # Matrix is symmetric -> We only need to traverse through upper triangular and then complete the symmetrical elements
    # Also, diagonal of the Matrix will be zero (by definition d(i,i) == 0)
    for i in range(NumberOfNodes):
        meanTi = sum([graph.nodes[i].data[k][0] for k in range(len(graph.nodes[i].data))])/len(graph.nodes[i].data)
        for j in range(i+1,NumberOfNodes):
            # How the distance metric is calculated can be seen in the provided documentation
            meanTj = sum([graph.nodes[j].data[k][0] for k in range(len(graph.nodes[j].data))])/len(graph.nodes[j].data)
            # Compute the variances
            varTi = (sum([(graph.nodes[i].data[k][0]-meanTi)**2 for k in range(len(graph.nodes[i].data))]))/(len(graph.nodes[i].data)-1)
            varTj = (sum([(graph.nodes[j].data[k][0]-meanTj)**2 for k in range(len(graph.nodes[j].data))]))/(len(graph.nodes[j].data)-1)
            # Find Common ID between the 2 nodes' packets
            # (O(n*m) scan over both packet lists)
            CommonIDs = []
            for k1 in range(len(graph.nodes[i].data)):
                for k2 in range(len(graph.nodes[j].data)):
                    if (graph.nodes[i].data[k1][1] == graph.nodes[j].data[k2][1]):
                        CommonIDs.append(graph.nodes[i].data[k1][1])
            # Compute the covariance
            covTiTj = Covariance(i,j,CommonIDs,meanTi,meanTj)
            distance = varTi + varTj - 2*covTiTj
            # Symmetric matrix
            EstDistMatrix[graph.nodes[i].id][graph.nodes[j].id] = distance
            EstDistMatrix[graph.nodes[j].id][graph.nodes[i].id] = distance
"""
# Function that estimates the distances based on the link utilization parameter
def EstimateDistancesUtil():
# At this point, graph.nodes = U (= source + destination nodes)
NumberOfNodes = len(graph.nodes)
# Epsilon is a small value to acount for possible measurement noise, defined by user
epsilon = 0.00001
# Matrix is symmetric -> We only need to traverse through upper triangular and then complete the symmetrical elements
# Also, diagonal of the Matrix will be zero (by definition d(i,i) == 0)
for i in range(NumberOfNodes):
minTi = min([graph.nodes[i].data[k][0] for k in range(len(graph.nodes[i].data))])
YiPackets = [graph.nodes[i].data[k][1] for k in range(len(graph.nodes[i].data)) if (graph.nodes[i].data[k][0]-minTi <= epsilon)]
Yi = len(YiPackets)/TotalProbes
for j in range(i+1,NumberOfNodes):
# How the distance metric is calculated can be seen in the provided documentation
minTj = min([graph.nodes[j].data[k][0] for k in range(len(graph.nodes[j].data))])
YjPackets = [graph.nodes[j].data[k][1] for k in range(len(graph.nodes[j].data)) if (graph.nodes[j].data[k][0]-minTj <= epsilon)]
Yj = len(YjPackets)/TotalProbes
YiYj = len(set(YiPackets)&set(YjPackets))/TotalProbes
distance = log10(Yi*Yj/YiYj**2)
# Symmetric matrix
EstDistMatrix[graph.nodes[i].id][graph.nodes[j].id] = distance
EstDistMatrix[graph.nodes[j].id][graph.nodes[i].id] = distance
"""
# Function that computes the covariance of nodes i,j
def Covariance(i,j,CommonIDs,meanTi,meanTj):
    """Sample covariance of the path delays of nodes i and j.

    *CommonIDs* lists the packet IDs both nodes received; for each ID the
    matching records are located in ``graph.nodes[i].data`` and
    ``graph.nodes[j].data`` (resuming the scan from the previous match,
    which assumes IDs appear in the same order in both dumps) and the delay
    deviations from *meanTi*/*meanTj* are accumulated. Divides by
    len(CommonIDs)-1 (sample covariance).
    """
    # Initializations
    covar = 0
    pos1 = 0
    pos2 = 0
    length1 = len(graph.nodes[i].data)
    length2 = len(graph.nodes[j].data)
    for packetID in CommonIDs:
        #find position of packetID in node i
        for k1 in range(pos1,length1):
            if (graph.nodes[i].data[k1][1] == packetID):
                pos1=k1
                break
        #find position of packetID in node j
        for k2 in range(pos2,length2):
            if (graph.nodes[j].data[k2][1] == packetID):
                pos2=k2
                break
        covar += (graph.nodes[i].data[pos1][0]-meanTi)*(graph.nodes[j].data[pos2][0]-meanTj)
    covar = covar/(len(CommonIDs)-1)
    return covar
def EstimateScoreFunction():
    """Fill the global ScoreFunction matrix for all destination-node pairs.

    ρ(i,j) = (d(s,i) + d(s,j) - d(i,j)) / 2, read from the already-populated
    EstDistMatrix (row/column 0 is the source s). Only the upper triangle is
    computed, then mirrored. Uses the module-level ``graph``,
    ``EstDistMatrix`` and ``ScoreFunction``.
    """
    # At this point, graph.leaves = D (= destination nodes)
    NumberOfLeaves = len(graph.leaves)
    # Matrix is symmetric -> We only need to traverse through upper triangular and then complete the symmetrical elements
    # Also, diagonal of the Matrix will be equal to zero (we need pair of nodes)
    for i in range(NumberOfLeaves):
        for j in range(i+1,NumberOfLeaves):
            # Score Function is calulated like this:
            # ρ(i,j) = (d(s,i)+d(s,j)-d(i,j))/2
            score = (EstDistMatrix[0][graph.leaves[i].id] + EstDistMatrix[0][graph.leaves[j].id] - EstDistMatrix[graph.leaves[i].id][graph.leaves[j].id])/2
            #Symmetric matrix
            ScoreFunction[graph.leaves[i].id][graph.leaves[j].id] = score
            ScoreFunction[graph.leaves[j].id][graph.leaves[i].id] = score
# Function that calculates Δ(delta) so that the General Tree algorithm can be properly implemented
def CalculateDelta(inference_param=None):
    """Return the threshold Δ used by the RNJ sibling-grouping step (2.3).

    Δ should equal the minimum possible link length for the metric being
    inferred; the constants below are topology-dependent and may need tuning.

    Parameters:
        inference_param: 'loss' or 'delayvar'. When None (the default, which
            preserves the original no-argument call sites) the module-level
            ``param`` variable is used instead.

    Returns:
        float: the Δ threshold for the selected metric.

    Raises:
        ValueError: for any unsupported metric name. (Previously the
        unsupported case fell through ``else: pass`` and crashed with an
        unbound-local NameError on ``return delta``; link utilization is not
        measured by this script, so the main loop never reaches here with it.)
    """
    p = param if inference_param is None else inference_param
    if p == 'loss':
        # Should equal the minimum link length in terms of loss rate;
        # changes based on each topology.
        successrate = 0.9995
        delta = -log10(successrate)
    elif p == 'delayvar':
        # Minimum link length in terms of delay variance (topology-dependent).
        delta = 0.000001
    else:
        raise ValueError("Unsupported inference parameter: %r" % (p,))
    return delta
# Function that visualizes the discovered topology/tree in a .png file
def DrawTopology(param):
    """Render the inferred tree to 'Results/<param>.png' via pydot.

    Walks ``graph.nodes`` in reverse insertion order and adds one undirected
    edge per parent->child link. *param* only names the output file.
    """
    #Create Graph
    G = pydot.Dot(graph_type='graph') #G =nx.Graph()
    for i in range(len(graph.nodes)-1,-1,-1):
        for j in range(len(graph.nodes[i].children)):
            edge = pydot.Edge(graph.nodes[i].id,graph.nodes[i].children[j].id)
            G.add_edge(edge)
    #Draw Graph with desired Parameters
    G.write_png('Results/'+param+'.png')
# Function that writes the results for each inference parameter in a .csv file
def ExtractResults(param):
    """Write per-link results for *param* into a CSV under 'Results/'.

    For 'loss', the stored distance is converted back to a success rate via
    10**(-d); for 'delayvar' the raw variance is written. The link for node i
    is identified by i's id (edge from its father). The utilization branch is
    intentionally disabled (kept as a string literal) and does nothing.
    Reads the module-level ``graph`` and ``EstDistMatrix``.
    """
    # Success/Loss Rate of each Link
    if (param == 'loss'):
        with open('Results/Loss.csv', 'w') as csvfile:
            filewriter = csv.writer(csvfile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL)
            filewriter.writerow(['Link', 'Success Rate'])
            for i in range(1,len(graph.nodes)):
                SuccessRate = EstDistMatrix[graph.nodes[i].father.id][graph.nodes[i].id]
                SuccessRate = 10**(-SuccessRate)
                filewriter.writerow([graph.nodes[i].id,SuccessRate])
    # Delay Variance of each Link
    elif (param == 'delayvar'):
        with open('Results/DelayVariance.csv', 'w') as csvfile:
            filewriter = csv.writer(csvfile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL)
            filewriter.writerow(['Link', 'Delay Variance'])
            for i in range(1,len(graph.nodes)):
                #LinkDelayVar = sqrt(EstDistMatrix[graph.nodes[i].father.id][graph.nodes[i].id]) ###If I want the Standard Deviation instead of Variance
                LinkDelayVar = EstDistMatrix[graph.nodes[i].father.id][graph.nodes[i].id]
                filewriter.writerow([graph.nodes[i].id,LinkDelayVar])
    # Utilization of each LinkUtil
    else:
        """
        with open('Results/Utilization.csv', 'w') as csvfile:
            filewriter = csv.writer(csvfile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL)
            filewriter.writerow(['Link', 'Utilization'])
            for i in range(1,len(graph.nodes)):
                LinkUtil = EstDistMatrix[graph.nodes[i].father.id][graph.nodes[i].id]
                LinkUtil = 10**(-LinkUtil)
                filewriter.writerow([graph.nodes[i].id,LinkUtil])
        """
        pass
### Start of Script ###
# input: Destination Nodes' IDs (Leaves) are given in the DstNodes.txt file
# Create a list with all the Destination Nodes=
DstNodes = [line.rstrip('\n').split(' ') for line in open('DstNodes.txt')]
DstNodes = list(map(int,DstNodes[0]))
# All the inference parameters we want to measure
inferparams = ['loss','delayvar','utilization']
# Perform the algorithm for each inference parameter in the inferparams list
for param in inferparams:
    # Initial Graph Creation
    # V = {s} : only source node initially on graph
    # E = { } : no edges created initially
    graph = Graph()
    #creation of source node
    node = Node()
    node.id = 0 #node ID of root is 0
    graph.root = node
    graph.nodes.append(graph.root)
    # Destination Nodes and Graph leaves are the same
    # So we create the graph leaves (without any edges yet) to be able to extract the tcpdumps correctly
    for i in range(len(DstNodes)):
        node = Node()
        node.id = DstNodes[i]
        graph.nodes.append(node)
        graph.leaves.append(node)
    ######### Algorithm: Rooted Neighbor-Joining (RNJ) Algorithm for Binary Trees #########
    #We don't know number of nodes, so we start giving ID numbers to new nodes, starting from max ID of the existing Destination nodes
    # NOTE(review): node ids index the 200x200 matrices below — this assumes
    # max(DstNodes) plus the internal nodes created stays under 200; confirm.
    FreeID = max(DstNodes) + 1
    #Get the tcpdumps for the root node and the leaves
    if (param == 'loss'):
        GetLinkLossDumps()
    elif (param == 'delayvar'):
        GetLinkDelayDumps()
    else:
        # Utilization is deliberately not measured: this 'break' ends the
        # whole parameter loop before any work is done for it.
        break #delete if you want to measure link utilization too
        pass #GetLinkDelayDumps() used also for utilization
    #Total Probes are equal to the probes sent from the source
    TotalProbes = len(graph.root.data)
    # Estimated Distance Matrix, default size = up to 200 nodes topology
    # Holds the distance metric values for each path,
    # (i,j) element -> Distance metric of path from node i to node j, d(i,j)
    EstDistMatrix = np.zeros((200,200),dtype='f')
    #Create the Estimate Distances Matrix
    if (param == 'loss'):
        EstimateDistancesLoss()
    elif(param == 'delayvar'):
        EstimateDistancesDelayVar()
    else:
        pass #EstimateDistancesUtil() used
    # Step 1
    # Score Function matrix, default size = up to 200 nodes topology (keep same with Estimated Distance matrix)
    # Hold the score function for each pair of nodes i,j
    # (i,j) element -> distance metric for pair of nodes i,j, ρ(i,j)
    ScoreFunction = np.zeros((200,200),dtype='f')
    EstimateScoreFunction()
    # necessary to start the algorithm correctly, normally we shouldn't append destination nodes upon creation but it helped the tcpdumps function
    graph.nodes = []
    graph.nodes.append(graph.root)
    # Step 2.1
    while (len(graph.leaves) != 1):
        # Find i*,j* in D with the largest ScoreFunction (tie is broken arbitrarily as we only take the first occurence)
        NumberOfLeaves = len(graph.leaves)
        # max initialization
        maxScore = 0
        Istar=Jstar = 0
        # find the max score
        for i in range(NumberOfLeaves):
            for j in range(i+1,NumberOfLeaves):
                if (ScoreFunction[graph.leaves[i].id][graph.leaves[j].id] >= maxScore):
                    maxScore = ScoreFunction[graph.leaves[i].id][graph.leaves[j].id]
                    Istar = graph.leaves[i].id
                    Jstar = graph.leaves[j].id
        #Create a node f as parent of i* and j*
        FatherNode = Node()
        FatherNode.id = FreeID
        FreeID += 1
        # D = D \ {i*,j*}
        # V = V U {i*,j*} , E = E U {(f,i*),(f,j*)}
        for i in range(len(graph.leaves)): # for i*
            if (graph.leaves[i].id == Istar):
                graph.nodes.append(graph.leaves[i]) # V = V U {i*}
                graph.leaves[i].father = FatherNode # E U {(f,i*)}
                FatherNode.children.append(graph.leaves[i]) # E U {(f,i*)}
                del graph.leaves[i] # D = D \ {i*}
                break
        for i in range(len(graph.leaves)): # for j*
            if (graph.leaves[i].id == Jstar):
                graph.nodes.append(graph.leaves[i]) # V = V U {j*}
                graph.leaves[i].father = FatherNode # E U {(f,j*)}
                FatherNode.children.append(graph.leaves[i]) # E U {(f,i*)}
                del graph.leaves[i] # D = D \ {j*}
                break
        # Step 2.2
        # d(s,f) = ρ(i*,j*)
        EstDistMatrix[0][FatherNode.id] = ScoreFunction[Istar][Jstar]
        EstDistMatrix[FatherNode.id][0] = EstDistMatrix[0][FatherNode.id] # SYMMETRY
        # d(f,i*) = d(s,i*) - ρ(i*,j*)
        EstDistMatrix[FatherNode.id][Istar] = EstDistMatrix[0][Istar] - ScoreFunction[Istar][Jstar]
        EstDistMatrix[Istar][FatherNode.id] = EstDistMatrix[FatherNode.id][Istar] # SYMMETRY
        # d(f,j*) = d(s,j*) - ρ(i*,j*)
        EstDistMatrix[FatherNode.id][Jstar] = EstDistMatrix[0][Jstar] - ScoreFunction[Istar][Jstar]
        EstDistMatrix[Jstar][FatherNode.id] = EstDistMatrix[FatherNode.id][Jstar] # SYMMETRY
        # Step 2.3
        # In this step we find if there are more than 2 siblings (if Istar,Jstar nodes have another sibling)
        #Calculate Δ based on the link parameter that is inferred
        delta = CalculateDelta()
        # For every k in D such that ρ(i*,j*) - ρ(i*,k) <= Δ/2:
        LeavesToDel = []
        for k in range (len(graph.leaves)):
            SiblID = graph.leaves[k].id # SiblID = node k ID
            if (ScoreFunction[Istar][Jstar] - ScoreFunction[Istar][SiblID] <= delta/2):
                # d(f,k) = d(s,k) - ρ(i*,j*)
                EstDistMatrix[FatherNode.id][SiblID] = EstDistMatrix[0][SiblID] - ScoreFunction[Istar][Jstar]
                EstDistMatrix[SiblID][FatherNode.id] = EstDistMatrix[FatherNode.id][SiblID] #SYMMETRY
                # D = D \ {k}
                # V = V U {k} , E = E U {(f,k)}
                graph.nodes.append(graph.leaves[k]) # V = V U {k}
                graph.leaves[k].father = FatherNode # E U {(f,k)}
                FatherNode.children.append(graph.leaves[k]) # E U {(f,k)}
                LeavesToDel.append(k) # D = D \ {k}, store the values to del together
        #create a temporary list that will have all the graph.leaves nodes besides those that we want to delete from step 2.3
        temp = []
        for i in range(len(graph.leaves)):
            if i not in LeavesToDel:
                temp.append(graph.leaves[i])
        graph.leaves = temp
        # Step 2.4
        # Update distances and scores of every remaining leaf w.r.t. the new
        # internal node f before it joins the working set D.
        for k in range(len(graph.leaves)):
            # d(k,f) = 1/2[d(k,i*)-d(f,i*)] + 1/2[d(k,j*)-d(f,j*)]
            EstDistMatrix[graph.leaves[k].id][FatherNode.id] = 0.5*(EstDistMatrix[graph.leaves[k].id][Istar]-EstDistMatrix[FatherNode.id][Istar]) + 0.5*(EstDistMatrix[graph.leaves[k].id][Jstar]-EstDistMatrix[FatherNode.id][Jstar])
            EstDistMatrix[FatherNode.id][graph.leaves[k].id] = EstDistMatrix[graph.leaves[k].id][FatherNode.id] #SYMMETRY
            # ρ(k,f) = 1/2[ρ(k,i*)+ρ(k,j*)]
            ScoreFunction[graph.leaves[k].id][FatherNode.id] = 0.5*(ScoreFunction[graph.leaves[k].id][Istar]+ScoreFunction[graph.leaves[k].id][Jstar])
            ScoreFunction[FatherNode.id][graph.leaves[k].id] = ScoreFunction[graph.leaves[k].id][FatherNode.id] # SYMMETRY
        # D = D U f
        graph.leaves.append(FatherNode)
    # If |D| = 1, for the i in D: V = V U {i} , E = E U (s,i)
    graph.nodes.append(graph.leaves[0])
    graph.leaves[0].father = graph.root
    graph.root.children = [graph.leaves[0]]
    # Draw the Topology produced by Tomography
    # variable "param" is used to draw the topology based on the specific inference parameter each time
    DrawTopology(param)
    # Write the results for each inference parameter performed in a csv file
    ExtractResults(param)
| 2.78125 | 3 |
tests/test_ytu.py | yaph/ytu | 0 | 12771296 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import ytu
def test_is_youtube():
    # Each case pairs an input URL with the expected boolean verdict.
    cases = [
        ('http://youtu.be/zoLVUxKCWhY', True),
        ('http://www.youtube.com/watch?v=VvRC0wxM-yM', True),
        ('http://wwwwwwyoutube.com/watch?v=VvRC0wxM-yM', False),
        ('http://example.com/zoLVUxKCWhY', False)
    ]
    for url, expected in cases:
        assert ytu.is_youtube(url) is expected
def test_video_id():
    # All these URLs occurred in the reddit submission corpus.
    # Each case pairs a URL with the expected video ID (None when no valid
    # ID can be extracted).
    cases = [
        ('http://youtu.be/zoLVUxKCWhY', 'zoLVUxKCWhY'),
        ('http://www.youtube.com/watch?v=VvRC0wxM-yM', 'VvRC0wxM-yM'),
        ('http://www.youtube.com/watch?v=thsc60UTUIE&feature=youtu.be', 'thsc60UTUIE'),
        ('http://www.youtube.com/watch?v=a3asbkY0tTE?', 'a3asbkY0tTE'),
        ('http://www.youtube.com/watch?v=oHg5SJYRHA0???', 'oHg5SJYRHA0'),
        ('http://www.youtube.com/watch?v=55jUNNPT1eMads/4/NaQOUKyR9CY', '55jUNNPT1eM'),
        ('https://www.youtube.com/verify_age?next_url=http%3A//www.youtube.com/watch%3Fv%3DGqj1N9qeWXI%26feature%3Dmfu_in_order%26list%3DUL', 'Gqj1N9qeWXI'),
        ('https://www.youtube.com//watch?v=PQGrIsYUm4c', 'PQGrIsYUm4c'),  # 2 leading slashes in path
        ('https://www.youtube.com/v/j4FNGsNY3nI&amp;rel=0&amp;egm=0&amp;showinfo=0&amp;fs=1', 'j4FNGsNY3nI'),
        ('https://www.youtube.com/embed/mGnyH-SCZpM?autoplay=1&hd=1&KeepThis=true&TB_iframe=true&height=370&width=640?autoplay=1&hd=1', 'mGnyH-SCZpM'),
        ('https://www.youtube.com/verify_age?&next_url=/watch%3Fv%3DsTPsFIsxM3w', 'sTPsFIsxM3w'),
        ('https://www.youtube.com/attribution_link?a=qbb_5VvcvY8&u=%2Fwatch%3Fv%3DFgFeVlw2Ywg%26feature%3Dshare', 'FgFeVlw2Ywg'),
        ('https://www.youtube.com/attribution_link?a=ar77oUQIEOcNs-Wdao4XJw&u=%2Fwatch%3Fv%3D0eXS1NI6Q6Y%26feature%3Dshare', '0eXS1NI6Q6Y'),
        ('https://www.youtube.com/?v=_RSaYVgd7yk', None),
        ('https://www.youtube.com/watch?v=U3M8pXZusQ', None)
    ]
    for url, expected in cases:
        assert ytu.video_id(url) == expected
| 2.625 | 3 |
alipay/aop/api/response/AlipayOpenServicemarketCommodityQueryResponse.py | snowxmas/alipay-sdk-python-all | 213 | 12771297 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayOpenServicemarketCommodityQueryResponse(AlipayResponse):
    """Response model for alipay.open.servicemarket.commodity.query.

    Exposes one property per payload field; parse_response_content()
    copies every known field present in the decoded response onto this
    object via its property setter.
    """

    def __init__(self):
        super(AlipayOpenServicemarketCommodityQueryResponse, self).__init__()
        self._app_hot_logo = None
        self._audit_memo = None
        self._authorization_file = None
        self._biz_type_code = None
        self._category_code = None
        self._category_id = None
        self._commodity_affiliation = None
        self._commodity_id = None
        self._contactor = None
        self._create_date = None
        self._log_url = None
        self._mobile_visiturl = None
        self._name = None
        self._phone = None
        self._status = None
        self._sub_status = None
        self._subtitle = None
        self._test_detail = None
        self._test_report = None
        self._title = None
        self._user_guide = None
        self._user_id = None

    @property
    def app_hot_logo(self):
        return self._app_hot_logo

    @app_hot_logo.setter
    def app_hot_logo(self, value):
        self._app_hot_logo = value

    @property
    def audit_memo(self):
        return self._audit_memo

    @audit_memo.setter
    def audit_memo(self, value):
        self._audit_memo = value

    @property
    def authorization_file(self):
        return self._authorization_file

    @authorization_file.setter
    def authorization_file(self, value):
        self._authorization_file = value

    @property
    def biz_type_code(self):
        return self._biz_type_code

    @biz_type_code.setter
    def biz_type_code(self, value):
        self._biz_type_code = value

    @property
    def category_code(self):
        return self._category_code

    @category_code.setter
    def category_code(self, value):
        self._category_code = value

    @property
    def category_id(self):
        return self._category_id

    @category_id.setter
    def category_id(self, value):
        self._category_id = value

    @property
    def commodity_affiliation(self):
        return self._commodity_affiliation

    @commodity_affiliation.setter
    def commodity_affiliation(self, value):
        self._commodity_affiliation = value

    @property
    def commodity_id(self):
        return self._commodity_id

    @commodity_id.setter
    def commodity_id(self, value):
        self._commodity_id = value

    @property
    def contactor(self):
        return self._contactor

    @contactor.setter
    def contactor(self, value):
        self._contactor = value

    @property
    def create_date(self):
        return self._create_date

    @create_date.setter
    def create_date(self, value):
        self._create_date = value

    @property
    def log_url(self):
        return self._log_url

    @log_url.setter
    def log_url(self, value):
        self._log_url = value

    @property
    def mobile_visiturl(self):
        return self._mobile_visiturl

    @mobile_visiturl.setter
    def mobile_visiturl(self, value):
        self._mobile_visiturl = value

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    @property
    def phone(self):
        return self._phone

    @phone.setter
    def phone(self, value):
        self._phone = value

    @property
    def status(self):
        return self._status

    @status.setter
    def status(self, value):
        self._status = value

    @property
    def sub_status(self):
        return self._sub_status

    @sub_status.setter
    def sub_status(self, value):
        self._sub_status = value

    @property
    def subtitle(self):
        return self._subtitle

    @subtitle.setter
    def subtitle(self, value):
        self._subtitle = value

    @property
    def test_detail(self):
        return self._test_detail

    @test_detail.setter
    def test_detail(self, value):
        self._test_detail = value

    @property
    def test_report(self):
        return self._test_report

    @test_report.setter
    def test_report(self, value):
        self._test_report = value

    @property
    def title(self):
        return self._title

    @title.setter
    def title(self, value):
        self._title = value

    @property
    def user_guide(self):
        return self._user_guide

    @user_guide.setter
    def user_guide(self, value):
        self._user_guide = value

    @property
    def user_id(self):
        return self._user_id

    @user_id.setter
    def user_id(self, value):
        self._user_id = value

    def parse_response_content(self, response_content):
        """Decode the raw response and assign each field found in it.

        Replaces the original 22 repeated ``if key in response`` blocks
        with one loop; setattr() routes through the property setters
        exactly as the explicit assignments did, in the same order.
        """
        response = super(AlipayOpenServicemarketCommodityQueryResponse, self).parse_response_content(response_content)
        for field in (
            'app_hot_logo', 'audit_memo', 'authorization_file', 'biz_type_code',
            'category_code', 'category_id', 'commodity_affiliation', 'commodity_id',
            'contactor', 'create_date', 'log_url', 'mobile_visiturl', 'name',
            'phone', 'status', 'sub_status', 'subtitle', 'test_detail',
            'test_report', 'title', 'user_guide', 'user_id',
        ):
            if field in response:
                setattr(self, field, response[field])
| 1.859375 | 2 |
layerserver/migrations/0019_geojsonlayer_permissions.py | aroiginfraplan/giscube-admin | 5 | 12771298 | <filename>layerserver/migrations/0019_geojsonlayer_permissions.py
# Generated by Django 2.2.11 on 2020-05-06 05:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add per-user and per-group view permissions for GeoJsonLayer,
    plus an anonymous_view flag on the layer itself."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('auth', '0011_update_proxy_permissions'),
        ('giscube', '0016_datasetmetadata'),
        ('layerserver', '0018_auto_20200318_0727'),
    ]
    operations = [
        # One row per (user, layer): whether that user can view the layer.
        migrations.CreateModel(
            name='GeoJsonLayerUserPermission',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('can_view', models.BooleanField(default=True, verbose_name='Can view')),
                ('layer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_permissions', to='layerserver.GeoJsonLayer')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='User')),
            ],
            options={
                'verbose_name': 'User',
                'verbose_name_plural': 'Users',
            },
        ),
        # Same, but granted to an auth Group instead of a single user.
        migrations.CreateModel(
            name='GeoJsonLayerGroupPermission',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('can_view', models.BooleanField(default=True, verbose_name='Can view')),
                ('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auth.Group', verbose_name='Group')),
                ('layer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='group_permissions', to='layerserver.GeoJsonLayer')),
            ],
            options={
                'verbose_name': 'Group',
                'verbose_name_plural': 'Groups',
            },
        ),
        # Whether anonymous (unauthenticated) visitors may view the layer.
        migrations.AddField(
            model_name='geojsonlayer',
            name='anonymous_view',
            field=models.BooleanField(default=False, verbose_name='Can view'),
        ),
    ]
| 1.742188 | 2 |
controllers/mobile.py | whanderley/eden | 27 | 12771299 | <filename>controllers/mobile.py
# -*- coding: utf-8 -*-
"""
Mobile Forms - Controllers
"""
# Name of the current controller, supplied by the web2py execution environment.
module = request.controller
# -----------------------------------------------------------------------------
def forms():
    """
    Controller to download a list of available forms (JSON only),
    optionally filtered by a master key.
    """
    # Guard clauses: error() raises HTTP, so execution never continues past it.
    if request.env.request_method != "GET":
        error(405, "Unsupported request method")
    if auth.permission.format != "json":
        error(415, "Invalid request format")
    if settings.get_mobile_masterkey_filter():
        # Filtering the form list by master key is mandatory in this mode.
        masterkey_id = 0
        # The client may send a master key UUID in GET vars...
        masterkey_uid = request.get_vars.get("mkuid")
        if masterkey_uid:
            table = s3db.auth_masterkey
            row = db(table.uuid == masterkey_uid).select(table.id,
                                                         limitby = (0, 1),
                                                         ).first()
            if row:
                masterkey_id = row.id
        # ...or alternatively authenticate with the expected master key.
        elif auth.s3_logged_in() and auth.user and auth.user.masterkey_id:
            masterkey_id = auth.user.masterkey_id
    else:
        # No master-key filtering of the form list.
        masterkey_id = None
    response.headers["Content-Type"] = "application/json"
    return s3base.S3MobileFormList(masterkey_id=masterkey_id).json()
# -----------------------------------------------------------------------------
def error(status, message):
    """
    Abort a non-interactive request with an HTTP error status.

    @param status: the HTTP status code
    @param message: the error message
    """
    current.log.error(message)
    raise HTTP(status,
               body = message,
               web2py_error = message,
               **{"Content-Type": "text/plain"})
# END =========================================================================
| 2.390625 | 2 |
mobile_insight/monitor/dm_collector/__init__.py | unknownhandX/mobileinsight-core | 0 | 12771300 | # -*- coding: utf-8 -*-
# Public package surface; "P4A" lines are presumably stripped by the
# python-for-android packaging step -- TODO confirm with the build scripts.
__all__ = [
    "DMCollector",  # P4A: THIS LINE WILL BE DELETED ###
    "FormatError",
    "DMLogPacket",
    "dm_collector_c"
]
from dm_collector import DMCollector  # P4A: THIS LINE WILL BE DELETED ###
from dm_endec import DMLogPacket, FormatError
import dm_collector_c
| 1.523438 | 2 |
test/unit/test_adapter.py | NielsZeilemaker/dbt-spark | 0 | 12771301 | import mock
import unittest
import dbt.adapters
import dbt.flags as flags
from pyhive import hive
from dbt.adapters.spark import SparkAdapter
import agate
from .utils import config_from_parts_or_dicts, inject_adapter
class TestSparkAdapter(unittest.TestCase):
    """Unit tests for SparkAdapter connection setup (http and thrift methods).

    pyhive's connect entry points are patched so no network is touched; the
    assertions inspect the transport/arguments the adapter would have used.
    """

    def setUp(self):
        flags.STRICT_MODE = True
        # Minimal dbt project config shared by both connection tests.
        self.project_cfg = {
            'name': 'X',
            'version': '0.1',
            'profile': 'test',
            'project-root': '/tmp/dbt/does-not-exist',
            'quoting': {
                'identifier': False,
                'schema': False,
            }
        }
    def get_target_http(self, project):
        # Profile for the Databricks-style HTTP transport (token + cluster id).
        return config_from_parts_or_dicts(project, {
            'outputs': {
                'test': {
                    'type': 'spark',
                    'method': 'http',
                    'schema': 'analytics',
                    'host': 'myorg.sparkhost.com',
                    'port': 443,
                    'token': '<PASSWORD>',
                    'cluster': '01234-23423-coffeetime',
                }
            },
            'target': 'test'
        })
    def get_target_thrift(self, project):
        # Profile for a plain Thrift connection (host/port/user).
        return config_from_parts_or_dicts(project, {
            'outputs': {
                'test': {
                    'type': 'spark',
                    'method': 'thrift',
                    'schema': 'analytics',
                    'host': 'myorg.sparkhost.com',
                    'port': 10001,
                    'user': 'dbt'
                }
            },
            'target': 'test'
        })
    def test_http_connection(self):
        config = self.get_target_http(self.project_cfg)
        adapter = SparkAdapter(config)
        # Replacement for hive.connect: assert on the transport it receives.
        def hive_http_connect(thrift_transport):
            self.assertEqual(thrift_transport.scheme, 'https')
            self.assertEqual(thrift_transport.port, 443)
            self.assertEqual(thrift_transport.host, 'myorg.sparkhost.com')
            self.assertEqual(thrift_transport.path, '/sql/protocolv1/o/0/01234-23423-coffeetime')
        with mock.patch.object(hive, 'connect', new=hive_http_connect):
            connection = adapter.acquire_connection('dummy')
            self.assertEqual(connection.state, 'open')
            self.assertNotEqual(connection.handle, None)
    def test_thrift_connection(self):
        config = self.get_target_thrift(self.project_cfg)
        adapter = SparkAdapter(config)
        # Replacement for hive.connect: assert on the connect arguments.
        def hive_thrift_connect(host, port, username):
            self.assertEqual(host, 'myorg.sparkhost.com')
            self.assertEqual(port, 10001)
            self.assertEqual(username, 'dbt')
        with mock.patch.object(hive, 'connect', new=hive_thrift_connect):
            connection = adapter.acquire_connection('dummy')
            self.assertEqual(connection.state, 'open')
            self.assertNotEqual(connection.handle, None)
| 2.1875 | 2 |
sieve_common/k8s_event.py | OmerKahani/sieve | 0 | 12771302 | <filename>sieve_common/k8s_event.py
import json
from typing import Dict, List, Optional, Set, Tuple, Union
from sieve_common.event_delta import diff_event, conflicting_event_payload
from sieve_common.default_config import sieve_config
from controllers import deployment_name
# Analysis feature flags; their semantics live in the passes that consume them.
HEAR_READ_FILTER_FLAG = True
ERROR_MSG_FILTER_FLAG = True
# flags for time travel only
DELETE_ONLY_FILTER_FLAG = True
DELETE_THEN_RECREATE_FLAG = True
# flags for obs gap only
CANCELLABLE_FLAG = True
# flags for atom vio only
READ_BEFORE_WRITE_FLAG = True
# NOTE(review): looks like "NoError" is the only error value treated as benign
# -- confirm with consumers of ALLOWED_ERROR_TYPE.
ALLOWED_ERROR_TYPE = ["NoError"]
# Marker substrings written into controller logs; the parse_* helpers below
# locate these to recover structured events from log lines.
SIEVE_BEFORE_HEAR_MARK = "[SIEVE-BEFORE-HEAR]"
SIEVE_AFTER_HEAR_MARK = "[SIEVE-AFTER-HEAR]"
SIEVE_BEFORE_WRITE_MARK = "[SIEVE-BEFORE-WRITE]"
SIEVE_AFTER_WRITE_MARK = "[SIEVE-AFTER-WRITE]"
SIEVE_AFTER_READ_MARK = "[SIEVE-AFTER-READ]"
SIEVE_BEFORE_RECONCILE_MARK = "[SIEVE-BEFORE-RECONCILE]"
SIEVE_AFTER_RECONCILE_MARK = "[SIEVE-AFTER-RECONCILE]"
SIEVE_API_EVENT_MARK = "[SIEVE-API-EVENT]"
# Sentinel used before a real event type is known (see prev_etype defaults).
EVENT_NONE_TYPE = "NONE_TYPE"
class APIEventTypes:
    """Event type strings as reported by the Kubernetes watch API."""
    ADDED = "ADDED"
    MODIFIED = "MODIFIED"
    DELETED = "DELETED"
class OperatorHearTypes:
    """Event types observed ("heard") by the operator's informers."""
    ADDED = "Added"
    UPDATED = "Updated"
    DELETED = "Deleted"
    REPLACED = "Replaced"  # Replaced is emitted when we encountered watch errors and had to do a relist
    SYNC = "Sync"  # Sync is for synthetic events during a periodic resync
class OperatorWriteTypes:
    """Write operations the operator can issue against the apiserver."""
    CREATE = "Create"
    UPDATE = "Update"
    DELETE = "Delete"
    DELETEALLOF = "DeleteAllOf"
    PATCH = "Patch"
    STATUS_UPDATE = "StatusUpdate"
    STATUS_PATCH = "StatusPatch"
# We do not include Sync and Replaced here
# (only real state changes are considered detectable).
detectable_operator_hear_types = [
    OperatorHearTypes.ADDED,
    OperatorHearTypes.UPDATED,
    OperatorHearTypes.DELETED,
]
# DELETEALLOF is excluded (see the assert in OperatorWrite.__init__).
detectable_operator_write_types = [
    OperatorWriteTypes.CREATE,
    OperatorWriteTypes.UPDATE,
    OperatorWriteTypes.DELETE,
    OperatorWriteTypes.PATCH,
    OperatorWriteTypes.STATUS_UPDATE,
    OperatorWriteTypes.STATUS_PATCH,
]
def consistent_event_type(operator_hear_type: str, operator_write_type: str):
    """Return True when a hear type and a write type describe the same
    kind of change (create/create, update/any-update-flavor, delete/delete)."""
    if operator_hear_type == OperatorHearTypes.ADDED:
        return operator_write_type == OperatorWriteTypes.CREATE
    if operator_hear_type == OperatorHearTypes.UPDATED:
        # All four write flavors surface as an Updated hear event.
        return operator_write_type in (
            OperatorWriteTypes.UPDATE,
            OperatorWriteTypes.PATCH,
            OperatorWriteTypes.STATUS_UPDATE,
            OperatorWriteTypes.STATUS_PATCH,
        )
    if operator_hear_type == OperatorHearTypes.DELETED:
        return operator_write_type == OperatorWriteTypes.DELETE
    return False
def conflicting_event_type(prev_operator_hear_type: str, cur_operator_hear_type: str):
    """True when exactly one of two consecutive hear types is DELETED
    (i.e. the deletion status flips between the events)."""
    prev_is_delete = prev_operator_hear_type == OperatorHearTypes.DELETED
    cur_is_delete = cur_operator_hear_type == OperatorHearTypes.DELETED
    # XOR: other-then-delete or delete-then-other.
    return prev_is_delete != cur_is_delete
def extract_uid(obj: Dict):
    """Return obj["metadata"]["uid"], or None when the uid is absent."""
    assert "metadata" in obj, "missing metadata in: " + str(obj)
    return obj["metadata"].get("uid", None)
def extract_namespace_name(obj: Dict):
    """Return (namespace, name) from obj's metadata.

    Metadata sometimes lacks a "namespace" field; in that case the
    configured default namespace from sieve_config is used.
    """
    assert "metadata" in obj, "missing metadata in: " + str(obj)
    metadata = obj["metadata"]
    if "namespace" in metadata:
        namespace = metadata["namespace"]
    else:
        # Only touch sieve_config when the field is actually missing.
        namespace = sieve_config["namespace"]
    return namespace, metadata["name"]
def extract_generate_name(obj: Dict):
    """Return the generateName of obj, read from metadata when present,
    otherwise from the top level; None when absent in either place."""
    container = obj["metadata"] if "metadata" in obj else obj
    return container.get("generateName", None)
def operator_related_resource(
    project: str, rtype: str, name: str, obj: Dict, taint_list: List[Tuple[str, str]]
):
    """True when the resource belongs to the operator deployment for this
    project, or any of its ownerReferences matches a tainted (kind, name).

    NOTE: rtype is currently unused but kept for interface stability.
    """
    if deployment_name[project] in name:
        return True
    # Some objects carry metadata nested, others are the metadata itself.
    metadata = obj.get("metadata", obj)
    for owner in metadata.get("ownerReferences", []):
        owner_kind = owner["kind"].lower()
        for taint_kind, taint_name in taint_list:
            if owner_kind == taint_kind and owner["name"] == taint_name:
                return True
    return False
def is_generated_random_name(name: str, generate_name: str):
    """True when name equals generate_name plus a 5-character suffix,
    i.e. it looks like a Kubernetes generateName-derived name."""
    suffix_len = len(name) - len(generate_name)
    return suffix_len == 5 and name.startswith(generate_name)
def generate_key(resource_type: str, namespace: str, name: str):
    """Build the canonical "rtype/namespace/name" key used across sieve."""
    return f"{resource_type}/{namespace}/{name}"
def api_key_to_rtype_namespace_name(api_key):
    """Split an apiserver key path into (resource_type, namespace, name).

    Handles the special "services/endpoints" and "services/specs" layouts,
    and strips the plural "s" from ordinary resource collection names.
    """
    tokens = api_key.split("/")
    assert len(tokens) >= 4
    namespace, name = tokens[-2], tokens[-1]
    parent, collection = tokens[-4], tokens[-3]
    if parent == "services" and collection == "endpoints":
        rtype = "endpoints"
    elif parent == "services" and collection == "specs":
        rtype = "service"
    elif collection.endswith("s"):
        rtype = collection[:-1]
    else:
        rtype = collection
    return rtype, namespace, name
class APIEvent:
    """One apiserver-side event parsed from a SIEVE-API-EVENT log line.

    Read-only view: every attribute is exposed as a setter-less property.
    """

    def __init__(self, etype: str, key: str, obj_str: str):
        self.__etype = etype
        self.__key = key
        assert key.startswith("/")
        # Derive (rtype, namespace, name) from the apiserver key path.
        self.__rtype, self.__namespace, self.__name = api_key_to_rtype_namespace_name(
            key
        )
        self.__obj_str = obj_str
        # Parsed JSON form of the object payload.
        self.__obj_map = json.loads(obj_str)
    @property
    def etype(self):
        return self.__etype
    @property
    def key(self):
        return self.__key
    @property
    def rtype(self):
        return self.__rtype
    @property
    def namespace(self):
        return self.__namespace
    @property
    def name(self):
        return self.__name
    @property
    def obj_str(self):
        return self.__obj_str
    @property
    def obj_map(self):
        return self.__obj_map
class OperatorHear:
    """One event received ("heard") by the operator, parsed from the trace.

    Timestamps default to -1 until filled in by the analysis; mutable
    analysis state (slim diffs, cancellation set, signature counter) is
    exposed through property setters.
    """

    def __init__(self, id: str, etype: str, rtype: str, obj_str: str):
        self.__id = int(id)
        self.__etype = etype
        self.__rtype = rtype
        self.__obj_str = obj_str
        self.__obj_map = json.loads(obj_str)
        self.__namespace, self.__name = extract_namespace_name(self.obj_map)
        # -1 means "not yet assigned" for both timestamps.
        self.__start_timestamp = -1
        self.__end_timestamp = -1
        self.__key = generate_key(self.rtype, self.namespace, self.name)
        # Slim diffs against the previous state; computed later by analysis.
        self.__slim_prev_obj_map = None
        self.__slim_cur_obj_map = None
        self.__prev_etype = EVENT_NONE_TYPE
        self.__cancelled_by = set()
        self.__signature_counter = 1
    @property
    def id(self):
        return self.__id
    @property
    def etype(self):
        return self.__etype
    @property
    def rtype(self):
        return self.__rtype
    @property
    def obj_str(self):
        return self.__obj_str
    @property
    def obj_map(self):
        return self.__obj_map
    @property
    def namespace(self):
        return self.__namespace
    @property
    def name(self):
        return self.__name
    @property
    def start_timestamp(self):
        return self.__start_timestamp
    @property
    def end_timestamp(self):
        return self.__end_timestamp
    @property
    def key(self):
        return self.__key
    @property
    def slim_prev_obj_map(self):
        return self.__slim_prev_obj_map
    @property
    def slim_cur_obj_map(self):
        return self.__slim_cur_obj_map
    @property
    def prev_etype(self):
        return self.__prev_etype
    @property
    def cancelled_by(self):
        return self.__cancelled_by
    @property
    def signature_counter(self):
        return self.__signature_counter
    @start_timestamp.setter
    def start_timestamp(self, start_timestamp: int):
        self.__start_timestamp = start_timestamp
    @end_timestamp.setter
    def end_timestamp(self, end_timestamp: int):
        self.__end_timestamp = end_timestamp
    @slim_prev_obj_map.setter
    def slim_prev_obj_map(self, slim_prev_obj_map: Dict):
        self.__slim_prev_obj_map = slim_prev_obj_map
    @slim_cur_obj_map.setter
    def slim_cur_obj_map(self, slim_cur_obj_map: Dict):
        self.__slim_cur_obj_map = slim_cur_obj_map
    @prev_etype.setter
    def prev_etype(self, prev_etype: str):
        self.__prev_etype = prev_etype
    @cancelled_by.setter
    def cancelled_by(self, cancelled_by: Set):
        self.__cancelled_by = cancelled_by
    @signature_counter.setter
    def signature_counter(self, signature_counter: int):
        self.__signature_counter = signature_counter
class OperatorWrite:
    """One write issued by the operator, parsed from the trace.

    Carries the written object plus analysis state: the set of types/keys
    the operator read before this write, an effect time range, slim diffs,
    and a signature counter. Timestamps default to -1 until assigned.
    """

    def __init__(self, id: str, etype: str, rtype: str, error: str, obj_str: str):
        self.__id = int(id)
        # do not handle DELETEALLOF for now
        assert etype != OperatorWriteTypes.DELETEALLOF
        self.__etype = etype
        self.__rtype = rtype
        self.__error = error
        self.__obj_str = obj_str
        self.__obj_map = json.loads(obj_str)
        self.__namespace, self.__name = extract_namespace_name(self.obj_map)
        self.__start_timestamp = -1
        self.__end_timestamp = -1
        self.__range_start_timestamp = -1
        self.__range_end_timestamp = -1
        # Resource types / keys read before this write (filled by analysis).
        self.__read_types = set()
        self.__read_keys = set()
        self.__owner_controllers = set()
        self.__key = generate_key(self.rtype, self.namespace, self.name)
        self.__slim_prev_obj_map = None
        self.__slim_cur_obj_map = None
        self.__prev_etype = EVENT_NONE_TYPE
        self.__signature_counter = 1
    @property
    def id(self):
        return self.__id
    @property
    def etype(self):
        return self.__etype
    @property
    def rtype(self):
        return self.__rtype
    @property
    def error(self):
        return self.__error
    @property
    def obj_str(self):
        return self.__obj_str
    @property
    def obj_map(self):
        return self.__obj_map
    @property
    def namespace(self):
        return self.__namespace
    @property
    def name(self):
        return self.__name
    @property
    def read_types(self):
        return self.__read_types
    @property
    def read_keys(self):
        return self.__read_keys
    @property
    def start_timestamp(self):
        return self.__start_timestamp
    @property
    def end_timestamp(self):
        return self.__end_timestamp
    @property
    def range_start_timestamp(self):
        return self.__range_start_timestamp
    @property
    def range_end_timestamp(self):
        return self.__range_end_timestamp
    @property
    def owner_controllers(self):
        return self.__owner_controllers
    @property
    def key(self):
        return self.__key
    @property
    def slim_prev_obj_map(self):
        return self.__slim_prev_obj_map
    @property
    def slim_cur_obj_map(self):
        return self.__slim_cur_obj_map
    @property
    def prev_etype(self):
        return self.__prev_etype
    @property
    def signature_counter(self):
        return self.__signature_counter
    @start_timestamp.setter
    def start_timestamp(self, start_timestamp: int):
        self.__start_timestamp = start_timestamp
    @end_timestamp.setter
    def end_timestamp(self, end_timestamp: int):
        self.__end_timestamp = end_timestamp
    @read_types.setter
    def read_types(self, read_types: Set[str]):
        self.__read_types = read_types
    @read_keys.setter
    def read_keys(self, read_keys: Set[str]):
        self.__read_keys = read_keys
    @slim_prev_obj_map.setter
    def slim_prev_obj_map(self, slim_prev_obj_map: Dict):
        self.__slim_prev_obj_map = slim_prev_obj_map
    @slim_cur_obj_map.setter
    def slim_cur_obj_map(self, slim_cur_obj_map: Dict):
        self.__slim_cur_obj_map = slim_cur_obj_map
    @prev_etype.setter
    def prev_etype(self, prev_etype: str):
        self.__prev_etype = prev_etype
    @signature_counter.setter
    def signature_counter(self, signature_counter: int):
        self.__signature_counter = signature_counter
    def set_range(self, start_timestamp: int, end_timestamp: int):
        # Effect range must be a proper interval.
        assert start_timestamp < end_timestamp
        self.__range_start_timestamp = start_timestamp
        self.__range_end_timestamp = end_timestamp
class OperatorRead:
    """One read (Get or List) issued by the operator, parsed from the trace.

    For "Get" the payload is a single object; otherwise it is a list whose
    "items" are indexed individually. key_set / key_to_obj map the canonical
    "rtype/namespace/name" keys to the objects that were read.
    """

    def __init__(
        self,
        etype: str,
        rtype: str,
        namespace: str,
        name: str,
        error: str,
        obj_str: str,
    ):
        self.__etype = etype
        self.__rtype = rtype
        self.__error = error
        self.__key_to_obj = {}
        self.__key_set = set()
        # -1 until a log timestamp is assigned.
        self.__end_timestamp = -1
        if etype == "Get":
            key = generate_key(self.rtype, namespace, name)
            self.key_set.add(key)
            self.key_to_obj[key] = json.loads(obj_str)
        else:
            objs = json.loads(obj_str)["items"]
            for obj in objs:
                key = generate_key(
                    self.rtype, obj["metadata"]["namespace"], obj["metadata"]["name"]
                )
                # A List result must not contain duplicate keys.
                assert key not in self.key_set
                assert key not in self.key_to_obj
                self.key_set.add(key)
                self.key_to_obj[key] = obj
    @property
    def etype(self):
        return self.__etype
    @property
    def rtype(self):
        return self.__rtype
    @property
    def error(self):
        return self.__error
    @property
    def key_set(self):
        return self.__key_set
    @property
    def key_to_obj(self):
        return self.__key_to_obj
    @property
    def end_timestamp(self):
        return self.__end_timestamp
    @end_timestamp.setter
    def end_timestamp(self, end_timestamp: int):
        self.__end_timestamp = end_timestamp
class OperatorHearIDOnly:
    """Lightweight hear-event record retaining only the integer event id."""

    def __init__(self, id: str):
        # Normalize the textual id from the trace into an int once, up front.
        self.__event_id = int(id)

    @property
    def id(self):
        return self.__event_id
class OperatorWriteIDOnly:
    """Lightweight write-event record retaining only the integer event id."""

    def __init__(self, id: str):
        # Normalize the textual id from the trace into an int once, up front.
        self.__event_id = int(id)

    @property
    def id(self):
        return self.__event_id
class ReconcileBegin:
    """Marks the start of one reconcile round for a controller."""

    def __init__(self, controller_name: str, round_id: str):
        self.__controller = controller_name
        self.__round = round_id
        self.__end_ts = -1  # -1 until the log timestamp is assigned

    @property
    def controller_name(self):
        return self.__controller

    @property
    def round_id(self):
        return self.__round

    @property
    def end_timestamp(self):
        return self.__end_ts

    @end_timestamp.setter
    def end_timestamp(self, end_timestamp: int):
        self.__end_ts = end_timestamp
class ReconcileEnd:
    """Marks the end of one reconcile round for a controller."""

    def __init__(self, controller_name: str, round_id: str):
        self.__controller = controller_name
        self.__round = round_id
        self.__end_ts = -1  # -1 until the log timestamp is assigned

    @property
    def controller_name(self):
        return self.__controller

    @property
    def round_id(self):
        return self.__round

    @property
    def end_timestamp(self):
        return self.__end_ts

    @end_timestamp.setter
    def end_timestamp(self, end_timestamp: int):
        self.__end_ts = end_timestamp
def parse_operator_hear(line: str) -> OperatorHear:
    """Parse a SIEVE-BEFORE-HEAR trace line into an OperatorHear."""
    assert SIEVE_BEFORE_HEAR_MARK in line
    payload = line[line.find(SIEVE_BEFORE_HEAR_MARK):].strip("\n")
    fields = payload.split("\t")
    # fields[0] is the marker itself; then id, etype, rtype, obj json.
    return OperatorHear(fields[1], fields[2], fields[3], fields[4])
def parse_operator_write(line: str) -> OperatorWrite:
    """Parse a SIEVE-AFTER-WRITE trace line into an OperatorWrite."""
    assert SIEVE_AFTER_WRITE_MARK in line
    payload = line[line.find(SIEVE_AFTER_WRITE_MARK):].strip("\n")
    fields = payload.split("\t")
    # fields[0] is the marker; then id, etype, rtype, error, obj json.
    return OperatorWrite(fields[1], fields[2], fields[3], fields[4], fields[5])
def parse_operator_read(line: str) -> OperatorRead:
    """Parse a SIEVE-AFTER-READ trace line into an OperatorRead."""
    assert SIEVE_AFTER_READ_MARK in line
    fields = line[line.find(SIEVE_AFTER_READ_MARK):].strip("\n").split("\t")
    if fields[1] == "Get":
        return OperatorRead(
            fields[1], fields[2], fields[3], fields[4], fields[5], fields[6]
        )
    # List reads log the resource type as "<rtype>list"; trim that suffix.
    assert fields[2].endswith("list")
    return OperatorRead(fields[1], fields[2][:-4], "", "", fields[3], fields[4])
def parse_operator_hear_id_only(line: str) -> OperatorHearIDOnly:
    """Extract only the event id from a before/after-hear trace line."""
    assert SIEVE_AFTER_HEAR_MARK in line or SIEVE_BEFORE_HEAR_MARK in line
    # Prefer the AFTER mark when present, mirroring the original precedence.
    mark = (
        SIEVE_AFTER_HEAR_MARK
        if SIEVE_AFTER_HEAR_MARK in line
        else SIEVE_BEFORE_HEAR_MARK
    )
    fields = line[line.find(mark):].strip("\n").split("\t")
    return OperatorHearIDOnly(fields[1])
def parse_operator_write_id_only(line: str) -> OperatorWriteIDOnly:
    """Extract only the event id from a before/after-write trace line."""
    assert SIEVE_AFTER_WRITE_MARK in line or SIEVE_BEFORE_WRITE_MARK in line
    # Prefer the AFTER mark when present, mirroring the original precedence.
    mark = (
        SIEVE_AFTER_WRITE_MARK
        if SIEVE_AFTER_WRITE_MARK in line
        else SIEVE_BEFORE_WRITE_MARK
    )
    fields = line[line.find(mark):].strip("\n").split("\t")
    return OperatorWriteIDOnly(fields[1])
def parse_reconcile(line: str) -> Union[ReconcileBegin, ReconcileEnd]:
    """Parse a reconcile begin/end trace line into the matching record."""
    assert SIEVE_BEFORE_RECONCILE_MARK in line or SIEVE_AFTER_RECONCILE_MARK in line
    if SIEVE_BEFORE_RECONCILE_MARK in line:
        mark, record_cls = SIEVE_BEFORE_RECONCILE_MARK, ReconcileBegin
    else:
        mark, record_cls = SIEVE_AFTER_RECONCILE_MARK, ReconcileEnd
    fields = line[line.find(mark):].strip("\n").split("\t")
    return record_cls(fields[1], fields[2])
def parse_api_event(line: str) -> APIEvent:
    """Parse a SIEVE-API-EVENT trace line into an APIEvent."""
    assert SIEVE_API_EVENT_MARK in line
    fields = line[line.find(SIEVE_API_EVENT_MARK):].strip("\n").split("\t")
    # fields[0] is the marker; then etype, key, obj json.
    return APIEvent(fields[1], fields[2], fields[3])
def conflicting_event(
    prev_operator_hear: OperatorHear,
    cur_operator_hear: OperatorHear,
    masked_keys: Set[str],
    masked_paths: Set[str],
) -> bool:
    """True when two consecutive hear events conflict: either their types
    conflict (deletion flips), or neither is a delete and their payloads
    conflict after masking."""
    if conflicting_event_type(prev_operator_hear.etype, cur_operator_hear.etype):
        return True
    neither_deleted = (
        prev_operator_hear.etype != OperatorHearTypes.DELETED
        and cur_operator_hear.etype != OperatorHearTypes.DELETED
    )
    # bool() keeps the strict True/False contract of the original.
    return bool(
        neither_deleted
        and conflicting_event_payload(
            prev_operator_hear.slim_cur_obj_map,
            cur_operator_hear.obj_map,
            masked_keys,
            masked_paths,
        )
    )
def is_creation_or_deletion(etype: str):
    """True for Added/Deleted hear types and Create/Delete write types."""
    return etype in (
        OperatorHearTypes.ADDED,
        OperatorHearTypes.DELETED,
        OperatorWriteTypes.CREATE,
        OperatorWriteTypes.DELETE,
    )
def get_event_signature(event: Union[OperatorHear, OperatorWrite]):
    """Return a signature string for an event: just the type for creates
    and deletes, otherwise type plus canonical-JSON slim prev/cur diffs."""
    assert isinstance(event, (OperatorHear, OperatorWrite))
    if is_creation_or_deletion(event.etype):
        return event.etype
    return "\t".join(
        [
            event.etype,
            json.dumps(event.slim_prev_obj_map, sort_keys=True),
            json.dumps(event.slim_cur_obj_map, sort_keys=True),
        ]
    )
| 1.867188 | 2 |
config/style_transfer_config.py | baishalidutta/Neural-Style | 1 | 12771303 | <reponame>baishalidutta/Neural-Style<filename>config/style_transfer_config.py
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2021 <NAME>"
__license__ = "Apache License 2.0"
__version__ = "0.1"
# import the necessary packages
import os
# define the content layer from which feature maps will be extracted
contentLayers = ["block4_conv2"]
# define the list of style layer blocks from our pre-trained CNN
styleLayers = [
    "block1_conv1",
    "block2_conv1",
    "block3_conv1",
    "block4_conv1",
    "block5_conv1"
]
# define the style weight, content weight, and total-variational
# loss weight, respectively (these are the values you'll want to
# tune to generate new style transfers)
styleWeight = 1.0
contentWeight = 1e4
tvWeight = 20.0
# define the number of epochs to train for along with the steps
# per each epoch
epochs = 15
stepsPerEpoch = 100
# define the path to the input content image, input style image,
# final output image, and path to the directory that will store
# the intermediate outputs
contentImage = os.path.sep.join(["inputs", "jp.jpg"])
styleImage = os.path.sep.join(["inputs", "mcescher.jpg"])
finalImage = "final.png"
intermOutputs = "intermediate_outputs"
| 2.296875 | 2 |
example/example/schema.py | andrenerd/django-multiform-authentication | 7 | 12771304 | from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
# OpenAPI schema view (drf-yasg), publicly accessible.
# NOTE(review): the name "ShemaView" is misspelled ("SchemaView"); renaming
# would break importers, so it is left as-is -- confirm before fixing.
ShemaView = get_schema_view(
    openapi.Info(
        title='Multauth Example API',
        default_version='v1',
        description='Authentication flow: email, password and passcode (using Google Authenticator or similar app)',
        # terms_of_service="https://www.google.com/policies/terms/",
        # contact=openapi.Contact(email="<EMAIL>"),
        # license=openapi.License(name="BSD License"),
    ),
    #validators=['flex', 'ssv'],
    public=True,
    permission_classes=(permissions.AllowAny,),
)
| 1.929688 | 2 |
hardhat/recipes/socat.py | stangelandcl/hardhat | 0 | 12771305 | from .base import GnuRecipe
class SocatRecipe(GnuRecipe):
    """Build recipe for socat, the multipurpose socket relay.

    Assignment order is kept identical to avoid surprising any property
    setters the GnuRecipe base may define.
    """

    def __init__(self, *args, **kwargs):
        super(SocatRecipe, self).__init__(*args, **kwargs)
        # SHA-256 of the socat-$version.tar.gz release tarball.
        self.sha256 = ('ce3efc17e3e544876ebce7cd6c85b3c2'
                       '79fda057b2857fcaaf67b9ab8bdaf034')
        self.name = 'socat'
        self.version = '1.7.3.2'
        self.version_regex = r'(?P<version>\d+\.\d+\.\d+)'
        self.depends = ['autotools', 'openssl']
        # $version is substituted by the recipe machinery.
        self.url = ('http://www.dest-unreach.org/socat/download/'
                    'socat-$version.tar.gz')
| 2.078125 | 2 |
app/views.py | Lumiahna/guildbit | 51 | 12771306 | <reponame>Lumiahna/guildbit
import re
from flask import render_template, request, redirect, session, url_for, g, flash, json
from flask_login import login_user, logout_user, current_user
import settings
from app import app, db, lm, oid, cache, babel
from app.controllers.home import HomeView
from app.controllers.server import ServerView
from app.controllers.admin import AdminView, AdminServersView, AdminPortsView, AdminHostsView, AdminFeedbackView
from app.controllers.admin import AdminTokensView, AdminToolsView, AdminUsersView, AdminPackagesView, AdminBansView
from app.controllers.payment import PaymentView
from app.forms import LoginForm
from app.models import User, Notice, ROLE_USER
from app.util import get_steam_userinfo
## Flask-babel localization
@babel.localeselector
def get_locale():
    """Pick the UI language: an explicit cookie wins, otherwise the best
    match from the Accept-Language header."""
    chosen = request.cookies.get('language')
    # Falsy cookie (absent or empty) falls through to header negotiation.
    return chosen or request.accept_languages.best_match(settings.LANGUAGES.keys())
## Flask-Login required user loaders
@lm.user_loader
def load_user(id):
    """Flask-Login user loader: resolve a session id to a User row."""
    user_pk = int(id)
    return User.query.get(user_pk)
## Request processing
@app.before_request
def before_request():
    """Run before every request: expose the current user as g.user."""
    g.user = current_user # Required for flask-login
## Context processors
@app.context_processor
@cache.cached(timeout=100, key_prefix='display_notice')
def display_notice():
    """Expose the site-wide header notice to every template (cached 100s)."""
    header_notice = Notice.query.get(1)  # row 1 is reserved for the base header notice
    return {'notice': header_notice}
## Login/Logout views
@app.route('/login', methods=['GET', 'POST'])
@oid.loginhandler
def login():
    """Render the OpenID login form and start the OpenID handshake.

    Already-authenticated users are bounced straight to the home page; a
    valid form submission hands off to Flask-OpenID, whose success callback
    is ``after_login`` below.
    """
    if g.user is not None and g.user.is_authenticated():
        return redirect(url_for('home'))
    form = LoginForm()
    if form.validate_on_submit():
        session['remember_me'] = form.remember_me.data
        return oid.try_login(form.openid.data, ask_for=['nickname', 'email'], ask_for_optional=['fullname'])
    return render_template('auth/login.html',
                           title='Sign In',
                           form=form,
                           providers=settings.OPENID_PROVIDERS)
@app.route('/logout')
def logout():
    # Clear the Flask-Login session and send the user back to the home page.
    logout_user()
    return redirect(url_for('home'))
@oid.after_login
def after_login(resp):
    """OpenID success callback: extract the Steam ID from the identity URL,
    load/create the local user, refresh the nickname from Steam, and log in.
    """
    _steam_id_re = re.compile('steamcommunity.com/openid/id/(.*?)$')
    match = _steam_id_re.search(resp.identity_url)
    if match is None:
        # The identity URL is not a Steam one; previously this crashed with
        # AttributeError on match.group(). Fail the login gracefully instead.
        flash('Login failed: could not determine Steam ID.')
        return redirect(url_for('login'))
    g.user = User.get_or_create(match.group(1))
    steam_data = get_steam_userinfo(g.user.steam_id)
    g.user.nickname = steam_data['personaname']
    db.session.commit()
    session['user_id'] = g.user.id
    flash('You are logged in as %s' % g.user.nickname)
    return redirect(oid.get_next_url())
## Error views
@app.errorhandler(404)
def page_not_found(error):
    """Render the custom 404 page."""
    return render_template('error_pages/404.html'), 404


@app.errorhandler(500)
def internal_server_error(error):
    """Render the custom 500 page.

    Renamed from ``page_not_found``: both error handlers previously shared
    that name, so the second definition shadowed the first at module level
    (confusing tracebacks). Flask registration is unaffected by the rename.
    """
    return render_template('error_pages/500.html'), 500
## Register flask-classy views
# Public-facing views first, then the admin sub-sections under /admin/.
HomeView.register(app, route_base='/')
ServerView.register(app)
PaymentView.register(app)
AdminView.register(app)
AdminServersView.register(app, route_prefix='/admin/', route_base='/servers')
AdminPortsView.register(app, route_prefix='/admin/', route_base='/ports')
AdminUsersView.register(app, route_prefix='/admin/', route_base='/users')
AdminHostsView.register(app, route_prefix='/admin/', route_base='/hosts')
AdminToolsView.register(app, route_prefix='/admin/', route_base='/tools')
AdminFeedbackView.register(app, route_prefix='/admin/', route_base='/feedback')
AdminTokensView.register(app, route_prefix='/admin/', route_base='/tokens')
AdminPackagesView.register(app, route_prefix='/admin/', route_base='/packages')
AdminBansView.register(app, route_prefix='/admin/', route_base='/bans')
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 4 00:22:32 2020
@author: <NAME>
"""
import os
import copy
from typing import Union, Any
try:
import simplejson as json
except ImportError:
import json
from .plugins.base_file import BaseFilePlugin, _info
from .exceptions.file_exceptions import FileNotFoundException
from .options import Options
from .xnodes import XNode, XDict, XList, create_xnode
from .file_list import FileList
# plugins
from .plugins.plugin_json import PluginJson
from .plugins.plugin_xjson import PluginXJson
from .plugins.plugin_text import PluginText
from .plugins.plugin_csv import PluginCsv
from .plugins.plugin_yaml import PluginYaml
from .plugins.plugin_xml import PluginXml
# /plugins
# Module-wide constants: reserved node key names, plugin class names that are
# always loaded, and the file extensions probed (in priority order) for
# directory index files.
_index, _aliases, _required_plugins, default_exts \
    = 'index', '_aliases', {'PluginJson', 'PluginXJson', 'PluginText', 'PluginCsv', 'PluginXml', 'PluginYaml'}, ['json', 'xjson', 'xml']
class XJson:
    """Hierarchical JSON-like structure assembled from a file or a whole
    directory tree, using pluggable per-format parsers (see plugin imports
    above). The result is held in ``self.structure`` as an XDict."""

    def __init__(self, name: str = '', **options) -> None:
        self._options = Options(options)
        self.structure = XDict(owner=self) # result structure
        self._load_plugins()
        self.file_list = FileList()
        # `name > ''` is effectively a non-empty-string check.
        if name > '':
            self._scan(name)

    def _load_plugins(self):
        """Populate ``self.plugins`` with the plugin classes to use."""
        self.plugins = {}
        # NOTE(review): `list` shadows the builtin, and because it is bound to
        # the module-level set `_required_plugins`, `list.update(...)` mutates
        # that shared set for every later XJson instance — verify intended.
        list = _required_plugins
        try:
            # Options presumably raises KeyError when no 'plugins' option was
            # given — TODO confirm against the Options implementation.
            list.update(set(self.options.plugins))
        except KeyError:
            pass
        for name in list:
            # Resolve each plugin by class name in this module's globals.
            cl = globals().get(name, None)
            if cl is not None:
                self.plugins[name] = cl

    def _scan(self, name: str) -> None:
        ''' Scan the directory or file to form common structure'''
        # Try the bare name first, then each default extension in order.
        file_name = name
        exts = [''] + ['.' + val for val in default_exts]
        for ext in exts:
            if os.path.exists(file_name + ext):
                self.structure = self._node_from_file(file_name + ext)
                break

    def _get_index_file(self, path):
        """find index file with extension priority from default_exts"""
        _fn = os.path.join(path, "index")
        for ext in default_exts:
            fn = _fn + "." + ext
            if os.path.exists(fn):
                return fn

    def _node_from_file(self, file_name: str) -> XNode:
        """Create node from a file, or recursively from a directory."""
        file = self.file_list.get(file_name)
        if file.is_file:
            node = self._apply_plugins(file_name)
        else:
            # Directory: start from its index file (if any), then merge in
            # every other entry under a key derived from its base name.
            index_fn = self._get_index_file(file_name)
            index_file = self.file_list.get(index_fn)
            if index_file is None:
                node = XDict(owner=self, _file=file)
            else:
                node = self._apply_plugins(index_fn)
            files = os.listdir(file_name)
            for fn in files:
                #if fn == _index.split(".")[:-1]:
                if index_file is not None and fn == index_file.name:
                    continue
                (name, ext) = os.path.splitext(fn)
                if name not in node:
                    node[name] = XDict(owner=self, _file=file)
                value = self._node_from_file(os.path.join(file_name, fn))
                if isinstance(value, XDict):
                    node[name].update(value)
                elif isinstance(value, XList):
                    node[name].append(value)
                else:
                    node[name] = value
        return node

    def _apply_plugins(self, file_name: str) -> XNode:
        '''Apply plugins to the file file_name and create & return node'''
        # First plugin whose check() accepts the file wins.
        for name in self.plugins:
            Plugin = self.plugins[name]
            plugin = Plugin(file_name)
            if plugin.check():
                return plugin.get()
        return XDict(self)

    def __str__(self):
        return self.dump(self.structure)

    def clear(self):
        # NOTE(review): resets to a plain dict, not an XDict as in __init__.
        self.structure = {}

    def refresh(self, name = '') -> None:
        """Drop the current structure and rescan from ``name``."""
        self.clear()
        self._scan(name)

    def alias(self, name: str):
        self.structure.alias(name)

    def get_root_value(self, name) -> str:
        """ return root value, only str or int (return str from int)"""
        result = ''
        if name in self.structure:
            val = self.structure[name]
            if isinstance(val, str):
                result = val
            elif isinstance(val, int):
                result = str(val)
        return result

    @property
    def options(self) -> Options:
        return self._options

    def _dump_val(self, node, key='', short=True, indent='', exclude_info=True):
        # Leaf values render as a single "key: value" line.
        return "{}{}{}\n".format(indent, key + (": " if key else ""), node)

    def _dump_arr(self, node: XList, key='', short=True, indent='', exclude_info=True):
        # Lists render each element under a "#<index>" key, indented one level.
        result = ''
        n = 0
        for value in node:
            value = self.dump(value, key="#" + str(n), short=short, indent=indent + ". ", exclude_info=exclude_info)
            result += value
            n += 1
        result = '{0}{1}{2}'.format(indent, (key + ": \n" if key else ""), result)
        return result

    def _dump_obj(self, node: XDict, key='', short=True, indent='', exclude_info=True):
        # Dicts render each entry under its key; internal info keys skipped.
        result = ''
        for name in node:
            if exclude_info and name == _info:
                continue
            value = node[name]
            result += self.dump(value, key=name, short=short, indent=indent + ". ", exclude_info=exclude_info)
        result = '{}{}{}'.format(indent, (key + ": \n" if key else ""), result)
        return result

    def dump(self, node, key='', short=True, indent='', exclude_info = True):
        """Render a node (list, dict or leaf) as indented text."""
        result = ''
        #if node is None:
        #    node = self.structure #TODO: this loops forever when None occurs inside; that case needs a different handling
        if isinstance(node, XList):
            result = self._dump_arr(node, key=key, short=short, indent=indent)
        elif isinstance(node, XDict):
            result = self._dump_obj(node, key=key, short=short, indent=indent)
        else:
            result = self._dump_val(node, key=key, short=short, indent=indent)
        return result

    def _copy_node(self, node: Union[dict, list] = None, exclude_info = False):
        """Deep-copy a dict/list tree into plain dicts and lists.

        NOTE(review): the default ``node=None`` would crash the list branch
        below — callers always pass a real node.
        """
        if isinstance(node, dict): # for DICT
            result = {}
            for name in node:
                if exclude_info and name == _info:
                    continue
                value = node[name]
                if isinstance(value, dict) or isinstance(value, list):
                    value = self._copy_node(value, exclude_info)
                result[name] = value
        else: # for LIST
            result = []
            for value in node:
                if isinstance(value, dict) or isinstance(value, list):
                    value = self._copy_node(value, exclude_info)
                result.append(value)
        return result

    def copy_from(self, src):
        """Become a copy of another XJson instance; returns self."""
        self._options = src.options
        self._load_plugins()
        self.structure = src._copy_node(src.structure, False)
        return self

    def from_dict(self, data: dict):
        """Replace the structure with a deep copy of ``data``; returns self."""
        self.structure = copy.deepcopy(data)
        return self

    def to_dict(self, exclude_info = True):
        """Export the structure as plain dicts/lists."""
        return self._copy_node(self.structure, exclude_info)
Dynamic Programming/152. Maximum Product Subarray.py | Wolemercy/leetcode | 0 | 12771308 | <gh_stars>0
# https://leetcode.com/problems/maximum-product-subarray/
class Solution:
    def maxProduct(self, nums: List[int]) -> int:
        """Return the largest product over all contiguous subarrays of nums.

        Tracks both the largest and the smallest product of a subarray
        ending at the current index, since multiplying by a negative number
        can turn the smallest product into the largest.
        """
        best = nums[0]
        largest = smallest = 1
        for value in nums:
            candidates = (value, value * largest, value * smallest)
            largest = max(candidates)
            smallest = min(candidates)
            best = max(best, largest)
        return best
| 3.25 | 3 |
#!/usr/bin/python3
# driver_trips.py: summarize miles per driver and show a list for each
# driver of the trips they took.
# Two approaches are demonstrated:
# - Two queries. First query retrieves the summary values, second the
#   list entries. Print the list entries, preceding the list for each
#   driver with the corresponding summary information.
# - Single query to retrieve the list entries. Iterate through the list
#   once to compute the summary values, and a second time to print the
#   summary and list information.
import mysql.connector
import cookbook
try:
    conn = cookbook.connect()
    print("Summary, method 1:")
    #@ _TWO_QUERY_
    # select total miles per driver and construct a dictionary that
    # maps each driver name to days on the road and miles driven
    name_map = {}
    cursor = conn.cursor()
    cursor.execute('''
                   SELECT name, COUNT(name), SUM(miles)
                   FROM driver_log GROUP BY name
                   ''')
    for (name, days, miles) in cursor:
        name_map[name] = (days, miles)
    # select trips for each driver and print the report, displaying the
    # summary entry for each driver prior to the list of trips
    cursor.execute('''
                   SELECT name, trav_date, miles
                   FROM driver_log ORDER BY name, trav_date
                   ''')
    cur_name = ""
    for (name, trav_date, miles) in cursor:
        if cur_name != name:  # new driver; print driver's summary info
            print("Name: %s; days on road: %d; miles driven: %d" %
                  (name, name_map[name][0], name_map[name][1]))
            cur_name = name
        print("  date: %s, trip length: %d" % (trav_date, miles))
    cursor.close()
    #@ _TWO_QUERY_
    print("")
    print("Summary, method 2:")
    #@ _ONE_QUERY_
    # get list of trips for the drivers
    cursor = conn.cursor()
    cursor.execute('''
                   SELECT name, trav_date, miles FROM driver_log
                   ORDER BY name, trav_date
                   ''')
    # fetch rows into data structure because we
    # must iterate through them multiple times
    rows = cursor.fetchall()
    cursor.close()
    # iterate through rows once to construct a dictionary that
    # maps each driver name to days on the road and miles driven
    # (the dictionary entries are lists rather than tuples because
    # we need mutable values that can be modified in the loop)
    name_map = {}
    for (name, trav_date, miles) in rows:
        if name not in name_map:  # initialize entry if nonexistent
            name_map[name] = [0, 0]
        name_map[name][0] += 1  # count days
        name_map[name][1] += miles  # sum miles
    # iterate through rows again to print the report, displaying the
    # summary entry for each driver prior to the list of trips
    cur_name = ""
    for (name, trav_date, miles) in rows:
        if cur_name != name:  # new driver; print driver's summary info
            print("Name: %s; days on road: %d; miles driven: %d" %
                  (name, name_map[name][0], name_map[name][1]))
            cur_name = name
        print("  date: %s, trip length: %d" % (trav_date, miles))
    #@ _ONE_QUERY_
except mysql.connector.Error as e:
    print("Error: %s" % e)
else:
    # NOTE(review): conn is closed only on the success path; on a connector
    # error the connection is left open — consider a finally clause.
    conn.close()
# Generated by Django 3.1.6 on 2021-02-12 07:40
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations; creates the Region and
    # Report tables. Do not hand-edit operation contents once this migration
    # has been applied anywhere.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Region',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('iso', models.CharField(blank=True, default=None, max_length=2)),
                ('flag', models.FileField(upload_to='flags', validators=[django.core.validators.FileExtensionValidator(['svg', 'png'])])),
                ('longitude', models.DecimalField(blank=True, decimal_places=8, max_digits=16, null=True)),
                ('latitude', models.DecimalField(blank=True, decimal_places=8, max_digits=16, null=True)),
                ('type', models.CharField(choices=[('CTY', 'Country'), ('PRV', 'Province')], max_length=3)),
                ('population', models.PositiveIntegerField(blank=True, null=True)),
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='reports.region')),
            ],
        ),
        migrations.CreateModel(
            name='Report',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField()),
                ('confirmed', models.PositiveIntegerField(blank=True, null=True)),
                ('active', models.PositiveIntegerField(blank=True, null=True)),
                ('deaths', models.PositiveIntegerField(blank=True, null=True)),
                ('recovered', models.PositiveIntegerField(blank=True, null=True)),
                ('region', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reports', to='reports.region')),
            ],
        ),
    ]
| 1.71875 | 2 |
# Program to reverse an array
def reverseArray(arr: list):
    """Reverse *arr* in place by swapping symmetric end pairs."""
    left, right = 0, len(arr) - 1
    while left < right:
        arr[left], arr[right] = arr[right], arr[left]
        left += 1
        right -= 1
if __name__ == "__main__":
    # Demo: reverse a sample list in place and print the result.
    arr = [1,2,3,4,5,6,7]
    reverseArray(arr)
    print(arr)
| 4.40625 | 4 |
from unicodedata import normalize
def normalizar(texto: str) -> str:
    """Return *texto* stripped, lowercased and with accents removed.

    Accented characters are decomposed (NFKD) and the combining marks are
    dropped via a latin-1 round trip with errors ignored.

    Example:

    >>> normalizar(' AçúcAR ')
    'acucar'
    """
    decomposed = normalize('NFKD', texto)
    sem_acentos = decomposed.encode('iso-8859-1', 'ignore').decode('iso-8859-1')
    return sem_acentos.strip().lower()
| 3.609375 | 4 |
main.py | compactcoder/QuickNotepad | 5 | 12771313 | <reponame>compactcoder/QuickNotepad
import os
from tkinter import Tk,ttk, PhotoImage, Menu, Frame, Text, Scrollbar, IntVar,GROOVE, \
StringVar, BooleanVar, Button, END, Label, INSERT, Toplevel, Entry, Checkbutton
import tkinter.filedialog
import tkinter.messagebox
Program_Name = "QuickNotepad"
File_Name = None  # path of the currently open file; None means "Untitled"
root = Tk()
# Setting icon of root window
title_icon = PhotoImage(file='icons/Titleicon.png')
root.iconphoto(False, title_icon)
root.geometry('750x500')
root.minsize(750,500)
root.title(Program_Name)
#other functions
def Oncontentchanged(event=None):
    # Refresh the line-number gutter and cursor-position label after any edit.
    Updatelineno()
    Updatecursorinfobar()

def Showpopupmenu(event):
    # Show the right-click context menu at the mouse position.
    popupmenu.tk_popup(event.x_root, event.y_root)

def Togglehighlight(event=None):
    # Apply or clear the current-line highlight per the View-menu checkbox.
    if to_highlight_line.get():
        Tohighlightline()
    else:
        Undohighlight()

def Undohighlight():
    # Remove the current-line highlight tag from the whole buffer.
    contenttext.tag_remove("active_line", 1.0, "end")

def Updatecursorinfobar(event=None):
    # Tk text indices are "row.column"; column is 0-based, so +1 for display.
    r, c = contenttext.index(INSERT).split('.')
    l_num, c_num = str(int(r)), str(int(c) + 1)
    infotext = "Line: {0} | Column: {1}".format(l_num, c_num)
    cursorbarinfo.config(text=infotext)
def Writetofile(file_name):
    # Dump the entire text widget to file_name; warn the user on I/O failure.
    try:
        content = contenttext.get(1.0, "end")
        with open(file_name, 'w') as file:
            file.write(content)
    except IOError:
        tkinter.messagebox.showwarning("Save", "Could not save the file.")
def Getlinenumbers():
    # Build the "1\n2\n..." gutter text, or '' when line numbers are disabled.
    output = ''
    if show_line_no.get():
        row, col = contenttext.index("end").split('.')
        for i in range(1, int(row)):
            output += str(i) + '\n'
    return output
def Searchoutput(needle, if_ignore_case, content_text,
                 search_toplevel, search_box):
    # Tag every occurrence of `needle` in the text widget with the 'match'
    # tag (red on yellow) and report the count in the search window title.
    content_text.tag_remove('match', '1.0', END)
    matches_found = 0
    if needle:
        start_pos = '1.0'
        while True:
            start_pos = content_text.search(needle, start_pos,
                                            nocase=if_ignore_case, stopindex=END)
            if not start_pos:
                break
            end_pos = '{}+{}c'.format(start_pos, len(needle))
            content_text.tag_add('match', start_pos, end_pos)
            matches_found += 1
            start_pos = end_pos
        content_text.tag_config(
            'match', foreground='red', background='yellow')
    search_box.focus_set()
    search_toplevel.title('{} matches found'.format(matches_found))
#TODO filemenu command
def Newfile(event=None):
    # Reset editor state for a brand-new, unsaved document.
    root.title("Untitled")
    global File_Name
    File_Name = None
    contenttext.delete(1.0, END)
    Oncontentchanged()

def Openfile(event=None):
    # Ask for a file, load it into the text widget and update the title bar.
    inputfilename = tkinter.filedialog.askopenfilename(defaultextension=".txt",
                                                       filetypes=[("All Files", "*.*"), ("Text Documents", "*.txt")])
    if inputfilename:
        global File_Name
        File_Name = inputfilename
        root.title('{}-{}'.format(os.path.basename(File_Name), Program_Name))
        contenttext.delete(1.0, END)
        with open(File_Name) as _file:
            contenttext.insert(1.0, _file.read())
        Oncontentchanged()

def Savefile(event=None):
    # Save to the current file, or fall back to Save-As for a new document.
    global File_Name
    if not File_Name:
        SaveAsfile()
    else:
        Writetofile(File_Name)
    return "break"

def SaveAsfile(event=None):
    # Ask for a target path, remember it, then write the buffer out.
    inputfilename = tkinter.filedialog.asksaveasfilename(
        initialfile="Untitled",defaultextension=".txt",
        filetypes=[("All Files", "*.*"), ("Text Documents", "*.txt")])
    if inputfilename:
        global File_Name
        File_Name = inputfilename
        Writetofile(File_Name)
        root.title('{}-{}'.format(os.path.basename(File_Name), Program_Name))
    return "break"

def Exiteditor():
    # Confirm with the user before tearing down the main window.
    if tkinter.messagebox.askokcancel("Quit?", "Are You Sure?"):
        root.destroy()
#
#TODO editmenu command
def Undo(event=None):
    """Undo the last edit in the text area (Ctrl+Z)."""
    contenttext.event_generate(("<<Undo>>"))
    Oncontentchanged()
    # Returning "break" stops the Text widget's built-in Ctrl+Z class binding
    # from firing a second undo; also matches Redo's behaviour below.
    # (A leftover debug print("undo executed") was removed.)
    return "break"
def Redo(event=None):
    # Redo via the Text widget's virtual event; "break" suppresses the
    # widget's own class binding so the action does not run twice.
    contenttext.event_generate(("<<Redo>>"))
    Oncontentchanged()
    return "break"

def Cuttext():
    contenttext.event_generate("<<Cut>>")
    Oncontentchanged()
    return "break"

def Copytext():
    contenttext.event_generate("<<Copy>>")

def Pastetext():
    contenttext.event_generate("<<Paste>>")
    Oncontentchanged()
    return "break"

def Findtext(event=None):
    # Build a small modal-ish search window tied to the main window.
    searchtoplevel = Toplevel(root)
    toplevel_icon = PhotoImage(file='icons/Findtext.png')
    searchtoplevel.title('Find Text')
    searchtoplevel.iconphoto(False,toplevel_icon)
    searchtoplevel.transient(root)
    Label(searchtoplevel, text="Find All:").grid(row=0, column=0, sticky='e')
    search_entry_widget = Entry(searchtoplevel,relief=GROOVE, width=25)
    search_entry_widget.grid(row=0, column=1, padx=2, pady=2, sticky='we')
    search_entry_widget.focus_set()
    ignore_case_value = IntVar()
    Checkbutton(searchtoplevel, text='Ignore Case', variable=ignore_case_value).grid(
        row=1, column=1, sticky='e', padx=2, pady=2)
    Button(searchtoplevel, text="Find All",relief=GROOVE,
           command=lambda: Searchoutput(
               search_entry_widget.get(), ignore_case_value.get(),
               contenttext, searchtoplevel, search_entry_widget)
           ).grid(row=0, column=2, sticky='e' + 'w', padx=2, pady=2)
    def Closesearchwindow():
        # Clear the highlight tags when the search window closes.
        contenttext.tag_remove('match', '1.0', END)
        searchtoplevel.destroy()
    searchtoplevel.protocol('WM_DELETE_WINDOW', Closesearchwindow)
    return "break"

def Selectalltext(event=None):
    contenttext.tag_add('sel', '1.0', 'end')
    return "break"
#TODO formatmenu command
def Changefonts(event=None):
    # Apply the currently selected font family and size to the text widget.
    selected_font = font_choice.get()
    selected_font_size = font_size_choice.get()
    font_tupple = (selected_font,selected_font_size)
    contenttext.config(font=font_tupple)
#TODO viewmenu command
def Updatelineno(event=None):
    # Rewrite the read-only gutter; it must be set 'normal' to be editable.
    line_numbers = Getlinenumbers()
    linenumberbar.config(state='normal')
    linenumberbar.delete('1.0', 'end')
    linenumberbar.insert('1.0', line_numbers)
    linenumberbar.config(state='disabled')

def Showcursorinfobar():
    # Show or hide the "Line/Column" label per the View-menu checkbox.
    show_cursor_info_check = show_cursor_info.get()
    if show_cursor_info_check:
        cursorbarinfo.pack(expand='no', fill=None, side='right', anchor='se')
    else:
        cursorbarinfo.pack_forget()

def Tohighlightline(interval=100):
    # Re-tag the line under the cursor, then reschedule via Togglehighlight
    # so the highlight tracks cursor movement while the option stays on.
    contenttext.tag_remove("active_line", 1.0, "end")
    contenttext.tag_add("active_line", "insert linestart", "insert lineend+1c")
    contenttext.after(interval, Togglehighlight)

def Changethemes(event=None):
    # Color scheme strings are stored as "fg.bg" in color_schemes.
    selected_theme = theme_choice.get()
    fg_bg_colors = color_schemes.get(selected_theme)
    fg_color, bg_color = fg_bg_colors.split(".")
    contenttext.config(background=bg_color, fg=fg_color)

#TODO aboutmenu command
def Help():
    help_string1=" Help Guide: QuickNotepad \n From View menu you can change Theme,\n"
    help_string2=" Turn On/Off Line Numbers,Cursor Location,\n And Highlighting Current Line"
    tkinter.messagebox.showinfo("Help","{}{}".format(help_string1,help_string2))

def About():
    tkinter.messagebox.showinfo("About",
                                "{}{}".format(Program_Name,
                                              "\nPython Tkinter GUI App \nDeveloped by @compactcoder"))
#TODO menubar icons SETUP
# PhotoImage objects are kept in module-level names so Tk does not
# garbage-collect the icons while the menus are alive.
#filemenu images
new_file_icon = PhotoImage(file='icons/Newfile.png')
open_file_icon = PhotoImage(file='icons/Openfile.png')
save_file_icon = PhotoImage(file='icons/Savefile.png')
#editmenu images
undo_icon = PhotoImage(file='icons/Undo.png')
redo_icon = PhotoImage(file='icons/Redo.png')
cut_icon = PhotoImage(file='icons/Cuttext.png')
copy_icon = PhotoImage(file='icons/Copytext.png')
paste_icon = PhotoImage(file='icons/Pastetext.png')
findtext_icon = PhotoImage(file='icons/Findtext.png')
#mainmenubar setup
menubar = Menu(root)
#TODO filemenu GUI
filemenu = Menu(menubar, tearoff=0)
#filemenu adding commands
filemenu.add_command(label="New", accelerator="Ctrl+N", compound="left",
                     image=new_file_icon, underline=0, command=Newfile)
filemenu.add_command(label="Open", accelerator="Ctrl+O", compound="left",
                     image=open_file_icon, underline=0, command=Openfile)
filemenu.add_command(label="Save", accelerator="Ctrl+S", compound="left",
                     image=save_file_icon, underline=0, command=Savefile)
filemenu.add_command(label="SaveAs", accelerator="Ctrl+Shift+N",
                     underline=0, command=SaveAsfile)
filemenu.add_separator()
filemenu.add_command(label="Exit", accelerator="Alt+F4", underline=0,
                     command=Exiteditor)
menubar.add_cascade(label='File', menu=filemenu)
#TODO editmenu GUI
editmenu = Menu(menubar, tearoff=0)
#editmenu adding commands
editmenu.add_command(label="Undo", accelerator="Ctrl+Z", compound="left",
                     image=undo_icon, underline=0, command=Undo)
editmenu.add_command(label="Redo", accelerator="Ctrl+Y", compound="left",
                     image=redo_icon, underline=0, command=Redo)
editmenu.add_separator()
editmenu.add_command(label="Cut", accelerator="Ctrl+X", compound="left",
                     image=cut_icon, underline=0, command=Cuttext)
editmenu.add_command(label="Copy", accelerator="Ctrl+C", compound="left",
                     image=copy_icon, underline=0, command=Copytext)
editmenu.add_command(label="Paste", accelerator="Ctrl+V", compound="left",
                     image=paste_icon, underline=0, command=Pastetext)
editmenu.add_separator()
editmenu.add_command(label="Find", accelerator="Ctrl+F", compound="left",
                     image=findtext_icon, underline=1, command=Findtext)
editmenu.add_command(label="Select All", accelerator="Ctrl+A", compound="left",
                     underline=7, command=Selectalltext)
menubar.add_cascade(label='Edit', menu=editmenu)
#TODO formatmenu GUI
formatmenu = Menu(menubar,tearoff=0)
#format menu adding command
fontsmenu = Menu(formatmenu,tearoff=0)
formatmenu.add_cascade(label='Fonts',menu=fontsmenu)
fontlist=["Arial","Courier New","Comic Sans MS","Calibre","Fixedsys",
          "MS Sans Serif","MS Serif","Symbol", "System", "Times New Roman","Verdana"]
font_choice = StringVar()
font_choice.set('Arial')
for f in fontlist:
    fontsmenu.add_radiobutton(label=f, variable=font_choice,
                              command=Changefonts)
fontsizemenu = Menu(formatmenu,tearoff=0)
formatmenu.add_cascade(label='Font Size',menu=fontsizemenu)
fontsizelist = [i for i in range(101)]
font_size_choice = IntVar()
font_size_choice.set(14)
for f in fontsizelist:
    fontsizemenu.add_radiobutton(label=f, variable=font_size_choice,
                                 command=Changefonts)
menubar.add_cascade(label='Format', menu=formatmenu)
#TODO viewmenu GUI
viewmenu = Menu(menubar, tearoff=0)
#viewmenu variables and adding commands
show_line_no = IntVar()
show_line_no.set(1)
viewmenu.add_checkbutton(label="Show Line Numbers", variable=show_line_no,
                         command=Updatelineno)
show_cursor_info = IntVar()
show_cursor_info.set(1)
viewmenu.add_checkbutton(label="Show Cursor Location at Bottom",
                         variable=show_cursor_info, command=Showcursorinfobar)
to_highlight_line = BooleanVar()
viewmenu.add_checkbutton(label="Highlight Current Line", onvalue=1, offvalue=0,
                         variable=to_highlight_line, command=Tohighlightline)
themesmenu = Menu(viewmenu, tearoff=0)
viewmenu.add_cascade(label="Themes", menu=themesmenu)
# Each value encodes "foreground.background" (parsed in Changethemes).
color_schemes = {
    'Day Light': '#000000.#FFFFFF',
    'Greygarious': '#83406A.#D1D4D1',
    'Aquamarine': '#5B8340.#D1E7E0',
    'Bold Beige': '#4B4620.#FFF0E1',
    'Cobalt Blue': '#ffffBB.#3333aa',
    'Olive Green': '#D1E7E0.#5B8340',
    'Night Mode': '#FFFFFF.#000000',
}
theme_choice = StringVar()
theme_choice.set('Day Light')
for t in sorted(color_schemes):
    themesmenu.add_radiobutton(label=t, variable=theme_choice,
                               command=Changethemes)
menubar.add_cascade(label='View', menu=viewmenu)
#TODO helpmenu GUI
helpmenu = Menu(menubar, tearoff=0)
menubar.add_cascade(label='Help', menu=helpmenu)
#helpmenu adding command
helpmenu.add_command(label="Help", command=Help)
helpmenu.add_command(label="About", command=About)
#configure mainmenu
root.config(menu=menubar)
#TODO shortcutbar GUI
shortcutbar = Frame(root, height=25,borderwidth=0)
shortcutbar.pack(expand="no", fill="x")
icons = ['Newfile', 'Openfile', 'Savefile', 'Cuttext', 'Copytext',
         'Pastetext', 'Undo', 'Redo', 'Findtext']
for icon in icons:
    toolbar_icon = PhotoImage(file="icons/{}.png".format(icon))
    # Each icon name doubles as the handler function's name.
    cmd = eval(icon)
    toolbar = Button(shortcutbar, image=toolbar_icon, command=cmd,relief=GROOVE)
    # Keep a reference on the widget so the image is not garbage-collected.
    toolbar.image = toolbar_icon
    toolbar.pack(side='left')
#TODO font combobox
fontcomboboxlabel=ttk.Label(shortcutbar,text=' Select Fonts') #combobar label
fontcomboboxlabel.pack(side='left')
fontcombobox = ttk.Combobox(shortcutbar, width = 20, values=fontlist ,
                            textvariable = font_choice,state='readonly')
fontcombobox.pack(side='left')
fontcombobox.current(0)
fontcombobox.bind("<<ComboboxSelected>>", Changefonts)
#TODO fontsize combobox
fontsizecomboboxlabel = ttk.Label(shortcutbar,text='Font Size') #combobar label
fontsizecomboboxlabel.pack(side='left')
fontsizecombobox = ttk.Combobox(shortcutbar, width=5,values=fontsizelist,
                                textvariable=font_size_choice,state='readonly')
fontsizecombobox.pack(side='left')
fontsizecombobox.current(13)
fontsizecombobox.bind("<<ComboboxSelected>>",Changefonts)
#bottomframe for Cursor bar
bottomframe = Frame(root, height=25,borderwidth=0)
bottomframe.pack(side = 'bottom', fill="x")
#TODO linebar GUI
linenumberbar = Text(root, width=4, padx=0, takefocus=0, border=0,
                     background="#f0f0f0", state="disabled", wrap="word")
linenumberbar.pack(side="left",fill="y")
#TODO scrollbar GUI
scrollbar = Scrollbar(root)
scrollbar.pack(side='right', fill='both')
#TODO contentext GUI
contenttext = Text(root, wrap='word',yscrollcommand=scrollbar.set,
                   font=(font_choice.get(),font_size_choice.get()), undo=1)
contenttext.pack(expand='yes', fill='both')
#syncing scrollbar with textarea
scrollbar.config(command=contenttext.yview)
#TODO cursorbarinfo GUI
cursorbarinfo = Label(bottomframe, text='Line: 1 | Column: 1')
cursorbarinfo.pack(fill=None, side='right', anchor='se')
#TODO keybindings GUI
# Bind both upper- and lower-case variants since Tk keysyms are case-sensitive.
contenttext.bind('<KeyPress-F1>', Help)
contenttext.bind('<Control-N>', Newfile)
contenttext.bind('<Control-n>', Newfile)
contenttext.bind('<Control-O>', Openfile)
contenttext.bind('<Control-o>', Openfile)
contenttext.bind('<Control-S>', Savefile)
contenttext.bind('<Control-s>', Savefile)
contenttext.bind('<Control-f>', Findtext)
contenttext.bind('<Control-F>', Findtext)
contenttext.bind('<Control-A>', Selectalltext)
contenttext.bind('<Control-a>', Selectalltext)
contenttext.bind('<Control-y>', Redo)
contenttext.bind('<Control-Y>', Redo)
contenttext.bind('<Control-z>', Undo)
contenttext.bind('<Control-Z>', Undo)
contenttext.bind('<Any-KeyPress>', Oncontentchanged)
contenttext.bind('<Button-3>', Showpopupmenu)
contenttext.tag_configure('active_line', background='ivory2')
#TODO popupmenu config.
popupmenu = Menu(contenttext, tearoff=0)
# Bind menu entries to their handlers directly instead of eval()-ing names.
popup_entries = [('Cut', Cuttext), ('Copy', Copytext), ('Paste', Pastetext),
                 ('Undo', Undo), ('Redo', Redo)]
for entry_label, handler in popup_entries:
    popupmenu.add_command(label=entry_label, compound="left", command=handler)
popupmenu.add_separator()
popupmenu.add_command(label='Select All Text', command=Selectalltext)
contenttext.focus_set()
# Bug fix: the Tk protocol name is case-sensitive and must be
# 'WM_DELETE_WINDOW'; with the original 'Wm_DELETE_WINDOW' spelling the
# confirm-on-close handler (Exiteditor) never ran.
root.protocol('WM_DELETE_WINDOW', Exiteditor)
root.mainloop()
data_structure/heap/max_heap.py | khoadnse/algorithm | 0 | 12771314 | <filename>data_structure/heap/max_heap.py
"""
Max Heap
"""
import unittest
from typing import TypeVar
from data_structure.heap.abstract_heap import AbstractHeap
T = TypeVar('T')
class MaxHeap(AbstractHeap[T]):
    """Array-backed max-heap built on AbstractHeap's storage and index helpers."""

    def __init__(self, array: list[T], cap: int = 10):
        # Seed the heap from `array` (capacity grows to fit it), or start
        # empty with the requested capacity.
        if array:
            super().__init__(max(len(array), cap))
            for i in range(len(array)):
                self._data[i] = array[i]
            self._size = len(array)
            self.build_heap()
        else:
            super().__init__(cap)

    def heapify(self, index: int):
        """Sift the element at `index` down until the max-heap property holds."""
        left, right = self._left(index), self._right(index)
        largest = left if left < self._size and self._data[left] > self._data[index] else index
        if right < self._size and self._data[right] > self._data[largest]:
            largest = right
        if largest != index:
            self.swap(index, largest)
            self.heapify(largest)

    def insert(self, value: T):
        """Append `value` and sift it up; raises RuntimeError when full."""
        if self.is_full():
            raise RuntimeError('Heap overflow')
        self._data[self._size] = value
        cur = self._size
        while cur > 0 and self._data[cur] > self._data[self._parent(cur)]:
            self.swap(cur, self._parent(cur))
            cur = self._parent(cur)
        self._size += 1
class TestMaxHeap(unittest.TestCase):
    """End-to-end exercise: build, insert, overflow, and drain by remove()."""

    def test_integer_heap(self):
        heap: MaxHeap[int] = MaxHeap([6, 8, 10, 26, 9, 2, 40, 22, 5, 32, 3], 13)
        self.assertFalse(heap.is_empty())
        self.assertEqual(heap.size, 11)
        heap.insert(18)
        heap.insert(6)
        self.assertEqual(heap.size, 13)
        # Internal array layout after the two inserts (level order).
        self.assertListEqual([element for element in heap],
                             [40, 32, 18, 26, 9, 10, 6, 22, 5, 8, 3, 2, 6])
        with self.assertRaises(RuntimeError):
            heap.insert(19)
        self.assertEqual(heap.remove(5), 10)
        self.assertEqual(heap.remove(8), 5)
        self.assertEqual(heap.remove(0), 40)
        self.assertEqual(heap.remove(2), 18)
        self.assertEqual(heap.remove(5), 6)
        self.assertEqual(heap.remove(6), 6)
        self.assertEqual(heap.remove(1), 26)
        self.assertEqual(heap.remove(2), 8)
        self.assertEqual(heap.remove(3), 3)
        self.assertEqual(heap.remove(3), 9)
        self.assertEqual(heap.remove(1), 22)
        self.assertEqual(heap.remove(0), 32)
        self.assertEqual(heap.remove(0), 2)
        with self.assertRaises(RuntimeError):
            heap.remove(0)
if __name__ == '__main__':
    # Run the embedded unit tests when this module is executed directly.
    unittest.main()
| 3.390625 | 3 |
dash/dashblocks/migrations/0001_initial.py | eHealthAfrica/dash | 0 | 12771315 | <filename>dash/dashblocks/migrations/0001_initial.py<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    # Initial schema for the dashblocks app: content blocks (DashBlock),
    # their optional gallery images (DashBlockImage), and the per-type
    # feature flags controlling which fields a block exposes
    # (DashBlockType).

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('orgs', '0001_initial'),
    ]
    operations = [
        # A single piece of CMS content belonging to an Org.
        migrations.CreateModel(
            name='DashBlock',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('is_active', models.BooleanField(default=True, help_text=b'Whether this item is active, use this instead of deleting')),
                ('created_on', models.DateTimeField(help_text=b'When this item was originally created', auto_now_add=True)),
                ('modified_on', models.DateTimeField(help_text=b'When this item was last modified', auto_now=True)),
                ('title', models.CharField(help_text=b'The title for this block of content, optional', max_length=255, null=True, blank=True)),
                ('summary', models.TextField(help_text=b'The summary for this item, should be short', null=True, blank=True)),
                ('content', models.TextField(help_text=b'The body of text for this content block, optional', null=True, blank=True)),
                ('image', models.ImageField(help_text=b'Any image that should be displayed with this content block, optional', null=True, upload_to=b'dashblocks', blank=True)),
                ('color', models.CharField(help_text=b'A background color to use for the image, in the format: #rrggbb', max_length=16, null=True, blank=True)),
                ('link', models.CharField(help_text=b'Any link that should be associated with this content block, optional', max_length=255, null=True, blank=True)),
                ('video_id', models.CharField(help_text=b'The id of the YouTube video that should be linked to this item', max_length=255, null=True, blank=True)),
                ('tags', models.CharField(help_text=b'Any tags for this content block, separated by spaces, can be used to do more advanced filtering, optional', max_length=255, null=True, blank=True)),
                ('priority', models.IntegerField(default=0, help_text=b'The priority for this block, higher priority blocks come first')),
                ('created_by', models.ForeignKey(help_text=b'The user which originally created this item', to=settings.AUTH_USER_MODEL)),
                ('modified_by', models.ForeignKey(help_text=b'The user which last modified this item', to=settings.AUTH_USER_MODEL)),
                ('org', models.ForeignKey(help_text=b'The organization this content block belongs to', to='orgs.Org')),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        # Extra images attached to a DashBlock (used as a gallery).
        migrations.CreateModel(
            name='DashBlockImage',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('is_active', models.BooleanField(default=True, help_text=b'Whether this item is active, use this instead of deleting')),
                ('created_on', models.DateTimeField(help_text=b'When this item was originally created', auto_now_add=True)),
                ('modified_on', models.DateTimeField(help_text=b'When this item was last modified', auto_now=True)),
                ('image', models.ImageField(height_field=b'height', width_field=b'width', upload_to=b'dashblock_images/')),
                ('caption', models.CharField(max_length=64)),
                ('priority', models.IntegerField(default=0, null=True, blank=True)),
                ('width', models.IntegerField()),
                ('height', models.IntegerField()),
                ('created_by', models.ForeignKey(help_text=b'The user which originally created this item', to=settings.AUTH_USER_MODEL)),
                ('dashblock', models.ForeignKey(to='dashblocks.DashBlock')),
                ('modified_by', models.ForeignKey(help_text=b'The user which last modified this item', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        # Content-type definition: boolean flags describe which fields a
        # block of this type exposes in the editor.
        migrations.CreateModel(
            name='DashBlockType',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('is_active', models.BooleanField(default=True, help_text=b'Whether this item is active, use this instead of deleting')),
                ('created_on', models.DateTimeField(help_text=b'When this item was originally created', auto_now_add=True)),
                ('modified_on', models.DateTimeField(help_text=b'When this item was last modified', auto_now=True)),
                ('name', models.CharField(help_text=b'The human readable name for this content type', unique=True, max_length=75)),
                ('slug', models.SlugField(help_text=b'The slug to idenfity this content type, used with the template tags', unique=True)),
                ('description', models.TextField(help_text=b'A description of where this content type is used on the site and how it will be dsiplayed', null=True, blank=True)),
                ('has_title', models.BooleanField(default=True, help_text=b'Whether this content should include a title')),
                ('has_image', models.BooleanField(default=True, help_text=b'Whether this content should include an image')),
                ('has_rich_text', models.BooleanField(default=True, help_text=b'Whether this content should use a rich HTML editor')),
                ('has_summary', models.BooleanField(default=True, help_text=b'Whether this content should include a summary field')),
                ('has_link', models.BooleanField(default=True, help_text=b'Whether this content should include a link')),
                ('has_gallery', models.BooleanField(default=False, help_text=b'Whether this content should allow upload of additional images, ie a gallery')),
                ('has_color', models.BooleanField(default=False, help_text=b'Whether this content has a color field')),
                ('has_video', models.BooleanField(default=False, help_text=b'Whether this content should allow setting a YouTube id')),
                ('has_tags', models.BooleanField(default=False, help_text=b'Whether this content should allow tags')),
                ('created_by', models.ForeignKey(help_text=b'The user which originally created this item', to=settings.AUTH_USER_MODEL)),
                ('modified_by', models.ForeignKey(help_text=b'The user which last modified this item', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        # Added after creation because DashBlockType is defined above.
        migrations.AddField(
            model_name='dashblock',
            name='dashblock_type',
            field=models.ForeignKey(verbose_name=b'Content Type', to='dashblocks.DashBlockType', help_text=b'The category, or type for this content block'),
            preserve_default=True,
        ),
    ]
| 1.828125 | 2 |
4depcheck/cli/dep_check_cli_parser.py | DrGruby/4depcheck | 5 | 12771316 | <reponame>DrGruby/4depcheck
#
# Licensed to 4depcheck under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. 4depcheck licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import argparse
import sys
import os
class DepCheckCLIParser:
    """Command-line argument parser for the 4depcheck tool."""

    def __init__(self):
        """Build the parser, parse ``sys.argv`` and validate the arguments.

        Exits the process with the validation status code when the
        supplied arguments are invalid.
        """
        super().__init__()
        self.parser = argparse.ArgumentParser(prog='4depcheck.py')
        self.parser.add_argument('project_name', metavar='PROJECT_NAME', type=str,
                                 help='Project name for this analysis')
        self.parser.add_argument('dir', metavar='PATH_TO_SCAN', type=str, help='The path to scan')
        self.parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.1.0')
        self.args, self.unknown = self.parser.parse_known_args(sys.argv[1:])
        # Bail out early if the arguments do not validate.
        status = self.verify_args(self.args)
        if status != 0:
            exit(status)

    def get_project_name(self):
        """Return the project name given on the command line."""
        return self.args.project_name

    def get_dir(self):
        """Return the path that should be scanned."""
        return self.args.dir

    @staticmethod
    def verify_args(args):
        """Return 0 when ``args.dir`` is an existing directory, else 1."""
        if os.path.isdir(args.dir):
            return 0
        print('[ERROR] The path argument is not valid.')
        return 1
| 2.1875 | 2 |
smoketests/tests/configless_test.py | erlware-deprecated/sinan | 7 | 12771317 | <reponame>erlware-deprecated/sinan
import unittest
import sin_testing as st
import os
import shutil
import pexpect
class TestConfigless(st.SmokeTest):
    """Smoke tests for sinan projects that run without a sinan.config file.

    Each `@st.sinan(...)` decorated method drives the sinan CLI via pexpect
    with explicit -p/-n flags (since no config file supplies them).
    """

    # clean the project
    @st.sinan("-p ctest_project -n 0.1.0 clean")
    def clean_configless(self, child, appdesc):
        """After `clean`, the _build directory must be gone."""
        child.expect(pexpect.EOF)
        self.assertTrue(not os.path.isdir(os.path.join(os.getcwd(), "_build")))
        return appdesc
    @st.sinan("-p ctest_project -n 0.1.0 build")
    def build_configless(self, child, app_desc):
        """After `build`, every app must have its compiled layout in _build."""
        child.expect(pexpect.EOF)
        build_dir = os.path.join(os.getcwd(),
                                 "_build",
                                 "ctest_project",
                                 "lib")
        self.assertTrue(os.path.isdir(build_dir))
        for n in app_desc.app_names:
            app_dir = os.path.join(build_dir, "%s-0.1.0" % n)
            self.assert_dirs_exist(app_dir,
                                   "ebin",
                                   "src",
                                   "include",
                                   "doc")
            # Sources are copied and beams are compiled per app.
            self.assert_files_exist(app_dir,
                                    [ "src", n + "_sup.erl"],
                                    ["src", n + "_app.erl"],
                                    ["ebin", n + "_sup.beam"],
                                    ["ebin", n + "_app.beam"])
    def test_configless_project(self):
        """Multi-app project: delete sinan.config, then clean and build."""
        app_desc = st.AppDesc(user_name = "Smoke Test User",
                              email = "<EMAIL>",
                              copyright_holder = "Smoke Test Copy, LLC.",
                              project_name = "configless_project",
                              project_version = "0.1.0.0",
                              app_names = ["app1", "app2", "app3"])
        self.do_run(app_desc)
        os.remove(os.path.join("sinan.config"))
        self.clean_configless(app_desc)
        self.build_configless(app_desc)
    def test_configless_single_app(self):
        """Single-app layout: hoist src/include/ebin to the project root."""
        app_desc = st.AppDesc(user_name = "Smoke Test User",
                              email = "<EMAIL>",
                              copyright_holder = "Smoke Test Copy, LLC.",
                              project_name = "app1",
                              project_version = "0.1.0.0",
                              app_names = ["app1"])
        self.do_run(app_desc)
        # Convert the generated multi-app layout into a flat single-app one.
        shutil.move(os.path.join("lib", "app1", "src"),
                    ".")
        shutil.move(os.path.join("lib", "app1", "include"),
                    ".")
        shutil.move(os.path.join("lib", "app1", "ebin"),
                    ".")
        shutil.rmtree(os.path.join("lib"))
        os.remove(os.path.join("sinan.config"))
        self.do_clean(app_desc)
        self.do_build(app_desc)
if __name__ == '__main__':
    unittest.main()
| 2.09375 | 2 |
dismod/utils.py | r0x0d/dismod | 0 | 12771318 | <filename>dismod/utils.py<gh_stars>0
import fnmatch
import os
from typing import Any
from typing import Generator
from typing import List
from typing import Tuple
def collect_files_in_module(filepath: str, ignore_folder: str) -> List[str]:
    """Recursively collect all ``*.py`` files under *filepath*.

    Directories whose name occurs in *ignore_folder* are pruned from the
    walk. NOTE(review): with a plain-string *ignore_folder*, ``d in
    ignore_folder`` is a substring test (e.g. ``"en"`` matches ``"venv"``
    only if it is a substring) -- preserved for backward compatibility.

    :param filepath: root directory to walk.
    :param ignore_folder: directory name (or container of names) to skip;
        a falsy value disables pruning.
    :return: paths of all matching ``.py`` files.
    """
    matches = []
    for root, dirs, filenames in os.walk(filepath):
        if ignore_folder:
            # Prune in place so os.walk (topdown) never descends into the
            # ignored directories. The original used a list comprehension
            # purely for its side effects; a filtered slice-assign is the
            # idiomatic way to prune the walk.
            dirs[:] = [d for d in dirs if d not in ignore_folder]
        for filename in fnmatch.filter(filenames, "*.py"):
            matches.append(os.path.join(root, filename))
    return matches
def neighborhood(
    iterable: List[Any],
) -> Generator[Any, Any, Any]:
    """Yield ``(previous, current, next)`` triples for each item.

    The first triple has ``previous=None`` and the last has ``next=None``;
    an empty input yields nothing.
    """
    if not iterable:
        # A bare return ends the generator cleanly; the original returned
        # the iterable itself, whose value is discarded inside a generator.
        return
    iterator = iter(iterable)
    prev_item = None
    current_item = next(iterator)
    for next_item in iterator:
        yield (prev_item, current_item, next_item)
        prev_item = current_item
        current_item = next_item
    # The final element has no successor.
    yield (prev_item, current_item, None)
def search_key_in_iterable(
    key: str,
    value: str,
    iterable: List[Any],
) -> Tuple[Any, ...]:
    """Return ``(index, item)`` of the first item with ``item[key] == value``.

    Falls back to ``(None, None)`` when no item matches.
    """
    for position, item in enumerate(iterable):
        if item[key] == value:
            return (position, item)
    return (None, None)
def split_list_in_chunks(
    elements: List[Any],
    chunk_size: int,
) -> Generator[Any, Any, Any]:
    """Yield *chunk_size* striped (round-robin) chunks of *elements*."""
    # Chunk k holds elements[k], elements[k + chunk_size], ...
    for offset in range(chunk_size):
        yield elements[offset::chunk_size]
| 2.65625 | 3 |
documentation_multiprocessing/08_reference/01_process_and_exceptions/process_and_exceptions_08_oop_11.py | software-foundations/learning-distributed-systems | 0 | 12771319 | import multiprocessing as mp
import itertools
# Shared result channel; child processes push into it, the parent drains it.
QUEUE = mp.Queue()
class Routine(mp.Process):
    """Worker process that doubles its ``number`` and puts the result on QUEUE."""
    def __init__(self, number: int, *args, **kwargs):
        # Forward any extra Process arguments (name, daemon, ...) to the base.
        mp.Process.__init__(self, *args, **kwargs)
        self.number = number
    def target(self, number: int) -> int:
        """The unit of work: return twice *number*."""
        return 2 * number
    def run(self):
        # Executed in the child process; the result travels back via QUEUE.
        result = self.target(self.number)
        QUEUE.put(result)
def main() -> None:
    """Run five Routine workers and print their results from QUEUE."""
    for i in range(5):
        routine = Routine(number=i)
        routine.start()
        # NOTE(review): join() inside the loop serialises the workers --
        # each one finishes before the next starts, so nothing actually
        # runs concurrently here.
        routine.join()
    for _ in range(5):
        print(QUEUE.get())
if __name__ == '__main__':
    main()
| 3.390625 | 3 |
inverse_covariance/tests/quic_graph_lasso_test.py | aldanor/skggm | 0 | 12771320 | <filename>inverse_covariance/tests/quic_graph_lasso_test.py
import numpy as np
import pytest
from numpy.testing import assert_raises
from numpy.testing import assert_allclose
from sklearn import datasets
from inverse_covariance import (
QuicGraphicalLasso,
QuicGraphicalLassoCV,
QuicGraphicalLassoEBIC,
quic,
)
def custom_init(X):
    """Return (sample covariance of X, max absolute upper-triangular entry)."""
    cov_estimate = np.cov(X, rowvar=False)
    lam_scale = np.max(np.abs(np.triu(cov_estimate)))
    return cov_estimate, lam_scale
class TestQuicGraphicalLasso(object):
    """Golden-value smoke tests for the QUIC graphical lasso estimators.

    Each case runs an estimator on the diabetes dataset and pins the norms
    of the covariance / precision / objective / duality-gap outputs against
    previously recorded values; these are regression checks on the numeric
    pipeline, not validations of statistical correctness.
    """

    @pytest.mark.parametrize(
        "params_in, expected",
        [
            (
                {},
                [
                    3.2437533337151625,
                    3.4490549523890648,
                    9.7303201146340168,
                    3.673994442010553e-11,
                ],
            ),  # NOQA
            (
                {"lam": 1.0, "max_iter": 100},
                [3.1622776601683795, 3.1622776601683795, 10.0, 0.0],
            ),
            (
                {"lam": 0.5, "mode": "trace"},
                [
                    3.2437533337151625,
                    3.4490549523890652,
                    32.290292419357321,
                    0.21836515326396364,
                ],
            ),  # NOQA
            (
                {
                    "lam": 0.5,
                    "mode": "path",
                    "path": np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5]),
                },
                [
                    8.3256240637201717,
                    9.7862122341861983,
                    22.752074276274861,
                    1.6530965731149066e-08,
                ],
            ),  # NOQA
            (
                {"lam": 1.0, "max_iter": 100, "init_method": "cov"},
                [
                    0.0071706976421055616,
                    1394.564448134179,
                    50.890448754467911,
                    7.1054273576010019e-15,
                ],
            ),  # NOQA
            (
                {"lam": 1.0, "max_iter": 100, "init_method": custom_init},
                [
                    0.0071706976421055616,
                    1394.564448134179,
                    50.890448754467911,
                    7.1054273576010019e-15,
                ],
            ),  # NOQA
            (
                {"lam": 1.0, "max_iter": 100, "init_method": "spearman"},
                [3.1622776601683795, 3.1622776601683795, 10.0, 1.7763568394002505e-15],
            ),  # NOQA
            (
                {"lam": 1.0, "max_iter": 100, "init_method": "kendalltau"},
                [3.1622776601683795, 3.1622776601683795, 10.0, 0.0],
            ),  # NOQA
        ],
    )
    def test_integration_quic_graphical_lasso(self, params_in, expected):
        """
        Just tests inputs/outputs (not validity of result).
        """
        X = datasets.load_diabetes().data
        ic = QuicGraphicalLasso(**params_in)
        ic.fit(X)
        # Compare norms of all key outputs against the recorded baseline.
        result_vec = [
            np.linalg.norm(ic.covariance_),
            np.linalg.norm(ic.precision_),
            np.linalg.norm(ic.opt_),
            np.linalg.norm(ic.duality_gap_),
        ]
        print(result_vec)
        assert_allclose(expected, result_vec, atol=1e-1, rtol=1e-1)

    @pytest.mark.parametrize(
        "params_in, expected",
        [
            (
                {},
                [
                    3.2437533337151625,
                    3.4490549523890648,
                    9.7303201146340168,
                    3.673994442010553e-11,
                ],
            ),  # NOQA
            (
                {"lam": 1.0, "max_iter": 100},
                [3.1622776601683795, 3.1622776601683795, 10.0, 0.0],
            ),
            (
                {"lam": 0.5, "mode": "trace"},
                [
                    3.2437533337151625,
                    3.4490549523890652,
                    32.290292419357321,
                    0.21836515326396364,
                ],
            ),  # NOQA
            (
                {
                    "lam": 0.5,
                    "mode": "path",
                    "path": np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5]),
                },
                [
                    8.3256240637201717,
                    9.7862122341861983,
                    22.752074276274861,
                    1.6530965731149066e-08,
                ],
            ),  # NOQA
            (
                {"lam": 1.0, "max_iter": 100, "init_method": "cov"},
                [
                    0.0071706976421055616,
                    1394.564448134179,
                    50.890448754467911,
                    7.1054273576010019e-15,
                ],
            ),  # NOQA
            (
                {"lam": 1.0, "max_iter": 100, "init_method": "spearman"},
                [3.1622776601683795, 3.1622776601683795, 10.0, 0.0],
            ),  # NOQA
            (
                {"lam": 1.0, "max_iter": 100, "init_method": "kendalltau"},
                [3.1622776601683795, 3.1622776601683795, 10.0, 0.0],
            ),  # NOQA
        ],
    )
    def test_integration_quic_graphical_lasso_fun(self, params_in, expected):
        """
        Just tests inputs/outputs (not validity of result).
        """
        X = datasets.load_diabetes().data
        # The functional API takes lam and the sample matrix explicitly,
        # so pop them out of the estimator-style parameter dict.
        lam = 0.5
        if "lam" in params_in:
            lam = params_in["lam"]
            del params_in["lam"]
        S = np.corrcoef(X, rowvar=False)
        if "init_method" in params_in:
            if params_in["init_method"] == "cov":
                S = np.cov(X, rowvar=False)
            del params_in["init_method"]
        precision_, covariance_, opt_, cpu_time_, iters_, duality_gap_ = quic(
            S, lam, **params_in
        )
        result_vec = [
            np.linalg.norm(covariance_),
            np.linalg.norm(precision_),
            np.linalg.norm(opt_),
            np.linalg.norm(duality_gap_),
        ]
        print(result_vec)
        assert_allclose(expected, result_vec, atol=1e-1, rtol=1e-1)

    @pytest.mark.parametrize(
        "params_in, expected",
        [
            (
                {"n_refinements": 1},
                [4.6528, 32.335, 3.822, 1.5581289048993696e-06, 0.01],
            ),  # NOQA
            (
                {
                    "lam": 0.5 * np.ones((10, 10)) - 0.5 * np.diag(np.ones((10,))),
                    "n_refinements": 1,
                },
                [4.6765, 49.24459, 3.26151, 6.769744583801085e-07],
            ),  # NOQA
            (
                {
                    "lam": 0.5 * np.ones((10, 10)) - 0.5 * np.diag(np.ones((10,))),
                    "n_refinements": 1,
                    "init_method": "cov",
                },
                [0.0106, 21634.95296, 57.6289, 0.00039],
            ),
            (
                {
                    "lam": 0.5 * np.ones((10, 10)) - 0.5 * np.diag(np.ones((10,))),
                    "n_refinements": 1,
                    "init_method": custom_init,
                },
                [0.0106, 21634.95296, 57.6289, 0.00039],
            ),  # NOQA
            (
                {
                    "lam": 0.5 * np.ones((10, 10)) - 0.5 * np.diag(np.ones((10,))),
                    "n_refinements": 1,
                    "init_method": "spearman",
                },
                [
                    4.8315707207048622,
                    38.709631332689789,
                    2.8265068394116657,
                    1.5312382906085276e-07,
                ],
            ),  # NOQA
            (
                {
                    "lam": 0.5 * np.ones((10, 10)) - 0.5 * np.diag(np.ones((10,))),
                    "n_refinements": 1,
                    "init_method": "kendalltau",
                },
                [
                    4.9007318106601074,
                    85.081499460930743,
                    2.0463861650623159,
                    0.00012530384889419821,
                ],
            ),  # NOQA
        ],
    )
    def test_integration_quic_graphical_lasso_cv(self, params_in, expected):
        """
        Just tests inputs/outputs (not validity of result).
        """
        X = datasets.load_diabetes().data
        ic = QuicGraphicalLassoCV(**params_in)
        ic.fit(X)
        result_vec = [
            np.linalg.norm(ic.covariance_),
            np.linalg.norm(ic.precision_),
            np.linalg.norm(ic.opt_),
            np.linalg.norm(ic.duality_gap_),
        ]
        # lam_ is a scalar for scalar penalties, a matrix for matrix ones;
        # only the scalar case has a recorded expected value.
        if isinstance(ic.lam_, float):
            result_vec.append(ic.lam_)
        elif isinstance(ic.lam_, np.ndarray):
            assert ic.lam_.shape == params_in["lam"].shape
        print(result_vec)
        assert_allclose(expected, result_vec, atol=1e-1, rtol=1e-1)
        assert len(ic.grid_scores_) == len(ic.cv_lams_)

    @pytest.mark.parametrize(
        "params_in, expected",
        [
            ({}, [3.1622776601683795, 3.1622776601683795, 0.91116275611548958]),
            ({"lam": 0.5 * np.ones((10, 10))}, [4.797, 2.1849]),
            (
                {"lam": 0.5 * np.ones((10, 10)), "init_method": custom_init},
                [0.0106, 35056.88460],
            ),  # NOQA
        ],
    )
    def test_integration_quic_graphical_lasso_ebic(self, params_in, expected):
        """
        Just tests inputs/outputs (not validity of result).
        """
        X = datasets.load_diabetes().data
        ic = QuicGraphicalLassoEBIC(**params_in)
        ic.fit(X)
        result_vec = [np.linalg.norm(ic.covariance_), np.linalg.norm(ic.precision_)]
        if isinstance(ic.lam_, float):
            result_vec.append(ic.lam_)
        elif isinstance(ic.lam_, np.ndarray):
            assert ic.lam_.shape == params_in["lam"].shape
        print(result_vec)
        assert_allclose(expected, result_vec, atol=1e-1, rtol=1e-1)

    def test_invalid_method(self):
        """
        Test behavior of invalid inputs.
        """
        X = datasets.load_diabetes().data
        ic = QuicGraphicalLasso(method="unknownmethod")
        assert_raises(NotImplementedError, ic.fit, X)
| 2.28125 | 2 |
scripts/generate_submit_set.py | imos/icfpc2021 | 4 | 12771321 | import subprocess
import json
def main(set_path):
    """Download each submission listed in *set_path* and save it per problem.

    Each non-empty line N (1-based) of the file holds
    ``"<submission_id> <globalist_source_problem>"``. The submission JSON is
    fetched via curl into a scratch file; a non-zero source problem is
    patched into the first bonus entry before writing ``<N>.json``.
    """
    # Use context managers so file handles are closed promptly; the
    # original leaked the listing and scratch file descriptors.
    with open(set_path) as listing:
        lines = listing.readlines()
    for line_index, line in enumerate(lines):
        if not line.strip():
            continue
        # Problem id is the 1-based line number (blank lines still count).
        problem_id = line_index + 1
        submission_id, globalist_source_problem = line.split(' ')
        globalist_source_problem = int(globalist_source_problem)
        print(submission_id, globalist_source_problem)
        subprocess.run(
            f'curl "https://icfpc.sx9.jp/submission?submission_id={submission_id}" '
            f'> "tmp.txt"',
            shell=True,
            check=True,
        )
        with open("tmp.txt") as scratch:
            payload = json.load(scratch)
        if globalist_source_problem != 0:
            payload["bonuses"][0]["problem"] = globalist_source_problem
        with open(f"{problem_id}.json", "w") as out:
            json.dump(payload, out)


if __name__ == '__main__':
    import fire
    fire.Fire(main)
| 2.09375 | 2 |
04_vehicle_detection/functions_detection.py | ifding/self-driving-car | 14 | 12771322 | import cv2
import matplotlib.pyplot as plt
import numpy as np
from functions_feat_extraction import image_to_features
from project_5_utils import stitch_together
def draw_labeled_bounding_boxes(img, labeled_frame, num_objects):
    """
    Starting from labeled regions, draw enclosing rectangles in the original color frame.
    """
    for label_value in range(1, num_objects + 1):
        # Locate every pixel belonging to the current labeled region.
        rows, cols = np.where(labeled_frame == label_value)
        # The enclosing box spans the extreme row/column coordinates.
        top_left = (np.min(cols), np.min(rows))
        bottom_right = (np.max(cols), np.max(rows))
        cv2.rectangle(img, top_left, bottom_right, color=(255, 0, 0), thickness=6)
    return img
def compute_heatmap_from_detections(frame, hot_windows, threshold=5, verbose=False):
    """
    Compute heatmaps from windows classified as positive, in order to filter false positives.

    :param frame: input color frame (only its shape is used for sizing).
    :param hot_windows: windows classified positive, each ((x_min, y_min), (x_max, y_max)).
    :param threshold: minimum number of overlapping detections to keep a pixel.
    :param verbose: if True, show frame / heatmap / thresholded heatmap.
    :return: (raw heatmap, thresholded-and-closed binary heatmap).
    """
    h, w, c = frame.shape
    heatmap = np.zeros(shape=(h, w), dtype=np.uint8)
    for bbox in hot_windows:
        # for each bounding box, add heat to the corresponding rectangle in the image
        x_min, y_min = bbox[0]
        x_max, y_max = bbox[1]
        heatmap[y_min:y_max, x_min:x_max] += 1  # add heat
    # apply threshold + morphological closure to remove noise
    _, heatmap_thresh = cv2.threshold(heatmap, threshold, 255, type=cv2.THRESH_BINARY)
    heatmap_thresh = cv2.morphologyEx(heatmap_thresh, op=cv2.MORPH_CLOSE,
                                      kernel=cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                                       (13, 13)), iterations=1)
    if verbose:
        # Side-by-side debug view of the frame and both heatmaps.
        f, ax = plt.subplots(1, 3)
        ax[0].imshow(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        ax[1].imshow(heatmap, cmap='hot')
        ax[2].imshow(heatmap_thresh, cmap='hot')
        plt.show()
    return heatmap, heatmap_thresh
def compute_windows_multiscale(image, verbose=False):
    """
    Naive implementation of multiscale window search.

    Slides 32x32, 64x64 and 128x128 windows (80% overlap) over
    progressively larger horizontal bands of the lower half of the frame
    and returns all windows concatenated.
    """
    h, w, c = image.shape
    windows_multiscale = []
    # Small windows: narrow band around the horizon.
    windows_32 = slide_window(image, x_start_stop=[None, None],
                              y_start_stop=[4 * h // 8, 5 * h // 8],
                              xy_window=(32, 32), xy_overlap=(0.8, 0.8))
    windows_multiscale.append(windows_32)
    windows_64 = slide_window(image, x_start_stop=[None, None],
                              y_start_stop=[4 * h // 8, 6 * h // 8],
                              xy_window=(64, 64), xy_overlap=(0.8, 0.8))
    windows_multiscale.append(windows_64)
    # Large windows: bottom (closest) portion of the frame.
    windows_128 = slide_window(image, x_start_stop=[None, None], y_start_stop=[3 * h // 8, h],
                               xy_window=(128, 128), xy_overlap=(0.8, 0.8))
    windows_multiscale.append(windows_128)
    if verbose:
        # Visualise each scale in its own color, stitched side by side.
        windows_img_32 = draw_boxes(image, windows_32, color=(0, 0, 255), thick=1)
        windows_img_64 = draw_boxes(image, windows_64, color=(0, 255, 0), thick=1)
        windows_img_128 = draw_boxes(image, windows_128, color=(255, 0, 0), thick=1)
        stitching = stitch_together([windows_img_32, windows_img_64, windows_img_128], (1, 3),
                                    resize_dim=(1300, 500))
        cv2.imshow('', stitching)
        cv2.waitKey()
    return np.concatenate(windows_multiscale)
def slide_window(img, x_start_stop=None, y_start_stop=None,
                 xy_window=(64, 64), xy_overlap=(0.5, 0.5)):
    """
    Implementation of a sliding window in a region of interest of the image.

    :param img: input image (only its height/width are used).
    :param x_start_stop: [min, max] horizontal extent of the search region;
        None (or None entries) means the full image width.
    :param y_start_stop: [min, max] vertical extent; None entries mean the
        full image height.
    :param xy_window: (width, height) of each window in pixels.
    :param xy_overlap: fractional (x, y) overlap between adjacent windows.
    :return: list of windows as ((start_x, start_y), (end_x, end_y)).
    """
    # Normalise the bounds into locals. The original signature used mutable
    # list defaults and wrote the resolved bounds back into them, leaking
    # one call's image size into the next and mutating caller lists.
    x_start, x_stop = x_start_stop if x_start_stop is not None else (None, None)
    y_start, y_stop = y_start_stop if y_start_stop is not None else (None, None)
    if x_start is None:
        x_start = 0
    if x_stop is None:
        x_stop = img.shape[1]
    if y_start is None:
        y_start = 0
    if y_stop is None:
        y_stop = img.shape[0]
    # Compute the span of the region to be searched
    x_span = x_stop - x_start
    y_span = y_stop - y_start
    # Pixels advanced per step in x/y (int(): np.int was removed in NumPy 1.24)
    n_x_pix_per_step = int(xy_window[0] * (1 - xy_overlap[0]))
    n_y_pix_per_step = int(xy_window[1] * (1 - xy_overlap[1]))
    # Compute the number of windows in x / y
    n_x_windows = int(x_span / n_x_pix_per_step) - 1
    n_y_windows = int(y_span / n_y_pix_per_step) - 1
    # Initialize a list to append window positions to
    window_list = []
    # Loop through finding x and y window positions.
    for i in range(n_y_windows):
        for j in range(n_x_windows):
            # Calculate window position
            start_x = j * n_x_pix_per_step + x_start
            end_x = start_x + xy_window[0]
            start_y = i * n_y_pix_per_step + y_start
            end_y = start_y + xy_window[1]
            # Append window position to list
            window_list.append(((start_x, start_y), (end_x, end_y)))
    # Return the list of windows
    return window_list
def draw_boxes(img, bbox_list, color=(0, 0, 255), thick=6):
    """
    Draw all bounding boxes in `bbox_list` onto a given image.
    :param img: input image
    :param bbox_list: list of bounding boxes
    :param color: color used for drawing boxes
    :param thick: thickness of the box line
    :return: a new image with the bounding boxes drawn
    """
    # Work on a copy so the caller's image stays untouched.
    annotated = np.copy(img)
    for bbox in bbox_list:
        # bbox is ((x_min, y_min), (x_max, y_max)).
        cv2.rectangle(annotated, tuple(bbox[0]), tuple(bbox[1]), color, thick)
    return annotated
# Define a function you will pass an image and the list of windows to be searched (output of slide_windows())
# Define a function you will pass an image and the list of windows to be searched (output of slide_windows())
def search_windows(img, windows, clf, scaler, feat_extraction_params):
    """Classify each window crop and return those predicted positive.

    :param img: full input frame to crop windows from.
    :param windows: list of ((x_min, y_min), (x_max, y_max)) windows.
    :param clf: fitted classifier with a ``predict`` method.
    :param scaler: fitted feature scaler with a ``transform`` method.
    :param feat_extraction_params: dict of feature-extraction settings;
        must contain 'resize_h' and 'resize_w'.
    :return: the subset of *windows* classified as positive detections.
    """
    hot_windows = []  # list to receive positive detection windows
    for window in windows:
        # Extract the current window from original image
        resize_h, resize_w = feat_extraction_params['resize_h'], feat_extraction_params['resize_w']
        test_img = cv2.resize(img[window[0][1]:window[1][1], window[0][0]:window[1][0]],
                              (resize_w, resize_h))
        # Extract features for that window using single_img_features()
        features = image_to_features(test_img, feat_extraction_params)
        # Scale extracted features to be fed to classifier
        test_features = scaler.transform(np.array(features).reshape(1, -1))
        # Predict on rescaled features
        prediction = clf.predict(test_features)
        # If positive (prediction == 1) then save the window
        if prediction == 1:
            hot_windows.append(window)
    # Return windows for positive detections
    return hot_windows
| 2.90625 | 3 |
plot/plot.py | naumovda/stokes | 0 | 12771323 | import os
from mpl_toolkits import mplot3d
from matplotlib import cm
import matplotlib.pyplot as plt
import pandas as pd
def plot_3d(data, x, y, z):
    """Draw a triangulated 3-D surface of *data*'s columns *x*, *y*, *z*.

    :param data: DataFrame holding the columns to plot.
    :param x: column name for the x axis.
    :param y: column name for the y axis.
    :param z: column name for the z (height) axis.
    :return: the configured 3-D axes object.
    """
    ax = plt.axes(projection="3d")
    # Bug fix: the original body read the module-level ``df`` instead of
    # the ``data`` parameter, silently ignoring the argument.
    ax.plot_trisurf(data[x], data[y], data[z], cmap=cm.Blues)
    ax.set_xticks(data[x].values)
    ax.set_yticks(data[y].values)
    ax.set_xlabel(x)
    ax.set_ylabel(y)
    return ax
if __name__ == "__main__":
os.chdir(os.path.dirname(__file__))
df = pd.read_csv("../data/csv/calculation.csv", sep=";")
fig = plt.figure(figsize=(14, 8))
ax = plot_3d(df, x="Beta", y="Alfa", z="J")
plt.show()
| 2.96875 | 3 |
tests/transformers/test_text_transformers.py | altescy/xsklearn | 0 | 12771324 | <gh_stars>0
from xsklearn.transformers.text_transformers import Lowercase
def test_lowercase() -> None:
    """Lowercase.fit_transform must lower-case every input string."""
    inputs = ["HeLLo WoRLD!", "fOO bAr bAz"]
    lower = Lowercase()
    results = lower.fit_transform(inputs)
    # One output per input, each fully lower-cased.
    assert len(results) == 2
    assert results[0] == "hello world!"
    assert results[1] == "foo bar baz"
| 3.015625 | 3 |
scatterauth/forms.py | caniko2/django-scatter-auth | 0 | 12771325 | <filename>scatterauth/forms.py
from scatterauth.settings import app_settings
from django import forms
from django.contrib.auth import authenticate, get_user_model
from django.utils.translation import ugettext_lazy as _
class LoginForm(forms.Form):
    """Scatter login payload: a server-issued nonce, the wallet public key
    and the signature over the nonce ("res"), all posted as hidden fields.
    """
    nonce = forms.CharField(widget=forms.HiddenInput, max_length=12)
    public_key = forms.CharField(widget=forms.HiddenInput, max_length=53)
    res = forms.CharField(widget=forms.HiddenInput, max_length=101)
    # NOTE(review): signature-length validation is currently disabled.
    # def clean_signature(self):
    #     sig = self.cleaned_data['signature']
    #     if len(sig) != 101:
    #         raise forms.ValidationError(_('Invalid signature'))
    #     return sig
# list(set()) here is to eliminate the possibility of double including the address field
signup_fields = list(set(app_settings.SCATTERAUTH_USER_SIGNUP_FIELDS + [app_settings.SCATTERAUTH_USER_PUBKEY_FIELD]))
class SignupForm(forms.ModelForm):
    """Signup form over the user model, restricted to the configured
    signup fields plus the public-key field."""
    def clean_address_field(self):
        # NOTE(review): address validation is currently disabled.
        # validate_eth_address(self.cleaned_data[app_settings.SCATTERAUTH_USER_PUBKEY_FIELD])
        return self.cleaned_data[app_settings.SCATTERAUTH_USER_PUBKEY_FIELD]
    class Meta:
        model = get_user_model()
        fields = signup_fields
# hack to set the method for cleaning address field: Django looks up
# clean_<fieldname>, and the field name is only known from settings.
setattr(SignupForm, 'clean_' + app_settings.SCATTERAUTH_USER_PUBKEY_FIELD, SignupForm.clean_address_field)
| 2.328125 | 2 |
Python/Football.py | luisgepeto/CodeEvalExamples | 0 | 12771326 | import sys
class TeamsDictionary(object):
    """Maps each team name to the list of country ids that mention it."""

    def __init__(self):
        self.teams = {}

    def add_team(self, team, country):
        """Record that *country* lists *team* in its squad."""
        self.teams.setdefault(team, []).append(country)

    def __repr__(self):
        # One "team:country,country;" fragment per team, space-separated,
        # teams and countries both in sorted order.
        fragments = []
        for team in sorted(self.teams):
            countries = ','.join(map(str, sorted(self.teams[team])))
            fragments.append(team + ":" + countries + ";")
        return " ".join(fragments)
class Country(object):
    """One country's team list, identified by a 1-based ``country_id``."""

    def __init__(self, country_id, teams_string, teams_dictionary):
        self.country_id = country_id
        # Team names are space-separated in the input.
        self.teams = teams_string.split(" ")
        self.teams_dictionary = teams_dictionary

    def process(self):
        """Register every team of this country in the shared dictionary."""
        for team_name in self.teams:
            self.teams_dictionary.add_team(team_name, self.country_id)
with open(sys.argv[1], 'r') as test_cases:
    # Each input line is one test case: countries separated by '|',
    # team names separated by spaces within each country.
    for test in test_cases:
        teams_dictionary = TeamsDictionary()
        for i, teams_string in enumerate(test.rstrip().split("|")):
            # Countries are numbered from 1 in input order.
            country = Country(i+1, teams_string.strip(), teams_dictionary)
            country.process()
        print(repr(teams_dictionary))
| 3.46875 | 3 |
tests/factories/input/definitions/integer_input_definition.py | TheLabbingProject/django_analyses | 1 | 12771327 | from factory import Faker
from factory.django import DjangoModelFactory
class IntegerInputDefinitionFactory(DjangoModelFactory):
    """Generates IntegerInputDefinition rows with fake, range-consistent
    values: min_value in [-20, -10], max_value in [20, 40] and default in
    [-10, 20], so min_value <= default <= max_value always holds."""
    key = Faker("pystr", min_chars=3, max_chars=50)
    required = Faker("pybool")
    description = Faker("sentence")
    min_value = Faker("pyint", min_value=-20, max_value=-10)
    max_value = Faker("pyint", min_value=20, max_value=40)
    default = Faker("pyint", min_value=-10, max_value=20)

    class Meta:
        model = "django_analyses.IntegerInputDefinition"
| 2.421875 | 2 |
dashboard/dashboard/change_internal_only.py | oneumyvakin/catapult | 4 | 12771328 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides the web interface for changing internal_only property of a Bot."""
import logging
from google.appengine.api import taskqueue
from google.appengine.datastore import datastore_query
from google.appengine.ext import ndb
from dashboard import add_point_queue
from dashboard.common import request_handler
from dashboard.common import utils
from dashboard.common import datastore_hooks
from dashboard.common import stored_object
from dashboard.models import anomaly
from dashboard.models import graph_data
# Number of Row entities to process at once (batch size per task).
_MAX_ROWS_TO_PUT = 25
# Number of TestMetadata entities to process at once (batch size per task).
_MAX_TESTS_TO_PUT = 25
# Which queue to use for tasks started by this handler. Must be in queue.yaml.
_QUEUE_NAME = 'migrate-queue'
class ChangeInternalOnlyHandler(request_handler.RequestHandler):
"""Changes internal_only property of Bot, TestMetadata, and Row."""
def get(self):
"""Renders the UI for selecting bots."""
masters = {}
bots = graph_data.Bot.query().fetch()
for bot in bots:
master_name = bot.key.parent().string_id()
bot_name = bot.key.string_id()
bots = masters.setdefault(master_name, [])
bots.append({
'name': bot_name,
'internal_only': bot.internal_only,
})
logging.info('MASTERS: %s', masters)
self.RenderHtml('change_internal_only.html', {
'masters': masters,
})
  def post(self):
    """Updates the selected bots internal_only property.

    POST requests will be made by the task queue; tasks are added to the task
    queue either by a kick-off POST from the front-end form, or by this handler
    itself.

    Request parameters:
      internal_only: "true" if turning on internal_only, else "false".
      bots: Bots to update. Multiple bots parameters are possible; the value
          of each should be a string like "MasterName/platform-name".
      test: An urlsafe Key for a TestMetadata entity.
      cursor: An urlsafe Cursor; this parameter is only given if we're part-way
          through processing a Bot or a TestMetadata.

    Outputs:
      A message to the user if this request was started by the web form,
      or an error message if something went wrong, or nothing.
    """
    # /change_internal_only should be only accessible if one has administrator
    # privileges, so requests are guaranteed to be authorized.
    datastore_hooks.SetPrivilegedRequest()
    internal_only_string = self.request.get('internal_only')
    if internal_only_string == 'true':
      internal_only = True
    elif internal_only_string == 'false':
      internal_only = False
    else:
      self.ReportError('No internal_only field')
      return
    bot_names = self.request.get_all('bots')
    test_key_urlsafe = self.request.get('test')
    cursor = self.request.get('cursor', None)
    # Dispatch: multiple bots -> fan out one task per bot; a single bot or
    # a single test -> process (a slice of) it directly in this request.
    if bot_names and len(bot_names) > 1:
      self._UpdateMultipleBots(bot_names, internal_only)
      self.RenderHtml('result.html', {
          'headline': ('Updating internal_only. This may take some time '
                       'depending on the data to update. Check the task queue '
                       'to determine whether the job is still in progress.'),
      })
    elif bot_names and len(bot_names) == 1:
      self._UpdateBot(bot_names[0], internal_only, cursor=cursor)
    elif test_key_urlsafe:
      self._UpdateTest(test_key_urlsafe, internal_only, cursor=cursor)
def _UpdateBotWhitelist(self, bot_master_names, internal_only):
"""Updates the global bot_whitelist object, otherwise subsequent add_point
calls will overwrite our work."""
bot_whitelist = stored_object.Get(add_point_queue.BOT_WHITELIST_KEY)
bot_names = [b.split('/')[1] for b in bot_master_names]
if internal_only:
bot_whitelist = [b for b in bot_whitelist if b not in bot_names]
else:
bot_whitelist.extend(bot_names)
bot_whitelist = list(set(bot_whitelist))
bot_whitelist.sort()
stored_object.Set(add_point_queue.BOT_WHITELIST_KEY, bot_whitelist)
def _UpdateMultipleBots(self, bot_names, internal_only):
"""Kicks off update tasks for individual bots and their tests."""
self._UpdateBotWhitelist(bot_names, internal_only)
for bot_name in bot_names:
taskqueue.add(
url='/change_internal_only',
params={
'bots': bot_name,
'internal_only': 'true' if internal_only else 'false'
},
queue_name=_QUEUE_NAME)
  def _UpdateBot(self, bot_name, internal_only, cursor=None):
    """Starts updating internal_only for the given bot and associated data.

    On the first call (no cursor) the Bot entity itself is updated; on
    continuation calls only the TestMetadata paging below runs. Each page of
    tests is fanned out as its own task, and a continuation task is enqueued
    while more pages remain.
    """
    master, bot = bot_name.split('/')
    bot_key = ndb.Key('Master', master, 'Bot', bot)
    if not cursor:
      # First time updating for this Bot.
      bot_entity = bot_key.get()
      if bot_entity.internal_only != internal_only:
        bot_entity.internal_only = internal_only
        bot_entity.put()
    else:
      # Continuation task: resume the TestMetadata query where we left off.
      cursor = datastore_query.Cursor(urlsafe=cursor)
    # Fetch a certain number of TestMetadata entities starting from cursor. See:
    # https://developers.google.com/appengine/docs/python/ndb/queryclass
    # Start update tasks for each existing subordinate TestMetadata.
    test_query = graph_data.TestMetadata.query(
        graph_data.TestMetadata.master_name == master,
        graph_data.TestMetadata.bot_name == bot)
    test_keys, next_cursor, more = test_query.fetch_page(
        _MAX_TESTS_TO_PUT, start_cursor=cursor, keys_only=True)
    for test_key in test_keys:
      # One task per test; the /change_internal_only handler routes these
      # to _UpdateTest via the 'test' parameter.
      taskqueue.add(
          url='/change_internal_only',
          params={
              'test': test_key.urlsafe(),
              'internal_only': 'true' if internal_only else 'false',
          },
          queue_name=_QUEUE_NAME)
    if more:
      # More pages of tests remain: re-enqueue ourselves with the cursor.
      taskqueue.add(
          url='/change_internal_only',
          params={
              'bots': bot_name,
              'cursor': next_cursor.urlsafe(),
              'internal_only': 'true' if internal_only else 'false',
          },
          queue_name=_QUEUE_NAME)
  def _UpdateTest(self, test_key_urlsafe, internal_only, cursor=None):
    """Updates the given TestMetadata and associated Row entities.

    On the first call (no cursor) the TestMetadata and all of its Anomaly
    entities are updated; Row entities are then updated one page at a time,
    with a continuation task enqueued while more pages remain.
    """
    test_key = ndb.Key(urlsafe=test_key_urlsafe)
    if not cursor:
      # First time updating for this TestMetadata.
      test_entity = test_key.get()
      if test_entity.internal_only != internal_only:
        test_entity.internal_only = internal_only
        test_entity.put()
      # Update all of the Anomaly entities for this test.
      # Assuming that this should be fast enough to do in one request
      # for any one test.
      anomalies = anomaly.Anomaly.GetAlertsForTest(test_key)
      for anomaly_entity in anomalies:
        if anomaly_entity.internal_only != internal_only:
          anomaly_entity.internal_only = internal_only
      # put_multi writes the whole list, including unchanged entities.
      ndb.put_multi(anomalies)
    else:
      # Continuation task: resume the Row query where we left off.
      cursor = datastore_query.Cursor(urlsafe=cursor)
    # Fetch a certain number of Row entities starting from cursor.
    rows_query = graph_data.Row.query(
        graph_data.Row.parent_test == utils.OldStyleTestKey(test_key))
    rows, next_cursor, more = rows_query.fetch_page(
        _MAX_ROWS_TO_PUT, start_cursor=cursor)
    for row in rows:
      if row.internal_only != internal_only:
        row.internal_only = internal_only
    ndb.put_multi(rows)
    if more:
      # More pages of rows remain: re-enqueue ourselves with the cursor.
      taskqueue.add(
          url='/change_internal_only',
          params={
              'test': test_key_urlsafe,
              'cursor': next_cursor.urlsafe(),
              'internal_only': 'true' if internal_only else 'false',
          },
          queue_name=_QUEUE_NAME)
| 1.890625 | 2 |
tests/constants.py | raiden-network/raiden-wizard | 9 | 12771329 | <reponame>raiden-network/raiden-wizard
import tempfile
from pathlib import Path
# Scratch directory for the test suite, placed under the OS temp dir so test
# runs never touch a real Raiden Wizard installation.
TESTING_TEMP_FOLDER = Path(tempfile.gettempdir()).joinpath("raiden-wizard-testing")
# Keystore files created by the tests live inside the scratch directory.
TESTING_KEYSTORE_FOLDER = TESTING_TEMP_FOLDER.joinpath("keystore")
| 1.835938 | 2 |
tests/test_version.py | goalsgame/ueimporter | 0 | 12771330 | import json
import ueimporter.version as version
def test_ueimporter_json_with_tag_will_succeed():
    # A payload carrying GitReleaseTag should surface the tag verbatim.
    payload = {'GitReleaseTag': '4.27.1-release'}
    assert version.UEImporterJson(payload).git_release_tag == '4.27.1-release'
def test_ueimporter_json_without_key_will_yield_empty_tag():
    # A misspelled key and a completely empty payload both fall back to ''.
    misspelled = {'MisspelledGitReleaseTag': '4.27.1-release'}
    for payload in (misspelled, {}):
        assert version.UEImporterJson(payload).git_release_tag == ''
def test_ueimporter_json_set_tag_will_succeed():
    # The tag parsed from the payload can be overwritten after construction.
    importer = version.UEImporterJson({'GitReleaseTag': '4.27.1-release'})
    importer.git_release_tag = '4.27.2-release'
    assert importer.git_release_tag == '4.27.2-release'
def test_ueimporter_json_will_yeild_json_with_tag():
    # Serialization emits exactly one key, indented by four spaces.
    importer = version.UEImporterJson({'GitReleaseTag': '4.27.1-release'})
    expected = '{\n    "GitReleaseTag": "4.27.1-release"\n}'
    assert importer.to_json(indent=4) == expected
def test_ueimporter_json_without_key_will_yeild_json_with_empty_tag():
    # An empty payload still serializes the key, with an empty value.
    expected = '{\n    "GitReleaseTag": ""\n}'
    assert version.UEImporterJson({}).to_json(indent=4) == expected
def test_ueimporter_json_with_invalid_key_will_yeild_json_with_valid_keys():
    # Unknown keys are dropped on serialization; only GitReleaseTag survives.
    payload = {
        'GitReleaseTag': '4.27.1-release',
        'ThisKeyDoesNotBelong': 'SomeValue',
    }
    expected = '{\n    "GitReleaseTag": "4.27.1-release"\n}'
    assert version.UEImporterJson(payload).to_json(indent=4) == expected
def test_from_build_version_json():
    # A complete Build.version document yields "major.minor.patch".
    document = json.dumps(
        {'MajorVersion': '4', 'MinorVersion': '27', 'PatchVersion': '1'})
    assert version.from_build_version_json(document) == '4.27.1'
def test_from_build_version_json_without_patch_will_fail():
    # A missing PatchVersion makes parsing fail with None.
    document = json.dumps({'MajorVersion': '4', 'MinorVersion': '27'})
    assert version.from_build_version_json(document) is None
def test_from_git_release_tag():
    # Both -release and -early-access-N suffixes are stripped to the version.
    cases = {
        '4.27.1-release': '4.27.1',
        '4.27.2-release': '4.27.2',
        '5.0.0-early-access-1': '5.0.0',
        '5.0.0-early-access-2': '5.0.0',
    }
    for tag, expected in cases.items():
        assert version.from_git_release_tag(tag) == expected
def test_from_git_release_tag_without_patch_will_fail():
    # Despite the name, a zero patch component still parses successfully.
    assert version.from_git_release_tag('4.27.0-release') == '4.27.0'
| 2.484375 | 2 |
Q/questionnaire/tests/tests_unit/tests_views/test_views_legacy.py | ES-DOC/esdoc-questionnaire | 0 | 12771331 | ####################
# ES-DOC CIM Questionnaire
# Copyright (c) 2015 ES-DOC. All rights reserved.
#
# University of Colorado, Boulder
# http://cires.colorado.edu/
#
# This project is distributed according to the terms of the MIT license [http://www.opensource.org/licenses/MIT].
####################
__author__ = "allyn.treshansky"
"""
.. module:: test_views_legacy
Tests the redirect_legacy_projects decorator
"""
from django.core.urlresolvers import reverse
from Q.questionnaire.tests.test_base import TestQBase
from Q.questionnaire.q_utils import add_parameters_to_url, FuzzyInt
from Q.questionnaire.views.views_legacy import *
class Test(TestQBase):
    """Tests the redirect_legacy_projects decorator via the 'project' view."""

    def setUp(self):
        super(Test, self).setUp()
        # just setup some silly test projects
        # I don't care if they're valid or not
        # as long as 1 has 'is_legacy=True' it's good enough
        self.current_project = QProject(
            name="current_project",
            title="Current Project",
            email=self.test_user.email,
            is_legacy=False,
        )
        self.current_project.save()
        self.legacy_project = QProject(
            name="legacy_project",
            title="Legacy Project",
            email=self.test_user.email,
            is_legacy=True,
        )
        self.legacy_project.save()

    def tearDown(self):
        super(Test, self).tearDown()

    #####################
    # redirection tests #
    #####################

    # using the 'q_project' view as my test.
    # it doesn't really matter which one I use,
    # as long as it has the '@redirect_legacy_projects' decorator.
    # this decorator only applies to "GET" requests,
    # so I don't need to worry about any potential data passed in,
    # but I still check for explicit parameters in the URL just in-case.

    def test_redirect_legacy_projects(self):
        test_params = {
            "a": "a",
            "b": "b",
        }
        current_request_url = add_parameters_to_url(reverse("project", kwargs={
            "project_name": "current_project",
        }), **test_params)
        legacy_request_url = add_parameters_to_url(reverse("project", kwargs={
            "project_name": "legacy_project",
        }), **test_params)
        # check that a non-legacy view did not redirect and returned a normal status_code...
        response = self.client.get(current_request_url)
        with self.assertRaises(AssertionError):
            self.assertRedirects(response, expected_url=LEGACY_HOST+current_request_url)
        self.assertEqual(response.status_code, 200)
        # Bug fix: a committed "import ipdb; ipdb.set_trace()" breakpoint used
        # to sit here, which hangs any non-interactive test run; removed.
        # TODO: THIS ASSERTION FAILS
        # check that a legacy view did redirect and the status_code was either 301 or 302...
        response = self.client.get(legacy_request_url)
        self.assertRedirects(response, expected_url=LEGACY_HOST+legacy_request_url, status_code=FuzzyInt(301, 302), fetch_redirect_response=False)
| 2.234375 | 2 |
Second.py | DheerajKN/Python-with-pygame | 1 | 12771332 | s="\t\tHello\n"
print(s.strip())
| 1.953125 | 2 |
data/train/python/89a40e2c776916f399452dad6381942ff0919c6ccontroller.py | harshp8l/deep-learning-lang-detection | 84 | 12771333 | <reponame>harshp8l/deep-learning-lang-detection
import sys
from chimera.core.exceptions import ObjectNotFoundException
_global_private_controller_singleton = None
def ConsoleController():
    """Return the process-wide console controller, creating it on first use."""
    global _global_private_controller_singleton
    if not _global_private_controller_singleton:
        _global_private_controller_singleton = _ConsoleControllerSingleton()
    return _global_private_controller_singleton
class _ConsoleControllerSingleton (object):
def __init__ (self):
self.controller = None
self.commander = None
def setController (self, controller):
self.controller = controller
def setCommander (self, commander):
self.commander = commander
def quit (self):
self.commander.quit(True)
def getManager(self):
return self.controller.getManager()
def getObject (self, name):
if not self.controller:
return False
try:
obj = self.getManager().getProxy(name)
except ObjectNotFoundException:
return False
return obj
| 2.203125 | 2 |
leetcode/35/35.py | yukienomiya/competitive-programming | 0 | 12771334 | class Solution:
def searchInsert(self, nums: List[int], target: int) -> int:
s = 0
e = len(nums) - 1
while(s <= e):
m = (s + e) // 2
if (nums[m] < target):
s = m + 1
elif (nums[m] > target):
e = m - 1
else:
return m
return s | 3.6875 | 4 |
piccolo/apps/migrations/commands/forwards.py | teners/piccolo | 0 | 12771335 | <filename>piccolo/apps/migrations/commands/forwards.py
from __future__ import annotations
import sys
import typing as t
from piccolo.apps.migrations.auto import MigrationManager
from piccolo.apps.migrations.commands.base import BaseMigrationManager
from piccolo.apps.migrations.tables import Migration
from piccolo.conf.apps import AppConfig, MigrationModule
class ForwardsMigrationManager(BaseMigrationManager):
    """Runs pending forward migrations for a single app.

    ``migration_id`` selects how far to migrate: "all" runs everything
    pending, "1" runs just the next migration, any other value runs up to
    and including that id.
    """

    def __init__(
        self,
        app_name: str,
        migration_id: str,
        fake: bool = False,
        *args,
        **kwargs,
    ):
        self.app_name = app_name
        self.migration_id = migration_id
        # When fake is True, migrations are recorded as run without running.
        self.fake = fake
        super().__init__()
    async def run_migrations(self, app_config: AppConfig) -> None:
        """Determine which migrations are pending and run (or fake) them.

        Exits the process (status 0) when nothing is pending, or with an
        error message when ``migration_id`` is not a known pending id.
        """
        already_ran = await Migration.get_migrations_which_ran(
            app_name=self.app_name
        )
        migration_modules: t.Dict[
            str, MigrationModule
        ] = self.get_migration_modules(app_config.migrations_folder_path)
        ids = self.get_migration_ids(migration_modules)
        print(f"All migration ids = {ids}")
        # Pending = on disk but not yet recorded in the Migration table.
        havent_run = sorted(set(ids) - set(already_ran))
        print(f"Haven't run = {havent_run}")
        if len(havent_run) == 0:
            # Make sure a status of 0 is returned, as we don't want this
            # to appear as an error in automated scripts.
            print("No migrations left to run!")
            sys.exit(0)
        if self.migration_id == "all":
            subset = havent_run
        elif self.migration_id == "1":
            subset = havent_run[:1]
        else:
            try:
                index = havent_run.index(self.migration_id)
            except ValueError:
                sys.exit(f"{self.migration_id} is unrecognised")
            else:
                # Run everything up to and including the requested id.
                subset = havent_run[: index + 1]
        for _id in subset:
            if self.fake:
                print(f"Faked {_id}")
            else:
                migration_module = migration_modules[_id]
                response = await migration_module.forwards()
                if isinstance(response, MigrationManager):
                    await response.run()
                print(f"-> Ran {_id}")
            # Record the migration as run, even when faked.
            await Migration.insert().add(
                Migration(name=_id, app_name=self.app_name)
            ).run()
    async def run(self):
        """Entry point: ensure the tracking table exists, then migrate."""
        print("Running migrations ...")
        await self.create_migration_table()
        app_config = self.get_app_config(app_name=self.app_name)
        await self.run_migrations(app_config)
async def forwards(
    app_name: str, migration_id: str = "all", fake: bool = False
):
    """
    Runs any migrations which haven't been run yet.

    :param app_name:
        The name of the app to migrate. Specify a value of 'all' to run
        migrations for all apps.
    :param migration_id:
        Migrations will be run up to and including this migration_id.
        Specify a value of 'all' to run all of the migrations. Specify a
        value of '1' to just run the next migration.
    :param fake:
        If set, will record the migrations as being run without actually
        running them.
    """
    if app_name == "all":
        # Fan out over every app, in dependency-sorted order; each app runs
        # all of its pending migrations.
        targets = [
            (name, "all")
            for name in BaseMigrationManager().get_sorted_app_names()
        ]
    else:
        targets = [(app_name, migration_id)]
    for target_name, target_id in targets:
        if app_name == "all":
            print(f"\nMigrating {target_name}")
            print("------------------------------------------------")
        manager = ForwardsMigrationManager(
            app_name=target_name, migration_id=target_id, fake=fake
        )
        await manager.run()
| 2.09375 | 2 |
matplotlib-tuxing.py | jinghuquan/jing2019-1-7 | 0 | 12771336 | <reponame>jinghuquan/jing2019-1-7
import matplotlib.pyplot as plt

# Bug fix: the rcParams key was misspelled 'font.sans-aerif', which makes
# rcParams raise KeyError; the correct key is 'font.sans-serif'.
# SimHei lets matplotlib render Chinese (CJK) axis labels correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']
# NOTE(review): the last entry repeats '2018/7/21'; '2018/7/27' was probably
# intended — confirm before plotting.  ('dete' looks like a typo for 'date',
# kept to preserve the module-level name.)
dete = ['2018/7/21', '2018/7/22', '2018/7/23', '2018/7/24', '2018/7/25', '2018/7/26', '2018/7/21']
ejercicio1/reducer.py | josefigueirasm/MUEI-ICS-MAP-REDUCE | 0 | 12771337 | #!/usr/bin/env python
import sys
NULL = "-9999.0"
minCheck = 0
for line in sys.stdin:
# remove leading and trailing whitespace
clean_line = line.strip()
# parse the input we got from mapper.py
temp, city = clean_line.split()
# Check if the value is valid and it's the first time it enters
if temp != NULL and minCheck == 0:
print clean_line
minCheck = 1
# The last time the loop asigns this variable is the last line, so it's the
# max temperature
max_line = clean_line
print max_line
| 3.40625 | 3 |
predict_image.py | blertal/keras_imagenet | 0 | 12771338 | <filename>predict_image.py<gh_stars>0
"""predict_image.py
This script is for testing a trained Keras ImageNet model. The model
could be one of the following 2 formats:
1. tf.keras model (.h5)
2. optimized TensorRT engine (.engine)
Example usage #1:
$ python3 predict_image.py saves/googlenet_bn-model-final.h5 \
sample.jpg
Example usage #2:
$ python3 predict_image.py tensorrt/googlenet_bn.engine \
sample.jpg
"""
import argparse
import numpy as np
import cv2
def parse_args():
    """Build and evaluate this script's command-line interface."""
    parser = argparse.ArgumentParser()
    parser.add_argument('model',
                        help='a tf.keras model or a TensorRT engine, e.g. saves/googlenet_bn-model-final.h5 or tensorrt/googlenet_bn.engine')
    parser.add_argument('jpg',
                        help='an image file to be predicted')
    return parser.parse_args()
def preprocess(img):
    """Turn a BGR uint8 image into the model's (1, 224, 224, 3) float input.

    Pixels are mapped from 0..255 to -1.0..1.0 and the channel order is
    converted to RGB.  Raises TypeError for non-3D or non-uint8 input.
    """
    if img.ndim != 3:
        raise TypeError('bad ndim of img')
    if img.dtype != np.uint8:
        raise TypeError('bad dtype of img')
    resized = cv2.resize(img, (224, 224))
    rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
    # Scale to 0.0..2.0, then shift so the range is centred on zero.
    scaled = rgb.astype(np.float32) * (2.0 / 255) - 1.0
    return np.expand_dims(scaled, axis=0)
def infer_with_tf(img, model):
    """Run the preprocessed image batch through a tf.keras model (.h5).

    TensorFlow is imported lazily so the TensorRT code path never pays its
    startup cost.  Returns the prediction vector for the single input image.
    """
    import os
    # Silence TensorFlow's C++ logging before the library is imported.
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    import tensorflow as tf
    from utils.utils import config_keras_backend, clear_keras_session
    from models.adamw import AdamW
    config_keras_backend()
    # load the trained model; AdamW is registered as a custom object so the
    # checkpoint can be deserialized.
    net = tf.keras.models.load_model(model, compile=False,
                                     custom_objects={'AdamW': AdamW})
    predictions = net.predict(img)[0]
    clear_keras_session()
    return predictions
def init_trt_buffers(cuda, trt, engine):
    """Initialize host buffers and cuda buffers for the engine.

    Asserts the engine exposes exactly the expected input/output bindings,
    then allocates a page-locked host array plus a matching device buffer
    for each.  Returns (host_input, cuda_input, host_output, cuda_output).
    """
    assert engine[0] == 'input_1:0'
    assert engine.get_binding_shape(0)[1:] == (224, 224, 3)
    # Buffer sizes are counted in float32 elements, scaled by max batch size.
    size = trt.volume((1, 224, 224, 3)) * engine.max_batch_size
    host_input = cuda.pagelocked_empty(size, np.float32)
    cuda_input = cuda.mem_alloc(host_input.nbytes)
    assert engine[1] == 'Logits/Softmax:0'
    assert engine.get_binding_shape(1)[1:] == (1000,)
    size = trt.volume((1, 1000)) * engine.max_batch_size
    host_output = cuda.pagelocked_empty(size, np.float32)
    cuda_output = cuda.mem_alloc(host_output.nbytes)
    return host_input, cuda_input, host_output, cuda_output
def infer_with_trt(img, model):
    """Inference the image with TensorRT engine (.engine file).

    Deserializes the engine, copies the input to the device, executes
    asynchronously on a CUDA stream, copies the output back, and returns
    the host output buffer after synchronizing.
    """
    # pycuda/tensorrt are imported lazily so the TF path never needs them.
    import pycuda.autoinit
    import pycuda.driver as cuda
    import tensorrt as trt
    TRT_LOGGER = trt.Logger(trt.Logger.INFO)
    with open(model, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime:
        engine = runtime.deserialize_cuda_engine(f.read())
    assert len(engine) == 2, 'ERROR: bad number of bindings'
    host_input, cuda_input, host_output, cuda_output = init_trt_buffers(
        cuda, trt, engine)
    stream = cuda.Stream()
    context = engine.create_execution_context()
    context.set_binding_shape(0, (1, 224, 224, 3))
    np.copyto(host_input, img.ravel())
    # Async copy-in, execute, copy-out — all queued on the same stream so
    # they run in order; synchronize() below waits for completion.
    cuda.memcpy_htod_async(cuda_input, host_input, stream)
    # NOTE(review): comparing only the first character of the version string
    # would misclassify a future TensorRT "10.x" as pre-7 — confirm.
    if trt.__version__[0] >= '7':
        context.execute_async_v2(bindings=[int(cuda_input), int(cuda_output)],
                                 stream_handle=stream.handle)
    else:
        context.execute_async(bindings=[int(cuda_input), int(cuda_output)],
                              stream_handle=stream.handle)
    cuda.memcpy_dtoh_async(host_output, cuda_output, stream)
    stream.synchronize()
    return host_output
def main():
    """CLI entry point: load labels, run inference on one image, print top-5."""
    args = parse_args()
    # Index -> class-name mapping; sorted so indices line up consistently.
    with open('data/synset_words.txt') as f:
        cls_list = sorted(f.read().splitlines())
    # Load and preprocess the test image.
    img = cv2.imread(args.jpg)
    if img is None:
        raise SystemExit('cannot load the test image: %s' % args.jpg)
    img = preprocess(img)
    # Dispatch on the model file extension.
    if args.model.endswith('.h5'):
        predictions = infer_with_tf(img, args.model)
    elif args.model.endswith('.engine'):
        predictions = infer_with_trt(img, args.model)
    else:
        raise SystemExit('ERROR: bad model')
    # Report the five highest-scoring classes, best first.
    for idx in predictions.argsort()[::-1][:5]:
        print('%5.2f %s' % (predictions[idx], cls_list[idx]))


if __name__ == '__main__':
    main()
| 2.828125 | 3 |
scratch/normalize.py | bemineni/USCIS | 0 | 12771339 | import argparse
import csv
# Ordered (status fragment, category) rules.  Order matters: the first
# fragment found in the status string wins, exactly mirroring the original
# if/elif chain.
_STATUS_RULES = (
    # Received state
    ("Fingerprint Fee Was Received", 1),
    ("Expedite Request Denied", 1),
    ("Case Was Received", 1),
    ("Case Was Reopened", 1),
    ("Duplicate Notice Was Mailed", 1),
    ("Fees Were Waived", 1),
    ("Fee Refund Was Mailed", 1),
    ("Date of Birth Was Updated", 1),
    ("Name Was Updated", 1),
    ("Fee Will Be Refunded", 1),
    ("Notice Was Returned To USCIS Because The Post Office Could Not Deliver It", 1),
    # Expedite request
    ("Expedite Request Received", 20),
    # Finger prints taken
    ("Show Fingerprints Were Taken", 30),
    # RFE
    ("Request for Initial Evidence Was Sent", 40),
    ("Request for Additional Evidence Was Sent", 40),
    # RFE received
    ("Response To USCIS' Request For Evidence Was Received", 50),
    ("Request For Evidence Was Received", 50),
    ("Correspondence Was Received And USCIS Is Reviewing It", 50),
    # transferred
    ("Case Transferred To Another Office", 60),
    ("Case Was Transferred And A New Office Has Jurisdiction", 60),
    # Interview
    ("Request To Reschedule My Appointment Was Received", 70),
    ("Ready to Be Scheduled for An Interview", 70),
    ("Interview Was Rescheduled", 70),
    ("Interview Was Scheduled", 70),
    ("Case Was Updated To Show That No One Appeared for In-Person Processing", 70),
    # Interview complete
    ("Interview Was Completed And My Case Must Be Reviewed", 80),
    # Approved
    ("Card Was Mailed To Me", 90),
    ("Case Was Approved", 90),
    ("Interview Cancelled And Notice Ordered", 90),
    ("Notice Explaining USCIS Actions Was Mailed", 90),
    ("Case Closed Benefit Received By Other Means", 90),
    ("Card Was Returned To USCIS", 90),
    ("New Card Is Being Produced", 90),
    # Denied
    ("Case Was Denied", 100),
    ("Petition/Application Was Rejected For Insufficient Funds", 100),
    ("Withdrawal Acknowledgement Notice Was Sent", 100),
    # Rejected
    ("Case Rejected", 110),
    ("Case Was Rejected", 110),
    # Card delivery
    ("Card Was Picked Up By The United States Postal Service", 120),
    # NOTE(review): the original chain listed "New Card Is Being Produced"
    # a second time here with category 120, but the earlier category-90 rule
    # always matched first, so that branch was unreachable dead code.  It is
    # dropped to keep behavior identical; confirm whether 90 or 120 was the
    # intended category.
    ("Card Was Delivered To Me By The Post Office", 120),
)


def findCategory(status):
    """Map a USCIS case-status string to its numeric processing category.

    The first rule whose fragment occurs as a substring of ``status``
    decides the category; anything unrecognized falls into the catch-all
    bucket 130.
    """
    for fragment, category in _STATUS_RULES:
        if fragment in status:
            return category
    # everything else
    return 130
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--infile', help="Input CSV file")
parser.add_argument('--outfile', help="Ouput CSV file")
args = parser.parse_args()
with open(args.infile, newline='') as inputcsvfile, open(args.outfile, 'w', newline='') as outcsvfile:
inreader = csv.reader(inputcsvfile, delimiter=',')
outwriter = csv.writer(outcsvfile, delimiter=',',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
for row in inreader:
row[4] = findCategory(row[2])
outwriter.writerow(row)
| 2.625 | 3 |
demo_sms.py | ZompaSenior/pygsm | 0 | 12771340 | <gh_stars>0
import time
from pygsm import GsmModem
# Boot the GSM modem on the first USB serial port, with debug logging.
gsm = GsmModem(port = "/dev/ttyUSB0", logger = GsmModem.debug_logger).boot()
print("Waiting for network...")
# Blocks until the modem registers on the network (return value unused).
s = gsm.wait_for_network()
# The message body is read once from messaggio.txt ("message" in Italian).
with open("messaggio.txt", "rt") as f:
    messaggio = f.read()
# lista.csv holds one phone number per line; send the same message to each,
# pausing 1s between sends so the modem can keep up.
with open("lista.csv", "rt") as f:
    for i, l in enumerate(f):
        gsm.send_sms(l.strip(), messaggio)
        time.sleep(1)
gsm.disconnect()
src/wrangle_data.py | RobBlumberg/DSCI_522_Group_302 | 1 | 12771341 | <gh_stars>1-10
#!/home/dkruszew/anaconda3/bin/python
# -*- coding: utf-8 -*-
# author: <NAME>
# date: 2020-01-21
#
"""This script imports raw .csv files for horse-racing data in Hong Kong from user-defined file-path,
performs pre-preprocessing, merges the files together, and writes them to a user-specified location
on the local machine as data_train.csv and data_test.csv. This script takes the filepath where the raw
data is saved and the location where the user would like the compiled data to be written to locally.
Usage: wrangle_data.py <file_path_in> <file_path_out>
Arguments:
<file_path_in> Path where the raw data exists.
<file_path_out> Path where the compiled data is to be written to locally.
"""
import os
import numpy as np
import pandas as pd
from docopt import docopt
from sklearn.model_selection import train_test_split
opt = docopt(__doc__)
def main(file_path_in, file_path_out):
    """Wrangle the raw horse-racing CSVs into train/test splits.

    Reads the five raw files (horse_info.csv, results.csv, comments.csv,
    trackwork.csv, barrier.csv) from ``file_path_in``, merges them into a
    single table, and writes data_train.csv / data_test.csv under
    ``file_path_out``.  Returns None; raises on failure.
    """
    frames = import_files(file_path_in)
    combined = merge_results(*frames)
    split_and_write_data(combined, file_path_out)
    print(f"successfully written data to {file_path_out}!\n")
def import_files(filepath):
    """Load the five raw CSVs from ``filepath``.

    Returns a tuple of DataFrames in the order:
    (horse_info, results, comments, trackwork, barrier), each read with its
    first column as the index.
    """
    print("==========\nstarting import...\n")
    names = ("horse_info", "results", "comments", "trackwork", "barrier")
    frames = tuple(
        pd.read_csv(f"{filepath}/{name}.csv", index_col=0) for name in names
    )
    print("==========\nsuccessfully imported CSV data!\n")
    return frames
def merge_results(horse_info, results, comments, trackwork, barrier):
    """
    returns a merged dataframe of relevant .csv files.

    NOTE(review): ``results`` and ``barrier`` are mutated in place (a
    'dataset' column is added; 'time' is renamed), and ``trackwork`` is
    accepted but never used — confirm both are intentional.

    Parameters
    ----------
    horse_info
        dataframe holding horse info.
    results
        dataframe holding race results.
    comments
        dataframe holding comments for race.
    trackwork
        dataframe holding track work information.
    barrier
        dataframe holding barrier trial results.
    Returns
    -------
    a Pandas dataframe containing relevants input dataframes.
    """
    print("==========\nstarting merge...\n")
    # Merge comments onto results
    results['dataset'] = 'results'
    results_comments = pd.merge(results, comments, how="left", on=["horseno", "date", "raceno", "plc"])
    # Rename barrier time which is the same as finish time in results
    barrier.rename(columns={'time': 'finishtime'}, inplace=True)
    barrier['dataset'] = 'barrier'
    # Merge barrier onto results_comments (row-wise stack, columns aligned).
    barrier_binded = pd.concat([results_comments, barrier], axis = 0, ignore_index=False, sort=False)
    # Merge horse_info onto data frame
    merged_data = pd.merge(barrier_binded, horse_info, how='left', on=['horse'])
    # Removed the columns with _ch as this indicated Chinese.
    final_data = merged_data[merged_data.columns[~merged_data.columns.str.contains('.*_ch')]]
    # Drop repeated columns and unnessary indexes
    final_data = final_data.drop(['trainer_y'], axis=1)
    final_data['date'] = pd.to_datetime(final_data['date'])
    print("==========\ncompleted merge!\n")
    return final_data
def split_and_write_data(final_data, filepath):
    """
    splits the data into a train and test sets with a 8/2 split, then writes them to the specified file path.

    The split is shuffled but reproducible (fixed random_state=1); output
    files are data_train.csv and data_test.csv, written with the index.

    Parameters
    ----------
    final_data
        the dataframe containing the merged data
    filepath
        the user-specified filepath
    Returns
    -------
    None
    """
    print("==========\nstarting to split data into test and train sets\n")
    # shuffle and split the data.
    data_train, data_test = train_test_split(final_data,
                                             random_state=1,
                                             test_size=0.2,
                                             shuffle=True)
    print("==========\ndata is split, writing to file\n")
    data_train.to_csv(f"{filepath}/data_train.csv")
    data_test.to_csv(f"{filepath}/data_test.csv")
def path_validation():
    """Raise ValueError unless both command-line paths exist on disk."""
    cli_paths = (opt["<file_path_in>"], opt["<file_path_out>"])
    if not all(os.path.exists(path) for path in cli_paths):
        raise ValueError("File paths do not exist")


# Validate eagerly at import time, before any work starts.
path_validation()

# script entry point
if __name__ == '__main__':
    main(opt["<file_path_in>"], opt["<file_path_out>"])
| 3.234375 | 3 |
src/contrib/change_chunk_names.py | memento42429/metashape-scripts | 40 | 12771342 | <filename>src/contrib/change_chunk_names.py
import Metashape
"""
Metashape Chunk Name Changer Script (v 1.0)
<NAME>, Feb 2021
Usage:
    Workflow -> Batch Process -> Add -> Run script
This script changes chunks name refering to the first image name of the chunk.
When the image name include "_", this splits the name and join first three words.
ex) image name: "M33333_human_CR_000" -> chunk name: "M33333_human_CR"
"""
compatible_major_version = "1.8"
# Metashape reports e.g. "1.8.2"; keep only "major.minor" for the check.
found_major_version = ".".join(Metashape.app.version.split('.')[:2])
if found_major_version != compatible_major_version:
    raise Exception("Incompatible Metashape version: {} != {}".format(found_major_version, compatible_major_version))
doc = Metashape.app.document
chunks = doc.chunks
# Rename every enabled chunk after its first camera's label.
for chunk in chunks:
    if chunk.enabled is True:
        camera_name = str(chunk.cameras[0].label)
        # First three "_"-separated words (the whole label if fewer).
        chunk_name = "_".join(camera_name.split("_")[:3])
        chunk.label = chunk_name
app.py | cjoshi7/covid19-date-selector | 2 | 12771343 | import json
import pandas as pd
import plotly.express as px
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from urllib.request import urlopen
# Dash application styled with the Bootstrap theme.
app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])
# Header buttons linking to the project docs and the raw NYT dataset.
buttons = dbc.ButtonGroup(
    [
        html.Form(action="https://github.com/cjoshi7/covid19-date-selector",
                  children=dbc.Button("Documentation", color="primary", type="submit")),
        html.Form(action="https://github.com/nytimes/covid-19-data/blob/master/us-counties.csv",
                  children=dbc.Button("Download Dataset", color="primary", type="submit"))
    ]
)
# Metric toggle: the value feeds the 'datatype' callback input below.
radiobuttons = dcc.RadioItems(
    id="datatype",
    options = [
        {"label": "Infection Rate", "value": "cases"},
        {"label": "Death Rate", "value": "deaths"}
    ],
    value = "cases",
    className="radio",
)
# Date picker bounded to the range covered by the bundled dataset.
date_selector = dcc.DatePickerSingle(
    id="dateselector",
    min_date_allowed="2020-1-21",
    max_date_allowed="2021-2-5",
    initial_visible_month="2021-2-5",
    date="2021-2-5"
)
# Page header with title, tagline and the button group.
jumbotron = dbc.Jumbotron(
    [
        html.H1("COVID-19 Date Selector", className="display-3"),
        html.P(
            "Visualize the spread of the virus on a specific day",
            className="lead",
        ),
        html.Hr(className="my-2"),
        html.P(
            "@cjoshi7",
        ),
        html.P(buttons),
    ]
)
# Overall page layout: header, controls, then the choropleth graph.
app.layout = html.Div([
    html.Div(jumbotron),
    html.Div(date_selector),
    html.Div(radiobuttons),
    html.Div(
        [dcc.Graph(id="choropleth")]
    )
    # Include multiple types of graphs for each date.
])
@app.callback(
    Output("choropleth", "figure"),
    Input("dateselector", "date"),
    Input("datatype", "value"))
def display_choropleth(date, datatype):
    """Rebuild the county-level US choropleth for the chosen date/metric."""
    color = "Viridis"
    label = "Infection Rate"
    if datatype == "deaths":
        color = "hot"
        label = "Death Rate"
    # County boundary GeoJSON keyed by FIPS code.
    # NOTE(review): the GeoJSON download and the CSV re-read happen on every
    # interaction — consider caching; left unchanged here.
    with urlopen("https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json") as response:
        counties = json.load(response)
    raw_df = pd.read_csv("us-counties.csv",
                         dtype={"fips": str})
    # Keep only the rows for the selected day.
    filtered_df = raw_df[raw_df.date == date]
    fig = px.choropleth(filtered_df, geojson=counties, locations='fips', color=datatype,
                        color_continuous_scale=color,
                        # Cap the scale at 1.5x the day's mean so outlier
                        # counties don't wash out the rest of the map.
                        range_color=(0, filtered_df[datatype].mean() * 1.5),
                        scope="usa",
                        labels={datatype: label}
                        )
    fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0})
    return fig
if __name__ == '__main__':
    app.run_server(debug=True)
| 2.703125 | 3 |
atividade-3/questao1.py | David-Marcoss/POO1-PYTHON | 0 | 12771344 | <reponame>David-Marcoss/POO1-PYTHON
from datetime import date
class pessoa:
    """A person with a name, a birth date ("dd/mm/yyyy") and a height.

    Portuguese identifiers and user-facing messages are kept so callers and
    output stay unchanged.
    """

    def __init__(self, nome, data, altura):
        self._nome = nome
        self._data = data
        self._altura = altura

    def imprimir_dados(self):
        """Print every field plus the computed age."""
        print(f"nome:{self._nome}\ndata de nacimento:{self._data}\naltura:{self._altura}\nidade: {self.idade()}\n")

    def idade(self):
        """Return the age in whole years, or -1 if the stored date is malformed.

        NOTE: only the birth year is used, so the result can be one year too
        high before the person's birthday in the current year.
        """
        dt = self.data.split("/")
        # Day/month up to 2 digits, year up to 4 digits; the extra len(dt)
        # check avoids an IndexError on dates without two '/' separators.
        if len(dt) == 3 and (len(dt[0]) < 3) and (len(dt[1]) < 3) and (len(dt[2]) < 5):
            ano_atual = date.today()
            ano = int(dt[2])
            idade1 = ano_atual.year - ano
            return idade1
        else:
            print("Erro: nao foi possivel calcular idade")
            return -1

    @property
    def nome(self):
        return self._nome

    @nome.setter
    def nome(self, nome):
        self._nome = nome

    @property
    def data(self):
        return self._data

    @data.setter
    def data(self, data):
        dt = data.split("/")
        # Bug fix: the original called str.copy (which does not exist, so any
        # accepted value raised AttributeError) and its length limits
        # (<2/<2/<4) rejected every normal dd/mm/yyyy value.  The limits now
        # mirror idade() and the value is actually stored.
        if len(dt) == 3 and (len(dt[0]) < 3) and (len(dt[1]) < 3) and (len(dt[2]) < 5):
            self._data = data
        else:
            print("Erro! formato da data de nacimemto errado")

    @property
    def altura(self):
        return self._altura

    @altura.setter
    def altura(self, altura):
        # Height must be strictly positive.
        if altura > 0:
            self._altura = altura
        else:
            print("Erro: altura não pode ser negatva")
p1 = pessoa("david","14/03/2001",1.70)
p2 = pessoa("carlos","14/03/1991",1.70)
p1.imprimir_dados()
p2.imprimir_dados()
p1.idade() | 3.78125 | 4 |
src/home_finder/settings/seloger.py | nekomamoushi/home-finder | 0 | 12771345 | <filename>src/home_finder/settings/seloger.py
# -*- coding: utf-8 -*-
from pathlib import Path
from home_finder.utils.file import csv_load
# CSV mapping Île-de-France cities to INSEE/postal codes, shipped in the
# repo's res/ directory (four levels above this module).
INSEE_CODE_CITES_FILENAME = Path(__file__).parent.parent.parent.parent / "res" / "minified-code-postal-code-insee-iledefrance-2015.csv"
class SelogerSettings(object):
    """Builds a seloger.com search URL from the user's home-search settings.

    ``settings`` is expected to expose ``surface`` and ``price`` as
    (min, max) pairs, plus ``rooms``, ``bedrooms`` and a list of ``cities``
    (French city names resolved against the bundled INSEE-code CSV).
    """

    # Base endpoint for listing searches.
    SEARCH_URL = "https://www.seloger.com/list.htm"

    def __init__(self, settings):
        self._settings = settings

    @property
    def url(self):
        """Return the complete search URL with every filter encoded."""
        temp_url = self.SEARCH_URL
        # Fixed query flags -- presumably listing-type / project / photo
        # filters; TODO confirm their meaning against the site's URL scheme.
        temp_url += "?types=1&projects=2&enterprise=0&natures=1,2&picture=15"
        temp_url += "&surface={0}".format(self.process_surface())
        temp_url += "&rooms={0}".format(self.process_rooms())
        temp_url += "&bedrooms={0}".format(self.process_bedrooms())
        temp_url += "&price={0}".format(self.process_price())
        temp_url += "&places={0}".format(self.process_cities())
        temp_url += "&qsVersion=1.0"
        return temp_url

    def process_surface(self):
        """Format the surface range as ``min/max``; 0 means unbounded (NaN)."""
        s = self._settings.surface
        s_min = "NaN" if s[0] == 0 else s[0]
        s_max = "NaN" if s[1] == 0 else s[1]
        return "{0}/{1}".format(s_min, s_max)

    def process_rooms(self):
        """Return the rooms filter unchanged."""
        return self._settings.rooms

    def process_bedrooms(self):
        """Return the bedrooms filter unchanged."""
        return self._settings.bedrooms

    def process_price(self):
        """Format the price range as ``min/max``; 0 means unbounded (NaN)."""
        p = self._settings.price
        p_min = "NaN" if p[0] == 0 else p[0]
        p_max = "NaN" if p[1] == 0 else p[1]
        return "{0}/{1}".format(p_min, p_max)

    def process_cities(self):
        """Translate the configured city names into seloger place tokens.

        Each name is resolved against the bundled INSEE-code CSV; an unknown
        name raises an exception pointing at that file.
        """
        def city_row(codes, city):
            # Linear scan; ``code[0]`` is assumed to be the upper-case city
            # name column of the CSV -- TODO confirm the CSV layout.
            for code in codes:
                if city == code[0]:
                    break
            else:
                error_msg = "ERROR: <{}> does not exists".format(city)
                error_msg += "ERROR: See {}".format(INSEE_CODE_CITES_FILENAME)
                raise Exception(error_msg)
            return code

        def process_code(code):
            # Paris ("75") is passed through as a departement code; every
            # other INSEE code gets a '0' inserted after its first two digits.
            if code == "75":
                return code
            return "{0}0{1}".format(code[0:2], code[2:])

        def build_places(places):
            # In: [75, 940046]
            # Out: [{cp:75}|{ci:940046}]
            # ``cp`` = departement/postal code, ``ci`` = city id -- presumably;
            # verify against seloger's URL format.
            temp_places = ""
            for place in places:
                if place == "75":
                    temp_places += "{{cp:75}}|"
                else:
                    temp_places += "{{ci:{0}}}|".format(place)
            temp_places = temp_places.rstrip('|')
            return '[' + temp_places + ']'

        insee_codes = csv_load(INSEE_CODE_CITES_FILENAME)
        places = []
        for city in self._settings.cities:
            row = city_row(insee_codes, city.upper())
            processed = process_code(row[2])
            places.append(processed)
        return build_places(places)
| 2.78125 | 3 |
src/output.py | awsassets/superfish | 0 | 12771346 | <gh_stars>0
def save_in_csv(login, filename="data"):
    """Append one captured login as a CSV row: ``ip, email, password``.

    Args:
        login: object exposing ``ip``, ``email`` and ``pwd`` attributes.
        filename: output file name without the ``.csv`` extension.
    """
    try:
        # ``with`` guarantees the handle is flushed and closed; the original
        # called .write() on an open() result and leaked the file object.
        with open(filename + ".csv", "a") as out:
            out.write(f"{login.ip}, {login.email}, {login.pwd}\n")
    except Exception as err:
        print(err)
        exit(1)
def save_in_text(login, filename="data"):
    """Append one captured login as a line: ``Ip:.. Email:.. Password:..``.

    Args:
        login: object exposing ``ip``, ``email`` and ``pwd`` attributes.
        filename: output file name without the ``.txt`` extension.
    """
    try:
        # BUG FIX: the original f-string was truncated/redacted and did not
        # parse; the password field is reconstructed to mirror save_in_csv.
        # ``with`` also closes the handle the original leaked.
        with open(filename + ".txt", "a") as out:
            out.write(f"Ip:{login.ip} Email:{login.email} Password:{login.pwd}\n")
    except Exception as err:
        print(err)
        exit(1)
def autosave(res, Login):
    """Persist *Login* in the requested output format.

    ``res`` may be ``"csv"`` or ``"text"``; any other value logs a warning
    and falls back to CSV.
    """
    if res == "csv":
        save_in_csv(Login)
    elif res == "text":
        save_in_text(Login)
    else:
        import logs
        logs.warn("out format not suported! using: csv")
        # Unknown format: fall back to the CSV writer.
        save_in_csv(Login)
Stream_Learning_CIFAR10_Fig-4b/model_utils.py | justincurl/SynapticMetaplasticityBNN | 17 | 12771347 | <reponame>justincurl/SynapticMetaplasticityBNN<filename>Stream_Learning_CIFAR10_Fig-4b/model_utils.py
import numpy as np
import pandas as pd
import math
import torch
import torchvision
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
from collections import OrderedDict
from datetime import datetime
from PIL import Image
class SignActivation(torch.autograd.Function):
    """Sign non-linearity with a straight-through ("clip") gradient.

    Forward returns ``sign(x)``; backward passes the incoming gradient
    through unchanged where ``|x| <= 1`` and zeroes it elsewhere.
    """

    @staticmethod
    def forward(ctx, i):
        ctx.save_for_backward(i)
        return i.sign()

    @staticmethod
    def backward(ctx, grad_output):
        (saved_input,) = ctx.saved_tensors
        grad_input = grad_output.clone()
        # No gradient flows where the activation was saturated.
        grad_input[saved_input.abs() > 1.0] = 0
        return grad_input
def Binarize(tensor):
    """Return the element-wise sign of *tensor* (values in {-1, 0, +1})."""
    return torch.sign(tensor)
class BinarizeLinear(torch.nn.Linear):
    """Linear layer with weights (and most inputs) binarized to +/-1.

    Inputs that are 784 wide (presumably flattened 28x28 images -- the raw
    network input) are used as-is; every other input is binarized in place.
    The latent full-precision weights are kept in ``weight.org`` so the
    optimizer keeps accumulating real-valued updates between forward passes.
    """

    def __init__(self, *kargs, **kwargs):
        super(BinarizeLinear, self).__init__(*kargs, **kwargs)

    def forward(self, input):
        if input.size(1) != 784:
            input.data = input.data.sign()
        if not hasattr(self.weight, 'org'):
            # First call: stash the full-precision weights.
            self.weight.org = self.weight.data.clone()
        self.weight.data = self.weight.org.sign()
        out = torch.nn.functional.linear(input, self.weight)
        if self.bias is not None:
            self.bias.org = self.bias.data.clone()
            out += self.bias.view(1, -1).expand_as(out)
        return out
class BinarizeConv2d(torch.nn.Conv2d):
    """2-D convolution with weights (and most inputs) binarized to +/-1.

    Inputs with 3 channels (presumably raw RGB images) are convolved as-is;
    all other inputs are binarized in place.  The latent full-precision
    weights live in ``weight.org`` and are re-binarized every forward pass.
    """

    def __init__(self, *kargs, **kwargs):
        super(BinarizeConv2d, self).__init__(*kargs, **kwargs)

    def forward(self, input):
        if input.size(1) != 3:
            input.data = input.data.sign()
        if not hasattr(self.weight, 'org'):
            # First call: stash the full-precision weights.
            self.weight.org = self.weight.data.clone()
        self.weight.data = self.weight.org.sign()
        out = torch.nn.functional.conv2d(input, self.weight, None, self.stride,
                                         self.padding, self.dilation, self.groups)
        if self.bias is not None:
            self.bias.org = self.bias.data.clone()
            out += self.bias.view(1, -1, 1, 1).expand_as(out)
        return out
def normal_init(m):
    """Xavier-initialize the weights of binarized layers.

    Modules whose class name contains ``Binarize`` get Xavier-normal
    weights; every other module is left untouched.
    """
    if 'Binarize' in m.__class__.__name__:
        torch.nn.init.xavier_normal_(m.weight)
class ConvBNN(torch.nn.Module):
    """VGG-style binary convolutional network for 10-class 32x32 RGB input:
    six binarized conv layers (with max-pooling, batch-norm and hardtanh)
    followed by three binarized fully connected layers.

    Args:
        init: initialisation scheme name -- unused inside this constructor;
            TODO confirm whether callers apply it elsewhere.
        width: initialisation width -- also unused here.
        channels: feature-map counts for the six conv layers.
            NOTE(review): mutable default argument; harmless here because it
            is never mutated, but a ``None`` sentinel would be safer.
    """

    def __init__(self, init = "gauss", width = 0.01, channels = [128,128,256,256,512,512]):
        super(ConvBNN, self).__init__()
        # input: (mb x 3 x 32 x 32)
        self.features = torch.nn.Sequential(BinarizeConv2d(3, channels[0], kernel_size=3, padding=1, bias=False), #out: (mb x channels[0] x 32 x 32)
                               torch.nn.BatchNorm2d(channels[0], affine=True, track_running_stats=True),
                               torch.nn.Hardtanh(inplace=True),
                               BinarizeConv2d(channels[0],channels[1], kernel_size=3, padding=1, bias=False), #out: (mb x channels[1] x 32 x 32)
                               torch.nn.MaxPool2d(kernel_size=2),  #out: (mb x channels[1] x 16 x 16)
                               torch.nn.BatchNorm2d(channels[1], affine=True, track_running_stats=True),
                               torch.nn.Hardtanh(inplace=True),
                               BinarizeConv2d(channels[1],channels[2],kernel_size=3, padding=1, bias=False), #out: (mb x channels[2] x 16 x 16)
                               torch.nn.BatchNorm2d(channels[2], affine=True, track_running_stats=True),
                               torch.nn.Hardtanh(inplace=True),
                               BinarizeConv2d(channels[2],channels[3],kernel_size=3, padding=1, bias=False), #out: (mb x channels[3] x 16 x 16)
                               torch.nn.MaxPool2d(kernel_size=2), #out: (mb x channels[3] x 8 x 8)
                               torch.nn.BatchNorm2d(channels[3], affine=True, track_running_stats=True),
                               torch.nn.Hardtanh(inplace=True),
                               BinarizeConv2d(channels[3],channels[4],kernel_size=3, padding=1, bias=False), #out: (mb x channels[4] x 8 x 8)
                               torch.nn.BatchNorm2d(channels[4], affine=True, track_running_stats=True),
                               torch.nn.Hardtanh(inplace=True),
                               BinarizeConv2d(channels[4],channels[5],kernel_size=3, bias=False), #out: (mb x channels[5] x 6 x 6)
                               torch.nn.MaxPool2d(kernel_size=2), #out: (mb x channels[5] x 3 x 3)
                               torch.nn.BatchNorm2d(channels[5], affine=True, track_running_stats=True),
                               torch.nn.Hardtanh(inplace=True),
                               )
        # Classifier head over the flattened (channels[5] * 3 * 3) features.
        self.classifier = torch.nn.Sequential(BinarizeLinear(channels[5]*9,2048, bias=False),
                                 torch.nn.BatchNorm1d(2048, affine=True, track_running_stats=True),
                                 torch.nn.Hardtanh(inplace=True),
                                 torch.nn.Dropout(0.5),
                                 BinarizeLinear(2048,2048, bias=False),
                                 torch.nn.BatchNorm1d(2048, affine=True, track_running_stats=True),
                                 torch.nn.Hardtanh(inplace=True),
                                 torch.nn.Dropout(0.5),
                                 BinarizeLinear(2048,10, bias=False),
                                 torch.nn.BatchNorm1d(10, affine=True, track_running_stats=True),
                                 )

    def forward(self, x):
        """Run features, flatten, then classify; returns (mb x 10) logits."""
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
class Adam_meta(torch.optim.Optimizer):
    """Adam with synaptic metaplasticity for binarized networks.

    Behaves like standard Adam, except for multi-dimensional parameters
    (weights): when the update would push a hidden weight further in the
    direction of its current binary sign ("consolidation"), the effective
    learning rate is attenuated by ``1 - tanh(meta * |w|)^2``, so strongly
    consolidated weights become harder to change.  Biases and other 1-D
    parameters get the plain Adam update.

    Args:
        params: iterable of parameters (or param groups) to optimize.
        lr: learning rate.
        betas: Adam moment-decay coefficients.
        meta: metaplasticity strength (0 reduces to standard Adam).
        eps: denominator term for numerical stability.
        weight_decay: L2 penalty added to the gradient.
        amsgrad: use the AMSGrad variant (max of second-moment estimates).
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), meta = 0.75, eps=1e-8,
                 weight_decay=0, amsgrad=False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, meta=meta, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad)
        super(Adam_meta, self).__init__(params, defaults)

    def __setstate__(self, state):
        # Keep optimizers pickled before the 'amsgrad' option existed loadable.
        super(Adam_meta, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    if len(p.size())!=1:
                        # Remember one random (row, col) index -- presumably
                        # used elsewhere to track a single weight; TODO confirm.
                        state['followed_weight'] = np.random.randint(p.size(0)),np.random.randint(p.size(1))
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                if group['weight_decay'] != 0:
                    grad.add_(group['weight_decay'], p.data)

                # Decay the first and second moment running average coefficient.
                # NOTE(review): the positional-scalar overloads of add_ /
                # addcmul_ / addcdiv_ used below are deprecated in newer
                # torch releases (use alpha=/value= keywords) -- confirm the
                # target torch version before modernizing.
                exp_avg.mul_(beta1).add_(1 - beta1, grad)
                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
                if amsgrad:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])

                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1

                # Consolidation condition: the (bias-uncorrected) first moment
                # points in the same direction as the current binary weight.
                binary_weight_before_update = torch.sign(p.data)
                condition_consolidation = (torch.mul(binary_weight_before_update,exp_avg) > 0.0 )

                # Metaplastic attenuation: scale the update by
                # 1 - tanh(meta * |w|)^2, shrinking toward 0 as |w| grows.
                decayed_exp_avg = torch.mul(torch.ones_like(p.data)-torch.pow(torch.tanh(group['meta']*torch.abs(p.data)),2) ,exp_avg)

                if len(p.size())==1: # True if p is bias, false if p is weight
                    p.data.addcdiv_(-step_size, exp_avg, denom)
                else:
                    #p.data.addcdiv_(-step_size, exp_avg , denom)  #normal update
                    p.data.addcdiv_(-step_size, torch.where(condition_consolidation, decayed_exp_avg, exp_avg) , denom)  #assymetric lr for metaplasticity

        return loss
def train(model, train_loader, current_task_index, optimizer, device, criterion = torch.nn.CrossEntropyLoss(), verbose = False):
    """Run one training epoch over ``train_loader``.

    For binarized layers (parameters carrying an ``org`` attribute) the
    full-precision weights are restored before the optimizer step and the
    updated values are copied back into ``org`` afterwards, so the optimizer
    always accumulates on the latent real-valued weights.

    Args:
        model: network to train (switched to train mode).
        train_loader: iterable of (data, target) batches.
        current_task_index: task index (unused here; kept for the caller API).
        optimizer: optimizer driving the update.
        device: device batches are moved to when CUDA is available.
        criterion: loss function.
        verbose: unused here; kept for the caller API.
    """
    model.train()
    for inputs, labels in train_loader:
        if torch.cuda.is_available():
            inputs = inputs.to(device)
            labels = labels.to(device)
        optimizer.zero_grad()
        loss = criterion(model(inputs), labels)
        loss.backward()
        # Restore latent full-precision weights before the update...
        for param in model.parameters():
            if hasattr(param, 'org'):
                param.data.copy_(param.org)
        optimizer.step()
        # ...then stash the updated values back into ``org``.
        for param in model.parameters():
            if hasattr(param, 'org'):
                param.org.copy_(param.data)
def test(model, test_loader, device, frac = 1, criterion = torch.nn.CrossEntropyLoss(), verbose = False):
    """Evaluate ``model`` on ``test_loader`` and return accuracy in percent.

    Args:
        model: network to evaluate (switched to eval mode).
        test_loader: iterable of (data, target) batches with a ``dataset``.
        device: device batches are moved to when CUDA is available.
        frac: scale factor applied to the accuracy -- presumably used when
            only a fraction of the test set/classes is relevant; TODO confirm.
        criterion: loss used for the (locally computed) ``test_loss``.
        verbose: when True, print the accuracy summary.

    Returns:
        float: accuracy percentage, rounded to two decimals.
    """
    model.eval()
    test_loss = 0
    correct = 0
    for data, target in test_loader:
        if torch.cuda.is_available():
            data, target = data.to(device), target.to(device)
        output = model(data)
        # NOTE(review): with the default criterion each term is a *mean* over
        # the batch, so dividing the sum by the dataset size below gives an
        # average of batch means, not a per-sample loss -- confirm intended
        # (the value is computed but never returned or printed).
        test_loss += criterion(output, target).item() # sum up batch loss
        pred = output.data.max(1, keepdim=True)[1] # get the index of the max log-probability
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()
    test_loss /= len(test_loader.dataset)
    test_acc = round( 100. * float(correct) * frac / len(test_loader.dataset)  , 2)
    if verbose :
        print('Test accuracy: {}/{} ({:.2f}%)'.format(
            correct, len(test_loader.dataset),
            test_acc))
    return test_acc
def plot_parameters(model, path, save=True):
    """Histogram every weight matrix of *model* on a single figure.

    Biases and 1-D parameters are skipped.  For models whose class name
    contains 'B' (the binary networks in this file), the latent
    full-precision weights in ``param.org`` are plotted when present.
    When *save* is True the figure is written to
    ``<path>/<HH-MM-SS>_weight_distribution.png``.
    """
    fig = plt.figure(figsize=(15, 30))
    subplot_idx = 1
    for name, param in model.named_parameters():
        # Skip biases and 1-D parameters (e.g. batch-norm scales).
        if 'bias' in name or len(param.size()) == 1:
            continue
        fig.add_subplot(8, 2, subplot_idx)
        subplot_idx += 1
        if 'B' in model.__class__.__name__:
            # Binary net: prefer the latent full-precision weights.
            tensor = param.org if hasattr(param, 'org') else param
            n_bins = 100
        else:
            tensor = param
            n_bins = 50
        plt.title(name.replace('.', '_'))
        plt.hist(tensor.data.cpu().numpy().flatten(), n_bins)
    if save:
        stamp = datetime.now().strftime('%H-%M-%S')
        fig.savefig(path + '/' + stamp + '_weight_distribution.png')
    plt.close()
| 2.359375 | 2 |
pyobs/utils/simulation/telescope.py | pyobs/pyobs-core | 4 | 12771348 | from __future__ import annotations
import threading
import numpy as np
from astropy.coordinates import SkyCoord
import astropy.units as u
from typing import Tuple, List
import random
from pyobs.object import Object
from pyobs.utils.enums import MotionStatus
class SimTelescope(Object):
    """A simulated telescope on an equatorial mount.

    Position state is updated by a background thread (one tick per second):
    while destination coordinates are set the mount slews toward them at
    ``speed``; otherwise the pointing slowly drifts.  All reads/writes of the
    position go through ``_pos_lock``.
    """
    __module__ = 'pyobs.utils.simulation'

    def __init__(self, world: 'SimWorld', position: Tuple[float, float] = None, offsets: Tuple[float, float] = None,
                 pointing_offset: Tuple[float, float] = None, move_accuracy: float = 2.,
                 speed: float = 20., focus: float = 50, filters: List[str] = None, filter: str = 'clear',
                 drift: Tuple[float, float] = None, focal_length: float = 5000., *args, **kwargs):
        """Initializes new telescope.

        Args:
            world: World object.
            position: RA/Dec tuple with position of telescope in degrees.
            offsets: RA/Dec offsets of telescope in arcsecs.
            pointing_offset: Pointing offset in RA/Dec in arcsecs.
            move_accuracy: Accuracy of movements in RA/Dec, i.e. random error after any movement [arcsec].
            speed: Speed of telescope in deg/sec.
            focus: Telescope focus.
            filters: List of filters.
            filter: Current filter.
            drift: RA/Dec drift of telescope in arcsec/sec.
            focal_length: Focal length of telescope in mm.
        """
        Object.__init__(self, *args, **kwargs)

        # store
        self.world = world
        self.status = MotionStatus.IDLE
        self.status_callback = None

        # init
        self._position = SkyCoord(0. * u.deg, 0. * u.deg, frame='icrs') if position is None else \
            SkyCoord(position[0] * u.deg, position[1] * u.deg, frame='icrs')
        self._offsets = (0., 0.) if offsets is None else offsets
        self.pointing_offset = (20., 2.) if pointing_offset is None else pointing_offset
        # NOTE(review): the default parameter is a float (2.) but an explicit
        # None yields a *tuple* (1, 1), which would break the scalar division
        # in move_ra_dec/set_offsets below -- confirm intended.
        self.move_accuracy = (1, 1) if move_accuracy is None else move_accuracy
        self.speed = speed  # telescope speed in deg/sec
        self.focus = focus
        self.filters = ['clear', 'B', 'V', 'R'] if filters is None else filters
        self.filter = filter
        self.drift = (0.01, 0.0001) if drift is None else drift  # arcsec/sec in RA/Dec
        self.focal_length = focal_length

        # private stuff
        self._drift = (0., 0.)            # accumulated drift offset [arcsec]
        self._dest_coords = None          # slew target, or None when idle/tracking

        # locks
        self._pos_lock = threading.RLock()

        # threads
        self.add_thread_func(self._move_thread)

    @property
    def position(self):
        # Commanded position (without offsets/drift); see real_pos.
        return self._position

    @property
    def offsets(self):
        return self._offsets

    def _change_motion_status(self, status: MotionStatus):
        """Change the current motion status.

        Args:
            status: New motion status
        """
        # call callback, but only on an actual change
        if self.status_callback is not None and status != self.status:
            self.status_callback(status)

        # set it
        self.status = status

    @property
    def real_pos(self):
        """Actual pointing: commanded position plus offsets and drift."""
        # calculate offsets; the RA component is scaled by 1/cos(dec) so the
        # offset is a true on-sky angle
        dra = (self._offsets[0] * u.deg + self._drift[0] * u.arcsec) / np.cos(np.radians(self._position.dec.degree))
        ddec = self._offsets[1] * u.deg + self._drift[1] * u.arcsec

        # return position
        with self._pos_lock:
            return SkyCoord(ra=self._position.ra + dra,
                            dec=self._position.dec + ddec,
                            frame='icrs')

    def move_ra_dec(self, coords):
        """Move telescope to given RA/Dec position.

        Args:
            coords: Destination coordinates.
        """
        # change status
        self._change_motion_status(MotionStatus.SLEWING)

        # calculate random RA/Dec offsets to simulate imperfect pointing
        acc = self.move_accuracy / 3600.
        ra = random.gauss(coords.ra.degree, acc / np.cos(np.radians(coords.dec.degree))) * u.deg
        dec = random.gauss(coords.dec.degree, acc) * u.deg

        # set coordinates; the background thread performs the actual slew
        self._dest_coords = SkyCoord(ra=ra, dec=dec, frame='icrs')

    def set_offsets(self, dra, ddec):
        """Move RA/Dec offsets.

        Args:
            dra: RA offset [deg]
            ddec: Dec offset [deg]
        """
        # calculate random RA/Dec offsets around the requested ones
        acc = self.move_accuracy / 3600.
        ra, dec = random.gauss(dra, acc), random.gauss(ddec, acc)

        # set offsets
        self._offsets = (ra, dec)

    def _move_thread(self):
        """Move the telescope over time (one simulation tick per second)."""
        # run until closed
        while not self.closing.is_set():
            # do we have destination coordinates?
            if self._dest_coords is not None:
                # calculate moving vector (RA scaled to an on-sky angle)
                vra = (self._dest_coords.ra.degree - self._position.ra.degree) * \
                      np.cos(np.radians(self._position.dec.degree))
                vdec = self._dest_coords.dec.degree - self._position.dec.degree

                # get direction
                length = np.sqrt(vra**2 + vdec**2)

                # do we reach target? (speed is treated as deg per 1-sec tick)
                if length < self.speed:
                    # set it
                    with self._pos_lock:
                        # set position and reset destination
                        self._change_motion_status(MotionStatus.TRACKING)
                        self._position = self._dest_coords
                        self._dest_coords = None

                        # set some random drift around the pointing error
                        self._drift = (random.gauss(self.pointing_offset[0], self.pointing_offset[0] / 10.),
                                       random.gauss(self.pointing_offset[1], self.pointing_offset[1] / 10.))
                else:
                    # norm vector and get movement for this tick
                    dra = vra / length * self.speed / np.cos(np.radians(self._position.dec.degree)) * u.deg
                    ddec = vdec / length * self.speed * u.deg

                    # apply it
                    with self._pos_lock:
                        self._change_motion_status(MotionStatus.SLEWING)
                        self._position = SkyCoord(ra=self._position.ra + dra,
                                                  dec=self._position.dec + ddec,
                                                  frame='icrs')
            else:
                # no movement, just drift
                # calculate constant drift (randomized around the nominal rate)
                drift_ra = random.gauss(self.drift[0], self.drift[0] / 10.)
                drift_dec = random.gauss(self.drift[1], self.drift[1] / 10.)

                # and apply it
                with self._pos_lock:
                    self._drift = (self._drift[0] + drift_ra, self._drift[1] + drift_dec)

            # sleep a second
            self.closing.wait(1)
| 2.671875 | 3 |
spider/ip_proxies/ip_proxies/spiders/jiangxianli.py | LZC6244/ip_proxy_pool | 17 | 12771349 | # -*- coding: utf-8 -*-
import random
import scrapy
from scrapy import Request
from ip_proxies.spiders.base import BaseSpider
from ip_proxies.items import IpProxiesItem
from ip_proxies.settings import TEST_URLS, LOG_FILE
class JiangxianliSpider(BaseSpider):
    """Crawls ip.jiangxianli.com and yields candidate proxy-verification
    requests: every scraped proxy is exercised against one of ``TEST_URLS``
    and handled by ``verify_porxy`` on the base spider.
    """
    name = 'jiangxianli'
    # allowed_domains = ['jiangxianli.com']
    start_urls = ['http://ip.jiangxianli.com/?page=1']
    custom_settings = {
        # Give this spider its own log file, prefixed with the spider name.
        'LOG_FILE': LOG_FILE.replace('log/', f'log/{name}__', 1),
    }

    def parse(self, response):
        """Parse one listing page, emit verification requests, paginate."""
        # print(response.url)
        # Table rows holding the proxy-IP entries of the current page.
        ip_info_li = response.xpath('//table[@class="table table-hover table-bordered table-striped"]/tbody//tr')
        # Item field names for the IP / port / anonymity / protocol columns.
        title_li = ['ip', 'port', 'anonymity', 'net_type']
        for i in ip_info_li:
            item = IpProxiesItem()
            td_li = i.xpath('./td/text()').getall()
            # The site only lists "高匿" (elite) and "透明" (transparent)
            # proxies; transparent ones are useless here, so skip them.
            # NOTE(review): this substring test also skips rows whose
            # anonymity cell is empty -- confirm that is intended.
            if td_li[3] in '透明':
                continue
            for k, v in zip(title_li, td_li[1:5]):
                item[k] = v
            # td_li[5] -> region, e.g. "中国 广东 汕尾"
            # td_li[6] -> carrier, e.g. "联通"
            # Both together form the location field.
            item['ip_location'] = ' '.join(td_li[5:7]) if len(td_li) == 12 else ''
            if not item['net_type']:
                item['net_type'] = 'HTTP'
            # Build a proxy URL like http://some_proxy_server:port.
            proxy = item['net_type'].lower() + '://' + item['ip'] + ':' + item['port']
            # dont_filter=True is required: every proxy is verified against
            # the same small set of TEST_URLS, which scrapy would otherwise
            # deduplicate.
            request = Request(url=random.choice(TEST_URLS), headers=self.headers,
                              meta={'proxy': proxy, 'item': item},
                              callback=self.verify_porxy, dont_filter=True)
            yield request
        # The page count is not fixed (2-3 pages or more); keep following
        # "next" while the last pagination entry is still clickable (i.e. it
        # carries no class attribute such as "disabled").
        if_next = response.xpath('//ul[@class="pagination"]/li')
        if if_next:
            if_next = if_next[-1]
            if not if_next.xpath('./@class'):
                next_url = if_next.xpath('./a/@href').get()
                next_request = Request(url=next_url, callback=self.parse, dont_filter=True)
                yield next_request
| 2.5 | 2 |
admin/test/test_acceptance.py | wallnerryan/flocker-profiles | 0 | 12771350 | <reponame>wallnerryan/flocker-profiles
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Tests for ``admin.acceptance``.
"""
from uuid import UUID
from zope.interface.verify import verifyObject
from twisted.trial.unittest import SynchronousTestCase
from ..acceptance import (
IClusterRunner, ManagedRunner, generate_certificates,
DISTRIBUTIONS,
)
from flocker.ca import RootCredential
from flocker.provision import PackageSource
from flocker.provision._install import ManagedNode
from flocker.acceptance.testtools import DatasetBackend
class ManagedRunnerTests(SynchronousTestCase):
    """
    Tests for ``ManagedRunner``.
    """
    def test_interface(self):
        """
        ``ManagedRunner`` provides ``IClusterRunner``.
        """
        # Minimal constructor arguments: the empty PackageSource fields are
        # placeholders, since only interface conformance is checked here
        # (192.0.2.x is the RFC 5737 TEST-NET range).
        runner = ManagedRunner(
            node_addresses=[b'192.0.2.1'],
            package_source=PackageSource(
                version=b"",
                os_version=b"",
                branch=b"",
                build_server=b"",
            ),
            distribution=b'centos-7',
            dataset_backend=DatasetBackend.zfs,
            dataset_backend_configuration={},
        )
        self.assertTrue(
            verifyObject(IClusterRunner, runner)
        )
class GenerateCertificatesTests(SynchronousTestCase):
    """
    Tests for ``generate_certificates``.
    """
    def test_cluster_id(self):
        """
        The certificates generated are for a cluster with the given identifier.
        """
        cluster_id = UUID("bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb")
        # 192.0.2.x is the RFC 5737 TEST-NET range, safe for test fixtures.
        node = ManagedNode(
            address=b"192.0.2.17", distribution=DISTRIBUTIONS[0],
        )
        certificates = generate_certificates(cluster_id, [node])
        # The generated root credential carries the cluster id in the
        # certificate's organizational-unit field.
        root = RootCredential.from_path(certificates.directory)
        self.assertEqual(
            cluster_id,
            UUID(root.organizational_unit),
        )
| 1.757813 | 2 |