| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
rally/plugins/openstack/scenarios/nova/flavors.py
|
TeamXgrid/xgrid-rally
| 1
|
12782351
|
<gh_stars>1-10
# Copyright 2015: Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common.i18n import _LW
from rally.common import logging
from rally import consts
from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.nova import utils
from rally.task import validation
"""Scenarios for Nova flavors."""
LOG = logging.getLogger(__name__)
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(users=True)
@scenario.configure(name="NovaFlavors.list_flavors")
class ListFlavors(utils.NovaScenario):
def run(self, detailed=True, **kwargs):
"""List all flavors.
Measure the "nova flavor-list" command performance.
:param detailed: True if the flavor listing
should contain detailed information
:param kwargs: Optional additional arguments for flavor listing
"""
self._list_flavors(detailed, **kwargs)
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(admin=True)
@scenario.configure(context={"admin_cleanup": ["nova"]},
name="NovaFlavors.create_and_list_flavor_access")
class CreateAndListFlavorAccess(utils.NovaScenario):
def run(self, ram, vcpus, disk, **kwargs):
"""Create a non-public flavor and list its access rules
:param ram: Memory in MB for the flavor
:param vcpus: Number of VCPUs for the flavor
:param disk: Size of local disk in GB
:param kwargs: Optional additional arguments for flavor creation
"""
# NOTE(pirsriva): access rules can be listed
# only for non-public flavors
if kwargs.get("is_public", False):
LOG.warning(_LW("is_public cannot be set to True for listing "
"flavor access rules. Setting is_public to False"))
kwargs["is_public"] = False
flavor = self._create_flavor(ram, vcpus, disk, **kwargs)
self.assertTrue(flavor)
self._list_flavor_access(flavor.id)
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(admin=True)
@scenario.configure(context={"admin_cleanup": ["nova"]},
name="NovaFlavors.create_flavor_and_add_tenant_access")
class CreateFlavorAndAddTenantAccess(utils.NovaScenario):
def run(self, ram, vcpus, disk, **kwargs):
"""Create a flavor and Add flavor access for the given tenant.
:param ram: Memory in MB for the flavor
:param vcpus: Number of VCPUs for the flavor
:param disk: Size of local disk in GB
:param kwargs: Optional additional arguments for flavor creation
"""
flavor = self._create_flavor(ram, vcpus, disk, **kwargs)
self.assertTrue(flavor)
self._add_tenant_access(flavor.id, self.context["tenant"]["id"])
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(admin=True)
@scenario.configure(context={"admin_cleanup": ["nova"]},
name="NovaFlavors.create_flavor")
class CreateFlavor(utils.NovaScenario):
def run(self, ram, vcpus, disk, **kwargs):
"""Create a flavor.
:param ram: Memory in MB for the flavor
:param vcpus: Number of VCPUs for the flavor
:param disk: Size of local disk in GB
:param kwargs: Optional additional arguments for flavor creation
"""
self._create_flavor(ram, vcpus, disk, **kwargs)
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(admin=True)
@scenario.configure(context={"admin_cleanup": ["nova"]},
name="NovaFlavors.create_and_get_flavor")
class CreateAndGetFlavor(utils.NovaScenario):
"""Scenario for create and get flavor."""
def run(self, ram, vcpus, disk, **kwargs):
"""Create flavor and get detailed information of the flavor.
:param ram: Memory in MB for the flavor
:param vcpus: Number of VCPUs for the flavor
:param disk: Size of local disk in GB
:param kwargs: Optional additional arguments for flavor creation
"""
flavor = self._create_flavor(ram, vcpus, disk, **kwargs)
self._get_flavor(flavor.id)
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(admin=True)
@scenario.configure(context={"admin_cleanup": ["nova"]},
name="NovaFlavors.create_and_delete_flavor")
class CreateAndDeleteFlavor(utils.NovaScenario):
def run(self, ram, vcpus, disk, **kwargs):
"""Create flavor and delete the flavor.
:param ram: Memory in MB for the flavor
:param vcpus: Number of VCPUs for the flavor
:param disk: Size of local disk in GB
:param kwargs: Optional additional arguments for flavor creation
"""
flavor = self._create_flavor(ram, vcpus, disk, **kwargs)
self._delete_flavor(flavor.id)
@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(admin=True)
@scenario.configure(context={"admin_cleanup": ["nova"]},
name="NovaFlavors.create_flavor_and_set_keys")
class CreateFlavorAndSetKeys(utils.NovaScenario):
def run(self, ram, vcpus, disk, extra_specs, **kwargs):
"""Create flavor and set keys to the flavor.
Measure the "nova flavor-key" command performance.
the scenario first create a flavor,then add the extra specs to it.
:param ram: Memory in MB for the flavor
:param vcpus: Number of VCPUs for the flavor
:param disk: Size of local disk in GB
:param extra_specs: additional arguments for flavor set keys
:param kwargs: Optional additional arguments for flavor creation
"""
flavor = self._create_flavor(ram, vcpus, disk, **kwargs)
self._set_flavor_keys(flavor, extra_specs)
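# These scenario plugins are normally driven by a Rally task file. A minimal
# sketch of such a task, expressed here as an equivalent Python dict (all
# values are illustrative, not taken from this repository):
SAMPLE_TASK = {
    "NovaFlavors.create_flavor": [{
        "args": {"ram": 512, "vcpus": 1, "disk": 1},
        "runner": {"type": "constant", "times": 10, "concurrency": 2},
    }]
}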
| 1.835938
| 2
|
log_generator/es_settings.py
|
DobyLov/loggenerator
| 0
|
12782352
|
# es_settings
index_template = {
"index_patterns": ["loggen-*"],
"settings": {
"number_of_shards": 1,
"number_of_replicas": 0,
"index.lifecycle.name": "ilm_loggen",
"index.lifecycle.rollover_alias": "loggen_rollov"
}
}
test_index_template = {
"index_patterns": ["loggen-*"],
"settings": {
"index.lifecycle.name": "ilm_loggen",
"index.lifecycle.rollover_alias": "loggen_rollov"
}
}
# es_index_settings
settings = {
"settings" : {
"index" : {
"number_of_shards" : "1",
"number_of_replicas" : "0",
}
}
}
es_ilm_settings = {
"policy": {
"phases": {
"hot": {
"actions": {
"rollover": {
"max_age": "5d",
"max_size": "1gb"
},
"set_priority": {
"priority": 100
}
}
},
"delete": {
"min_age": "5d",
"actions": {
"delete": {}
}
}
}
}
}
# es_mapping
mapping = {
"properties": {
"age": {
"type": "long"
},
"country": {
"properties": {
"country_long": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
},
"country_short": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
},
"location": {
"properties": {
"latitude": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
},
"longitude": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
}
}
},
"region": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
},
"time_zone": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
},
"town": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
}
}
},
"dateTime": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
},
"hasTags": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
},
"ip_address": {
"type": "ip"
},
"message_tweet": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
},
"user": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
},
"uuid": {
"type": "text",
"fields": {
"keyword": {
"type": "keyword",
"ignore_above": 256
}
}
}
}
}
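# A minimal sketch of pushing these objects to a cluster with the official
# elasticsearch Python client (7.x-style API; the endpoint, template name,
# and bootstrap index below are assumptions):
if __name__ == "__main__":
    from elasticsearch import Elasticsearch

    es = Elasticsearch(["http://localhost:9200"])
    es.ilm.put_lifecycle(policy="ilm_loggen", body=es_ilm_settings)
    es.indices.put_template(name="loggen", body=index_template)
    es.indices.create(index="loggen-000001", body={
        "settings": settings["settings"],
        "mappings": mapping,
        "aliases": {"loggen_rollov": {"is_write_index": True}},
    })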
| 1.375
| 1
|
puchkidb/puchkidb_cli/commands/show.py
|
triump0870/puchkidb
| 0
|
12782353
|
from json import dumps
from .base import Base
class Show(Base):
"""Show the databases and the tables using this command.
Usage:
puchkidb show [options]
options:
dbs: show all the databases in the server
tables: show all the tables inside a database
Example:
puchkidb show dbs
"""
def run(self):
print('Hello, world!')
print('You supplied the following options:', dumps(self.options, indent=2, sort_keys=True))
| 2.953125
| 3
|
Leetcoding-Actions/Explore-Monthly-Challenges/2020-12/15-squaresOfASortedArray.py
|
shoaibur/SWE
| 1
|
12782354
|
<gh_stars>1-10
from typing import List

class Solution:
def sortedSquares(self, nums: List[int]) -> List[int]:
n = len(nums)
i, j, k = 0, n - 1, n - 1
result = [0] * n
while i <= j:
            if abs(nums[j]) > abs(nums[i]):
result[k] = nums[j] * nums[j]
j -= 1
else:
result[k] = nums[i] * nums[i]
i += 1
k -= 1
return result
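# Quick sanity check of the two-pointer pass (hypothetical driver, not part
# of the original submission):
if __name__ == "__main__":
    assert Solution().sortedSquares([-4, -1, 0, 3, 10]) == [0, 1, 9, 16, 100]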
| 3.046875
| 3
|
interview_qns/max_mushrooms_picked.py
|
shiram/learning-django-rest
| 0
|
12782355
|
<gh_stars>0
"""
Problem: You are given a non-empty, zero-indexed array A of n (1 ≤ n ≤ 100 000) integers
a0, a1, . . . , an−1 (0 ≤ ai ≤ 1 000). This array represents the number of mushrooms growing on the
consecutive spots along a road. You are also given integers k and m (0 ≤ k, m < n).
A mushroom picker is at spot number k on the road and should perform m moves. In
one move she moves to an adjacent spot. She collects all the mushrooms growing on spots
she visits. The goal is to calculate the maximum number of mushrooms that the mushroom
picker can collect in m moves.
For example, consider array A such that:
value: 2 3 7 5 1 3 9
index: 0 1 2 3 4 5 6
The mushroom picker starts at spot k = 4 and should perform m = 6 moves. She might
move to spots 3, 2, 3, 4, 5, 6 and thereby collect 1 + 5 + 7 + 3 + 9 = 25 mushrooms. This is the
maximal number of mushrooms she can collect.
Solution O(m^2): Note that the best strategy is to move in one direction optionally followed
by some moves in the opposite direction. In other words, the mushroom picker should not
change direction more than once. With this observation we can find the simplest solution.
Make the first p = 0, 1, 2, . . . , m moves in one direction, then the next m − p moves in the
opposite direction. This is just a simple simulation of the moves of the mushroom picker
which requires O(m^2) time.
Solution O(n+m): A better approach is to use prefix sums. If we make p moves in one direction, we can calculate the maximal opposite location of the mushroom picker. The mushroom
picker collects all mushrooms between these extremes. We can calculate the total number of
collected mushrooms in constant time by using prefix sums.
"""
def prefix_sums(A):
n = len(A)
p = [0] * (n + 1)
for k in range(1, n + 1):
p[k] = p[k - 1] + A[k - 1]
return p
def count_total(P, x, y):
return P[y + 1] - P[x]
def mushrooms(A, k, m):
n = len(A)
result = 0
pref = prefix_sums(A)
print("The Prefix Sum: {}".format(pref))
for p in range(min(m, k) + 1):
left_pos = k - p
print("Left Pos loop 1 index {} : {}".format(p, left_pos))
right_pos = min(n - 1, max(k, k + m - 2 * p))
print("Right Pos loop 1 index {} : {}".format(p, right_pos))
result = max(result, count_total(pref, left_pos, right_pos))
print("result loop 1 index {}: {}".format(p, result))
for p in range(min(m + 1, n - k)):
        right_pos = k + p
        print("Right Pos loop 2 index {} : {}".format(p, right_pos))
        left_pos = max(0, min(k, k - (m - 2 * p)))
        print("Left Pos loop 2 index {} : {}".format(p, left_pos))
result = max(result, count_total(pref, left_pos, right_pos))
print("result loop 2 index {}: {}".format(p, result))
return result
A = [2,3,7,5,1,3,9]
m = 6
k = 4
res = mushrooms(A, k, m)
print()
print("-------------------------- RESULT ------------------------")
print(res)
| 3.796875
| 4
|
pythonidbot/inline/hints.py
|
hexatester/pythonidbot
| 0
|
12782356
|
import logging
from fuzzywuzzy import process
from telegram import (
Update,
InlineQuery,
InlineQueryResultArticle,
InlineKeyboardButton,
InlineKeyboardMarkup,
)
from telegram.ext import CallbackContext
from typing import List, Optional
from pythonidbot.constants import HINTS
from pythonidbot.utils.helpers import article, build_menu
class Hints:
def __init__(self, hints: Optional[List[dict]] = None):
self.logger = logging.getLogger(__name__)
self.score_cutoff = 60
self.hints = hints or list(HINTS.values())
self.hints_q_dict = dict(enumerate([hint["help"] for hint in self.hints]))
self.hashtag_q_dict = dict(enumerate([hint["key"] for hint in self.hints]))
@property
def hints_article(self):
return [self.article(hint) for hint in self.hints]
def __call__(
self,
query: str,
limit: int = 10,
score_cutoff: int = 60,
) -> List[InlineQueryResultArticle]:
return self.find(query, limit, score_cutoff)
def find(
self,
query: str,
limit: int = 10,
score_cutoff: int = 60,
) -> List[InlineQueryResultArticle]:
self.logger.debug(f"Mencari hint dengan keyword `{query}`")
best_hints = process.extractBests(
query=query,
choices=self.hints_q_dict,
score_cutoff=score_cutoff,
limit=limit,
)
self.logger.debug(f"Ditemukan {len(best_hints)} kemungkinan hint")
return [self.hints_article[z] for (x, y, z) in best_hints] if best_hints else []
def article(
self,
data: dict,
query: Optional[str] = None,
markup: Optional[InlineKeyboardMarkup] = None,
) -> InlineQueryResultArticle:
message: str = data["message"]
if "{query}" in message:
query = query or data["default"]
message = message.replace("{query}", query)
if "buttons" in data:
markup = self.make_button(data["buttons"])
return article(
title=data["help"],
description=f'{data["key"]}: {message}',
message_text=message,
reply_markup=markup,
)
def make_button(self, buttons: List[dict]) -> InlineKeyboardMarkup:
keyboards: List[InlineKeyboardButton] = list()
for data in buttons:
keyboards.append(InlineKeyboardButton(**data))
menu = build_menu(keyboards, 1)
return InlineKeyboardMarkup(menu)
def hashtag_handler(self, update: Update, context: CallbackContext):
if not update.inline_query:
return
query = update.inline_query.query
query = query.lstrip("#")
hashtag = query.split(" ")[0]
        query = query[len(hashtag):]  # drop the prefix; lstrip() would strip by character set
if len(hashtag) < 3:
return
result = process.extractOne(
hashtag,
choices=self.hashtag_q_dict,
score_cutoff=self.score_cutoff,
)
if not result:
return
value, score, index = result
results = [self.article(self.hints[index], query=query)]
return update.inline_query.answer(results)
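# A minimal usage sketch; the hint entry below is hypothetical (the real
# entries live in pythonidbot.constants.HINTS):
if __name__ == "__main__":
    demo = Hints(hints=[{
        "key": "#pep8",
        "help": "Python style guide",
        "message": "Please read PEP 8: {query}",
        "default": "https://peps.python.org/pep-0008/",
    }])
    print(len(demo.find("style guide", limit=5, score_cutoff=50)))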
| 2.0625
| 2
|
plot/compare_algorithms.py
|
alod83/srp
| 2
|
12782357
|
# Compare Algorithms
import pandas
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import label_binarize
import os
import sys
config_path = "utilities/"
sys.path.append(os.path.abspath(config_path))
from MyAPI import MyAPI
api = MyAPI()
X, Y = api.get_dataset(0, start_index=0,end_index=20000, nr=20000)
# prepare models
models = []
models.append(('KNN', KNeighborsClassifier()))
models.append(('Decision Tree', DecisionTreeClassifier()))
models.append(('Gaussian', GaussianNB()))
models.append(('SVM', SVC()))
classes=list(set(Y))
# prepare configuration for cross validation test harness
seed = 7
# evaluate each model in turn
results = []
names = []
scoring = 'accuracy'
for name, model in models:
    kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=seed)  # shuffle=True is required for random_state to take effect in recent scikit-learn
cv_results = model_selection.cross_val_score(model, X, Y, cv=kfold, scoring=scoring)
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
# boxplot algorithm comparison
fig = plt.figure()
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
plt.savefig('compare.png')
| 2.578125
| 3
|
cobl/lexicon/migrations/0148_fill_romanisedsymbol.py
|
Bibiko/CoBL-public
| 3
|
12782358
|
<reponame>Bibiko/CoBL-public<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from django.db import migrations
def forwards_func(apps, schema_editor):
RomanisedSymbol = apps.get_model("lexicon", "RomanisedSymbol")
allowedRomanised = {
'a', 'n', '-', 'm', 'i', 's', 'e', 't', '', 'ě', 'b', 'o', 'j', 'l',
'd', 'v', 'ý', 'r', 'h', 'k', 'š', '̆', 'y', 'u', 'p', 'c', 'ì', 'á',
'′', 'z', 'ř', 'à', 'ò', 'ž', 'í', 'x', 'č', 'ḁ', 'è', 'f', 'ъ', '̀',
'é', 'ù', ' ', 'g', 'ä', 'ó', 'w', '̯', 'ɪ', '', 'ï', 'ŋ', '̀', '̥',
'ə', 'ɣ', 'ú', '́', 'ṡ', 'ç', 'ā', '̣', '(', ')', 'ſ', 'ω', 'ł', 'ę',
'ß', '[', ']', 'ô', '?', "'", 'ă', 'ġ', 'ẛ', 'ʾ', 'ʒ', '̌', 'ώ', 'ľ',
'ň', '≠', 'î', 'ṃ', 'û', 'ō', '́', 'ü', 'ḫ', 'ṣ', '̄', 'ū', '/', 'ș',
'ī', 'ḥ', '*', 'ẓ', 'ë', 'ṭ', 'ś', 'ḯ', 'ĩ', 'K', 'I', 'S', 'N', 'A',
'æ', 'q', 'ĕ', '̇', 'ḷ', 'V', 'ø', 'H', 'C', 'Z', 'U', 'ċ', 'J', 'ʌ',
'̃', 'â', 'ː', 'ē', 'ʿ', 'D', 'O', 'M', 'L', 'ê', 'P', 'T', 'ṙ', 'R',
'̧', 'ʰ', 'ǖ', '˜', 'ḍ', 'ć', 'ñ', 'ǧ', 'ṛ', 'å', 'ė', 'ů', 'E', '͡',
'G', 'W', 'ö', '~', 'ź', 'ɛ', 'ɜ', 'ț', 'ð', '.', 'B', 'ť', ':', '̐',
'F', '̲', 'ṇ', 'ɡ', '˳', 'ʊ', 'ǫ', 'ą', 'X', 'ż', 'ũ', 'ḗ', 'ʉ', '″',
'ď', 'ģ', 'ṅ', '̜', 'ẏ', 'ɨ', 'ŭ', 'β', 'ã', 'ṁ', 'ŕ', 'ǰ', 'ˊ', 'θ',
'Y', 'þ', 'ļ', '+', 'ŗ', 'õ', 'ɑ', '·', 'ḱ', 'ṓ', 'ń', 'ķ', 'ᵤ', ';',
'ọ', '=', 'ņ', 'ẽ', 'ṯ', 'ʸ', 'ɔ', '̱', 'ʈ', 'ŵ', 'ᵛ', 'χ', '¬', 'ş',
'ÿ', 'ȳ', 'ṕ', 'ĭ', '`', '₂', 'ǣ', '̂', '̈́', 'ˤ', 'œ', '̈', 'ˑ', 'ⁱ',
'ʷ', '&', 'ᴵ', 'ǝ', 'ƕ', 'Q', 'ẖ', 'ǘ', 'į', 'ǔ', 'ε', 'ν', 'ǵ', 'đ',
'ʎ', '̰', '̊', 'ˈ', 'Š', 'ų', 'Ä', '‑', 'ŷ', 'Ø', 'ŝ', 'ʁ', 'Ī', 'ɸ',
'ƫ', 'ы', 'к', 'ỹ', 'ɫ', '‘'}
RomanisedSymbol.objects.bulk_create([
RomanisedSymbol(symbol=s) for s in allowedRomanised
])
def reverse_func(apps, schema_editor):
RomanisedSymbol = apps.get_model("lexicon", "RomanisedSymbol")
RomanisedSymbol.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [('lexicon', '0147_romanisedsymbol')]
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
| 1.679688
| 2
|
lib/jsnes.py
|
Ekleog/jsmark
| 0
|
12782359
|
import os
import subprocess as subp
import sys
class Bench:
moreisbetter = False
def run(interp):
os.chdir(os.path.join(sys.path[0], 'vendor', 'jsnes', 'test'))
res = subp.run([interp, 'shell-bench.js'], stdout=subp.PIPE, universal_newlines=True)
if res.returncode != 0:
return "Error when trying to run jsnes with interpreter " + interp + "!"
warmup, result = 0., 0.
nwarmup, nresult = 0., 0.
for line in res.stdout.split('\n'):
res = line.split(':')
if len(res) != 2:
continue
if res[0] == 'warmup':
warmup += float(res[1])
nwarmup += 1
elif res[0] == 'result':
result += float(res[1])
nresult += 1
return {
'warmup': warmup / nwarmup,
'~~~~ Result': result / nresult,
}
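# Note that run() is declared without `self`, so it is effectively used as a
# plain function on the class. A hedged invocation sketch (the interpreter
# name and the vendored jsnes checkout are assumptions):
if __name__ == "__main__":
    print(Bench.run("node"))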
| 2.296875
| 2
|
happy_birthder_script_tests.py
|
wis-software/rocketchat-tests-based-on-splinter
| 2
|
12782360
|
<filename>happy_birthder_script_tests.py
#!/usr/bin/env python3
# Copyright 2018 <NAME> <<EMAIL>>
# Copyright 2018 <NAME> <<EMAIL>>
# Copyright 2018 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests related to the hubot-happy-birthder script. """
# pylint: disable=fixme
import re
import sys
import time
from argparse import ArgumentParser
from datetime import datetime, timedelta
from selenium.webdriver.support.wait import WebDriverWait
from base import RocketChatTestCase
class HappyBirthderScriptTestCase(RocketChatTestCase): # pylint: disable=too-many-public-methods
"""Tests for the hubot-happy-birthder script. """
def __init__(self, addr, username, password, reminder_interval_time, **kwargs):
RocketChatTestCase.__init__(self, addr, username, password, **kwargs)
self.schedule_pre_test_case('choose_general_channel')
self._reminder_interval_time = int(reminder_interval_time)
self._test_birthday = '01.01.1990'
self._test_incorrect_birthdays_dates = ['32.01.2000', '01.13.2000',
'31.02.2000']
self._bot_name = 'meeseeks'
self._test_user_for_blacklist = 'test_user_for_blacklist'
self._fwd_date = datetime.now().replace(year=datetime.now().year - 1).strftime('%d.%m.%Y')
#
# Private methods
#
@staticmethod
def _get_date_with_shift(shift):
return (datetime.now() + timedelta(days=shift)).strftime('%d.%m.%Y')
@staticmethod
def _get_channel_pattern(name, date):
d_m = date[:-5]
return f'{name}-birthday-channel-{d_m}-id[0-9]{{3}}'
@staticmethod
def _get_congratulation_pattern(username):
pattern = (f'https?://media.tenor.com/images/[0-9a-z]*/tenor.gif\n'
f'Today is birthday of @{username}!\n'
f'[w+]*')
return pattern
@staticmethod
def _get_fwd_congratulation_pattern(usernames, years_counts):
pattern = f'.*'
for name, count in zip(usernames, years_counts): # pylint: disable=unused-variable
pattern += (f'\n@{name} has been a part of our team for {count} '
f'{"year" if count==1 else "years"} and')
return pattern[:-4] + "!"
def _wait_reminder(self):
time.sleep(self._reminder_interval_time)
#
# Public methods
#
def test_admins_birthday_set(self):
"""Tests if it's possible on behalf of the admin to set a birth date. """
self.choose_general_channel()
# TODO: test with invalid dates
self.send_message('{} birthday set {} {}'.
format(self._bot_name, self.username,
self._test_birthday))
assert self.check_latest_response_with_retries(
"Saving {}'s birthday.".format(self.username))
def test_admins_birthdays_on(self):
"""Tests if it's possible on behalf of the admin to get the list of
users with the specified birth date.
"""
day_month = self._test_birthday[:-5]
# TODO: test with invalid dates
self.send_message('{} birthdays on {}'.
format(self._bot_name, day_month))
assert self.check_latest_response_with_retries(
'@{}'.format(self.username))
def test_invoking_birthday_delete_by_admin(self):
"""Tests if it's possible on behalf of the admin to delete the
specified birth date.
"""
self.send_message('{} birthday delete {}'.
format(self._bot_name, self.username))
assert self.check_latest_response_with_retries(
"Removing {}'s birthday.".format(self.username))
def test_specifying_date_birth_by_admin(self):
"""Tests if it's possible on behalf of the admin to specify their own
birth date.
"""
self.switch_channel(self._bot_name)
assert self.check_latest_response_with_retries(
'Hmm...\n'
'It looks like you forgot to set the date of birth.\n'
'Please enter it (DD.MM.YYYY).'
)
self.send_message(self._test_birthday)
assert self.check_latest_response_with_retries(
'I memorized you birthday, well done! 😉'
)
def test_specifying_date_birth_by_new_user(self):
"""Tests if it's possible on behalf of an ordinary user to specify
their own birth date.
"""
self.create_user()
close_btn = self.find_by_css('button[data-action="close"]')
assert close_btn
close_btn.click()
self.logout()
self.login(use_test_user=True)
self.switch_channel(self._bot_name)
try:
assert self.check_latest_response_with_retries(
'Welcome to WIS Software! 🎉\n'
'Emm... where was I?\n'
'Oh! Please, enter your date birth (DD.MM.YYYY).'
)
except AssertionError:
assert self.check_latest_response_with_retries(
'Hmm...\n'
'It looks like you forgot to set the date of birth.\n'
'Please enter it (DD.MM.YYYY).'
)
self.send_message(self._test_birthday)
assert self.check_latest_response_with_retries(
'I memorized you birthday, well done! 😉'
)
def test_invoking_birthday_set_by_ordinary_user(self):
"""Test if it's not possible on behalf of an ordinary user to specify
someone's birth date.
"""
self.choose_general_channel()
self.send_message('{} birthday set {} {}'.format(self._bot_name,
self.username,
self._test_birthday))
assert self.check_latest_response_with_retries('Permission denied.')
def test_invoking_birthday_delete_by_ordinary_user(self):
"""Test if it's not possible on behalf of an ordinary user to delete
someone's birth date.
"""
self.send_message('{} birthday delete {} {}'.format(
self._bot_name, self.username, self._test_birthday))
assert self.check_latest_response_with_retries('Permission denied.')
self.logout()
self.login()
def test_creating_birthday_channel(self):
"""Tests if a birthday channel is automatically created. """
self.choose_general_channel()
test_date = self._get_date_with_shift(7)
self.send_message('{} birthday set {} {}'.
format(self._bot_name, self.test_username,
test_date))
assert self.check_latest_response_with_retries(
"Saving {}'s birthday.".format(self.test_username)
)
self._wait_reminder()
private_channels = self.find_by_css('.rooms-list__list.type-p')
assert private_channels
lst_of_channels = private_channels.text.split('\n')
assert lst_of_channels
pattern = self._get_channel_pattern(self.test_username,
self._get_date_with_shift(0))
assert bool(re.match(pattern, lst_of_channels[-1]))
self.switch_channel(lst_of_channels[-1])
assert self.check_latest_response_with_retries(
'@{} is having a birthday soon, so let\'s discuss a present.'
.format(self.test_username)
)
def test_checking_absence_of_test_user_in_channel(self):
"""Tests if the user, who is having a birthday soon, is not in the birthday channel. """
channel_options = self.find_by_css(
'.rc-room-actions__action.tab-button.js-action')
assert len(channel_options) >= 3
channel_options[2].click()
members_list = self.find_by_css('.rc-member-list__user')
assert members_list
assert all([member.text != self.test_username
for member in members_list])
def test_reminder_of_upcoming_birthday_7_days_in_advance(self):
"""Tests the bot reminds about the upcoming birthday 7 days in advance. """
self.switch_channel(self._bot_name)
assert self.check_latest_response_with_retries(
'@{} is having a birthday on {}.'
.format(self.test_username, self._get_date_with_shift(7)[:-5])
)
def test_reminder_of_upcoming_birthday_1_days_in_advance(self):
"""Makes sure the bot reminds about the upcoming birthday 1 days in advance. """
self.choose_general_channel()
self.send_message('{} birthday set {} {}'.
format(self._bot_name, self.test_username,
self._get_date_with_shift(1)))
self.switch_channel(self._bot_name)
assert self.check_latest_response_with_retries(
'@{} is having a birthday tomorrow.'.format(self.test_username),
attempts_number=self._reminder_interval_time
)
def test_deleting_birthday_channel(self):
"""Tests if a birthday channel is automatically deleted. """
self.choose_general_channel()
test_date = self._get_date_with_shift(-3)
self.send_message('{} birthday set {} {}'.format(self._bot_name,
self.test_username,
test_date))
self._wait_reminder()
private_channels = self.find_by_css('.rooms-list__list.type-p')
assert private_channels
lst_of_channels = private_channels.text.split('\n')
assert lst_of_channels
pattern = self._get_channel_pattern(self.test_username, test_date)
assert all([not bool(re.match(pattern, channel))
for channel in lst_of_channels])
def test_birthday_message(self):
"""Makes sure the bot writes a birthday message to #general devoted to
the user who is having a birthday.
"""
self.choose_general_channel()
self.send_message('{} birthday set {} {}'.
format(self._bot_name, self.username,
self._get_date_with_shift(0)))
pattern = self._get_congratulation_pattern(self.username)
assert self.check_latest_response_with_retries(pattern, match=True,
attempts_number=self._reminder_interval_time)
def test_birthdays_list_command_with_no_birthday(self):
"""Tests the case when someone is invoking 'birthdays list' but there is
no any birth date stored.
"""
self.send_message('{} birthday delete {}'.format(self._bot_name,
self.username))
self.send_message('{} birthday delete {}'.format(self._bot_name,
self.test_username))
self.send_message('{} birthdays list'.format(self._bot_name))
assert self.check_latest_response_with_retries('Oops... No results.')
def test_invoking_birthdays_list_with_1_birth_date(self):
"""Tests the case when someone is invoking 'birthdays list' but there is
the only birth date stored.
"""
self.send_message('{} birthday set {} {}'.format(self._bot_name,
self.username,
self._test_birthday))
self.send_message('{} birthdays list'.format(self._bot_name))
assert self.check_latest_response_with_retries(
'*Birthdays list*\n@{} was born on {}'.format(self.username,
self._test_birthday))
def test_invoking_birthdays_list_with_2_birth_dates(self):
"""Tests the case when someone is invoking 'birthdays list' but there
are only 2 birth dates stored.
"""
self.choose_general_channel()
admins_birthday = self._get_date_with_shift(25)
self.send_message('{} birthday set {} {}'.format(self._bot_name,
self.username,
admins_birthday))
users_birthday = self._get_date_with_shift(20)
self.send_message('{} birthday set {} {}'.format(self._bot_name,
self.test_username,
users_birthday))
# test user should be first in birthdays list
self.send_message('{} birthdays list'.format(self._bot_name))
assert self.check_latest_response_with_retries(
'*Birthdays list*\n'
'@{} was born on {}\n'
'@{} was born on {}'.format(self.test_username, users_birthday,
self.username, admins_birthday))
def test_birthday_channel_blacklist(self): # pylint: disable=too-many-locals,too-many-statements
"""Makes sure that the user, who is in the blacklist, is not invited
in birthday channels.
"""
# create user for blacklist
options_btn = self.browser.find_by_css(
'.sidebar__toolbar-button.rc-tooltip.rc-tooltip--down.js-button'
)
options_btn.last.click()
administration_btn = self.browser.find_by_css('.rc-popover__item-text')
administration_btn.click()
users_btn = self.browser.driver.find_elements_by_css_selector(
'a.sidebar-item__link[aria-label="Users"]')
assert users_btn
self.browser.driver.execute_script('arguments[0].click();',
users_btn[0])
add_user_btn = self.find_by_css('button[aria-label="Add User"]')
assert add_user_btn
add_user_btn.click()
input_name_el = self.find_by_css('input#name')
assert input_name_el
input_name_el.first.fill(self._test_user_for_blacklist)
input_username_el = self.find_by_css('input#username')
assert input_username_el
input_username_el.first.fill(self._test_user_for_blacklist)
input_email_el = self.find_by_css('input#email')
assert input_email_el
        input_email_el.first.fill(
            '<EMAIL>'.format(self._test_user_for_blacklist))
verified_btn = self.find_by_css('label.rc-switch__label')
assert verified_btn
verified_btn.first.click()
input_password_el = self.find_by_css('input#password')
assert input_password_el
input_password_el.first.fill('<PASSWORD>')
verified_btn = self.find_by_css('label.rc-switch__label')
assert verified_btn
verified_btn.last.click()
role_option = self.find_by_css('option[value="user"]')
assert role_option
role_option.first.click()
add_role_btn = self.find_by_css('button#addRole')
assert add_role_btn
add_role_btn.first.click()
welcome_ckbx = self.find_by_css('label[for="sendWelcomeEmail"]')
assert welcome_ckbx
welcome_ckbx.first.click()
save_btn = self.find_by_css('.rc-button.rc-button--primary.save')
assert save_btn
save_btn.first.click()
close_btn = self.find_by_css('button[data-action="close"]')
assert close_btn
close_btn.click()
self.choose_general_channel()
test_date = self._get_date_with_shift(7)
self.send_message('{} birthday set {} {}'.
format(self._bot_name, self.test_username,
test_date))
self._wait_reminder()
private_channels = self.find_by_css('.rooms-list__list.type-p')
assert private_channels
lst_of_channels = private_channels.text.split('\n')
assert lst_of_channels
self.switch_channel(lst_of_channels[-1])
channel_options = self.find_by_css(
'.rc-room-actions__action.tab-button.js-action')
assert len(channel_options) >= 3
channel_options[2].click()
members_list = self.find_by_css('.rc-member-list__user')
assert len(members_list) == 2
self.choose_general_channel()
# for deleting birthdays chat
test_date = self._get_date_with_shift(-3)
self.send_message('{} birthday set {} {}'.
format(self._bot_name, self.test_username,
test_date))
self._wait_reminder()
self.send_message('{} birthday delete {}'.
format(self._bot_name, self.test_username))
self.send_message('{} birthday delete {}'.
format(self._bot_name, self.test_username))
options_btn = self.browser.driver.find_elements_by_css_selector(
'.sidebar__toolbar-button.rc-tooltip.rc-tooltip--down.js-button')
assert options_btn
self.browser.driver.execute_script('arguments[0].click();',
options_btn[-1])
administration_btn = self.browser.find_by_css('.rc-popover__item-text')
administration_btn.click()
users_btn = self.browser.driver.find_elements_by_css_selector(
'a.sidebar-item__link[aria-label="Users"]')
assert users_btn
self.browser.driver.execute_script('arguments[0].click();',
users_btn[0])
selected_user = self.browser.find_by_xpath(
'//td[@class="border-component-color"][text()="{0}"]'
.format(self._test_user_for_blacklist))
assert selected_user
selected_user.first.click()
try:
delete_btn = self.find_by_xpath(
'//button[@class="js-action rc-user-info-action__item"]'
'[text()="Delete"]'
)
assert delete_btn
except AssertionError:
more_btn = self.find_by_css(
'button.rc-tooltip.rc-room-actions__button.js-more'
'[aria-label="More"]'
)
assert more_btn
more_btn.first.click()
delete_btn = self.find_by_xpath(
'//li[@class="rc-popover__item js-action"]'
'/span[text()="Delete"]'
)
assert delete_btn
delete_btn.first.click()
confirm_btn = self.find_by_css('input[value="Yes, delete it!"]')
assert confirm_btn
confirm_btn.first.click()
WebDriverWait(self.browser.driver, 10).until(
lambda _: self._check_modal_window_visibility())
close_btn = self.browser.driver.find_elements_by_css_selector(
'button[data-action="close"]')
assert close_btn
self.browser.driver.execute_script('arguments[0].click();',
close_btn[0])
def test_fwd_set_for_admin(self):
"""Tests if it's possible on behalf of the admin to specify a first
working date for a user.
"""
self.switch_channel(self._bot_name)
self.send_message('{} fwd set {} {}'.
format(self._bot_name, self.username,
self._fwd_date))
assert self.check_latest_response_with_retries(
"Saving {}'s first working day.".format(self.username))
def test_fwd_reminder_for_admin(self):
"""Makes sure the bot writes a message to #general containing a
congratulation on the work anniversary (the case when there is
the only user celebrating the work anniversary).
"""
self.choose_general_channel()
assert self.check_latest_response_with_retries(
self._get_fwd_congratulation_pattern([self.username, ], [1, ]),
match=True, attempts_number=80)
def test_fwd_reminder_for_new_user(self):
"""Makes sure the bot writes a message to #general containing a
congratulation on the work anniversary (the case when there are
two users celebrating the work anniversary).
"""
self.switch_channel(self._bot_name)
self.send_message('{} fwd set {} {}'.
format(self._bot_name, self.test_username,
self._fwd_date))
assert self.check_latest_response_with_retries(
"Saving {}'s first working day.".format(self.test_username))
self.choose_general_channel()
users = [self.username, self.test_username]
assert self.check_latest_response_with_retries(
self._get_fwd_congratulation_pattern(users, [1, 1]),
match=True,
attempts_number=self._reminder_interval_time)
def test_fwd_list(self):
"""Tests the case when someone is invoking 'fwd list' but there are
only 2 dates stored.
"""
self.switch_channel(self._bot_name)
self.send_message('{} fwd list'.format(self._bot_name))
assert self.check_latest_response_with_retries(
'*First working days list*\n'
'@{} joined our team {}\n'
'@{} joined our team {}'.format(self.username, self._fwd_date,
self.test_username,
self._fwd_date),
attempts_number=self._reminder_interval_time)
self.remove_user()
self.switch_channel(self._bot_name)
self.send_message('{} fwd list'.format(self._bot_name))
assert self.check_latest_response_with_retries(
'*First working days list*\n'
'@{} joined our team {}'.format(self.username, self._fwd_date),
attempts_number=self._reminder_interval_time)
def main():
"""The main entry point. """
parser = ArgumentParser(description='usage: %prog [options] arguments')
parser.add_argument('-a', '--host', dest='host', type=str,
help='allows specifying domain or IP of the Rocket.Chat host')
parser.add_argument('-u', '--username', dest='username', type=str,
help='allows specifying admin username')
parser.add_argument('-p', '--password', dest='password', type=str,
help='allows specifying admin password')
    parser.add_argument('-w', '--wait', dest='wait', type=int,
                        help='allows specifying the time (in seconds) to '
                             'wait for the reminder to fire')
options = parser.parse_args()
if not options.host:
options.host = 'http://127.0.0.1:8006'
sys.stderr.write(
'Host is not specified. Defaults to {}.\n'.format(options.host)
)
if not options.username:
parser.error('Username is not specified')
if not options.password:
parser.error('Password is not specified')
if not options.wait:
        options.wait = 100
sys.stderr.write(
'Waiting time is not specified. Defaults to {}.\n'.format(options.wait)
)
test_cases = HappyBirthderScriptTestCase(options.host, options.username,
options.password,
reminder_interval_time=options.wait,
create_test_user=False)
exit_code = test_cases.run()
sys.exit(exit_code)
if __name__ == '__main__':
main()
| 2.15625
| 2
|
cropper.py
|
alfcrisci/pySIC
| 1
|
12782361
|
###############################################################
import os
import cv2
import math
import numpy as np
from scipy.ndimage import interpolation as inter
from scipy.ndimage import rotate
###############################################################
C_percentage = 0
ACCEPTED_EXTENSIONS = (".jpeg", ".jpg", ".png", ".tif", ".tiff", ".bmp", ".dib", ".jpe", ".jp2", ".webp", ".pbm", ".pgm", ".ppm", ".sr", ".ras")
###############################################################
def euclidian_distance(first, second):
return math.sqrt(sum([pow(max(x, y) - min(x, y), 2) for x, y in zip(first, second)]))
def color_difference(first, second, precision = 100):
return euclidian_distance(first, second) > precision
def precision(arr, angle):
hit = np.sum(inter.rotate(arr, angle, reshape = False, order = 0), axis = 1)
prec = np.sum((hit[1:]-hit[:-1])**2)
return prec
def rotateImage(image, angle):
image_center = tuple(np.array(image.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT, borderValue=(255,255,255))
return result
def crop(abs_folder_in, abs_folder_out, debug):
global C_percentage
images_list = [i for i in os.listdir(abs_folder_in) if i.endswith(ACCEPTED_EXTENSIONS) and i[:2] != "ad"]
if debug: print("\n".join(images_list))
images_list = sorted(images_list, key = lambda i: int(i[:-4]))
for c, image_path in enumerate(images_list, 1):
original_image = cv2.imread(os.path.join(abs_folder_in, image_path), 0)
sheet = cv2.resize(original_image, (0, 0), fx = 0.125, fy = 0.125)
ret, sheet = cv2.threshold(sheet, 127, 255, cv2.THRESH_BINARY)
wd, ht = sheet.shape
pix = np.array(sheet, np.uint8)
bin_img = 1 - (pix / 255.0)
limit, delta = 10, 1
angles = np.arange(-limit, limit+delta, delta)
scores = [precision(bin_img, angle) for angle in angles]
best = angles[scores.index(max(scores))]
original_image = rotateImage(cv2.imread(os.path.join(abs_folder_in, image_path)), best)
w, h, z = original_image.shape
K = 500 / max(w, h)
resized_image = cv2.resize(original_image, (0, 0), fx = K, fy = K)
w, h, z = resized_image.shape
mx, my = int(w / 2), int(h / 2)
startx = 0
starty = 0
endx = w
endy = h
for i in range(1, w):
if color_difference(resized_image[i, my], resized_image[i - 1, my]):
startx = i
break
for i in range(w - 2, 0, -1):
if color_difference(resized_image[i, my], resized_image[i + 1, my]):
endx = i
break
for i in range(1, h):
if color_difference(resized_image[mx, i], resized_image[mx, i - 1]):
starty = i
break
for i in range(h - 2, 0, -1):
if color_difference(resized_image[mx, i], resized_image[mx, i + 1]):
endy = i
break
if endx <= startx:
endx = w
if endy <= starty:
endy = h
startx, starty, endx, endy = int(startx * (1 / K)), int(starty * (1 / K)), int(endx * (1 / K)), int(endy * (1 / K))
jump = int(1 / K * 10)
if debug:
print("Angle : ", best)
print("K : ", K, " jump : ", jump)
print("(", startx, ", ", starty, ") -> (", endx, ", ", endy, ")")
print("Saving...")
if (endx-jump) - (startx+jump) < (w*K)/3 or (endy-jump) - (starty+jump) < (h*K)/3:
cv2.imwrite(os.path.join(abs_folder_out, str(c) + ".jpg"), original_image)
else:
cv2.imwrite(os.path.join(abs_folder_out, str(c) + ".jpg"), original_image[startx + jump : endx - jump, starty + jump : endy - jump])
if debug: print("Done ", c, " of ", len(images_list))
C_percentage += 1 / len(images_list)
C_percentage = 0
def get_percentage():
global C_percentage
return C_percentage
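# A hedged usage sketch; the folders below are hypothetical and must exist.
# get_percentage() is meant to be polled from another thread while crop()
# runs, since C_percentage is reset to 0 once the batch finishes:
if __name__ == "__main__":
    crop("/tmp/scans_in", "/tmp/scans_out", debug=True)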
###############################################################
| 2.296875
| 2
|
cogs/admin.py
|
staciax/ValorantStoreChecker-discord-bot
| 37
|
12782362
|
<reponame>staciax/ValorantStoreChecker-discord-bot
import discord
from discord.ext import commands
class Admin(commands.Cog):
"""Error handler"""
def __init__(self, bot: commands.Bot) -> None:
self.bot = bot
@commands.command()
# @commands.is_owner()
async def sync(self, ctx: commands.Context, sync_type: str = None) -> None:
if self.bot.owner_id is None:
if ctx.author.guild_permissions.administrator != True:
await ctx.reply("You don't have **Administrator permission(s)** to run this command!", delete_after=30)
return
if sync_type is None:
            return await ctx.send('You need to specify a sync type.')
try:
if sync_type == 'guild':
guild = discord.Object(id=ctx.guild.id)
self.bot.tree.copy_global_to(guild=guild)
await self.bot.tree.sync(guild=guild)
await ctx.reply(f"Synced guild !")
elif sync_type == 'global':
await self.bot.tree.sync()
await ctx.reply(f"Synced global !")
except discord.Forbidden:
await ctx.send("Bot don't have permission to sync. : https://cdn.discordapp.com/attachments/939097458288496682/950613059150417970/IMG_3279.png")
except discord.HTTPException:
await ctx.send('Failed to sync.', delete_after=30)
@commands.command()
# @commands.is_owner()
async def unsync(self, ctx: commands.Context, unsync_type: str = None) -> None:
if self.bot.owner_id is None:
if ctx.author.guild_permissions.administrator != True:
await ctx.reply("You don't have **Administrator permission(s)** to run this command!", delete_after=30)
return
if unsync_type is None:
            return await ctx.send('You need to specify an unsync type.')
try:
if unsync_type == 'guild':
guild = discord.Object(id=ctx.guild.id)
commands = self.bot.tree.get_commands(guild=guild)
for command in commands:
self.bot.tree.remove_command(command, guild=guild)
await self.bot.tree.sync(guild=guild)
await ctx.reply(f"Un-Synced guild !")
elif unsync_type == 'global':
commands = self.bot.tree.get_commands()
for command in commands:
self.bot.tree.remove_command(command)
await self.bot.tree.sync()
await ctx.reply(f"Un-Synced global !")
except discord.Forbidden:
await ctx.send("Bot don't have permission to unsync. : https://cdn.discordapp.com/attachments/939097458288496682/950613059150417970/IMG_3279.png")
except discord.HTTPException:
await ctx.send('Failed to unsync.', delete_after=30)
async def setup(bot: commands.Bot) -> None:
await bot.add_cog(Admin(bot))
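# How this extension is typically loaded under discord.py 2.x; the module
# path, prefix, and intents below are assumptions, and the token is omitted:
if __name__ == "__main__":
    class _DemoBot(commands.Bot):
        async def setup_hook(self) -> None:
            await self.load_extension("cogs.admin")

    _demo = _DemoBot(command_prefix="!", intents=discord.Intents.default())
    # _demo.run("YOUR_BOT_TOKEN")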
| 2.28125
| 2
|
day-16/part-1/chloe.py
|
lypnol/adventofcode-2017
| 16
|
12782363
|
from runners.python import Submission
class ChloeSubmission(Submission):
def run(self, s):
string = list('abcdefghijklmnop')
choregraphie = s.split(',')
for mouvement in choregraphie:
if mouvement[0] == 's':
longueur_groupe = int(mouvement[1:])
stringA = string[len(string) - longueur_groupe:]
stringB = string[: len(string) - longueur_groupe]
string = stringA + stringB
if mouvement[0] == 'x':
position1 = int(mouvement[mouvement.index('x') + 1: mouvement.index('/')])
position2 = int(mouvement[mouvement.index('/') + 1:])
exchange = string[position1]
string[position1] = string[position2]
string[position2] = exchange
if mouvement[0] == 'p':
element1 = mouvement[mouvement.index('p') + 1: mouvement.index('/')]
element2 = mouvement[mouvement.index('/') + 1:]
position1 = string.index(element1)
position2 = string.index(element2)
partner = string[position1]
string[position1] = string[position2]
string[position2] = partner
return ''.join(string)
| 3.46875
| 3
|
stubs.min/Autodesk/Revit/DB/__init___parts/Sweep.py
|
ricardyn/ironpython-stubs
| 1
|
12782364
|
<reponame>ricardyn/ironpython-stubs<gh_stars>1-10
class Sweep(GenericForm,IDisposable):
""" A sweep solid or void form. """
def Dispose(self):
""" Dispose(self: Element,A_0: bool) """
pass
def getBoundingBox(self,*args):
""" getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: Element,disposing: bool) """
pass
def setElementType(self,*args):
""" setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
IsTrajectorySegmentationEnabled=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The trajectory segmentation option for the sweep.
Get: IsTrajectorySegmentationEnabled(self: Sweep) -> bool
Set: IsTrajectorySegmentationEnabled(self: Sweep)=value
"""
MaxSegmentAngle=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The maximum segment angle of the sweep in radians.
Get: MaxSegmentAngle(self: Sweep) -> float
Set: MaxSegmentAngle(self: Sweep)=value
"""
Path3d=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The selected curves used for the sweep path.
Get: Path3d(self: Sweep) -> Path3d
"""
PathSketch=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The sketched path for the sweep.
Get: PathSketch(self: Sweep) -> Sketch
"""
ProfileSketch=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The profile sketch of the sweep.
Get: ProfileSketch(self: Sweep) -> Sketch
"""
ProfileSymbol=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The family symbol profile details for the sweep.
Get: ProfileSymbol(self: Sweep) -> FamilySymbolProfile
"""
| 2.03125
| 2
|
Posture/show_marked.py
|
TY-Projects/Virtual-Yoga-App
| 0
|
12782365
|
<filename>Posture/show_marked.py<gh_stars>0
import cv2
import numpy as np
cap = cv2.VideoCapture("./Videos/yoga_women7.avi")
semaPhore = False
while True:
    ret, frame = cap.read()
    if not ret:
        break
    print(frame[0, 0, 0])
if(frame[0,0,0] > 250):
semaPhore = not semaPhore
if(semaPhore):
cv2.imshow("Window", frame)
k = cv2.waitKey(30)
if k == 27 or k == 32:
break
| 2.75
| 3
|
hyk/information/models/info_rutine.py
|
morwen1/hack_your_body
| 0
|
12782366
|
<reponame>morwen1/hack_your_body
#django
from django.db import models
#utils
from hyk.utils.models.abstract_hyb import HybModel
from .Info_abstract import Info
class Info_rutine(Info):
reps = models.IntegerField()
| 2.09375
| 2
|
PyQt5/Widget/DrawPointDemo.py
|
zhaokai0402/PyQt5-Study
| 0
|
12782367
|
<gh_stars>0
import sys, os, math
#pylint: disable=E0602
sys.path.append(os.getcwd())
from importQt import *
class DrawPointDemo(QWidget):
def __init__(self):
super().__init__()
self.resize(300, 200)
self.setWindowTitle("draw Point in window")
def paintEvent(self, event):
painter = QPainter(self)
self.drawPoints(painter)
def drawPoints(self, painter):
painter.setPen(Qt.red)
size = self.size()
for i in range(1000):
x = 100 * ( -1 + 2.0 * i / 1000.0) + size.width() / 2.0
y = (-50 * math.sin((x - size.width() / 2.0) * math.pi / 50.0)
+ size.height() / 2.0)
            painter.drawPoint(int(x), int(y))  # drawPoint expects integer pixel coordinates
if __name__ == '__main__':
app = QApplication(sys.argv)
demo = DrawPointDemo()
demo.show()
sys.exit(app.exec())
| 2.6875
| 3
|
2021_TetCTF/SuperCalc/findingChars.py
|
Haltz01/CTFs_Writeups
| 0
|
12782368
|
<gh_stars>0
#!/usr/bin/python3
wantedChars = ['$', '_', 'G', 'E', 'T', '[', ']']
availableChars = [
'0',
'1',
'2',
'3',
'4',
'5',
'6',
'7',
'8',
'9',
'(',
')',
'.',
'~',
'^',
'|',
'&',
'+',
'-',
'*',
'/'
]
for char1 in availableChars:
for char2 in availableChars:
if (chr(ord(char1) ^ ord(char2)) in wantedChars):
print(char1 + " ^ " + char2 + " = " + chr(ord(char1) ^ ord(char2)))
wantedChars.remove(chr(ord(char1) ^ ord(char2)))
if (chr(ord(char1) & ord(char2)) in wantedChars):
print(char1 + " & " + char2 + " = " + chr(ord(char1) & ord(char2)))
wantedChars.remove(chr(ord(char1) & ord(char2)))
if (chr(ord(char1) | ord(char2)) in wantedChars):
print(char1 + " | " + char2 + " = " + chr(ord(char1) | ord(char2)))
wantedChars.remove(chr(ord(char1) | ord(char2)))
print("===============================")
for char1 in availableChars:
for char2 in availableChars:
for char3 in availableChars:
if (chr(ord(char1) ^ ord(char2) ^ ord(char3)) in wantedChars):
print(char1 + " ^ " + char2 + " ^ " + char3 + " = " + chr(ord(char1) ^ ord(char2) ^ ord(char3)))
wantedChars.remove(chr(ord(char1) ^ ord(char2) ^ ord(char3)))
if (chr(ord(char1) & ord(char2) ^ ord(char3)) in wantedChars):
print(char1 + " & " + char2 + " ^ " + char3 + " = " + chr(ord(char1) & ord(char2) ^ ord(char3)))
wantedChars.remove(chr(ord(char1) & ord(char2) ^ ord(char3)))
if (chr(ord(char1) | ord(char2) ^ ord(char3)) in wantedChars):
print(char1 + " | " + char2 + " ^ " + char3 + " = " + chr(ord(char1) | ord(char2) ^ ord(char3)))
wantedChars.remove(chr(ord(char1) | ord(char2) ^ ord(char3)))
| 3.625
| 4
|
daireVeMerkeziKareler/daireVeMerkeziKareler.py
|
emremrt98/Python-Projeleri
| 0
|
12782369
|
import turtle
captain = turtle.Turtle()
print("\n--------------------------")
print("Ad ve Soyad : <NAME>\nOkul No : 203305028")
print("----------------------------")
kenar = float(input("Kenar Uzunluklarini Gir : "))
yariCap = float(input("Dairenin Yaricapini Gir : "))
x = 0
y = 0
count = 0
kenar += yariCap
captain.shape("circle")
for i in range(4):
captain.up()
captain.goto(x,y)
captain.down()
captain.circle(yariCap)
count += 1
if(count == 1):
x = kenar + yariCap
elif(count == 2):
x = 0
y = (-kenar) - yariCap
elif(count == 3):
x = kenar + yariCap
captain.up()
captain.goto(0,yariCap)
captain.down()
for i in range(4):
captain.shape("arrow")
captain.dot()
captain.forward(x)
captain.right(90)
| 3.796875
| 4
|
setup.py
|
vvanholl/smsgateway
| 23
|
12782370
|
<filename>setup.py
#!/usr/bin/env python
from setuptools import setup
with open('VERSION') as version_file:
version = version_file.read()
with open('README.rst') as readme_file:
long_description = readme_file.read()
with open('requirements.txt') as requirements_file:
install_requires = requirements_file.read().splitlines()
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Communications',
'Topic :: Communications :: Telephony',
'Topic :: System :: Networking',
'Topic :: System :: Networking :: Monitoring',
]
data_files = [
('/etc', ['etc/smsgateway.yml'])
]
scripts = [
'bin/smsgateway'
]
setup(
name='smsgateway-gammu',
version=version,
description='Send SMS messages via REST api using Gammu',
long_description=long_description,
url='https://github.com/vvanholl/smsgateway-gammu',
author='<NAME>',
author_email='<EMAIL>',
license='Apache License, Version 2.0',
install_requires=install_requires,
classifiers=classifiers,
data_files=data_files,
scripts=scripts
)
| 1.390625
| 1
|
sponge-app/sponge-app-demo-service/sponge/sponge_demo_context_actions_active.py
|
mnpas/sponge
| 9
|
12782371
|
"""
Sponge Knowledge Base
Demo - Action - Active context actions
"""
class ContextActionsActiveInactive(Action):
def onConfigure(self):
self.withLabel("Action with active/inactive context actions").withArgs([
BooleanType("active").withLabel("Active").withDefaultValue(False)
]).withNonCallable().withFeatures({"contextActions":[
SubAction("ContextActionsActiveInactive_ContextAction1").withArg("active", "active"),
SubAction("ContextActionsActiveInactive_ContextAction2")
]})
class ContextActionsActiveInactive_ContextAction1(Action):
def onConfigure(self):
self.withLabel("Active/inactive context action").withArg(BooleanType("active").withNullable().withFeature("visible", False)).withNoResult()
self.withActivatable()
def onIsActive(self, context):
return context.args[0] if context.args is not None and context.args[0] is not None else False
def onCall(self, active):
pass
class ContextActionsActiveInactive_ContextAction2(Action):
def onConfigure(self):
self.withLabel("Context action 2").withNoArgs().withNoResult().withFeature("visible", False)
def onCall(self):
pass
| 2.640625
| 3
|
WEEKS/CD_Sata-Structures/_RESOURCES/sprint-prep/codesignal-my-solutions-master/boxBlurImage/boxBlurImage.py
|
webdevhub42/Lambda
| 0
|
12782372
|
<filename>WEEKS/CD_Sata-Structures/_RESOURCES/sprint-prep/codesignal-my-solutions-master/boxBlurImage/boxBlurImage.py
import math

def boxBlur(image):
m = len(image) - 2
n = len(image[0]) - 2
blur_img = [[0] * n for _ in range(m)]
for i in range(1, m + 1):
for j in range(1, n + 1):
indexes = [
[i - 1, j - 1],
[i - 1, j],
[i - 1, j + 1],
[i, j - 1],
[i, j],
[i, j + 1],
[i + 1, j - 1],
[i + 1, j],
[i + 1, j + 1],
]
avg = int(math.floor(sum([image[k][l] for (k, l) in indexes]) / 9))
blur_img[i - 1][j - 1] = avg
return blur_img
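# Quick check (driver added for illustration): for the single interior pixel
# the blur is floor((8 * 1 + 7) / 9) = 1.
if __name__ == "__main__":
    assert boxBlur([[1, 1, 1], [1, 7, 1], [1, 1, 1]]) == [[1]]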
| 3.0625
| 3
|
tensorflow/python/ops/filesystem_ops.py
|
wainshine/tensorflow
| 54
|
12782373
|
<reponame>wainshine/tensorflow
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Filesystem related operations."""
from tensorflow.python.ops import gen_filesystem_ops as _gen_filesystem_ops
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=protected-access
@tf_export('experimental.filesystem_set_configuration')
def filesystem_set_configuration(scheme, key, value, name=None):
"""Set configuration of the file system.
Args:
scheme: File system scheme.
key: The name of the configuration option.
value: The value of the configuration option.
name: A name for the operation (optional).
Returns:
None.
"""
return _gen_filesystem_ops.file_system_set_configuration(
scheme, key=key, value=value, name=name)
# pylint: enable=protected-access
| 1.546875
| 2
|
gym_rubiks_cube/envs/functions.py
|
marc131183/3DimensionalRendering
| 0
|
12782374
|
import numpy as np
class Plane:
def __init__(self, origin: np.array, vector1: np.array, vector2: np.array) -> None:
self.origin = origin
self.vector1 = vector1
self.vector2 = vector2
def getBarycentricCoordinates(self, point: np.array, direction: np.array):
a = np.array([self.vector1, self.vector2, -direction]).T
b = point - self.origin
sol = np.linalg.solve(a, b)
return np.array([sol[0], sol[1]])
def convertBarycentricCoordinates(self, x, y):
return self.origin + x * self.vector1 + y * self.vector2
class Sphere:
"""https://math.stackexchange.com/questions/268064/move-a-point-up-and-down-along-a-sphere"""
def __init__(self, radius) -> None:
self.radius = radius
self.current_pos = [90, -90]
def rotate(self, x, y):
self.current_pos[0] = (self.current_pos[0] + x) % 360
self.current_pos[1] = (self.current_pos[1] + y) % 360
theta, phi = np.deg2rad(self.current_pos[0]), np.deg2rad(self.current_pos[1])
return np.array(
[
self.radius * np.sin(theta) * np.cos(phi),
self.radius * np.sin(theta) * np.sin(phi),
self.radius * np.cos(theta),
]
)
class RotationMatrix3D:
def __init__(self) -> None:
pass
def __call__(
self, object_to_rotate: np.ndarray, axis: int, angle: float
) -> np.ndarray:
if axis == 0:
rotation_matrix = np.array(
[
[1, 0, 0],
[0, np.cos(angle), -np.sin(angle)],
[0, np.sin(angle), np.cos(angle)],
]
)
elif axis == 1:
rotation_matrix = np.array(
[
[np.cos(angle), 0, np.sin(angle)],
[0, 1, 0],
[-np.sin(angle), 0, np.cos(angle)],
]
)
elif axis == 2:
rotation_matrix = np.array(
[
[np.cos(angle), -np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1],
]
)
else:
            raise ValueError("Invalid argument for axis, options are 0, 1, 2")
return np.matmul(rotation_matrix, object_to_rotate)
def getQuadrant(x: float, y: float):
if x == 0 and y == 0:
return -1
if x >= 0 and y >= 0:
return 1
elif x > 0 and y < 0:
return 2
elif x <= 0 and y <= 0:
return 3
elif x < 0 and y > 0:
return 4
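# A minimal sketch: rotating the x unit vector 90 degrees around the z axis
# (axis=2) yields the y unit vector, up to floating point error.
if __name__ == "__main__":
    rotate = RotationMatrix3D()
    print(rotate(np.array([1.0, 0.0, 0.0]), axis=2, angle=np.pi / 2))
    # ~[0. 1. 0.]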
| 3.140625
| 3
|
networks/dorn/dp/metircs/__init__.py
|
EvoCargo/mono_depth
| 0
|
12782375
|
<gh_stars>0
#!/usr/bin/python3
# -*- coding: utf-8 -*-
def build_metrics(cfg):
mod = __import__("{}.{}".format(__name__, "metrics"), fromlist=[''])
return getattr(mod, "Metrics")() # noqa
# return mod.Metrics
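# A minimal usage sketch (an assumption: `cfg` is whatever config object the
# surrounding framework passes in; this factory ignores it and simply
# instantiates `Metrics` from the sibling `metrics` module):
#
#   metrics = build_metrics(cfg)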
| 1.585938
| 2
|
tests/test_numba_toolbox_gpu.py
|
3d-pli/SLIX
| 5
|
12782376
|
import SLIX
if SLIX.toolbox.gpu_available:
print(SLIX.toolbox.gpu_available)
from SLIX.GPU import _toolbox as ntoolbox
import cupy
from numba import cuda
threads_per_block = (1, 1)
blocks_per_grid = (1, 1)
class TestNumbaToolboxGPU:
def test_peak_cleanup(self):
test_one_peak = cupy.array([0, 1, 0, 0]).reshape((1, 1, 4))
result = cupy.zeros(test_one_peak.shape, dtype='int8')
ntoolbox._peaks[blocks_per_grid, threads_per_block](test_one_peak, result)
cuda.synchronize()
assert cupy.all(cupy.array([0, 1, 0, 0]) == result)
test_two_peak = cupy.array([0, 1, 1, 0]).reshape((1, 1, 4))
result = cupy.zeros(test_two_peak.shape, dtype='int8')
ntoolbox._peaks[blocks_per_grid, threads_per_block](test_two_peak, result)
assert cupy.all(cupy.array([0, 1, 0, 0]) == result)
test_three_peak = cupy.array([0, 1, 1, 1, 0]).reshape((1, 1, 5))
result = cupy.zeros(test_three_peak.shape, dtype='int8')
ntoolbox._peaks[blocks_per_grid, threads_per_block](test_three_peak, result)
assert cupy.all(cupy.array([0, 0, 1, 0, 0]) == result)
test_double_three_peak = cupy.array([0, 1, 1, 1, 0, 1, 1, 1, 0]).reshape((1, 1, 9))
result = cupy.zeros(test_double_three_peak.shape, dtype='int8')
ntoolbox._peaks[blocks_per_grid, threads_per_block](test_double_three_peak, result)
assert cupy.all(cupy.array([0, 0, 1, 0, 0, 0, 1, 0, 0]) == result)
def test_prominence(self):
test_array = cupy.array([0, 0.1, 0.2, 0.4, 0.8, 1, 0.5, 0.7, 0.9, 0.5, 0.3, 0.95, 0], dtype='float32')\
.reshape((1, 1, 13))
peaks = cupy.array([0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0], dtype='int8').reshape((1, 1, 13))
expected_prominence = cupy.array([0, 0, 0, 0, 0, 1, 0, 0, 0.4, 0, 0, 0.65, 0]).reshape((1, 1, 13))
toolbox_prominence = cupy.zeros(expected_prominence.shape, dtype='float32')
ntoolbox._prominence[blocks_per_grid, threads_per_block](test_array, peaks, toolbox_prominence)
print(toolbox_prominence)
assert cupy.all(cupy.isclose(expected_prominence, toolbox_prominence))
def test_peakwidth(self):
test_array = cupy.array([0, 0.1, 0.2, 0.5, 0.8, 1, 0.77, 0.7, 0.66, 0.5, 0.74, 0.98, 0.74], dtype='float32')\
.reshape((1, 1, 13))
peaks = cupy.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0], dtype='int8').reshape((1, 1, 13))
prominence = cupy.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0.48, 0], dtype='float32').reshape((1, 1, 13))
expected_width = cupy.array([0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 2, 0]).reshape((1, 1, 13))
toolbox_width = cupy.zeros(expected_width.shape, dtype='float32')
ntoolbox._peakwidth[blocks_per_grid, threads_per_block](test_array, peaks, prominence, toolbox_width, 0.5)
assert cupy.all(toolbox_width == expected_width)
def test_peakdistance(self):
test_arr = cupy.array(([False, False, True, False, False, False, False, True, False] +
[False] * 15), dtype='int8')\
.reshape((1, 1, 24))
expected_distance = 75
toolbox_distance = cupy.zeros(test_arr.shape, dtype='float32')
ntoolbox._peakdistance[blocks_per_grid, threads_per_block]\
(test_arr,
cupy.zeros(test_arr.shape, dtype='float32'),
cupy.array([[2]], dtype='int8'),
toolbox_distance)
assert toolbox_distance[0, 0, 2] == expected_distance
assert toolbox_distance[0, 0, 7] == 360 - expected_distance
def test_direction(self):
# Test for one peak
one_peak_arr = cupy.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\
.reshape((1, 1, 24)).astype('int8')
expected_direction = cupy.array([45, ntoolbox.BACKGROUND_COLOR, ntoolbox.BACKGROUND_COLOR])
toolbox_direction = cupy.zeros((1, 1, 3), dtype='float32')
ntoolbox._direction[blocks_per_grid, threads_per_block]\
(one_peak_arr,
cupy.zeros(one_peak_arr.shape, dtype='float32'),
cupy.array([[1]], dtype='int8'),
toolbox_direction,
0)
assert cupy.all(expected_direction == toolbox_direction)
# Test for one direction with 180°+-35° distance
two_peak_arr = cupy.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0])\
.reshape((1, 1, 24)).astype('int8')
expected_direction = cupy.array([135, ntoolbox.BACKGROUND_COLOR, ntoolbox.BACKGROUND_COLOR])
ntoolbox._direction[blocks_per_grid, threads_per_block]\
(two_peak_arr,
cupy.zeros(two_peak_arr.shape, dtype='float32'),
cupy.array([[2]], dtype='int8'),
toolbox_direction,
0)
assert cupy.all(expected_direction == toolbox_direction)
# Test for (invalid) two directions with 180°+-35° distance
four_peak_arr = cupy.array([0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]) \
.reshape((1, 1, 24)).astype('int8')
expected_direction = cupy.array([ntoolbox.BACKGROUND_COLOR, ntoolbox.BACKGROUND_COLOR, ntoolbox.BACKGROUND_COLOR])
ntoolbox._direction[blocks_per_grid, threads_per_block] \
(four_peak_arr,
cupy.zeros(four_peak_arr.shape, dtype='float32'),
cupy.array([[4]], dtype='int8'),
toolbox_direction,
0)
assert cupy.all(expected_direction == toolbox_direction)
def test_centroid_correction_bases(self):
# simple test case: one distinct peak
test_array = cupy.array([0] * 9 + [1] + [0] * 14).reshape((1, 1, 24))
test_high_peaks = SLIX.toolbox.peaks(test_array)
test_reverse_peaks = SLIX.toolbox.peaks(-test_array)
left_bases = cupy.zeros(test_array.shape, dtype='uint8')
right_bases = cupy.zeros(test_array.shape, dtype='uint8')
ntoolbox._centroid_correction_bases[blocks_per_grid, threads_per_block]\
(test_array,
test_high_peaks,
test_reverse_peaks,
left_bases,
right_bases)
assert cupy.sum(left_bases) == 1
assert cupy.sum(right_bases) == 1
# simple test case: one distinct peak
test_array = cupy.array([0] * 8 + [0.95, 1, 0.5] + [0] * 13, dtype='float32').reshape((1, 1, 24))
test_high_peaks = SLIX.toolbox.peaks(test_array)
test_reverse_peaks = SLIX.toolbox.peaks(-test_array)
ntoolbox._centroid_correction_bases[blocks_per_grid, threads_per_block] \
(test_array,
test_high_peaks,
test_reverse_peaks,
left_bases,
right_bases)
assert cupy.sum(left_bases) == 2
assert cupy.sum(right_bases) == 1
# simple test case: centroid is between two measurements
test_array = cupy.array([0] * 8 + [1, 1] + [0] * 14).reshape((1, 1, 24))
test_high_peaks = SLIX.toolbox.peaks(test_array)
test_reverse_peaks = SLIX.toolbox.peaks(-test_array)
ntoolbox._centroid_correction_bases[blocks_per_grid, threads_per_block] \
(test_array,
test_high_peaks,
test_reverse_peaks,
left_bases,
right_bases)
assert cupy.sum(left_bases) == 1
assert cupy.sum(right_bases) == 2
# more complicated test case: wide peak plateau
test_array = cupy.array([0] * 8 + [1, 1, 1] + [0] * 13).reshape((1, 1, 24))
test_high_peaks = SLIX.toolbox.peaks(test_array)
test_reverse_peaks = SLIX.toolbox.peaks(-test_array)
ntoolbox._centroid_correction_bases[blocks_per_grid, threads_per_block] \
(test_array,
test_high_peaks,
test_reverse_peaks,
left_bases,
right_bases)
assert cupy.sum(left_bases) == 2
assert cupy.sum(right_bases) == 2
def test_centroid(self):
image = cupy.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\
.reshape((1, 1, 24))
left = cupy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\
.reshape((1, 1, 24))
right = cupy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\
.reshape((1, 1, 24))
peak = cupy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\
.reshape((1, 1, 24))
result_centroid = cupy.zeros(image.shape, dtype='float32')
ntoolbox._centroid[blocks_per_grid, threads_per_block](image, peak, left, right, result_centroid)
assert cupy.sum(result_centroid) == 0
| 2.125
| 2
|
antlir/vm/tpm.py
|
vmagro/antlir
| 0
|
12782377
|
<reponame>vmagro/antlir
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import subprocess
import threading
from dataclasses import dataclass, field
from typing import Iterable, List, Union, Awaitable
from antlir.fs_utils import Path
from antlir.unshare import Unshare
from antlir.vm.common import SidecarProcess
logger = logging.getLogger(__name__)
class TPMError(Exception):
pass
# NOTE: all the sidecars in vm.py use async api, while the QEMU process
# uses subprocess.Popen; since we want the TPM messages to be piped to stdout
# in real time (as opposed to at the next schedule point of the async loop), this
# software TPM process also needs to use subprocess.Popen along with an output
# reader thread that pipes to logger.
# The wait method here is marked async for the same reason.
class PseudoAsyncProcess(SidecarProcess):
def __init__(self, args: List[Union[str, bytes]]):
proc = subprocess.Popen(
args,
preexec_fn=os.setpgrp,
stdout=subprocess.DEVNULL,
stderr=subprocess.PIPE,
text=True,
)
super().__init__(proc)
self.__reader = threading.Thread(
target=self.__pipe_output,
)
self.__reader.start()
def __pipe_output(self):
for line in self._proc.stderr:
logger.debug("TPM: {}".format(line.strip()))
async def wait(self):
# other side of the read pipe is closed by the kill
self.__reader.join()
self._proc.stderr.close()
self._proc.wait()
@dataclass(frozen=True)
class VmTPM(object):
context_path: Path
sock_path: Path = field(init=False)
state_path: Path = field(init=False)
def __post_init__(self):
# sacrifices for a frozen instance with init fields, see:
# https://docs.python.org/3/library/dataclasses.html#frozen-instances
object.__setattr__(
self, "sock_path", self.context_path / "tpm_ctrl.sock"
)
object.__setattr__(self, "state_path", self.context_path / "tpm_state")
os.mkdir(self.state_path)
async def start_sidecar(
self,
ns: Unshare,
binary: str,
timeout_ms: int,
) -> PseudoAsyncProcess:
logger.debug(f"Starting software TPM... [context: {self.context_path}]")
proc = PseudoAsyncProcess(
ns.nsenter_as_user(binary, *self.__get_sidecar_args())
)
try:
self.sock_path.wait_for(timeout_ms=timeout_ms)
except FileNotFoundError:
await proc.kill()
raise TPMError(
f"Software TPM device fail to create socket in: {timeout_ms}ms"
)
logger.debug(f"TPM sidecar PID: {proc.pid}")
return proc
def __get_sidecar_args(self) -> Iterable[str]:
args = [
"socket",
"--tpm2",
"--tpmstate",
f"dir={self.state_path}",
"--ctrl",
f"type=unixio,path={self.sock_path}",
]
if 0 < logger.getEffectiveLevel() <= logging.DEBUG:
# this level 20 is basically binary, anything >= 5 generates output
args.extend(
[
"--log",
"level=20",
]
)
return args
@property
def qemu_args(self) -> Iterable[str]:
return (
"-chardev",
f"socket,id=chtpm,path={self.sock_path}",
"-tpmdev",
"emulator,id=tpm0,chardev=chtpm",
"-device",
"tpm-tis,tpmdev=tpm0",
)
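# A minimal usage sketch (hypothetical paths; assumes an `Unshare` namespace
# `ns` and the `swtpm` binary, whose `socket --tpm2 ...` CLI the sidecar args
# above target):
#
#   tpm = VmTPM(context_path=Path("/tmp/vm-ctx"))
#   sidecar = await tpm.start_sidecar(ns, "swtpm", timeout_ms=5000)
#   qemu_cmd = ["qemu-system-x86_64", *tpm.qemu_args]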
| 2.015625
| 2
|
src/thexb/UTIL_converters.py
|
harris-2374/THEx
| 0
|
12782378
|
def convert_window_size_to_int(ws):
"""This function converts the shorthand input window size
and returns an integer of the same value (i.e. "100kb" == int(100000))"""
ws = ws.lower()
window_units = ws[-2:]
    try:
        window_int = int(ws[:-2])
    except ValueError:
        raise ValueError("Please give window size as an integer followed by a unit (i.e., 10kb)")
    converters = {
        'bp': window_int,
        'kb': window_int*(10**3),
        'mb': window_int*(10**6),
        'gb': window_int*(10**9)
    }
try:
return converters[window_units]
except KeyError:
print(window_units)
raise KeyError("Invalid unit type provided. Options=[bp, kb, mb, gb]")
# raise TypeError("Invalid window size value, must be a integer followed by bp, kb, mb, gb (i.e., 10bp, 10kb, 10mb, 10gb)")
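# A minimal sketch of the expected behaviour:
#
#   convert_window_size_to_int("100kb")  # -> 100000
#   convert_window_size_to_int("2mb")    # -> 2000000
#   convert_window_size_to_int("10xy")   # -> raises KeyError (unknown unit)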
| 3.984375
| 4
|
mapocr/segmentation.py
|
karimbahgat/mapocr
| 0
|
12782379
|
<reponame>karimbahgat/mapocr<filename>mapocr/segmentation.py
import itertools
import math
import numpy as np
import PIL
from PIL import Image, ImageCms, ImageMorph, ImageFilter, ImageOps, ImageMath
import cv2
from colormath.color_objects import sRGBColor, LabColor
from colormath.color_conversions import convert_color
from colormath.color_diff_matrix import delta_e_cie2000
def rgb_to_lab(im):
srgb_p = ImageCms.createProfile("sRGB")
lab_p = ImageCms.createProfile("LAB")
rgb2lab = ImageCms.buildTransformFromOpenProfiles(srgb_p, lab_p, "RGB", "LAB")
im = ImageCms.applyTransform(im, rgb2lab)
return im
def color_difference(im, color):
im_arr = np.array(im)
    # If you want to handle larger images more quickly, do it tiled, e.g.:
## tw,th = 300,300
## diff = np.zeros(tuple(reversed(im.size)))
## for y in range(0,im.size[1],th):
## print y
## for x in range(0,im.size[0],tw):
## x2 = min(x + tw, im.size[0])
## y2 = min(y + th, im.size[1])
## tile = im.crop([x,y,x2,y2])
## tdiff = mapfit.segmentation.color_difference(tile, col)
## tdiff[tdiff>thresh] = 255
## diff[y:y2, x:x2] = tdiff
## Image.fromarray(diff).show()
#target = color
#diffs = [pairdiffs[tuple(sorted([target,oth]))] for oth in colors if oth != target]
target = convert_color(sRGBColor(*color, is_upscaled=True), LabColor).get_value_tuple()
counts,colors = zip(*im.getcolors(256)) # somehow much faster than numpy...
#colors,counts = np.unique(im_arr.reshape(-1, 3), return_counts=True, axis=0)
colors,counts = list(map(tuple,colors)), list(map(int,counts))
colors_lab = [convert_color(sRGBColor(*col, is_upscaled=True), LabColor).get_value_tuple()
for col in colors]
diffs = delta_e_cie2000(target, np.array(colors_lab))
difftable = dict(list(zip(colors,diffs)))
diff_im = np.zeros((im.height,im.width))
im_arr_idx = im_arr.dot(np.array([1, 256, 65536])) #im_arr[:,:,0] + (im_arr[:,:,1]*256) + (im_arr[:,:,2]*256*256)
for col,diff in difftable.items():
#print col
#diff_im[(im_arr[:,:,0]==col[0])&(im_arr[:,:,1]==col[1])&(im_arr[:,:,2]==col[2])] = diff # 3 lookup is slow
#wherecolor = (im_arr==col).all(axis=2)
#diff_im[wherecolor] = diff
wherecolor = im_arr_idx == (col[0] + (col[1]*256) + (col[2]*256*256))
diff_im[wherecolor] = diff
return diff_im
def color_differences(colors):
colors_lab = [convert_color(sRGBColor(*col, is_upscaled=True), LabColor).get_value_tuple()
for col in colors]
pairdiffs = dict()
for lcol1,group in itertools.groupby(itertools.combinations(colors_lab, 2), key=lambda pair: pair[0]):
group = list(group)
#print lcol1, len(group), group[0]
_,lcol2s = zip(*group)
diffs = delta_e_cie2000(lcol1, np.array(lcol2s))
col1 = colors[colors_lab.index(lcol1)]
for lcol2,diff in zip(lcol2s, diffs):
col2 = colors[colors_lab.index(lcol2)]
pair = tuple(sorted([col1,col2]))
pairdiffs[pair] = diff
#print len(colors_lab)*len(colors_lab), pairdiffs.shape
#PIL.Image.fromarray(pairdiffs).show()
#fsdfs
return pairdiffs
def group_colors(colors, thresh=10, categories=None):
pairdiffs = color_differences(colors)
diffgroups = dict([(c,[]) for c in categories]) if categories else dict()
for c in colors:
# find closest existing group that is sufficiently similar
dists = [(gc,pairdiffs[tuple(sorted([c,gc]))])
for gc in diffgroups.keys()]
similar = [(gc,dist) for gc,dist in dists if c != gc and dist < thresh]
if similar:
# find the most similar group color
nearest = sorted(similar, key=lambda x: x[1])[0][0]
diffgroups[nearest].append(c)
# update that group key as the new central color (lowest avg dist to group members)
            gdists = [(gc1,[pairdiffs[tuple(sorted([gc1,gc2]))] for gc2 in diffgroups[nearest]]) for gc1 in diffgroups[nearest]]
central = sorted(gdists, key=lambda gc_gds: sum(gc_gds[1])/float(len(gc_gds[1])))[0][0]
diffgroups[central] = diffgroups.pop(nearest)
else:
# create new group
diffgroups[c] = [c]
# set each group key to avg color of group members
## for c,gcols in diffgroups.items():
## rs,gs,bs = zip(*gcols)
## avgcol = np.mean(rs), np.mean(gs), np.mean(bs)
## diffgroups[avgcol] = diffgroups.pop(c)
return diffgroups
def view_colors(colors):
import pyagg
c=pyagg.Canvas(1000,200)
c.percent_space()
x = 2
for i,g in enumerate(colors):
#print(i, g)
x += 2
c.draw_line([(x,0),(x,100)], fillcolor=g, fillsize=2)
c.get_image().show()
def quantize(im):
quant = im.convert('P', palette=PIL.Image.ADAPTIVE, colors=256).convert('RGB')
return quant
def mask_image(im, poly, invert=False):
mask = np.zeros((im.size[1], im.size[0]))
cv2.drawContours(mask, [poly], -1, 255, -1)
#PIL.Image.fromarray(mask).show()
new_im = np.array(im)
trueval = 255 if invert else 0
new_im[mask==trueval] = 0
new_im = PIL.Image.fromarray(new_im)
return new_im
def edge_filter(im):
# preprocess
#im = im.filter(ImageFilter.BoxBlur(3))
#im = im.filter(ImageFilter.EDGE_ENHANCE())
#im.show()
# lab values
lab = rgb_to_lab(im)
l,a,b = lab.split()
# find edges of avg
## avg = (np.array(l) + np.array(a) + np.array(b)) / 3.0
## avg = PIL.Image.fromarray(avg.astype(np.uint8))
## edges = avg.filter(ImageFilter.FIND_EDGES())
# find max edge
edges_l = l.filter(ImageFilter.FIND_EDGES())
edges_a = a.filter(ImageFilter.FIND_EDGES())
edges_b = b.filter(ImageFilter.FIND_EDGES())
edges = ImageMath.eval('convert(max(max(l,a),b), "L")', {'l':edges_l, 'a':edges_a, 'b':edges_b})
# threshold
edges = edges.point(lambda v: 255 if v >= 255/2 else 0)
#edges.show()
# remove outer edge?
edges = ImageOps.crop(edges, 1)
edges = ImageOps.expand(edges, 1, fill=0)
return edges
def color_changes(im, neighbours=1):
im_arr = np.array(im)
# quantize and convert colors
quant = im.convert('P', palette=PIL.Image.ADAPTIVE, colors=256)
quant_arr = np.array(quant)
qcounts,qcolors = zip(*sorted(quant.getcolors(256), key=lambda x: x[0]))
counts,colors = zip(*sorted(quant.convert('RGB').getcolors(256), key=lambda x: x[0]))
# calc diffs
pairdiffs = color_differences(colors)
pairdiffs_arr = np.zeros((256,256))
for k,v in list(pairdiffs.items()):
qx,qy = (qcolors[colors.index(k[0])], qcolors[colors.index(k[1])])
pairdiffs_arr[qx,qy] = v
#PIL.Image.fromarray(pairdiffs_arr*50).show()
# detect color edges
orig_flat = np.array(quant).flatten()
diff_im_flat = np.zeros(quant.size).flatten()
buff = (neighbours,neighbours)
for xoff in range(-buff[0], buff[0]+1, 1):
for yoff in range(-buff[1], buff[1]+1, 1):
if xoff == yoff == 0: continue
off_flat = np.roll(quant, (xoff,yoff), (0,1)).flatten()
#diff_im_flat = diff_im_flat + pairdiffs_arr[orig_flat,off_flat]
diff_im_flat = np.maximum(diff_im_flat, pairdiffs_arr[orig_flat,off_flat])
#diff_im_flat = diff_im_flat / 8.0
diff_im = diff_im_flat.reshape((im.height, im.width))
return diff_im
def close_edge_gaps(im):
grow = ImageMorph.MorphOp(op_name='dilation4')
changes,im = grow.apply(im)
#changes,im = grow.apply(im)
shrink = ImageMorph.MorphOp(op_name='erosion4')
changes,im = shrink.apply(im)
#changes,im = shrink.apply(im)
return im
def detect_map_outline(im):
# assumes image shows grayscale edges
edges = im
# find contours
contours,_ = cv2.findContours(np.array(edges), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
# find map frame as contour larger than third of the image
largest = sorted(contours, key=lambda cnt: cv2.contourArea(cnt))[-1]
w,h = im.size
im_area = w*h
if cv2.contourArea(largest) > im_area/(3.0**2):
# maybe also require that contour doesnt touch edges of the image
xbuf,ybuf = 2,2 #w/100.0, h/100.0 # 1 percent
        if largest[:,:,0].min() > 0+xbuf and largest[:,:,0].max() < w-xbuf \
           and largest[:,:,1].min() > 0+ybuf and largest[:,:,1].max() < h-ybuf:
return largest
def detect_boxes(im):
# detect boxes from contours
# https://stackoverflow.com/questions/11424002/how-to-detect-simple-geometric-shapes-using-opencv
# https://stackoverflow.com/questions/46641101/opencv-detecting-a-circle-using-findcontours
# see esp: https://www.learnopencv.com/blob-detection-using-opencv-python-c/
contours,_ = cv2.findContours(np.array(im), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
#im_arr_draw = cv2.cvtColor(np.array(im), cv2.COLOR_GRAY2RGB)
#from random import randrange
boxes = []
im_area = im.size[0] * im.size[1]
for cnt in contours: #sorted(contours,key=lambda c: cv2.contourArea(c), reverse=True)[:10]:
#cv2.drawContours(im_arr_draw, [cnt], -1, (randrange(0,255),randrange(0,255),randrange(0,255)), -1) #-1, (0,255,0), 1)
epsilon = 5 # pixels # 0.01*cv2.arcLength(cnt,True)
approx = cv2.approxPolyDP(cnt,epsilon,True)
if len(approx) == 4:
# less than half the size (a quarter corner), and bigger than one-sixteenth (half of half of half of a quarter corner)
if (im_area/(2.0**2)) > cv2.contourArea(cnt) > (im_area/(16.0**2)):
boxes.append(cnt) #approx
#PIL.Image.fromarray(im_arr_draw).show()
return boxes
##class Quad:
## def __init__(self, x, y, w, h):
## self.x = x
## self.y = y
## self.w = w
## self.h = h
## self.children = []
##
## def __repr__(self):
## return 'Quad({}, {}, {}, {}, children={})'.format(self.x, self.y, self.w, self.h, len(self.children))
##
## def bbox(self):
## x1,y1 = self.x - self.w/2.0, self.y - self.h/2.0
## x2,y2 = x1+self.w, y1+self.h
## return x1,y1,x2,y2
##
## def split(self):
## if self.children:
## for subq in self.children:
## subq.split()
## else:
## halfwidth = self.w/2.0
## halfheight = self.h/2.0
## quad = Quad(self.x-halfwidth/2.0, self.y-halfheight/2.0,
## halfwidth, halfheight)
## self.children.append(quad)
## quad = Quad(self.x+halfwidth/2.0, self.y-halfheight/2.0,
## halfwidth, halfheight)
## self.children.append(quad)
## quad = Quad(self.x+halfwidth/2.0, self.y+halfheight/2.0,
## halfwidth, halfheight)
## self.children.append(quad)
## quad = Quad(self.x-halfwidth/2.0, self.y+halfheight/2.0,
## halfwidth, halfheight)
## self.children.append(quad)
##
## def sample(self):
## if self.children:
## q = self.children.pop(0)
## if q.children:
## # if child quad has children
## subq = q.sample() # sample the quad
## self.children.append(q) # send to back of the line
## return subq
## else:
## return q
class Quad:
def __init__(self, x, y, w, h):
'''Breadth-first quad tree that infinitely samples each quad to get a spatially balanced sampling with finer and finer detail,
and splits subquads when necessary'''
self.x = x
self.y = y
self.w = w
self.h = h
self.children = []
def __repr__(self):
return 'Quad({}, {}, {}, {}, children={})'.format(self.x, self.y, self.w, self.h, len(self.children))
def bbox(self):
x1,y1 = self.x - self.w/2.0, self.y - self.h/2.0
x2,y2 = x1+self.w, y1+self.h
return x1,y1,x2,y2
def split(self):
if self.children:
# already has children, use existing
pass
else:
halfwidth = self.w/2.0
halfheight = self.h/2.0
# upper left
quad = Quad(self.x-halfwidth/2.0, self.y-halfheight/2.0,
halfwidth, halfheight)
self.children.append(quad)
# lower right
quad = Quad(self.x+halfwidth/2.0, self.y+halfheight/2.0,
halfwidth, halfheight)
self.children.append(quad)
# upper right
quad = Quad(self.x+halfwidth/2.0, self.y-halfheight/2.0,
halfwidth, halfheight)
self.children.append(quad)
# lower left
quad = Quad(self.x-halfwidth/2.0, self.y+halfheight/2.0,
halfwidth, halfheight)
self.children.append(quad)
def sample(self):
'''Samples this quad, either as itself or by further sampling the next in line of its child quads.'''
# if doesnt have any children, split and return self
if not self.children:
self.split()
return self
# get next quad
q = self.children.pop(0)
samp = q.sample()
self.children.append(q) # send to back of the line
return samp
#####################
def image_segments(im):
'''This is the main user function'''
# resize to smaller resolution
w,h = im.size
dw,dh = 1000,1000
ratio = max(w/float(dw), h/float(dh))
im_small = im.resize((int(w/ratio), int(h/ratio)), PIL.Image.NEAREST)
#im_small.show()
# filter to edges
#edges = edge_filter(im_small)
# OR edges from 3x3 color changes
changes = color_changes(im_small, neighbours=1)
thresh = 10
changes[changes < thresh] = 0
changes[changes >= thresh] = 255
edges = PIL.Image.fromarray(changes.astype(np.uint8))
# grow to close small edge holes
#edges = close_edge_gaps(edges)
# NOTE: masking should be done on each sample image
# find map outline
#edges.show()
map_outline = detect_map_outline(edges)
if map_outline is not None:
# convert back to original image coords
map_outline = (map_outline * ratio).astype(np.uint64)
# find boxes
#edges = edges.point(lambda v: 255-v) # inverse, since we expect the inside of boxes to be cleaner/less noise?
#edges.show()
boxes = detect_boxes(edges)
# convert back to original image coords
boxes = [(box*ratio).astype(np.uint64) for box in boxes]
return map_outline, boxes
def sample_quads(bbox, samplesize):
    '''Samples quads of samplesize from a bbox region in a rotating fashion, until all parts of the bbox region have been sampled'''
x1,y1,x2,y2 = bbox
w = abs(x2-x1)
h = abs(y2-y1)
sw,sh = samplesize
# divide image into quad tiles
cx = (x1+x2)/2.0
cy = (y1+y2)/2.0
quads = Quad(cx, cy, w, h)
quads.split()
# sample quads in a rotating fashion until all parts of bbox has been covered by the samplesize
#xmin,ymin,xmax,ymax = min(x1,x2),min(y1,y2),max(x1,x2),max(y1,y2)
#sxmin,symin,sxmax,symax = cx,cy,cx,cy
intersects_centroid = 0
while True:
# sample 4 quads
for _ in range(4):
q = quads.sample()
sq = Quad(q.x, q.y, sw, sh)
yield sq
# check if sample quad intersects the region centroid
_sxmin,_symin,_sxmax,_symax = sq.bbox()
if not (_sxmax < cx or _sxmin > cx or _symax < cy or _symin > cy):
intersects_centroid += 1
        # once 4 sample quads have touched the region centroid, the entire region has been covered
if intersects_centroid >= 4:
break
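# A minimal sketch of sample_quads (hypothetical region and sample size):
# 30x30 windows are yielded in a rotating, coarse-to-fine order until four of
# them have covered the centroid of the 100x100 region.
if __name__ == "__main__":
    for sample in sample_quads((0, 0, 100, 100), samplesize=(30, 30)):
        print(sample)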
| 2.78125
| 3
|
src/tests/unit/test_mail_models.py
|
uescher/byro
| 0
|
12782380
|
<gh_stars>0
import pytest
from byro.mails.models import EMail
from byro.mails.send import SendMailException
@pytest.mark.django_db
@pytest.mark.parametrize('skip_queue', (True, False))
def test_template_to_mail(mail_template, skip_queue):
count = EMail.objects.count()
mail_template.to_mail('test@localhost', skip_queue=skip_queue)
assert EMail.objects.count() == count + 1
obj = EMail.objects.last()
assert obj.subject == mail_template.subject
assert obj.text == mail_template.text
assert obj.to == 'test@localhost'
assert (obj.sent is None) is not skip_queue
assert str(mail_template) == mail_template.subject
@pytest.mark.django_db
def test_template_to_mail_fail(mail_template):
mail_template.subject = '{incorrect_key}'
count = EMail.objects.count()
with pytest.raises(SendMailException):
mail_template.to_mail('test@localhost')
assert EMail.objects.count() == count
@pytest.mark.django_db
def test_mail_cannot_send_sent_mail(sent_email):
with pytest.raises(Exception):
sent_email.send()
@pytest.mark.django_db
def test_mail_can_copy_to_draft(sent_email):
count = EMail.objects.count()
assert sent_email.sent is not None
new_mail = sent_email.copy_to_draft()
assert new_mail.subject == sent_email.subject
assert new_mail.to == sent_email.to
assert new_mail.sent is None
assert new_mail.pk != sent_email.pk
assert EMail.objects.count() == count + 1
@pytest.mark.django_db
def test_mail_can_send_regular_mail(email):
assert email.sent is None
email.send()
email.refresh_from_db()
assert email.sent is not None
@pytest.mark.django_db
def test_attachment_ids(email):
assert email.attachment_ids == []
| 2.140625
| 2
|
02.NeoRegionLoader.py
|
buzzfactoryFE/AwsGraphDB
| 8
|
12782381
|
import os, csv, json
from py2neo import Graph
with open("./AWSNEoConfig.json") as c:
conf = json.load(c)
c.close
graph = Graph(conf[0]["NeoParametes"][0]["Url"], auth=(conf[0]["NeoParametes"][0]["Username"], conf[0]["NeoParametes"][0]["Password"]))
regions ="US East (Ohio),us-east-2\n\
US East (N. Virginia),us-east-1\n\
US West (N. California),us-west-1\n\
US West (Oregon),us-west-2\n\
Asia Pacific (Hong Kong),ap-east-1\n\
Asia Pacific (Mumbai),ap-south-1\n\
Asia Pacific (Osaka-Local),ap-northeast-3\n\
Asia Pacific (Seoul),ap-northeast-2\n\
Asia Pacific (Singapore),ap-southeast-1\n\
Asia Pacific (Sydney),ap-southeast-2\n\
Asia Pacific (Tokyo),ap-northeast-1\n\
Canada (Central),ca-central-1\n\
Europe (Frankfurt),eu-central-1\n\
Europe (Ireland),eu-west-1\n\
Europe (London),eu-west-2\n\
Europe (Paris),eu-west-3\n\
Europe (Stockholm),eu-north-1\n\
Middle East (Bahrain),me-south-1\n\
South America (São Paulo),sa-east-1"
def NeoInsertRegions(Name, id):
query = 'MERGE (: AWSRegion { Name: "'+Name+'" , Id : "'+id+'" })'
print(query)
result = graph.run(query).to_table()
print(result)
#print(regions)
reader = csv.reader(regions.split('\n'), delimiter=',')
for row in reader:
NeoInsertRegions(row[0],row[1])
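# A parameterized alternative (a sketch; py2neo's Graph.run accepts query
# parameters, which avoids quoting problems with names like "São Paulo"):
#
#   graph.run('MERGE (:AWSRegion { Name: $name, Id: $id })', name=Name, id=id)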
| 2.84375
| 3
|
background/send_fake_charge_message.py
|
B-ROY/TESTGIT
| 2
|
12782382
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
import os
import sys
PROJECT_ROOT = os.path.realpath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(PROJECT_ROOT, os.pardir))
sys.path.append(os.path.abspath(os.path.join(os.path.abspath(__file__), '../')))
sys.path.append(os.path.abspath(os.path.join(os.path.abspath(__file__), '../push_util')))
sys.path.append(os.path.abspath(os.path.join(os.path.abspath(__file__), '../../')))
sys.path.append(os.path.abspath(os.path.join(os.path.abspath(__file__), '../../..')))
sys.path.append(os.path.abspath(os.path.join(os.path.abspath(__file__), '../../../..')))
from base.settings import load_django_settings
load_django_settings('live_video.base', 'live_video.app')
from app.util.messageque.msgsender import MessageSender
from app.customer.models.account import TradeBalanceOrder
import datetime
import random
def push_fake_charge_message(num):
now_time = datetime.datetime.now()
twelve_hours_ago = now_time - datetime.timedelta(hours=12)
balance_orders = TradeBalanceOrder.objects.filter(status=1, filled_time__lte=now_time, filled_time__gte=twelve_hours_ago)
count = balance_orders.count()
if num > count:
num = count
    print(count)
indexes = random.sample(range(0, count), num)
for i in indexes:
order = balance_orders[i]
MessageSender.send_charge_info_message(order.user.sid, order.user.nickname, order.money)
if __name__ == '__main__':
num = random.randint(1, 4)
push_fake_charge_message(num)
| 1.992188
| 2
|
python/example_code/lookoutvision/csv_to_manifest.py
|
grjan7/aws-doc-sdk-examples
| 1
|
12782383
|
<filename>python/example_code/lookoutvision/csv_to_manifest.py
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Purpose
Shows how to create an Amazon Lookout for Vision manifest file from a CSV file.
The CSV file format is image location,anomaly classification (normal or anomaly)
For example:
s3://s3bucket/circuitboard/train/anomaly/train_11.jpg,anomaly
s3://s3bucket/circuitboard/train/normal/train_1.jpg,normal
If necessary, use the bucket argument to specify the Amazon S3 bucket folder for the images.
More information: https://docs.aws.amazon.com/lookout-for-vision/latest/developer-guide/ex-csv-manifest.html
"""
# snippet-start:[python.example_code.lookoutvision.Scenario_CSVtoManifest]
from datetime import datetime, timezone
import argparse
import logging
import csv
import os
import json
logger = logging.getLogger(__name__)
def check_errors(csv_file):
"""
Checks for duplicate images and incorrect classifications in a CSV file.
If duplicate images or invalid anomaly assignments are found, an errors CSV file
    and deduplicated CSV file are created. Only the first occurrence of a
    duplicate is kept in the deduplicated file; subsequent duplicates are recorded in the errors file.
:param csv_file: The source CSV file
:return: True if errors or duplicates are found, otherwise false.
"""
logger.info("Checking %s.", csv_file)
errors_found = False
errors_file = f"{os.path.splitext(csv_file)[0]}_errors.csv"
deduplicated_file = f"{os.path.splitext(csv_file)[0]}_deduplicated.csv"
with open(csv_file, 'r', encoding="UTF-8") as input_file,\
open(deduplicated_file, 'w', encoding="UTF-8") as dedup,\
open(errors_file, 'w', encoding="UTF-8") as errors:
reader = csv.reader(input_file, delimiter=',')
dedup_writer = csv.writer(dedup)
error_writer = csv.writer(errors)
line = 1
entries = set()
for row in reader:
# Skip empty lines.
if not ''.join(row).strip():
continue
# Record any incorrect classifications.
if not row[1].lower() == "normal" and not row[1].lower() == "anomaly":
error_writer.writerow(
[line, row[0], row[1], "INVALID_CLASSIFICATION"])
errors_found = True
# Write first image entry to dedup file and record duplicates.
key = row[0]
if key not in entries:
dedup_writer.writerow(row)
entries.add(key)
else:
error_writer.writerow([line, row[0], row[1], "DUPLICATE"])
errors_found = True
line += 1
if errors_found:
logger.info("Errors found check %s.", errors_file)
else:
os.remove(errors_file)
os.remove(deduplicated_file)
return errors_found
def create_manifest_file(csv_file, manifest_file, s3_path):
"""
Read a CSV file and create an Amazon Lookout for Vision classification manifest file.
:param csv_file: The source CSV file.
:param manifest_file: The name of the manifest file to create.
:param s3_path: The Amazon S3 path to the folder that contains the images.
"""
logger.info("Processing CSV file %s.", csv_file)
image_count = 0
anomalous_count = 0
with open(csv_file, newline='', encoding="UTF-8") as csvfile,\
open(manifest_file, "w", encoding="UTF-8") as output_file:
image_classifications = csv.reader(
csvfile, delimiter=',', quotechar='|')
# Process each row (image) in the CSV file.
for row in image_classifications:
# Skip empty lines.
if not ''.join(row).strip():
continue
source_ref = str(s3_path) + row[0]
classification = 0
if row[1].lower() == 'anomaly':
classification = 1
anomalous_count += 1
# Create the JSON line.
json_line = {}
json_line['source-ref'] = source_ref
json_line['anomaly-label'] = str(classification)
metadata = {}
metadata['confidence'] = 1
metadata['job-name'] = "labeling-job/anomaly-classification"
metadata['class-name'] = row[1]
metadata['human-annotated'] = "yes"
metadata['creation-date'] = datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%S.%f')
metadata['type'] = "groundtruth/image-classification"
json_line['anomaly-label-metadata'] = metadata
output_file.write(json.dumps(json_line))
output_file.write('\n')
image_count += 1
logger.info("Finished creating manifest file %s.\n"
"Images: %s\nAnomalous: %s",
manifest_file,
image_count,
anomalous_count)
return image_count, anomalous_count
def add_arguments(parser):
"""
Add command line arguments to the parser.
:param parser: The command line parser.
"""
parser.add_argument(
"csv_file", help="The CSV file that you want to process."
)
parser.add_argument(
"--s3_path", help="The Amazon S3 bucket and folder path for the images."
" If not supplied, column 1 is assumed to include the Amazon S3 path.", required=False
)
def main():
logging.basicConfig(level=logging.INFO,
format="%(levelname)s: %(message)s")
try:
# Get command line arguments.
parser = argparse.ArgumentParser(usage=argparse.SUPPRESS)
add_arguments(parser)
args = parser.parse_args()
s3_path = args.s3_path
if s3_path is None:
s3_path = ""
csv_file = args.csv_file
csv_file_no_extension = os.path.splitext(csv_file)[0]
manifest_file = csv_file_no_extension + '.manifest'
# Create manifest file if there are no duplicate images.
if check_errors(csv_file):
print(f"Issues found. Use {csv_file_no_extension}_errors.csv "\
"to view duplicates and errors.")
print(f"{csv_file}_deduplicated.csv contains the first"\
"occurrence of a duplicate.\n"
"Update as necessary with the correct information.")
print(f"Re-run the script with {csv_file_no_extension}_deduplicated.csv")
else:
print('No duplicates found. Creating manifest file.')
image_count, anomalous_count = create_manifest_file(csv_file, manifest_file, s3_path)
print(f"Finished creating manifest file: {manifest_file} \n")
normal_count = image_count-anomalous_count
print(f"Images processed: {image_count}")
print(f"Normal: {normal_count}")
print(f"Anomalous: {anomalous_count}")
except FileNotFoundError as err:
logger.exception("File not found.:%s", err)
print(f"File not found: {err}. Check your input CSV file.")
if __name__ == "__main__":
main()
# snippet-end:[python.example_code.lookoutvision.Scenario_CSVtoManifest]
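# A minimal sketch of the transformation (hypothetical bucket/paths):
#
# input CSV row:
#   s3bucket/circuitboard/train/anomaly/train_11.jpg,anomaly
# output manifest line (metadata fields elided):
#   {"source-ref": "s3://s3bucket/circuitboard/train/anomaly/train_11.jpg",
#    "anomaly-label": "1", "anomaly-label-metadata": {...}}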
| 2.984375
| 3
|
tests/test_hashutils.py
|
thurask/bbarchivist
| 7
|
12782384
|
#!/usr/bin/env python3
"""Test the hashutils module."""
import hashlib
import os
from shutil import copyfile, rmtree
import pytest
from bbarchivist import hashutils as bh
try:
import unittest.mock as mock
except ImportError:
import mock
__author__ = "Thurask"
__license__ = "WTFPL v2"
__copyright__ = "2015-2019 Thurask"
def setup_module(module):
"""
Create necessary files.
"""
if not os.path.exists("temp_hashutils"):
os.mkdir("temp_hashutils")
os.chdir("temp_hashutils")
with open("tempfile.txt", "w") as targetfile:
targetfile.write("Jackdaws love my big sphinx of quartz")
os.mkdir("skipme")
def teardown_module(module):
"""
Delete necessary files.
"""
os.chdir("..")
rmtree("temp_hashutils", ignore_errors=True)
class TestClassHashutils:
"""
Test hash utilities.
"""
def test_crc32(self):
"""
Test CRC32 checksum.
"""
assert bh.zlib_hash("tempfile.txt", "crc32") == "ed5d3f26"
def test_adler32(self):
"""
Test Adler32 checksum.
"""
assert bh.zlib_hash("tempfile.txt", "adler32") == "02470dcd"
def test_sha0(self):
"""
Test SHA-0 hash.
"""
if "sha" not in hashlib.algorithms_available:
pass
else:
assert bh.ssl_hash("tempfile.txt", "sha") == "d26b25f6170daf49e31e68bf57f6164815c368d8"
def test_sha0_unavail(self, capsys):
"""
Test SHA-0 hash, if not available.
"""
with mock.patch("hashlib.new", mock.MagicMock(side_effect=ValueError)):
bh.ssl_hash("tempfile.txt", "sha")
assert "SHA HASH FAILED" in capsys.readouterr()[0]
def test_sha1(self):
"""
Test SHA-1 hash.
"""
assert bh.hashlib_hash("tempfile.txt", hashlib.sha1()) == "71dc7ce8f27c11b792be3f169ecf985865e276d0"
def test_sha224(self):
"""
Test SHA-224 hash.
"""
assert bh.hashlib_hash("tempfile.txt", hashlib.sha224()) == "7bcd7b77f63633bf0f7db181106f08eb630a58c521b109be1cc4a404"
def test_sha256(self):
"""
Test SHA-256 hash.
"""
assert bh.hashlib_hash("tempfile.txt", hashlib.sha256()) == "f118871c45171d5fe4e9049980959e033eeeabcfa12046c243fda310580e8a0b"
def test_sha384(self):
"""
Test SHA-384 hash.
"""
assert bh.hashlib_hash("tempfile.txt", hashlib.sha384()) == "76620873c0d27873c137b082425c6e87e3d601c4b19241a1f2222f7f700a2fe8d3c648b26f62325a411cb020bff527be"
def test_sha512(self):
"""
Test SHA-512 hash.
"""
assert bh.hashlib_hash("tempfile.txt", hashlib.sha512()) == "b66a5e8aa9b9705748c2ee585b0e1a3a41288d2dafc3be2db12fa89d2f2a3e14f9dec11de4ba865bb51eaa6c2cfeb294139455e34da7d827a19504b0906c01c1"
def test_md4(self):
"""
Test MD4 hash.
"""
if "md4" not in hashlib.algorithms_available:
pass
else:
assert bh.ssl_hash("tempfile.txt", "md4") == "df26ada1a895f94e1f1257fad984e809"
def test_md4_unavail(self, capsys):
"""
Test MD4 hash, if not available.
"""
with mock.patch("hashlib.new", mock.MagicMock(side_effect=ValueError)):
bh.ssl_hash("tempfile.txt", "md4")
assert "MD4 HASH FAILED" in capsys.readouterr()[0]
def test_md5(self):
"""
Test MD5 hash.
"""
assert bh.hashlib_hash("tempfile.txt", hashlib.md5()) == "822e1187fde7c8d55aff8cc688701650"
def test_ripemd160(self):
"""
Test RIPEMD160 hash.
"""
if "ripemd160" not in hashlib.algorithms_available:
pass
else:
assert bh.ssl_hash("tempfile.txt", "ripemd160") == "f3e191024c33768e2589e2efca53d55f4e4945ee"
def test_ripemd160_unavail(self, capsys):
"""
Test RIPEMD160 hash, if not available.
"""
with mock.patch("hashlib.new", mock.MagicMock(side_effect=ValueError)):
bh.ssl_hash("tempfile.txt", "ripemd160")
assert "RIPEMD160 HASH FAILED" in capsys.readouterr()[0]
def test_sha3224(self):
"""
Test SHA3-224 hash.
"""
if "sha3_224" not in hashlib.algorithms_available:
pass
else:
assert bh.hashlib_hash("tempfile.txt", hashlib.sha3_224()) == "93cc89107b9bd807dead1ae95ce8c4b0f9b8acb2a3eef704e2fad109"
def test_sha3256(self):
"""
Test SHA3-256 hash.
"""
if "sha3_256" not in hashlib.algorithms_available:
pass
else:
assert bh.hashlib_hash("tempfile.txt", hashlib.sha3_256()) == "a9797b62d8b3573c9134406f42e601219e086150e6c2f32c90c5cee0149b6877"
def test_sha3384(self):
"""
Test SHA3-384 hash.
"""
if "sha3_384" not in hashlib.algorithms_available:
pass
else:
assert bh.hashlib_hash("tempfile.txt", hashlib.sha3_384()) == "1ae83352968f601e16eff076f5967dd356edce4c4c5629e3939123b7507efbaafd1dabc1e459f8e47f7a05df718e5927"
def test_sha3512(self):
"""
Test SHA3-512 hash.
"""
if "sha3_512" not in hashlib.algorithms_available:
pass
else:
assert bh.hashlib_hash("tempfile.txt", hashlib.sha3_512()) == "2ca12b585486d0f775f9fd438a73525b37b1214bc36a8b0ae611d0f1261e8d32b47b923b406c46cc80cc178598d41d42abee3eae5b1c23164b817342e22580e2"
def test_whirlpool(self):
"""
Test Whirlpool hash.
"""
if "whirlpool" not in hashlib.algorithms_available:
pass
else:
assert bh.ssl_hash("tempfile.txt", "whirlpool") == "9835d12f3cb3ea3934635e4a7cc918e489379ed69d894ebc2c09bbf99fe72567bfd26c919ad666e170752abfc4b8c37b376f5102f9e5de59af2b65efc2e01293"
def test_whirlpool_unavail(self, capsys):
"""
Test Whirlpool hash, if not available.
"""
with mock.patch("hashlib.new", mock.MagicMock(side_effect=ValueError)):
bh.ssl_hash("tempfile.txt", "whirlpool")
assert "WHIRLPOOL HASH FAILED" in capsys.readouterr()[0]
def test_escreens(self):
"""
Test EScreens code generation.
"""
pin = "acdcacdc"
app = "10.3.2.500"
uptime = "69696969"
assert bh.calculate_escreens(pin, app, uptime, duration=2) == "E4A25067"
def test_verifier(self):
"""
Test batch hashing.
"""
confload = {}
        confload['adler32'] = True
        confload['crc32'] = True
        confload['md4'] = "md4" in hashlib.algorithms_available
        confload['md5'] = True
        confload['sha0'] = "sha" in hashlib.algorithms_available
        confload['sha1'] = True
        confload['sha224'] = True
        confload['sha256'] = True
        confload['sha384'] = True
        confload['sha512'] = True
        confload['ripemd160'] = "ripemd160" in hashlib.algorithms_available
        confload['whirlpool'] = "whirlpool" in hashlib.algorithms_available
        confload['sha3224'] = "sha3_224" in hashlib.algorithms_available
        confload['sha3256'] = "sha3_256" in hashlib.algorithms_available
        confload['sha3384'] = "sha3_384" in hashlib.algorithms_available
        confload['sha3512'] = "sha3_512" in hashlib.algorithms_available
        confload['blocksize'] = "16777216"
print(confload)
with mock.patch('bbarchivist.hashutils.verifier_config_loader', mock.MagicMock(return_value=confload)):
bh.verifier(os.getcwd())
stocklines = [
b"ADLER32",
b"02470DCD tempfile.txt",
b"CRC32",
b"ED5D3F26 tempfile.txt"]
if confload["md4"]:
stocklines.extend([
b"MD4",
b"DF26ADA1A895F94E1F1257FAD984E809 tempfile.txt"])
stocklines.extend([
b"MD5",
b"822E1187FDE7C8D55AFF8CC688701650 tempfile.txt"])
if confload["sha0"]:
stocklines.extend([
b"SHA0",
b"D26B25F6170DAF49E31E68BF57F6164815C368D8 tempfile.txt"])
stocklines.extend([
b"SHA1",
b"71DC7CE8F27C11B792BE3F169ECF985865E276D0 tempfile.txt",
b"SHA224",
b"7BCD7B77F63633BF0F7DB181106F08EB630A58C521B109BE1CC4A404 tempfile.txt",
b"SHA256",
b"F118871C45171D5FE4E9049980959E033EEEABCFA12046C243FDA310580E8A0B tempfile.txt",
b"SHA384",
b"76620873C0D27873C137B082425C6E87E3D601C4B19241A1F2222F7F700A2FE8D3C648B26F62325A411CB020BFF527BE tempfile.txt",
b"SHA512",
b"B66A5E8AA9B9705748C2EE585B0E1A3A41288D2DAFC3BE2DB12FA89D2F2A3E14F9DEC11DE4BA865BB51EAA6C2CFEB294139455E34DA7D827A19504B0906C01C1 tempfile.txt"])
if confload["ripemd160"]:
stocklines.extend([
b"RIPEMD160",
b"F3E191024C33768E2589E2EFCA53D55F4E4945EE tempfile.txt"])
if confload["whirlpool"]:
stocklines.extend([
b"WHIRLPOOL",
b"9835D12F3CB3EA3934635E4A7CC918E489379ED69D894EBC2C09BBF99FE72567BFD26C919AD666E170752ABFC4B8C37B376F5102F9E5DE59AF2B65EFC2E01293 tempfile.txt"])
if confload["sha3224"]:
stocklines.extend([
b"SHA3224",
b"93CC89107B9BD807DEAD1AE95CE8C4B0F9B8ACB2A3EEF704E2FAD109 tempfile.txt"])
if confload["sha3256"]:
stocklines.extend([
b"SHA3256",
b"A9797B62D8B3573C9134406F42E601219E086150E6C2F32C90C5CEE0149B6877 tempfile.txt"])
if confload["sha3384"]:
stocklines.extend([
b"SHA3384",
b"1AE83352968F601E16EFF076F5967DD356EDCE4C4C5629E3939123B7507EFBAAFD1DABC1E459F8E47F7A05DF718E5927 tempfile.txt"])
if confload["sha3512"]:
stocklines.extend([
b"SHA3512",
b"2CA12B585486D0F775F9FD438A73525B37B1214BC36A8B0AE611D0F1261E8D32B47B923B406C46CC80CC178598D41D42ABEE3EAE5B1C23164B817342E22580E2 tempfile.txt"])
stocklines2 = []
for item in stocklines:
item2 = item.strip()
item2 = item2.replace(b'\r\n', b'')
item2 = item2.replace(b'\n', b'')
item2 = item2.replace(b'\r', b'')
stocklines2.append(item2)
filename = "tempfile.txt.cksum"
with open(filename, "rb") as checksumfile:
content = checksumfile.read().splitlines()
content2 = []
for item in content:
item2 = item.strip()
item2 = item2.replace(b'\r\n', b'')
item2 = item2.replace(b'\n', b'')
item2 = item2.replace(b'\r', b'')
content2.append(item2)
for idx, value in enumerate(content2):
assert stocklines2[idx] == value
def test_verifier_multiple(self):
"""
Test batch hashing, but with multiple files.
"""
for i in range(17):
copyfile("tempfile.txt", "tempfile{0}.txt".format(i))
self.test_verifier()
def test_verifier_fail(self, capsys):
"""
Test batch hashing failure.
"""
with mock.patch("concurrent.futures.ThreadPoolExecutor.submit", mock.MagicMock(side_effect=Exception)):
with pytest.raises(SystemExit):
bh.verifier(os.getcwd())
assert "SOMETHING WENT WRONG" in capsys.readouterr()[0]
class TestClassHashutilsConfig:
"""
Test reading/writing configs with ConfigParser.
"""
@classmethod
def setup_class(cls):
"""
Create dictionaries for self.
"""
cls.hashdict = {}
cls.hashdict['crc32'] = False
cls.hashdict['adler32'] = False
cls.hashdict['sha1'] = True
cls.hashdict['sha224'] = False
cls.hashdict['sha256'] = True
cls.hashdict['sha384'] = False
cls.hashdict['sha512'] = False
cls.hashdict['sha3224'] = False
cls.hashdict['sha3256'] = False
cls.hashdict['sha3384'] = False
cls.hashdict['sha3512'] = False
cls.hashdict['md5'] = True
cls.hashdict['md4'] = False
cls.hashdict['ripemd160'] = False
cls.hashdict['whirlpool'] = False
cls.hashdict['sha0'] = False
cls.hashdict['blocksize'] = 16777216
def test_hash_loader(self):
"""
Test reading hash settings.
"""
try:
os.remove("bbarchivist.ini")
except (OSError, IOError):
pass
with mock.patch('bbarchivist.iniconfig.config_homepath', mock.MagicMock(return_value=os.getcwd())):
assert bh.verifier_config_loader() == self.hashdict
def test_hash_writer(self):
"""
Test writing hash settings.
"""
hash2 = self.hashdict
hash2['sha512'] = True
try:
os.remove("bbarchivist.ini")
except (OSError, IOError):
pass
with mock.patch('bbarchivist.iniconfig.config_homepath', mock.MagicMock(return_value=os.getcwd())):
with mock.patch('bbarchivist.hashutils.verifier_config_loader', mock.MagicMock(return_value=hash2)):
bh.verifier_config_writer()
assert bh.verifier_config_loader() == hash2
| 2.453125
| 2
|
scripts/random_plots.py
|
JLans/jl_spectra_2_structure
| 0
|
12782385
|
<filename>scripts/random_plots.py
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 17 19:33:16 2019
@author: lansf
"""
from __future__ import division
import os
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from jl_spectra_2_structure import IR_GEN
from jl_spectra_2_structure.plotting_tools import set_figure_settings
set_figure_settings('paper')
Downloads_folder = os.path.join(os.path.expanduser("~"),'Downloads')
CNCOconv = IR_GEN(ADSORBATE='CO',INCLUDED_BINDING_TYPES=['1,2,3,4']\
,TARGET='binding_type',NUM_TARGETS=4)
y_convCNCO = CNCOconv._get_probabilities(300000,4)
y_convGCN = CNCOconv._get_probabilities(300000,10)
G = gridspec.GridSpec(1, 2)
plt.figure(0,figsize=(3.5,2.2),dpi=300)
ax1 = plt.subplot(G[0,0])
plt.hist(y_convCNCO[:,0],bins=10,edgecolor='black')
plt.ylabel('Number of Samples')
plt.xlabel('Binding-type fraction')
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
ax1.text(0.85,0.9,'(a)',transform=ax1.transAxes)
ax2 = plt.subplot(G[0,1])
plt.hist(y_convGCN[:,0],bins=10,edgecolor='black')
#plt.ylabel('Number of Samples')
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.xlabel('GCN group fraction')
ax2.text(0.85,0.9,'(b)',transform=ax2.transAxes)
plt.savefig(os.path.join(Downloads_folder,'Sample_Distribution_paper.png'), format='png')
plt.close()
| 1.960938
| 2
|
src/dictionary.py
|
rzjfr/sayit
| 0
|
12782386
|
import os
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
class Dictionary:
"""audio related parts"""
def __init__(self, word, files_path='~/.sayit'):
self.word = word
self.user_agent = UserAgent().random # Random user_agent for http calls
self.files_path = os.path.expanduser(files_path)
        if not os.path.isdir(self.files_path):
            os.makedirs(self.files_path, exist_ok=True)
def show(self):
""" who the definition part of the word """
file_path = self._get_file(self.word)
if not os.path.exists(file_path):
raise Exception("File cannot be found for {}.".format(self.word))
with open(file_path, 'r') as html:
soup = BeautifulSoup(html, 'html.parser')
# Phonetics
phonetics = soup.find('div', class_="phons_br")
if phonetics:
print(phonetics.get_text())
# Origin
origin = soup.find('span', unbox="wordorigin")
if origin:
print(origin.get_text(" "))
# Definitions
senses = soup.find('ol', class_='senses_multiple')
if senses:
self._print_definitions(senses)
# Idioms
idioms = soup.find_all('span', class_='idm-g')
if idioms:
self._print_idioms(idioms)
def _get_file(self, word):
""" download if not already downloaded """
word_audio = word + "__gb" # to save the file in audio sharded format
word_path = "{}/oxford/uk/{}/{}/{}".format(self.files_path,
word_audio[0],
word_audio[0: 3],
word_audio[0: 5])
file_path = "{}/{}.html".format(word_path, word)
if not os.path.exists(file_path):
os.makedirs(word_path, exist_ok=True)
self._save_word(word, word_path)
return file_path
def _save_word(self, word, path):
""" download and save the binary file to the given path """
uri = self._create_uri(word)
headers = {'User-Agent': self.user_agent}
request = requests.get(uri, headers=headers)
if request.status_code != 200:
raise Exception("{} cannot be found on the server.".format(word))
with open('{}/{}.html'.format(path, word), 'wb') as f:
for chunk in request:
f.write(chunk)
return uri
def _create_uri(self, word):
"""create oxford learner dictionary mp3 uri"""
base = "https://www.oxfordlearnersdictionaries.com/definition/english/"
return base + word
def _print_definitions(self, html):
"""prints definitions"""
for i, sense in enumerate(html):
if str(sense) != " ":
meaning = sense.find('span', class_='def')
if meaning:
meaning = meaning.text
title = sense.find('span', class_='cf')
label = sense.find('span', class_='dtxt')
labels = sense.find('span', class_='labels')
if label:
meaning = "({}) {}".format(label.text, meaning)
if labels:
meaning = "{} {}".format(labels.text, meaning)
if title:
meaning = "{}: {}".format(title.text, meaning)
print("{}. {}".format(i+1, meaning))
examples = [item.text for item in sense.find_all('li', class_="") if item]
for example in examples:
if example:
print(" • {}".format(example))
def _print_idioms(self, html):
"""prints idioms"""
print("\nIdioms:")
for idiom in html:
if idiom:
print(" ⦾ {}".format(idiom.find('div').get_text(" ", strip=True)))
label = idiom.find('span', class_='labels')
description = idiom.find('span', class_='def').get_text(" ", strip=True)
if label:
description = "{} {}".format(label.text, description)
print(" {}".format(description))
for example in idiom.find_all('span', class_='x'):
print(" . {}".format(example.text))
| 2.90625
| 3
|
hackerrank/The Minion Game.py
|
safakhan413/leetcode
| 0
|
12782387
|
<reponame>safakhan413/leetcode<filename>hackerrank/The Minion Game.py
from itertools import permutations
from collections import Counter
def minion_game(string):
# finding all substrings of s
# vowels= ['A', 'E', 'I', 'O', 'U']
# substrings = []
# for i in range(len(string)):
# for j in range(i+1, len(string)+ 1):
# substrings.append(string[i:j])
# # make a dictionary of alls trings
# subs_count = Counter(substrings)
# kev,stu = 0,0
#
# for k,v in subs_count.items():
# if k[0] in vowels:
# kev += v
# else:
# stu += v
# if kev > stu:
# print('kevin', kev)
# elif stu > kev:
# print('Stuart', stu)
# else:
# print('Draw')
####### More efficient solution ###################
    player1 = 0
    player2 = 0
str_len = len(string)
for i in range(str_len):
if string[i] in "AEIOU":
player1 += (str_len) - i
else:
player2 += (str_len) - i
    if player1 > player2:
        print("Kevin", player1)
    elif player1 < player2:
        print("Stuart", player2)
    else:
        print("Draw")
minion_game('BANANA')
''' Algorithm
Output: name and score of the winner
Score calculation criteria: find all substrings of string S
Kevin has to make words beginning with vowels
Stuart has to make words beginning with consonants
1. put them all in a dictionary and add 1 every time you go through the string again and find that substring
2. keep 2 variables, stuart and kevin. If consonant, add to stuart; if vowel, add to kevin
3. compare the two values and print the winner with the score
'''
| 3.8125
| 4
|
dask/context.py
|
epervago/dask
| 2
|
12782388
|
"""
Control global computation context
"""
from __future__ import absolute_import, division, print_function
from collections import defaultdict
_globals = defaultdict(lambda: None)
_globals['callbacks'] = set()
class set_options(object):
""" Set global state within controled context
This lets you specify various global settings in a tightly controlled
``with`` block.
Valid keyword arguments currently include the following::
get - the scheduler to use
pool - a thread or process pool
cache - Cache to use for intermediate results
func_loads/func_dumps - loads/dumps functions for serialization of data
likely to contain functions. Defaults to
cloudpickle.loads/cloudpickle.dumps
optimizations - List of additional optimizations to run
Examples
--------
>>> with set_options(get=dask.get): # doctest: +SKIP
... x = np.array(x) # uses dask.get internally
"""
def __init__(self, **kwargs):
self.old = _globals.copy()
_globals.update(kwargs)
def __enter__(self):
return
def __exit__(self, type, value, traceback):
_globals.clear()
_globals.update(self.old)
| 2.4375
| 2
|
open3dlab/open3dlab-scraper.py
|
piyushd26/scrapers
| 1
|
12782389
|
<gh_stars>1-10
from bs4 import BeautifulSoup as soup
import requests
import os
import re
import argparse
from multiprocessing.pool import ThreadPool
from tqdm.auto import tqdm
from database import Project, User, Download, Database
import sys
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
BASE_URL = "https://smutba.se"
def scrape(project_id):
db = Database()
if db.get_project(project_id):
# print("Project {} exists in the datatbase".format(project_id))
return True
url = "https://smutba.se/project/{}/".format(project_id)
page_html = requests.get(url)
if page_html.status_code == 404:
print("Project {} does not exist on the site".format(project_id))
return False
page_soup = soup(page_html.content, "html5lib")
project = Project(project_id)
project.title = page_soup.find("h1", {"id": "file_title"}).text
images_e = page_soup.find_all("img", {"class": "project-detail-image-main"})
for e in images_e:
project.images.append(BASE_URL + e["src"])
project.description = page_soup.find("div", {"class": "panel__body"}).decode_contents()
user_id = page_soup.find("h4", {"class": "panel__avatar-title"}).find("a").get("href", "").split("/")[-2]
user = User(user_id)
user.name = page_soup.find("span", {"class": "username"}).text
user.add_user()
project.user = user
info = page_soup.find("div", {"class": "panel__footer"}).find_all("dd")
project.posted = info[0].text
project.views = info[1].text
project.category = info[2].text
project.licence = info[3].text
trs = page_soup.find("tbody").find_all("tr")
for i in range(0, len(trs), 2):
tr = trs[i].find_all("td")
if len(tr) < 4:
break
filename = tr[0].strong.text
downloads = tr[1].text
created = tr[2].text
filesize = tr[3].text
links = trs[i+1].find_all("a")
download = Download(filename)
download.downloads = downloads
download.created = created
download.filesize = filesize
download.project_id = project.id
for link in links:
download.urls.append(BASE_URL + link.get("href", ""))
download.add_download()
success = project.add_project()
if not success:
print("Project {} was not successfully added to the database".format(project_id))
print("Project {} added to the database".format(project_id))
def download_file(data, retries=0):
local_filename = data[0]
url = data[1]
if os.path.isfile(local_filename):
return url
try:
        response = requests.get(url, stream=True)
        # requests only raises HTTPError from raise_for_status(); without this
        # call the retry handler below could never fire
        response.raise_for_status()
        with tqdm.wrapattr(open(local_filename, "wb"), "write", miniters=1,
                           total=int(response.headers.get('content-length', 0)),
                           desc=local_filename[-20:]) as fout:
            for chunk in response.iter_content(chunk_size=4096):
                fout.write(chunk)
except requests.exceptions.HTTPError:
if retries > 5:
return
print("retrying", url)
retries += 1
download_file((local_filename, url), retries)
return url
def get_file_url(url):
page_html = requests.get(url)
if page_html.status_code == 404:
print("File {} does not exist on the site".format(url))
return False
page_soup = soup(page_html.content, "html5lib")
file_url = page_soup.find("div", {"class": "project-description-div"}).find("a").get("href", "")
return BASE_URL + file_url
def format_name(title):
title = re.sub('[?/|\\\}{:<>*"]', '', title)
if len(title) > 190:
title = title[:120]
return title
def download_all():
db = Database()
download_list = db.get_not_downloaded_download()
download_list_urls = []
for download in download_list:
urls = db.get_url(download[0])
url = get_file_url(urls[0][1])
if not url:
continue
file_dir = os.path.join(BASE_DIR, "project", str(download[5]).zfill(5))
file_location = os.path.join(file_dir, format_name(download[1]))
if not os.path.exists(file_dir):
os.makedirs(file_dir)
download_list_urls.append((file_location, url))
db.set_download_download(download[0])
with ThreadPool(processes=4) as tp:
for imap_result in tp.imap_unordered(download_file, download_list_urls):
pass
project_list = db.get_not_downloaded_project()
for project in project_list:
images = db.get_image_urls(project[0])
for image in images:
file_dir = os.path.join(BASE_DIR, "project", str(project[0]).zfill(5), "images")
file_location = os.path.join(file_dir, os.path.basename(image[1]))
if not os.path.exists(file_dir):
os.makedirs(file_dir)
download_file((file_location, image[1]))
db.set_download_project(project[0])
def main():
parser = argparse.ArgumentParser(description="Archive assets from Smutbase and Open3DLab")
parser.add_argument("--start", type=int, help="Starting project ID", default=1)
parser.add_argument("--end", type=int, help="Ending project ID", default=32500)
parser.add_argument("-o", "--output", type=str, help="Set download directory")
args = parser.parse_args()
global BASE_DIR
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
if args.output:
BASE_DIR = os.path.abspath(args.output)
if not os.path.exists(BASE_DIR):
os.makedirs(BASE_DIR)
for project_id in range(args.start, args.end+1):
scrape(project_id)
download_all()
main()
| 2.625
| 3
|
tests/test_sedas_api.py
|
oskarfkrauss/sedas_pyapi
| 3
|
12782390
|
"""
Copyright 2019 Satellite Applications Catapult
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from urllib.error import HTTPError
from sedas_pyapi.sedas_api import SeDASAPI
class TestSeDASAPI(unittest.TestCase):
def test_login_bad_creds(self):
sedas = SeDASAPI("bogus", "<PASSWORD> real password")
self.assertRaises(
HTTPError,
sedas.login
)
def test_blank_username(self):
sedas = SeDASAPI("", "is not a real password")
self.assertRaises(
ValueError,
sedas.login
)
def test_blank_password(self):
sedas = SeDASAPI("is not a real username", "")
self.assertRaises(
ValueError,
sedas.login
)
| 2.265625
| 2
|
Exercicios/ex081.py
|
mauroalbuquerque/Python-CursoEmVideo
| 0
|
12782391
|
<reponame>mauroalbuquerque/Python-CursoEmVideo<filename>Exercicios/ex081.py<gh_stars>0
num = list()
while True:
while True:
        valor = input('Enter a value: ')
if valor.isnumeric():
valor = int(valor)
break
else:
            print('Invalid value. Please try again!')
num.append(valor)
while True:
        resposta = input('Do you want to continue [Y/N]: ').strip().lower()[0]
        if resposta != 'y' and resposta != 'n':
            print('Invalid input. Please try again!')
        else:
            break
    if resposta == 'n':
break
print(num)
print(f'{len(num)} values were entered')
num.sort(reverse=True)
print(f'The list in descending order -> {num}')
if 5 in num:
    print('The value 5 is in the list')
else:
    print('The value 5 is not in the list')
| 3.609375
| 4
|
balancesheet/mongoData/equities.py
|
tylertjburns/ledgerkeeper
| 0
|
12782392
|
<filename>balancesheet/mongoData/equities.py
import mongoengine
from balancesheet.mongoData.valueSnapshot import ValueSnapshot
class Equity(mongoengine.Document):
# Top Level Elements
name = mongoengine.StringField(required=True)
description = mongoengine.StringField(required=True)
account_id = mongoengine.StringField(required=True)
equityClass = mongoengine.StringField(required=True)
equityType = mongoengine.StringField(required=True)
interestRate = mongoengine.FloatField(default=0.0)
equityTimeHorizon = mongoengine.StringField(required=True)
equityStatus = mongoengine.StringField(required=True)
equityContingency = mongoengine.StringField(required=True)
# Embedded Documents
snapshots = mongoengine.EmbeddedDocumentListField(ValueSnapshot)
# Meta
meta = {
'db_alias': 'core',
'collection': 'equities'
}
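# A hedged usage sketch (not part of the module): assumes a mongoengine
# connection registered under the 'core' alias, matching the meta above;
# the database name and field values are made up.
#
#     mongoengine.register_connection(alias='core', db='ledgerkeeper')
#     eq = Equity(name="Brokerage", description="Taxable account",
#                 account_id="acct-1", equityClass="asset",
#                 equityType="investment", equityTimeHorizon="long",
#                 equityStatus="open", equityContingency="none")
#     eq.save()   # persists to the 'equities' collection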
| 2.109375
| 2
|
rest_framework_idempotency_key/utils.py
|
hardcoretech/djangorestframework-idempotency-key
| 5
|
12782393
|
def raise_if(expression, error):
if expression:
raise error
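# A brief usage sketch of the guard above (hypothetical caller, not from the
# original module):
#
#     def set_quantity(qty):
#         raise_if(qty < 0, ValueError("quantity must be non-negative"))
#         return qty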
| 1.664063
| 2
|
teamcat_service/doraemon/business/project/project_service.py
|
zhangyin2088/Teamcat
| 6
|
12782394
|
#coding=utf-8
'''
Created on 2015-10-23
@author: Devuser
'''
from doraemon.project.models import Project,ProjectMember,Product,ProjectModule,Version
from gatesidelib.common.simplelogger import SimpleLogger
from django.contrib.admin.models import DELETION,CHANGE,ADDITION
from business.project.version_service import VersionService
from business.auth_user.user_service import UserService
import random
class ProjectService(object):
'''
classdocs
'''
@staticmethod
def get_latest_projects_include_me(request):
result = list()
latest_projects = ProjectService.get_latest_projects(request)
my_projects = ProjectService.get_projects_include_me(request)
my_project_ids = [project.id for project in my_projects]
for project in latest_projects:
if project and project.id in my_project_ids:
result.append(project)
return result[0:6]
@staticmethod
def get_latest_projects(request):
result=list()
latest_project_ids=VersionService.get_latests_project_ids(request)
for project_id in latest_project_ids:
temp_project=Project.objects.get(project_id)
result.append(temp_project)
return result
@staticmethod
def get_projects_include_me(request,product_id='all'):
if product_id==None:
product_id="0"
if UserService.is_admin(request.user.id):
return Project.objects.all()
member_list= ProjectMember.objects.all().filter(PMMember=request.user.id)
project_ids=[member.PMProjectID for member in member_list]
if product_id.upper()=="ALL":
result=Project.objects.all().filter(id__in=project_ids)
else:
result=Project.objects.all().filter(id__in=project_ids).filter(Product=int(product_id))
return result
@staticmethod
def get_products_include_me(request):
my_projects=ProjectService.get_projects_include_me(request)
        product_ids = list()
        for project in my_projects:
            if project.Product not in product_ids:
                product_ids.append(project.Product)
        return Product.objects.all().filter(id__in=product_ids)
@staticmethod
def get_project_modules(project_id):
return ProjectModule.objects.project_modules(int(project_id))
@staticmethod
def create_project(request):
try:
project=Project()
project=ProjectService.init_project(request.data, project)
project.PBCreator=request.user.id
project.save()
if str(request.user.id)!=str(project.PBLead):
ProjectService.add_member(request.user.id,project.id,5)
ProjectService.add_member(project.PBLead,project.id,4)
else:
ProjectService.add_member(request.user.id,project.id,4)
ProjectService.create_version(project,request.user)
ProjectService.log_create_activity(request.user, project)
except Exception as ex:
SimpleLogger.error(ex)
@staticmethod
def create_version(project,user):
version=Version()
version.VProjectID=project.id
version.VVersion='1.0.0'
version.CFTCommitor=user.id
version.save()
VersionService.log_create_activity(user, version)
@staticmethod
def edit_project(request,projectid):
temp_project=Project.objects.get(projectid)
project=ProjectService.init_project(request.POST, temp_project)
project.save()
ProjectService.log_change_activity(request.user, project)
@staticmethod
def delete_project(request,projectid):
print(projectid)
project=Project.objects.get(projectid)
project.IsActive=0
project.save()
ProjectService.log_delete_activity(request.user, project)
@staticmethod
def init_project(validate_data,project):
tmp_project=project
tmp_project.PBTitle=validate_data.get('PBTitle')
tmp_project.PBDescription=validate_data.get('PBDescription')
tmp_project.PBKey=validate_data.get('PBKey')
tmp_project.PBPlatform=validate_data.get('PBPlatform')
tmp_project.PBVisiableLevel=validate_data.get('PBVisiableLevel')
tmp_project.PBLead=validate_data.get('PBLead')
tmp_project.Product=0
tmp_project.PBHttpUrl=validate_data.get('PBHttpUrl')
tmp_project.PBAvatar="/static/global/images/project-icon/scenery-"+str(random.randint(1, 24))+".png"
return tmp_project
@staticmethod
def add_member(user,projectid,Role):
project_member=ProjectMember()
project_member.PMProjectID=projectid
project_member.PMMember=user
project_member.PMRoleID=Role
project_member.PMRoleType=1
project_member.save()
@staticmethod
def log_create_activity(user,project):
        Project.objects.log_action(user.id,project.id,project.PBTitle,ADDITION,"created the project",project.id)
@staticmethod
def log_delete_activity(user,project):
        Project.objects.log_action(user.id,project.id,project.PBTitle,DELETION,"deleted the project",project.id)
@staticmethod
def log_change_activity(user,project):
        Project.objects.log_action(user.id,project.id,project.PBTitle,CHANGE,"modified the project",project.id)
| 2.09375
| 2
|
render.py
|
tranquada/ga-capstone
| 4
|
12782395
|
from jinja2 import Environment, PackageLoader, select_autoescape
from IPython.display import HTML
# MODULE INTRODUCTION
"""This module contains dislay functions to render the different data layers
using Jinja2 templates and IPython rendering methods for Jupyter Notebook."""
# GLOBAL VARIABLE DECLARATIONS
ENV = Environment(
loader=PackageLoader('ga-capstone', 'templates'),
autoescape=select_autoescape(['html', 'xml'])
)
def div(format, data): # HTML formatting utility function
"""Wraps 'data' inside a div of class 'format' for HTML printing."""
d = '<div class="{}">{}</div>' # Basic div template
return d.format(format, data)
def char(i, char, map_char, state):
"""Formats character information for html display. state can be
[blank/bold/plain/tag]."""
    top = div(map_char, char)  # Create div for top part with character
    bottom = div(state, i)  # Create div for bottom part with index
    filling = top + bottom  # Stick the two together to fill the charbox
return div('charbox', filling)
def line(raw, char_map, states, pad=True):
"""Formats line information for html display. 'raw' should come from
data.raw, and 'char_map' should come from styler(data.map). pad=True will
add black squares to fill the row up to 80 characters."""
filling = ''
    for i, (c, m, s) in enumerate(zip(raw, char_map, states)):
filling += char(i, c, m, s)
if pad: # Check line length and initialize pad_length if creating a pad
pad_length = 80 - len(raw)
for x in range(pad_length):
filling += char(len(raw) + x, '', 'e', 'blank')
return div('stringbox', filling)
def show(content, style):
"""Takes a string representing HTML content and packages it for display in
IPython/Jupyter Notebook"""
window = div('window', content)
return div('viewer', window)
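# A minimal usage sketch (made-up inputs; the style classes and state names
# here are assumptions about the expected data, not part of this module):
#
#     raw = "abc"
#     char_map = ["a", "b", "c"]            # one style class per character
#     states = ["plain", "bold", "plain"]   # one state per character
#     markup = line(raw, char_map, states, pad=False)
#     HTML(show(markup, style=None))        # display inside a Jupyter cell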
| 3.3125
| 3
|
mailslurp_client/models/webhook_projection.py
|
mailslurp/mailslurp-client-python
| 6
|
12782396
|
<reponame>mailslurp/mailslurp-client-python<gh_stars>1-10
# coding: utf-8
"""
MailSlurp API
MailSlurp is an API for sending and receiving emails from dynamically allocated email addresses. It's designed for developers and QA teams to test applications, process inbound emails, send templated notifications, attachments, and more. ## Resources - [Homepage](https://www.mailslurp.com) - Get an [API KEY](https://app.mailslurp.com/sign-up/) - Generated [SDK Clients](https://www.mailslurp.com/docs/) - [Examples](https://github.com/mailslurp/examples) repository # noqa: E501
The version of the OpenAPI document: 6.5.2
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from mailslurp_client.configuration import Configuration
class WebhookProjection(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'created_at': 'datetime',
'event_name': 'str',
'id': 'str',
'inbox_id': 'str',
'name': 'str',
'updated_at': 'datetime',
'url': 'str'
}
attribute_map = {
'created_at': 'createdAt',
'event_name': 'eventName',
'id': 'id',
'inbox_id': 'inboxId',
'name': 'name',
'updated_at': 'updatedAt',
'url': 'url'
}
def __init__(self, created_at=None, event_name=None, id=None, inbox_id=None, name=None, updated_at=None, url=None, local_vars_configuration=None): # noqa: E501
"""WebhookProjection - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._created_at = None
self._event_name = None
self._id = None
self._inbox_id = None
self._name = None
self._updated_at = None
self._url = None
self.discriminator = None
self.created_at = created_at
if event_name is not None:
self.event_name = event_name
self.id = id
self.inbox_id = inbox_id
if name is not None:
self.name = name
self.updated_at = updated_at
self.url = url
@property
def created_at(self):
"""Gets the created_at of this WebhookProjection. # noqa: E501
:return: The created_at of this WebhookProjection. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this WebhookProjection.
:param created_at: The created_at of this WebhookProjection. # noqa: E501
:type: datetime
"""
if self.local_vars_configuration.client_side_validation and created_at is None: # noqa: E501
raise ValueError("Invalid value for `created_at`, must not be `None`") # noqa: E501
self._created_at = created_at
@property
def event_name(self):
"""Gets the event_name of this WebhookProjection. # noqa: E501
:return: The event_name of this WebhookProjection. # noqa: E501
:rtype: str
"""
return self._event_name
@event_name.setter
def event_name(self, event_name):
"""Sets the event_name of this WebhookProjection.
:param event_name: The event_name of this WebhookProjection. # noqa: E501
:type: str
"""
allowed_values = ["EMAIL_RECEIVED", "NEW_EMAIL", "NEW_CONTACT", "NEW_ATTACHMENT", "EMAIL_OPENED", "EMAIL_READ"] # noqa: E501
if self.local_vars_configuration.client_side_validation and event_name not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `event_name` ({0}), must be one of {1}" # noqa: E501
.format(event_name, allowed_values)
)
self._event_name = event_name
@property
def id(self):
"""Gets the id of this WebhookProjection. # noqa: E501
:return: The id of this WebhookProjection. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this WebhookProjection.
:param id: The id of this WebhookProjection. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def inbox_id(self):
"""Gets the inbox_id of this WebhookProjection. # noqa: E501
:return: The inbox_id of this WebhookProjection. # noqa: E501
:rtype: str
"""
return self._inbox_id
@inbox_id.setter
def inbox_id(self, inbox_id):
"""Sets the inbox_id of this WebhookProjection.
:param inbox_id: The inbox_id of this WebhookProjection. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and inbox_id is None: # noqa: E501
raise ValueError("Invalid value for `inbox_id`, must not be `None`") # noqa: E501
self._inbox_id = inbox_id
@property
def name(self):
"""Gets the name of this WebhookProjection. # noqa: E501
:return: The name of this WebhookProjection. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this WebhookProjection.
:param name: The name of this WebhookProjection. # noqa: E501
:type: str
"""
self._name = name
@property
def updated_at(self):
"""Gets the updated_at of this WebhookProjection. # noqa: E501
:return: The updated_at of this WebhookProjection. # noqa: E501
:rtype: datetime
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this WebhookProjection.
:param updated_at: The updated_at of this WebhookProjection. # noqa: E501
:type: datetime
"""
if self.local_vars_configuration.client_side_validation and updated_at is None: # noqa: E501
raise ValueError("Invalid value for `updated_at`, must not be `None`") # noqa: E501
self._updated_at = updated_at
@property
def url(self):
"""Gets the url of this WebhookProjection. # noqa: E501
:return: The url of this WebhookProjection. # noqa: E501
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this WebhookProjection.
:param url: The url of this WebhookProjection. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and url is None: # noqa: E501
raise ValueError("Invalid value for `url`, must not be `None`") # noqa: E501
self._url = url
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WebhookProjection):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, WebhookProjection):
return True
return self.to_dict() != other.to_dict()
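# A hedged construction example (field values are made up; required fields
# such as id, inbox_id, url, created_at and updated_at must be non-None when
# client-side validation is enabled):
#
#     import datetime
#     proj = WebhookProjection(
#         created_at=datetime.datetime.utcnow(),
#         event_name="EMAIL_RECEIVED",
#         id="abc-123", inbox_id="inbox-1", name="my-hook",
#         updated_at=datetime.datetime.utcnow(),
#         url="https://example.com/hook",
#     )
#     print(proj.to_dict())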
| 2.28125
| 2
|
modules/server/physim.py
|
bullseyestudio/guns-game
| 0
|
12782397
|
<gh_stars>0
""" Physics simulation module for Guns!, the tank game.
Even more docstringy stuff goes here.
Todo: I have no idea what physics sim functions we need yet, but we should have
the module.
Todo: This will probably end up breaking into bits.
"""
pass
| 1.46875
| 1
|
winners/nontargeted-attack/teaflow/cleverhans/attacks.py
|
geekpwn/caad2018
| 50
|
12782398
|
from abc import ABCMeta
import numpy as np
from six.moves import xrange
import warnings
import collections
import cleverhans.utils as utils
from cleverhans.model import Model, CallableModelWrapper
class Attack(object):
"""
Abstract base class for all attack classes.
"""
__metaclass__ = ABCMeta
def __init__(self, model, back='tf', sess=None):
"""
:param model: An instance of the Model class.
:param back: The backend to use. Either 'tf' (default) or 'th'.
:param sess: The tf session to run graphs in (use None for Theano)
"""
if not(back == 'tf' or back == 'th'):
raise ValueError("Backend argument must either be 'tf' or 'th'.")
if back == 'th' and sess is not None:
raise Exception("A session should not be provided when using th.")
if not isinstance(model, Model):
if hasattr(model, '__call__'):
pass
else:
raise ValueError("The model argument should be an instance of"
" the Model class.")
if back == 'th':
warnings.warn("CleverHans support for Theano is deprecated and "
"will be dropped on 2017-11-08.")
# Prepare attributes
self.model = model
self.back = back
self.sess = sess
# We are going to keep track of old graphs and cache them.
self.graphs = {}
# When calling generate_np, arguments in the following set should be
# fed into the graph, as they are not structural items that require
# generating a new graph.
# This dict should map names of arguments to the types they should
# have.
# (Usually, the target class will be a feedable keyword argument.)
self.feedable_kwargs = {}
# When calling generate_np, arguments in the following set should NOT
# be fed into the graph, as they ARE structural items that require
# generating a new graph.
# This list should contain the names of the structural arguments.
self.structural_kwargs = []
def generate(self, x, **kwargs):
"""
Generate the attack's symbolic graph for adversarial examples. This
        method should be overridden in any child class that implements an
        attack that is expressible symbolically. Otherwise, it will wrap the
numerical implementation as a symbolic operator.
:param x: The model's symbolic inputs.
:param **kwargs: optional parameters used by child classes.
:return: A symbolic representation of the adversarial examples.
"""
if self.back == 'th':
raise NotImplementedError('Theano version not implemented.')
error = "Sub-classes must implement generate."
raise NotImplementedError(error)
def construct_graph(self, fixed, feedable, x_val, hash_key):
# try our very best to create a TF placeholder for each of the
# feedable keyword arguments, and check the types are one of
# the allowed types
import tensorflow as tf
new_kwargs = dict(x for x in fixed.items())
for name, value in feedable.items():
given_type = self.feedable_kwargs[name]
if isinstance(value, np.ndarray):
new_shape = [None] + list(value.shape[1:])
new_kwargs[name] = tf.placeholder(given_type, new_shape)
elif isinstance(value, utils.known_number_types):
new_kwargs[name] = tf.placeholder(given_type, shape=[])
else:
raise ValueError("Could not identify type of argument " +
name + ": " + str(value))
# x is a special placeholder we always want to have
x_shape = [None] + list(x_val.shape)[1:]
x = tf.placeholder(tf.float32, shape=x_shape)
# now we generate the graph that we want
x_adv = self.generate(x, **new_kwargs)
self.graphs[hash_key] = (x, new_kwargs, x_adv)
if len(self.graphs) >= 10:
warnings.warn("Calling generate_np() with multiple different "
"structural paramaters is inefficient and should"
" be avoided. Calling generate() is preferred.")
def generate_np(self, x_val, **kwargs):
"""
Generate adversarial examples and return them as a Numpy array.
Sub-classes *should not* implement this method unless they must
perform special handling of arguments.
:param x_val: A Numpy array with the original inputs.
:param **kwargs: optional parameters used by child classes.
:return: A Numpy array holding the adversarial examples.
"""
if self.back == 'th':
raise NotImplementedError('Theano version not implemented.')
import tensorflow as tf
if self.sess is None:
raise ValueError("Cannot use `generate_np` when no `sess` was"
" provided")
# the set of arguments that are structural properties of the attack
# if these arguments are different, we must construct a new graph
fixed = dict((k, v) for k, v in kwargs.items()
if k in self.structural_kwargs)
# the set of arguments that are passed as placeholders to the graph
# on each call, and can change without constructing a new graph
feedable = dict((k, v) for k, v in kwargs.items()
if k in self.feedable_kwargs)
if len(fixed) + len(feedable) < len(kwargs):
warnings.warn("Supplied extra keyword arguments that are not "
"used in the graph computation. They have been "
"ignored.")
if not all(isinstance(value, collections.Hashable)
for value in fixed.values()):
# we have received a fixed value that isn't hashable
# this means we can't cache this graph for later use,
# and it will have to be discarded later
hash_key = None
else:
            # create a unique key for this set of fixed parameters
hash_key = tuple(sorted(fixed.items()))
if hash_key not in self.graphs:
self.construct_graph(fixed, feedable, x_val, hash_key)
x, new_kwargs, x_adv = self.graphs[hash_key]
feed_dict = {x: x_val}
for name in feedable:
feed_dict[new_kwargs[name]] = feedable[name]
return self.sess.run(x_adv, feed_dict)
def parse_params(self, params=None):
"""
        Takes in a dictionary of parameters and applies attack-specific checks
before saving them as attributes.
:param params: a dictionary of attack-specific parameters
:return: True when parsing was successful
"""
return True
class MultipleModelAttack(object):
"""
    Abstract base class for attacks that operate on multiple models.
"""
__metaclass__ = ABCMeta
def __init__(self, models, back='tf', sess=None):
"""
        :param models: A list of instances of the Model class.
:param back: The backend to use. Either 'tf' (default) or 'th'.
:param sess: The tf session to run graphs in (use None for Theano)
"""
if not(back == 'tf' or back == 'th'):
raise ValueError("Backend argument must either be 'tf' or 'th'.")
if back == 'th' and sess is not None:
raise Exception("A session should not be provided when using th.")
for model in models:
if not isinstance(model, Model):
if hasattr(model, '__call__'):
warnings.warn("CleverHans support for supplying a callable"
" instead of an instance of the Model class is"
" deprecated and will be dropped on 2018-01-11.")
else:
raise ValueError("The model argument should be an instance of"
" the Model class.")
if back == 'th':
warnings.warn("CleverHans support for Theano is deprecated and "
"will be dropped on 2017-11-08.")
# Prepare attributes
self.model1 = models[0]
self.model2 = models[1]
self.model3 = models[2]
self.back = back
self.sess = sess
# We are going to keep track of old graphs and cache them.
self.graphs = {}
# When calling generate_np, arguments in the following set should be
# fed into the graph, as they are not structural items that require
# generating a new graph.
# This dict should map names of arguments to the types they should
# have.
# (Usually, the target class will be a feedable keyword argument.)
self.feedable_kwargs = {}
# When calling generate_np, arguments in the following set should NOT
# be fed into the graph, as they ARE structural items that require
# generating a new graph.
# This list should contain the names of the structural arguments.
self.structural_kwargs = []
def generate(self, x, **kwargs):
"""
Generate the attack's symbolic graph for adversarial examples. This
        method should be overridden in any child class that implements an
        attack that is expressible symbolically. Otherwise, it will wrap the
numerical implementation as a symbolic operator.
:param x: The model's symbolic inputs.
:param **kwargs: optional parameters used by child classes.
:return: A symbolic representation of the adversarial examples.
"""
if self.back == 'th':
raise NotImplementedError('Theano version not implemented.')
error = "Sub-classes must implement generate."
raise NotImplementedError(error)
def construct_graph(self, fixed, feedable, x_val, hash_key):
# try our very best to create a TF placeholder for each of the
# feedable keyword arguments, and check the types are one of
# the allowed types
import tensorflow as tf
new_kwargs = dict(x for x in fixed.items())
for name, value in feedable.items():
given_type = self.feedable_kwargs[name]
if isinstance(value, np.ndarray):
new_shape = [None] + list(value.shape[1:])
new_kwargs[name] = tf.placeholder(given_type, new_shape)
elif isinstance(value, utils.known_number_types):
new_kwargs[name] = tf.placeholder(given_type, shape=[])
else:
raise ValueError("Could not identify type of argument " +
name + ": " + str(value))
# x is a special placeholder we always want to have
x_shape = [None] + list(x_val.shape)[1:]
x = tf.placeholder(tf.float32, shape=x_shape)
# now we generate the graph that we want
x_adv = self.generate(x, **new_kwargs)
self.graphs[hash_key] = (x, new_kwargs, x_adv)
if len(self.graphs) >= 10:
warnings.warn("Calling generate_np() with multiple different "
"structural paramaters is inefficient and should"
" be avoided. Calling generate() is preferred.")
def generate_np(self, x_val, **kwargs):
"""
Generate adversarial examples and return them as a Numpy array.
Sub-classes *should not* implement this method unless they must
perform special handling of arguments.
:param x_val: A Numpy array with the original inputs.
:param **kwargs: optional parameters used by child classes.
:return: A Numpy array holding the adversarial examples.
"""
if self.back == 'th':
raise NotImplementedError('Theano version not implemented.')
import tensorflow as tf
if self.sess is None:
raise ValueError("Cannot use `generate_np` when no `sess` was"
" provided")
# the set of arguments that are structural properties of the attack
# if these arguments are different, we must construct a new graph
fixed = dict((k, v) for k, v in kwargs.items()
if k in self.structural_kwargs)
# the set of arguments that are passed as placeholders to the graph
# on each call, and can change without constructing a new graph
feedable = dict((k, v) for k, v in kwargs.items()
if k in self.feedable_kwargs)
if len(fixed) + len(feedable) < len(kwargs):
warnings.warn("Supplied extra keyword arguments that are not "
"used in the graph computation. They have been "
"ignored.")
if not all(isinstance(value, collections.Hashable)
for value in fixed.values()):
# we have received a fixed value that isn't hashable
# this means we can't cache this graph for later use,
# and it will have to be discarded later
hash_key = None
else:
            # create a unique key for this set of fixed parameters
hash_key = tuple(sorted(fixed.items()))
if hash_key not in self.graphs:
self.construct_graph(fixed, feedable, x_val, hash_key)
x, new_kwargs, x_adv = self.graphs[hash_key]
feed_dict = {x: x_val}
for name in feedable:
feed_dict[new_kwargs[name]] = feedable[name]
return self.sess.run(x_adv, feed_dict)
def parse_params(self, params=None):
"""
        Takes in a dictionary of parameters and applies attack-specific checks
before saving them as attributes.
:param params: a dictionary of attack-specific parameters
:return: True when parsing was successful
"""
return True
class FastGradientMethod(Attack):
"""
This attack was originally implemented by Goodfellow et al. (2015) with the
infinity norm (and is known as the "Fast Gradient Sign Method"). This
implementation extends the attack to other norms, and is therefore called
the Fast Gradient Method.
Paper link: https://arxiv.org/abs/1412.6572
"""
def __init__(self, model, back='tf', sess=None):
"""
Create a FastGradientMethod instance.
"""
super(FastGradientMethod, self).__init__(model, back, sess)
self.feedable_kwargs = {'eps': np.float32,
'y': np.float32,
'clip_min': np.float32,
'clip_max': np.float32}
self.structural_kwargs = ['ord']
if not isinstance(self.model, Model):
self.model = CallableModelWrapper(self.model, 'probs')
def generate(self, x, **kwargs):
"""
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param eps: (optional float) attack step size (input variation)
:param ord: (optional) Order of the norm (mimics Numpy).
Possible values: np.inf, 1 or 2.
:param y: (optional) A tensor with the model labels. Only provide
this parameter if you'd like to use true labels when crafting
adversarial samples. Otherwise, model predictions are used as
labels to avoid the "label leaking" effect (explained in this
paper: https://arxiv.org/abs/1611.01236). Default is None.
Labels should be one-hot-encoded.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
if self.back == 'tf':
from .attacks_tf import fgm
else:
from .attacks_th import fgm
return fgm(x, self.model.get_probs(x), y=self.y, eps=self.eps,
ord=self.ord, clip_min=self.clip_min,
clip_max=self.clip_max)
def parse_params(self, eps=0.3, ord=np.inf, y=None, clip_min=None,
clip_max=None, **kwargs):
"""
        Takes in a dictionary of parameters and applies attack-specific checks
before saving them as attributes.
Attack-specific parameters:
:param eps: (optional float) attack step size (input variation)
:param ord: (optional) Order of the norm (mimics Numpy).
Possible values: np.inf, 1 or 2.
:param y: (optional) A tensor with the model labels. Only provide
this parameter if you'd like to use true labels when crafting
adversarial samples. Otherwise, model predictions are used as
labels to avoid the "label leaking" effect (explained in this
paper: https://arxiv.org/abs/1611.01236). Default is None.
Labels should be one-hot-encoded.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
# Save attack-specific parameters
self.eps = eps
self.ord = ord
self.y = y
self.clip_min = clip_min
self.clip_max = clip_max
# Check if order of the norm is acceptable given current implementation
if self.ord not in [np.inf, int(1), int(2)]:
raise ValueError("Norm order must be either np.inf, 1, or 2.")
if self.back == 'th' and self.ord != np.inf:
raise NotImplementedError("The only FastGradientMethod norm "
"implemented for Theano is np.inf.")
return True
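# A hedged usage sketch (not from the original file): `model`, `sess` and
# `x_test` are assumed to exist elsewhere; the keyword arguments match
# parse_params above.
#
#     fgsm = FastGradientMethod(model, back='tf', sess=sess)
#     x_adv = fgsm.generate_np(x_test, eps=0.3, ord=np.inf,
#                              clip_min=0., clip_max=1.)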
class MultiModelIterativeMethod(MultipleModelAttack):
"""
The Basic Iterative Method (Kurakin et al. 2016). The original paper used
hard labels for this attack; no label smoothing.
"""
def __init__(self, models, back='tf', sess=None):
"""
Create a BasicIterativeMethod instance.
"""
super(MultiModelIterativeMethod, self).__init__(models, back, sess)
self.feedable_kwargs = {'eps': np.float32,
'eps_iter': np.float32,
'y': np.float32,
'clip_min': np.float32,
'clip_max': np.float32}
self.structural_kwargs = ['ord', 'nb_iter']
if not isinstance(self.model1, Model):
self.model1 = CallableModelWrapper(self.model1, 'probs')
if not isinstance(self.model2, Model):
self.model2 = CallableModelWrapper(self.model2, 'probs')
if not isinstance(self.model3, Model):
self.model3 = CallableModelWrapper(self.model3, 'probs')
def generate(self, x, **kwargs):
"""
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param eps: (required float) maximum distortion of adversarial example
compared to original input
:param eps_iter: (required float) step size for each attack iteration
:param nb_iter: (required int) Number of attack iterations.
:param y: (required) A tensor with the model labels.
:param ord: (optional) Order of the norm (mimics Numpy).
Possible values: np.inf, 1 or 2.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
import tensorflow as tf
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
# Initialize loop variables
eta = 0
# Fix labels to the first model predictions for loss computation
# model_preds1 = self.model1.get_probs(x)
# model_preds2 = self.model2.get_probs(x)
model_preds3 = self.model3.get_probs(x)
model_preds = model_preds3
preds_max = tf.reduce_max(model_preds, 1, keep_dims=True)
y = tf.to_float(tf.equal(model_preds, preds_max))
fgsm_params = {'eps': self.eps_iter, 'y': y, 'ord': self.ord}
for i in range(self.nb_iter):
FGSM1 = FastGradientMethod(self.model1, back=self.back, sess=self.sess)
FGSM2 = FastGradientMethod(self.model2, back=self.back, sess=self.sess)
FGSM3 = FastGradientMethod(self.model3, back=self.back, sess=self.sess)
            # Compute this step's perturbation as an (approximately equal)
            # weighted average of the three per-model FGSM perturbations
            eta1 = FGSM1.generate(x + eta, **fgsm_params) - x
            eta2 = FGSM2.generate(x + eta, **fgsm_params) - x
            eta3 = FGSM3.generate(x + eta, **fgsm_params) - x
            eta = eta1 * 0.333 + eta2 * 0.333 + eta3 * 0.333
# Clipping perturbation eta to self.ord norm ball
if self.ord == np.inf:
eta = tf.clip_by_value(eta, -self.eps, self.eps)
elif self.ord in [1, 2]:
reduc_ind = list(xrange(1, len(eta.get_shape())))
if self.ord == 1:
norm = tf.reduce_sum(tf.abs(eta),
reduction_indices=reduc_ind,
keep_dims=True)
elif self.ord == 2:
norm = tf.sqrt(tf.reduce_sum(tf.square(eta),
reduction_indices=reduc_ind,
keep_dims=True))
eta = eta * self.eps / norm
# Define adversarial example (and clip if necessary)
adv_x = x + eta
if self.clip_min is not None and self.clip_max is not None:
adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
return adv_x
def parse_params(self, eps=0.3, eps_iter=0.05, nb_iter=10, y=None,
ord=np.inf, clip_min=None, clip_max=None, **kwargs):
"""
        Takes in a dictionary of parameters and applies attack-specific checks
before saving them as attributes.
Attack-specific parameters:
:param eps: (required float) maximum distortion of adversarial example
compared to original input
:param eps_iter: (required float) step size for each attack iteration
:param nb_iter: (required int) Number of attack iterations.
:param y: (required) A tensor with the model labels.
:param ord: (optional) Order of the norm (mimics Numpy).
Possible values: np.inf, 1 or 2.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
# Save attack-specific parameters
self.eps = eps
self.eps_iter = eps_iter
self.nb_iter = nb_iter
self.y = y
self.ord = ord
self.clip_min = clip_min
self.clip_max = clip_max
# Check if order of the norm is acceptable given current implementation
if self.ord not in [np.inf, 1, 2]:
raise ValueError("Norm order must be either np.inf, 1, or 2.")
if self.back == 'th':
error_string = "BasicIterativeMethod is not implemented in Theano"
raise NotImplementedError(error_string)
return True
| 2.734375
| 3
|
medicare_appeals/tests/appeals_tests.py
|
18F/medicare-appeals-prototyping
| 1
|
12782399
|
import pytest
from medicare_appeals.appeals import models
from medicare_appeals.tests import factories
@pytest.fixture(scope='function')
def build_an_appeal():
"""
Build a single appeal
"""
    factories.AppealFactory()
@pytest.fixture(scope='function')
def build_two_appeals():
"""
Build two appeals with the description 'test{n}'
"""
    factories.AppealFactory(description='test0')
    factories.AppealFactory(description='test1')
@pytest.mark.django_db
def test_appeal(build_an_appeal):
"""
An appeal should be created
"""
assert models.Appeal.objects.count() == 1
@pytest.mark.django_db
def test_two_appeals(build_two_appeals):
"""
Two appeals should be created with description 'test{n}'
"""
appeals = models.Appeal.objects.all()
assert appeals.count() == 2
for idx, appeal in enumerate(appeals):
assert appeal.description == 'test{0}'.format(idx)
| 2.40625
| 2
|
nbaspa/data/tasks/io.py
|
ak-gupta/nbaspa
| 1
|
12782400
|
<gh_stars>1-10
"""Simple tasks for loading data from the ``BaseRequest`` API."""
from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple, Union
import fsspec
import numpy as np
import pandas as pd
from prefect import Task
from ..factory import NBADataFactory
import nbaspa.data.endpoints as endpoints
from ..endpoints.parameters import ParameterValues, SEASONS, Season
class GenericLoader(Task):
"""Load the data from a given endpoint.
Parameters
----------
loader : str
The name of a class in the ``nba_survival.data.endpoints``
subpackage.
**kwargs
Keyword arguments for ``prefect.Task``.
Attributes
----------
_loader : BaseRequest
The object that will load the data.
"""
def __init__(self, loader: str, **kwargs):
"""Init method."""
self._loader = getattr(endpoints, loader)
super().__init__(**kwargs)
def run( # type: ignore
self,
output_dir: str,
filesystem: Optional[str] = "file",
dataset_type: Optional[str] = None,
**kwargs,
) -> Union[pd.DataFrame, Dict[str, pd.DataFrame]]:
"""Retrieve the data.
Parameters
----------
output_dir : str
The directory containing the data.
filesystem : str, optional (default "file")
The name of the ``fsspec`` filesystem to use.
**kwargs
            Keyword arguments for initializing the ``_loader``.
Returns
-------
Dict
A dictionary with one key per dataset in the output.
"""
loader = self._loader(output_dir=output_dir, filesystem=filesystem, **kwargs)
loader.load()
if dataset_type is None:
return {key: loader.get_data(key) for key in loader.datasets}
else:
return loader.get_data(dataset_type=dataset_type)
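# A hedged usage sketch (not part of the module): "Scoreboard" is assumed to
# be an endpoint class in ``nbaspa.data.endpoints``, and ``GameDate`` an
# argument its constructor accepts.
#
#     loader = GenericLoader(loader="Scoreboard")
#     data = loader.run(output_dir="nba-data", GameDate="01/01/2019")
#     header = data["GameHeader"]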
class FactoryGetter(Task):
"""Retrieve a dataset from ``NBADataFactory``."""
def run( # type: ignore
self, factory: NBADataFactory, dataset_type: str = "default"
) -> pd.DataFrame:
"""Retrieve a dataset from ``NBADataFactory``.
Parameters
----------
factory : NBADataFactory
The loaded factory class.
dataset_type : str, optional (default "default")
The dataset type.
Returns
-------
pd.DataFrame
The concatenated data.
"""
return factory.get_data(dataset_type=dataset_type)
class PlayByPlayLoader(Task):
"""Load the play-by-data for a given day."""
def run( # type: ignore
self,
header: pd.DataFrame,
output_dir: str,
filesystem: Optional[str] = "file",
) -> pd.DataFrame:
"""Load the play-by-play data for a given day.
Parameters
----------
header : pd.DataFrame
The output of ``Scoreboard.get_data("GameHeader")``.
output_dir : str
The directory containing the data.
filesystem : str, optional (default "file")
The name of the ``fsspec`` filesystem to use.
Returns
-------
pd.DataFrame
The output dataset.
"""
calls: List[Tuple[str, Dict]] = []
games = np.unique(header["GAME_ID"])
for game in games:
calls.append(("PlayByPlay", {"GameID": game}))
# Create the factory and load the data
factory = NBADataFactory(
calls=calls, output_dir=output_dir, filesystem=filesystem
)
factory.load()
data = factory.get_data()
data["PLAYER1_ID"] = data["PLAYER1_ID"].astype(float)
data["PLAYER2_ID"] = data["PLAYER2_ID"].astype(float)
return data
class WinProbabilityLoader(Task):
"""Load the NBA win probability for each game in a day."""
def run( # type: ignore
self,
header: pd.DataFrame,
output_dir: str,
filesystem: Optional[str] = "file",
) -> pd.DataFrame:
"""Load the NBA win probability for each game in a day.
Parameters
----------
header : pd.DataFrame
The output of ``Scoreboard.get_data("GameHeader")``.
output_dir : str
The directory containing the data.
filesystem : str, optional (default "file")
The name of the ``fsspec`` implementation to use.
Returns
-------
pd.DataFrame
The output dataset.
"""
calls: List[Tuple[str, Dict]] = []
games = np.unique(header["GAME_ID"])
for game in games:
calls.append(("WinProbability", {"GameID": game}))
# Create the factory and load the data
factory = NBADataFactory(
calls=calls, output_dir=output_dir, filesystem=filesystem
)
factory.load()
return factory.get_data("WinProbPBP")
class GameLogLoader(Task):
"""Get team game logs."""
teams: Set = ParameterValues().TeamID
def run( # type: ignore
self,
season: str,
output_dir: str,
filesystem: Optional[str] = "file",
) -> pd.DataFrame:
"""Get team game logs.
Parameters
----------
season : str
The season string.
output_dir : str
The directory containing the data.
filesystem : str, optional (default "file")
The name of the ``fsspec`` filesystem to use.
Returns
-------
pd.DataFrame
The output dataset.
"""
calls: List[Tuple[str, Dict]] = []
        for team in self.teams:
            # NBA TeamID values all begin with "16"; skip any other
            # placeholder values in the parameter set
            if not str(team).startswith("16"):
                continue
calls.append(("TeamGameLog", {"TeamID": team, "Season": season}))
factory = NBADataFactory(
calls=calls, output_dir=output_dir, filesystem=filesystem
)
factory.load()
return factory.get_data()
class LineupLoader(Task):
"""Get team lineup stats.
For lineup and overall team statistics we need to get rolling data over the season.
"""
def run( # type: ignore
self,
season: str,
GameDate: str,
linescore: pd.DataFrame,
) -> NBADataFactory:
"""Get team lineup stats.
Parameters
----------
season : str
The season string.
GameDate : str
The game date.
linescore : pd.DataFrame
The output of ``Scoreboard.get_data("LineScore")``. We will use this to only
pull the rolling data we need.
Returns
-------
NBADataFactory
Loaded data factory
"""
GameDate = datetime.strptime(GameDate, "%m/%d/%Y")
if (GameDate - SEASONS[season]["START"]).days < 14:
self.logger.info("Pulling previous season data")
current = Season(year=int(season[0:4]))
season = str(current - 1)
GameDate = GameDate + timedelta(days=-1)
calls: List[Tuple[str, Dict]] = []
teams = np.unique(linescore["TEAM_ID"])
for team in teams:
params = {
"TeamID": team,
"Season": season,
"MeasureType": "Advanced",
"DateFrom": SEASONS[season]["START"].strftime("%m/%d/%Y"),
"DateTo": GameDate.strftime("%m/%d/%Y"),
}
calls.append(("TeamLineups", params))
factory = NBADataFactory(calls=calls)
factory.get()
return factory
class RotationLoader(Task):
"""Load the team rotations for all games in a given day."""
def run( # type: ignore
self,
header: pd.DataFrame,
output_dir: str,
filesystem: Optional[str] = "file",
) -> Dict[str, pd.DataFrame]:
"""Load the team rotations for all games in a given day.
Parameters
----------
header : pd.DataFrame
The output of ``Scoreboard.get_data("GameHeader")``.
output_dir : str
The directory containing the data.
filesystem : str, optional (default "file")
The name of the ``fsspec`` filesystem to use.
Returns
-------
Dict
A dictionary with two keys: ``HomeTeam`` and ``AwayTeam``.
"""
calls: List[Tuple[str, Dict]] = []
games = np.unique(header["GAME_ID"])
for game in games:
calls.append(("GameRotation", {"GameID": game}))
# Create the factory and load the data
factory = NBADataFactory(
calls=calls, output_dir=output_dir, filesystem=filesystem
)
factory.load()
return {
"HomeTeam": factory.get_data(dataset_type="HomeTeam"),
"AwayTeam": factory.get_data(dataset_type="AwayTeam"),
}
class ShotChartLoader(Task):
"""Load the shotcharts for all games in a given day."""
def run( # type: ignore
self,
header: pd.DataFrame,
season: str,
output_dir: str,
filesystem: Optional[str] = "file",
) -> Dict[str, pd.DataFrame]:
"""Load the shotcharts for all games in a given day.
Parameters
----------
header : pd.DataFrame
The output of ``Scoreboard.get_data("GameHeader")``.
season : str
The season string.
output_dir : str
The directory containing the data.
filesystem : str, optional (default "file")
The name of the ``fsspec`` filesystem to use.
Returns
-------
pd.DataFrame
The shotcharts
"""
calls: List[Tuple[str, Dict]] = []
games = np.unique(header["GAME_ID"])
for game in games:
calls.append(("ShotChart", {"GameID": game, "Season": season}))
# Create the factory and load the data
factory = NBADataFactory(
calls=calls, output_dir=output_dir, filesystem=filesystem
)
factory.load()
return factory.get_data()
class BoxScoreLoader(Task):
"""Load all boxscores for games in in a given day."""
def run( # type: ignore
self,
header: pd.DataFrame,
output_dir: str,
filesystem: Optional[str] = "file",
) -> pd.DataFrame:
"""Load all boxscores for games in a given day.
Parameters
----------
header : pd.DataFrame
The output of ``Scoreboard.get_data("GameHeader")``.
output_dir : str
The directory containing the data.
filesystem : str, optional (default "file")
The name of the ``fsspec`` filesystem to use.
Returns
-------
pd.DataFrame
The output dataset.
"""
calls: List[Tuple[str, Dict]] = []
games = np.unique(header["GAME_ID"])
for game in games:
calls.append(("BoxScoreTraditional", {"GameID": game}))
# Create the factory and load the data
factory = NBADataFactory(
calls=calls, output_dir=output_dir, filesystem=filesystem
)
factory.load()
return factory.get_data("PlayerStats")
class ShotZoneLoader(Task):
"""Load the shot zone data for each player in each game."""
def run( # type: ignore
self,
season: str,
GameDate: str,
boxscore: pd.DataFrame,
output_dir: str,
filesystem: Optional[str] = "file",
) -> pd.DataFrame:
"""Load the shot zone data for each player in each game.
Parameters
----------
season : str
The season string.
GameDate : str
The game date.
boxscore : pd.DataFrame
The output from ``BoxScoreTraditional.get_data("PlayerStats")``.
output_dir : str
The directory containing the data.
filesystem : str, optional (default "file")
The name of the ``fsspec`` filesystem to use.
Returns
-------
pd.DataFrame
The output dataset.
"""
GameDate = datetime.strptime(GameDate, "%m/%d/%Y")
if (GameDate - SEASONS[season]["START"]).days < 14:
self.logger.info("Pulling previous season data")
current = Season(year=int(season[0:4]))
season = str(current - 1)
GameDate = GameDate + timedelta(days=-1)
# For each player, get the gamelog
calls: List[Tuple[str, Dict]] = []
for _, row in boxscore.iterrows():
if pd.isnull(row["FGA"]) or row["FGA"] == 0:
continue
calls.append(
(
"PlayerGameLog",
{"PlayerID": row["PLAYER_ID"], "Season": season},
)
)
# Create the factory and load the data
logfactory = NBADataFactory(
calls=calls,
output_dir=Path(Path(output_dir).parent, season),
filesystem=filesystem,
)
logfactory.load()
playerlogs = logfactory.get_data()
playerlogs["GAME_DATE"] = pd.to_datetime(playerlogs["GAME_DATE"])
games = np.unique(
playerlogs[
(playerlogs["GAME_DATE"] >= SEASONS[season]["START"])
& (playerlogs["GAME_DATE"] <= SEASONS[season]["END"])
& (playerlogs["GAME_DATE"] <= GameDate)
]["Game_ID"].values
)
shotcalls: List[Tuple[str, Dict]] = [
("ShotChart", {"GameID": game, "Season": season}) for game in games
]
# Then get all of their shots from the season
shotfactory = NBADataFactory(
calls=shotcalls,
output_dir=Path(Path(output_dir).parent, season),
filesystem=filesystem,
)
shotfactory.load()
playershots = shotfactory.get_data()
# Aggregate to get FG_PCT for each SHOT_ZONE_BASIC
agg = playershots.groupby(["PLAYER_ID", "SHOT_ZONE_BASIC"]).sum()[
["SHOT_ATTEMPTED_FLAG", "SHOT_MADE_FLAG"]
]
agg.reset_index(inplace=True)
agg.rename(
columns={
"SHOT_ATTEMPTED_FLAG": "FGA",
"SHOT_MADE_FLAG": "FGM",
"SHOT_ZONE_BASIC": "GROUP_VALUE",
},
inplace=True,
)
agg["FG_PCT"] = agg["FGM"] / agg["FGA"]
return agg
class GeneralShootingLoader(Task):
"""Load previous season general shooting data for each player in each game."""
def run( # type: ignore
self,
season: str,
boxscore: pd.DataFrame,
output_dir: str,
filesystem: Optional[str] = "file",
) -> pd.DataFrame:
"""Load the general shooting data for each player in each game.
Parameters
----------
season : str
The season string.
boxscore : pd.DataFrame
The output from ``BoxScoreTraditional.get_data("PlayerStats")``.
output_dir : str
The directory containing the data.
filesystem : str, optional (default "file")
The name of the ``fsspec`` filesystem to use.
Returns
-------
pd.DataFrame
The output dataset.
"""
current = Season(year=int(season[0:4]))
season = str(current - 1)
calls: List[Tuple[str, Dict]] = []
for _, row in boxscore.iterrows():
if pd.isnull(row["FGA"]) or row["FGA"] == 0:
continue
calls.append(
(
"PlayerDashboardGeneral",
{"PlayerID": row["PLAYER_ID"], "Season": season},
)
)
# Create the factory and load the data
factory = NBADataFactory(
calls=calls,
output_dir=Path(Path(output_dir).parent, season),
filesystem=filesystem,
)
factory.load()
return factory.get_data("OverallPlayerDashboard")
class SaveData(Task):
"""Save the game data."""
def run( # type: ignore
self,
data: pd.DataFrame,
output_dir: str,
filesystem: Optional[str] = "file",
mode: Optional[str] = "model",
):
"""Save the game data.
Saves the data to ``output_dir/data_{GameID}.csv``
Parameters
----------
data : pd.DataFrame
The clean data.
output_dir : str
The directory containing the data.
filesystem : str, optional (default "file")
The name of the ``fsspec`` filesystem to use.
mode : str, optional (default "model")
The type of clean data to save. If ``model``, save to the directory
``model-data``. If ``rating``, save to ``rating-data``.
Returns
-------
None
"""
# Define subdirectory
if mode == "model":
subdir = "model-data"
elif mode == "rating":
subdir = "rating-data"
else:
raise ValueError("Please supply a valid value for ``mode``")
# Get the filesystem
fs = fsspec.filesystem(filesystem)
fs.mkdirs(Path(output_dir, subdir), exist_ok=True)
grouped = data.groupby("GAME_ID")
for name, group in grouped:
if not name.startswith("002"):
self.logger.warning(f"{name} is not a regular season game. Skipping...")
continue
fpath = Path(output_dir, subdir, f"data_{name}.csv")
self.logger.info(f"Writing data for game {name} to {str(fpath)}")
with fs.open(fpath, "wb") as buf:
group.to_csv(buf, sep="|", mode="wb")
| 2.828125
| 3
|
kamrecsys/utils/kammath.py
|
tkamishima/kamrecsys
| 7
|
12782401
|
<filename>kamrecsys/utils/kammath.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Summary of Mathematical Functions
"""
from __future__ import (
print_function,
division,
absolute_import,
unicode_literals)
from six.moves import xrange
# =============================================================================
# Imports
# =============================================================================
import logging
from scipy.special import expit
import numpy as np
# =============================================================================
# Metadata variables
# =============================================================================
# =============================================================================
# Public symbols
# =============================================================================
__all__ = []
# =============================================================================
# Constants
# =============================================================================
# =============================================================================
# Variables
# =============================================================================
# =============================================================================
# Functions
# =============================================================================
def safe_sigmoid(x):
"""
safe_sigmoid function
To restrict the range of sigmoid function within [1e-15, 1 - 1e-15],
domain of inputs is clipped into [-SIGMOID_DOM,+SIGMOID_DOM], where
SIGMOID_DOM = :math:`log( (1 - 10^{-15}) / 10^{-15})` =
34.538776394910684
Parameters
----------
x : array_like, shape=(n_data), dtype=float
arguments of function
Returns
-------
sig : array, shape=(n_data), dtype=float
1.0 / (1.0 + exp(- x))
"""
# import numpy as np
# from scipy.special import expit
x = np.clip(x, -34.538776394910684, 34.538776394910684)
return expit(x)
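# A hedged example of the clipping behaviour (values approximate, not from
# the original module): extreme inputs never saturate to exactly 0 or 1,
# which keeps downstream log-likelihood terms finite.
#
#     safe_sigmoid(np.array([-1000., 0., 1000.]))
#     # -> approximately [1e-15, 0.5, 1 - 1e-15]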
# =============================================================================
# Classes
# =============================================================================
# =============================================================================
# Module initialization
# =============================================================================
# init logging system
logger = logging.getLogger('kamrecsys')
if not logger.handlers:
logger.addHandler(logging.NullHandler())
# =============================================================================
# Test routine
# =============================================================================
def _test():
""" test function for this module
"""
# perform doctest
import sys
import doctest
doctest.testmod()
sys.exit(0)
# Check if this is called as a command script
if __name__ == '__main__':
_test()
| 2.234375
| 2
|
theanompi/easgd_server.py
|
uoguelph-mlrg/Theano-MPI
| 65
|
12782402
|
from __future__ import absolute_import
from theanompi.lib.base import MPI_GPU_Process
from mpi4py import MPI
server_alpha = 0.5
class EASGD_Server(MPI_GPU_Process):
'''
An implementation of the server process in the Elastic Averaging SGD rule
https://arxiv.org/abs/1412.6651
implementation idea from platoon:
https://github.com/mila-udem/platoon/tree/master/platoon/channel
'''
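    # Sketch of the elastic-averaging exchange this server coordinates (the
    # actual update lives in EASGD_Exchanger; notation follows the paper):
    # with elastic strength alpha,
    #     diff          = alpha * (param_worker - param_center)
    #     param_worker -= diff
    #     param_center += diff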
def __init__(self, device):
MPI_GPU_Process.__init__(self, device) # setup ctx, comm
self.worker_gpucomm = {} # gpucomm to be setup through worker registration
self.worker_id = {}
self.first_worker_id = None
self.valid = {}
self.uidx = {}
self.adj_lr = {}
self.last = None
self.last_uidx = 0
self.start_time = None
self.uepoch = 0
self.last_uepoch = 0
def process_request(self, model, worker_id, worker_rank, message):
reply = None
import time
# Connection related request
        if message == 'sync_register':
            if self.first_worker_id is None:
                self.first_worker_id = worker_id
                print('[Server] first worker is %s' % worker_id)
reply = 'first'
# rank -> id -> gpucomm
self.worker_id[str(worker_rank)] = int(worker_id)
print('[Server] registered worker %d' % worker_id)
return reply
        wid = '%s' % worker_id
        if wid not in self.valid:
            # a new worker has joined: initialize its state and ask all
            # workers to adjust their learning rates
            self.valid[wid] = False
            self.adj_lr[wid] = False
            self.uidx[wid] = 0
            self.adj_lr = self.adj_lr.fromkeys(self.adj_lr, True)
# Training related requests
if message == 'next':
if self.start_time is None:
self.start_time = time.time()
# stop when finish all epochs
if sum(self.uidx.values()) >= self.validFreq*model.n_epochs:
print("[Server] Total training time %.2fh" % \
((time.time() - self.start_time)/3600.0))
reply = 'stop'
elif self.valid['%s' % worker_id]:
self.valid['%s' % worker_id] = False
reply = 'val'
elif self.adj_lr['%s' % worker_id]:
self.adj_lr['%s' % worker_id] = False
reply = 'adjust_hyperp'
else:
reply = 'train'
elif 'done' in message:
self.uidx['%s' % worker_id] += message['done']
            #print('[Server] uidx %d' % sum(self.uidx.values()))
elif message == 'uepoch':
reply = [self.uepoch, len(self.worker_gpucomm)]
if message in ['next', 'uepoch'] or 'done' in message:
now_uidx = sum(self.uidx.values())
self.uepoch = int(now_uidx/self.validFreq)
if self.last_uepoch != self.uepoch:
#print "[Server] now global epoch %d" % self.uepoch
self.last_uepoch = self.uepoch
# when a epoch is finished
self.adj_lr = self.adj_lr.fromkeys(self.adj_lr, True)
#self.valid = self.valid.fromkeys(self.valid, True)
# only the first worker validates
self.valid["%s" % self.first_worker_id] = True
            if self.last is None:
self.last = float(time.time())
if now_uidx - self.last_uidx >= 40:
now = float(time.time())
print('[Server] %d time per 40 batches: %.2f s' % \
(self.uepoch, (now - self.last)))
self.last_uidx = now_uidx
self.last = now
return reply
def action_after(self, model, worker_id, worker_rank, message):
if message == 'disconnect':
self.worker_gpucomm.pop(str(worker_id))
print('[Server] disconnected with worker %d' % worker_id)
elif message == 'stop':
print('[Server] stopped by %d' % worker_id)
import sys
sys.exit(0)
if message == 'sync_register':
gpucomm = self.get_intranode_pair_comm(pair=(0,worker_rank))
self.worker_gpucomm[str(worker_id)]= gpucomm
elif message == 'exchange':
self.exchanger.gpucomm = self.worker_gpucomm[str(worker_id)]
# self.exchanger.dest = worker_rank
self.exchanger.exchange()
elif message == 'copy_to_local':
self.exchanger.gpucomm = self.worker_gpucomm[str(worker_id)]
# self.exchanger.dest = worker_rank
self.exchanger.copy_to_local()
def build(self, model):
from theanompi.lib.helper_funcs import check_model
# check model has necessary attributes
check_model(model)
# choose the type of exchanger
from theanompi.lib.exchanger import EASGD_Exchanger
self.exchanger = EASGD_Exchanger(alpha=server_alpha,
param_list=model.params,
etype='server')
self.validFreq = model.data.n_batch_train
def run(self, model):
        if self.comm is None:
            print('[Server] communicator not initialized')
return
print('server started')
while True:
# Wait for next request from client
request = self.comm.recv(source=MPI.ANY_SOURCE, tag=199)
# Do some process work and formulate a reply
reply = self.process_request(model, request['id'],
request['rank'],request['message'])
# Send reply back to client
self.comm.send(reply, dest=request['rank'], tag=200)
# Do some action work after reply
self.action_after(model, request['id'],
request['rank'], request['message'])
if __name__ == '__main__':
import sys
device = sys.argv[1]
modelfile = sys.argv[2]
modelclass = sys.argv[3]
try:
cpulist = sys.argv[4]
    except IndexError:
pass
else: # optional binding cores using hwloc
from theanompi.lib.hwloc_utils import bind_to_socket_mem,detect_socket_num
bind_to_socket_mem(cpulist, label='train')
detect_socket_num(debug=True, label='train')
server = EASGD_Server(device)
config={}
config['verbose'] = False #(server.rank==0)
config['rank'] = 0
config['size'] = 1
config['no_paraload'] = True
import importlib
mod = importlib.import_module(modelfile)
modcls = getattr(mod, modelclass)
model = modcls(config)
server.build(model)
server.run(model)
| 2.25
| 2
|
ponnobot/management/commands/daraz_crawl.py
|
ahmedshahriar/bd-ponno
| 3
|
12782403
|
<reponame>ahmedshahriar/bd-ponno
from django.core.management.base import BaseCommand
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from ponnobot.spiders.daraz_spider import DarazSpider
class Command(BaseCommand):
help = "Release the Mke Crawler"
def handle(self, *args, **options):
process = CrawlerProcess(get_project_settings())
process.crawl(DarazSpider)
process.start()
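# Illustrative invocation (standard Django management-command dispatch; the
# command name comes from this module's filename):
#   python manage.py daraz_crawl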
| 1.8125
| 2
|
tests/test_plugin.py
|
fopina/tgbotplug
| 1
|
12782404
|
from tgbot import plugintest
from sample_plugin import TestPlugin
class TestPluginTest(plugintest.PluginTestCase):
def setUp(self):
self.plugin = TestPlugin()
self.bot = self.fake_bot(
'',
plugins=[self.plugin],
)
def test_print_commands(self):
from cStringIO import StringIO
out = StringIO()
self.bot.print_commands(out=out)
self.assertEqual(out.getvalue(), '''\
echo - right back at ya
echo2 - right back at ya
save - save a note
read - read a note
savegroup - save a group note
readgroup - read a group note
''')
def test_reply(self):
self.receive_message('/echo test')
self.assertReplied('test')
self.receive_message('/echo sound 1 2 3')
self.assertReplied('sound 1 2 3')
def test_need_reply_user(self):
self.receive_message('test')
self.assertNoReplies()
self.receive_message('/echo')
self.assertReplied('echo what?')
self.receive_message('test')
self.assertReplied('test')
self.receive_message('sound')
self.assertNoReplies()
def test_need_reply_by_message_id(self):
self.receive_message('/echo')
self.assertReplied('echo what?')
self.clear_queues()
# wrong reply id, should be ignored
self.receive_message('test', reply_to_message=3)
self.assertNoReplies()
# correct reply id
self.receive_message('test', reply_to_message=2)
self.assertReplied('test')
def test_need_reply_group(self):
chat = {
'id': 1,
'title': 'test group',
'type': 'group',
}
self.receive_message('/echo', chat=chat)
self.assertReplied('echo what?')
# non-selective need_reply, should accept from any user
self.receive_message(
'test',
chat=chat,
sender={
'id': 2,
'first_name': 'Jane',
'last_name': 'Doe',
}
)
self.assertReplied('test')
def test_need_reply_selective_group(self):
chat = {
'id': 1,
'title': 'test group',
'type': 'group',
}
self.receive_message('/echo2', chat=chat)
self.assertReplied('echo what?')
# selective need_reply, should ignore other user
self.receive_message(
'test',
chat=chat,
sender={
'id': 2,
'first_name': 'Jane',
'last_name': 'Doe',
}
)
self.assertNoReplies()
self.receive_message(
'test',
chat=chat,
)
self.assertReplied('test')
def test_plugin_data_single(self):
self.receive_message('/save test 123')
self.assertReplied('saved')
self.receive_message('/read', sender={
'id': 2,
'first_name': 'Jane',
'last_name': 'Doe',
})
self.assertReplied('no note saved')
self.receive_message('/read')
self.assertReplied('your note: test 123')
self.receive_message('/save test 321')
self.assertReplied('saved')
self.receive_message('/read')
self.assertReplied('your note: test 321')
def test_plugin_data_group(self):
chat = {
'id': 99,
'title': 'test group',
}
self.receive_message('/savegroup test 123', chat=chat)
self.assertReplied('saved')
self.receive_message('/readgroup')
self.assertReplied('no note saved')
self.receive_message('/readgroup', chat=chat)
self.assertReplied('this group note: test 123')
self.receive_message('/readgroup', chat=chat, sender={
'id': 2,
'first_name': 'Jane',
'last_name': 'Doe',
})
self.assertReplied('this group note: test 123')
def test_prefix_cmd(self):
self.receive_message('/prefixcmd1')
self.assertReplied('1')
self.receive_message('/prefixcmd12@test_bot')
self.assertReplied('12')
self.receive_message('/prefixcmd@test_bot 123')
self.assertReplied('123')
def test_list_keys(self):
sender2 = {
'id': 2,
'first_name': 'Jane',
'last_name': 'Doe',
}
chat1 = {
'id': 3,
'title': 'test chat',
}
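        # Each /save below stores a note under (chat id, sender id); the
        # trailing "# c s" comments record those two ids. In private chats the
        # chat id appears to equal the sender id, which the asserts rely on.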
self.receive_message('/save note1') # 1 1
self.receive_message('/save note2', sender=sender2) # 2 2
self.receive_message('/save note3', chat=chat1) # 3 1
self.receive_message('/save note4', sender=sender2, chat=chat1) # 3 2
self.assertEqual(
list(self.plugin.iter_data_keys()),
['1', '2', '3'],
)
self.assertEqual(
list(self.plugin.iter_data_key_keys()),
[],
)
self.assertEqual(
list(self.plugin.iter_data_key_keys('1')),
['1'],
)
self.assertEqual(
list(self.plugin.iter_data_key_keys('2')),
['2'],
)
self.assertEqual(
list(self.plugin.iter_data_key_keys('3')),
['1', '2'],
)
self.plugin.save_data(3, key2=2)
self.assertEqual(
list(self.plugin.iter_data_key_keys('3')),
['1'],
)
| 2.4375
| 2
|
lib/encoder_configuration_unittest.py
|
Jesseyx/compare-codecs
| 50
|
12782405
|
<reponame>Jesseyx/compare-codecs
#!/usr/bin/python
# Copyright 2015 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for encoder_configuration."""
import os
import unittest
import encoder_configuration
class TestConfiguration(unittest.TestCase):
def test_defaults(self):
# These tests verify reading from the current environment.
# They should work no matter what the variables are set to,
# as long as required variables are set.
        self.assertEqual(os.environ['CODEC_WORKDIR'],
                         encoder_configuration.conf.workdir())
        self.assertEqual(os.environ['WORKDIR'],
                         encoder_configuration.conf.sysdir())
        self.assertEqual(os.environ['CODEC_TOOLPATH'],
                         encoder_configuration.conf.tooldir())
        self.assertEqual(os.getenv('CODEC_SCOREPATH', ''),
                         ':'.join(encoder_configuration.conf.scorepath()))
def test_scorepath_variants(self):
# This test works by modifying the environment and then creating
# a new configuration object. It does not use the global configuration
# object.
if 'CODEC_SCOREPATH' in os.environ:
del os.environ['CODEC_SCOREPATH']
os.environ['CODEC_WORKDIR'] = 'strange_string'
my_conf = encoder_configuration.Configuration()
        self.assertEqual(0, len(my_conf.scorepath()))
os.environ['CODEC_SCOREPATH'] = 'a:b'
my_conf = encoder_configuration.Configuration()
        self.assertEqual(['a', 'b'], my_conf.scorepath())
def test_override(self):
encoder_configuration.conf.override_workdir_for_test('new_value')
        self.assertEqual('new_value',
                         encoder_configuration.conf.workdir())
if __name__ == '__main__':
unittest.main()
| 2.359375
| 2
|
src/mainControllerClass.py
|
SeferMirza/CommandWithEyesOpenCV
| 0
|
12782406
|
import cv2
import numpy as np
import sys, getopt
import time
import dlib
import math
i=False
class Controller():
def __init__(self):
self.detector = dlib.get_frontal_face_detector()
self.predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
self.frame = None
self.ret = None
self.faces = None
self.cap = cv2.VideoCapture(0)
#open phone camera API
        self.address = None  # e.g. "https://192.168.43.1:8080/video"
self.threshold = 35
self.grayFrame = None
self.cutEyes = None
self.img = cv2.imread("anime.jpg")
self.cutEyesGray = None
self.contours = None
self.capThreshold = None
self.left_eye = None
self.maskEyes = None
self.landmarks = None
self.min_x = None
self.max_x = None
self.min_y = None
self.max_y = None
self.otps = None
self.args = None
self.cameraIs = False
self.thresholdIs = False
self.rgbIs = False
self.eyeLinesIs = False
self.fx = 1
self.fy = 1
self.check = True
self.calibrationIsOkey = False
self.testImage = None
self.background = None
self.eyeCenter = None
self.maxArray = np.array([[0,0]])
self.maxMean = None
self.minArray = np.array([[0,0]])
self.minMean = None
self.key = None
self.time = 0
self.optIfBlock = 2
self.startTimeIfBlock = True
self.CalibFinishBlock = False
self.finalScreen = False
self.screenSizeX=1920
self.screenSizeY=1080
def getOtps(self):
try:
self.otps, self.args = getopt.getopt(sys.argv[1:],"h:c:t:r:a:e:",["help","cameradress","threshold","rgb","eyeline","halfcut","quartercut","calib"])
except getopt.GetoptError as err:
print(err)
sys.exit()
#self.otps = []
def nothing(self,x):
pass
def main(self):
self.getOtps()
for otp, arg in self.otps:
if otp == '-a':
self.address = str(arg)
self.cap.open(self.address)
elif otp == '--threshold':
self.thresholdIs = True
for ot , ar in self.otps:
if ot == '-t':
self.threshold = int(ar)
elif (otp == '-r' and arg == 'True'):
self.rgbIs = True
elif otp == '-e' and arg == 'True':
self.eyeLinesIs = True
elif otp == '-c' and arg == 'True':
self.cameraIs = True
elif otp == '--halfcut':
self.fx = 0.5
self.fy = 0.5
elif otp == '--quartercut':
self.fx = 0.25
self.fy = 0.25
elif otp == '--calib':
self.calibrationIsOkey = True
#TODO
#print(self.otps, self.args)
#self.optimizationEyesLooking()
array1 = [[0,0]]
openImage = False
o = False
while True:
#first open cam
try:
self.readSelf()
self.frame = cv2.resize(self.frame,None,fx=self.fx,fy=self.fy)
#TODO
#self.frame = cv2.rotate(self.frame,cv2.ROTATE_90_COUNTERCLOCKWISE)
self.grayFrame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
            except Exception as e:
                print("frame read error:", e)
                continue
self.faces = self.detector(self.grayFrame)
self.lanmarkingLeftEyes()
if self.eyeLinesIs == True:
self.eyeLines()
if self.cameraIs == True and self.ret:
cv2.imshow("Frame", self.frame)
            #second: optional debug windows (threshold etc.) for tuning the eye selection
if self.thresholdIs == True:
cv2.imshow("2",self.capThreshold)
if self.cutEyesGray is not None:
cv2.imshow("3", self.cutEyesGray)
self.key = cv2.waitKey(1)
            #third: calibration
#key 'o'
if self.key == 79 or self.key == 111:
o = True
self.cameraIs = False
self.thresholdIs = False
cv2.destroyAllWindows()
#key 'space'
if self.key == 32:
cv2.destroyAllWindows()
o = False
openImage = True
if self.calibrationIsOkey == True and o == True:
self.optimizationEyesLooking()
if openImage == True:
self.lookingPointDrawCircle()
#four final
if self.finalScreen:
self.final_screen()
if self.key == 27:
break
self.cap.release()
cv2.destroyAllWindows()
    def showImage(self):
        # Never called from main(); rightMean/bottomMean were never defined,
        # so self.maxMean is assumed here to be the intended normalizer.
        self.testImage = cv2.imread('anime.jpg')
        imageH, imageW, imageChannels = self.testImage.shape
        cx = int((self.eyeCenter[0] * imageW) / self.maxMean[0])
        cy = int((self.eyeCenter[1] * imageH) / self.maxMean[1])
        cv2.circle(self.testImage, (cx, cy), 5, (0, 0, 255), -1)
def lookingPointDrawCircle(self):
self.thresholdIs = False
cv2.imshow("",self.img)
def readSelf(self):
self.ret, self.frame=self.cap.read()
def lanmarkingLeftEyes(self):
for face in self.faces:
#x = face.left()
#y = face.top()
#x1 = face.right()
#y1 = face.bottom()
self.landmarks = self.predictor(self.grayFrame, face)
self.left_eye = np.array([(self.landmarks.part(36).x, self.landmarks.part(36).y),
(self.landmarks.part(37).x, self.landmarks.part(37).y),
(self.landmarks.part(38).x, self.landmarks.part(38).y),
(self.landmarks.part(39).x, self.landmarks.part(39).y),
(self.landmarks.part(40).x, self.landmarks.part(40).y),
(self.landmarks.part(41).x, self.landmarks.part(41).y)], np.int32)
h, w, _ = self.frame.shape
mask = np.zeros((h, w), np.uint8)
cv2.polylines(mask, [self.left_eye], True, 255, 2)
cv2.fillPoly(mask, [self.left_eye], 255)
self.maskEyes = cv2.bitwise_and(self.grayFrame, self.grayFrame, mask=mask)
self.maskEyes = np.where(self.maskEyes==0, 255,self.maskEyes)
self.min_x = np.min(self.left_eye[:,0])
self.max_x = np.max(self.left_eye[:,0])
self.min_y = np.min(self.left_eye[:,1])
self.max_y = np.max(self.left_eye[:,1])
self.cutEyes = self.maskEyes[self.min_y : self.max_y, self.min_x : self.max_x]
self.cutEyes = cv2.resize(self.cutEyes, None, fx=5, fy=5)
self.capThreshold = cv2.GaussianBlur(self.cutEyes, (5,5), 0)
_, self.capThreshold = cv2.threshold(self.capThreshold, self.threshold, 255, cv2.THRESH_BINARY_INV)
self.contours, _ = cv2.findContours(self.capThreshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for cnt in self.contours:
(x, y, w, h) = cv2.boundingRect(cnt)
cv2.rectangle(self.capThreshold, (x, y), (x + w, y + h), (255, 0, 0), 1)
#middle point x = x + int(w/2) y = y + int(h/2)
cv2.circle(self.cutEyes, (x+int(w/2),y+int(h/2)) ,5, (255,0,0),-1)
self.eyeCenter = [x+int(w/2),y+int(h/2)]
break
if self.rgbIs == True:
cv2.imshow("c", self.cutEyes)
def final_screen(self):
x,y=self.pC()
cv2.namedWindow("dd", cv2.WND_PROP_FULLSCREEN)
cv2.moveWindow("dd", screen.x - 1, screen.y - 1)
cv2.setWindowProperty("dd", cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
        # round x and y to the nearest integer (halves round up)
        x = int(math.floor(x + 0.5))
        y = int(math.floor(y + 0.5))
cv2.circle(self.img, (int(x),int(y)), 5, (0,0,0), -1)
#print("x:",x," y:",y, " eyecenter:",self.eyeCenter)
cv2.imshow("dd",self.img)
def pC(self):
#print(self.minMean)
return (self.screenSizeX*(self.eyeCenter[0]-self.minMean[0]))/(self.maxMean[0]-self.minMean[0]),(self.screenSizeY*(self.eyeCenter[1]-self.minMean[1]))/(self.maxMean[1]-self.minMean[1])
def eyeLines(self):
horizontalLineLeft = (self.landmarks.part(36).x, self.landmarks.part(36).y)
horizontalLineRight = (self.landmarks.part(39).x, self.landmarks.part(39).y)
verticalLineTop = (self.landmarks.part(38).x, self.landmarks.part(38).y)
verticalLineBottom = (self.landmarks.part(40).x, self.landmarks.part(40).y)
cv2.line(self.frame, horizontalLineLeft, horizontalLineRight,(0,255,0),1)
cv2.line(self.frame, verticalLineTop, verticalLineBottom,(0,255,0),1)
def getCutEyeShape(self,x,y,x1,y1):
return self.frame[y:y1, x:x1]
def optimizationEyesLooking(self):
background = np.zeros((screen.height,screen.width),np.uint8)
cv2.namedWindow("aa", cv2.WND_PROP_FULLSCREEN)
cv2.moveWindow("aa", screen.x - 1, screen.y - 1)
cv2.setWindowProperty("aa", cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
if self.optIfBlock==1:
self.startTime(time.perf_counter())
if time.perf_counter()-self.time < 3:
                if self.eyeCenter is not None:
self.minArray = np.append(self.minArray, [self.eyeCenter],axis=0)
cv2.circle(background, (10,10), 5, (255,255,255), -1)
(text_width, text_height) = cv2.getTextSize("Follow point", cv2.FONT_HERSHEY_SIMPLEX, 0.8, 1)[0]
cv2.putText(background, "Follow point", ((screen.width//2)-(text_width//2),(screen.height//2)-30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255), 1 , cv2.LINE_AA)
elif time.perf_counter()-self.time < 6 and time.perf_counter()-self.time > 3:
                if self.eyeCenter is not None:
self.maxArray = np.append(self.maxArray, [self.eyeCenter],axis=0)
cv2.circle(background, (screen.width-10,screen.height-10), 5, (255,255,255), -1)
(text_width, text_height) = cv2.getTextSize("Follow point", cv2.FONT_HERSHEY_SIMPLEX, 0.8, 1)[0]
cv2.putText(background, "Follow point", ((screen.width//2)-(text_width//2),(screen.height//2)-30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255), 1 , cv2.LINE_AA)
elif time.perf_counter()-self.time == 6:
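                # NOTE: float equality with perf_counter() essentially never
                # holds, so this branch is effectively dead; the window is
                # closed via destroyWindow("aa") once 'i' is pressed below.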
cv2.destroyAllWindows()
else:
self.CalibFinishBlock = True
self.calibrationIsOkey = True
self.check = True
self.optIfBlock=3
elif self.optIfBlock==2:
(text_width, text_height) = cv2.getTextSize("Kalibrasyonu ayarlamak için 's' tuşuna basın", cv2.FONT_HERSHEY_SIMPLEX, 0.8, 1)[0]
cv2.putText(background, "Kalibrasyonu ayarlamak için 's' tuşuna basın", ((screen.width//2)-(text_width//2),(screen.height//2)-30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255), 1 , cv2.LINE_AA)
elif self.optIfBlock==3:
self.optFinish(background)
#key 's'
if self.key == 83 or self.key == 115:
self.optIfBlock = 1
#key 'i'
if self.key == 73 or self.key == 105:
self.minArray = self.minArray[1:]
self.maxArray = self.maxArray[1:]
#self.minMean = self.minArray.mean(0)
self.minMean = self.minArray.min(0)
#self.maxMean = self.maxArray.mean(0)
self.maxMean = self.maxArray.max(0)
self.calibrationIsOkey=False
self.finalScreen=True
cv2.destroyWindow("aa")
else:
cv2.imshow("aa",background)
def optFinish(self, stage):
(text_width, text_height) = cv2.getTextSize("Go to do image 'i'", cv2.FONT_HERSHEY_SIMPLEX, 0.8, 1)[0]
cv2.putText(stage, "Go to do image 'i'", ((screen.width//2)-(text_width//2),(screen.height//2)-30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255), 1 , cv2.LINE_AA)
cv2.imshow("aa",stage)
def startTime(self, time):
if self.startTimeIfBlock:
self.time = time
self.startTimeIfBlock = False
def getCameraShape(self):
for i in range(3):
print(self.frame.shape[i])
        return self.frame.shape[1], self.frame.shape[0]
if __name__ == "__main__":
import screeninfo
screen = screeninfo.get_monitors()[0]
ct = Controller()
ct.main()
| 2.578125
| 3
|
spatiam_persist.py
|
Spatiam/Spatiam_ION_DMI
| 0
|
12782407
|
<gh_stars>0
import sys
import time
import requests
import subprocess
from subprocess import PIPE
import datetime
MGR_API_URL = "https://www.spatiam.com/ion-dtn-mgr/api/"
CONFIG_FILENAME = "spatiam_config.txt"
AUTH_TOKEN = sys.argv[1]
NETWORK_ID = sys.argv[4]
NODE_UUID = sys.argv[5]
NODE_LISTENING_IP = sys.argv[6]
node_update = datetime.datetime.fromisoformat(sys.argv[2] + ' ' + sys.argv[3])
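# Expected invocation (argv layout inferred from the indices above):
#   python spatiam_persist.py <AUTH_TOKEN> <DATE> <TIME> <NETWORK_ID> <NODE_UUID> <NODE_LISTENING_IP>
# where <DATE> <TIME> form an ISO timestamp such as "2021-06-01 12:00:00"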
print("SPATIAM PERSISTENCE SCRIPT")
print("The provided node will restart if any network updates are detected")
print("\nChecking for updates ...")
# ---------------------------------------------------------------
# ion_alive: Checks bplist return code to check if ION is running
# ---------------------------------------------------------------
def ion_alive():
command = "bplist"
proc = subprocess.Popen([command], shell=True, stdout=subprocess.PIPE)
proc.communicate()
return proc.returncode == 0
# ---------------------------------------------------------------
# ionrestart: Starts ION, stops it first if it is already running
# ---------------------------------------------------------------
def ionrestart():
print("Restarting ION ...")
failed_restart = 1
while True:
if failed_restart >= 3:
print("ION failed to respond, exiting program")
quit()
if ion_alive():
ionstop_command = 'ionstop'
subprocess.Popen([ionstop_command], shell=True, stdout=PIPE, stderr=PIPE).wait()
time.sleep(2)
ionstart_command = 'ionstart -I ' + CONFIG_FILENAME
subprocess.Popen([ionstart_command], shell=True, stdout=PIPE, stderr=PIPE).wait()
time.sleep(5)
if not ion_alive():
failed_restart += 1
else:
print("ION Successfully restarted")
break
# ----------------------------------------------------------
# download_config: Download configuration file of given node
# ----------------------------------------------------------
def download_config():
try:
r = requests.get(MGR_API_URL + 'dynamic-config', params={'node': NODE_UUID, 'listeningip': NODE_LISTENING_IP, 'length':315360000})
if r.status_code == 200:
config = r.text
            # Update only if the config file has changed (the network could be
            # reporting a 'cosmetic' change)
            with open(CONFIG_FILENAME) as f:
                current_config = f.read()
            if config != current_config:
try:
print("Network update detected (" + str(datetime.datetime.now())+")")
config_file = open(CONFIG_FILENAME, "w")
config_file.write(config)
config_file.close()
print("Node configuration downloaded (" +CONFIG_FILENAME +")")
ionrestart()
print("\nChecking for updates...")
return True
except Exception as e:
print(e)
return False
else:
return True
else:
return False
except:
return False
# -----------------------------------------------------------------
# latest_network_update: Returns timestamp of latest network update
# -----------------------------------------------------------------
def latest_network_update():
try:
r = requests.get(MGR_API_URL + 'api/network/'+NETWORK_ID+'/last_change', headers = {'Authorization': 'Token ' + AUTH_TOKEN})
if r.status_code == 200:
return datetime.datetime.fromisoformat(r.text)
else:
return None
except:
return None
while True:
latest_update = latest_network_update()
# Network has had an update
if latest_update is not None and latest_update > node_update:
if download_config():
node_update = latest_update
# In case ION stops working
elif not ion_alive():
print("ION failure detected (" + str(datetime.datetime.now()) + ")")
ionrestart()
time.sleep(10)
| 2.625
| 3
|
textstat_core/sentence.py
|
textstat/textstat-core
| 1
|
12782408
|
<filename>textstat_core/sentence.py<gh_stars>1-10
from .word_collection import WordCollection
class Sentence(WordCollection):
@property
def length(self):
return len(self.words)
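# Illustrative usage (assuming WordCollection, defined elsewhere in the
# package, can be constructed from raw text):
#   Sentence('the quick brown fox').length  # -> 4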
| 2.453125
| 2
|
src/diem/testnet.py
|
Embodimentgeniuslm3/client-sdk-python
| 32
|
12782409
|
# Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0
"""Provides utilities for working with Diem Testnet.
```python
from diem import testnet
from diem.testing import LocalAccount
# create client connects to testnet
client = testnet.create_client()
# create faucet for minting coins for your testing account
faucet = testnet.Faucet(client)
# create a local account and mint some coins for it
account: LocalAccount = faucet.gen_account()
```
"""
import requests
import typing
from . import diem_types, jsonrpc, utils, chain_ids, bcs, identifier, stdlib
from .testing import LocalAccount, DD_ADDRESS
JSON_RPC_URL: str = "https://testnet.diem.com/v1"
FAUCET_URL: str = "https://testnet.diem.com/mint"
CHAIN_ID: diem_types.ChainId = chain_ids.TESTNET
DESIGNATED_DEALER_ADDRESS: diem_types.AccountAddress = utils.account_address(DD_ADDRESS)
TEST_CURRENCY_CODE: str = "XUS"
HRP: str = identifier.TDM
def create_client() -> jsonrpc.Client:
"""create a jsonrpc.Client connects to Testnet public full node cluster"""
return jsonrpc.Client(JSON_RPC_URL)
def gen_vasp_account(client: jsonrpc.Client, base_url: str) -> LocalAccount:
raise Exception("deprecated: use `gen_account` instead")
def gen_account(
client: jsonrpc.Client, dd_account: bool = False, base_url: typing.Optional[str] = None
) -> LocalAccount:
"""generates a Testnet onchain account"""
account = Faucet(client).gen_account(dd_account=dd_account)
if base_url:
payload = stdlib.encode_rotate_dual_attestation_info_script_function(
new_url=base_url.encode("utf-8"), new_key=account.compliance_public_key_bytes
)
apply_txn(client, account, payload)
return account
def gen_child_vasp(
client: jsonrpc.Client,
parent_vasp: LocalAccount,
initial_balance: int = 10_000_000_000,
currency: str = TEST_CURRENCY_CODE,
) -> LocalAccount:
child, payload = parent_vasp.new_child_vasp(initial_balance, currency)
apply_txn(client, parent_vasp, payload)
return child
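# Illustrative usage of the helpers above (all names are from this module):
#   client = create_client()
#   parent = gen_account(client)
#   child = gen_child_vasp(client, parent, initial_balance=1_000_000)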
def apply_txn(
client: jsonrpc.Client, vasp: LocalAccount, payload: diem_types.TransactionPayload
) -> jsonrpc.Transaction:
seq = client.get_account_sequence(vasp.account_address)
txn = vasp.create_signed_txn(seq, payload)
client.submit(txn)
return client.wait_for_transaction(txn)
class Faucet:
"""Faucet service is a proxy server to mint coins for your test account on Testnet
See https://github.com/diem/diem/blob/master/json-rpc/docs/service_testnet_faucet.md for more details
"""
def __init__(
self,
client: jsonrpc.Client,
url: typing.Union[str, None] = None,
retry: typing.Union[jsonrpc.Retry, None] = None,
) -> None:
self._client: jsonrpc.Client = client
self._url: str = url or FAUCET_URL
self._retry: jsonrpc.Retry = retry or jsonrpc.Retry(5, 0.2, Exception)
self._session: requests.Session = requests.Session()
def gen_account(self, currency_code: str = TEST_CURRENCY_CODE, dd_account: bool = False) -> LocalAccount:
account = LocalAccount.generate()
self.mint(account.auth_key.hex(), 100_000_000_000, currency_code, dd_account)
return account
def mint(
self,
authkey: str,
amount: int,
currency_code: str,
dd_account: bool = False,
vasp_domain: typing.Optional[str] = None,
is_remove_domain: bool = False,
) -> None:
self._retry.execute(
lambda: self._mint_without_retry(authkey, amount, currency_code, dd_account, vasp_domain, is_remove_domain)
)
def _mint_without_retry(
self,
authkey: str,
amount: int,
currency_code: str,
dd_account: bool = False,
vasp_domain: typing.Optional[str] = None,
is_remove_domain: bool = False,
) -> None:
response = self._session.post(
self._url,
params={
"amount": amount,
"auth_key": authkey,
"currency_code": currency_code,
"return_txns": "true",
"is_designated_dealer": "true" if dd_account else "false",
"vasp_domain": vasp_domain,
"is_remove_domain": "true" if is_remove_domain else "false",
},
)
response.raise_for_status()
de = bcs.BcsDeserializer(bytes.fromhex(response.text))
length = de.deserialize_len()
        for _ in range(length):
txn = de.deserialize_any(diem_types.SignedTransaction)
try:
self._client.wait_for_transaction(txn)
except jsonrpc.TransactionExecutionFailed as e:
if e.txn.vm_status.explanation.reason == "EDOMAIN_ALREADY_EXISTS":
continue
raise e
| 2.390625
| 2
|
lambda/src/compiler/pkg/state_machine_resources.py
|
jack-e-tabaska/BayerCLAW
| 0
|
12782410
|
<filename>lambda/src/compiler/pkg/state_machine_resources.py
from collections import deque
import json
import logging
from typing import Generator, List, Dict, Tuple
from uuid import uuid4
import boto3
from . import batch_resources as b
from . import chooser_resources as c
from . import enhanced_parallel_resources as ep
from . import native_step_resources as ns
from . import scatter_gather_resources as sg
from . import subpipe_resources as sp
from .util import CoreStack, Step, Resource, State, make_logical_name, lambda_logging_block
from .validation import validate_batch_step, validate_native_step, validate_parallel_step, validate_scatter_step, \
validate_subpipe_step, validate_chooser_step
def make_launcher_step(core_stack: CoreStack, wf_params: dict) -> dict:
launch_step_name = "Launch"
ret = {
launch_step_name: {
"Type": "Task",
"Resource": core_stack.output("LauncherLambdaArn"),
"Parameters": {
"repo_template": wf_params["repository"],
"input_obj.$": "$",
**lambda_logging_block(launch_step_name),
},
"ResultPath": "$",
"OutputPath": "$",
"_stet": True,
},
}
return ret
def make_step_list(steps: List[Dict]) -> List[Step]:
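    # Walk the raw steps in reverse so that a step with no explicit Next/End
    # falls through to the step that follows it in the list; a step with an
    # explicit Next keeps it, and an End (or the final step) gets an empty
    # next pointer.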
ret = deque()
next_step = ""
for step in reversed(steps):
name, spec = next(iter(step.items()))
n = spec.get("Next") or spec.get("next")
e = spec.get("End") or spec.get("end")
if n is None and e is None:
step = Step(name, spec, next_step)
elif n is not None:
step = Step(name, spec, n)
else:
step = Step(name, spec, "")
ret.appendleft(step)
next_step = name
return list(ret)
def process_step(core_stack: CoreStack,
step: Step,
wf_params: dict,
depth: int) -> Generator[Resource, None, List[State]]:
if "scatter" in step.spec:
normalized_step = validate_scatter_step(step)
states_to_add = yield from sg.handle_scatter_gather(core_stack,
normalized_step,
wf_params,
depth)
elif "image" in step.spec:
normalized_step = validate_batch_step(step)
states_to_add = yield from b.handle_batch(core_stack,
normalized_step,
wf_params)
elif "subpipe" in step.spec:
normalized_step = validate_subpipe_step(step)
states_to_add = sp.handle_subpipe(core_stack,
normalized_step)
elif "Type" in step.spec:
normalized_step = validate_native_step(step)
states_to_add = yield from ns.handle_native_step(core_stack,
normalized_step,
wf_params,
depth)
elif "choices" in step.spec:
normalized_step = validate_chooser_step(step)
states_to_add = c.handle_chooser_step(core_stack,
normalized_step)
elif "branches" in step.spec:
normalized_step = validate_parallel_step(step)
states_to_add = yield from ep.handle_parallel_step(core_stack,
normalized_step,
wf_params,
depth)
else:
raise RuntimeError(f"step '{step.name}' is not a recognized step type")
return states_to_add
def make_branch(core_stack: CoreStack,
raw_steps: list,
wf_params: dict,
include_launcher: bool = False,
depth: int = 0) -> Generator[Resource, None, dict]:
logger = logging.getLogger(__name__)
if include_launcher:
launcher_step = make_launcher_step(core_stack, wf_params)
raw_steps.insert(0, launcher_step)
steps = make_step_list(raw_steps)
states = {}
for step in steps:
states_to_add = yield from process_step(core_stack, step, wf_params, depth)
states.update(states_to_add)
ret = {
"StartAt": steps[0].name,
"States": states,
}
return ret
def write_state_machine_to_fh(sfn_def: dict, fh) -> dict:
json.dump(sfn_def, fh, indent=4)
ret = {
"Bucket": "FILE",
"Key": "FILE",
"Version": "FILE",
}
return ret
def write_state_machine_to_s3(sfn_def: dict, core_stack: CoreStack) -> dict:
def_json = json.dumps(sfn_def, indent=4)
bucket = core_stack.output("ResourceBucketName")
base_filename = uuid4().hex
key = f"stepfunctions/{base_filename}.json"
s3 = boto3.client("s3")
response = s3.put_object(
Bucket=bucket,
Key=key,
Body=def_json.encode("utf-8")
)
ret = {
"Bucket": bucket,
"Key": key,
}
if "VersionId" in response:
ret["Version"] = response["VersionId"]
return ret
def handle_state_machine(core_stack: CoreStack,
raw_steps: List[Dict],
wf_params: dict,
dst_fh=None) -> Generator[Resource, None, str]:
state_machine_def = yield from make_branch(core_stack, raw_steps, wf_params, include_launcher=True)
if dst_fh is None:
state_machine_location = write_state_machine_to_s3(state_machine_def, core_stack)
else:
state_machine_location = write_state_machine_to_fh(state_machine_def, dst_fh)
state_machine_name = make_logical_name("main.state.machine")
ret = {
"Type": "AWS::StepFunctions::StateMachine",
"Properties": {
"StateMachineName": {
"Ref": "AWS::StackName",
},
"RoleArn": core_stack.output("StatesExecutionRoleArn"),
"DefinitionS3Location": state_machine_location,
"DefinitionSubstitutions": None,
},
}
yield Resource(state_machine_name, ret)
return state_machine_name
def add_definition_substitutions(sfn_resource: Resource, other_resources: dict) -> None:
defn_subs = {k: {"Ref": k} for k in other_resources.keys() if k != sfn_resource.name}
defn_subs["WorkflowName"] = {"Ref": "AWS::StackName"}
defn_subs["AWSRegion"] = {"Ref": "AWS::Region"}
defn_subs["AWSAccountId"] = {"Ref": "AWS::AccountId"}
sfn_resource.spec["Properties"]["DefinitionSubstitutions"] = defn_subs
| 1.859375
| 2
|
tests/conftest.py
|
sameraslan/rymscraper
| 41
|
12782411
|
<reponame>sameraslan/rymscraper
from rymscraper import RymNetwork
import pytest
@pytest.fixture(scope="session", autouse=True)
def network():
network = RymNetwork(headless=True)
yield network
network.browser.close()
network.browser.quit()
| 1.851563
| 2
|
test/transfer_action_test.py
|
mvdbeek/pulsar
| 0
|
12782412
|
<reponame>mvdbeek/pulsar<gh_stars>0
import os
from .test_utils import files_server
from pulsar.client.action_mapper import RemoteTransferAction
def test_write_to_file():
with files_server() as (server, directory):
from_path = os.path.join(directory, "remote_get")
open(from_path, "wb").write(b"123456")
to_path = os.path.join(directory, "local_get")
url = server.application_url + "?path=%s" % from_path
RemoteTransferAction(to_path, url=url).write_to_path(to_path)
assert open(to_path, "rb").read() == b"123456"
def test_write_from_file():
with files_server() as (server, directory):
from_path = os.path.join(directory, "local_post")
open(from_path, "wb").write(b"123456")
to_path = os.path.join(directory, "remote_post")
url = server.application_url + "?path=%s" % to_path
RemoteTransferAction(to_path, url=url).write_from_path(from_path)
posted_contents = open(to_path, "rb").read()
assert posted_contents == b"123456", posted_contents
| 2.375
| 2
|
utils/sundry_utils.py
|
czh513/RLs-for-TF2.0-Gym-Unity
| 1
|
12782413
|
<filename>utils/sundry_utils.py
class LinearAnnealing:
def __init__(self, x, x_, end):
'''
Params:
x: start value
x_: end value
end: annealing time
'''
        assert end != 0, 'the number of time steps for annealing must be larger than 0.'
self.x = x
self.x_ = x_
self.interval = (x_ - x) / end
def __call__(self, current):
return max(self.x + self.interval * current, self.x_)
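# Illustrative usage (decaying an exploration rate from 1.0 to 0.1 over 1000
# steps):
#   anneal = LinearAnnealing(1.0, 0.1, 1000)
#   eps = anneal(step)  # 1.0 at step 0, clamped at 0.1 from step 1000 onward
# Note: the max() clamp means this only anneals downward (requires x > x_).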
| 2.984375
| 3
|
Post-process/correlation/correl.py
|
michgz/vibration-record
| 0
|
12782414
|
# Run with Python 2.7
##
from trace import Trace
from trace import ReadIn
import datetime
import os
import sys
import zipfile
import shutil
import math
import getopt
import numpy
def ODSDate(dt):
return dt.strftime("<table:table-cell table:style-name=\"ce2\" office:value-type=\"date\" office:date-value=\"%Y-%m-%dT%H:%M:%S\" calcext:value-type=\"date\"><text:p>%d/%m/%Y %H:%M:%S</text:p></table:table-cell>")
def bIncludeTraces():
return True
def correl(infile1, infile2, intext1, intext2, output, diff = float('nan')):
# The time delta by which ADXL is _before_ LSM6.
#
# This was determined manually. Next big challenge: do it automatically!!
#
runningPath = os.path.dirname(os.path.abspath(__file__))
adxl = []
ax1 = ReadIn([infile1])
for ay in ax1:
az = ay.Calc()
adxl.append([ay.dt,az[1],az[2]])
lsm6 = []
ax2 = ReadIn([infile2])
for ay in ax2:
az = ay.Calc()
lsm6.append([ay.dt,az[1],az[2]])
if (math.isnan(diff)):
        # Here we determine the time delta automatically.
#
# First, ensure that the two traces at least overlap in time.
if (min([a[0] for a in adxl]) < max([a[0] for a in lsm6])) and (min([a[0] for a in lsm6]) < max([a[0] for a in adxl])):
pass # we're ok
else:
            sys.exit(6) # the two traces do not overlap in time
# Now choose the top 25%
min_len = min([len(adxl), len(lsm6)])
if (min_len <= 10):
choose_len = min_len
else:
            choose_len = min_len / 4
def sortKey(e):
return e[1] # or e[2]
sorted_1 = sorted(adxl, key=sortKey, reverse=True)[0:choose_len]
sorted_2 = sorted(lsm6, key=sortKey, reverse=True)[0:choose_len]
#The following are in seconds
epsilon = 20
step_big = 10 # best if it's about 1/2 epsilon
step_small = 1 # usually 1
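        # Two-pass alignment search: each candidate offset is scored by how
        # many top-25% events of trace 1, shifted by the offset, land within
        # epsilon seconds of some top-25% event of trace 2; a coarse scan over
        # +/-4000 s is then refined around its best offset in 1 s steps.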
vals = range(-4000,4000,step_big)
# First pass
v3 = []
for v in vals:
b3 = 0
for b1 in sorted_1:
for b2 in sorted_2:
if (abs(b1[0]+datetime.timedelta(seconds=v)-b2[0]).total_seconds() < epsilon):
b3 += 1
break
v3.append(b3)
max_at = vals[v3.index(max(v3))]
# Second pass
vals = range(max_at - 2*step_big, max_at + 2*step_big, step_small)
v3 = []
for v in vals:
b3 = 0
for b1 in sorted_1:
for b2 in sorted_2:
if (abs(b1[0]+datetime.timedelta(seconds=v)-b2[0]).total_seconds() < epsilon):
b3 += 1
break
v3.append(b3)
max_val = max(v3)
# Now take the average of all values with the same maximum value
max_at = [i for i, j in enumerate(v3) if j == max_val]
time_diff = datetime.timedelta( seconds=vals[sum(max_at)/len(max_at)] )
else:
time_diff = datetime.timedelta( seconds=diff )
print "Using time difference of " + str(time_diff.total_seconds()) + " seconds"
    # The furthest apart two times can be and still count as the same
    #
epsilon = datetime.timedelta(seconds=15)
print len(adxl)
print len(lsm6)
adxl_idx = len(adxl)*[-1]
lsm6_idx = len(lsm6)*[-1]
a1_idx = 0
## Find matching indices
##
for a1 in adxl:
targ_time = a1[0] + time_diff
best_idx = 999999 # Just any value
best_diff = datetime.timedelta.max
a2_idx = 0
for a2 in lsm6:
diff = targ_time - a2[0]
if (abs(diff) < best_diff):
best_diff = abs(diff)
best_idx = a2_idx
a2_idx += 1
if (best_diff < epsilon) and (lsm6_idx[best_idx] == -1):
adxl_idx[a1_idx] = best_idx
lsm6_idx[best_idx] = a1_idx
a1_idx += 1
## Choose some representative points
##
bx2 = []
bx_idx = 0
for a1 in adxl:
bx2.append(a1[1]) # or [2]
bx_idx += 1
bx1 = numpy.argsort(numpy.array(bx2)) # sorted indices
## Select only those for which a match exists
#
bx0 = []
for b0 in bx1:
if adxl_idx[b0]>=0:
bx0.append(b0)
## Limit the number
#
num_bx = min(len(bx0) / 2, 100) # number of indices to use.
bx1 = bx0[0:num_bx]
    if os.path.isabs(output):
theName = output
else:
theName = os.path.join(runningPath, output)
axis_labels = True
if True:
# Now output to a file
i3 = 0
for i1 in range(0,len(adxl_idx)):
if (adxl_idx[i1] >= 0):
i3 += 1
noRows = i3 - 1
print "noRows = %d" % noRows
with open( runningPath + "/content_local_obj1.xml", "w") as dest:
with open( runningPath + "/6_2.xml", "r") as source:
while True:
copy_buff = source.read(4096)
if not copy_buff:
break
dest.write(copy_buff)
dest.write( "<office:body>" )
dest.write( "<office:chart>" )
dest.write( "<chart:chart svg:width=\"25.17cm\" svg:height=\"14.295cm\" xlink:href=\"..\" xlink:type=\"simple\" chart:class=\"chart:scatter\" chart:style-name=\"ch1\">" )
dest.write( "<chart:plot-area chart:style-name=\"ch2\" table:cell-range-address=\"Data.C2:Data.C%d Data.G2:Data.G%d\" svg:x=\"0.538cm\" svg:y=\"0.284cm\" svg:width=\"24.124cm\" svg:height=\"13.514cm\">" % (noRows+2, noRows+2) )
dest.write( "<chartooo:coordinate-region svg:x=\"1.345cm\" svg:y=\"0.483cm\" svg:width=\"22.853cm\" svg:height=\"12.668cm\"/>" )
dest.write( "<chart:axis chart:dimension=\"x\" chart:name=\"primary-x\" chart:style-name=\"ch3\"" )
if (axis_labels):
dest.write( ">" )
#dest.write( "<chart:title svg:x=\"11.511cm\" svg:y=\"13.735cm\" chart:style-name=\"ch4\">" )
dest.write( "<chart:title svg:x=\"11.511cm\" svg:y=\"13.735cm\">" )
dest.write( "<text:p>%s</text:p>" % intext1 )
dest.write( "</chart:title>" )
dest.write( "</chart:axis>" )
else:
dest.write( "/>" )
dest.write( "<chart:axis chart:dimension=\"y\" chart:name=\"primary-y\" chart:style-name=\"ch3\"" )
if (axis_labels):
dest.write( ">" )
#dest.write( "<chart:title svg:x=\"0cm\" svg:y=\"8.13cm\" chart:style-name=\"ch5\">" )
dest.write( "<chart:title svg:x=\"0cm\" svg:y=\"8.13cm\">" )
dest.write( "<text:p>%s</text:p>" % intext2 )
dest.write( "</chart:title>" )
else:
dest.write( ">" )
dest.write( "<chart:grid chart:style-name=\"ch4\" chart:class=\"major\"/></chart:axis>" )
dest.write( "<chart:series chart:style-name=\"ch5\" chart:values-cell-range-address=\"Data.G2:Data.G%d\" chart:class=\"chart:scatter\">" % (noRows+2) )
dest.write( "<chart:domain table:cell-range-address=\"Data.C2:Data.C%d\"/><chart:data-point chart:repeated=\"%d\"/>" % (noRows+2, noRows) )
dest.write( "</chart:series>" )
dest.write( "<chart:wall chart:style-name=\"ch6\"/><chart:floor chart:style-name=\"ch7\"/></chart:plot-area><table:table table:name=\"local-table\"><table:table-header-columns><table:table-column/>" )
dest.write( "</table:table-header-columns>" )
dest.write( "<table:table-columns><table:table-column table:number-columns-repeated=\"2\"/></table:table-columns>" )
dest.write( "<table:table-header-rows>" )
dest.write( "<table:table-row>" )
dest.write( "<table:table-cell><text:p/></table:table-cell>" )
dest.write( "<table:table-cell office:value-type=\"string\"><text:p>Column C</text:p></table:table-cell><table:table-cell office:value-type=\"string\"><text:p>Column G</text:p></table:table-cell>" )
dest.write( "</table:table-row>" )
dest.write( "</table:table-header-rows>" )
dest.write( "<table:table-rows>" )
            i3 = 0  # reset the row counter (mirrors i4 in the second chart object)
            for i1 in range(0,len(adxl_idx)):
if (adxl_idx[i1] >= 0):
i3 += 1
dest.write( "<table:table-row>" )
dest.write( "<table:table-cell office:value-type=\"string\"><text:p>%d</text:p></table:table-cell>" % i3)
f3 = adxl[i1][1]
dest.write( "<table:table-cell office:value-type=\"float\" office:value=\"%0.3f\"><text:p>%0.3f</text:p>" % (f3, f3) )
if (i3 ==1 ):
dest.write( "<draw:g><svg:desc>Data.C2:Data.C%d</svg:desc></draw:g>" % (noRows+2) )
dest.write( "</table:table-cell>" )
f3 = lsm6[adxl_idx[i1]][1]
dest.write( "<table:table-cell office:value-type=\"float\" office:value=\"%0.3f\"><text:p>%0.3f</text:p>" % (f3, f3) )
if (i3 ==1 ):
dest.write( "<draw:g><svg:desc>Data.G2:Data.G%d</svg:desc></draw:g>" % (noRows+2) )
dest.write( "</table:table-cell>" )
dest.write( "</table:table-row>" )
dest.write( "</table:table-rows></table:table></chart:chart></office:chart></office:body></office:document-content>" )
with open( runningPath + "/content_local_obj2.xml", "w") as dest:
with open( runningPath + "/7_2.xml", "r") as source:
while True:
copy_buff = source.read(4096)
if not copy_buff:
break
dest.write(copy_buff)
dest.write( "<office:body>" )
dest.write( "<office:chart>" )
dest.write( "<chart:chart svg:width=\"25.17cm\" svg:height=\"14.295cm\" xlink:href=\"..\" xlink:type=\"simple\" chart:class=\"chart:scatter\" chart:style-name=\"ch1\">" )
dest.write( "<chart:plot-area chart:style-name=\"ch2\" table:cell-range-address=\"Data.B2:Data.B%d Data.F2:Data.F%d\" svg:x=\"0.503cm\" svg:y=\"0.285cm\" svg:width=\"24.164cm\" svg:height=\"13.725cm\">" % (noRows+2, noRows+2) )
dest.write( "<chartooo:coordinate-region svg:x=\"1.124cm\" svg:y=\"0.482cm\" svg:width=\"23.076cm\" svg:height=\"12.512cm\"/>" )
dest.write( "<chart:axis chart:dimension=\"x\" chart:name=\"primary-x\" chart:style-name=\"ch3\"" )
if (axis_labels):
dest.write( ">" )
#dest.write( "<chart:title svg:x=\"11.538cm\" svg:y=\"13.735cm\" chart:style-name=\"ch4\">" )
dest.write( "<chart:title svg:x=\"11.538cm\" svg:y=\"13.735cm\">" )
dest.write( "<text:p>%s</text:p>" % intext1 )
dest.write( "</chart:title>" )
dest.write( "</chart:axis>" )
else:
dest.write( "/>" )
dest.write( "<chart:axis chart:dimension=\"y\" chart:name=\"primary-y\" chart:style-name=\"ch3\">" )
if (axis_labels):
#dest.write( "<chart:title svg:x=\"0cm\" svg:y=\"8.051cm\" chart:style-name=\"ch5\">" )
dest.write( "<chart:title svg:x=\"0cm\" svg:y=\"8.051cm\">" )
dest.write( "<text:p>%s</text:p>" % intext2 )
dest.write( "</chart:title>" )
dest.write( "<chart:grid chart:style-name=\"ch4\" chart:class=\"major\"/></chart:axis>" )
dest.write( "<chart:series chart:style-name=\"ch5\" chart:values-cell-range-address=\"Data.F2:Data.F%d\" chart:class=\"chart:scatter\">" % (noRows+2) )
dest.write( "<chart:domain table:cell-range-address=\"Data.B2:Data.B%d\"/><chart:data-point chart:repeated=\"%d\"/>" % (noRows+2, noRows) )
dest.write( "</chart:series>" )
dest.write( "<chart:wall chart:style-name=\"ch6\"/><chart:floor chart:style-name=\"ch7\"/></chart:plot-area><table:table table:name=\"local-table\"><table:table-header-columns><table:table-column/>" )
dest.write( "</table:table-header-columns>" )
dest.write( "<table:table-columns><table:table-column table:number-columns-repeated=\"2\"/></table:table-columns>" )
dest.write( "<table:table-header-rows>" )
dest.write( "<table:table-row>" )
dest.write( "<table:table-cell><text:p/></table:table-cell>" )
dest.write( "<table:table-cell office:value-type=\"string\"><text:p>Column B</text:p></table:table-cell><table:table-cell office:value-type=\"string\"><text:p>Column F</text:p></table:table-cell>" )
dest.write( "</table:table-row>" )
dest.write( "</table:table-header-rows>" )
dest.write( "<table:table-rows>" )
i4 = 0
for i1 in range(0,len(adxl_idx)):
if (adxl_idx[i1] >= 0):
i4 += 1
dest.write( "<table:table-row>" )
dest.write( "<table:table-cell office:value-type=\"string\"><text:p>%d</text:p></table:table-cell>" % i4)
f3 = adxl[i1][2]
dest.write( "<table:table-cell office:value-type=\"float\" office:value=\"%0.3f\"><text:p>%0.3f</text:p>" % (f3, f3) )
if (i4 ==1 ):
dest.write( "<draw:g><svg:desc>Data.B1:Data.B%d</svg:desc></draw:g>" % noRows )
dest.write( "</table:table-cell>" )
f3 = lsm6[adxl_idx[i1]][2]
dest.write( "<table:table-cell office:value-type=\"float\" office:value=\"%0.3f\"><text:p>%0.3f</text:p>" % (f3, f3) )
if (i4 ==1 ):
dest.write( "<draw:g><svg:desc>Data.F1:Data.F%d</svg:desc></draw:g>" % noRows )
dest.write( "</table:table-cell>" )
dest.write( "</table:table-row>" )
dest.write( "</table:table-rows></table:table></chart:chart></office:chart></office:body></office:document-content>" )
with open( runningPath + "/content_local_obj3.xml", "w") as dest:
with open( runningPath + "/3_1.xml", "r") as source:
while True:
copy_buff = source.read(4096)
if not copy_buff:
break
dest.write(copy_buff)
dest.write( intext1 )
with open( runningPath + "/3_2.xml", "r") as source:
while True:
copy_buff = source.read(4096)
if not copy_buff:
break
dest.write(copy_buff)
with open( runningPath + "/content_local_obj4.xml", "w") as dest:
with open( runningPath + "/4_1.xml", "r") as source:
while True:
copy_buff = source.read(4096)
if not copy_buff:
break
dest.write(copy_buff)
dest.write( intext2 )
with open( runningPath + "/4_2.xml", "r") as source:
while True:
copy_buff = source.read(4096)
if not copy_buff:
break
dest.write(copy_buff)
######################################################
    ### Write header boilerplate to the contents file ##
with open( runningPath + "/content_local.xml", "w") as dest:
with open( runningPath + "/../template_content/3.xml", "r") as source:
while True:
copy_buff = source.read(4096)
if not copy_buff:
break
dest.write(copy_buff)
dest.write( "<office:body>" )
dest.write( "<office:spreadsheet>" )
dest.write( "<table:calculation-settings table:automatic-find-labels=\"false\"/>" )
dest.write( "<table:table table:name=\"Raw\" table:style-name=\"ta1\">" )
dest.write( "<table:table-column table:style-name=\"co1\" table:default-cell-style-name=\"ce3\"/>" )
dest.write( "<table:table-column table:style-name=\"co2\" table:number-columns-repeated=\"3\" table:default-cell-style-name=\"Default\"/>" )
dest.write( "<table:table-column table:style-name=\"co1\" table:default-cell-style-name=\"Default\"/>" )
dest.write( "<table:table-column table:style-name=\"co2\" table:number-columns-repeated=\"2\" table:default-cell-style-name=\"Default\"/>" )
# Show the file names
dest.write( "<table:table-row table:style-name=\"ro1\">" )
dest.write( "<table:table-cell office:value-type=\"string\" calcext:value-type=\"string\"><text:p>%s</text:p></table:table-cell>" % (intext1) )
dest.write( "<table:table-cell/><table:table-cell/><table:table-cell/>" )
dest.write( "<table:table-cell office:value-type=\"string\" calcext:value-type=\"string\"><text:p>%s</text:p></table:table-cell>" % (intext2) )
dest.write( "<table:table-cell/><table:table-cell/>" )
dest.write( "</table:table-row>" )
if bIncludeTraces():
for bi in bx1:
## Find the corresponding point
ci = adxl_idx[bi]
dest.write( "<table:table-row table:style-name=\"ro1\">" )
dest.write( ODSDate(ax1[bi].dt) )
dest.write( "<table:table-cell/><table:table-cell/><table:table-cell/>" )
dest.write( ODSDate(ax2[ci].dt) )
dest.write( "<table:table-cell/><table:table-cell/></table:table-row>" )
for ii in range(0, 500):
dest.write( "<table:table-row table:style-name=\"ro1\">" )
if (ii < len(ax1[bi].x)):
f3 = ax1[bi].x[ii]
dest.write( "<table:table-cell office:value-type=\"float\" office:value=\"%0.3f\" calcext:value-type=\"float\"><text:p>%0.3f</text:p></table:table-cell>" % (f3, f3) )
f3 = ax1[bi].y[ii]
dest.write( "<table:table-cell office:value-type=\"float\" office:value=\"%0.3f\" calcext:value-type=\"float\"><text:p>%0.3f</text:p></table:table-cell>" % (f3, f3) )
f3 = ax1[bi].z[ii]
dest.write( "<table:table-cell office:value-type=\"float\" office:value=\"%0.3f\" calcext:value-type=\"float\"><text:p>%0.3f</text:p></table:table-cell>" % (f3, f3) )
else:
dest.write( "<table:table-cell/><table:table-cell/><table:table-cell/>" )
dest.write( "<table:table-cell/>" )
if (ii < len(ax2[ci].x)):
f3 = ax2[ci].x[ii]
dest.write( "<table:table-cell office:value-type=\"float\" office:value=\"%0.3f\" calcext:value-type=\"float\"><text:p>%0.3f</text:p></table:table-cell>" % (f3, f3) )
f3 = ax2[ci].y[ii]
dest.write( "<table:table-cell office:value-type=\"float\" office:value=\"%0.3f\" calcext:value-type=\"float\"><text:p>%0.3f</text:p></table:table-cell>" % (f3, f3) )
f3 = ax2[ci].z[ii]
dest.write( "<table:table-cell office:value-type=\"float\" office:value=\"%0.3f\" calcext:value-type=\"float\"><text:p>%0.3f</text:p></table:table-cell>" % (f3, f3) )
else:
dest.write( "<table:table-cell/><table:table-cell/><table:table-cell/>" )
dest.write( "</table:table-row>" )
full_len = len(bx1)*501
dest.write( "</table:table>" )
dest.write( "<table:table table:name=\"Data\" table:style-name=\"ta1\">" )
dest.write( "<table:shapes>" )
dest.write( "<draw:frame draw:z-index=\"0\" draw:style-name=\"gr1\" draw:text-style-name=\"P1\" svg:width=\"251.69mm\" svg:height=\"142.94mm\" svg:x=\"223.06mm\" svg:y=\"7.17mm\">" )
dest.write( "<draw:object draw:notify-on-update-of-ranges=\"Data.C2:Data.C%d Data.G2:Data.G%d\" xlink:href=\"./Object 1\" xlink:type=\"simple\" xlink:show=\"embed\" xlink:actuate=\"onLoad\">" % (noRows+2, noRows+2) )
dest.write( "<loext:p/>" )
dest.write( "</draw:object>" )
dest.write( "<draw:image xlink:href=\"./ObjectReplacements/Object 1\" xlink:type=\"simple\" xlink:show=\"embed\" xlink:actuate=\"onLoad\"/>" )
dest.write( "</draw:frame>" )
dest.write( "<draw:frame draw:z-index=\"1\" draw:style-name=\"gr1\" draw:text-style-name=\"P1\" svg:width=\"251.69mm\" svg:height=\"142.94mm\" svg:x=\"409.83mm\" svg:y=\"79.95mm\">" )
dest.write( "<draw:object draw:notify-on-update-of-ranges=\"Data.B2:Data.B%d Data.F2:Data.F%d\" xlink:href=\"./Object 2\" xlink:type=\"simple\" xlink:show=\"embed\" xlink:actuate=\"onLoad\">" % (noRows+2, noRows+2) )
dest.write( "<loext:p/>" )
dest.write( "</draw:object>" )
dest.write( "<draw:image xlink:href=\"./ObjectReplacements/Object 2\" xlink:type=\"simple\" xlink:show=\"embed\" xlink:actuate=\"onLoad\"/>" )
dest.write( "</draw:frame>" )
dest.write( "</table:shapes>" )
dest.write( "<table:table-column table:style-name=\"co1\" table:default-cell-style-name=\"ce3\"/>" )
dest.write( "<table:table-column table:style-name=\"co2\" table:number-columns-repeated=\"3\" table:default-cell-style-name=\"Default\"/>" )
dest.write( "<table:table-column table:style-name=\"co1\" table:default-cell-style-name=\"Default\"/>" )
dest.write( "<table:table-column table:style-name=\"co2\" table:number-columns-repeated=\"2\" table:default-cell-style-name=\"Default\"/>" )
# Show the file names
dest.write( "<table:table-row table:style-name=\"ro1\">" )
dest.write( "<table:table-cell office:value-type=\"string\" calcext:value-type=\"string\"><text:p>%s</text:p></table:table-cell>" % (intext1) )
dest.write( "<table:table-cell/><table:table-cell/><table:table-cell/>" )
dest.write( "<table:table-cell office:value-type=\"string\" calcext:value-type=\"string\"><text:p>%s</text:p></table:table-cell>" % (intext2) )
dest.write( "<table:table-cell/><table:table-cell/>" )
dest.write( "</table:table-row>" )
for i1 in range(0,len(adxl_idx)):
if (adxl_idx[i1] >= 0):
dest.write( "<table:table-row table:style-name=\"ro1\">" )
dest.write( ODSDate(adxl[i1][0]) )
f3 = adxl[i1][1]
dest.write( "<table:table-cell office:value-type=\"float\" office:value=\"%0.3f\" calcext:value-type=\"float\"><text:p>%0.3f</text:p></table:table-cell>" % (f3, f3) )
f3 = adxl[i1][2]
dest.write( "<table:table-cell office:value-type=\"float\" office:value=\"%0.3f\" calcext:value-type=\"float\"><text:p>%0.3f</text:p></table:table-cell>" % (f3, f3) )
dest.write("<table:table-cell/>")
dest.write( ODSDate(lsm6[adxl_idx[i1]][0]) )
f3 = lsm6[adxl_idx[i1]][1]
dest.write( "<table:table-cell office:value-type=\"float\" office:value=\"%0.3f\" calcext:value-type=\"float\"><text:p>%0.3f</text:p></table:table-cell>" % (f3, f3) )
f3 = lsm6[adxl_idx[i1]][2]
dest.write( "<table:table-cell office:value-type=\"float\" office:value=\"%0.3f\" calcext:value-type=\"float\"><text:p>%0.3f</text:p></table:table-cell>" % (f3, f3) )
dest.write("<table:table-cell/>")
#dest.write( "<table:table-cell office:value-type=\"string\" calcext:value-type=\"string\"><text:p>%s</text:p></table:table-cell>" % ("Hello!") )
dest.write( "</table:table-row>" )
# Write empty row
dest.write( "<table:table-row table:style-name=\"ro1\"><table:table-cell/><table:table-cell/><table:table-cell/><table:table-cell/><table:table-cell/><table:table-cell/><table:table-cell/></table:table-row>" )
dest.write( "<table:table-row table:style-name=\"ro1\">" )
dest.write( "<table:table-cell office:value-type=\"string\" calcext:value-type=\"string\"><text:p>%s</text:p></table:table-cell>" % ("Unmatched:") )
dest.write( "<table:table-cell/><table:table-cell/><table:table-cell/>" )
dest.write( "<table:table-cell office:value-type=\"string\" calcext:value-type=\"string\"><text:p>%s</text:p></table:table-cell>" % ("Unmatched:") )
dest.write( "<table:table-cell/><table:table-cell/>" )
dest.write( "</table:table-row>" )
# Now write out the remaining values (that haven't been paired).
i1 = 0
i2 = 0
while(True):
while(i1 < len(adxl_idx)):
if (adxl_idx[i1] != -1):
i1 += 1
else:
break
while(i2 < len(lsm6_idx)):
if (lsm6_idx[i2] != -1):
i2 += 1
else:
break
if(i1 >= len(adxl_idx)) and (i2 >= len(lsm6_idx)):
# Finished!
break
else:
# Something still to do!
dest.write( "<table:table-row table:style-name=\"ro1\">" )
if(i1 < len(adxl_idx)):
dest.write( ODSDate(adxl[i1][0]) )
f3 = adxl[i1][1]
dest.write( "<table:table-cell office:value-type=\"float\" office:value=\"%0.3f\" calcext:value-type=\"float\"><text:p>%0.3f</text:p></table:table-cell>" % (f3, f3) )
f3 = adxl[i1][2]
dest.write( "<table:table-cell office:value-type=\"float\" office:value=\"%0.3f\" calcext:value-type=\"float\"><text:p>%0.3f</text:p></table:table-cell>" % (f3, f3) )
i1 += 1
else:
dest.write( "<table:table-cell/><table:table-cell/><table:table-cell/>" )
dest.write( "<table:table-cell/>" )
if(i2 < len(lsm6_idx)):
dest.write( ODSDate(lsm6[i2][0]) )
f3 = lsm6[i2][1]
dest.write( "<table:table-cell office:value-type=\"float\" office:value=\"%0.3f\" calcext:value-type=\"float\"><text:p>%0.3f</text:p></table:table-cell>" % (f3, f3) )
f3 = lsm6[i2][2]
dest.write( "<table:table-cell office:value-type=\"float\" office:value=\"%0.3f\" calcext:value-type=\"float\"><text:p>%0.3f</text:p></table:table-cell>" % (f3, f3) )
i2 += 1
else:
dest.write( "<table:table-cell/><table:table-cell/><table:table-cell/>" )
dest.write( "</table:table-row>" )
dest.write( "</table:table>" )
## Now write out the third table sheet
dest.write( "<table:table table:name=\"Compare\" table:style-name=\"ta1\">" )
dest.write( "<table:shapes>" )
dest.write( "<draw:frame draw:z-index=\"0\" draw:style-name=\"gr1\" draw:text-style-name=\"P1\" svg:width=\"278.64mm\" svg:height=\"168.14mm\" svg:x=\"85.08mm\" svg:y=\"19.65mm\">" )
dest.write( "<draw:object draw:notify-on-update-of-ranges=\"Compare.D3:Compare.D502 Compare.E3:Compare.E502 Compare.D3:Compare.D502 Compare.F3:Compare.F502 Compare.D3:Compare.D502 Compare.G3:Compare.G502\" xlink:href=\"./Object 3\" xlink:type=\"simple\" xlink:show=\"embed\" xlink:actuate=\"onLoad\"><loext:p/>" )
dest.write( "</draw:object>" )
dest.write( "<draw:image xlink:href=\"./ObjectReplacements/Object 3\" xlink:type=\"simple\" xlink:show=\"embed\" xlink:actuate=\"onLoad\"/>" )
dest.write( "</draw:frame>" )
dest.write( "<draw:frame draw:z-index=\"1\" draw:style-name=\"gr1\" draw:text-style-name=\"P1\" svg:width=\"306.74mm\" svg:height=\"172.65mm\" svg:x=\"372.25mm\" svg:y=\"17.8mm\">" )
dest.write( "<draw:object draw:notify-on-update-of-ranges=\"Compare.I3:Compare.I502 Compare.J3:Compare.J502 Compare.I3:Compare.I502 Compare.K3:Compare.K502 Compare.I3:Compare.I502 Compare.L3:Compare.L502\" xlink:href=\"./Object 4\" xlink:type=\"simple\" xlink:show=\"embed\" xlink:actuate=\"onLoad\"><loext:p/>" )
dest.write( "</draw:object>" )
dest.write( "<draw:image xlink:href=\"./ObjectReplacements/Object 4\" xlink:type=\"simple\" xlink:show=\"embed\" xlink:actuate=\"onLoad\"/>" )
dest.write( "</draw:frame>" )
dest.write( "</table:shapes>" )
dest.write( "<table:table-column table:style-name=\"co2\" table:number-columns-repeated=\"4\" table:default-cell-style-name=\"Default\"/>" )
dest.write( "<table:table-column table:style-name=\"co1\" table:default-cell-style-name=\"ce3\"/>" )
dest.write( "<table:table-column table:style-name=\"co2\" table:number-columns-repeated=\"4\" table:default-cell-style-name=\"Default\"/>" )
dest.write( "<table:table-column table:style-name=\"co1\" table:default-cell-style-name=\"Default\"/>" )
dest.write( "<table:table-column table:style-name=\"co2\" table:number-columns-repeated=\"2\" table:default-cell-style-name=\"Default\"/>" )
# Show the file names
dest.write( "<table:table-row table:style-name=\"ro1\"><table:table-cell/><table:table-cell/><table:table-cell/><table:table-cell/>" )
dest.write( "<table:table-cell office:value-type=\"string\" calcext:value-type=\"string\"><text:p>%s</text:p></table:table-cell>" % (intext1) )
dest.write( "<table:table-cell/><table:table-cell/><table:table-cell/><table:table-cell/>" )
dest.write( "<table:table-cell office:value-type=\"string\" calcext:value-type=\"string\"><text:p>%s</text:p></table:table-cell>" % (intext2) )
dest.write( "<table:table-cell/><table:table-cell/>" )
dest.write( "</table:table-row>" )
dest.write( "<table:table-row table:style-name=\"ro1\"><table:table-cell/>" )
dest.write( "<table:table-cell office:value-type=\"float\" office:value=\"1\" calcext:value-type=\"float\"><text:p>1</text:p></table:table-cell>" )
dest.write( "<table:table-cell/>" )
dest.write( "<table:table-cell table:formula=\"of:=INDEX([.$A$3:.$C$%d];[.$B$2];2)\" office:value-type=\"float\" office:value=\"502\" calcext:value-type=\"float\"><text:p>502</text:p></table:table-cell>" % (500) )
dest.write( "<table:table-cell table:style-name=\"ce2\" table:formula=\"of:=INDEX([$Raw.$A$1:.$C$%d];[.$D$2];1)\" " % (full_len + 1) )
dt_x = datetime.datetime.now()
dest.write( dt_x.strftime("office:value-type=\"date\" office:date-value=\"%Y-%m-%dT%H:%M:%S\" calcext:value-type=\"date\"><text:p>%d/%m/%Y %H:%M:%S</text:p></table:table-cell>") )
#dest.write( "<table:table-cell/>" )
dest.write( "<table:table-cell/><table:table-cell/><table:table-cell/><table:table-cell/>" )
dest.write( "<table:table-cell table:style-name=\"ce2\" table:formula=\"of:=INDEX([$Raw.$E$1:.$G$%d];[.$D$2];1)\" " % (full_len + 1) )
dt_x = datetime.datetime.now()
dest.write( dt_x.strftime("office:value-type=\"date\" office:date-value=\"%Y-%m-%dT%H:%M:%S\" calcext:value-type=\"date\"><text:p>%d/%m/%Y %H:%M:%S</text:p></table:table-cell>") )
dest.write( "<table:table-cell/>" )
dest.write( "<table:table-cell table:style-name=\"ce5\" table:formula=\"of:=24*60*60*([.J2]-[.E2])\" office:value-type=\"float\" office:value=\"0\" calcext:value-type=\"float\"><text:p>0.00</text:p></table:table-cell>" )
dest.write( "</table:table-row>" )
for yi in range(0,500):
dest.write( "<table:table-row table:style-name=\"ro1\">" )
        ## Limit the number. What if num_bx > 500? That can't happen at the moment because (above) it
        # is limited to 100, but maybe in future?
#
if yi <= num_bx:
dest.write( "<table:table-cell office:value-type=\"float\" office:value=\"%d\" calcext:value-type=\"float\"><text:p>%d</text:p></table:table-cell>" % (yi+1, yi+1) )
dest.write( "<table:table-cell office:value-type=\"float\" office:value=\"%d\" calcext:value-type=\"float\"><text:p>%d</text:p></table:table-cell>" % (yi*501+2, yi*501+2) )
dest.write( "<table:table-cell/>" )
else:
dest.write( "<table:table-cell/><table:table-cell/><table:table-cell/>" )
dest.write( "<table:table-cell office:value-type=\"float\" office:value=\"%d\" calcext:value-type=\"float\"><text:p>%d</text:p></table:table-cell>" % (yi+1, yi+1) )
dest.write( "<table:table-cell table:formula=\"of:=INDEX([$Raw.$A$1:.$C$%d];[.$D$2]+[.$D%d];1)\" office:value-type=\"float\" office:value=\"0.0000\" calcext:value-type=\"float\"><text:p>0.0000</text:p></table:table-cell>" % (full_len+1, yi+3) )
dest.write( "<table:table-cell table:formula=\"of:=INDEX([$Raw.$A$1:.$C$%d];[.$D$2]+[.$D%d];2)\" office:value-type=\"float\" office:value=\"0.0000\" calcext:value-type=\"float\"><text:p>0.0000</text:p></table:table-cell>" % (full_len+1, yi+3) )
dest.write( "<table:table-cell table:formula=\"of:=INDEX([$Raw.$A$1:.$C$%d];[.$D$2]+[.$D%d];3)\" office:value-type=\"float\" office:value=\"0.0000\" calcext:value-type=\"float\"><text:p>0.0000</text:p></table:table-cell>" % (full_len+1, yi+3) )
dest.write( "<table:table-cell/>" )
dest.write( "<table:table-cell office:value-type=\"float\" office:value=\"%d\" calcext:value-type=\"float\"><text:p>%d</text:p></table:table-cell>" % (yi+1, yi+1) )
dest.write( "<table:table-cell table:formula=\"of:=INDEX([$Raw.$E$1:.$G$%d];[.$D$2]+[.$I%d];1)\" office:value-type=\"float\" office:value=\"0.0000\" calcext:value-type=\"float\"><text:p>0.0000</text:p></table:table-cell>" % (full_len+1, yi+3) )
dest.write( "<table:table-cell table:formula=\"of:=INDEX([$Raw.$E$1:.$G$%d];[.$D$2]+[.$I%d];2)\" office:value-type=\"float\" office:value=\"0.0000\" calcext:value-type=\"float\"><text:p>0.0000</text:p></table:table-cell>" % (full_len+1, yi+3) )
dest.write( "<table:table-cell table:formula=\"of:=INDEX([$Raw.$E$1:.$G$%d];[.$D$2]+[.$I%d];3)\" office:value-type=\"float\" office:value=\"0.0000\" calcext:value-type=\"float\"><text:p>0.0000</text:p></table:table-cell>" % (full_len+1, yi+3) )
dest.write( "</table:table-row>" )
dest.write( "</table:table>" )
## Finish the document
dest.write( "</office:spreadsheet></office:body></office:document-content>" )
##### Now write to the ZIP archive ####
shutil.copy2( runningPath + "/6_2.ODS", theName) # 6_2 includes plots, 5_1 does not.
with zipfile.ZipFile(theName, "a") as z:
z.write( runningPath + "/content_local.xml", "content.xml", zipfile.ZIP_DEFLATED )
z.write( runningPath + "/content_local_obj1.xml", "Object 1/content.xml", zipfile.ZIP_DEFLATED )
z.write( runningPath + "/content_local_obj2.xml", "Object 2/content.xml", zipfile.ZIP_DEFLATED )
z.write( runningPath + "/content_local_obj3.xml", "Object 3/content.xml", zipfile.ZIP_DEFLATED )
z.write( runningPath + "/content_local_obj4.xml", "Object 4/content.xml", zipfile.ZIP_DEFLATED )
os.remove( runningPath + "/content_local.xml" )
os.remove( runningPath + "/content_local_obj1.xml" )
os.remove( runningPath + "/content_local_obj2.xml" )
os.remove( runningPath + "/content_local_obj3.xml" )
os.remove( runningPath + "/content_local_obj4.xml" )
######################################################################
## START MAIN PROGRAM ##
######################################################################
def usage():
    print("-h help")
    print("-o output")
    print("-d time delta to use (if omitted, calculate automatically)")
def main():
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "ho:d:", ["help", "output=", "delta=", "diff="])
except getopt.GetoptError as err:
# print help information and exit:
        print(str(err))  # will print something like "option -a not recognized"
usage()
sys.exit(2)
Output = None
Diff = float('nan')
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-o", "--output"):
Output = a
elif o in ("-d", "--delta", "--diff"):
Diff = float(a)
else:
assert False, "unhandled option"
    if Output is None:
        # No output file was specified; fail.
        sys.exit(4)
if (len(args) != 2):
# Wrong number of args. Fails.
sys.exit(4)
if args[0].upper().endswith('.CSV'):
input1 = os.path.abspath(args[0])
else:
sys.exit(3)
if args[1].upper().endswith('.CSV'):
input2 = os.path.abspath(args[1])
else:
sys.exit(3)
    # Decide what text represents each input: either just the filename, or one
    # level of parent directory plus the filename, depending on whether the inputs share a directory.
if (os.path.dirname(input1) == os.path.dirname(input2)):
# If the directory is the same, don't bother showing it.
input1_text = os.path.basename(input1)
input2_text = os.path.basename(input2)
else:
if (args[0] == os.path.basename(input1)):
# If the input doesn't contain the directory, don't bother showing it
input1_text = args[0]
else:
# ... otherwise, show 1 level of the directory
input1_text = os.path.join(os.path.basename(os.path.dirname(input1)),os.path.basename(input1))
if (args[1] == os.path.basename(input2)):
# If the input doesn't contain the directory, don't bother showing it
input2_text = args[1]
else:
# ... otherwise, show 1 level of the directory
input2_text = os.path.join(os.path.basename(os.path.dirname(input2)),os.path.basename(input2))
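    # Illustrative outcome (hypothetical paths): comparing runA/data.csv with
    # runB/data.csv keeps one directory level, so the inputs are labelled
    # "runA/data.csv" and "runB/data.csv"; two bare filenames are shown as-is.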
if math.isnan(Diff):
correl(input1, input2, input1_text, input2_text, output=Output)
else:
correl(input1, input2, input1_text, input2_text, output=Output, diff=Diff)
if __name__ == "__main__":
main()
| 2.234375
| 2
|
tests/themes/kenney/demo_checkbox.py
|
Rahuum/glooey
| 86
|
12782415
|
<reponame>Rahuum/glooey<gh_stars>10-100
#!/usr/bin/env python3
import pyglet
import glooey.themes.kenney as kenney
import run_demos
import itertools
colors = 'blue', 'red', 'green', 'yellow', 'grey'
icons = 'checkmark', 'cross'
window = pyglet.window.Window()
gui = kenney.Gui(window)
@run_demos.on_space(gui)
def test_checkbox():
# Test the getters and setters for the base button class.
button = kenney.Checkbox()
gui.clear()
gui.add(button)
for color, icon in itertools.product(colors, icons):
button.color = color
button.icon = icon
yield f"{color}, {icon}"
# Test the pre-configured settings for the named button classes.
#
# Note that when one checkbox is switched for another (as in this test),
# you have to move the mouse to trigger an on_mouse_enter() event before
# you can click on the widget. This is necessary because the GUI doesn't
# keep track of where the mouse is, and I decided that fixing this would
# introduce more complexity than it's worth.
for subcls in kenney.Checkbox.__subclasses__():
gui.clear()
gui.add(subcls())
yield subcls.__name__
pyglet.app.run()
| 2.375
| 2
|
ipscanner.py
|
molney239/ipscanner
| 1
|
12782416
|
<filename>ipscanner.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Python script and module for scanning a range of IPv4 addresses. Shows basic information about devices and addresses.
Usage examples:
From console:
pip3 install -r requirements.txt
python3 ipscanner.py 192.168.1.0-192.168.1.255,8.8.8.8 -p 2 -t 200 -v 1
This command starts a scan of the local-network address range and of the 8.8.8.8 address.
From python:
import ipscanner
ipscanner.scan_range(["192.168.1.0", "8.8.8.8"], verbosity=3, ping_timeout=200)
========== LICENSE ==========
The MIT License
Copyright (c) 2021 molney239 and contributors
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
=============================
"""
# Libs
import ipaddress
import pythonping
import getmac
import requests
import socket
from termcolor import colored
from platform import system
__author__ = "molney239"
__copyright__ = 'Copyright 2021, ipscanner'
__license__ = 'The MIT License'
__version__ = '1.0.0'
__email__ = '<EMAIL>'
# For correct colorized output in Windows.
if system() == "Windows":
from colorama import init
init()
class PingResult:
def __init__(self, address: str, available: bool, ping_ms: int):
self.address = address
self.available = available
self.ping_ms = ping_ms
class ScanResult:
def __init__(self, address: str, available: bool, mac: str, vendor_name: str, device_name: str, ping_ms: int):
self.address = address
self.available = available
self.mac = mac
self.vendor_name = vendor_name
self.device_name = device_name
self.ping_ms = ping_ms
def parse_addresses(addresses: str):
"""
Parses string to IPv4 range.
    :param addresses: Addresses string. Example: 192.168.1.0-192.168.1.255,8.8.8.8
:return: Generator of strings with IPv4s.
"""
for i in addresses.split(','):
if i.count('-') == 0:
yield i
elif i.count('-') == 1:
for j in range(int(ipaddress.IPv4Address(i.split('-')[0])),
int(ipaddress.IPv4Address(i.split('-')[1])) + 1):
yield str(ipaddress.IPv4Address(j))
else:
raise ValueError("Invalid addresses range: " + i)
def print_result(result: ScanResult, colorized: bool = True) -> None:
"""
    Prints the result of a scan to the console.
    :param result: ScanResult object.
    :param colorized: Colorize the output. Disable if the output does not render correctly.
"""
output = ""
if colorized and system() == "Windows":
output = "IPv4: {:29} | Available: {:17} | MAC: {:31} | Vendor name: {:30} | Device name: {:20} | Ping: {}"
else:
output = "IPv4: {:15} | Available: {:3} | MAC: {:17} | Vendor name: {:30} | Device name: {:20} | Ping: {}"
if colorized:
print(output.format(
str(colored(result.address, 'blue')),
str(colored("YES", 'green')) if result.available else str(colored("NO", 'red')),
str(colored(str(result.mac), 'yellow')),
str(result.vendor_name),
str(result.device_name),
str(result.ping_ms)) + "ms"
)
else:
print(output.format(
str(result.address),
("YES" if result.available else "NO"),
str(result.mac),
str(result.vendor_name),
str(result.device_name),
str(result.ping_ms)) + "ms"
)
def ping(address: str, packets_count: int = 1, ping_timeout: int = 2000) -> PingResult:
"""
Pings address.
:param address: IP v4 address.
:param packets_count: Packets count.
:param ping_timeout: Timeout (milliseconds).
:return: PingResult object.
"""
if packets_count < 1:
raise ValueError("Invalid ping packets count: " + str(packets_count))
if ping_timeout < 0:
raise ValueError("Invalid ping timeout: " + str(ping_timeout))
result = pythonping.ping(address, count=packets_count, timeout=ping_timeout / 1000.0, verbose=False)
return PingResult(address, result.success(), result.rtt_avg_ms)
def scan(address: str, packets_count: int = 1, ping_timeout: int = 2000) -> ScanResult:
"""
Scans IP v4 address.
:param address: Address.
:param packets_count: Ping packets count.
:param ping_timeout: Ping timeout (milliseconds).
:return: ScanResult object.
"""
ping_result = ping(address, packets_count=packets_count, ping_timeout=ping_timeout)
mac = "None"
vendor_name = "None"
device_name = "None"
if ping_result.available:
mac = getmac.get_mac_address(ip=address)
if mac is not None:
url = "https://api.macvendors.com/"
response = requests.get(url + mac)
if response.status_code == 200:
vendor_name = response.content.decode()
device_name = socket.getfqdn(address)
else:
mac = "None"
return ScanResult(
address,
ping_result.available,
mac,
vendor_name,
device_name,
ping_result.ping_ms
)
def scan_range(addresses: iter, packets_count: int = 1, ping_timeout: int = 2000, verbosity: int = 0,
colorized: bool = True) -> list:
"""
Scans IP v4 addresses.
Verbosity codes:
0: None.
1: Only available addresses.
2: Only not available addresses.
3: All addresses.
:param addresses: Iterable object with addresses (strings).
:param packets_count: Ping packets count.
:param ping_timeout: Ping timeout (milliseconds).
:param verbosity: Verbosity.
:param colorized: Colorized output.
:return: List of dictionaries with results.
"""
results = []
for address in addresses:
result = scan(address, packets_count=packets_count, ping_timeout=ping_timeout)
results.append(result)
if (verbosity == 1 and result.available) or (verbosity == 2 and not result.available) or (verbosity == 3):
print_result(result, colorized=colorized)
return results
if __name__ == "__main__":
import argparse
# Parse console arguments.
parser = argparse.ArgumentParser(description="Scans IPv4 addresses for availability.")
parser.add_argument("addresses", metavar='addresses', type=str, help="Addresses for scan.")
parser.add_argument("-v", metavar="--verbosity", type=int, default=3, help="Verbosity. " +
"0: None. " +
"1: Only available addresses. " +
"2: Only not available addresses. " +
"3: All addresses.")
parser.add_argument("-p", metavar="--packets-count", type=int, default=1, help="Ping packets count.")
parser.add_argument("-t", metavar="--ping-timeout", type=int, default=2000, help="Ping timeout (milliseconds).")
parser.add_argument("--non-colorized", action="store_true",
help="Not colorize output. (Use if the output is not in the correct format.)")
args = parser.parse_args()
scan_range(parse_addresses(args.addresses), packets_count=args.p, ping_timeout=args.t, verbosity=args.v,
colorized=not args.non_colorized)
| 3.171875
| 3
|
bindings/python/src/cloudsmith_api/apis/webhooks_api.py
|
cloudsmith-io/cloudsmith-api
| 9
|
12782417
|
# coding: utf-8
"""
Cloudsmith API
The API to the Cloudsmith Service
OpenAPI spec version: v1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class WebhooksApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def webhooks_create(self, owner, repo, **kwargs):
"""
Create a specific webhook in a repository.
Create a specific webhook in a repository.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.webhooks_create(owner, repo, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: (required)
:param str repo: (required)
:param WebhooksCreate data:
:return: RepositoryWebhook
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.webhooks_create_with_http_info(owner, repo, **kwargs)
else:
(data) = self.webhooks_create_with_http_info(owner, repo, **kwargs)
return data
def webhooks_create_with_http_info(self, owner, repo, **kwargs):
"""
Create a specific webhook in a repository.
Create a specific webhook in a repository.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.webhooks_create_with_http_info(owner, repo, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: (required)
:param str repo: (required)
:param WebhooksCreate data:
:return: RepositoryWebhook
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['owner', 'repo', 'data']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method webhooks_create" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'owner' is set
if ('owner' not in params) or (params['owner'] is None):
raise ValueError("Missing the required parameter `owner` when calling `webhooks_create`")
# verify the required parameter 'repo' is set
if ('repo' not in params) or (params['repo'] is None):
raise ValueError("Missing the required parameter `repo` when calling `webhooks_create`")
collection_formats = {}
path_params = {}
if 'owner' in params:
path_params['owner'] = params['owner']
if 'repo' in params:
path_params['repo'] = params['repo']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in params:
body_params = params['data']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['apikey']
return self.api_client.call_api('/webhooks/{owner}/{repo}/', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RepositoryWebhook',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def webhooks_delete(self, owner, repo, identifier, **kwargs):
"""
Delete a specific webhook in a repository.
Delete a specific webhook in a repository.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.webhooks_delete(owner, repo, identifier, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: (required)
:param str repo: (required)
:param str identifier: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.webhooks_delete_with_http_info(owner, repo, identifier, **kwargs)
else:
(data) = self.webhooks_delete_with_http_info(owner, repo, identifier, **kwargs)
return data
def webhooks_delete_with_http_info(self, owner, repo, identifier, **kwargs):
"""
Delete a specific webhook in a repository.
Delete a specific webhook in a repository.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.webhooks_delete_with_http_info(owner, repo, identifier, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: (required)
:param str repo: (required)
:param str identifier: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['owner', 'repo', 'identifier']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method webhooks_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'owner' is set
if ('owner' not in params) or (params['owner'] is None):
raise ValueError("Missing the required parameter `owner` when calling `webhooks_delete`")
# verify the required parameter 'repo' is set
if ('repo' not in params) or (params['repo'] is None):
raise ValueError("Missing the required parameter `repo` when calling `webhooks_delete`")
# verify the required parameter 'identifier' is set
if ('identifier' not in params) or (params['identifier'] is None):
raise ValueError("Missing the required parameter `identifier` when calling `webhooks_delete`")
collection_formats = {}
path_params = {}
if 'owner' in params:
path_params['owner'] = params['owner']
if 'repo' in params:
path_params['repo'] = params['repo']
if 'identifier' in params:
path_params['identifier'] = params['identifier']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['apikey']
return self.api_client.call_api('/webhooks/{owner}/{repo}/{identifier}/', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def webhooks_list(self, owner, repo, **kwargs):
"""
Get a list of all webhooks in a repository.
Get a list of all webhooks in a repository.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.webhooks_list(owner, repo, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: (required)
:param str repo: (required)
:param int page: A page number within the paginated result set.
:param int page_size: Number of results to return per page.
:return: list[RepositoryWebhook]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.webhooks_list_with_http_info(owner, repo, **kwargs)
else:
(data) = self.webhooks_list_with_http_info(owner, repo, **kwargs)
return data
def webhooks_list_with_http_info(self, owner, repo, **kwargs):
"""
Get a list of all webhooks in a repository.
Get a list of all webhooks in a repository.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.webhooks_list_with_http_info(owner, repo, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: (required)
:param str repo: (required)
:param int page: A page number within the paginated result set.
:param int page_size: Number of results to return per page.
:return: list[RepositoryWebhook]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['owner', 'repo', 'page', 'page_size']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method webhooks_list" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'owner' is set
if ('owner' not in params) or (params['owner'] is None):
raise ValueError("Missing the required parameter `owner` when calling `webhooks_list`")
# verify the required parameter 'repo' is set
if ('repo' not in params) or (params['repo'] is None):
raise ValueError("Missing the required parameter `repo` when calling `webhooks_list`")
collection_formats = {}
path_params = {}
if 'owner' in params:
path_params['owner'] = params['owner']
if 'repo' in params:
path_params['repo'] = params['repo']
query_params = []
if 'page' in params:
query_params.append(('page', params['page']))
if 'page_size' in params:
query_params.append(('page_size', params['page_size']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['apikey']
return self.api_client.call_api('/webhooks/{owner}/{repo}/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[RepositoryWebhook]',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def webhooks_partial_update(self, owner, repo, identifier, **kwargs):
"""
Update a specific webhook in a repository.
Update a specific webhook in a repository.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.webhooks_partial_update(owner, repo, identifier, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: (required)
:param str repo: (required)
:param str identifier: (required)
:param WebhooksPartialUpdate data:
:return: RepositoryWebhook
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.webhooks_partial_update_with_http_info(owner, repo, identifier, **kwargs)
else:
(data) = self.webhooks_partial_update_with_http_info(owner, repo, identifier, **kwargs)
return data
def webhooks_partial_update_with_http_info(self, owner, repo, identifier, **kwargs):
"""
Update a specific webhook in a repository.
Update a specific webhook in a repository.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.webhooks_partial_update_with_http_info(owner, repo, identifier, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: (required)
:param str repo: (required)
:param str identifier: (required)
:param WebhooksPartialUpdate data:
:return: RepositoryWebhook
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['owner', 'repo', 'identifier', 'data']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method webhooks_partial_update" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'owner' is set
if ('owner' not in params) or (params['owner'] is None):
raise ValueError("Missing the required parameter `owner` when calling `webhooks_partial_update`")
# verify the required parameter 'repo' is set
if ('repo' not in params) or (params['repo'] is None):
raise ValueError("Missing the required parameter `repo` when calling `webhooks_partial_update`")
# verify the required parameter 'identifier' is set
if ('identifier' not in params) or (params['identifier'] is None):
raise ValueError("Missing the required parameter `identifier` when calling `webhooks_partial_update`")
collection_formats = {}
path_params = {}
if 'owner' in params:
path_params['owner'] = params['owner']
if 'repo' in params:
path_params['repo'] = params['repo']
if 'identifier' in params:
path_params['identifier'] = params['identifier']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in params:
body_params = params['data']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['apikey']
return self.api_client.call_api('/webhooks/{owner}/{repo}/{identifier}/', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RepositoryWebhook',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def webhooks_read(self, owner, repo, identifier, **kwargs):
"""
        Retrieve a specific webhook in a repository.
        Retrieve a specific webhook in a repository.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.webhooks_read(owner, repo, identifier, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: (required)
:param str repo: (required)
:param str identifier: (required)
:return: RepositoryWebhook
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.webhooks_read_with_http_info(owner, repo, identifier, **kwargs)
else:
(data) = self.webhooks_read_with_http_info(owner, repo, identifier, **kwargs)
return data
def webhooks_read_with_http_info(self, owner, repo, identifier, **kwargs):
"""
        Retrieve a specific webhook in a repository.
        Retrieve a specific webhook in a repository.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.webhooks_read_with_http_info(owner, repo, identifier, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str owner: (required)
:param str repo: (required)
:param str identifier: (required)
:return: RepositoryWebhook
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['owner', 'repo', 'identifier']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method webhooks_read" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'owner' is set
if ('owner' not in params) or (params['owner'] is None):
raise ValueError("Missing the required parameter `owner` when calling `webhooks_read`")
# verify the required parameter 'repo' is set
if ('repo' not in params) or (params['repo'] is None):
raise ValueError("Missing the required parameter `repo` when calling `webhooks_read`")
# verify the required parameter 'identifier' is set
if ('identifier' not in params) or (params['identifier'] is None):
raise ValueError("Missing the required parameter `identifier` when calling `webhooks_read`")
collection_formats = {}
path_params = {}
if 'owner' in params:
path_params['owner'] = params['owner']
if 'repo' in params:
path_params['repo'] = params['repo']
if 'identifier' in params:
path_params['identifier'] = params['identifier']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['apikey']
return self.api_client.call_api('/webhooks/{owner}/{repo}/{identifier}/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RepositoryWebhook',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 1.796875
| 2
|
Unit 2/Lesson 3/y=mx+b.py
|
KevinBoxuGao/ICS3UI
| 0
|
12782418
|
<reponame>KevinBoxuGao/ICS3UI
m = int(input("enter slope of line: "))
b = int(input("enter the y-intercept: "))
if b == 0:
    bOutput = ""
elif b < 0:
    bOutput = str(b)
else:
    bOutput = "+" + str(b)
if m == 0:
    mOutput = ""
    bOutput = str(b)
elif m == 1:
    mOutput = "x"
elif m == -1:
    mOutput = "-x"
else:
    mOutput = str(m) + "x"
print("The equation of the line is y=" + mOutput + bOutput)
| 3.890625
| 4
|
buildhelpers.py
|
nCov19-DataPoints/nCov19-DataPoints
| 42
|
12782419
|
<gh_stars>10-100
import urllib.request
import csv
from io import StringIO
import json
import datetime
import pytz
from collections import namedtuple
CET = pytz.timezone('CET')
Color = namedtuple("Color",['r','g','b','a'])
colortable = [
{"t":0, "c":{"r":0,"g":255,"b":0,"a":0.1} },
{"t":0.1, "c":{"r":128,"g":255,"b":0,"a":0.1} },
{"t":0.25, "c":{"r":255,"g":255,"b":0,"a":0.1} },
{"t":0.5, "c":{"r":255,"g":213,"b":0,"a":0.2} },
{"t":1.0, "c":{"r":255,"g":191,"b":0,"a":0.3} },
{"t":2.5, "c":{"r":255,"g":128,"b":0,"a":0.3} },
{"t":5.0, "c":{"r":255,"g":0,"b":0,"a":0.3} },
{"t":10.0, "c":{"r":255,"g":0,"b":0,"a":0.4} },
{"t":25.0, "c":{"r":255,"g":0,"b":0,"a":0.5} },
{"t":50.0, "c":{"r":255,"g":0,"b":64,"a":0.5} },
{"t":75.0, "c":{"r":255,"g":0,"b":128,"a":0.5} },
{"t":100.0, "c":{"r":255,"g":0,"b":191,"a":0.5} }
]
colortableoutdated = [
{"t": 0, "c": {"r": 102, "g": 153, "b": 102, "a": 0.1}},
{"t": 0.1, "c": {"r": 128, "g": 153, "b": 102, "a": 0.1}},
{"t": 0.25, "c": {"r": 153, "g": 153, "b": 102, "a": 0.1}},
{"t": 0.5, "c": {"r": 153, "g": 145, "b": 102, "a": 0.2}},
{"t": 1.0, "c": {"r": 153, "g": 140, "b": 102, "a": 0.3}},
{"t": 2.5, "c": {"r": 153, "g": 128, "b": 102, "a": 0.3}},
{"t": 5.0, "c": {"r": 153, "g": 102, "b": 102, "a": 0.3}},
{"t": 10.0, "c": {"r": 153, "g": 102, "b": 102, "a": 0.4}},
{"t": 25.0, "c": {"r": 153, "g": 102, "b": 102, "a": 0.5}},
{"t": 50.0, "c": {"r": 153, "g": 102, "b": 128, "a": 0.5}},
{"t": 75.0, "c": {"r": 153, "g": 102, "b": 141, "a": 0.5}},
{"t": 100.0, "c": {"r": 153, "g": 102, "b": 153, "a": 0.5}}
]
def calculateColor(colortable, v):
    # Find the first table entry whose threshold is >= v.
    indexge = len(colortable)
    for i in range(0, len(colortable)):
        if v <= colortable[i]["t"]:
            indexge = i
            break
    tl = colortable[max(0, indexge - 1)]
    th = colortable[min(indexge, len(colortable) - 1)]
    l = tl["c"]
    h = th["c"]
    # Interpolation between bands is disabled: fl is pinned to 1.0, so the
    # lower band's color is used as-is.
    #fl = (v - tl["t"])/(th["t"] - tl["t"]) if th["t"]>tl["t"] else 1.0
    fl = 1.0
    fh = 1.0 - fl
return Color(
int(fl*l["r"]+fh*h["r"]),
int(fl*l["g"]+fh*h["g"]),
int(fl*l["b"]+fh*h["b"]),
fl*l["a"]+fh*h["a"]
)
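# Illustrative reading of the tables above: for v = 3.0 cases per 10000 the
# first threshold >= v is 5.0, and with fl pinned to 1.0 the band below it
# wins, so calculateColor(colortable, 3.0) yields Color(255, 128, 0, 0.3).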
class datapoint:
def __init__(self, numcases, timestamp, sourceurl):
self.numcases = numcases
self.timestamp = timestamp
self.sourceurl = sourceurl
    def __str__(self):
        return "datapoint(numcases=" + str(self.numcases) + ",timestamp=" + self.timestamp.isoformat() + ",sourceurl='" + self.sourceurl + "')"
def processGEOJSON(country, geojsonfilename, geojsonprop_caseskey, geojsonprop_id, numcaseslookup, populationlookup, preprocessgeojson = None):
totalCases = 0
mostrecentupdate = max(numcaseslookup[n].timestamp for n in numcaseslookup)
with open("ncov19.csv","w") as f:
        f.write("Name,Cases,LastUpdate,SourceUrl\n")
for n in numcaseslookup:
f.write(n + "," + str(numcaseslookup[n].numcases) + "," + numcaseslookup[n].timestamp.isoformat() + "," + numcaseslookup[n].sourceurl+"\n")
with open(geojsonfilename, "r", encoding="utf-8") as source:
geojsondata = json.load(source)
if preprocessgeojson:
preprocessgeojson(geojsondata)
unmatchedgeojsonnames = []
for f in geojsondata["features"]:
p = f["properties"]
        if isinstance(geojsonprop_caseskey, str):
            name = p[geojsonprop_caseskey]
            try:
                v = numcaseslookup.pop(name)
            except KeyError:
                v = None
        else:
            try:
                name, v = geojsonprop_caseskey(f, numcaseslookup)
            except Exception:
                name, v = "???", None
        if v is None:
v = datapoint(numcases = 0, timestamp = mostrecentupdate, sourceurl = "")
unmatchedgeojsonnames.append(name)
p["NAME"] = name
p["ID"] = p[geojsonprop_id] if isinstance(geojsonprop_id, str) else geojsonprop_id(f)
p["CASES"] = v.numcases
p["LASTUPDATE"] = v.timestamp.isoformat()
p["POPULATION"] = populationlookup[name] if isinstance(populationlookup, dict) else populationlookup(f)
p["CASESPER10000"] = p["CASES"] / p["POPULATION"] * 10000
p["SOURCEURL"] = v.sourceurl
colors = colortable if v.timestamp + datetime.timedelta(hours=96) > datetime.datetime.now(datetime.timezone.utc) else colortableoutdated
fillColor = calculateColor(colors,p["CASESPER10000"])
p["fill"] = True
p["fillColor"] = "#{0:02x}{1:02x}{2:02x}".format(fillColor.r,fillColor.g,fillColor.b)
p["fillOpacity"] = fillColor.a
p["weight"] = 1
p["opacity"] = 0.3
totalCases = totalCases + p["CASES"]
if len(numcaseslookup)>0:
for key in numcaseslookup:
print("Not found: '"+key+"' ('"+str(key.encode('unicode-escape'))+"') Datapoint: ",numcaseslookup[key])
print("GEOJSON contains the following unmatched names:")
for n in unmatchedgeojsonnames:
print("'"+n+"' ('"+str(n.encode('unicode-escape'))+"')")
print(country+": assigned "+str(totalCases)+" cases to geojson features.")
with open("ncov19map.geojson", "w") as dest:
json.dump(geojsondata,dest)
with open("ncov19map.js", "w") as dest:
dest.write("GEOJSON_" + country + "=")
json.dump(geojsondata,dest)
dest.write(";")
dest.write("\nGEOJSON_totalCases=GEOJSON_totalCases+"+str(totalCases))
dest.write("\nGEOJSON_lastoverallupdate=new Date(Math.max("+str((mostrecentupdate - datetime.datetime(1970,1,1, tzinfo=datetime.timezone.utc)).total_seconds()*1000)+",GEOJSON_lastoverallupdate.valueOf()));")
now = datetime.datetime.utcnow()
dest.write("\nGEOJSON_lastbuildrunutc=new Date(Math.max(Date.UTC("+str(now.year)+","+str(now.month-1)+","+str(now.day)+","+str(now.hour)+","+str(now.minute)+","+str(now.second)+"),GEOJSON_lastbuildrunutc));")
dest.write("\nlegendColors=[")
for i in range(0,len(colortable)):
dest.write("\n {{ v:{0},c:\"#{1:02x}{2:02x}{3:02x}\",o:{4} }}".format(colortable[i]["t"],colortable[i]["c"]["r"],colortable[i]["c"]["g"],colortable[i]["c"]["b"],colortable[i]["c"]["a"]))
if i!=len(colortable)-1:
dest.write(",")
dest.write("\n];")
print("Completed exporting "+country)
| 2.578125
| 3
|
HW/HW7/CW7.2.py
|
kolyasalubov/Lv-639.pythonCore
| 0
|
12782420
|
<reponame>kolyasalubov/Lv-639.pythonCore<gh_stars>0
def reverse_list(l):
    """Return a list with the elements of l in reverse order."""
    return [item for item in reversed(l)]
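# Quick illustration: reverse_list([1, 2, 3]) returns [3, 2, 1].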
| 3.109375
| 3
|
implementation/server/factories/books_author.py
|
Aincient/cleo
| 0
|
12782421
|
"""
Books Author model factory.
"""
import random
from factory import DjangoModelFactory, LazyAttribute
from books.models import Author
from .factory_faker import Faker
__all__ = (
'AuthorFactory',
'LimitedAuthorFactory',
'SingleAuthorFactory',
)
class BaseAuthorFactory(DjangoModelFactory):
"""Base author factory."""
salutation = Faker('text', max_nb_chars=10)
name = Faker('name')
email = Faker('email')
birth_date = Faker('date')
biography = Faker('text')
phone_number = Faker('phone_number')
website = Faker('url')
company = Faker('company')
company_phone_number = Faker('phone_number')
company_email = Faker('email')
company_website = Faker('url')
class Meta(object):
"""Meta class."""
model = Author
abstract = True
class AuthorFactory(BaseAuthorFactory):
"""Author factory."""
class LimitedAuthorFactory(BaseAuthorFactory):
"""Author factory, but limited to 20 authors."""
id = LazyAttribute(
lambda __x: random.randint(1, 20)
)
class Meta(object):
"""Meta class."""
django_get_or_create = ('id',)
class SingleAuthorFactory(BaseAuthorFactory):
"""Author factory, limited to a single author."""
id = 999999
name = "<NAME>"
email = "<EMAIL>"
class Meta(object):
"""Meta class."""
django_get_or_create = ('id',)
| 2.859375
| 3
|
test_shorturl.py
|
lepture/flask-shorturl
| 28
|
12782422
|
<reponame>lepture/flask-shorturl
from nose.tools import raises
from flask import Flask
from flask_shorturl import ShortUrl
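# Each test below checks the same round-trip invariant: encode/decode and
# enbase/debase are inverses, and encode_url/decode_url compose the two.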
def test_shorturl():
app = Flask(__name__)
su = ShortUrl(app)
for a in range(0, 200000, 37):
b = su.encode(a)
c = su.enbase(b)
d = su.debase(c)
e = su.decode(d)
assert a == e
assert b == d
assert c == su.encode_url(a)
assert a == su.decode_url(c)
def test_init_app():
app = Flask(__name__)
su = ShortUrl()
su.init_app(app)
a = 21
b = su.encode(a)
c = su.enbase(b)
d = su.debase(c)
e = su.decode(d)
assert a == e
assert b == d
assert c == su.encode_url(a)
assert a == su.decode_url(c)
def test_flask_ctx():
app = Flask(__name__)
DEFAULT_ALPHABET = 'mn6j2c4rv8bpygw95z7hsdaetxuk3fq'
app.config.setdefault('SHORT_URL_ALPHABET', DEFAULT_ALPHABET)
app.config.setdefault('SHORT_URL_MIN_LENGTH', 5)
app.config.setdefault('SHORT_URL_BLOCK_SIZE', 24)
su = ShortUrl()
with app.test_request_context():
a = 21
b = su.encode(a)
c = su.enbase(b)
d = su.debase(c)
e = su.decode(d)
assert a == e
assert b == d
assert c == su.encode_url(a)
assert a == su.decode_url(c)
@raises(RuntimeError)
def test_no_ctx():
su = ShortUrl()
su.encode(21)
| 2.625
| 3
|
ExpenseTracker/expense/urls.py
|
lennyAiko/LifeExpenses
| 0
|
12782423
|
<filename>ExpenseTracker/expense/urls.py
from django.urls import path
from .views import home, ExpenseList, createExpense, deleteExpense, setBudget
urlpatterns = [
path("home/", home, name="home"),
path("expenses/", ExpenseList, name="expense"),
path("create_expense/", createExpense, name="create_expense"),
path("delete_expense/<int:pk>", deleteExpense, name="remove_expense"),
path("set_budget", setBudget, name="add_budget")
]
| 1.625
| 2
|
shhelper/paths.py
|
PowerSnail/PythonShellHelper
| 0
|
12782424
|
import typing as T
from pathlib import Path
import re
class Globals:
cwd = Path(".").absolute()
def cd(p: str):
    Globals.cwd = Globals.cwd.joinpath(p).absolute()
def pwd() -> Path:
return Globals.cwd
def ls(p: str = ".") -> T.List[Path]:
path = Globals.cwd.joinpath(p)
return [p for p in path.iterdir()]
def batch_rename(regex, name_template):
r = re.compile(regex)
for p in ls():
match = r.match(p.name)
if match is None:
continue
groups = match.groups()
new_name = name_template.format(*groups)
p.rename(p.parent.joinpath(new_name)) # TODO: name conflict
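# Illustrative usage (hypothetical filenames): capture the numeric suffix of
# "IMG_001.jpg"-style files in the current directory and rename each one to
# "photo-001.jpg":
# batch_rename(r"IMG_(\d+)\.jpg", "photo-{0}.jpg")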
| 2.890625
| 3
|
OLD/Location.py
|
bhuvan21/TBGC
| 0
|
12782425
|
from input_functions import safe_input, numbered_choice
from display_funcs import decorate
class Location():
    def __init__(self, name, short_desc, long_desc, contains=None, level=0, starting_location=False, input_func=safe_input, output_func=print):
        self.name = name
        self.short_desc = short_desc
        self.long_desc = long_desc
        # Use None as the default to avoid a shared mutable default list
        # across Location instances.
        self.contains = contains if contains is not None else []
        self.starting_location = starting_location
        self.level = level
        self.input_func = input_func
        self.output_func = output_func
        self.first_visit = None
        # Containment is bidirectional: each contained Location links back here.
        for l in self.contains:
            if type(l) == Location:
                l.contains.append(self)
def enter(self):
numbered_choice("What/Who/Where would you like to interact with?", [s.name for s in self.contains], self.contains, self.input_func, self.output_func).interact()
def interact(self):
self.output_func(self.name + " : " + self.short_desc)
def get_fancy(self):
return decorate(self)
| 3.65625
| 4
|
app/app/api/category_routes.py
|
Web-Dev-Collaborative/Anvil_2.0
| 12
|
12782426
|
from flask import Blueprint
from flask_login import current_user
from app.models import Category
category_routes = Blueprint('category', __name__)
@category_routes.route("")
def get_categories():
user_categories = Category.query.filter_by(user_id=current_user.id)
default_categories = Category.query.filter_by(user_id=None)
return {
"defaultCategories": [default_category.to_dict()
for default_category
in default_categories],
"userCategories": [user_category.to_dict()
for user_category
in user_categories]
}
| 2.515625
| 3
|
mobyle/web/security.py
|
mobyle2/mobyle2.web
| 0
|
12782427
|
from mobyle.common.connection import connection
from mobyle.common import users
def groupFinder(userid, request):
    # Try to find the user in the database.
user = connection.User.find_one({"email": userid})
if user is not None:
groups = user['groups']
if user['admin']:
groups.append('group:admin')
return groups
| 2.65625
| 3
|
scripts/extract-changeid-map.py
|
thewtex/gerrit-static-archive
| 1
|
12782428
|
import sys
import os
import json
import json_lines
output_file = 'output.jl'
if not os.path.exists(output_file):
print('Did not find expected output file!')
sys.exit(1)
change_id_to_change_number = {}
with open(output_file, 'rb') as fp:
for item in json_lines.reader(fp):
if 'ChangeIdToChangeNumber' in item:
change_id_to_change_number.update(item['ChangeIdToChangeNumber'])
with open(os.path.join('mirror', 'ChangeIdToChangeNumber.json'), 'w') as fp:
json.dump(change_id_to_change_number, fp)
| 2.640625
| 3
|
efls-console/console/argo_template/test_template.py
|
universe-hcy/Elastic-Federated-Learning-Solution
| 65
|
12782429
|
# -*- coding: utf8 -*-
import kfp
def flip_coin():
return kfp.dsl.ContainerOp(
name='Flip a coin',
image='python:alpine3.6',
command=['python', '-c', """
import random
res = "heads" if random.randint(0, 1) == 0 else "tails"
with open('/output', 'w') as f:
f.write(res)
"""],
file_outputs={'output': '/output'}
)
def end():
return kfp.dsl.ContainerOp(name='End', image="alpine:3.6", command=["sh", "-c", 'echo "Flip coin ends"'])
def heads():
return kfp.dsl.ContainerOp(name='Heads', image="alpine:3.6", command=["sh", "-c", 'echo "it was heads"'])
def tails():
return kfp.dsl.ContainerOp(name='Tails', image="alpine:3.6", command=["sh", "-c", 'echo "it was tails"'])
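# The pipeline below wires these steps together: the two dsl.Condition blocks
# form mutually exclusive branches keyed on flip_op's output, and end() is
# ordered after whichever branch actually ran.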
@kfp.dsl.pipeline(name='Coin-flip', description='Flip a coin')
def coin_flip_pipeline():
flip_op = flip_coin()
result_op = None
with kfp.dsl.Condition(flip_op.output == 'heads'):
result_op = heads()
with kfp.dsl.Condition(flip_op.output == 'tails'):
result_op = tails()
end_op = end()
end_op.after(result_op)
def main():
kfp.compiler.Compiler().compile(coin_flip_pipeline, __file__ + ".yaml")
if __name__ == '__main__':
main()
| 2.5625
| 3
|
cdc_config.py
|
dlf412/mysql-cdc-redis
| 4
|
12782430
|
#!/usr/bin/env python
# encoding: utf-8
# redis for saving the binlog file name and position
# please reserve db + 1 for saving the mysql changed data
redis_url = "redis://127.0.0.1/0"
cache_url = "redis://127.0.0.1/1"
# mysql server id
server_id = 1
# mysql connection setting
mysql_settings = {'host': '192.168.1.34',
'port': 3306,
'user': 'mediawise',
'passwd': '<PASSWORD>'}
# watch databases setting
# it can be set None or a tuple.
# Watch all databases if set None
# value format: None or database tuple
schemas = None
# watch tables setting (same format as the databases setting), for tables with a primary key
# value format: None or table tuple
#tables = ("task",)
tables = None
# for tables without a primary key, set the unique key fields here
# value format:
# {} or {"table_name": fields tuple, ....}
tables_without_primary_key = {"db_test.task_test": ("uuid", )}
# Reading the binlog stream is blocking; default is True.
# If set to False, the cdc program will exit when it finishes reading the binlog.
blocking = True
# watch event setting
events = ["insert", "update", "delete"]
# turn off dumping trigger if set to 0
cache_max_rows = 2000000
dump_command = "python dump2csv.py -c dump.conf"
log_level = "INFO"
binlog_max_latency = 60000
| 1.929688
| 2
|
Finance/Python/finance/lib/python2.7/site-packages/pandas_datareader/google/quotes.py
|
pallavbakshi/datascience
| 1
|
12782431
|
import pandas as pd
from dateutil.parser import parse
import numpy as np
from pandas_datareader.base import _BaseReader
import json
import re
class GoogleQuotesReader(_BaseReader):
"""Get current google quote"""
@property
def url(self):
return 'http://www.google.com/finance/info'
@property
def params(self):
if isinstance(self.symbols, pd.compat.string_types):
sym_list = self.symbols
else:
sym_list = ','.join(self.symbols)
params = {'q': sym_list}
return params
    def _read_lines(self, out):
        buffer = out.read()
        # The payload is prefixed with "// " before the JSON body; find the
        # marker and parse everything after it.
        m = re.search('// ', buffer)
        result = json.loads(buffer[m.start() + len('// '):])
return pd.DataFrame([[float(x['cp']), float(x['l'].replace(',', '')),
np.datetime64(parse(x['lt']).isoformat())]
for x in result], columns=['change_pct',
'last', 'time'],
index=[x['t'] for x in result])
| 2.921875
| 3
|
paper/make_language_fig.py
|
csdms/bmi_wrap
| 0
|
12782432
|
import matplotlib.pyplot as plt
import pandas as pd
def main():
    if 0:  # flip to 1 to re-scrape the language table from the CSDMS wiki
data = pd.read_html("https://csdms.colorado.edu/wiki/CSDMS_models_by_numbers")[
2
]
languages = pd.DataFrame(
{"Count": data["Count"].values}, index=data["Program language"]
)
languages.to_csv("languages.csv")
else:
languages = pd.read_csv("languages.csv", index_col=0, header=0)
other = languages[languages["Count"] < 10]
languages.loc["Other", "Count"] += other["Count"].sum()
languages = languages[languages["Count"] >= 10]
languages.sort_index(inplace=True)
# explode = [0.1, 0.1, 0.1, 0.1, 0.0, 0.0, 0.1]
explode = [0.1] * len(languages)
plt.pie(
languages["Count"],
autopct="%1.1f%%",
labels=languages.index,
explode=explode,
shadow=True,
)
plt.show()
if __name__ == "__main__":
main()
| 3.375
| 3
|
copyfile.py
|
UncleEngineer/pythonbackupfile
| 1
|
12782433
|
import os
import shutil
path = r'C:\Users\<NAME>\Desktop\Work'
destination = 'F:\\HERE'
allfile = os.listdir(path)
for f in allfile:
    if f.endswith('.txt'):
#print(f)
source = os.path.join(path,f)
dest = os.path.join(destination,f)
print(source)
print(dest)
shutil.copy2(source,dest)
print('----')
| 3.09375
| 3
|
genesis_blockchain_tools/crypto/backend/rubenesque.py
|
potehinre/genesis-blockchain-tools
| 0
|
12782434
|
from rubenesque.codecs.sec import encode, decode
from rubenesque.signatures import ecdsa
import rubenesque.curves
from hashlib import sha256
from ..formatters import encode_sig, decode_sig
from ...convert import int_to_hex_str
from .common import point_to_hex_str, split_str_to_halves
from .errors import (
UnknownPointFormatError, UnknownSignatureFormatError,
UnknownPublicKeyFormatError
)
backend_name = 'rubenesque'
class CurveByAttrName:
@property
def P256(self):
return rubenesque.curves.find('secp256r1')
curve = CurveByAttrName()
def gen_private_key(curve=curve.P256, hashfunc=sha256):
priv_key = curve.private_key()
return int_to_hex_str(priv_key)
def get_public_key(priv_key, curve=curve.P256, hashfunc=sha256, fmt='RAW'):
if fmt in ['RAW', '04']:
priv_key = int(priv_key, 16)
pub_key_obj = curve.generator() * priv_key
return point_to_hex_str(pub_key_obj, fmt=fmt)
else:
raise UnknownPublicKeyFormatError("fmt: '%s'" % fmt)
def gen_keypair(curve=curve.P256, hashfunc=sha256, pub_key_fmt='RAW'):
priv_key = gen_private_key(curve=curve, hashfunc=hashfunc)
pub_key = get_public_key(priv_key, curve=curve, hashfunc=hashfunc,
fmt=pub_key_fmt)
return priv_key, pub_key
def sign(priv_key, data, hashfunc=sha256, curve=curve.P256, sign_fmt='DER',
sign_size=32):
h = hashfunc(data.encode('utf-8')).digest()
r, s = ecdsa.sign(curve, int(priv_key, 16), h)
if sign_fmt in ['DER', 'RAW']:
return encode_sig(r, s, fmt=sign_fmt, size=sign_size).hex()
else:
raise UnknownSignatureFormatError("fmt: '%s'" % sign_fmt)
def verify(pub_key, data, signature, hashfunc=sha256, curve=curve.P256,
sign_fmt='DER', sign_size=32, pub_key_fmt='RAW'):
if pub_key_fmt == 'RAW':
pub_key_encoded = pub_key
elif pub_key_fmt == '04':
pub_key_encoded = pub_key[2:]
else:
raise UnknownPublicKeyFormatError("fmt: '%s'" % pub_key_fmt)
x, y = split_str_to_halves(pub_key_encoded)
x, y = int(x, 16), int(y, 16)
pub_key_point = curve(x, y)
if sign_fmt in ['DER', 'RAW']:
r, s = decode_sig(bytes.fromhex(signature), fmt=sign_fmt)
else:
raise UnknownSignatureFormatError("fmt: '%s'" % sign_fmt)
data_bytes = data.encode()
h = hashfunc(data_bytes).digest()
return ecdsa.verify(pub_key_point, h, r, s)
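# --- Hedged usage sketch (not part of the original backend) ---
# A sign/verify round trip with the defaults defined above; assumes the
# rubenesque package is installed and behaves as this module expects:
#
#   priv, pub = gen_keypair()
#   sig = sign(priv, 'payload')
#   assert verify(pub, 'payload', sig)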
| 2.59375
| 3
|
tests/event/beforeafter/modules/directcalls/__init__.py
|
da-h/miniflask
| 5
|
12782435
|
<gh_stars>1-10
# note that this function does not ask for any miniflask variables (state/event/mf)
def dosomething():
print("event called")
def before_dosomething():
print("before_-event called")
def after_dosomething():
print("after_-event called")
def main(event):
event.dosomething()
def register(mf):
mf.register_event('dosomething', dosomething, unique=False)
mf.register_event('before_dosomething', before_dosomething, unique=False)
mf.register_event('after_dosomething', after_dosomething, unique=False)
mf.register_event('main', main, unique=False)
| 2.40625
| 2
|
app/wakkerdam/__init__.py
|
mofferthond/flask-base
| 0
|
12782436
|
from app.wakkerdam.views import wakkerdam
| 1.101563
| 1
|
user/utils.py
|
superior-prog/JobLand
| 1
|
12782437
|
from datetime import date
from io import BytesIO
from django.core.mail import send_mail
from django.http import HttpResponse
from django.template import loader
from django.template.loader import get_template
from xhtml2pdf import pisa
from .models import *
def match_skill(job_list, skill_list, preferred_job_list):
recommendation_list = []
for skill in skill_list:
for job in job_list:
split_text = skill.skill_title.lower().split(' ')
for s in split_text:
index = job.requirements.lower().find(s)
index1 = job.job_title.lower().find(s)
if (index >= 0 or index1 >= 0) and job not in recommendation_list:
recommendation_list.append(job)
for preferred_job in preferred_job_list:
for job in job_list:
split_text = preferred_job.details.lower().split(' ')
for s in split_text:
index = job.requirements.lower().find(s)
index1 = job.job_title.lower().find(s)
if (index >= 0 or index1 >= 0) and job not in recommendation_list:
recommendation_list.append(job)
return recommendation_list
def rateAvg(rates, length):
summation = 0
if length == 0:
return 0
else:
for r in rates:
summation = summation + r.rate
if (summation / length) % 1 > 0:
return summation / length
else:
return summation // length
def increaseViewCount(user_one, user_two):
if user_one.is_company:
applicant = ApplicantProfileModel.objects.get(user=user_two)
if applicant.totalViewCount is None:
applicant.totalViewCount = 0
applicant.totalViewCount = applicant.totalViewCount + 1
applicant.save()
elif user_one.is_applicant:
company = CompanyProfileModel.objects.get(user=user_two)
if company.totalViewCount is None:
company.totalViewCount = 0
company.totalViewCount = company.totalViewCount + 1
company.save()
ProfileViewDetails.objects.create(
viewedBy=user_one,
viewed=user_two,
)
return
def extractFirstName(name):
first_name = name.split(' ')
return first_name[0]
def calculate_age(born):
today = date.today()
return today.year - born.year - ((today.month, today.day) < (born.month, born.day))
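# A quick worked check of the tuple-comparison trick above: for born
# 1990-06-15 and today 2021-06-14, (6, 14) < (6, 15) is True, so the
# function returns 2021 - 1990 - 1 = 30; one day later the birthday has
# passed, the comparison is False, and it returns 31.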
def add_to_employee_keyword(user, keyword):
if keyword is not None:
EmployeeSearchKeywordModel.objects.create(
searched_by=user,
searched_for=keyword,
)
return
def add_to_job_keyword(user, keyword):
if keyword is not None:
JobSearchKeywordModel.objects.create(
searched_by=user,
searched_for=keyword,
)
return
def deactivate_user(user):
user.is_active = False
user.save()
return
def render_to_pdf(template_src, context=None):
    # default to None so one mutable dict is not shared across calls
    if context is None:
        context = {}
    template = get_template(template_src)
    html = template.render(context)
result = BytesIO()
pdf = pisa.pisaDocument(BytesIO(html.encode("ISO-8859-1")), result)
if not pdf.err:
return HttpResponse(result.getvalue(), content_type='application/pdf')
return None
def send_registration_mail(user):
subject = "Welcome to JobLand"
from_email = '<EMAIL>'
home_link = "http://127.0.0.1:8000"
about_link = "http://127.0.0.1:8000"
contact_link = "http://127.0.0.1:8000"
context = {
"user": user,
"home_link": home_link,
"about_link": about_link,
"contact_link": contact_link,
}
msg_plain = loader.render_to_string('email/registration.txt', context)
msg_html = loader.render_to_string('email/registration.html', context)
send_mail(subject, msg_plain, from_email, [user.email], fail_silently=True, html_message=msg_html)
return
| 2.28125
| 2
|
sc_sample_writer.py
|
MasterScott/scamper
| 23
|
12782438
|
#!/usr/bin/env python
# Program: $Id: $
# Author: <NAME> <<EMAIL>>
# Description: Example use of sc_warts_writer library.
#
import sys
import time
from sc_warts_writer import WartsWriter, WartsTrace
if __name__ == "__main__":
assert len(sys.argv) == 2
now = time.time()
w = WartsWriter(sys.argv[1])
w.write_list(1, 1, 'sc_sample_writer demo')
w.write_cycle(1, 1, 1, now)
tr = WartsTrace()
tr.add({'listid' : 1, 'srcport' : 1234, 'dstport' : 80,
'srcaddr' : '1.2.3.4', 'dstaddr' : '5.6.7.8',
'attempts' : 2, 'tracetyp' : 2, 'probehop' : 7,
'probesent' : 5, 'firsttl' : 4, 'timeval' : now + 1})
# hopflags (SCAMPER_TRACE_HOP_FLAG_REPLY_TTL)
reply = {'addr' : '4.4.4.4', 'rtt' : 23456,
'ipid' : 1234, 'probesize' : 60,
'replysize' : 54, 'probettl' : 4,
'replyttl' : 60, 'tos' : 0,
'icmp' : 4, 'hopflags' : 0x10}
tr.add_reply(reply)
reply = {'addr' : '6.6.6.6', 'rtt' : 834567,
'ipid' : 1234, 'probesize' : 60,
'replysize' : 54, 'probettl' : 6,
'replyttl' : 58, 'tos' : 0,
'icmp' : 4, 'hopflags' : 0x10}
tr.add_reply(reply)
w.write_object(tr)
# finish
w.write_cycle_stop(1, now+10)
| 2.25
| 2
|
test/test_app/models.py
|
revpoint/prefs-n-perms
| 3
|
12782439
|
<reponame>revpoint/prefs-n-perms<filename>test/test_app/models.py
from django.db import models
class Site(models.Model):
def __unicode__(self):
return u'Site {0}'.format(self.id)
class Customer(models.Model):
site = models.ForeignKey('Site', related_name='customers')
def __unicode__(self):
return u'Customer {0} - Site {1}'.format(self.id, self.site.id)
class CustomerUser(models.Model):
customer = models.ForeignKey('Customer', related_name='users')
def __unicode__(self):
return u'User {0} - Customer {1}'.format(self.id, self.customer.id)
| 2.234375
| 2
|
portfolio_api/serializers/project.py
|
tjeerddie/portfolio-backend
| 0
|
12782440
|
from portfolio_admin.models import Project
from rest_framework import serializers
class ProjectSerializer(serializers.ModelSerializer):
skills = serializers.StringRelatedField(many=True)
class Meta:
model = Project
exclude = ['id', 'created_at', 'updated_at', 'portfolio', 'private']
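# --- Hedged usage sketch (not part of the original module) ---
# Serializing a saved Project instance ('project' is a placeholder object):
#
#   data = ProjectSerializer(project).data
#   # -> all Project fields except the excluded ones, with 'skills'
#   #    rendered through str() on each related object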
| 1.789063
| 2
|
cmsplugin_embed/migrations/0001_initial.py
|
glomium/cmstemplate
| 0
|
12782441
|
<reponame>glomium/cmstemplate
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-23 16:14
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('cms', '0016_auto_20160608_1535'),
]
operations = [
migrations.CreateModel(
name='Embed',
fields=[
('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='cmsplugin_embed_embed', serialize=False, to='cms.CMSPlugin')),
('source', models.CharField(max_length=255, null=True, verbose_name='Source')),
('allow_fullscreen', models.BooleanField(default=False, verbose_name='Allow fullscreen')),
('ratio', models.CharField(choices=[('21by9', '21:9'), ('16by9', '16:9'), ('4by3', '4:3'), ('1by1', '1:1')], default='16by9', max_length=5, verbose_name='Ratio')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
| 1.4375
| 1
|
tests/r/test_friendship.py
|
hajime9652/observations
| 199
|
12782442
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.friendship import friendship
def test_friendship():
"""Test module friendship.py by downloading
friendship.csv and testing shape of
extracted data has 0 rows and 7 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = friendship(test_path)
try:
assert x_train.shape == (0, 7)
except:
shutil.rmtree(test_path)
    raise
| 2.53125
| 3
|
setup.py
|
xdssio/zappa_client
| 1
|
12782443
|
<reponame>xdssio/zappa_client
from setuptools import setup
setup(name='zappa_client',
      description='A tool to call a Zappa serverless function directly without AWS APIGateway',
      long_description="Zappa is a tool that wraps the deployment of serverless applications to AWS Lambda; "
                       "you can easily invoke your function through AWS APIGateway using curl. "
                       "For some use cases you may want to avoid exposing your function via APIGateway "
                       "and instead call it directly. "
                       "There are many steps implemented by Zappa which make this difficult; "
                       "this package is a wrapper that saves you that pain.",
version='0.0.1',
url='https://github.com/xdssio/zappa_client',
author='<NAME>',
author_email='<EMAIL>',
license='MIT License',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
install_requires=[
'boto3>=1.7.21'
],
)
| 1.476563
| 1
|
main_cpu.py
|
vietbt/ViTextnormASR
| 0
|
12782444
|
<reponame>vietbt/ViTextnormASR
from utils.data import Data
from network.trainer import train
if __name__=="__main__":
config = "configs/config.norm.cpu.json"
mbert = "configs/config.mbert.json"
vibert = "configs/config.vibert.json"
velectra = "configs/config.velectra.json"
# for fold_id in range(5):
# config = f"configs/config.norm.fold_{fold_id}.json"
# data = Data.from_config(config, velectra)
# train(config, velectra, "nojoint", biaffine=False)
train(config, velectra, "norm_to_punc", n_blocks=4, n_tokens=50, biaffine=False)
train(config, velectra, "punc_to_norm", n_blocks=4, n_tokens=50)
train(config, velectra, "norm_to_punc", n_tokens=50)
train(config, velectra, "punc_to_norm", n_tokens=50)
train(config, velectra, "norm_to_punc")
train(config, velectra, "punc_to_norm")
| 2.234375
| 2
|
cerebstats/stat_scores/chi2GOFScore.py
|
HarshKhilawala/cerebstats
| 0
|
12782445
|
# ============================================================================
# ~/cerebstats/cerebstats/stat_scores/chi2GOFScore.py
#
# This py-file contains custom score functions initiated by
#
# from cerebstats import scoreScores
# from cerebstats.scoreScores import ABCScore
# ============================================================================
import numpy as np
from scipy.stats import chisquare
import sciunit
# ==============================Chi2GOFScore==================================
class Chi2GOFScore(sciunit.Score):
"""
Compute chi2-statistic for chi-squared goodness-of-fit Test of proportions.
One may think of this as a **one-way contingency table.**
+--------------+-------------------------------------------------------------+
| sample size | :math:`k` categories of a categorial variable of interest |
+ +--------------+--------------+----------------+--------------+
| :math:`n` | :math:`x_1` | :math:`x_2` | :math:`\\ldots` | :math:`x_k` |
+==============+==============+==============+================+==============+
| observations | :math:`O_1` | :math:`O_2` | :math:`\\ldots` | :math:`O_k` |
+--------------+--------------+--------------+----------------+--------------+
| probabilities| :math:`p_1` | :math:`p_2` | :math:`\\ldots` | :math:`p_k` |
+--------------+--------------+--------------+----------------+--------------+
| expected | :math:`np_1` | :math:`np_2` | :math:`\\ldots` | :math:`np_k` |
+--------------+--------------+--------------+----------------+--------------+
Notice that for probabilities of *k* categories :math:`\\sum_{\\forall i} p_i = 1`. The expected counts for each category can be derived from it (or already given) such that :math:`\\sum_{\\forall i} np_i = n`.
.. table:: Title here
==================== ==============================================================================
Definitions Interpretation
==================== ==============================================================================
:math:`n` sample size; total number of experiments done
:math:`k` number of categorical variables
:math:`O_i` observed count (frequency) for :math:`i^{th}` variable
:math:`p_i` probability for :math:`i^{th}` category such that
:math:`\\sum_{\\forall i} p_i = 1`
:math:`E_i` expected count for :math:`i^{th}` category such that
:math:`E_i = n p_i`
test-statistic :math:`\\chi^2 = \\sum_{\\forall i} \\frac{(O_i - E_i)^2}{E_i}`
:math:`df` degrees of freedom, :math:`df = k-1`
==================== ==============================================================================
*Note* the modification made when compared with a two-way :math:`\\chi^2` test is
- the calculation of expected counts :math:`E_i = n p_i`
- the degree of freedom :math:`df = k-1`
This class uses `scipy.stats.chisquare <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.chisquare.html>`_.
**Use Case:**
::
x = Chi2GOFScoreForProportionChi2GOFTest.compute( observation, prediction )
score = Chi2GOFScoreForProportionChi2GOFTest(x)
    *Note*: As part of the `SciUnit <http://scidash.github.io/sciunit.html>`_ framework this custom :py:class:`.Chi2GOFScore` should have the following methods,
* :py:meth:`.compute` (class method)
* :py:meth:`.sort_key` (property)
* :py:meth:`.__str__`
"""
#_allowed_types = (float,)
_description = ( "ZScoreForSignTest gives the z-statistic applied to medians. "
+ "The experimental data (observation) is taken as the sample. "
+ "The sample statistic is 'median' or computed median form 'raw_data'. "
+ "The null-value is the 'some' specified value whic is taken to be the predicted value generated from running the model. " )
@classmethod
def compute(cls, observation, prediction):
"""
+---------------------+-----------------------------------------------------------------------+
| Argument | Value type |
+=====================+=======================================================================+
| first argument |dictionary; observation/experimental data must have keys "sample_size" |
| |with a number as its value and "observed_freq" whose value is an array |
+---------------------+-----------------------------------------------------------------------+
| second argument |dictionary; model prediction must have either "probabilities" or |
| |"expected" whose value is an array (same length as "observed_freq") |
+---------------------+-----------------------------------------------------------------------+
*Note:*
        * chi squared tests (for goodness-of-fit or contingency table) are by nature two-sided, so there is no option for one-sidedness.
"""
name = "chi2_goodness_of_fit_test_for_proportions"
if "probabilities" in prediction:
probabilities = np.array( prediction["probabilities"] )
expected_counts = observation["sample_size"] * probabilities
elif "expected" in prediction:
expected_counts = np.array( prediction["expected"] )
probabilities = expected_counts / observation["sample_size"]
#
k_categories = expected_counts.size
score, pvalue = chisquare( observation["observed_freq"], f_exp = expected_counts )
#return self.score # chi2_statistic
return {"name": name, "sample_statistic": probabilities, "expected_values": expected_counts,
"test_statistic": score, "df": k_categories-1, "p_value": pvalue}
@property
def sort_key(self):
return self.score
def __str__(self):
return "ChiSqGOFScore is " + str(self.score)
# ============================================================================
| 2.28125
| 2
|
admin/management/commands/migrate.py
|
adelsonllima/djangoplus
| 21
|
12782446
|
<reponame>adelsonllima/djangoplus
# -*- coding: utf-8 -*-
from django.core.management.commands import migrate
from djangoplus.admin.management import sync_permissions
class Command(migrate.Command):
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument('--ignore_sync_permissions', action='store_true', dest='ignore_sync_permissions')
def handle(self, *args, **options):
super(Command, self).handle(*args, **options)
if not options.get('ignore_sync_permissions'):
sync_permissions()
| 1.929688
| 2
|
w2b/fft.py
|
adamd1008/wav2bmp
| 1
|
12782447
|
<filename>w2b/fft.py<gh_stars>1-10
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
from numpy.fft import rfft, irfft
from . import util
################################################################################
def get_fft_stats(n, size, overlapDec):
"""Calculate the stats needed to iteratively compute the FFT over a set of
wave samples with a given length, size and overlap.
"""
if n < size:
raise ValueError("`n' cannot be less than size")
if (overlapDec >= 1.0) or (overlapDec < 0.0):
raise ValueError("Overlap must be LT 1.0, GE 0.0")
elif overlapDec != 0.0:
overlapLog2 = np.log2(1.0 / (1.0 - overlapDec))
if int(overlapLog2) != overlapLog2:
raise ValueError("Overlap must be 1 / (2^n)")
if size < 2:
raise ValueError("Size must be GE 2")
sizeLog2 = np.log2(size)
if int(sizeLog2) != sizeLog2:
raise ValueError("Size must be 2^n")
sizeOverlap = size * overlapDec
if int(sizeOverlap) != sizeOverlap:
raise ValueError("Size is not wholly divisible by overlap")
# So, start the spectrogram with size * overlapDec's worth of samples not
# in the FFT (4 zeros 50%, 6 zeroes 75%, 7 zeroes 87.5%, etc.)
overlap = int(size * overlapDec)
step = size - overlap
if int(step) != step:
raise ValueError("`overlapDec' must produce whole step")
nDivStep = int(n / step)
nModStep = int(n % step)
sizeDivStep = int(size / step)
# Left iterations is going to be one less than the number of times that
# step goes into size
leftPadIters = sizeDivStep - 1
start = -(leftPadIters * step)
# Right iterations is a little harder; we need to check how many samples are
# left over when dividing all samples by step (not size, right?)
iters = nDivStep + leftPadIters
if nModStep > 0:
iters += 1
# Debug prints
#print("n =", n)
#print("size =", size)
#print("overlapDec =", overlapDec)
#print("overlap =", overlap)
#print("step =", step)
#print("leftPadIters =", leftPadIters)
#print("start =", start)
#print("iters =", iters)
#print()
#iterString = ""
#iterStringFirst = True
#count = 0
#for i in range(start, n, step):
# if iterStringFirst:
# iterStringFirst = False
# else:
# iterString += ", "
# iterString += str(i) + " to " + str(i + size)
# count += 1
#print(str(count) + " iters: ", iterString)
#print()
return start, step, iters
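# A hand-worked example of the arithmetic above (values computed from the
# definitions, not taken from the original source): for n=44100, size=1024,
# overlapDec=0.5 we get overlap=512, step=512, leftPadIters=1, start=-512,
# and iters = 44100//512 + 1 + 1 = 88 (the trailing +1 covers the 68 leftover
# samples), i.e. get_fft_stats(44100, 1024, 0.5) == (-512, 512, 88).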
################################################################################
def wav2bmp(fs, wav, size=1024, overlapDec=0.0, window=np.hanning):
"""Transform wave samples into a spectrogram image.
Warning: using a window in the bmp2wav flow (when recomputing the complex
FFT result for resynthesis with the mask image) will result in a very badly
scaled result!
See tests/test_bmp2wav.py.
"""
if wav.ndim != 1:
raise ValueError("Expected 1-dim array")
l = wav.shape[0]
fftLen = int(size / 2) + 1
if callable(window):
wnd = window(size)
        elif isinstance(window, np.ndarray):
if window.ndim != 1:
raise ValueError("Expected `window' to be a 1-dim array")
elif window.shape[0] != size:
raise ValueError("Expected `window' to be `size/2+1'")
else:
wnd = window
        elif window is None:
wnd = None
else:
raise ValueError("Expected `window' to be a function or NumPy array")
start, step, iters = get_fft_stats(l, size, overlapDec)
c = 0
ab = np.zeros((fftLen, iters), dtype="float32")
an = np.zeros((fftLen, iters), dtype="float32")
x = np.zeros((fftLen, iters), dtype=complex)
buf = np.ndarray(size, dtype="float32")
for i in range(start, l, step):
# Do stuff
if i < 0:
bufStart = size - (size + i)
bufEnd = size
wavEnd = i + size
#print("[<] i =", i, "\tbuf: 0 to", bufStart, "to", bufEnd, \
# "\twav: 0 to", wavEnd, "|", wavEnd)
buf[0:bufStart] = 0.0
buf[bufStart:bufEnd] = wav[0:wavEnd]
elif (i + size) >= l:
bufEnd = l - i
wavStart = i
#print("[>] i =", i, "\tbuf: 0 to", bufEnd, "to", size, \
# "\twav:", wavStart, "to", l, "|", (l - wavStart))
buf[0:bufEnd] = wav[wavStart:l]
buf[bufEnd:size] = 0.0
else:
wavStart = i
wavEnd = wavStart + size
#print("[=] i =", i, "\tbuf: 0 to", size, \
# "\t\twav:", wavStart, "to", wavEnd, \
# "|", (wavEnd - wavStart))
buf[:] = wav[wavStart:wavEnd]
        if wnd is not None:
buf *= wnd
X = rfft(buf)
absNorm = np.abs(X) / size
ab[:, c] = absNorm
an[:, c] = util.angle(X)
x[:, c] = X
c += 1
assert c == iters
return ab, an, x
################################################################################
def bmp2wav(fs, l, x, mask, size, overlapDec):
"""Apply a filter mask to a spectrogram image and transform it back to
wave samples.
This function cheats by first performing wav2bmp again and applying the
filter mask onto the resultant complex FFT data, before then converting
that image into wave samples. This removes the need to convert the
amplitude and angle BMPs back into complex numbers for the filter mask
scaling.
"""
assert x.ndim == 2
assert x.ndim == mask.ndim
assert x.shape == mask.shape
assert x.dtype == complex
assert mask.dtype == "float32"
start, step, iters = get_fft_stats(l, size, overlapDec)
fftI = 0
wavI = start
out = np.zeros(l, dtype="float32")
mult = size / step
while wavI < l:
buf = irfft(x[:, fftI] * mask[:, fftI])
if wavI < 0:
bufStart = size - (size + wavI)
bufEnd = size
wavEnd = wavI + size
#print("[<] wavI =", wavI, "\tout: 0 to", wavEnd, \
# "\tbuf:", bufStart, "to", bufEnd, "|", wavEnd)
out[0:wavEnd] += buf[bufStart:bufEnd] / mult
elif (wavI + size) >= l:
bufEnd = l - wavI
wavStart = wavI
#print("[>] wavI =", wavI, "\tout:", wavStart, "to", l, \
# "\tbuf: 0 to", bufEnd, "|", (l - wavStart))
out[wavStart:l] += buf[0:bufEnd] / mult
else:
wavStart = wavI
wavEnd = wavStart + size
#print("[=] wavI =", wavI, "\tout:", wavStart, "to", wavEnd, \
# "\t\tbuf: 0 to", buf.shape[0], "|", (wavEnd - wavStart))
out[wavStart:wavEnd] += buf[:] / mult
fftI += 1
wavI += step
assert fftI == iters
return out
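# --- Hedged round-trip sketch (not part of the original module) ---
# Analyse a short sine, apply an all-pass mask, and resynthesise; window=None
# follows the docstring's warning about windows in the bmp2wav flow, and the
# mask must match the complex FFT array returned by wav2bmp:
#
#   fs = 44100
#   t = np.arange(fs, dtype="float32") / fs
#   wav = np.sin(2 * np.pi * 440.0 * t).astype("float32")
#   ab, an, x = wav2bmp(fs, wav, size=1024, overlapDec=0.5, window=None)
#   mask = np.ones(x.shape, dtype="float32")
#   out = bmp2wav(fs, len(wav), x, mask, 1024, 0.5)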
| 1.882813
| 2
|
ege_date.py
|
rlowrance/re-local-linear
| 0
|
12782448
|
<reponame>rlowrance/re-local-linear
'''create files contains estimated generalization errors for model
sys.argv: year month day
The sale_date of the models. Uses data up to the day before the sale date
Files created:
year-month-day-MODEL-SCOPE-T[-MODELPARAMS].foldResult
where:
MODEL is one of the models {ols, lasso, ridge, rf, xt}
SCOPE is one of {global,zDDDDD} (global or zip-code specific)
T is the number of days in the training period
MODELPARAMS depends on the model and may be empty
'''
import collections
import cPickle as pickle
import datetime
import numpy as np
import pandas as pd
import pdb
from pprint import pprint
from sklearn import cross_validation
from sklearn import linear_model
from sklearn import ensemble
import sys
import warnings
from Bunch import Bunch
from directory import directory
from Logger import Logger
def usage():
    print 'usage: python ege_date.py yyyy-mm-dd'
sys.exit(1)
def make_control(argv):
    'Return control Bunch'
script_name = argv[0]
base_name = script_name.split('.')[0]
random_seed = 123
now = datetime.datetime.now()
log_file_name = base_name + '.' + now.isoformat('T') + '.log'
if len(argv) < 2:
print 'missing date argument'
usage()
if len(argv) > 2:
print 'extra args'
usage()
year, month, day = argv[1].split('-')
sale_date = datetime.date(int(year), int(month), int(day))
# prior work found that the assessment was not useful
# just the census and tax roll features
# predictors with transformation to log domain
predictors = { # the columns in the x_arrays are in this order
'fraction.owner.occupied': None,
'FIREPLACE.NUMBER': 'log1p',
'BEDROOMS': 'log1p',
'BASEMENT.SQUARE.FEET': 'log1p',
'LAND.SQUARE.FOOTAGE': 'log',
'zip5.has.industry': None,
'census.tract.has.industry': None,
'census.tract.has.park': None,
'STORIES.NUMBER': 'log1p',
'census.tract.has.school': None,
'TOTAL.BATHS.CALCULATED': 'log1p',
'median.household.income': 'log', # not log feature in earlier version
'LIVING.SQUARE.FEET': 'log',
'has.pool': None,
'zip5.has.retail': None,
'census.tract.has.retail': None,
'is.new.construction': None,
'avg.commute': None,
'zip5.has.park': None,
'PARKING.SPACES': 'log1p',
'zip5.has.school': None,
'TOTAL.ROOMS': 'log1p',
'age': None,
'age2': None,
'effective.age': None,
'effective.age2': None}
debug = False
b = Bunch(
path_in=directory('working') + 'transactions-subset2.pickle',
path_log=directory('log') + log_file_name,
path_out=directory('working') + base_name + '-%s' % sale_date + '.pickle',
arg_date=sale_date,
random_seed=random_seed,
sale_date=sale_date,
models={'rf': Rf(), 'ols': Ols()} if not debug else {'rf': Rf()},
scopes=['global', 'zip'],
training_days=(7, 366) if debug else range(7, 366, 7),
n_folds=10,
predictors=predictors,
price_column='SALE.AMOUNT',
debug=debug)
return b
def x(mode, df, control):
'''return 2D np.array, with df x values possibly transformed to log
RETURNS array: np.array 2D
'''
def transform(v, mode, transformation):
if mode is None:
return v
if mode == 'linear':
return v
if mode == 'log':
if transformation is None:
return v
if transformation == 'log':
return np.log(v)
if transformation == 'log1p':
return np.log1p(v)
raise RuntimeError('bad transformation: ' + str(transformation))
raise RuntimeError('bad mode:' + str(mode))
array = np.empty(shape=(df.shape[0], len(control.predictors)),
dtype=np.float64).T
# build up in transposed form
index = 0
for predictor_name, transformation in control.predictors.iteritems():
v = transform(df[predictor_name].values, mode, transformation)
array[index] = v
index += 1
return array.T
def y(mode, df, control):
'''return np.array 1D with transformed price column from df'''
df2 = df.copy(deep=True)
if mode == 'log':
df2[control.price_column] = \
pd.Series(np.log(df[control.price_column]),
index=df.index)
array = np.array(df2[control.price_column].as_matrix(), np.float64)
return array
def demode(v, mode):
'convert log domain to normal'
if v is None:
return None
result = np.exp(v) if mode == 'log' else v
return result
def errors(model_result):
'return median_absolute_error and median_relative_absolute_error'
pprint(model_result)
actuals = model_result['actuals']
estimates = model_result['estimates']
abs_error = np.abs(actuals - estimates)
median_abs_error = np.median(abs_error)
rel_abs_error = abs_error / actuals
median_rel_abs_error = np.median(rel_abs_error)
return median_abs_error, median_rel_abs_error
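# Hand-worked example for the helper above (not from the source): actuals
# [100, 200, 300] vs estimates [110, 190, 330] give absolute errors
# [10, 10, 30] -> median 10, and relative absolute errors
# [0.10, 0.05, 0.10] -> median 0.10.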
class ReportOls(object):
'report generation with y_mode and x_mode in key'
# NOTE: perhaps reusable for any model with y and x modes
def __init__(self):
self.format_global_fold = '%10s %2d %3s %6s %3s %3s f%d %6.0f %3.2f'
self.format_zip_fold = '%10s %2d %3s %6d %3s %3s f%d %6.0f %3.2f'
self.format_global = '%10s %2d %3s %6s %3s %3s median %6.0f %3.2f'
self.format_zip = '%10s %2d %3s %6d %3s %3s median %6.0f %3.2f'
def global_fold_line(self, key, result):
fold_number, sale_date, training_days, model_name, scope = key
assert(scope == 'global')
for result_key, result_value in result.iteritems():
y_mode = result_key[1][:3]
x_mode = result_key[3][:3]
median_abs_error, median_rel_abs_error = errors(result_value)
line = self.format_global_fold % (sale_date,
training_days,
model_name,
scope,
y_mode,
x_mode,
fold_number,
median_abs_error,
median_rel_abs_error)
return line
def zip_fold_line(self, key, result):
fold_number, sale_date, training_days, model_name, scope = key
assert(isinstance(scope, tuple))
assert(scope[0] == 'zip')
zip_code = scope[1]
for result_key, result_value in result.iteritems():
y_mode = result_key[1][:3]
x_mode = result_key[3][:3]
median_abs_error, median_rel_abs_error = errors(result_value)
line = self.format_zip_fold % (sale_date,
training_days,
model_name,
zip_code,
y_mode,
x_mode,
fold_number,
median_abs_error,
median_rel_abs_error)
return line
def summarize_global(self,
sale_date,
training_days,
model_name,
all_results,
control):
scope = 'global'
for y_mode in ('log', 'linear'):
y_mode_print = y_mode[:3]
for x_mode in ('log', 'linear'):
x_mode_print = x_mode[:3]
median_errors = np.zeros(control.n_folds, dtype=np.float64)
median_rel_errors = np.zeros(control.n_folds, dtype=np.float64)
for fold_number in xrange(control.n_folds):
# determine errors in the fold
key = (fold_number, sale_date, training_days, model_name, scope)
if key not in all_results:
print 'key', key
print 'not in result'
continue
result = all_results[key]
model_result = result[('y_mode', y_mode, 'x_mode', x_mode)]
median_abs_error, median_rel_abs_error = errors(model_result)
fold_line = self.format_global_fold % (sale_date,
training_days,
model_name,
scope,
y_mode_print,
x_mode_print,
fold_number,
median_abs_error,
median_rel_abs_error)
print fold_line
median_errors[fold_number] = median_abs_error
median_rel_errors[fold_number] = median_rel_abs_error
all_folds_line = self.format_global % (sale_date,
training_days,
model_name,
scope,
y_mode_print,
x_mode_print,
np.median(median_errors),
np.median(median_rel_errors))
print all_folds_line
def summarize_zip(self, sale_date, training_days, model_name,
all_results, control):
def list_median(lst):
assert(len(lst) > 0)
return np.median(np.array(lst, dtype=np.float64))
def report_zip_code(zip_code, keys):
for y_mode in ('log', 'linear'):
y_mode_print = y_mode[:3]
for x_mode in ('log', 'linear'):
x_mode_print = x_mode[:3]
mode_key = ('y_mode', y_mode, 'x_mode', x_mode)
median_abs_errors = []
median_rel_abs_errors = []
for key in keys:
model_result = all_results[key][mode_key]
median_abs_error, median_rel_abs_error = errors(model_result)
fold_line = self.format_zip_fold % (sale_date,
training_days,
model_name,
zip_code,
y_mode_print,
x_mode_print,
key[0], # fold number
median_abs_error,
median_rel_abs_error)
print fold_line
median_abs_errors.append(median_abs_error)
median_rel_abs_errors.append(median_rel_abs_error)
all_folds_line = self.format_zip % (sale_date,
training_days,
model_name,
zip_code,
y_mode_print,
x_mode_print,
list_median(median_abs_errors),
list_median(median_rel_abs_errors))
print all_folds_line
# determine all zip codes in the specified lines
zip_codes = collections.defaultdict(set)
for key in all_results.keys():
key_fold_number, key_sale_date, key_training_days, key_model_name, key_scope = key
if key_scope == 'global':
# examine only zip code scopes
continue
if key_sale_date == sale_date and key_training_days == training_days and key_model_name == model_name:
key_zip_code = key_scope[1]
zip_codes[key_zip_code].add(key)
# process each zip code
for zip_code, keys in zip_codes.iteritems():
report_zip_code(zip_code, keys)
def summarize(self, sale_date, training_days, model_name,
all_results, control):
self.summarize_global(sale_date, training_days, model_name,
all_results, control)
self.summarize_zip(sale_date, training_days, model_name,
all_results, control)
class Ols(object):
'Ordinary least squares via sklearn'
def __init__(self):
self.Model_Constructor = linear_model.LinearRegression
def reporter(self):
return ReportOls
def run(self, train, test, control):
'''fit on training data and test
ARGS
train : dataframe
test : dataframe
control: Bunch
RETURN dict of values
dict key = (x_mode, y_mode)
values = dict with keys 'actuals', 'estimates', 'fitted', x_names
'''
# implement variants
verbose = False
def variant(x_mode, y_mode):
train_x = x(x_mode, train, control)
test_x = x(x_mode, test, control)
train_y = y(y_mode, train, control)
model = self.Model_Constructor(fit_intercept=True,
normalize=True,
copy_X=True)
fitted_model = model.fit(train_x, train_y)
# if the model cannot be fitted, LinearRegressor returns
# the mean of the train_y values
estimates = fitted_model.predict(test_x)
value = {
'coef': fitted_model.coef_,
'intercept_': fitted_model.intercept_,
'estimates': demode(estimates, y_mode),
'actuals': y('linear', test, control)
}
# check results
if verbose:
print 'x_mode, y_mode: ', x_mode, y_mode
print 'actuals: ', value['actuals']
print 'estimates: ', value['estimates']
return value
all_variants = {}
for x_mode in ('log', 'linear'):
for y_mode in ('log', 'linear'):
variant_value = variant(x_mode, y_mode)
key = ('y_mode', y_mode, 'x_mode', x_mode)
all_variants[key] = variant_value
return all_variants
class ReportRf(object):
    'report generation with no variants (for now)'
def __init__(self):
'sale_date days model global fold error abs_error'
self.format_global_fold = '%10s %2d %3s %6s f%d %6.0f %3.2f'
self.format_zip_fold = '%10s %2d %3s %6d f%d %6.0f %3.2f'
self.format_global = '%10s %2d %3s %6s median %6.0f %3.2f'
self.format_zip = '%10s %2d %3s %6d median %6.0f %3.2f'
def global_fold_line(self, key, result):
fold_number, sale_date, training_days, model_name, scope = key
assert(scope == 'global')
median_abs_error, median_rel_abs_error = errors(result)
line = self.format_global_fold % (sale_date,
training_days,
model_name,
scope,
fold_number,
median_abs_error,
median_rel_abs_error)
return line
def zip_fold_line(self, key, result):
fold_number, sale_date, training_days, model_name, scope = key
assert(isinstance(scope, tuple))
assert(scope[0] == 'zip')
zip_code = scope[1]
median_abs_error, median_rel_abs_error = errors(result)
line = self.format_global_fold % (sale_date,
training_days,
model_name,
zip_code,
fold_number,
median_abs_error,
median_rel_abs_error)
return line
def summarize_global(self, sale_date, training_days, model_name, all_results, control):
scope = 'global'
median_errors = np.zeros(control.n_folds, dtype=np.float64)
median_rel_errors = np.zeros(control.n_folds, dtype=np.float64)
for fold_number in xrange(control.n_folds):
key = (fold_number, sale_date, training_days, model_name, scope)
if key not in all_results:
# can happen when a model could not be fit
print 'model_result missing key', key
continue
model_result = all_results[key]
if len(model_result['actuals']) == 0:
continue
median_abs_error, median_rel_abs_error = errors(model_result)
fold_line = self.format_global_fold % (sale_date,
training_days,
model_name,
scope,
fold_number,
median_abs_error,
median_rel_abs_error)
print fold_line
median_errors[fold_number] = median_abs_error
median_rel_errors[fold_number] = median_rel_abs_error
all_folds_line = self.format_global % (sale_date,
training_days,
model_name,
scope,
np.median(median_errors),
np.median(median_rel_errors))
print all_folds_line
def summarize_zip(self, sale_date, training_days, model_name, all_results, control):
def list_median(lst):
assert(len(lst) > 0)
return np.median(np.array(lst, dtype=np.float64))
def report_zip_code(zip_code, keys):
median_abs_errors = []
median_rel_abs_errors = []
for key in keys:
model_result = all_results[key]
median_abs_error, median_rel_abs_error = errors(model_result)
fold_line = self.format_zip_fold % (sale_date,
training_days,
model_name,
zip_code,
key[0], # fold number
median_abs_error,
median_rel_abs_error)
print fold_line
median_abs_errors.append(median_abs_error)
median_rel_abs_errors.append(median_rel_abs_error)
all_folds_line = self.format_zip % (sale_date,
training_days,
model_name,
zip_code,
list_median(median_abs_errors),
list_median(median_rel_abs_errors))
print all_folds_line
# determine all zip codes in the specified lines
zip_codes = collections.defaultdict(set)
for key in all_results.keys():
key_fold_number, key_sale_date, key_training_days, key_model_name, key_scope = key
if key_scope == 'global':
# examine only zip code scopes
continue
if key_sale_date == sale_date and key_training_days == training_days and key_model_name == model_name:
key_zip_code = key_scope[1]
zip_codes[key_zip_code].add(key)
# process each zip code
for zip_code, keys in zip_codes.iteritems():
report_zip_code(zip_code, keys)
def summarize(self, sale_date, training_days, model_name, all_results, control):
self.summarize_global(sale_date, training_days, model_name, all_results, control)
self.summarize_zip(sale_date, training_days, model_name, all_results, control)
class Rf(object):
'Random forests via sklearn'
def __init__(self):
self.Model_Constructor = ensemble.RandomForestRegressor
def reporter(self):
return ReportRf
def run(self, train, test, control):
'''fit on train, test on test, return dict of variants
The variants are defined by the number of trees in the forest
RETURN dict with key = variant_description
'''
verbose = False
def variant(n_trees):
train_x = x(None, train, control) # no transformation
test_x = x(None, test, control)
train_y = y(None, train, control)
model = self.Model_Constructor(n_estimators=n_trees,
random_state=control.random_seed)
fitted_model = model.fit(train_x, train_y)
estimates = fitted_model.predict(test_x)
# return selected fitted results
result = {
'feature_importances': fitted_model.feature_importances_,
'estimates': estimates,
            'actuals': y(None, test, control)}
if verbose:
for k, v in result.iteritems():
print k, v
return result
all_variants = {}
for n_trees in (10, 100, 300, 1000):
variant_value = variant(n_trees)
key = ('n_trees', n_trees)
all_variants[key] = variant_value
return all_variants
def within(sale_date, training_days, df):
'return indices of samples up to training_days before the sale_date'
assert(training_days > 0)
# one training day means use samples on the sale date only
first_ok_sale_date = sale_date - datetime.timedelta(training_days - 1)
date_column = 'sale.python_date'
after = df[date_column] >= first_ok_sale_date
before = df[date_column] <= sale_date
ok_indices = np.logical_and(after, before)
return ok_indices
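# A quick worked example of the window above (hand-computed, not from the
# source): with sale_date 2004-02-10 and training_days 7, first_ok_sale_date
# is 2004-02-04, so the mask keeps transactions dated 2004-02-04 through
# 2004-02-10 inclusive -- seven distinct sale dates.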
def on_sale_date(sale_date, df):
'''return indices of sample on the sale date'''
date_column = 'sale.python_date'
result = df[date_column] == sale_date
return result
def add_age(df, sale_date):
'Return new df with extra columns for age and effective age'
column_names = df.columns.tolist()
if 'age' in column_names:
print column_names
print 'age in column_names'
pdb.set_trace()
assert('age' not in column_names)
assert('age2' not in column_names)
assert('effective.age' not in column_names)
assert('effective.age2' not in column_names)
sale_year = df['sale.year']
def age(column_name):
'age from sale_date to specified column'
age_in_years = sale_year - df[column_name].values
return pd.Series(age_in_years, index=df.index)
result = df.copy(deep=True)
result['age'] = age('YEAR.BUILT')
result['effective.age'] = age('EFFECTIVE.YEAR.BUILT')
result['age2'] = result['age'] * result['age']
result['effective.age2'] = result['effective.age'] * result['effective.age']
return result
def unique_zip_codes(df):
'yield each unique zip code in the dataframe'
unique_zip_codes = df['zip5'].unique()
for i in xrange(len(unique_zip_codes)):
yield unique_zip_codes[i]
def zip_codes(df, a_zip_code):
'return new dataframe containing just the specified zip code'
df_copy = df.copy(deep=True)
result = df_copy[df_copy['zip5'] == a_zip_code]
return result
def make_train_model(df, sale_date, training_days):
'return df of transactions no more than training_days before the sale_date'
just_before_sale_date = within(sale_date, training_days, df)
train_model = add_age(df[just_before_sale_date], sale_date)
return train_model
def make_test_model(df, sale_date):
'return df of transactions on the sale_date'
selected_indices = on_sale_date(sale_date, df)
test_model = add_age(df[selected_indices], sale_date)
return test_model
def determine_most_popular_zip_code(df, control):
    'return the zip_code that occurs most often in the dataframe'
zip_code_counter = collections.Counter()
for _, zip_code in df.zip5.iteritems():
zip_code_counter[zip_code] += 1
most_common_zip_code, count = zip_code_counter.most_common(1)[0]
print 'most common zip_code', most_common_zip_code, 'occurs', count
# assert: the most common zip code is in each fold
fold_number = -1
folds_for_zip_code = collections.defaultdict(set)
kf = cross_validation.KFold(n=(len(df)),
n_folds=control.n_folds,
shuffle=True,
random_state=control.random_seed)
for train_indices, test_indices in kf:
fold_number += 1
train = df.iloc[train_indices].copy(deep=True)
test = df.iloc[test_indices].copy(deep=True)
if most_common_zip_code not in test.zip5.values:
print most_common_zip_code, 'not in', fold_number
for zip_code in unique_zip_codes(test):
assert(zip_code in test.zip5.values)
if zip_code not in train.zip5.values:
print 'fold %d zip_code %d in test and not train' % (
fold_number,
zip_code)
folds_for_zip_code[zip_code].add(fold_number)
assert(len(folds_for_zip_code[most_common_zip_code]) == 10)
# print zip_code not in each test set
count_in_10 = 0
count_not_in_10 = 0
for zip_code, set_folds in folds_for_zip_code.iteritems():
if len(set_folds) != 10:
print 'zip_code %d in only %d folds' % (zip_code, len(set_folds))
count_not_in_10 += 1
else:
count_in_10 += 1
print 'all other zip codes are in 10 folds'
print 'in 10: %d not in 10: %d' % (count_in_10, count_not_in_10)
print 'NOTE: all this analysis is before training samples are selected'
return most_common_zip_code
def read_training_data(control):
'return dataframe'
def squeeze(result, verbose=False):
'replace float64 with float32'
def is_np_array_float64(x):
return isinstance(x, np.ndarray) and x.dtype == np.float64
def is_np_scalar_float64(x):
return isinstance(x, np.float64)
if verbose:
pprint(result)
assert(isinstance(result, dict))
new_result = {}
for k, v in result.iteritems():
if isinstance(k, str):
# rf result
if is_np_array_float64(v):
# e.g., actual, estimate, other info in a vector
new_result[k] = np.array(v, dtype=np.float32)
else:
print k, v
raise RuntimeError('unexpected')
elif isinstance(k, tuple):
# ols result
new_ols_result = {}
for ols_key, ols_value in v.iteritems():
if is_np_array_float64(ols_value):
new_ols_result[ols_key] = np.array(ols_value, dtype=np.float32)
elif is_np_scalar_float64(ols_value):
new_ols_result[ols_key] = np.float32(ols_value)
else:
print ols_key, ols_value
raise RuntimeError('unexpected')
new_result[k] = new_ols_result
else:
# unexpected
print k, v
raise RuntimeError('unexpected')
if verbose:
pprint(new_result)
return new_result
def fit_and_test_models(df, control):
'''Return all_results dict and list of feature names
all_results has results for each fold, sale date, training period model, scope
'''
verbose = False
all_results = {}
fold_number = -1
on_sale_date = df['sale.python_date'] == control.sale_date
num_test_samples = sum(on_sale_date)
print 'sale_date %s has %d samples' % (control.sale_date, num_test_samples)
assert num_test_samples >= control.n_folds, 'unable to form folds'
skf = cross_validation.StratifiedKFold(on_sale_date, control.n_folds)
for train_indices, test_indices in skf:
fold_number += 1
# don't create views (just to be careful)
train = df.iloc[train_indices].copy(deep=True)
test = df.iloc[test_indices].copy(deep=True)
for training_days in control.training_days:
train_model = make_train_model(train, control.sale_date, training_days)
if len(train_model) == 0:
print 'no training data fold %d sale_date %s training_days %d' % (
fold_number, control.sale_date, training_days)
sys.exit(1)
test_model = make_test_model(test, control.sale_date)
if len(test_model) == 0:
print 'no testing data fold %d sale_date %s training_days %d' % (
fold_number, control.sale_date, training_days)
continue
for model_name, model in control.models.iteritems():
print fold_number, control.sale_date, training_days, model_name
def make_key(scope):
return (fold_number, control.sale_date, training_days, model_name, scope)
# determine global results (for all areas)
if len(test_model) == 0 or len(train_model) == 0:
print 'skipping global zero length', len(test_model), len(train_model)
else:
global_result = model.run(train=train_model,
test=test_model,
control=control)
key = make_key(scope='global')
all_results[key] = squeeze(global_result)
report = model.reporter()() # instantiate report class
if verbose:
print report.global_fold_line(key, global_result)
# determine results for each zip code in test data
for zip_code in unique_zip_codes(test_model):
train_model_zip = zip_codes(train_model, zip_code)
test_model_zip = zip_codes(test_model, zip_code)
if len(train_model_zip) == 0 or len(test_model_zip) == 0:
print 'skipping zip zero length', zip_code, len(test_model_zip), len(train_model_zip)
else:
zip_code_result = model.run(train=train_model_zip,
test=test_model_zip,
control=control)
key = make_key(scope=('zip', zip_code))
all_results[key] = squeeze(zip_code_result)
if verbose:
print report.zip_fold_line(key, zip_code_result)
print 'num test samples across all folds:', num_test_samples
return all_results
def print_results(all_results, control):
for training_days in control.training_days:
for model_name, model in control.models.iteritems():
report = model.reporter()() # how to print is in the model result
report.summarize(control.sale_date,
training_days,
model_name,
all_results,
control)
def main(argv):
warnings.filterwarnings('error') # convert warnings to errors
control = make_control(argv)
sys.stdout = Logger(logfile_path=control.path_log) # print also write to log file
print control
# read input
f = open(control.path_in, 'rb')
df_loaded = pickle.load(f)
f.close()
df_loaded_copy = df_loaded.copy(deep=True) # used for debugging
if False:
most_popular_zip_code = determine_most_popular_zip_code(df_loaded.copy(), control)
print most_popular_zip_code
all_results = fit_and_test_models(df_loaded, control)
assert(df_loaded.equals(df_loaded_copy))
if False:
print_results(all_results, control)
# write result
print 'writing results to', control.path_out
result = {'control': control, # control.predictors orders the x values
'all_results': all_results}
f = open(control.path_out, 'wb')
pickle.dump(result, f)
print 'ok'
if __name__ == "__main__":
if False:
        # quiet pyflakes warnings
pdb.set_trace()
pprint(None)
np.all()
pd.Series()
main(sys.argv)
| 2.15625
| 2
|
plugins/share_post/__init__.py
|
mohnjahoney/website_source
| 13
|
12782449
|
from .share_post import * # noqa
| 1.046875
| 1
|
generators/generate_interactions_personalize_offers.py
|
ajayfenix/retail-demo-store
| 0
|
12782450
|
"""
A simple script for generating sample data for learning to give personalised offers.
"""
import json
import pandas as pd
import numpy as np
import gzip
import random
import logging
GENERATE_INBALANCED_DATA = False
NUM_INTERACTIONS_PER_USER = 3
FIRST_TIMESTAMP = 1591803782 # 2020-06-10, 18:43:02
LAST_TIMESTAMP = 1599579782 # 2020-09-08, 18:43:02
RANDOM_SEED = 1
IN_PRODUCTS_FILENAME = "src/products/src/products-service/data/products.yaml"
IN_USERS_FILENAME = "src/users/src/users-service/data/users.json.gz"
IN_OFFERS_FILENAME = "src/offers/src/offers-service/data/offers.json"
# Where to put the generated data so that it is picked up by stage.sh
GENERATED_DATA_ROOT = "src/aws-lambda/personalize-pre-create-resources/data"
def generate_data(interactions_filename, users_df, offers_df):
"""Script for writing to a file simulated user-offer interactions"""
random.seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)
num_users = users_df.shape[0]
num_interactions = NUM_INTERACTIONS_PER_USER * num_users
if GENERATE_INBALANCED_DATA:
# We may wish to assume probability is proportional to ID to show off how we can add
# business logic around Personalize
offer_probs = offers_df.id.values.astype(float)
else:
        # Or we can work around imbalance at the data munging stage
offer_probs = np.ones(len(offers_df.id.values), dtype=float)
# Normalise so that we have probabilities
offer_probs = offer_probs / offer_probs.sum()
# generate timestamps
time_between_events = (LAST_TIMESTAMP - FIRST_TIMESTAMP) / num_interactions
timestamps = np.arange(FIRST_TIMESTAMP, LAST_TIMESTAMP, time_between_events).astype(int)
# pre-shuffle them as we will be using them as a randomising key when we sort by timestamp
np.random.shuffle(timestamps)
# generate all users Ids
sample_user_ids = np.tile(users_df['id'].values.astype(int), NUM_INTERACTIONS_PER_USER)
# only one event type
event_type = ['OfferConverted'] * num_interactions
# we sort it to ensure there is a correlation between user ID and offer ID.
# This correlation is what the personalisation will learn.
sampled_offers = sorted(np.random.choice(offers_df.id.values, num_interactions, p=offer_probs))
interactions_df = pd.DataFrame({'ITEM_ID': sampled_offers,
'USER_ID': sample_user_ids,
'EVENT_TYPE': event_type,
'TIMESTAMP': timestamps})
# by sorting by timestamp, other elements get shuffled
interactions_df = interactions_df.sort_values('TIMESTAMP')
with open(interactions_filename, 'w') as outfile:
interactions_df.to_csv(outfile, index=False)
globals().update(locals()) # This can be used for inspecting in console after script ran or if run with ipython.
print('Generation script finished - created offers dataset')
if __name__ == '__main__':
# User info is stored in the repository - it was automatically generated
with gzip.open(IN_USERS_FILENAME, 'r') as f:
users = json.load(f)
users_df = pd.DataFrame(users)
# Offers info is stored in repository
with open(IN_OFFERS_FILENAME, 'r') as f:
offers = json.load(f)
offers_df = pd.DataFrame(offers)
logging.basicConfig(level=logging.INFO)
generate_data(GENERATED_DATA_ROOT + '/offer_interactions.csv', users_df, offers_df)
| 3.21875
| 3
|