| max_stars_repo_path (stringlengths 3 to 269) | max_stars_repo_name (stringlengths 4 to 119) | max_stars_count (int64, 0 to 191k) | id (stringlengths 1 to 7) | content (stringlengths 6 to 1.05M) | score (float64, 0.23 to 5.13) | int_score (int64, 0 to 5) |
|---|---|---|---|---|---|---|
tests/fixtures.py
|
SpotDraft/silver-instamojo
| 0
|
12781251
|
import pytest
from django_dynamic_fixture import G
from silver.models import Transaction, Proforma, Invoice, Customer
from silver import payment_processors
from silver_instamojo.models import InstamojoPaymentMethod
@pytest.fixture
def customer():
return G(Customer, currency='RON', address_1='9', address_2='9',
sales_tax_number=0)
@pytest.fixture
def payment_processor():
return payment_processors.get_instance('instamojo_manual')
@pytest.fixture
def payment_processor_triggered():
return payment_processors.get_instance('instamojo_triggered')
@pytest.fixture
def payment_method(customer, payment_processor):
return G(InstamojoPaymentMethod, customer=customer,
payment_processor=payment_processor.name)
@pytest.fixture
def proforma(customer):
return G(Proforma, state=Invoice.STATES.ISSUED, customer=customer,
transaction_currency='RON')
@pytest.fixture
def invoice(customer, proforma):
return G(Invoice, related_document=proforma, state=Invoice.STATES.ISSUED,
customer=customer, transaction_currency='RON')
@pytest.fixture
def transaction(customer, payment_processor, payment_method, proforma, invoice):
return G(Transaction, invoice=invoice, proforma=proforma, currency='RON',
amount=invoice.total, payment_method=payment_method)
@pytest.fixture
def transaction_triggered(customer, payment_processor_triggered,
payment_method, proforma, invoice):
return G(Transaction, invoice=invoice, proforma=proforma, currency='RON',
amount=invoice.total, payment_method=payment_method)
| 2.03125
| 2
|
setup.py
|
arcanosam/pytksync
| 0
|
12781252
|
<gh_stars>0
""" Packaging and distribution using cx-freeze on Windows Plataform only
"""
import os
import sys
from cx_Freeze import setup, Executable
os.environ['TCL_LIBRARY'] = 'C:\\pysyncdev\\tcl\\tcl8.6'
os.environ['TK_LIBRARY'] = 'C:\\pysyncdev\\tcl\\tk8.6'
build_exe_options = {
'include_msvcr': True,  # skip the "msvcr100.dll missing" error
'includes': [
'gui.app_win',
'gui.conf_win'
],
'include_files': [
'C:\\pysyncdev\\DLLs\\tcl86t.dll',
'C:\\pysyncdev\\DLLs\\tk86t.dll'
]
}
# GUI applications require a different base on Windows (the default is for a
# console application).
base = None
if sys.platform == 'win32':
base = 'Win32GUI'
setup(
name='pysyncdev',
version='0.1',
description='pysyncdev',
options={'build_exe': build_exe_options},
executables=[Executable('main.py', base=base)]
)
| 1.78125
| 2
|
sdk/python/pulumi_azure/lighthouse/_inputs.py
|
aangelisc/pulumi-azure
| 0
|
12781253
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'DefinitionAuthorizationArgs',
]
@pulumi.input_type
class DefinitionAuthorizationArgs:
def __init__(__self__, *,
principal_id: pulumi.Input[str],
role_definition_id: pulumi.Input[str],
delegated_role_definition_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
principal_display_name: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] principal_id: Principal ID of the security group/service principal/user that would be assigned permissions to the projected subscription.
:param pulumi.Input[str] role_definition_id: The role definition identifier. This role will define the permissions that are granted to the principal. This cannot be an `Owner` role.
:param pulumi.Input[Sequence[pulumi.Input[str]]] delegated_role_definition_ids: The set of role definition ids which define all the permissions that the principal id can assign.
:param pulumi.Input[str] principal_display_name: The display name of the security group/service principal/user that would be assigned permissions to the projected subscription.
"""
pulumi.set(__self__, "principal_id", principal_id)
pulumi.set(__self__, "role_definition_id", role_definition_id)
if delegated_role_definition_ids is not None:
pulumi.set(__self__, "delegated_role_definition_ids", delegated_role_definition_ids)
if principal_display_name is not None:
pulumi.set(__self__, "principal_display_name", principal_display_name)
@property
@pulumi.getter(name="principalId")
def principal_id(self) -> pulumi.Input[str]:
"""
Principal ID of the security group/service principal/user that would be assigned permissions to the projected subscription.
"""
return pulumi.get(self, "principal_id")
@principal_id.setter
def principal_id(self, value: pulumi.Input[str]):
pulumi.set(self, "principal_id", value)
@property
@pulumi.getter(name="roleDefinitionId")
def role_definition_id(self) -> pulumi.Input[str]:
"""
The role definition identifier. This role will define the permissions that are granted to the principal. This cannot be an `Owner` role.
"""
return pulumi.get(self, "role_definition_id")
@role_definition_id.setter
def role_definition_id(self, value: pulumi.Input[str]):
pulumi.set(self, "role_definition_id", value)
@property
@pulumi.getter(name="delegatedRoleDefinitionIds")
def delegated_role_definition_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The set of role definition ids which define all the permissions that the principal id can assign.
"""
return pulumi.get(self, "delegated_role_definition_ids")
@delegated_role_definition_ids.setter
def delegated_role_definition_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "delegated_role_definition_ids", value)
@property
@pulumi.getter(name="principalDisplayName")
def principal_display_name(self) -> Optional[pulumi.Input[str]]:
"""
The display name of the security group/service principal/user that would be assigned permissions to the projected subscription.
"""
return pulumi.get(self, "principal_display_name")
@principal_display_name.setter
def principal_display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "principal_display_name", value)
| 1.859375
| 2
|
Lib/api.py
|
evi1hack/viperpython
| 0
|
12781254
|
# -*- coding: utf-8 -*-
# @File : api.py
# @Date : 2021/2/25
# @Desc :
import random
import string
def get_random_str(length):
# random.sample draws without replacement, so length must not exceed the 62 available characters
value = ''.join(random.sample(string.ascii_letters + string.digits, length))
return value
def data_return(code=500, data=None,
msg_zh="服务器发生错误,请检查服务器",
msg_en="An error occurred on the server, please check the server."):
return {'code': code, 'data': data, 'msg_zh': msg_zh, "msg_en": msg_en}
| 2.8125
| 3
|
waterstructureCreator/check_polarity.py
|
AlexandraDavila/WaterStructureCreator
| 3
|
12781255
|
<gh_stars>1-10
import numpy as np
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
# Getting the polarity
def get_structure_polarity(selected_h2o):
"""Check the water structures for polarity
Parameters
----------
selected_h2o : dict
dictionary of the structures
Returns
-------
structure_polarity : dict
dictionary of structures and their polarity as boolean
"""
structure_polarity = {}
for water_structs in selected_h2o.keys():
water_struct_list = selected_h2o[water_structs]
water_struct_list = sorted(water_struct_list, key = lambda x : x[0][0])
# sorting does what it should do!
polt = []
for wsindex, water_struct in enumerate(water_struct_list):
filmstruct = water_struct[-1].copy()
sg = SpacegroupAnalyzer(filmstruct, symprec=0.1)
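# A film is treated as nonpolar when its space group contains an operation whose rotation
# maps z to -z (i.e. flips the surface normal); the dictionary value ends up True for polar structures.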
mytest = any([np.linalg.norm(a.rotation_matrix[2] - np.array([0,0,-1])) < 0.0001 for a in sg.get_space_group_operations()])
polt.append(float(mytest))
polt = not bool(int(np.round(np.mean(polt),0)))
structure_polarity[water_structs] = polt
return structure_polarity
| 2.734375
| 3
|
twitchbot/disabled_commands.py
|
jostster/PythonTwitchBotFramework
| 0
|
12781256
|
from pathlib import Path
from .config import Config
from .command import get_command, command_exist
def is_command_disabled(channel: str, cmd: str):
if channel in cfg_disabled_commands.data:
if command_exist(cmd):
cmd = get_command(cmd).fullname
return cmd in cfg_disabled_commands[channel]
return False
def disable_command(channel: str, cmd: str):
if channel not in cfg_disabled_commands.data:
cfg_disabled_commands[channel] = [cmd]
return
if command_exist(cmd):
cmd = get_command(cmd).fullname
if cmd in cfg_disabled_commands[channel]:
return
cfg_disabled_commands[channel].append(cmd)
cfg_disabled_commands.save()
def enable_command(channel: str, cmd: str):
if channel not in cfg_disabled_commands.data:
return
if command_exist(cmd):
cmd = get_command(cmd).fullname
if cmd in cfg_disabled_commands[channel]:
cfg_disabled_commands[channel].remove(cmd)
cfg_disabled_commands.save()
cfg_disabled_commands = Config(Path('configs', 'disabled_commands.json'))
| 2.5625
| 3
|
roll.py
|
es3649/script-tools
| 0
|
12781257
|
#!/usr/bin/env python3
help_str = """
roll is a tool for computing die rolls
Pass any number of arguments of the form
<number>d<number>
The first number refers to the number of dice to roll;
The second refers to the number of sides on the die.
For example, to roll 5, 6-sided dice, pass '5d6'.
It also computes rolls with advantage or disadvantage:
each of these rolls 2 dice instead of one, then chooses
the greater for advantage and the lesser for disadvantage.
Use this option by adding the letter 'a' for advantage
or the letter 'd' for disadvantage to the end of the
argument. For example, passing 4d20d will roll 4 pairs
of 20-sided dice, and for each pair will return the lesser
of the two numbers rolled.
<NAME> - 2020
"""
import re
import sys
import random
from math import floor
def roll_x_y_sided_dice(x,y):
"""Rolls x, y-sided dice
Parameters:
x (int): the number of dice to roll
y (int): the number of sides on each die
Returns:
rolls (list): the value of each roll
"""
return [floor(random.random()*y)+1 for _ in range(x)]
def do_rolls(rolls):
"""accepts a list of 3 tuples, where the first is the number of dice
to roll, the second is the number of sides on the die, and the third
is either None, 'a' signifying advantage, or 'd' signifying
disadvantage
Parameters:
rolls (list): the list of rolls to do
Returns:
results (list): a list of 2 tuples containing the numbers rolled
and the total
total (int): the total for all the rolls
"""
# result variables
results = []
total = 0
# for each roll we need to do
for roll in rolls:
# if it's advantage, handle that
if roll[2] == 'a':
# take the max of 2 y-sided dice x times
result = [max(roll_x_y_sided_dice(2,int(roll[1]))) for _ in range(int(roll[0]))]
elif roll[2] == 'd':
# take the min of 2 y-sided dice x times
result = [min(roll_x_y_sided_dice(2,int(roll[1]))) for _ in range(int(roll[0]))]
else:
# take x, y-sided dice
result = roll_x_y_sided_dice(int(roll[0]), int(roll[1]))
# total them up, add to the running total and the results
s = sum(result)
total += s
results.append((result,s))
# return the generated rolls
return results, total
# if this is the main method
if __name__ == "__main__":
# check for a help message and print it
if len(sys.argv) == 1 or (len(sys.argv) == 2 and sys.argv[1] == 'help'):
print(help_str)
sys.exit(0)
# compile a pattern to match the die roll args
pattern = re.compile(r'^([1-9][0-9]*)d([1-9][0-9]*)(a|d)?$')
# a list of compiled matches
matches = []
# match each roll and get the groups
for arg in sys.argv[1:]:
match = pattern.match(arg)
# bad arg, complain
if not match:
print(f"Bad argument: {arg}")
print(help_str)
sys.exit(1)
matches.append(match.groups())
# do the hard work
results, grand_total = do_rolls(matches)
# print results
for roll, (res, total) in zip(sys.argv[1:], results):
print(f"{roll:<7}: {total}")
print(res)
if len(sys.argv) > 2:
print()
# print grand total
if len(sys.argv) > 2:
print(f"Total: {grand_total}")
| 4.59375
| 5
|
notesnv/cmd_arg_utils.py
|
knoopx/ulauncher-notes-nv
| 4
|
12781258
|
"""
Utilities for working with command line strings and arguments
"""
import re
from typing import List, Dict, Optional
DOUBLE_QUOTED_GROUPS = re.compile(r"(\".+?\")")
DOUBLE_QUOTED_STRING = re.compile(r"^\".+\"?")
def argsplit(cmd: str) -> List[str]:
"""
Split a command line string on spaces into an argument list
that can be passed to subprocess.run()
Use double quotes to preserve spaces.
>>> argsplit(" word1 word2")
['word1', 'word2']
>>> argsplit('word1 word2 "blah blah"')
['word1', 'word2', 'blah blah']
"""
# Strip string of whitespace and remove repeated spaces
cmd = cmd.strip()
# Split into quoted and unquoted chunks
# (This trips up on escaped double quotes!)
args = []
chunks = DOUBLE_QUOTED_GROUPS.split(cmd)
for chunk in chunks:
if chunk:
if DOUBLE_QUOTED_STRING.fullmatch(chunk):
# Strip then add quoted chunks
args.append(chunk.strip('"'))
else:
# Clean unquoted chunks and further split on spaces
chunk = re.sub(r" +", " ", chunk).strip()
if chunk:
args += chunk.split(" ")
return args
def argbuild(
cmd: str, mapping: Dict[str, str], append_missing_field: Optional[str] = None
) -> List[str]:
"""
Turn a command template string into list of args
suitable for subprocess.run() by replacing fields with values
using Python's str.format_map() function.
:param cmd: command to be turned into list of args
:param mapping: fields and their replacements
:param append_missing_field: if this field wasn't used in cmd, pass it as last arg
:returns: list of args
If `append_missing_field` is specified, it must be in `mapping`
Examples:
>>> argbuild('gedit --new-window', {'fn': '/foo/bar', 'ln': 12})
['gedit', '--new-window']
>>> argbuild('gedit --new-window {fn} {ln}', {'fn': '/foo/bar', 'ln': 12})
['gedit', '--new-window', '/foo/bar', '12']
>>> argbuild('gedit {ln}', {'fn': '/foo/bar', 'ln': 12}, append_missing_field='fn')
['gedit', '12', '/foo/bar']
"""
append_field_used = False
if append_missing_field:
append_map = dict((k, "{" + k + "}") for k, v in mapping.items())
append_map[append_missing_field] = mapping[append_missing_field]
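# append_map maps every field to its own placeholder (e.g. 'fn' -> '{fn}') so substitution is a
# no-op, except the append field, which gets its real value; an arg therefore changes under
# format_map only if it actually contains the append field.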
args = []
for arg in argsplit(cmd):
# Track if append_missing_field was used
if append_missing_field and not append_field_used:
# Try replacing the append field and see if string changes
append_field_used = arg != arg.format_map(append_map)
args.append(arg.format_map(mapping))
if append_missing_field and not append_field_used:
args.append(mapping[append_missing_field])
return args
| 3.6875
| 4
|
mkalias.py
|
XiKuuKy/mkalias
| 1
|
12781259
|
<filename>mkalias.py
import sys
try:
shell = sys.argv[1]
alias = sys.argv[2]
run = sys.argv[3]
run = run.replace('"', "")
run = run.replace("'", "")
except IndexError:
print("Usage: \n\tmkalias <shell> <alias> <command>")
sys.exit()
if shell == "zsh" or shell == "bash" or shell == "sh":
print("alias " + alias + "=\"" + run + "\"")
elif shell == "fish":
print("alias " + alias + " \"" + run + "\"")
else:
print("Sorry you're shell isn't supported. Please submit a issue on Github and I will try to include it.")
| 2.8125
| 3
|
test/test_cairopen.py
|
colinmford/coldtype
| 142
|
12781260
|
import unittest
from coldtype.pens.cairopen import CairoPen
from pathlib import Path
from coldtype.color import hsl
from coldtype.geometry import Rect
from coldtype.text.composer import StSt, Font
from coldtype.pens.datpen import DATPen, DATPens
from PIL import Image
import drawBot as db
import imagehash
import contextlib
co = Font.Cacheable("assets/ColdtypeObviously-VF.ttf")
renders = Path("test/renders/cairo")
renders.mkdir(parents=True, exist_ok=True)
def hash_img(path):
if path.exists():
return (
imagehash.colorhash(Image.open(path)),
imagehash.average_hash(Image.open(path)))
else:
return -1
@contextlib.contextmanager
def test_image(test:unittest.TestCase, path, rect=Rect(300, 300)):
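# Hash the previously saved render (if any), delete it, let the test re-render it, then assert
# that the new image's perceptual hashes match the old ones: a simple visual-regression check.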
img = (renders / path)
hash_before = hash_img(img)
if img.exists():
img.unlink()
yield(img, rect)
hash_after = hash_img(img)
test.assertEqual(hash_after, hash_before)
test.assertEqual(img.exists(), True)
class TestCairoPen(unittest.TestCase):
def test_cairo_pdf(self):
r = Rect(300, 300)
pdf = renders / "test_cairo.pdf"
dp = (StSt("CDEL", co, 100, wdth=0.5)
.pens()
.align(r))
CairoPen.Composite(dp, r, pdf)
self.assertEqual(len(dp), 4)
self.assertEqual(type(dp), DATPens)
def test_cairo_png(self):
with test_image(self, "test_cairo.png") as (i, r):
rr = Rect(0, 0, 100, 100)
dp = (DATPen()
.define(r=rr, c=75)
.gs("$r↗ $r↓|↘|$c $r↖|↙|$c")
.align(r)
.scale(1.2)
.rotate(180)
.f(hsl(0.5, a=0.1))
.s(hsl(0.9))
.sw(5))
CairoPen.Composite(dp, r, i)
self.assertEqual(len(dp.value), 4)
self.assertEqual(type(dp), DATPen)
if __name__ == "__main__":
unittest.main()
| 2.171875
| 2
|
netbox/extras/migrations/0044_jobresult.py
|
letic/netbox
| 2
|
12781261
|
<reponame>letic/netbox<filename>netbox/extras/migrations/0044_jobresult.py
import uuid
import django.contrib.postgres.fields.jsonb
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
import extras.utils
from extras.choices import JobResultStatusChoices
def convert_job_results(apps, schema_editor):
"""
Convert ReportResult objects to JobResult objects
"""
Report = apps.get_model('extras', 'Report')
ReportResult = apps.get_model('extras', 'ReportResult')
JobResult = apps.get_model('extras', 'JobResult')
ContentType = apps.get_model('contenttypes', 'ContentType')
report_content_type = ContentType.objects.get_for_model(Report)
job_results = []
for report_result in ReportResult.objects.all():
if report_result.failed:
status = JobResultStatusChoices.STATUS_FAILED
else:
status = JobResultStatusChoices.STATUS_COMPLETED
job_results.append(
JobResult(
name=report_result.report,
obj_type=report_content_type,
created=report_result.created,
completed=report_result.created,
user=report_result.user,
status=status,
data=report_result.data,
job_id=uuid.uuid4()
)
)
JobResult.objects.bulk_create(job_results)
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('extras', '0043_report'),
]
operations = [
migrations.CreateModel(
name='JobResult',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False)),
('name', models.CharField(max_length=255)),
('created', models.DateTimeField(auto_now_add=True)),
('completed', models.DateTimeField(blank=True, null=True)),
('status', models.CharField(default='pending', max_length=30)),
('data', django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True)),
('job_id', models.UUIDField(unique=True)),
('obj_type', models.ForeignKey(limit_choices_to=extras.utils.FeatureQuery('job_results'), on_delete=django.db.models.deletion.CASCADE, related_name='job_results', to='contenttypes.ContentType')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['obj_type', 'name', '-created'],
},
),
migrations.RunPython(
code=convert_job_results
),
migrations.DeleteModel(
name='ReportResult'
)
]
| 2.171875
| 2
|
gevent_ticker/__init__.py
|
segmentio/gevent-ticker
| 2
|
12781262
|
from gevent_ticker.ticker import Ticker, ticker
| 1.007813
| 1
|
features/feature_generation_strategy-2.py
|
jmrozanec/features-generator
| 3
|
12781263
|
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.decomposition import PCA, TruncatedSVD, FastICA
from sklearn.random_projection import GaussianRandomProjection, SparseRandomProjection
import abc
class ColumnBasedFeatureGenerationStrategyAbstract(BaseEstimator, TransformerMixin):
"""Provides abstraction for features generation"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def fit(self, train):
"""Required Method"""
@abc.abstractmethod
def transform(self, train):
"""Required Method"""
@abc.abstractmethod
def featurename(self, colname1, colname2):
"""Required Method"""
@abc.abstractmethod
def equivalent_featurenames(self, colname1, colname2):
"""Required Method. Used to reflect commutativity."""
class SumFeatureGenerationStrategy(ColumnBasedFeatureGenerationStrategyAbstract):
def fit(self, train, val, test, colname1, colname2):
train[self.featurename(colname1, colname2)] = train[[colname1, colname2]].sum(axis=1)
val[self.featurename(colname1, colname2)] = val[[colname1, colname2]].sum(axis=1)
test[self.featurename(colname1, colname2)] = test[[colname1, colname2]].sum(axis=1)
return (train, val, test)
def featurename(self, colname1, colname2):
return "{}_sum_{}".format(colname1, colname2)
def equivalent_featurenames(self, colname1, colname2):
return [self.featurename(colname1, colname2), self.featurename(colname2, colname1)]
class DiffFeatureGenerationStrategy(ColumnBasedFeatureGenerationStrategyAbstract):
def generate(self, train, val, test, colname1, colname2):
train[self.featurename(colname1, colname2)]=train[colname1]-train[colname2]
val[self.featurename(colname1, colname2)]=val[colname1]-val[colname2]
test[self.featurename(colname1, colname2)]=test[colname1]-test[colname2]
return (train, val, test)
def featurename(self, colname1, colname2):
return "{}_diff_{}".format(colname1, colname2)
def equivalent_featurenames(self, colname1, colname2):
return [self.featurename(colname1, colname2)]
class ProdFeatureGenerationStrategy(ColumnBasedFeatureGenerationStrategyAbstract):
def generate(self, train, val, test, colname1, colname2):
train[self.featurename(colname1, colname2)]=train[colname1]*train[colname2]
val[self.featurename(colname1, colname2)]=val[colname1]*val[colname2]
test[self.featurename(colname1, colname2)]=test[colname1]*test[colname2]
return (train, val, test)
def featurename(self, colname1, colname2):
return "{}_prod_{}".format(colname1, colname2)
def equivalent_featurenames(self, colname1, colname2):
return [self.featurename(colname1, colname2), self.featurename(colname2, colname1)]
class DivFeatureGenerationStrategy(ColumnBasedFeatureGenerationStrategyAbstract):
def generate(self, train, val, test, colname1, colname2):
train[self.featurename(colname1, colname2)]=train[colname1]/train[colname2]
val[self.featurename(colname1, colname2)]=val[colname1]/val[colname2]
test[self.featurename(colname1, colname2)]=test[colname1]/test[colname2]
return (train, val, test)
def featurename(self, colname1, colname2):
return "{}_div_{}".format(colname1, colname2)
def equivalent_featurenames(self, colname1, colname2):
return [self.featurename(colname1, colname2)]
class AvgFeatureGenerationStrategy(ColumnBasedFeatureGenerationStrategyAbstract):
def generate(self, train, val, test, colname1, colname2):
train[self.featurename(colname1, colname2)]=train[[colname1, colname2]].mean(axis=1)
val[self.featurename(colname1, colname2)]=val[[colname1, colname2]].mean(axis=1)
test[self.featurename(colname1, colname2)]=test[[colname1, colname2]].mean(axis=1)
return (train, val, test)
def featurename(self, colname1, colname2):
return "{}_avg_{}".format(colname1, colname2)
def equivalent_featurenames(self, colname1, colname2):
return [self.featurename(colname1, colname2), self.featurename(colname2, colname1)]
class MaxFeatureGenerationStrategy(ColumnBasedFeatureGenerationStrategyAbstract):
def generate(self, train, val, test, colname1, colname2):
train[self.featurename(colname1, colname2)]=train[[colname1, colname2]].max(axis=1)
val[self.featurename(colname1, colname2)]=val[[colname1, colname2]].max(axis=1)
test[self.featurename(colname1, colname2)]=test[[colname1, colname2]].max(axis=1)
return (train, val, test)
def featurename(self, colname1, colname2):
return "{}_max_{}".format(colname1, colname2)
def equivalent_featurenames(self, colname1, colname2):
return [self.featurename(colname1, colname2), self.featurename(colname2, colname1)]
class MinFeatureGenerationStrategy(ColumnBasedFeatureGenerationStrategyAbstract):
def generate(self, train, val, test, colname1, colname2):
train[self.featurename(colname1, colname2)]=train[[colname1, colname2]].min(axis=1)
val[self.featurename(colname1, colname2)]=val[[colname1, colname2]].min(axis=1)
test[self.featurename(colname1, colname2)]=test[[colname1, colname2]].min(axis=1)
return (train, val, test)
def featurename(self, colname1, colname2):
return "{}_min_{}".format(colname1, colname2)
def equivalent_featurenames(self, colname1, colname2):
return [self.featurename(colname1, colname2), self.featurename(colname2, colname1)]
# Features based on decomposition methods
class DecompositionBasedFeatureGenerationStrategyAbstract(object):
"""Provides abstraction for features generation"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def generate(self, train, val, test):
"""Required Method"""
@abc.abstractmethod
def featurename(self, idx):
"""Required Method"""
@abc.abstractmethod
def equivalent_featurenames(self, idx):
"""Required Method. Used to reflect commutativity."""
class PCAFeatureGenerationStrategy(DecompositionBasedFeatureGenerationStrategyAbstract):
def generate(self, train, val, test, n_comps):
decomposer = PCA(n_components=n_comps, random_state=1234)
results_train = decomposer.fit_transform(train)
results_val = decomposer.fit_transform(val)
results_test = decomposer.transform(test)
for i in range(1, n_comps + 1):
train[self.featurename(i)] = results_train[:, i - 1]
val[self.featurename(i)] = results_val[:, i - 1]
test[self.featurename(i)] = results_test[:, i - 1]
return (train, val, test)
def featurename(self, idx):
return "pca_{}".format(str(idx))
def equivalent_featurenames(self, idx):
return [self.featurename(idx)]
class TSVDFeatureGenerationStrategy(DecompositionBasedFeatureGenerationStrategyAbstract):
def generate(self, train, val, test, n_comps):
decomposer = TruncatedSVD(n_components=n_comps, random_state=1234)
results_train = decomposer.fit_transform(train)
results_val = decomposer.fit_transform(val)
results_test = decomposer.transform(test)
for i in range(1, n_comps + 1):
train[self.featurename(i)] = results_train[:, i - 1]
val[self.featurename(i)] = results_val[:, i - 1]
test[self.featurename(i)] = results_test[:, i - 1]
return (train, val, test)
def featurename(self, idx):
return "tsvd_{}".format(str(idx))
def equivalent_featurenames(self, idx):
return [self.featurename(idx)]
class ICAFeatureGenerationStrategy(DecompositionBasedFeatureGenerationStrategyAbstract):
def generate(self, train, val, test, n_comps):
decomposer = FastICA(n_components=n_comps, random_state=1234)
results_train = decomposer.fit_transform(train)
results_val = decomposer.fit_transform(val)
results_test = decomposer.transform(test)
for i in range(1, n_comps + 1):
train[self.featurename(i)] = results_train[:, i - 1]
val[self.featurename(i)] = results_val[:, i - 1]
test[self.featurename(i)] = results_test[:, i - 1]
return (train, val, test)
def featurename(self, idx):
return "ica_{}".format(str(idx))
def equivalent_featurenames(self, idx):
return [self.featurename(idx)]
class GRPFeatureGenerationStrategy(DecompositionBasedFeatureGenerationStrategyAbstract):
def generate(self, train, val, test, n_comps):
decomposer = GaussianRandomProjection(n_components=n_comps, random_state=1234)
results_train = decomposer.fit_transform(train)
results_val = decomposer.fit_transform(val)
results_test = decomposer.transform(test)
for i in range(1, n_comps + 1):
train[self.featurename(i)] = results_train[:, i - 1]
val[self.featurename(i)] = results_val[:, i - 1]
test[self.featurename(i)] = results_test[:, i - 1]
return (train, val, test)
def featurename(self, idx):
return "grp_{}".format(str(idx))
def equivalent_featurenames(self, idx):
return [self.featurename(idx)]
class SRPFeatureGenerationStrategy(DecompositionBasedFeatureGenerationStrategyAbstract):
def generate(self, train, val, test, n_comps):
decomposer = SparseRandomProjection(n_components=n_comps, random_state=1234)
results_train = decomposer.fit_transform(train)
results_val = decomposer.fit_transform(val)
results_test = decomposer.transform(test)
for i in range(1, n_comps + 1):
train[self.featurename(i)] = results_train[:, i - 1]
val[self.featurename(i)] = results_val[:, i - 1]
test[self.featurename(i)] = results_test[:, i - 1]
return (train, val, test)
def featurename(self, idx):
return "grp_{}".format(str(idx))
def equivalent_featurenames(self, idx):
return [self.featurename(idx)]
| 2.359375
| 2
|
testing/il_mimic.py
|
ioanabica/Invariant-Causal-Imitation-Learning
| 12
|
12781264
|
<filename>testing/il_mimic.py
import argparse
import os
import pickle
import gym
import numpy as np
import pandas as pd
try:
from paths import get_model_path # noqa
except (ModuleNotFoundError, ImportError):
from .paths import get_model_path # pylint: disable=reimported
from contrib.energy_model import EnergyModel
from network import (
EnvDiscriminator,
FeaturesDecoder,
FeaturesEncoder,
MineNetwork,
ObservationsDecoder,
StudentNetwork,
)
from student import BaseStudent, ICILStudent
from testing.train_utils import fill_buffer, make_agent, save_results_mimic
# pylint: disable=redefined-outer-name
def make_student(run_seed: int, config) -> BaseStudent:
trajs_path = config["TRAIN_TRAJ_PATH"]
model_path = get_model_path(config["ENV"], "student_" + config["ALG"], run_seed=run_seed)
state_dim = config["STATE_DIM"]
action_dim = config["ACTION_DIM"]
num_training_envs = config["NUM_TRAINING_ENVS"]
# run_seed = run_seed
batch_size = config["BATCH_SIZE"]
buffer_size_in_trajs = config["NUM_TRAJS_GIVEN"]
adam_alpha = config["ADAM_ALPHA"]
env = gym.make("CartPole-v1") # This is needed such the student code doesn't break.
teacher = make_agent("CartPole-v1", "dqn", config["NUM_TRAINING_ENVS"])
teacher.load_pretrained()
buffer = fill_buffer(
trajs_path=config["TRAIN_TRAJ_PATH"],
batch_size=batch_size,
run_seed=run_seed,
traj_shift=None,
buffer_size_in_trajs=buffer_size_in_trajs,
sampling_rate=None,
strictly_batch_data=True,
)
energy_model = EnergyModel(
in_dim=state_dim,
width=config["MLP_WIDTHS"],
batch_size=batch_size,
adam_alpha=adam_alpha,
buffer=buffer,
sgld_buffer_size=config["SGLD_BUFFER_SIZE"],
sgld_learn_rate=config["SGLD_LEARN_RATE"],
sgld_noise_coef=config["SGLD_NOISE_COEF"],
sgld_num_steps=config["SGLD_NUM_STEPS"],
sgld_reinit_freq=config["SGLD_REINIT_FREQ"],
)
energy_model.train(num_updates=config["NUM_STEPS_TRAIN_ENERGY_MODEL"])
causal_features_encoder = FeaturesEncoder(
input_size=state_dim, representation_size=config["REP_SIZE"], width=config["MLP_WIDTHS"]
)
causal_features_decoder = FeaturesDecoder(
action_size=action_dim, representation_size=config["REP_SIZE"], width=config["MLP_WIDTHS"]
)
observations_decoder = ObservationsDecoder(
representation_size=config["REP_SIZE"], out_size=state_dim, width=config["MLP_WIDTHS"]
)
policy_network = StudentNetwork(in_dim=config["REP_SIZE"], out_dim=action_dim, width=config["MLP_WIDTHS"])
env_discriminator = EnvDiscriminator(
representation_size=config["REP_SIZE"], num_envs=config["NUM_TRAINING_ENVS"], width=config["MLP_WIDTHS"]
)
noise_features_encoders = [
FeaturesEncoder(input_size=state_dim, representation_size=config["REP_SIZE"], width=config["MLP_WIDTHS"])
for i in range(num_training_envs)
]
noise_features_decoders = [
FeaturesDecoder(action_size=action_dim, representation_size=config["REP_SIZE"], width=config["MLP_WIDTHS"])
for i in range(num_training_envs)
]
mine_network = MineNetwork(x_dim=config["REP_SIZE"], z_dim=config["REP_SIZE"], width=config["MLP_WIDTHS"])
return ICILStudent(
env=env,
trajs_paths=trajs_path,
model_path=model_path,
num_training_envs=num_training_envs,
teacher=teacher,
causal_features_encoder=causal_features_encoder,
noise_features_encoders=noise_features_encoders,
causal_features_decoder=causal_features_decoder,
noise_features_decoders=noise_features_decoders,
observations_decoder=observations_decoder,
env_discriminator=env_discriminator,
policy_network=policy_network,
energy_model=energy_model,
mine_network=mine_network,
buffer=buffer,
adam_alpha=adam_alpha,
)
def init_arg():
parser = argparse.ArgumentParser()
parser.add_argument("--trial", default=0, type=int)
return parser.parse_args()
if __name__ == "__main__":
args = init_arg()
config = {
"ENV": "MIMIC",
"ALG": "ICILStudent",
"NUM_TRAINING_ENVS": 2,
"REP_SIZE": 32,
"NUM_STEPS_TRAIN": 3000,
"NUM_REPETITIONS": 10,
"BATCH_SIZE": 128,
"MLP_WIDTHS": 64,
"ADAM_ALPHA": 0.0005,
"SGLD_BUFFER_SIZE": 10000,
"SGLD_LEARN_RATE": 0.01,
"SGLD_NOISE_COEF": 0.01,
"SGLD_NUM_STEPS": 50,
"SGLD_REINIT_FREQ": 0.05,
"NUM_STEPS_TRAIN_ENERGY_MODEL": 1000,
"TRAIN_TRAJ_PATH": ["volume/MIMIC/train_mimic_ventilator_0.npy", "volume/MIMIC/train_mimic_ventilator_1.npy"],
"TEST_TRAJ_PATH": "volume/MIMIC/test_mimic_ventilator.npy",
"NUM_TRAJS_GIVEN": 4000,
"STATE_DIM": 228,
"ACTION_DIM": 2,
}
print("Config: %s" % config)
TRIAL = args.trial
print("Trial number %s" % TRIAL)
results_dir_base = "testing/results/"
results_dir = os.path.join(results_dir_base, config["ENV"], config["ALG"])
if not os.path.exists(results_dir):
os.makedirs(results_dir)
config_file = "trial_" + str(TRIAL) + "_" + "config.pkl"
results_file_name = "trial_" + str(TRIAL) + "_" + "results.csv"
results_file_path = os.path.join(results_dir, results_file_name)
if os.path.exists(os.path.join(results_dir, config_file)):
raise NameError("CONFIG file already exists %s. Choose a different trial number." % config_file)
pickle.dump(config, open(os.path.join(results_dir, config_file), "wb"))
for run_seed in range(config["NUM_REPETITIONS"]):
print("Run %s out of %s" % (run_seed + 1, config["NUM_REPETITIONS"]))
student = make_student(run_seed, config)
student.train(num_updates=config["NUM_STEPS_TRAIN"])
action_match, auc_score, apr_score = student.test_batch_data(config["TEST_TRAJ_PATH"])
result = (action_match, auc_score, apr_score)
print(
"Results on test environment for run %s: accuracy %.3f, auc %.3f, apr %.3f"
% (run_seed + 1, action_match, auc_score, apr_score)
)
save_results_mimic(results_file_path, run_seed, action_match, auc_score, apr_score)
results_mimic = pd.read_csv(
"testing/results/" + config["ENV"] + "/" + config["ALG"] + "/trial_" + str(TRIAL) + "_results.csv", header=None
)
print(
"Average results for "
+ str(config["NUM_REPETITIONS"])
+ " repetitions: accuracy %.3f, auc %.3f, apr %.3f"
% (np.mean(results_mimic[1].values), np.mean(results_mimic[2].values), np.mean(results_mimic[3].values))
)
| 2.140625
| 2
|
Functions.py
|
AyushAniket/ImageClassification
| 0
|
12781265
|
# Imports modules
import argparse
import torch
from torchvision import transforms,datasets,models
from PIL import Image
import numpy as np
def get_input_args_train():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type = str, default = 'flowers',
help='dataset directory')
parser.add_argument('--save_dir', type = str, default = '/home/workspace/ImageClassifier/',
help = 'path to the folder for saving checkpoints')
parser.add_argument('--arch',type = str, default = 'densenet',
help = 'NN Model Architecture vgg or densenet. default = densenet')
parser.add_argument('--learning_rate',type = float, default = 0.001,
help = 'value of learning rate')
parser.add_argument('--hidden_units',type = int, default = 512,
help = 'number of hidden units')
parser.add_argument('--epochs',type = int, default = 10,
help = 'number of iterations for training network')
parser.add_argument('--gpu', action = 'store_true',
help = 'run the model on gpu when this flag is given (default: cpu)')
return parser.parse_args()
def get_input_args_predict():
parser = argparse.ArgumentParser()
parser.add_argument('--image_path', type = str, default = '/home/workspace/ImageClassifier/flowers/test/1/image_06743.jpg',
help = 'path to image')
parser.add_argument('--checkpoint',type = str, default = 'checkpoint.pth',
help = 'trained model checkpoint')
parser.add_argument('--top_k',type = int, default = 3,
help = 'number of classes with highest prob.')
parser.add_argument('--category_names', default = 'cat_to_name.json',
help = 'mapping of categories to real names file')
parser.add_argument('--gpu', action = 'store_true',
help = 'run the model on gpu when this flag is given (default: cpu)')
return parser.parse_args()
def process_data(train_dir, test_dir, valid_dir):
train_transforms = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
test_transforms = transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
trainsets = datasets.ImageFolder(train_dir, transform = train_transforms)
testsets = datasets.ImageFolder(test_dir, transform = test_transforms)
validsets = datasets.ImageFolder(valid_dir, transform = test_transforms)
trainloader = torch.utils.data.DataLoader(trainsets, batch_size=64, shuffle=True)
testloader = torch.utils.data.DataLoader(testsets, batch_size=64)
validloader = torch.utils.data.DataLoader(validsets, batch_size=64)
return trainloader, testloader, validloader, trainsets
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns a NumPy array
'''
image = Image.open(image)
if image.size[0] > image.size[1]:
aspect = image.size[1] / 256
new_size = (image.size[0] / aspect, 256)
else:
aspect = image.size[0] / 256
new_size = (256, image.size[1] / aspect)
image.thumbnail(new_size, Image.ANTIALIAS)
# crop out center of image
width, height = image.size # Get dimensions
left = (width - 224) / 2
top = (height - 224) / 2
right = (width + 224) / 2
bottom = (height + 224) / 2
image = image.crop((left, top, right, bottom))
np_image = np.array(image)
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
np_image = np_image / 255.0
np_image = (np_image - mean)/std
np_image = np.transpose(np_image, (2, 0, 1))
return np_image
| 2.78125
| 3
|
algospot/drawrect/answer.py
|
BK-Yoo/everyday-study
| 1
|
12781266
|
<filename>algospot/drawrect/answer.py<gh_stars>1-10
from operator import ixor
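# algospot DRAWRECT: each test case gives three corners of an axis-aligned rectangle, one "x y"
# pair per line; XOR-ing the coordinates cancels the values that appear twice, leaving the
# missing fourth corner.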
for _ in range(int(input())):
a = [0, 0]
for i in range(3):
a = list(map(ixor, a, map(int, input().split())))
print(*a)
| 3.15625
| 3
|
inst/python/rpytools/output.py
|
Mormukut11/R-interface-to-Python
| 0
|
12781267
|
<reponame>Mormukut11/R-interface-to-Python
import sys
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
def start_stdout_capture():
restore = sys.stdout
sys.stdout = StringIO()
return restore
def end_stdout_capture(restore):
output = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = restore
return output
def start_stderr_capture():
restore = sys.stderr
sys.stderr = StringIO()
return restore
def end_stderr_capture(restore):
output = sys.stderr.getvalue()
sys.stderr.close()
sys.stderr = restore
return output
class OutputRemap(object):
def __init__(self, target, handler, tty = True):
self.target = target
self.handler = handler
self.tty = tty
def write(self, message):
return self.handler(message)
def isatty(self):
return self.tty
def __getattr__(self, attr):
if (self.target):
return getattr(self.target, attr)
else:
return 0
def close(self):
return None
def flush(self):
return None
def remap_output_streams(r_stdout, r_stderr, tty):
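# Install the remapping objects only when the interpreter has no real stream
# (sys.stdout / sys.stderr is None, e.g. an embedded interpreter without a console).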
if (sys.stdout is None):
sys.stdout = OutputRemap(sys.stdout, r_stdout, tty)
if (sys.stderr is None):
sys.stderr = OutputRemap(sys.stderr, r_stderr, tty)
| 2.265625
| 2
|
tests/run_sdl2_mapping.py
|
justengel/pyjoystick
| 3
|
12781268
|
from pyjoystick.sdl2 import sdl2, Key, Joystick, ControllerEventLoop, get_mapping, set_mapping
if __name__ == '__main__':
import time
import argparse
devices = Joystick.get_joysticks()
print("Devices:", devices)
monitor = devices[0]
monitor_keytypes = [Key.AXIS]
for k, v in get_mapping(monitor).items():
print(k, ":", v)
set_mapping(monitor, {'lefttrigger': Key(Key.BUTTON, 0), 'righttrigger': Key(Key.BUTTON, 1),
'a': Key(Key.AXIS, 2), 'b': Key(Key.AXIS, 5)})
print()
print("New mapping:")
for k, v in get_mapping(monitor).items():
print(k, ":", v)
#################################
def print_add(joy):
print('Added', joy)
def print_remove(joy):
print('Removed', joy)
def key_received(key):
# Make joystick key and event key values match
monitor.update_key(key)
# Get mapping name
key_name = key.joystick.key_mapping.get(key, None)
if not key_name:
return
if key_name == 'a':
# A button was pressed, perform its action
print('Action on button A')
else:
print('Key:', key_name, 'Value:', key.value, 'Joystick:', key.joystick)
ControllerEventLoop(print_add, print_remove, key_received).run()
| 2.921875
| 3
|
fp/tests/test_datasets.py
|
ankushjain2001/FairPrep
| 8
|
12781269
|
import unittest
from fp.traindata_samplers import CompleteData
from fp.missingvalue_handlers import CompleteCaseAnalysis
from fp.scalers import NamedStandardScaler
from fp.learners import NonTunedLogisticRegression, NonTunedDecisionTree
from fp.pre_processors import NoPreProcessing
from fp.post_processors import NoPostProcessing
from fp.dataset_experiments import AdultDatasetWhiteMaleExperiment, AdultDatasetMaleExperiment, AdultDatasetWhiteExperiment
from fp.dataset_experiments import PropublicaDatasetWhiteExperiment, GermanCreditDatasetSexExperiment, RicciRaceExperiment
from fp.dataset_experiments import GiveMeSomeCreditExperiment
class TestSuiteDatasets(unittest.TestCase):
def test_AdultDatasetWhiteMaleExperiment(self):
self.experiment = AdultDatasetWhiteMaleExperiment(
fixed_random_seed = 0xabcd,
train_data_sampler = CompleteData(),
missing_value_handler = CompleteCaseAnalysis(),
numeric_attribute_scaler = NamedStandardScaler(),
learners = [NonTunedLogisticRegression(), NonTunedDecisionTree()],
pre_processors = [NoPreProcessing()],
post_processors = [NoPostProcessing()]
)
self.experiment.run()
def test_AdultDatasetMaleExperiment(self):
self.experiment = AdultDatasetMaleExperiment(
fixed_random_seed = 0xabcd,
train_data_sampler = CompleteData(),
missing_value_handler = CompleteCaseAnalysis(),
numeric_attribute_scaler = NamedStandardScaler(),
learners = [NonTunedLogisticRegression(), NonTunedDecisionTree()],
pre_processors = [NoPreProcessing()],
post_processors = [NoPostProcessing()]
)
self.experiment.run()
def test_AdultDatasetWhiteExperiment(self):
self.experiment = AdultDatasetWhiteExperiment(
fixed_random_seed = 0xabcd,
train_data_sampler = CompleteData(),
missing_value_handler = CompleteCaseAnalysis(),
numeric_attribute_scaler = NamedStandardScaler(),
learners = [NonTunedLogisticRegression(), NonTunedDecisionTree()],
pre_processors = [NoPreProcessing()],
post_processors = [NoPostProcessing()]
)
self.experiment.run()
def test_PropublicaDatasetWhiteExperiment(self):
self.experiment = PropublicaDatasetWhiteExperiment(
fixed_random_seed = 0xabcd,
train_data_sampler = CompleteData(),
missing_value_handler = CompleteCaseAnalysis(),
numeric_attribute_scaler = NamedStandardScaler(),
learners = [NonTunedLogisticRegression(), NonTunedDecisionTree()],
pre_processors = [NoPreProcessing()],
post_processors = [NoPostProcessing()]
)
self.experiment.run()
def test_GermanCreditDatasetSexExperiment(self):
self.experiment = GermanCreditDatasetSexExperiment(
fixed_random_seed = 0xabcd,
train_data_sampler = CompleteData(),
missing_value_handler = CompleteCaseAnalysis(),
numeric_attribute_scaler = NamedStandardScaler(),
learners = [NonTunedLogisticRegression(), NonTunedDecisionTree()],
pre_processors = [NoPreProcessing()],
post_processors = [NoPostProcessing()]
)
self.experiment.run()
def test_RicciRaceExperiment(self):
self.experiment = RicciRaceExperiment(
fixed_random_seed = 0xabcd,
train_data_sampler = CompleteData(),
missing_value_handler = CompleteCaseAnalysis(),
numeric_attribute_scaler = NamedStandardScaler(),
learners = [NonTunedLogisticRegression(), NonTunedDecisionTree()],
pre_processors = [NoPreProcessing()],
post_processors = [NoPostProcessing()]
)
self.experiment.run()
def test_GiveMeSomeCreditExperiment(self):
self.experiment = GiveMeSomeCreditExperiment(
fixed_random_seed = 0xabcd,
train_data_sampler = CompleteData(),
missing_value_handler = CompleteCaseAnalysis(),
numeric_attribute_scaler = NamedStandardScaler(),
learners = [NonTunedLogisticRegression(), NonTunedDecisionTree()],
pre_processors = [NoPreProcessing()],
post_processors = [NoPostProcessing()]
)
self.experiment.run()
if __name__ == '__main__':
unittest.main()
| 2.1875
| 2
|
inktime/fading.py
|
g-patin/inktime
| 0
|
12781270
|
<filename>inktime/fading.py
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/03_trajectories-in-od-space.ipynb (unless otherwise specified).
__all__ = []
| 1.125
| 1
|
chapter04/4.4.2_gradient_in_neural.py
|
Myeonghan-Jeong/deep-learning-from-scratch
| 0
|
12781271
|
<gh_stars>0
from commons.functions import softmax, cross_entropy_error
from commons.gradient import numerical_gradient
import numpy as np
class simpleNet:
def __init__(self):  # initialize the weights with a standard normal distribution
self.W = np.random.randn(2, 3)
def predict(self, x):
return np.dot(x, self.W)
def loss(self, x, t): # calculate error
z = self.predict(x)
y = softmax(z)
loss = cross_entropy_error(y, t)
return loss
x = np.array([0.6, 0.9])
t = np.array([0, 0, 1])
net = simpleNet()
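# numerical_gradient(f, net.W) perturbs the entries of net.W in place and re-evaluates f,
# so f can ignore its w argument: the loss reads the updated net.W directly.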
def f(w):
return net.loss(x, t)
dW = numerical_gradient(f, net.W)
print(dW)
| 2.828125
| 3
|
antimarkdown/handlers.py
|
Crossway/antimarkdown
| 0
|
12781272
|
# -*- coding: utf-8 -*-
"""antimarkdown.handlers -- Element handlers for converting HTML Elements/subtrees to Markdown text.
"""
from collections import deque
from antimarkdown import nodes
def render(*domtrees):
if not domtrees:
return ''
root = nodes.Root()
for dom in domtrees:
build_render_tree(root, dom)
lines = str(root).rstrip().splitlines()
# Strip leading empty lines
while lines and not lines[0].strip():
lines.pop(0)
return nodes.normalize('\n'.join(lines))
def build_render_tree(root, domtree):
"""Process an ElementTree domtree and build a render tree.
"""
opened = set()
stack = deque([domtree])
blackboard = {}
render_tree = root
current_node = render_tree
while stack:
domtree = stack.pop()
if domtree not in opened:
# Open the domtree
# Build the render node.
node_class = getattr(nodes, domtree.tag.upper(), nodes.Node)
current_node = node_class(current_node, domtree, blackboard)
stack.append(domtree)
# Queue children
for el in reversed(domtree):
stack.append(el)
opened.add(domtree)
else:
# Close the domtree
current_node = current_node.parent
return root
| 3.265625
| 3
|
examples/example_06_fit_parallax_EMCEE.py
|
pmehta08/MulensModel
| 0
|
12781273
|
"""
Fits PSPL model with parallax using EMCEE sampler.
"""
import os
import sys
import numpy as np
try:
import emcee
except ImportError as err:
print(err)
print("\nEMCEE could not be imported.")
print("Get it from: http://dfm.io/emcee/current/user/install/")
print("and re-run the script")
sys.exit(1)
import matplotlib.pyplot as plt
import MulensModel as mm
# Define likelihood functions
def ln_like(theta, event, parameters_to_fit):
""" likelihood function """
for key, val in enumerate(parameters_to_fit):
setattr(event.model.parameters, val, theta[key])
return -0.5 * event.get_chi2()
def ln_prior(theta, parameters_to_fit):
"""priors - we only reject obviously wrong models"""
if theta[parameters_to_fit.index("t_E")] < 0.:
return -np.inf
return 0.0
def ln_prob(theta, event, parameters_to_fit):
""" combines likelihood and priors"""
ln_prior_ = ln_prior(theta, parameters_to_fit)
if not np.isfinite(ln_prior_):
return -np.inf
ln_like_ = ln_like(theta, event, parameters_to_fit)
# If the source fluxes are negative, the likelihood is NaN; reject such
# models as if they had been excluded by the priors.
if np.isnan(ln_like_):
return -np.inf
return ln_prior_ + ln_like_
# Read the data
file_name = os.path.join(
mm.DATA_PATH, "photometry_files", "OB05086",
"starBLG234.6.I.218982.dat")
my_data = mm.MulensData(file_name=file_name, add_2450000=True)
coords = "18:04:45.71 -26:59:15.2"
# Starting parameters:
params = dict()
params['t_0'] = 2453628.3
params['t_0_par'] = 2453628.
params['u_0'] = 0.37 # Change sign of u_0 to find the other solution.
params['t_E'] = 100.
params['pi_E_N'] = 0.
params['pi_E_E'] = 0.
my_model = mm.Model(params, coords=coords)
my_event = mm.Event(datasets=my_data, model=my_model)
# Which parameters we want to fit?
parameters_to_fit = ["t_0", "u_0", "t_E", "pi_E_N", "pi_E_E"]
# And remember to provide dispersions to draw starting set of points
sigmas = [0.01, 0.001, 0.1, 0.01, 0.01]
# Initializations for EMCEE
n_dim = len(parameters_to_fit)
n_walkers = 40
n_steps = 500
n_burn = 150
# Including the set of n_walkers starting points:
start_1 = [params[p] for p in parameters_to_fit]
start = [start_1 + np.random.randn(n_dim) * sigmas
for i in range(n_walkers)]
# Run emcee (this can take some time):
sampler = emcee.EnsembleSampler(
n_walkers, n_dim, ln_prob, args=(my_event, parameters_to_fit))
sampler.run_mcmc(start, n_steps)
# Remove burn-in samples and reshape:
samples = sampler.chain[:, n_burn:, :].reshape((-1, n_dim))
# Results:
results = np.percentile(samples, [16, 50, 84], axis=0)
print("Fitted parameters:")
for i in range(n_dim):
r = results[1, i]
print("{:.5f} {:.5f} {:.5f}".format(r, results[2, i]-r, r-results[0, i]))
# We extract best model parameters and chi2 from my_event:
print("\nSmallest chi2 model:")
best = [my_event.best_chi2_parameters[p] for p in parameters_to_fit]
print(*[repr(b) if isinstance(b, float) else b.value for b in best])
print(my_event.best_chi2)
# Now let's plot 3 models
plt.figure()
model_0 = mm.Model({'t_0': 2453628.29062, 'u_0': 0.37263, 't_E': 102.387105})
model_1 = mm.Model(
{'t_0': 2453630.35507, 'u_0': 0.488817, 't_E': 93.611301,
'pi_E_N': 0.2719, 'pi_E_E': 0.1025, 't_0_par': params['t_0_par']},
coords=coords)
model_2 = mm.Model(
{'t_0': 2453630.67778, 'u_0': -0.415677, 't_E': 110.120755,
'pi_E_N': -0.2972, 'pi_E_E': 0.1103, 't_0_par': params['t_0_par']},
coords=coords)
model_0.set_datasets([my_data])
model_1.set_datasets([my_data])
model_2.set_datasets([my_data])
t_1 = 2453200.
t_2 = 2453950.
plot_params = {'lw': 2.5, 'alpha': 0.3, 'subtract_2450000': True,
't_start': t_1, 't_stop': t_2}
my_event.plot_data(subtract_2450000=True)
model_0.plot_lc(label='no pi_E', **plot_params)
model_1.plot_lc(label='pi_E, u_0>0', **plot_params)
model_2.plot_lc(label='pi_E, u_0<0', color='black', ls='dashed', **plot_params)
plt.xlim(t_1-2450000., t_2-2450000.)
plt.legend(loc='best')
plt.title('Data and 3 fitted models')
plt.show()
| 2.203125
| 2
|
evaluate.py
|
liuxin811/SRGAN-improved
| 0
|
12781274
|
<reponame>liuxin811/SRGAN-improved
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 5 22:41:34 2020
@author: liuxin
"""
import numpy as np
import os
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import Input, Conv2d, BatchNorm2d, Elementwise, SubpixelConv2d, Flatten, Dense, MaxPool2d
from tensorlayer.models import Model
import time
import cv2
import model
import config
def evaluate():
valid_hr_img_list = sorted(tl.files.load_file_list(path=config.path_valid_HR_orin, regx='.*.png', printable=False))[:]
valid_lr_img_list = sorted(tl.files.load_file_list(path=config.path_valid_LR_orin, regx='.*.png', printable=False))[:]
valid_lr_imgs = tl.vis.read_images(valid_lr_img_list, path=config.path_valid_LR_orin, n_threads=8)
valid_hr_imgs = tl.vis.read_images(valid_hr_img_list, path=config.path_valid_HR_orin, n_threads=8)
imid = 0  # 0: penguin  81: butterfly  53: bird  64: castle
valid_lr_img = valid_lr_imgs[imid]
#print(valid_lr_img.shape)
valid_hr_img = valid_hr_imgs[imid]
valid_lr_img = (valid_lr_img / 127.5) - 1 # rescale to [-1, 1]
valid_lr_img = np.asarray(valid_lr_img, dtype=np.float32)
valid_lr_img = valid_lr_img[np.newaxis,:,:,:]
W, H = valid_hr_img.shape[0], valid_hr_img.shape[1]
G = model.get_G([1, None, None, 3])
G.load_weights(os.path.join(config.path_model, 'g_gan.h5'))
G.eval()
# image produced by the generator network
gen_img = G(valid_lr_img).numpy()
# image upscaled by interpolation
out_bicu = config.resize_img(valid_lr_img, (W, H))
tl.vis.save_image(gen_img[0], os.path.join(config.path_pic, 'fh.png'))
tl.vis.save_image(valid_lr_img[0], os.path.join(config.path_pic, 'rl.png'))
tl.vis.save_image(valid_hr_img, os.path.join(config.path_pic, 'hr.png'))
tl.vis.save_image(out_bicu[0], os.path.join(config.path_pic, 'bh.png'))
print('Validation images have been saved to the {} folder'.format(config.path_pic))
if __name__ == '__main__':
#with tf.device('/cpu'):
evaluate()
| 1.898438
| 2
|
tornkts/utils.py
|
ktsstudio/tornkts
| 6
|
12781275
|
import errno
import mimetypes
from datetime import datetime
import os
import six
from passlib.apps import django10_context as pwd_context
try:
import ujson as json
except ImportError:
import json as json
def mkdir(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def to_int(value, default=0):
if type(value) == list:
if len(value) > 0:
value = value[0]
else:
return default
try:
value = int(value)
except (TypeError, ValueError):
value = default
return value
def json_dumps(data):
return json.dumps(data)
def json_loads(data):
if isinstance(data, str):
return json.loads(data)
else:
return None
def now_date():
return datetime(datetime.now().year, datetime.now().month, datetime.now().day)
def unique_list(target):
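# keeps only the first occurrence of each element while preserving order
# (set.add returns None, so the or-clause merely records items already seen)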
seen = set()
return [x for x in target if not (x in seen or seen.add(x))]
def encode_multipart_formdata(fields=None, files=None):
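# Builds a multipart/form-data request body by hand (plain fields plus file parts) and
# returns (content_type, body) ready to be sent by an HTTP client.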
if fields is None:
fields = {}
if files is None:
files = {}
BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
CRLF = '\r\n' if six.PY2 else b'\r\n'
L = []
for (key, value) in fields.items():
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"' % key)
L.append('')
L.append(value)
for (key, filename, value) in files:
if six.PY2:
filename = filename.encode("utf8")
L.append('--' + BOUNDARY)
L.append(
'Content-Disposition: form-data; name="%s"; filename="%s"' % (
key, filename
)
)
L.append('Content-Type: %s' % get_content_type(filename))
L.append('')
L.append(value)
L.append('--' + BOUNDARY + '--')
L.append('')
if six.PY3:
for i in range(len(L)):
if isinstance(L[i], int):
L[i] = str(L[i])
if isinstance(L[i], str):
L[i] = str.encode(L[i])
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
def get_content_type(filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
class PasswordHelper(object):
@staticmethod
def get_hash(text):
return pwd_context.encrypt(text)
@staticmethod
def verify_hash(text, hashed_text):
try:
return pwd_context.verify(text, hashed_text)
except:
return False
class FileHelper(object):
@staticmethod
def file_ext(filename):
split = filename.rsplit('.', 1)
if len(split) > 1:
extension = str(split[1])
return extension.lower()
return ""
class InvalidArgumentException(Exception):
message = ''
def __init__(self, message):
super().__init__()
self.message = message
| 2.28125
| 2
|
code.py
|
FoamyGuy/CircuitPython-Badge-Reverse-Pong-Game
| 0
|
12781276
|
import board
import displayio
from adafruit_display_shapes.circle import Circle
import time
from pong_helpers import AutoPaddle, ManualBall
# width and height variables used to know where the bottom and right edge of the screen are.
SCREEN_WIDTH = 160
SCREEN_HEIGHT = 128
# FPS (Frames per second) setting, raise or lower this to make the game faster or slower
FPS = 60
# what fraction of a second to wait in order to achieve the desired FPS setting
FPS_DELAY = 1 / FPS
# Make the display context
splash = displayio.Group(max_size=10)
board.DISPLAY.show(splash)
# Make a background color fill
color_bitmap = displayio.Bitmap(SCREEN_WIDTH, SCREEN_HEIGHT, 1)
color_palette = displayio.Palette(1)
color_palette[0] = 0xFFFFFF
bg_sprite = displayio.TileGrid(color_bitmap, x=0, y=0, pixel_shader=color_palette)
splash.append(bg_sprite)
# hold the time we last updated the game state.
# Also represents the last executed "frame" for our FPS setting.
last_update_time = 0
# create left paddle object
# width: 5, height: 30
# x: 1, y: 0
left_paddle = AutoPaddle(5,30,1,0)
# add it to screen group
splash.append(left_paddle.rect)
# create right paddle object
# width: 5, height: 30
# x: 6 pixels inside the right edge
# y: 36 pixels above the bottom edge.
# 30 because it is the paddle height, 6 because it's "a few more" to move away from the edge.
right_paddle = AutoPaddle(5,30,SCREEN_WIDTH-6,SCREEN_HEIGHT-30-6)
# add it to screen group
splash.append(right_paddle.rect)
# create ball
# diameter: 3
# x: center of the screen
# y: center of the screen
ball = ManualBall(3, int(SCREEN_WIDTH/2), int(SCREEN_HEIGHT/2))
# add it to screen group
splash.append(ball.circle)
# variable to hold current time
now = 0
# debug variable to count loops in between updates/frames
loops_since_update = 0
# update() function will get called from main loop
# at an appropriate interval to match FPS setting.
def update():
# call update on all game objects
left_paddle.update()
right_paddle.update()
ball.update(left_paddle, right_paddle)
while True:
# update time variable
now = time.monotonic()
# check if the delay time has passed since the last game update
if last_update_time + FPS_DELAY <= now:
# call update
update()
# set the last update time to now
last_update_time = now
#print(loops_since_update)
# reset debug loop counter
loops_since_update = 0
else:
# update debug loop counter
loops_since_update += 1
| 3.234375
| 3
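The main loop above is a non-blocking frame-pacing pattern: update() runs at most once per FPS_DELAY while the loop itself free-runs. A hardware-free sketch of the same idea (illustrative only, no CircuitPython required):

import time

FPS = 60
FPS_DELAY = 1 / FPS
last_update_time = 0
frames = 0

def update():
    global frames
    frames += 1

start = time.monotonic()
while time.monotonic() - start < 1.0:      # run for roughly one second
    now = time.monotonic()
    if last_update_time + FPS_DELAY <= now:
        update()                           # at most one update per FPS_DELAY
        last_update_time = now

print(frames)                              # roughly 60 updates in one second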
|
popularity.py
|
udit01/Image-Quantization
| 1
|
12781277
|
import numpy as np
import cv2
import heapq
import statistics
import math
def get_norm(t1, t2):
    # cast to float so uint8 pixel values do not wrap on subtraction;
    # the original used '^' (bitwise XOR) where '**' (squaring) was intended
    (xa, ya, za) = (float(v) for v in t1)
    (xb, yb, zb) = (float(v) for v in t2)
    return math.sqrt((xa - xb) ** 2 + (ya - yb) ** 2 + (za - zb) ** 2)
def popularity(image,k):
(m,n,_) = image.shape
d = {}
for i in range(m):
for j in range(n):
t = tuple(image[i,j])
if t in d:
d[t] += 1
else:
d[t] = 1
top_k_colors =heapq.nlargest(k, d, key=d.get)
return top_k_colors
def popularity_quant(image, k):
finalImage = image.copy()
color_map = popularity(image, k)
(m,n,_) = image.shape
for i in range(m):
for j in range(n):
t = tuple(image[i,j])
min_dist = 100000000.0
for col in color_map:
dist = get_norm(t, col)
if min_dist > dist :
min_dist = dist
min_col = col
finalImage[i,j] = np.asarray(min_col)
return finalImage
test_image = cv2.imread('test1.png')
img = popularity_quant(test_image, 10)
cv2.imshow('Popularity Cut image',img)
cv2.waitKey()
cv2.destroyAllWindows()
cv2.imwrite('popularity_test1.png', img)
| 2.609375
| 3
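The nested pixel loop above is O(width x height x k) in pure Python; a vectorized NumPy sketch of the same popularity mapping (an alternative formulation, not the author's code):

import numpy as np

def popularity_quant_np(image, k):
    # pick the k most frequent colours, then map every pixel to its nearest one
    pixels = image.reshape(-1, 3).astype(np.float32)
    colours, counts = np.unique(pixels, axis=0, return_counts=True)
    palette = colours[np.argsort(counts)[-k:]]
    # squared distances between every pixel and every palette colour: shape (n_pixels, k)
    dists = ((pixels[:, None, :] - palette[None, :, :]) ** 2).sum(axis=2)
    nearest = palette[dists.argmin(axis=1)]
    return nearest.reshape(image.shape).astype(image.dtype)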
|
bin/coldbackup/coldbackup.py
|
destinysky/BackupResourcesController_k8s
| 0
|
12781278
|
<gh_stars>0
from socketserver import BaseRequestHandler, TCPServer
class EchoHandler(BaseRequestHandler):
def handle(self):
print('Got connection from', self.client_address)
while True:
msg = str(self.request.recv(8192),encoding='utf-8')
print(msg)
if not msg or msg=="ok":
print("exit")
self.request.send(bytes("bye",encoding='utf-8'))
exit()
if __name__ == '__main__':
serv = TCPServer(('', 20000), EchoHandler)
serv.serve_forever()
| 2.875
| 3
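A hypothetical client for the handler above, showing the tiny protocol it implements: the server keeps reading messages until it sees "ok" (or an empty read), then answers "bye":

import socket
import time

with socket.create_connection(("127.0.0.1", 20000)) as sock:
    sock.sendall(b"hello")
    time.sleep(0.1)                         # let the handler read "hello" on its own
    sock.sendall(b"ok")
    print(sock.recv(8192).decode("utf-8"))  # expected reply: "bye"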
|
jailscraper/spiders/inmate_spider.py
|
propublica/cookcountyjail2
| 25
|
12781279
|
import boto3
import csv
import logging
import io
import os
import requests
import scrapy
from datetime import date, datetime, timedelta
from jailscraper import app_config, utils
from jailscraper.models import InmatePage
# Quiet down, Boto!
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
logging.getLogger('s3transfer').setLevel(logging.CRITICAL)
ONE_DAY = timedelta(days=1)
class InmatesSpider(scrapy.Spider):
name = "inmates"
def __init__(self, category=None, *args, **kwargs):
super(InmatesSpider, self).__init__(*args, **kwargs)
if app_config.USE_S3_STORAGE:
s3 = boto3.resource('s3')
self._bucket = s3.Bucket(app_config.S3_BUCKET)
self._today = datetime.combine(date.today(), datetime.min.time())
self._yesterday = self._today - ONE_DAY
def start_requests(self):
for url in self._generate_urls():
yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
inmate = InmatePage(response.body)
if app_config.USE_LOCAL_STORAGE:
self._save_local(response, inmate)
if app_config.USE_S3_STORAGE:
self._save_to_s3(response, inmate)
yield {
'Age_At_Booking': inmate.age_at_booking,
'Bail_Amount': inmate.bail_amount,
'Booking_Date': inmate.booking_date,
'Booking_Id': inmate.booking_id,
'Charges': inmate.charges,
'Court_Date': inmate.court_date,
'Court_Location': inmate.court_location,
'Gender': inmate.gender,
'Inmate_Hash': inmate.inmate_hash,
'Height': inmate.height,
'Housing_Location': inmate.housing_location,
'Race': inmate.race,
'Weight': inmate.weight,
'Incomplete': self._is_complete_record(inmate)
}
def _generate_urls(self):
"""Make URLs."""
f = self._get_seed_file()
data = list(csv.DictReader(f))
urls = [app_config.INMATE_URL_TEMPLATE.format(row['Booking_Id']) for row in data]
dates = [datetime.strptime(row['Booking_Date'], '%Y-%m-%d') for row in data]
last_date = max(dates) + ONE_DAY
self._start_date = last_date
# Scan the universe of URLs
while last_date < self._today:
next_query = last_date.strftime('%Y-%m%d')
for num in range(1, app_config.MAX_DEFAULT_JAIL_NUMBER + 1):
jailnumber = '{0}{1:03d}'.format(next_query, num)
urls.append(app_config.INMATE_URL_TEMPLATE.format(jailnumber))
last_date = last_date + ONE_DAY
return urls
def _get_seed_file(self):
"""Returns data from seed file as array of lines."""
if app_config.USE_S3_STORAGE:
return self._get_s3_seed_file()
else:
return self._get_local_seed_file()
def _get_s3_seed_file(self):
"""Get seed file from S3. Return file-like object."""
urls = utils.get_manifest()
seed_url = urls.pop()
seed_response = requests.get(seed_url)
return io.StringIO(seed_response.text)
def _get_local_seed_file(self):
"""Get seed file from local file system. Return file-like object."""
try:
files = sorted(os.listdir('data/daily'))
except FileNotFoundError:
files = []
if not len(files):
self.log('No seed file found.')
return app_config.FALLBACK_START_DATE, []
last_file = os.path.join('data/daily', files[-1])
f = open(last_file)
self.log('Used {0} from local file system to seed scrape.'.format(last_file))
return f
def _save_local(self, response, inmate):
"""Save scraped page to local filesystem."""
os.makedirs('data/raw', exist_ok=True)
filepath = os.path.join('data/raw', self._generate_page_filename(inmate))
with open(filepath, 'wb') as f:
f.write(response.body)
self.log('Wrote {0} to local file system'.format(filepath))
def _save_to_s3(self, response, inmate):
"""Save scraped page to s3."""
key = '{0}/raw/{1}'.format(app_config.TARGET, self._generate_page_filename(inmate))
if key.startswith('/'):
key = key[1:]
f = io.BytesIO(response.body)
self._bucket.upload_fileobj(f, key)
self.log('Uploaded s3://{0}/{1}'.format(app_config.S3_BUCKET, key))
def _generate_page_filename(self, inmate):
"""Make a scraped page filename."""
name = '{0}-{1}.html'.format(self._today.strftime('%Y-%m-%d'), inmate.booking_id)
return name
def _is_complete_record(self, inmate):
"""Was this scrape run daily?"""
booking_date = datetime.strptime(inmate.booking_date, '%Y-%m-%d')
return booking_date < self._yesterday
| 2.34375
| 2
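A standalone sketch of the booking-id scheme that _generate_urls() scans: a '%Y-%m%d' date prefix plus a zero-padded three-digit sequence number substituted into the inmate URL template (the template below is a placeholder; the real one lives in app_config):

from datetime import datetime

INMATE_URL_TEMPLATE = 'https://jail.example.org/inmate/{0}'   # placeholder, not the real template
day = datetime(2017, 6, 1)
prefix = day.strftime('%Y-%m%d')                              # '2017-0601'
for num in range(1, 4):
    booking_id = '{0}{1:03d}'.format(prefix, num)             # '2017-0601001', '2017-0601002', ...
    print(INMATE_URL_TEMPLATE.format(booking_id))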
|
fluent_pages/tests/__init__.py
|
bashu/sigmacms-fluent-pages
| 0
|
12781280
|
"""
Test suite for fluent-pages
"""
import django
if django.VERSION < (1,6):
# Expose for Django 1.5 and below (before DiscoverRunner)
from .test_urldispatcher import UrlDispatcherTests, UrlDispatcherNonRootTests
from .test_menu import MenuTests
from .test_modeldata import ModelDataTests
from .test_plugins import PluginTests, PluginUrlTests
from .test_templatetags import TemplateTagTests
| 1.539063
| 2
|
openpyxl/reader/tests/test_worbook.py
|
Hitachi-Data-Systems/org-chart-builder
| 8
|
12781281
|
<reponame>Hitachi-Data-Systems/org-chart-builder
# Copyright (c) 2010-2014 openpyxl
from io import BytesIO
from zipfile import ZipFile
import pytest
from openpyxl.xml.constants import (
ARC_WORKBOOK,
ARC_CONTENT_TYPES,
ARC_WORKBOOK_RELS,
REL_NS,
)
@pytest.fixture()
def DummyArchive():
body = BytesIO()
archive = ZipFile(body, "w")
return archive
def test_hidden_sheets(datadir, DummyArchive):
from .. workbook import read_sheets
datadir.chdir()
archive = DummyArchive
with open("hidden_sheets.xml") as src:
archive.writestr(ARC_WORKBOOK, src.read())
sheets = read_sheets(archive)
assert list(sheets) == [
('rId1', 'Blatt1', None),
('rId2', 'Blatt2', 'hidden'),
('rId3', 'Blatt3', 'hidden')
]
@pytest.mark.parametrize("excel_file, expected", [
("bug137.xlsx", [
{'path': 'xl/worksheets/sheet1.xml', 'title': 'Sheet1', 'type':'%s/worksheet' % REL_NS}
]
),
("contains_chartsheets.xlsx", [
{'path': 'xl/worksheets/sheet1.xml', 'title': 'data', 'type':'%s/worksheet' % REL_NS},
{'path': 'xl/worksheets/sheet2.xml', 'title': 'moredata', 'type':'%s/worksheet' % REL_NS},
]),
("bug304.xlsx", [
{'path': 'xl/worksheets/sheet3.xml', 'title': 'Sheet1', 'type':'%s/worksheet' % REL_NS},
{'path': 'xl/worksheets/sheet2.xml', 'title': 'Sheet2', 'type':'%s/worksheet' % REL_NS},
{'path': 'xl/worksheets/sheet.xml', 'title': 'Sheet3', 'type':'%s/worksheet' % REL_NS},
])
]
)
def test_detect_worksheets(datadir, excel_file, expected):
from openpyxl.reader.excel import detect_worksheets
datadir.chdir()
archive = ZipFile(excel_file)
assert list(detect_worksheets(archive)) == expected
@pytest.mark.parametrize("excel_file, expected", [
("bug137.xlsx", {
"rId1": {'path': 'xl/chartsheets/sheet1.xml', 'type':'%s/chartsheet' % REL_NS},
"rId2": {'path': 'xl/worksheets/sheet1.xml', 'type':'%s/worksheet' % REL_NS},
"rId3": {'path': 'xl/theme/theme1.xml', 'type':'%s/theme' % REL_NS},
"rId4": {'path': 'xl/styles.xml', 'type':'%s/styles' % REL_NS},
"rId5": {'path': 'xl/sharedStrings.xml', 'type':'%s/sharedStrings' % REL_NS}
}),
("bug304.xlsx", {
'rId1': {'path': 'xl/worksheets/sheet3.xml', 'type':'%s/worksheet' % REL_NS},
'rId2': {'path': 'xl/worksheets/sheet2.xml', 'type':'%s/worksheet' % REL_NS},
'rId3': {'path': 'xl/worksheets/sheet.xml', 'type':'%s/worksheet' % REL_NS},
'rId4': {'path': 'xl/theme/theme.xml', 'type':'%s/theme' % REL_NS},
'rId5': {'path': 'xl/styles.xml', 'type':'%s/styles' % REL_NS},
'rId6': {'path': '../customXml/item1.xml', 'type':'%s/customXml' % REL_NS},
'rId7': {'path': '../customXml/item2.xml', 'type':'%s/customXml' % REL_NS},
'rId8': {'path': '../customXml/item3.xml', 'type':'%s/customXml' % REL_NS}
}),
]
)
def test_read_rels(datadir, excel_file, expected):
from openpyxl.reader.workbook import read_rels
datadir.chdir()
archive = ZipFile(excel_file)
assert dict(read_rels(archive)) == expected
@pytest.mark.parametrize("workbook_file, expected", [
("bug137_workbook.xml",
[
("rId1", "Chart1", None),
("rId2", "Sheet1", None),
]
),
("bug304_workbook.xml",
[
('rId1', 'Sheet1', None),
('rId2', 'Sheet2', None),
('rId3', 'Sheet3', None),
]
)
])
def test_read_sheets(datadir, DummyArchive, workbook_file, expected):
from openpyxl.reader.workbook import read_sheets
datadir.chdir()
archive = DummyArchive
with open(workbook_file) as src:
archive.writestr(ARC_WORKBOOK, src.read())
assert list(read_sheets(archive)) == expected
def test_read_content_types(datadir, DummyArchive):
from openpyxl.reader.workbook import read_content_types
archive = DummyArchive
datadir.chdir()
with open("content_types.xml") as src:
archive.writestr(ARC_CONTENT_TYPES, src.read())
assert list(read_content_types(archive)) == [
('application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml', '/xl/workbook.xml'),
('application/vnd.openxmlformats-officedocument.spreadsheetml.worksheet+xml', '/xl/worksheets/sheet1.xml'),
('application/vnd.openxmlformats-officedocument.spreadsheetml.chartsheet+xml', '/xl/chartsheets/sheet1.xml'),
('application/vnd.openxmlformats-officedocument.spreadsheetml.worksheet+xml', '/xl/worksheets/sheet2.xml',),
('application/vnd.openxmlformats-officedocument.theme+xml', '/xl/theme/theme1.xml'),
('application/vnd.openxmlformats-officedocument.spreadsheetml.styles+xml', '/xl/styles.xml'),
('application/vnd.openxmlformats-officedocument.spreadsheetml.sharedStrings+xml', '/xl/sharedStrings.xml'),
('application/vnd.openxmlformats-officedocument.drawing+xml', '/xl/drawings/drawing1.xml'),
('application/vnd.openxmlformats-officedocument.drawingml.chart+xml','/xl/charts/chart1.xml'),
('application/vnd.openxmlformats-officedocument.drawing+xml', '/xl/drawings/drawing2.xml'),
('application/vnd.openxmlformats-officedocument.drawingml.chart+xml', '/xl/charts/chart2.xml'),
('application/vnd.openxmlformats-officedocument.spreadsheetml.calcChain+xml', '/xl/calcChain.xml'),
('application/vnd.openxmlformats-package.core-properties+xml', '/docProps/core.xml'),
('application/vnd.openxmlformats-officedocument.extended-properties+xml', '/docProps/app.xml')
]
def test_missing_content_type(datadir, DummyArchive):
from .. workbook import detect_worksheets
archive = DummyArchive
datadir.chdir()
with open("bug181_content_types.xml") as src:
archive.writestr(ARC_CONTENT_TYPES, src.read())
with open("bug181_workbook.xml") as src:
archive.writestr(ARC_WORKBOOK, src.read())
with open("bug181_workbook.xml.rels") as src:
archive.writestr(ARC_WORKBOOK_RELS, src.read())
sheets = list(detect_worksheets(archive))
assert sheets == [{'path': 'xl/worksheets/sheet1.xml', 'title': 'Sheet 1', 'type':'%s/worksheet' % REL_NS}]
| 1.945313
| 2
|
tests/test_executor.py
|
jackcvr/concurrency
| 0
|
12781282
|
<reponame>jackcvr/concurrency
import operator
import threading
import time
from concurrent import futures
import pytest
from threadlet import TimeoutError
from threadlet.executor import BrokenThreadPool, ThreadPoolExecutor
def test_executor_shutdown():
max_workers = 4
with ThreadPoolExecutor(max_workers, idle_timeout=5) as tpe:
for _ in range(max_workers):
tpe.submit(time.sleep, 0.1)
assert threading.active_count() == max_workers + 1
assert threading.active_count() == 1
@pytest.mark.parametrize("max_workers", (1, 2, 3, 4))
def test_executor_submit_success(max_workers):
numbers = [1, 2]
expected_res = sum(numbers)
with ThreadPoolExecutor(max_workers) as tpe:
f = tpe.submit(sum, numbers)
assert f.result(1) == expected_res
@pytest.mark.parametrize("max_workers", (1, 2, 3, 4))
def test_executor_submit_error(max_workers):
numbers = [1, 0]
with ThreadPoolExecutor(max_workers) as tpe:
f = tpe.submit(operator.truediv, *numbers)
with pytest.raises(ZeroDivisionError):
f.result()
del f
@pytest.mark.parametrize("max_workers", (1, 2))
def test_executor_submit_timeout(max_workers):
with ThreadPoolExecutor(max_workers) as tpe:
f = tpe.submit(time.sleep, 2)
with pytest.raises(TimeoutError):
f.result(timeout=1)
assert f.result(timeout=2) is None
def test_executor_worker_error():
def raise_init_error():
raise RuntimeError("init")
with ThreadPoolExecutor(1, initializer=raise_init_error) as tpe:
f = tpe.submit(time.sleep, 2)
with pytest.raises(BrokenThreadPool):
f.result(1)
del f
@pytest.mark.parametrize("max_workers", (3, 4))
def test_executor_idle_timeout_none(max_workers):
with ThreadPoolExecutor(max_workers, idle_timeout=None) as tpe:
for _ in range(max_workers):
tpe.submit(time.sleep, 0.1)
assert threading.active_count() == max_workers + 1
time.sleep(1)
assert threading.active_count() == max_workers + 1
@pytest.mark.parametrize("max_workers", (3, 4))
def test_executor_idle_timeout(max_workers):
idle_timeout = 1
work_time = 0.5
with ThreadPoolExecutor(max_workers, idle_timeout=idle_timeout) as tpe:
assert threading.active_count() == 1
for _ in range(2):
for _ in range(max_workers):
tpe.submit(time.sleep, work_time)
assert threading.active_count() == max_workers + 1
time.sleep(work_time + idle_timeout + 1)
assert threading.active_count() == 1
@pytest.mark.parametrize("max_workers", (1, 2, 3, 4))
def test_executor_min_workers(max_workers):
idle_timeout = 1
work_time = 0.5
min_workers = max_workers - 1
with ThreadPoolExecutor(max_workers, min_workers=min_workers, idle_timeout=idle_timeout) as tpe:
assert threading.active_count() == min_workers + 1
for _ in range(max_workers):
tpe.submit(time.sleep, work_time)
assert threading.active_count() == max_workers + 1
time.sleep(work_time + idle_timeout + 1)
assert threading.active_count() == min_workers + 1
@pytest.mark.parametrize("max_workers", (1, 2, 3, 4))
def test_executor_max_workers(max_workers):
idle_timeout = 1
work_time = 0.1
task_limit = max_workers * 10
def task():
nonlocal done_tasks
time.sleep(work_time)
done_tasks += 1
with ThreadPoolExecutor(max_workers, idle_timeout=idle_timeout) as tpe:
assert threading.active_count() == 1
done_tasks = 0
fs = []
for _ in range(task_limit):
fs.append(tpe.submit(task))
assert threading.active_count() == max_workers + 1
futures.wait(fs)
assert threading.active_count() == max_workers + 1
time.sleep(work_time + idle_timeout + 1)
assert threading.active_count() == 1
assert done_tasks == task_limit
| 2.71875
| 3
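The tests above pin down the pool's sizing behaviour; a small usage sketch, assuming the same API the tests exercise (workers grow on demand up to max_workers and are reaped after idle_timeout seconds, but never below min_workers):

import time
from threadlet.executor import ThreadPoolExecutor

with ThreadPoolExecutor(4, min_workers=1, idle_timeout=1) as tpe:
    futures = [tpe.submit(time.sleep, 0.1) for _ in range(8)]
    for f in futures:
        f.result()          # all tasks finish; up to 4 workers were used
    time.sleep(2.5)         # idle workers above min_workers are expected to exit here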
|
snpe/run_snpe.py
|
csarron/MobileAccelerator
| 2
|
12781283
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import os
import tensorflow as tf
import zipfile as zp
import subprocess
import glob
import json
from PIL import Image
from collections import OrderedDict
import shutil
import stat
import sys
def convert_to_dlc(script_path, frozen_model_file, snpe_root, input_node='input', output_node='output', image_size=224):
print('converting ' + frozen_model_file + ' to snpe dlc format')
sys.stdout.flush()
model_name_ = os.path.splitext(os.path.split(frozen_model_file)[1])[0]
dlc_path = 'models/{}.dlc'.format(model_name_)
dlc_full_path = os.path.join(snpe_root, 'benchmarks', dlc_path)
# if os.path.exists(dlc_full_path):
# return dlc_path
if not os.path.exists(os.path.dirname(dlc_full_path)):
os.makedirs(os.path.dirname(dlc_full_path))
cmd = [script_path,
'--graph', os.path.abspath(frozen_model_file),
'--input_dim', input_node, '1,{0},{0},3'.format(image_size),
'--out_node', output_node,
'--allow_unconsumed_nodes',
'--dlc', dlc_full_path]
subprocess.call(cmd)
print()
sys.stdout.flush()
return dlc_path
# print('INFO: Creating ' + DLC_QUANTIZED_FILENAME + ' quantized model')
# data_cropped_dir = os.path.join(os.path.join(model_dir, 'data'), 'cropped')
# cmd = ['snpe-dlc-quantize',
# '--input_dlc', os.path.join(dlc_dir, DLC_FILENAME),
# '--input_list', os.path.join(data_cropped_dir, RAW_LIST_FILE),
# '--output_dlc', os.path.join(dlc_dir, DLC_QUANTIZED_FILENAME)]
# subprocess.call(cmd)
def __get_img_raw(img_file):
img_file = os.path.abspath(img_file)
img = Image.open(img_file)
img_ndarray = np.array(img) # read it
if len(img_ndarray.shape) != 3:
raise RuntimeError('Image shape' + str(img_ndarray.shape))
if img_ndarray.shape[2] != 3:
raise RuntimeError('Require image with rgb but channel is %d' % img_ndarray.shape[2])
    # note: channel order is returned as rgb here; bgr reversal, if requested,
    # is applied later in __create_raw_img() via req_bgr_raw
    return img_ndarray
def __create_mean_raw(img_raw, mean_rgb):
if img_raw.shape[2] != 3:
raise RuntimeError('Require image with rgb but channel is %d' % img_raw.shape[2])
img_dim = (img_raw.shape[0], img_raw.shape[1])
mean_raw_r = np.empty(img_dim)
mean_raw_r.fill(mean_rgb[0])
mean_raw_g = np.empty(img_dim)
mean_raw_g.fill(mean_rgb[1])
mean_raw_b = np.empty(img_dim)
mean_raw_b.fill(mean_rgb[2])
# create with c, h, w shape first
tmp_transpose_dim = (img_raw.shape[2], img_raw.shape[0], img_raw.shape[1])
mean_raw = np.empty(tmp_transpose_dim)
mean_raw[0] = mean_raw_r
mean_raw[1] = mean_raw_g
mean_raw[2] = mean_raw_b
# back to h, w, c
mean_raw = np.transpose(mean_raw, (1, 2, 0))
return mean_raw.astype(np.float32)
def __create_raw_img(img_file, mean_rgb, div, req_bgr_raw, save_uint8):
img_raw = __get_img_raw(img_file)
mean_raw = __create_mean_raw(img_raw, mean_rgb)
snpe_raw = img_raw - mean_raw
snpe_raw = snpe_raw.astype(np.float32)
# scalar data divide
snpe_raw /= div
if req_bgr_raw:
snpe_raw = snpe_raw[..., ::-1]
if save_uint8:
snpe_raw = snpe_raw.astype(np.uint8)
else:
snpe_raw = snpe_raw.astype(np.float32)
img_file = os.path.abspath(img_file)
filename, ext = os.path.splitext(img_file)
snpe_raw_filename = filename
snpe_raw_filename += '.raw'
snpe_raw.tofile(snpe_raw_filename)
return 0
def __resize_square_to_jpg(src, dst, size):
src_img = Image.open(src)
# If black and white image, convert to rgb (all 3 channels the same)
    if len(np.shape(src_img)) == 2:
        src_img = src_img.convert(mode='RGB')
# center crop to square
width, height = src_img.size
short_dim = min(height, width)
crop_coord = (
(width - short_dim) / 2,
(height - short_dim) / 2,
(width + short_dim) / 2,
(height + short_dim) / 2
)
img = src_img.crop(crop_coord)
# resize to alexnet size
dst_img = img.resize((size, size), Image.ANTIALIAS)
# save output - save determined from file extension
dst_img.save(dst)
return 0
def convert_img(src, dest, size):
print("converting images...")
for root, dirs, files in os.walk(src):
for jpgs in files:
src_image = os.path.join(root, jpgs)
if '.jpg' in src_image:
print(src_image)
dest_image = os.path.join(dest, jpgs)
__resize_square_to_jpg(src_image, dest_image, size)
for root, dirs, files in os.walk(dest):
for jpgs in files:
src_image = os.path.join(root, jpgs)
print(src_image)
mean_rgb = (128, 128, 128)
__create_raw_img(src_image, mean_rgb, 128, False, False)
def create_file_list(input_dir, output_filename, ext_pattern, print_out=True, rel_path=True):
input_dir = os.path.abspath(input_dir)
output_filename = os.path.abspath(output_filename)
output_dir = os.path.dirname(output_filename)
if not os.path.isdir(input_dir):
raise RuntimeError('input_dir %s is not a directory' % input_dir)
if not os.path.isdir(output_dir):
raise RuntimeError('output_filename %s directory does not exist' % output_dir)
glob_path = os.path.join(input_dir, ext_pattern)
file_list = glob.glob(glob_path)
if rel_path:
file_list = [os.path.relpath(file_path, output_dir) for file_path in file_list]
if len(file_list) <= 0:
if print_out:
print('no results with %s' % glob_path)
else:
with open(output_filename, 'w') as f:
f.write('\n'.join(file_list))
if print_out:
print('%s created listing %d files.' % (output_filename, len(file_list)))
def prepare_data_images(image_size, snpe_root):
# make a copy of the image files from the alex net model data dir
image_dir_relative_path = 'models/alexnet/data'
image_dir = os.path.join(snpe_root, image_dir_relative_path)
data_cropped_dir = os.path.join(image_dir, 'cropped_%s' % image_size)
raw_list = os.path.join(image_dir, 'target_raw_list_%s.txt' % image_size)
if not os.path.exists(raw_list):
os.makedirs(data_cropped_dir)
print('creating inception style raw image data')
convert_img(image_dir, data_cropped_dir, image_size)
print('Create file lists')
create_file_list(data_cropped_dir, raw_list, '*.raw')
print()
sys.stdout.flush()
return data_cropped_dir, raw_list
# generate bench config json file
def gen_config(dlc_path, input_list_file, input_data, processors_, runs):
name = os.path.splitext(os.path.basename(dlc_path))[0]
config = OrderedDict()
config['Name'] = name
config['HostRootPath'] = name
config['HostResultsDir'] = os.path.join(name, 'results')
config['DevicePath'] = '/data/local/tmp/snpebm'
config['Devices'] = ["123"]
config['Runs'] = runs
model = OrderedDict()
model['Name'] = name
model['Dlc'] = dlc_path
model['InputList'] = input_list_file
model['Data'] = [input_data]
config['Model'] = model
config['Runtimes'] = processors_
config['Measurements'] = ['timing'] # ['timing', 'mem']
return config
def write_config(config, save_path):
with open(save_path, 'w') as f:
json.dump(config, f, indent=4)
def check_processor_arg(processor_str):
default = "GPU,DSP,CPU,GPU_FP16"
processor_list = default.split(',')
parsed_processors = []
for p in processor_str.split(','):
if p not in processor_list:
print("please use either GPU, DSP or CPU or any combination of them, seperated by comma(',')")
print("e.g. -p GPU,DSP means running on GPU and DSP; -p CPU means only running on CPU")
exit(-1)
else:
parsed_processors.append(p)
return parsed_processors
"""
caution1: rename data/snpe-1.31.0.522 to data/snpe-1.31.0
caution2: manually change executable permission on the phone through adb:
adb shell "chmod a+x /data/local/tmp/snpebm/artifacts/arm-android-clang6.0/bin/snpe*"
python snpe/run_snpe.py --model data/resnet_v1_50/resnet_v1_50.frozen.pb --snpe_sdk data/snpe-1.31.0.zip 2>&1 | tee run_resnet50.log
(test for pip install tensorflow=1.14)
"""
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("-sdk", "--snpe_sdk", type=str, default="data/snpe-1.15.0.zip",
help="path to snpe sdk zip file")
parser.add_argument("-p", "--processors", type=check_processor_arg, default="GPU,DSP,CPU",
help="processor to use, use GPU,DSP,CPU or any combination of them (separated by comma)")
parser.add_argument("-n", "--runs", type=int, default=10,
help="number of times to repeat the run")
parser.add_argument("-ndk", "--android_ndk", type=str,
help="path to android ndk")
parser.add_argument("-m", "--model", type=str, default="data/mobilenet_v1/mobilenet_v1_1.0_224.frozen.pb",
help="frozen tensorflow model")
parser.add_argument("-s", "--image_size", type=int, default=224,
help="input image size")
parser.add_argument("-i", "--input_node", type=str, default='input',
help="input node name in the model")
parser.add_argument("-o", "--output_node", type=str, default='output',
help="output node name in the model")
parser.add_argument("-t", "--show_time", action='store_true',
help="show time in csv")
return parser.parse_args()
if __name__ == '__main__':
web_url = "https://developer.qualcomm.com/software/snapdragon-neural-processing-engine-ai"
tf_path = os.path.dirname(tf.__file__)
args = parse_args()
snpe_sdk_file = args.snpe_sdk
snpe_dir = os.path.dirname(snpe_sdk_file)
snpe_sdk_path = os.path.abspath(os.path.splitext(snpe_sdk_file)[0])
snpe_name = os.path.basename(snpe_sdk_path)
if not os.path.exists(snpe_sdk_file):
print("please download SNPE SDK from:", web_url)
exit(-1)
elif not os.path.exists(snpe_sdk_path):
print("extracting snpe to:", snpe_sdk_path, "...")
zp_ref = zp.ZipFile(snpe_sdk_file, 'r')
zp_ref.extractall(snpe_dir)
zp_ref.close()
print("snpe sdk extraction done.")
else:
print("found snpe sdk at:", snpe_sdk_path)
sys.stdout.flush()
print()
sys.stdout.flush()
ndk_path = os.environ.get("ANDROID_NDK", None) or args.android_ndk
if not ndk_path:
print("please set ndk path either by specify -ndk or set 'export ANDROID_NDK=path/to/android-ndk'")
exit(-1)
# may install pkg deps
if not os.path.exists('/tmp/{}_deps_checked'.format(snpe_name)):
# print("copying libs from ndk to snpe sdk...")
#
# shutil.copy('{}/sources/cxx-stl/gnu-libstdc++/4.9/libs/arm64-v8a/libgnustl_shared.so'.format(ndk_path),
# '{}/lib/aarch64-linux-gcc4.9'.format(snpe_sdk_path))
#
# shutil.copy('{}/sources/cxx-stl/gnu-libstdc++/4.9/libs/armeabi-v7a/libgnustl_shared.so'.format(ndk_path),
# '{}/lib/arm-android-gcc4.9'.format(snpe_sdk_path))
# print("gcc libs copied.")
# print()
# sys.stdout.flush()
print("checking package dependencies...")
check_cmd = 'yes | bash {}/bin/dependencies.sh'.format(snpe_sdk_path)
subprocess.call(check_cmd, shell=True)
print("checking python dependencies...")
check_cmd = 'yes | bash {}/bin/check_python_depends.sh'.format(snpe_sdk_path)
subprocess.call(check_cmd, shell=True)
for os_type in ["arm-android-gcc4.9", "arm-android-clang6.0", "x86_64-linux-clang"]:
bin_dir = "{}/bin/{}".format(snpe_sdk_path, os_type)
if not os.path.exists(bin_dir):
continue
for bin_file in os.listdir(bin_dir):
script_file_path = os.path.join("{}/bin/{}".format(snpe_sdk_path, os_type), bin_file)
print('set script:', script_file_path, ' to executable')
sys.stdout.flush()
st = os.stat(script_file_path)
os.chmod(script_file_path, st.st_mode | stat.S_IEXEC)
open('/tmp/{}_deps_checked'.format(snpe_name), 'a').close()
os.environ["SNPE_ROOT"] = snpe_sdk_path
py_path = os.environ.get("PYTHONPATH", "")
os.environ["PYTHONPATH"] = "{0}/lib/python:{1}".format(snpe_sdk_path, py_path)
os.environ["TENSORFLOW_HOME"] = tf_path
bin_path = os.environ.get("PATH", "")
os.environ["PATH"] = "{}/bin/x86_64-linux-clang:{}".format(snpe_sdk_path, bin_path)
model_file = args.model
if not os.path.exists(model_file):
print(model_file, "not exist!")
exit(-1)
convert_dlc_script = "{}/bin/x86_64-linux-clang/snpe-tensorflow-to-dlc".format(snpe_sdk_path)
dlc_file = convert_to_dlc(convert_dlc_script, model_file, snpe_sdk_path,
args.input_node, args.output_node, args.image_size)
data_dir, raw_file_list = prepare_data_images(args.image_size, snpe_sdk_path)
print('generating benchmark configuration...')
sys.stdout.flush()
config = gen_config(dlc_file, raw_file_list, data_dir, args.processors, args.runs)
model_name = os.path.splitext(os.path.split(model_file)[1])[0]
config_path = os.path.join('{}/benchmarks'.format(snpe_sdk_path), "{}.json".format(model_name))
write_config(config, config_path)
print('benchmark configuration generated.')
print()
sys.stdout.flush()
print('running benchmark on {}...'.format(' '.join(args.processors)))
print()
sys.stdout.flush()
bench_cmd = ['python', 'snpe_bench.py', '-c', config_path, '-a']
subprocess.call(bench_cmd, cwd='{}/benchmarks'.format(snpe_sdk_path))
stats_file = model_file.replace('.pb', '.csv')
shutil.copy('{0}/benchmarks/{1}/results/latest_results/benchmark_stats_{1}.csv'.format(snpe_sdk_path, model_name),
stats_file)
print('benchmark results saved to:', stats_file)
if args.show_time:
import csv
with open(stats_file, 'r') as f, open('{}.txt'.format(stats_file), 'w') as f2:
reader = csv.reader(f)
next(reader)
for row in reader:
if 'Total Inference Time' in row:
gpu_time = float(row[3])/1000
dsp_time = float(row[9])/1000
cpu_time = float(row[18])/1000
header = 'GPU, DSP, CPU'
print(header)
f2.write(header + '\n')
time_str = '{:4.2f}, {:4.2f}, {:4.2f}'.format(gpu_time, dsp_time, cpu_time)
print(time_str)
f2.write(time_str + '\n')
break
print('all done.')
| 2.140625
| 2
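For reference, the benchmark config that gen_config() writes has the following shape (the values below are illustrative; the real ones are derived from the command-line arguments and the SNPE SDK layout):

import json

example_config = {
    "Name": "mobilenet_v1_1.0_224.frozen",
    "HostRootPath": "mobilenet_v1_1.0_224.frozen",
    "HostResultsDir": "mobilenet_v1_1.0_224.frozen/results",
    "DevicePath": "/data/local/tmp/snpebm",
    "Devices": ["123"],
    "Runs": 10,
    "Model": {
        "Name": "mobilenet_v1_1.0_224.frozen",
        "Dlc": "models/mobilenet_v1_1.0_224.frozen.dlc",
        "InputList": "<path to target_raw_list_224.txt>",
        "Data": ["<path to cropped_224 image dir>"],
    },
    "Runtimes": ["GPU", "DSP", "CPU"],
    "Measurements": ["timing"],
}
print(json.dumps(example_config, indent=4))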
|
bin/yap_log.py
|
Novartis/yap
| 23
|
12781284
|
#!/usr/bin/env python
"""
Copyright 2014 Novartis Institutes for Biomedical Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import glob
import re
import yap_file_io
from subprocess import Popen, PIPE
import yap_workflow_dict as wd
def merge_multiproc_files(command, filename, barcode, err_log, stat_log):
""" Merges the temporary multiproc error files created. """
exit_str = Popen("cat " + err_log + "_multiproc_* | grep EXIT_CODE",
stdout=PIPE, shell=True).communicate()[0]
exit_code = 0
for i in exit_str.split('\n'):
m = re.match("EXIT_CODE: (.*)\n", i)
if m:
exit_code = exit_code + int(m.group(1))
if exit_code == 0:
yap_file_io.write_data(
"YAP_COMMAND: " +
command +
"\nINPUT FILE: " +
filename +
"\n",
stat_log)
os.system("cat " + err_log + "_multiproc_* >>" + stat_log)
yap_file_io.write_data("_" * 30 + "\n", stat_log)
os.system("rm " + err_log + "_multiproc_*")
else:
yap_file_io.write_data(
"YAP_COMMAND: " +
command +
"\nINPUT FILE: " +
filename +
"\n",
err_log)
os.system("cat " + err_log + "_multiproc_* >>" + err_log)
yap_file_io.write_data("_" * 30 + "\n", err_log)
os.system("rm " + err_log + "_multiproc_*")
def combine_temp_log(err_path, stat_path):
""" Combines the temporary log file directory and
merges them with the sample error and status logs. """
err_temp_list = glob.glob(err_path + "/*log_temp")
stat_temp_list = glob.glob(stat_path + "/*log_temp")
if len(err_temp_list) > 0:
for i in err_temp_list:
if os.path.isdir(i):
file_name = i.replace("log_temp", "err.log")
if len(glob.glob(i + "/*")):
os.system('cat ' + i + '/* >> ' + file_name)
os.system('rm ' + i + "/*")
if len(stat_temp_list) > 0:
for i in stat_temp_list:
if os.path.isdir(i):
file_name = i.replace("log_temp", "stat.log")
if len(glob.glob(i + "/*")):
os.system('cat ' + i + '/* >> ' + file_name)
os.system('rm ' + i + "/*")
def merge_tee_files(command, filename, err_log, stat_log):
""" Merges the temporary log files produced as a result of
the multiprocessing module. """
exit_str = Popen("cat " + err_log + "_yap_tee_* | grep EXIT_CODE",
stdout=PIPE, shell=True).communicate()[0]
exit_code = 0
for i in exit_str.split('\n'):
m = re.match("EXIT_CODE: (.*)\n", i)
if m:
exit_code = exit_code + int(m.group(1))
if exit_code == 0:
yap_file_io.write_data("YAP_COMMAND: " + command + "\nINPUT_FILES: " +
filename + "\nYAP_STATUS_MSG: ", stat_log)
os.system("cat " + err_log + "_yap_tee_* >>" + stat_log)
yap_file_io.write_data("_" * 30 + "\n", stat_log)
os.system("rm " + err_log + "_yap_tee_*")
else:
yap_file_io.write_data("YAP_COMMAND: " + command + "\nINPUT_FILES: " +
filename + "\nYAP_ERROR_MSG:", err_log)
os.system("cat " + err_log + "_yap_tee_* >>" + err_log)
yap_file_io.write_data("_" * 30 + "\n", err_log)
os.system("rm " + err_log + "_yap_tee_*")
def write_log(command, file, exit_code, std_err, err_log, stat_log):
""" Checks if the command has succeeded/failed and
logs it to the status/error log respectively. """
cmd_sep = "_" * 30 + "\n"
if str(exit_code) == '0':
err_str = "YAP_COMMAND: %s\nINPUT_FILES: %s\nEXIT_CODE: %s\nYAP_STATUS_MSG: %s\n" % (
command, file, exit_code, std_err)
yap_file_io.write_data(err_str + cmd_sep, stat_log)
else:
err_str = "YAP_COMMAND: %s\nINPUT_FILES: %s\nEXIT_CODE: %s\nYAP_ERROR_MSG: %s\n" % (
command, file, exit_code, std_err)
yap_file_io.write_data(err_str + cmd_sep, err_log)
def pass_fail_dict(input_arr, dict):
""" Called within the pass_fail_matrix() function. Accepts
the contents of a logfile as a list as input along with the sample_dict
Gets the EXIT_CODE, err_log doesn't contain exit codes for a
successful YAP run. """
key = ""
stage = "False"
stage_dict = {'PREPROCESS': 'PREPROCESS',
'FASTQC': 'PREPROCESS',
'FASTQSCREEN': 'PREPROCESS',
'ALIGNMENT': 'ALIGNMENT',
'MERGE ALIGNMENT': 'ALIGNMENT',
'REGROUP': 'ALIGNMENT',
'POSTPROCESS': 'POSTPROCESS',
'CUFFDIFF': 'CUFFDIFF',
'CUFFMERGE': 'CUFFMERGE',
'CUFFCOMPARE': 'CUFFCOMPARE',
'MACS2': 'MACS2'}
for i in range(len(input_arr)):
# Pattern to match the start of a command
start = re.match('\*{50}(.*) STARTED(.*)', input_arr[i])
# Pattern to match the end of a command
finish = re.match('\*{50}(.*) FINISHED(.*)', input_arr[i])
exit_code = re.match("EXIT_CODE: (.*)", input_arr[i])
if start and stage == "False":
key = stage_dict[start.group(1)]
stage = "True"
elif exit_code and stage == "True":
dict[key] = 'FAIL'
elif finish:
if key in dict.keys():
pass
else:
dict[key] = 'PASS'
stage = "False"
elif stage == "False":
pass
def print_matrix(dict, file):
""" Called within the pass_fail_matrix() function.
Input dict with sample name/command name if applicable with stagewise
fail/pass dict and write path as input.
prints the stagewise matrix to file. """
normal_stage_arr = ['PREPROCESS', 'ALIGNMENT', 'POSTPROCESS']
# Stages of the matrix/sample
compare_stage_arr = ['CUFFDIFF', 'CUFFCOMPARE', 'CUFFMERGE', 'MACS2']
# Multi-sample hence printed after.
head_str = '\t' + '\t'.join(normal_stage_arr) + '\n'
normal_str = ""
compare_str = ""
for i in sorted(dict.keys()):
if sorted(dict[i].keys()) == sorted(normal_stage_arr):
normal_str += i + "\t"
for j in range(len(normal_stage_arr)):
if j != len(normal_stage_arr) - 1:
normal_str += dict[i][normal_stage_arr[j]] + "\t"
else:
normal_str += dict[i][normal_stage_arr[j]] + "\n"
elif set(dict[i].keys()).issubset(set(compare_stage_arr)):
for j in dict[i].keys():
compare_str += i + ": " + dict[i][j] + "\n"
yap_file_io.write_data(head_str + normal_str, file)
if compare_str != '':
yap_file_io.write_data("\n\n" + compare_str, file)
def pass_fail_matrix():
""" Constructs the stagewise pass/fail matrix from the error_log. """
err_log = wd.err_log_path
matrix_path, junk = os.path.split(err_log)
matrix_path += "/yap_pass_fail_matrix.log"
sample_log_dict = {}
pass_fail = {}
for i in glob.glob(err_log + "/*"):
path, file = os.path.split(i)
file, ext = os.path.splitext(file)
sample_log_dict[file.rstrip("_err")] = yap_file_io.read_file(i) # len-1 chunks
for i in sample_log_dict.keys():
pass_fail_dict(sample_log_dict[i], pass_fail)
if not set(pass_fail.keys()).issubset(set(['CUFFDIFF', 'CUFFCOMPARE', 'CUFFMERGE', 'MACS2'])):
if wd.run_preprocess_analysis == "yes" and pass_fail.get("PREPROCESS") is None:
pass_fail["PREPROCESS"] = "FAIL"
elif wd.run_preprocess_analysis == "no":
pass_fail["PREPROCESS"] = "N/A"
if wd.run_reference_alignment == "yes" and pass_fail.get("ALIGNMENT") is None:
pass_fail["ALIGNMENT"] = "FAIL"
elif wd.run_reference_alignment == "no":
pass_fail["ALIGNMENT"] = "N/A"
if wd.run_postprocess_analysis == "yes" and pass_fail.get("POSTPROCESS") is None:
pass_fail["POSTPROCESS"] = "FAIL"
elif wd.run_postprocess_analysis == "no":
pass_fail["POSTPROCESS"] = "N/A"
if pass_fail["PREPROCESS"] == "FAIL":
pass_fail["ALIGNMENT"] = "FAIL"
pass_fail["POSTPROCESS"] = "FAIL"
sample_log_dict[i] = pass_fail
pass_fail = {}
print_matrix(sample_log_dict, matrix_path)
def sample_filter():
""" Filters failed samples after the alignment step. """
ignore_list = []
inp_files_list=[]
list_of_samples=[]
list_of_samples_to_compare=[]
for i in wd.inp_files_list:
pass_fail = {}
err_log = wd.err_log_path + "/" + i[2] + "_err.log"
if os.path.exists(err_log):
pass_fail_dict(yap_file_io.read_file(err_log), pass_fail)
if 'FAIL' in pass_fail.itervalues():
ignore_list.append(i)
if len(ignore_list) != 0:
list_of_samples_to_compare = remove_corrupted_samples(wd.list_of_samples_to_compare,ignore_list)
list_of_samples = remove_corrupted_samples(wd.list_of_samples, ignore_list)
inp_files_list = [i for i in wd.inp_files_list if i not in ignore_list]
return inp_files_list,list_of_samples,list_of_samples_to_compare,ignore_list
else:
return wd.inp_files_list, wd.list_of_samples, wd.list_of_samples_to_compare,ignore_list
def remove_corrupted_samples(command_dict, ignore_list):
""" Called withing the sample_filter() function
Removes samples containing errors. Takes the
command_dict and the list of samples to be ignored as input. """
if len(ignore_list) == 0:
return command_dict
new_dict = command_dict
ignore_file_list = map((lambda x: x[0]), ignore_list)
for key in command_dict:
current_groups = command_dict[key][1] # list of lines in file
new_groups = []
for group in current_groups:
merged_sets = merge_group_sets(group)
corrupt_samples = filter(
(lambda x: x in ignore_file_list), merged_sets)
if len(corrupt_samples) > 0:
continue
else:
new_groups.append(group)
new_dict[key][1] = new_groups
return new_dict
def merge_group_sets(group):
""" Called in the remove_corrupted_samples() function.
'extends' group to merged_sets. """
merged_set = []
for i in range(0, len(group)):
merged_set.extend(group[i])
return merged_set
| 2.3125
| 2
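A tiny synthetic example of the log format pass_fail_dict() parses (it assumes the module above is importable as yap_log, and the banner text is illustrative apart from the 50 asterisks the regexes require): a stage is marked FAIL only when an EXIT_CODE line falls between its STARTED and FINISHED banners.

from yap_log import pass_fail_dict   # assumes the module above is on the path

banner = '*' * 50
log_lines = [
    banner + 'PREPROCESS STARTED for sample_1',
    banner + 'PREPROCESS FINISHED for sample_1',
    banner + 'ALIGNMENT STARTED for sample_1',
    'EXIT_CODE: 1',
    banner + 'ALIGNMENT FINISHED for sample_1',
]
result = {}
pass_fail_dict(log_lines, result)
print(result)   # expected: {'PREPROCESS': 'PASS', 'ALIGNMENT': 'FAIL'}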
|
act/workers/hybrid_analysis_feed.py
|
pstray/act-workers
| 0
|
12781285
|
#!/usr/bin/env python3
"""hybrid-analysis.com worker for the ACT platform
Copyright 2021 the ACT project <<EMAIL>>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
"""
import argparse
import contextlib
import json
import sys
import traceback
import warnings
from functools import partialmethod
from logging import error, info
from typing import Any, Dict, Generator, List, Optional, Text
import requests
import act.api
from act.api.libs import cli
from act.workers.libs import worker
def parseargs() -> argparse.ArgumentParser:
"""Extract command lines argument"""
parser = worker.parseargs("ACT hybrid-analysis.com Client")
parser.add_argument(
"--feed", action="store_true", help="Download the public feed only, no lookup"
)
parser.add_argument(
"--apikey", default="", help="community apikey for hybrid-analysis.com"
)
parser.add_argument(
"--user-agent", default="Falcon Sandbox", help="User agent while talking to API"
)
parser.add_argument(
"--no-check-certificate",
action="store_true",
help="Do not check SSL certificate",
)
return parser
def download_feed(
user_agent: Text, proxies: Optional[Dict[Text, Text]], verify_ssl: bool = True
) -> Dict[Text, Any]:
"""Download the public feed and return a dictionary"""
url = "https://hybrid-analysis.com/feed?json"
with ssl_verification(verify=verify_ssl):
headers = {"User-Agent": user_agent}
response = requests.get(url, proxies=proxies, headers=headers)
if response.status_code != 200:
raise CommunicationError(
f"hybrid_analysis_feed.download_feed() could not download public feed, "
f"error calling {url}: Status = {response.status_code}"
)
try:
data: Dict[Text, Any] = response.json()
except json.decoder.JSONDecodeError as err:
raise CommunicationError(
f"hybrid_analysis_feed.download_feed() could not load public feed, "
f"error decoding json result from {url}: {err}"
)
return data
def handle_feed(
actapi: act.api.Act,
user_agent: Text,
proxies: Optional[Dict[Text, Text]] = None,
verify_ssl: bool = True,
output_format: Text = "json",
) -> None:
"""Download, parse and provide facts from the public feed of hybrid-analysis.com"""
feed = download_feed(user_agent, proxies, verify_ssl)
feeds_facts: List[act.api.fact.Fact] = []
for report in feed["data"]:
if not (report.get("isinteresting", False) or report.get("threatlevel", 0)):
continue
# store data if threatlevel > 0 or report is interesting
if "sha256" not in report:
continue
feeds_facts += handle_report(actapi, report)
for fact in feeds_facts:
act.api.helpers.handle_fact(fact, output_format=output_format)
def handle_hosts(
actapi: act.api.Act, content: Text, hosts: List[Text]
) -> List[act.api.fact.Fact]:
"""handle the hosts part of a hybrid-analysis report"""
feeds_facts: List[act.api.fact.Fact] = []
for host in hosts:
(ip_type, ip) = act.api.helpers.ip_obj(host)
chain = []
chain.append(
actapi.fact("connectsTo").source("content", content).destination("uri", "*")
)
chain.append(
actapi.fact("resolvesTo").source("fqdn", "*").destination(ip_type, ip)
)
chain.append(
actapi.fact("componentOf").source("fqdn", "*").destination("uri", "*")
)
feeds_facts += act.api.fact.fact_chain(*chain)
return feeds_facts
def handle_domains(
actapi: act.api.Act, content: Text, domains: List[Text]
) -> List[act.api.fact.Fact]:
"""Handle the domains part of a hybrid-analysis report"""
feeds_facts: List[act.api.fact.Fact] = []
for domain in domains:
chain = []
chain.append(
actapi.fact("connectsTo").source("content", content).destination("uri", "*")
)
chain.append(
actapi.fact("componentOf").source("fqdn", domain).destination("uri", "*")
)
feeds_facts += act.api.fact.fact_chain(*chain)
return feeds_facts
def handle_extracted_files(
actapi: act.api.Act, content: Text, extracted_files: List[Dict]
) -> List[act.api.fact.Fact]:
"""Handle the extracted_files part of a hybrid_analysis report"""
feeds_facts: List[act.api.fact.Fact] = []
for file in extracted_files:
chain = []
if "sha256" not in file:
continue
if not file["file_path"]:
info(f"{file} is missing file_path using name instead")
path = file["file_path"] if file["file_path"] else file["name"]
chain.append(
actapi.fact("componentOf").source("path", path).destination("uri", "*")
)
chain.append(
actapi.fact("at").source("content", file["sha256"]).destination("uri", "*")
)
feeds_facts += act.api.fact.fact_chain(*chain)
for hash_type in ["md5", "sha1", "sha256"]:
feeds_facts.append(
actapi.fact("represents")
.source("hash", file[hash_type])
.destination("content", file["sha256"])
)
feeds_facts.append(
actapi.fact("category", hash_type).source("hash", file[hash_type])
)
if (
content != file["sha256"]
): # the act platform does not accept same object on source and destination for write
feeds_facts.append(
actapi.fact("writes")
.source("content", content)
.destination("content", file["sha256"])
)
return feeds_facts
def handle_classification_tags(
actapi: act.api.Act, content: Text, classification_tags: List[Text]
) -> List[act.api.fact.Fact]:
"""handle the classification_tags part or a hybrid_analysis report"""
feeds_facts: List[act.api.fact.Fact] = []
for tag in classification_tags:
feeds_facts.append(
actapi.fact("classifiedAs")
.source("content", content)
.destination("tool", tag)
)
return feeds_facts
def handle_mitre_attcks(
actapi: act.api.Act, content: Text, mitre_attcks: List[Dict]
) -> List[act.api.fact.Fact]:
"""Handle the MITRE Att&ck part of the hybrid analysis report"""
feeds_facts: List[act.api.fact.Fact] = []
for attck in mitre_attcks:
chain = []
chain.append(
actapi.fact("classifiedAs")
.source("content", content)
.destination("tool", "*")
)
chain.append(
actapi.fact("implements")
.source("tool", "*")
.destination("technique", attck["technique"])
)
feeds_facts += act.api.fact.fact_chain(*chain)
return feeds_facts
def handle_process_list(
actapi: act.api.Act, content: Text, process_list: List[Dict]
) -> List[act.api.fact.Fact]:
"""Handle the process list part of the hybrid analysis report"""
feeds_facts: List[act.api.fact.Fact] = []
for proc in process_list:
chain = []
path = proc["normalizedpath"] if "normalizedpath" in proc else proc["name"]
chain.append(
actapi.fact("executes").source("content", content).destination("uri", "*")
)
chain.append(
actapi.fact("componentOf").source("path", path).destination("uri", "*")
)
feeds_facts += act.api.fact.fact_chain(*chain)
return feeds_facts
def handle_report(
actapi: act.api.Act, report: Dict[Text, Any]
) -> List[act.api.fact.Fact]:
"""Create facts from a report"""
feeds_facts: List[act.api.fact.Fact] = []
content = report["sha256"]
for hash_type in ["md5", "sha1", "sha256", "ssdeep", "imphash", "sha512"]:
if (
hash_type not in report
or not report[hash_type]
or report[hash_type] == "Unknown"
):
info(f"{hash_type} not set for content {content}")
continue
feeds_facts.append(
actapi.fact("represents")
.source("hash", report[hash_type])
.destination("content", content)
)
feeds_facts.append(
actapi.fact("category", hash_type).source("hash", report[hash_type])
)
feeds_facts += handle_hosts(actapi, content, report.get("hosts", []))
feeds_facts += handle_domains(actapi, content, report.get("domains", []))
feeds_facts += handle_extracted_files(
actapi, content, report.get("extracted_files", [])
)
feeds_facts += handle_classification_tags(
actapi, content, report.get("classification_tags", [])
)
# DISABLED DUE TO EXCESSIVE FACT CHAIN OBJECT. TO BE DISCUSSED
# feeds_facts += handle_mitre_attcks(actapi, content, report.get("mitre_attcks", []))
feeds_facts += handle_process_list(actapi, content, report.get("process_list", []))
return feeds_facts
def handle_hash(
actapi: act.api.Act,
apikey: Text,
hashdigest: Text,
user_agent: Text,
proxies: Optional[Dict[Text, Text]] = None,
verify_ssl: bool = True,
output_format: Text = "json",
) -> None:
"""Download, parse and provide facts from the public feed of hybrid-analysis.com"""
data = search_hash(apikey, hashdigest, user_agent, proxies, verify_ssl)
for report in data:
for fact in handle_report(actapi, report):
act.api.helpers.handle_fact(fact, output_format=output_format)
def search_hash(
apikey: Text,
hashdigest: Text,
user_agent: Text,
proxies: Optional[Dict[Text, Text]] = None,
verify_ssl: bool = True,
) -> List[Dict[Text, Any]]:
"""Search the hybrid-analysis api for a specific hash"""
url = "https://www.hybrid-analysis.com/api/v2/search/hash"
with ssl_verification(verify=verify_ssl):
headers = {
"User-Agent": user_agent,
"accept": "application/json",
"api-key": apikey,
"Content-Type": "application/x-www-form-urlencoded",
}
form_data = {"hash": hashdigest}
response = requests.post(url, proxies=proxies, headers=headers, data=form_data)
if response.status_code != 200:
print(response.text)
raise CommunicationError(
f"hybrid_analysis_feed.search_hash() could not search community API, "
f"error calling {url}: Status = {response.status_code}"
)
try:
data: List[Dict[Text, Any]] = response.json()
except json.decoder.JSONDecodeError as err:
raise CommunicationError(
f"hybrid_analysis_feed.search_hash() could not load search result, "
f"error decoding json result from {url}: {err}"
)
return data
def main() -> None:
"""main function"""
# Look for default ini file in "/etc/actworkers.ini" and ~/config/actworkers/actworkers.ini
# (or replace .config with $XDG_CONFIG_DIR if set)
args = cli.handle_args(parseargs())
actapi = worker.init_act(args)
# if not args.apikey:
# cli.fatal("You must specify --apikey on command line or in config file")
proxies = (
{"http": args.proxy_string, "https": args.proxy_string}
if args.proxy_string
else None
)
params = {
"actapi": actapi,
"user_agent": args.user_agent,
"proxies": proxies,
"verify_ssl": args.no_check_certificate,
"output_format": args.output_format,
}
if args.feed:
handle_feed(**params)
else:
params["apikey"] = args.apikey
for line in sys.stdin:
params["hashdigest"] = line.strip()
handle_hash(**params)
@contextlib.contextmanager
def ssl_verification(verify: bool = True) -> Generator[None, None, None]:
"""Monkey patch request to manage ssl verification. Can be used 'around' code
that uses requests internally"""
old_request = requests.Session.request
requests.Session.request = partialmethod(old_request, verify=verify) # type: ignore
warnings.filterwarnings("ignore", "Unverified HTTPS request")
yield
warnings.resetwarnings()
requests.Session.request = old_request # type: ignore
class CommunicationError(Exception):
"""CommunicationError is used to gather all communication errors into one"""
...
def main_log_error() -> None:
"""Main entry point, catching and logging errors"""
try:
main()
except Exception:
error("Unhandled exception: {}".format(traceback.format_exc()))
raise
if __name__ == "__main__":
main_log_error()
| 2.046875
| 2
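The ssl_verification() context manager above restores requests.Session.request only after a clean yield; a sketch of the same monkey-patch pattern (not the project's code) that restores the original method even if the wrapped block raises:

import contextlib
import warnings
from functools import partialmethod

import requests

@contextlib.contextmanager
def ssl_verification_safe(verify=True):
    original = requests.Session.request
    requests.Session.request = partialmethod(original, verify=verify)  # type: ignore
    warnings.filterwarnings("ignore", "Unverified HTTPS request")
    try:
        yield
    finally:
        warnings.resetwarnings()
        requests.Session.request = original  # type: ignore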
|
tests/test_licensing_args.py
|
rackerlabs/openstack-usage-report
| 7
|
12781286
|
<gh_stars>1-10
import unittest
from usage.args.licensing import parser
class TestLicensingArgs(unittest.TestCase):
"""Tests the arg parser."""
def test_config_file(self):
test_args = ['somefile']
args = parser.parse_args(test_args)
self.assertEquals('/etc/usage/usage.yaml', args.config_file)
test_args = ['somefile', '--config-file', 'somefile']
args = parser.parse_args(test_args)
self.assertEquals('somefile', args.config_file)
def test_log_level(self):
test_args = ['somefile']
args = parser.parse_args(test_args)
self.assertEquals('info', args.log_level)
test_args = ['somefile', '--log-level', 'debug']
args = parser.parse_args(test_args)
self.assertEquals('debug', args.log_level)
def test_definition_file(self):
test_args = ['somefile']
args = parser.parse_args(test_args)
self.assertEquals('/etc/usage/licensing.yaml', args.definition_file)
test_args = ['--definition-file', 'thefile', 'somefile']
args = parser.parse_args(test_args)
self.assertEquals('thefile', args.definition_file)
| 2.796875
| 3
|
tests/test_base.py
|
cluster311/sss-beneficiarios
| 0
|
12781287
|
from sss_beneficiarios_hospitales.data import DataBeneficiariosSSSHospital
def test_query_afiliado():
dbh = DataBeneficiariosSSSHospital(user='FAKE', password='<PASSWORD>')
res = dbh.query(dni='full-afiliado')
assert res['ok']
data = res['resultados']
assert data['title'] == "Superintendencia de Servicios de Salud"
assert data["afiliado"]
assert len(data['tablas']) == 2
for d in data['tablas']:
assert "name" in d
is_afiliacion = "AFILIACION" in [v for k, v in d.items() if k == 'name']
is_persona = "AFILIADO" in [v for k, v in d.items() if k == 'name']
assert is_afiliacion or is_persona
if is_afiliacion:
assert d['data']["Parentesco"] == "TITULAR"
assert d['data']["CUIL"] == "27-1XXXXX3-6"
assert d['data']["Tipo de documento"] == "DOCUMENTO UNICO"
assert d['data']["N\u00famero de documento"] == "1XXXXX3"
assert d['data']["Apellido y nombre"] == "FXXXL MARIA"
assert d['data']["Provincia"] == "CORDOBA"
assert d['data']["Fecha de nacimiento"] == "09-09-1961"
assert d['data']["Sexo"] == "Femenino"
if is_persona:
assert d['data']["CUIL titular"] == "27-1XXXXX3-6"
assert d['data']["CUIT de empleador"] == "33-63761744-9"
assert d['data']["CUIL titular"] == "27-1XXXXX3-6"
assert d['data']["Tipo de beneficiario"] == "JUBILADOS Y PENSIONADOS DE PAMI"
assert d['data']["C\u00f3digo de Obra Social"] == "5-0080-7"
assert d['data']["Denominaci\u00f3n Obra Social"] == "INSTITUTO NACIONAL DE SERVICIOS SOCIALES PARA JUBILADOS Y PENSIONADOS"
assert d['data']["Fecha Alta Obra Social"] == "01-08-2012"
def test_query_afiliado_con_empleador():
dbh = DataBeneficiariosSSSHospital(user='FAKE', password='<PASSWORD>')
res = dbh.query(dni='full-afiliado-con-empleador')
assert res['ok']
data = res['resultados']
assert data['title'] == "Superintendencia de Servicios de Salud"
assert data["afiliado"]
for d in data['tablas']:
assert "name" in d
is_afiliacion = "AFILIACION" in [v for k, v in d.items() if k == 'name']
is_persona = "AFILIADO" in [v for k, v in d.items() if k == 'name']
is_declarado = "DECLARADO_POR_EMPLEADOR" in [v for k, v in d.items() if k == 'name']
assert is_afiliacion or is_persona or is_declarado
if is_afiliacion:
assert d['data']["Parentesco"] == "TITULAR"
assert d['data']["CUIL"] == "27-1XXXXX3-6"
assert d['data']["Tipo de documento"] == "DOCUMENTO UNICO"
assert d['data']["N\u00famero de documento"] == "1XXXXX3"
assert d['data']["Apellido y nombre"] == "<NAME>"
assert d['data']["Provincia"] == "CAPITAL FEDERAL"
assert d['data']["Fecha de nacimiento"] == "25-05-1977"
assert d['data']["Sexo"] == "Masculino"
if is_persona:
assert d['data']["CUIL titular"] == "27-1XXXXX3-6"
assert d['data']["CUIT de empleador"] == "30-70818659-3"
assert d['data']["CUIL titular"] == "27-1XXXXX3-6"
assert d['data']["Tipo de beneficiario"] == "RELACION DE DEPENDENCIA"
assert d['data']["C\u00f3digo de Obra Social"] == "4-0080-0"
assert d['data']["Denominaci\u00f3n Obra Social"] == "OBRA SOCIAL DE EJECUTIVOS Y DEL PERSONAL DE DIRECCION DE EMPRESAS"
assert d['data']["Fecha Alta Obra Social"] == "01-06-1931"
if is_declarado:
assert d['data']["Tipo Beneficiario Declarado"] == "RELACION DE DEPENDENCIA (DDJJ SIJP)"
assert d['data']["Ultimo Per\u00edodo Declarado"] == "02-2020"
def test_query_no_afiliado():
dbh = DataBeneficiariosSSSHospital(user='FAKE', password='<PASSWORD>')
res = dbh.query(dni='full-sin-datos')
assert res['ok']
data = res['resultados']
assert data['title'] == "Superintendencia de Servicios de Salud"
assert data["afiliado"] == False
for d in data['tablas']:
assert "name" in d
is_persona = "NO_AFILIADO" in [v for k, v in d.items() if k == 'name']
assert is_persona
assert d['data']["Apellido y Nombre"] == "BXXXXXXXXS FXXXL <NAME>"
assert d['data']["Tipo Documento"] == "DU"
assert d['data']["Nro Documento"] == "2XXXXX1"
assert d['data']["CUIL"] == "202XXXXX18"
| 2.671875
| 3
|
hot_word.py
|
andyrenpanlong/soubu_app
| 0
|
12781288
|
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
from pymongo import MongoClient
import requests
from cookielib import CookieJar
import json
import time
import urllib
import urllib2
import ssl
import sys
from soubu_setting import headers
reload(sys)
sys.setdefaultencoding('utf8')
# Scrape data from the Soubu app (isoubu.com)
requests = requests.Session()
ssl._create_default_https_context = ssl._create_unverified_context
headers = headers
def get_soubu_hot_word(url):
loadData = "params=%7B%22type%22%3A2%2C%22requestId%22%3A%2215204062005412071796704%22%2C%22serverId%22%3A%22%22%7D"
response = requests.post(url, data=loadData, headers=headers, verify=False)
data_obj = json.loads(response.text)
print data_obj, data_obj["msg"]
print data_obj["result"]
print data_obj["sec"]
print data_obj["status"]
data = data_obj["result"]["data"]
soubu_hot_word_save(data)
def soubu_hot_word_save(data):
client = MongoClient('127.0.0.1', 27017)
db = client.soubu
db_name = "hot_word"
db[db_name].insert(data)
if __name__ == '__main__':
url = "https://api.isoubu.com/sbapi/Api/Index/get_hot_word"
get_soubu_hot_word(url)
| 2.625
| 3
|
Mundo 3/teste02.py
|
RafaelSdm/Curso-de-Python
| 1
|
12781289
|
def soma(a,b):
total = a +b
print(total)
def contador (*num):
for c in num:
print(f" {c} ",end='')
print()
for c in num:
tamanho = len(num)
soma(9,2)
soma(6,8)
soma(3,5)
contador(2,4,5)
contador(3,5,6,7,8,)
contador(1,2)
| 3.6875
| 4
|
pyservices/generated/users/user_pb2.py
|
makkalot/eskit-example-microservice
| 0
|
12781290
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: users/user.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from common import originator_pb2 as common_dot_originator__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='users/user.proto',
package='contracts.users',
syntax='proto3',
serialized_options=_b('Z1github.com/makkalot/eskit/generated/grpc/go/users'),
serialized_pb=_b('\n\x10users/user.proto\x12\x0f\x63ontracts.users\x1a\x17\x63ommon/originator.proto\"\x92\x01\n\x04User\x12\x30\n\noriginator\x18\x01 \x01(\x0b\x32\x1c.contracts.common.Originator\x12\r\n\x05\x65mail\x18\x02 \x01(\t\x12\x12\n\nfirst_name\x18\x03 \x01(\t\x12\x11\n\tlast_name\x18\x04 \x01(\t\x12\x0e\n\x06\x61\x63tive\x18\x05 \x01(\x08\x12\x12\n\nworkspaces\x18\x06 \x03(\tB3Z1github.com/makkalot/eskit/generated/grpc/go/usersb\x06proto3')
,
dependencies=[common_dot_originator__pb2.DESCRIPTOR,])
_USER = _descriptor.Descriptor(
name='User',
full_name='contracts.users.User',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='originator', full_name='contracts.users.User.originator', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='email', full_name='contracts.users.User.email', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='first_name', full_name='contracts.users.User.first_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='last_name', full_name='contracts.users.User.last_name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='active', full_name='contracts.users.User.active', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='workspaces', full_name='contracts.users.User.workspaces', index=5,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=63,
serialized_end=209,
)
_USER.fields_by_name['originator'].message_type = common_dot_originator__pb2._ORIGINATOR
DESCRIPTOR.message_types_by_name['User'] = _USER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
User = _reflection.GeneratedProtocolMessageType('User', (_message.Message,), dict(
DESCRIPTOR = _USER,
__module__ = 'users.user_pb2'
# @@protoc_insertion_point(class_scope:contracts.users.User)
))
_sym_db.RegisterMessage(User)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 1.148438
| 1
|
src/948-bag_of_token.py
|
dennislblog/coding
| 0
|
12781291
|
<filename>src/948-bag_of_token.py
class Solution(object):
def bagOfTokensScore(self, tokens, P):
"""
:type tokens: List[int]
:type P: int
:rtype: int
        @ solution: use power to buy the first credit, trade one credit for 400 power; together with the leftover 100 that makes 500, enough to buy the middle 200 and 300 credits
@ example: tokens = [100,200,300,400], Power = 200 ==> return 2
---------------------------------------------------------------
"""
        # 1. Sort first (the most critical step); whenever power runs short, trade 1 token for the largest remaining power
        # 2. Since the goal is to maximize the score (tokens), spend power on the cheapest tokens first
tokens.sort()
n = len(tokens); i, j = 0, n-1
score, res = 0, 0
while i <= j:
if P >= tokens[i]:
P -= tokens[i]
i += 1; score += 1
                # This step is crucial: the score may temporarily shrink so that it can grow larger later
res = max(res, score)
elif score > 0:
P += tokens[j]
j -= 1; score -= 1
else:
break
return res
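# A minimal usage sketch (not part of the original solution), reproducing the
# example from the docstring above: tokens = [100, 200, 300, 400], P = 200.
if __name__ == "__main__":
    assert Solution().bagOfTokensScore([100, 200, 300, 400], 200) == 2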
| 3.375
| 3
|
CNN network.py
|
shohamda/deep-learning
| 0
|
12781292
|
<reponame>shohamda/deep-learning
# -*- coding: utf-8 -*-
"""<NAME> 204287635 <NAME> 314767674 - DL Assignment 2
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1hUnIpK7OVBXyQ1r-V6VKu_XqQLcy9UdX
**Assignment # 2, CNN over Fashion MNIST**
In this assignment you are requested to build a convolutional network and train it over the Fashion MNIST data, which is a collection of 28X28 black and white images, classified into 10 different classes of clothing items. For more information about Fashion MNIST you may refer to:
https://github.com/zalandoresearch/fashion-mnist
"""
# Loading Fashion MNIST
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import torch.nn.functional as F
import matplotlib.pyplot as plt
trainset = torchvision.datasets.FashionMNIST(root='./data', train=True,
download=True, transform=transforms.ToTensor())
testset = torchvision.datasets.FashionMNIST(root='./data', train=False,
download=True, transform=transforms.ToTensor())
classes = ('T-shirt/top', 'Trouser', 'Pullover', 'Dress',
'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot')
# Use dataloaders for train and test (batch size is 4)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
shuffle=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
shuffle=False)
# The images are of 1, 28, 28 size (only one black-white channel)
trainset[0][0].shape
"""# **Part 1**: Implementing a CNN network for Fashion MNIST
Here is what you need to do; you are encouraged to look at notebook "DL Notebook 9 - CIFAR CNN" when trying to complete the next steps.
Write a network CNNFMnist that has the following architecture:
* Convolution with 10 3X3 filters
* Relu
* Max pool with 2X2
* Convolution with 5 3X3 filters
* Relu
* Convolution with 16 3X3 filters
* Relu
* Max pool with 2X2
* Linear, output size 128
* Relu
* Linear, output size 64
* Relu
* Linear, output size 10
"""
trainset[0]
class CNNFMnist(nn.Module):
def __init__(self):
super(CNNFMnist, self).__init__()
self.conv1 = nn.Conv2d(1, 10, 3)
self.conv2 = nn.Conv2d(10, 5, 3)
self.conv3 = nn.Conv2d(5, 16, 3)
#self.fc1 = nn.Linear(16 * 3 * 3, 128)
self.fc1 = nn.Linear(256, 128)
self.fc2 = nn.Linear(128, 64)
self.fc3 = nn.Linear(64, 10)
def forward(self, x):
#x = F.relu(self.conv1(x))
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = F.relu(self.conv2(x))
#x = F.relu(self.conv3(x))
x = F.max_pool2d(F.relu(self.conv3(x)), (2,2))
x = torch.flatten(x, start_dim=1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
h = x
x = self.fc3(x)
return h, x
"""Write code that trains the network on the FashionMNIST train dataset for classification (use cross entropy and SGD).
Run the network for at least 10 epochs, over the entire dataset. Make sure to print the loss over the train set as well as the **test set** over time (say, every 1000 batches, but it's up to you), so you will know where you are during training.
Note, measuring loss on the test set is similar to measuring loss over the train set. However, make sure not to run the test images in back propagation. Use them only in forward and calculate the average loss over the entire test set. Since it will make the training process run slower, you should measure loss for the test set only at the end of an epoch (so overall you get 10 loss values for the test set). You are encouraged to write a different function for calculating the loss of the test set, and then call it from the training procedure.
You should collect the loss values in an array, so you can plot them into two curves, one for train and one for test.
In addition, you should measure the time it takes you to train the network completely.
"""
net = CNNFMnist() # -- For CPU
print(net)
# define loss function
criterion = nn.CrossEntropyLoss()
# define the optimizer
optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
# training loop
def test_net():
test_loss = []
for data in testloader:
inputs, labels = data
#inputs = inputs.cuda() # -- for GPU
#labels = labels.cuda() # -- for GPU
_, outputs = net(inputs)
loss = criterion(outputs, labels)
test_loss.append(loss.item())
# return average loss over all test set
return sum(test_loss) / len(test_loss)
train_loss = []
test_loss = []
interval_tuples = []
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
for epoch in range(10):
running_train_loss = 0.0
for i, data in enumerate(trainloader, 0):
inputs, labels = data
optimizer.zero_grad()
_, outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_train_loss += loss.item()
if (i+1) % 1000 == 0:
interval_tuples.append(str((epoch + 1, i + 1)))
train_loss.append(running_train_loss / 1000)
print('[%d, %5d] loss: %.3f' %(epoch + 1, i + 1, running_train_loss / 1000))
running_train_loss = 0.0
net.eval()
with torch.no_grad():
running_test_loss = test_net()
print("epoch {}, test loss: {}\n".format(epoch + 1, running_test_loss))
test_loss.append(running_test_loss)
net.train()
print('Finished Training')
end.record()
# Waits for everything to finish running
torch.cuda.synchronize()
mnist_cpu=(start.elapsed_time(end)/1000)
# print train loss graph per batch
plt.figure(figsize=(25,10))
plt.plot(interval_tuples, train_loss)
plt.xlabel('(epoch, batch)')
plt.ylabel('loss')
plt.title('train-set loss per epochs')
plt.show()
# Visualization of train and test loss
plt.plot(range(1,11), train_loss[::15], color='blue')
plt.plot(range(1,11),test_loss, color='red')
plt.legend(["train", "test"], loc ="best")
plt.xlabel('epoch')
plt.ylabel('loss')
plt.title('loss over train and test sets')
plt.show()
"""Write a function that evaluates the resulted model over the entire test data of FashionMNIST. Provide a single accuracy number."""
correct = 0
total = 0
with torch.no_grad():
for data in testloader:
images, labels = data
_, outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the test images: %d %%' % (100 * correct / total))
acc_cpu = (100 * correct / total)
"""# **Part 2**: Training with a GPU
You are requested to change your code to use the GPU instead of the CPU.
This can be easily done by converting every torch.tensor to torch.cuda.tensor.
Specific instructions:
* Change the hardware equipment of your Colab notebook. To do that, go to the "Runtime" menu, and then to "Change runtime type". In the dialog box, change "Hardware accelerator" to GPU.
* Please follow the lines that were commented out with the comment # -- For GPU
* Also, remove the lines that have the comment # -- For CPU
Train your network again and compare training time.
"""
# Copy the same code, this time using the GPU instead of the CPU
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import torch.nn.functional as F
import matplotlib.pyplot as plt
trainset = torchvision.datasets.FashionMNIST(root='./data', train=True,
download=True, transform=transforms.ToTensor())
testset = torchvision.datasets.FashionMNIST(root='./data', train=False,
download=True, transform=transforms.ToTensor())
classes = ('T-shirt/top', 'Trouser', 'Pullover', 'Dress',
'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot')
# Use dataloaders for train and test (batch size is 4)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
shuffle=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
shuffle=False)
# The images are of 1, 28, 28 size (only one black-white channel)
trainset[0][0].shape
class CNNFMnist(nn.Module):
def __init__(self):
super(CNNFMnist, self).__init__()
self.conv1 = nn.Conv2d(1, 10, 3)
self.conv2 = nn.Conv2d(10, 5, 3)
self.conv3 = nn.Conv2d(5, 16, 3)
self.fc1 = nn.Linear(256, 128)
self.fc2 = nn.Linear(128, 64)
self.fc3 = nn.Linear(64, 10)
def forward(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = F.relu(self.conv2(x))
x = F.max_pool2d(F.relu(self.conv3(x)), (2,2))
x = torch.flatten(x, start_dim=1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
h = x
x = self.fc3(x)
return h, x
net = CNNFMnist().cuda() # -- For GPU
print(net)
# define loss function
criterion = nn.CrossEntropyLoss()
# define the optimizer
optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
# training loop
def test_net():
test_loss = []
for data in testloader:
inputs, labels = data
inputs = inputs.cuda() # -- for GPU
labels = labels.cuda() # -- for GPU
_, outputs = net(inputs)
loss = criterion(outputs, labels)
test_loss.append(loss.item())
# return average loss over all test set
return sum(test_loss) / len(test_loss)
train_loss = []
test_loss = []
interval_tuples = []
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
for epoch in range(10):
running_train_loss = 0.0
for i, data in enumerate(trainloader, 0):
inputs, labels = data
inputs = inputs.cuda() # -- For GPU
labels = labels.cuda() # -- For GPU
optimizer.zero_grad()
_, outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_train_loss += loss.item()
if (i+1) % 1000 == 0:
interval_tuples.append(str((epoch + 1, i + 1)))
train_loss.append(running_train_loss / 1000)
print('[%d, %5d] loss: %.3f' %(epoch + 1, i + 1, running_train_loss / 1000))
running_train_loss = 0.0
net.eval()
with torch.no_grad():
running_test_loss = test_net()
print("epoch {}, test loss: {}\n".format(epoch + 1, running_test_loss))
test_loss.append(running_test_loss)
net.train()
print('Finished Training')
end.record()
# Waits for everything to finish running
torch.cuda.synchronize()
mnist_gpu=(start.elapsed_time(end)/1000)
# print train loss graph per batch
plt.figure(figsize=(25,10))
plt.plot(interval_tuples, train_loss)
plt.xlabel('(epoch, batch)')
plt.ylabel('loss')
plt.title('train-set loss per epochs')
plt.show()
# Visualization of train and test loss
plt.plot(range(1,11), train_loss[::15], color='blue')
plt.plot(range(1,11),test_loss, color='red')
plt.legend(["train", "test"], loc ="best")
plt.xlabel('epoch')
plt.ylabel('loss')
plt.title('loss over train and test sets')
plt.show()
correct = 0
total = 0
with torch.no_grad():
for data in testloader:
images, labels = data
images = images.cuda() # -- for GPU
labels = labels.cuda() # -- for GPU
_, outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the test images: %d %%' % (100 * correct / total))
acc_gpu = (100 * correct / total)
#comparing GPU and CPU
#print time graph
plt.bar(('cpu', 'gpu'), (mnist_cpu, mnist_gpu))
plt.title('time (in seconds)')
plt.show()
#print accuracy graph that should be the same
plt.bar(('cpu', 'gpu'), (acc_cpu,acc_gpu))
plt.title('accuracy')
plt.show()
"""# **Part 3**: Transfer Learning
Training data is a valuable resource, and sometimes there is not enough of it for training a neural network at scale. To handle this situation, one approach is transfer learning, where we train our network on a different related task, and then switch to train it on the downstream task that we focus on. In this last part of the assignment, you are requested to pretrain your network on CIFAR-10, then train it on Fashion-MNIST, and measure its contribution to the results. To do that, please follow the steps:
**Step 1**
Modify your CNNFMnist implementation to return the output of the layer one before last after Relu (Linear layer of size 64, above) in addition to the final output. For example:
```
def forward(self, x):
...
return h, out
```
and train it on the training-set part of CIFAR-10. Use batch size of 4, and train it for at least 10 epochs. Note that CIFAR-10 images are of different shapes (3X32X32), therefore a conversion into 1X28X28 is needed. To do that, when you load CIFAR-10 using a torchvision Dataset, you can use the transformer torchvision.transforms.Grayscale(num_output_channels=1) in order to convert the images to a 1X32X32 grayscale volume:
```
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=torchvision.transforms.Compose([torchvision.transforms.Grayscale(num_output_channels=1),
torchvision.transforms.ToTensor()]))
```
Then, from each 1X32X32 image, sample 10 1X28X28 images at random positions, and use them for training (*optional* - for data augmentation, if you want, you can also generate the reflection of each of the 10 images and add them the training set).
**Step 2**
Once done, write a new Module CNNFMnist2, which uses CNNFMnist as one of its sub modules, followed by some additional layers. The output of CNNFMnist that goes into the next layer, should be the output of the 64 neuron one-before-last layer, as described above. CNNFMnist2 should have the following architecture:
* CNNFMnist
* Linear, output size 32
* Relu
* Linear, output size 16
* Relu
* Linear, output size 10
Make sure to allow the user to assign a pre-trained version CNNFMnist as a member of the module. For example:
```
class CNNFMnist2(nn.Module):
def __init__(self, trained_cnnfmnist_model):
super(CNNFMnist2, self).__init__()
self.trained_cnnfmnist_model = trained_cnnfmnist_model
self.fc1 = nn.Linear(64, 32)
...
```
**Step 3**
Train and eval CNNFMnist2 on Fashion-MNIST a few times:
- Using the pre-trained version of CNNFMnist.
- Using a fresh CNNFMnist instance (without training it).
- (Optional) Using the pre-trained version of CNNFMnist, after freezing its weights using the .eval() function.
Report on evaluation results (accuracy) for all of those cases.
"""
train_data = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=torchvision.transforms.Compose([torchvision.transforms.Grayscale(num_output_channels=1),
torchvision.transforms.CenterCrop((28,28)),
torchvision.transforms.ToTensor()]))
for i in range(9):
train_data2 = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=torchvision.transforms.Compose([torchvision.transforms.Grayscale(num_output_channels=1),
torchvision.transforms.CenterCrop((28,28)),
torchvision.transforms.ToTensor()]))
train_data = train_data + train_data2
trainsetcifar = torch.utils.data.ConcatDataset(train_data)
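# Sketch, not in the original notebook: the assignment text above asks for ten
# crops taken at *random* positions from each 32x32 image, whereas CenterCrop
# produces identical copies. Random 28x28 crops could be obtained with
# torchvision.transforms.RandomCrop, for example:
random_crop_set = torchvision.datasets.CIFAR10(root='./data', train=True,
                        download=True, transform=torchvision.transforms.Compose([torchvision.transforms.Grayscale(num_output_channels=1),
                        torchvision.transforms.RandomCrop((28, 28)),
                        torchvision.transforms.ToTensor()]))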
testsetcifar = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=torchvision.transforms.Compose([torchvision.transforms.Grayscale(num_output_channels=1),
torchvision.transforms.CenterCrop((28,28)),
torchvision.transforms.ToTensor()]))
classescifar = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
#Use dataloaders for train and test (batch size is 64)
trainloadercifar = torch.utils.data.DataLoader(train_data, batch_size=64,
shuffle=True)
testloadercifar = torch.utils.data.DataLoader(testsetcifar, batch_size=64,
shuffle=False)
net_cifar = CNNFMnist().cuda()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net_cifar.parameters(), lr = 0.001, momentum=0.9)
for epoch in range(10):
running_train_loss = 0.0
for i, data in enumerate(trainloadercifar, 0):
inputs, labels = data
inputs = inputs.cuda() # -- For GPU
labels = labels.cuda() # -- For GPU
optimizer.zero_grad()
_, outputs = net_cifar(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_train_loss += loss.item()
if (i+1) % 1000 == 0:
#interval_tuples.append(str((epoch + 1, i + 1)))
#train_loss.append(running_train_loss / 1000)
print('[%d, %5d] loss: %.3f' %(epoch + 1, i + 1, running_train_loss / 1000))
running_train_loss = 0.0
class CNNFMnist2(nn.Module):
def __init__(self, trained_cnnfmnist2_model):
super(CNNFMnist2, self).__init__()
self.trained_cnnfmnist2_model = trained_cnnfmnist2_model
self.fc1 = nn.Linear(64, 32)
self.fc2 = nn.Linear(32, 16)
self.fc3 = nn.Linear(16, 10)
def forward(self, x):
x,_ = self.trained_cnnfmnist2_model(x)
#x = torch.flatten(x, start_dim=1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def accuracy(net):
correct = 0
total = 0
with torch.no_grad():
for data in testloader:
images, labels = data
images = images.cuda() # -- for GPU
labels = labels.cuda() # -- for GPU
outputs = net(images)
_, predicted = torch.max(outputs, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the test images: %d %%' % (100 * correct / total))
return (100 * correct / total)
#pretrained
pretrained = CNNFMnist2(net_cifar).cuda()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(pretrained.parameters(), lr = 0.001, momentum=0.9)
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
for epoch in range(10):
running_train_loss = 0.0
for i, data in enumerate(trainloader, 0):
inputs, labels = data
inputs = inputs.cuda() # -- For GPU
labels = labels.cuda() # -- For GPU
optimizer.zero_grad()
        outputs = pretrained(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_train_loss += loss.item()
if (i+1) % 1000 == 0:
print('[%d, %5d] loss: %.3f' %(epoch + 1, i + 1, running_train_loss / 1000))
running_train_loss = 0.0
end.record()
# Waits for everything to finish running
torch.cuda.synchronize()
pretrained_seconds=(start.elapsed_time(end)/1000)
a = accuracy(pretrained)
#untrained
untrained_cifar = CNNFMnist().cuda()
untrained_net_cifar = CNNFMnist2(untrained_cifar).cuda()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(untrained_net_cifar.parameters(), lr = 0.001, momentum=0.9)
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
for epoch in range(10):
running_train_loss = 0.0
for i, data in enumerate(trainloader, 0):
inputs, labels = data
inputs = inputs.cuda() # -- For GPU
labels = labels.cuda() # -- For GPU
optimizer.zero_grad()
outputs = untrained_net_cifar(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_train_loss += loss.item()
if (i+1) % 1000 == 0:
print('[%d, %5d] loss: %.3f' %(epoch + 1, i + 1, running_train_loss / 1000))
running_train_loss = 0.0
end.record()
# Waits for everything to finish running
torch.cuda.synchronize()
untrained_seconds=(start.elapsed_time(end)/1000)
b = accuracy(untrained_net_cifar)
#pretrained-freeze
net_cifar.eval()
pretrained_net_cifar = CNNFMnist2(net_cifar).cuda()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(pretrained_net_cifar.parameters(), lr = 0.001, momentum=0.9)
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
for epoch in range(10):
running_train_loss = 0.0
for i, data in enumerate(trainloader, 0):
inputs, labels = data
inputs = inputs.cuda() # -- For GPU
labels = labels.cuda() # -- For GPU
optimizer.zero_grad()
outputs = pretrained_net_cifar(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_train_loss += loss.item()
if (i+1) % 1000 == 0:
print('[%d, %5d] loss: %.3f' %(epoch + 1, i + 1, running_train_loss / 1000))
running_train_loss = 0.0
end.record()
# Waits for everything to finish running
torch.cuda.synchronize()
pretrained_freeze_seconds=(start.elapsed_time(end)/1000)
c = accuracy(pretrained_net_cifar)
plt.bar(('pretrained', 'untrained', 'pretrained_freeze'), (pretrained_seconds, untrained_seconds, pretrained_freeze_seconds))
plt.title('time (in seconds)')
plt.show()
plt.bar(('pretrained', 'untrained', 'pretrained_freeze'), (a, b, c))
plt.title('accuracy')
plt.show()
"""# Submission instructions
You should submit a pdf file with the following items:
CPU Experiment:
* Plot of loss curves (train in blue, test in red)
* Training time
GPU Experiment:
* Plot of loss curves (train in blue, test in red)
* Training time
Transfer Learning Experiment:
* Accuracy results on test set for the 2-3 implemented settings (see above)
Link for your collab notebook.
ID and names of submitters.
Good luck!
"""
| 3.765625
| 4
|
start_mirt_pipeline.py
|
hmirin/guacamole
| 141
|
12781293
|
<reponame>hmirin/guacamole
#!/usr/bin/env python
"""This file will take you all the way from a CSV of student performance on
test items to trained parameters describing the difficulties of the assessment
items.
The parameters can be used to identify the different concepts in your
assessment items, and to drive your own adaptive test. The mirt_engine python
file included here can be used to run an adaptive pretest that will provide an
adaptive set of assessment items if you provide information about whether the
questions are being answered correctly or incorrectly.
Example Use:
with a file called my_data.csv call
./start_mirt_pipeline -i path/to/my_data.csv
let a1_time.json be the name of the output json file
(Congrats! Examine that for information about item difficulty!)
To run an adaptive test with your test items:
./run_adaptive_test.py -i a1_time.json
This will open an interactive session where the test will ask you questions
according to whatever will cause the model to gain the most information to
predict your abilities.
Authors: <NAME>, <NAME>, <NAME>, <NAME>
(2014)
"""
import argparse
import datetime
import multiprocessing
import os
import shutil
import sys
from mirt import mirt_train_EM, generate_predictions, score
from mirt import visualize, adaptive_pretest, generate_responses
from train_util import model_training_util
# Necessary on some systems to make sure all cores are used. If not all
# cores are being used and you'd like a speedup, pip install affinity
try:
import affinity
affinity.set_process_affinity_mask(0, 2 ** multiprocessing.cpu_count() - 1)
except NotImplementedError:
pass
except ImportError:
sys.stderr.write('If you find that not all cores are being '
'used, try installing affinity.\n')
def get_command_line_arguments(arguments=None):
"""Gets command line arguments passed in when called, or
can be called from within a program.
Parses input from the command line into options for running
the MIRT model. For more fine-grained options, look at
mirt_train_EM.py
"""
parser = argparse.ArgumentParser()
parser.add_argument("--generate", action="store_true",
help=("Generate fake training data."))
parser.add_argument("--train", action="store_true",
help=("Train a model from training data."))
parser.add_argument("--visualize", action="store_true",
help=("Visualize a trained model."))
parser.add_argument("--test", action="store_true",
help=("Take an adaptive test from a trained model."))
parser.add_argument("--score", action="store_true",
help=("Score the responses of each student."))
parser.add_argument("--report", action="store_true",
help=("Report on the parameters of each exercise."))
parser.add_argument("--roc_viz", action="store_true",
help=("Examine the roc curve for the current model"
" on the data in the data file."))
parser.add_argument("--sigmoid_viz", action="store_true",
help=("Examine the sigmoids generated for the model in"
" the model file."))
parser.add_argument(
"-d", "--data_file",
default=os.path.dirname(
os.path.abspath(__file__)) + '/sample_data/all.responses',
help=("Name of file where data of interest is located."))
parser.add_argument(
'-a', '--abilities', default=1, type=int,
help='The dimensionality/number of abilities.')
parser.add_argument(
'-s', '--num_students', default=500, type=int,
help="Number of students to generate data for. Only meaningful when "
"generating fake data - otherwise it's read from the data file.")
parser.add_argument(
'-p', '--num_problems', default=10, type=int,
help="Number of problems to generate data for. Only meaningful when "
"generating fake data - otherwise it's read from the data file.")
parser.add_argument("-t", "--time", action="store_true",
help=("Whether to include time as a parameter."
"If you do not select time, the 'time' field"
"in your data is ignored."))
parser.add_argument(
'-w', '--workers', type=int, default=1,
help=("The number of processes to use to parallelize mirt training"))
parser.add_argument(
"-n", "--num_epochs", type=int, default=20,
help=("The number of EM iterations to do during learning"))
parser.add_argument(
"-o", "--model_directory",
default=os.path.dirname(
os.path.abspath(__file__)) + '/sample_data/models/',
help=("The directory to write models and other output"))
parser.add_argument(
"-m", "--model",
default=os.path.dirname(
os.path.abspath(__file__)) + '/sample_data/models/model.json',
help=("The location of the model (to write if training, and to read if"
" visualizing or testing."))
parser.add_argument(
"-q", "--num_replicas", type=int, default=1, help=(
"The number of copies of the data to train on. If there is too "
"little training data, increase this number in order to maintain "
"multiple samples from the abilities vector for each student. A "
"sign that there is too little training data is if the update step"
" length ||dcouplings|| remains large."))
parser.add_argument(
"-i", "--items", type=int, default=5, help=(
"Number of items to use in adaptive test."))
if arguments:
arguments = parser.parse_args(arguments)
else:
arguments = parser.parse_args()
# Support file paths in the form of "~/blah", which python
# doesn't normally recognise
if arguments.data_file:
arguments.data_file = os.path.expanduser(arguments.data_file)
if arguments.model_directory:
arguments.model_directory = os.path.expanduser(
arguments.model_directory)
if arguments.model:
arguments.model = os.path.expanduser(arguments.model)
# When visualize is true, we do all visualizations
if arguments.visualize:
arguments.roc_viz = True
arguments.sigmoid_viz = True
arguments.report = True
# if we haven't been instructed to do anything, then show the help text
if not (arguments.generate or arguments.train
or arguments.visualize or arguments.test
or arguments.roc_viz or arguments.sigmoid_viz
or arguments.report or arguments.score):
print ("\nMust specify at least one task (--generate, --train,"
" --visualize, --test, --report, --roc_viz, --sigmoid_viz, "
"--score).\n")
parser.print_help()
# Save the current time for reference when looking at generated models.
DATE_FORMAT = '%Y-%m-%d-%H-%M-%S'
arguments.datetime = str(datetime.datetime.now().strftime(DATE_FORMAT))
return arguments
def save_model(arguments):
"""Look at all generated models, and save the most recent to the correct
location"""
latest_model = get_latest_parameter_file_name(arguments)
print "Saving model to %s" % arguments.model
shutil.copyfile(latest_model, arguments.model)
def get_latest_parameter_file_name(arguments):
"""Get the most recent of many parameter files in a directory.
There will be many .npz files written; we take the last one.
"""
params = gen_param_str(arguments)
path = arguments.model_directory + params + '/'
npz_files = os.listdir(path)
npz_files.sort(key=lambda fname: fname.split('_')[-1])
return path + npz_files[-1]
def main():
"""Get arguments from the command line and runs with those arguments."""
arguments = get_command_line_arguments()
run_with_arguments(arguments)
def make_necessary_directories(arguments):
"""Ensure that output directories for the data we'll be writing exist."""
roc_dir = arguments.model_directory + 'rocs/'
model_training_util.mkdir_p([roc_dir])
def gen_param_str(arguments):
"""Transform data about current run into a param string for file names."""
time_str = 'time' if arguments.time else 'no_time'
return "%s_%s_%s" % (arguments.abilities, time_str, arguments.datetime)
def generate_model_with_parameters(arguments):
"""Trains a model with the given parameters, saving results."""
param_str = gen_param_str(arguments)
out_dir_name = arguments.model_directory + param_str + '/'
model_training_util.mkdir_p(out_dir_name)
# to set more fine-grained parameters about MIRT training, look at
# the arguments at mirt/mirt_train_EM.py
mirt_train_params = [
'-a', str(arguments.abilities),
'-w', str(arguments.workers),
'-n', str(arguments.num_epochs),
'-f', arguments.model_directory + 'train.responses',
'-o', out_dir_name,
]
if arguments.time:
mirt_train_params.append('--time')
mirt_train_EM.run_programmatically(mirt_train_params)
def generate_roc_curve_from_model(arguments):
"""Read results from each model trained and generate roc curves."""
roc_dir = arguments.model_directory + 'rocs/'
roc_file = roc_dir + arguments.datetime
test_file = arguments.model_directory + 'test.responses'
return generate_predictions.load_and_simulate_assessment(
arguments.model, roc_file, test_file)
def run_with_arguments(arguments):
"""Generate data, train a model, visualize your trained data, and score
students based on a trained model.
"""
params = gen_param_str(arguments)
# Set up directories
make_necessary_directories(arguments)
if arguments.generate:
print 'Generating Responses'
generate_responses.run(arguments)
print 'Generated responses for %d students and %d problems' % (
arguments.num_students, arguments.num_problems)
if arguments.train:
# Only re-separate into test and train when resume_from_file
# is False.
# Separate provided data file into a train and test set.
model_training_util.sep_into_train_and_test(arguments)
print 'Training MIRT models'
generate_model_with_parameters(arguments)
save_model(arguments)
if arguments.roc_viz:
print 'Generating ROC for %s' % arguments.model
roc_curve = generate_roc_curve_from_model(arguments)
print 'Visualizing roc for %s' % arguments.model
visualize.show_roc({params: [r for r in roc_curve]})
if arguments.sigmoid_viz:
print 'Visualizing sigmoids for %s' % arguments.model
visualize.show_exercises(arguments.model)
if arguments.test:
print 'Starting adaptive pretest'
adaptive_pretest.main(arguments.model, arguments.items)
if arguments.report:
print "Generating problems report based on params file."
visualize.print_report(arguments.model)
if arguments.score:
print "Scoring all students based on trained test file"
score.score_students(arguments.model, arguments.data_file)
if __name__ == '__main__':
main()
| 3.546875
| 4
|
questions-and-answers/dice-with-main/dice_test.py
|
Wal100/dice_game
| 0
|
12781294
|
<reponame>Wal100/dice_game<filename>questions-and-answers/dice-with-main/dice_test.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Unit testing."""
import unittest
import dice
class TestDiceClass(unittest.TestCase):
"""Test the class."""
def test_init_default_object(self):
"""Instantiate an object and check its properties."""
die = dice.Dice()
self.assertIsInstance(die, dice.Dice)
res = die.faces
exp = 6
self.assertEqual(res, exp)
def test_roll_a_dice(self):
"""Rool a dice and check value is in bounds."""
die = dice.Dice()
res = die.roll()
exp = 1 <= res <= die.faces
self.assertTrue(exp)
| 3.859375
| 4
|
highPerformanceComputing/Project2-KNNClassifier/set.py
|
naokishami/Classwork
| 0
|
12781295
|
import pandas as pd
file_romeo = open("./data/romeoandjuliet.csv", "r")
file_moby = open("./data/mobydick.csv", "r")
file_gatsby = open("./data/greatgatsby.csv", "r")
file_hamlet = open("./data/hamlet.csv", "r")
romeo = file_romeo.read()
moby = file_moby.read()
gatsby = file_gatsby.read()
hamlet = file_hamlet.read()
print(type(romeo))
print(romeo)
the_set = []
df_romeo = pd.read_csv("./data/romeoandjuliet.csv", sep=",")
print(df_romeo)
df_moby = pd.read_csv("./data/mobydick.csv", sep=",")
print(df_moby)
df_gatsby = pd.read_csv("./data/greatgatsby.csv", sep=",")
print(df_gatsby)
df_hamlet = pd.read_csv("./data/hamlet.csv", sep=",")
print(df_hamlet)
romeo_moby = pd.merge(df_romeo, df_moby, how="outer", on=["Word"], suffixes=('_Romeo', '_Moby'))
print(romeo_moby)
gatsby_hamlet = pd.merge(df_gatsby, df_hamlet, how="outer", on=["Word"], suffixes=('_Gatsby', '_Hamlet'))
print(gatsby_hamlet)
full = pd.merge(romeo_moby, gatsby_hamlet, how="outer", on=["Word"])
print(full)
pd.set_option("display.max_rows", None, "display.max_columns", None)
full = full.fillna(0)
print(full)
full.to_csv(path_or_buf="./data/matrix.csv")
#the_set = romeo.union(moby, gatsby, hamlet)
file_romeo.close()
file_moby.close()
file_gatsby.close()
file_hamlet.close()
| 3.484375
| 3
|
samples/python/46.teams-auth/bots/__init__.py
|
Aliacf21/BotBuilder-Samples
| 1,998
|
12781296
|
<reponame>Aliacf21/BotBuilder-Samples<gh_stars>1000+
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from .dialog_bot import DialogBot
from .teams_bot import TeamsBot
__all__ = ["DialogBot", "TeamsBot"]
| 1.101563
| 1
|
src/voice/preprocessing/audio_preprocessing.py
|
youngerous/kobart-voice-summarization
| 8
|
12781297
|
<reponame>youngerous/kobart-voice-summarization
# -*- coding: utf-8 -*-
"""
audio_preprocessing.py
Author: HyeongwonKang, JeoungheeKim
Audio clip resampling and trimming of blanks (silence)
Example: python audio_preprocessing.py -r /data/wings -s resamp_data/wings
"""
## Import libraries
import numpy as np
import os
import argparse
from tqdm.notebook import tqdm
import librosa
from pathlib import Path
import matplotlib.pyplot as plt
import IPython.display as ipd
import glob
import soundfile as sf
import sys
## Function definitions
def printProgressBar(iteration, total, prefix = 'Progress', suffix = 'Complete',\
decimals = 1, length = 70, fill = '█'):
    # Display the progress of the task
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
_string_out = '\r%s |%s| %s%% %s' %(prefix, bar, percent, suffix)
sys.stdout.write(_string_out)
sys.stdout.flush()
if iteration == total:
sys.stdout.write('\n')
## Load a file (at the specified sampling rate)
def load_audio(file_path, sr=22050):
"""
    file_path : path to the audio file
    sr : sampling rate to use when loading the audio
    """
    ## Extract the file extension
    ext = Path(file_path).suffix
    ## Read the file
if ext in ['.wav', '.flac']:
wav, sr = librosa.load(file_path, sr=sr)
elif ext == '.pcm':
wav = np.memmap(file_path, dtype='h', mode='r').astype('float32') / 32767
elif ext in ['.raw', '.RAW']:
        wav, sr = sf.read(file_path, channels=1, samplerate=sr, format='RAW', subtype='PCM_16')
else:
raise ValueError("Unsupported preprocess method : {0}".format(ext))
return wav, sr
## Trim silence (and add padding)
def trim_audio(wav, top_db=10, pad_len=4000):
    """
    Trim leading/trailing silence below `top_db` and pad both ends with zeros.
    """
    ## Determine where to cut the audio based on the top_db threshold
non_silence_indices = librosa.effects.split(wav, top_db=top_db)
start = non_silence_indices[0][0]
end = non_silence_indices[-1][1]
    ## Trim the audio
    wav = wav[start:end]
    ## Add padding
wav = np.hstack([np.zeros(pad_len), wav, np.zeros(pad_len)])
return wav
## Plot the WAV
def plot_wav(wav, sr):
    ## Plot waveform and spectrogram
plt.figure(1)
plot_a = plt.subplot(211)
plot_a.plot(wav)
plot_a.set_xlabel('sample rate * time')
plot_a.set_ylabel('energy')
plot_b = plt.subplot(212)
plot_b.specgram(wav, NFFT=1024, Fs=sr, noverlap=900)
plot_b.set_xlabel('Time')
plot_b.set_ylabel('Frequency')
plt.show()
if __name__ == "__main__":
# logger = logging.getLogger(__name__)
# logger.setLevel(logging.INFO)
# stream_handler = logging.StreamHandler()
# logger.addHandler(stream_handler)
ap = argparse.ArgumentParser()
ap.add_argument("-r", "--root_path", required=True, help="audio script csv path")
ap.add_argument("-s", "--save_path", required=True, help="base audio clip path")
args = vars(ap.parse_args())
root_path = args["root_path"]
save_path = args["save_path"]
    ## Start processing
    ## Tacotron 2 operates at a 22050 Hz sampling rate by default
    sampling_rate = 22050
    ## Optionally remove audio quieter than a chosen level (personal setting)
    # decibel = 10
    ## Load wav files; pcm or other extensions can also be used.
file_list = glob.glob(os.path.join(root_path, "*.wav"))
# file_list = glob.glob(os.path.join(root_path, "*.pcm"))
    ## Choose where to save the output
os.makedirs(save_path, exist_ok=True)
for i, file_path in enumerate(file_list):
printProgressBar(i+1, len(file_list))
        ## Load the file (Tacotron 2 operates at 22050 Hz by default)
        wav, sr = load_audio(file_path, sr=sampling_rate)
        ## Trim the audio (add padding)
# trimed_wav = trim_audio(wav, top_db=decibel)
filename = Path(file_path).name
temp_save_path = os.path.join(save_path, filename)
        ## Save the result
# sf.write(temp_save_path, trimed_wav, sampling_rate)
sf.write(temp_save_path, wav, sampling_rate)
| 2.421875
| 2
|
server/openapi_server/controllers/text_location_annotation_controller.py
|
cascadianblue/phi-annotator
| 0
|
12781298
|
import connexion
from openapi_server.annotator.phi_types import PhiType
from openapi_server.get_annotations import get_annotations
from openapi_server.models.error import Error # noqa: E501
from openapi_server.models.text_location_annotation_request import TextLocationAnnotationRequest # noqa: E501
from openapi_server.models.text_location_annotation_response import TextLocationAnnotationResponse # noqa: E501
def create_text_location_annotations(): # noqa: E501
"""Annotate locations in a clinical note
Return the location annotations found in a clinical note # noqa: E501
:param text_location_annotation_request:
:type text_location_annotation_request: dict | bytes
:rtype: TextLocationAnnotationResponse
"""
res = None
status = None
if connexion.request.is_json:
try:
annotation_request = TextLocationAnnotationRequest.from_dict(connexion.request.get_json()) # noqa: E501
note = annotation_request.note
annotations = get_annotations(
note, phi_type=PhiType.LOCATION)
res = TextLocationAnnotationResponse(annotations)
status = 200
except Exception as error:
status = 500
res = Error("Internal error", status, str(error))
return res, status
| 2.21875
| 2
|
pyFM/signatures/WKS_functions.py
|
RobinMagnet/pyFM
| 35
|
12781299
|
<reponame>RobinMagnet/pyFM
import numpy as np
def WKS(evals, evects, energy_list, sigma, scaled=False):
"""
Returns the Wave Kernel Signature for some energy values.
Parameters
------------------------
evects : (N,K) array with the K eigenvectors of the Laplace Beltrami operator
evals : (K,) array of the K corresponding eigenvalues
energy_list : (num_E,) values of e to use
sigma : (float) [positive] standard deviation to use
scaled : (bool) Whether to scale each energy level
Output
------------------------
WKS : (N,num_E) array where each column is the WKS for a given e
"""
assert sigma > 0, f"Sigma should be positive ! Given value : {sigma}"
evals = np.asarray(evals).flatten()
indices = np.where(evals > 1e-5)[0].flatten()
evals = evals[indices]
evects = evects[:, indices]
e_list = np.asarray(energy_list)
coefs = np.exp(-np.square(e_list[:,None] - np.log(np.abs(evals))[None,:])/(2*sigma**2)) # (num_E,K)
weighted_evects = evects[None, :, :] * coefs[:,None, :] # (num_E,N,K)
natural_WKS = np.einsum('tnk,nk->nt', weighted_evects, evects) # (N,num_E)
if scaled:
inv_scaling = coefs.sum(1) # (num_E)
return (1/inv_scaling)[None,:] * natural_WKS
else:
return natural_WKS
def lm_WKS(evals, evects, landmarks, energy_list, sigma, scaled=False):
"""
Returns the Wave Kernel Signature for some landmarks and energy values.
Parameters
------------------------
evects : (N,K) array with the K eigenvectors of the Laplace Beltrami operator
evals : (K,) array of the K corresponding eigenvalues
landmarks : (p,) indices of landmarks to compute
energy_list : (num_E,) values of e to use
    sigma : (float) [positive] standard deviation to use
Output
------------------------
landmarks_WKS : (N,num_E*p) array where each column is the WKS for a given e for some landmark
"""
assert sigma > 0, f"Sigma should be positive ! Given value : {sigma}"
evals = np.asarray(evals).flatten()
indices = np.where(evals > 1e-2)[0].flatten()
evals = evals[indices]
evects = evects[:,indices]
e_list = np.asarray(energy_list)
coefs = np.exp(-np.square(e_list[:, None] - np.log(np.abs(evals))[None, :]) / (2*sigma**2)) # (num_E,K)
weighted_evects = evects[None, landmarks, :] * coefs[:,None,:] # (num_E,p,K)
landmarks_WKS = np.einsum('tpk,nk->ptn', weighted_evects, evects) # (p,num_E,N)
if scaled:
inv_scaling = coefs.sum(1) # (num_E,)
landmarks_WKS = ((1/inv_scaling)[None,:,None] * landmarks_WKS)
return landmarks_WKS.reshape(-1,evects.shape[0]).T # (N,p*num_E)
def auto_WKS(evals, evects, num_E, landmarks=None, scaled=True):
"""
Compute WKS with an automatic choice of scale and energy
Parameters
------------------------
evals : (K,) array of K eigenvalues
evects : (N,K) array with K eigenvectors
landmarks : (p,) If not None, indices of landmarks to compute.
num_E : (int) number values of e to use
Output
------------------------
WKS or lm_WKS : (N,num_E) or (N,p*num_E) array where each column is the WKS for a given e
and possibly for some landmarks
"""
abs_ev = sorted(np.abs(evals))
e_min,e_max = np.log(abs_ev[1]),np.log(abs_ev[-1])
sigma = 7*(e_max-e_min)/num_E
e_min += 2*sigma
e_max -= 2*sigma
energy_list = np.linspace(e_min,e_max,num_E)
if landmarks is None:
return WKS(abs_ev, evects, energy_list, sigma, scaled=scaled)
else:
return lm_WKS(abs_ev, evects, landmarks, energy_list, sigma, scaled=scaled)
def mesh_WKS(mesh, num_E, landmarks=None, k=None):
assert mesh.eigenvalues is not None, "Eigenvalues should be processed"
if k is None:
k = len(mesh.eigenvalues)
else:
        assert len(mesh.eigenvalues) >= k, f"At least {k} eigenvalues should be computed, not {len(mesh.eigenvalues)}"
return auto_WKS(mesh.eigenvalues[:k], mesh.eigenvectors[:, :k], num_E, landmarks=landmarks, scaled=True)
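# Minimal usage sketch (not part of the original module): auto_WKS on random
# toy data. Real eigenpairs would come from a Laplace-Beltrami decomposition.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    n_vertices, k = 100, 20
    toy_evals = np.sort(rng.uniform(1e-3, 10.0, size=k))
    toy_evects = rng.standard_normal((n_vertices, k))
    descriptors = auto_WKS(toy_evals, toy_evects, num_E=32)
    print(descriptors.shape)  # expected: (100, 32)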
| 2.625
| 3
|
bbp/comps/fas.py
|
kevinmilner/bbp
| 0
|
12781300
|
#!/usr/bin/env python
"""
Southern California Earthquake Center Broadband Platform
Copyright 2010-2016 Southern California Earthquake Center
"""
from __future__ import division, print_function
# Import Python modules
import os
import sys
import shutil
import matplotlib as mpl
mpl.use('AGG', warn=False)
import pylab
import numpy as np
# Import Broadband modules
import bband_utils
import install_cfg
from station_list import StationList
# Import plot config file
import plot_config
def create_boore_asc2smc(control_file, input_file,
data_column, num_headers,
extension_string):
"""
This function creates the control file for the asc2smc converter tool
"""
ctl_file = open(control_file, 'w')
ctl_file.write("!Control file for ASC2SMC ! first line\n")
ctl_file.write("! Revision of program involving a change in the "
"control file on this date:\n")
ctl_file.write(" 02/02/12\n")
ctl_file.write("!Name of summary file:\n")
ctl_file.write(" asc2smc.sum\n")
ctl_file.write("!n2skip (-1=headers preceded by !; 0=no headers; "
"otherwise number of headers to skip)\n")
ctl_file.write(" %d\n" % (num_headers))
ctl_file.write("!write headers to smc file "
"(even if n2skip > 0)? (Y/N)\n")
ctl_file.write(" Y\n")
ctl_file.write("!sps (0.0 = obtain from input file)\n")
ctl_file.write(" 0\n")
ctl_file.write("!N columns to read, column number for "
"time and data columns \n")
ctl_file.write("! (for files made using blpadflt, period is in "
"column 1 and sd, pv, pa, rv, \n")
ctl_file.write("! aa are in columns 2, 3, 4, 5, 6, respectively)\n")
ctl_file.write("! Note: if sps .ne. 0.0, then column number for time "
"is ignored (but a placeholder is\n")
ctl_file.write("! still needed--e.g., 1 1 1 (read one column, which "
"contains the data; 1 20 1 would be the same)\n")
ctl_file.write("! But note: if the data are not in the first column, "
"but only the data column is to be read\n")
ctl_file.write("! (because sps will be used to establish "
"the time values),\n")
ctl_file.write("! then ncolumns must be the column corresponding to "
"the data. For example, assume that\n")
ctl_file.write("! the data are in column 3 and that columns 1 and 2 "
"contain time and some other variable, but\n")
ctl_file.write("! the time column is not to be used (perhaps because "
"accumulated error in creating the column\n")
ctl_file.write("! leads to a slight shift in the time values). "
"Then the input line should be:\n")
ctl_file.write("! 3 1 3\n")
ctl_file.write("!\n")
ctl_file.write("! This program assumes one data point per row; if "
"there are more points (as, for example,\n")
ctl_file.write("! in files with N points per line), "
"use the program wrapped2asc).\n")
ctl_file.write("!\n")
ctl_file.write(" 3 1 %d\n" % (data_column))
ctl_file.write("!Xfactr\n")
ctl_file.write(" 1.0\n")
ctl_file.write("!Read input format (used if the format is such that "
"the values are not separated by spaces,\n")
ctl_file.write("!in which case a free format cannot be "
"used for input)?\n")
ctl_file.write(" N\n")
ctl_file.write("!If yes, specify a format; if not, "
"still need a placeholder\n")
ctl_file.write(" (3e13.5)\n")
ctl_file.write("!For output, use old (standard) smc format or new\n")
ctl_file.write('!higher precision format. Specify "high" for\n')
ctl_file.write("!high precision; any other word defaults to standard\n")
ctl_file.write("!precision (but some word is needed as "
"a placeholder, even if\n")
ctl_file.write("!standard precision is desired).\n")
ctl_file.write(" high\n")
ctl_file.write("!String to append to input file name "
"for the output filename.\n")
ctl_file.write(" %s\n" % (extension_string))
ctl_file.write('!Input file name (time,data pairs; "stop" in any '
'column to quit):\n')
ctl_file.write("%s\n" % (input_file))
ctl_file.write("STOP\n")
ctl_file.close()
def create_boore_smc2fs2(control_file, input_file, name_string):
"""
This function creates the control file for the smc2fs2 FAS tool
"""
ctl_file = open(control_file, 'w')
ctl_file.write('!Control file for program SMC2FS2\n')
ctl_file.write('! Revision of program involving a change in the control '
'file on this date:\n')
ctl_file.write(' 03/10/10\n')
ctl_file.write('! As many comment lines as desired, each '
'starting with "!"\n')
ctl_file.write('! The string "pp:" indicates a new set '
'of processing parameters\n')
ctl_file.write('! to be applied to the following smc files. '
'The parameters are given on the\n')
ctl_file.write('! lines following "pp:", until the next "pp:" line '
'or until "stop" is \n')
ctl_file.write('! encountered.\n')
ctl_file.write('! NOTE: Use the tapers with caution, '
'choosing them so that important signal\n')
ctl_file.write('! is not reduced by the tapering. '
'This can be particularly a problem with \n')
ctl_file.write('! analog data from relatively small earthquakes '
'that triggered near the \n')
ctl_file.write('! S-wave arrival. \n')
ctl_file.write('!\n')
ctl_file.write('! -----------------------------------------'
'------------------------------------\n')
ctl_file.write('!\n')
ctl_file.write('! Meaning of smoothing input parameters\n')
ctl_file.write('!\n')
ctl_file.write('! NO SMOOTHING\n')
ctl_file.write('! itype = 0\n')
ctl_file.write('! SMOOTHING OVER EQUALLY SPACED FREQUENCIES\n')
ctl_file.write('! itype = 1: box weighting function\n')
ctl_file.write('! smooth_param = width of box weighting function (Hz)\n')
ctl_file.write('! itype = 2: triangular weighting function\n')
ctl_file.write('! smooth_param = width of triangular '
'weighting function (Hz)\n')
ctl_file.write('! SMOOTHING OVER LOGARITHMICALLY SPACED FREQUENCIES\n')
ctl_file.write('! itype = 3: box weighting function\n')
ctl_file.write('! smooth_param = xi, which is the fraction of '
'a decade for the\n')
ctl_file.write('! box weighting function \n')
ctl_file.write('! itype = 4: triangular weighting function\n')
ctl_file.write('! smooth_param = xi, which is the fraction of '
'a decade for the\n')
ctl_file.write('! triangular weighting function \n')
ctl_file.write('! itype = 5: Konno and Ohmachi weighting function '
'(see BSSA 88, 228-241)\n')
ctl_file.write('! smooth_param = xi, which is the fraction '
'of a decade for which\n')
ctl_file.write('! the Konno and Ohmachi weighting '
'function is greater\n')
ctl_file.write('! than 0.043.(it is related to\n')
ctl_file.write('! their smoothing parameter b '
'by the equation\n')
ctl_file.write('! b = 4.0/smooth_param, so we have '
'this correspondence between\n')
ctl_file.write('! b and smooth_param\n')
ctl_file.write('! b smooth_param \n')
ctl_file.write('! 10 0.40\n')
ctl_file.write('! 20 0.20\n')
ctl_file.write('! 40 0.10\n')
ctl_file.write('! \n')
ctl_file.write('! b = 40 seems to be commonly used, '
'but I do not think that it\n')
ctl_file.write('! gives enough smoothing; '
'I PREFER SMOOTH_PARAM = 0.2, \n')
ctl_file.write('! corresponding to b = 20. \n')
ctl_file.write('!\n')
ctl_file.write('! ipow = power of FAS to be smoothed '
'(2 = smoothing energy spectrum)\n')
ctl_file.write('!\n')
ctl_file.write('! df_smooth: Note: need df_smooth for '
'linearly-spaced smoothers, \n')
ctl_file.write('! and generally it should be the df from the fft. '
'For general x data, it is\n')
ctl_file.write('! the spacing between x values, assumed to be constant, '
'The reason for\n')
ctl_file.write('! including it as an input parameter is to "fool" the\n')
ctl_file.write('! program to do smoothing over a specified '
'number of points by\n')
ctl_file.write('! setting df_smooth = 1 and smooth_param = number '
'of points (including \n')
ctl_file.write('! points with zero weight at ends; e.g., '
'smooth_param = 5 will \n')
ctl_file.write('! give a smoother with weights 0, 1/4, 2/4, 1/4, 0; '
'smooth_param\n')
ctl_file.write('! should be odd).\n')
ctl_file.write('!\n')
ctl_file.write('! ------------------------------------'
'-----------------------------------------\n')
ctl_file.write('! Meaning of frequency specification parameters:\n')
ctl_file.write('!\n')
ctl_file.write('!SPECIFY_FREQUENCIES? (y/n):\n')
ctl_file.write('! <enter Y or N>\n')
ctl_file.write('!FREQUENCY SPECIFICATION: \n')
ctl_file.write('! If specify_frequencies = Y, then enter the \n')
ctl_file.write('! number of frequencies, freq(1), freq(2)..., '
'freq(nfreq)\n')
ctl_file.write('! If specify_frequencies = N, then enter \n')
ctl_file.write('! f_low, f_high, log-spaced (0=N, 1=Y), freq_param\n')
ctl_file.write('! if freq_param = 0.0, there is no interpolation, '
'and the FFT frequencies \n')
ctl_file.write('! are used between f_low and f_high '
'(log-spaced is ignored).\n')
ctl_file.write('! if freq_param /= 0.0 and log-spaced = 0, '
'then freq_param is the spacing of the\n')
ctl_file.write('! interpolated frequencies '
'between f_low and f_high\n')
ctl_file.write('! if freq_param /= 0.0 and log-spaced = 1, '
'then freq_param is the number of \n')
ctl_file.write('! interpolated frequencies between f_low and '
'f_high (NOTE: f_low must be > 0.0)\n')
ctl_file.write('! ---------------------------------------'
'--------------------------------------\n')
ctl_file.write('!\n')
ctl_file.write('!Name of summary file:\n')
ctl_file.write(' smc2fs2.sum\n')
ctl_file.write('PP: new set of parameters\n')
ctl_file.write('!tskip, tlength\n')
ctl_file.write(' 0.0 2000.0\n')
ctl_file.write('!dc_remove?\n')
ctl_file.write(' .true. \n')
ctl_file.write('!Length of taper at beginning and end of time series, '
'before adding zeros\n')
ctl_file.write('! to make the number of points in '
'the record a power of two.\n')
ctl_file.write(' 0.0 0.0\n')
ctl_file.write('!signnpw2(<0, backup for npw2, no zpad):\n')
ctl_file.write(' +1.0\n')
ctl_file.write('!smoothing: itype, ipow, df_smooth '
'(0 = FFT df), smooth_param\n')
ctl_file.write('! (see above for the meaning of these input parameters):\n')
ctl_file.write(' 0 1 0.0 0.20\n')
ctl_file.write('!SPECIFY_FREQUENCIES? (y/n):\n')
ctl_file.write(' N\n')
ctl_file.write('!FREQUENCY SPECIFICATION\n')
ctl_file.write(' 0.01 100.0 0 0.0 \n')
ctl_file.write('!character string to append to filename:\n')
ctl_file.write(' %s\n' % (name_string))
ctl_file.write('!Output in smc format (Y,N)?\n')
ctl_file.write('! ***IMPORTANT NOTE: Output cannot be in smc '
'format if use log-spaced \n')
ctl_file.write('! frequencies because programs such as smc2asc '
'have not been modified\n')
ctl_file.write('! to deal with log-spaced frequency.\n')
ctl_file.write(' n\n')
ctl_file.write('!Files to process:\n')
ctl_file.write('%s\n' % (input_file))
ctl_file.write('stop\n')
ctl_file.close()
def read_fas_file(fas_file):
"""
Reads FAS file and returns freq and fas arrays
"""
freqs = []
fas = []
# Read input file
input_file = open(fas_file, 'r')
# Skip headers
for line in input_file:
line = line.strip()
# skip blank lines
if not line:
continue
if line.startswith("freq"):
break
for line in input_file:
line = line.strip()
# skip blank lines
if not line:
continue
pieces = line.split()
pieces = [float(piece) for piece in pieces]
freqs.append(pieces[0])
fas.append(pieces[1])
# All done!
input_file.close()
return freqs, fas
def plot_fas(freqs, ns_data, ew_data, eas_smoothed_data, fas_plot, station):
"""
Create a plot of both FAS components
"""
# Generate plot
# Set plot dims
pylab.gcf().set_size_inches(11, 8.5)
pylab.gcf().clf()
# Adjust title y-position
t = pylab.title("Station: %s" % (station), size=12)
pylab.plot(freqs, ns_data, 'b', lw=0.75, label="NS")
pylab.plot(freqs, ew_data, 'r', lw=0.75, label="EW")
pylab.plot(freqs, eas_smoothed_data, 'k', lw=1.25, label="Smoothed EAS")
pylab.legend(loc='upper right')
pylab.xscale('log')
pylab.yscale('log')
pylab.ylabel('Fourier Amplitude (cm/s)')
pylab.xlabel('Frequency (Hz)')
pylab.axis([0.01, 100, 0.001, 1000])
pylab.grid(True)
pylab.grid(b=True, which='major', linestyle='-', color='lightgray')
pylab.grid(b=True, which='minor', linewidth=0.5, color='gray')
# Save plot
pylab.savefig(fas_plot, format="png",
transparent=False, dpi=plot_config.dpi)
pylab.close()
def ko98_smoothing(freqs, data, delta_freq, bexp):
"""
# ** smoothing of a function y (equally-spaced, dx) with the "Konno-Ohmachi"
# ** function sin (alog10(f/fc)^exp) / alog10(f/fc)^exp) ^^4
# ** where fc is the frequency around which the smoothing is performed
# ** exp determines the exponent 10^(1/exp) is the half-width of the peak
# ** cf Konno & Ohmachi, 1998, BSSA 88-1, pp. 228-241
"""
nx = len(freqs)
data_smooth = np.zeros(nx)
fratio = np.power(10., (2.5 / bexp))
data_smooth[0] = data[0]
for index in range(1, nx):
freq = freqs[index]
# Added check to avoid division by zero later and NaNs in the output file
if freq == 0.0:
data_smooth[index] = data[index]
continue
fc1 = freq / fratio
fc2 = freq * fratio
index1 = int(fc1 / delta_freq)
index2 = int((fc2 / delta_freq) + 1)
if index1 <= 1:
index1 = 0
if index2 >= nx:
index2 = nx
a1 = 0.0
a2 = 0.0
for j in range(index1, index2):
if j != index:
# Extra check to avoid NaNs in output file
if freqs[j] == 0.0:
data_smooth[index] = data[index]
break
c1 = bexp * np.log10(freqs[j] / freq)
c1 = np.power(np.sin(c1) / c1, 4.0)
a2 = a2 + c1
a1 = a1 + c1 * data[j]
else:
a2 = a2 + 1.0
a1 = a1 + data[index]
data_smooth[index] = a1 / a2
return data_smooth
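# Minimal usage sketch (added for illustration; not part of the original code):
# smooth a synthetic, evenly spaced spectrum with the same b = 188.5 used in
# calculate_smoothed_eas below. Relies on the module-level numpy import.
def _ko98_smoothing_example():
    example_freqs = np.linspace(0.0, 50.0, 501)
    example_fas = np.abs(np.sin(example_freqs)) + 1.0
    # delta_freq is the constant frequency spacing of the input grid
    return ko98_smoothing(example_freqs, example_fas,
                          example_freqs[1] - example_freqs[0], 188.5)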
def calculate_smoothed_eas(ns_file, ew_file, output_file=None):
"""
Calculates the smoothed EAS at the same frequencies as specified in
the input files
"""
b_param = 188.5 # cm/s
# Read data
freqs, ns_data = read_fas_file(ns_file)
_, ew_data = read_fas_file(ew_file)
eas_data = []
# Calculate EAS
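    # EAS (effective amplitude spectrum) is the root-mean-square of the two
    # horizontal FAS components at each frequency: sqrt((H1^2 + H2^2) / 2).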
for ns_comp, ew_comp in zip(ns_data, ew_data):
eas_data.append(np.sqrt(0.5*(pow(ns_comp, 2) + pow(ew_comp, 2))))
# Calculate Smoothed EAS
smoothed_eas = ko98_smoothing(freqs, eas_data,
freqs[1]-freqs[0],
b_param)
# Write data file if output_file is provided
if output_file is not None:
out_file = open(output_file, 'w')
out_file.write("# Freq(Hz)\t FAS H1 (cm/s)\t FAS H2 (cm/s)\t "
"EAS (cm/s)\t Smoothed EAS, b=%f (cm/s)\n" %
(b_param))
for freq, fas_h1, fas_h2, eas, s_eas in zip(freqs, ns_data,
ew_data, eas_data,
smoothed_eas):
out_file.write("%2.7E\t%2.7E\t%2.7E\t%2.7E\t%2.7E\n" %
(freq, fas_h1, fas_h2, eas, s_eas))
out_file.close()
# All done!
return freqs, ns_data, ew_data, eas_data, smoothed_eas
class FAS(object):
"""
    Implements FAS analysis for the Broadband Platform
"""
def __init__(self, i_r_stations, sim_id=0):
"""
Initializes class variables
"""
self.sim_id = sim_id
self.r_stations = i_r_stations
def run(self):
"""
Run FAS analysis codes
"""
print("FAS Calculation".center(80, '-'))
install = install_cfg.InstallCfg.getInstance()
sim_id = self.sim_id
sta_base = os.path.basename(os.path.splitext(self.r_stations)[0])
self.log = os.path.join(install.A_OUT_LOG_DIR, str(sim_id),
"%d.fas_%s.log" % (sim_id, sta_base))
a_statfile = os.path.join(install.A_IN_DATA_DIR,
str(sim_id),
self.r_stations)
a_tmpdir = os.path.join(install.A_TMP_DATA_DIR, str(sim_id))
a_outdir = os.path.join(install.A_OUT_DATA_DIR, str(sim_id))
a_outdir_fas = os.path.join(a_outdir, "FAS")
#
# Make sure the tmp and out directories exist
#
bband_utils.mkdirs([a_tmpdir, a_outdir, a_outdir_fas], print_cmd=False)
slo = StationList(a_statfile)
site_list = slo.getStationList()
# Save current directory
old_cwd = os.getcwd()
os.chdir(a_tmpdir)
for site in site_list:
print("==> Processing station: %s" % (site.scode))
# Copy acc file to tmpdata
acc_file = "%d.%s.acc.bbp" % (sim_id, site.scode)
shutil.copy2(os.path.join(a_outdir, acc_file),
os.path.join(a_tmpdir, acc_file))
asc2smc_control_file = "asc2smc.ctl"
smc2fs2_control_file = "smc2fs2.ctl"
header_lines = bband_utils.count_header_lines(os.path.join(a_tmpdir,
acc_file))
# Work on both NS and EW components
for comp, data_column in zip(["NS", "EW"], [2, 3]):
# First we convert from BBP to SMC format
create_boore_asc2smc(os.path.join(a_tmpdir,
asc2smc_control_file),
acc_file, data_column, header_lines,
".smc8.%s" % (comp))
cmd = ("%s << END >> %s 2>&1\n" %
(os.path.join(install.A_USGS_BIN_DIR, "asc2smc"),
self.log) +
"%s\n" % (asc2smc_control_file) +
"END\n")
bband_utils.runprog(cmd, False, abort_on_error=True)
# Then, we run the smc2fs2 FAS tool
smc_file = "%s.smc8.%s" % (acc_file, comp)
create_boore_smc2fs2(os.path.join(a_tmpdir,
smc2fs2_control_file),
smc_file, ".no_smooth.fs.col")
cmd = ("%s >> %s 2>&1\n" %
(os.path.join(install.A_USGS_BIN_DIR, "smc2fs2"),
self.log))
bband_utils.runprog(cmd, False, abort_on_error=True)
# Calculate EAS and smoothed EAS
ns_file = os.path.join(a_tmpdir,
"%s.smc8.NS.no_smooth.fs.col" % (acc_file))
ew_file = os.path.join(a_tmpdir,
"%s.smc8.EW.no_smooth.fs.col" % (acc_file))
output_file = os.path.join(a_outdir_fas,
"%s.smc8.smooth.fs.col" % (acc_file))
(freqs, ns_fas,
ew_fas, eas, smoothed_eas) = calculate_smoothed_eas(ns_file,
ew_file,
output_file)
# Create plot
fas_plot = os.path.join(a_outdir_fas,
"%d.%s.fas.png" % (sim_id, site.scode))
plot_fas(freqs, ns_fas, ew_fas, smoothed_eas, fas_plot, site.scode)
# All done, restore working directory
os.chdir(old_cwd)
print("FAS Calculation Completed".center(80, '-'))
if __name__ == '__main__':
if len(sys.argv) < 3:
print("Usage: %s station_list sim_id" % (os.path.basename(sys.argv[0])))
sys.exit(1)
print("Testing Module: %s" % (os.path.basename(sys.argv[0])))
ME = FAS(sys.argv[1], sim_id=int(sys.argv[2]))
ME.run()
sys.exit(0)
| 2.484375
| 2
|
jc_decrypter/utils/files.py
|
cesarbruschetta/julio-cesar-decrypter
| 0
|
12781301
|
""" module utils to method to files """
import logging
import hashlib
logger = logging.getLogger(__name__)
def write_file(path: str, source: str, mode="w") -> None:
""" write file in file system in unicode """
logger.debug("Gravando arquivo: %s", path)
with open(path, mode, encoding="utf-8") as f:
f.write(source)
def write_file_binary(path: str, source: bytes) -> None:
""" write file in file system in bytes """
logger.debug("Gravando arquivo binario: %s", path)
with open(path, "wb") as f:
f.write(source)
def sha1(mensagem: str) -> str:
""" generate sha1 hash """
_sum = hashlib.sha1()
_sum.update(mensagem.encode("utf-8"))
return _sum.hexdigest()
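# Illustration only (comment added; not part of the original module):
# sha1("abc") returns "a9993e364706816aba3e25717850c26c9cd0d89d".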
| 3.375
| 3
|
webargscontrib/utils/__init__.py
|
marcellarius/webargscontrib.utils
| 0
|
12781302
|
from .string import lowercase, strip
from .types import boolean
from .validate import choices, not_empty, not_null, within
| 1.445313
| 1
|
2.py
|
envizzion/raspberrypi-sim800l
| 0
|
12781303
|
<filename>2.py
# SIMSMS1.py
import RPi.GPIO as GPIO
import serial
import time, sys
import datetime
P_BUTTON = 24 # Button, adapt to your wiring
def setup():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(P_BUTTON, GPIO.IN, GPIO.PUD_UP)
SERIAL_PORT = "/dev/ttyAMA0" # Raspberry Pi 2
#SERIAL_PORT = "/dev/ttyS0" # Raspberry Pi 3
ser = serial.Serial(SERIAL_PORT, baudrate = 9600, timeout = 5)
setup()
ser.write("AT+CMGF=1\r") # set to text mode
time.sleep(3)
ser.write('AT+CMGDA="DEL ALL"\r') # delete all SMS
time.sleep(3)
reply = ser.read(ser.inWaiting()) # Clean buf
print "Listening for incomming SMS..."
while True:
reply = ser.read(ser.inWaiting())
if reply != "":
ser.write("AT+CMGR=1\r")
time.sleep(3)
reply = ser.read(ser.inWaiting())
print "SMS received. Content:"
print reply
if "getStatus" in reply:
t = str(datetime.datetime.now())
if GPIO.input(P_BUTTON) == GPIO.HIGH:
state = "Button released"
else:
state = "Button pressed"
ser.write('AT+CMGS="+41764331356"\r')
time.sleep(3)
msg = "Sending status at " + t + ":--" + state
print "Sending SMS with status info:" + msg
ser.write(msg + chr(26))
time.sleep(3)
ser.write('AT+CMGDA="DEL ALL"\r') # delete all
time.sleep(3)
ser.read(ser.inWaiting()) # Clear buf
time.sleep(5)
| 3.140625
| 3
|
tests/test_model_methods/test_load_all.py
|
naterenegar/ormar
| 905
|
12781304
|
<filename>tests/test_model_methods/test_load_all.py<gh_stars>100-1000
from typing import List
import databases
import pytest
import sqlalchemy
import ormar
from tests.settings import DATABASE_URL
database = databases.Database(DATABASE_URL, force_rollback=True)
metadata = sqlalchemy.MetaData()
class BaseMeta(ormar.ModelMeta):
database = database
metadata = metadata
class Language(ormar.Model):
class Meta(BaseMeta):
tablename = "languages"
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100)
level: str = ormar.String(max_length=150, default="Beginner")
class CringeLevel(ormar.Model):
class Meta(BaseMeta):
tablename = "levels"
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100)
language = ormar.ForeignKey(Language)
class NickName(ormar.Model):
class Meta(BaseMeta):
tablename = "nicks"
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100, nullable=False, name="hq_name")
is_lame: bool = ormar.Boolean(nullable=True)
level: CringeLevel = ormar.ForeignKey(CringeLevel)
class HQ(ormar.Model):
class Meta(BaseMeta):
tablename = "hqs"
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100, nullable=False, name="hq_name")
nicks: List[NickName] = ormar.ManyToMany(NickName)
class Company(ormar.Model):
class Meta(BaseMeta):
tablename = "companies"
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100, nullable=False, name="company_name")
founded: int = ormar.Integer(nullable=True)
hq: HQ = ormar.ForeignKey(HQ, related_name="companies")
@pytest.fixture(autouse=True, scope="module")
def create_test_database():
engine = sqlalchemy.create_engine(DATABASE_URL)
metadata.drop_all(engine)
metadata.create_all(engine)
yield
metadata.drop_all(engine)
@pytest.mark.asyncio
async def test_load_all_fk_rel():
async with database:
async with database.transaction(force_rollback=True):
hq = await HQ.objects.create(name="Main")
company = await Company.objects.create(name="Banzai", founded=1988, hq=hq)
hq = await HQ.objects.get(name="Main")
await hq.load_all()
assert hq.companies[0] == company
assert hq.companies[0].name == "Banzai"
assert hq.companies[0].founded == 1988
hq2 = await HQ.objects.select_all().get(name="Main")
assert hq2.companies[0] == company
assert hq2.companies[0].name == "Banzai"
assert hq2.companies[0].founded == 1988
@pytest.mark.asyncio
async def test_load_all_many_to_many():
async with database:
async with database.transaction(force_rollback=True):
nick1 = await NickName.objects.create(name="BazingaO", is_lame=False)
nick2 = await NickName.objects.create(name="Bazinga20", is_lame=True)
hq = await HQ.objects.create(name="Main")
await hq.nicks.add(nick1)
await hq.nicks.add(nick2)
hq = await HQ.objects.get(name="Main")
await hq.load_all()
assert hq.nicks[0] == nick1
assert hq.nicks[0].name == "BazingaO"
assert hq.nicks[1] == nick2
assert hq.nicks[1].name == "Bazinga20"
hq2 = await HQ.objects.select_all().get(name="Main")
assert hq2.nicks[0] == nick1
assert hq2.nicks[0].name == "BazingaO"
assert hq2.nicks[1] == nick2
assert hq2.nicks[1].name == "Bazinga20"
@pytest.mark.asyncio
async def test_load_all_with_order():
async with database:
async with database.transaction(force_rollback=True):
nick1 = await NickName.objects.create(name="Barry", is_lame=False)
nick2 = await NickName.objects.create(name="Joe", is_lame=True)
hq = await HQ.objects.create(name="Main")
await hq.nicks.add(nick1)
await hq.nicks.add(nick2)
hq = await HQ.objects.get(name="Main")
await hq.load_all(order_by="-nicks__name")
assert hq.nicks[0] == nick2
assert hq.nicks[0].name == "Joe"
assert hq.nicks[1] == nick1
assert hq.nicks[1].name == "Barry"
await hq.load_all()
assert hq.nicks[0] == nick1
assert hq.nicks[1] == nick2
hq2 = (
await HQ.objects.select_all().order_by("-nicks__name").get(name="Main")
)
assert hq2.nicks[0] == nick2
assert hq2.nicks[1] == nick1
hq3 = await HQ.objects.select_all().get(name="Main")
assert hq3.nicks[0] == nick1
assert hq3.nicks[1] == nick2
@pytest.mark.asyncio
async def test_loading_reversed_relation():
async with database:
async with database.transaction(force_rollback=True):
hq = await HQ.objects.create(name="Main")
await Company.objects.create(name="Banzai", founded=1988, hq=hq)
company = await Company.objects.get(name="Banzai")
await company.load_all()
assert company.hq == hq
company2 = await Company.objects.select_all().get(name="Banzai")
assert company2.hq == hq
@pytest.mark.asyncio
async def test_loading_nested():
async with database:
async with database.transaction(force_rollback=True):
language = await Language.objects.create(name="English")
level = await CringeLevel.objects.create(name="High", language=language)
level2 = await CringeLevel.objects.create(name="Low", language=language)
nick1 = await NickName.objects.create(
name="BazingaO", is_lame=False, level=level
)
nick2 = await NickName.objects.create(
name="Bazinga20", is_lame=True, level=level2
)
hq = await HQ.objects.create(name="Main")
await hq.nicks.add(nick1)
await hq.nicks.add(nick2)
hq = await HQ.objects.get(name="Main")
await hq.load_all(follow=True)
assert hq.nicks[0] == nick1
assert hq.nicks[0].name == "BazingaO"
assert hq.nicks[0].level.name == "High"
assert hq.nicks[0].level.language.name == "English"
assert hq.nicks[1] == nick2
assert hq.nicks[1].name == "Bazinga20"
assert hq.nicks[1].level.name == "Low"
assert hq.nicks[1].level.language.name == "English"
hq2 = await HQ.objects.select_all(follow=True).get(name="Main")
assert hq2.nicks[0] == nick1
assert hq2.nicks[0].name == "BazingaO"
assert hq2.nicks[0].level.name == "High"
assert hq2.nicks[0].level.language.name == "English"
assert hq2.nicks[1] == nick2
assert hq2.nicks[1].name == "Bazinga20"
assert hq2.nicks[1].level.name == "Low"
assert hq2.nicks[1].level.language.name == "English"
hq5 = await HQ.objects.select_all().get(name="Main")
assert len(hq5.nicks) == 2
await hq5.nicks.select_all(follow=True).all()
assert hq5.nicks[0] == nick1
assert hq5.nicks[0].name == "BazingaO"
assert hq5.nicks[0].level.name == "High"
assert hq5.nicks[0].level.language.name == "English"
assert hq5.nicks[1] == nick2
assert hq5.nicks[1].name == "Bazinga20"
assert hq5.nicks[1].level.name == "Low"
assert hq5.nicks[1].level.language.name == "English"
await hq.load_all(follow=True, exclude="nicks__level__language")
assert len(hq.nicks) == 2
assert hq.nicks[0].level.language is None
assert hq.nicks[1].level.language is None
hq3 = (
await HQ.objects.select_all(follow=True)
.exclude_fields("nicks__level__language")
.get(name="Main")
)
assert len(hq3.nicks) == 2
assert hq3.nicks[0].level.language is None
assert hq3.nicks[1].level.language is None
await hq.load_all(follow=True, exclude="nicks__level__language__level")
assert len(hq.nicks) == 2
assert hq.nicks[0].level.language is not None
assert hq.nicks[0].level.language.level is None
assert hq.nicks[1].level.language is not None
assert hq.nicks[1].level.language.level is None
await hq.load_all(follow=True, exclude="nicks__level")
assert len(hq.nicks) == 2
assert hq.nicks[0].level is None
assert hq.nicks[1].level is None
await hq.load_all(follow=True, exclude="nicks")
assert len(hq.nicks) == 0
| 2.296875
| 2
|
pygcms/dispspec.py
|
dirkenstein/pyspec
| 0
|
12781305
|
<gh_stars>0
import sys
import operator
#from time import time
import time
import codecs
import re
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import pygcms.msfile.msfileread as mr
import pygcms.msfile.readspec as readspec
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import pygcms.calc.putil as putil
import peakutils
import numpy as np
import pandas
import scipy
import scipy.interpolate
import copy
from mol import skeleton
import os
class MainWindow(QMainWindow):
count = 0
MaxRecentFiles = 5
def __init__(self, parent = None):
super(MainWindow, self).__init__(parent)
self.mdi = QMdiArea()
self.setCentralWidget(self.mdi)
self.recentFileActs = []
for i in range(MainWindow.MaxRecentFiles):
self.recentFileActs.append(
QAction(self, visible=False,
triggered=self.recentfileaction))
bar = self.menuBar()
app = bar.addMenu("Application")
prefact = app.addAction("Preferences")
prefact.triggered.connect(self.preferences)
file = bar.addMenu("&File")
file.addAction("Load")
file.addAction("Save MSP")
file.addAction("Save Raw")
self.separatorAct = file.addSeparator()
for i in range(MainWindow.MaxRecentFiles):
file.addAction(self.recentFileActs[i])
file.addSeparator()
file.triggered[QAction].connect(self.fileaction)
self.updateRecentFileActions()
window = bar.addMenu("&Window")
#window.addAction("New")
window.addAction("cascade")
window.addAction("Tiled")
window.triggered[QAction].connect(self.windowaction)
self.create_status_bar()
#self.mdi.subWindowActivated.connect(self.updateMenus)
chrom = bar.addMenu("&Chromatogram")
chrom.addAction("Peak Find")
chrom.addAction("Baseline")
chrom.addAction("Autointegrate")
chrom.triggered[QAction].connect(self.doChromAction)
spec = bar.addMenu("Spectrum")
subact = spec.addAction("Subtract")
subact.triggered.connect(self.subtractAction)
nistact = spec.addAction("Launch NIST")
nistact.triggered.connect(self.launchNISTAction)
hlp = bar.addMenu("&Help")
aboutact = hlp.addAction("About")
aboutact.triggered.connect(self.on_about)
self.paths = QPathSettings()
self.colors = QColorSettings()
self.setWindowTitle("MSDisplay")
self.registers = []
self.running_subtract = False
def on_about(self):
msg = """
MSDisplay
* HP Chemstation .MS Reader:
*Reads .MS spectrum files
*Provides basic chromatogram analysis
* (C)2019 <NAME>
"""
QMessageBox.about(self, "About MSDisplay", msg.strip())
def doChromAction(self, q):
act = self.activeMdiChild()
if act is not None:
if q.text() == "Peak Find":
act.peak_detect()
elif q.text() == "Baseline":
act.dobaseline()
elif q.text() == "Autointegrate":
act.autointegrate()
def create_status_bar(self):
self.status_text = QLabel("MS File display")
self.progress = QProgressBar(self)
#self.progress.setGeometry(, 80, 250, 20)
self.statusBar().addWidget(self.status_text, 1)
self.statusBar().addWidget(self.progress, 2)
def windowaction(self, q):
print ("triggered")
if q.text() == "New":
MainWindow.count = MainWindow.count+1
sub = QMdiSubWindow()
sub.setWidget(QTextEdit())
sub.setWindowTitle("subwindow"+str(MainWindow.count))
self.mdi.addSubWindow(sub)
sub.show()
if q.text() == "cascade":
self.mdi.cascadeSubWindows()
if q.text() == "Tiled":
self.mdi.tileSubWindows()
def recentfileaction(self, q):
action = self.sender()
if action:
self.loadMSFile(action.data())
def fileaction(self, q):
print ("triggered: ", q)
if q.text() == "Load":
file_choices = "MS Files (*.ms);;All Files (*)"
path, choices = QFileDialog.getOpenFileName(self,
'Load file', '',
file_choices)
if path:
self.loadMSFile(path)
if q.text() == "Save MSP":
file_choices = "MSP Files (*.msp)"
path, choice = QFileDialog.getSaveFileName(self,
'Save file', '',
file_choices)
if path:
self.saveMSPFile(path)
if q.text() == "Save Raw":
file_choices = "Raw Files (*.bin)"
path, choice = QFileDialog.getSaveFileName(self,
'Save file', '',
file_choices)
if path:
self.saveRawFile(path)
def launchNISTAction(self):
self.launchNIST(True)
def launchNIST(self, bgnd):
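        # Automation handshake (descriptive comment added for clarity): export the
        # selected spectra to an MSP file, write the autoimp.msd / autoimp.str /
        # filespec.fil pointer files that NIST MS Search reads, launch it through
        # Wine, and, in background mode, poll for SRCREADY.TXT before parsing
        # SRCRESLT.TXT in onFileTimer.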
win = self.getActiveTicArea()
self.tic_win = win
if win is not None:
nistpath = self.paths.getPath('nistpath')
#nistpath = '/Volumes/[C] Windows 7 1/NIST08/MSSEARCH/'
#filepath = 'C:\\tmp\\'
filepath = self.paths.getPath('filepath') #'C:\\MSREAD\\'
specfile = self.paths.getPath('specfile') #filespec.fil
usepath = self.paths.getPath('usepath') #/Users/dirk/.wine/drive_c/MSREAD/
#usepath = '/Volumes/[C] Windows 7 1/MSREAD/'
nistapp = self.paths.getPath('nistapp') #'nistms\$.exe'
#winecmd ='open /Applications/Wine\ Stable.app/ --args '
winecmd= self.paths.getPath('winecmd') #'/Applications/Wine\ Stable.app/Contents/Resources/wine/bin/wine '
readyfile = self.paths.getPath('readyfile')
resultfile = self.paths.getPath('resultfile')
fname = 'datafile%i.msp' % MainWindow.count
if bgnd:
if os.path.isfile(nistpath + readyfile):
os.remove(nistpath + readyfile)
if os.path.isfile(nistpath + resultfile):
os.remove(nistpath + resultfile)
self.saveMSPFile(usepath + fname)
inifile = open(nistpath + 'autoimp.msd', "w")
inifile.write(filepath + specfile + '\n')
inifile.close()
strfile = open (nistpath + 'autoimp.str', "w")
strfile.write ('"MS Read" "' + filepath+'MSREAD.BAT" "%1"\n')
strfile.close()
specf = open(usepath + 'filespec.fil', "w")
specf.write (filepath + fname + ' OVERWRITE\n')
specf.close()
if bgnd:
#time.sleep(1)
os.system(winecmd + nistpath + nistapp + ' /instrument /par=2')
self.file_timer = QTimer()
self.file_timer.setSingleShot(False)
self.file_timer.timeout.connect(self.onFileTimer)
self.file_timer.start(1000)
else:
os.system(winecmd + nistpath + nistapp + ' /INSTRUMENT')
def onFileTimer(self):
readyfile = self.paths.getPath('readyfile')
resultfile = self.paths.getPath('resultfile')
nistpath = self.paths.getPath('nistpath')
if os.path.isfile(nistpath + readyfile):
self.file_timer.stop()
srchfile = codecs.open(nistpath + resultfile, 'r', 'cp437') #'iso-8859-1'
te = []
for x in srchfile:
te.append(x)
ts = ''.join(te)
res = self.parseRes(ts)
self.tic_win.compoundsList(res)
def preferences(self):
#MainWindow.count = MainWindow.count+1
prefdlg = QPrefsDialog(self.paths, self.colors, self)
self.paths.setParent(prefdlg)
self.colors.setParent(prefdlg)
prefdlg.show()
def newTextWindow(self, t):
MainWindow.count = MainWindow.count+1
sub = QMdiSubWindow()
te = QTextEdit(t)
sub.setWidget(te)
sub.setWindowTitle("Search Results "+str(MainWindow.count))
self.mdi.addSubWindow(sub)
sub.show()
def parseRes(self, t):
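        # Expected SRCRESLT.TXT layout (inferred from the parsing below): blocks
        # introduced by "Unknown: <name> Compound in Library Factor = <int>",
        # each followed by "Hit <n>: <name>; ..." lines of semicolon-separated
        # fields; the result is a dict keyed by unknown name, holding the library
        # factor and the list of hits.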
rdi = {}
hhdi = []
for l in t.splitlines():
ls = l.strip()
if ls.startswith('Unknown:'):
if len(hhdi) > 0:
rdi.update({ cn : { 'LibFact':lf, 'Hits': hhdi }})
hhdi = []
libstr = 'Compound in Library Factor ='
p = ls.find(libstr)
cn = ls[9:p].strip()
#print(cn)
lf = int(ls[p+len(libstr):].strip())
#print (lf)
elif ls.startswith('Hit '):
hdi = {}
desc = ls[4:].split(';')
first = True
for t in desc:
vs = t.split(':')
if first:
first = False
hn = vs[0].strip()
hdi.update({'Hit' : hn})
n = 'Name'
v = vs[1].strip().strip('<>')
else:
if len(vs) == 1:
n = 'Formula'
v = vs[0].strip().strip('<>')
else:
n = vs[0].strip()
v = vs[1].strip().strip('<>')
hdi.update({n :v })
hhdi.append(hdi)
else:
pass
if len(hhdi) > 0:
rdi.update({ cn : { 'LibFact':lf, 'Hits': hhdi }})
return rdi
def getActiveTicArea(self):
act = self.activeMdiChild()
if isinstance(act, QPeaksWindow):
win = act.ticArea
elif isinstance(act, QTICArea):
win = act
else:
win = None
return win
def saveMSPFile(self, path):
act = self.activeMdiChild()
try:
if isinstance(act, QSpectrumArea):
f = open(path, "w");
act.spectrum.saveMsp(f, act.rt, "UNK-1")
f.close()
else:
win = self.getActiveTicArea()
if win is not None and win.maxima is not None:
f = open(path, "w");
anysel = False
for idx, m in win.maxima['retention_time'].iteritems():
if win.peakw is not None:
sel = win.peakw.table_model.isSelected(idx)
else:
sel, mkr, fil = win.sels[idx]
if sel:
win.runfile.setUseNew(win.subtract_cb.isChecked())
tic = win.runfile.nearest_tic(m)
s = win.runfile.getSpectra()
rt, spectrum=s[tic.index[0]]
spectrum.saveMsp(f, rt, "UNK-%i" % idx)
anysel = True
f.close()
if not anysel:
os.remove(path)
self.statusBar().showMessage('No maxima selected...', 4000)
else:
self.statusBar().showMessage('No maxima to save...', 4000)
except Exception as e:
self.statusBar().showMessage('Unable to save: ' + str(e), 4000)
def saveRawFile(self, path):
act = self.activeMdiChild()
try:
if isinstance(act, QSpectrumArea):
f = open(path, "wb");
act.spectrum.saveRaw(f)
f.close()
else:
self.statusBar().showMessage('No spectrum selected... ', 4000)
except Exception as e:
self.statusBar().showMessage('Unable to save: ' + str(e), 4000)
def loadMSFile(self, path):
#self.canvas.print_figure(path, dpi=self.dpi)
try:
theRun = mr.ReadMSFile(path)
self.statusBar().showMessage('Loaded %s' % path, 2000)
#self.on_draw()
MainWindow.count = MainWindow.count+1
sub = QMdiSubWindow()
submain = QTICArea(sub, self)
submain.setFile(theRun)
submain.setPath(path)
sub.setWidget(submain)
sub.setWindowTitle(str(MainWindow.count) + ": " + self.strippedName(path))
self.mdi.addSubWindow(sub)
sub.show()
self.setCurrentFile(path)
except Exception as e:
self.statusBar().showMessage('Unable to load: ' + str(e), 4000)
def activeMdiChild(self):
activeSubWindow = self.mdi.activeSubWindow()
if activeSubWindow:
return activeSubWindow.widget()
return None
def strippedName(self, fullFileName):
return QFileInfo(fullFileName).fileName()
def setCurrentFile(self, fileName):
self.curFile = fileName
#if self.curFile:
# self.setWindowTitle("%s - Recent Files" % self.strippedName(self.curFile))
#else:
# self.setWindowTitle("Recent Files")
settings = QSettings('Muppetastic', 'MSRead')
files = settings.value('recentFileList')
if files is None:
files = []
try:
files.remove(fileName)
except ValueError:
pass
files.insert(0, fileName)
del files[MainWindow.MaxRecentFiles:]
settings.setValue('recentFileList', files)
for widget in QApplication.topLevelWidgets():
if isinstance(widget, MainWindow):
widget.updateRecentFileActions()
def updateRecentFileActions(self):
settings = QSettings('Muppetastic', 'MSRead')
files = settings.value('recentFileList')
if files:
l = len(files)
else:
l = 0
numRecentFiles = min(l, MainWindow.MaxRecentFiles)
for i in range(numRecentFiles):
text = "&%d %s" % (i + 1, self.strippedName(files[i]))
self.recentFileActs[i].setText(text)
self.recentFileActs[i].setData(files[i])
self.recentFileActs[i].setVisible(True)
for j in range(numRecentFiles, MainWindow.MaxRecentFiles):
self.recentFileActs[j].setVisible(False)
self.separatorAct.setVisible((numRecentFiles > 0))
def subtractAction(self):
act = self.activeMdiChild()
if act is not None and not self.running_subtract:
if isinstance(act, QSpectrumArea) or isinstance(act, QTICArea):
self.registers.append(act)
if len(self.registers) == 2:
a1 = self.registers[0]
a2 = self.registers[1]
t1 = isinstance(a1, QSpectrumArea)
t2 = isinstance(a2, QSpectrumArea)
if t2:
t = subtractThread(a1, a2)
self.statusBar().showMessage('Subtracting..', 2000)
t.progress_update.connect(self.updateProgressBar)
t.subtract_done.connect(self.onSubtractComplete)
#t.scan_status.connect(self.showScanStatus)
self.running_subtract = True
t.start(priority=QThread.LowestPriority)
else:
                        self.statusBar().showMessage("Can't subtract TIC from spectrum", 2000)
                    self.registers = []
else:
self.statusBar().showMessage("Added to registers")
def onSubtractComplete(self, res):
a1 = self.registers[0]
a2 = self.registers[1]
t1 = isinstance(a1, QSpectrumArea)
t2 = isinstance(a2, QSpectrumArea)
self.running_subtract = False
        self.registers = []
if t1:
a1.launchSpectrumArea(res[0], a1.getRT(), ' Sub:' + str(a2.getRT()) + ': ')
def updateProgressBar(self, maxVal):
uv = self.progress.value() + maxVal
if maxVal == 0:
uv = 0
if uv > 100:
uv = 100
self.progress.setValue(uv)
class QTICArea(QWidget):
def __init__(self, parent = None, main=None):
super().__init__(parent)
self.dpi = 100
self.fig = Figure((6.0, 4.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(parent)
self.axes = self.fig.add_subplot(111)
#self.plt = self.fig.add_subplot(111)
# Bind the 'pick' event for clicking on one of the bars
#
#self.canvas.mpl_connect('pick_event', self.on_pick)
self.canvas.mpl_connect('button_press_event', self.on_click)
self.canvas.mpl_connect('motion_notify_event', self.on_mouse_move)
self.canvas.mpl_connect('pick_event', self.on_pick)
# Create the navigation toolbar, tied to the canvas
#
self.mpl_toolbar = NavigationToolbar(self.canvas, self)
self.grid_cb = QCheckBox("Show &Grid")
self.grid_cb.setChecked(True)
self.grid_cb.stateChanged.connect(self.on_draw)
self.minima_cb = QCheckBox("Show &Minima")
self.minima_cb.setChecked(False)
self.minima_cb.stateChanged.connect(self.on_draw)
self.baseline_cb = QCheckBox("Show &Baseline")
self.baseline_cb.setChecked(True)
self.baseline_cb.stateChanged.connect(self.on_draw)
self.areas_cb = QCheckBox("Show &Areas")
self.areas_cb.setChecked(True)
self.areas_cb.stateChanged.connect(self.on_draw)
self.tic_cb = QCheckBox("Show Computed &TIC")
self.tic_cb.setChecked(False)
self.tic_cb.stateChanged.connect(self.on_draw)
self.subtract_cb = QCheckBox("Subtracted Spectra")
self.subtract_cb.setChecked(False)
self.subtract_cb.stateChanged.connect(self.on_draw)
self.subtract_cb.hide()
self.slider_label = QLabel('Baseline Degree :')
self.slider = QSlider(Qt.Horizontal)
self.slider.setRange(1, 10)
self.slider.setValue(6)
self.slider.setTracking(True)
self.slider.setTickInterval(1)
self.slider.setTickPosition(QSlider.TicksBothSides)
self.slider.valueChanged.connect(self.redo_baseline)
self.main = main
hbox = QHBoxLayout()
hbox.addWidget(self.grid_cb)
hbox.addWidget(self.minima_cb)
hbox.addWidget(self.baseline_cb)
hbox.addWidget(self.areas_cb)
hbox.addWidget(self.tic_cb)
hbox.addWidget(self.subtract_cb)
sbox = QHBoxLayout()
sbox.addWidget(self.slider_label)
sbox.addWidget(self.slider)
cbox = QVBoxLayout()
cbox.addLayout(hbox)
cbox.addLayout(sbox)
vbox = QVBoxLayout()
vbox.addWidget(self.mpl_toolbar)
vbox.addWidget(self.canvas)
vbox.addLayout(cbox)
self.setLayout(vbox)
self.anns = []
#self.setCentralWidget(self.main_frame)
self.maxima = None
self.base_abundance = None
self.peakw = None
self.ranges = None
self.path = ""
self.baseline =None
self.baseline_plt =None
self.baseline_order = 6
def redo_baseline(self):
self.baseline_order = self.slider.sliderPosition()
self.calc_baseline()
self.on_draw()
def on_click(self,event):
x = event.xdata
y = event.ydata
if event.dblclick:
tic = self.runfile.nearest_tic(x)
self.runfile.setUseNew(self.subtract_cb.isChecked())
s = self.runfile.getSpectra()
rt, spectrum=s[tic.index[0]]
self.launchSpectrumArea(spectrum, rt, '')
else:
pass
def launchSpectrumArea(self, spectrum, rt, special):
MainWindow.count = MainWindow.count+1
sub = QMdiSubWindow()
submain = QSpectrumArea(sub, self.main)
submain.setSpectrum(spectrum)
submain.setRT(rt)
sub.setWidget(submain)
sub.setWindowTitle(str(MainWindow.count) + ": " + special + "Spectrum at: " + str(rt) + " : " + self.main.strippedName(self.path))
self.main.mdi.addSubWindow(sub)
sub.show()
def on_mouse_move(self, event):
xe = event.xdata
if xe is not None:
#print('mouse x: ' + str(xe))
#self.axes.lines = [self.axes.lines[0]]
self.vline.set_data([xe, xe], [0,1] )
self.canvas.draw()
def draw_sel_peak(self, idx, dosel, flip, keep):
hcolor = self.main.colors.getColorFor('Highlight').name()
m = self.maxima.iloc[idx]
sel, mkr, fil = self.sels[idx]
if sel and flip:
selct = False
elif flip:
selct = True
elif keep:
selct = sel
else:
selct = dosel
#print(idx, sel, selct, dosel, flip, keep)
if mkr is not None:
mkr.remove()
mkr = None
if fil is not None:
fil.remove()
fil = None
if selct:
mkr = self.axes.scatter(m['retention_time'] , m['abundance'], color='red', marker='v')
if self.ranges is not None:
it, ie = self.ranges[idx]
#print("Sel it, ie ",it, ie)
i = self.runfile.getTic()
#print(i['retention_time'][it:ie])
#print(i['abundance'][it:ie])
#print(self.baseline['abundance'][it:ie])
fil = self.axes.fill_between(i['retention_time'][it:ie],i['abundance'][it:ie], self.baseline['abundance'][it:ie], color=hcolor)
self.sels[idx] = (selct, mkr, fil)
return selct
def draw_all_sel_peaks(self, clr, value, keep):
for idx, v in self.maxima.iterrows():
#print(idx)
if clr:
sel, mkr, fil = self.sels[idx]
self.sels[idx] = (sel, None, None)
self.draw_sel_peak(idx, value, False, keep)
def on_pick(self,event):
#print(event)
mkr = event.artist
#xdata = event.get_xdata()
#ydata = event.get_ydata()
ind = event.ind
#points = tuple(zip(xdata[ind], ydata[ind]))
#print('onpick points:', ind)
#mkr.set_color("red")
sel = self.draw_sel_peak(ind[0], False, True, False)
self.canvas.draw()
if self.peakw is not None:
self.peakw.table_model.doSelect(ind[0], sel)
# The event received here is of the type
# matplotlib.backend_bases.PickEvent
#
# It carries lots of information, of which we're using
# only a small amount here.
#
#box_points = event.artist.get_bbox().get_points()
#msg = "You've clicked on a peak with coords:\n %s" % box_points
#QMessageBox.information(self, "Click!", msg)
def setFile(self, runfile):
self.runfile = runfile
self.on_draw()
def setPath(self, path):
self.path = path
def on_draw(self):
self.axes.clear()
self.baseline_plt = None
self.runfile.setComputed(self.tic_cb.isChecked())
self.runfile.setUseNew(self.subtract_cb.isChecked())
mr.ReadMSFile.axes(self.axes)
self.axes.grid(self.grid_cb.isChecked())
if self.runfile != None:
self.runfile.plot(self.axes)
if self.maxima is not None:
self.draw_peak_detect()
if self.base_abundance is not None:
self.draw_baseline()
if self.ranges is not None:
self.draw_integration()
if self.maxima is not None:
self.draw_all_sel_peaks(True, False, True)
self.vline = self.axes.axvline(x=self.runfile.getMinX(), color="k")
self.canvas.draw()
def peak_detect(self):
i = self.runfile.getTic()
self.maxima, self.minima = putil.PUtil.peaksfr(i, 'abundance','retention_time')
#self.maxima = pandas.DataFrame(np.array(maxtab), columns=['retention_time', 'abundance'])
#self.minima = pandas.DataFrame(np.array(mintab), columns=['retention_time', 'abundance'])
sels= []
for idx, r in self.maxima.iterrows():
sels.append((False, None, None))
self.sels = sels
self.draw_peak_detect()
self.canvas.draw()
def draw_peak_detect(self):
self.axes.scatter(self.maxima['retention_time'] , self.maxima['abundance'], color='blue', marker='v', picker=5)
if self.minima_cb.isChecked():
self.axes.scatter(self.minima['retention_time'] , self.minima['abundance'], color='green', marker='^')
anns = []
for idx, r in self.maxima.iterrows():
anns.append(self.axes.annotate('%i:%.2f' % ( idx+1, r['retention_time']), xy=(r['retention_time'] + 0.05, r['abundance'] + 10000)))
for a in self.anns:
a.remove()
self.anns = anns
def peaksList(self):
MainWindow.count = MainWindow.count+1
sub = QMdiSubWindow()
submain = QPeaksWindow( self.maxima, ["Sel", "RT", "Peak", "Area", "Norm"], self, sub, self.main)
sub.setWidget(submain)
sub.setWindowTitle(str(MainWindow.count) + ": Peak List: " + self.main.strippedName(self.path) )
self.main.mdi.addSubWindow(sub)
sub.show()
self.peakw = submain
def compoundsList(self, search_res):
MainWindow.count = MainWindow.count+1
sub = QMdiSubWindow()
submain = QCompoundsWindow(search_res, self, sub, self.main)
sub.setWidget(submain)
sub.setWindowTitle(str(MainWindow.count) + ": Compound List: " + self.main.strippedName(self.path) )
self.main.mdi.addSubWindow(sub)
sub.show()
self.compundw = submain
def dobaseline(self):
self.calc_baseline()
self.draw_baseline()
self.canvas.draw()
def calc_baseline(self):
i = self.runfile.getTic()
self.baseline = pandas.DataFrame()
self.baseline['retention_time'] = i['retention_time']
self.baseline['abundance'] = peakutils.baseline(i['abundance'], deg=self.baseline_order)
self.base_abundance = pandas.DataFrame()
self.base_abundance['retention_time'] = i['retention_time']
self.base_abundance['abundance'] = i['abundance'] - self.baseline['abundance']
def draw_baseline(self):
bcolor = self.main.colors.getColorFor('Baseline').name()
if self.baseline_plt is not None:
print(self.baseline_plt)
self.baseline_plt.remove()
if self.baseline_cb.isChecked():
self.baseline_plt, = self.axes.plot(self.baseline['retention_time'], self.baseline['abundance'], color=bcolor)
def autointegrate(self):
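        # Descriptive comment added for clarity: for each detected maximum, the
        # straddling minima define the integration window; the baseline-corrected
        # TIC is integrated over that window with the trapezoid rule, and peak
        # areas are then normalized by the largest area.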
if self.maxima is None:
self.peak_detect()
if self.base_abundance is None:
self.dobaseline()
i = self.runfile.getTic()
ib = self.base_abundance
areas = []
ranges=[]
for m in self.maxima['retention_time']:
nearest = putil.PUtil.strad(self.minima, 'retention_time', m)
#print (nearest)
strt = nearest['retention_time'].min()
end = nearest['retention_time'].max()
#print("RTStr, end: ", strt, end)
istrt = putil.PUtil.nearest(i, 'retention_time', strt).iloc[0].name
iend = putil.PUtil.nearest(i, 'retention_time', end).iloc[0].name
if istrt == iend:
print ('0-width peak: ',m)
if m == self.maxima['retention_time'].iloc[-1]:
iend = i.iloc[-1].name
print ('last peak, fixing, iend now: ', iend)
elif m == self.maxima['retention_time'].iloc[0]:
istrt = i.iloc[0].name
print ('first peak, fixing, istrt now: ', istrt)
iend += 1 # the slicer needs one more
ranges.append((istrt, iend))
#print("Str, end: ", istrt, iend)
areas.append(scipy.integrate.trapz(ib['abundance'][istrt:iend], ib['retention_time'][istrt:iend]))
aread = pandas.DataFrame(np.array(areas), columns=['area'])
self.maxima['area'] = aread['area']
self.maxima['normalized_area'] = self.maxima['area'] / self.maxima['area'].max()
self.ranges = ranges
self.draw_integration()
self.draw_all_sel_peaks(False, False, True)
self.canvas.draw()
self.peaksList()
def draw_integration(self):
i = self.runfile.getTic()
n = 0
cls = ['yellow', 'cyan']
area1 = self.main.colors.getColorFor('Area1')
area2 = self.main.colors.getColorFor('Area2')
cls = [area1.name(), area2.name()]
if self.areas_cb.isChecked():
for it, ie in self.ranges:
#print ("it, ie ", it,ie)
self.axes.fill_between(i['retention_time'][it:ie],i['abundance'][it:ie], self.baseline['abundance'][it:ie], color=cls[n%2])
n += 1
anns = []
for idx, r in self.maxima.iterrows():
anns.append(self.axes.annotate('%i:%.3f' % (idx+1, r['normalized_area']), xy=(r['retention_time'] + 0.05, r['abundance'] + 10000)))
for a in self.anns:
a.remove()
self.anns = anns
def subtractBaselineSpec(self, specarea, progress=None):
print ('subtract ', self.runfile , " - ", specarea.spectrum )
spectra = self.runfile.getSpectra()
subspectra = []
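        # Descriptive comment added for clarity: the reference (background)
        # spectrum is turned into a cubic interpolant so it can be evaluated on
        # each scan's own m/z grid; out-of-range points contribute zero and only
        # positive residual abundances are kept in the subtracted spectra.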
np2 = specarea.spectrum.getSpectrum()['ions']
fn = scipy.interpolate.interp1d(np2['m/z'], np2['abundance'], kind='cubic', copy=True, bounds_error=False, fill_value=0, assume_sorted=False)
p = len(spectra)/100
for rt, spectrum in spectra:
np1 = spectrum.getSpectrum()['ions']
np3 = np1.copy()
np3['abundance'] = np3['abundance'] - np3['m/z'].apply(fn)
np3 = np3[np3['abundance']>0].sort_values(by='m/z', ascending=True)
newspec = copy.deepcopy(spectrum)
newspec.setSpectrumIons(np3)
subspectra.append((rt, newspec))
progress(p)
#QThread.yieldCurrentThread()
#QThread.msleep (50)
time.sleep(0)
self.runfile.setNewSpectra( subspectra)
self.subtract_cb.show()
return subspectra
class QSpectrumArea(QTICArea):
def __init__(self, parent = None, main=None):
super().__init__(parent, main)
self.minima_cb.hide()
self.baseline_cb.hide()
self.areas_cb.hide()
self.tic_cb.hide()
self.subtract_cb.hide()
def setSpectrum(self, spec):
self.spectrum = spec
self.on_draw()
def setRT(self, rt):
self.rt = rt
def getRT(self):
return self.rt
def on_draw(self):
self.axes.clear()
readspec.ReadSpec.axes(self.axes)
self.axes.grid(self.grid_cb.isChecked())
if self.spectrum != None:
self.spectrum.plot(self.axes)
self.canvas.draw()
self.vline = self.axes.axvline(x=self.spectrum.getMinX(), color="k")
def on_click(self,event):
x = event.xdata
y = event.ydata
msg = "You've clicked on a spectrum with coords:\n click=%s button=%d,\n x=%d, y=%d,\n xdata=%f, ydata=%f" % (
'double' if event.dblclick else 'single', event.button,
event.x, event.y, event.xdata, event.ydata)
QMessageBox.information(self, "Click!", msg)
def peak_detect(self):
pass
def subtractBaselineSpec(self, specarea, progress=None):
print ('subtract ',self.spectrum, " - " , specarea.spectrum )
np2 = specarea.spectrum.getSpectrum()['ions']
np1 = self.spectrum.getSpectrum()['ions']
#np22 = np2.copy()
#np22['abundance'] = -np22['abundance']
#np3 = pandas.concat((np1, np22))
#print(np2)
fn = scipy.interpolate.interp1d(np2['m/z'], np2['abundance'], kind='cubic', copy=True, bounds_error=False, fill_value=0, assume_sorted=False)
np3 = np1.copy()
np3['abundance'] = np3['abundance'] - np3['m/z'].apply(fn)
progress(50)
#print(np22)
# np3 = np1
#np3=np3.sort_values(by='m/z', ascending=True).groupby('m/z').sum()
np3 = np3[np3['abundance']>0].sort_values(by='m/z', ascending=True)
subspec = copy.deepcopy(self.spectrum)
subspec.setSpectrumIons(np3)
progress(50)
return [subspec]
class QPeaksWindow(QWidget):
def __init__(self, peakList, header, ticArea, parent = None, main=None, *args):
super().__init__(parent, *args)
self.main = main
# setGeometry(x_pos, y_pos, width, height)
#self.setGeometry(70, 150, 1326, 582)
self.setWindowTitle("Peak List")
self.table_model = PeaksTableModel(self, peakList, header, ticArea)
self.table_view = QTableView()
#self.table_view.setSelectionMode(QAbstractItemView.SingleSelection)
self.table_view.setSelectionMode(QAbstractItemView.SingleSelection)
self.table_view.setSelectionBehavior(QAbstractItemView.SelectRows)
# bind cell click to a method reference
self.table_view.clicked.connect(self.showSelection)
self.table_view.clicked.connect(self.selectRow)
self.table_view.setModel(self.table_model)
# enable sorting
self.table_view.setSortingEnabled(True)
layout = QVBoxLayout(self)
layout.addWidget(self.table_view)
self.setLayout(layout)
#self.maxima = peakList
self.ticArea = ticArea
# def update_model(self, datalist, header):
# self.table_model2 = PeaksTableModel(self, dataList, header)
# self.table_view.setModel(self.table_model2)
# self.table_view.update()
def showSelection(self, item):
cellContent = item.data()
# print(cellContent) # test
sf = "You clicked on {}".format(cellContent)
# display in title bar for convenience
self.setWindowTitle(sf)
def selectRow(self, index):
# print("current row is %d", index.row())
pass
class PeaksTableModel(QAbstractTableModel):
"""
keep the method names
they are an integral part of the model
"""
def __init__(self, parent, peaks, header, tic, *args):
QAbstractTableModel.__init__(self, parent, *args)
self.peaks = peaks
self.header = header
self.ticArea = tic
#self.timer = QTimer()
self.change_flag = True
#self.timer.timeout.connect(self.updateModel)
#self.timer.start(1000)
self.checkboxes = []
self.isChecked = []
#print("Cbox init")
for idx, v in peaks.iterrows():
self.checkboxes.append(QCheckBox(""))
tf, mkr, fil = self.ticArea.sels[idx]
self.isChecked.append(tf)
#peaks['selected'] = False
# self.rowCheckStateMap = {}
def setPeaks(self, peaks):
self.peaks = peaks
self.layoutAboutToBeChanged.emit()
self.dataChanged.emit(self.createIndex(0, 0), self.createIndex(self.rowCount(0), self.columnCount(0)))
self.layoutChanged.emit()
#def updateModel(self):
# dataList2 = []
# self.change_flag = True
# self.peaks = dataList2
# self.layoutAboutToBeChanged.emit()
# self.dataChanged.emit(self.createIndex(0, 0), self.createIndex(self.rowCount(0), self.columnCount(0)))
# self.layoutChanged.emit()
def rowCount(self, parent):
#print ("Rows: ", len(self.peaks))
return len(self.peaks)
def columnCount(self, parent):
#print ("Cols: ", len(self.peaks.iloc[0]))
return len(self.peaks.iloc[0])+1
def data(self, index, role):
if not index.isValid():
return None
if (index.column() == 0):
n = self.peaks.iloc[index.row()].name
self.checkboxes[index.row()].setChecked(self.isChecked[n])
self.checkboxes[index.row()].setText(str(n+1))
value = self.checkboxes[index.row()].text()
else:
fvalue = self.peaks.iloc[index.row()][index.column()-1]
if (index.column() == 1):
value = "%.2f" %fvalue
elif (index.column() == 2 or index.column() == 3):
value = "%.0f" % fvalue
else:
value = "%.3f" % fvalue
#print("Value: ",value, self.peaks.iloc[index.row()])
if role == Qt.EditRole:
return value
elif role == Qt.DisplayRole:
return value
elif role == Qt.CheckStateRole:
if index.column() == 0:
# print(">>> data() row,col = %d, %d" % (index.row(), index.column()))
if self.checkboxes[index.row()].isChecked():
return Qt.Checked
else:
return Qt.Unchecked
def headerData(self, col, orientation, role):
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return self.header[col]
return None
def sort(self, col, order):
"""sort table by given column number col"""
# print(">>> sort() col = ", col)
if col != 0:
self.layoutAboutToBeChanged.emit()
#self.mylist = sorted(self.mylist, key=operator.itemgetter(col))
if order == Qt.DescendingOrder:
asc=False
else:
asc=True
#print ("col: ", self.peaks.columns)
self.peaks = self.peaks.sort_values(by=self.peaks.columns[col-1], ascending=asc)
self.layoutChanged.emit()
def flags(self, index):
if not index.isValid():
return None
# print(">>> flags() index.column() = ", index.column())
if index.column() == 0:
# return Qt::ItemIsEnabled | Qt::ItemIsSelectable | Qt::ItemIsUserCheckable
return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsUserCheckable
else:
return Qt.ItemIsEnabled | Qt.ItemIsSelectable
def setData(self, index, value, role):
if not index.isValid():
return False
# print(">>> setData() role = ", role)
# print(">>> setData() index.column() = ", index.column())
# print(">>> setData() value = ", value)
if role == Qt.CheckStateRole and index.column() == 0:
print(">>> setData() role = ", role)
print(">>> setData() index.column() = ", index.column())
n = self.peaks.iloc[index.row()].name
if value == Qt.Checked:
self.checkboxes[index.row()].setChecked(True)
self.isChecked[n] = True
self.checkboxes[index.row()].setText(str(n+1))
# if studentInfos.size() > index.row():
# emit StudentInfoIsChecked(studentInfos[index.row()])
else:
self.checkboxes[index.row()].setChecked(False)
self.isChecked[n] = False
self.checkboxes[index.row()].setText(str(n+1))
self.ticArea.draw_sel_peak(n, self.isChecked[n], False, False)
self.ticArea.canvas.draw()
#self.checkboxes[index.row()].setText("U")
else:
print(">>> setData() role = ", role)
print(">>> setData() index.column() = ", index.column())
# self.emit(SIGNAL("dataChanged(QModelIndex,QModelIndex)"), index, index)
print(">>> setData() index.row = ", index.row())
print(">>> setData() index.column = ", index.column())
self.dataChanged.emit(index, index)
return True
def isSelected(self, row):
return self.isChecked[row]
def doSelect(self, row, state):
self.isChecked[row] = state
#self.dataChanged.emit(
n = self.peaks.index.get_loc(row)
self.checkboxes[n].setChecked(state)
self.dataChanged.emit(self.createIndex(n, 0), self.createIndex(n, 0))
def doSelectOnly(self, row, state):
for b in range(len(self.isChecked)):
self.isChecked[b] = not state
self.isChecked[row] = state
        for b in range(len(self.isChecked)):
            n = self.peaks.index.get_loc(b)
            self.checkboxes[n].setChecked(self.isChecked[b])
#self.dataChanged.emit(
self.dataChanged.emit(self.createIndex(0, 0), self.createIndex(self.rowCount(0), self.columnCount(0)))
#def timer_func(win, mylist):
# print(">>> timer_func()")
# win.table_model.setDataList(mylist)
# win.table_view.repaint()
# win.table_view.update()
# def timer_func(num):
# print(">>> timer_func() num = ", num)
class QCompoundsWindow(QWidget):
def __init__(self, compounds, ticArea, parent = None, main=None, *args):
super().__init__(parent, *args)
self.main = main
# setGeometry(x_pos, y_pos, width, height)
#self.setGeometry(70, 150, 1326, 582)
self.setWindowTitle("Compounds List")
self.peaks_dropdown = QComboBox()
self.cnames = list(compounds.keys())
idns = []
for k in self.cnames:
idx = int(k[4:])
idns.append(idx)
idt = "%i: " % (idx+1)
self.peaks_dropdown.addItem(idt + k)
self.idns = idns
self.peaks_dropdown.currentIndexChanged.connect(self.onDropdownChanged)
self.peak_info = QLabel('LibFact: ' + str(compounds[self.cnames[self.peaks_dropdown.currentIndex()]]['LibFact']))
self.table_model = CompoundsTableModel(self, compounds, self.cnames, ticArea)
self.table_view = QTableView()
#self.table_view.setSelectionMode(QAbstractItemView.SingleSelection)
self.table_view.setSelectionMode(QAbstractItemView.SingleSelection)
self.table_view.setSelectionBehavior(QAbstractItemView.SelectRows)
# bind cell click to a method reference
self.table_view.clicked.connect(self.showSelection)
self.table_view.clicked.connect(self.selectRow)
self.table_view.setModel(self.table_model)
# enable sorting
self.table_view.setSortingEnabled(True)
self.dpi = 100
self.molfig = Figure((2.0, 2.0), dpi=self.dpi)
self.molview = FigureCanvas(self.molfig)
self.molview.setParent(parent)
layout = QVBoxLayout(self)
layout.addWidget(self.peaks_dropdown)
layout.addWidget(self.peak_info)
layout.addWidget(self.table_view)
layout.addWidget(self.molview)
self.setLayout(layout)
#self.maxima = peakList
self.ticArea = ticArea
self.compounds = compounds
self.fileFindStart()
#def update_model(self, datalist, header):
# self.table_model2 = CompoundsTableModel(self, dataList, header)
# self.table_view.setModel(self.table_model2)
# self.table_view.update()
def fileFindStart(self):
self.file_timer = QTimer()
self.file_timer.setSingleShot(False)
self.file_timer.timeout.connect(self.onFileTimer)
self.file_timer.start(2000)
def onFileTimer(self):
usepath = self.main.paths.getPath('usepath') #/Users/dirk/.wine/drive_c/MSREAD/
molfile = self.main.paths.getPath('molfile')
molp = usepath + molfile
#molp = usepath + 'nistms.mol'
if os.path.isfile(molp):
#self.file_timer.stop()
#fig = self.molfig.add_subplot(111)
ax = self.molfig.add_subplot(111)
skeleton.draw_mol(self.molfig, ax, molp, 'cp437')
self.molview.draw()
os.remove(molp)
def onDropdownChanged(self):
p = self.peaks_dropdown.currentIndex()
self.peak_info.setText('LibFact: ' + str(self.compounds[self.cnames[self.peaks_dropdown.currentIndex()]]['LibFact']))
print (self.idns[p])
self.table_model.changePeak(p)
if self.ticArea.peakw:
self.ticArea.peakw.table_model.doSelectOnly(self.idns[p], True)
self.ticArea.draw_all_sel_peaks(False, False, False)
self.ticArea.draw_sel_peak(self.idns[p], True, False, False)
self.ticArea.canvas.draw()
def showSelection(self, item):
cellContent = item.data()
# print(cellContent) # test
sf = "You clicked on {}".format(cellContent)
# display in title bar for convenience
self.setWindowTitle(sf)
def selectRow(self, index):
# print("current row is %d", index.row())
pass
class CompoundsTableModel(QAbstractTableModel):
"""
keep the method names
they are an integral part of the model
"""
def __init__(self, parent, compounds, cnames, tic, *args):
QAbstractTableModel.__init__(self, parent, *args)
self.compounds = compounds
#self.header = header
self.ticArea = tic
self.selectedPeak = 0
#self.timer = QTimer()
self.change_flag = True
self.compound_names = cnames
#self.timer.timeout.connect(self.updateModel)
#self.timer.start(1000)
#self.checkboxes = []
#self.isChecked = []
#print("Cbox init")
#for idx, v in peaks.iterrows():
# self.checkboxes.append(QCheckBox(""))
# tf, mkr, fil = self.ticArea.sels[idx]
# self.isChecked.append(tf)
#peaks['selected'] = False
# self.rowCheckStateMap = {}
self.headings = list(self.compounds[self.peakName()]['Hits'][0].keys())
def peakName(self):
return self.compound_names[self.selectedPeak]
def setCompounds(self, compounds):
self.compounds = compounds
self.layoutAboutToBeChanged.emit()
self.dataChanged.emit(self.createIndex(0, 0), self.createIndex(self.rowCount(0), self.columnCount(0)))
self.layoutChanged.emit()
#def updateModel(self):
# dataList2 = []
# self.change_flag = True
# self.peaks = dataList2
# self.layoutAboutToBeChanged.emit()
# self.dataChanged.emit(self.createIndex(0, 0), self.createIndex(self.rowCount(0), self.columnCount(0)))
# self.layoutChanged.emit()
def rowCount(self, parent):
#print ("Rows: ", len(self.peaks))
#print(self.compounds[self.peakName()]['Hits'])
#print(self.peakName())
l = len(self.compounds[self.peakName()]['Hits'])
#print("rc: ", l)
return l
def columnCount(self, parent):
#print ("Cols: ", len(self.peaks.iloc[0]))
#print(self.compounds[self.peakName()]['Hits'][0])
l = len(self.compounds[self.peakName()]['Hits'][0])
#print("cc: ", l)
return l
def data(self, index, role):
if not index.isValid():
return None
kn = list(self.compounds[self.peakName()]['Hits'][0].keys())[index.column()]
value = self.compounds[self.peakName()]['Hits'][index.row()][kn]
#print("Value: ",value, self.peaks.iloc[index.row()])
if role == Qt.EditRole:
return value
elif role == Qt.DisplayRole:
return value
def headerData(self, col, orientation, role):
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return list(self.compounds[self.peakName()]['Hits'][0].keys())[col]
return None
def sort(self, col, order):
"""sort table by given column number col"""
# print(">>> sort() col = ", col)
self.layoutAboutToBeChanged.emit()
#self.mylist = sorted(self.mylist, key=operator.itemgetter(col))
if order == Qt.DescendingOrder:
asc=False
else:
asc=True
kn=list(self.compounds[self.peakName()]['Hits'][0].keys())[col]
print ("col: ", col, kn)
self.compounds[self.peakName()]['Hits'] = sorted(self.compounds[self.peakName()]['Hits'], key=lambda x : x[kn], reverse=not asc)
self.layoutChanged.emit()
def flags(self, index):
if not index.isValid():
return None
return Qt.ItemIsEnabled | Qt.ItemIsSelectable
def changePeak(self, peak):
self.selectedPeak = peak
self.layoutAboutToBeChanged.emit()
self.dataChanged.emit(self.createIndex(0, 0), self.createIndex(self.rowCount(0), self.columnCount(0)))
self.layoutChanged.emit()
def setData(self, index, value, role):
if not index.isValid():
return False
# print(">>> setData() role = ", role)
# print(">>> setData() index.column() = ", index.column())
# print(">>> setData() value = ", value)
print(">>> setData() role = ", role)
print(">>> setData() index.column() = ", index.column())
# self.emit(SIGNAL("dataChanged(QModelIndex,QModelIndex)"), index, index)
print(">>> setData() index.row = ", index.row())
print(">>> setData() index.column = ", index.column())
self.dataChanged.emit(index, index)
return True
class QPathSettings(QWidget):
def __init__(self, parent = None, *args):
super().__init__(parent, *args)
group = QGroupBox("NIST Paths")
self.settings = QSettings('Muppetastic', 'MSRead')
spaths = self.settings.value('NISTPaths')
#spaths = None
if not spaths:
self.defaults()
self.settings.setValue('NISTPaths', self.paths)
else:
self.paths = spaths
self.editpaths = self.paths
self.parent = parent
n = 0
plist = []
pg = QGridLayout()
for k in self.paths.keys():
pl = QLabel (k)
pt = QLineEdit(self.paths[k])
pt.setMinimumWidth(200)
pg.addWidget(pl, n, 0)
pg.addWidget(pt, n, 1)
pt.editingFinished.connect(self.on_parm_update)
plist.append((k, pl, pt))
n += 1
group.setLayout(pg)
self.plist = plist
bbox = QDialogButtonBox(QDialogButtonBox.Ok |
QDialogButtonBox.Apply |
QDialogButtonBox.Cancel
)
bbox.accepted.connect(self.onOk)
bbox.rejected.connect(self.onCancel)
btn = bbox.button(QDialogButtonBox.Apply)
btn.clicked.connect(self.onApply)
layout = QVBoxLayout(self)
layout.addWidget(group)
layout.addWidget(bbox)
self.setLayout(layout)
def defaults(self):
self.paths = {'nistpath' : '/Users/dirk/.wine/drive_c/NIST08/MSSEARCH/',
#nistpath = '/Volumes/[C] Windows 7 1/NIST08/MSSEARCH/'
#filepath = 'C:\\tmp\\'
'filepath' : 'C:\\MSREAD\\',
'specfile' : 'filespec.fil',
'usepath' : '/Users/dirk/.wine/drive_c/MSREAD/',
#usepath = '/Volumes/[C] Windows 7 1/MSREAD/'
'nistapp' : 'nistms\$.exe',
#winecmd ='open /Applications/Wine\ Stable.app/ --args '
'winecmd' : '/Applications/Wine\ Stable.app/Contents/Resources/wine/bin/wine ',
'autoimpfile' : 'autoimp.msd',
'readyfile' : 'SRCREADY.TXT',
'resultfile' : 'SRCRESLT.TXT',
'molfile' : 'nistms.mol'}
def getPath(self, name):
return self.paths[name]
def on_parm_update(self):
for k, wtl, wtb in self.plist:
self.editpaths[k] = wtb.text()
def onApply(self):
self.paths = self.editpaths
self.settings.setValue('NISTPaths', self.paths)
def onOk(self):
self.paths = self.editpaths
self.settings.setValue('NISTPaths', self.paths)
self.parent.close()
def onCancel(self):
self.parent.close()
def setParent(self, parent):
self.parent = parent
class QColorSettings(QWidget):
def __init__(self, parent = None, *args):
super().__init__(parent, *args)
group = QGroupBox("Colors")
self.settings = QSettings('Muppetastic', 'MSRead')
scolors = self.settings.value('Colors')
if not scolors:
self.defaults()
self.settings.setValue('Colors', self.colors)
else:
self.colors = scolors
self.editcolors = self.colors
self.parent = parent
box = QVBoxLayout()
tcbox = QGridLayout()
n = 0
self.plist = []
for c in self.colors.keys():
coln = QLabel (c)
btn = QColorButton(self.colors[c])
btn.colorChanged.connect(self.onColorChanged)
tcbox.addWidget(coln, n, 0)
tcbox.addWidget(btn, n, 1)
self.plist.append((c, coln, btn))
n+= 1
group.setLayout(tcbox)
bbox = QDialogButtonBox(QDialogButtonBox.Ok |
QDialogButtonBox.Apply |
QDialogButtonBox.Cancel
)
bbox.accepted.connect(self.onOk)
bbox.rejected.connect(self.onCancel)
btn = bbox.button(QDialogButtonBox.Apply)
btn.clicked.connect(self.onApply)
box.addWidget(group)
box.addWidget(bbox)
self.setLayout(box)
def defaults(self):
self.colors = { "Baseline" : QColor("red"),
"Highlight" : QColor("red"),
"Area1" : QColor("cyan"),
"Area2" : QColor("yellow") }
def getColorFor(self, name):
return self.colors[name]
def onColorChanged(self):
for k, wtl, wtb in self.plist:
self.editcolors[k] = QColor(wtb.color())
def onApply(self):
self.colors = self.editcolors
self.settings.setValue('Colors', self.colors)
def onOk(self):
self.colors = self.editcolors
self.settings.setValue('Colors', self.colors)
self.parent.close()
def onCancel(self):
self.parent.close()
def setParent(self, parent):
self.parent = parent
class QPrefsDialog(QDialog):
def __init__(self, paths, colors, parent = None):
super().__init__(parent)
self.setWindowTitle("Preferences")
box = QVBoxLayout()
self.tabs = QTabWidget()
self.colors = colors
self.tabs.addTab(paths, "NIST Paths")
self.tabs.addTab(colors, "Colors")
box.addWidget(self.tabs)
self.setLayout(box)
class subtractThread(QThread):
progress_update = pyqtSignal(int)
subtract_done = pyqtSignal(list)
subtract_status = pyqtSignal(bool, str)
def __init__(self, a1, a2):
QThread.__init__(self)
self.a1 = a1
self.a2 = a2
def __del__(self):
self.wait()
def run(self):
try:
res = self.a1.subtractBaselineSpec(self.a2,self.progress_update.emit)
#self.subtract_status.emit(True, 'Complete')
self.subtract_done.emit(res)
except Exception as e:
print ('Exc ' + str(e))
#self.scan_status.emit(False, str(e))
class QColorButton(QPushButton):
'''
Custom Qt Widget to show a chosen color.
Left-clicking the button shows the color-chooser, while
right-clicking resets the color to None (no-color).
'''
colorChanged = pyqtSignal()
    def __init__(self, color = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Store the colour as a '#rrggbb' string, or None when no colour is set,
        # so the widget also works when constructed without a colour.
        self._color = color.name() if color else None
        self.setMaximumWidth(32)
        self.pressed.connect(self.onColorPicker)
        if self._color:
            self.setStyleSheet("background-color: %s;" % self._color)
        else:
            self.setStyleSheet("")
def setColor(self, color):
if color != self._color:
self._color = color
self.colorChanged.emit()
if self._color:
self.setStyleSheet("background-color: %s;" % self._color)
else:
self.setStyleSheet("")
def color(self):
return self._color
def onColorPicker(self):
'''
Show color-picker dialog to select color.
Qt will use the native dialog by default.
'''
dlg = QColorDialog(self)
if self._color:
dlg.setCurrentColor(QColor(self._color))
if dlg.exec_():
self.setColor(dlg.currentColor().name())
def mousePressEvent(self, e):
if e.button() == Qt.RightButton:
self.setColor(None)
return super(QColorButton, self).mousePressEvent(e)
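# --- Illustrative sketch (added for clarity, not part of the original file) ---
# Minimal manual test of QColorButton, assuming an existing QApplication; the
# function name is hypothetical and nothing in this module calls it.
def _demo_color_button():
    btn = QColorButton(QColor("cyan"))
    btn.colorChanged.connect(lambda: print("picked:", btn.color()))
    btn.show()
    return btn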
def main():
app = QApplication(sys.argv)
ex = MainWindow()
ex.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| 2.0625
| 2
|
constants.py
|
LaudateCorpus1/file-extensions
| 15
|
12781306
|
'''
Contains package-wide constants.
'''
BASE_URL = 'https://fileinfo.com/filetypes/'
FILE_TYPES = {
'Text': {
'url': 'text',
'path': 'Documents/Text' ,
},
'Data': {
'url': 'datafiles-all',
'path': 'Data',
},
'Audio': {
'url': 'audio-all',
'path': 'Music',
},
'Video': {
'url': 'video',
'path': 'Videos',
},
'eBook': {
'url': 'ebook',
'path': 'eBooks',
},
'3D image': {
'url': '3d_image',
'path': 'Images/3D',
},
'Raster image': {
'url': 'raster_image',
'path': 'Images/Raster',
},
'Vector image': {
'url': 'vector_image',
'path': 'Images/Vector',
},
'Raw camera image': {
'url': 'camera_raw',
'path': 'Images/Raw Camera',
},
'Page layout': {
'url': 'page_layout',
'path': 'Documents/Layouts',
},
'Spreadsheet': {
'url': 'spreadsheet',
'path': 'Documents/Spreadsheets',
},
'Database': {
'url': 'database',
'path': 'Databases',
},
'Executable': {
'url': 'executable',
'path': 'Executables',
},
'Game file': {
'url': 'game-all',
'path': 'Game Files',
},
'CAD': {
'url': 'cad',
'path': 'CAD',
},
'GIS': {
'url': 'gis',
'path': 'GIS',
},
'Web': {
'url': 'web',
'path': 'Web',
},
'Plugin': {
'url': 'plugin',
'path': 'Plugins',
},
'Font': {
'url': 'font',
'path': 'Fonts',
},
'System': {
'url': 'system',
'path': 'System Files',
},
'Settings': {
'url': 'settings-all',
'path': 'Settings',
},
'Encoded': {
'url': 'encoded',
'path': 'Misc/Encoded',
},
'Compressed': {
'url': 'compressed',
'path': 'Archives',
},
'Disk image': {
'url': 'disk_image',
'path': 'Disk Images',
},
'Code': {
'url': 'developer-all',
'path': 'Code',
},
'Backup': {
'url': 'backup',
'path': 'Backups',
},
'Misc': {
'url': 'misc-all',
'path': 'Misc',
},
}
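# --- Illustrative sketch (added for clarity, not part of the original module) ---
# One way a consumer might combine the constants above; the helper name is
# hypothetical and only uses BASE_URL and FILE_TYPES as defined here.
def category_url(category):
    """Return the fileinfo.com listing URL for a file-type category."""
    return BASE_URL + FILE_TYPES[category]['url']
# e.g. category_url('Audio') == 'https://fileinfo.com/filetypes/audio-all'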
| 1.445313
| 1
|
2015/day17.py
|
natedileas/advent-of-code
| 0
|
12781307
|
import itertools as it
TEST1 = """
20
15
10
5
5
"""
INPUT = open('input17.txt').read()
# def count_ways(containers, total=150):
# ways = 0
# containers = sorted(containers, reverse=True)
# def count(containers, used, stack=0):
# print(containers, used, stack)
# for i in range(len(containers)):
# c = containers.pop(0)
# used.append(c)
# if sum(used) == total:
# ways += 1
# used.pop()
# print(containers, used, stack)
# return
# elif sum(used) < total:
# count(containers, used, stack=stack+1)
# elif sum(used) > total:
# containers.append(used.pop())
# print(containers, used, stack)
# return
# count(containers, [])
# return ways
def count_ways(containers, volume):
return sum((1 for c in range(2, len(containers)) for i in it.combinations(containers, c) if sum(i) == volume))
def find_min_ways(containers, volume):
for c in range(2, len(containers)):
ways = 0
for i in it.combinations(containers, c):
if sum(i) == volume:
ways += 1
if ways > 0:
return c, ways
print(count_ways([int(l) for l in TEST1.splitlines() if l], 25), 25)
print(count_ways([int(l) for l in INPUT.splitlines() if l], 150), 150)
print(find_min_ways([int(l) for l in INPUT.splitlines() if l], 150), 150)
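# --- Sanity check (added for clarity, not in the original solution) ---
# The five test containers admit exactly four combinations that hold 25 litres:
# (20, 5), (20, 5'), (15, 10) and (15, 5, 5').
assert count_ways([int(l) for l in TEST1.splitlines() if l], 25) == 4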
| 3.375
| 3
|
docs/.src/programs/retrograde_mars.py
|
astrax/astro2019
| 0
|
12781308
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 23 21:14:48 2018
@author: ahmed
"""
# IMPORTS
from pylab import *
#plt.style.use('dark_background')
#plt.style.use('ggplot')
import ephem as ep
# two additional helpers from the datetime module are needed
from datetime import datetime , timedelta
# OBSERVER
obs = ep.Observer()
# COORDINATES OF TUNIS
obs.lon, obs.lat, obs.elev = '10.08', '36.4', 100.0
obs.name = "SAT-TUNIS"
# MARS
mr = ep.Mars()
plt.figure(figsize=(10, 5))
for i in range (0 , 181):
    # step the date forward one day at a time over six months
dt = datetime (2018, 5, 1) + timedelta(i)
ds = "%d/%02d/%02d"%(dt.year, dt.month, dt.day)
print(" jour de l'année: ", i +1 , ds)
    # set the observer's date and compute the coordinates
obs.date = ds
mr.compute(obs)
ra = degrees(float(repr(mr.ra)))
de = degrees(float(repr(mr.dec)))
    # plot the computed position
plot([ra], [de], c = "red", marker = "o", alpha =.5)
    # add a date label roughly every 10 days
if (dt.day % 10) == 0: text(ra, de, ds, fontsize =8)
# convert right ascension given in degrees
# to hour, minute and second format
def RAd2hms (x, loc):
h = x//15
m = int(((x - h * 15.0) / 15.0) * 60.0)
s = ((x - h *15 - m / 4.0) / 15.0) * 3600.0
return "%02dh%02dm%02ds"%(h, m, s)
# convert declination given in degrees
# to degree, arcminute and arcsecond format
def DEd2dms (x , loc ):
d = int(fabs(x))
m = int((fabs(x) - d)*60)
s = (fabs(x) - d - m /60.0)*3600.0
if x <0: d = -1 * d
return " %02dd%02dm%02ds"%(d, m, s)
# plot labels and axis formatting
xlabel("ascension droite " + r"$\alpha$")
gca().xaxis.set_major_formatter(FuncFormatter(RAd2hms))
ylabel(" déclinaison " + r"$\delta$")
gca().yaxis.set_major_formatter(FuncFormatter(DEd2dms))
title("Mouvement retrograde de Mars - 6 mois en 2018 \n"+obs.name, fontweight='bold')
savefig("../figs/retrogradeMars.pdf"); savefig("../figs/retrogradeMars.png")
show()
| 3.15625
| 3
|
isles/api/pingdom.py
|
michael-ranieri/Misty
| 1
|
12781309
|
<gh_stars>1-10
#!/usr/bin/env python
# Copyright (C) 2011 <NAME> <<EMAIL>.d.<EMAIL> at gmail.com>
# System imports
import urllib2
import json
import base64
import sys
# Misty imports
import settings_local as settings
def allChecks(iterable):
for i in iterable:
if i['status'] != 'up':
return False
return True
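# --- Illustrative self-check (added for clarity, not in the original script) ---
# allChecks() only returns True when every check reports 'up'.
assert allChecks([{'status': 'up'}, {'status': 'up'}])
assert not allChecks([{'status': 'up'}, {'status': 'down'}])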
def main():
pingBaseURL = 'https://api.pingdom.com'
AUTH = base64.b64encode(settings.PINGDOM_USER + ':' + settings.PINGDOM_PASSWORD)
req = urllib2.Request(pingBaseURL + '/api/2.0/checks')
req.add_header("App-Key", settings.PINGDOM_KEY)
req.add_header("Authorization", 'Basic ' + AUTH)
response = urllib2.urlopen(req)
msg = json.loads(response.read())
checks = []
for check in msg['checks']:
checks.append({
'name' : check['name'],
'status' : check['status']
})
if allChecks(checks):
print "Pingdom reports that all %s checks are good!" % len(checks)
sys.exit(0)
else:
for check in checks:
if check['status'] == 'down':
print check['name'] + ' || ' + check['status']
if __name__ == '__main__':
main()
| 2.734375
| 3
|
checkmate/contrib/plugins/git/test/__init__.py
|
marcinguy/checkmate-ce
| 80
|
12781310
|
import os
import tempfile
import pytest
import subprocess
TEST_DIRECTORY = os.path.abspath(__file__+"/../")
DATA_DIRECTORY = os.path.join(TEST_DIRECTORY,"data")
GIT_TEST_REPOSITORY = DATA_DIRECTORY + "/test_repository/d3py.tar.gz"
| 1.578125
| 2
|
boy/life_class.py
|
Valden92/Boy-and-boll-TheGame
| 1
|
12781311
|
import pygame
from pygame.sprite import Sprite
class BoyLife(Sprite):
def __init__(self):
"""Инициализирует графическое отображение жизней."""
super().__init__()
self.image = pygame.image.load('img/heart.png')
self.width = self.image.get_width()
self.height = self.image.get_height()
self.image = pygame.transform.scale(self.image, (self.width // 30, self.height // 30))
self.rect = self.image.get_rect()
| 3.28125
| 3
|
isitpublic/app/routes.py
|
tmidi/flask-isitpublic
| 0
|
12781312
|
<filename>isitpublic/app/routes.py
from flask import render_template, request
from forms import IpForm
from functions import is_valid_ipv4_address, is_valid_ipv6_address, netmask
from app import app
@app.route('/', methods=['POST', 'GET'])
def index():
form = IpForm()
if request.method == "POST" and form.validate():
form = IpForm()
ip = form.address.data
if '/' in ip:
return render_template("index.html",
form=form,
result=netmask(ip),
ip=ip)
elif ':' in ip:
return render_template('index.html', form=form,
result=is_valid_ipv6_address(ip),
ip=ip)
else:
return render_template('index.html', form=form,
result=is_valid_ipv4_address(ip),
ip=ip)
else:
return render_template("index.html", form=form)
| 2.921875
| 3
|
features/steps/log-scale-axes.py
|
eaton-lab/toyplot
| 438
|
12781313
|
<reponame>eaton-lab/toyplot<filename>features/steps/log-scale-axes.py<gh_stars>100-1000
# Copyright 2014, Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain
# rights in this software.
from behave import *
import nose
import numpy
import toyplot.data
@given(u'values from -1000 to -1')
def step_impl(context):
context.x = numpy.linspace(-1000, -1, 100)
@given(u'values from -1000 to -0.01')
def step_impl(context):
context.x = numpy.linspace(-1000, -0.01, 100)
@given(u'values from -1000 to 0')
def step_impl(context):
context.x = numpy.linspace(-1000, 0, 100)
@given(u'values from -1000 to 0.5')
def step_impl(context):
context.x = numpy.linspace(-1000, 0.5, 100)
@given(u'values from -0.5 to 1000')
def step_impl(context):
context.x = numpy.linspace(-0.5, 1000, 100)
@given(u'values from 0 to 1000')
def step_impl(context):
context.x = numpy.linspace(0, 1000, 100)
@given(u'values from 0.01 to 1000')
def step_impl(context):
context.x = numpy.linspace(0.01, 1000, 100)
@given(u'values from 1 to 1000')
def step_impl(context):
context.x = numpy.linspace(1, 1000, 100)
@given(u'values from -1000 to 1000')
def step_impl(context):
context.x = numpy.linspace(-1000, 1000, 100)
@given(u'log 10 axes on x and y')
def step_impl(context):
context.axes = context.canvas.cartesian(xscale="log10", yscale="log10")
@given(u'log 2 axes on x and y')
def step_impl(context):
context.axes = context.canvas.cartesian(xscale="log2", yscale="log2")
@given(u'log 10 axes on x and y with custom format')
def step_impl(context):
context.axes = context.canvas.cartesian(xscale="log10", yscale="log10")
context.axes.x.ticks.locator = toyplot.locator.Log(base=10, format="{base}^{exponent}")
context.axes.y.ticks.locator = toyplot.locator.Log(base=10, format="{base}^{exponent}")
@when(u'plotting x, x with markers')
def step_impl(context):
context.axes.plot(context.x, context.x, marker="o")
@given(u'squared values from 0 to 10')
def step_impl(context):
context.values = numpy.linspace(0, 10) ** 2
@given(u'squared values from -10 to 0')
def step_impl(context):
context.values = -(numpy.linspace(10, 0) ** 2)
@given(u'log 10 axes on y with domain min 10')
def step_impl(context):
context.axes = context.canvas.cartesian(yscale="log10")
context.axes.y.domain.min = 10
@given(u'log 10 axes on y with domain max -10')
def step_impl(context):
context.axes = context.canvas.cartesian(yscale="log10")
context.axes.y.domain.max = -10
@when(u'plotting the values with bars')
def step_impl(context):
context.axes.bars(context.values)
| 1.820313
| 2
|
file.py
|
iamchess/asdf
| 1
|
12781314
|
import pprint
import random
chessBoard = [[0 for j in range(8)] for i in range(8)]
chessBoard[0][0] = "R"
pprint.pprint(chessBoard)
# Rook: moves any number of squares along a single rank or file.
def move():
    """Move the rook from a1 along a random rank or file and print the board."""
    x = 0
    y = 0
    chessBoard[0][0] = 0  # clear the rook's current square
    # Pick either the row or the column at random; randint includes both ends,
    # so the rook can reach every square on its line.
    if random.uniform(0, 2) < 1:
        x = random.randint(0, 7)
    else:
        y = random.randint(0, 7)
    chessBoard[x][y] = "R"
    pprint.pprint(chessBoard)
move()
| 3.671875
| 4
|
mailchimp_marketing_asyncio/models/inline_response2001_exports.py
|
john-parton/mailchimp-asyncio
| 0
|
12781315
|
# coding: utf-8
"""
Mailchimp Marketing API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 3.0.74
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class InlineResponse2001Exports(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'export_id': 'int',
'started': 'datetime',
'finished': 'datetime',
'size_in_bytes': 'int',
'download_url': 'str',
'links': 'list[ResourceLink]'
}
attribute_map = {
'export_id': 'export_id',
'started': 'started',
'finished': 'finished',
'size_in_bytes': 'size_in_bytes',
'download_url': 'download_url',
'links': '_links'
}
def __init__(self, export_id=None, started=None, finished=None, size_in_bytes=None, download_url=None, links=None): # noqa: E501
"""InlineResponse2001Exports - a model defined in Swagger""" # noqa: E501
self._export_id = None
self._started = None
self._finished = None
self._size_in_bytes = None
self._download_url = None
self._links = None
self.discriminator = None
if export_id is not None:
self.export_id = export_id
if started is not None:
self.started = started
if finished is not None:
self.finished = finished
if size_in_bytes is not None:
self.size_in_bytes = size_in_bytes
if download_url is not None:
self.download_url = download_url
if links is not None:
self.links = links
@property
def export_id(self):
"""Gets the export_id of this InlineResponse2001Exports. # noqa: E501
The ID for the export. # noqa: E501
:return: The export_id of this InlineResponse2001Exports. # noqa: E501
:rtype: int
"""
return self._export_id
@export_id.setter
def export_id(self, export_id):
"""Sets the export_id of this InlineResponse2001Exports.
The ID for the export. # noqa: E501
:param export_id: The export_id of this InlineResponse2001Exports. # noqa: E501
:type: int
"""
self._export_id = export_id
@property
def started(self):
"""Gets the started of this InlineResponse2001Exports. # noqa: E501
Start time for the export. # noqa: E501
:return: The started of this InlineResponse2001Exports. # noqa: E501
:rtype: datetime
"""
return self._started
@started.setter
def started(self, started):
"""Sets the started of this InlineResponse2001Exports.
Start time for the export. # noqa: E501
:param started: The started of this InlineResponse2001Exports. # noqa: E501
:type: datetime
"""
self._started = started
@property
def finished(self):
"""Gets the finished of this InlineResponse2001Exports. # noqa: E501
If finished, the finish time for the export. # noqa: E501
:return: The finished of this InlineResponse2001Exports. # noqa: E501
:rtype: datetime
"""
return self._finished
@finished.setter
def finished(self, finished):
"""Sets the finished of this InlineResponse2001Exports.
If finished, the finish time for the export. # noqa: E501
:param finished: The finished of this InlineResponse2001Exports. # noqa: E501
:type: datetime
"""
self._finished = finished
@property
def size_in_bytes(self):
"""Gets the size_in_bytes of this InlineResponse2001Exports. # noqa: E501
The size of the uncompressed export in bytes. # noqa: E501
:return: The size_in_bytes of this InlineResponse2001Exports. # noqa: E501
:rtype: int
"""
return self._size_in_bytes
@size_in_bytes.setter
def size_in_bytes(self, size_in_bytes):
"""Sets the size_in_bytes of this InlineResponse2001Exports.
The size of the uncompressed export in bytes. # noqa: E501
:param size_in_bytes: The size_in_bytes of this InlineResponse2001Exports. # noqa: E501
:type: int
"""
self._size_in_bytes = size_in_bytes
@property
def download_url(self):
"""Gets the download_url of this InlineResponse2001Exports. # noqa: E501
If the export is finished, the download URL for an export. URLs are only valid for 90 days after the export completes. # noqa: E501
:return: The download_url of this InlineResponse2001Exports. # noqa: E501
:rtype: str
"""
return self._download_url
@download_url.setter
def download_url(self, download_url):
"""Sets the download_url of this InlineResponse2001Exports.
If the export is finished, the download URL for an export. URLs are only valid for 90 days after the export completes. # noqa: E501
:param download_url: The download_url of this InlineResponse2001Exports. # noqa: E501
:type: str
"""
self._download_url = download_url
@property
def links(self):
"""Gets the links of this InlineResponse2001Exports. # noqa: E501
A list of link types and descriptions for the API schema documents. # noqa: E501
:return: The links of this InlineResponse2001Exports. # noqa: E501
:rtype: list[ResourceLink]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this InlineResponse2001Exports.
A list of link types and descriptions for the API schema documents. # noqa: E501
:param links: The links of this InlineResponse2001Exports. # noqa: E501
:type: list[ResourceLink]
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(InlineResponse2001Exports, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InlineResponse2001Exports):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
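# --- Illustrative sketch (added for clarity, not part of the generated file) ---
# The model is plain attribute storage; a hypothetical instance round-trips
# through to_dict() roughly like this:
#   export = InlineResponse2001Exports(export_id=1, size_in_bytes=2048,
#                                      download_url='https://example.com/export.zip')
#   export.to_dict()  # includes every declared field, e.g. {'export_id': 1, ...}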
| 1.71875
| 2
|
stream/generate/kafkaProd.py
|
sanjeevkanabargi/python
| 0
|
12781316
|
<gh_stars>0
#!/usr/bin/env python
import threading, logging, time
import multiprocessing
import sys
from kafka import KafkaConsumer, KafkaProducer
topic = "cfwTopic"
def main():
producer = KafkaProducer(bootstrap_servers='localhost:9092')
filepath = str(sys.argv[1])
with open(filepath) as fp:
for line in fp:
producer.send(topic, line.encode('utf-8'))
producer.close()
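# --- Illustrative sketch (added for clarity, not part of the original script) ---
# A matching consumer for the same local broker and topic; the function name is
# hypothetical and nothing here calls it.
def consume(max_messages=10):
    consumer = KafkaConsumer(topic, bootstrap_servers='localhost:9092',
                             auto_offset_reset='earliest',
                             consumer_timeout_ms=5000)
    for i, msg in enumerate(consumer):
        print(msg.value.decode('utf-8'))
        if i + 1 >= max_messages:
            break
    consumer.close()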
if __name__ == "__main__":
logging.basicConfig(
format='%(asctime)s.%(msecs)s:%(name)s:%(thread)d:%(levelname)s:%(process)d:%(message)s',
level=logging.INFO
)
main()
| 2.53125
| 3
|
zeeko/telemetry/tests/test_writer.py
|
alexrudy/Zeeko
| 2
|
12781317
|
import pytest
import numpy as np
import zmq
import h5py
import struct
import itertools
from .. import Writer
from .. import chunk_api
from ...messages import array as array_api
from .conftest import assert_chunk_allclose, assert_h5py_allclose
from zeeko.conftest import assert_canrecv
from ...tests.test_helpers import ZeekoTestBase, ZeekoMappingTests, OrderedDict
from ...messages.tests.test_receiver import ReceiverTests, ReceiverTestBase
@pytest.fixture
def notify(address2, context):
"""Notification socket"""
s = context.socket(zmq.PUSH)
s.bind(address2)
with s:
yield s
@pytest.fixture
def rnotify(address2, context, notify):
"""Recieve notifications."""
s = context.socket(zmq.PULL)
s.connect(address2)
with s:
yield s
@pytest.fixture
def n():
"""Number of arrays to publish."""
return 3
@pytest.fixture
def metadata_callback():
"""Return a metadata callback."""
def callback():
return {'meta':'data', 'n':5}
return callback
def test_writer_construction(filename):
"""Test construction"""
w = Writer(filename)
class WriterTestsBase(ReceiverTestBase):
"""Base class items for Writers."""
pytestmark = pytest.mark.usefixtures("rnotify")
cls = Writer
@pytest.fixture
def arrays(self, n, name, chunk_array, chunk_mask):
"""Return a list of chunks"""
cs = OrderedDict()
for i in range(n):
c = chunk_api.PyChunk("{0:s}{1:d}".format(name, i), np.random.randn(*chunk_array.shape), chunk_mask)
cs[c.name] = c
return cs
@pytest.fixture
def receiver(self, filename, metadata_callback):
"""The receiver object"""
obj = self.cls()
obj.metadata_callback = metadata_callback
with h5py.File(filename) as obj.file:
yield obj
@pytest.fixture
def writer(self, receiver):
"""Return a receiver"""
return receiver
def send_arrays(self, socket, arrays, framecount):
"""Send arrays."""
assert socket.poll(timeout=100, flags=zmq.POLLOUT)
array_api.send_array_packet_header(socket, "arrays", len(arrays), framecount, flags=zmq.SNDMORE)
chunks = list(arrays.values())
for chunk in chunks[:-1]:
chunk.send(socket, flags=zmq.SNDMORE)
chunks[-1].send(socket)
def recv_arrays(self, receiver, socket, arrays, flags=zmq.NOBLOCK):
"""Wrapper around receiving arrays."""
assert_canrecv(socket)
receiver.receive(socket, flags=flags)
for key in arrays:
assert receiver.event(key).is_set()
assert len(receiver) == len(arrays)
def send_unbundled_arrays(self, socket, arrays):
"""Send arrays as individual messages."""
array_api.send_array_packet_header(socket, "arrays", len(arrays), flags=zmq.SNDMORE)
chunks = list(arrays.values())
for chunk in chunks[:-1]:
chunk.send(socket, flags=zmq.SNDMORE)
chunks[-1].send(socket)
def recv_unbundled_arrays(self, receiver, socket, arrays, flags=zmq.NOBLOCK):
"""Receive unbundled arrays"""
count = 0
while socket.poll(timeout=100, flags=zmq.POLLIN):
assert_canrecv(socket)
receiver.receive(socket, flags=flags)
count += 1
for key in arrays:
assert receiver.event(key).is_set()
# assert count == len(arrays)
def make_modified_arrays(self, arrays):
"""Make modified arrays."""
return OrderedDict((cs.name, chunk_api.PyChunk(cs.name, cs.array * 2.0, cs.mask)) for cs in arrays.values())
def assert_receiver_arrays_allclose(self, receiver, arrays):
"""Assert receiver and arrays are all close."""
assert len(receiver) == len(arrays)
assert set(receiver.keys()) == set(arrays.keys())
for i, key in enumerate(receiver):
chunk = receiver[key]
assert_chunk_allclose(chunk, arrays[key])
class TestWriter(ReceiverTests, WriterTestsBase):
"""Test case for recorders."""
pass
class TestWriterMapping(ZeekoMappingTests, WriterTestsBase):
"""Test recorder behavior as a mapping."""
cls = Writer
@pytest.fixture
def mapping(self, chunksize, push, pull, arrays, framecount, filename, metadata_callback):
"""A client, set up for use as a mapping."""
obj = self.cls()
obj.metadata_callback = metadata_callback
with h5py.File(filename) as obj.file:
self.send_arrays(push, arrays, framecount)
self.recv_arrays(obj, pull, arrays)
yield obj
@pytest.fixture
def keys(self, arrays):
"""Return keys which should be availalbe."""
return arrays.keys()
| 1.929688
| 2
|
calibration.py
|
ladsantos/phoenix_pipeline
| 0
|
12781318
|
<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
import os
from pathlib import Path
from astropy.nddata import CCDData
import ccdproc as ccdp
from astropy.stats import mad_std
import utils as utl
import astropy.units as u
import shutil
#-----------------------------------
#
#---Function to calibrate images----
#
#-----------------------------------
def calibrate_images(x_d, x_f, x_s, it_s = 'object', x_b = '', ):
"""
Parameters
----------
x_b : str
Path of the bias files.
By default bias is not provided
x_d : str
Path of the dark files
x_f : str
Path of flat files
x_s : str
Path of Science files
it_s : str
Imagetyp of fits science file
Default is set to object
-----------
"""
path_b = Path(x_b)
path_d = Path(x_d)
path_f = Path(x_f)
path_s = Path(x_s)
#-----Bias
if x_b == '' and path_b == Path(''):
print(' ')
print('Be aware: You did not provide the Bias files; the process will still continue though.')
print(' ')
files_b = None
elif not path_b.is_dir():
raise RuntimeError('The path you provided for the Bias files does not exist.')
else:
files_b = ccdp.ImageFileCollection(path_b)
#-----Dark
if x_d == '' or not path_d.is_dir():
raise RuntimeError('You must provide Dark files for processing.\n Or the path you provided does not exist.')
else:
files_d = ccdp.ImageFileCollection(path_d)
#-----Flat
if x_f == '' or not path_f.is_dir():
raise RuntimeError('You must provide Flatfield files for processing.\n Or the path you provided does not exist.')
else:
files_f = ccdp.ImageFileCollection(path_f)
#-----Science
if x_s == '' or not path_s.is_dir():
raise RuntimeError('You must provide Science images for processing.\n Or the path you provided does not exist.')
else:
files_s = ccdp.ImageFileCollection(path_s)
#-----------------------------------
#
#--------Calibrating Images---------
#
#-----------------------------------
if files_b is not None:
#-------------------------------
#------Creating Master-bias-----
#-------------------------------
cali_bias_path = Path(path_b / 'cali_bias')
cali_bias_path.mkdir(exist_ok = True)
files_b_cali = files_b.files_filtered(imagetyp = 'bias', include_path = True)
combined_bias = ccdp.combine(files_b_cali,\
method='average',\
sigma_clip=True,\
sigma_clip_low_thresh=5,\
sigma_clip_high_thresh=5,\
sigma_clip_func=np.ma.median,\
sigma_clip_dev_func=mad_std,\
mem_limit=350e6)
combined_bias.meta['combined'] = True
combined_bias.write(cali_bias_path / 'master_bias.fits')
# Reading master bias
master_bias = CCDData.read(cali_bias_path / 'master_bias.fits')
else:
master_bias = None
#-------------------------------
#-------Calibrating Darks-------
#-------------------------------
cali_dark_path = Path(path_d / 'cali_dark')
cali_dark_path.mkdir(exist_ok = True)
files_d_cali = files_d.files_filtered(imagetyp = 'DARK', include_path = True)
for ccd, file_name in files_d.ccds(imagetyp = 'DARK', return_fname = True, ccd_kwargs = {'unit':'adu'}):
if master_bias is not None:
# Subtract bias
ccd = ccdp.subtract_bias(ccd, master_bias)
else:
ccd = ccd
# Save the result
ccd.write(cali_dark_path / file_name)
#--------------------------------
#------Creating Master-Dark------
#--------------------------------
red_dark = ccdp.ImageFileCollection(cali_dark_path)
# Calculating exposure times of DARK images
dark_times = set(red_dark.summary['exptime'][red_dark.summary['imagetyp'] == 'DARK'])
for exposure in sorted(dark_times):
cali_darks = red_dark.files_filtered(imagetyp = 'dark',\
exptime = exposure,\
include_path = True)
combined_dark = ccdp.combine(cali_darks,\
method='average',\
sigma_clip=True,\
sigma_clip_low_thresh=5,\
sigma_clip_high_thresh=5,\
sigma_clip_func=np.ma.median,\
sigma_clip_dev_func=mad_std,\
mem_limit=350e6)
combined_dark.meta['combined'] = True
com_dark_name = 'combined_dark_{:6.3f}.fits'.format(exposure)
combined_dark.write(cali_dark_path / com_dark_name)
# Reading master dark of various exposure times
red_dark = ccdp.ImageFileCollection(cali_dark_path)
combined_darks = {ccd.header['exptime']: ccd for ccd in red_dark.ccds(imagetyp = 'DARK', combined = True)}
#--------------------------------
#-------Calibrating Flats--------
#--------------------------------
cali_flat_path = Path(path_f / 'cali_flat')
cali_flat_path.mkdir(exist_ok = True)
files_f_cali = files_f.files_filtered(imagetyp = 'FLAT', include_path = True)
for ccd, file_name in files_f.ccds(imagetyp = 'FLAT', ccd_kwargs = {'unit' : 'adu'}, return_fname = True):
# Subtract bias
if master_bias is not None:
ccd = ccdp.subtract_bias(ccd, master_bias)
else:
ccd = ccd
closest_dark = utl.find_nearest_dark_exposure(ccd, dark_times)
if closest_dark is None:
closest_dark1 = utl.find_nearest_dark_exposure(ccd, dark_times, tolerance = 100)
# Subtract scaled Dark
ccd = ccdp.subtract_dark(ccd, combined_darks[closest_dark1],\
exposure_time = 'exptime',\
exposure_unit = u.second,\
scale = True)
ccd.write(cali_flat_path / ('flat-' + file_name))
else:
closest_dark2 = utl.find_nearest_dark_exposure(ccd, dark_times)
# Subtracting Darks
ccd = ccdp.subtract_dark(ccd, combined_darks[closest_dark2], exposure_time = 'exptime', exposure_unit = u.second)
ccd.write(cali_flat_path / ('flat-' + file_name))
#--------------------------------
#-----Creating Master-Flat-------
#--------------------------------
red_flats = ccdp.ImageFileCollection(cali_flat_path)
cali_flats = red_flats.files_filtered(imagetyp = 'FLAT', include_path = True)
combined_flat = ccdp.combine(cali_flats,\
method='average',\
scale = utl.inverse_median,\
sigma_clip=True,\
sigma_clip_low_thresh=5,\
sigma_clip_high_thresh=5,\
sigma_clip_func=np.ma.median,\
sigma_clip_dev_func=mad_std,\
mem_limit=350e6)
combined_flat.meta['combined'] = True
combined_flat.write(cali_flat_path / 'master_flat.fits')
# Reading master flat
red_flats = ccdp.ImageFileCollection(cali_flat_path)
combined_flat = CCDData.read(cali_flat_path / 'master_flat.fits')
#--------------------------------
#---Calibrating Science Images---
#--------------------------------
cali_science_path = Path(path_s / 'cali_science')
cali_science_path.mkdir(exist_ok = True)
# Correcting for flat
for ccd, file_name in files_s.ccds(imagetyp = it_s, ccd_kwargs = {'unit' : 'adu'}, return_fname = True):
# Subtract scaled Dark
#ccd = ccdp.subtract_dark(ccd, combined_darks[closest_dark1], exposure_time = 'exptime', exposure_unit = u.second, scale = True)
ccd = ccdp.flat_correct(ccd, combined_flat)#['FLAT'])
ccd.write(cali_science_path / file_name)
files_s1 = ccdp.ImageFileCollection(cali_science_path)
files_s_cali = files_s1.files_filtered(imagetyp = it_s, include_path = True)
# Creating a list of spectrum images
files_spec = files_s1.summary['file', 'view_pos']
files_spec_list = np.array([])
for i in range(len(files_spec)):
xxx = files_spec['view_pos'][i]
if xxx[0:4] == 'open':
files_spec_list = np.hstack((files_spec_list, files_spec['file'][i]))
# Sky subtracting images
final_calibrated = Path(path_s / 'Final_calibrated_science')
final_calibrated.mkdir(exist_ok = True)
# Variance in sky subtracting images
final_calibrated_err = Path(path_s / 'Error_final_calibrated_science')
final_calibrated_err.mkdir(exist_ok = True)
j = 0
for i in range(int(len(files_spec_list)/2)):
# For Reduced Image
ccd1 = CCDData.read(x_s + 'cali_science/' + files_spec_list[j], unit='adu')
ccd2 = CCDData.read(x_s + 'cali_science/' + files_spec_list[j+1], unit = 'adu')
sky_sub1 = ccd1.data - ccd2.data
ss1 = CCDData(sky_sub1, unit='adu')
ss1.header = ccd1.header
ss1.meta['sky_sub'] = True
name1 = 'sky_sub_' + files_spec_list[j]
ss1.write(final_calibrated / name1)
sky_sub2 = ccd2.data - ccd1.data
ss2 = CCDData(sky_sub2, unit='adu')
ss2.header = ccd2.header
ss2.meta['sky_sub'] = True
name2 = 'sky_sub_' + files_spec_list[j+1]
ss2.write(final_calibrated / name2)
# For Errors in Reduced Image
ccd1e = ccdp.create_deviation(ccd1, gain=9.2*u.electron/u.adu, readnoise=40*u.electron)
ccd1er = ccd1e.uncertainty.array
ccd1err = np.nan_to_num(ccd1er)
ccd2e = ccdp.create_deviation(ccd2, gain=9.2*u.electron/u.adu, readnoise=40*u.electron)
ccd2er = ccd2e.uncertainty.array
ccd2err = np.nan_to_num(ccd2er)
data_err = np.sqrt(ccd1err**2 + ccd2err**2 - (2*ccd1err*ccd2err))
data_err1 = CCDData(data_err, unit='adu')
data_err1.meta['Error'] = True
data_err1.header = ccd1.header
name3 = 'sky_sub_err_' + files_spec_list[j]
data_err1.write(final_calibrated_err / name3)
name4 = 'sky_sub_err_' + files_spec_list[j+1]
data_err1.write(final_calibrated_err / name4)
j = j+2
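# --- Illustrative usage (added for clarity, not part of the original module) ---
# The directory names below are placeholders; only the dark, flat and science
# paths are required, the bias path is optional and it_s defaults to 'object'.
#   calibrate_images(x_d='raw/darks/', x_f='raw/flats/', x_s='raw/science/',
#                    x_b='raw/bias/', it_s='object')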
| 2.3125
| 2
|
app/database/user_db.py
|
gmbz/frro-soporte-TPI-09
| 2
|
12781319
|
from ..models.models import Usuario
from .db import session
from ..models.exceptions import UserAlreadyExists, UserNotFound
from typing import Optional
def register_user(usuario: Usuario) -> Usuario:
"""Agrega un nuevo usuario a la tabla y lo devuelve"""
if user_exists(usuario):
raise UserAlreadyExists(f"User {usuario.username} ya existe")
if email_exists(usuario):
raise UserAlreadyExists(f"El email {usuario.email} ya existe")
session.add(usuario)
session.commit()
return usuario
def buscar_id_user(user: Usuario) -> Usuario:
return session.query(Usuario).get(user.id)
def buscar_id(id_usuario) -> Optional[Usuario]:
return session.query(Usuario).get(id_usuario)
def user_exists(user_: Usuario) -> Optional[Usuario]:
return session.query(Usuario).filter(Usuario.username == user_.username
).first()
def email_exists(user_: Usuario) -> Optional[Usuario]:
return session.query(Usuario).filter(Usuario.email == user_.email
).first()
def buscar_user(username_: str) -> Optional[Usuario]:
return session.query(Usuario).filter(Usuario.username == username_).first()
def autenticacion(user_: Usuario) -> Optional[Usuario]:
usuario = buscar_user(user_.username)
if usuario:
check = usuario.check_password(user_.password)
if check is True:
return usuario
raise UserNotFound('Contraseña incorrecta')
raise UserNotFound('El usuario no existe')
def change_pass():
session.commit()
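# --- Illustrative usage (added for clarity, not part of the original module) ---
# A hypothetical register/login round trip with the helpers above; the field
# names passed to Usuario are assumptions based on how they are used here.
#   user = register_user(Usuario(username='ada', email='ada@example.com', password='...'))
#   same_user = autenticacion(Usuario(username='ada', password='...'))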
| 2.84375
| 3
|
molecule/default/tests/test_installation.py
|
Temelio/ansible-role-virtualenv
| 0
|
12781320
|
"""
Role tests
"""
import os
import pytest
from testinfra.utils.ansible_runner import AnsibleRunner
testinfra_hosts = AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
@pytest.mark.parametrize('name', [
('python-dev'),
('python-virtualenv'),
])
def test_packages(host, name):
"""
Test installed packages
"""
assert host.package(name).is_installed
| 2.140625
| 2
|
cogs/money.py
|
MrRazamataz/RazBot
| 6
|
12781321
|
# MrRazamataz's RazBot
# bal cog
import discord
from discord import reaction
from discord.ext import commands
import asyncio
import os
import aiofiles
import json
from discord.ext import tasks
class money(commands.Cog):
def __init__(self, client):
self.client = client
@commands.Cog.listener()
async def on_ready(self):
global moneydata
print("Loading money...")
with open("money.json") as f:
moneydata = json.load(f)
print("Starting save tasks loop...")
self.save.start()
global recent_change
recent_change = False
@commands.Cog.listener()
async def on_message(self, message):
global moneydata
if message.guild is None:
return
if not message.author.bot:
id = str(message.author.id)
moneydata[id] = moneydata.get(id, 0) + 1
print(f"Money for {id} has been increased by 1!")
global recent_change
recent_change = True
@commands.command(name='bal', aliases=['balance'])
@commands.cooldown(1, 5, commands.BucketType.user)
async def balance(self, ctx, user: discord.Member = None):
global moneydata, selfmessage
selfmessage = "not set"
if user == None:
user = ctx.author
selfmessage = "no"
#id = ctx.message.author.id
id = str(user.id)
currentdata = []
try:
if selfmessage == "no":
await ctx.send(f"Your bal is: `{moneydata[id]}`.")
else:
await ctx.send(f"{user.display_name}'s bal is: `{moneydata[id]}`.")
except Exception as e:
print(e)
@balance.error
async def balance_error(self, ctx, error):
if isinstance(error, commands.CommandOnCooldown):
await ctx.send(f"You are on a cooldown of that command for {error.retry_after:.2f}s!", delete_after=2)
@commands.command(name='save')
async def savebal(self, ctx):
if ctx.author.id == 6<PASSWORD>:
with open("money.json", "w") as f:
json.dump(moneydata, f, indent=4)
await ctx.send("Saved.")
else:
await ctx.send("This command can only be used by MrRazamataz.")
@tasks.loop(minutes=5)
async def save(self):
global recent_change
if recent_change is True:
with open("money.json", "w") as f:
json.dump(moneydata, f, indent=4)
print("Saved money from ram.")
recent_change = False
else:
print("There are no recent changes to the money, not saving.")
def setup(client):
client.add_cog(money(client))
| 2.625
| 3
|
PyLeap_CPB_EyeLights_LED_Glasses_Sparkle/code.py
|
gamblor21/Adafruit_Learning_System_Guides
| 665
|
12781322
|
# SPDX-FileCopyrightText: 2021 <NAME>
# SPDX-License-Identifier: MIT
import board
from adafruit_led_animation.animation.sparkle import Sparkle
from adafruit_led_animation.color import PURPLE
from adafruit_led_animation.sequence import AnimationSequence
from adafruit_is31fl3741.adafruit_ledglasses import MUST_BUFFER, LED_Glasses
from adafruit_is31fl3741.led_glasses_animation import LED_Glasses_Animation
glasses = LED_Glasses(board.I2C(), allocate=MUST_BUFFER)
glasses.set_led_scaling(255)
glasses.global_current = 0xFE
glasses.enable = True
pixels = LED_Glasses_Animation(glasses)
anim2 = Sparkle(pixels, 0.05, PURPLE)
group = AnimationSequence(
anim2, advance_interval=5, auto_reset=True, auto_clear=True
)
while True:
group.animate()
| 2.3125
| 2
|
Classifiers/plot_sentiment.py
|
kanishk2509/TwitterBotDetection
| 2
|
12781323
|
<reponame>kanishk2509/TwitterBotDetection<filename>Classifiers/plot_sentiment.py<gh_stars>1-10
import matplotlib.pyplot as plt
import csv
def main():
hashtags_user = []
hashtags_bot = []
usermentions_user = []
usermentions_bot = []
rep_user = []
rep_bot = []
tpd_user = []
tpd_bot = []
age_user = []
age_bot = []
sentiment_user = []
sentiment_bot = []
tweet_count_user = []
tweet_count_bot = []
similarity_user = []
similarity_bot = []
print('Main')
with \
open('/home/chris/PycharmProjects/TwitterBotDetection/ApproachV4/datasets'
'/dataset.csv',
'r+',
encoding="utf-8") as inp:
reader = csv.DictReader(inp)
for row in reader:
if row['bot'] == '1':
hashtags_bot.append(float(row['hashtags_ratio']))
usermentions_bot.append(float(row['user_mentions_ratio']))
rep_bot.append(float(row['account_rep']))
tpd_bot.append(float(row['avg_tpd']))
age_bot.append(float(row['age']))
sentiment_bot.append(float(row['avg_tweet_sentiment']))
similarity_bot.append(float(row['avg_cosine_similarity']))
tweet_count_bot.append(float(row['tweet_count']))
else:
hashtags_user.append(float(row['hashtags_ratio']))
usermentions_user.append(float(row['user_mentions_ratio']))
x = float(row['account_rep'])
if x < 0:
x = 0
rep_user.append(x)
tpd_user.append(float(row['avg_tpd']))
age_user.append(float(row['age']))
sentiment_user.append(float(row['avg_tweet_sentiment']))
similarity_user.append(float(row['avg_cosine_similarity']))
tweet_count_user.append(float(row['tweet_count']))
x = range(100)
y = range(100, 200)
fig = plt.figure()
ax1 = fig.add_subplot(111)
plt.xlabel('similarity')
plt.ylabel('tweet count')
ax1.scatter(similarity_bot, tweet_count_bot, s=10, c='b', marker="s", label='bot')
ax1.scatter(similarity_user, tweet_count_user, s=10, c='r', marker="o", label='normal')
#ax1.scatter(cce_array_bot, spam_ratio_array_bot, s=10, c='b', marker="s", label='bot')
#ax1.scatter(cce_array_user, spam_ratio_array_user, s=10, c='r', marker="o", label='normal')
plt.legend(loc='upper left');
plt.show()
if __name__ == '__main__':
main()
| 2.640625
| 3
|
ipodder/feeds.py
|
sgrayban/CastPodder
| 0
|
12781324
|
#
# CastPodder feeds module
# Copyright (c) 2005-2006 <NAME> and the CastPodder Team
#
# $Id: feeds.py 147 2006-11-07 08:17:03Z sgrayban $
"""
CastPodder is Copyright © 2005-2006 <NAME>
Read the file Software_License_Agreement.txt for more info.
"""
__license__ = "Commercial"
import os
import sys
import stat
import logging
import urlparse
import urllib
import re
from ipodder.contrib import bloglines
from ipodder.contrib import urlnorm
import ipodder
from ipodder.grabbers import BasicGrabber
from ipodder import feedmanager
from gui.skin import DEFAULT_SUBS
log = logging.getLogger('iPodder')
SUB_STATES = ('unsubscribed', 'newly-subscribed', 'subscribed', 'preview', 'disabled', 'force')
def mkfeedkey(feedorint):
"Make a feedkey out of a feed or an integer."
assert feedorint is not None
return 'feed#%d' % int(feedorint)
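# Illustrative check (added for clarity, not in the original module): mkfeedkey()
# accepts either a Feed instance or a plain integer, e.g.
assert mkfeedkey(7) == 'feed#7'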
def urlrstrip(url):
"Strip nasty things from the trailing end of a URL."
while url[-3:] in ['%00', '%20']:
url = url[:-3]
scheme, addr, path, query, fragid = urlparse.urlsplit(url)
while path and ord(path[-1:]) <= 32:
path = path[:-1]
return urlparse.urlunsplit((scheme, addr, path, query, fragid))
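# Illustrative check (added for clarity, not in the original module): urlrstrip()
# drops trailing encoded NULs/spaces and control characters, e.g.
assert urlrstrip('http://example.com/feed.xml%20%20') == 'http://example.com/feed.xml'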
class Normalizer(object):
"""Class to normalize URLs."""
__cache = {}
def normalize(cls, url):
try:
return cls.__cache[url]
except KeyError:
return urlnorm.normalize(url)
normalize = classmethod(normalize)
class Feed(object):
"Represent a podcast feed."
def __init__(self, feeds, url, title='', sub_state='autopreview', manager_url=None, username='', password=''):
"Initialise the feed."
self.feeds = feeds
self.id = feeds.nextfeedid()
self.url = urlrstrip(url) # fix 1060842 whilst adding
assert sub_state in SUB_STATES
self.sub_state = sub_state
self.title = title
self.manager_url = manager_url
self.username = username
self.password = password
self.fix_state(noisy=False)
def fix_state(self, noisy=True):
"On unpickling, we might not have all the attributes we want."
def info(*a, **kw):
"""Let the user know what's going on, unless we're being quiet.
Uses a function attribute to keep track of whether it's the
first call or not."""
if not noisy:
return
if info.first:
log.debug("Fixing feed state for feed ID #%d", self.id)
info.first = False
log.info(*a, **kw)
info.first = True
if not self.sub_state in SUB_STATES:
info("Reassigning invalid subscription state %s",
self.sub_state)
self.sub_state = 'subscribed'
defaults = {
# maintained exclusively by us
'checked': None, # when did we last check? (time tuple)
'grabbed': None, # when did we last grab an enclosure?
'error': '', # set to string to give feed error display
'dirname': None, # relative to config.download_dir
# used when we start managing disk consumption:
'priority': 1,
'mblimit': 0,
# set from feedparser/HTTP
'modified': '', # last-modified according to HTTP (time tuple)
'etag': '', # etag last time we checked
'status': -1, # http status
# set from feedparser:
'version': 'unknown',
'title': '',
'tagline': '',
'generator': '',
'copyright_detail': {},
'copyright': '',
'link': '',
# auto cleanup options
'cleanup_enabled': False, # is auto-cleanup enabled?
'cleanup_max_days': 7, # cleanup files more than this many days old
'cleanup_last': None, # last time auto cleanup was run for this feed
# feed manager options
'manager_url': None, # are we managed by a central service?
# authentication options
'username': '',
'password': '',
}
for att, value in defaults.items():
if not hasattr(self, att):
info("Defaulting missing attribute %s to %s.",
repr(att), repr(value))
setattr(self, att, value)
if hasattr(self, 'name'):
info("Replacing old 'name' attribute with 'title'.")
self.title = self.name
del self.name
strippedurl = urlrstrip(self.url)
if not strippedurl and self.sub_state != 'disabled':
info("Disabling Garth's empty feed from hell...")
self.sub_state = 'disabled'
elif strippedurl != self.url:
info("Fixing feed with trailing control characters or whitespace.")
self.url = strippedurl
# strip out old normurl attributes
if self.__dict__.has_key('normurl'):
del self.__dict__['normurl']
# Do NOT helpfully flush changes. It'll lead to massive duplication.
def _get_normurl(self):
"What's our normalized URL?"
# info("Calculating normalized URL.")
return Normalizer.normalize(str(self.url))
normurl = property(_get_normurl)
def __str__(self):
"Convert a Feed into a string: use its name."
return unicode(self).encode('ascii', 'replace')
def __unicode__(self):
"Return a Unicode title for a feed."
if self.title:
return unicode(self.title)
else:
return unicode("Feed ID %d at %s" % (self.id, self.url))
def __int__(self):
"Convert a Feed into an int: use its id."
return self.id
# TODO: add a feeds arg to __init__, and a flush method.
def half_flush(self):
"""Flush this feed's information back to the state database."""
self.feeds.write_feed_to_state(self)
def flush(self):
"""Flush all feeds everywhere."""
self.feeds.flush()
def mb_on_disk(self):
"""Calculate how much disk space we're using. Doesn't scan the
playlist, so won't find anything outside of the feed's current
target directory."""
if self.dirname is None:
return 0.0
try:
path = self.target_directory
if not os.path.isdir(path):
return 0.0
files = [os.path.join(path, f) for f in os.listdir(path)]
files = [f for f in files if os.path.isfile(f)]
bytes = sum([os.stat(f)[stat.ST_SIZE] for f in files])
return float(bytes) / (1024.0*1024.0)
except OSError, ex:
errno, message = ex.args
            if errno == 2: # ENOENT, directory not found
return 0.0
log.exception("Caught OSError (errno %d, \"%s\") "\
"scanning directory %s", errno, message, path)
return 0.0
except: # Oh, no. Another @(*&^! blind exception catcher.
try:
log.exception("Can't calculate MB total for %s", self)
except (NameError, UnboundLocalError):
log.exception("Can't find path for feed at %s", self.url)
return 0.0
def get_target_directory(self):
"""Calculate the full target directory for this feed.
Computes self.dirname on the fly if necessary."""
if not self.dirname:
if self.title:
self.dirname = re.sub('[\\\\/?*:<>|;"\'\.]','', self.title)
else:
self.dirname = "Feed%d" % self.id
log.info("Feed \"%s\" now has target directory %s",
self.title, self.dirname)
self.half_flush()
return os.path.join(self.feeds.config.download_dir, self.dirname)
target_directory = property(
fget = get_target_directory,
doc = "Target directory for this feed."
)
def mkdir(self):
"""Ensure our directory exits."""
tg = self.target_directory
if not os.path.isdir(tg):
log.info("Creating target directory %s", tg)
os.makedirs(tg)
def get_target_filename(self, enclosure):
"""Calculate the target filename for an enclosure referred to
by this feed.
enclosure -- either a URL or something with a .url attribute."""
if hasattr(enclosure, 'url'):
url = enclosure.url
else:
url = enclosure
# this should get rid of all the extra stuff at the end of the url
scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
# however some enclosures put the real url in the query
if query[:4] == "http":
scheme, netloc, path, params, query, fragment = urlparse.urlparse(query)
path = urllib.url2pathname(path) # decodes %20 etc
filename = path.split(os.sep)[-1] # get last path component of URL
# check for data after .mp3 extension - hansjj
#idx = filename.find(".mp3")
#if (idx > -1):
# filename = filename[0:(idx+4)]
result = os.path.join(self.target_directory, filename)
# log.debug("%s -> %s", url, result)
return result
def get_target_status(self, enclosure):
"""Return target file status for this URL.
Was: is_url_present"""
target_file = self.get_target_filename(enclosure)
return self.get_target_file_status(target_file)
def get_target_file_status(self, filename):
"""Return (True, foundpath), or (False, None) if the path can't
be determined.
filename -- the base filename of the file (no path information!)
This method used to be called is_file_present."""
# match without extra resolution
if os.path.isfile(filename):
return (True, filename)
# match in our target directory
path = os.path.join(self.target_directory, filename)
if os.path.isfile(path):
return (True, path)
# match, uh, weirdly
path2 = os.path.join(self.feeds.config.download_dir, self.dirname, urllib.url2pathname(filename))
if os.path.isfile(path2):
return (True, path2)
# don't match
return (False, None)
if 0:
# These should be unused by now.
def file_path_from_url(self,url):
return self.is_url_present(url)[1]
def file_path(self,filename):
return self.is_file_present(filename)[1]
def getfiles(self):
files = []
if not self.dirname:
return []
dir = os.path.join(self.feeds.config.download_dir, self.dirname)
for f in os.listdir(dir):
pathname = os.path.join(dir,f)
info = os.stat(pathname)
ctime = info[stat.ST_CTIME]
size = info[stat.ST_SIZE]
files.append((f,pathname,ctime,size))
files.sort(lambda a, b: cmp(a[2],b[2]))
return files
class DuplicateFeedUrl(ValueError):
"""Used to reject duplicate feeds."""
pass
class Feeds(object):
"A list to keep track of feeds."
def __init__(self, config, state):
"Initialize the feeds from `config` and `state`."
object.__init__(self)
self.config = config
self.state = state
self.feeds_by_normalized_url = {}
self._members = []
self.absorb()
self.flush()
self.clean_state_database()
self.refresh_grabber_passwords()
#We need this for unpickling Enclosure objects.
ipodder.set_feeds_instance(self)
def nextfeedid(self):
"Return the next feed ID. Doesn't sync(); that's expected later."
state = self.state
feedid = state.get('lastfeedid', 1) # What's the last feedid we used?
if not isinstance(feedid, int):
feedid = 1
# Just in case, make sure we avoid collisions.
while True:
feedkey = mkfeedkey(feedid)
if state.has_key(feedkey):
feedid = feedid + 1
else:
break
state['lastfeedid'] = feedid
return feedid
def has_feed_url(self, url):
return self.has_feed_normurl(urlnorm.normalize(url))
def has_feed_normurl(self, normurl):
return self.feeds_by_normalized_url.has_key(normurl)
def __getitem__(self, key):
"""Retrieve a feed."""
if isinstance(key, int):
for feed in self._members:
if feed.id == key:
return feed
else:
raise KeyError, key
else:
normurl = urlnorm.normalize(key)
return self.feeds_by_normalized_url[normurl]
def get(self, key, default=None):
"""Retrieve a feed, returning a default value if it's not found."""
try:
return self[key]
except KeyError:
return default
def addfeed(self, url, quiet=False, *a, **kw):
"Create and add a feed, taking care of all state."
state = self.state
feed = Feed(self, url, *a, **kw)
assert feed is not None
match = self.feeds_by_normalized_url.get(feed.normurl)
if match is not None:
if match.sub_state != 'subscribed' and match.sub_state != feed.sub_state:
#update the old feed with the new feed's state, to
#handle adding a previously deleted feed.
match.sub_state = feed.sub_state
match.manager_url = feed.manager_url
if hasattr(state, 'sync'):
state.sync()
if quiet:
return None
else:
raise DuplicateFeedUrl, match
else:
self.feeds_by_normalized_url[feed.normurl] = feed
self.write_feed_to_state(feed)
self._members.append(feed)
return feed
def absorb(self):
"Absorb feed definitions from everywhere."
self.absorb_from_state()
self.absorb_from_favorites_file()
self.absorb_from_command_line()
self.absorb_from_bloglines()
self.absorb_from_preloads_dir()
self.absorb_default_feeds()
def absorb_default_feeds(self):
"Absorb the default feed if necessary."
if not len(self):
log.info("No feeds defined! Adding the default feeds.")
for (title,url) in DEFAULT_SUBS:
self.addfeed(url, title=title, sub_state='subscribed')
def absorb_from_state(self):
"Absorb feeds from the state database."
# First, let's dodge a known pickle problem.
try:
sys.modules['feeds'] = sys.modules['ipodder.feeds']
except KeyError:
pass # ugh
state = self.state
feeds_by_normalized_url = self.feeds_by_normalized_url
feedcount = 0
delfeeds = []
feedkeys = [key for key in state.keys() if key[:5] == 'feed#']
goodfeeds = []
for key in feedkeys:
feed = state[key] # will fail on pickle problem
if not hasattr(feed, '__class__') \
or feed.__class__.__name__ not in ['Feed', 'ipodder.feeds.Feed']:
log.error("Deleting non-feed object for key %s", key)
del state[key]
state.sync()
continue
goodfeeds.append(feed)
feedidmap = dict([(feed.id, feed) for feed in goodfeeds])
feedids = feedidmap.keys()
feedids.sort()
for feedid in feedids:
feed = feedidmap[feedid]
feed.fix_state() # add new attributes, etc
feed.feeds = self # and this one :)
# feeds_by_url is used so we can avoid loading duplicate
# feeds from all these different sources
collision = feeds_by_normalized_url.get(feed.normurl)
if collision is not None:
log.warn("Feed #%d (\"%s\") has the same URL as feed #%d (\"%s\"): %s",
feed.id, str(feed), collision.id, str(collision), feed.url)
delfeeds.append(feed)
else:
feeds_by_normalized_url[feed.normurl] = feed
self._members.append(feed)
feedcount = feedcount + 1
log.info("Loaded %d feeds from the state database.", feedcount)
if delfeeds:
log.warn("%d feeds need deleting.", len(delfeeds))
for delfeed in delfeeds:
feedkey = mkfeedkey(delfeed.id)
del state[feedkey]
if hasattr(state, 'sync'):
state.sync()
def absorb_from_favorites_file(self):
"Absorb feeds from the old favorites file."
filename = self.config.favorites_file
name, ext = os.path.splitext(filename)
feedcount = 0
sub_state = 'newly-subscribed'
if not len(self):
# If we're upgrading from 1.0 or previous, assume everything
# is subscribed.
sub_state = 'subscribed'
# If it's an OPML file, use the other method.
if ext == '.opml':
return self.absorb_from_opml_file(filename)
# Load from a flat file of URLs
log.debug("Attempting to load favorites file %s", filename)
try:
feeds = file(filename, 'rt')
for line in feeds:
url = line.strip()
if not url:
continue # it's an empty line!
if url[:1] == '#':
continue # it's a comment!
try:
self.addfeed(url, sub_state=sub_state)
log.info("Added from favorites file: %s", url)
feedcount = feedcount + 1
except DuplicateFeedUrl, ex:
pass # log.debug("Skipping known feed %s", url)
feeds.close()
except (IOError, OSError), ex:
errno, message = ex.args
            if errno == 2: # ENOENT, no such file
log.debug("... but it doesn't exist. Oops.")
else:
log.exception("Ran into some problem loading feeds "\
"from favorites file %s", filename)
log.info("Loaded %d new feeds from %s", feedcount, filename)
def absorb_from_command_line(self):
"""Absorb favorites from the command line."""
pass # not implemented yet, but let's not make it a show-stopper
def absorb_from_opml_file(self, filename, default_sub_state='unknown'):
"""Absorb favorites from an OPML file, defaulting their
subscription state. Return the number of subscriptions added,
or None if parsing failed."""
fh = open(filename,'r')
opml = fh.read()
fh.close()
import ipodder.outlines
tree = ipodder.outlines.Head.fromopml(opml)
if not tree:
return None
def traverse(node,numadded):
if not isinstance(node, ipodder.outlines.Node):
return
if not hasattr(node,"type"):
return
url = ''
if node.type == "link":
title = node.text
url = node.url
if node.type == "rss":
title = node.title
url = node.xmlUrl
if url:
self.addfeed(url,title=title,quiet=True,sub_state='newly-subscribed')
numadded += 1
for child in node:
numadded = traverse(child,numadded)
return numadded
numadded = traverse(tree,0)
return numadded
def absorb_from_bloglines(self):
"""Absorb favorites from Bloglines."""
if not self.config.bl_username:
log.info("Bloglines not configured.")
return
log.info("Attempting to load new feeds from Bloglines...")
if not self.config.bl_password:
log.error("Can't access Bloglines; no password specified.")
return
if not self.config.bl_folder:
log.error("Can't access Bloglines; no blogroll folder specified.")
return
newfeeds = 0
blfeeds = 0
try:
for url in bloglines.extractsubs(self.config.bl_username,
self.config.bl_password, self.config.bl_folder):
blfeeds += 1
try:
url = str(url) # strip Unicode
self.addfeed(url, sub_state='newly-subscribed')
log.info("Added from Bloglines: %s", url)
newfeeds = newfeeds + 1
except DuplicateFeedUrl, ex:
log.debug("Skipping known feed %s", url)
if not blfeeds:
log.error("Couldn't see anything in Bloglines. Either your "\
"folder is wrong, or you haven't subscribed to "\
"anything in it.")
except KeyError:
log.error("Couldn't load feeds from Bloglines because blogroll "\
"folder %s doesn't exist.", self.config.bl_folder)
except KeyboardInterrupt:
raise
except bloglines.urllib2.HTTPError, ex:
log.debug("%s", repr(ex.__dict__))
if ex.code == 401:
log.error("Can't access Bloglines: authentication failure.")
elif ex.code == 404:
log.error("Bloglines service appears to no longer be "\
"available where we think it is (404).")
elif ex.code == 503:
log.error("Bloglines service unavailable (503).")
else:
log.error("Can't access Bloglines; HTTP return code %d",
ex.code)
return
except:
log.exception("Experimental Bloglines support failed. "\
"Please report the following information:")
return
log.info("Loaded %d new feeds out of %d from Bloglines.",
newfeeds, blfeeds)
def absorb_from_preloads_dir(self):
"""Absorb feeds from any opml files in the application's
preloads subdirectory."""
#locate the preloads directory.
paths = ipodder.configuration.determine_paths()
preloads = paths['preloads']
if not os.path.exists(preloads):
log.debug("No preloads directory found at %s." % preloads)
return
#load history.
state = self.state
try:
old_preloads = state['old_preloads']
except KeyError:
old_preloads = []
log.debug("Old preloads: %s" % str(old_preloads))
#look for preload opml files
new_preloads = os.listdir(preloads)
log.debug("New preloads = %s" % str(new_preloads))
#check preload opml files against history
preloads_added = 0
for file in new_preloads:
if not file.endswith('.opml'):
log.debug('Preload file %s is not an OPML file.' % file)
continue
if old_preloads.count(file) > 0:
log.debug('Preload file %s is in history. Skipping.' % file)
continue
fullpath = os.path.join(preloads,file)
log.debug("Absorbing preloads file %s" % fullpath)
try:
feeds_added = self.absorb_from_opml_file(fullpath)
if feeds_added:
old_preloads.append(file)
preloads_added += 1
except:
log.exception("Preload failed from file %s" % fullpath)
if preloads_added > 0:
log.debug("Absorbed %d new preload files." % preloads_added)
state['old_preloads'] = old_preloads
def flush(self):
"""Flush feed definitions to our various places."""
self.write_to_state()
self.write_to_favorites_file()
def write_feed_to_state(self, feed, sync=True):
"""Write one feed's state to the state database."""
# TODO: fix grotty hack by using pickle protocol properly
state = self.state
feedkey = mkfeedkey(feed)
if hasattr(feed, 'feeds'):
del feed.feeds
state[feedkey] = feed
if sync:
if hasattr(state, 'sync'):
state.sync()
feed.feeds = self
def write_to_state(self):
"""Flush feed definitions to the state database."""
state = self.state
for feed in self._members:
self.write_feed_to_state(feed, sync=False)
if hasattr(state, 'sync'):
state.sync()
def write_to_favorites_file(self):
"""Flush feed definitions to the favorites file."""
filename = self.config.favorites_file
name, ext = os.path.splitext(filename)
        # If it's an OPML file, use the other method.
        if ext == '.opml':
            return self.write_to_opml_file(filename)
# Otherwise...
try:
favorites = file(filename, 'wt')
for feed in self._members:
if feed.sub_state in ('disabled',):
continue
try:
print >> favorites, "# %s" % feed
except UnicodeEncodeError, ex:
pass # simplistic, but it'll work
print >> favorites, feed.url
favorites.close()
log.info("Wrote %d entries to %s", len(self._members), filename)
except (IOError, OSError):
log.exception("Unexpected problem writing favorites file %s",
filename)
def write_to_opml_file(self, filename):
"""Flush feed definitions to an OPML file."""
#Step 1: Build the XML document
from xml.dom.minidom import getDOMImplementation
import time
impl = getDOMImplementation()
doc = impl.createDocument(None,"opml",None)
opml = doc.documentElement
opml.setAttribute("version","1.1")
head = doc.createElement("head")
title = doc.createElement("title")
title.appendChild(doc.createTextNode("CastPodder Exported Subscriptions"))
head.appendChild(title)
dc = doc.createElement("dateCreated")
dc.appendChild(doc.createTextNode(time.strftime('%a, %d %b %Y %T %z',time.localtime())))
head.appendChild(dc)
opml.appendChild(head)
body = doc.createElement("body")
rootOutline = doc.createElement("outline")
rootOutline.setAttribute("text","CastPodder Exported Subscriptions")
n = 0
for feed in self._members:
if feed.sub_state in ('disabled',):
continue
outline = doc.createElement("outline")
outline.setAttribute("type","rss")
outline.setAttribute("text",feed.title)
outline.setAttribute("title",feed.title)
outline.setAttribute("xmlUrl",feed.url)
rootOutline.appendChild(outline)
n += 1
body.appendChild(rootOutline)
opml.appendChild(body)
#Step 2: Write to file
try:
fh = open(filename, 'w')
fh.write(doc.toxml(encoding='utf-8'))
fh.close()
log.info("Wrote %d entries to %s", n, filename)
except (IOError, OSError):
log.exception("Unexpected problem writing opml file %s",
filename)
def __len__(self):
"How long are we?"
return len(self._members)
def __iter__(self):
"Support iteration through our members."
return iter(self._members)
def clean_state_database(self):
"Delete now-obsolete state keys."
state = self.state
first = True
        for key in list(state.keys()):  # take a copy so keys can be deleted while iterating
if key[:5] == 'feed-':
if first:
first = False
log.info("Cleaning up state database of stale feed "\
"status items.")
del state[key]
def get_target_status(self, url, hint=None, greedy=False):
"""Finds a target and returns information on it.
url -- the URL (or something with a .url attribute) to check
hint -- a particular Feed object to check against
greedy -- scan through all known feeds if hint isn't set
Returns: (exists, found_path, first_feed_it_was_found_in)
Was: is_url_present."""
if hint:
(is_present,path) = hint.get_target_status(url)
if is_present:
return (is_present, path, hint)
if not greedy:
return (False,None,None)
for feedinfo in self:
(is_present, path) = feedinfo.get_target_status(url)
if is_present:
return (is_present, path, feedinfo)
return (False, None, None)
def refresh_grabber_passwords(self):
"""Set up feed passwords. NOTE: urllib2 wants to deal with passwords
at the host level, and things break if we set up the password at
e.g. http://foo.bar.com/path/to/rss.xml, so we have to strip off
the path and register the password for http://foo.bar.com/". The
upshot is you can only register one username/password per host, at
least until one of us writes a better password manager."""
for feed in self._members:
if feed.username and feed.password:
import urlparse
p = urlparse.urlsplit(feed.url)
url = urlparse.urlunsplit([p[0],p[1],'/','',''])
BasicGrabber.shared_password_mgr.add_password(None, url, \
feed.username, feed.password)
if __name__ == '__main__':
import shelve
import types
import pickle
from ipodder import conlogging, configuration
import ipodder.state
import dbhash
logging.basicConfig()
handler = logging.StreamHandler()
handler.formatter = conlogging.ConsoleFormatter("%(message)s", wrap=False)
log.addHandler(handler)
log.propagate = 0
log.setLevel(logging.DEBUG)
parser = configuration.makeCommandLineParser()
options, args = parser.parse_args()
if args:
parser.error("only need options; no arguments.")
config = configuration.Configuration(options)
if 1:
log.info("Checking we can unpickle everything...")
state = dbhash.open(config.state_db_file, 'w')
keys = state.keys()
for key in keys:
if key == "tmp_downloads":
#don't unpickle me unless Feeds has been instantiated.
continue
try:
value = state[key]
except KeyError, ex:
log.error("Database corruption on key %s", repr(key))
state[key] = ''
del state[key]
state.sync()
else:
delete = False
try:
item = pickle.loads(value)
except (IndexError, KeyError, EOFError), ex:
delete = True
except TypeError, ex:
if ex.args:
if 'null bytes' in ex.args[0]:
delete = True
log.exception("Can't import module for key %s: %s", repr(key), ex.args)
if delete:
log.error("Record %s damaged beyond repair.", repr(key))
del state[key]
state.sync()
continue
state.close()
del state
log.info("Check complete. Creating Feeds object...")
state = ipodder.state.State(config)
feeds = Feeds(config, state)
if 0:
for feed in feeds:
print str(feed)
atts = [att for att in dir(feed)
if att[:1] != '_'
and not att in ['feeds']
and not isinstance(getattr(feed, att), types.MethodType)]
atts.sort()
for att in atts:
print " %s = %s" % (att, repr(getattr(feed, att)))
| 2.375
| 2
|
ai2business/datasets/test/test_sample_generator.py
|
Maximilianpy/ai2business
| 0
|
12781325
|
<reponame>Maximilianpy/ai2business
# Copyright 2020 AI2Business. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test-Environment for sample_generator."""
from ai2business.datasets import sample_generator
from ai2business.datasets.data import database
ref_DOW = database.StockMarket.__dict__["dowjones"]
def test_definition() -> None:
assert sample_generator.SampleGenerators.__name__ == "SampleGenerators"
def test_load_default() -> None:
assert sample_generator.stock_market() == ref_DOW
def test_load_failed() -> None:
assert sample_generator.stock_market("STOCK") == {}
| 2.109375
| 2
|
modules/interject.py
|
TBurchfield/bobbit-ng
| 0
|
12781326
|
# interject.py -----------------------------------------------------------------
import tornado.gen
# Metadata ---------------------------------------------------------------------
NAME = 'interject'
ENABLE = True
TYPE = 'command'
PATTERN = '^!interject (?P<first>[^\s]+) (?P<second>[^\s]+)$'
USAGE = '''Usage: !interject <first> <second>
Creates interject meme with first and second arguments.
Example:
> !interject Linux GNU
I'd just like to interject for a moment. What you’re referring to as Linux,
is in fact, GNU/Linux, or as I’ve recently taken to calling it, GNU plus
Linux.
'''
# Constants --------------------------------------------------------------------
TEMPLATE = "I'd just like to interject for a moment. What you’re referring to as {first}, is in fact, {second}/{first}, or as I’ve recently taken to calling it, {second} plus {first}."
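# Example (illustrative) ---------------------------------------------------------
# A quick sketch of how PATTERN and TEMPLATE fit together; the bot framework passes
# the named groups captured by PATTERN into command() as keyword arguments.
#
#   import re
#   groups = re.match(PATTERN, '!interject Linux GNU').groupdict()
#   print(TEMPLATE.format(**groups))   # prints the GNU/Linux interjection above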
# Command ----------------------------------------------------------------------
@tornado.gen.coroutine
def command(bot, nick, message, channel, first='Linux', second='GNU'):
bot.send_response(TEMPLATE.format(first=first, second=second), nick, channel)
# Register ---------------------------------------------------------------------
def register(bot):
return (
(PATTERN, command),
)
# vim: set sts=4 sw=4 ts=8 expandtab ft=python: --------------------------------
| 2.75
| 3
|
rogysensor.py
|
ndrogness/RogyGarden
| 0
|
12781327
|
<reponame>ndrogness/RogyGarden<filename>rogysensor.py
#!/usr/bin/env python3
import time
from collections import namedtuple
try:
from smbus2 import SMBus
except ImportError:
from smbus import SMBus
class RogySensor:
SensorData = namedtuple('SensorData', ['name', 'val', 'units'])
def __init__(self, sensor_type=None, history=5, samples_per_read=1):
self.sensor_type = sensor_type
self.active = False
self.vals = []
self.vals_ts = []
self.history = history
self.samples_per_read = samples_per_read
def __str__(self):
        # __str__ must return a string; each entry in self.vals is a list of SensorData
        # tuples, so flatten every timestamped reading into one comma-separated string.
        return ', '.join('At {0}: {1}={2}{3}'.format(time.asctime(self.vals_ts[i]), sd.name, sd.val, sd.units)
                         for i in range(len(self.vals)) for sd in self.vals[i])
def read(self, return_value=True, pretty_format=False):
# Override this
return [self.SensorData(name='na', val=-1, units='na')]
def post_conversion(self, in_value):
# Override this
return in_value
def _free_local(self):
# Override this
pass
def free(self):
if self.active is True:
self._free_local()
self.vals.clear()
self.vals_ts.clear()
class RogySensorI2C(RogySensor):
I2C_SENSORS = {
'BMP280': {'chipid': 0x58}
}
i2c_bus = SMBus(1)
def __init__(self, scl_pin=22, sda_pin=21, device='Unknown', history=5, samples_per_read=1):
super().__init__(sensor_type='i2c', history=history, samples_per_read=samples_per_read)
self._scl_pin = scl_pin
self._sda_pin = sda_pin
def _read_i2c(self):
# Override this
return [self.SensorData(name='na', val=-1, units='na')]
def read_data_val(self, sensordata_name):
self.read()
for _sval in self.vals[0]:
if _sval.name == sensordata_name:
return _sval.val, '{0}{1} at {2}'.format(_sval.val, _sval.units, time.asctime(self.vals_ts[0]))
else:
return None, 'Offline'
def read(self, return_value=True, pretty_format=False):
if self.active is not True:
return None
self.vals.insert(0, self._read_i2c())
if len(self.vals) > self.history:
del self.vals[self.history]
self.vals_ts.insert(0, time.localtime())
if len(self.vals_ts) > self.history:
del self.vals_ts[self.history]
if return_value is True:
if pretty_format is True:
return ['{0}={1}{2}'.format(sd.name, sd.val, sd.units) for sd in self.vals[0]]
else:
return self.post_conversion(self.vals[0])
else:
return None
class RogyBMP280(RogySensorI2C):
def __init__(self, scl_pin=22, sda_pin=21, history=5, samples_per_read=10):
super().__init__(scl_pin=scl_pin, sda_pin=sda_pin, device='BMP280', history=history, samples_per_read=samples_per_read)
# self.rs_i2c_device = MP_BMP280_I2C(i2c=self.rs_i2c, address=118)
self._read_i2c = self._read_bmp280
try:
import bmp280
except ImportError as err:
print('Missing bmp280 library:', err)
self.active = False
return
try:
self._sensor = bmp280.BMP280(i2c_dev=self.i2c_bus)
# self.rs_i2c = machine.I2C(scl=machine.Pin(scl_pin), sda=machine.Pin(sda_pin))
# Try a read
self._sensor.get_temperature()
except RuntimeError as err:
print('Cant start BMP280 sensor:', err)
self.active = False
return
self.active = True
def get_relative_altitude(self):
baseline_size = 100
baseline_values = []
for i in range(baseline_size):
pressure = self._sensor.get_pressure()
baseline_values.append(pressure)
time.sleep(.1)
baseline = sum(baseline_values[:-25]) / len(baseline_values[:-25])
return self._sensor.get_altitude(qnh=baseline)
def _read_bmp280(self):
# return self.rs_i2c_device.temperature
_st1 = 0
_st2 = 0
_st3 = 0
for i in range(0, self.samples_per_read):
_st1 += self._sensor.get_temperature()
_st2 += self._sensor.get_pressure()
_st3 += self._sensor.get_altitude()
time.sleep(.1)
# I'm American...convert to F
_st1 = '{:.1f}'.format(((_st1 / self.samples_per_read) * 9/5) + 32)
_st2 = '{:.2f}'.format(_st2 / self.samples_per_read)
_st3 = '{:.2f}'.format((_st3 / self.samples_per_read) * 3.28084)
# relative_altitude = '{:05.2f}'.format(self.get_relative_altitude() * 3.28084)
return [self.SensorData(name='temp', val=_st1, units='F'),
self.SensorData(name='bar_pres', val=_st2, units='hPa'),
self.SensorData(name='altitude', val=_st3, units='ft')
]
class RogyINA260(RogySensorI2C):
def __init__(self, scl_pin=22, sda_pin=21, history=5, samples_per_read=1):
super().__init__(scl_pin=scl_pin, sda_pin=sda_pin, device='INA260', history=history,
samples_per_read=samples_per_read)
# self.rs_i2c_device = MP_BMP280_I2C(i2c=self.rs_i2c, address=118)
self._read_i2c = self._read_ina260
try:
import board
import busio
import adafruit_ina260
except ImportError as err:
print('Missing INA260 sensor library:', err)
self.active = False
return
try:
self._sensor = adafruit_ina260.INA260(busio.I2C(board.SCL, board.SDA))
except ValueError as err2:
print('Cant start INA260 sensor:', err2)
self.active = False
return
self.active = True
def _read_ina260(self):
_st1 = 0
_st2 = 0
_st3 = 0
for i in range(0, self.samples_per_read):
_st1 += self._sensor.current
_st2 += self._sensor.voltage
_st3 += self._sensor.power
time.sleep(.1)
# Convert to Amps, V, Watts
_st1 = '{:.2f}'.format((_st1 / self.samples_per_read) / 1000)
_st2 = '{:.2f}'.format(_st2 / self.samples_per_read)
_st3 = '{:.2f}'.format((_st3 / self.samples_per_read) / 1000)
return [self.SensorData(name='current', val=_st1, units='A'),
self.SensorData(name='voltage', val=_st2, units='V'),
self.SensorData(name='power', val=_st3, units='W')
]
def main():
# vbat = machine.ADC(36)
# vbat.atten(vbat.ATTN_11DB)
# VBAT = Pin 35
bmp280 = RogyBMP280()
ina260 = RogyINA260()
while True:
print(bmp280.read(pretty_format=True))
print(ina260.read(pretty_format=True))
time.sleep(1)
if __name__ == '__main__':
main()
| 2.546875
| 3
|
geonode/geonode/groups/migrations/24_initial.py
|
ttungbmt/BecaGIS_GeoPortal
| 0
|
12781328
|
<reponame>ttungbmt/BecaGIS_GeoPortal
# -*- coding: utf-8 -*-
from django.db import migrations, models
from django.conf import settings
import taggit.managers
from django.utils.timezone import now
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('auth', '0006_require_contenttypes_0002'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='GroupInvitation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('token', models.CharField(max_length=40)),
('email', models.EmailField(max_length=254)),
('role', models.CharField(max_length=10, choices=[('manager', 'Manager'), ('member', 'Member')])),
('state', models.CharField(default='sent', max_length=10, choices=[('sent', 'Sent'), ('accepted', 'Accepted'), ('declined', 'Declined')])),
('created', models.DateTimeField(default=now)),
('from_user', models.ForeignKey(related_name='pg_invitations_sent',
to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
),
migrations.CreateModel(
name='GroupMember',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('role', models.CharField(max_length=10, choices=[('manager', 'Manager'), ('member', 'Member')])),
('joined', models.DateTimeField(default=now)),
],
),
migrations.CreateModel(
name='GroupProfile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=50, verbose_name='Title')),
('slug', models.SlugField(unique=True)),
('logo', models.ImageField(upload_to='people_group', verbose_name='Logo', blank=True)),
('description', models.TextField(verbose_name='Description')),
('email', models.EmailField(help_text='Email used to contact one or all group members, such as a mailing list, shared email, or exchange group.', max_length=254, null=True, verbose_name='Email', blank=True)),
                ('access', models.CharField(default='public', help_text='Public: Any registered user can view and join a public group.<br>Public (invite-only):Any registered user can view the group. Only invited users can join.<br>Private: Registered users cannot see any details about the group, including membership. Only invited users can join.', max_length=15, verbose_name='Access', choices=[('public', 'Public'), ('public-invite', 'Public (invite-only)'), ('private', 'Private')])),
('last_modified', models.DateTimeField(auto_now=True)),
('group', models.OneToOneField(to='auth.Group', on_delete=models.CASCADE)),
('keywords', taggit.managers.TaggableManager(to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A space or comma-separated list of keywords', verbose_name='Keywords')),
],
),
migrations.AddField(
model_name='groupmember',
name='group',
field=models.ForeignKey(to='groups.GroupProfile', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='groupmember',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE),
),
migrations.AddField(
model_name='groupinvitation',
name='group',
field=models.ForeignKey(related_name='invitations', to='groups.GroupProfile', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='groupinvitation',
name='user',
field=models.ForeignKey(related_name='pg_invitations_received',
to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True),
),
migrations.AlterUniqueTogether(
name='groupinvitation',
unique_together=set([('group', 'email')]),
),
]
| 1.90625
| 2
|
photoBatch.py
|
yoderra/photoscan_batch
| 0
|
12781329
|
<reponame>yoderra/photoscan_batch
#
# <NAME>
# <EMAIL> | <EMAIL>
# Mobile Geospatial Systems Group
# Department of Ecosystem Science and Management
# The Pennsylvania State University
# 2018/06/14
#
# Script to automate PhotoScan v1.3.3 processing.
#!/bin/python
import os, PhotoScan
print("---- Script started ----")
# create new photoscan document
doc = PhotoScan.app.document
print("---- Photoscan booted up ----")
# add a new chunk
chunk = doc.addChunk()
print("---- New chunk added ----")
# ask user for folder containing photos
path_photos = PhotoScan.app.getExistingDirectory("Enter folder containing photos:")
path_photos += "/"
image_list = os.listdir(path_photos)
photo_list = list()
for photo in image_list:
if photo.rsplit(".",1)[1].upper() in ["JPG", "JPEG", "TIF", "PNG"]:
photo_list.append(path_photos + photo)
print(photo)
else:
print("No photo available")
# lists all photos in folder (optional):
#print(photo_list)
# add photos to chunk
chunk.addPhotos(photo_list)
PhotoScan.app.update()
print("---- Added photos ----")
# match photos
chunk.matchPhotos(accuracy=PhotoScan.MediumAccuracy)
print("---- Matched photos ----")
# align photos
chunk.alignCameras()
print("---- Aligned Photos ----")
# build dense cloud
chunk.buildDenseCloud(quality=PhotoScan.MediumQuality)
print("---- Dense cloud built ----")
# build model
chunk.buildModel()
print("---- Model built ----")
# build texture
chunk.buildTexture()
print("---- Texture built ----")
# save document (WIP)
doc.save()
print("---- Project saved ----")
| 2.3125
| 2
|
pylearn2/data_augmentation.py
|
BouchardLab/pylearn2
| 0
|
12781330
|
from pylearn2.blocks import Block
from pylearn2.utils.rng import make_theano_rng
from pylearn2.space import Conv2DSpace, VectorSpace
import theano
from theano.compile.mode import get_default_mode
class ScaleAugmentation(Block):
def __init__(self, space, seed=20150111, mean=1., std=.05, cpu_only=True):
self.rng = make_theano_rng(seed, which_method=['normal'])
self.mean = mean
self.std = std
self.space = space
self.cpu_only = cpu_only
super(ScaleAugmentation, self).__init__()
def create_theano_function(self):
if hasattr(self, 'f'):
return self.f
else:
X = self.space.make_theano_batch()
dim = X.ndim
arg = (dim-1)*('x',)
scale = self.rng.normal(size=[X.shape[0]], avg=self.mean, std=self.std)
scale = scale.dimshuffle(0,*arg)
out = X*scale
if self.cpu_only:
mode = get_default_mode().excluding('gpu')
else:
mode = get_default_mode()
return theano.function([X], out, mode=mode)
def perform(self, X):
f = self.create_theano_function()
return f(X)
def get_input_space(self):
return self.space
def get_output_space(self):
return self.space
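if __name__ == '__main__':
    # Minimal usage sketch (an assumption, not part of pylearn2's test-suite):
    # rescale each row of a random batch by its own Gaussian factor drawn by the block.
    import numpy as np

    space = VectorSpace(4)
    augmenter = ScaleAugmentation(space, mean=1., std=.05)
    X = np.random.randn(8, 4).astype(theano.config.floatX)
    X_scaled = augmenter.perform(X)
    print(X_scaled.shape)  # (8, 4), each row multiplied by a per-example scale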
| 2.3125
| 2
|
qcloudsdkvod/WxPublishRequest.py
|
f3n9/qcloudcli
| 0
|
12781331
|
<reponame>f3n9/qcloudcli
# -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class WxPublishRequest(Request):
def __init__(self):
super(WxPublishRequest, self).__init__(
'vod', 'qcloudcliV1', 'WxPublish', 'vod.api.qcloud.com')
def get_SubAppId(self):
return self.get_params().get('SubAppId')
def set_SubAppId(self, SubAppId):
self.add_param('SubAppId', SubAppId)
def get_fileId(self):
return self.get_params().get('fileId')
def set_fileId(self, fileId):
self.add_param('fileId', fileId)
def get_fileUrl(self):
return self.get_params().get('fileUrl')
def set_fileUrl(self, fileUrl):
self.add_param('fileUrl', fileUrl)
def get_videoUin(self):
return self.get_params().get('videoUin')
def set_videoUin(self, videoUin):
self.add_param('videoUin', videoUin)
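if __name__ == '__main__':
    # Illustrative usage sketch, not from the SDK docs: the ids below are placeholders,
    # and get_params() comes from the qcloudsdkcore Request base class, as used by the
    # getters above.
    req = WxPublishRequest()
    req.set_SubAppId('your-sub-app-id')
    req.set_fileId('your-file-id')
    print(req.get_params())  # {'SubAppId': 'your-sub-app-id', 'fileId': 'your-file-id'}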
| 1.851563
| 2
|
modelexp/models/sas/_cube.py
|
DomiDre/modelexp
| 0
|
12781332
|
<reponame>DomiDre/modelexp<gh_stars>0
from ._saxsModel import SAXSModel
from fortSAS import cube
from numpy.polynomial.hermite import hermgauss
from numpy.polynomial.legendre import leggauss
class Cube(SAXSModel):
def initParameters(self):
self.params.add('a', 100, min=0)
self.params.add('sldCore', 40e-6)
self.params.add('sldSolvent', 10e-6)
self.params.add('sigA', 0., min=0)
self.params.add('i0', 1, min=0)
self.params.add('bg', 1e-6, min=0)
self.params.add('orderHermite', 15, min=1)
self.params.add('orderLegendre', 15, min=1)
self.addConstantParam('orderHermite')
self.addConstantParam('orderLegendre')
def initMagneticParameters(self):
self.params.add('magSldCore', 5e-6, min=0)
self.params.add('magSldSolvent', 0, vary=False)
self.addConstantParam('magSldSolvent')
def calcModel(self):
self.x_herm, self.w_herm = hermgauss(int(self.params['orderHermite']))
self.x_leg, self.w_leg = leggauss(int(self.params['orderLegendre']))
self.I = self.params['i0'] * cube.formfactor(
self.q,
self.params['a'],
self.params['sldCore'],
self.params['sldSolvent'],
self.params['sigA'],
self.x_herm, self.w_herm, self.x_leg, self.w_leg
) + self.params['bg']
self.r, self.sld = cube.sld(
self.params['a'],
self.params['sldCore'],
self.params['sldSolvent']
)
def calcMagneticModel(self):
self.x_herm, self.w_herm = hermgauss(int(self.params['orderHermite']))
self.x_leg, self.w_leg = leggauss(int(self.params['orderLegendre']))
self.I = self.params['i0'] * cube.magnetic_formfactor(
self.q,
self.params['a'],
self.params['sldCore'],
self.params['sldSolvent'],
self.params['sigA'],
self.params['magSldCore'],
self.params['magSldSolvent'],
self.params['xi'],
self.params['sin2alpha'],
self.params['polarization'],
self.x_herm, self.w_herm, self.x_leg, self.w_leg
) + self.params['bg']
self.r, self.sld = cube.sld(
self.params['a'],
self.params['sldCore'],
self.params['sldSolvent']
)
self.rMag, self.sldMag = cube.sld(
self.params['a'],
self.params['magSldCore'],
self.params['magSldSolvent']
)
| 2.09375
| 2
|
kronos_executor/kronos_executor/executor_schedule.py
|
ecmwf/kronos
| 4
|
12781333
|
#!/usr/bin/env python
from kronos_executor.executor import Executor
class ExecutorDepsScheduler(Executor):
"""
An ExecutorDepsScheduler passes a time_schedule of jobs to the real scheduler to be executed.
Certain elements of the ExecutorDepsScheduler can be overridden by the user.
"""
def __init__(self, config, schedule, arg_config=None):
"""
Initialisation. Passed a dictionary of configurations
"""
super(ExecutorDepsScheduler, self).__init__(config, schedule, arg_config=arg_config)
def do_run(self):
"""
Specific run function for this type of execution
:return:
"""
while len(self.jobs) != 0:
submittable = []
job_deps = []
for j in self.jobs:
try:
depends = j.depends
depend_ids = [self.submitted_job_ids[d] for d in depends]
# We have found a job
submittable.append(j)
job_deps.append(depend_ids)
except KeyError:
# Go on to the next job in the list
pass
self.job_submitter.submit(submittable, job_deps)
for job in submittable:
self.jobs.remove(job)
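def _sketch_submit_in_dependency_order(jobs):
    """Illustrative sketch only, not kronos API: the same submit-once-dependencies-are-known
    idea as do_run() above, reduced to plain dicts so the control flow is easy to follow.
    `jobs` maps a job name to the list of names it depends on."""
    submitted_ids = {}
    next_id = 0
    pending = dict(jobs)
    while pending:
        # A job becomes submittable once every dependency already has a scheduler id.
        batch = [name for name, deps in pending.items()
                 if all(d in submitted_ids for d in deps)]
        if not batch:
            raise RuntimeError("circular or unsatisfiable dependencies: %s" % sorted(pending))
        for name in batch:
            submitted_ids[name] = next_id  # stand-in for the real scheduler job id
            next_id += 1
            del pending[name]
    return submitted_ids


# Example: _sketch_submit_in_dependency_order({'a': [], 'b': ['a'], 'c': ['a', 'b']})
# submits 'a' first, then 'b', then 'c'.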
| 3.046875
| 3
|
tutorial/snippets/views_rest_mixins.py
|
BennyJane/django-demo
| 0
|
12781334
|
<reponame>BennyJane/django-demo
from django.http import Http404
from snippets.models import Snippet
from snippets.serializers import SnippetSerializer
from rest_framework import status
from rest_framework import generics
from rest_framework import mixins
from rest_framework.views import APIView
from rest_framework.response import Response
# Use mixin classes to further improve code reuse.
# Logic: inherit from the generics.GenericAPIView base class, which provides the core
# functionality, and add the mixin classes, which provide the .list, .create, etc. actions.
class SnippetList(mixins.ListModelMixin,
mixins.CreateModelMixin,
generics.GenericAPIView):
queryset = Snippet.objects.all()
serializer_class = SnippetSerializer
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
class SnippetDetail(mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.DestroyModelMixin,
generics.GenericAPIView):
queryset = Snippet.objects.all()
serializer_class = SnippetSerializer
def get(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
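# Illustrative wiring sketch: how these views might be routed. In a real project this
# block would live in snippets/urls.py; the 'snippets/' route strings are assumptions.
from django.urls import path

urlpatterns = [
    path('snippets/', SnippetList.as_view()),
    path('snippets/<int:pk>/', SnippetDetail.as_view()),
]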
| 2.0625
| 2
|
src/vak/config/predict.py
|
jspaaks/vak
| 26
|
12781335
|
"""parses [PREDICT] section of config"""
import os
from pathlib import Path
import attr
from attr import converters, validators
from attr.validators import instance_of
from .validators import is_a_directory, is_a_file, is_valid_model_name
from .. import device
from ..converters import comma_separated_list, expanded_user_path
@attr.s
class PredictConfig:
"""class that represents [PREDICT] section of config.toml file
Attributes
----------
csv_path : str
path to where dataset was saved as a csv.
checkpoint_path : str
path to directory with checkpoint files saved by Torch, to reload model
labelmap_path : str
path to 'labelmap.json' file.
models : list
of model names. e.g., 'models = TweetyNet, GRUNet, ConvNet'
batch_size : int
number of samples per batch presented to models during training.
num_workers : int
Number of processes to use for parallel loading of data.
Argument to torch.DataLoader. Default is 2.
device : str
Device on which to work with model + data.
Defaults to 'cuda' if torch.cuda.is_available is True.
spect_scaler_path : str
path to a saved SpectScaler object used to normalize spectrograms.
If spectrograms were normalized and this is not provided, will give
incorrect results.
annot_csv_filename : str
name of .csv file containing predicted annotations.
Default is None, in which case the name of the dataset .csv
is used, with '.annot.csv' appended to it.
output_dir : str
path to location where .csv containing predicted annotation
should be saved. Defaults to current working directory.
min_segment_dur : float
minimum duration of segment, in seconds. If specified, then
any segment with a duration less than min_segment_dur is
removed from lbl_tb. Default is None, in which case no
segments are removed.
majority_vote : bool
if True, transform segments containing multiple labels
into segments with a single label by taking a "majority vote",
i.e. assign all time bins in the segment the most frequently
occurring label in the segment. This transform can only be
applied if the labelmap contains an 'unlabeled' label,
because unlabeled segments makes it possible to identify
        the labeled segments. Default is True.
save_net_outputs : bool
if True, save 'raw' outputs of neural networks
before they are converted to annotations. Default is False.
Typically the output will be "logits"
to which a softmax transform might be applied.
For each item in the dataset--each row in the `csv_path` .csv--
the output will be saved in a separate file in `output_dir`,
with the extension `{MODEL_NAME}.output.npz`. E.g., if the input is a
spectrogram with `spect_path` filename `gy6or6_032312_081416.npz`,
and the network is `TweetyNet`, then the net output file
will be `gy6or6_032312_081416.tweetynet.output.npz`.
"""
# required, external files
checkpoint_path = attr.ib(converter=expanded_user_path, validator=is_a_file)
labelmap_path = attr.ib(converter=expanded_user_path, validator=is_a_file)
# required, model / dataloader
models = attr.ib(
converter=comma_separated_list,
validator=[instance_of(list), is_valid_model_name],
)
batch_size = attr.ib(converter=int, validator=instance_of(int))
# csv_path is actually 'required' but we can't enforce that here because cli.prep looks at
# what sections are defined to figure out where to add csv_path after it creates the csv
csv_path = attr.ib(
converter=converters.optional(expanded_user_path),
validator=validators.optional(is_a_file),
default=None,
)
# optional, transform
spect_scaler_path = attr.ib(
converter=converters.optional(expanded_user_path),
validator=validators.optional(is_a_file),
default=None,
)
# optional, data loader
num_workers = attr.ib(validator=instance_of(int), default=2)
device = attr.ib(validator=instance_of(str), default=device.get_default())
annot_csv_filename = attr.ib(
validator=validators.optional(instance_of(str)), default=None
)
output_dir = attr.ib(
converter=expanded_user_path,
validator=is_a_directory,
default=Path(os.getcwd()),
)
min_segment_dur = attr.ib(
validator=validators.optional(instance_of(float)), default=None
)
majority_vote = attr.ib(validator=instance_of(bool), default=True)
save_net_outputs = attr.ib(validator=instance_of(bool), default=False)
| 2.515625
| 3
|
plum/util.py
|
ruancomelli/plum
| 153
|
12781336
|
<filename>plum/util.py
import abc
import logging
__all__ = ["multihash", "Comparable", "is_in_class", "get_class", "get_context"]
log = logging.getLogger(__name__)
def multihash(*args):
"""Multi-argument order-sensitive hash.
Args:
*args: Objects to hash.
Returns:
int: Hash.
"""
return hash(args)
class Comparable:
"""A mixin that makes instances of the class comparable.
Requires the subclass to just implement `__le__`.
"""
__metaclass__ = abc.ABCMeta
def __eq__(self, other):
return self <= other <= self
def __ne__(self, other):
return not self == other
@abc.abstractmethod
def __le__(self, other):
pass # pragma: no cover
def __lt__(self, other):
return self <= other and self != other
def __ge__(self, other):
return other.__le__(self)
def __gt__(self, other):
return self >= other and self != other
def is_comparable(self, other):
"""Check whether this object is comparable with another one.
Args:
other (:class:`.util.Comparable`): Object to check comparability
with.
Returns:
bool: `True` if the object is comparable with `other` and `False`
otherwise.
"""
return self < other or self == other or self > other
def is_in_class(f):
"""Check if a function is part of a class.
Args:
f (function): Function to check.
Returns:
bool: `True` if `f` is part of a class, else `False`.
"""
parts = f.__qualname__.split(".")
return len(parts) >= 2 and parts[-2] != "<locals>"
def _split_parts(f):
qualified_name = f.__module__ + "." + f.__qualname__
return qualified_name.split(".")
def get_class(f):
"""Assuming that `f` is part of a class, get the fully qualified name of the
class.
Args:
f (function): Method to get class name for.
Returns:
str: Fully qualified name of class.
"""
parts = _split_parts(f)
return ".".join(parts[:-1])
def get_context(f):
"""Get the fully qualified name of the context for `f`.
If `f` is part of a class, then the context corresponds to the scope of the class.
If `f` is not part of a class, then the context corresponds to the scope of the
function.
Args:
f (function): Method to get context for.
Returns:
str: Context.
"""
parts = _split_parts(f)
if is_in_class(f):
# Split off function name and class.
return ".".join(parts[:-2])
else:
# Split off function name only.
return ".".join(parts[:-1])
| 3.21875
| 3
|
listings/syndication/migrations/0001_initial.py
|
wtrevino/django-listings
| 2
|
12781337
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'FeedType'
db.create_table('syndication_feedtype', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100)),
('template', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
('content_type', self.gf('django.db.models.fields.CharField')(default='Content-type: application/xml', unique=True, max_length=100)),
))
db.send_create_signal('syndication', ['FeedType'])
# Adding model 'Feed'
db.create_table('syndication_feed', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('feed_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['syndication.FeedType'])),
))
db.send_create_signal('syndication', ['Feed'])
# Adding M2M table for field site on 'Feed'
db.create_table('syndication_feed_site', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('feed', models.ForeignKey(orm['syndication.feed'], null=False)),
('site', models.ForeignKey(orm['sites.site'], null=False))
))
db.create_unique('syndication_feed_site', ['feed_id', 'site_id'])
def backwards(self, orm):
# Deleting model 'FeedType'
db.delete_table('syndication_feedtype')
# Deleting model 'Feed'
db.delete_table('syndication_feed')
# Removing M2M table for field site on 'Feed'
db.delete_table('syndication_feed_site')
models = {
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'syndication.feed': {
'Meta': {'object_name': 'Feed'},
'feed_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['syndication.FeedType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False'})
},
'syndication.feedtype': {
'Meta': {'object_name': 'FeedType'},
'content_type': ('django.db.models.fields.CharField', [], {'default': "'Content-type: application/xml'", 'unique': 'True', 'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'template': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
}
}
complete_apps = ['syndication']
| 2.1875
| 2
|
0_Strings/operators.py
|
ChristopherDaigle/udemy_python
| 0
|
12781338
|
# Python has three numeric types:
#
# 1. int: -1
# Python 3 has no integer max size
#
# 2. float: -1.0
# Max value is: 1.7976931348623157e+308
# Min value: 2.2250738585072014e-308
# 52 explicit bits of precision (about 15-16 significant decimal digits)
# Python 3 also has a "Decimal" data type which is more precise
#
# 3. complex: i ** 2
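#
# A quick illustration of the complex type (j is the imaginary unit in Python):
c = 2 + 3j
print(c.real, c.imag) # 2.0 3.0
print(1j ** 2) # (-1+0j), i.e. i squared is -1
print()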
a = 12
b = 3
print(a + b) # 15
print(a - b) # 9
print(a * b) # 36
print(a / b) # 4.0
print(a // b) # 4, integer division, rounded down toward negative infinity
print(a % b) # 0, modulo, remainder after integer division
print()
# Won't work because of being a floating point value
# for i in range(1, a / b):
for i in range(1, a // b):
print(i)
print(a ** 2)
print()
# Follows order of operations
print(a + b / 3 - 4 * 12)
print(a + (b / 3) - (4 * 12))
print()
# Sequence: ORDERED set of items
# "Hello World" is a sequence of 11 items, each are string characters
# ['computer', 'monitor', 'keyboard', 'mouse', 'mouse pad'] is a sequence of 5 items, each are strings, which are
# also sequences themselves - a list may be a sequence of sequences
computer_parts = ['computer', 'monitor', 'keyboard', 'mouse', 'mouse pad']
print(computer_parts[1]) # 'monitor'
print(computer_parts[1][0]) # 'm'
| 4.34375
| 4
|
bin/SchemaUpgrade/versions/4c7ab7d3b46c_version_0_68_007.py
|
Middlecon/DBImport
| 10
|
12781339
|
"""Version 0.68.007
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2021-10-22 06:59:47.134546
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from sqlalchemy import Enum
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = 'e3<PASSWORD>4da580'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('airflow_tasks', sa.Column('sensor_soft_fail', sa.Integer(), nullable=True, comment='Setting this to 1 will add soft_fail=True on sensor'))
op.execute("ALTER TABLE `airflow_tasks` CHANGE COLUMN `sensor_soft_fail` `sensor_soft_fail` INTEGER NULL COMMENT 'Setting this to 1 will add soft_fail=True on sensor' AFTER `sensor_timeout_minutes`")
op.add_column('airflow_dag_sensors', sa.Column('sensor_soft_fail', sa.Integer(), nullable=True, comment='Setting this to 1 will add soft_fail=True on sensor'))
op.alter_column('airflow_custom_dags', 'schedule_interval',
existing_type=mysql.VARCHAR(length=32),
type_=mysql.VARCHAR(length=128),
comment='Time to execute dag',
existing_nullable=False)
op.alter_column('airflow_etl_dags', 'schedule_interval',
existing_type=mysql.VARCHAR(length=32),
type_=mysql.VARCHAR(length=128),
comment='Time to execute dag',
existing_nullable=False)
op.alter_column('airflow_export_dags', 'schedule_interval',
existing_type=mysql.VARCHAR(length=32),
type_=mysql.VARCHAR(length=128),
comment='Time to execute dag',
existing_nullable=False)
op.alter_column('airflow_import_dags', 'schedule_interval',
existing_type=mysql.VARCHAR(length=32),
type_=mysql.VARCHAR(length=128),
comment='Time to execute dag',
existing_nullable=False)
def downgrade():
op.drop_column('airflow_tasks', 'sensor_soft_fail')
op.drop_column('airflow_dag_sensors', 'sensor_soft_fail')
op.alter_column('airflow_custom_dags', 'schedule_interval',
existing_type=mysql.VARCHAR(length=128),
type_=mysql.VARCHAR(length=32),
comment='Time to execute dag',
existing_nullable=False)
op.alter_column('airflow_etl_dags', 'schedule_interval',
existing_type=mysql.VARCHAR(length=128),
type_=mysql.VARCHAR(length=32),
comment='Time to execute dag',
existing_nullable=False)
op.alter_column('airflow_export_dags', 'schedule_interval',
existing_type=mysql.VARCHAR(length=128),
type_=mysql.VARCHAR(length=32),
comment='Time to execute dag',
existing_nullable=False)
op.alter_column('airflow_import_dags', 'schedule_interval',
existing_type=mysql.VARCHAR(length=128),
type_=mysql.VARCHAR(length=32),
comment='Time to execute dag',
existing_nullable=False)
| 1.828125
| 2
|
tests/server/test_bios_profile.py
|
ecoen66/imcsdk
| 31
|
12781340
|
<filename>tests/server/test_bios_profile.py
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from ..connection.info import custom_setup, custom_teardown
from nose.tools import assert_equal, assert_not_equal
from imcsdk.apis.server.bios import bios_profile_backup_running, \
bios_profile_upload, bios_profile_activate, bios_profile_delete,\
bios_profile_get, bios_profile_generate_json, is_bios_profile_enabled,\
bios_profile_exists
handle = None
REMOTE_SERVER = ''
REMOTE_FILE = ''
USER = ''
PASSWORD = ''
expected_output = {
"name": "simple",
"description": "Simple Profile",
"tokens": {
"TPMAdminCtrl": "Enabled",
"TerminalType": "PC-ANSI"
}
}
def setup_module():
global handle
handle = custom_setup()
def teardown_module():
global handle
custom_teardown(handle)
def test_bios_profile_backup():
bios_profile_backup_running(handle, server_id=1)
assert_not_equal(bios_profile_get(handle, name='cisco_backup_profile'),
None)
def test_bios_profile_upload():
bios_profile_upload(handle, remote_server=REMOTE_SERVER,
remote_file=REMOTE_FILE, protocol='scp',
user=USER, pwd=PASSWORD)
time.sleep(2)
assert_not_equal(bios_profile_get(handle, name='simple'),
None)
def test_bios_profile_activate():
bios_profile_activate(handle, name='simple',
backup_on_activate=True, reboot_on_activate=False)
assert_equal(is_bios_profile_enabled(handle,
name='simple',
server_id=1),
True)
def test_bios_profile_exists():
match, mo = bios_profile_exists(handle, name='simple',
enabled=True)
assert_equal(match, True)
def test_bios_profile_not_exists():
match, mo = bios_profile_exists(handle, name='complex')
assert_equal(match, False)
def test_bios_profile_generate_json():
diff = []
output = bios_profile_generate_json(handle, name='simple')
output_tokens = output.pop('tokens')
expected_tokens = expected_output.pop('tokens')
diff = [key for key in output if key in expected_output and
output[key] != expected_output[key]]
assert_equal(diff, [])
diff = [key for key in output_tokens if
key in expected_tokens and output_tokens[key] != expected_tokens[key]]
assert_equal(diff, [])
def test_bios_profile_delete():
bios_profile_delete(handle, name='simple')
assert_equal(bios_profile_get(handle, name='simple'), None)
| 2.171875
| 2
|
mesh/vis_utils.py
|
melonwan/sphereHand
| 53
|
12781341
|
from __future__ import print_function, division, absolute_import
import pickle
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import pickle
def visualize_vertices(vertices:np.ndarray, bones:np.ndarray = None):
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(vertices[:-1:5,0], vertices[:-1:5,1], vertices[:-1:5,2], c='b')
print('%f to %f' % (vertices[:,2].min(), vertices[:,2].max()))
if bones is not None:
joints = []
for bone in bones:
joint = np.linalg.inv(bone['offset_matrix'])[0:3, 3]
joints.append(np.expand_dims(joint, axis=0))
joints = np.vstack(joints)
ax.scatter(joints[:,0], joints[:,1], joints[:,2], c='r')
print('%f to %f' % (joints[:,2].min(), joints[:,2].max()))
plt.show()
if __name__ == '__main__':
with open('mesh/model/preprocessed_right_hand.pkl', 'rb') as f:
mesh = pickle.load(f)
visualize_vertices(mesh['vertices'], mesh['bones'])
| 2.40625
| 2
|
bert-baselines/bert_models.py
|
mawdoo3/alue_baselines
| 2
|
12781342
|
<filename>bert-baselines/bert_models.py<gh_stars>1-10
from transformers import BertPreTrainedModel, BertModel
from torch import nn
from torch.nn import BCEWithLogitsLoss
class BertForMultiLabelSequenceClassification(BertPreTrainedModel):
"""
Bert Model transformer with a multi-label sequence classification head on top
(a linear layer with sigmoid activation on top of the pooled output).
"""
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.linear = nn.Linear(config.hidden_size, self.config.num_labels)
self.classifier = nn.Sigmoid()
self.init_weights()
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
pooled_output = self.linear(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = BCEWithLogitsLoss()
# Apply loss before the `Sigmoid` layer, as `BCEWithLogitsLoss`
# internally applies `Sigmoid` in a more numerically stable fashion.
loss = loss_fct(pooled_output, labels.type_as(pooled_output))
outputs = (loss,) + outputs
return outputs # (loss), logits, (hidden_states), (attentions)
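if __name__ == "__main__":
    # Minimal usage sketch (assumptions: 'bert-base-uncased' weights, 5 labels, and a
    # transformers release with a callable tokenizer); not part of the ALUE baselines.
    import torch
    from transformers import BertTokenizer

    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    model = BertForMultiLabelSequenceClassification.from_pretrained(
        "bert-base-uncased", num_labels=5
    )

    encoded = tokenizer(["an example sentence"], return_tensors="pt", padding=True)
    labels = torch.zeros((1, 5))  # multi-hot targets, one column per label

    loss, probs = model(labels=labels, **encoded)[:2]
    print(loss.item(), probs.shape)  # probs: (batch_size, num_labels), each in [0, 1]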
| 2.796875
| 3
|
tests/mock.py
|
asyncee/pycamunda
| 0
|
12781343
|
<gh_stars>0
# -*- coding: utf-8 -*-
import requests
def raise_requests_exception_mock(*args, **kwargs):
raise requests.exceptions.RequestException
def not_ok_response_mock(*args, **kwargs):
class Response:
ok = False
text = 'text'
content = 'content'
def __bool__(self):
return bool(self.ok)
def json(self):
return {
'message': 'an error message',
'count': 1,
'bpmn20Xml': '<my>test</xml>'
}
return Response()
def response_mock(*args, **kwargs):
class Response:
ok = True
text = 'text'
content = 'content'
def __bool__(self):
return bool(self.ok)
def json(self):
return {
'message': 'an error message',
'count': 1,
'bpmn20Xml': '<my>test</xml>'
}
return Response()
def count_response_mock(*args, **kwargs):
class Response:
ok = True
def __bool__(self):
return bool(self.ok)
def json(self):
return {'count': 1}
return Response()
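def example_usage_with_monkeypatch(monkeypatch):
    """Illustrative sketch, not a collected test: how these mocks are typically wired in
    with pytest's monkeypatch fixture. The URL below is a placeholder."""
    monkeypatch.setattr(requests, 'get', not_ok_response_mock)
    response = requests.get('http://localhost:8080/engine-rest/whatever')
    assert not response
    assert response.json()['message'] == 'an error message'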
| 2.734375
| 3
|
project_dashboard/projects/migrations/0015_auto_20180524_1531.py
|
KruizerChick/project-dashboard
| 0
|
12781344
|
<gh_stars>0
# Generated by Django 2.0.5 on 2018-05-24 20:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0014_auto_20180524_1438'),
]
operations = [
migrations.AlterField(
model_name='issue',
name='category',
field=models.ManyToManyField(related_name='issues', to='projects.Category', verbose_name='categories'),
),
]
| 1.492188
| 1
|
examples/embed/plot_cca_comparison.py
|
idc9/mvlearn
| 0
|
12781345
|
"""
======================
Comparing CCA Variants
======================
A comparison of Kernel Canonical Correlation Analysis (KCCA) with three
different types of kernel to Deep Canonical Correlation Analysis (DCCA).
Each learns and computes kernels suitable for different situations. The point
of this tutorial is to illustrate, in toy examples, the rough intuition as to
when such methods work well and generate linearly correlated projections.
The simulated latent data has two signal dimensions draw from independent
Gaussians. Two views of data were derived from this.
- View 1: The latent data.
- View 2: A transformation of the latent data.
To each view, two additional independent Gaussian noise dimensions were added.
Each 2x2 grid of subplots in the figure corresponds to a transformation and
either the raw data or a CCA variant. The x-axes are the data from view 1
and the y-axes are the data from view 2. Plotted are the correlations between
the signal dimensions of the raw views and the top two components of each
view after a CCA variant transformation. Linearly correlated plots on the
diagonals of the 2x2 grids indicate that the CCA method was able to
successfully learn the underlying functional relationship between the two
views.
"""
from mvlearn.embed import KCCA, DCCA
from mvlearn.datasets import GaussianMixture
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
# Make Latents
n_samples = 200
centers = [[0, 1], [0, -1]]
covariances = 2*np.array([np.eye(2), np.eye(2)])
gm_train = GaussianMixture(n_samples, centers, covariances)
# Test
gm_test = GaussianMixture(n_samples, centers, covariances)
# Make 2 views
n_noise = 2
transforms = ['linear', 'poly', 'sin']
Xs_train = []
Xs_test = []
for transform in transforms:
gm_train.sample_views(transform=transform, n_noise=n_noise)
gm_test.sample_views(transform=transform, n_noise=n_noise)
Xs_train.append(gm_train.get_Xy()[0])
Xs_test.append(gm_test.get_Xy()[0])
# Plotting parameters
labels = gm_test.latent_[:, 0]
cmap = matplotlib.colors.ListedColormap(
sns.diverging_palette(240, 10, n=len(labels), center='light').as_hex())
cmap = 'coolwarm'
method_labels = \
['Raw Views', 'Linear KCCA', 'Polynomial KCCA', 'Gaussian KCCA', 'DCCA']
transform_labels = \
['Linear Transform', 'Polynomial Transform', 'Sinusoidal Transform']
input_size1, input_size2 = Xs_train[0][0].shape[1], Xs_train[0][1].shape[1]
outdim_size = min(Xs_train[0][0].shape[1], 2)
layer_sizes1 = [256, 256, outdim_size]
layer_sizes2 = [256, 256, outdim_size]
methods = [
KCCA(ktype='linear', reg=0.1, degree=2.0, constant=0.1, n_components=2),
KCCA(ktype='poly', reg=0.1, degree=2.0, constant=0.1, n_components=2),
KCCA(ktype='gaussian', reg=1.0, sigma=2.0, n_components=2),
DCCA(input_size1, input_size2, outdim_size, layer_sizes1, layer_sizes2,
epoch_num=400)
]
fig, axes = plt.subplots(3 * 2, 5 * 2, figsize=(20, 12))
sns.set_context('notebook')
for r, transform in enumerate(transforms):
axs = axes[2 * r:2 * r + 2, :2]
for i, ax in enumerate(axs.flatten()):
dim2 = int(i / 2)
dim1 = i % 2
ax.scatter(
Xs_test[r][0][:, dim1],
Xs_test[r][1][:, dim2],
cmap=cmap,
c=labels,
)
ax.set_xticks([], [])
ax.set_yticks([], [])
if dim1 == 0:
ax.set_ylabel(f"View 2 Dim {dim2+1}")
if dim1 == 0 and dim2 == 0:
ax.text(-0.5, -0.1, transform_labels[r], transform=ax.transAxes,
fontsize=18, rotation=90, verticalalignment='center')
if dim2 == 1 and r == len(transforms)-1:
ax.set_xlabel(f"View 1 Dim {dim1+1}")
if i == 0 and r == 0:
ax.set_title(method_labels[r],
{'position': (1.11, 1), 'fontsize': 18})
for c, method in enumerate(methods):
axs = axes[2*r: 2*r+2, 2*c+2:2*c+4]
Xs = method.fit(Xs_train[r]).transform(Xs_test[r])
for i, ax in enumerate(axs.flatten()):
dim2 = int(i / 2)
dim1 = i % 2
ax.scatter(
Xs[0][:, dim1],
Xs[1][:, dim2],
cmap=cmap,
c=labels,
)
if dim2 == 1 and r == len(transforms)-1:
ax.set_xlabel(f"View 1 Dim {dim1+1}")
if i == 0 and r == 0:
ax.set_title(method_labels[c + 1], {'position': (1.11, 1),
'fontsize': 18})
ax.axis("equal")
ax.set_xticks([], [])
ax.set_yticks([], [])
| 2.828125
| 3
|
pyvizio/api/base.py
|
jezzab/pyvizio
| 72
|
12781346
|
<filename>pyvizio/api/base.py
"""Vizio SmartCast API base commands."""
from abc import abstractmethod
from typing import Any, Dict
class CommandBase(object):
"""Base command to send data to Vizio device."""
def __init__(self, url: str = "") -> None:
"""Initialize base command to send data to Vizio device."""
self._url = url
def __repr__(self) -> str:
return f"{type(self).__name__}({self.__dict__})"
def __eq__(self, other) -> bool:
return self is other or self.__dict__ == other.__dict__
@property
def _method(self) -> str:
"""Get command method."""
return "put"
@property
def url(self) -> str:
"""Get endpoint for command."""
return self._url
@url.setter
def url(self, new_url: str) -> None:
"""Set endpoint for command."""
self._url = new_url
def get_url(self) -> str:
"""Get endpoint for command."""
return self._url
def get_method(self) -> str:
return self._method
@abstractmethod
def process_response(self, json_obj: Dict[str, Any]) -> Any:
"""Always return True when there is no custom process_response method for subclass."""
return True
class InfoCommandBase(CommandBase):
"""Base command to get data from Vizio device."""
def __init__(self, url: str = "") -> None:
"""Initialize base command to get data from Vizio device."""
super(InfoCommandBase, self).__init__(url)
@property
def _method(self) -> str:
"""Get command method."""
return "get"
@property
def url(self) -> str:
"""Get endpoint for command."""
return CommandBase.url.fget(self)
@url.setter
def url(self, new_url: str) -> None:
"""Set endpoint for command."""
CommandBase.url.fset(self, new_url)
def process_response(self, json_obj: Dict[str, Any]) -> Any:
"""Always return None when there is no custom process_response method for subclass."""
return None
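class _ExampleValueCommand(InfoCommandBase):
    """Illustrative sketch, not part of pyvizio: a hypothetical subclass showing how
    InfoCommandBase is meant to be extended -- set the endpoint and override
    process_response to pull the interesting field out of the JSON reply."""

    def __init__(self) -> None:
        # The endpoint string is a placeholder, not a documented SmartCast path.
        super(_ExampleValueCommand, self).__init__("example/endpoint")

    def process_response(self, json_obj: Dict[str, Any]) -> Any:
        # "ITEMS"/"VALUE" are illustrative keys, not a guaranteed payload shape.
        items = json_obj.get("ITEMS", [])
        return items[0].get("VALUE") if items else None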
| 2.828125
| 3
|
secondstring.py
|
dssheldon/pands-programs-sds
| 0
|
12781347
|
<filename>secondstring.py
# The objective of the program is to take a string from a user and
# return every second character in reverse
# Based on Page 32 of "A Whirlwind Tour of Python" by <NAME>
mystring = input('Please input your desired text: ') # Asks users to input a string
rev_mystring = mystring[::-2] # starts off on the last character of a string and skips 1 character to return every second character (in reverse)
print(rev_mystring) # prints the new string
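# Worked example (illustrative): with the fixed string "abcdef", every second character
# in reverse is 'f', skip 'e', 'd', skip 'c', 'b'.
print("abcdef"[::-2]) # prints: fdb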
| 4.3125
| 4
|
Latest/venv/Lib/site-packages/pyface/dock/idock_ui_provider.py
|
adamcvj/SatelliteTracker
| 1
|
12781348
|
<filename>Latest/venv/Lib/site-packages/pyface/dock/idock_ui_provider.py
#-------------------------------------------------------------------------------
#
# Copyright (c) 2006, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# Author: <NAME>
# Date: 06/17/2006
#
#-------------------------------------------------------------------------------
""" Defines the IDockUIProvider interface which objects which support being
dragged and dropped into a DockWindow must implement.
"""
#-------------------------------------------------------------------------------
# 'IDockUIProvider' class:
#-------------------------------------------------------------------------------
class IDockUIProvider ( object ):
#---------------------------------------------------------------------------
# Returns a Traits UI which a DockWindow can imbed:
#---------------------------------------------------------------------------
def get_dockable_ui ( self, parent ):
""" Returns a Traits UI which a DockWindow can imbed.
"""
return self.edit_traits( parent = parent,
kind = 'subpanel',
scrollable = True )
| 1.601563
| 2
|
deep_talk/__init__.py
|
ptarau/DeepTalk
| 1
|
12781349
|
<reponame>ptarau/DeepTalk
#__version__ = '0.1.3'
__all__ = ('dialog_about','txt_quest','pdf_quest', 'pro')
from .qpro import *
| 0.898438
| 1
|
tests/test_sa_integration.py
|
ChowNow/elixir
| 0
|
12781350
|
<gh_stars>0
"""
test integrating Elixir entities with plain SQLAlchemy defined classes
"""
from __future__ import absolute_import
from builtins import object
from sqlalchemy.orm import *
from sqlalchemy import *
from elixir import *
class TestSQLAlchemyToElixir(object):
def setup(self):
metadata.bind = "sqlite://"
def teardown(self):
cleanup_all(True)
def test_simple(self):
class A(Entity):
name = Field(String(60))
# Remember the entity need to be setup before you can refer to it from
# SQLAlchemy.
setup_all(True)
b_table = Table('b', metadata,
Column('id', Integer, primary_key=True),
Column('name', String(60)),
Column('a_id', Integer, ForeignKey(A.id))
)
b_table.create()
class B(object):
pass
mapper(B, b_table, properties={
'a': relation(A)
})
b1 = B()
b1.name = 'b1'
b1.a = A(name='a1')
session.add(b1)
session.commit()
session.expunge_all()
b = session.query(B).one()
assert b.a.name == 'a1'
class TestElixirToSQLAlchemy(object):
def setup(self):
metadata.bind = "sqlite://"
def teardown(self):
cleanup_all(True)
def test_m2o(self):
a_table = Table('a', metadata,
Column('id', Integer, primary_key=True),
Column('name', String(60)),
)
a_table.create()
class A(object):
pass
mapper(A, a_table)
class B(Entity):
name = Field(String(60))
a = ManyToOne(A)
setup_all(True)
a1 = A()
a1.name = 'a1'
b1 = B(name='b1', a=a1)
session.add(b1)
session.commit()
session.expunge_all()
b = B.query.one()
assert b.a.name == 'a1'
def test_m2o_non_pk_target(self):
a_table = Table('a', metadata,
Column('id', Integer, primary_key=True),
Column('name', String(60), unique=True)
)
a_table.create()
class A(object):
pass
mapper(A, a_table)
class B(Entity):
name = Field(String(60))
a = ManyToOne(A, target_column=['name'])
# currently fails
# c = ManyToOne('C', target_column=['id', 'name'])
# class C(Entity):
# name = Field(String(60), unique=True)
setup_all(True)
a1 = A()
a1.name = 'a1'
b1 = B(name='b1', a=a1)
session.commit()
session.expunge_all()
b = B.query.one()
assert b.a.name == 'a1'
# def test_m2m(self):
# a_table = Table('a', metadata,
# Column('id', Integer, primary_key=True),
# Column('name', String(60), unique=True)
# )
# a_table.create()
#
# class A(object):
# pass
#
# mapper(A, a_table)
#
# class B(Entity):
# name = Field(String(60))
# many_a = ManyToMany(A)
#
# setup_all(True)
#
# a1 = A()
# a1.name = 'a1'
# b1 = B(name='b1', many_a=[a1])
#
# session.commit()
# session.expunge_all()
#
# b = B.query.one()
#
# assert b.many_a[0].name == 'a1'
| 2.546875
| 3
|