max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
bungee/const.py | wan/bungee | 2 | 12774051 | # General ES Constants
# String keys/values used when building Elasticsearch requests and reading
# responses, grouped by the part of the API they belong to.
COUNT = 'count'
CREATE = 'create'
DOCS = 'docs'
FIELD = 'field'
FIELDS = 'fields'
HITS = 'hits'
ID = '_id'
INDEX = 'index'
INDEX_NAME = 'index_name'
ITEMS = 'items'
KILOMETERS = 'km'
MAPPING_DYNAMIC = 'dynamic'
MAPPING_MULTI_FIELD = 'multi_field'
MAPPING_NULL_VALUE = 'null_value'
MILES = 'mi'
OK = 'ok'
PROPERTIES = 'properties'
PROPERTY_TYPE = 'type'
SCORE = '_score'
SOURCE = '_source'
TOTAL = 'total'
TTL = '_ttl'
TYPE = '_type'
UID = '_uid'
UNIT = 'unit'
URL = 'url'
URLS = 'urls'
# Matching / Filtering
AND = "and"
BOOL = 'bool'
DOC_TYPE = 'doc_type'
FILTER = 'filter'
FILTERED = 'filtered'
MATCH_ALL = 'match_all'
MUST = 'must'
MUST_NOT = 'must_not'
OR = "or"
QUERY = 'query'
SHOULD = 'should'
SORT = 'sort'
TERMS = 'terms'
TERM = 'term'
# Sorting / Misc.
ASC = 'asc'
DESC = 'desc'
FACET_FILTER = 'facet_filter'
FACETS = 'facets'
FROM = 'from'
OFFSET = 'offset'
ORDER = 'order'
SIZE = 'size'
TO = 'to'
# Runtime Constants
DEFAULT_PAGE_SIZE = 20
| 1.09375 | 1 |
lib/abook.py | ids1024/Utilities | 0 | 12774052 | import os
from configparser import ConfigParser
# Default location of abook's INI-style addressbook file.
infile = os.path.expanduser("~/.abook/addressbook")
class AddressBook(object):
    """In-memory view of abook contacts supporting name and index lookup.

    Each contact is a dict; on construction the comma-separated ``email``
    field is normalised into a list of non-empty address strings.
    """

    def __init__(self, contacts):
        self.contacts = contacts
        for contact in self.contacts:
            # "a@x,b@y" -> ["a@x", "b@y"]; a missing/empty field -> [].
            contact["email"] = list(filter(None, contact.get("email", '').split(",")))

    def __getitem__(self, key):
        """Look up a contact by exact name, case-insensitive substring, or index.

        Raises KeyError (carrying the offending key, so failures are
        diagnosable) when nothing matches.
        """
        if isinstance(key, str):
            names = [contact['name'] for contact in self.contacts]
            # Prefer an exact match before falling back to substring search.
            if key in names:
                return self.contacts[names.index(key)]
            for index, name in enumerate(names):
                if key.lower() in name.lower():
                    return self.contacts[index]
        elif isinstance(key, int):
            return self.contacts[key]
        raise KeyError(key)

    def __len__(self):
        return len(self.contacts)

    def __repr__(self):
        return str(self.contacts)

    def __contains__(self, item):
        """True if *item* equals a contact's name or one of its emails."""
        for contact in self.contacts:
            if contact['name'] == item or item in contact['email']:
                return True
        return False
def get_abook():
    """Parse the user's abook addressbook file into an AddressBook.

    abook stores each contact as a numbered INI section; non-numeric
    sections (e.g. the format header) are skipped.
    """
    parser = ConfigParser()
    parser.read(infile)
    entries = [dict(parser[section]) for section in parser if section.isdigit()]
    return AddressBook(entries)
| 3.25 | 3 |
src/schist/db.py | slyphon/zsh-history-backup | 1 | 12774053 | <gh_stars>1-10
from __future__ import print_function
import logging
import os.path
import re
import sqlite3
from collections import defaultdict
from contextlib import contextmanager
from textwrap import dedent
from .common import _utf8
import arrow
import attr
import six
from attr.validators import instance_of, optional
# Module-level logger and the default location of the history database.
log = logging.getLogger(__name__)
DEFAULT_DB_PATH = os.path.expanduser("~/.schist.sq3")
@attr.s(frozen=True, slots=True)
class Row(object):
  """A single shell-history entry: an Arrow timestamp plus the command text."""
  # NOTE(review): attrs' `convert=` keyword was renamed `converter=` in
  # attrs 17.4 and later removed — confirm the pinned attrs version still
  # accepts the old spelling.
  timestamp = attr.ib(
    validator=instance_of(arrow.arrow.Arrow),
    convert=lambda s: arrow.get(s)
  )
  command = attr.ib(
    validator=instance_of(six.string_types),
    convert=_utf8
  )
  def as_sql_dict(self):
    """Return this row as a dict with the timestamp flattened to unix
    seconds, suitable for binding as sqlite named parameters."""
    d = attr.asdict(self)
    d['timestamp'] = self.timestamp.timestamp
    return d
  @property
  def unix(self):
    """Unix epoch seconds of this entry's timestamp."""
    return self.timestamp.timestamp
  def __iter__(self):
    # Allows `timestamp, command = row` style unpacking.
    return iter(attr.astuple(self))
class AlreadyOpenException(Exception):
  """Raised when open() is called on a HistConfig that already holds a connection."""
  pass
class NoConnectionError(Exception):
  """Raised when a db operation is attempted before open() has been entered."""
  pass
@attr.s(frozen=True, slots=True)
class HistConfig(object):
  """Immutable bundle of everything needed to sync one shell's history with
  a sqlite table: file locations, parser/serializer callables and — once
  open() has been entered — a live sqlite3 connection."""
  table_name = attr.ib(validator=instance_of(six.string_types))
  # A function that takes a file pointer to the appropriate history file
  # and yields Row objects
  history_iter_fn = attr.ib()
  # a function that takes a path to a sqlite3 db file and returns
  # an sqlite3 connection object
  db_conn_factory = attr.ib()
  # a function that takes a Row iterator and a File object and outputs
  # the rows to that file
  output_fn = attr.ib()
  histfile = attr.ib(
    validator=instance_of(six.string_types))
  # The open sqlite3 connection, or None before open() is entered.
  _conn = attr.ib(default=None)
  db_path = attr.ib(
    default=DEFAULT_DB_PATH,
    validator=instance_of(six.string_types))
  @contextmanager
  def open(self):
    """Yield a copy of this config bound to a fresh db connection.

    The class is frozen, so instead of mutating self we evolve a new
    instance carrying the connection and close that copy on exit.
    """
    if self._conn is not None:
      raise AlreadyOpenException("connection already open")
    hc = self._open()
    try:
      yield hc
    finally:
      hc._close()
  def _close(self):
    """Optimize and close the connection if one is held; safe to call twice."""
    if self._conn is not None:
      # from the sqlite3 docs:
      #
      # The PRAGMA optimize command will automatically run ANALYZE on individual tables on an
      # as-needed basis. The recommended practice is for applications to invoke the PRAGMA optimize
      # statement just before closing each database connection.
      #
      self._conn.execute("PRAGMA optimize")
      self._conn.close()
  def _open(self):
    # Returns a *new* HistConfig carrying the connection (self stays unbound).
    conn = self.db_conn_factory(self.db_path)
    return self.evolve(conn=conn)
  @property
  def conn(self):
    """The live connection; raises NoConnectionError outside open()."""
    if self._conn is None:
      raise NoConnectionError()
    else:
      return self._conn
  def init_db(self):
    """Create the history table if it does not already exist."""
    if not self.table_exists():
      self.create_table()
  def count(self):
    """Total number of rows in the history table."""
    return self.conn.execute(
      "select count(*) as c from {table}".format(table=self.table_name)
    ).fetchone()[0]
  def create_table(self):
    # (timestamp, command) is the primary key, so re-importing the same
    # history entry is a no-op rather than a duplicate.
    return self.conn.execute("""\
      CREATE TABLE IF NOT EXISTS {table} (
        timestamp BIGINT NOT NULL,
        command text NOT NULL,
        PRIMARY KEY (timestamp, command)
      )
      """.format(
        table=self.table_name,
      ))
  def insert(self):
    """Bulk-import the history file into the table (REPLACE dedupes)."""
    with self.conn:
      cur = self.conn.cursor()
      q = u"""\
        REPLACE INTO {table} ('timestamp', 'command')
        VALUES(:timestamp, :command)
      """.format(table=self.table_name)
      with self.open_histfile() as fp:
        cur.executemany(q, (r.as_sql_dict() for r in self.history_iter_fn(fp)))
  def search(self, term, limit=25):
    """do a text search for a command"""
    # NOTE(review): Row(**r) requires the connection's row_factory to
    # produce mapping-like rows — presumably db_conn_factory sets one up.
    with self.conn:
      q = u"""\
        SELECT * from {table} where command LIKE :term ORDER BY timestamp DESC LIMIT :limit
      """.format(table=self.table_name)
      for r in self.conn.execute(q, {'term': term, 'limit': int(limit)}):
        yield Row(**r)
  def table_exists(self):
    """True if the history table already exists in the database."""
    xs = self.conn.execute(
      "SELECT name from sqlite_master WHERE type='table' and name=:name",
      dict(name=self.table_name)
    ).fetchall()
    return len(xs) > 0
  @contextmanager
  def open_histfile(self):
    """Open the raw history file in binary mode for history_iter_fn."""
    with open(self.histfile, 'rb') as fp:
      yield fp
  _ROWS_SQL = "select timestamp, command from {table} order by rowid {limit}"
  def rows(self, limit=None):
    """Yield stored Rows in insertion (rowid) order, optionally limited."""
    q = self._ROWS_SQL.format(
      table=self.table_name,
      limit=' LIMIT %d' % (limit,) if limit is not None else ''
    )
    for r in self.conn.execute(q):
      yield Row(**r)
  def cmds_since(self, ts):
    """Number of commands recorded strictly after the Arrow timestamp *ts*."""
    q = "select count(*) as c from {table} where timestamp > :ts".format(table=self.table_name)
    return self.conn.execute(q, {'ts': ts.timestamp}).fetchone()[0]
  def last_cmd(self):
    """Timestamp of the most recently inserted command, in local time."""
    q = "select timestamp as ts from {table} order by rowid DESC limit 1".format(
      table=self.table_name)
    last_ts = self.conn.execute(q).fetchone()['ts']
    return arrow.get(last_ts).to('local')
  def evolve(self, **kw):
    """Return a copy of this (frozen) config with the given fields replaced."""
    return attr.evolve(self, **kw)
  def restore(self, out_fp):
    """dump the contents of the db to out_fp in the correct format"""
    self.output_fn(self.rows(), out_fp)
| 2.5625 | 3 |
third_party/llvm_toolchain/local_config_llvm.bzl | storypku/bazel-galaxy | 0 | 12774054 | load("//third_party:common.bzl", "err_out", "execute")
# Tools that must exist under $LLVM_DIR/bin for the toolchain to be usable.
_LLVM_BINARIES = [
    "clang",
    "clang-cpp",
    "ld.lld",
    "llvm-ar",
    "llvm-as",
    "llvm-nm",
    "llvm-objcopy",
    "llvm-objdump",
    "llvm-profdata",
    "llvm-dwp",
    "llvm-ranlib",
    "llvm-readelf",
    "llvm-strip",
    "llvm-symbolizer",
]

# Oldest LLVM release this toolchain configuration supports.
_LLVM_VERSION_MINIMAL = "10.0.0"
def _label(filename):
    """Build a Label pointing at a file in this toolchain package."""
    return Label("//third_party/llvm_toolchain:" + filename)
def _check_llvm_binaries(repository_ctx, llvm_dir):
    """Fail the repository rule if any required LLVM tool is missing."""
    for tool in _LLVM_BINARIES:
        tool_path = "{}/bin/{}".format(llvm_dir, tool)
        if not repository_ctx.path(tool_path).exists:
            fail("{} doesn't exist".format(tool_path))
def _retrieve_clang_version(repository_ctx, clang_binary):
    """Return clang's version string, failing below the supported minimum."""
    script_path = repository_ctx.path(Label("//third_party/llvm_toolchain:find_clang_version.py"))
    python_bin = repository_ctx.which("python3")
    result = execute(repository_ctx, [python_bin, script_path, clang_binary])
    if result.return_code:
        fail("Failed to run find_clang_version.py: {}".format(err_out(result)))
    llvm_version = result.stdout.strip()

    # Compare component-wise as integers so that e.g. "9.0.1" < "10.0.0"
    # is not decided lexicographically.
    actual_version = [int(m) for m in llvm_version.split(".")]
    minimal_version = [int(m) for m in _LLVM_VERSION_MINIMAL.split(".")]
    if actual_version < minimal_version:
        fail("Minimal llvm version supported is {}, got: {}".format(_LLVM_VERSION_MINIMAL, llvm_version))

    # Return the already-computed string rather than re-stripping stdout.
    return llvm_version
def _local_config_llvm_impl(repository_ctx):
    """Repository rule implementation: validate the LLVM installation at
    $LLVM_DIR and instantiate BUILD/toolchains files for it."""
    llvm_dir = repository_ctx.os.environ.get("LLVM_DIR", None)
    if not llvm_dir:
        fail("LLVM_DIR not set.")
    # Normalize away a trailing slash so path joins below stay clean.
    if llvm_dir.endswith("/"):
        llvm_dir = llvm_dir[:-1]
    _check_llvm_binaries(repository_ctx, llvm_dir)
    clang_binary = "{}/bin/clang".format(llvm_dir)
    llvm_version = _retrieve_clang_version(repository_ctx, clang_binary)
    repository_ctx.symlink(_label("cc_toolchain_config.bzl"), "cc_toolchain_config.bzl")
    # Host CPU architecture (e.g. "x86_64"), substituted into the templates.
    arch = repository_ctx.execute(["uname", "-m"]).stdout.strip()
    repository_ctx.template(
        "toolchains.bzl",
        _label("toolchains.bzl.tpl"),
        {
            "%{arch}": arch,
        },
    )
    repository_ctx.template(
        "BUILD",
        _label("BUILD.tpl"),
        {
            "%{arch}": arch,
            "%{llvm_dir}": llvm_dir,
            "%{llvm_version}": llvm_version,
        },
    )
# Repository rule wiring the local LLVM installation (pointed to by the
# LLVM_DIR environment variable) into the build as a CC toolchain.
local_config_llvm = repository_rule(
    implementation = _local_config_llvm_impl,
    environ = ["LLVM_DIR"],
    local = True,
    configure = True,
)
| 2.03125 | 2 |
dolfyn/meta/api_dumb.py | aidanbharath/dolfyn | 28 | 12774055 | <reponame>aidanbharath/dolfyn
# Flag consumers check to see whether real metadata support is available;
# this "dumb" fallback module always reports False.
valid=False
def marray(arr, *args, **kwargs):
    """No-op stand-in for the real marray: hand back *arr* unchanged."""
    return arr
def unitsDict(*args, **kwargs):
    """No-op stand-in for the real unitsDict: always None."""
    return None
def varMeta(*args, **kwargs):
    """No-op stand-in for the real varMeta: always None."""
    return None
| 1.90625 | 2 |
PDSim/misc/clipper/setup.py | sebdenis/pdsim | 24 | 12774056 |
if __name__=='__main__':
    # Build the pyclipper Cython extension in place when run directly.
    # NOTE(review): distutils is deprecated (removed in Python 3.12) —
    # consider migrating to setuptools.
    from distutils.core import setup
    from distutils.extension import Extension
    from Cython.Distutils import build_ext
    import sys

    # Force an in-place build regardless of the command line given.
    sys.argv += ['build_ext','--inplace']

    ext = Extension("pyclipper",
                    sources=["pyclipper.pyx", "clipper.cpp"],
                    language="c++", # this causes Pyrex/Cython to create C++ source
                    include_dirs=["./../include"],
                    )

    setup(
        ext_modules=[ext],
        cmdclass = {'build_ext': build_ext},
        )
| 1.648438 | 2 |
src/std/coppertop/std/_stats/core.py | DangerMouseB/coppertop | 0 | 12774057 | # *******************************************************************************
#
# Copyright (c) 2021 <NAME>. All rights reserved.
#
# *******************************************************************************
import math, numpy
from coppertop.pipe import *
from coppertop.std.linalg import tvarray
@coppertop
def cov(A:tvarray) -> tvarray:
    """Covariance matrix of A (rows are variables), as a tvarray."""
    return numpy.cov(A).view(tvarray)
@coppertop
def mean(ndOrPy):
    """Arithmetic mean of an ndarray or plain Python sequence."""
    # should do full numpy?
    return numpy.mean(ndOrPy)
@coppertop
def std(ndOrPy, dof=0):
    """Standard deviation of an ndarray or plain Python sequence.

    dof is the "delta degrees of freedom" (0 = population, 1 = sample).
    numpy.std's second *positional* parameter is `axis`, not `ddof`
    (signature: std(a, axis=None, dtype=None, out=None, ddof=0, ...)),
    so the previous positional call `numpy.std(ndOrPy, dof)` silently
    treated dof as an axis. Pass it by keyword.
    """
    return numpy.std(ndOrPy, ddof=dof)
@coppertop
def logisticCDF(x, mu, s):
    """CDF of the logistic distribution with location mu and scale s."""
    z = (mu - x) / s
    return 1 / (1 + math.exp(z))
@coppertop
def logisticCDFInv(p, mu, s):
    """Quantile function (inverse CDF) of the logistic distribution."""
    return mu - s * math.log(1 / p - 1)
| 2.1875 | 2 |
Ambience/data/StressDetector.py | Matchstic/automated-ambience | 1 | 12774058 | <filename>Ambience/data/StressDetector.py
import Queue
import time
# Constants
ROLLING_AVERAGE_COUNT = 5   # number of recent stress readings averaged together
BPM_HIGH_DEVIATION = 40     # BPM above baseline treated as maximum (1.0) stress
HRV_HIGH_DEVIATION = 35     # HRV (ms) below baseline treated as maximum stress
class StressDetector():
    """Fuses heart-rate (BPM) and heart-rate-variability (HRV) readings into
    a single smoothed stress level in [0, 1], weighting each source by how
    fresh its data is. (Python 2: uses the `Queue` module.)"""
    def __init__(self):
        # Recent stress levels for the rolling average.
        self.previous_stress_levels = Queue.Queue()
        self.baseline_bpm = 72 # average bpm
        self.baseline_hrv = 60 # average ms
    def configure_baselines(self, bpm, hrv):
        """Override the population-average baselines with per-user values."""
        self.baseline_bpm = bpm
        self.baseline_hrv = hrv
    def generate_stress_level(self, bpm, hrv, bpmTimestamp, hrvTimestamp):
        """Return the rolling-average stress level for the latest readings.

        bpmTimestamp / hrvTimestamp are UNIX timestamps of when each
        reading was taken; stale readings contribute less.
        """
        # NOTE(review): `global` is not needed just to *read* a module-level
        # constant.
        global ROLLING_AVERAGE_COUNT
        #####################################################################
        # 1. Find deviation of incoming BPM and HRV from a baseline value for
        # the user
        #####################################################################
        # XXX: A positive deviation equates to more stressed
        # BPM is higher when stressed
        bpm_deviation = bpm - self.baseline_bpm
        # HRV is lower when stressed
        hrv_deviation = self.baseline_hrv - hrv
        #####################################################################
        # 2. Modify the deviations by a confidence score, dependant on the age
        # of the incoming data.
        #####################################################################
        # Get the current UNIX timestamp
        timestamp_now = int(time.time())
        # The BPM must be within the last 15 seconds to be considered up-to-date.
        # An age of 5 minutes ago is treated as too old. y = -x/(300s - 15s) + 4/3
        bpm_confidence = 1.0 if timestamp_now - bpmTimestamp <= 15 else (0.0 - float(timestamp_now - bpmTimestamp) / 285.0) + 1.053
        if bpm_confidence < 0.0: bpm_confidence = 0.0
        # The HRV must be within the last 15 minutes to be considered up-to-date.
        # An age of 6 hours is treated as too old. y = -x/(6h in seconds - 15m in seconds) + 1.043
        hrv_confidence = 1.0 if timestamp_now - hrvTimestamp <= 900 else (0.0 - float(timestamp_now - hrvTimestamp) / 20700) + 1.043
        if hrv_confidence < 0.0: hrv_confidence = 0.0
        #####################################################################
        # 3. Compute stress levels from each data point with the confidence
        # score and a normalised deviation
        #####################################################################
        normalised_bpm = self._normalise_bpm_deviation(bpm_deviation)
        normalised_hrv = self._normalise_hrv_deviation(hrv_deviation)
        stress_from_bpm = bpm_confidence * normalised_bpm
        stress_from_hrv = hrv_confidence * normalised_hrv
        #####################################################################
        # 4. Compute an ordered weighted average to fuse the stress levels
        #####################################################################
        # If HRV is at confidence 1.0, we utilise it over BPM due to its higher
        # accuracy for stress detection.
        weights = [0.9, 0.1]
        stress_level = 0.0
        if bpm_confidence > hrv_confidence:
            stress_level = weights[0]*stress_from_bpm + weights[1]*stress_from_hrv
        else:
            stress_level = weights[1]*stress_from_bpm + weights[0]*stress_from_hrv
        #####################################################################
        # 5. Rolling average to smooth stress levels
        #####################################################################
        # Pop oldest level if needed
        if self.previous_stress_levels.qsize() >= ROLLING_AVERAGE_COUNT:
            self.previous_stress_levels.get()
        # Add new level
        self.previous_stress_levels.put(stress_level)
        # Queue has no iteration API, so drain it into a list and push the
        # items straight back to preserve the queue's contents.
        average_queue_copy = []
        while True:
            try:
                elem = self.previous_stress_levels.get(block=False)
            except:
                break
            else:
                average_queue_copy.append(elem)
        for elem in average_queue_copy:
            self.previous_stress_levels.put(elem)
        # Iterate over the queue's contents without removal
        averaged_stress_level = 0
        for level in average_queue_copy:
            averaged_stress_level += level
        # Take average
        averaged_stress_level /= self.previous_stress_levels.qsize()
        return averaged_stress_level
    def _normalise_bpm_deviation(self, bpm_deviation):
        """Map a BPM deviation onto [0, 1], clamping at BPM_HIGH_DEVIATION."""
        # normalise between 0 and self.stressed_bpm - self.baseline_bpm
        max_value = BPM_HIGH_DEVIATION
        normalised = bpm_deviation / float(max_value)
        if normalised > 1.0: normalised = 1.0
        elif normalised < 0.0: normalised = 0.0
        return normalised
    def _normalise_hrv_deviation(self, hrv_deviation):
        """Map an HRV deviation onto [0, 1], clamping at HRV_HIGH_DEVIATION."""
        max_value = HRV_HIGH_DEVIATION
        normalised = hrv_deviation / float(max_value)
        if normalised > 1.0: normalised = 1.0
        elif normalised < 0.0: normalised = 0.0
        return normalised
| 2.984375 | 3 |
emodis_ndvi_python/pycodes/getndvitodate.py | gina-alaska/emodis-ndvi-python_container | 1 | 12774059 |
import numpy as np
from int_tabulated import *
def GetNDVItoDate(NDVI, Time, Start_End, bpy, DaysPerBand, CurrentBand):
    """Integrate NDVI*days from start of season (SOST) up to CurrentBand.

    NDVI/Time are per-band series; Start_End carries SOST (start-of-season
    time index) and SOSN (start-of-season NDVI); bpy is bands per year.
    Returns a dict {'NDVItoDate', 'NowT', 'NowN'} with FILL (-1.0) when the
    season length is invalid.
    """
    #;
    #;jzhu,8/9/2011,This program calculates total ndvi integration (ndvi*day) from start of season to currentband, the currentband is the dayindex of interesting day.
    #
    FILL=-1.0
    ny=1
    #;DaysPerBand=365./bpy
    NowT=CurrentBand #CurrentBand is the index of NDVI, the index start from 0
    NowN=NDVI[NowT]
    SeasonLength=NowT-Start_End['SOST'][0]
    NDVItoDate=np.zeros(ny)+FILL
    if SeasonLength < 0:
        SeasonLength = FILL
    if SeasonLength > 0 and SeasonLength < bpy: #<2>
        #index range: integer band indices spanning SOST..NowT
        segl=int(np.ceil(Start_End['SOST'][0]))
        segh=int(np.floor(NowT )) + 1
        XSeg= Time[ segl: segh ] #Xseg[Start_End['SOST'][0]:NowT]
        NDVILine= NDVI[ segl : segh ]
        #if XSeg[0] != Start_End['SOST'][0]: #<3>
        #    XSeg = np.concatenate([ np.array( [Start_End['SOST'][0] ] ), XSeg])
        #    NDVILine = np.concatenate([ np.array([ Start_End['SOSN'][0] ] ), NDVILine])
        #<3>
        #if XSeg[len(XSeg)-1] != NowT : #<4>
        #    XSeg = np.concatenate( [XSeg, np.array([NowT]) ] )
        #    NDVILine= np.concatenate( [NDVILine, np.array([NowN]) ] )
        #<4>
        # Baseline at the start-of-season NDVI value; the integral below the
        # baseline is subtracted out.
        BaseLine=XSeg*0+Start_End['SOSN'][0]
        # get rid of duplicated point and sort the XSeg
        XSeg, index=np.unique(XSeg,return_index=True)
        NDVILine=NDVILine[index]
        BaseLine=BaseLine[index]
        IntNDVI=Int_Tabulated(XSeg, NDVILine)
        IntBase=Int_Tabulated(XSeg, BaseLine)
        NDVItoDate[0]=(IntNDVI-IntBase)*DaysPerBand
    else: #<2>
        NDVItoDate[0]=FILL
    NDVItoDate={'NDVItoDate':NDVItoDate[0],'NowT':NowT,'NowN':NowN}
return NDVItoDate | 2.859375 | 3 |
tests/unit/test_comments.py | severinbeauvais/business-schemas | 0 | 12774060 | # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Suite to ensure comments added onto filings and businesses are valid."""
import copy
from registry_schemas import validate
from registry_schemas.example_data import COMMENT_BUSINESS, COMMENT_FILING
def test_valid_comment_filing():
    """Assert that the schema is performing as expected for filing comments."""
    ok, problems = validate(COMMENT_FILING, 'comment')
    if problems:
        for problem in problems:
            print(problem.message)
        print(problems)
    assert ok
def test_valid_comment_business():
    """Assert that the schema is performing as expected for business comments."""
    payload = copy.deepcopy(COMMENT_BUSINESS)
    ok, problems = validate(payload, 'comment')
    if problems:
        for problem in problems:
            print(problem.message)
        print(problems)
    assert ok
def test_valid_no_timestamp():
    """Assert that the schema does not require a timestamp."""
    def _expect_valid(payload):
        ok, problems = validate(payload, 'comment')
        if problems:
            for problem in problems:
                print(problem.message)
            print(problems)
        assert ok

    # A null timestamp must validate.
    comment = copy.deepcopy(COMMENT_FILING)
    comment['comment']['timestamp'] = None
    _expect_valid(comment)

    # A missing timestamp key must validate too.
    del comment['comment']['timestamp']
    _expect_valid(comment)
def test_invalid_filing_and_business_id():
    """Assert that schema fails with both filing and business id set."""
    payload = copy.deepcopy(COMMENT_FILING)
    payload['comment']['businessId'] = 1
    ok, problems = validate(payload, 'comment')
    if problems:
        for problem in problems:
            print(problem.message)
        print(problems)
    assert not ok
def test_invalid_no_filing_or_business_id():
    """Assert that one of business or filing id is required."""
    def _expect_invalid(payload):
        ok, problems = validate(payload, 'comment')
        if problems:
            for problem in problems:
                print(problem.message)
            print(problems)
        assert not ok

    # A null filingId (with no businessId) must fail.
    comment = copy.deepcopy(COMMENT_FILING)
    comment['comment']['filingId'] = None
    _expect_invalid(comment)

    # Removing the key entirely must fail as well.
    del comment['comment']['filingId']
    _expect_invalid(comment)
def test_invalid_null_submitter():
    """Assert that submitter id cannot be null."""
    payload = copy.deepcopy(COMMENT_FILING)
    payload['comment']['submitterId'] = None
    ok, problems = validate(payload, 'comment')
    if problems:
        for problem in problems:
            print(problem.message)
        print(problems)
    assert not ok
def test_valid_no_submitter():
    """Assert that submitter id is not required."""
    payload = copy.deepcopy(COMMENT_FILING)
    del payload['comment']['submitterId']
    ok, problems = validate(payload, 'comment')
    if problems:
        for problem in problems:
            print(problem.message)
        print(problems)
    assert ok
| 2.140625 | 2 |
src/content/models.py | b4isty/django-blog | 0 | 12774061 | <filename>src/content/models.py
from django.db import models
# Create your models here.
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.signals import post_save
class Blog(models.Model):
    """A blog post written by a Django auth User."""
    # NOTE(review): ForeignKey without on_delete implies a pre-Django-2.0
    # codebase (on_delete became mandatory in 2.0).
    author = models.ForeignKey(User)
    topic = models.CharField(max_length=500, null=True)
    title = models.CharField(max_length=500)
    body = models.TextField()
    pub_date = models.DateTimeField(auto_now_add=True)  # set once at creation
    updated = models.DateTimeField(auto_now=True)       # refreshed on every save
    def __str__(self):
        return self.title
    # def get_absolute_url(self):
    #     return reverse('detail', kwargs={'pk': self.pk})
class Comment(models.Model):
    """A user's comment on a blog post."""
    user = models.ForeignKey(User)
    blogs = models.ForeignKey(Blog, blank=True)
    comment_text = models.CharField(max_length=500, blank=True, null=True)

    def __str__(self):
        # comment_text is nullable; __str__ must return a str, so fall back
        # to an empty string instead of raising TypeError for NULL values.
        return self.comment_text or ''
class Vote(models.Model):
    """Records that a user voted on a blog post."""
    user = models.ForeignKey(User)
    blog = models.ForeignKey(Blog)

    def __str__(self):
        # __str__ must return a string; returning the related User instance
        # raised "TypeError: __str__ returned non-string".
        return str(self.user)
def post_save_user_receiver(sender, instance, created, *args, **kwargs):
    """post_save hook for User; currently an effective no-op.

    The profile-creation logic is commented out below, and the `is_active`
    local is assigned but never used — presumably a placeholder.
    """
    if created:
        is_active = True
    # if created:
    #     profile, is_created = Author.objects.get_or_create(user=instance)
    #     default_user_profile = Author.objects.get_or_create(user__id=1)[0] # user__username=
    #     # default_user_profile.followers.add(instance)

# Register the hook so it fires after every User save.
post_save.connect(post_save_user_receiver, sender=User)
| 2.484375 | 2 |
refinery/bnpy/bnpy-dev/bnpy/init/__init__.py | csa0001/Refinery | 103 | 12774062 | """
The :mod:`init` module gathers initialization procedures for model parameters
"""
import FromScratchGauss, FromScratchMult
import FromScratchBernRel
import FromSaved, FromTruth
__all__ = ['FromScratchGauss', 'FromSaved', 'FromTruth', 'FromScratchMult', 'FromScratchBernRel']
| 1.617188 | 2 |
src/openstackapi/server_meta.py | jiangyt2112/NetworkMonitor | 0 | 12774063 | <gh_stars>0
# Example nova server record (as returned by the OpenStack compute API),
# kept for reference. NOTE(review): this bare dict literal is evaluated and
# discarded at import time — it is documentation, not runtime data.
{
    'OS-EXT-STS:task_state': None,
    'addresses':
        {'int-net':
            [
                {'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:5d:9e:22',
                'version': 4,
                'addr': '192.168.1.8',
                'OS-EXT-IPS:type': 'fixed'
                },
                {'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:5d:9e:22',
                'version': 4,
                'addr': '192.168.166.23',
                'OS-EXT-IPS:type': 'floating'
                }
            ]
        },
    'OS-EXT-STS:vm_state': 'active',
    'OS-EXT-SRV-ATTR:instance_name': 'instance-00000002',
    'OS-SRV-USG:launched_at': '2018-10-26T09:36:46.000000',
    'id': '61205745-b2bf-4db0-ad50-e7a60bf08bd5',
    'security_groups': [{'name': 'defalt'}],
    'user_id': 'd2fcc0c45a134de28dba429dbef2c3ba',
    'progress': 0,
    'OS-EXT-STS:power_state': 1,
    'OS-EXT-AZ:availability_zone': 'nova',
    'status': 'ACTIVE',
    'updated': '2018-10-26T09:36:46Z',
    'hostId': '1b6fa73a7ea8e40dc812954fe751d3aa812e6b52489ddb5360f5d36e',
    'OS-EXT-SRV-ATTR:host': 'control-node',
    'OS-SRV-USG:terminated_at': None,
    'OS-EXT-SRV-ATTR:hypervisor_hostname': 'control-node',
    'name': 'test',
    'created': '2018-10-26T09:36:38Z',
    'tenant_id': 'a95424bbdca6410092073d564f1f4012',
}
# ip netns add ns1
# ovs-vsctl add-port br-int tap0 tag=1 -- set Interface tap0 type=internal
# ip a
# ovs-vsctl show
# ip link set tap0 netns ns1
# ip netns exec ns1 ip addr add 192.168.1.3/24 dev tap0
# ip netns exec ns1 ifconfig tap0 promisc up
# ip netns exec ns1 ip a
# ip netns exec ns1 ping 192.168.1.1
# ip netns add ns1
# ip netns show
# ip netns exec ns1 ip a
# ip netns exec ns1 ip tuntap add tap0 mode tap
# ip netns exec ns1 ip a
# ip netns exec ns1 ip aadr add 192.168.1.3/24 dev tap0
# ip netns exec ns1 ip addr add 192.168.1.3/24 dev tap0
# ip netns exec ns1 ip a
# ip netns exec ns1 ip set tap0 up
# ip netns exec ns1 ip link set tap0 up
# ovs-ofctl dump-ports br-int qvo3ef787ad-67
# ovs-vsctl list interface br-ex
| 1.390625 | 1 |
chap09/list0903.py | ytianjin/GitTest | 0 | 12774064 | # 打印输出直角在左下角的等腰三角形和长方形
def put_star(n):
    """Print n asterisks on one line without a trailing newline."""
    print('*' * n, end='')
# Draw an isosceles right triangle (right angle at bottom-left), then a
# rectangle, from user-supplied dimensions. Prompts/labels are in Chinese.
print('直角在左下角的等腰三角形')
n = int(input('腰长:'))

# Row i of the triangle has i stars.
for i in range(1, n + 1):
    put_star(i)
    print()

print('长方形')
h = int(input('宽:'))
w = int(input('长:'))

# h rows of w stars each.
for i in range(1, h + 1):
    put_star(w)
print() | 3.984375 | 4 |
Day01/depthcount.py | squidbot/aoc2021 | 0 | 12774065 | <gh_stars>0
# Advent of Code 2021 day 1: count how many depth readings are larger than
# the previous reading.
with open('input.txt') as f:
    lines = f.readlines()

count = 0
curDepth = 0
for line in lines:
    newDepth = int(line)
    # Skip the comparison for the very first reading (sentinel 0).
    # NOTE(review): this also skips comparisons after any literal 0 reading —
    # fine for this puzzle's input, where depths are positive.
    if curDepth != 0:
        if newDepth > curDepth:
            count += 1
    curDepth = newDepth

print(count)
| 3.390625 | 3 |
kiritan.py | kaz/kiritan-server | 6 | 12774066 | <reponame>kaz/kiritan-server
# coding: UTF-8
import os
import sys
import time
import hashlib
import logging
import threading
import subprocess
from win32con import *
from win32gui import *
from win32process import *
# Shared settings: polling interval and the VOICEROID window title.
waitSec = 0.1
windowName = "VOICEROID+ 東北きりたん EX"
# WAV generation (serialized: only one VOICEROID automation at a time)
lock = threading.Lock()
def talk(input):
    """Thread-safe entry point: generate (or reuse) a WAV for *input*."""
    with lock:
        return generate_wav(input)
# Enumerate child windows of a win32 window handle.
def enum_child_windows(window):
    """Return [(hwnd, class name, window text), ...] for every child of *window*."""
    result = []
    def callback(hwnd, param):
        result.append((hwnd, GetClassName(hwnd), GetWindowText(hwnd)))
    EnumChildWindows(window, callback, None)
    return result
# Drive the VOICEROID GUI to synthesize speech into a WAV file.
def generate_wav(inputText):
    """Synthesize *inputText* via VOICEROID and return the WAV path.

    Results are cached by MD5 of the text; returns None for empty input.
    Works by sending win32 messages to the VOICEROID window, so it must run
    on the machine showing the GUI.
    """
    # Reject empty strings
    inputText = inputText.strip()
    if inputText == "":
        return None
    # Create the output directory (ignore "already exists")
    outdir = "./output/"
    try:
        os.mkdir(outdir)
    except:
        pass
    # If the cached file already exists, reuse it
    outfile = os.path.abspath(outdir + hashlib.md5(inputText.encode("utf-8")).hexdigest() + ".wav")
    if os.path.exists(outfile):
        return outfile
    logging.info("Generating WAV")
    while True:
        # Look for a running VOICEROID process/window
        window = FindWindow(None, windowName) or FindWindow(None, windowName + "*")
        if window:
            break
        # Not found: launch VOICEROID and wait for it to come up
        subprocess.Popen(["C:\Program Files (x86)\AHS\VOICEROID+\KiritanEX\VOICEROID.exe"])
        time.sleep(32 * waitSec)
    while True:
        # Close any error/notice/save dialogs left over from earlier runs
        errorDialog = FindWindow(None, "エラー") or FindWindow(None, "注意") or FindWindow(None, "音声ファイルの保存")
        if errorDialog:
            SendMessage(errorDialog, WM_CLOSE, 0, 0)
            time.sleep(waitSec)
        else:
            break
    # Bring the VOICEROID window to the front
    SetWindowPos(window, HWND_TOPMOST, 0, 0, 0, 0, SWP_SHOWWINDOW | SWP_NOMOVE | SWP_NOSIZE)
    # Drive VOICEROID up to the point where the save dialog appears
    def __req_speech():
        for hwnd, className, windowText in enum_child_windows(window):
            # Type the text into the rich-edit control
            if className.count("RichEdit20W"):
                SendMessage(hwnd, WM_SETTEXT, 0, inputText)
            if windowText.count("音声保存"):
                # Restore from minimized state
                ShowWindow(window, SW_SHOWNORMAL)
                # Click the save button
                SendMessage(hwnd, WM_LBUTTONDOWN, MK_LBUTTON, 0)
                SendMessage(hwnd, WM_LBUTTONUP, 0, 0)
    # Run on a separate thread (SendMessage blocks once the save dialog opens)
    threading.Thread(target=__req_speech).start()
    # Wait for the save dialog to appear
    while True:
        dialog = FindWindow(None, "音声ファイルの保存")
        if dialog:
            break
        time.sleep(waitSec)
    # Fill in the filename and press save until the dialog goes away
    while FindWindow(None, "音声ファイルの保存"):
        for hwnd, className, windowText in enum_child_windows(dialog):
            # Enter the output filename
            if className.count("Edit"):
                SendMessage(hwnd, WM_SETTEXT, 0, outfile)
            # Click the save button
            if windowText.count("保存"):
                SendMessage(hwnd, WM_LBUTTONDOWN, MK_LBUTTON, 0)
                SendMessage(hwnd, WM_LBUTTONUP, 0, 0)
        time.sleep(waitSec)
    # Wait while the progress dialog is showing
    while FindWindow(None, "音声保存"):
        time.sleep(waitSec)
    # Delete the companion .txt file VOICEROID writes alongside the WAV.
    # NOTE(review): replace() swaps the *first* "wav" occurrence — a path
    # containing "wav" elsewhere would be mangled.
    try:
        os.remove(outfile.replace("wav", "txt"))
    except:
        pass
    return outfile
| 2.203125 | 2 |
Janus/python-base-unit_09/FIM/dir_tree.py | voodoopeople42/Vproject | 0 | 12774067 | <gh_stars>0
# dir_tree.py
import os
# Walk the current directory and print every subdirectory and file path,
# skipping the contents of any .git directory.
for dirname, dirnames, filenames in os.walk('.'):
    # print path to all subdirectories first.
    for subdirname in dirnames:
        print(os.path.join(dirname, subdirname))

    # print path to all filenames.
    for filename in filenames:
        print(os.path.join(dirname, filename))

    # Advanced usage:
    # editing the 'dirnames' list will stop os.walk() from recursing into there.
    if '.git' in dirnames:
        # don't go into any .git directories.
        dirnames.remove('.git')
| 3.125 | 3 |
tests/lib/crypto/test_bip32.py | weex/python-rein | 1 | 12774068 | <gh_stars>1-10
import unittest
from rein.lib import bitcoinecdsa
from rein.lib.crypto import bip32
class Bip32Test(unittest.TestCase):
    """Checks that BIP32 master/delegate keys derive matching addresses for a
    mnemonic supplied as a list, a split string, or a unicode string.

    NOTE(review): str.decode('unicode-escape') only exists on Python 2 —
    this test module will not run under Python 3 as written.
    """
    def test_bip32(self):
        mnemonic_list_initial = [u'correct',u'horse',u'battery',u'staple']
        key = bip32.mnemonic_to_key(mnemonic_list_initial)
        wifkey_master = bip32.get_master_private_key(key)
        address_master = bip32.get_master_address(key)
        wifkey_delegate = bip32.get_delegate_private_key(key)
        address_delegate = bip32.get_delegate_address(key)
        # Each derived private key must map back to its derived address.
        self.assertEqual(
            bitcoinecdsa.privkey_to_address(wifkey_master),address_master)
        self.assertEqual(
            bitcoinecdsa.privkey_to_address(wifkey_delegate),address_delegate)
        # Same mnemonic as a space-separated string must give the same keys.
        mnemonic = "correct horse battery staple"
        mnemonic_list = str(mnemonic).split()
        mnemonic_list_unicode = [s.decode('unicode-escape') for s in mnemonic_list]
        self.assertEqual(mnemonic_list,mnemonic_list_initial)
        key2 = bip32.mnemonic_to_key(mnemonic_list_unicode)
        wifkey_master2 = bip32.get_master_private_key(key2)
        address_master2 = bip32.get_master_address(key2)
        wifkey_delegate2 = bip32.get_delegate_private_key(key2)
        address_delegate2 = bip32.get_delegate_address(key2)
        self.assertEqual(bitcoinecdsa.privkey_to_address(wifkey_master2),address_master2)
        self.assertEqual(bitcoinecdsa.privkey_to_address(wifkey_delegate2),address_delegate2)
        # And as a single unicode string.
        key3 = bip32.mnemonic_to_key(mnemonic.decode('unicode-escape'))
        wifkey_master3 = bip32.get_master_private_key(key3)
        address_master3 = bip32.get_master_address(key3)
        wifkey_delegate3 = bip32.get_delegate_private_key(key3)
        address_delegate3 = bip32.get_delegate_address(key3)
        self.assertEqual(bitcoinecdsa.privkey_to_address(wifkey_master3),address_master3)
        self.assertEqual(bitcoinecdsa.privkey_to_address(wifkey_delegate3),address_delegate3)
| 2.40625 | 2 |
conanfile.py | vuo/conan-muparser | 0 | 12774069 | <filename>conanfile.py
from conans import ConanFile, CMake, tools
import os
import platform
class MuParserConan(ConanFile):
name = 'muparser'
source_version = '2.3.2'
package_version = '0'
version = '%s-%s' % (source_version, package_version)
build_requires = (
'llvm/5.0.2-1@vuo/stable',
'macos-sdk/11.0-0@vuo/stable',
)
settings = 'os', 'compiler', 'build_type', 'arch'
url = 'http://muparser.beltoforion.de/'
license = 'http://beltoforion.de/article.php?a=muparser&hl=en&p=licence'
description = 'A library for parsing mathematical expressions'
source_dir = 'muparser-%s' % source_version
build_dir = '_build'
install_dir = '_install'
def requirements(self):
if platform.system() == 'Linux':
self.requires('patchelf/0.10pre-1@vuo/stable')
elif platform.system() != 'Darwin':
raise Exception('Unknown platform "%s"' % platform.system())
def source(self):
tools.get('https://github.com/beltoforion/muparser/archive/v%s.tar.gz' % self.source_version,
sha256='b35fc84e3667d432e3414c8667d5764dfa450ed24a99eeef7ee3f6647d44f301')
self.run('mv %s/License.txt %s/%s.txt' % (self.source_dir, self.source_dir, self.name))
def build(self):
cmake = CMake(self)
cmake.definitions['BUILD_SHARED_LIBS'] = True
cmake.definitions['ENABLE_OPENMP'] = False
cmake.definitions['ENABLE_SAMPLES'] = False
cmake.definitions['CONAN_DISABLE_CHECK_COMPILER'] = True
cmake.definitions['CMAKE_BUILD_TYPE'] = 'Release'
cmake.definitions['CMAKE_C_COMPILER'] = self.deps_cpp_info['llvm'].rootpath + '/bin/clang'
cmake.definitions['CMAKE_C_FLAGS'] = cmake.definitions['CMAKE_CXX_FLAGS'] = '-Oz'
cmake.definitions['CMAKE_INSTALL_PREFIX'] = '%s/%s' % (os.getcwd(), self.install_dir)
if platform.system() == 'Darwin':
cmake.definitions['CMAKE_OSX_ARCHITECTURES'] = 'x86_64;arm64'
cmake.definitions['CMAKE_OSX_DEPLOYMENT_TARGET'] = '10.11'
cmake.definitions['CMAKE_OSX_SYSROOT'] = self.deps_cpp_info['macos-sdk'].rootpath
cmake.definitions['CMAKE_CXX_COMPILER'] = self.deps_cpp_info['llvm'].rootpath + '/bin/clang++'
tools.mkdir(self.build_dir)
with tools.chdir(self.build_dir):
cmake.configure(source_dir='../%s' % self.source_dir,
build_dir='.')
cmake.build()
cmake.install()
with tools.chdir(self.install_dir):
if platform.system() == 'Darwin':
self.run('install_name_tool -id @rpath/libmuparser.dylib lib/libmuparser.dylib')
elif platform.system() == 'Linux':
patchelf = self.deps_cpp_info['patchelf'].rootpath + '/bin/patchelf'
self.run('%s --set-soname libmuparser.so lib/libmuparser.so' % patchelf)
def package(self):
    """Copy headers, the shared library, and the license into the package folder."""
    if platform.system() == 'Darwin':
        libext = 'dylib'
    elif platform.system() == 'Linux':
        libext = 'so'
    else:
        # Previously this fell through with libext unbound, raising a confusing
        # NameError below; fail explicitly, mirroring requirements().
        raise Exception('Unknown platform "%s"' % platform.system())
    self.copy('*.h', src='%s/include' % self.install_dir, dst='include/muParser')
    self.copy('libmuparser.%s' % libext, src='%s/lib' % self.install_dir, dst='lib')
    self.copy('%s.txt' % self.name, src=self.source_dir, dst='license')
def package_info(self):
    """Advertise the built library to consuming packages."""
    self.cpp_info.libs = ['muparser']
| 2.21875 | 2 |
opps/polls/widgets.py | opps/opps-polls | 1 | 12774070 | <reponame>opps/opps-polls<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django import forms
from itertools import chain
from django.forms import CheckboxInput
from django.utils.html import format_html, format_html_join
from django.utils.safestring import mark_safe
from django.utils.encoding import force_text, python_2_unicode_compatible
class CheckboxSelectMultiple(forms.widgets.CheckboxSelectMultiple):
    """Multi-checkbox widget whose option labels are rendered through format_html,
    allowing labels to carry (trusted) markup instead of being escaped."""
    def render(self, name, value, attrs=None, choices=()):
        """Render the widget as an HTML <ul> with one checkbox <li> per choice."""
        if value is None:
            value = []
        final_attrs = self.build_attrs(attrs, name=name)
        id_ = final_attrs.get('id', None)
        output = [u'<ul>']
        # Normalize to strings
        str_values = set([force_text(v) for v in value])
        for i, (option_value, option_label) in enumerate(chain(self.choices, choices)):
            # If an ID attribute was given, add a numeric index as a suffix,
            # so that the checkboxes don't all have the same ID attribute.
            if id_:
                final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
                label_for = format_html(u' for="{0}_{1}"', id_, i)
            else:
                label_for = ''
            # check_test closes over str_values, so the box is ticked when its
            # option value is among the selected values.
            cb = CheckboxInput(final_attrs, check_test=lambda value: value in str_values)
            option_value = force_text(option_value)
            rendered_cb = cb.render(name, option_value)
            # NOTE(review): format_html() with a single argument treats the label
            # as a format string -- a label containing '{' or '}' would raise.
            # Confirm choice labels never contain literal braces.
            option_label = format_html(option_label)
            output.append(format_html(u'<li><label{0}>{1} {2}</label></li>',
                                      label_for, rendered_cb, option_label))
        output.append(u'</ul>')
        return mark_safe('\n'.join(output))
class RadioInput(forms.widgets.RadioInput):
    """Single radio button whose choice label is rendered through format_html."""
    def render(self, name=None, value=None, attrs=None, choices=()):
        """Render this choice as a <label><input type="radio" ...> label</label> fragment."""
        # NOTE(review): 'or' falls back to the bound attribute for ANY falsy
        # argument (e.g. name='' or value=0), not just None -- confirm callers
        # never pass falsy overrides on purpose.
        name = name or self.name
        value = value or self.value
        attrs = attrs or self.attrs
        if 'id' in self.attrs:
            # IDs carry a per-choice index suffix so each radio is addressable.
            label_for = format_html(u' for="{0}_{1}"', self.attrs['id'], self.index)
        else:
            label_for = u''
        # NOTE(review): single-argument format_html treats the label as a format
        # string; labels containing braces would raise.
        choice_label = format_html(self.choice_label)
        return format_html(u'<label{0}>{1} {2}</label>', label_for, self.tag(), choice_label)
@python_2_unicode_compatible
class RadioFieldRenderer(object):
    """
    An object used by RadioSelect to enable customization of radio widgets.
    """
    def __init__(self, name, value, attrs, choices):
        self.name = name
        self.value = value
        self.attrs = attrs
        self.choices = choices
    def __iter__(self):
        """Yield one RadioInput per choice, each with its own copy of attrs."""
        for index, choice in enumerate(self.choices):
            yield RadioInput(self.name, self.value, self.attrs.copy(), choice, index)
    def __getitem__(self, idx):
        # An out-of-range idx raises IndexError here, which we let propagate.
        choice = self.choices[idx]
        return RadioInput(self.name, self.value, self.attrs.copy(), choice, idx)
    def __str__(self):
        return self.render()
    def render(self):
        """Outputs a <ul> for this set of radio fields."""
        items = format_html_join('\n', u'<li>{0}</li>',
                                 [(force_text(widget),) for widget in self])
        return format_html(u'<ul>\n{0}\n</ul>', items)
class RadioSelect(forms.widgets.Select):
    """Select widget rendered as a list of radio buttons via RadioFieldRenderer."""
    renderer = RadioFieldRenderer
    def __init__(self, *args, **kwargs):
        # A custom renderer class may be supplied at construction time.
        custom_renderer = kwargs.pop('renderer', None)
        if custom_renderer:
            self.renderer = custom_renderer
        super(RadioSelect, self).__init__(*args, **kwargs)
    def subwidgets(self, name, value, attrs=None, choices=()):
        """Lazily yield one RadioInput subwidget per choice."""
        for subwidget in self.get_renderer(name, value, attrs, choices):
            yield subwidget
    def get_renderer(self, name, value, attrs=None, choices=()):
        """Returns an instance of the renderer."""
        normalized_value = force_text('' if value is None else value)
        all_choices = list(chain(self.choices, choices))
        return self.renderer(name, normalized_value, self.build_attrs(attrs), all_choices)
    def render(self, name, value, attrs=None, choices=()):
        return self.get_renderer(name, value, attrs, choices).render()
    def id_for_label(self, id_):
        # RadioSelect is represented by multiple <input type="radio"> fields,
        # each of which has a distinct ID. The IDs are made distinct by a "_X"
        # suffix, where X is the zero-based index of the radio field. Thus,
        # the label for a RadioSelect should reference the first one ('_0').
        return id_ + '_0' if id_ else id_
| 2.40625 | 2 |
REST/python-refresher-master/17_default_parameter_values/code.py | Rebell-Leader/bg | 0 | 12774071 | <gh_stars>0
def add(x, y=3):
    """Print the sum of x and y; y defaults to 3."""
    print(x + y)

add(5)  # 8
add(5, 8)  # 13
# add(y=3)  # TypeError: x has no default, so it must always be supplied.
# (Left commented out -- executing it would abort the script before the
# remaining examples run, like the invalid definition below.)

# -- Order of default parameters --
# def add(x=5, y): # Not OK, default parameters must go after non-default
#     print(x + y)

# -- Usually don't use variables as default value --
default_y = 3

def add(x, y=default_y):
    """Print the sum of x and y; y's default was captured at definition time."""
    total = x + y  # renamed from 'sum' to avoid shadowing the builtin
    print(total)

add(2)  # 5
default_y = 4
print(default_y)  # 4
add(2)  # 5, even though we re-defined default_y
solutions/exercise3.py | FilippoAleotti/SIMUR | 0 | 12774072 | '''
<NAME>
<EMAIL>
29 November 2019
I PROFESSIONAL MASTER'S PROGRAM, II LEVEL "SIMUR", Imola 2019
Given a list of integers, store the frequency of each value in a dict, where the key is the string form of the value.
'''
def are_equals(dict1, dict2):
    ''' Check whether two dicts are equal.
    Both the dicts have str keys and integer values.
    Returns True only if they contain exactly the same key/value pairs.
    '''
    # The original only verified that dict1's items appear in dict2, so dict2
    # could hold EXTRA keys and the dicts would still compare "equal".
    # Comparing the key sets first makes the check symmetric.
    if dict1.keys() != dict2.keys():
        return False
    for k, v in dict1.items():
        if dict2[k] != v:
            return False
    return True
def frequency_extractor(input_list):
    '''Count occurrences of each value in input_list.
    Returns a dict mapping str(value) to its frequency.
    '''
    frequencies = {}
    for value in input_list:
        key = str(value)
        frequencies[key] = frequencies.get(key, 0) + 1
    return frequencies
# Smoke tests: the extracted frequencies must match the expected counts.
frequency_1 = frequency_extractor([0,1,0,2,2,1,2,1,0,0,2,1,1])
frequency_2 = frequency_extractor([1,2,2,2,0,5,3])
assert are_equals(frequency_1, {'0':4,'1':5,'2':4})
assert are_equals(frequency_2, {'0':1,'1':1,'2':3,'3':1,'5':1})
| 3.890625 | 4 |
test/test_add_contact.py | piersto/python_training_44 | 0 | 12774073 | <gh_stars>0
# -*- coding: utf-8 -*-
from model.contact import Contact
def test_add_contact(app):
    """Adding a contact bumps the count and the new contact appears in the list."""
    app.contact.open_add_new_contact_page()
    contacts_before = app.contact.list_of_contacts()
    new_contact = Contact(firstname='Ivan',
                          middlename='Petrovich', lastname='Kovaliov',
                          birthday='31', birth_month='December')
    app.contact.add_contact(new_contact)
    assert app.contact.count() == len(contacts_before) + 1
    contacts_after = app.contact.list_of_contacts()
    expected = contacts_before + [new_contact]
    assert sorted(expected, key=Contact.id_or_max) == sorted(contacts_after, key=Contact.id_or_max)
def test_add_empty_contact(app):
    """A contact with empty name fields can still be submitted without error."""
    app.contact.open_add_new_contact_page()
    app.contact.add_contact(Contact(firstname='', middlename='', lastname=''))
| 2.578125 | 3 |
wasatch/AndorDevice.py | adiravishankara/Wasatch.PY | 0 | 12774074 | import re
import os
import usb
import time
import json
import queue
import struct
import logging
import datetime
from ctypes import *
from typing import TypeVar, Any, Callable
from .SpectrometerSettings import SpectrometerSettings
from .SpectrometerState import SpectrometerState
from .SpectrometerResponse import SpectrometerResponse
from .SpectrometerRequest import SpectrometerRequest
from .SpectrometerResponse import ErrorLevel
from .InterfaceDevice import InterfaceDevice
from .DeviceID import DeviceID
from .Reading import Reading
log = logging.getLogger(__name__)
class AndorDevice(InterfaceDevice):
    """
    This is the basic implementation of our interface with Andor cameras

    @todo have check_result return a SpectrometerResponse

    ##########################################################################
    This class adopts the external device interface structure
    This involves receiving a request through the handle_request function
    A request is processed based on the key in the request
    The processing function passes the commands to the requested device
    Once it receives a response from the connected device it then passes that
    back up the chain

            Enlighten Request
                    |
            handle_requests
                    |
             ------------
            /   /  |  \  \
           { get_laser status, acquire, set_laser_watchdog, etc....}
            \   \  |  /  /
             ------------
                    |
         {self.driver.some_andor_sdk_call}
    ############################################################################
    """
    SUCCESS = 20002          #!< see load_error_codes()
    SHUTTER_SPEED_MS = 50    #!< empirically determined

    def __init__(self, device_id, message_queue=None) -> None:
        """Initialize state, locate and load the Andor SDK DLL, and seed
        default 'EEPROM-like' settings (Andor cameras expose no real EEPROM)."""
        # if passed a string representation of a DeviceID, deserialize it
        super().__init__()
        if type(device_id) is str:
            device_id = DeviceID(label=device_id)
        self.device_id = device_id
        self.message_queue = message_queue
        self.load_error_codes()
        self.connected = False
        # Receives ENLIGHTEN's 'change settings' commands in the spectrometer
        # process. Although a logical queue, has nothing to do with multiprocessing.
        self.command_queue = []
        self.immediate_mode = False
        self.settings = SpectrometerSettings(self.device_id)
        self.summed_spectra = None
        self.sum_count = 0
        self.session_reading_count = 0
        self.take_one = False
        self.failure_count = 0
        self.dll_fail = True       # cleared below once the SDK DLL loads
        self.toggle_state = True   # TEC on/off state
        self.driver = None
        self.process_id = os.getpid()
        self.last_memory_check = datetime.datetime.now()
        self.last_battery_percentage = 0
        self.spec_index = 0
        self._scan_averaging = 1
        self.dark = None
        self.boxcar_half_width = 0
        # decide appropriate DLL filename for architecture
        arch = 64 if 64 == struct.calcsize("P") * 8 else 32
        filename = f"atmcd{arch}d.dll"
        # Andor libraries may be found in various locations
        dll_paths = [ r"C:\Program Files\Andor Driver Pack 2",
                      r"C:\Program Files\Andor SDK",
                      r"dist\Andor",
                      r"dist" ]
        # try to find correct DLL in any known location
        for path in dll_paths:
            pathname = os.path.join(path, filename)
            if os.path.exists(pathname):
                try:
                    log.debug(f"attempting to load {pathname}")
                    self.driver = cdll.LoadLibrary(pathname)
                    self.dll_fail = False
                except Exception as e:
                    log.error(f"Error loading {pathname}: {e}")
            if self.driver is not None:
                break
        if self.driver is None:
            log.error(f"could not find (unknown) in search path: {dll_paths}")
        # set Andor defaults for important "EEPROM" settings
        # (all but has_cooling can be overridden via config file)
        # Andor API doesn't have access to detector info
        # Note that we use non-iDus cameras, including the Newton
        self.settings.eeprom.detector = "iDus"
        self.settings.eeprom.wavelength_coeffs = [0,1,0,0]
        self.settings.eeprom.has_cooling = True
        self.settings.eeprom.startup_integration_time_ms = 10
        self.settings.eeprom.startup_temp_degC = -60
        self.process_f = self._init_process_funcs()

    ###############################################################
    # Private Methods
    ###############################################################

    def _init_process_funcs(self) -> dict[str, Callable[..., Any]]:
        """Build the dispatch table mapping ENLIGHTEN request keys to handlers."""
        process_f = {}
        process_f["connect"] = self.connect
        process_f["acquire_data"] = self.acquire_data
        process_f["set_shutter_enable"] = self.set_shutter_enable
        process_f["set_integration_time_ms"] = self.set_integration_time_ms
        process_f["get_serial_number"] = self.get_serial_number
        process_f["init_tec_setpoint"] = self.init_tec_setpoint
        process_f["set_tec_setpoint"] = self.set_tec_setpoint
        process_f["init_detector_area"] = self.init_detector_area
        process_f["scans_to_average"] = self.scans_to_average
        ##################################################################
        # What follows is the old init-lambdas that are squashed into process_f
        # Long term, the upstream requests should be changed to match the new format
        # This is an easy fix for the time being to make things behave
        ##################################################################
        process_f["integration_time_ms"] = lambda x: self.set_integration_time_ms(x) # value already in ms; passed straight through
        process_f["fan_enable"] = lambda x: self.set_fan_enable(bool(x))
        process_f["shutter_enable"] = lambda x: self.set_shutter_enable(bool(x))
        process_f["detector_tec_enable"] = lambda x: self.toggle_tec(bool(x))
        process_f["detector_tec_setpoint_degC"] = lambda x: self.set_tec_setpoint(int(round(x)))
        return process_f

    def _update_wavelength_coeffs(self, coeffs: list[float]) -> None:
        """Persist new wavelength calibration coefficients to the JSON config file."""
        self.settings.eeprom.wavelength_coeffs = coeffs
        self.config_values['wavelength_coeffs'] = coeffs
        # NOTE(review): file handle is never closed explicitly; relies on GC.
        f = open(self.config_file, 'w')
        json.dump(self.config_values, f)

    def set_fan_enable(self, x: bool) -> SpectrometerResponse:
        """Turn the camera fan on or off via the SDK."""
        self.check_result(self.driver.SetFanMode(int(x)), f"Andor Fan On {x}")
        return SpectrometerResponse()

    def _get_default_data_dir(self) -> str:
        """Return the per-user EnlightenSpectra data directory for this OS."""
        if os.name == "nt":
            return os.path.join(os.path.expanduser("~"), "Documents", "EnlightenSpectra")
        return os.path.join(os.environ["HOME"], "EnlightenSpectra")

    def _check_config_file(self) -> bool:
        """Compute the config path for this camera; create the config dir if
        needed, and return True if a per-serial JSON config already exists."""
        self.config_dir = os.path.join(self._get_default_data_dir(), 'config')
        self.config_file = os.path.join(self.config_dir, self.serial + '.json')
        if not os.path.exists(self.config_dir):
            os.makedirs(self.config_dir)
        return os.path.isfile(self.config_file)

    def _get_spectrum_raw(self) -> list[float]:
        """Trigger one acquisition and return the spectrum as a list of ints.

        Blocks until the acquisition completes. Returns None (implicitly) on
        SDK failure -- callers must handle a None/empty spectrum.
        """
        log.debug("requesting spectrum");
        #################
        # read spectrum
        #################
        #int[] spec = new int[pixels];
        spec_arr = c_long * self.pixels
        spec_init_vals = [0] * self.pixels
        spec = spec_arr(*spec_init_vals)
        # ask for spectrum then collect, NOT multithreaded (though we should look into that!), blocks
        #spec = new int[pixels]; //defaults to all zeros
        self.driver.StartAcquisition();
        self.driver.WaitForAcquisition();
        success = self.driver.GetAcquiredData(spec, c_ulong(self.pixels));
        if (success != self.SUCCESS):
            log.debug(f"getting spectra did not succeed. Received code of {success}. Returning")
            return
        convertedSpec = [x for x in spec]
        #if (self.eeprom.featureMask.invertXAxis):
        #    convertedSpec.reverse()
        log.debug(f"getSpectrumRaw: returning {len(spec)} pixels");
        return convertedSpec;

    def _take_one_averaged_reading(self) -> SpectrometerResponse:
        """Collect one Reading, optionally summing/averaging multiple scans.

        In non-free-running mode with averaging enabled, performs the entire
        blocking average here; otherwise takes a single scan per call and
        accumulates across calls until the average completes.
        """
        averaging_enabled = (self.settings.state.scans_to_average > 1)
        if averaging_enabled and not self.settings.state.free_running_mode:
            # collect the entire averaged spectrum at once (added for
            # BatchCollection with laser delay)
            #
            # So: we're NOT in "free-running" mode, so we're basically being
            # slaved to parent process and doing exactly what is requested
            # "on command." That means we can perform a big, heavy blocking
            # scan average all at once, because they requested it.
            self.sum_count = 0
            loop_count = self.settings.state.scans_to_average
        else:
            # we're in free-running mode
            loop_count = 1
        log.debug("take_one_averaged_reading: loop_count = %d", loop_count)
        # either take one measurement (normal), or a bunch (blocking averaging)
        reading = None
        for loop_index in range(0, loop_count):
            # start a new reading
            # NOTE: reading.timestamp is when reading STARTED, not FINISHED!
            reading = Reading(self.device_id)
            if self.settings.eeprom.has_cooling and self.toggle_state:
                c_temp = c_int()
                result = self.driver.GetTemperature(0,c_temp)
                if (self.SUCCESS != result):
                    log.error(f"unable to read tec temp, result was {result}")
                else:
                    log.debug(f"andor read temperature, value of {c_temp.value}")
                    reading.detector_temperature_degC = c_temp.value
            try:
                reading.integration_time_ms = self.settings.state.integration_time_ms
                reading.laser_power_perc    = self.settings.state.laser_power_perc
                reading.laser_power_mW      = self.settings.state.laser_power_mW
                reading.laser_enabled       = self.settings.state.laser_enabled
                reading.spectrum            = self._get_spectrum_raw()
                temperature = c_float()
                temp_success = self.driver.GetTemperatureF(byref(temperature))
                reading.detector_temperature_degC = temperature.value
            except usb.USBError:
                self.failure_count += 1
                # NOTE(review): self.device is never assigned on this class --
                # this log call would itself raise AttributeError; verify.
                log.error(f"Andor Device: encountered USB error in reading for device {self.device}")
            if reading.spectrum is None or reading.spectrum == []:
                if self.failure_count > 3:
                    return SpectrometerResponse(data=False,error_msg="exceeded failure for readings")
            if not reading.failure:
                if averaging_enabled:
                    if self.sum_count == 0:
                        self.summed_spectra = [float(i) for i in reading.spectrum]
                    else:
                        log.debug("device.take_one_averaged_reading: summing spectra")
                        for i in range(len(self.summed_spectra)):
                            self.summed_spectra[i] += reading.spectrum[i]
                    self.sum_count += 1
                    log.debug("device.take_one_averaged_reading: summed_spectra : %s ...", self.summed_spectra[0:9])
            # count spectra
            self.session_reading_count += 1
            reading.session_count = self.session_reading_count
            reading.sum_count = self.sum_count
            # have we completed the averaged reading?
            if averaging_enabled:
                if self.sum_count >= self.settings.state.scans_to_average:
                    reading.spectrum = [ x / self.sum_count for x in self.summed_spectra ]
                    log.debug("device.take_one_averaged_reading: averaged_spectrum : %s ...", reading.spectrum[0:9])
                    reading.averaged = True
                    # reset for next average
                    self.summed_spectra = None
                    self.sum_count = 0
            else:
                # if averaging isn't enabled...then a single reading is the
                # "averaged" final measurement (check reading.sum_count to confirm)
                reading.averaged = True
            # were we told to only take one (potentially averaged) measurement?
            if self.take_one and reading.averaged:
                log.debug("completed take_one")
                self.change_setting("cancel_take_one", True)
        log.debug("device.take_one_averaged_reading: returning %s", reading)
        if reading.spectrum is not None and reading.spectrum != []:
            self.failure_count = 0
        # reading.dump_area_scan()
        return SpectrometerResponse(data=reading)

    def _close_ex_shutter(self) -> SpectrometerResponse:
        """Force the external shutter closed (mode 2)."""
        self.check_result(self.driver.SetShutterEx(1, 1, self.SHUTTER_SPEED_MS, self.SHUTTER_SPEED_MS, 2), "SetShutterEx(2)")
        self.settings.state.shutter_enabled = False
        return SpectrometerResponse(True)

    def _open_ex_shutter(self) -> SpectrometerResponse:
        """Force the external shutter open (mode 1)."""
        self.check_result(self.driver.SetShutterEx(1, 1, self.SHUTTER_SPEED_MS, self.SHUTTER_SPEED_MS, 1), "SetShutterEx(1)")
        self.settings.state.shutter_enabled = True
        return SpectrometerResponse(True)

    ###############################################################
    # Public Methods
    ###############################################################

    def check_result(self, result, func):
        """Raise RuntimeError (with decoded error name) if an SDK call failed."""
        if result != self.SUCCESS:
            name = self.get_error_code(result)
            msg = f"error calling {func}: {result} ({name})"
            log.error(msg)
            raise RuntimeError(msg)
        log.debug(f"successfully called {func}")

    def connect(self) -> SpectrometerResponse:
        """Open and initialize the camera: SDK init, serial number, TEC,
        detector geometry, per-serial JSON config, acquisition mode, shutter
        and startup integration time."""
        if self.dll_fail:
            return SpectrometerResponse(data=False,error_lvl=ErrorLevel.high,error_msg="couldn't load Andor dll")
        cameraHandle = c_int()
        self.check_result(self.driver.GetCameraHandle(self.spec_index, byref(cameraHandle)), "GetCameraHandle")
        self.check_result(self.driver.SetCurrentCamera(cameraHandle.value), "SetCurrentCamera")
        try:
            path_to_ini = create_string_buffer(b'\000' * 256)
            self.check_result(self.driver.Initialize(path_to_ini), "Initialize")
        except:
            log.error("Andor.Initialize failed", exc_info=1)
            return SpectrometerResponse(data=False, error_lvl=ErrorLevel.high, error_msg="Andor initialization failed")
        self.get_serial_number()
        self.init_tec_setpoint()
        self.init_detector_area()
        if not self._check_config_file():
            # no config on disk for this serial yet -- write defaults
            self.config_values = {
                'detector_serial_number': self.serial,
                'wavelength_coeffs': [0,1,0,0],
                'excitation_nm_float': 0,
            }
            f = open(self.config_file, 'w')
            json.dump(self.config_values, f)
        else:
            self._load_config_values()
        self.check_result(self.driver.CoolerON(), "CoolerON")
        self.check_result(self.driver.SetAcquisitionMode(1), "SetAcquisitionMode(single_scan)")
        self.check_result(self.driver.SetTriggerMode(0), "SetTriggerMode")
        self.check_result(self.driver.SetReadMode(0), "SetReadMode(full_vertical_binning)")
        self.init_detector_speed()
        self.check_result(self.driver.SetShutterEx(1, 1, self.SHUTTER_SPEED_MS, self.SHUTTER_SPEED_MS, 0), "SetShutterEx(fully automatic external with internal always open)")
        self.settings.state.shutter_enabled = True
        self.set_integration_time_ms(self.settings.eeprom.startup_integration_time_ms)
        # success!
        log.info("AndorDevice successfully connected")
        self.connected = True
        self.settings.eeprom.active_pixels_horizontal = self.pixels
        self.settings.eeprom.has_cooling = True
        return SpectrometerResponse(data=True)

    def _load_config_values(self):
        """Read the per-serial JSON config and apply it to EEPROM settings."""
        # NOTE(review): file handle is never closed explicitly; relies on GC.
        f = open(self.config_file,)
        self.config_values = dict(json.load(f))
        log.debug(f"loaded {self.config_file}: {self.config_values}")
        # alternate spellings (deprecated)
        if "wp_serial_number" in self.config_values:
            self.settings.eeprom.serial_number = self.config_values['wp_serial_number']
        if "wp_model" in self.config_values:
            self.settings.eeprom.model = self.config_values['wp_model']
        # same spelling
        for k in [ 'detector',
                   'model',
                   'detector',
                   'serial_number',
                   'wavelength_coeffs',
                   'excitation_nm_float',
                   'startup_temp_degC',
                   'startup_integration_time_ms' ]:
            if k in self.config_values:
                setattr(self.settings.eeprom, k, self.config_values[k])
        # post-load initialization
        if 'startup_temp_degC' in self.config_values:
            self.set_tec_setpoint(self.settings.eeprom.startup_temp_degC)

    def acquire_data(self) -> SpectrometerResponse:
        """Take one (possibly averaged) reading and return it."""
        reading = self._take_one_averaged_reading()
        return reading

    def set_shutter_enable(self, enable: bool) -> SpectrometerResponse:
        """Open (True) or close (False) the external shutter."""
        if enable:
            return self._open_ex_shutter()
        else:
            return self._close_ex_shutter()

    def set_integration_time_ms(self, ms: float) -> SpectrometerResponse:
        """Set the exposure time; log back what the SDK actually applied."""
        self.integration_time_ms = ms
        log.debug(f"setting integration time to {self.integration_time_ms}ms")
        exposure = c_float()
        accumulate = c_float()
        kinetic = c_float()
        sec = ms / 1000.0
        self.check_result(self.driver.SetExposureTime(c_float(sec)), f"SetExposureTime({sec})")
        self.check_result(self.driver.GetAcquisitionTimings(byref(exposure), byref(accumulate), byref(kinetic)), "GetAcquisitionTimings")
        log.debug(f"read integration time of {exposure.value:.3f}sec (expected {ms}ms)")
        return SpectrometerResponse(data=True)

    def get_serial_number(self) -> SpectrometerResponse:
        """Read the camera serial number and store it as 'CCD-<n>'."""
        sn = c_int()
        self.check_result(self.driver.GetCameraSerialNumber(byref(sn)), "GetCameraSerialNumber")
        self.serial = f"CCD-{sn.value}"
        self.settings.eeprom.serial_number = self.serial
        log.debug(f"connected to {self.serial}")
        return SpectrometerResponse(True)

    def init_tec_setpoint(self) -> SpectrometerResponse:
        """Query the supported TEC range and apply the startup setpoint."""
        minTemp = c_int()
        maxTemp = c_int()
        self.check_result(self.driver.GetTemperatureRange(byref(minTemp), byref(maxTemp)), "GetTemperatureRange")
        self.settings.eeprom.max_temp_degC = maxTemp.value
        self.settings.eeprom.min_temp_degC = minTemp.value
        # commenting-out because Andor camera is reporting -120C for a device
        # only rated at -60C...leaving hardcoded default for now
        #
        # self.settings.eeprom.startup_temp_degC = minTemp.value
        # however the startup temperature was set (hardcode, JSON, clamped to min)...apply it
        self.setpoint_deg_c = self.settings.eeprom.startup_temp_degC
        self.check_result(self.driver.SetTemperature(self.setpoint_deg_c), f"SetTemperature({self.setpoint_deg_c})")
        log.debug(f"set TEC to {self.setpoint_deg_c}°C (range {self.settings.eeprom.min_temp_degC}, {self.settings.eeprom.max_temp_degC})")
        return SpectrometerResponse(True)

    def toggle_tec(self, toggle_state):
        """Turn the thermoelectric cooler on or off."""
        c_toggle = c_int(toggle_state)
        self.toggle_state = c_toggle.value
        if toggle_state:
            self.check_result(self.driver.CoolerON(), "CoolerON")
        else:
            self.check_result(self.driver.CoolerOFF(), "CoolerOFF")
        return SpectrometerResponse(True)

    def set_tec_setpoint(self, set_temp):
        """Apply a new TEC setpoint if it is in range and the TEC is enabled.

        Silently returns (logging an error) on out-of-range or disabled TEC.
        """
        if set_temp < self.settings.eeprom.min_temp_degC or set_temp > self.settings.eeprom.max_temp_degC:
            log.error(f"requested temp of {set_temp}, but it is outside range ({self.settings.eeprom.min_temp_degC}C, {self.settings.eeprom.max_temp_degC}C)")
            return
        if not self.toggle_state:
            log.error(f"returning because toggle state is {self.toggle_state}")
            return
        self.setpoint_deg_c = set_temp
        # I don't think CoolerON should need to be called, but I'm not seeing temperature changes
        # when it is not present here.
        self.check_result(self.driver.CoolerON(), "CoolerON")
        self.check_result(self.driver.SetTemperature(self.setpoint_deg_c), f"SetTemperature({self.setpoint_deg_c})")
        return SpectrometerResponse(True)

    def init_detector_area(self) -> SpectrometerResponse:
        """Query the sensor dimensions; store horizontal size as self.pixels."""
        xPixels = c_int()
        yPixels = c_int()
        self.check_result(self.driver.GetDetector(byref(xPixels), byref(yPixels)), "GetDetector(x, y)")
        log.debug(f"detector {xPixels.value} width x {yPixels.value} height")
        self.pixels = xPixels.value
        return SpectrometerResponse(True)

    def init_detector_speed(self) -> SpectrometerResponse:
        """Configure vertical shift to the recommended speed and horizontal
        shift to the fastest available speed across all A/D channels."""
        # set vertical to recommended
        VSnumber = c_int()
        speed = c_float()
        self.check_result(self.driver.GetFastestRecommendedVSSpeed(byref(VSnumber), byref(speed)), "GetFastestRecommendedVSSpeed")
        self.check_result(self.driver.SetVSSpeed(VSnumber.value), f"SetVSSpeed({VSnumber.value})")
        # set horizontal to max
        nAD = c_int()
        sIndex = c_int()
        STemp = 0.0
        HSnumber = 0
        ADnumber = 0
        self.check_result(self.driver.GetNumberADChannels(byref(nAD)), "GetNumberADChannels")
        for iAD in range(nAD.value):
            self.check_result(self.driver.GetNumberHSSpeeds(iAD, 0, byref(sIndex)), f"GetNumberHSSpeeds({iAD})")
            for iSpeed in range(sIndex.value):
                self.check_result(self.driver.GetHSSpeed(iAD, 0, iSpeed, byref(speed)), f"GetHSSpeed(iAD {iAD}, iSpeed {iSpeed})")
                if speed.value > STemp:
                    STemp = speed.value
                    HSnumber = iSpeed
                    ADnumber = iAD
        self.check_result(self.driver.SetADChannel(ADnumber), f"SetADChannel({ADnumber})")
        self.check_result(self.driver.SetHSSpeed(0, HSnumber), f"SetHSSpeed({HSnumber})")
        log.debug(f"set AD channel {ADnumber} with horizontal speed {HSnumber} ({STemp})")
        return SpectrometerResponse(True)

    def scans_to_average(self, value: int) -> SpectrometerResponse:
        """Set how many scans to average; resets any in-progress sum."""
        self.sum_count = 0
        self.settings.state.scans_to_average = int(value)
        return SpectrometerResponse(True)

    def get_error_code(self, code):
        """Return the symbolic name for an SDK result code."""
        if code in self.error_codes:
            return self.error_codes[code]
        return "UNKNOWN_ANDOR_ERROR"

    ## @see ATMCD32D.H
    def load_error_codes(self):
        """Populate self.error_codes, mapping SDK result codes to names."""
        self.error_codes = {
            20001: "DRV_ERROR_CODES",
            20002: "DRV_SUCCESS",
            20003: "DRV_VXDNOTINSTALLED",
            20004: "DRV_ERROR_SCAN",
            20005: "DRV_ERROR_CHECK_SUM",
            20006: "DRV_ERROR_FILELOAD",
            20007: "DRV_UNKNOWN_FUNCTION",
            20008: "DRV_ERROR_VXD_INIT",
            20009: "DRV_ERROR_ADDRESS",
            20010: "DRV_ERROR_PAGELOCK",
            20011: "DRV_ERROR_PAGEUNLOCK",
            20012: "DRV_ERROR_BOARDTEST",
            20013: "DRV_ERROR_ACK",
            20014: "DRV_ERROR_UP_FIFO",
            20015: "DRV_ERROR_PATTERN",
            20017: "DRV_ACQUISITION_ERRORS",
            20018: "DRV_ACQ_BUFFER",
            20019: "DRV_ACQ_DOWNFIFO_FULL",
            20020: "DRV_PROC_UNKONWN_INSTRUCTION",
            20021: "DRV_ILLEGAL_OP_CODE",
            20022: "DRV_KINETIC_TIME_NOT_MET",
            20023: "DRV_ACCUM_TIME_NOT_MET",
            20024: "DRV_NO_NEW_DATA",
            20025: "DRV_PCI_DMA_FAIL",
            20026: "DRV_SPOOLERROR",
            20027: "DRV_SPOOLSETUPERROR",
            20028: "DRV_FILESIZELIMITERROR",
            20029: "DRV_ERROR_FILESAVE",
            20033: "DRV_TEMPERATURE_CODES",
            20034: "DRV_TEMPERATURE_OFF",
            20035: "DRV_TEMPERATURE_NOT_STABILIZED",
            20036: "DRV_TEMPERATURE_STABILIZED",
            20037: "DRV_TEMPERATURE_NOT_REACHED",
            20038: "DRV_TEMPERATURE_OUT_RANGE",
            20039: "DRV_TEMPERATURE_NOT_SUPPORTED",
            20040: "DRV_TEMPERATURE_DRIFT",
            # NOTE(review): keys 20033-20040 are repeated below with the
            # alternate DRV_TEMP_* spellings; in a dict literal the later
            # entries silently overwrite the DRV_TEMPERATURE_* names above.
            20033: "DRV_TEMP_CODES",
            20034: "DRV_TEMP_OFF",
            20035: "DRV_TEMP_NOT_STABILIZED",
            20036: "DRV_TEMP_STABILIZED",
            20037: "DRV_TEMP_NOT_REACHED",
            20038: "DRV_TEMP_OUT_RANGE",
            20039: "DRV_TEMP_NOT_SUPPORTED",
            20040: "DRV_TEMP_DRIFT",
            20049: "DRV_GENERAL_ERRORS",
            20050: "DRV_INVALID_AUX",
            20051: "DRV_COF_NOTLOADED",
            20052: "DRV_FPGAPROG",
            20053: "DRV_FLEXERROR",
            20054: "DRV_GPIBERROR",
            20055: "DRV_EEPROMVERSIONERROR",
            20064: "DRV_DATATYPE",
            20065: "DRV_DRIVER_ERRORS",
            20066: "DRV_P1INVALID",
            20067: "DRV_P2INVALID",
            20068: "DRV_P3INVALID",
            20069: "DRV_P4INVALID",
            20070: "DRV_INIERROR",
            20071: "DRV_COFERROR",
            20072: "DRV_ACQUIRING",
            20073: "DRV_IDLE",
            20074: "DRV_TEMPCYCLE",
            20075: "DRV_NOT_INITIALIZED",
            20076: "DRV_P5INVALID",
            20077: "DRV_P6INVALID",
            20078: "DRV_INVALID_MODE",
            20079: "DRV_INVALID_FILTER",
            20080: "DRV_I2CERRORS",
            20081: "DRV_I2CDEVNOTFOUND",
            20082: "DRV_I2CTIMEOUT",
            20083: "DRV_P7INVALID",
            20084: "DRV_P8INVALID",
            20085: "DRV_P9INVALID",
            20086: "DRV_P10INVALID",
            20087: "DRV_P11INVALID",
            20089: "DRV_USBERROR",
            20090: "DRV_IOCERROR",
            20091: "DRV_VRMVERSIONERROR",
            20092: "DRV_GATESTEPERROR",
            20093: "DRV_USB_INTERRUPT_ENDPOINT_ERROR",
            20094: "DRV_RANDOM_TRACK_ERROR",
            20095: "DRV_INVALID_TRIGGER_MODE",
            20096: "DRV_LOAD_FIRMWARE_ERROR",
            20097: "DRV_DIVIDE_BY_ZERO_ERROR",
            20098: "DRV_INVALID_RINGEXPOSURES",
            20099: "DRV_BINNING_ERROR",
            20100: "DRV_INVALID_AMPLIFIER",
            20101: "DRV_INVALID_COUNTCONVERT_MODE",
            20102: "DRV_USB_INTERRUPT_ENDPOINT_TIMEOUT",
            20990: "DRV_ERROR_NOCAMERA",
            20991: "DRV_NOT_SUPPORTED",
            20992: "DRV_NOT_AVAILABLE",
            20115: "DRV_ERROR_MAP",
            20116: "DRV_ERROR_UNMAP",
            20117: "DRV_ERROR_MDL",
            20118: "DRV_ERROR_UNMDL",
            20119: "DRV_ERROR_BUFFSIZE",
            20121: "DRV_ERROR_NOHANDLE",
            20130: "DRV_GATING_NOT_AVAILABLE",
            20131: "DRV_FPGA_VOLTAGE_ERROR",
            20150: "DRV_OW_CMD_FAIL",
            20151: "DRV_OWMEMORY_BAD_ADDR",
            20152: "DRV_OWCMD_NOT_AVAILABLE",
            20153: "DRV_OW_NO_SLAVES",
            20154: "DRV_OW_NOT_INITIALIZED",
            20155: "DRV_OW_ERROR_SLAVE_NUM",
            20156: "DRV_MSTIMINGS_ERROR",
            20173: "DRV_OA_NULL_ERROR",
            20174: "DRV_OA_PARSE_DTD_ERROR",
            20175: "DRV_OA_DTD_VALIDATE_ERROR",
            20176: "DRV_OA_FILE_ACCESS_ERROR",
            20177: "DRV_OA_FILE_DOES_NOT_EXIST",
            20178: "DRV_OA_XML_INVALID_OR_NOT_FOUND_ERROR",
            20179: "DRV_OA_PRESET_FILE_NOT_LOADED",
            20180: "DRV_OA_USER_FILE_NOT_LOADED",
            20181: "DRV_OA_PRESET_AND_USER_FILE_NOT_LOADED",
            20182: "DRV_OA_INVALID_FILE",
            20183: "DRV_OA_FILE_HAS_BEEN_MODIFIED",
            20184: "DRV_OA_BUFFER_FULL",
            20185: "DRV_OA_INVALID_STRING_LENGTH",
            20186: "DRV_OA_INVALID_CHARS_IN_NAME",
            20187: "DRV_OA_INVALID_NAMING",
            20188: "DRV_OA_GET_CAMERA_ERROR",
            20189: "DRV_OA_MODE_ALREADY_EXISTS",
            20190: "DRV_OA_STRINGS_NOT_EQUAL",
            20191: "DRV_OA_NO_USER_DATA",
            20192: "DRV_OA_VALUE_NOT_SUPPORTED",
            20193: "DRV_OA_MODE_DOES_NOT_EXIST",
            20194: "DRV_OA_CAMERA_NOT_SUPPORTED",
            20195: "DRV_OA_FAILED_TO_GET_MODE",
            20196: "DRV_OA_CAMERA_NOT_AVAILABLE",
            20211: "DRV_PROCESSING_FAILED"
        }
| 2.421875 | 2 |
koapy/utils/krx/calendar/AbstractHolidayCalendar.py | fossabot/koapy | 0 | 12774075 | <gh_stars>0
"""
"""
# pylint: disable=pointless-string-statement
"""
BSD 3-Clause License
Copyright (c) 2008-2011, AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team
All rights reserved.
Copyright (c) 2011-2020, Open source contributors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# https://github.com/pandas-dev/pandas/blob/master/pandas/tseries/holiday.py
from pandas import DatetimeIndex, Series, Timestamp
from pandas.tseries.holiday import AbstractHolidayCalendar as PandasAbstractHolidayCalendar
class AbstractHolidayCalendar(PandasAbstractHolidayCalendar):
def concat(self, pre_holidays):
combined = Series(index=DatetimeIndex([]), dtype=object)
for holidays in pre_holidays:
combined = combined.combine_first(holidays)
return combined
def holidays(self, start=None, end=None, return_name=False):
"""
Returns a curve with holidays between start_date and end_date
Parameters
----------
start : starting date, datetime-like, optional
end : ending date, datetime-like, optional
return_name : bool, optional
If True, return a series that has dates and holiday names.
False will only return a DatetimeIndex of dates.
Returns
-------
DatetimeIndex of holidays
"""
if self.rules is None:
raise Exception(
f"Holiday Calendar {self.name} does not have any rules specified"
)
if start is None:
start = AbstractHolidayCalendar.start_date
if end is None:
end = AbstractHolidayCalendar.end_date
start = Timestamp(start)
end = Timestamp(end)
# If we don't have a cache or the dates are outside the prior cache, we
# get them again
if self._cache is None or start < self._cache[0] or end > self._cache[1]:
pre_holidays = [
rule.dates(start, end, return_name=True) for rule in self.rules
]
if pre_holidays:
holidays = self.concat(pre_holidays) # This line is changed from original implementation
else:
holidays = Series(index=DatetimeIndex([]), dtype=object)
self._cache = (start, end, holidays.sort_index())
holidays = self._cache[2]
holidays = holidays[start:end]
if return_name:
return holidays
else:
return holidays.index
| 1.492188 | 1 |
src/mimic_preproc/extract-scripts/get_cohort_baseline_info.py | aminzadenoori/POPCORN-POMDP | 6 | 12774076 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
File to build out a cohort using the ADMISSIONS, ICUSTAYS, and PATIENTS tables in raw mimic data.
@author: josephfutoma
"""
import numpy as np
import pandas as pd
import os
from datetime import datetime
import pickle
from time import time
PATH_TO_REPO = "XXX"
### options for building out cohort.
limit_first_ICU = True # do we limit to first ICU stay if there are several in an admission?
limit_first_admission = False #limit to first admission only?
age_thresh = 18 #min age to be included
LOS_lower_thresh = 12#LOS must be at least this long to be included; exclude very short stays
# load in stuff
mv_icuids_dat = pd.read_csv(PATH_TO_REPO+'query-data/mv_icuids.csv')
#may be many icu stays for one admission
icustays_dat = pd.read_csv(PATH_TO_REPO+'raw-data/ICUSTAYS.csv',
usecols=['SUBJECT_ID','HADM_ID','ICUSTAY_ID','FIRST_CAREUNIT',
'INTIME','OUTTIME'])
#may be many admissions for one patient
admissions_dat = pd.read_csv(PATH_TO_REPO+'raw-data/ADMISSIONS.csv',
usecols=['SUBJECT_ID','HADM_ID','ADMITTIME','DISCHTIME','DEATHTIME','HOSPITAL_EXPIRE_FLAG',
'ADMISSION_TYPE','ADMISSION_LOCATION','DISCHARGE_LOCATION',
'INSURANCE','LANGUAGE','RELIGION','MARITAL_STATUS','ETHNICITY'])
patients_dat = pd.read_csv(PATH_TO_REPO+'raw-data/PATIENTS.csv',
usecols=['SUBJECT_ID','GENDER','DOB','DOD','DOD_HOSP','DOD_SSN'])
### merge these all together
cohort_dat = patients_dat.merge(admissions_dat,'inner',on='SUBJECT_ID')
cohort_dat = cohort_dat.merge(icustays_dat,'inner',on=['SUBJECT_ID','HADM_ID'])
cohort_dat = mv_icuids_dat.merge(cohort_dat,'left',on='ICUSTAY_ID')
# NOTE: not entirely sure what the difference is between these death dates...
# mimic documentation claims HOSPITAL_EXPIRE_FLAG marks in-hospital mortality, not
# sure what concordance among these deaths is...
cohort_dat = cohort_dat.loc[:,['ICUSTAY_ID','HADM_ID','SUBJECT_ID',
'INTIME','OUTTIME','ADMITTIME','DISCHTIME','DOB','DOD','DOD_HOSP',
'DOD_SSN','DEATHTIME','HOSPITAL_EXPIRE_FLAG','GENDER','ADMISSION_TYPE','ADMISSION_LOCATION',
'DISCHARGE_LOCATION','INSURANCE','LANGUAGE','RELIGION','MARITAL_STATUS',
'ETHNICITY','FIRST_CAREUNIT']]
cohort_dat['INTIME'] = pd.to_datetime(cohort_dat['INTIME'])
cohort_dat['OUTTIME'] = pd.to_datetime(cohort_dat['OUTTIME'])
cohort_dat['ADMITTIME'] = pd.to_datetime(cohort_dat['ADMITTIME'])
cohort_dat['DISCHTIME'] = pd.to_datetime(cohort_dat['DISCHTIME'])
cohort_dat['DOB'] = pd.to_datetime(cohort_dat['DOB'])
cohort_dat['DOD'] = pd.to_datetime(cohort_dat['DOD'])
cohort_dat['DOD_HOSP'] = pd.to_datetime(cohort_dat['DOD_HOSP'])
cohort_dat['DOD_SSN'] = pd.to_datetime(cohort_dat['DOD_SSN'])
cohort_dat['DEATHTIME'] = pd.to_datetime(cohort_dat['DEATHTIME'])
cohort_dat['LOS'] = (cohort_dat['OUTTIME']-cohort_dat['INTIME']).dt.total_seconds()/60/60 #in hrs
#get age in years...super janky, i will never figure out datetimes...
cohort_dat['AGE'] = [x.total_seconds()/60/60/24/365.2422 for x in (np.array(cohort_dat['ADMITTIME'].dt.date) - np.array(cohort_dat['DOB'].dt.date))]
cohort_dat = cohort_dat.sort_values(by=["ICUSTAY_ID"])
# should be 23,386 ICU stays so far
#get the ICU with earliest INTIME for each HADM_ID
if limit_first_ICU:
first_icu_stays_dat = cohort_dat.loc[:,['HADM_ID','INTIME']].groupby(['HADM_ID']).min().reset_index()
cohort_dat = cohort_dat.merge(first_icu_stays_dat,'right',on=['HADM_ID','INTIME'])
#if doing this, should be at 21,876 now
if limit_first_admission:
first_adm_dat = cohort_dat.loc[:,['SUBJECT_ID','ADMITTIME']].groupby(['SUBJECT_ID']).min().reset_index()
cohort_dat = cohort_dat.merge(first_adm_dat,'right',on=['SUBJECT_ID','ADMITTIME'])
#if doing this and also did first ICU stay, should be at 17,678 now.
#however, i'm not sure how necessary it is to do this....ask Leo...?
#TODO may be worth doing some EDA here
#FILTER TO ADULTS
cohort_dat = cohort_dat.loc[cohort_dat['AGE']>=age_thresh,:]
#FILTER ON LOS
cohort_dat = cohort_dat.loc[cohort_dat['LOS']>=LOS_lower_thresh,:]
#should be at 21583 ICU stays, if limit to first ICU stay, do not limit to first admission, age>=18, LOS>=12
cohort_dat.to_csv(PATH_TO_REPO+"query-data/cohort.csv",index=False)
| 2.703125 | 3 |
train.py | rodrigoduranna/frogsounds | 0 | 12774077 | <filename>train.py
import librosa
import numpy as np
import time
import glob
import os
import matplotlib.pyplot as plt
comeco = time.time()
print("Extraindo caracteristicas ...")
# extract the features of a single sound file
def extract_feature(file_name):
    """Extract audio features from a single sound file.

    Returns a tuple ``(mfccs, chroma, mel, contrast, tonnetz)``; each element
    is the corresponding feature averaged over time (mean along frames).
    """
    X, sample_rate = librosa.load(file_name) # samples and sample rate of the file
    stft = np.abs(librosa.stft(X)) # magnitude of the short-time Fourier transform
    mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T,axis=0) # mean of 40 MFCC coefficients
    chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T,axis=0) # mean chromagram of the STFT
    mel = np.mean(librosa.feature.melspectrogram(X, sr=sample_rate).T,axis=0) # mean mel spectrogram
    contrast = np.mean(librosa.feature.spectral_contrast(S=stft, sr=sample_rate).T,axis=0) # mean spectral contrast of the STFT
    tonnetz = np.mean(librosa.feature.tonnetz(y=librosa.effects.harmonic(X), sr=sample_rate).T,axis=0) # mean tonnetz of the harmonic component
    return mfccs,chroma,mel,contrast,tonnetz
#procura todos os arquviso com a extensao .wav dentro de uma pasta e extrai as caracteristicas de cada arquivo
def parse_audio_files(parent_dir, sub_dirs, file_ext='*.wav'):
    """Walk each ``parent_dir/sub_dir`` folder and extract features for every
    matching sound file.

    Parameters
    ----------
    parent_dir : str
        Root directory containing the class sub-directories.
    sub_dirs : iterable of str
        Sub-directories to scan.
    file_ext : str
        Glob pattern of files to process (default ``'*.wav'``).

    Returns
    -------
    (features, labels) : tuple of numpy arrays
        ``features`` has shape (n_files, 193); ``labels`` holds the integer
        class parsed from each file name.
    """
    features, labels = np.empty((0, 193)), np.empty(0)
    for label, sub_dir in enumerate(sub_dirs):
        for fn in glob.glob(os.path.join(parent_dir, sub_dir, file_ext)):
            print("Extraindo caracteristicas de ", fn)
            try:
                # features of one sound file, stacked into a single row
                mfccs, chroma, mel, contrast, tonnetz = extract_feature(fn)
                ext_features = np.hstack([mfccs, chroma, mel, contrast, tonnetz])
                features = np.vstack([features, ext_features])
                # NOTE(review): assumes POSIX separators and the layout
                # parent/sub/NN-label-*.wav -- confirm before running on Windows
                labels = np.append(labels, fn.split('/')[2].split('-')[1])
            except Exception:
                # was a bare `except:`, which also swallowed KeyboardInterrupt;
                # keep the best-effort skip but only for real errors
                print("Erro ao processar o arquivo", fn)
    # `np.int` was removed in NumPy 1.24; the builtin `int` is the replacement
    return np.array(features), np.array(labels, dtype=int)
#One-hot encoding is a matrix-filling technique where each row has exactly one element set to 1 and all the others set to 0
def one_hot_encode(labels):
    """One-hot encode an array of integer class labels.

    Each row of the result has a single 1 in the column corresponding to the
    sample's label and 0 elsewhere.

    Parameters
    ----------
    labels : array-like of int
        Class label for each sample.  Any set of integers works; labels no
        longer need to be the contiguous range 1..k.

    Returns
    -------
    numpy.ndarray of shape (n_samples, n_unique_labels)
    """
    labels = np.asarray(labels)
    n_labels = len(labels)
    # Map each label to its column index.  The original `labels - 1` indexing
    # raised IndexError for non-contiguous labels (e.g. [1, 3]) and silently
    # wrapped to the last column for label 0.
    unique_labels, columns = np.unique(labels, return_inverse=True)
    one_hot_encode = np.zeros((n_labels, len(unique_labels)))
    one_hot_encode[np.arange(n_labels), columns] = 1
    return one_hot_encode
#pasta onde estão os arquivos a serem categorizados
parent_dir = 'FrogData'
#sub_dirs = ['fold1','fold2','fold3']
#sub diretorios a serem pesquisados
sub_dirs = ['frogs']
print("Lendo arquivos de som ...")
features, labels = parse_audio_files(parent_dir,sub_dirs)
print("Codificando caracteristicas ...")
labels = one_hot_encode(labels)
#Cria os conjuntos de treinamento e teste, a partir dos arquivos lidos. 70% para treinamento, 30% para teste.
train_test_split = np.random.rand(len(features)) < 0.70
train_x = features[train_test_split]
train_y = labels[train_test_split]
test_x = features[~train_test_split]
test_y = labels[~train_test_split]
print("Treinando a rede neural ...")
import tensorflow as tf
from sklearn.metrics import precision_recall_fscore_support
training_epochs = 5000 #numero de iteracoes para o treinamento ...
n_dim = features.shape[1]
n_classes = 10 #numero final de sapos a serem classificados
n_hidden_units_one = 280 #numero de neuronios na 1a camada
n_hidden_units_two = 300 #numero de neuronios na 2a camada
sd = 1 / np.sqrt(n_dim)
learning_rate = 0.01 #taxa de aprendizado da funcao de custo.
#inserindo valores padrao nas variaveis
X = tf.placeholder(tf.float32,[None,n_dim]) # input
Y = tf.placeholder(tf.float32,[None,n_classes]) #output
#Weight , bias da cadamada 1
W_1 = tf.Variable(tf.random_normal([n_dim,n_hidden_units_one], mean = 0, stddev=sd))
b_1 = tf.Variable(tf.random_normal([n_hidden_units_one], mean = 0, stddev=sd))
h_1 = tf.nn.tanh(tf.matmul(X,W_1) + b_1)
#Weight , bias da cadamada 2
W_2 = tf.Variable(tf.random_normal([n_hidden_units_one,n_hidden_units_two], mean = 0, stddev=sd))
b_2 = tf.Variable(tf.random_normal([n_hidden_units_two], mean = 0, stddev=sd))
h_2 = tf.nn.sigmoid(tf.matmul(h_1,W_2) + b_2)
#Weight , bias da cadamada de saida
W = tf.Variable(tf.random_normal([n_hidden_units_two,n_classes], mean = 0, stddev=sd))
b = tf.Variable(tf.random_normal([n_classes], mean = 0, stddev=sd))
y_ = tf.nn.softmax(tf.matmul(h_2,W) + b)
init = tf.global_variables_initializer()
#objeto para salvar o modelo de treinamento em disco apos finalizar o treinamento.
saver = tf.train.Saver()
print("Calculando a funcao de custo ...")
cost_function = tf.reduce_mean(-tf.reduce_sum(Y * tf.log(y_), reduction_indices=[1]))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost_function)
correct_prediction = tf.equal(tf.argmax(y_,1), tf.argmax(Y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
#vetor com os valores calculados da função de custo
cost_history = np.empty(shape=[1],dtype=float)
y_true, y_pred = None, None
print("Iniciando sessao do TensorFlow ...")
with tf.Session() as sess:
sess.run(init)
for epoch in range(training_epochs): #5000 iteracoes para treinamento...
print("Epoch", epoch)
_,cost = sess.run([optimizer,cost_function],feed_dict={X:train_x,Y:train_y}) #calcula a função de custo de acordo com a entrada e armazenada na saida os valores calculados
cost_history = np.append(cost_history,cost) #guarda o valor calculado para plotar a funcao de custo depois
y_pred = sess.run(tf.argmax(y_,1),feed_dict={X: test_x})
y_true = sess.run(tf.argmax(test_y,1))
save_path = saver.save(sess, "model/graph.ckpt")
print("Model saved in file: %s" % save_path)
fim = time.time()
print(" Plotando a funcao de custo ...")
fig = plt.figure(figsize=(10,8))
plt.plot(cost_history)
plt.ylabel("Custo")
plt.xlabel("Iteracoes")
plt.axis([0,training_epochs,0,np.max(cost_history)])
plt.show()
p,r,f,s = precision_recall_fscore_support(y_true, y_pred, average='micro')
print ("Precisao (F-Score):", round(f,3))
print("Tempo de processamento", fim-comeco) | 2.65625 | 3 |
amy/workshops/migrations/0186_extend_Curriculum.py | code-review-doctor/amy | 53 | 12774078 | <filename>amy/workshops/migrations/0186_extend_Curriculum.py<gh_stars>10-100
# Generated by Django 2.1.7 on 2019-07-18 15:37
from django.db import migrations, models
import django.db.models.deletion
def extend_current_Curricula(apps, schema_editor):
"""Update existing Curricula with new fields."""
Curriculum = apps.get_model('workshops', 'Curriculum')
data = {
'swc-other': dict(
carpentry='SWC',
description='Software Carpentry (other)',
other=True,
),
'dc-other': dict(
carpentry='DC',
description='Data Carpentry (other)',
other=True,
),
'lc': dict(
carpentry='LC',
description='Library Carpentry (Intro to Data, Unix Shell, Git, and/or OpenRefine)',
other=False,
),
'dc-geospatial': dict(
carpentry='DC',
description='Data Carpentry (Geospatial)',
other=False,
),
'swc-python': dict(
carpentry='SWC',
description='Software Carpentry (shell, git, Python)',
other=False,
),
'dc-genomics': dict(
carpentry='DC',
description='Data Carpentry (Genomics)',
other=False,
),
'dc-socsci-r': dict(
carpentry='DC',
description='Data Carpentry (Social Sciences with R)',
other=False,
),
'dc-ecology-r': dict(
carpentry='DC',
description='Data Carpentry (Ecology with R)',
other=False,
),
'swc-es-r': dict(
carpentry='SWC',
description='Software Carpentry in Spanish (shell, git, R)',
other=False,
),
'dc-socsci-python': dict(
carpentry='DC',
description='Data Carpentry (Social Sciences with Python)',
other=False,
),
'unknown': dict(
carpentry='',
description="Don't know yet",
other=False,
),
'dc-ecology-python': dict(
carpentry='DC',
description='Data Carpentry (Ecology with Python)',
other=False,
),
'swc-es-python': dict(
carpentry='SWC',
description='Software Carpentry in Spanish (shell, git, Python)',
other=False,
),
'swc-r': dict(
carpentry='SWC',
description='Software Carpentry (shell, git, R)',
other=False,
),
'lc-other': dict(
carpentry='LC',
description='Library Carpentry (other)',
other=True,
),
}
for slug, defaults in data.items():
Curriculum.objects.filter(slug=slug).update(**defaults)
class Migration(migrations.Migration):
    """Add ``carpentry``, ``description`` and ``other`` columns to Curriculum,
    then backfill the existing rows via ``extend_current_Curricula``."""
    dependencies = [
        ('workshops', '0185_add_InfoSource'),
    ]
    operations = [
        # new columns are all blank-able with defaults, so existing rows stay valid
        migrations.AddField(
            model_name='curriculum',
            name='carpentry',
            field=models.CharField(blank=True, choices=[('SWC', 'Software Carpentry'), ('DC', 'Data Carpentry'), ('LC', 'Library Carpentry'), ('', 'unspecified / irrelevant')], default='', max_length=5, verbose_name='Which Carpentry does this curriculum belong to?'),
        ),
        migrations.AddField(
            model_name='curriculum',
            name='description',
            field=models.TextField(blank=True, default='', help_text='You can enter Markdown. It will be shown as a hover or popup over the curriculum entry on forms.', max_length=300, verbose_name='Curriculum longer description'),
        ),
        migrations.AddField(
            model_name='curriculum',
            name='other',
            field=models.BooleanField(blank=True, default=False, help_text="Mark this curriculum record as '*Other' (eg. 'SWC Other', 'DC Other', or simply 'Other')", verbose_name="Field marked as 'Other'"),
        ),
        # data migration: populate the new columns on known curriculum slugs
        migrations.RunPython(extend_current_Curricula),
    ]
| 2.0625 | 2 |
function/python/brightics/function/statistics/mann_whitney_test.py | parkjh80/studio | 1 | 12774079 | """
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from brightics.common.repr import BrtcReprBuilder, strip_margin, plt2MD, \
pandasDF2MD, keyValues2MD
from brightics.common.groupby import _function_by_group
from brightics.common.utils import check_required_parameters
from brightics.common.utils import get_default_from_parameters_if_required
from brightics.common.validation import validate
from brightics.common.validation import from_under
import pandas as pd
import numpy as np
from scipy.stats import mannwhitneyu
import itertools
def mann_whitney_test(table, group_by=None, **params):
    """Public entry point for the Mann-Whitney U test.

    Validates the keyword parameters, fills in their defaults, then runs the
    test once on ``table`` — or once per group when ``group_by`` is given.
    """
    check_required_parameters(_mann_whitney_test, params, ['table'])
    params = get_default_from_parameters_if_required(params, _mann_whitney_test)
    if group_by is None:
        return _mann_whitney_test(table, **params)
    return _function_by_group(_mann_whitney_test, table, group_by=group_by, **params)
def _mann_whitney_test(table, response_col, factor_col, use_continuity=True):
    """Run pairwise Mann-Whitney U tests between every pair of factor levels.

    Parameters
    ----------
    table : DataFrame-like
        Input data.
    response_col : str
        Column holding the numeric response values.
    factor_col : str
        Column holding the grouping factor.
    use_continuity : bool
        Passed through to ``scipy.stats.mannwhitneyu``.

    Returns a dict with one ``'<name1>_<name2>'`` entry per level pair
    (each holding the U statistic and p-value) plus a markdown report
    under ``'_repr_brtc_'``.
    """
    result = dict()
    rb = BrtcReprBuilder()
    rb.addMD("""## Mann Whitney test Result""")
    # split the response values by factor level
    groups = dict()
    uniq_factor = table[factor_col].unique()
    for name in uniq_factor:
        groups[name] = np.array(table[response_col])[np.where(table[factor_col] == name)]
    # one U test per unordered pair of levels
    for name1, name2 in itertools.combinations(uniq_factor, 2):
        stats, pval = mannwhitneyu(groups[name1], groups[name2], use_continuity=use_continuity)
        rb.addMD(strip_margin("""
        | ## {name1} vs {name2}
        |
        | ### Statistics U value: {stats}
        |
        | ### P value: {pval}
        """.format(name1=name1, name2=name2, stats=stats, pval=pval)))
        name = str(name1) + '_' + str(name2)
        result[name] = dict()
        result[name]['Statistics'] = stats
        result[name]['P value'] = pval
    result['_repr_brtc_'] = rb.get()
    return {'result': result}
| 1.898438 | 2 |
pages/home_page.py | aelnahas/automation-quandl | 0 | 12774080 | from common.page_object import PageObject, PageNotLoaded
from pages.footer import Footer
from pages.locators import HomePageLocators
from pages.signin_page import SigninPage
from pages.top_bar import TopBarNav
class HomePage(PageObject):
    """ Quandl's home page object """
    def is_loaded(self):
        """The home page is loaded if the hero element is visible"""
        return self._is_loaded_helper(HomePageLocators.HOME_HERO)
    def _check_page(self, page_object, name):
        """Raise PageNotLoaded if a sub-page object failed to load

        :param page_object: the page object to check
        :param name: human-readable name used in the error message
        """
        if not page_object.is_loaded():
            raise PageNotLoaded("{} was not loaded on home page".format(name))
    @property
    def top_bar(self):
        """The top bar navigation page object"""
        # check to see if the top bar is loaded, return the page object if it is
        top_bar = TopBarNav(self._webdriver)
        self._check_page(top_bar, "Top bar navigation")
        return top_bar
    @property
    def footer(self):
        """The footer page object found on the home page"""
        # check to see if the footer is visible and return it if so
        footer = Footer(self._webdriver)
        self._check_page(footer, "Footer")
        return footer
    def navigate_to_sign_in(self):
        """Open the sign-in page via the top bar navigation"""
        self.top_bar.navigate_to_signin()
    def sign_in(self, username, password):
        """sign a user in
        :param username: username string
        :param password: password string
        """
        # fill in the form and submit it
        self.top_bar.navigate_to_signin()
        sign_in = SigninPage(self._webdriver)
        sign_in.username = username
        sign_in.password = password
        sign_in.log_in()
    def logout(self):
        """Log out of a user account"""
        self.top_bar.logout()
python/htcrack/htcrack.py | Ethic41/codes | 2 | 12774081 | <reponame>Ethic41/codes
import argparse
import os
from bs4 import BeautifulSoup as bs
import requests
def main():
    """Parse command-line arguments and start the cracking run."""
    parser = argparse.ArgumentParser()
    # NOTE: the original used `type=file`, a Python 2 builtin that raises
    # NameError on Python 3.  opener() re-opens the paths itself, so plain
    # string paths (argparse's default) are what we actually need.
    parser.add_argument("-u", "--usernames", help="specify a file containing list of usernames", required=True)
    parser.add_argument("-p", "--passwords", help="specify a file containing list of passwords", required=True)
    args = parser.parse_args()
    opener(args.usernames, args.passwords)
# opener: as the name implies, read the username and password wordlists
# into memory, then hand them to the cracking loop.
def opener(user_list, pass_list):
    """Load the two wordlist files and delegate to cracker().

    :param user_list: path to a file with one username per line
    :param pass_list: path to a file with one password per line
    """
    # splitlines() drops the trailing newlines that readlines() kept;
    # those newlines corrupted both the credentials tried and the
    # user::password lines written by cracker().
    with open(user_list, "r") as f:
        usernames = f.read().splitlines()  # the list of usernames
    with open(pass_list, "r") as f:
        passwords = f.read().splitlines()  # the list of passwords
    cracker(usernames, passwords)
def cracker(usernames, passwords):
    """Try every password against every username still uncracked.

    Working credentials are appended to ``cracked_cred.dmd`` as
    ``user::password`` lines; a cracked user is removed from further tries.
    Relies on ``checker(user, password)`` defined elsewhere in this module.
    """
    with open("cracked_cred.dmd", "a") as f:
        print("Cracking...")
        for psswd in passwords:
            # iterate over a snapshot: removing an element from the list
            # being iterated makes Python silently skip the next username
            for user in list(usernames):
                if checker(user, psswd):
                    f.write(user + "::" + psswd + "\n")
                    print("found password for " + user + " ==> " + psswd)
                    usernames.remove(user)
        print("Exhausted the list of passwords")
| 3.328125 | 3 |
cddm/_core_nb.py | IJSComplexMatter/cddm | 4 | 12774082 | <reponame>IJSComplexMatter/cddm
"""
Low level numba functions
"""
from __future__ import absolute_import, print_function, division
import numpy as np
import numba as nb
from cddm.conf import C,F, I64, NUMBA_TARGET, NUMBA_FASTMATH, NUMBA_CACHE
from cddm.fft import _fft, _ifft
from cddm.decorators import doc_inherit
#Some useful functions
@nb.vectorize([F(C)], target = "cpu", cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _abs2(x):
"""Absolute square of data"""
return x.real*x.real + x.imag*x.imag
@nb.vectorize([F(C)], target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def abs2(x):
"""Absolute square of data"""
return x.real*x.real + x.imag*x.imag
@nb.vectorize([F(F,F),C(C,C)], target = "cpu", cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _mean(a,b):
    """Mean value of the two arguments (CPU-target variant)."""
    return 0.5 * (a+b)
@nb.vectorize([F(F,F),C(C,C)], target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def mean(a,b):
    """Mean value of the two arguments."""
    return 0.5 * (a+b)
@nb.vectorize([F(F,F),C(C,C)], target = "cpu", cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _choose(a,b):
"""Chooses data randomly"""
r = np.random.rand()
if r >= 0.5:
return a
else:
return b
@nb.vectorize([F(F,F),C(C,C)], target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def choose(a,b):
"""Chooses data randomly"""
r = np.random.rand()
if r >= 0.5:
return a
else:
return b
@nb.guvectorize([(F[:],F[:]),(C[:],C[:])],"(m)->(m)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def convolve(a, out):
    """Convolves input array with kernel [0.25,0.5,0.25].

    The first and last samples are passed through unchanged; at least 3
    samples are required.
    """
    n = len(out)
    assert n > 2
    result = a[0]
    for i in range(1,n-1):
        # NOTE(review): each out[i-1] is written with the previous
        # iteration's value before a[i-1] is needed again -- presumably to
        # tolerate `out` aliasing `a`; confirm before relying on in-place use
        out[i-1] = result
        result = 0.25*(a[i-1]+2*a[i]+a[i+1])
        out[i] = result
    out[-1] = a[-1]
# @nb.guvectorize([(F[:],F[:]),(C[:],C[:])],"(m)->(m)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
# def convolve(a, out):
# """Convolves input array with kernel [0.25,0.5,0.25]"""
# n = len(out)
# assert n > 2
# for i in range(n):
# out[i] = a[i]
@nb.guvectorize([(F[:],F[:],F[:],F[:]),(F[:],F[:],C[:],C[:])],"(n),(m),(m)->(n)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def interpolate(x_new, x,y,out):
"""Linear interpolation"""
assert len(x) >= 2
for i in range(len(x_new)):
xi = x_new[i]
for j in range(1,len(x)):
x0 = x[j-1]
x1 = x[j]
if xi <= x1:
#interpolate or extrapolate backward
deltay = y[j] - y[j-1]
deltax = x1 - x0
out[i] = (xi - x0) * deltay/ deltax + y[j-1]
break
#extrapolate forward
if xi > x1:
deltay = y[-1] - y[-2]
deltax = x1 - x0
out[i] = (xi - x0) * deltay/deltax + y[-2]
@nb.guvectorize([(I64[:],I64[:],F[:],F[:]),(I64[:],I64[:],C[:],C[:])],"(n), (m),(m)->(n)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _log_interpolate(x_new, x,y, out):
"""Linear interpolation in semilogx space."""
assert len(x) >= 2
for i in range(len(x_new)):
xi = x_new[i]
log = False
if xi > 1:
xi = np.log(xi)
log = True
for j in range(1,len(x)):
x0 = x[j-1]
x1 = x[j]
if x0 >= 1 and log == True:
x0 = np.log(x0)
x1 = np.log(x1)
if x_new[i] <= x[j]:
#interpolate or extrapolate backward
deltay = y[j] - y[j-1]
deltax = x1-x0
out[i] = (xi - x0) * deltay / deltax + y[j-1]
break
#extrapolate forward for data points outside of the domain
if xi > x1:
deltay = y[-1] - y[-2]
deltax = x1 - x0
out[i] = (xi - x0) * deltay/deltax + y[-2]
def log_interpolate(x_new, x,y, out = None):
    """Linear interpolation in semilogx space."""
    # wrapped to suppress a spurious divide-by-zero warning, numba issue #4793
    with np.errstate(divide='ignore'):
        return _log_interpolate(x_new, x,y, out)
# expose the guvectorized implementation's docstring on the wrapper
log_interpolate.__doc__ = _log_interpolate.__doc__
@nb.guvectorize([(F[:],F[:])],"(n)->(n)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _median(array, out):
"""Performs median filter."""
n = len(array)
assert n > 2
result_out = array[0]
out[-1] = array[-1]
for i in range(1,n-1):
if array[i] < array[i+1]:
if array[i] < array[i-1]:
result = min(array[i+1],array[i-1])
else:
result = array[i]
else:
if array[i] < array[i-1]:
result = array[i]
else:
result = max(array[i+1],array[i-1])
out[i-1] = result_out
result_out = result
out[i] = result_out
#out[n-1] = result_out
#out[0] = out[1]
def median(array, out = None):
    """Performs median filter of complex or float data.

    For complex input the real and imaginary parts are filtered
    independently with the length-3 running median ``_median``; for real
    input ``_median`` is applied directly.
    """
    array = np.asarray(array)
    if np.iscomplexobj(array):
        if out is None:
            out = np.empty_like(array)
        # filter the two channels in place through the real/imag views of out
        _median(array.real, out.real)
        _median(array.imag, out.imag)
        return out
    else:
        return _median(array, out)
@nb.vectorize([F(F,F,F)], target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _weighted_sum_real(x, y, weight):
return x * weight + (1.- weight) * y
@nb.vectorize([C(C,C,C)], target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _weighted_sum_complex(x, y, weight):
real = x.real * weight.real + (1.- weight.real) * y.real
imag = x.imag * weight.imag + (1.- weight.imag) * y.imag
return real + 1j * imag
def weighted_sum(x, y, weight, out = None):
    """Performs weighted sum of two data sets, given the weight data.
    Weight must be normalized between 0 and 1. Performs:
    `x * weight + (1.- weight) * y`
    """
    # dispatch on the weight dtype: for complex weights the real and
    # imaginary channels are mixed independently (see _weighted_sum_complex)
    if np.iscomplexobj(weight):
        return _weighted_sum_complex(x, y, weight, out)
    else:
        return _weighted_sum_real(x, y, weight, out)
@nb.guvectorize([(F[:],F[:])],"(n)->(n)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _median_slow(array, out):
"""Performs median filter. slow implementation... for testing"""
n = len(array)
assert n > 2
for i in range(1,n-1):
median = np.sort(array[i-1:i+2])[1]
out[i] = median
out[0] = array[0]
out[-1] = array[-1]
#out[0] = out[1]
#out[n-1] = out[n-2]
@nb.guvectorize([(F[:],F[:])],"(n)->(n)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def decreasing(array, out):
"""Performs decreasing filter. Each next element must be smaller or equal"""
n = len(array)
for i in range(n):
if i == 0:
out[0] = array[0]
else:
if array[i] < out[i-1] or np.isnan(out[i-1]):
out[i] = array[i]
else:
out[i] = out[i-1]
@nb.guvectorize([(F[:],F[:])],"(n)->(n)", target = "cpu", cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def increasing(array,out):
"""Performs increasing filter. Each next element must be greater or equal"""
n = len(array)
for i in range(1,n):
if i == 0:
out[0] = array[0]
else:
if array[i] > out[i-1] or np.isnan(out[i-1]):
out[i] = array[i]
else:
out[i] = out[i-1]
#------------------------------------------------
# low level numba-optimized computation functions
#------------------------------------------------
@nb.jit([(C[:],C[:], F[:])], nopython = True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_corr_add_vec(xv,yv, out):
for j in range(xv.shape[0]):
x = xv[j]
y = yv[j]
#calculate cross product
tmp = x.real * y.real + x.imag * y.imag
#add
out[j] = out[j] + tmp
@nb.jit([(C[:],C[:], C[:])], nopython = True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_corr_complex_add_vec(xv,yv, out):
for j in range(xv.shape[0]):
x = xv[j]
y = yv[j]
#calculate cross product
tmp = y * np.conj(x)
#add
out[j] = out[j] + tmp
@nb.jit([(C[:],C[:]), (F[:],F[:])], nopython = True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _add_vec(x, out):
for j in range(x.shape[0]):
out[j] = out[j] + x[j]
@nb.jit([(C[:],C[:],F[:])], nopython = True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _add_squaresum_vec(x, y, out):
for j in range(x.shape[0]):
xx = x[j]
yy = y[j]
tmp = xx.real * xx.real + xx.imag* xx.imag
tmp = tmp + yy.real * yy.real + yy.imag * yy.imag
out[j] = out[j] + tmp
@nb.jit([(C[:],C[:],F[:])], nopython = True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _add_stats_vec(x, out1, out2):
for j in range(x.shape[0]):
out1[j] = out1[j] + x[j]
out2[j] = out2[j] + x[j].real * x[j].real + x[j].imag * x[j].imag
@nb.guvectorize([(C[:,:],C[:],F[:])],"(m,n)->(n),(n)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _calc_stats_vec(f,out1, out2):
for i in range(f.shape[0]):
_add_stats_vec(f[i],out1,out2)
@nb.guvectorize([(C[:,:],C[:,:],I64[:],I64[:],F[:,:],F[:,:])],"(l,k),(n,k),(l),(n),(m,k)->(m,k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_corr_vec(f1,f2,t1,t2,dummy,out):
for i in range(f1.shape[0]):
for j in range(f2.shape[0]):
m=abs(t2[j]-t1[i])
if m < out.shape[0]:
_cross_corr_add_vec(f1[i],f2[j], out[m])
@nb.guvectorize([(C[:,:],C[:,:],I64[:],I64[:],C[:,:],C[:,:])],"(l,k),(n,k),(l),(n),(m,k)->(m,k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_corr_complex_vec(f1,f2,t1,t2,dummy,out):
for i in range(f1.shape[0]):
for j in range(f2.shape[0]):
m=t2[j]-t1[i]
if abs(m) < (out.shape[0]+1)//2:
_cross_corr_complex_add_vec(f1[i],f2[j], out[m])
@nb.guvectorize([(C[:],C[:],I64[:],I64[:],F[:],F[:])],"(m),(n),(m),(n),(k)->(k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_corr(x,y,t1,t2,dummy,out):
for i in range(x.shape[0]):
for j in range(y.shape[0]):
#m = abs(j-i)
m=abs(t2[j]-t1[i])
if m < out.shape[0]:
tmp = x[i].real * y[j].real + x[i].imag * y[j].imag
out[m] += tmp
@nb.guvectorize([(C[:],C[:],I64[:],I64[:],C[:],C[:])],"(m),(n),(m),(n),(k)->(k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_corr_complex(x,y,t1,t2,dummy,out):
for i in range(x.shape[0]):
for j in range(y.shape[0]):
m = t2[j]-t1[i]
if abs(m) < (out.shape[0]+1)//2:
tmp = np.conj(x[i]) *y[j]
out[m] += tmp
@nb.jit([(C[:],C[:], F[:])], nopython = True)
def _cross_corr_add(xv,yv, out):
for j in range(xv.shape[0]):
x = xv[j]
y = yv[j]
#calculate cross product
tmp = x.real * y.real + x.imag * y.imag
#add
out[0] = out[0] + tmp
@nb.jit([(C[:],C[:], C[:])], nopython = True)
def _cross_corr_complex_add(xv,yv, out):
for j in range(xv.shape[0]):
x = xv[j]
y = yv[j]
#calculate cross product
tmp = y * np.conj(x)
#add
out[0] = out[0] + tmp
@nb.guvectorize([(C[:],C[:],F[:],F[:])],"(n),(n),(k)->(k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_corr_regular(x,y,dummy,out):
for i in range(out.shape[0]):
n = x.shape[0] - i
_cross_corr_add(x[0:n],y[i:], out[i:i+1])
if i > 0:
_cross_corr_add(y[0:n],x[i:], out[i:i+1])
@nb.guvectorize([(C[:],C[:],C[:],C[:])],"(n),(n),(k)->(k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_corr_complex_regular(x,y,dummy,out):
for i in range((out.shape[0]+1)//2):
n = x.shape[0] - i
_cross_corr_complex_add(x[0:n],y[i:], out[i:i+1])
if i > 0:
j = out.shape[0] - i
_cross_corr_complex_add(x[i:], y[0:n], out[j:j+1])
@nb.guvectorize([(C[:],F[:],F[:])],"(n),(k)->(k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _auto_corr_regular(x,dummy,out):
for i in range(out.shape[0]):
n = x.shape[0] - i
_cross_corr_add(x[i:],x[0:n], out[i:i+1])
@nb.guvectorize([(C[:],C[:],C[:])],"(n),(k)->(k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _auto_corr_complex_regular(x,dummy,out):
for i in range(out.shape[0]):
n = x.shape[0] - i
_cross_corr_complex_add(x[0:n],x[i:], out[i:i+1])
@nb.guvectorize([(C[:,:],I64[:],F[:,:],F[:,:])],"(l,k),(l),(m,k)->(m,k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _auto_corr_vec(f,t,dummy,out):
    # Multi-channel real autocorrelation of irregularly sampled data;
    # `t` holds integer sample times, pairs falling outside the lag
    # range of `out` are skipped.
    for i in range(f.shape[0]):
        for j in range(i, f.shape[0]):
            m= abs(t[j]-t[i])
            if m < out.shape[0]:
                _cross_corr_add_vec(f[i],f[j], out[m])
            #else just skip calculation
@nb.guvectorize([(C[:,:],I64[:],C[:,:],C[:,:])],"(l,k),(l),(m,k)->(m,k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _auto_corr_complex_vec(f,t,dummy,out):
    # Multi-channel complex autocorrelation of irregular data.
    for i in range(f.shape[0]):
        for j in range(i, f.shape[0]):
            m= t[j]-t[i]
            if m >= 0:
                if m < out.shape[0]:
                    _cross_corr_complex_add_vec(f[i],f[j], out[m])
            else:
                m = abs(m)
                if m < out.shape[0]:
                    #negative tau, so store complex conjugate
                    _cross_corr_complex_add_vec(f[j],f[i], out[m])
@nb.guvectorize([(C[:],I64[:],F[:],F[:])],"(l),(l),(m)->(m)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _auto_corr(f,t,dummy,out):
    # Scalar (single-channel) real autocorrelation of irregular data.
    for i in range(f.shape[0]):
        for j in range(i, f.shape[0]):
            m= abs(t[j]-t[i])
            if m < out.shape[0]:
                # Re(conj(f[i]) * f[j])
                tmp = f[i].real * f[j].real + f[i].imag * f[j].imag
                out[m] += tmp
@nb.guvectorize([(C[:],I64[:],C[:],C[:])],"(l),(l),(m)->(m)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _auto_corr_complex(f,t,dummy,out):
    # Scalar complex autocorrelation of irregular data; negative lags are
    # accumulated as the conjugate of the swapped pair.
    for i in range(f.shape[0]):
        for j in range(i, f.shape[0]):
            m = t[j]-t[i]
            if m >= 0:
                if m < out.shape[0]:
                    tmp = np.conj(f[i]) * f[j]
                    out[m] += tmp
            else:
                m = abs(m)
                if m < out.shape[0]:
                    tmp = f[i] * np.conj(f[j])
                    out[m] += tmp
@nb.jit([(C[:],C[:], F[:])], nopython = True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_diff_add_vec(xv,yv, out):
    # Accumulate |x - y|^2 element-wise into `out` (structure-function term).
    for j in range(xv.shape[0]):
        x = xv[j]
        y = yv[j]
        #calculate cross product
        tmp = x-y
        d = tmp.real*tmp.real + tmp.imag*tmp.imag
        #add
        out[j] = out[j] + d
@nb.guvectorize([(C[:,:],C[:,:],I64[:],I64[:],F[:,:],F[:,:])],"(l,k),(n,k),(l),(n),(m,k)->(m,k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_diff_vec(f1,f2,t1,t2,dummy,out):
    # Multi-channel structure function of two irregularly sampled signals.
    for i in range(f1.shape[0]):
        for j in range(f2.shape[0]):
            m=abs(t2[j]-t1[i])
            if m < out.shape[0]:
                _cross_diff_add_vec(f2[j],f1[i], out[m])
            #else just skip calculation
@nb.guvectorize([(C[:],C[:],I64[:],I64[:],F[:],F[:])],"(m),(n),(m),(n),(k)->(k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_diff(x,y,t1,t2,dummy,out):
    # Scalar structure function of two irregularly sampled signals.
    for i in range(x.shape[0]):
        for j in range(y.shape[0]):
            #m = abs(j-i)
            m=abs(t2[j]-t1[i])
            if m < out.shape[0]:
                tmp = y[j]-x[i]
                d = tmp.real*tmp.real + tmp.imag*tmp.imag
                out[m] += d
@nb.jit([(C[:],C[:], F[:])], nopython = True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_diff_add(xv,yv, out):
    # Accumulate the summed |x - y|^2 of two vectors into a length-1 slot.
    for j in range(xv.shape[0]):
        tmp = xv[j] - yv[j]
        d = tmp.real*tmp.real + tmp.imag*tmp.imag
        out[0] += d
@nb.guvectorize([(C[:],C[:],F[:],F[:])],"(n),(n),(k)->(k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_diff_regular(x,y,dummy,out):
    # Structure function for regular data; |x-y|^2 is symmetric, so both
    # lag signs fold into the same bin.
    for i in range(out.shape[0]):
        n = x.shape[0] - i
        _cross_diff_add(y[i:],x[0:n], out[i:i+1])
        if i > 0:
            _cross_diff_add(y[0:n],x[i:], out[i:i+1])
@nb.guvectorize([(C[:,:],I64[:],F[:,:],F[:,:])],"(l,k),(l),(m,k)->(m,k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _auto_diff_vec(f,t,dummy,out):
    # Multi-channel auto structure function of irregular data.
    for i in range(f.shape[0]):
        for j in range(i,f.shape[0]):
            m=abs(t[j]-t[i])
            if m < out.shape[0]:
                _cross_diff_add_vec(f[j],f[i], out[m])
            #else just skip calculation
@nb.guvectorize([(C[:],F[:],F[:])],"(n),(k)->(k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _auto_diff_regular(x,dummy,out):
    # Auto structure function of a regular signal.
    for i in range(out.shape[0]):
        n = x.shape[0] - i
        _cross_diff_add(x[i:],x[0:n], out[i:i+1])
@nb.guvectorize([(C[:],I64[:],F[:],F[:])],"(l),(l),(m)->(m)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _auto_diff(f,t,dummy,out):
    # Scalar auto structure function of irregular data.
    for i in range(f.shape[0]):
        for j in range(i, f.shape[0]):
            m=abs(t[j]-t[i])
            if m < out.shape[0]:
                tmp = f[j] - f[i]
                d = tmp.real*tmp.real + tmp.imag*tmp.imag
                out[m] += d
@nb.guvectorize([(C[:,:],I64[:],I64[:],C[:,:],C[:,:]),(F[:,:],I64[:],I64[:],F[:,:],F[:,:])],"(l,k),(l),(n),(m,k)->(m,k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_sum_vec(f,t1,t2,dummy,out):
    # Per-lag sum of f over all (t1[i], t2[j]) pairs; used to build the
    # per-delay mean for normalization of irregular data.
    for i in range(t1.shape[0]):
        for j in range(t2.shape[0]):
            m = abs(t2[j]-t1[i])
            if m < out.shape[0]:
                _add_vec(f[i], out[m])
@nb.guvectorize([(C[:,:],I64[:],I64[:],C[:,:],C[:,:]),(F[:,:],I64[:],I64[:],F[:,:],F[:,:])],"(l,k),(l),(n),(m,k)->(m,k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_sum_complex_vec(f,t1,t2,dummy,out):
    # Same as _cross_sum_vec, but keeps the lag sign: a negative m indexes
    # from the end of `out` (FFT-style layout), hence the (m+1)//2 bound.
    for i in range(t1.shape[0]):
        for j in range(t2.shape[0]):
            m = t2[j]-t1[i]
            if abs(m) < (out.shape[0]+1)//2:
                _add_vec(f[i], out[m])
@nb.guvectorize([(C[:],I64[:],I64[:],C[:],C[:]),(F[:],I64[:],I64[:],F[:],F[:])],"(l),(l),(n),(m)->(m)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_sum(f,t1,t2,dummy,out):
    # Scalar version of _cross_sum_vec.
    for i in range(t1.shape[0]):
        for j in range(t2.shape[0]):
            m = abs(t2[j]-t1[i])
            if m < out.shape[0]:
                out[m] += f[i]
@nb.guvectorize([(C[:],I64[:],I64[:],C[:],C[:]),(F[:],I64[:],I64[:],F[:],F[:])],"(l),(l),(n),(m)->(m)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_sum_complex(f,t1,t2,dummy,out):
    # Scalar version of _cross_sum_complex_vec (signed-lag, wrapped layout).
    for i in range(t1.shape[0]):
        for j in range(t2.shape[0]):
            m = t2[j]-t1[i]
            if abs(m) < (out.shape[0]+1)//2:
                out[m] += f[i]
@nb.guvectorize([(C[:],C[:],C[:]), (F[:],F[:],F[:])],"(n),(k)->(k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_sum_regular(x,dummy,out):
    # Per-lag sum for regular data, computed with a sliding-window trick:
    # the full sum is formed once at i == 0 (doubled, because positive and
    # negative lags fold together) and then updated by subtracting the two
    # endpoints that leave the window at each subsequent lag.
    for i in range(out.shape[0]):
        n = x.shape[0] - i
        if i == 0:
            tmp = out[0]
            tmp = tmp*0.
            for j in range(x.shape[0]):
                tmp = tmp + x[j]
            prev = tmp*2
            out[0] += tmp
        if i > 0:
            prev = prev - x[i-1] - x[n]
            out[i] += prev
@nb.guvectorize([(C[:],C[:],C[:]), (F[:],F[:],F[:])],"(n),(k)->(k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_sum_complex_regular_inverted(x,dummy,out):
    # Signed-lag sliding-window sum; positive lags fill out[i], negative
    # lags fill out[-i] (FFT-style layout).  "Inverted" refers to which of
    # the two running sums lands in the positive-lag half (cf. the
    # non-inverted variant below, which swaps prev1/prev2 on output).
    for i in range((out.shape[0]+1)//2):
        n = x.shape[0] - i
        if i == 0:
            tmp = out[0]
            tmp = tmp*0.
            for j in range(x.shape[0]):
                tmp = tmp + x[j]
            prev1 = tmp
            prev2 = tmp
            out[0] += tmp
        if i > 0:
            prev1 = prev1 - x[i-1]
            prev2 = prev2 - x[n]
            out[i] += prev1
            out[-i] += prev2
@nb.guvectorize([(C[:],C[:],C[:]), (F[:],F[:],F[:])],"(n),(k)->(k)", target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_sum_complex_regular(x,dummy,out):
    # Same as the inverted variant above with prev1/prev2 swapped on output.
    for i in range((out.shape[0]+1)//2):
        n = x.shape[0] - i
        if i == 0:
            tmp = out[0]
            tmp = tmp*0.
            for j in range(x.shape[0]):
                tmp = tmp + x[j]
            prev1 = tmp
            prev2 = tmp
            out[0] += tmp
        if i > 0:
            prev1 = prev1 - x[i-1]
            prev2 = prev2 - x[n]
            out[i] += prev2
            out[-i] += prev1
@nb.guvectorize([(C[:],C[:],F[:],F[:]),(C[:],C[:],C[:],C[:])],"(n),(n),(k)->(k)", forceobj=True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_corr_fft_regular(x, y, dummy, out):
    # FFT-based cross-correlation of regular data.  Inputs are zero-padded
    # to twice their length so the circular convolution does not wrap.
    # Runs in object mode (forceobj) because it calls _fft/_ifft.
    out_length = len(dummy)
    if np.iscomplexobj(out):
        # complex output keeps signed lags (negative lags at the tail)
        out_length = (out_length+1)//2
    length = len(x)
    tmp1 = np.empty((length*2), x.dtype)
    tmp1[0:length] = x
    tmp1[length:] = 0.
    tmp2 = np.empty((length*2), y.dtype)
    tmp2[0:length] = y
    tmp2[length:] = 0.
    x = _fft(tmp1, overwrite_x = True)
    y = _fft(tmp2, overwrite_x = True)
    x = np.conj(x)*y
    _out = _ifft(x, overwrite_x = True)
    if np.iscomplexobj(out):
        out[:out_length] += _out[:out_length]
        out[-1:-out_length:-1] += _out[-1:-out_length:-1]
    else:
        # real output folds positive and negative lags together
        out[:] += _out[:out_length].real
        out[1:] += _out[-1:-out_length:-1].real
@nb.guvectorize([(C[:],F[:],F[:]),(C[:],C[:],C[:])],"(n),(k)->(k)", forceobj=True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _auto_corr_fft_regular(x, dummy, out):
    # FFT-based autocorrelation of regular data (zero-padded power spectrum).
    out_length = len(dummy)
    length = len(x)
    tmp = np.empty((length*2), x.dtype)
    tmp[0:length] = x
    tmp[length:] = 0.
    x = _fft(tmp, overwrite_x = True)
    x = x*np.conj(x)
    _out = _ifft(x, overwrite_x = True)
    if np.iscomplexobj(out):
        out[:] += _out[:out_length]
    else:
        out[:] += _out[:out_length].real
@nb.jit([(C[:],I64[:],C[:]),(F[:],I64[:],C[:])], nopython = True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _fill_data(x,t, out):
    # Scatter the samples x into `out` at their integer times t; samples
    # with times beyond len(out) are dropped.
    for i in range(t.shape[0]):
        m = t[i]
        if m < out.shape[0]:
            out[m] = x[i]
@nb.jit([(I64[:],C[:])], nopython = True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _fill_ones(t, out):
    # Mark occupied sample times with 1 (indicator array for irregular data).
    for i in range(t.shape[0]):
        m = t[i]
        if m < out.shape[0]:
            out[m] = 1.
@nb.guvectorize([(C[:],C[:],I64[:],I64[:],I64[:],F[:],F[:]),(C[:],C[:],I64[:],I64[:],I64[:],C[:],C[:])],"(n),(n),(n),(n),(),(k)->(k)", forceobj=True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_corr_fft(x, y, t1,t2,length, dummy, out):
    # FFT-based cross-correlation for irregularly sampled data: samples are
    # scattered onto a zero-filled, 2*length grid before transforming.
    out_length = len(dummy)
    if np.iscomplexobj(out):
        out_length = (out_length+1)//2
    tmp1 = np.zeros((length*2), x.dtype)
    _fill_data(x,t1, tmp1)
    #tmp1[list(t1)] = x
    tmp2 = np.zeros((length*2), y.dtype)
    _fill_data(y,t2, tmp2)
    #tmp2[list(t2)] = y
    x = _fft(tmp1, overwrite_x = True)
    y = _fft(tmp2, overwrite_x = True)
    np.conj(x, out = x)
    np.multiply(x,y, out = x)
    _out = _ifft(x, overwrite_x = True)
    if np.iscomplexobj(out):
        out[:out_length] += _out[:out_length]
        out[-1:-out_length:-1] += _out[-1:-out_length:-1]
    else:
        out[:] += _out[:out_length].real
        out[1:] += _out[-1:-out_length:-1].real
@nb.guvectorize([(C[:],I64[:],I64[:],F[:],F[:]),(C[:],I64[:],I64[:],C[:],C[:])],"(n),(n),(),(k)->(k)", forceobj=True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _auto_corr_fft(x, t,length, dummy, out):
    # FFT-based autocorrelation for irregularly sampled data.
    out_length = len(dummy)
    tmp = np.zeros((length*2), x.dtype)
    _fill_data(x,t, tmp)
    #tmp[list(t)] = x
    x = _fft(tmp, overwrite_x = True)
    y = np.conj(x)
    np.multiply(x,y, out = x)
    _out = _ifft(x, overwrite_x = True)
    if np.iscomplexobj(out):
        out[:] += _out[:out_length]
    else:
        out[:] += _out[:out_length].real
@nb.guvectorize([(C[:],C[:],I64[:],I64[:],F[:],F[:]), (C[:],C[:],I64[:],I64[:],C[:],C[:])],"(n),(m),(n),(),(k)->(k)", forceobj=True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_sum_fft(x, y,t,length, dummy, out):
    # Per-lag sum via FFT for irregular data.
    # NOTE(review): `y` (length m = 2*length) appears to be a precomputed
    # FFT of the other signal's occupancy/indicator array -- confirm
    # against the caller before relying on this.
    out_length = len(dummy)
    tmp1 = np.zeros((length*2), x.dtype)
    _fill_data(x,t, tmp1)
    x = _fft(tmp1, overwrite_x = True)
    np.multiply(np.conj(x),y, out = x)
    _out = _ifft(x, overwrite_x = True)
    if np.iscomplexobj(out):
        out[:] += np.conj(_out[:out_length])
        out[1:] += np.conj(_out[-1:-out_length:-1])
    else:
        out[:] += _out[:out_length].real
        out[1:] += _out[-1:-out_length:-1].real
@nb.guvectorize([(C[:],C[:],I64[:],I64[:],F[:],F[:]),(C[:],C[:],I64[:],I64[:],C[:],C[:])],"(n),(m),(n),(),(k)->(k)", forceobj=True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _cross_sum_complex_fft(x, y,t,length, dummy, out):
    # Signed-lag version of _cross_sum_fft; positive lags go to the head
    # of `out`, negative lags to the tail.
    out_length = len(dummy)
    out_length = (out_length+1)//2
    tmp1 = np.zeros((length*2), x.dtype)
    _fill_data(x,t, tmp1)
    x = _fft(tmp1, overwrite_x = True)
    np.multiply(np.conj(x),y, out = x)
    _out = _ifft(x, overwrite_x = True)
    if np.iscomplexobj(out):
        out[:out_length] += np.conj(_out[:out_length])
        out[-1:-out_length:-1] += np.conj(_out[-1:-out_length:-1])
    else:
        out[:out_length] += _out[:out_length].real
        out[-1:-out_length:-1] += _out[-1:-out_length:-1].real
#-----------------------------
# occurence count functions
@nb.jit(nopython = True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _add_count_cross(t1,t2,n):
    # Count, per absolute lag m, how many (t1, t2) pairs contribute;
    # companion of the folded (real-output) correlation kernels.
    for ii in range(t1.shape[0]):
        for jj in range(t2.shape[0]):
            m = abs(t2[jj] - t1[ii])
            if m < len(n):
                n[m] += 1
@nb.jit(nopython = True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _add_count_cross_complex(t1,t2,n):
    # Signed-lag pair count; negative m wraps to the tail of `n`.
    # NOTE(review): the bound uses float division `(len(n)+1)/2` while the
    # matching data kernels use integer `//2` -- these admit different lags
    # when len(n) is even; confirm which is intended.
    for ii in range(t1.shape[0]):
        for jj in range(t2.shape[0]):
            m = t2[jj] - t1[ii]
            if abs(m) < (len(n)+1)/2:
                n[m] += 1
# @nb.jit([(I64[:],I64[:],I64[:,:],I64[:,:])],cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
# def cross_tau_time(t1,t2,tn1,tn2):
# assert len(t1) == len(t2)
# assert tn1.shape == tn2.shape
# count = np.zeros((tn1.shape[0],),tn1.dtype)
# for ii in range(t1.shape[0]):
# for jj in range(t2.shape[0]):
# m = abs(t1[ii] - t2[jj])
# if m < tn1.shape[0]:
# i = count[m]
# if i < tn1.shape[1]:
# tn1[m,i] = t1[ii]
# tn2[m,i] = t2[jj]
# count[m] +=1
@nb.jit([(I64[:],I64[:],I64[:,:],I64[:,:])],cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def cross_tau_times(t1,t2,tpos,tneg):
    """Collect, per lag m = t1[ii] - t2[jj], the start times t1[ii].

    Non-negative lags fill row ``tpos[m]``, negative lags fill row
    ``tneg[-m]``.  Each row holds at most ``tpos.shape[1]`` entries;
    further occurrences are counted but their times are dropped.
    Both output arrays must be pre-filled by the caller (unused slots
    keep whatever sentinel the caller wrote, e.g. -1).
    """
    assert len(t1) == len(t2)
    # bugfix: previously compared tpos.shape with itself (always True);
    # the intent is that both output tables have matching shapes.
    assert tpos.shape == tneg.shape
    count_pos = np.zeros((tpos.shape[0],),tpos.dtype)
    count_neg = np.zeros((tneg.shape[0],),tneg.dtype)
    for ii in range(t1.shape[0]):
        for jj in range(t2.shape[0]):
            m = t1[ii] - t2[jj]
            if abs(m) < tpos.shape[0]:
                if m >= 0:
                    i = count_pos[m]
                    if i < tpos.shape[1]:
                        tpos[m,i] = t1[ii]
                    count_pos[m] +=1
                else:
                    m = -m
                    i = count_neg[m]
                    if i < tneg.shape[1]:
                        tneg[m,i] = t1[ii]
                    count_neg[m] +=1
@nb.jit([(I64[:],I64[:,:])],cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def auto_tau_times(t,tpos):
    # Collect, per positive lag m = t[jj] - t[ii] (ii <= jj), the start
    # times t[ii]; at most tpos.shape[1] entries per lag are stored.
    # The assert requires t to be sorted in non-decreasing order.
    count_pos = np.zeros((tpos.shape[0],),tpos.dtype)
    for ii in range(t.shape[0]):
        for jj in range(ii,t.shape[0]):
            m = t[jj] - t[ii]
            assert m >= 0
            if m < tpos.shape[0]:
                i = count_pos[m]
                if i < tpos.shape[1]:
                    tpos[m,i] = t[ii]
                count_pos[m] +=1
def cross_count_mixed(t1,t2, n, period):
    """Build pair-count tables for the mixed-tau cross estimator.

    For every lag row, collects the start times of positive- and
    negative-lag pairs and cross-counts their co-occurrences.  Returns
    (pos-pos, neg-neg, pos-neg) count tables, each of shape (n, 2*n).
    """
    width = len(t1) // period * 2
    # -1 marks unused slots in the time tables filled by cross_tau_times
    pos = np.full((n, width), -1, int)
    neg = np.full((n, width), -1, int)
    cross_tau_times(t1, t2, pos, neg)
    cpp = np.zeros((n, 2 * n), int)
    cnn = np.zeros((n, 2 * n), int)
    cpn = np.zeros((n, 2 * n), int)
    for lag in range(n):
        # NOTE(review): `> 0` drops time-0 entries along with the -1
        # padding -- confirm start times never legitimately equal 0.
        p = pos[lag][pos[lag] > 0]
        m = neg[lag][neg[lag] > 0]
        _add_count_cross(p, p, cpp[lag])
        _add_count_cross(m, m, cnn[lag])
        _add_count_cross(p, m, cpn[lag])
    return cpp, cnn, cpn
def auto_count_mixed(t, n, period):
    """Build the pair-count table for the mixed-tau auto estimator.

    Collects, per lag row, the start times of contributing pairs and
    counts their co-occurrences.  Returns an (n, 2*n) count table.
    """
    width = len(t) // period * 2
    # -1 marks unused slots in the time table filled by auto_tau_times
    times = np.full((n, width), -1, int)
    auto_tau_times(t, times)
    count = np.zeros((n, 2 * n), int)
    for lag in range(n):
        valid = times[lag][times[lag] > 0]
        _add_count_cross(valid, valid, count[lag])
    return count
@nb.jit(nopython = True, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _add_count_auto(t,n):
    # Count, per absolute lag m, how many (i <= j) pairs of sample times
    # contribute; companion of the autocorrelation kernels.
    for ii in range(t.shape[0]):
        for jj in range(ii,t.shape[0]):
            m = abs(t[ii] - t[jj])
            if m < len(n):
                n[m] += 1
#normalization functions
#-----------------------
# complex inf
# Sentinel returned by the complex kernels when count == 0 (no data pairs).
CINF = C(np.inf + np.inf*1j)
@nb.vectorize([F(F,I64,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_corr_baseline_real(data, count, bg1, bg2):
    # data/count minus Re(conj(bg1)*bg2) baseline.
    # NOTE(review): unlike the complex variant below, there is no
    # count == 0 guard here -- division by zero is left to fastmath/IEEE.
    return data/count - (bg1.real * bg2.real + bg1.imag * bg2.imag)
@nb.vectorize([C(C,I64,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_corr_baseline_complex(data, count, bg1, bg2):
    # Complex baseline normalization; CINF marks empty lag bins.
    if count != 0:
        return data/count - bg2 * np.conj(bg1)
    else:
        return CINF
def normalize_corr_baseline(data, count, bg1, bg2, out = None):
    """Apply baseline normalization, picking the complex or real kernel."""
    kernel = _normalize_corr_baseline_complex if np.iscomplexobj(data) else _normalize_corr_baseline_real
    return kernel(data, count, bg1, bg2, out)
@nb.vectorize([F(F,I64,C,C,F,F)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_struct_baseline_real(data, count, bg1, bg2, var, sq):
    # Structure-function baseline normalization (real output):
    # subtract half the squared-amplitude term `sq`, divide by the pair
    # count, then add half |bg1 - bg2|^2 plus the variance estimate.
    tmp = data - 0.5 * sq
    tmp = tmp/count
    d = (bg1.real - bg2.real)
    d2 = d*d
    d = (bg1.imag - bg2.imag)
    d2 = d2 + d*d
    return tmp + (0.5 * d2) + var
@nb.vectorize([C(C,I64,C,C,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_struct_baseline_complex(data, count, bg1, bg2, var, sq):
    # Complex variant: real and imaginary parts carry two estimates, each
    # corrected with its own background-difference term.
    tmp = data - 0.5 * sq * (1. + 1j)
    if count != 0:
        tmp = tmp/count
    else:
        tmp = CINF
    d = (bg1.real - bg2.real)
    d2 = d*d
    d = (bg1.imag - bg2.imag)
    d2 = d2 + d*d
    real = (0.5 * d2) + var
    d = (bg1.imag + bg2.real)
    d2 = d*d
    d = (bg1.real - bg2.imag)
    d2 = d2 + d*d
    imag = (0.5 * d2) + var
    return tmp + real + (1j* imag)
def normalize_struct_baseline(data, count, bg1, bg2, var, sq, out = None):
    """Apply structure-function baseline normalization (complex or real kernel)."""
    kernel = _normalize_struct_baseline_complex if np.iscomplexobj(data) else _normalize_struct_baseline_real
    return kernel(data, count, bg1, bg2, var, sq, out)
@nb.vectorize([F(F,I64,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_corr_compensated_real(data, count, m1, m2):
    # Compensated normalization (real output): subtract the product of the
    # per-lag mean sums m1, m2 (each divided once by count), then divide.
    tmp = m1.real * m2.real + m1.imag * m2.imag
    tmp = tmp/count
    tmp = data - tmp
    return tmp/count
@nb.vectorize([C(C,I64,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_corr_compensated_complex(data, count, m1, m2):
    # Complex compensated normalization; CINF marks empty lag bins.
    if count != 0:
        tmp = m2 * np.conj(m1)
        tmp = tmp/count
        tmp = data - tmp
        return tmp/count
    else:
        return CINF
def normalize_corr_compensated(data, count, m1, m2, out = None):
    """Apply compensated normalization, picking the complex or real kernel."""
    kernel = _normalize_corr_compensated_complex if np.iscomplexobj(data) else _normalize_corr_compensated_real
    return kernel(data, count, m1, m2, out)
@nb.vectorize([F(F,I64,C,C,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_corr_compensated_subtracted_real(data, count, bg1, bg2, m1, m2):
    # Compensated normalization with an extra background-subtraction
    # correction term built from (mean - background) of both signals.
    tmp = m1.real * m2.real + m1.imag * m2.imag
    tmp = tmp/count
    tmp = data - tmp
    tmp = tmp/count
    tmp += (m1.real/count - bg1.real)*(m2.real/count - bg2.real)
    tmp += (m1.imag/count - bg1.imag)*(m2.imag/count - bg2.imag)
    return tmp
@nb.vectorize([C(C,I64,C,C,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_corr_compensated_subtracted_complex(data, count, bg1, bg2, m1, m2):
    # Complex variant of the compensated + subtracted normalization.
    if count != 0:
        tmp = m2 * np.conj(m1)
        tmp = tmp/count
        tmp = data - tmp
        tmp = tmp/count
        tmp += np.conj(m1/count-bg1)*(m2/count-bg2)
        return tmp
    else:
        return CINF
def normalize_corr_compensated_subtracted(data, count, bg1, bg2, m1, m2, out = None):
    """Apply compensated + background-subtracted normalization (complex or real)."""
    kernel = _normalize_corr_compensated_subtracted_complex if np.iscomplexobj(data) else _normalize_corr_compensated_subtracted_real
    return kernel(data, count, bg1, bg2, m1, m2, out)
@nb.vectorize([F(F,I64,F,F,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_struct_compensated_real(data, count, var, sq, m1, m2):
    # Compensated structure-function normalization (real output): adds
    # half |m1 - m2|^2 / count, subtracts half `sq`, divides, adds `var`.
    tmp = (m1.real-m2.real)* (m1.real- m2.real)
    tmp = tmp + (m1.imag-m2.imag)* (m1.imag- m2.imag)
    tmp = 0.5*tmp/count
    tmp = data + tmp - 0.5 * sq
    return tmp/count + var
@nb.vectorize([C(C,I64,F,F,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_struct_compensated_complex(data, count, var, sq, m1, m2):
    # Complex variant; real and imaginary corrections use different
    # combinations of the mean components.
    if count == 0:
        return CINF
    else:
        real = (m1.real-m2.real)* (m1.real- m2.real)
        real = real + (m1.imag-m2.imag)* (m1.imag- m2.imag)
        real = 0.5*real/count - 0.5 * sq
        real = real/count + var
        imag = (m1.imag+m2.real)* (m1.imag+ m2.real)
        imag = imag + (m1.real-m2.imag)* (m1.real- m2.imag)
        imag = 0.5*imag/count - 0.5 * sq
        imag = imag/count + var
        c = real + 1j* imag
        return data/count + c
def normalize_struct_compensated(data, count, var, sq, m1, m2, out = None):
    """Apply compensated structure-function normalization (complex or real)."""
    kernel = _normalize_struct_compensated_complex if np.iscomplexobj(data) else _normalize_struct_compensated_real
    return kernel(data, count, var, sq, m1, m2, out)
@nb.vectorize([F(F,I64,C,C,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_corr_subtracted_real(data, count, bg1, bg2, m1, m2):
    # Background-subtracted normalization (real output): remove the
    # background/mean cross terms, divide, and add back Re(conj(bg1)*bg2).
    tmp = data
    tmp = tmp - bg1.real * m2.real - bg1.imag * m2.imag
    tmp = tmp - bg2.real * m1.real - bg2.imag * m1.imag
    return tmp/count + bg1.real * bg2.real + bg1.imag * bg2.imag
@nb.vectorize([C(C,I64,C,C,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_corr_subtracted_complex(data, count, bg1, bg2, m1, m2):
    # Complex variant; CINF marks empty lag bins.
    tmp = data
    tmp = tmp - np.conj(bg1) * m2 - np.conj(m1) * bg2
    if count != 0:
        return tmp/count + np.conj(bg1) * bg2
    else:
        return CINF
def normalize_corr_subtracted(data, count, bg1, bg2, m1, m2, out = None):
    """Apply background-subtracted normalization (complex or real kernel)."""
    kernel = _normalize_corr_subtracted_complex if np.iscomplexobj(data) else _normalize_corr_subtracted_real
    return kernel(data, count, bg1, bg2, m1, m2, out)
@nb.vectorize([F(F,I64,C,C,F,F,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_struct_subtracted_real(data, count, bg1, bg2, var, sq, m1, m2):
    # Background-subtracted structure-function normalization (real output).
    # Argument order is (data, count, bg1, bg2, var, sq, m1, m2) per the
    # vectorize signature above (var and sq are F, m1/m2 are C).
    tmp = data - 0.5 * sq
    tmp = tmp + (m1.real-m2.real)* (bg1.real- bg2.real)
    tmp = tmp + (m1.imag-m2.imag)* (bg1.imag- bg2.imag)
    tmp = tmp/count
    d = (bg1.real - bg2.real)
    d2 = d*d
    d = (bg1.imag - bg2.imag)
    d2 = d2 + d*d
    return tmp - (0.5 * d2) + var
@nb.vectorize([C(C,I64,C,C,F,F,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_struct_subtracted_complex(data, count, bg1, bg2, var, sq, m1, m2):
    # Complex variant of the subtracted structure-function normalization.
    if count == 0:
        return CINF
    else:
        tmp = data - 0.5 * sq * (1. + 1j)
        real = (m1.real-m2.real)* (bg1.real- bg2.real)
        real += (m1.imag-m2.imag)* (bg1.imag- bg2.imag)
        imag = (m1.imag + m2.real)* (bg1.imag + bg2.real)
        imag += (m1.real-m2.imag)* (bg1.real- bg2.imag)
        tmp = tmp + real + imag * 1j
        tmp = tmp/count
        d = (bg2.real - bg1.real)
        d2 = d*d
        d = (bg1.imag - bg2.imag)
        d2 = d2 + d*d
        real = (0.5 * d2) - var
        d = (bg1.imag + bg2.real)
        d2 = d*d
        d = (bg1.real - bg2.imag)
        d2 = d2 + d*d
        imag = (0.5 * d2) - var
        c = real + 1j* imag
        return tmp - c
def normalize_struct_subtracted(data, count, bg1, bg2, var, m1, m2, sq, out = None):
    """Apply background-subtracted structure-function normalization.

    Dispatches to the complex or real numba kernel.  The kernels expect
    their arguments in the order (data, count, bg1, bg2, var, sq, m1, m2);
    the previous code forwarded ``(var, m1, m2, sq)`` positionally, which
    fed the complex means ``m1``/``m2`` into the float ``sq``/``m1`` slots
    (an argument-ordering bug that broke the kernel's typed signature).
    """
    if np.iscomplexobj(data):
        return _normalize_struct_subtracted_complex(data, count, bg1, bg2, var, sq, m1, m2, out)
    else:
        return _normalize_struct_subtracted_real(data, count, bg1, bg2, var, sq, m1, m2, out)
# Numbered normalization-mode kernels (ccorr = cross-correlation,
# cdiff = cross-difference).  Each takes the accumulated data and the
# per-lag pair count plus mode-specific background/mean terms.
@nb.vectorize([F(F,I64,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_ccorr_0(data, count, bg1, bg2):
    # mode 0: divide by count, subtract Re(conj(bg1)*bg2) baseline.
    return data/count - (bg1.real * bg2.real + bg1.imag * bg2.imag)
@nb.vectorize([F(F,I64,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_cdiff_1(data, count, d):
    # difference mode 1: divide by count, subtract |d|^2.
    return data/count - (d.real * d.real + d.imag*d.imag)
@nb.vectorize([F(F,I64,C,C,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_ccorr_2(data, count, bg1, bg2, m1, m2):
    # mode 2 (subtracted): remove background/mean cross terms, add baseline.
    tmp = data
    tmp = tmp - bg1.real * m2.real - bg1.imag * m2.imag
    tmp = tmp - bg2.real * m1.real - bg2.imag * m1.imag
    return tmp/count + bg1.real * bg2.real + bg1.imag * bg2.imag
@nb.vectorize([F(F,I64,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_ccorr_2b(data, count, m1, m2):
    # mode 2b (compensated): subtract m1*m2/count, then divide by count.
    tmp = m1.real * m2.real + m1.imag * m2.imag
    tmp = tmp/count
    tmp = data -tmp
    return tmp/count
@nb.vectorize([F(F,I64,C,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_cdiff_3(data, count, dm, m1, m2):
    # difference mode 3: correct with the mean-difference term dm.
    ds = m2 - m1
    tmp = data - 2*(dm.real * ds.real + dm.imag * ds.imag)
    return tmp/count + (dm.real * dm.real + dm.imag * dm.imag)
@nb.vectorize([F(F,I64,C,C,F,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_ccorr_3(data, count, bg1, bg2, sq, m1, m2):
    # mode 3 (structure, subtracted): see _normalize_struct_subtracted_real.
    tmp = data - 0.5 * sq
    tmp = tmp + (m1.real-m2.real)* (bg1.real- bg2.real)
    tmp = tmp + (m1.imag-m2.imag)* (bg1.imag- bg2.imag)
    tmp = tmp/count
    d = (bg1.real - bg2.real)
    d2 = d*d
    d = (bg1.imag - bg2.imag)
    d2 = d2 + d*d
    return tmp - (0.5 * d2)
@nb.vectorize([F(F,I64,F,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_ccorr_3b(data, count, sq, m1, m2):
    # mode 3b (structure, compensated): see _normalize_struct_compensated_real.
    tmp = (m1.real-m2.real)* (m1.real- m2.real)
    tmp = tmp + (m1.imag-m2.imag)* (m1.imag- m2.imag)
    tmp = 0.5*tmp/count
    tmp = data + tmp - 0.5 * sq
    return tmp/count
@nb.vectorize([F(F,I64,C,C,F)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def _normalize_ccorr_1(data, count, bg1, bg2, sq):
    # mode 1 (structure, baseline): see _normalize_struct_baseline_real.
    tmp = data - 0.5 * sq
    tmp = tmp/count
    d = (bg1.real - bg2.real)
    d2 = d*d
    d = (bg1.imag - bg2.imag)
    d2 = d2 + d*d
    return tmp + (0.5 * d2)
#because of numba bug, this does not work for np.nan inputs
# @nb.jit([F(F,F)], cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
# def _weight_from_g(g,delta):
# tmp1 = 2*g
# tmp2 = g**2 + 1 + 2*delta**2
# return tmp1/tmp2
@nb.vectorize([F(F,F),C(C,F)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def weight_from_g(g, delta):
    """Computes weight for weighted normalization from normalized and scaled
    correlation function"""
    # w = 2g / (|g|^2 + 1 + delta^2)
    # NOTE(review): the commented-out reference implementation above uses
    # 2*delta**2 in the denominator while this version uses delta**2 --
    # confirm which form is the intended one.
    tmp1 = 2*g
    g2 = g.real**2 + g.imag**2
    tmp2 = g2 + 1 + delta**2
    return tmp1/tmp2
@nb.vectorize([F(C,F,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def weight_prime_from_g(g,delta, b1, b2):
    """Computes weight for weighted normalization from normalized and scaled
    correlation function"""
    # Generalization of weight_from_g including the (complex) noise
    # amplitudes b1, b2 of the two signals.
    # s1 = |b1|^2
    s1 = b1.real * b1.real + b1.imag * b1.imag
    # s2 = |b2|^2
    s2 = b2.real * b2.real + b2.imag * b2.imag
    # r = Re(conj(b2)*b1)
    r = b1.real * b2.real + b1.imag * b2.imag
    #i = Im(conj(b2)*b1)
    i = b2.real * b1.imag - b2.imag * b1.real
    d2 = delta**2
    g2 = g.real**2 + g.imag**2
    tmp1 = 2 * g.real + 2 * r + (s1 + s2) * g.real
    tmp2 = g2 + 1 + d2 + s1 + s2 + (s2 - s1) * delta + 2 * r * g.real + 2 * i * g.imag
    return tmp1/tmp2
def weight_prime_from_d(d, delta, b1, b2):
    """Weight for weighted normalization, computed from the scaled
    difference function via g = 1 - d/2."""
    return weight_prime_from_g(1 - d / 2., delta, b1, b2)
def weight_from_d(d, delta):
    """Weight for weighted normalization, computed from the scaled
    difference function via g = 1 - d/2."""
    return weight_from_g(1 - d / 2., delta)
@nb.vectorize([F(F,C,F)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def sigma_weighted(w,g,delta):
    """Computes standard deviation of the weighted normalization."""
    g2 = g.real**2
    c2= g2 + g.imag**2
    d2 = delta**2
    return (0.5 * (w**2 * (c2 + 1 + d2) - 4 * w * g.real + 2*g2 - c2 + 1 - d2))**0.5
@nb.vectorize([F(F,C,F,C,C)],target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
def sigma_prime_weighted(w,g,delta, b1, b2):
    # Standard deviation of the weighted normalization, generalized to
    # include the (complex) noise amplitudes b1, b2 (cf. weight_prime_from_g).
    g2 = g.real**2
    c2 = g2 + g.imag**2
    d2 = delta**2
    # s1 = |b1|^2
    s1 = b1.real * b1.real + b1.imag * b1.imag
    # s2 = |b2|^2
    s2 = b2.real * b2.real + b2.imag * b2.imag
    # r = Re(conj(b2)*b1)
    r = b1.real * b2.real + b1.imag * b2.imag
    #i = Im(conj(b2)*b1)
    i = b2.real * b1.imag - b2.imag * b1.real
    return (0.5 * (w**2 * (c2 + 1 + d2 + s1 + s2 + (s2 - s1) * delta + 2 * r * g.real + 2 * i * g.imag) \
        - 4 * w * (g.real + r + 0.5 * (s1 + s2) * g.real ) \
        + 2*g2 - c2 + 1 - d2 + s1 + s2 - (s2 - s1) * delta + 2 * r * g.real - 2 * i * g.imag))**0.5
@nb.jit
def _g(a,index):
    """Return ``a[abs(index)]``, or 0. when the lag is outside the array.

    Helper for the (commented-out) general sigma estimators, which read the
    correlation function at arbitrary, possibly negative, lags.
    """
    index = abs(index)
    # bugfix: the guard must also reject index == len(a); the previous `>`
    # comparison allowed an out-of-bounds read at exactly len(a).
    if index >= len(a):
        return 0.
    else:
        return a[index]
# @nb.guvectorize([(F[:],F[:],F[:],F[:],I64[:,:],I64[:,:],I64[:,:],F[:])],"(),(n),(),(),(n,m),(n,m),(n,m)->(n)",target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
# def sigma_weighted_cross_general(weight,g,noise, delta, pp,pm,mm, out):
# w = weight[0]
# d2 = delta[0]**2
# b2 = noise[0]**2
# for i in range(len(g)):
# g2 = g[i]**2
# out[i] = (0.5 * (w**2 * (g2 + 1 + b2 + 2 * d2) - 4 * w * g[i] + g2 + 1 - d2))
# #correction terms, skipping p = 0 because it was computed above.
# for p in range(1,pp.shape[1]):
# tmp = (pp[i, p] + mm[i, p])*(_g(g, p)**2 + _g(g, p + i) * _g(g, p-i))
# tmp += pm[i, p] *(_g(g, p + i)**2 + _g(g, p - i)**2 + _g(g, p)*_g(g, p + 2 * i) + _g(g, p)*_g(g, p - 2 * i))
# tmp -= 2*w * (pp[i,p] + mm[i,p])* (_g(g,p+i)*_g(g,p) + _g(g,p-i)*_g(g,p))
# tmp -= 2*w * pm[i,p]* (_g(g,p+i)*_g(g,p) + _g(g,p-i)*_g(g,p) + _g(g,p+i)*_g(g,p+2*i) + _g(g,p-i)*_g(g,p-2*i) )
# tmp += w**2 * (pp[i,p] + mm[i,p])* (_g(g,p)**2 + _g(g,p+i)**2 + _g(g,p-i)**2)
# tmp += w**2 * pm[i,p] * (_g(g,p)**2 +_g(g,p-i)**2 + _g(g,p+i)**2 + 0.5* _g(g,p+2*i)**2+ 0.5* _g(g,p-2*i)**2 )
# out[i] = out[i] + 0.5 * tmp / (pp[i,0] + mm[i,0])
# out[i] = out[i] ** 0.5
# @nb.guvectorize([(F[:],F[:],F[:],I64[:,:],F[:])],"(n),(n),(n),(n,m)->(n)",target = NUMBA_TARGET, cache = NUMBA_CACHE, fastmath = NUMBA_FASTMATH)
# def sigma_weighted_auto_general(weight,g,noise, pp, out):
# for i in range(len(g)):
# w = weight[i]
# b2 = noise[i]**2
# g2 = g[i]**2
# out[i] = 0.
# out[i] = (0.5 * (w**2 * (g2 + 1 + b2) - 4 * w * g[i] + g2 + 1))
# #correction terms, skipping p = 0 because it was computed above.
# for p in range(1,pp.shape[1]):
# tmp = pp[i, p] * (_g(g, p)**2 + _g(g, p + i) * _g(g, p-i))
# tmp -= 2*w * pp[i,p] * (_g(g,p+i)*_g(g,p) + _g(g,p-i)*_g(g,p))
# tmp += w**2 * pp[i,p] * (_g(g,p)**2 + 0.5*_g(g,p+i)**2 + 0.5*_g(g,p-i)**2)
# out[i] = out[i] + 0.5 * tmp / pp[i,0]
# out[i] = out[i] ** 0.5
| 2.609375 | 3 |
Codes/Python32/Lib/test/test_timeout.py | eyantra/FireBird_Swiss_Knife | 319 | 12774083 | """Unit tests for socket timeout feature."""
import unittest
from test import support
# This requires the 'network' resource as given on the regrtest command line.
# True when the optional 'network' resource was NOT enabled for this run.
skip_expected = not support.is_resource_enabled('network')
import time
import errno
import socket
class CreationTestCase(unittest.TestCase):
    """Test case for socket.gettimeout() and socket.settimeout()"""

    def setUp(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def tearDown(self):
        self.sock.close()

    def testObjectCreation(self):
        # A fresh socket must start in blocking mode (no timeout).
        self.assertEqual(self.sock.gettimeout(), None,
                         "timeout not disabled by default")

    def testFloatReturnValue(self):
        # gettimeout() must report exactly what settimeout() was given.
        self.sock.settimeout(7.345)
        self.assertEqual(self.sock.gettimeout(), 7.345)
        self.sock.settimeout(3)
        self.assertEqual(self.sock.gettimeout(), 3)
        self.sock.settimeout(None)
        self.assertEqual(self.sock.gettimeout(), None)

    def testReturnType(self):
        # gettimeout() always returns a float, even for int arguments.
        self.sock.settimeout(1)
        self.assertEqual(type(self.sock.gettimeout()), type(1.0))
        self.sock.settimeout(3.9)
        self.assertEqual(type(self.sock.gettimeout()), type(1.0))

    def testTypeCheck(self):
        # settimeout() accepts ints, floats and None, and rejects all
        # other types.  (Duplicate checks left over from the Python 2
        # int/long split have been removed.)
        self.sock.settimeout(0)
        self.sock.settimeout(0.0)
        self.sock.settimeout(None)
        self.assertRaises(TypeError, self.sock.settimeout, "")
        self.assertRaises(TypeError, self.sock.settimeout, ())
        self.assertRaises(TypeError, self.sock.settimeout, [])
        self.assertRaises(TypeError, self.sock.settimeout, {})
        self.assertRaises(TypeError, self.sock.settimeout, 0j)

    def testRangeCheck(self):
        # Negative timeouts are rejected for both int and float arguments.
        self.assertRaises(ValueError, self.sock.settimeout, -1)
        self.assertRaises(ValueError, self.sock.settimeout, -1.0)

    def testTimeoutThenBlocking(self):
        # setblocking() overrides a previously set timeout.
        self.sock.settimeout(10)
        self.sock.setblocking(1)
        self.assertEqual(self.sock.gettimeout(), None)
        self.sock.setblocking(0)
        self.assertEqual(self.sock.gettimeout(), 0.0)
        self.sock.settimeout(10)
        self.sock.setblocking(0)
        self.assertEqual(self.sock.gettimeout(), 0.0)
        self.sock.setblocking(1)
        self.assertEqual(self.sock.gettimeout(), None)

    def testBlockingThenTimeout(self):
        # settimeout() overrides a previously set blocking mode.
        self.sock.setblocking(0)
        self.sock.settimeout(1)
        self.assertEqual(self.sock.gettimeout(), 1)
        self.sock.setblocking(1)
        self.sock.settimeout(1)
        self.assertEqual(self.sock.gettimeout(), 1)
class TimeoutTestCase(unittest.TestCase):
    # There are a number of tests here trying to make sure that an operation
    # doesn't take too much longer than expected. But competing machine
    # activity makes it inevitable that such tests will fail at times.
    # When fuzz was at 1.0, I (tim) routinely saw bogus failures on Win2K
    # and Win98SE. Boosting it to 2.0 helped a lot, but isn't a real
    # solution.
    fuzz = 2.0

    localhost = '127.0.0.1'

    def setUp(self):
        # Abstract base: concrete subclasses must create self.sock here.
        raise NotImplementedError()

    tearDown = setUp

    def _sock_operation(self, count, timeout, method, *args):
        """
        Test the specified socket method.
        The method is run at most `count` times and must raise a socket.timeout
        within `timeout` + self.fuzz seconds.
        """
        self.sock.settimeout(timeout)
        func = getattr(self.sock, method)
        elapsed = None
        for _ in range(count):
            start = time.time()
            try:
                func(*args)
            except socket.timeout:
                elapsed = time.time() - start
                break
        if elapsed is None:
            self.fail('socket.timeout was not raised')
        # These checks should account for timing unprecision
        self.assertLess(elapsed, timeout + self.fuzz)
        self.assertGreater(elapsed, timeout - 1.0)
class TCPTimeoutTestCase(TimeoutTestCase):
    """TCP test case for socket.socket() timeout functions"""

    def setUp(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.addr_remote = ('www.python.org.', 80)

    def tearDown(self):
        self.sock.close()

    def testConnectTimeout(self):
        # A private address that is almost certainly unreachable keeps the
        # connect pending so the timeout can fire; the dotted-quad form
        # keeps DNS-lookup time out of the measured connect time, which
        # would otherwise break the timing assertion.
        addr = ('10.0.0.0', 12345)
        with support.transient_internet(addr[0]):
            self._sock_operation(1, 0.001, 'connect', addr)

    def testRecvTimeout(self):
        # recv() on a quiet remote connection must time out.
        with support.transient_internet(self.addr_remote[0]):
            self.sock.connect(self.addr_remote)
            self._sock_operation(1, 1.5, 'recv', 1024)

    def testAcceptTimeout(self):
        # accept() with no incoming peer must time out.
        support.bind_port(self.sock, self.localhost)
        self.sock.listen(5)
        self._sock_operation(1, 1.5, 'accept')

    def testSend(self):
        # send() must time out once the TCP buffers are saturated; the
        # large payload bypasses buffering in the TCP stack.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as listener:
            support.bind_port(listener, self.localhost)
            listener.listen(5)
            self.sock.connect(listener.getsockname())
            self._sock_operation(100, 1.5, 'send', b"X" * 200000)

    def testSendto(self):
        # sendto() must time out as well; the address argument is ignored
        # because the socket is already connected.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as listener:
            support.bind_port(listener, self.localhost)
            listener.listen(5)
            self.sock.connect(listener.getsockname())
            self._sock_operation(100, 1.5, 'sendto', b"X" * 200000,
                                 listener.getsockname())

    def testSendall(self):
        # sendall() must time out once the TCP buffers are saturated.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as listener:
            support.bind_port(listener, self.localhost)
            listener.listen(5)
            self.sock.connect(listener.getsockname())
            self._sock_operation(100, 1.5, 'sendall', b"X" * 200000)
class UDPTimeoutTestCase(TimeoutTestCase):
    """UDP test case for socket.socket() timeout functions"""
    def setUp(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    def tearDown(self):
        self.sock.close()
    def testRecvfromTimeout(self):
        # Test recvfrom() timeout
        # Prevent "Address already in use" socket exceptions
        support.bind_port(self.sock, self.localhost)
        # Nothing ever sends to the bound port, so recvfrom() must raise
        # socket.timeout after roughly 1.5 seconds.
        self._sock_operation(1, 1.5, 'recvfrom', 1024)
def test_main():
    """Run every timeout test case; requires the 'network' test resource."""
    support.requires('network')
    support.run_unittest(
        CreationTestCase,
        TCPTimeoutTestCase,
        UDPTimeoutTestCase,
    )
if __name__ == "__main__":
    test_main()
| 2.8125 | 3 |
objective_turk/create_hit.py | nmalkin/objective-turk | 0 | 12774084 | import logging
from objective_turk import objective_turk
logger = logging.getLogger(__name__)
# XML payload template for an ExternalQuestion HIT; the placeholder is the
# URL of the externally hosted task page.
EXTERNAL_URL_QUESTION = """<?xml version="1.0"?>
<ExternalQuestion xmlns="http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/ExternalQuestion.xsd">
<ExternalURL>{}</ExternalURL>
<FrameHeight>600</FrameHeight>
</ExternalQuestion>
"""
# XML payload template for an HTMLQuestion HIT; the placeholder is the raw
# HTML content embedded in a CDATA section.
HTML_QUESTION = """<?xml version="1.0"?>
<HTMLQuestion xmlns="http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2011-11-11/HTMLQuestion.xsd">
<HTMLContent><![CDATA[
{}
]]></HTMLContent>
<FrameHeight>0</FrameHeight>
</HTMLQuestion>
"""
def get_external_question(url: str):
    """Build the ExternalQuestion XML payload for a HIT hosted at *url*."""
    question_xml = EXTERNAL_URL_QUESTION.format(url)
    return question_xml
def get_html_question(html: str):
    """Build an HTMLQuestion XML payload embedding the given HTML content."""
    question_xml = HTML_QUESTION.format(html)
    return question_xml
class BuiltinQualificationType:
    """
    An Enum for QualificationTypeId constants
    https://github.com/nmalkin/mturk-python/blob/master/mturk/mturk.py#L16
    """
    # Worker statistics (percentages and counts of assignments).
    P_SUBMITTED = "00000000000000000000"
    P_ABANDONED = "00000000000000000070"
    P_RETURNED = "000000000000000000E0"
    P_APPROVED = "000000000000000000L0"
    P_REJECTED = "000000000000000000S0"
    N_APPROVED = "00000000000000000040"
    # Worker demographics.
    LOCALE = "00000000000000000071"
    ADULT = "00000000000000000060"
    # Masters qualifications; the S_ variants are the sandbox ids.
    S_MASTERS = "2ARFPLSP75KLA8M8DH1HTEQVJT3SY6"
    MASTERS = "2F1QJWKUDD8XADTFD2Q0G6UTO95ALH"
    S_CATMASTERS = "2F1KVCNHMVHV8E9PBUB2A4J79LU20F"
    CATMASTERS = "2NDP2L92HECWY8NS8H3CK0CP5L9GHO"
    S_PHOTOMASTERS = "2TGBB6BFMFFOM08IBMAFGGESC1UWJX"
    PHOTOMASTERS = "21VZU98JHSTLZ5BPP4A9NOBJEK3DPG"
# Workers must have an approval rate above this percentage to qualify.
MINIMUM_PERCENTAGE_APPROVED = 95
def get_qualifications(exclude=None, include=None):
    """Build the standard QualificationRequirements list for a HIT.

    Always requires US locale and an approval percentage greater than
    MINIMUM_PERCENTAGE_APPROVED.

    :param exclude: optional iterable of qualification type ids; workers
        holding any of them are excluded. (The original annotation said
        ``str``, but the value is iterated, so it must be an iterable.)
    :param include: optional iterable of qualification type ids; only
        workers holding all of them are allowed.
    :return: list of qualification requirement dicts for the MTurk API.
    """
    qualifications = [
        {
            'QualificationTypeId': BuiltinQualificationType.LOCALE,
            'Comparator': 'EqualTo',
            'LocaleValues': [{'Country': 'US'}],
            'RequiredToPreview': True,
        },
        {
            'QualificationTypeId': BuiltinQualificationType.P_APPROVED,
            'Comparator': 'GreaterThan',
            'IntegerValues': [MINIMUM_PERCENTAGE_APPROVED],
            'RequiredToPreview': True,
        },
    ]
    if exclude is not None:
        for qualification_id in exclude:
            # Use the module logger (not the root logger) for consistency
            # with the rest of this module.
            logger.debug('excluding workers with qualification %s', qualification_id)
            qualifications.append(
                {
                    'QualificationTypeId': qualification_id,
                    'Comparator': 'DoesNotExist',
                    'RequiredToPreview': True,
                }
            )
    if include is not None:
        for qualification_id in include:
            logger.debug(
                'allowing only workers with qualification %s', qualification_id
            )
            qualifications.append(
                {
                    'QualificationTypeId': qualification_id,
                    'Comparator': 'Exists',
                    'RequiredToPreview': True,
                }
            )
    return qualifications
def create_hit_with_hit_type(**kwargs):
    """
    Create HIT using provided HITTypeId.
    You still need to pass 'LifetimeInSeconds', 'MaxAssignments', 'Question'.
    Full list of valid parameters:
    HITTypeId, MaxAssignments, LifetimeInSeconds, Question, RequesterAnnotation, UniqueRequestToken, AssignmentReviewPolicy, HITReviewPolicy, HITLayoutId, HITLayoutParameters
    Other fields will be ignored:
    Title, Description, Reward, and Keywords

    :raises ValueError: if any required argument is missing; the message
        lists *all* missing names (the old elif chain reported only the
        first one).
    :return: the created objective_turk.Hit.
    """
    required = ('HITTypeId', 'Question', 'MaxAssignments')
    missing = [name for name in required if name not in kwargs]
    if missing:
        raise ValueError('missing required argument(s): %s' % ', '.join(missing))
    hit_type = kwargs['HITTypeId']
    logger.info(
        'creating HIT using HITTypeId %s. Title, Description, Reward, and Keywords from calling script will be ignored.',
        hit_type,
    )
    response = objective_turk.client().create_hit_with_hit_type(**kwargs)
    logger.debug(response)
    # pylint: disable=protected-access
    return objective_turk.Hit._new_from_response(response['HIT'])
def create_hit(**kwargs):
    """
    Create a HIT with the given arguments.
    For arguments, see:
    https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/mturk.html#MTurk.Client.create_hit
    """
    api_response = objective_turk.client().create_hit(**kwargs)
    logger.debug(api_response)
    # pylint: disable=protected-access
    return objective_turk.Hit._new_from_response(api_response['HIT'])
| 2.953125 | 3 |
public/route/panel/redirect.py | astask/request | 0 | 12774085 | from src.templating import Request, url_path, redirect, form, render_template
# Localized UI strings for the redirect panel page, keyed by language code.
# (Values are Russian runtime strings and must not be altered.)
lang = {
    "ru": {
        "title": "Редирект",
        "route": {
            "panel": "Панель управления",
            "redirect": "Редирект",
        },
        "redirect_index": "Редирект на главную",
    },
}
async def response(request: Request) -> render_template:
    """Redirect to the index page on form submission, else render the panel."""
    submitted = "redirect" in form()
    if submitted:
        return redirect(url=url_path("index"))
    context = {"lc": lang[request.lang]}
    return await render_template("route/panel/redirect.html", context=context)
| 2.375 | 2 |
cx_Freeze/samples/advanced/advanced_1.py | lexa/cx_Freeze | 358 | 12774086 | <filename>cx_Freeze/samples/advanced/advanced_1.py
#!/usr/bin/env python
print("Hello from cx_Freeze Advanced #1\n")
# Import the helper module dynamically so cx_Freeze cannot discover it via
# static analysis; the sample exercises hook-based module inclusion.
module = __import__("testfreeze_1")
| 1.445313 | 1 |
cea/plots/colors.py | architecture-building-systems/cea-toolbox | 121 | 12774087 | """
This is the official list of CEA colors to use in plots
"""
import os
import pandas as pd
import yaml
import warnings
import functools
from typing import List, Callable
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
# Official CEA color palette, mapping color names to CSS "rgb(r,g,b)" strings.
COLORS_TO_RGB = {"red": "rgb(240,75,91)",
                 "red_light": "rgb(246,148,143)",
                 "red_lighter": "rgb(252,217,210)",
                 "blue": "rgb(63,192,194)",
                 "blue_light": "rgb(171,221,222)",
                 "blue_lighter": "rgb(225,242,242)",
                 "yellow": "rgb(255,209,29)",
                 "yellow_light": "rgb(255,225,133)",
                 "yellow_lighter": "rgb(255,243,211)",
                 "brown": "rgb(174,148,72)",
                 "brown_light": "rgb(201,183,135)",
                 "brown_lighter": "rgb(233,225,207)",
                 "purple": "rgb(171,95,127)",
                 "purple_light": "rgb(198,149,167)",
                 "purple_lighter": "rgb(231,214,219)",
                 "green": "rgb(126,199,143)",
                 "green_light": "rgb(178,219,183)",
                 "green_lighter": "rgb(227,241,228)",
                 "grey": "rgb(68,76,83)",
                 "grey_light": "rgb(126,127,132)",
                 "black": "rgb(35,31,32)",
                 "white": "rgb(255,255,255)",
                 "orange": "rgb(245,131,69)",
                 "orange_light": "rgb(248,159,109)",
                 "orange_lighter": "rgb(254,220,198)"}
def color_to_rgb(color):
    """Translate a CEA color name to its "rgb(r,g,b)" string.

    Strings already formatted as an rgb() triple are returned unchanged;
    unknown names fall back to black.
    """
    try:
        return COLORS_TO_RGB[color]
    except KeyError:
        import re
        # Raw string: the previous pattern used "\(", "\s" and "\d" inside
        # a plain string literal, which emits invalid-escape warnings on
        # modern Python (SyntaxWarning since 3.12).
        if re.match(r"rgb\(\s*\d+\s*,\s*\d+\s*,\s*\d+\s*\)", color):
            # already an rgb formatted color
            return color
        return COLORS_TO_RGB["black"]
gec_debug.py | Mathstao/gector | 0 | 12774088 | <gh_stars>0
import sys
import requests
host = "http://11.0.0.150:8890/correct"
def call_gec(data):
    """POST *data* to the GEC service and return the decoded JSON response.

    :param data: JSON-serializable request payload.
    :raises requests.HTTPError: if the server answers with an error status.
    :raises requests.Timeout: if the server does not respond in time; the
        original call had no timeout and could hang forever.
    """
    resp = requests.post(host, json=data, timeout=30)
    resp.raise_for_status()  # surface HTTP errors instead of failing in .json()
    return resp.json()
if __name__ == '__main__':
    # Demo driver: correct the sentence from argv, or a default sample that
    # contains deliberate errors for the corrector to fix.
    if len(sys.argv)==2:
        text = sys.argv[-1]
    else:
        text = "Hi, Guibin! My namme is Citao. The marked was closed yestreday. (This email are sent from OnMail.)"
    data = {
        'text': text,
        # Parameters in GECToR model
        'iterations': 3,
        'min_probability': 0.5,
        'min_error_probability': 0.7,
        # If sensitive to lower/upper case
        'case_sensitive': True,
        # If need post processing of LanguageTool
        'languagetool_post_process': True,
        # When GECToR model thinks the error probability of corrected text > <threshold>,
        # we will call LanguageTool for post-processing.
        # Only meaningful when 'languagetool_post_process'=True
        'languagetool_call_thres': 0.7,
        # Skip corrections that contains word in whitelist
        'whitelist': ['citao', 'guibin', 'onmail'],
        # With the information every step for debugging
        'with_debug_info': True,
    }
    result = call_gec(data)
    if data['with_debug_info']:
        print(result['debug_info'])
    print('Input :', result['input'])
    print('Output :', result['output'])
    print('Corrections :', result['corrections'])
| 2.78125 | 3 |
ioflo/aid/aggregating.py | BradyHammond/ioflo | 128 | 12774089 | import math
import statistics
def fuzzyAnd(m):
    """
    fuzzy anding
    m = list of membership values to be anded
    returns smallest value in the list (Zadeh AND = minimum)
    """
    smallest = min(m)
    return smallest
FuzzyAnd = fuzzyAnd  # CamelCase alias kept for backward compatibility
def fuzzyOr(m):
    """
    fuzzy oring
    m = list of membership values to be ored
    returns largest value in the list (Zadeh OR = maximum)
    """
    largest = max(m)
    return largest
FuzzyOr = fuzzyOr  # CamelCase alias kept for backward compatibility
def fuzzyNot(x):
    """
    fuzzy not
    x = single membership value to be noted
    returns the inverse membership value (1 - x)
    """
    inverse = 1 - x
    return inverse
def compensatoryAnd(m, g=0.5):
    """
    anding function
    m = list of membership values for x derived from n membership functions
    g = gamma value 0=product 1=algebraic sum
    returns compensatory AND value of x
    """
    gamma = float(g)
    conjunctive = 1   # product of memberships
    complements = 1   # product of (1 - membership)
    for grade in m:
        conjunctive *= grade
        complements *= (1 - grade)
    return math.pow(conjunctive, 1 - gamma) * math.pow((1 - complements), gamma)
CompensatoryAnd = compensatoryAnd
def gowa(w, wm, l=1.0):
    """
    Generalized Ordered Weighted Averaging Operator
    More info can be found here:
    https://pdfs.semanticscholar.org/2810/c971af0d01d085c799fb2295dc5668d055c8.pdf
    l = -1 = Ordered Weighted Harmonic Averaging Operator
    l = -.000000000001 = Ordered Weighted Geometric Averaging Operator
    l = 1 = Ordered Weighted Arithmetic Averaging Operator
    l = 2 = Ordered Weighted Quadratic Averaging Operator
    w = list of weights
    wm = list of importance weighted membership values
    l = lambda real number specifying type of owa to use
    returns ordered weighted average
    """
    if len(w) != len(wm):
        raise ValueError("Weights and membership value lists must be of equal length.")
    if l == 0:
        raise ZeroDivisionError("Param l cannot be 0. Use -.000000000001 for owg.")
    # Sort a copy in descending order; the previous implementation sorted the
    # caller's list in place, which was a surprising side effect.
    ordered = sorted(wm, reverse=True)
    s = 0
    for i in range(len(w)):
        s += w[i] * math.pow(ordered[i], l)
    return math.pow(s, 1 / l)
Gowa = gowa
def owa(w, wm):
    """
    Ordered Weighted Arithmetic Averaging Operator
    w = [1,0,0,0] = AND
    w = [0,0,0,1] = OR
    w = [1/n,1/n,1/n,1/n] = Arithmetic Average where n=len(w)
    w = list of weights
    wm = list of importance weighted membership values
    returns ordered arithmetic weighted average
    """
    if len(w) != len(wm):
        raise ValueError("Weights and membership value lists must be of equal length.")
    # Sort a copy in descending order instead of mutating the caller's list.
    ordered = sorted(wm, reverse=True)
    s = 0
    for i in range(len(w)):
        s += w[i] * ordered[i]
    return s
Owa = owa
def owg(w, wm):
    """
    Ordered Weighted Geometric Averaging Operator
    More info can be found here:
    ftp://decsai.ugr.es/pub/arai/tech_rep/decision/libroOWG.pdf
    w = [1,0,0,0] = AND
    w = [0,0,0,1] = OR
    w = [1/n,1/n,1/n,1/n] = Geometric Average where n=len(w)
    w = list of weights
    wm = list of importance weighted membership values
    returns ordered geometric weighted average
    """
    if len(w) != len(wm):
        raise ValueError("Weights and membership value lists must be of equal length.")
    # Sort a copy in descending order instead of mutating the caller's list.
    ordered = sorted(wm, reverse=True)
    s = 1
    for i in range(len(w)):
        s *= math.pow(ordered[i], w[i])
    return s
# Fixed: this alias previously pointed at owa, so Owg silently computed the
# arithmetic average instead of the geometric one.
Owg = owg
def owh(w, wm):
    """
    Ordered Weighted Harmonic Averaging Operator
    w = [1,0,0,0] = AND
    w = [0,0,0,1] = OR
    w = [1/n,1/n,1/n,1/n] = Harmonic Average where n=len(w)
    w = list of weights
    wm = list of importance weighted membership values
    returns ordered harmonic weighted average
    """
    # Harmonic mean is the l = -1 special case of the generalized operator.
    return gowa(w, wm, -1)
Owh = owh
def owq(w, wm):
    """
    Ordered Weighted Quadratic Averaging Operator
    w = [1,0,0,0] = AND
    w = [0,0,0,1] = OR
    w = [1/n,1/n,1/n,1/n] = Quadratic Average where n=len(w)
    w = list of weights
    wm = list of importance weighted membership values
    returns ordered quadratic weighted average
    """
    # Quadratic mean is the l = 2 special case of the generalized operator.
    return gowa(w, wm, 2)
Owq = owq
def median(wm):
    """
    Median Operator
    wm = list of importance weighted membership values
    returns the middle value in the set (mean of the two middle values for
    an even-length list, per statistics.median)
    """
    middle = statistics.median(wm)
    return middle
Median = median
| 3.46875 | 3 |
test/conftest.py | xphlawlessx/ceknito | 47 | 12774090 | import bcrypt
from functools import lru_cache, wraps
import os
import pytest
from pyrsistent import freeze, thaw
import yaml
from app import create_app
from app.config import Config
from app.models import db, BaseModel, User, SiteMetadata
from app.caching import cache
from app.auth import auth_provider
from test.utilities import recursively_update, add_config_to_site_metadata
# Tests override this fixture to inject per-test configuration values.
@pytest.fixture
def test_config():
    """Extra configuration values to be used in a test."""
    return {}
def _make_config(config_dict) -> Config:
    """Create a standard testing Config from the given dictionary."""
    # use_environment=False keeps environment variables from leaking into
    # test configuration, so runs are reproducible.
    return Config(
        config_dict=config_dict,
        use_environment=False,
        model=SiteMetadata,
        cache=cache,
    )
def _freeze_dict_arg(func):
    """Freeze the argument of a function that takes a single dictionary.
    This decorator exists purely to get a hashable dictionary that can
    be cached with lru_cache, so func should be a function wrapped with
    the lru_cache decorator.
    """
    @wraps(func)
    def inner(arg: dict):
        # pyrsistent's freeze() makes the (nested) dict immutable/hashable.
        return func(freeze(arg))
    return inner
# The lru_cache avoids rebuilding the Flask app for identical configs across
# tests; _freeze_dict_arg makes the dict argument hashable for the cache.
@_freeze_dict_arg
@lru_cache()
def get_app(frozen_config_dict):
    """Create the Flask application, cached by config dictionary."""
    config = _make_config(thaw(frozen_config_dict))
    app = create_app(config)
    return app, config
@pytest.fixture
def app_before_init_db(test_config):
    """Create the Flask app with an uninitialized database.

    Configuration layering (later wins): test defaults, then the optional
    TEST_CONFIG YAML file, then the per-test test_config fixture.
    """
    db_name = ":memory:"
    config_filename = os.environ.get("TEST_CONFIG", None)
    if config_filename is None:
        custom_config = {}
    else:
        with open(config_filename) as stream:
            custom_config = yaml.safe_load(stream)
    # Set some things that make sense for testing.
    test_defaults = {
        "app": {
            "debug": False,
            "development": False,
            "languages": ["en"],
            "testing": True,
        },
        # TODO set Redis database number to different than dev-server and use Redis here.
        "cache": {"type": "simple"},
        "database": {"engine": "SqliteDatabase", "name": db_name},
        "mail": {
            "server": "smtp.example.com",
            "port": 8025,
            "default_from": "<EMAIL>",
        },
        "ratelimit": {"enabled": False},
    }
    config = {}
    recursively_update(config, test_defaults)
    recursively_update(config, custom_config)
    recursively_update(config, test_config)
    app, conf_obj = get_app(config)
    app_context = app.app_context()
    app_context.push()
    # Code after the yield is fixture teardown: pop the app context.
    yield app, conf_obj
    app_context.pop()
# The fixture "client" is generated from this one by pytest-flask.
@pytest.fixture
def app(app_before_init_db):
    """Create the Flask app with an initialized database.

    Sets up a fresh schema per test and, after the yield, tears down
    auth-provider users and detaches the SQLite database.
    """
    app, conf_obj = app_before_init_db
    cache.clear()
    if conf_obj.database.engine == "PostgresqlDatabase":
        # Drop and recreate the schema so every test starts clean.
        db.execute_sql("DROP SCHEMA public CASCADE;")
        db.execute_sql("CREATE SCHEMA public;")
        db.execute_sql("GRANT ALL ON SCHEMA public TO public;")
    db.create_tables(BaseModel.__subclasses__())
    add_config_to_site_metadata(conf_obj)
    yield app
    if conf_obj.auth.provider != "LOCAL":
        for user in User.select():
            try:
                auth_provider.actually_delete_user(user)
            except Exception as err:
                print(f"Error trying to clean up {user.name} in Keycloak realm:", err)
                raise err
    # Fixed: this previously compared against "Sqlitedatabase" (wrong case),
    # which never matched the "SqliteDatabase" engine string configured in
    # app_before_init_db, so the detach never ran.
    if conf_obj.database.engine == "SqliteDatabase":
        db.detach(conf_obj.database.name)
@pytest.fixture(autouse=True)
def fast_hashing(monkeypatch):
    # bcrypt is deliberately slow; replace hashpw with a trivial,
    # deterministic stand-in so the test suite stays fast.
    def just_add_salt(data, salt):
        assert isinstance(data, bytes)
        assert isinstance(salt, bytes)
        data = bytearray(data)
        data.append(salt[-1])
        return bytes(data)
    monkeypatch.setattr(bcrypt, "hashpw", just_add_salt)
# Credentials for a generic test user. (The <EMAIL>/<PASSWORD> tokens look
# like dataset redactions -- TODO confirm intended literal values.)
@pytest.fixture
def user_info():
    return dict(
        username="supertester", email="<EMAIL>", password="<PASSWORD>"
    )
# Credentials for a second test user (administrator role by name).
@pytest.fixture
def user2_info():
    return dict(
        username="administrator", email="<EMAIL>", password="<PASSWORD>###"
    )
# Credentials for a third test user (moderator role by name).
@pytest.fixture
def user3_info():
    return dict(
        username="moderator", email="<EMAIL>", password="<PASSWORD>###"
    )
| 2.4375 | 2 |
models/session.py | SsureyMoon/Python-GoogleAppEngine | 0 | 12774091 | from google.appengine.ext import ndb
from protorpc import messages
class Session(ndb.Model):
    """Session -- Session object"""
    organizerUserId = ndb.StringProperty()
    name = ndb.StringProperty(required=True)
    highlights = ndb.StringProperty(repeated=True)
    speaker = ndb.StringProperty()
    duration = ndb.StringProperty()
    typeOfSession = ndb.StringProperty(repeated=True)
    date = ndb.DateProperty()
    startTime = ndb.IntegerProperty()
    # Key (string form) of the conference this session belongs to.
    conferenceKeyBelongTo = ndb.StringProperty()
class SessionForm(messages.Message):
    """SessionForm -- Session outbound form message"""
    # Field numbers are part of the protorpc wire format; do not renumber.
    organizerUserId = messages.StringField(1)
    name = messages.StringField(2)
    highlights = messages.StringField(3, repeated=True)
    speaker = messages.StringField(4)
    duration = messages.StringField(5)
    typeOfSession = messages.EnumField('TypeOfSession', 6, repeated=True)
    date = messages.StringField(7)
    startTime = messages.StringField(8)
    organizerDisplayName = messages.StringField(9)
class SessionForms(messages.Message):
    """SessionForms -- multiple Session outbound form message"""
    items = messages.MessageField(SessionForm, 1, repeated=True)
class SessionQueryForm(messages.Message):
    """SessionQueryForm -- Session query inbound form message"""
    # One filter expression: e.g. field="speaker", operator="EQ", value="...".
    field = messages.StringField(1)
    operator = messages.StringField(2)
    value = messages.StringField(3)
class SessionQueryForms(messages.Message):
    """SessionQueryForms -- multiple SessionQueryForm inbound form message"""
    filters = messages.MessageField(SessionQueryForm, 1, repeated=True)
class TypeOfSession(messages.Enum):
    """TypeOfSession -- session type enumeration value"""
    NOT_SPECIFIED = 1
    LECTURE = 2
    KEYNOTE = 3
    WORKSHOP = 4
    DEMO = 5
    SOCIAL = 6
class FeaturedSpeaker(messages.Message):
    """FeaturedSpeaker -- Featured speaker info. outbound message"""
    speaker = messages.StringField(1)
    # Names of the sessions this speaker appears in.
    sessionNames = messages.StringField(2, repeated=True)
class FeaturedSpeakerList(messages.Message):
    """FeaturedSpeakerList
    -- multiple Featured speaker info. outbound message"""
    items = messages.MessageField(FeaturedSpeaker, 1, repeated=True)
| 2.265625 | 2 |
src/boc_python_demo_test.py | bocgi-demo/python | 0 | 12774092 | from boc_python_demo import my_sum
def test_my_sum():
    """my_sum should produce the Fibonacci-like sequence for inputs 1..6."""
    expected = {1: 1, 2: 2, 3: 3, 4: 5, 5: 8, 6: 13}
    for n, value in expected.items():
        assert my_sum(n) == value
| 2.6875 | 3 |
capsule/gamma_capsule_layer.py | moejoe95/capsnet-limitations | 9 | 12774093 |
import tensorflow as tf
from capsule.utils import squash
import numpy as np
# Short aliases for Keras submodules used in this module.
layers = tf.keras.layers
models = tf.keras.models
class GammaCapsule(tf.keras.Model):
    """Capsule layer using scaled-distance-agreement ("gamma") routing.

    Maps in_capsules capsules of size in_dim to out_capsules capsules of
    size out_dim via a learned transformation W and an iterative routing
    loop over `routing_iterations` steps.
    """
    def __init__(self, in_capsules, in_dim, out_capsules, out_dim, stdev=0.2, routing_iterations=2, use_bias=True, name=''):
        super(GammaCapsule, self).__init__(name=name)
        self.in_capsules = in_capsules
        self.in_dim = in_dim
        self.out_capsules = out_capsules
        self.out_dim = out_dim
        self.routing_iterations = routing_iterations
        self.use_bias = use_bias
        with tf.name_scope(self.name):
            # W: (1, out_caps, in_caps, out_dim, in_dim); the leading 1 is
            # tiled to the batch size in call().
            w_init = tf.random_normal_initializer(stddev=stdev)
            self.W = tf.Variable(name="W", initial_value=w_init(shape=(1, out_capsules, in_capsules, out_dim, in_dim),
                                                                dtype='float32'),
                                 trainable=True)
            if self.use_bias:
                bias_init = tf.constant_initializer(0.1)
                self.bias = tf.Variable(name="bias", initial_value=bias_init(shape=(1, out_capsules, out_dim),
                                                                             dtype='float32'),
                                        trainable=True)
    def call(self, u):
        """
        param: u - (batch_size, in_caps, in_dim)
        returns: output capsules v_j - (batch_size, out_caps, out_dim)
        """
        batch_size = tf.shape(u)[0]
        u_norm = tf.norm(u, axis=-1) # (batch_size, in_caps)
        # Reshape u into (batch_size, out_caps, in_caps, out_dim, in_dim)
        u = tf.expand_dims(u, 1)
        u = tf.expand_dims(u, 3)
        u = tf.tile(u, [1, self.out_capsules, 1, 1, 1])
        u = tf.tile(u, [1, 1, 1, self.out_dim, 1])
        # Duplicate transformation matrix for each batch
        w = tf.tile(self.W, [batch_size, 1, 1, 1, 1])
        # Dotwise product between u and w to get all votes
        # shape = (batch_size, out_caps, in_caps, out_dim)
        u_hat = tf.reduce_sum(u * w, axis=-1)
        # Ensure that ||u_hat|| <= ||v_i||
        # (clip each vote's norm to the norm of the input capsule it came from)
        u_hat_norm = tf.norm(u_hat, axis=-1, keepdims=True)
        u_norm = tf.expand_dims(u_norm, axis=1)
        u_norm = tf.expand_dims(u_norm, axis=3)
        u_norm = tf.tile(u_norm, [1, self.out_capsules, 1, self.out_dim])
        new_u_hat_norm = tf.math.minimum(u_hat_norm, u_norm)
        u_hat = u_hat / u_hat_norm * new_u_hat_norm
        # Scaled-distance-agreement routing
        bias = tf.tile(self.bias, [batch_size, 1, 1])
        b_ij = tf.zeros(shape=[batch_size, self.out_capsules, self.in_capsules, 1])
        for r in range(self.routing_iterations):
            # c_ij: routing coefficients, softmax over output capsules.
            c_ij = tf.nn.softmax(b_ij, axis=1)
            c_ij_tiled = tf.tile(c_ij, [1, 1, 1, self.out_dim])
            s_j = tf.reduce_sum(c_ij_tiled * u_hat, axis=2) + bias
            v_j = squash(s_j)
            if(r < self.routing_iterations - 1):
                v_j = tf.expand_dims(v_j, 2)
                v_j = tf.tile(v_j, [1, 1, self.in_capsules, 1]) # (batch_size, out_caps, in_caps, out_dim)
                # Calculate scale factor t
                # p_p is the target agreement probability; t is chosen so
                # that a vote at distance d_p gets probability p_p.
                p_p = 0.9
                d = tf.norm(v_j - u_hat, axis=-1, keepdims=True)
                d_o = tf.reduce_mean(tf.reduce_mean(d))
                d_p = d_o * 0.5
                t = tf.constant(np.log(p_p * (self.out_capsules - 1)) - np.log(1 - p_p), dtype=tf.float32) \
                    / (d_p - d_o + 1e-12)
                t = tf.expand_dims(t, axis=-1)
                # Calc log prior using inverse distances
                b_ij = t * d
        return v_j
| 2.390625 | 2 |
Heike/iTunes.py | ArtezGDA/text-IO | 0 | 12774094 | <reponame>ArtezGDA/text-IO
# Mock iTunes library. The original literal was syntactically invalid
# (artist names mapped directly to album names with a stray colon, e.g.
# 'The Neighbourhood': 'Wiped Out!': {...}); every artist now maps to a
# dict of albums, and every album maps to {song title: duration in minutes}.
iTunes = {
    'artists': {
        'The Neighbourhood': {
            # album name
            'Wiped Out!': {
                # title of song : duration
                'Prey': 3.22,
                'Cry Baby': 4.02,
                'A Moment of Silence': 2.05
            },
        },
        '<NAME>': {
            'To Pimp a Butterfly': {
                'King Kunta': 3.54,
                'Alright': 3.39,
                'u': 4.28
            },
        },
        'BANKS': {
            'Goddess': {
                'Waiting Game': 3.27,
                'This Is What It Feels Like': 5.02,
                'Brain': 4.42
            },
        },
        'The Acid': {
            'Liminal': {
                'Animal': 4.37,
                'Ra': 3.22,
                'Basic Instinct': 4.49,
            },
        },
        'The Arctic Monkeys': {
            'AM': {
                'Do I Wanna Know': 4.32,
                'Arabella': 3.27,
                'R U Mine?': 3.21
            }
        }
    }
}
dsemproducaoenv/Lib/site-packages/jupyter_server/_version.py | felipetmota/DsEmProducao | 0 | 12774095 | """
store the current version info of the server.
"""
from jupyter_packaging import get_version_info
# Version string must appear intact for tbump versioning
__version__ = '1.6.2'
# Structured version tuple derived from the canonical string above.
version_info = get_version_info(__version__)
| 1.40625 | 1 |
src/test/provision/app/alarm/trigger/test_metrics.py | mycloudandme/spacel-provision | 2 | 12774096 | <filename>src/test/provision/app/alarm/trigger/test_metrics.py
import unittest
from spacel.provision.app.alarm.trigger.metrics import MetricDefinitions
class TestMetricDefinitions(unittest.TestCase):
    """Unit tests for the built-in alarm metric definitions."""

    def setUp(self):
        self.metrics = MetricDefinitions()

    def test_get(self):
        cpu_metric = self.metrics.get('cpu')
        # assertEquals is a long-deprecated alias; assertEqual is the
        # supported spelling.
        self.assertEqual('CPUUtilization', cpu_metric['metricName'])
| 2.171875 | 2 |
weapon.py | ebroniarczyk/ufo_worms | 0 | 12774097 | import pygame, math, time
from enum import Enum
class WeaponType(Enum):
    """Categories of weapon behaviour; only LOADABLE is used in this module."""
    MELEE = 1
    LOADABLE = 2
    DOUBLE_SHOT = 3
# Weapon ideas (translated from Polish): bazooka, grenade, finger, shotgun.
class Weapon(object):
    """A worm's weapon: charging while Space is held, firing, and bullet physics.

    Timing previously used time.clock(), which was deprecated since Python 3.3
    and removed in 3.8; time.perf_counter() is its documented replacement.
    """
    def __init__(self, team, battle, game):
        self.team = team
        self.owner = team.get_selected_worm()
        self.force = 0      # charge accumulated while Space is held (max 70)
        self.ammo = -1      # -1 appears to mean unlimited -- TODO confirm
        self.gravity = game.gravity
        self.type = WeaponType.LOADABLE
        self.shooting = False
        # Bullet spawns at the worm's facing edge, vertically centred.
        self.bullet_x = int(self.owner.x + int(self.owner.face_right) * self.owner.worm_size[0])
        self.bullet_y = int(self.owner.y + self.owner.worm_size[1]/2)
        self.bullet_current_y = 0
        self.t_init_shot = 0
        self.t_shot = 10e8  # large sentinel: "not fired yet"
        self.bullet_v_vertical = 0
        self.bullet_v_horizontal = 0
        self.battle = battle
    def set_current_owner(self):
        """
        Sets current owner of the weapon.
        """
        self.owner = self.team.get_selected_worm()
        self.bullet_x = int(self.owner.x + self.owner.worm_size[0]/2)
        self.bullet_y = int(self.owner.y + self.owner.worm_size[1]/2)
    def draw(self, screen):
        """
        Draws bullet and rectangle of shot force.
        """
        if self.type == WeaponType.LOADABLE:
            if self.shooting == False and self.force != 0:
                # Charge gauge: fixed outline plus a fill proportional to force.
                pygame.draw.rect(screen, (255,255,255), pygame.Rect(1100, 650, 70, 20), 1)
                pygame.draw.rect(screen, (255,255,255), pygame.Rect(1100, 650, int(self.force), 20))
            if self.shooting == True:
                pygame.draw.circle(screen, (200,20,0), (int(self.bullet_x), int(self.bullet_y)), 4)
    def action(self, key, key_event_type):
        """
        Makes alien shoot if Spacebar is pressed.
        """
        if key == pygame.K_SPACE and self.type == WeaponType.LOADABLE:
            self.force += 0.1
            # Fire once fully charged, or when the key is released.
            if self.force >= 70 or key_event_type == pygame.KEYUP:
                self.__shoot()
                # if self.battle.get_preparation() == False and self.battle.get_time() <=1:
                #     self.__shoot()
                #     self.force = 0
                if self.battle.sound:
                    shooting_sound = pygame.mixer.Sound("shoot.wav")
                    shooting_sound.play()
                self.check_preparation()
    def check_preparation(self):
        """
        Starts new round if alien starts to shoot during preparation time.
        """
        if self.battle.get_preparation() and self.shooting:
            self.battle.next_round()
    def update(self):
        """
        Updates bullet coordinates.
        """
        self.__update_bullet_position()
    def __shoot(self):
        # Launch the bullet from the owner's centre with a velocity derived
        # from the charge force and the worm's aiming angle.
        if self.type == WeaponType.LOADABLE and self.shooting == False :
            self.shooting = True
            self.bullet_x = int(self.owner.x + self.owner.worm_size[0]/2)
            self.bullet_y = int(self.owner.y + self.owner.worm_size[1]/2)
            self.bullet_current_y = self.bullet_y + self.owner.worm_size[1]/2
            # perf_counter() replaces the removed time.clock(); only time
            # differences are used, so the different epoch does not matter.
            self.t_init_shot = time.perf_counter()
            self.t_shot = self.t_init_shot
            angle_radians = math.radians(self.owner.angle)
            self.bullet_v_vertical = 20*self.force*math.sin(angle_radians)
            if self.owner.face_right:
                self.bullet_v_horizontal = 20*self.force*math.cos(angle_radians)
            if self.owner.face_right == False:
                self.bullet_v_horizontal = -20*self.force*math.cos(angle_radians)
            self.force = 0
    def __update_bullet_position(self):
        """
        Sets bullet coordinates.
        Checks if any alien is hit.
        Removes alien when its hp = 0.
        """
        if self.shooting:
            delta = 1/300  # physics step in seconds
            if(time.perf_counter()-self.t_shot >= delta):
                self.t_shot = time.perf_counter()
                # Simple ballistic integration under gravity.
                v_vert = self.bullet_v_vertical - (self.t_shot - self.t_init_shot) * self.gravity
                self.bullet_x = self.bullet_x + self.bullet_v_horizontal*delta
                self.bullet_y = self.bullet_y - v_vert*delta
                if self.bullet_y >= 700:
                    self.shooting = False
                for worm in self.battle.get_all_worms():
                    if (worm != self.owner and worm.is_alive and
                        int(self.bullet_x) in range(int(worm.x), int(worm.x)+worm.worm_size[0]) and int(self.bullet_y) in range(int(worm.y), int(worm.y)+worm.worm_size[1])):
                        self.shooting = False
                        worm.update_hp(50)
                        if worm.is_alive == False:
                            # NOTE(review): self.battle.teams is mutated while
                            # being iterated; this only works because at most
                            # one team can contain the dead worm -- confirm.
                            for team in self.battle.teams:
                                if worm in team.worms:
                                    team.worms.remove(worm)
                                if len(team.worms) == 0:
                                    self.battle.teams.remove(team)
                            if len(self.battle.teams) == 1:
                                self.battle.show = False
                                self.battle.end_show = True
| 3.046875 | 3 |
database/admin.py | gbriones1/django-skelleton | 0 | 12774098 | <reponame>gbriones1/django-skelleton<filename>database/admin.py
from django.contrib import admin
from database.models import Product, Input
class ProductAdmin(admin.ModelAdmin):
    """Default Django admin configuration for Product."""
    pass
class InputAdmin(admin.ModelAdmin):
    """Default Django admin configuration for Input."""
    pass
# Expose both models in the Django admin site.
admin.site.register(Product, ProductAdmin)
admin.site.register(Input, InputAdmin)
| 1.742188 | 2 |
lib/config.py | captainshar/pi-setup | 0 | 12774099 | <reponame>captainshar/pi-setup
import os
import time
import copy
import glob
import socket
import hashlib
import traceback
import yaml
import pykube
import urlparse
import requests
import netifaces
import avahi
import dbus
import encodings.idna
WPA = """ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev
update_config=1
country=%s
network={
\tscan_ssid=1
\tssid="%s"
\t%s
}
"""
RESOURCES = [
"Namespace",
"ConfigMap",
"Secret",
"ServiceAccount",
"ClusterRole",
"ClusterRoleBinding",
"Role",
"RoleBinding"
]
SERVER = """server {
listen %s;
server_name %s;
location / {
proxy_pass %s://%s:%s/;
}
}
"""
class AppException(Exception):
    """Application-level error raised by the klot-io daemon."""
    pass
class Daemon(object):
    """klot-io configuration daemon (Python 2): watches config YAML under
    /boot and /opt and applies account, network, and kubernetes settings."""
    def __init__(self):
        self.config = {}    # last-loaded YAML config, keyed by file stem
        self.mtimes = {}    # mtime per config file, used to detect changes
        self.modified = []  # config names that changed in the latest load()
        self.kube = None    # pykube client (used by the kubernetes step)
        self.node = None    # this host's node name, set by host()
        self.cnames = set() # mDNS CNAME records to publish via avahi()
    def execute(self, command):
        # Echo the command before running it so daemon logs show every action.
        print command
        os.system(command)
    def reset(self):
        # Factory reset: remove the trigger file and rewrite all three
        # config files with their defaults.
        print "reseting"
        self.execute("rm /boot/klot-io/reset")
        # NOTE(review): "<PASSWORD>" looks like a redacted placeholder for
        # the default password -- confirm the intended literal value.
        with open("/opt/klot-io/config/account.yaml", "w") as yaml_file:
            yaml.safe_dump({"password": "<PASSWORD>", "ssh": "disabled"}, yaml_file, default_flow_style=False)
        with open("/opt/klot-io/config/network.yaml", "w") as yaml_file:
            yaml.safe_dump({"interface": "eth0"}, yaml_file, default_flow_style=False)
        with open("/opt/klot-io/config/kubernetes.yaml", "w") as yaml_file:
            yaml.safe_dump({"role": "reset"}, yaml_file, default_flow_style=False)
    def restart(self):
        # Self-update: install the new config.py staged on the boot
        # partition, then restart this daemon's systemd service.
        print "restarting"
        self.execute("cp /boot/klot-io/lib/config.py /opt/klot-io/lib/config.py")
        self.execute("chown 1000:1000 /opt/klot-io/lib/config.py")
        self.execute("chmod a+x /opt/klot-io/lib/config.py")
        self.execute("rm /boot/klot-io/lib/config.py")
        self.execute("systemctl restart klot-io-daemon")
    def reload(self):
        # Move any config YAML staged on the boot partition into the
        # runtime config directory and fix ownership if anything moved.
        reloaded = False
        for yaml_path in glob.glob("/boot/klot-io/config/*.yaml"):
            self.execute("mv %s /opt/klot-io/config/" % yaml_path)
            reloaded = True
        if reloaded:
            self.execute("chown -R pi /opt/klot-io/config/")
    def load(self):
        # Re-read any config file whose mtime changed since the last load,
        # recording which config names changed in self.modified.
        self.modified = []
        for path in glob.glob("/opt/klot-io/config/*.yaml"):
            config = path.split("/")[-1].split('.')[0]
            mtime = os.path.getmtime(path)
            if config not in self.mtimes or self.mtimes[config] != mtime:
                with open(path, "r") as yaml_file:
                    self.config[config] = yaml.safe_load(yaml_file)
                self.mtimes[config] = mtime
                self.modified.append(config)
    def differs(self, expected, actual):
        # Log both values, then report whether they differ.
        print "actual: %s" % actual
        print "expected: %s" % expected
        return expected != actual
    def uninitialized(self):
        # Keep the local "uninitialized" marker file in sync with whether
        # the klot-io.local status endpoint is reachable.
        expected = os.path.exists("/opt/klot-io/config/uninitialized")
        try:
            requests.get("http://klot-io.local/api/status", timeout=5)
            actual = True
        except:
            actual = False
        if expected and not actual:
            print("uninitialized not found")
            os.remove("/opt/klot-io/config/uninitialized")
        elif not expected and actual:
            print("uninitialized found")
            open("/opt/klot-io/config/uninitialized", "w").close()
    # Stolen from https://gist.github.com/gdamjan/3168336
    TTL = 15
    # Got these from /usr/include/avahi-common/defs.h
    CLASS_IN = 0x01
    TYPE_CNAME = 0x05
    @staticmethod
    def encode_cname(name):
        # IDNA-encode each dot-separated label of the CNAME.
        return '.'.join( encodings.idna.ToASCII(p) for p in name.split('.') if p )
    @staticmethod
    def encode_rdata(name):
        # DNS wire format: each label length-prefixed, NUL terminated.
        def enc(part):
            a = encodings.idna.ToASCII(part)
            return chr(len(a)), a
        return ''.join( '%s%s' % enc(p) for p in name.split('.') if p ) + '\0'
    def avahi(self):
        # Restart the mDNS daemon, then re-publish every CNAME in
        # self.cnames as a DNS record via Avahi's D-Bus interface.
        self.execute("systemctl restart avahi-daemon")
        if self.cnames:
            bus = dbus.SystemBus()
            server = dbus.Interface(bus.get_object(avahi.DBUS_NAME, avahi.DBUS_PATH_SERVER), avahi.DBUS_INTERFACE_SERVER)
            group = dbus.Interface(bus.get_object(avahi.DBUS_NAME, server.EntryGroupNew()), avahi.DBUS_INTERFACE_ENTRY_GROUP)
            for cname in self.cnames:
                # Each CNAME points at this host's fully qualified name.
                group.AddRecord(
                    avahi.IF_UNSPEC,
                    avahi.PROTO_UNSPEC,
                    dbus.UInt32(0),
                    self.encode_cname(cname),
                    self.CLASS_IN,
                    self.TYPE_CNAME,
                    self.TTL,
                    avahi.string_to_byte_array(self.encode_rdata(server.GetHostNameFqdn()))
                )
            group.Commit()
    def account(self):
        # Apply account config: set the pi user's password and toggle sshd.
        self.execute("echo 'pi:%s' | chpasswd" % self.config["account"]["password"])
        if self.config["account"]["ssh"] == "enabled":
            self.execute("systemctl enable ssh")
            self.execute("systemctl start ssh")
        else:
            self.execute("systemctl stop ssh")
            self.execute("systemctl disable ssh")
    def network(self):
        # Apply network config only when the network YAML actually changed.
        if "network" not in self.modified:
            return
        expected = self.config["network"]['interface']
        # Keep avahi pinned to the selected interface.
        with open("/etc/avahi/avahi-daemon.conf", "r") as avahi_file:
            for avahi_line in avahi_file:
                if "allow-interfaces" in avahi_line:
                    actual = avahi_line.split('=')[-1].strip()
        if self.differs(expected, actual):
            os.system("sed -i 's/allow-interfaces=.*/allow-interfaces=%s/' /etc/avahi/avahi-daemon.conf" % expected)
            self.avahi()
        if expected == "eth0":
            # Wired: take wlan0 down and write a dummy wpa config.
            self.execute("sudo ifconfig wlan0 down")
            expected = WPA % ("NOPE", "nope", 'key_mgmt=NONE')
        elif expected == "wlan0":
            self.execute("sudo ifconfig wlan0 up")
            expected = WPA % (
                self.config["network"]["country"],
                self.config["network"]["ssid"],
                'psk="%s"' % self.config["network"]["psk"] if self.config["network"]["psk"] else 'key_mgmt=NONE'
            )
        # Rewrite wpa_supplicant.conf only if its content would change.
        with open("/etc/wpa_supplicant/wpa_supplicant.conf", "r") as wpa_file:
            actual = wpa_file.read()
        if self.differs(expected, actual):
            with open("/etc/wpa_supplicant/wpa_supplicant.conf", "w") as wpa_file:
                wpa_file.write(expected)
            self.execute("wpa_cli -i wlan0 reconfigure")
    def interfaces(self):
        """Return {interface_name: ipv4_address} for interfaces with an IPv4 address."""
        interfaces = {}
        for interface in netifaces.interfaces():
            ifaddresses = netifaces.ifaddresses(interface)
            if netifaces.AF_INET in ifaddresses:
                # Only the first IPv4 address of each interface is reported.
                interfaces[interface] = ifaddresses[netifaces.AF_INET][0]['addr']
        return interfaces
    def host(self, expected):
        """Set this node's hostname to `expected` in /etc/hostname and /etc/hosts.

        Also records the name on self.node and re-publishes mDNS records when
        either file actually changed. (The local `avahi` flag shadows the
        imported avahi module inside this method.)
        """
        self.node = expected
        avahi = False
        with open("/etc/hostname", "r") as hostname_file:
            actual = hostname_file.read()
        if self.differs(expected, actual):
            with open("/etc/hostname", "w") as hostname_file:
                hostname_file.write(expected)
            self.execute("hostnamectl set-hostname %s" % expected)
            avahi = True
        # The 127.0.1.1 mapping is assumed to be the last line of /etc/hosts.
        with open("/etc/hosts", "r") as hosts_file:
            actual = hosts_file.readlines()[-1].split("\t")[-1].strip()
        if self.differs(expected, actual):
            self.execute("sed -i 's/127.0.1.1\t.*/127.0.1.1\t%s/' /etc/hosts" % expected)
            avahi = True
        if avahi:
            self.avahi()
    def kubernetes(self):
        """Reconcile this node's Kubernetes role: reset, master init, or worker join.

        Waits for the configured network interface to come up, derives a
        kubeadm bootstrap token from the account password, runs kubeadm, and
        writes /home/pi/.kube/config for the pi user.
        """
        if self.config["kubernetes"]["role"] == "reset":
            if not os.path.exists("/home/pi/.kube/config"):
                print "already reset kubernetes"
                return
            # Best effort: remove this node from the cluster if it is known.
            try:
                pykube.Node.objects(self.kube).filter().get(name=self.node).delete()
            except pykube.ObjectDoesNotExist:
                pass
            self.host("klot-io")
            self.execute("rm -f /opt/klot-io/config/kubernetes.yaml")
            self.execute("rm -f /home/pi/.kube/config")
            self.execute("kubeadm reset")
            # NOTE(review): execution continues past the reboot request until
            # the OS actually goes down -- presumably intentional; confirm.
            self.execute("reboot")
        # Wait (up to ~100s) for the configured interface to obtain an IP.
        attempts = 20
        while attempts:
            interfaces = self.interfaces()
            print "interfaces: %s" % interfaces
            if self.config["network"]['interface'] in interfaces:
                break
            time.sleep(5)
            attempts -= 1
        ip = interfaces[self.config["network"]['interface']]
        # NOTE(review): the bootstrap token is deterministic slices of
        # sha256(password) -- weaker than a random token; flagged for review.
        encoded = hashlib.sha256(self.config["account"]["password"]).hexdigest()
        token = "%s.%s" % (encoded[13:19], encoded[23:39])
        if self.config["kubernetes"]["role"] == "master":
            self.host("%s-klot-io" % self.config["kubernetes"]["cluster"])
            if os.path.exists("/home/pi/.kube/config"):
                print "already initialized master"
                return
            self.execute(" ".join([
                'kubeadm',
                'init',
                '--token=%s' % token,
                '--token-ttl=0',
                '--apiserver-advertise-address=%s' % ip,
                '--pod-network-cidr=10.244.0.0/16',
                '--kubernetes-version=v1.10.2'
            ]))
            # Rewrite admin.conf so cluster/user/context entries carry this
            # node's name and the advertised IP instead of kubeadm defaults.
            with open("/etc/kubernetes/admin.conf", "r") as config_file:
                config = yaml.safe_load(config_file)
            config["clusters"][0]["cluster"]["server"] = 'https://%s:6443' % ip
            config["clusters"][0]["name"] = self.node
            config["users"][0]["name"] = self.node
            config["contexts"][0]["name"] = self.node
            config["contexts"][0]["context"]["cluster"] = self.node
            config["contexts"][0]["context"]["user"] = self.node
            config["current-context"] = self.node
        elif self.config["kubernetes"]["role"] == "worker":
            self.host("%s-%s-klot-io" % (self.config["kubernetes"]["name"], self.config["kubernetes"]["cluster"]))
            if os.path.exists("/etc/kubernetes/bootstrap-kubelet.conf"):
                print "already initialized worker"
                return
            self.execute(" ".join([
                'kubeadm',
                'join',
                '%s:6443' % socket.gethostbyname('%s-klot-io.local' % self.config["kubernetes"]["cluster"]),
                '--token=%s' % token,
                '--discovery-token-unsafe-skip-ca-verification'
            ]))
            # Workers fetch their kubectl config from the master's API.
            config = requests.get(
                'http://%s-klot-io.local/api/kubectl' % self.config["kubernetes"]["cluster"],
                headers={"x-klot-io-password": self.config["account"]['password']},
            ).json()["kubectl"]
        self.execute("mkdir -p /home/pi/.kube")
        self.execute("rm -f /home/pi/.kube/config")
        with open("/home/pi/.kube/config", "w") as config_file:
            yaml.safe_dump(config, config_file, default_flow_style=False)
        self.execute("chown pi:pi /home/pi/.kube/config")
        if self.config["kubernetes"]["role"] == "master":
            # Install the pod network (flannel) and the klot-io App CRD.
            self.execute("sudo -u pi -- kubectl apply -f /opt/klot-io/kubernetes/kube-flannel.yml")
            self.execute("sudo -u pi -- kubectl apply -f /opt/klot-io/kubernetes/klot-io-app-crd.yaml")
    def resources(self, obj):
        """Fetch and attach the Kubernetes manifests for an App object.

        Resolves each entry of spec.manifests against the app's source (a
        direct url, or a github.com repo/version/path), downloads the YAML,
        and stores all parsed resources on obj["resources"] ordered by kind.
        No-op if resources were already fetched.
        """
        if "resources" in obj:
            return
        obj["resources"] = []
        for manifest in obj["spec"]["manifests"]:
            # Each manifest entry overrides fields of the app's base source.
            source = copy.deepcopy(obj["source"])
            source.update(manifest)
            print "parsing %s" % source
            if "url" in source:
                url = source["url"]
            elif "site" in source and source["site"] == "github.com":
                if "repo" not in source:
                    raise AppException("missing source.repo for %s" % source["site"])
                repo = source["repo"]
                version = source["version"] if "version" in source else "master"
                url = "https://raw.githubusercontent.com/%s/%s/" % (repo, version)
            else:
                raise AppException("cannot parse %s" % source)
            if url.endswith("/"):
                if "path" not in source:
                    raise AppException("%s has no pathing" % source)
                # NOTE(review): url already ends with "/" so this produces a
                # double slash before the path; raw.githubusercontent appears
                # to tolerate it -- confirm before changing to "%s%s".
                url = "%s/%s" % (url, source["path"])
            print "fetching %s" % url
            response = requests.get(url)
            if response.status_code != 200:
                raise AppException("%s error from %s: %s" % (response.status_code, url, response.text))
            obj["resources"].extend(list(yaml.safe_load_all(response.text)))
        # Apply-order: known kinds in RESOURCES order, unknown kinds last.
        obj["resources"].sort(key= lambda resource: RESOURCES.index(resource["kind"]) if resource["kind"] in RESOURCES else len(RESOURCES))
def display(self, obj):
display = [obj["kind"]]
if "namespace" in obj["metadata"] and obj["metadata"]["namespace"]:
display.append(obj["metadata"]["namespace"])
display.append(obj["metadata"]["name"])
return "/".join(display)
    def app(self, requirement, action):
        """Return the App object for a requirement, creating it if missing.

        If an App with the requirement's name already exists in the cluster it
        is returned as-is; otherwise the app definition is downloaded from the
        requirement's source, validated, created with the given action and a
        'Discovered' status, and returned.
        """
        name = requirement["name"]
        print "searching for %s" % requirement["name"]
        try:
            app = pykube.App.objects(self.kube).filter().get(name=requirement["name"]).obj
            print "found %s %s" % (app["metadata"]["name"], app["source"])
            return app
        except pykube.ObjectDoesNotExist:
            pass
        source = requirement["source"]
        print "creating %s" % source
        if "url" in source:
            url = source["url"]
        elif "site" in source and source["site"] == "github.com":
            if "repo" not in source:
                raise AppException("missing source.repo for %s" % source["site"])
            repo = source["repo"]
            version = source["version"] if "version" in source else "master"
            url = "https://raw.githubusercontent.com/%s/%s/" % (repo, version)
        else:
            raise Exception("cannot preview %s" % source)
        if url.endswith("/"):
            # Default app definition file when the source names no path.
            path = source["path"] if "path" in source else "klot-io-app.yaml"
            url = "%s/%s" % (url, path)
        print "requesting %s" % url
        response = requests.get(url)
        if response.status_code != 200:
            raise Exception("error from source %s url: %s - %s: %s" % (source, url, response.status_code, response.text))
        obj = yaml.safe_load(response.text)
        # Validate the downloaded definition before creating it in-cluster.
        if not isinstance(obj, dict):
            raise Exception("source %s produced non dict %s" % (source, obj))
        if obj["apiVersion"] != "klot.io/v1":
            raise Exception("source %s apiVersion not klot.io/v1 %s" % (source, obj))
        if obj["kind"] != "App":
            raise Exception("source %s kind not App %s" % (source, obj))
        if "spec" not in obj:
            raise Exception("source %s missing spec %s" % (source, obj))
        if "metadata" not in obj:
            raise Exception("source %s missing metadata %s" % (source, obj))
        if "version" not in obj["metadata"]:
            raise Exception("source %s missing metadata.version %s" % (source, obj))
        if name != obj["metadata"].get("name"):
            raise Exception("source %s name does not match %s %s" % (source, name, obj))
        obj["source"] = source
        obj["action"] = action
        obj["status"] = "Discovered"
        pykube.App(self.kube, obj).create()
        return obj
    def source(self, obj):
        """Ensure every app listed in obj's spec.requires exists as an App object."""
        if "requires" in obj["spec"]:
            for requirement in obj["spec"]["requires"]:
                print "sourcing %s" % requirement
                app = self.app(requirement, obj["action"])
                print "sourced %s %s" % (app["metadata"]["name"], app["source"])
    def satisfy(self, obj):
        """Return True when all of obj's required apps are Installed.

        While checking, any unmet (non-Error) requirement is switched to the
        Install action when this app is itself being installed, so the next
        reconcile pass progresses the dependency chain.
        """
        satisfied = True
        if "requires" in obj["spec"]:
            for requirement in obj["spec"]["requires"]:
                print "satisfying %s" % requirement
                app = self.app(requirement, obj["action"])
                if app["status"] != "Installed":
                    satisfied = False
                if obj["action"] == "Install" and app["status"] != "Error":
                    app["action"] = "Install"
                    pykube.App(self.kube, app).replace()
        return satisfied
def url(self, obj):
if "url" not in obj["spec"]:
return
print "creating url %s " % obj["spec"]["url"]
obj["url"] = "%s://%s.%s.local" % (obj["spec"]["url"]["protocol"], obj["spec"]["url"]["host"], self.node)
if "port" in obj["spec"]["url"]:
obj["url"] = "%s:%s" % (obj["url"], obj["spec"]["url"]["port"])
if "path" in obj["spec"]["url"]:
obj["url"] = "%s/%s" % (obj["url"], obj["path"])
print "created url %s " % obj["url"]
def apps(self):
for obj in [app.obj for app in pykube.App.objects(self.kube).filter()]:
try:
if "status" not in obj:
obj["status"] = "Discovered"
if "action" not in obj:
obj["action"] = "Download"
if obj["status"] == "Discovered" and "resources" not in obj:
self.resources(obj)
self.source(obj)
obj["status"] = "Downloaded"
if obj["action"] == "Install" and obj["status"] not in ["Installed", "Error"] and self.satisfy(obj):
print "installing %s" % self.display(obj)
for resource in obj["resources"]:
print "applying %s" % self.display(resource)
Resource = getattr(pykube, resource["kind"])
try:
Resource(self.kube, resource).replace()
except pykube.PyKubeError:
Resource(self.kube, resource).delete()
Resource(self.kube, resource).create()
self.url(obj)
obj["status"] = "Installed"
elif obj["status"] == "Installed" and obj["action"] == "Uninstall":
print "uninstalling %s" % self.display(obj)
for resource in reversed(obj["resources"]):
print "deleting %s" % self.display(resource)
getattr(pykube, resource["kind"])(self.kube, resource).delete()
if "url" in obj:
del obj["url"]
obj["action"] = "Download"
obj["status"] = "Downloaded"
except Exception as exception:
obj["status"] = "Error"
obj["error"] = traceback.format_exc().splitlines()
traceback.print_exc()
pykube.App(self.kube, obj).replace()
    def nginx(self, expected):
        """Reconcile /etc/nginx/conf.d with the expected proxy configuration.

        Parses the existing conf files back into {host: {ip, servers}} form
        and, if anything differs from `expected`, wipes the directory,
        rewrites one conf per host from the SERVER template, and reloads
        nginx.
        """
        actual = {}
        for nginx_path in glob.glob("/etc/nginx/conf.d/*.conf"):
            host = nginx_path.split("/")[-1].split(".conf")[0]
            external = None
            actual[host] = {"servers": []}
            with open(nginx_path, "r") as nginx_file:
                for nginx_line in nginx_file:
                    # 'listen' precedes 'proxy_pass' in each server block, so
                    # `external` holds the port for the proxy_pass that follows.
                    if "listen" in nginx_line:
                        external = int(nginx_line.split()[-1][:-1])
                    if "proxy_pass" in nginx_line:
                        actual[host]["servers"].append({
                            "protocol": nginx_line.split(":")[0].split(" ")[-1],
                            "external": external,
                            "internal": int(nginx_line.split(":")[-1].split("/")[0])
                        })
                        actual[host]["ip"] = nginx_line.split("/")[2].split(":")[0]
        if expected != actual:
            self.differs(expected, actual)
            self.execute("rm -f /etc/nginx/conf.d/*.conf")
            for host in expected:
                with open("/etc/nginx/conf.d/%s.conf" % host, "w") as nginx_file:
                    for server in expected[host]["servers"]:
                        nginx_file.write(SERVER % (server["external"], host, server["protocol"], expected[host]["ip"], server["internal"]))
            self.execute("systemctl reload nginx")
    def services(self):
        """Publish LoadBalancer services via local nginx and mDNS CNAMEs.

        Scans all LoadBalancer services with named http/https ports, and for
        each one whose alphabetically-first hosting node is this node, builds
        an nginx proxy entry to the pod IP plus a
        <service>.<namespace>.<cluster>-klot-io.local CNAME. Re-publishes
        Avahi records when the CNAME set changed, then reconciles nginx.
        """
        nginx = {}
        cnames = set()
        for service in [service.obj for service in pykube.Service.objects(self.kube).filter(namespace=pykube.all)]:
            if (
                "type" not in service["spec"] or service["spec"]["type"] != "LoadBalancer" or
                "ports" not in service["spec"] or "selector" not in service["spec"] or
                "namespace" not in service["metadata"]
            ):
                continue
            servers = []
            # Only ports whose name starts with http/https are proxied;
            # 'https' must be tested first since it also startswith 'http'.
            for port in service["spec"]["ports"]:
                if "name" not in port:
                    continue
                if port["name"].lower().startswith("https"):
                    servers.append({
                        "protocol": "https",
                        "external": port["port"],
                        "internal": port["targetPort"]
                    })
                elif port["name"].lower().startswith("http"):
                    servers.append({
                        "protocol": "http",
                        "external": port["port"],
                        "internal": port["targetPort"]
                    })
            if not servers:
                continue
            # Map hosting nodes to pod IPs for this service's selector.
            node_ips = {}
            for pod in [pod.obj for pod in pykube.Pod.objects(self.kube).filter(
                namespace=service["metadata"]["namespace"],
                selector=service["spec"]["selector"]
            )]:
                if "nodeName" in pod["spec"] and "podIP" in pod["status"]:
                    node_ips[pod["spec"]["nodeName"]] = pod["status"]["podIP"]
            # Exactly one node publishes each service: the first by sort order.
            if not node_ips or sorted(node_ips.keys())[0] != self.node:
                continue
            ip = node_ips[self.node]
            host = ("%s.%s.%s-klot-io.local" % (
                service["metadata"]["name"],
                service["metadata"]["namespace"],
                self.config["kubernetes"]["cluster"]
            ))
            cnames.add(host)
            nginx[host] = {
                "ip": ip,
                "servers": servers
            }
        if cnames != self.cnames:
            self.differs(cnames, self.cnames)
            self.cnames = cnames
            self.avahi()
        self.nginx(nginx)
def clean(self):
past = time.time() - 60
for tmp_file in list(glob.glob("/tmp/tmp??????")):
if past > os.path.getmtime(tmp_file):
os.remove(tmp_file)
    def process(self):
        """Run one reconcile tick: config load, section handlers, apps, cleanup.

        Applies account/network/kubernetes handlers only for config sections
        that changed, lazily connects to the cluster once a kubeconfig
        appears, and (on the master) reconciles App objects.
        """
        if os.path.exists("/boot/klot-io/reset"):
            self.reset()
        if os.path.exists("/boot/klot-io/lib/config.py"):
            self.restart()
        self.reload()
        self.load()
        self.uninitialized()
        if "account" in self.modified:
            self.account()
        if "network" in self.modified:
            self.network()
        if "kubernetes" in self.modified:
            self.kubernetes()
        if not self.kube and os.path.exists("/home/pi/.kube/config"):
            self.kube = pykube.HTTPClient(pykube.KubeConfig.from_file("/home/pi/.kube/config"))
        if self.kube:
            if self.config["kubernetes"]["role"] == "master":
                self.apps()
            self.services()
        self.clean()
    def run(self):
        """Loop forever, reconciling every 5 seconds; log and survive errors."""
        while True:
            try:
                self.process()
            except Exception as exception:
                traceback.print_exc()
            time.sleep(5)
| 2 | 2 |
Codewars/Even or Odd.py | BerkanR/Programacion | 0 | 12774100 | <reponame>BerkanR/Programacion
# Create a function that takes an integer as an argument and returns "Even" for even numbers or "Odd" for odd numbers.
def even_or_odd(number):
    """Return "Even" for even integers and "Odd" for odd ones."""
    # Conditional expression replaces the if/else block; same result.
    return "Even" if number % 2 == 0 else "Odd"

assert (even_or_odd(2)) == "Even", "Debe devolver Even"
assert (even_or_odd(0)) == "Even", "Debe devolver Even"
assert (even_or_odd(7)) == "Odd", "Debe devolver Odd"
assert (even_or_odd(1)) == "Odd", "Debe devolver Odd"
async_fetcher/utils.py | night-crawler/async-fetcher | 3 | 12774101 | import asyncio
import ssl
import aiohttp
# if sys.version_info >= (3, 5):
# EventLoopType = t.Union[asyncio.BaseEventLoop, asyncio.AbstractEventLoop]
# else:
# EventLoopType = asyncio.AbstractEventLoop
def get_or_create_event_loop() -> asyncio.AbstractEventLoop:
    """Return the current thread's event loop, creating and installing one if needed."""
    try:
        return asyncio.get_event_loop()
    except (RuntimeError, AssertionError):
        # No usable loop registered for this thread yet: make one and set it.
        fresh_loop = asyncio.new_event_loop()
        asyncio.set_event_loop(fresh_loop)
        return fresh_loop
# noinspection PyUnresolvedReferences
class TCPConnectorMixIn:
    """Mixin that lazily creates and memoizes an aiohttp TCPConnector.

    NOTE(review): inferred from usage -- the host class is expected to
    provide ``loop``, ``cafile``, ``keepalive_timeout``, ``_connector_owner``
    and ``_tcp_connector`` attributes; confirm in the consuming class.
    """
    # noinspection PyUnresolvedReferences
    def get_tcp_connector(self) -> aiohttp.TCPConnector:
        """Return a usable TCPConnector, creating/memoizing one when owned."""
        # Not the owner: hand back the externally supplied connector as-is.
        if not self._connector_owner:
            return self._tcp_connector
        # return valid connector
        if self._tcp_connector and not self._tcp_connector.closed:
            return self._tcp_connector
        # create ssl context if no valid connector is present
        _ssl = ssl.create_default_context(cafile=self.cafile)
        # memoize tcp_connector for reuse
        # noinspection PyAttributeOutsideInit
        self._tcp_connector = aiohttp.TCPConnector(
            loop=self.loop,
            ssl=_ssl,
            keepalive_timeout=self.keepalive_timeout,
        )
        return self._tcp_connector
    def __del__(self):
        """
        Properly close owned connector on exit
        :return:
        """
        # NOTE(review): when no connector was ever created, this path builds a
        # fresh one via get_tcp_connector() just to close it -- confirm intent.
        if self._connector_owner:
            connector = self.get_tcp_connector()
            not connector.closed and connector.close()
IMPORT_EXCEPTION_NAMES = ['ImportError', 'ImproperlyConfigured', 'ModuleNotFoundError']
| 2.078125 | 2 |
scheduler/run_round_robin.py | widgetOne/league_admin | 0 | 12774102 | '''
This is the central location for driving the other modules. It should primarily
contain season- and SCVL-specific logic.
'''
import facility
from optimizer import make_schedule, save_schedules
from optimizer import make_round_robin_game, get_default_potential_sch_loc
import datetime
from facility import SCVL_Facility_Day
from facility import Facility
from pprint import pprint
def make_round_robin_schedule(sch_template_path, team_counts):
    """Generate candidate round-robin schedules and export the preferred one.

    Args:
        sch_template_path: path to the CSV template describing the rounds.
        team_counts: list with the number of teams in each division.

    Side effects: prints audits and writes the winning schedule to
    scratch/<today>_round_robin_sch.csv.
    """
    # (Removed unused local `canned_path` that was never referenced.)
    total_schedules = 1  # 12000 for a full production search
    summary, schedules = make_round_robin_game(team_counts, sch_template_path, total_schedules)
    choosing_a_winner = True
    if choosing_a_winner:
        # Prefer the candidate that minimizes hours while avoiding sit-outs.
        preferred_winner = 'min hour and no-sit'
        sch = summary[preferred_winner]['sch']
        print(sch.get_audit_text())
        current_sum = summary[preferred_winner]
    make_final_report = True
    if make_final_report:
        file_path = 'scratch/{}_round_robin_sch.csv'.format(datetime.date.today())
        sch.gen_csv(file_path)
        print(sch.get_team_round_robin_audit())
        print('''\n\n\n\nThe final schedule has these properties:
        {}
        was seed {} and looks like this:
        {}'''.format(current_sum['team_sit_report'], current_sum['seed'], sch))
def make_2018_spring_round_robin_schedule():
    """Build the Spring 2018 round-robin schedule from its input template."""
    template_path = 'inputs/{}/{}'.format(
        '2018-1-spring',
        'round_robin_input_template_maker_2018_1_spring.csv - machine_version.csv',
    )
    make_round_robin_schedule(template_path, [6, 10, 13, 11, 4])
make_round_robin_schedule(sch_template_path, team_counts)
if __name__ == '__main__':
    make_2018_spring_round_robin_schedule()
    # Entry points for earlier seasons, kept here for reference:
    #make_regular_season_fall_2016()
    #make_regular_season_spring_2017()
| 2.453125 | 2 |
packages/api-server/api_server/models/tortoise_models/dispenser_state.py | Sald-for-Communication-and-IT/rmf-web | 23 | 12774103 | from tortoise.models import Model
from .json_mixin import JsonMixin
class DispenserState(Model, JsonMixin):
    """Tortoise ORM model persisting dispenser state records.

    No fields are declared here: persistence comes from Model and the JSON
    (de)serialization behavior from JsonMixin. NOTE(review): presumably the
    mixin supplies the actual columns -- confirm against json_mixin.
    """
    pass
| 1.34375 | 1 |
owners_client.py | azureplus/chrome_depot_tools | 0 | 12774104 | <filename>owners_client.py<gh_stars>0
# Copyright (c) 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import itertools
import os
import random
import gerrit_util
import owners as owners_db
import scm
# Per-path approval states returned by GetChangeApprovalStatus /
# GetFilesApprovalStatus.
APPROVED = 'APPROVED'
PENDING = 'PENDING'
INSUFFICIENT_REVIEWERS = 'INSUFFICIENT_REVIEWERS'
def _owner_combinations(owners, num_owners):
"""Iterate owners combinations by decrasing score.
The score of an owner is its position on the owners list.
The score of a set of owners is the maximum score of all owners on the set.
Returns all combinations of up to `num_owners` sorted by decreasing score:
_owner_combinations(['0', '1', '2', '3'], 2) == [
# score 1
('1', '0'),
# score 2
('2', '0'),
('2', '1'),
# score 3
('3', '0'),
('3', '1'),
('3', '2'),
]
"""
return reversed(list(itertools.combinations(reversed(owners), num_owners)))
class InvalidOwnersConfig(Exception):
  """Raised when a change contains an OWNERS file that fails to parse/validate."""
  pass
class OwnersClient(object):
  """Interact with OWNERS files in a repository.

  This class allows you to interact with OWNERS files in a repository both
  via the Gerrit Code-Owners plugin REST API and via the owners database
  implemented by Depot Tools in owners.py:

  - List all the owners for a change.
  - Check if a change has been approved.
  - Check if the OWNERS configuration in a change is valid.

  All code should use this class to interact with OWNERS files instead of the
  owners database in owners.py.
  """
  def __init__(self, host):
    self._host = host

  def ListOwnersForFile(self, project, branch, path):
    """List all owners for a file.

    The returned list is sorted so that better owners appear first.
    """
    raise Exception('Not implemented')

  def GetChangeApprovalStatus(self, change_id):
    """Check the approval status for the latest revision_id in a change.

    Returns a map of path to approval status, where the status is one of
    APPROVED, PENDING or INSUFFICIENT_REVIEWERS.
    """
    raise Exception('Not implemented')

  def ValidateOwnersConfig(self, change_id):
    """Check if the owners configuration in a change is valid."""
    raise Exception('Not implemented')

  def GetFilesApprovalStatus(
      self, project, branch, paths, approvers, reviewers):
    """Check the approval status for the given paths.

    Utility method to check for approval status when a change has not yet
    been created, given reviewers and approvers.
    See GetChangeApprovalStatus for a description of the returned value.
    """
    approvers = set(approvers)
    reviewers = set(reviewers)
    status = {}
    for path in paths:
      path_owners = set(self.ListOwnersForFile(project, branch, path))
      if path_owners.intersection(approvers):
        status[path] = APPROVED
      elif path_owners.intersection(reviewers):
        status[path] = PENDING
      else:
        status[path] = INSUFFICIENT_REVIEWERS
    return status

  def SuggestOwners(self, project, branch, paths):
    """Suggest a set of owners that can approve all of the given paths."""
    paths_by_owner = {}
    score_by_owner = {}
    for path in paths:
      owners = self.ListOwnersForFile(project, branch, path)
      for i, owner in enumerate(owners):
        paths_by_owner.setdefault(owner, set()).add(path)
        # Gerrit API lists owners of a path sorted by an internal score, so
        # owners that appear first should be preferred.
        # We define the score of an owner to be their minimum position in all
        # paths.
        score_by_owner[owner] = min(i, score_by_owner.get(owner, i))
    # Sort owners by their score.
    owners = sorted(score_by_owner, key=lambda o: score_by_owner[o])
    # Select the minimum number of owners that can approve all paths.
    # We start at 2 to avoid sending all changes that require multiple
    # reviewers to top-level owners.
    num_owners = 2
    while num_owners <= len(owners):
      # Iterate all combinations of `num_owners` by decreasing score, and
      # select the first one that covers all paths.
      for selected in _owner_combinations(owners, num_owners):
        covered = set.union(*(paths_by_owner[o] for o in selected))
        if len(covered) == len(paths):
          return selected
      num_owners += 1
    # Bug fix: the previous `while True` loop spun forever once num_owners
    # exceeded len(owners) (itertools.combinations then yields nothing), e.g.
    # when a path has no owners or fewer than two owners exist in total.
    # Fall back to suggesting every known owner.
    return owners
class DepotToolsClient(OwnersClient):
  """Implement OwnersClient using owners.py Database."""
  def __init__(self, host, root, branch, fopen=open, os_path=os.path):
    """Build an owners database rooted at `root`, diffing against `branch`.

    OWNERS files modified in the working tree are overridden with their
    original (branch) contents so approval checks use pre-change ownership.
    """
    super(DepotToolsClient, self).__init__(host)
    self._root = root
    self._fopen = fopen
    self._os_path = os_path
    self._branch = branch
    self._db = owners_db.Database(root, fopen, os_path)
    self._db.override_files = self._GetOriginalOwnersFiles()

  def _GetOriginalOwnersFiles(self):
    # Map each changed OWNERS file path to its contents at self._branch.
    return {
        f: scm.GIT.GetOldContents(self._root, f, self._branch)
        for _, f in scm.GIT.CaptureStatus(self._root, self._branch)
        if os.path.basename(f) == 'OWNERS'
    }

  def ListOwnersForFile(self, _project, _branch, path):
    """Return all possible owners for `path`, nearest (best) first."""
    # all_possible_owners returns a dict {owner: [(path, distance)]}. We want to
    # return a list of owners sorted by increasing distance.
    distance_by_owner = self._db.all_possible_owners([path], None)
    # We add a small random number to the distance, so that owners at the same
    # distance are returned in random order to avoid overloading those who would
    # appear first.
    return sorted(
        distance_by_owner,
        key=lambda o: distance_by_owner[o][0][1] + random.random())

  def GetChangeApprovalStatus(self, change_id):
    """Return {path: status} for the files of change_id's current revision.

    NOTE(review): assumes the change has at least one REVIEWER and a
    Code-Review label -- confirm Gerrit always returns those keys here.
    """
    data = gerrit_util.GetChange(
        self._host, change_id,
        ['DETAILED_ACCOUNTS', 'DETAILED_LABELS', 'CURRENT_FILES',
         'CURRENT_REVISION'])
    reviewers = [r['email'] for r in data['reviewers']['REVIEWER']]
    # Get reviewers that have approved this change
    label = data['labels']['Code-Review']
    max_value = max(int(v) for v in label['values'])
    approvers = [v['email'] for v in label['all'] if v['value'] == max_value]
    files = data['revisions'][data['current_revision']]['files']
    return self.GetFilesApprovalStatus(None, None, files, approvers, reviewers)

  def ValidateOwnersConfig(self, change_id):
    """Raise InvalidOwnersConfig if the change touches an unparsable OWNERS file."""
    data = gerrit_util.GetChange(
        self._host, change_id,
        ['DETAILED_ACCOUNTS', 'DETAILED_LABELS', 'CURRENT_FILES',
         'CURRENT_REVISION'])
    files = data['revisions'][data['current_revision']]['files']
    db = owners_db.Database(self._root, self._fopen, self._os_path)
    try:
      db.load_data_needed_for(
          [f for f in files if os.path.basename(f) == 'OWNERS'])
    except Exception as e:
      raise InvalidOwnersConfig('Error parsing OWNERS files:\n%s' % e)
class Solution:
    def isAlienSorted(self, words, order):
        """Check whether `words` is sorted under the alien alphabet `order`.

        :type words: List[str]
        :type order: str
        :rtype: bool
        """
        # Map each alien letter to its rank, translate every word into its
        # rank tuple, then rely on Python's lexicographic list comparison
        # (which also handles the prefix case, e.g. "app" < "apple").
        rank = {letter: index for index, letter in enumerate(order)}
        keys = [[rank[c] for c in word] for word in words]
        return all(a <= b for a, b in zip(keys, keys[1:]))
| 3.125 | 3 |
segmentation/datasets.py | dataflowr/evaluating_bdl | 110 | 12774106 | # code-checked
# server-checked
import cv2
import numpy as np
import os
import os.path as osp
import random
import torch
from torch.utils import data
import pickle
def generate_scale_label(image, label):
    """Rescale image and label together by a random factor in [0.5, 2.1]."""
    scale = 0.5 + random.randint(0, 16)/10.0
    # Bilinear for the image, nearest-neighbor for the label so class ids
    # are never interpolated into invalid values.
    scaled_image = cv2.resize(image, None, fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)
    scaled_label = cv2.resize(label, None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)
    return scaled_image, scaled_label
def id2trainId(label, id_to_trainid):
    """Return a copy of `label` with ids remapped through `id_to_trainid`.

    Masks are computed against the original array, so chained mappings
    (e.g. 1 -> 2 and 2 -> 3) never cascade.
    """
    remapped = label.copy()
    for raw_id, train_id in id_to_trainid.items():
        remapped[label == raw_id] = train_id
    return remapped
################################################################################
# Cityscapes
################################################################################
class DatasetCityscapesAugmentation(data.Dataset):
    """Cityscapes training dataset with random scale / crop / flip augmentation.

    __getitem__ yields (image, label, original_size, name): image is a
    mean-subtracted float32 CHW crop of shape crop_size, label holds the 19
    Cityscapes train ids (ignore_label elsewhere).
    """
    def __init__(self, root, list_path, max_iters=None, crop_size=(512, 512), ignore_label=255):
        self.root = root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size
        self.ignore_label = ignore_label
        # Each list line: "<image_path> <label_path>" relative to root.
        self.img_ids = [i_id.strip().split() for i_id in open(list_path)]
        print ("DatasetCityscapesAugmentation - num unique examples: %d" % len(self.img_ids))
        if not max_iters==None:
            # Repeat the id list so one pass covers max_iters iterations.
            self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
        print ("DatasetCityscapesAugmentation - num examples: %d" % len(self.img_ids))
        self.files = []
        for item in self.img_ids:
            image_path, label_path = item
            name = osp.splitext(osp.basename(label_path))[0]
            img_file = osp.join(self.root, image_path)
            label_file = osp.join(self.root, label_path)
            self.files.append({
                "img": img_file,
                "label": label_file,
                "name": name,
                "weight": 1
            })
        # Raw Cityscapes label ids -> 19 train ids (everything else ignored).
        self.id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,
                                3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,
                                7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,
                                14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,
                                18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,
                                28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}
    def __len__(self):
        return len(self.files)
    def __getitem__(self, index):
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
        label = id2trainId(label, self.id_to_trainid)
        size = image.shape
        name = datafiles["name"]
        # Random rescale (0.5x-2.1x), then per-channel mean subtraction on the
        # channel-reversed image (in-place so dtype stays float32).
        image, label = generate_scale_label(image, label)
        image = np.asarray(image, np.float32)
        mean = (102.9801, 115.9465, 122.7717)
        image = image[:,:,::-1]
        image -= mean
        img_h, img_w = label.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            # Pad image with zeros and label with ignore_label so padded
            # pixels never contribute to the loss.
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
                pad_w, cv2.BORDER_CONSTANT,
                value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
                pad_w, cv2.BORDER_CONSTANT,
                value=(self.ignore_label,))
        else:
            img_pad, label_pad = image, label
        # Random crop of crop_size from the (possibly padded) arrays.
        img_h, img_w = label_pad.shape
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        image = image.transpose((2, 0, 1))
        # Random horizontal flip: flip is -1 (mirror) or 1 (identity).
        flip = np.random.choice(2)*2 - 1
        image = image[:, :, ::flip]
        label = label[:, ::flip]
        return image.copy(), label.copy(), np.array(size), name
class DatasetCityscapesEval(data.Dataset):
    """Cityscapes evaluation dataset: full-resolution images, no augmentation.

    __getitem__ yields (image, label, original_size, name): image is a
    mean-subtracted float32 CHW array, label holds the 19 train ids.
    """
    def __init__(self, root, list_path, ignore_label=255):
        self.root = root
        self.list_path = list_path
        self.ignore_label = ignore_label
        # Each list line: "<image_path> <label_path>" relative to root.
        self.img_ids = [i_id.strip().split() for i_id in open(list_path)]
        print ("DatasetCityscapesEval - num examples: %d" % len(self.img_ids))
        self.files = []
        for item in self.img_ids:
            image_path, label_path = item
            name = osp.splitext(osp.basename(label_path))[0]
            img_file = osp.join(self.root, image_path)
            label_file = osp.join(self.root, label_path)
            self.files.append({
                "img": img_file,
                "label": label_file,
                "name": name,
                "weight": 1
            })
        # Raw Cityscapes label ids -> 19 train ids (everything else ignored).
        self.id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,
                                3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,
                                7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,
                                14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,
                                18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,
                                28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}
    def __len__(self):
        return len(self.files)
    def __getitem__(self, index):
        datafiles = self.files[index]
        # Fix: the existence check used to run *after* cv2.imread, so missing
        # files triggered two wasted (None-returning) reads before the
        # fallback. Check first, then read.
        if not os.path.exists(datafiles["img"]):
            return self.__getitem__(0)
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
        label = id2trainId(label, self.id_to_trainid)
        size = image.shape
        name = datafiles["name"]
        # Mean subtraction on the channel-reversed image (in-place keeps
        # the array float32).
        image = np.asarray(image, np.float32)
        mean = (102.9801, 115.9465, 122.7717)
        image = image[:,:,::-1]
        image -= mean
        image = image.transpose((2, 0, 1))
        return image.copy(), label.copy(), np.array(size), name
class DatasetCityscapesEvalSeq(data.Dataset):
    """Cityscapes demo-video dataset: unlabeled frames from one sequence.

    __getitem__ yields (image, original_size, name) where image is the
    mean-subtracted float32 CHW frame.
    """
    def __init__(self, data_path, sequence="00"):
        self.data_path = data_path
        self.img_dir = self.data_path + "/leftImg8bit/demoVideo/stuttgart_" + sequence + "/"
        self.examples = [
            {
                "img_path": self.img_dir + file_name,
                "img_id": file_name.split("_leftImg8bit.png")[0],
            }
            for file_name in os.listdir(self.img_dir)
        ]
        self.num_examples = len(self.examples)
        print ("DatasetCityscapesEvalSeq - num examples: %d" % self.num_examples)
    def __len__(self):
        return len(self.examples)
    def __getitem__(self, index):
        example = self.examples[index]
        frame = cv2.imread(example["img_path"], cv2.IMREAD_COLOR)
        size = frame.shape
        name = example["img_id"]
        frame = np.asarray(frame, np.float32)
        # Reverse channel order, then subtract the per-channel mean in place
        # so the array stays float32.
        frame = frame[:, :, ::-1]
        frame -= (102.9801, 115.9465, 122.7717)
        frame = frame.transpose((2, 0, 1))
        return frame.copy(), np.array(size), name
################################################################################
# Synscapes
################################################################################
class DatasetSynscapesAugmentation(data.Dataset):
    """Synscapes training dataset with random scale / crop / flip augmentation.

    __getitem__ yields (image, label, original_size, name): image is a
    mean-subtracted float32 CHW crop of shape crop_size, label holds the 19
    Cityscapes train ids (ignore_label elsewhere).
    """
    def __init__(self, root, root_meta, type="train", max_iters=None, crop_size=(512, 512), ignore_label=255):
        self.root = root
        self.root_meta = root_meta
        self.crop_h, self.crop_w = crop_size
        self.ignore_label = ignore_label
        if type == "train":
            with open(root_meta + "/train_img_ids.pkl", "rb") as file: # (needed for python3)
                self.img_ids = pickle.load(file)
        elif type == "val":
            with open(root_meta + "/val_img_ids.pkl", "rb") as file: # (needed for python3)
                self.img_ids = pickle.load(file)
        else:
            raise Exception("type must be either 'train' or 'val'!")
        print ("DatasetSynscapesAugmentation - num unique examples: %d" % len(self.img_ids))
        if not max_iters==None:
            # Repeat the id list so one pass covers max_iters iterations.
            self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
        print ("DatasetSynscapesAugmentation - num examples: %d" % len(self.img_ids))
        self.files = []
        for img_id in self.img_ids:
            self.files.append({
                "img": self.root + "/img/rgb-2k/" + img_id + ".png",
                "label": self.root_meta + "/gtFine/" + img_id + ".png",
                "name": img_id,
                "weight": 1
            })
        # Raw Cityscapes-style ids -> 19 train ids (everything else ignored).
        self.id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,
                                3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,
                                7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,
                                14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,
                                18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,
                                28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}
    def __len__(self):
        return len(self.files)
    def __getitem__(self, index):
        datafiles = self.files[index]
        # Fix: check existence *before* decoding. A handful of Synscapes
        # images are missing (26 out of 25000); previously both cv2.imread
        # calls ran first and their None results were discarded.
        if not os.path.exists(datafiles["img"]):
            return self.__getitem__(0)
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
        label = id2trainId(label, self.id_to_trainid)
        size = image.shape
        name = datafiles["name"]
        # Random rescale (0.5x-2.1x), then per-channel mean subtraction on the
        # channel-reversed image (in-place so dtype stays float32).
        image, label = generate_scale_label(image, label)
        image = np.asarray(image, np.float32)
        mean = (102.9801, 115.9465, 122.7717)
        image = image[:,:,::-1]
        image -= mean
        img_h, img_w = label.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            # Pad image with zeros and label with ignore_label so padded
            # pixels never contribute to the loss.
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0,
                pad_w, cv2.BORDER_CONSTANT,
                value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0,
                pad_w, cv2.BORDER_CONSTANT,
                value=(self.ignore_label,))
        else:
            img_pad, label_pad = image, label
        # Random crop of crop_size from the (possibly padded) arrays.
        img_h, img_w = label_pad.shape
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(img_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off : h_off+self.crop_h, w_off : w_off+self.crop_w], np.float32)
        image = image.transpose((2, 0, 1))
        # Random horizontal flip: flip is -1 (mirror) or 1 (identity).
        flip = np.random.choice(2)*2 - 1
        image = image[:, :, ::flip]
        label = label[:, ::flip]
        return image.copy(), label.copy(), np.array(size), name
class DatasetSynscapesEval(data.Dataset):
    """Synscapes evaluation dataset.

    Unlike the training variant, no scaling, cropping or flipping is applied:
    full-resolution, mean-subtracted CHW images are returned with their
    train-id label maps.
    """

    def __init__(self, root, root_meta, type="val", ignore_label=255):
        self.root = root
        self.root_meta = root_meta
        self.ignore_label = ignore_label
        if type == "train":
            with open(root_meta + "/train_img_ids.pkl", "rb") as file: # (needed for python3)
                self.img_ids = pickle.load(file)
        elif type == "val":
            with open(root_meta + "/val_img_ids.pkl", "rb") as file: # (needed for python3)
                self.img_ids = pickle.load(file)
        else:
            raise Exception("type must be either 'train' or 'val'!")
        print ("DatasetSynscapesEval - num examples: %d" % len(self.img_ids))
        self.files = []
        for img_id in self.img_ids:
            self.files.append({
                "img": self.root + "/img/rgb-2k/" + img_id + ".png",
                "label": self.root_meta + "/gtFine/" + img_id + ".png",
                "name": img_id,
                "weight": 1
            })
        # Raw label id -> train id (everything not in the 19 classes maps
        # to ignore_label).
        self.id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,
                              3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,
                              7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,
                              14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,
                              18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,
                              28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index):
        datafiles = self.files[index]
        # FIX: check existence *before* reading; cv2.imread returns None
        # silently for missing files (26 out of 25000 images are missing).
        if not os.path.exists(datafiles["img"]):
            return self.__getitem__(0)
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
        label = id2trainId(label, self.id_to_trainid)
        size = image.shape
        name = datafiles["name"]
        image = np.asarray(image, np.float32)
        mean = (102.9801, 115.9465, 122.7717)  # fixed per-channel pixel mean
        image = image[:, :, ::-1]  # reverse channel order before mean subtraction
        image -= mean
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        return image.copy(), label.copy(), np.array(size), name
| 2.421875 | 2 |
submissions/Ottenlips/puzzles.py | WhittKinley/Legos | 0 | 12774107 | import search
from math import(cos, pi)
# Undirected road-distance graph of a few St. Louis, MO suburbs.
stl_map = search.UndirectedGraph(dict(
    Kirkwood=dict(Webster=10, Clayton=17, MapleWood=17, Oakland=5, Glendale=7,),
    St_Louis=dict(Clayton=12),
    Glendale=dict(St_Louis=19),
    Oakland=dict(Glendale=4),
    MapleWood=dict(St_Louis=11),
    Clayton=dict(Webster=14, St_Louis=12, Kirkwood=17),
    Webster=dict(Kirkwood=10, Clayton=14, MapleWood=8),
))
# (latitude, longitude) coordinates used by informed searches as
# straight-line distance estimates.
stl_map.locations = dict(
    St_Louis=(38.6270, 90.1994), Webster=(38.5926, 90.3573), Kirkwood=(38.5834, 90.4068),
    Glendale=(38.5959, 90.3771), MapleWood=(38.6104, 90.3228), Clayton=(38.6426, 90.3237),
    Oakland=(38.5764, 90.3856),
)
stl_puzzle = search.GraphProblem('Kirkwood', 'St_Louis', stl_map)
stl_puzzle1 = search.GraphProblem('Oakland', 'Webster', stl_map)
stl_puzzle2 = search.GraphProblem('MapleWood', 'Oakland', stl_map)
# FIX: the description previously said "Sumner County, TN" (copy-paste from
# a template) although the graph and coordinates are the St. Louis, MO area.
stl_puzzle.description = '''
An abbreviated map of the St. Louis, MO area.
This map is unique, to the best of my knowledge.
'''
class LightSwitch(search.Problem):
    """Toy search problem: a light that must be turned on."""

    # Placeholder board; not read by any of the methods below.
    game_state = [[0, 2], [0, 3], [0, 4], [1, 2], [1, 3], [1, 4], ]

    def actions(self, state):
        """Every move is available in every state."""
        return ['jump up', 'jump down', 'jump left', 'jump right']

    def result(self, state, action):
        """Only 'jump up' turns the light on; any other action turns it off."""
        return 'on' if action == 'jump up' else 'off'

    def goal_test(self, state):
        """Goal: the light is on."""
        return state == 'on'

    def h(self, node):
        """Trivial heuristic: 0 at the goal, 1 anywhere else."""
        return 0 if self.goal_test(node.state) else 1
# Instantiate the toy problem with the light initially off.
switch_puzzle = LightSwitch('off')
switch_puzzle.label = 'Light Switch'
# Puzzles exported for the course test harness.
myPuzzles = [
    stl_puzzle,
    stl_puzzle1,
    stl_puzzle2,
    switch_puzzle,
] | 3.09375 | 3 |
trelloengine/structures/card.py | MrFizban/TrelloEngine | 0 | 12774108 | <filename>trelloengine/structures/card.py
#!/usr/bin/env python3
from .base import Base
class Card(Base):
    """Client for the Trello REST API ``/cards`` endpoints.

    Each method assembles the query parameters for one endpoint and
    delegates the HTTP call to the request helpers inherited from Base.
    Several query-parameter keys were previously misspelled or missing;
    fixes are marked with ``FIX`` comments (see the Trello REST API docs).
    """

    def __init__(self, app_key: str, token: str, id=None, use_log=False):
        """Store credentials and anchor base_url at the /cards resource."""
        super(Card, self).__init__(app_key=app_key, token=token, id=id, use_log=use_log)
        self.base_url = self.base_url + "/cards"

    def create_card(self, id_list: str, name: str = None, desc: str = None, pos: str = None, due: str = None, deu_complete: bool = False,
                    id_members: list = None, id_labels: list = None, url_source: str = None, file_source: str = None,
                    id_card_source: str = None, keep_from_source: str = None, address: str = None, location_name: str = None,
                    coordinate: str = None):
        """Create a new card on list ``id_list`` (POST /cards)."""
        query = {
            'key': self.app_key,
            'token': self.token,
            'name': name,
            'desc': desc,
            'pos': pos,
            'due': due,
            'dueComplete': deu_complete,
            'idList': id_list,
            'idMembers': id_members,
            'idLabels': id_labels,
            'urlSource': url_source,
            'fileSource': file_source,
            'idCardSource': id_card_source,
            'keepFromSource': keep_from_source,
            'address': address,  # FIX: key was misspelled 'andress'
            'locationName': location_name,
            'coordinates': coordinate
        }
        return super(Card, self).post_request(url=self.base_url, query=self.bool_to_string(query=query))

    def get_card(self, id: str = None, fields: str = 'all', actions: str = None, attachments: (str, bool) = False, attachment_fields: str = "all",
                 members: bool = False, member_filed: str = 'all', member_votet: bool = False, member_voted_field: str = 'all',
                 check_item_states: bool = False, checklists: str = None, checklist_filed: str = 'all', board: bool = False, board_filed: str = 'all',
                 get_list: bool = False, plugin_data: bool = None, stickers: bool = False, sticker_field: str = 'all', custom_field_items: bool = False):
        """Fetch a card with optional nested resources (GET /cards/{id})."""
        url_rquest = self.select_id(id=id)
        query = {
            'key': self.app_key,
            'token': self.token,
            'fields': fields,
            'actions': actions,
            'attachments': attachments,
            'attachment_fields': attachment_fields,
            'members': members,
            'member_fields': member_filed,       # FIX: key was 'meber_field'
            'membersVoted': member_votet,
            'memberVoted_fields': member_voted_field,
            'checkItemStates': check_item_states,  # FIX: key was 'checkItemsStates'
            'checklists': checklists,
            'checklist_fields': checklist_filed,
            'board': board,
            'board_fields': board_filed,         # FIX: key was 'board_field'
            'list': get_list,
            'pluginData': plugin_data,
            'stickers': stickers,
            'sticker_fields': sticker_field,
            'customFieldItems': custom_field_items
        }
        return super(Card, self).get_request(url=url_rquest, query=query)

    def update_card(self, id: str = None, name: str = None, desc: str = None, closed: bool = None, id_member: str = None,
                    id_atacment_cover: str = None, id_list: str = None, id_label: str = None, id_board: str = None, pos: (str, int) = None,
                    due: str = None, due_completed: bool = None, subscribed: str = None, address: str = None, location_name: str = None,
                    coodinates: str = None, cover: object = None):
        """Update card attributes (PUT /cards/{id})."""
        url_rquest = self.select_id(id=id)
        query = {
            'key': self.app_key,
            'token': self.token,
            'name': name,
            'desc': desc,
            'closed': closed,
            'idMembers': id_member,
            'idAttachmentCover': id_atacment_cover,
            'idList': id_list,
            'idLabels': id_label,
            'idBoard': id_board,
            'pos': pos,
            'due': due,
            'dueComplete': due_completed,  # FIX: key had a stray colon ('dueComplete:')
            'subscribed': subscribed,
            'address': address,
            'locationName': location_name,
            'coordinates': coodinates,
            'cover': cover
        }
        return super(Card, self).put_request(url=url_rquest, query=query)

    def delete_card(self, id: str = None):
        """Permanently delete a card (DELETE /cards/{id})."""
        url_rquest = self.select_id(id=id)
        query = {
            'key': self.app_key,
            'token': self.token
        }
        return super(Card, self).delete_request(url=url_rquest, query=query)

    def get_field(self, field: str, id: str = None):
        """Fetch a single card field (GET /cards/{id}/{field})."""
        url_rquest = self.select_id(id=id, string=[field])
        query = {
            'key': self.app_key,
            'token': self.token
        }
        return super(Card, self).get_request(url=url_rquest, query=query)

    def get_actions(self, id: str = None, filter: str = "commentCard, updateCard:idList"):
        """List actions on the card, filtered by action type."""
        url_rquest = self.select_id(id=id, string=['actions'])
        query = {
            'key': self.app_key,
            'token': self.token,
            'filter': filter
        }
        return super(Card, self).get_request(url=url_rquest, query=query)

    def get_attachments(self, id: str = None, fields: str = 'all', filter: (bool, str) = False):
        """List the card's attachments."""
        url_rquest = self.select_id(id=id, string=['attachments'])
        query = {
            'key': self.app_key,
            'token': self.token,
            'fields': fields,
            'filter': filter
        }
        return super(Card, self).get_request(url=url_rquest, query=query)

    def create_attachment(self, id: str = None, name: str = None, file: str = None, mine_type: str = None, url: str = None,
                          set_cover: bool = False, ):
        """Attach a file or URL to the card (POST /cards/{id}/attachments)."""
        url_rquest = self.select_id(id=id, string=['attachments'])
        query = {
            'key': self.app_key,
            'token': self.token,
            'name': name,
            'file': file,
            'mimeType': mine_type,  # FIX: key was misspelled 'mineType'
            'url': url,
            'setCover': set_cover
        }
        return super(Card, self).post_request(url=url_rquest, query=query)

    def get_attachment(self, id_attachment: str, id: str = None, fields: list = None):
        """Fetch one attachment by id."""
        url_rquest = self.select_id(id=id, string=['attachments', id_attachment])
        query = {
            'key': self.app_key,
            'token': self.token,
            'fields': fields
        }
        return super(Card, self).get_request(url=url_rquest, query=query)

    def delete_attachment(self, id_attachment: str, id: str = None):
        """Delete one attachment by id."""
        url_rquest = self.select_id(id=id, string=['attachments', id_attachment])
        query = {
            'key': self.app_key,
            'token': self.token,
        }
        return super(Card, self).delete_request(url=url_rquest, query=query)

    def get_board(self, id: str = None, fields: str = 'all'):
        """Fetch the board the card belongs to."""
        url_rquest = self.select_id(id=id, string=['board'])
        query = {
            'key': self.app_key,
            'token': self.token,
            'fields': fields,  # FIX: the fields argument was silently dropped
        }
        return super(Card, self).get_request(url=url_rquest, query=query)

    def get_completed_checkitems(self, id: str = None, fields: str = 'all'):
        """List the card's completed check items."""
        url_rquest = self.select_id(id=id, string=['checkItemStates'])
        query = {
            'key': self.app_key,
            'token': self.token,
            'fields': fields
        }
        return super(Card, self).get_request(url=url_rquest, query=query)

    def get_checklists(self, id: str = None, check_items: str = 'all', checkitem_fields: str = 'all', filter: str = 'all',
                       fields: str = 'all'):
        """List the card's checklists."""
        url_rquest = self.select_id(id=id, string=['checklists'])
        query = {
            'key': self.app_key,
            'token': self.token,
            'checkItems': check_items,
            'checkItem_fields': checkitem_fields,
            'filter': filter,
            'fields': fields
        }
        return super(Card, self).get_request(url=url_rquest, query=query)

    def create_checklist(self, id: str = None, name: str = None, id_checklist_source: str = None, pos: (str, int) = 'top'):
        """Create a checklist on the card, optionally copying an existing one."""
        url_rquest = self.select_id(id=id, string=['checklists'])
        query = {
            'key': self.app_key,
            'token': self.token,
            'name': name,
            'idChecklistSource': id_checklist_source,  # FIX: key had a trailing space
            'pos': pos
        }
        return super(Card, self).post_request(url=url_rquest, query=query)

    def get_checkitem(self, id_check_item: str, id: str = None, fields: str = 'all'):
        """Fetch one check item on the card."""
        url_rquest = self.select_id(id=id, string=['checkItem', id_check_item])
        query = {
            'key': self.app_key,
            'token': self.token,
            'fields': fields
        }
        return super(Card, self).get_request(url=url_rquest, query=query)

    def update_checkitem(self, id_check_item: str, id: str = None, name: str = None, state: str = None,
                         id_checklist: str = None, pos: (str, float) = None):
        """Update one check item (name, state, parent checklist, position)."""
        url_rquest = self.select_id(id=id, string=['checkItem', id_check_item])
        query = {
            'key': self.app_key,
            'token': self.token,
            'name': name,
            'state': state,
            'idChecklist': id_checklist,
            'pos': pos
        }
        return super(Card, self).put_request(url=url_rquest, query=query)

    def delete_checkitem(self, id_check_item: str, id: str = None):
        """Delete one check item from the card."""
        url_rquest = self.select_id(id=id, string=['checkItem', id_check_item])
        query = {
            'key': self.app_key,
            'token': self.token,
        }
        return super(Card, self).delete_request(url=url_rquest, query=query)

    def get_list(self, id: str = None, fields: str = 'all'):
        """Fetch the list the card sits on."""
        url_rquest = self.select_id(id=id, string=['list'])
        query = {
            'key': self.app_key,
            'token': self.token,
            'fields': fields
        }
        return super(Card, self).get_request(url=url_rquest, query=query)

    def get_members(self, id: str = None, fields: str = 'all'):
        """List the members assigned to the card."""
        url_rquest = self.select_id(id=id, string=['members'])
        query = {
            'key': self.app_key,
            'token': self.token,
            'fields': fields
        }
        return super(Card, self).get_request(url=url_rquest, query=query)

    def get_members_who_voted(self, id: str = None, fields: str = 'all'):
        """List the members who voted on the card."""
        url_rquest = self.select_id(id=id, string=['membersVoted'])
        query = {
            'key': self.app_key,
            'token': self.token,
            'fields': fields
        }
        return super(Card, self).get_request(url=url_rquest, query=query)

    def add_member_vote(self, value: str, id: str = None):
        """Record a vote on the card for the member id ``value``."""
        url_rquest = self.select_id(id=id, string=['membersVoted'])
        query = {
            'key': self.app_key,
            'token': self.token,
            'value': value
        }
        return super(Card, self).post_request(url=url_rquest, query=query)

    def get_plugin_data(self, id: str = None):
        """Fetch shared plugin data attached to the card."""
        url_rquest = self.select_id(id=id, string=['pluginData'])
        query = {
            'key': self.app_key,
            'token': self.token,
        }
        return super(Card, self).get_request(url=url_rquest, query=query)

    def get_stickers(self, id: str = None, fields: str = 'all'):
        """List the stickers on the card."""
        # FIX: the URL previously pointed at /attachments instead of /stickers.
        url_rquest = self.select_id(id=id, string=['stickers'])
        query = {
            'key': self.app_key,
            'token': self.token,
            'fields': fields
        }
        return super(Card, self).get_request(url=url_rquest, query=query)

    def add_sticker(self, image: str, top: float, left: float, zindex: int, id: str = None, rotate: float = 0):
        """Place a sticker on the card."""
        url_rquest = self.select_id(id=id, string=['stickers'])
        query = {
            'key': self.app_key,
            'token': self.token,
            'image': image,
            'top': top,
            'left': left,
            'zIndex': zindex,
            'rotate': rotate
        }
        return super(Card, self).post_request(url=url_rquest, query=query)

    def update_stiker(self, id_sticker: str, top: float, left: float, zindex: int, id: str = None, rotate: float = 0):
        """Update an existing sticker's placement on the card."""
        # FIX: the sticker id was accepted but never added to the URL
        # (PUT /cards/{id}/stickers/{idSticker}).
        url_rquest = self.select_id(id=id, string=['stickers', id_sticker])
        query = {
            'key': self.app_key,
            'token': self.token,
            'top': top,
            'left': left,
            'zIndex': zindex,
            'rotate': rotate
        }
        return super(Card, self).put_request(url=url_rquest, query=query)

    def delete_stiker(self, id_sticker: str, id: str = None):
        """Remove a sticker from the card."""
        url_rquest = self.select_id(id=id, string=['stickers', id_sticker])
        query = {
            'key': self.app_key,
            'token': self.token,
        }
        return super(Card, self).delete_request(url=url_rquest, query=query)

    def update_comment(self, id_action: str, text: str, id: str = None):
        """Edit the text of a comment action on the card."""
        url_rquest = self.select_id(id=id, string=['actions', id_action, 'comments'])
        query = {
            'key': self.app_key,
            'token': self.token,
            'text': text
        }
        return super(Card, self).put_request(url=url_rquest, query=query)

    def delete_comment(self, id_action: str, id: str = None):
        """Delete a comment action from the card."""
        url_rquest = self.select_id(id=id, string=['actions', id_action, 'comments'])
        query = {
            'key': self.app_key,
            'token': self.token,
        }
        return super(Card, self).delete_request(url=url_rquest, query=query)

    def update_costum_filed(self, id_custom_field: str, id: str = None):
        """Update a custom field item on the card.

        NOTE(review): the Trello endpoint expects a ``value`` payload; none
        is sent here — confirm intended usage before relying on this method.
        """
        url_rquest = self.select_id(id=id, string=['customField', id_custom_field, 'item'])
        query = {
            'key': self.app_key,
            'token': self.token,
        }
        return super(Card, self).put_request(url=url_rquest, query=query)

    def get_costum_field(self, id_custom_field: str, id: str = None):
        """Fetch one custom field item on the card."""
        url_rquest = self.select_id(id=id, string=['customField', id_custom_field, 'item'])
        query = {
            'key': self.app_key,
            'token': self.token,
        }
        return super(Card, self).get_request(url=url_rquest, query=query)

    def add_comment(self, text: str, id: str = None):
        """Add a comment to the card."""
        url_rquest = self.select_id(id=id, string=['actions', 'comments'])
        query = {
            'key': self.app_key,
            'token': self.token,
            'text': text
        }
        return super(Card, self).post_request(url=url_rquest, query=query)

    def add_label(self, id: str = None, id_label: str = None):
        """Attach an existing label to the card."""
        url_rquest = self.select_id(id=id, string=['idLabels'])
        query = {
            'key': self.app_key,
            'token': self.token,
            'value': id_label,  # FIX: was commented out, so no label was ever sent
        }
        return super(Card, self).post_request(url=url_rquest, query=query)

    def add_member(self, id: str = None, id_member: str = None):
        """Assign a member to the card."""
        # FIX: was a GET against /attachments/idMembers; the Trello API is
        # POST /cards/{id}/idMembers with the member id as ``value``.
        url_rquest = self.select_id(id=id, string=['idMembers'])
        query = {
            'key': self.app_key,
            'token': self.token,
            'value': id_member
        }
        return super(Card, self).post_request(url=url_rquest, query=query)

    def create_label(self, color: str, id: str = None, name: str = None):
        """Create a new label directly on the card."""
        url_rquest = self.select_id(id=id, string=['labels'])
        query = {
            'key': self.app_key,
            'token': self.token,
            # FIX: color and name were accepted but never sent to the API.
            'color': color,
            'name': name,
        }
        return super(Card, self).post_request(url=url_rquest, query=query)

    def remove_label(self, id: str = None):
        """Remove a label from the card.

        NOTE(review): the endpoint normally also takes a label id
        (/idLabels/{idLabel}); confirm callers' expectations.
        """
        url_rquest = self.select_id(id=id, string=['labels'])
        query = {
            'key': self.app_key,
            'token': self.token,
        }
        # FIX: a removal must be a DELETE, not a GET.
        return super(Card, self).delete_request(url=url_rquest, query=query)

    def mark_card_notifications_as_read(self, id: str = None):
        """Mark all notifications associated with the card as read."""
        url_rquest = self.select_id(id=id, string=['markAssociatedNotificationsRead'])
        query = {
            'key': self.app_key,
            'token': self.token,
        }
        return super(Card, self).post_request(url=url_rquest, query=query)

    def remove_member(self, id_member: str, id: str = None):
        """Unassign a member from the card."""
        url_rquest = self.select_id(id=id, string=['idMembers', id_member])
        query = {
            'key': self.app_key,
            'token': self.token,
        }
        return super(Card, self).delete_request(url=url_rquest, query=query)

    def remove_member_vote(self, id_member: str, id: str = None):
        """Remove a member's vote from the card."""
        url_rquest = self.select_id(id=id, string=['membersVoted', id_member])
        query = {
            'key': self.app_key,
            'token': self.token,
        }
        return super(Card, self).delete_request(url=url_rquest, query=query)

    def update_checkitem_on_checklist(self, id_checklist: str, id_check_item: str, id: str = None, pos: (str, float) = None):
        """Reposition a check item within a specific checklist."""
        url_rquest = self.select_id(id=id, string=['checklist', id_checklist, 'checkItem', id_check_item])
        query = {
            'key': self.app_key,
            'token': self.token,
            'pos': pos
        }
        return super(Card, self).put_request(url=url_rquest, query=query)

    def delete_checklist(self, id_checklist: str, id: str = None):
        """Delete a checklist from the card."""
        url_rquest = self.select_id(id=id, string=['checklists', id_checklist])
        query = {
            'key': self.app_key,
            'token': self.token,
        }
        return super(Card, self).delete_request(url=url_rquest, query=query)
return super(Card, self).delete_request(url=url_rquest, query=query)
| 2.25 | 2 |
restaurantapp/mainapp/migrations/0009_auto_20200604_0847.py | ShubhamJain0/ShubhamJain0.github.io | 0 | 12774109 | <gh_stars>0
# Generated by Django 2.2.2 on 2020-06-04 08:47
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: relax Image.image so NULL is allowed.
    dependencies = [
        ('mainapp', '0008_auto_20200604_0749'),
    ]
    operations = [
        migrations.AlterField(
            model_name='image',
            name='image',
            field=models.FileField(null=True, upload_to='media'),
        ),
    ]
| 1.367188 | 1 |
order/controller.py | Courier-Seba/Biela | 0 | 12774110 | class Producto:
"""Producto de venta"""
def __init__(self, nombre, desc, precio):
self.nombre = nombre
self.descripcion = desc
self.precio = precio
# Es una salida para la muestra de la info
def formateo_textual(self):
textoFinal = ""
for atr in [self.nombre, self.descripcion, self.precio]:
textoFinal += atr + "\n"
return textoFinal
class ListaProductos:
    """Persistable product list for an order (pickle-backed)."""

    def __init__(self, productos):
        # productos: list of Producto objects to persist.
        self.nombreArchivo = "save.p"
        self.lista = productos

    def carga_data(self):
        """Load and return the pickled product list from disk."""
        # FIX: pickle was used but never imported anywhere in this module,
        # so both methods raised NameError; import locally since the file
        # has no top-level import block.
        import pickle
        with open(self.nombreArchivo, "rb") as file:
            carga = pickle.load(file)
        return carga

    def guarda_data(self):
        """Pickle the current product list to disk."""
        import pickle
        with open(self.nombreArchivo, "wb") as file:
            pickle.dump(self.lista, file)
# Seed catalogue used to exercise the classes above.
listaDeProductos = [
    Producto("Cafe", "chico", "30"),
    Producto("cafeLeche", "taza", "50")
]
# For testing only.
productos = ListaProductos(listaDeProductos)
| 3.6875 | 4 |
mysign_app/urls.py | mindhashnl/roomsignage | 0 | 12774111 | <reponame>mindhashnl/roomsignage
from django.contrib.auth import views
from django.urls import include, path
from django.views.generic import RedirectView, TemplateView
from mysign_app.routes import login
from .routes import admin, company, screen_index
# URL routing: public pages, auth/password-reset flow, admin CRUD pages,
# company dashboard, and PWA support.
urlpatterns = [
    # Public landing page and the door-device screen endpoint.
    path('', TemplateView.as_view(template_name='mysign_app/index.html'), name='index'),
    path('screen/', screen_index, name='screen'),
    # Built-in Django password reset flow.
    path('password_reset/', views.PasswordResetView.as_view(), name='password_reset'),
    path('password_reset/done/', views.PasswordResetDoneView.as_view(), name='password_reset_done'),
    path('reset/<uidb64>/<token>/', views.PasswordResetConfirmView.as_view(), name='password_reset_confirm'),
    path('reset/done/', views.PasswordResetCompleteView.as_view(), name='password_reset_complete'),
    # Session login/logout.
    path('login/', login.Login.as_view(), name='login'),
    path('logout/', login.logout, name='logout'),
    # Admin area; the bare /admin/ redirects to the door-device list.
    path('admin/', RedirectView.as_view(url='/admin/door_devices/', permanent=False), name='admin_index'),
    path('admin/door_devices/', admin.DoorDevices.as_view(), name='admin_door_devices'),
    path('admin/companies/', admin.Companies.as_view(), name='admin_companies'),
    path('admin/companies/add/', admin.company_add, name='admin_company_add'),
    path('admin/users/', admin.Users.as_view(), name='admin_users'),
    path('admin/users/add', admin.user_add, name='admin_user_add'),
    # Per-company dashboard.
    path('company/', company.CompanyIndex.as_view(), name='company_index'),
    # Crawler directives and progressive-web-app routes.
    path('robots.txt/', TemplateView.as_view(template_name="mysign_app/robots.txt", content_type='text/plain')),
    path('', include('pwa.urls'))
]
| 2 | 2 |
b3datepicker/conf.py | RDXT/django-bootstrap3-datepicker | 0 | 12774112 | <reponame>RDXT/django-bootstrap3-datepicker
# -*- coding: utf-8 -*-
from django.apps import AppConfig
from django.conf import settings as base_settings
class B3datepickerConfig(AppConfig):
    # Django application registry entry for the b3datepicker app.
    name = 'b3datepicker'
class Settings(object):
    # Default CDN asset URLs for bootstrap-datepicker. Any attribute with
    # the same name defined in the project's Django settings takes
    # precedence (see __getattribute__ below).
    BOOTSTRAP_DATEPICKER_VERSION = '1.6.4'
    B3DATEPICKER_JS = '//cdnjs.cloudflare.com/ajax/libs/bootstrap-datepicker/{}/js/bootstrap-datepicker.min.js'.format(
        BOOTSTRAP_DATEPICKER_VERSION)
    B3DATEPICKER_CSS = '//cdnjs.cloudflare.com/ajax/libs/bootstrap-datepicker/{}/css/bootstrap-datepicker3.min.css'.format(
        BOOTSTRAP_DATEPICKER_VERSION)
    def __getattribute__(self, name):
        # Look the name up on django.conf.settings first; fall back to the
        # class-level defaults above.
        if hasattr(base_settings, name):
            return getattr(base_settings, name)
        return object.__getattribute__(self, name)
# Module-level singleton exposing the effective (overridable) settings.
settings = Settings()
| 1.851563 | 2 |
extensions/matrix/views.py | nirgal/ngw | 0 | 12774113 | <reponame>nirgal/ngw
import pprint
from datetime import datetime, timedelta
from django import forms
from django.conf import settings
from django.utils.translation import ugettext as _
from django.views.generic import FormView, TemplateView
from ngw.core.models import Contact, ContactGroup, MatrixRoom
from ngw.core.views.generic import NgwUserAcl
from . import matrix
def _get_contact_group(room_id):
    """Return the ContactGroup linked to *room_id*, or None when unmapped."""
    ngwroom = MatrixRoom.objects.filter(pk=room_id).first()
    if ngwroom is None:
        return None
    return ngwroom.contact_group
def _check_state_filled(room):
    '''
    Ensure room['state'] is populated, querying the homeserver on demand.
    '''
    if 'state' in room:
        return
    raw_state = matrix.get_room_state(room['room_id'])['state']
    room['state'] = matrix._room_state_clean(raw_state)
def _get_autoredact_maxage(room):
    """Return the room's auto-redaction max age as a timedelta, or None."""
    _check_state_filled(room)
    try:
        max_age_seconds = room['state']['m.room.autoredact']['autoredact']
    except KeyError:
        # Room has no m.room.autoredact state event.
        return None
    return timedelta(seconds=max_age_seconds)
class MatrixRoomsView(NgwUserAcl, TemplateView):
    '''
    Room list view
    '''
    template_name = 'rooms_list.html'

    def get_context_data(self, **kwargs):
        params = self.request.GET
        # ?empty=1 / ?private=1 widen the listing.
        rooms = list(matrix.get_rooms(
            show_empty=params.get('empty', False),
            show_private=params.get('private', False),
        ))
        for room in rooms:
            room['pretty'] = pprint.pformat(room)
            room['contact_group'] = _get_contact_group(room['room_id'])
            maxage = _get_autoredact_maxage(room)
            if maxage:
                room['autoredact'] = maxage
        context = {
            'title': _('Matrix rooms'),
            'rooms': rooms,
        }
        context.update(kwargs)
        return super().get_context_data(**context)
class MatrixRoomView(NgwUserAcl, TemplateView):
    '''
    Room details view: shows members (with power levels), the mapped
    contact group, and the auto-redaction setting.
    '''
    template_name = 'room.html'
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        room_id = context['room_id']
        context['title'] = _('Matrix room') + ' ' + room_id
        room = matrix.get_room_info(room_id)
        room['contact_group'] = _get_contact_group(room_id)
        # Make sure room['state'] is available before reading power levels.
        _check_state_filled(room)
        try:
            power_levels = room['state']['m.room.power_levels']
            # Members without an explicit entry fall back to users_default.
            default_pl = power_levels.get('users_default', 0)
            for member in room['state']['members']:
                member['power_level'] = (
                    power_levels['users'].get(member['user_id'], default_pl))
        except KeyError:
            # Room has no power-level state event; skip the annotation.
            pass
        autoredact_maxage = _get_autoredact_maxage(room)
        if autoredact_maxage:
            room['autoredact'] = autoredact_maxage
        # ?debug=1 adds a pretty-printed dump of the room dict to the page.
        if self.request.GET.get('debug', False):
            room['pretty'] = pprint.pformat(room)
        context['room'] = room
        return context
class MatrixUserView(NgwUserAcl, TemplateView):
    '''
    User details view: matrix account info, the matching NGW contact
    (when one exists), and the rooms the user joined.
    '''
    template_name = 'user.html'
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        user_id = context['user_id']
        context['title'] = _('Matrix user') + ' ' + user_id
        user = matrix.get_user_info(user_id)
        context['mat_user'] = user
        if 'creation_ts' in user:
            # The homeserver reports a unix timestamp; convert for templates.
            context['creation_dt'] = (
                datetime.fromtimestamp(user['creation_ts'])
            )
        # The matrix localpart doubles as the NGW login name.
        login = matrix.localpart(user_id)
        try:
            ngw_user = Contact.objects.get_by_natural_key(login)
            context['ngw_user'] = ngw_user
            context['ngw_user_url'] = (
                f'/contactgroups/{settings.MATRIX_SYNC_GROUP}'
                f'/members/{ngw_user.id}/'
            )
        except Contact.DoesNotExist:
            # Matrix account without a matching NGW contact: show matrix
            # info only.
            pass
        context['rooms'] = []
        for room_id in matrix.get_user_rooms(user_id)['joined_rooms']:
            room_info = matrix.get_room_info(room_id)
            autoredact_maxage = _get_autoredact_maxage(room_info)
            if autoredact_maxage:
                room_info['autoredact'] = autoredact_maxage
            context['rooms'].append(room_info)
        if self.request.GET.get('debug', False):
            # Never leak the (hashed) password in the debug dump.
            if user['password_hash']:
                user['password_hash'] = '********'
            context['pretty'] = pprint.pformat(user)
        return context
class RoomDeleteForm(forms.Form):
    """Empty confirmation form used to shut down a matrix room."""

    def __init__(self, room_id, *args, **kargs):
        self.room_id = room_id
        super().__init__(*args, **kargs)

    def close_room(self):
        """Drop the NGW room mapping (if any), then delete the room server-side."""
        ngwroom = MatrixRoom.objects.filter(pk=self.room_id).first()
        if ngwroom is not None:
            ngwroom.delete()
        matrix.room_delete(self.room_id)
class MatrixRoomCloseView(NgwUserAcl, FormView):
    '''
    Confirmation view that shuts down (deletes) a matrix room.
    '''
    template_name = 'room_close.html'
    form_class = RoomDeleteForm
    success_url = '/matrix/room/'
    def get(self, request, room_id):
        # Stash the room id from the URL for get_form_kwargs/context.
        self.room_id = room_id
        return super().get(request)
    def post(self, request, room_id):
        self.room_id = room_id
        return super().post(request)
    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        # RoomDeleteForm.__init__ takes room_id as its first argument.
        kwargs['room_id'] = self.room_id
        return kwargs
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['title'] = _('Please confirm matrix room shutdown')
        context['room_id'] = self.room_id
        return context
    def form_valid(self, form):
        # On confirmation, delete the room, then redirect to the room list.
        form.close_room()
        return super().form_valid(form)
class RoomAddAdminForm(forms.Form):
    """Form to pick a moderator-group contact to promote to room admin."""

    # FIX: the choices were previously computed in the class body, i.e. once
    # at module import time — hitting the database on import and serving a
    # stale member list afterwards. They are now filled per-instance.
    admin = forms.ChoiceField(choices=[])

    def __init__(self, room_id, *args, **kargs):
        self.room_id = room_id
        super().__init__(*args, **kargs)
        self.fields['admin'].choices = [
            (contact.id, contact.name)
            for contact in ContactGroup.objects.get(
                pk=settings.MATRIX_MOD_GROUP).get_all_members()
        ]

    def make_admin(self):
        """Grant matrix admin power level to the selected contact."""
        contact_id = self.cleaned_data['admin']
        contact = Contact.objects.get(pk=contact_id)
        login = contact.get_username()
        assert login
        user_id = f'@{login}:{matrix.DOMAIN}'
        matrix.room_makeadmin(self.room_id, user_id)
class MatrixRoomAddAdminView(NgwUserAcl, FormView):
    '''
    Form view that promotes a moderator-group contact to room admin.
    '''
    template_name = 'add_admin.html'
    form_class = RoomAddAdminForm
    success_url = '/matrix/room/'
    def get(self, request, room_id):
        # Stash the room id from the URL for get_form_kwargs/context.
        self.room_id = room_id
        return super().get(request)
    def post(self, request, room_id):
        self.room_id = room_id
        return super().post(request)
    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        # RoomAddAdminForm.__init__ takes room_id as its first argument.
        kwargs['room_id'] = self.room_id
        return kwargs
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['title'] = _('Please select contact to make admin')
        context['room_id'] = self.room_id
        return context
    def form_valid(self, form):
        # On valid submission, perform the promotion, then redirect.
        form.make_admin()
        return super().form_valid(form)
| 1.90625 | 2 |
BOJ/18000~18999/18800~18899/18870.py | shinkeonkim/today-ps | 2 | 12774114 | <reponame>shinkeonkim/today-ps
n = int(input())
L = list(map(int,input().split()))
A = list(set(L[:]))
d = {}
A.sort()
for i in range(len(A)):
d[A[i]] = i
for i in L:
print(d[i],end = " ") | 3.125 | 3 |
hpc_acm_cli/parser_builder.py | coin8086/hpc_acm_cli | 2 | 12774115 | import argparse
class ParserBuilder:
    """Build an argparse parser from a declarative spec dictionary.

    Spec shape: {'options': {...}, 'params': [...], 'subcommands':
    {'options': {...}, 'items': [{'name', 'options', 'params'}, ...]}}.
    A param entry with 'group' truthy describes a mutually exclusive
    group whose members live under 'items'.
    """

    @classmethod
    def build(cls, spec):
        """Create and return an ArgumentParser described by *spec*."""
        parser = argparse.ArgumentParser(**spec.get('options', {}))
        top_params = spec.get('params')
        if top_params:
            cls.add_params(parser, top_params)
        subcommands = spec.get('subcommands')
        if subcommands:
            subparsers = parser.add_subparsers(**subcommands.get('options', {}))
            for cmd in subcommands['items']:
                sub = subparsers.add_parser(cmd['name'], **cmd.get('options', {}))
                cmd_params = cmd.get('params')
                if cmd_params:
                    cls.add_params(sub, cmd_params)
        return parser

    @classmethod
    def add_params(cls, parser, params):
        """Attach each param spec to *parser*, recursing into groups."""
        for param in params:
            if param.get('group', False):
                group = parser.add_mutually_exclusive_group(**param.get('options', {}))
                cls.add_params(group, param['items'])
            else:
                parser.add_argument(param['name'], **param.get('options', {}))
| 2.796875 | 3 |
operations/fleet_management/migrations/0069_auto_20180730_1054.py | kaizer88/emps | 0 | 12774116 | <filename>operations/fleet_management/migrations/0069_auto_20180730_1054.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-07-30 08:54
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: add an optional fuel-card spending
    # limit plus a free-text comment, on both the live table and its
    # django-simple-history shadow table.
    dependencies = [
        ('fleet_management', '0068_merge_20180726_0812'),
    ]
    operations = [
        migrations.AddField(
            model_name='fuelcard',
            name='card_limit',
            field=models.IntegerField(blank=True, default=0, null=True, validators=[django.core.validators.MinValueValidator(0)], verbose_name=b'Card Limit'),
        ),
        migrations.AddField(
            model_name='fuelcard',
            name='card_limit_comment',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name=b'Comment'),
        ),
        migrations.AddField(
            model_name='historicalfuelcard',
            name='card_limit',
            field=models.IntegerField(blank=True, default=0, null=True, validators=[django.core.validators.MinValueValidator(0)], verbose_name=b'Card Limit'),
        ),
        migrations.AddField(
            model_name='historicalfuelcard',
            name='card_limit_comment',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name=b'Comment'),
        ),
    ]
| 1.664063 | 2 |
api_eto/api_eto.py | tonybutzer/eto-draft | 0 | 12774117 | import argparse
from etoLib.log_logger import log_make_logger
from etoLib.s3_func import s3_hello
from etoLib.util_func import unique
from etoLib.util_func import grepfxn
def get_parser():
    """Build the command-line parser for the eto runner."""
    cli = argparse.ArgumentParser(description='Run the eto code')
    cli.add_argument('tile', metavar='TILE', type=str, nargs='*',
                     help='the tile to process - example: 40N-80E')
    cli.add_argument('-c', '--configdir', type=str, default='./sample_config',
                     help='specify and alternate config_dict dir example: -c sample_config ')
    cli.add_argument('-o', '--optimize', type=str, default='yes',
                     help='optimize caching on ')
    return cli
def command_line_runner():
    """Parse CLI arguments and kick off the eto run."""
    parser = get_parser()
    args = vars(parser.parse_args())
    if args['configdir']:
        print("configdir", args['configdir'])
    optimize = False  # NOTE(review): parsed/declared but currently unused
    opt = args['optimize']  # NOTE(review): unused; see commented-out VegET call
    config_directory = args['configdir']
    # NOTE(review): `log` is a module global created only under __main__;
    # importing this module and calling this function directly would raise
    # NameError — confirm intended usage.
    log.info('USing configdir {}'.format(config_directory))
    log.info('this is just a starter kit for our cmdline api for eto - Help Greg!')
    log.info('or logging agents and logging backends ... docker deployments')
    # RUN the class Veget
    #myveg = VegET(config_directory, tile, shp, optimize)
    #myveg.run_veg_et()
    log.info('this is how you call one of your functions')
    s3_hello('Greg')
if __name__ == '__main__':
    # Create the shared logger before running; command_line_runner uses it.
    log = log_make_logger('THE_ETO_CREATOR')
    command_line_runner()
| 2.359375 | 2 |
lib/googlecloudsdk/command_lib/resource_manager/org_policies_base.py | bshaffer/google-cloud-sdk | 0 | 12774118 | <reponame>bshaffer/google-cloud-sdk<filename>lib/googlecloudsdk/command_lib/resource_manager/org_policies_base.py<gh_stars>0
# -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common constants and methods for Org Policies commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.resource_manager import org_policies
def OrgPoliciesService(args):
  """Return the org-policies API service matching the resource flag on args.

  Exactly one of --project / --organization / --folder selects the service;
  None is returned when none of them is set.
  """
  client = org_policies.OrgPoliciesClient()
  if args.project:
    return client.projects
  if args.organization:
    return client.organizations
  if args.folder:
    return client.folders
  return None
def GetOrgPolicyRequest(args):
  """Constructs a resource-dependent GetOrgPolicyRequest.

  Args:
    args: Command line arguments.

  Returns:
    Resource-dependent GetOrgPolicyRequest, or None if no resource flag
    was supplied.
  """
  messages = org_policies.OrgPoliciesMessages()
  get_request = messages.GetOrgPolicyRequest(
      constraint=org_policies.FormatConstraint(args.id))
  resource_id = GetResource(args)
  if args.project:
    return messages.CloudresourcemanagerProjectsGetOrgPolicyRequest(
        projectsId=resource_id, getOrgPolicyRequest=get_request)
  if args.organization:
    return messages.CloudresourcemanagerOrganizationsGetOrgPolicyRequest(
        organizationsId=resource_id, getOrgPolicyRequest=get_request)
  if args.folder:
    return messages.CloudresourcemanagerFoldersGetOrgPolicyRequest(
        foldersId=resource_id, getOrgPolicyRequest=get_request)
  return None
def SetOrgPolicyRequest(args, policy):
  """Constructs a resource-dependent SetOrgPolicyRequest.

  Args:
    args: Command line arguments.
    policy: OrgPolicy for resource-dependent SetOrgPolicyRequest.

  Returns:
    Resource-dependent SetOrgPolicyRequest, or None if no resource flag
    was supplied.
  """
  messages = org_policies.OrgPoliciesMessages()
  resource_id = GetResource(args)
  set_request = messages.SetOrgPolicyRequest(policy=policy)
  if args.project:
    return messages.CloudresourcemanagerProjectsSetOrgPolicyRequest(
        projectsId=resource_id, setOrgPolicyRequest=set_request)
  if args.organization:
    return messages.CloudresourcemanagerOrganizationsSetOrgPolicyRequest(
        organizationsId=resource_id, setOrgPolicyRequest=set_request)
  if args.folder:
    return messages.CloudresourcemanagerFoldersSetOrgPolicyRequest(
        foldersId=resource_id, setOrgPolicyRequest=set_request)
  return None
def GetResource(args):
  """Return the resource ID named by the project/organization/folder flag.

  Args:
    args: Command line arguments.

  Returns:
    The first truthy value among args.project, args.organization and
    args.folder (checked in that order), or None when all are empty.
  """
  for attribute in ('project', 'organization', 'folder'):
    resource_id = getattr(args, attribute)
    if resource_id:
      return resource_id
  return None
| 1.960938 | 2 |
netroids/main.py | FarmCodeGary/Netroids | 0 | 12774119 | <reponame>FarmCodeGary/Netroids
import socket
def get_local_address():
    """Prompt for the externally reachable IP, defaulting to this host's IP.

    The machine's own resolved address is offered as the default; whatever
    the user types (if anything) overrides it.  Python 2 module: uses
    ``raw_input``.
    """
    default_address = socket.gethostbyname(socket.gethostname())
    answer = raw_input(
        "Enter your external IP address (default: "+default_address+"): ").strip()
    if answer == "":
        return default_address
    return answer
if __name__ == "__main__":
name = raw_input("Enter your name (default: Player): ").strip()
name = name.replace(" ", "").replace("|", "").replace(":", "")
if name == "":
name = "Player"
client_or_server = raw_input(
"Type 1 to start a server, or 2 to start as a client (default: 1): ")
if client_or_server.strip() == "2":
local_address = get_local_address()
server_address = raw_input("Enter the IP address of a server: ").strip()
# TODO: Move this import?
import client
the_client = client.Client(local_address, server_address, name)
the_client.go()
else:
local_address = get_local_address()
# TODO: Move this import?
import server
the_server = server.Server(local_address, name)
the_server.go()
| 3.203125 | 3 |
mdtk/filesystem_utils.py | JamesOwers/corrupted_midi_dataset | 29 | 12774120 | """Utility functions for file manipulation"""
import logging
import os
import shutil
import sys
import urllib.error
import urllib.request
import zipfile
def download_file(source, dest, verbose=False, overwrite=None):
    """Get a file from a url and save it locally.

    Args
    ----
    source: str
        URL to download from.
    dest: str
        Local path to save the file to.
    verbose: bool
        Verbosity of printing.
    overwrite: boolean or None
        If dest already exists: True - download and overwrite;
        False - raise OSError; None - skip the download (a warning is
        only logged when verbose is set).
    """
    if verbose:
        print(f"Downloading {source} to {dest}")
    if os.path.exists(dest):
        if overwrite is None:
            if verbose:
                # NOTE: the "not downloading" warning is gated on verbose.
                logging.warning(f"WARNING: {dest} already exists, not downloading")
            return
        if not overwrite:
            raise OSError(f"{dest} already exists")
    try:
        urllib.request.urlretrieve(source, dest)
    except urllib.error.HTTPError as e:
        # Surface the failing URL on stderr before re-raising for the caller.
        print(f"Url {source} does not exist", file=sys.stderr)
        raise e
def make_directory(path, overwrite=None, verbose=False):
    """Create a directory, handling the case where it already exists.

    Args
    ----
    path: str
        The path of the directory to create
    overwrite: boolean or None
        When the path already exists: True - remove it and recreate;
        False - re-raise the FileExistsError; None - keep the existing
        directory and (if verbose) warn. Any other value raises ValueError.
    verbose: bool
        Verbosity of printing
    """
    if verbose:
        print(f"Making directory at {path}")
    try:
        os.makedirs(path)
        return
    except FileExistsError as err:
        if overwrite is True:
            if verbose:
                print(f"Deleting existing directory: {path}")
            shutil.rmtree(path)
            os.makedirs(path)
        elif overwrite is None:
            if verbose:
                logging.warning(
                    f"WARNING: {path} already exists, writing "
                    "files only if they do not already exist.",
                )
        elif overwrite is False:
            raise err
        else:
            raise ValueError(
                "overwrite should be boolean or None, not " f'"{overwrite}"'
            )
def extract_zip(zip_path, out_path, overwrite=None, verbose=False):
    """Extract zip_path into out_path and return the extraction directory.

    The archive is assumed to unpack into a directory named after the zip
    file (basename without extension). When that directory already exists,
    overwrite controls the behaviour: True - delete it and re-extract;
    None - assume the zip was already extracted and return immediately;
    False - raise FileExistsError.
    """
    if verbose:
        print(f"Extracting {zip_path} to {out_path}")
    archive_name = os.path.splitext(os.path.basename(zip_path))[0]
    extracted_path = os.path.join(out_path, archive_name)
    already_there = os.path.exists(extracted_path)
    if already_there and overwrite is True:
        if verbose:
            logging.warning("Deleting existing directory: " f"{extracted_path}")
        shutil.rmtree(extracted_path)
    elif already_there and overwrite is None:
        if verbose:
            logging.warning(
                f"{extracted_path} already exists. Assuming "
                "this zip has already been extracted, not "
                "extracting.",
            )
        return extracted_path
    elif already_there and overwrite is False:
        raise FileExistsError(f"{extracted_path} already exists")
    with zipfile.ZipFile(zip_path, "r") as archive:
        archive.extractall(path=out_path)
    return extracted_path
def copy_file(filepath, output_path, overwrite=None, mkdir=False):
    """Copy filepath into the output_path directory.

    Args
    ----
    filepath: str
        The file to copy.
    output_path: str
        Destination directory.
    overwrite: boolean or None
        If the destination file already exists: True - copy over it;
        False - raise FileExistsError; None (or any other value) - keep
        the existing file untouched and return silently.
    mkdir: bool
        If True, create output_path (and parents) when it does not exist.
        Previously this flag was accepted but ignored.
    """
    if mkdir:
        # Create the destination directory on demand; no-op if it exists.
        os.makedirs(output_path, exist_ok=True)
    path = os.path.join(output_path, os.path.basename(filepath))
    if not os.path.exists(path):
        shutil.copy(filepath, output_path)
    elif overwrite is True:
        shutil.copy(filepath, output_path)
    elif overwrite is False:
        raise FileExistsError(f"{path} already exists")
    # overwrite is None (or anything else): leave the existing file alone.
| 3.65625 | 4 |
docs/api/contrib/boundaries.py | souravsingh/yellowbrick | 1 | 12774121 | <filename>docs/api/contrib/boundaries.py
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from yellowbrick.contrib.classifier import DecisionViz
# Build a linearly separable toy dataset (kept for reference; the moons
# dataset below is what is actually visualized).
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
                           random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
# NOTE(review): `linearly_separable` is never used after this point —
# presumably left over from the upstream scikit-learn classifier example.
data_set = make_moons(noise=0.3, random_state=0)
X, y = data_set
# Standardize the features, then hold out 40% of the samples for drawing.
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4, random_state=42)
# Render the k-NN decision boundary and save the figure for the docs.
viz = DecisionViz(KNeighborsClassifier(3), title="Nearest Neighbors", features=['Feature One', 'Feature Two'], classes=['A', 'B'])
viz.fit(X_train, y_train)
viz.draw(X_test, y_test)
viz.poof(outpath="images/knn_decisionviz.png")
# Same visualization for a linear SVM.
viz = DecisionViz(SVC(kernel="linear", C=0.025), title="Linear SVM", features=['Feature One', 'Feature Two'], classes=['A', 'B'])
viz.fit(X_train, y_train)
viz.draw(X_test, y_test)
viz.poof(outpath="images/svc_decisionviz.png")
| 2.625 | 3 |
otcextensions/tests/unit/osclient/cce/v2/fakes.py | kucerakk/python-otcextensions | 0 | 12774122 | <gh_stars>0
# Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import uuid
import mock
from otcextensions.sdk.cce.v3 import cluster
from otcextensions.sdk.cce.v3 import cluster_node
from otcextensions.tests.unit.osclient import test_base
class TestCCE(test_base.TestCommand):
    """Base test case that exposes a mocked CCE client on the command app."""

    def setUp(self):
        super(TestCCE, self).setUp()
        mocked_cce = mock.Mock()
        self.app.client_manager.cce = mocked_cce
        self.client = mocked_cce
class FakeCluster(test_base.Fake):
    """Fake one or more Cluster"""

    @classmethod
    def generate(cls):
        """Build a single Cluster object with randomized attributes."""
        metadata = {
            'uid': 'id-' + uuid.uuid4().hex,
            'name': 'name-' + uuid.uuid4().hex,
        }
        spec = {
            'type': random.choice(['VirtualMachine', 'BareMetal']),
            'flavor': uuid.uuid4().hex,
            'version': uuid.uuid4().hex,
            'host_network': {
                'vpc': 'vpc-' + uuid.uuid4().hex,
                'subnet': 'subnet-' + uuid.uuid4().hex,
            },
            'container_network': {
                'mode': random.choice(['overlay_l2']),
            },
        }
        status = {
            'phase': 'Available',
            'endpoints': {
                'internal': uuid.uuid4().hex,
                'external_otc': uuid.uuid4().hex,
            },
        }
        return cluster.Cluster.existing(
            kind='Cluster', metadata=metadata, spec=spec, status=status)
class FakeClusterNode(test_base.Fake):
    """Fake one or more Cluster node"""

    @classmethod
    def generate(cls):
        """Build a single ClusterNode object with randomized attributes."""
        metadata = {
            'uid': 'id-' + uuid.uuid4().hex,
            'name': 'name-' + uuid.uuid4().hex,
        }
        # Two random data volumes, mirroring the original fixture shape.
        data_volumes = [
            {
                'type': 'dt' + uuid.uuid4().hex,
                'size': random.randint(1, 15000),
            }
            for _ in range(2)
        ]
        spec = {
            'flavor': 'flavor-' + uuid.uuid4().hex,
            'availability_zone': 'az-' + uuid.uuid4().hex,
            'os': 'os-' + uuid.uuid4().hex,
            'login': {
                'sshKey': 'key-' + uuid.uuid4().hex,
            },
            'data_volumes': data_volumes,
        }
        status = {
            'phase': 'Available',
            'server_id': 'sid-' + uuid.uuid4().hex,
            'floating_ip': 'fip-' + uuid.uuid4().hex,
            'private_ip': 'pip-' + uuid.uuid4().hex,
        }
        return cluster_node.ClusterNode.existing(
            kind='Node', metadata=metadata, spec=spec, status=status)
| 1.804688 | 2 |
scripts/irac_model_single.py | grizli-project/grizli-aws | 0 | 12774123 | #!/usr/bin/env python
import sys
import os
import time
import json
import golfir.model
import golfir.utils
import yaml
def run(root, argv=[]):
    """Run golfir IRAC model fitting for the field `root`.

    Builds default modeling keywords, overrides them from `argv`, writes the
    resolved parameters to ``<root>.golfir.yml`` inside the run directory,
    runs ``golfir.model.run_all_patches`` and finally drops a
    ``/tmp/<root>.finished.txt`` marker.  Returns True (also when skipping
    because the run directory or finished-marker already exists).

    NOTE(review): `argv=[]` is a mutable default; harmless here because argv
    is only read, but consider `argv=None`.
    """
    #ds9 = None
    defaults = {'ds9': None,
                'patch_arcmin': 1.0, # Size of patch to fit
                'patch_overlap': 0.2, # Overlap of automatic patches
                'mag_limit': [24, 27], # Two-pass modeling. Fit sources brighter than mag_limit in HST catalog
                'run_alignment': True, # Run fine alignment between IRAC-HST, in between two steps of `mag_limit`
                'galfit_flux_limit': -50, # Brightness limit (uJy) of objects to fit with GALFIT. Or S/N if negative
                'refine_brightest': True, # Refine masked bright objects with galfit
                'any_limit': 16, # Brightness limit below which to mask *any* sources
                'point_limit': 16, # Brightness limit below which to mask point-like sources
                'bright_sn': 30, # S/N threshold for masked pixels of bright object
                'bkg_kwargs': {'order_npix': 32}, # Arguments to the local background routine
                'channels': ['ch1', 'ch2'], # Channels to try
                'psf_only': False,
                'use_saved_components': False, # Use models from a "components" file if found
                'window': None, # PSF-match windowing
                'fetch': True,
                'PATH': '/GrizliImaging/',
                'use_patches': True,
                'sync_results': True,
                'clean_PATH': True,
                'skip_if_exists': True}
    # Leftover debug output ("xxx" prints throughout this function).
    print('xxx', defaults)
    # Force full-field fitting regardless of the default above.
    defaults['patch_arcmin'] = -1
    args, kwargs = golfir.utils.argv_to_dict(argv, defaults=defaults)
    print('xxx', kwargs)
    run_dir = os.path.join(kwargs['PATH'], root)
    # NOTE(review): `&` is bitwise-and on a bool pair here; works because
    # os.path.exists returns bool, but `and` would be clearer.
    if os.path.exists(run_dir) & kwargs['skip_if_exists']:
        print('directory {0} exists'.format(run_dir))
        return True
    if not os.path.exists(run_dir):
        os.mkdir(run_dir)
    # A finished-marker from a previous run also short-circuits the job.
    if os.path.exists('/tmp/{0}.finished.txt'.format(root)):
        print('/tmp/{0}.finished.txt'.format(root))
        return True
    # Persist the resolved parameters alongside the run for reproducibility.
    with open(os.path.join(run_dir, root + '.golfir.yml'), 'w') as fp:
        yaml.dump(kwargs, fp)
    # Replace a string 'ds9' option with a live DS9 connection object.
    if isinstance(kwargs['ds9'], str):
        if kwargs['ds9'] == 'connect':
            target = 'DS9:*'
        else:
            target = kwargs['ds9']
        import grizli.ds9
        print('Use DS9: ', target)
        kwargs['ds9'] = grizli.ds9.DS9(target=target)
    golfir.model.run_all_patches(root, **kwargs)
    if kwargs['clean_PATH']:
        # Destructive cleanup: removes the whole working directory for root.
        os.chdir(kwargs['PATH'])
        os.system(f'rm -rf ./{root}')
    # Drop the finished marker with a timestamp.
    fp = open(f'/tmp/{root}.finished.txt','w')
    fp.write(time.ctime())
    fp.close()
    return True
if __name__ == '__main__':
    # First CLI argument is the field rootname; the full argv tail is passed
    # on so `run` can parse keyword overrides out of it.
    root = sys.argv[1]
    print('xxx run', root, sys.argv[1:])
    run(root, argv=sys.argv[1:])
| 2.09375 | 2 |
datasets/MixedBrainDataset.py | rist-ro/argo | 4 | 12774124 | <reponame>rist-ro/argo<filename>datasets/MixedBrainDataset.py
"""
Module for managing multiple brain datasets at once
"""
from datasets.BrainDataset import BrainDataset, modalities, NPROCS
import numpy as np
import os
import fnmatch
import tensorflow as tf
import PIL
import pdb
class MixedBrainDataset(BrainDataset):
    """Dataset that mixes slices from several brain-image directories.

    Each entry of `params['data_dirs']` contributes its train/validation/test
    file lists; `params['training_data_proportion']` (optional, one float per
    directory) limits how much of each directory's training split is used.
    Labels are derived from the file path (HCP -> 0, BRATS -> 1).
    """

    def __init__(self, params):
        super().__init__(params)
        self._id = self.dataset_id(params)
        self._data_dirs = self._params['data_dirs']
        # One proportion per data dir; defaults to using all training data.
        self._training_data_proportion = self._params['training_data_proportion'] \
            if 'training_data_proportion' in params else [1.0 for _ in range(len(self._data_dirs))]
        # options for each dataset
        self._train_set_x, self._train_set_y, \
            self._validation_set_x, self._validation_set_y, \
            self._test_set_x, self._test_set_y = self.load_float_brains(self._data_dirs, self._training_data_proportion)

    def dataset_id(self, params):
        """
        This method interprets the parameters and generates a unique id
        string for the dataset configuration.
        """
        id = 'MixedBrainDataset'
        id += super().dataset_id(params)
        if 'training_data_proportion' in params.keys():
            id += "-p" + "_".join([str(val) for val in params['training_data_proportion']])
        return id

    def load_float_brains(self, data_dirs, proportions):
        """Collect filename arrays for all splits across every data dir.

        Returns (train_x, train_y, validation_x, validation_y, test_x,
        test_y); the y arrays are copies of the filename arrays (labels are
        resolved later, in `dataset_map`, via `get_label`).
        """
        datasets_tuple = []
        datasets_tuple_validation = []
        datasets_tuple_test = []
        for data_dir, proportion in zip(data_dirs, proportions):
            datasets_tuple = np.append(datasets_tuple, self.load_file_names(data_dir, 'train',
                                                                            proportion=proportion))
            datasets_tuple_validation = np.append(datasets_tuple_validation,
                                                  self.load_file_names(data_dir, 'validation'))
            datasets_tuple_test = np.append(datasets_tuple_test, self.load_file_names(data_dir, 'test'))
        # Duplicate the filename row so the (x, y) unpack below works; y is
        # just the filename again until labels are computed in dataset_map.
        datasets_tuple = np.tile(datasets_tuple, (2, 1))
        datasets_tuple_validation = np.tile(datasets_tuple_validation, (2, 1))
        datasets_tuple_test = np.tile(datasets_tuple_test, (2, 1))
        datasets_tuple = np.asarray(datasets_tuple)
        datasets_tuple_validation = np.asarray(datasets_tuple_validation)
        datasets_tuple_test = np.asarray(datasets_tuple_test)
        print('---------DATASET TUPLE------------', datasets_tuple.shape)
        train_set_x, train_set_y = datasets_tuple
        print('---------DATASET TUPLE VALIDATION------------', datasets_tuple_validation.shape)
        validation_set_x, validation_set_y = datasets_tuple_validation
        print('---------DATASET TUPLE TEST------------', datasets_tuple_test.shape)
        test_set_x, test_set_y = datasets_tuple_test
        print('--------------X SHAPE-----------------')
        # One channel per requested modality (single channel when none given).
        channels_no = len(self._modalities) if self._modalities != None else 1
        self._train_set_x_shape = np.load(datasets_tuple[0, 0]).shape + (channels_no,)
        if self._resize is not None:
            self._train_set_x_shape = (self._resize, self._resize, channels_no)
        print(self._train_set_x_shape)
        return train_set_x, train_set_y, validation_set_x, validation_set_y, test_set_x, test_set_y

    # overriding
    def load_file_names(self, root, data_type, proportion=1.0):
        """List slice files for one split, keeping only `proportion` of them.

        When modalities are configured, only files of the first modality are
        listed (other modalities are substituted in later, in dataset_map).
        """
        file_names = []
        for path, dirs, files in os.walk(root + '/' + data_type):
            if self._modalities is not None:
                reg_filter = '*_' + str(modalities[self._modalities[0]]) + '_*'
                for f in fnmatch.filter(files, reg_filter):
                    file_names.append(root + '/' + data_type + '/' + f)
            else:
                for f in files:
                    file_names.append(root + '/' + data_type + '/' + f)
        file_names = np.asarray(file_names)
        file_names = file_names[:int(proportion * len(file_names))]
        return file_names

    def get_label(self, filename):
        """Map a file path to an integer class: HCP -> 0, BRATS -> 1."""
        if 'HCP' in filename:
            return 0
        if 'BRATS' in filename:
            return 1
        # todo implement when the tumour mask is given and the label is [1,1]

    # overriding
    def dataset_map(self, dataset, datasets_tuple):
        """Attach a py_func loader that reads slices (and labels) by index."""
        output_types = self.get_output_types(datasets_tuple)
        output_shapes = self.get_output_shapes(datasets_tuple)

        def load_function(n):
            # n indexes into full_data (filename pairs built below).
            filename = full_data[n][0]
            result = np.empty(output_shapes[0], np.float32)
            modality_filename = filename
            if self._modalities is not None:
                # Derive each modality's filename from the first modality's
                # path and stack the slices as channels.
                for i, modality in enumerate(self._modalities):
                    modality_filename = str.replace(str(filename), modalities[self._modalities[0]],
                                                    modalities[modality])
                    image = self.load_slice_from_file(str(modality_filename))
                    if self._resize is not None:
                        image = np.array(
                            PIL.Image.fromarray(image).resize([self._resize, self._resize]))
                    result[:, :, i] = image
            else:
                # NOTE(review): uses self._data_dir (singular) here although
                # this class is configured with data_dirs — confirm this
                # branch is exercised/correct for mixed datasets.
                image = self.load_slice_from_file(self._data_dir + '/' + str(filename))
                if self._resize is not None:
                    image = np.array(
                        PIL.Image.fromarray(image).resize([self._resize, self._resize]))
                result = image.reshape([image.shape[0], image.shape[1], 1])
            label = self.get_label(modality_filename)
            return result, np.int32(label)

        full_data = list(zip(*datasets_tuple))
        dataset = dataset.map(
            lambda n: tuple(tf.py_func(load_function,
                                       [n], output_types)
                            ), num_parallel_calls=NPROCS)
        return dataset

    # overriding
    def get_output_shapes(self, datasets_tuple):
        """Shape of one sample: (H, W, channels) image plus scalar label."""
        image = np.load(datasets_tuple[0][0]).astype(np.float32)
        channels_no = len(self._modalities) if self._modalities is not None else 1
        output_shapes = tuple([image.shape + (channels_no,), ()])
        if self._resize is not None:
            output_shapes = ((self._resize, self._resize, channels_no), ())
        return output_shapes

    # overriding
    def get_output_types(self, datasets_tuple):
        """Dtypes of one sample: image dtype (float32) plus int32 label."""
        image = np.load(datasets_tuple[0][0]).astype(np.float32)
        output_types = tuple([tf.as_dtype(image.dtype), tf.int32])
        return output_types

    # overriding
    @property
    def x_shape_train(self):
        return self._train_set_x_shape

    # overriding
    # @property
    # def y_shape_train(self):
    #     return self._train_set_y_shape

    # overriding
    @property
    def x_shape_eval(self):
        return self._train_set_x_shape

    @property
    def n_labels(self):
        """return the number of labels in this dataset"""
        return 2

    @property
    def data_dirs(self):
        # Directories this mixed dataset was built from.
        return self._data_dirs
| 2.203125 | 2 |
setup.py | jay3332/wumpus.py | 4 | 12774125 | import re
from setuptools import setup
# Read package metadata (version/author) out of the package's __init__.py.
with open('wumpus/__init__.py') as f:
    contents = f.read()
try:
    version = re.search(
        r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', contents, re.M
    ).group(1)
except AttributeError:
    raise RuntimeError('Could not identify version') from None
# NOTE(review): the `version` extracted above is never passed to setup() —
# the call below hard-codes '0.0.0' (see its inline comment); confirm intended.
# look at this boilerplate code
try:
    author = re.search(
        r'^__author__\s*=\s*[\'"]([^\'"]*)[\'"]', contents, re.M
    ).group(1)
except AttributeError:
    # Fall back to the project owner when __author__ is absent.
    author = 'jay3332'
with open('README.md', encoding='utf-8') as f:
    readme = f.read()
with open('requirements.txt', encoding='utf-8') as f:
    requirements = f.readlines()
setup(
    name='wumpus.py',
    author=author,
    url='https://github.com/jay3332/wumpus.py',
    project_urls={
        "Issue tracker": "https://github.com/jay3332/wumpus.py/issues",
        "Discord": "https://discord.gg/FqtZ6akWpd"
    },
    version='0.0.0',  # version (Reserve 0.1.0 for the finished release)
    packages=[
        'wumpus',
        'wumpus.core',
        'wumpus.models',
        'wumpus.typings'
    ],
    license='MIT',
    description="An asynchronous wrapper around Discord's API.",
    long_description=readme,
    long_description_content_type="text/markdown",
    include_package_data=True,
    install_requires=requirements,
    extras_require={
        'docs': [
            'sphinx>=4.1.1',
            'furo',
        ],
        'performance': [
            'orjson>=1.3.0'
        ]
    },
    python_requires='>=3.8.0',
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Topic :: Internet',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Utilities',
    ]
)
| 1.921875 | 2 |
sheets/views.py | GaniAliguzhinov/ServerChecker | 0 | 12774126 | <filename>sheets/views.py<gh_stars>0
from django.shortcuts import render, redirect
from .forms import UploadSheetForm
import openpyxl
from io import BytesIO
from sheets.tasks import process
def upload_file(request):
    """
    View for uploading an excel file with urls.
    On a valid POST the file is handed to process_sheet (which dispatches the
    work asynchronously) and the user is redirected to the queries page;
    otherwise the upload form is rendered (bound with errors after an
    invalid POST, unbound on GET).
    """
    if request.method != 'POST':
        return render(request, 'upload.html', {'form': UploadSheetForm()})
    form = UploadSheetForm(request.POST, request.FILES)
    if form.is_valid():
        process_sheet(request.FILES['sheet'])
        return redirect('/queries/')
    return render(request, 'upload.html', {'form': form})
def process_sheet(file):
    """
    Iterate over the first column of the uploaded excel file, collect the
    URL-like values and dispatch them for asynchronous processing.
    """
    wb = openpyxl.load_workbook(filename=BytesIO(file.read()))
    sheet = wb.active
    # To speed up, use a Pool
    urls = []
    for row in range(1, sheet.max_row + 1):
        url = sheet.cell(row=row, column=1).value
        # Skip empty cells and non-string values: previously `'.' not in url`
        # raised TypeError when a cell was empty (value is None) or numeric.
        if not isinstance(url, str) or '.' not in url:
            continue
        urls.append(url)
    # Process queries on all urls
    process.delay(urls)
| 2.59375 | 3 |
mixin_templatetag/componentnodes.py | xblitz/django-template-mixins | 0 | 12774127 | <reponame>xblitz/django-template-mixins<filename>mixin_templatetag/componentnodes.py
from collections import defaultdict
from django.template import TemplateSyntaxError, Node, Template, Variable
from django.template.loader_tags import ExtendsNode, IncludeNode
from django.utils.safestring import mark_safe
SLOT_CONTEXT_KEY = 'slot_context'
class SlotContext:
    """Tracks the stacks of named slot nodes seen during rendering.

    Each slot name maps to a list used as a stack: `add_slots` prepends
    (so slots discovered later sit *below* earlier ones), while `pop`,
    `push` and `get_slot` operate on the top (end) of the list.
    """

    def __init__(self):
        # Maps slot name -> stack (list) of slot nodes.
        self.slots = defaultdict(list)

    def add_slots(self, slots):
        # Insert at the front so previously-registered slots stay on top.
        for slot_name, slot_node in slots.items():
            self.slots[slot_name].insert(0, slot_node)

    def pop(self, name):
        stack = self.slots[name]
        return stack.pop() if stack else None

    def push(self, name, slot):
        self.slots[name].append(slot)

    def get_slot(self, name):
        # Peek at the top of the stack without removing it.
        stack = self.slots[name]
        return stack[-1] if stack else None
class ComponentNode(Node):
    """Template node that renders another template as a reusable component.

    Collects the SlotNodes declared in its own nodelist, registers them in
    the shared SlotContext, then renders the referenced parent template
    (optionally with extra context values).
    """
    context_key = 'component_context'

    def __init__(self, nodelist, parent_name, extra_context=None, isolated_context=False):
        # extra_context: mapping of name -> FilterExpression resolved at
        # render time; isolated_context: render the parent in a fresh context.
        self.extra_context = extra_context
        self.isolated_context = isolated_context
        self.nodelist = nodelist
        self.parent_name = parent_name
        # self.template_dirs = template_dirs
        # Index the slots declared inside this component invocation by name.
        self.slots = {n.name: n for n in nodelist.get_nodes_by_type(SlotNode)}

    def __repr__(self):
        return '<Component Node: %s %s>' % (self.__class__.__name__, self.parent_name.token)

    def find_template(self, template_name, context):
        """Resolve a template name via the engine of the current template."""
        template, origin = context.template.engine.find_template(
            template_name
        )
        return template

    def get_parent(self, context):
        """Resolve parent_name to a compiled django.template.Template."""
        parent = self.parent_name.resolve(context)
        if not parent:
            error_msg = "Invalid template name in 'component' tag: %r." % parent
            if self.parent_name.filters or \
                    isinstance(self.parent_name.var, Variable):
                error_msg += " Got this from the '%s' variable." % \
                    self.parent_name.token
            raise TemplateSyntaxError(error_msg)
        if isinstance(parent, Template):
            # parent is a django.template.Template
            return parent
        if isinstance(getattr(parent, 'template', None), Template):
            # parent is a django.template.backends.django.Template
            return parent.template
        return self.find_template(parent, context)

    def render(self, context):
        compiled_parent = self.get_parent(context)
        # Lazily create the shared slot registry for this render pass.
        if SLOT_CONTEXT_KEY not in context.render_context:
            context.render_context[SLOT_CONTEXT_KEY] = SlotContext()
        slot_context = context.render_context[SLOT_CONTEXT_KEY]
        # Add the block nodes from this node to the block context
        slot_context.add_slots(self.slots)
        # If this block's parent doesn't have an extends node it is the root,
        # # and its block nodes also need to be added to the block context.
        for node in compiled_parent.nodelist:
            # The ExtendsNode has to be the first non-text node.
            if isinstance(node, ExtendsNode) or isinstance(node, ComponentNode) or isinstance(node, IncludeNode):
                raise TemplateSyntaxError("%s must not include extends, another component, or include" % (self))
        # Call Template._render explicitly so the parser context stays
        # the same.
        values = {
            name: var.resolve(context)
            for name, var in self.extra_context.items()
        }
        with context.render_context.push_state(compiled_parent, isolated_context=False):
            if self.isolated_context:
                # Fresh context containing only the extra values.
                return compiled_parent._render(context.new(values))
            with context.push(**values):
                return compiled_parent._render(context)
class SlotNode(Node):
    """Template node for a named slot inside a component template.

    When rendered from inside a component, the slot's content may be
    overridden by a SlotNode registered under the same name in the shared
    SlotContext; otherwise it renders its own default nodelist.
    """

    def __init__(self, name, nodelist, parent=None):
        self.name, self.nodelist, self.parent = name, nodelist, parent

    def __repr__(self):
        return "<Slot Node: %s. Contents: %r>" % (self.name, self.nodelist)

    def render(self, context):
        slot_context = context.render_context.get(SLOT_CONTEXT_KEY)
        with context.push():
            if slot_context is None:
                # Rendered outside any component: use the default contents.
                context['slot'] = self
                result = self.nodelist.render(context)
            else:
                # Take the overriding slot (if any) off the stack.
                slot = slot_context.pop(self.name)
                if slot is None:
                    slot = self
                # Create new block so we can store context without thread-safety issues.
                slot = type(self)(slot.name, slot.nodelist)
                slot.context = context
                context['slot'] = slot
                result = slot.nodelist.render(context)
                # if push is not None:
                #     slot_context.push(self.name, push)
        return result

    def super(self):
        """Render the next slot up the stack, for {{ slot.super }} support."""
        if not hasattr(self, 'context'):
            raise TemplateSyntaxError(
                "'%s' object has no attribute 'context'. Did you use "
                "{{ slot.super }} in a base template?" % self.__class__.__name__
            )
        render_context = self.context.render_context
        if (SLOT_CONTEXT_KEY in render_context and
                render_context[SLOT_CONTEXT_KEY].get_slot(self.name) is not None):
            return mark_safe(self.render(self.context))
        return ''
| 2.0625 | 2 |
ISMLnextGen/retryTest.py | Ravenclaw-OIer/ISML_auto_voter | 128 | 12774128 | <reponame>Ravenclaw-OIer/ISML_auto_voter<filename>ISMLnextGen/retryTest.py
#coding:utf-8
import logging,traceback
from functools import wraps
log = logging.getLogger(__name__)
acceptStatus=(503,'其他接受的状态码')
class RetryExhaustedError(Exception):
    """Signals that all retry attempts of a decorated coroutine were used up.

    NOTE(review): currently unused — `retry` below returns the final
    exception object instead of raising this (the raise is commented out).
    """
    pass
    #def __init__(self, funcname,args,kwargs):
    #    print('Exception from {}: {} {}'.format(funcname,args,kwargs))
import aiohttp,asyncio
loop = asyncio.get_event_loop()
def retry(*exceptions, retries=3, cooldown=1, verbose=True):
    """Decorate an async function to execute it a few times before giving up.

    Hopes that the problem is resolved by the other side shortly.

    Args:
        exceptions (Tuple[Exception]): The exceptions expected during
            function execution.
        retries (int): Number of retries of function execution.
        cooldown (int): Seconds to wait before retry.
        verbose (bool): Specifies if we should log about not successful
            attempts.

    Note: when all attempts fail, the final exception object is *returned*
    (not raised), matching the original behaviour.
    """
    def wrap(func):
        @wraps(func)
        async def inner(*args, **kwargs):
            attempt = 0
            while True:
                try:
                    return await func(*args, **kwargs)
                except exceptions as err:
                    attempt += 1
                    message = ("Exception:{} during\n{} execution. "
                               "{} of {} retries attempted"
                               .format(err, func, attempt, retries))
                    if verbose:
                        print(message)
                    if attempt >= retries:
                        # Attempts exhausted: hand back the last exception.
                        return err
                    await asyncio.sleep(cooldown)
        return inner
    return wrap
# Example is taken from http://aiohttp.readthedocs.io/en/stable/#getting-started
async def fetch(session, url):
    """GET `url` with the given aiohttp session and return the body text.

    Status codes below 400, or those listed in `acceptStatus`, are treated
    as success; anything else raises via `response.raise_for_status()`.
    """
    async with session.get(url) as response:
        #return await response.text()
        # Body is read before the status check so the text is available
        # even for the explicitly accepted error statuses.
        text=await response.text()
        if (response.status<400 or response.status in acceptStatus):
            return text
        else:
            return response.raise_for_status()
# Client code, provided for reference
@retry(aiohttp.ClientError,asyncio.TimeoutError)
#@retry(aiohttp.WSServerHandshakeError,aiohttp.ContentTypeError)
async def main():
    """Fetch the local test endpoint and print the response body.

    Network failures are retried by the @retry decorator above.
    """
    async with aiohttp.ClientSession() as session:
        html = await fetch(session, 'http://localhost:55556')
        print(html)

if __name__=='__main__':
    # Drive the demo on the module-level event loop created above.
    loop.run_until_complete(main())
| 2.1875 | 2 |
spydrnet/ir/tests/test_wire.py | yinshuisiyuanabc/spydrnet | 0 | 12774129 | import unittest
import spydrnet as sdn
from spydrnet.ir.first_class_element import FirstClassElement
class TestWire(unittest.TestCase):
    """Unit tests for sdn.Wire: pin connection, disconnection and ordering."""

    def setUp(self):
        # Top-level definition holding one port/pin, one cable/wire, and an
        # instance of a leaf definition that exposes two pins.
        self.definition_top = sdn.Definition()
        self.port_top = self.definition_top.create_port()
        self.inner_pin = self.port_top.create_pin()
        self.cable = self.definition_top.create_cable()
        self.wire = self.cable.create_wire()
        self.definition_leaf = sdn.Definition()
        self.port = self.definition_leaf.create_port()
        self.pin1 = self.port.create_pin()
        self.pin2 = self.port.create_pin()
        self.instance = self.definition_top.create_child()
        self.instance.reference = self.definition_leaf

    def test_constructor(self):
        # Wires are lightweight objects: not FirstClassElements, and two
        # fresh wires must not compare equal.
        self.assertFalse(isinstance(self.wire, FirstClassElement), "Wire should not extend element")
        wire2 = sdn.Wire()
        self.assertNotEqual(self.wire, wire2, "Unique items are considered equal")

    def test_pins_assignement(self):
        # The pins property reflects connect order and can be reassigned to
        # reorder the connections.
        self.wire.connect_pin(self.instance.pins[self.pin1])
        self.wire.connect_pin(self.instance.pins[self.pin2])
        self.assertEqual(self.wire.pins, [self.instance.pins[self.pin1], self.instance.pins[self.pin2]])
        self.wire.pins = [self.instance.pins[self.pin2], self.instance.pins[self.pin1]]
        self.assertEqual(self.wire.pins, [self.instance.pins[self.pin2], self.instance.pins[self.pin1]])

    def test_connect_and_disconnect_inner_port(self):
        # Connecting an inner pin links both directions (wire<->pin);
        # disconnecting clears both.
        self.wire.connect_pin(self.inner_pin)
        self.assertTrue(self.inner_pin in self.wire.pins)
        self.assertEqual(self.inner_pin.wire, self.wire)
        self.assertEqual(len(self.wire.pins), 1)
        self.wire.disconnect_pin(self.inner_pin)
        self.assertFalse(self.inner_pin in self.wire.pins)
        self.assertIsNone(self.inner_pin.wire)
        self.assertEqual(len(self.wire.pins), 0)

    def test_connect_and_disconnect_outer_pin_by_reference(self):
        # Outer pins looked up from the instance's pin map connect like
        # inner pins and keep their instance/inner_pin bookkeeping.
        self.wire.connect_pin(self.instance.pins[self.pin1])
        self.assertEqual(len(self.wire.pins), 1)
        self.assertTrue(all(x is self.instance.pins[x] for x in self.wire.pins))
        self.assertTrue(all(x.wire is self.wire for x in self.wire.pins))
        self.assertTrue(all(x.instance is self.instance for x in self.wire.pins))
        self.assertEqual(self.instance.pins[self.pin1].inner_pin, self.pin1)
        self.wire.disconnect_pin(self.instance.pins[self.pin1])
        self.assertEqual(len(self.wire.pins), 0)
        self.assertFalse(self.instance.pins[self.pin1] in self.wire.pins)
        self.assertIsNone(self.instance.pins[self.pin1].wire)
        self.assertTrue(self.pin1 in self.instance.pins)

    def test_connect_and_disconnect_outer_pin_by_object(self):
        # Freshly constructed OuterPin objects (equal, not identical to the
        # instance's own) must also connect/disconnect correctly.
        self.wire.connect_pin(sdn.OuterPin.from_instance_and_inner_pin(self.instance, self.pin2), position=0)
        self.assertEqual(len(self.wire.pins), 1)
        self.assertTrue(all(x is self.instance.pins[x] for x in self.wire.pins))
        self.assertTrue(all(x.wire is self.wire for x in self.wire.pins))
        self.assertTrue(all(x.instance is self.instance for x in self.wire.pins))
        self.assertEqual(self.instance.pins[self.pin2].inner_pin, self.pin2)
        self.wire.disconnect_pin(sdn.OuterPin(self.instance, self.pin2))
        self.assertEqual(len(self.wire.pins), 0)
        self.assertFalse(self.instance.pins[self.pin2] in self.wire.pins)
        self.assertIsNone(self.instance.pins[self.pin1].wire)
        self.assertTrue(self.pin1 in self.instance.pins)

    def test_disconnect_pin_from(self):
        # Bulk disconnect accepts any iterable of pins (iterator or set).
        self.wire.connect_pin(self.inner_pin)
        self.wire.connect_pin(self.instance.pins[self.pin1])
        self.wire.connect_pin(self.instance.pins[self.pin2])
        self.wire.disconnect_pins_from(iter((self.inner_pin, self.instance.pins[self.pin1])))
        self.wire.disconnect_pins_from({self.instance.pins[self.pin2]})
        self.assertEqual(len(self.wire.pins), 0)
        self.assertTrue(self.pin1 in self.instance.pins and isinstance(self.instance.pins[self.pin1], sdn.OuterPin) and
                        self.instance.pins[self.pin1].inner_pin == self.pin1)
        self.assertIsNone(self.inner_pin.wire)
        self.assertIsNone(self.instance.pins[self.pin1].wire)
        self.assertIsNone(self.instance.pins[self.pin2].wire)
        self.assertTrue(self.pin1 in self.instance.pins and isinstance(self.instance.pins[self.pin2], sdn.OuterPin) and
                        self.instance.pins[self.pin2].inner_pin == self.pin2)

    @unittest.expectedFailure
    def test_disconnect_inner_pin_from_outside_wire(self):
        # Disconnecting a pin that was never connected must fail.
        inner_pin = sdn.InnerPin()
        self.wire.disconnect_pins_from([inner_pin])

    @unittest.expectedFailure
    def test_disconnect_outer_pin_from_outside_wire(self):
        # Same for an unconnected OuterPin.
        outer_pin = sdn.OuterPin()
        self.wire.disconnect_pins_from([outer_pin])
hms_tz/hms_tz/page/patient_history/patient_history.py | av-dev2/hms_tz | 5 | 12774130 | # -*- coding: utf-8 -*-
# Copyright (c) 2018, ESS LLP and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
from frappe.utils import cint
from erpnext.healthcare.utils import render_docs_as_html
@frappe.whitelist()
def get_feed(name, document_types=None, date_range=None, start=0, page_length=20):
    """Return a paginated feed of Patient Medical Record entries.

    `document_types` and `date_range` are optional JSON-encoded filters,
    interpreted by `get_filters`.
    """
    record_fields = ['name', 'owner', 'communication_date',
        'reference_doctype', 'reference_name', 'subject']
    return frappe.db.get_all('Patient Medical Record',
        fields=record_fields,
        filters=get_filters(name, document_types, date_range),
        order_by='communication_date DESC',
        limit=cint(page_length),
        start=cint(start))
def get_filters(name, document_types=None, date_range=None):
    """Build Patient Medical Record filters for a patient feed.

    Args:
        name: Patient document name; always filtered on ``patient``.
        document_types: Optional JSON-encoded list of reference doctypes.
        date_range: Optional JSON-encoded ``[from_date, to_date]`` pair.

    Returns:
        dict: Filters suitable for ``frappe.db.get_all``.

    Malformed JSON in either optional argument is ignored (the feed simply
    stays unfiltered for that dimension) instead of raising.
    """
    filters = {'patient': name}

    if document_types:
        # Bug fix: previously only date_range tolerated malformed JSON;
        # bad document_types raised an unhandled JSONDecodeError.
        try:
            parsed_types = json.loads(document_types)
        except json.JSONDecodeError:
            parsed_types = None
        if parsed_types:
            filters['reference_doctype'] = ['IN', parsed_types]

    if date_range:
        try:
            parsed_range = json.loads(date_range)
        except json.JSONDecodeError:
            parsed_range = None
        if parsed_range:
            filters['communication_date'] = ['between', [parsed_range[0], parsed_range[1]]]

    return filters
@frappe.whitelist()
def get_feed_for_dt(doctype, docname):
    """Return all medical-record entries linked to one document, newest first."""
    record_filters = {
        'reference_doctype': doctype,
        'reference_name': docname
    }
    return frappe.db.get_all(
        'Patient Medical Record',
        fields=['name', 'owner', 'communication_date',
            'reference_doctype', 'reference_name', 'subject'],
        filters=record_filters,
        order_by='communication_date DESC')
@frappe.whitelist()
def get_patient_history_doctypes():
    """List the document types configured in Patient History Settings
    (standard rows first, then custom rows, preserving table order)."""
    settings = frappe.get_single("Patient History Settings")
    return [row.document_type
        for row in list(settings.standard_doctypes) + list(settings.custom_doctypes)]
| 2 | 2 |
xing/xacom.py | testkevinkim/xing-plus | 88 | 12774131 | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import math
import pandas
def parseErrorCode(code):
    """Return the human-readable message for a XingAPI error code.

    :param code: error code (str or int; coerced to str)
    :return: ``"<message> (<code>)"`` for known codes, the code itself otherwise

    ::

        parseErrorCode("00310")  # 모의투자 조회가 완료되었습니다 (00310)
    """
    code = str(code)
    messages = {
        "-1" : "통신소켓 생성에 실패하였습니다",
        "-2" : "서버접속에 실패하였습니다",
        "-3" : "서버주소가 틀렸습니다",
        "-4" : "서버 접속시간이 초과되었습니다",
        "-5" : "이미 서버에 연결중입니다",
        "-6" : "해당TR은 사용할수 없습니다",
        "-7" : "로그인을 해야 사용이 가능합니다",
        "-8" : "시세전용에서는 사용이 불가능합니다",
        "-9" : "해당 계좌번호를 가지고 있지 않습니다",
        "-10" : "패킷의 크기가 잘못되었습니다",
        "-11" : "Data의 크기가 다릅니다",
        "-12" : "계좌가 존재하지 않습니다",
        "-13" : "Request ID 부족",
        "-14" : "소켓이 생성되지 않았습니다",
        "-15" : "암호화 생성에 실패했습니다",
        "-16" : "데이터 전송에 실패했습니다",
        "-17" : "암호화(RTN)처리에 실패했습니다",
        "-18" : "공인인증 파일이 없습니다",
        "-19" : "공인인증 Function이 없습니다",
        "-20" : "메모리가 충분하지 않습니다",
        "-21" : "TR의 시간당 전송제한에 걸렸습니다",
        "-22" : "해당 TR은 해당 함수를 이용할 수 없습니다",
        "-23" : "로그인이 안되었거나, TR에 대한 정보를 찾을 수 없습니다",
        "-24" : "계좌위치가 지정되지 않았습니다",
        "-25" : "계좌를 가지고 있지 않습니다",
        "-26" : "파일 읽기에 실패했습니다 (종목 검색 조회 시, 파일이 없는 경우)",
        "0000" : "정상완료되었습니다",
        "00310" : "모의투자 조회가 완료되었습니다",
        "00136" : "조회가 완료되었습니다",
        "00020" : "application program exit[TR:CSPAQ]",
        "03669" : "비밀번호 오류입니다. (5회중 4회 남았습니다)",
        "01796" : "비밀번호 연속 오류허용횟수를 초과하였습니다. 콜센터로 문의하시기 바랍니다"
    }
    message = messages.get(code)
    if message is None:
        # Unknown code: hand it back untouched so callers can still log it.
        return code
    return message + " (%s)" % code
def parseTR(trCode):
    """Return the description of a XingAPI TR code, or "" if unknown.

    ::

        parseTR("t0425")  # 주식체결/미체결
    """
    descriptions = {
        "t0424" : "주식잔고",
        "t0425" : "주식체결/미체결",
        "t8407" : "멀티현재가조회",
        "t8412" : "주식챠트(N분)",
        "t8413" : "주식챠트(일주월)",
        "t8430" : "주식종목조회",
        "t1833" : "종목검색(씽API용)",
        "t1101" : "주식현재가호가조회",
        "t1102" : "주식현재가(시세)조회",
        "t1411" : "증거금율별종목조회",
        "t1702" : "외인기관종목별동향",
        "t1301" : "주식시간대별체결조회",
        "t0167" : "서버시간조회",
        "t9945" : "주식마스터조회API용",
        "CSPAQ12200" : "현물계좌예수금 주문가능금액 총평가 조회",
        "CSPAT00600" : "현물주문",
        "CSPAT00700" : "현물정정주문",
        "CSPAT00800" : "현물취소주문",
        "CSPBQ00200" : "현물계좌 증거금률별 주문가능 수량 조회",
        "HA_" : "KOSDAQ호가잔량",
        "H1_" : "KOSPI호가잔량",
        "SC0" : "주식주문접수",
        "SC1" : "주식주문체결",
        "SC2" : "주식주문정정",
        "SC3" : "주식주문취소",
        "SC4" : "주식주문거부",
        "JIF" : "장운영정보"
    }
    return descriptions.get(trCode, "")
def parseJstatus(jstatus):
    """Return the market-session event name for a status code ("" if unknown).

    ::

        parseJstatus("66")  # 사이드카 매수발동

    .. note::
        - Session times should be tracked via the KOSPI market.
        - Futures/options 5-min, 1-min and 10-sec close warnings do not arrive.
    """
    events = {
        "11" : "장전동시호가개시",
        "21" : "장시작",
        "22" : "장개시10초전",
        "23" : "장개시1분전",
        "24" : "장개시5분전",
        "25" : "장개시10분전",
        "31" : "장후동시호가개시",
        "41" : "장마감",
        "42" : "장마감10초전",
        "43" : "장마감1분전",
        "44" : "장마감5분전",
        "51" : "시간외종가매매개시",
        "52" : "시간외종가매매종료",
        "53" : "시간외단일가매매개시",
        "54" : "시간외단일가매매종료",
        "61" : "서킷브레이크발동",
        "62" : "서킷브레이크해제",
        "63" : "서킷브레이크단일가접수",
        "64" : "사이드카 매도발동",
        "65" : "사이드카 매도해제",
        "66" : "사이드카 매수발동"
    }
    return events.get(jstatus, "")
def parseMarket(jangubun):
    """Return the market name for a market-division code ("" if unknown).

    ::

        parseMarket("1")  # 코스피
    """
    markets = {
        "1" : "코스피",
        "2" : "코스닥",
        "5" : "선물/옵션",
        "7" : "CME야간선물",
        "8" : "EUREX야간옵션선물"
    }
    return markets.get(jangubun, "")
def timeType(base = None):
    """Classify a moment relative to the trading session.

    :param base: datetime to classify; defaults to "now"
    :return: "SHOWTIME" between 08:50 and 15:00 on weekdays,
             "BEFORE"/"AFTER" outside that window, "NONE" on weekends

    ::

        timeType()
        timeType(datetime.today())
    """
    moment = base if base else datetime.today()
    if moment.weekday() >= 5:
        # Saturday/Sunday: the market is closed all day.
        return "NONE"
    session_open = moment.replace(hour=8, minute=50, second=0, microsecond=0)
    session_close = moment.replace(hour=15, minute=0, second=0, microsecond=0)
    if session_open <= moment <= session_close:
        return "SHOWTIME"
    return "BEFORE" if moment < session_open else "AFTER"
def today():
    """Return today's date as a "yyyymmdd" string.

    ::

        today()  # 20160101
    """
    return "{:%Y%m%d}".format(datetime.today())
def latestBusinessDay():
    """Return the most recent weekday (Mon-Fri) as "yyyymmdd".

    ::

        latestBusinessDay()  # 20160104
    """
    day = datetime.today()
    # Step back from Saturday/Sunday until we land on a weekday.
    while day.weekday() > 4:
        day -= timedelta(days=1)
    return day.strftime("%Y%m%d")
# def printMax(x):
# pandas.set_option("display.max_rows", len(x))
# pandas.set_option("display.max_columns", len(x.columns))
# print(x)
# pandas.reset_option("display.max_rows")
# pandas.reset_option("display.max_columns")
#
# def split(arr, size):
# arrs = []
# while len(arr) > size:
# pice = arr[:size]
# arrs.append(pice)
# arr = arr[size:]
# arrs.append(arr)
# return arrs
# # 호가 단위
# def callValueUnit(price, isKospi = False):
# unit = None
# price = int(price)
# if price < 1000:
# unit = 1
# elif price >= 1000 and price < 5000:
# unit = 5
# elif price >= 5000 and price < 10000:
# unit = 10
# elif price >= 10000 and price < 50000:
# unit = 50
# elif price >= 50000:
# if isKospi:
# if price < 100000:
# unit = 100
# elif price >= 100000 and price < 500000:
# unit = 500
# elif price >= 500000:
# unit = 1000
# else:
# unit = 100
# return unit
#
# # 구분
# def sign(type):
# result = None
# type = int(type)
# if type < 3:
# #상승
# result = 1
# elif type == 3:
# #보합
# result = 0
# elif type > 3:
# #하락
# result = -1
# return result
#
# # candle
# def candle(price, open, high, low):
# # print(price, open, high, low)
# p = int(price)
# o = int(open)
# h = int(high)
# l = int(low)
# height = h-l
# body = 0 if height == 0 else round((p-o)/height,2)
#
# if body > 0:
# #양봉
# type = 1
# top = (h-p)/height
# bottom = (o-l)/height
# elif body < 0:
# #음봉
# type = -1
# top = (h-o)/height
# bottom = (p-l)/height
# else:
# #보합
# type = 0.0
# top = 0.0
# bottom = 0.0
#
# return {
# "type" : type,
# "top" : round(top,2) * 100,
# "bottom" : round(bottom,2) * 100,
# "body" : math.fabs(body) * 100
# }
#
# def profit(buy, sell):
# #매매수수료
# fee = (float(buy) * 0.00015) + (float(sell) * 0.00315)
# profit = sell - buy - fee
#
# return {
# "profit" : profit,
# "rate" : round(profit/buy * 100,2)
# }
| 2.78125 | 3 |
app.py | learnazcloud/Auto-branch-protect | 0 | 12774132 | import json # pylint: disable=import-error
import os # pylint: disable=import-error
import time # pylint: disable=import-error
import requests # pylint: disable=import-error
from flask import Flask, request # pylint: disable=import-error
# Flask application receiving GitHub webhook deliveries.
# Fix: removed leftover debug statement `print("app", app)`.
app = Flask(__name__)
@app.route("/", methods=["POST"])
def webhook():
# Store incoming json data from webhook
payload = request.get_json()
user = "learnazcloud"
cred = os.environ["GH_TOKEN"]
if payload is None:
print("POST was not formatted in JSON")
# Verify the repo was created
try:
if payload["action"] == "created" or payload["action"] == "publicized" or payload["ref"] == "main":
# Delay needed for server to be create the page, otherwise a 404 returns
time.sleep(1)
# Create branch protection for the master branch of the repo
branch_protection = {
"required_status_checks": None,
"pull_request_reviews_enforcement_level": "off",
"required_approving_review_count": 1,
"dismiss_stale_reviews_on_push": True,
"require_code_owner_review": True,
"authorized_dismissal_actors_only": False,
"ignore_approvals_from_contributors": False,
"required_status_checks_enforcement_level": "non_admins",
"strict_required_status_checks_policy": False,
"signature_requirement_enforcement_level": "off",
"linear_history_requirement_enforcement_level": "off",
"enforce_admins": False,
"allow_force_pushes_enforcement_level": "off",
"allow_deletions_enforcement_level": "off",
"merge_queue_enforcement_level": "off",
"required_deployments_enforcement_level": "off",
"required_conversation_resolution_level": "off",
"authorized_actors_only": True,
"authorized_actor_names": [
"learnazcloud-user00"
],
"required_pull_request_reviews": None,
"restrictions": None,
}
session = requests.session()
session.auth = (user, cred)
response_1 = session.put(
payload["repository"]["url"] + "/branches/main/protection",
json.dumps(branch_protection),
)
if response_1.status_code == 200:
print(
"Branch protection created successfully. Status code: ",
response_1.status_code,
)
# Create issue in repo notifying user of branch protection
try:
if payload["repository"]["has_issues"]:
issue = {
"title": "New Protection Added",
"body": "@"
+ user
+ " @learnazcloud-secteam A new branch protection was added to the master branch.",
}
session = requests.session()
session.auth = (user, cred)
response_2 = session.post(
payload["repository"]["url"] + "/issues", json.dumps(issue)
)
if response_2.status_code == 201:
print(
"Issue created successfully. Status code: ",
response_2.status_code,
)
else:
print(
"Unable to create issue. Status code: ",
response_2.status_code,
)
else:
print(
"This repo has no issues so one cannot be created at this time."
)
except KeyError:
# Request did not contain information about if the repository has issues enabled
pass
else:
print(response_1.content)
print(
"Unable to create branch protection. Status code: ",
response_1.status_code,
"No Branch found- Creating one",
)
except KeyError:
# Ignore POST payload since it is not a create action
pass
return "OK"
if __name__ == "__main__":
app.run()
| 2.203125 | 2 |
python/src/main/python/drivers/run-browser-android.py | KishkinJ10/graphicsfuzz | 519 | 12774133 | <reponame>KishkinJ10/graphicsfuzz<filename>python/src/main/python/drivers/run-browser-android.py
#!/usr/bin/env python3
# Copyright 2018 The GraphicsFuzz Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shlex
import subprocess
import os
import argparse
def clear_logcat():
    """Flush the attached device's logcat buffers via adb."""
    subprocess.run(shlex.split("adb logcat -c"))
def start_logcat():
    """Clear old logs, then spawn `adb logcat` filtered to ActivityManager.

    Returns the logcat subprocess's stdout as a text-mode stream.
    """
    clear_logcat()
    command = shlex.split("adb logcat -b system 'ActivityManager:I' '*:S'")
    # universal_newlines=True makes stdout yield str lines rather than bytes.
    logcat_proc = subprocess.Popen(command, stdout=subprocess.PIPE, universal_newlines=True)
    return logcat_proc.stdout
def start_worker(server, worker, newtab=False):
    """Launch Firefox on the device via adb; with newtab=True, open the
    runner page on `server` identifying as `worker`.

    :param server: server host:port, e.g. "localhost:8080"
    :param worker: worker name passed to the runner page
    :param newtab: when True, add a VIEW intent opening the runner URL
    """
    # Note the escaped ampersand in the command: it must survive to the
    # device-side shell so '&worker=' stays part of the URL.
    worker_cmd = "adb shell am start -n org.mozilla.firefox/org.mozilla.gecko.LauncherActivity"
    if newtab:
        worker_cmd += " -a android.intent.action.VIEW -d 'http://" + server + "/static/runner.html?context=webgl2\&worker=" + worker + "'"
    # shlex.split() doesn't keep the escape around the URL ... ? resort
    # to shell=True
    subprocess.run(worker_cmd, shell=True)
################################################################################
# Main
# Parse the target server and worker identity from the command line.
parser = argparse.ArgumentParser()
parser.add_argument(
    'server',
    help='Server URL, e.g. localhost:8080')
parser.add_argument(
    'worker',
    help='Worker name to identify to the server')

args = parser.parse_args()

# Start watching logcat first so a crash right after launch is not missed.
logcat = start_logcat()
start_worker(args.server, args.worker, newtab=True)

# Watchdog loop: whenever ActivityManager reports the Firefox process died,
# relaunch the worker (without opening a new tab; the session is restored).
while True:
    line = logcat.readline()
    if (" Process org.mozilla.firefox " in line) and (" has died" in line):
        print("Detected a crash: " + line, end='')
        print('Restart worker...')
        start_worker(args.server, args.worker)
| 2.234375 | 2 |
src/rpcClient.py | p0lt/QMT | 1 | 12774134 | <reponame>p0lt/QMT
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from misc import getCallerName, getFunctionName, printException, printDbg, readRPCfile, now
from constants import DEFAULT_PROTOCOL_VERSION, MINIMUM_FEE
import threading
from tabGovernance import Torrent
class RpcClient:
    """Thread-safe wrapper around the QMC wallet's JSON-RPC interface.

    Every call takes ``self.lock`` for its duration because the underlying
    ``AuthServiceProxy`` connection is shared between threads. Errors are
    logged via ``printException`` and a safe default is returned, so callers
    never see RPC exceptions directly.
    """

    def __init__(self):
        # Lock for threads: serializes access to the shared RPC connection.
        self.lock = threading.Lock()
        self.rpc_ip, self.rpc_port, self.rpc_user, self.rpc_passwd = readRPCfile()
        rpc_url = "http://%s:%s@%s:%d" % (self.rpc_user, self.rpc_passwd, self.rpc_ip, self.rpc_port)
        try:
            self.lock.acquire()
            self.conn = AuthServiceProxy(rpc_url, timeout=120)
        # Fix: collapsed two identical except clauses (JSONRPCException and
        # Exception had the same body) into one handler.
        except Exception as e:
            err_msg = 'remote or local QMC-cli running?'
            printException(getCallerName(), getFunctionName(), err_msg, e)
        finally:
            self.lock.release()

    def decodeRawTransaction(self, rawTx):
        """Decode a raw transaction hex string; None on error."""
        try:
            self.lock.acquire()
            res = self.conn.decoderawtransaction(rawTx)
        except Exception as e:
            err_msg = 'error in decodeRawTransaction'
            printException(getCallerName(), getFunctionName(), err_msg, e.args)
            res = None
        finally:
            self.lock.release()
        return res

    def getAddressUtxos(self, addresses):
        """Return UTXOs for the given list of addresses; None on error."""
        try:
            self.lock.acquire()
            res = self.conn.getaddressutxos({'addresses': addresses})
        except Exception as e:
            err_msg = "error in getAddressUtxos"
            printException(getCallerName(), getFunctionName(), err_msg, e.args)
            res = None
        finally:
            self.lock.release()
        return res

    def getBlockCount(self):
        """Return the current block height; 0 on error."""
        try:
            self.lock.acquire()
            n = self.conn.getblockcount()
        except Exception as e:
            err_msg = 'remote or local QMC-cli running?'
            # "Request-sent" is a transient connection state, not worth logging.
            if str(e.args[0]) != "Request-sent":
                printException(getCallerName(), getFunctionName(), err_msg, e.args)
            n = 0
        finally:
            self.lock.release()
        return n

    def getBlockHash(self, blockNum):
        """Return the hash of block ``blockNum``; None on error."""
        try:
            self.lock.acquire()
            h = self.conn.getblockhash(blockNum)
        except Exception as e:
            err_msg = 'remote or local QMC-cli running?'
            printException(getCallerName(), getFunctionName(), err_msg, e.args)
            h = None
        finally:
            self.lock.release()
        return h

    def getBudgetVotes(self, torrent):
        """Return the vote map for a budget torrent; {} on error."""
        try:
            self.lock.acquire()
            votes = self.conn.getbudgetvotes(torrent)
        except Exception as e:
            err_msg = 'remote or local QMC-cli running?'
            printException(getCallerName(), getFunctionName(), err_msg, e.args)
            votes = {}
        finally:
            self.lock.release()
        return votes

    def getFeePerKb(self):
        """Return the recent fee-per-kB, clamped to at least MINIMUM_FEE."""
        try:
            self.lock.acquire()
            # get transaction data from last 200 blocks
            feePerKb = float(self.conn.getfeeinfo(200)['feeperkb'])
            res = (feePerKb if feePerKb > MINIMUM_FEE else MINIMUM_FEE)
        except Exception as e:
            err_msg = 'error in getFeePerKb'
            printException(getCallerName(), getFunctionName(), err_msg, e.args)
            res = MINIMUM_FEE
        finally:
            self.lock.release()
        return res

    def getMNStatus(self, address):
        """Return the masternode status dict for ``address`` (augmented with
        the enabled-masternode count), or None if unknown / on error."""
        try:
            self.lock.acquire()
            mnStatusList = self.conn.listmasternodes(address)
            if not mnStatusList:
                return None
            mnStatus = mnStatusList[0]
            mnStatus['mnCount'] = self.conn.getmasternodecount()['enabled']
        except Exception as e:
            err_msg = "error in getMNStatus"
            printException(getCallerName(), getFunctionName(), err_msg, e.args)
            mnStatus = None
        finally:
            self.lock.release()
        return mnStatus

    def getMasternodeCount(self):
        """Return the masternode-count summary dict; None on error."""
        try:
            self.lock.acquire()
            ans = self.conn.getmasternodecount()
        except Exception as e:
            err_msg = "error in getMasternodeCount"
            printException(getCallerName(), getFunctionName(), err_msg, e.args)
            ans = None
        finally:
            self.lock.release()
        return ans

    def getMasternodes(self):
        """Return {'last_update': ts, 'masternodes': [...]} with each
        masternode annotated with a payment-queue position."""
        mnList = {}
        mnList['last_update'] = now()
        score = []
        try:
            self.lock.acquire()
            masternodes = self.conn.listmasternodes()
        except Exception as e:
            err_msg = "error in getMasternodes"
            printException(getCallerName(), getFunctionName(), err_msg, e.args)
            masternodes = []
        finally:
            self.lock.release()

        for mn in masternodes:
            if mn.get('status') == 'ENABLED':
                # Score approximates "time waiting for payment": never-paid
                # nodes use their active time, others the time since last pay.
                if mn.get('lastpaid') == 0:
                    mn['score'] = mn.get('activetime')
                else:
                    lastpaid_ago = now() - mn.get('lastpaid')
                    mn['score'] = min(lastpaid_ago, mn.get('activetime'))
            else:
                mn['score'] = 0

            score.append(mn)

        # Highest score first; queue_pos relies on dict identity between the
        # 'score' list and 'masternodes' (same objects in both lists).
        score.sort(key=lambda x: x['score'], reverse=True)

        for mn in masternodes:
            mn['queue_pos'] = score.index(mn)

        mnList['masternodes'] = masternodes

        return mnList

    def getNextSuperBlock(self):
        """Return the height of the next superblock; 0 on error."""
        try:
            self.lock.acquire()
            n = self.conn.getnextsuperblock()
        except Exception as e:
            err_msg = 'remote or local QMC-cli running?'
            if str(e.args[0]) != "Request-sent":
                printException(getCallerName(), getFunctionName(), err_msg, e.args)
            n = 0
        finally:
            self.lock.release()
        return n

    def getTorrents(self):
        """Return the budget entries as a list of Torrent objects."""
        torrents = []
        try:
            self.lock.acquire()
            data = self.conn.getbudgetinfo()
        except Exception as e:
            err_msg = "error getting torrents"
            printException(getCallerName(), getFunctionName(), err_msg, e.args)
            data = []
        finally:
            self.lock.release()

        for p in data:
            new_torrent = Torrent(p.get('Name'), p.get('URL'), p.get('Hash'), p.get('FeeHash'), p.get('BlockStart'),
                                  p.get('BlockEnd'), p.get('TotalPaymentCount'), p.get('RemainingPaymentCount'), p.get('PaymentAddress'),
                                  p.get('Yeas'), p.get('Nays'), p.get('Abstains'),
                                  float(p.get('TotalPayment')), float(p.get('MonthlyPayment')))

            torrents.append(new_torrent)

        return torrents

    def getTorrentsProjection(self):
        """Return budget projection entries as plain dicts
        (Name, Allotted, Votes, Total_Allotted)."""
        torrents = []
        try:
            self.lock.acquire()
            data = self.conn.getbudgetprojection()
        except Exception as e:
            err_msg = "error getting torrents projection"
            printException(getCallerName(), getFunctionName(), err_msg, e.args)
            data = []
        finally:
            self.lock.release()

        for p in data:
            # Bug fix: a Torrent object used to be constructed here and
            # immediately discarded; only the dict below was ever used.
            new_torrent = {}
            new_torrent['Name'] = p.get('Name')
            # NOTE(review): "Alloted" (sic) is the key spelling the RPC returns.
            new_torrent['Allotted'] = float(p.get("Alloted"))
            new_torrent['Votes'] = p.get('Yeas') - p.get('Nays')
            new_torrent['Total_Allotted'] = float(p.get('TotalBudgetAlloted'))

            torrents.append(new_torrent)

        return torrents

    def getProtocolVersion(self):
        """Return the node's protocol version; DEFAULT_PROTOCOL_VERSION on error."""
        try:
            self.lock.acquire()
            prot_version = self.conn.getinfo().get('protocolversion')
            res = int(prot_version)
        except Exception as e:
            err_msg = 'error in getProtocolVersion'
            printException(getCallerName(), getFunctionName(), err_msg, e.args)
            res = DEFAULT_PROTOCOL_VERSION
        finally:
            self.lock.release()
        return res

    def getRawTransaction(self, txid):
        """Return the raw hex of transaction ``txid``; None on error."""
        try:
            self.lock.acquire()
            res = self.conn.getrawtransaction(txid)
        except Exception as e:
            err_msg = "is Blockchain synced?"
            printException(getCallerName(), getFunctionName(), err_msg, e.args)
            res = None
        finally:
            self.lock.release()
        return res

    def getStatus(self):
        """Probe the RPC server.

        Returns (connected: bool, status message: str, block height: int).
        """
        status = False
        statusMess = "Unable to connect to a QMC RPC server.\n"
        statusMess += "Either the local QMC wallet is not open, or the remote RPC server is not responding."
        n = 0
        try:
            self.lock.acquire()
            n = self.conn.getblockcount()
            if n > 0:
                status = True
                statusMess = "Connected to QMC RPC client"
        except Exception as e:
            # If loading block index set lastBlock=1
            if str(e.args[0]) == "Loading block index..." or str(e.args[0]) == "Verifying blocks...":
                printDbg(str(e.args[0]))
                statusMess = "QMC wallet is connected but still synchronizing / verifying blocks"
                n = 1
            elif str(e.args[0]) != "Request-sent" and str(e.args[0]) != "10061":
                err_msg = "Error while contacting RPC server"
                printException(getCallerName(), getFunctionName(), err_msg, e.args)
        finally:
            self.lock.release()
        return status, statusMess, n

    def isBlockchainSynced(self):
        """Return True once the node reports the chain fully synced."""
        try:
            self.lock.acquire()
            res = self.conn.mnsync('status').get("IsBlockchainSynced")
        except Exception as e:
            if str(e.args[0]) != "Request-sent":
                err_msg = "error in isBlockchainSynced"
                printException(getCallerName(), getFunctionName(), err_msg, e.args)
            res = False
        finally:
            self.lock.release()
        return res

    def mnBudgetRawVote(self, mn_tx_hash, mn_tx_index, torrent_hash, vote, time, vote_sig):
        """Submit a pre-signed budget vote; returns the node's reply or None."""
        try:
            self.lock.acquire()
            res = self.conn.mnbudgetrawvote(mn_tx_hash, mn_tx_index, torrent_hash, vote, time, vote_sig)
        except Exception as e:
            err_msg = "error in mnBudgetRawVote"
            printException(getCallerName(), getFunctionName(), err_msg, e.args)
            res = None
        finally:
            self.lock.release()
        return res

    def decodemasternodebroadcast(self, work):
        """Decode a masternode broadcast message; "" on error."""
        try:
            self.lock.acquire()
            res = self.conn.decodemasternodebroadcast(work.strip())
        except Exception as e:
            err_msg = "error in decodemasternodebroadcast"
            printException(getCallerName(), getFunctionName(), err_msg, e.args)
            res = ""
        finally:
            self.lock.release()
        return res

    def relaymasternodebroadcast(self, work):
        """Relay a masternode broadcast message; "" on error."""
        try:
            self.lock.acquire()
            res = self.conn.relaymasternodebroadcast(work.strip())
        except Exception as e:
            err_msg = "error in relaymasternodebroadcast"
            printException(getCallerName(), getFunctionName(), err_msg, e.args)
            res = ""
        finally:
            self.lock.release()
        return res

    def sendRawTransaction(self, tx_hex, use_swiftx):
        """Broadcast a raw transaction; returns its txid or None on error."""
        try:
            self.lock.acquire()
            tx_id = self.conn.sendrawtransaction(tx_hex, True, bool(use_swiftx))
        except Exception as e:
            err_msg = 'error in rpcClient.sendRawTransaction'
            printException(getCallerName(), getFunctionName(), err_msg, e.args)
            tx_id = None
        finally:
            self.lock.release()
        return tx_id

    def verifyMessage(self, qmcaddress, signature, message):
        """Verify a signed message against an address; False on error."""
        try:
            self.lock.acquire()
            res = self.conn.verifymessage(qmcaddress, signature, message)
        except Exception as e:
            err_msg = "error in verifyMessage"
            printException(getCallerName(), getFunctionName(), err_msg, e.args)
            res = False
        finally:
            self.lock.release()
        return res
| 2 | 2 |
tests/components/test_button.py | trickeydan/j5-dev | 10 | 12774135 | <gh_stars>1-10
"""Tests for the Button Classes."""
from time import sleep, time
from j5.components.button import Button, ButtonInterface
class MockButtonDriver(ButtonInterface):
    """Test double implementing ButtonInterface with a settable state."""

    def __init__(self) -> None:
        self.state = False

    def set_button_state(self, new_state: bool) -> None:
        """Force the simulated button into the given state."""
        self.state = new_state

    def get_button_state(self, identifier: int) -> bool:
        """Report the simulated button's current state."""
        return self.state

    def wait_until_button_pressed(self, identifier: int) -> None:
        """Block briefly; the mock always 'presses' the button after 0.2 s."""
        sleep(0.2)
def test_button_interface_implementation() -> None:
    """The mock driver should be constructible as a ButtonInterface."""
    assert MockButtonDriver() is not None
def test_button_instantiation() -> None:
    """A Button component should accept an identifier and a driver."""
    assert Button(0, MockButtonDriver()) is not None
def test_button_interface_class() -> None:
    """Button must advertise ButtonInterface as its interface class."""
    assert Button.interface_class() is ButtonInterface
def test_button_identifier() -> None:
    """The identifier given at construction is exposed unchanged."""
    button = Button(0, MockButtonDriver())
    assert button.identifier == 0
def test_button_state() -> None:
    """is_pressed must mirror the state reported by the driver."""
    driver = MockButtonDriver()
    button = Button(0, driver)
    assert button.is_pressed is False
    driver.set_button_state(True)
    assert button.is_pressed is True
def test_button_wait_until_pressed() -> None:
    """wait_until_pressed blocks for the mock's 0.2 s press delay."""
    button = Button(0, MockButtonDriver())
    started = time()
    button.wait_until_pressed()
    elapsed = time() - started
    # More than the mock's delay, but nowhere near an indefinite hang.
    assert elapsed > 0.2
    assert elapsed < 2
| 3.390625 | 3 |
jdf_tools/numbers.py | lluc/django_jdf | 0 | 12774136 | <filename>jdf_tools/numbers.py
"""
Jour de fete - Django_JDF
Convertion de nombres en toutes lettres
@date: 2014/09/12
@copyright: 2014 by <NAME> <<EMAIL>>
@license: MIT
"""
class numbers :
    """Convert non-negative integers (below 10**12) to French words,
    uppercase, one trailing space per word.

    NOTE: the class name shadows the stdlib ``numbers`` module but is kept
    for backward compatibility with existing callers.
    """

    def __init__(self) :
        # Word tables indexed by digit: units, teens (10-19) and tens.
        # Index 7/9 of the tens table reuse SOIXANTE/QUATRE VINGT because
        # French builds 70-79 and 90-99 from them plus a teen.
        self.schu=["","UN ","DEUX ","TROIS ","QUATRE ","CINQ ","SIX ","SEPT ","HUIT ","NEUF "]
        self.schud=["DIX ","ONZE ","DOUZE ","TREIZE ","QUATORZE ","QUINZE ","SEIZE ","DIX SEPT ","DIX HUIT ","DIX NEUF "]
        self.schd=["","DIX ","VINGT ","TRENTE ","QUARANTE ","CINQUANTE ","SOIXANTE ","SOIXANTE ","QUATRE VINGT ","QUATRE VINGT "]

    def convNumber2letter(self,nombre):
        """Return *nombre* spelled out in French words (0 -> "ZERO ")."""
        s=''
        reste=nombre
        i=1000000000
        while i>0:
            # Bug fix: use floor division so the code runs identically under
            # Python 3; the original "/" produced float list indices there.
            y=reste//i
            if y!=0:
                centaine=y//100
                dizaine=(y - centaine*100)//10
                unite=y-centaine*100-dizaine*10
                if centaine==1:
                    s+="CENT "
                elif centaine!=0:
                    s+=self.schu[centaine]+"CENT "
                    # Plural "CENTS" only for exact multiples (200, 300, ...).
                    if dizaine==0 and unite==0: s=s[:-1]+"S "
                if dizaine not in [0,1]: s+=self.schd[dizaine]
                if unite==0:
                    if dizaine in [1,7,9]: s+="DIX "
                    elif dizaine==8: s=s[:-1]+"S "   # QUATRE VINGTS
                elif unite==1:
                    if dizaine in [1,9]: s+="ONZE "
                    elif dizaine==7: s+="ET ONZE "
                    elif dizaine in [2,3,4,5,6]: s+="ET UN "
                    elif dizaine in [0,8]: s+="UN "
                elif unite in [2,3,4,5,6,7,8,9]:
                    if dizaine in [1,7,9]: s+=self.schud[unite]
                    else: s+=self.schu[unite]
                if i==1000000000:
                    if y>1: s+="MILLIARDS "
                    else: s+="MILLIARD "
                if i==1000000:
                    if y>1: s+="MILLIONS "
                    else: s+="MILLION "   # Bug fix: singular was "MILLIONS "
                if i==1000:
                    s+="MILLE "
            #end if y!=0
            reste -= y*i
            i//=1000
        #end while
        if len(s)==0: s+="ZERO "
        return s
| 2.859375 | 3 |
cert_mailer/helpers/sendgrid.py | stuartf/cert-mailer | 5 | 12774137 | <reponame>stuartf/cert-mailer<gh_stars>1-10
import sendgrid
import os
import urllib.request as urllib
from sendgrid.helpers.mail import Content, Attachment, Mail, Email
class Mailer:
    """Send certificate e-mails with an inline QR code through SendGrid."""

    def __init__(self):
        # API key is read from the environment, never stored in config files.
        api_key = os.environ.get('SENDGRID_API_KEY')
        self.sg = sendgrid.SendGridAPIClient(apikey=api_key)

    def send(self, config, subject, body, img, row):
        """Send one HTML message to the recipient in ``row``.

        :param config: settings object providing ``from_email``
        :param subject: message subject line
        :param body: HTML body (may reference the inline "qrcode" image)
        :param img: base64-encoded JPEG attachment content
        :param row: mapping with the recipient's ``email``
        """
        qr_attachment = Attachment()
        qr_attachment.content = img
        qr_attachment.type = "image/jpeg"
        qr_attachment.filename = "qrcode.jpg"
        qr_attachment.disposition = "inline"
        qr_attachment.content_id = "qrcode"

        message = Mail(Email(config.from_email), subject, Email(row['email']),
                       Content("text/html", body))
        message.add_attachment(qr_attachment)
        try:
            response = self.sg.client.mail.send.post(request_body=message.get())
        except urllib.HTTPError as err:
            print(err.read())
            exit()
        print(response.status_code)
        print(response.body)
        print(response.headers)
| 2.71875 | 3 |
Source/module_mission_templates.py | qt911025/qt-homemade-mod-osp | 1 | 12774138 | from header_common import *
from header_operations import *
from header_mission_templates import *
from header_animations import *
from header_sounds import *
from header_music import *
from header_items import *
from module_constants import *
####################################################################################################################
# Each mission-template is a tuple that contains the following fields:
# 1) Mission-template id (string): used for referencing mission-templates in other files.
# The prefix mt_ is automatically added before each mission-template id
#
# 2) Mission-template flags (int): See header_mission-templates.py for a list of available flags
# 3) Mission-type(int): Which mission types this mission template matches.
# For mission-types to be used with the default party-meeting system,
# this should be 'charge' or 'charge_with_ally' otherwise must be -1.
#
# 4) Mission description text (string).
# 5) List of spawn records (list): Each spawn record is a tuple that contains the following fields:
# 5.1) entry-no: Troops spawned from this spawn record will use this entry
# 5.2) spawn flags.
# 5.3) alter flags. which equipment will be overriden
# 5.4) ai flags.
# 5.5) Number of troops to spawn.
# 5.6) list of equipment to add to troops spawned from here (maximum 8).
# 6) List of triggers (list).
# See module_triggers.py for infomation about triggers.
#
# Please note that mission templates is work in progress and can be changed in the future versions.
#
####################################################################################################################
# Disguise outfit equipment list (max 8 items per spawn record).
pilgrim_disguise = [itm_pilgrim_hood,itm_pilgrim_disguise,itm_practice_staff, itm_throwing_daggers]
# Alter-flags combo for castle lords: override horse and weapons, and
# require civilian equipment.
af_castle_lord = af_override_horse | af_override_weapons| af_require_civilian
#multiplayer_server_spawn_bots = (
# 0, 0, 0, [],
# [
# (multiplayer_is_server),
# (eq, "$g_multiplayer_ready_for_spawning_agent", 1),
# (store_add, ":total_req", "$g_multiplayer_num_bots_required_team_1", "$g_multiplayer_num_bots_required_team_2"),
# (try_begin),
# (gt, ":total_req", 0),
#
# (try_begin),
# (this_or_next|eq, "$g_multiplayer_game_type", multiplayer_game_type_battle),
# (this_or_next|eq, "$g_multiplayer_game_type", multiplayer_game_type_destroy),
# (eq, "$g_multiplayer_game_type", multiplayer_game_type_siege),
#
# (team_get_score, ":team_1_score", 0),
# (team_get_score, ":team_2_score", 1),
#
# (store_add, ":current_round", ":team_1_score", ":team_2_score"),
# (eq, ":current_round", 0),
#
# (store_mission_timer_a, ":round_time"),
# (val_sub, ":round_time", "$g_round_start_time"),
# (lt, ":round_time", 20),
#
# (assign, ":rounded_game_first_round_time_limit_past", 0),
# (else_try),
# (assign, ":rounded_game_first_round_time_limit_past", 1),
# (try_end),
#
# (eq, ":rounded_game_first_round_time_limit_past", 1),
#
# (store_random_in_range, ":random_req", 0, ":total_req"),
# (val_sub, ":random_req", "$g_multiplayer_num_bots_required_team_1"),
# (try_begin),
# (lt, ":random_req", 0),
# #add to team 1
# (assign, ":selected_team", 0),
# (else_try),
# #add to team 2
# (assign, ":selected_team", 1),
# (try_end),
#
# (try_begin),
# (this_or_next|eq, "$g_multiplayer_game_type", multiplayer_game_type_battle),
# (eq, "$g_multiplayer_game_type", multiplayer_game_type_destroy),
#
# (store_mission_timer_a, ":round_time"),
# (val_sub, ":round_time", "$g_round_start_time"),
#
# (try_begin),
# (le, ":round_time", 20),
# (assign, ":look_only_actives", 0),
# (else_try),
# (assign, ":look_only_actives", 1),
# (try_end),
# (else_try),
# (assign, ":look_only_actives", 1),
# (try_end),
#
# (call_script, "script_multiplayer_find_bot_troop_and_group_for_spawn", ":selected_team", ":look_only_actives"),
# (assign, ":selected_troop", reg0),
# (assign, ":selected_group", reg1),
#
# (team_get_faction, ":team_faction", ":selected_team"),
# (assign, ":num_ai_troops", 0),
# (try_for_range, ":cur_ai_troop", multiplayer_ai_troops_begin, multiplayer_ai_troops_end),
# (store_troop_faction, ":ai_troop_faction", ":cur_ai_troop"),
# (eq, ":ai_troop_faction", ":team_faction"),
# (val_add, ":num_ai_troops", 1),
# (try_end),
#
# (assign, ":number_of_active_players_wanted_bot", 0),
#
# (get_max_players, ":num_players"),
# (try_for_range, ":player_no", 0, ":num_players"),
# (player_is_active, ":player_no"),
# (player_get_team_no, ":player_team_no", ":player_no"),
# (eq, ":selected_team", ":player_team_no"),
#
# (assign, ":ai_wanted", 0),
# (store_add, ":end_cond", slot_player_bot_type_1_wanted, ":num_ai_troops"),
# (try_for_range, ":bot_type_wanted_slot", slot_player_bot_type_1_wanted, ":end_cond"),
# (player_slot_ge, ":player_no", ":bot_type_wanted_slot", 1),
# (assign, ":ai_wanted", 1),
# (assign, ":end_cond", 0),
# (try_end),
#
# (ge, ":ai_wanted", 1),
#
# (val_add, ":number_of_active_players_wanted_bot", 1),
# (try_end),
#
# (try_begin),
# (this_or_next|ge, ":selected_group", 0),
# (eq, ":number_of_active_players_wanted_bot", 0),
#
# (troop_get_inventory_slot, ":has_item", ":selected_troop", ek_horse),
# (try_begin),
# (ge, ":has_item", 0),
# (assign, ":is_horseman", 1),
# (else_try),
# (assign, ":is_horseman", 0),
# (try_end),
#
# (try_begin),
# (eq, "$g_multiplayer_game_type", multiplayer_game_type_siege),
#
# (store_mission_timer_a, ":round_time"),
# (val_sub, ":round_time", "$g_round_start_time"),
#
# (try_begin),
# (lt, ":round_time", 20), #at start of game spawn at base entry point
# (try_begin),
# (eq, ":selected_team", 0),
# (call_script, "script_multiplayer_find_spawn_point", ":selected_team", 1, ":is_horseman"),
# (else_try),
# (assign, reg0, multi_initial_spawn_point_team_2),
# (try_end),
# (else_try),
# (call_script, "script_multiplayer_find_spawn_point", ":selected_team", 0, ":is_horseman"),
# (try_end),
# (else_try),
# (this_or_next|eq, "$g_multiplayer_game_type", multiplayer_game_type_battle),
# (eq, "$g_multiplayer_game_type", multiplayer_game_type_destroy),
#
# (try_begin),
# (eq, ":selected_team", 0),
# (assign, reg0, 0),
# (else_try),
# (assign, reg0, 32),
# (try_end),
# (else_try),
# (call_script, "script_multiplayer_find_spawn_point", ":selected_team", 0, ":is_horseman"),
# (try_end),
#
# (store_current_scene, ":cur_scene"),
# (modify_visitors_at_site, ":cur_scene"),
# (add_visitors_to_current_scene, reg0, ":selected_troop", 1, ":selected_team", ":selected_group"),
# (assign, "$g_multiplayer_ready_for_spawning_agent", 0),
#
# (try_begin),
# (eq, ":selected_team", 0),
# (val_sub, "$g_multiplayer_num_bots_required_team_1", 1),
# (else_try),
# (eq, ":selected_team", 1),
# (val_sub, "$g_multiplayer_num_bots_required_team_2", 1),
# (try_end),
# (try_end),
# (try_end),
# ])
#
#multiplayer_server_manage_bots = (
# 3, 0, 0, [],
# [
# (multiplayer_is_server),
# (try_for_agents, ":cur_agent"),
# (agent_is_non_player, ":cur_agent"),
# (agent_is_human, ":cur_agent"),
# (agent_is_alive, ":cur_agent"),
# (agent_get_group, ":agent_group", ":cur_agent"),
# (try_begin),
# (neg|player_is_active, ":agent_group"),
# (call_script, "script_multiplayer_change_leader_of_bot", ":cur_agent"),
# (else_try),
# (player_get_team_no, ":leader_team_no", ":agent_group"),
# (agent_get_team, ":agent_team", ":cur_agent"),
# (neq, ":leader_team_no", ":agent_team"),
# (call_script, "script_multiplayer_change_leader_of_bot", ":cur_agent"),
# (try_end),
# (try_end),
# ])
# Server-side poll bookkeeping trigger (fields per the module-system trigger
# format: check interval, delay, re-arm interval, [conditions], [consequences]).
# While a poll is running, it is evaluated as soon as every ballot is in or the
# poll deadline passes; for a map-change poll that succeeds, the mission is
# restarted on the voted map.
multiplayer_server_check_polls = (
  1, 5, 0,
  [
    (multiplayer_is_server),
    (eq, "$g_multiplayer_poll_running", 1),
    (eq, "$g_multiplayer_poll_ended", 0),
    (store_mission_timer_a, ":mission_timer"),
    (store_add, ":total_votes", "$g_multiplayer_poll_no_count", "$g_multiplayer_poll_yes_count"),
    # Fire when all sent ballots are returned, OR the poll timer has expired.
    (this_or_next|eq, ":total_votes", "$g_multiplayer_poll_num_sent"),
    (gt, ":mission_timer", "$g_multiplayer_poll_end_time"),
    # cf-script acts as the final condition: block runs only if the poll passed.
    (call_script, "script_cf_multiplayer_evaluate_poll"),
    ],
  [
    (assign, "$g_multiplayer_poll_running", 0),
    (try_begin),
      (this_or_next|eq, "$g_multiplayer_poll_to_show", 0), #change map
      (eq, "$g_multiplayer_poll_to_show", 3), #change map with factions
      (call_script, "script_game_multiplayer_get_game_type_mission_template"),
      (start_multiplayer_mission, reg0, "$g_multiplayer_poll_value_to_show", 1),
      (call_script, "script_game_set_multiplayer_mission_end"),
    (try_end),
    ])
#multiplayer_server_check_end_map = (
# 1, 0, 0, [],
# [
# (multiplayer_is_server),
# #checking for restarting the map
# (assign, ":end_map", 0),
# (try_begin),
# (this_or_next|eq, "$g_multiplayer_game_type", multiplayer_game_type_battle),
# (this_or_next|eq, "$g_multiplayer_game_type", multiplayer_game_type_destroy),
# (eq, "$g_multiplayer_game_type", multiplayer_game_type_siege),
#
# (try_begin),
# (eq, "$g_round_ended", 1),
#
# (store_mission_timer_a, ":seconds_past_till_round_ended"),
# (val_sub, ":seconds_past_till_round_ended", "$g_round_finish_time"),
# (store_sub, ":multiplayer_respawn_period_minus_one", "$g_multiplayer_respawn_period", 1),
# (ge, ":seconds_past_till_round_ended", ":multiplayer_respawn_period_minus_one"),
#
# (store_mission_timer_a, ":mission_timer"),
# (try_begin),
# (this_or_next|eq, "$g_multiplayer_game_type", multiplayer_game_type_battle),
# (eq, "$g_multiplayer_game_type", multiplayer_game_type_destroy),
# (assign, ":reduce_amount", 90),
# (else_try),
# (assign, ":reduce_amount", 120),
# (try_end),
#
# (store_mul, ":game_max_seconds", "$g_multiplayer_game_max_minutes", 60),
# (store_sub, ":game_max_seconds_min_n_seconds", ":game_max_seconds", ":reduce_amount"), #when round ends if there are 60 seconds to map change time then change map without completing exact map time.
# (gt, ":mission_timer", ":game_max_seconds_min_n_seconds"),
# (assign, ":end_map", 1),
# (try_end),
#
# (eq, ":end_map", 1),
# (else_try),
# (neq, "$g_multiplayer_game_type", multiplayer_game_type_battle), #battle mod has different end map condition by time
# (neq, "$g_multiplayer_game_type", multiplayer_game_type_destroy), #fight and destroy mod has different end map condition by time
# (neq, "$g_multiplayer_game_type", multiplayer_game_type_siege), #siege mod has different end map condition by time
# (neq, "$g_multiplayer_game_type", multiplayer_game_type_headquarters), #in headquarters mod game cannot limited by time, only can be limited by score.
# (store_mission_timer_a, ":mission_timer"),
# (store_mul, ":game_max_seconds", "$g_multiplayer_game_max_minutes", 60),
# (gt, ":mission_timer", ":game_max_seconds"),
# (assign, ":end_map", 1),
# (else_try),
# #assuming only 2 teams in scene
# (team_get_score, ":team_1_score", 0),
# (team_get_score, ":team_2_score", 1),
# (try_begin),
# (neq, "$g_multiplayer_game_type", multiplayer_game_type_headquarters), #for not-headquarters mods
# (try_begin),
# (this_or_next|ge, ":team_1_score", "$g_multiplayer_game_max_points"),
# (ge, ":team_2_score", "$g_multiplayer_game_max_points"),
# (assign, ":end_map", 1),
# (try_end),
# (else_try),
# (assign, ":at_least_one_player_is_at_game", 0),
# (get_max_players, ":num_players"),
# (try_for_range, ":player_no", 0, ":num_players"),
# (player_is_active, ":player_no"),
# (player_get_agent_id, ":agent_id", ":player_no"),
# (ge, ":agent_id", 0),
# (neg|agent_is_non_player, ":agent_id"),
# (assign, ":at_least_one_player_is_at_game", 1),
# (assign, ":num_players", 0),
# (try_end),
#
# (eq, ":at_least_one_player_is_at_game", 1),
#
# (this_or_next|le, ":team_1_score", 0), #in headquarters game ends only if one team has 0 score.
# (le, ":team_2_score", 0),
# (assign, ":end_map", 1),
# (try_end),
# (try_end),
# (try_begin),
# (eq, ":end_map", 1),
# (call_script, "script_game_multiplayer_get_game_type_mission_template"),
# (start_multiplayer_mission, reg0, "$g_multiplayer_selected_map", 0),
# (call_script, "script_game_set_multiplayer_mission_end"),
# (try_end),
# ])
# Fires once (ti_once), on the first mission frame: show the welcome message.
multiplayer_once_at_the_first_frame = (
  0, 0, ti_once, [], [
    (start_presentation, "prsnt_multiplayer_welcome_message"),
    ])
# When the battle window is opened, start the team-score display presentation.
multiplayer_battle_window_opened = (
  ti_battle_window_opened, 0, 0, [], [
    (start_presentation, "prsnt_multiplayer_team_score_display"),
    ])
# On every agent spawn, assign the banner tableau for the agent's troop via the
# shared banner-assignment script.
common_battle_init_banner = (
  ti_on_agent_spawn, 0, 0, [],
  [
    (store_trigger_param_1, ":agent_no"),
    (agent_get_troop_id, ":troop_no", ":agent_no"),
    (call_script, "script_troop_agent_set_banner", "tableau_game_troop_label_banner", ":agent_no", ":troop_no"),
    ])
##New automatic begin
# "Automatic fire" trigger, run every 0.05 s over all living human agents that
# wield a musket (itp_type_musket):
#   * AI agents: any attack action except reloading (5) / cancelling (7) is
#     forced back to action 0, suppressing the engine's normal attack release.
#   * Player-controlled agents: while the attack is held (action_state 1 =
#     readying), the ticker $g_automatic_ticker_time counts down each pass;
#     when it reaches zero a bullet missile is spawned manually (spread derived
#     from the weapon's accuracy), muzzle smoke/flash and the shot sound are
#     played, ammo is decremented, and the ticker is re-armed from the weapon's
#     speed rating.
# NOTE(review): the ticker is a single global shared by every matching agent —
# confirm this is intended with more than one player agent.
common_automatic = (0.05,0,0,[],[
  (try_for_agents,":attacker_agent"),
    (agent_is_human,":attacker_agent"),
    (agent_is_alive,":attacker_agent"),
    (agent_get_wielded_item,":agent_cur_weapon",":attacker_agent"),
    (gt,":agent_cur_weapon",0),
    (item_get_type, ":item_type", ":agent_cur_weapon"),
    (eq,":item_type",itp_type_musket),
    (agent_get_attack_action, ":action_state", ":attacker_agent"),
    (try_begin),
      (agent_is_non_player, ":attacker_agent"),
      (try_begin),
        (gt,":action_state",0),#free
        (neq,":action_state",5),#reloading
        (neq,":action_state",7),#cancelling
        (agent_set_attack_action, ":attacker_agent", 0),
      (try_end),
    (else_try),#player simulate attack
      (eq,":action_state",1),#readying
      #ticker trigger
      (try_begin),
        (le,"$g_automatic_ticker_time",0),
        #ticker trigger
        (item_get_speed_rating,"$g_automatic_ticker_time",":agent_cur_weapon"),
        (val_max,"$g_automatic_ticker_time",1),
        (store_div,"$g_automatic_ticker_time",200,"$g_automatic_ticker_time"),
        (agent_get_item_cur_ammo, ":cur_ammo", ":attacker_agent"),
        (gt,":cur_ammo",0),
        #match the right ammmo type
        # Scan the 4 equipment slots for the first itp_type_bullets item.
        (assign,":cur_ammo_id",-1),
        (assign,":item_slot_num",4),
        (try_for_range,":cur_item_slot",0,":item_slot_num"),
          (agent_get_item_slot, ":item_no", ":attacker_agent", ":cur_item_slot"),
          (ge,":item_no",0),
          (item_get_type, ":item_type", ":item_no"),
          (eq,":item_type",itp_type_bullets),
          (assign,":cur_ammo_id",":item_no"),
          (assign,":item_slot_num",0),#break
        (try_end),
        (try_begin),
          (lt,":cur_ammo_id",0),
          (assign,":cur_ammo_id","itm_ha_cartridges"),#as default
        (try_end),
        #get original position of weapon
        (agent_get_look_position,pos24,":attacker_agent"),
        (item_get_weapon_length,":length",":agent_cur_weapon"),
        (position_move_y, pos24,":length"),
        (agent_get_horse,":attacker_agent_horse",":attacker_agent"),
        # Muzzle height offset: crouched / on foot / mounted.
        (try_begin),
          (agent_get_crouch_mode,":crouch_mode",":attacker_agent"),
          (eq, ":crouch_mode", 1),
          (position_move_z,pos24,115,1),
        (else_try),
          (lt,":attacker_agent_horse",0),
          (position_move_z,pos24,170,1),
        (else_try),
          (position_move_z,pos24,270,1),
        (try_end),
        ##get weapon position after calculating accuracy
        # Random spread scales inversely with the weapon's accuracy rating.
        (copy_position,pos25,pos24),
        (item_get_accuracy,":accuracy",":agent_cur_weapon"),
        (val_max,":accuracy",1),
        (store_div,":randomize_value",500,":accuracy"),
        (store_random_in_range,":z",0,":randomize_value"),
        (store_random_in_range,":x",0,":randomize_value"),
        (position_rotate_z,pos25,":z"),
        (position_rotate_x,pos25,":x"),
        #create particle,animation and sound effect of weapon
        (agent_set_animation, ":attacker_agent", "anim_release_musket", 1),
        (item_get_slot,":shoot_sound",":agent_cur_weapon",slot_item_shot_sound),
        (play_sound_at_position, ":shoot_sound", pos24),
        (particle_system_burst, "psys_rifle_smoke", pos24, 10),
        (particle_system_burst, "psys_gun_fire", pos24, 20),
        (item_get_missile_speed,":missile_speed",":agent_cur_weapon"),
        (set_fixed_point_multiplier,1),
        (add_missile, ":attacker_agent", pos25, ":missile_speed", ":agent_cur_weapon", imod_plain, ":cur_ammo_id",imod_plain),
        #reduce ammo
        (val_sub,":cur_ammo",1),
        (agent_set_ammo,":attacker_agent",":agent_cur_weapon",":cur_ammo"),
      (else_try),
        (val_sub,"$g_automatic_ticker_time",1),
      (try_end),
    (else_try),
      # Not readying: re-arm the ticker from the weapon's speed rating.
      (item_get_speed_rating,"$g_automatic_ticker_time",":agent_cur_weapon"),
      (val_max,"$g_automatic_ticker_time",1),
      (store_div,"$g_automatic_ticker_time",1400,"$g_automatic_ticker_time"),
    (try_end),
  (try_end),
  ])
#New automatics end
#delay script system begin
# One-shot mission setup for the delay-script system: initialise the per-script
# parameter table and clear any queued delayed script calls.
common_init_delay_script = (ti_before_mission_start, 0, 0, [],
 [
  (call_script,"script_initialize_param_num_of_script"),#only needed here because this mod has no single-player mode to do it elsewhere
  (call_script,"script_clear_delay_script_data"),
 ]
)
# Heartbeat of the delay-script system: every delay_script_call_interval,
# advance the current time slot (wrapping at max_seconds_step) and run every
# script queued for that slot.
common_delay_script = (delay_script_call_interval, 0, 0, [],
 [
  (val_add,"$cur_time_slot",1),
  (try_begin),
    (ge,"$cur_time_slot",max_seconds_step),
    (val_mod,"$cur_time_slot",max_seconds_step),
  (try_end),
  (call_script,"script_cf_call_scripts_in_delay_script_queue","$cur_time_slot"),
 ]
)
#delay script system end
tournament_triggers = []
# ---------------------------------------------------------------------------
# Mission templates.  Each template is a tuple of:
#   (name, flags, mission-type (-1 = none), description,
#    [entry-point records], [triggers])
# Entry-point record: (entry_no, entry_flags, alter_flags, ai_flags,
#                      troop_count, [override_item, ...]).
# ---------------------------------------------------------------------------

# Entry points 0-31 are visitor spawn points for team 0, entry points 32-63
# for team 1; every agent starts alarmed and keeps its own equipment.
# Generated with comprehensions instead of 64 hand-written identical tuples.
_conquest_entry_points = (
  [(_ep, mtef_visitor_source|mtef_team_0, 0, aif_start_alarmed, 1, []) for _ep in range(0, 32)]
  + [(_ep, mtef_visitor_source|mtef_team_1, 0, aif_start_alarmed, 1, []) for _ep in range(32, 64)]
)

mission_templates = [
  (
    "conquest",mtf_battle_mode,-1, #Warlords mode
    "Make your life!",
    _conquest_entry_points,
    [
      #multiplayer_server_check_belfry_movement,
      multiplayer_server_check_polls,

      # Common per-agent setup on spawn.
      (ti_on_agent_spawn, 0, 0, [],
       [
         (store_trigger_param_1, ":agent_no"),
         (call_script, "script_multiplayer_server_on_agent_spawn_common", ":agent_no"),
       ]),

      # Common bookkeeping when a player joins the server.
      (ti_server_player_joined, 0, 0, [],
       [
         (store_trigger_param_1, ":player_no"),
         (call_script, "script_multiplayer_server_player_joined_common", ":player_no"),
       ]),

      # Mission initialisation: set the game type and reset mission state.
      (ti_before_mission_start, 0, 0, [],
       [
         (assign, "$g_multiplayer_game_type", multiplayer_game_type_conquest),
         (call_script, "script_multiplayer_server_before_mission_start_common"),
         (multiplayer_make_everyone_enemy),
         (call_script, "script_multiplayer_init_mission_variables"),
         (call_script, "script_multiplayer_remove_destroy_mod_targets"),
#         (call_script, "script_multiplayer_remove_headquarters_flags"), # close this line and open map in deathmatch mod and use all ladders firstly
       ]), # to be able to edit maps without damaging any headquarters flags ext.

      # Post-start setup: no spawn effector props for this mode, init props,
      # then allow agent spawning.
      (ti_after_mission_start, 0, 0, [],
       [
         (set_spawn_effector_scene_prop_kind, 0, -1), #during this mission, agents of "team 0" will try to spawn around scene props with kind equal to -1(no effector for this mod)
         (set_spawn_effector_scene_prop_kind, 1, -1), #during this mission, agents of "team 1" will try to spawn around scene props with kind equal to -1(no effector for this mod)
         (call_script, "script_initialize_all_scene_prop_slots"),
         (call_script, "script_multiplayer_move_moveable_objects_initial_positions"),
         (assign, "$g_multiplayer_ready_for_spawning_agent", 1),
       ]),

      # Mission end: grant ELITE_WARRIOR to the local player if no other
      # fighting player has a strictly better kills*1000 - deaths score, then
      # run the common end-of-mission script.
      (ti_on_multiplayer_mission_end, 0, 0, [],
       [
         #ELITE_WARRIOR achievement
         (try_begin),
           (multiplayer_get_my_player, ":my_player_no"),
           (is_between, ":my_player_no", 0, multiplayer_max_possible_player_id),
           (player_get_team_no, ":my_player_team", ":my_player_no"),
           (lt, ":my_player_team", multi_team_spectator),
           (player_get_kill_count, ":kill_count", ":my_player_no"),
           (player_get_death_count, ":death_count", ":my_player_no"),
           (store_mul, ":my_score_plus_death", ":kill_count", 1000),
           (val_sub, ":my_score_plus_death", ":death_count"),
           (assign, ":continue", 1),
           (get_max_players, ":num_players"),
           (assign, ":end_cond", ":num_players"),
           (try_for_range, ":player_no", 0, ":end_cond"),
             (player_is_active, ":player_no"),
             (player_get_team_no, ":player_team", ":player_no"),
             (this_or_next|eq, ":player_team", 0),
             (eq, ":player_team", 1),
             (player_get_kill_count, ":kill_count", ":player_no"),
             (player_get_death_count, ":death_count", ":player_no"), #get_death_count
             (store_mul, ":player_score_plus_death", ":kill_count", 1000),
             (val_sub, ":player_score_plus_death", ":death_count"),
             (gt, ":player_score_plus_death", ":my_score_plus_death"),
             (assign, ":continue", 0),
             (assign, ":end_cond", 0), #break
           (try_end),
           (eq, ":continue", 1),
           (unlock_achievement, ACHIEVEMENT_ELITE_WARRIOR),
         (try_end),
         #ELITE_WARRIOR achievement end
         (call_script, "script_multiplayer_event_mission_end"),
       ]),

      # Common kill/wound bookkeeping (scores, messages).
      (ti_on_agent_killed_or_wounded, 0, 0, [],
       [
         (store_trigger_param_1, ":dead_agent_no"),
         (store_trigger_param_2, ":killer_agent_no"),
         (call_script, "script_multiplayer_server_on_agent_killed_or_wounded_common", ":dead_agent_no", ":killer_agent_no"),
       ]),

      # Once per second: (re)spawn every eligible player whose agent is gone
      # (first spawn, no agent, or dead past the respawn period).
      (1, 0, 0, [],
       [
         (multiplayer_is_server),
         (get_max_players, ":num_players"),
         (try_for_range, ":player_no", 0, ":num_players"),
           (player_is_active, ":player_no"),
           (neg|player_is_busy_with_menus, ":player_no"),
           (player_get_team_no, ":player_team", ":player_no"), #if player is currently spectator do not spawn his agent
           (lt, ":player_team", multi_team_spectator),
           (player_get_troop_id, ":player_troop", ":player_no"), #if troop is not selected do not spawn his agent
           (ge, ":player_troop", 0),
           (player_get_agent_id, ":player_agent", ":player_no"),
           (assign, ":spawn_new", 0),
           (try_begin),
             (player_get_slot, ":player_first_spawn", ":player_no", slot_player_first_spawn),
             (eq, ":player_first_spawn", 1),
             (assign, ":spawn_new", 1),
             (player_set_slot, ":player_no", slot_player_first_spawn, 0),
           (else_try),
             (try_begin),
               (lt, ":player_agent", 0),
               (assign, ":spawn_new", 1),
             (else_try),
               (neg|agent_is_alive, ":player_agent"),
               (agent_get_time_elapsed_since_removed, ":elapsed_time", ":player_agent"),
               (gt, ":elapsed_time", "$g_multiplayer_respawn_period"),
               (assign, ":spawn_new", 1),
             (try_end),
           (try_end),
           (eq, ":spawn_new", 1),
           (call_script, "script_multiplayer_buy_agent_equipment", ":player_no"),
           (troop_get_inventory_slot, ":has_item", ":player_troop", ek_horse),
           (try_begin),
             (ge, ":has_item", 0),
             (assign, ":is_horseman", 1),
           (else_try),
             (assign, ":is_horseman", 0),
           (try_end),
           (call_script, "script_multiplayer_find_spawn_point", ":player_team", 0, ":is_horseman"),
           (player_spawn_new_agent, ":player_no", reg0),
         (try_end),
       ]),

      # Once per second: count live (or recently removed) bots per team and
      # update how many more bots each team still needs.
      (1, 0, 0, [], #do this in every new frame, but not at the same time
       [
         (multiplayer_is_server),
         (store_mission_timer_a, ":mission_timer"),
         (ge, ":mission_timer", 2),
         (assign, ":team_1_count", 0),
         (assign, ":team_2_count", 0),
         (try_for_agents, ":cur_agent"),
           (agent_is_non_player, ":cur_agent"),
           (agent_is_human, ":cur_agent"),
           (assign, ":will_be_counted", 0),
           (try_begin),
             (agent_is_alive, ":cur_agent"),
             (assign, ":will_be_counted", 1), #alive so will be counted
           (else_try),
             (agent_get_time_elapsed_since_removed, ":elapsed_time", ":cur_agent"),
             (le, ":elapsed_time", "$g_multiplayer_respawn_period"),
             (assign, ":will_be_counted", 1),
           (try_end),
           (eq, ":will_be_counted", 1),
           (agent_get_team, ":cur_team", ":cur_agent"),
           (try_begin),
             (eq, ":cur_team", 0),
             (val_add, ":team_1_count", 1),
           (else_try),
             (eq, ":cur_team", 1),
             (val_add, ":team_2_count", 1),
           (try_end),
         (try_end),
         (store_sub, "$g_multiplayer_num_bots_required_team_1", "$g_multiplayer_num_bots_team_1", ":team_1_count"),
         (store_sub, "$g_multiplayer_num_bots_required_team_2", "$g_multiplayer_num_bots_team_2", ":team_2_count"),
         (val_max, "$g_multiplayer_num_bots_required_team_1", 0),
         (val_max, "$g_multiplayer_num_bots_required_team_2", 0),
       ]),

      # Every frame (while spawning is allowed): add at most one missing bot,
      # picking the team proportionally to its deficit and a random troop of
      # that team's faction.
      (0, 0, 0, [],
       [
         (multiplayer_is_server),
         (eq, "$g_multiplayer_ready_for_spawning_agent", 1),
         (store_add, ":total_req", "$g_multiplayer_num_bots_required_team_1", "$g_multiplayer_num_bots_required_team_2"),
         (try_begin),
           (gt, ":total_req", 0),
           (store_random_in_range, ":random_req", 0, ":total_req"),
           (val_sub, ":random_req", "$g_multiplayer_num_bots_required_team_1"),
           (try_begin),
             (lt, ":random_req", 0),
             #add to team 1
             (assign, ":selected_team", 0),
             (val_sub, "$g_multiplayer_num_bots_required_team_1", 1),
           (else_try),
             #add to team 2
             (assign, ":selected_team", 1),
             (val_sub, "$g_multiplayer_num_bots_required_team_2", 1),
           (try_end),
           (team_get_faction, ":team_faction_no", ":selected_team"),
           (assign, ":available_troops_in_faction", 0),
           (try_for_range, ":troop_no", multiplayer_ai_troops_begin, multiplayer_ai_troops_end),
             (store_troop_faction, ":troop_faction", ":troop_no"),
             (eq, ":troop_faction", ":team_faction_no"),
             (val_add, ":available_troops_in_faction", 1),
           (try_end),
           (store_random_in_range, ":random_troop_index", 0, ":available_troops_in_faction"),
           (assign, ":end_cond", multiplayer_ai_troops_end),
           (try_for_range, ":troop_no", multiplayer_ai_troops_begin, ":end_cond"),
             (store_troop_faction, ":troop_faction", ":troop_no"),
             (eq, ":troop_faction", ":team_faction_no"),
             (val_sub, ":random_troop_index", 1),
             (lt, ":random_troop_index", 0),
             (assign, ":end_cond", 0),
             (assign, ":selected_troop", ":troop_no"),
           (try_end),
           (troop_get_inventory_slot, ":has_item", ":selected_troop", ek_horse),
           (try_begin),
             (ge, ":has_item", 0),
             (assign, ":is_horseman", 1),
           (else_try),
             (assign, ":is_horseman", 0),
           (try_end),
           (call_script, "script_multiplayer_find_spawn_point", ":selected_team", 0, ":is_horseman"),
           (store_current_scene, ":cur_scene"),
           (modify_visitors_at_site, ":cur_scene"),
           (add_visitors_to_current_scene, reg0, ":selected_troop", 1, ":selected_team", -1),
           (assign, "$g_multiplayer_ready_for_spawning_agent", 0),
         (try_end),
       ]),

      # Once per second: restart the map when the mission time limit expires.
      (1, 0, 0, [],
       [
         (multiplayer_is_server),
         #checking for restarting the map
         (assign, ":end_map", 0),
         (try_begin),
           (store_mission_timer_a, ":mission_timer"),
           (store_mul, ":game_max_seconds", "$g_multiplayer_game_max_minutes", 60),
           (gt, ":mission_timer", ":game_max_seconds"),
           (assign, ":end_map", 1),
         (try_end),
         (try_begin),
           (eq, ":end_map", 1),
           (call_script, "script_game_multiplayer_get_game_type_mission_template"),
           (start_multiplayer_mission, reg0, "$g_multiplayer_selected_map", 0),
           (call_script, "script_game_set_multiplayer_mission_end"),
         (try_end),
       ]),

      multiplayer_once_at_the_first_frame,

      # Escape opens the multiplayer escape menu (unless already open or a
      # terminate confirmation is pending).
      (ti_escape_pressed, 0, 0, [],
       [
         (neg|is_presentation_active, "prsnt_multiplayer_escape_menu"),
         (eq, "$g_waiting_for_confirmation_to_terminate", 0),
         (start_presentation, "prsnt_multiplayer_escape_menu"),
       ]),
    ],
  ),

  # Developer test scene: two entry points with test rifles; J/K spawn bot
  # waves, Tab ends the mission.
  (
    "test_scene_1",mtf_battle_mode|mtf_synch_inventory,-1,
    "test scene 1",
    [
      (0,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[itm_test_rifle,itm_ha_cartridges]),
      (1,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[itm_test_rifle,itm_ha_cartridges]),
    ],
    [
      # Debug keys: J / K spawn 50 test troops for team 0 / team 1.
      (0,0,0,[(key_clicked,key_j)],
       [
         (add_visitors_to_current_scene,0,"trp_test_troop",50, mtef_team_0, 0),
       ]),
      (0,0,0,[(key_clicked,key_k)],
       [
         (add_visitors_to_current_scene,1,"trp_test_troop",50, mtef_team_1, 0),
       ]),
      # Tab ends the test mission immediately.
      (ti_tab_pressed, 0, 0, [],
       [
         (finish_mission,0),
       ]),
      common_automatic,
    ],
  ),

  # Second developer test scene: as test_scene_1, plus B spawns a test missile
  # above the player and the delay-script system is active.
  (
    "test_scene_2",mtf_battle_mode|mtf_synch_inventory,-1,
    "test scene 2",
    [
      (0,mtef_visitor_source|mtef_team_0,0,aif_start_alarmed,1,[itm_test_rifle,itm_ha_cartridges]),
      (1,mtef_visitor_source|mtef_team_1,0,aif_start_alarmed,1,[itm_test_rifle,itm_ha_cartridges]),
    ],
    [
      # Debug keys: J / K spawn 50 test troops for team 0 / team 1.
      (0,0,0,[(key_clicked,key_j)],
       [
         (add_visitors_to_current_scene,0,"trp_test_troop",50, mtef_team_0, 0),
       ]),
      (0,0,0,[(key_clicked,key_k)],
       [
         (add_visitors_to_current_scene,1,"trp_test_troop",50, mtef_team_1, 0),
       ]),
      # B spawns a "magic swords" test missile 2 m above the player.
      (0,0,0,[(key_clicked,key_b)],
       [
         (get_player_agent_no, ":player_agent"),
         (agent_get_position, pos0, ":player_agent"),
         (init_position,pos1),
         (position_copy_origin,pos1,pos0),
         (position_get_x,":pos_x",pos1),
         (position_get_y,":pos_y",pos1),
         (position_get_z,":pos_z",pos1),
         (val_add,":pos_z",200),
         (call_script,"script_cf_spawn_missile_at_point",":player_agent","itm_test_magic_swords_sub","itm_test_magic_swords_sub",":pos_x",":pos_y",":pos_z"),
       ]),
      # Tab ends the test mission immediately.
      (ti_tab_pressed, 0, 0, [],
       [
         (finish_mission,0),
       ]),
      common_automatic,
      common_init_delay_script,
      common_delay_script,
    ],
  ),
]
| 1.953125 | 2 |
owntwin/builtin_datasources/gsi_disaportal.py | owntwin/owntwin-cli | 1 | 12774139 | <reponame>owntwin/owntwin-cli
from pathlib import Path
from time import sleep
import owntwin.builder.utils as utils
import requests
from loguru import logger
from owntwin.builder.tile import TileData
# Raster tile URL templates for GSI "disaportal" hazard layers
# (disaportaldata.gsi.go.jp).  Placeholders: {z} = zoom, {x}/{y} = tile index.
# Steep-slope warning-area layer (05_kyukeishakeikaikuiki).
KK_KEIKAI_URL = (
    "https://disaportaldata.gsi.go.jp/raster/05_kyukeishakeikaikuiki/{z}/{x}/{y}.png"
)
# Steep-slope collapse layer (05_kyukeisyachihoukai).
KK_HOUKAI_URL = (
    "https://disaportaldata.gsi.go.jp/raster/05_kyukeisyachihoukai/{z}/{x}/{y}.png"
)
# High-tide (L2) inundation layer (03_hightide_l2_shinsuishin_data).
HIGHTIDE_URL = "https://disaportaldata.gsi.go.jp/raster/03_hightide_l2_shinsuishin_data/{z}/{x}/{y}.png"
class Downloader(object):
    """Download hazard-map raster tiles from the GSI "disaportal" tile
    servers into a working directory, one PNG file per tile.

    Args:
        cwd: Directory the tile images are written into.
        interval: Seconds to sleep between consecutive HTTP requests
            (simple rate limiting; skipped for cached tiles).
    """

    def __init__(self, cwd, interval=1):
        self.cwd = Path(cwd)
        self.interval = interval

    def _download_base(self, template_url, template_filename, tiles, cache=True):
        """Fetch every tile in ``tiles`` and return a list of ``TileData``.

        ``template_url`` and ``template_filename`` are format strings with
        ``{z}``/``{x}``/``{y}`` placeholders.  When ``cache`` is true, tiles
        whose target file already exists are not re-fetched (but are still
        included in the returned list).  A 404 response is replaced by a
        locally generated placeholder "error" tile.

        Raises:
            ValueError: if a tile's zoom level is outside the range
                supported by the disaportal raster layers (2..17).
        """
        tiledata = []
        z_min, z_max = 2, 17
        for i, tile in enumerate(tiles):
            if not z_min <= tile.z <= z_max:
                raise ValueError(
                    "unsupported zoom level {}: expected {}..{}".format(
                        tile.z, z_min, z_max
                    )
                )

            url = template_url.format(z=tile.z, x=tile.x, y=tile.y)
            filename = self.cwd.joinpath(
                template_filename.format(z=tile.z, x=tile.x, y=tile.y)
            )

            tiledata.append(TileData(filename, z=tile.z, x=tile.x, y=tile.y))
            logger.info(f"({i + 1}/{len(tiles)}) {url} → (unknown)")

            if cache and filename.exists():
                continue  # cached: no request, so no rate-limit sleep either

            resp = requests.get(url)
            if resp.status_code == 404:
                # Missing tile: store a placeholder "error" image instead.
                im = utils.make_error_tile((256, 256))
                im.save(filename, "png")
            else:
                # NOTE(review): any non-404 response (including 5xx errors) is
                # written verbatim as tile data — confirm this is intended.
                with open(filename, "wb") as f:
                    f.write(resp.content)

            # Rate-limit between requests, but don't sleep after the last one.
            if i + 1 < len(tiles):
                sleep(self.interval)
        return tiledata

    def download_kyukeisha_keikai(self, tiles, cache=True):
        """Download steep-slope warning-area tiles (05_kyukeishakeikaikuiki)."""
        return self._download_base(
            KK_KEIKAI_URL, "kk_keikai-{z}_{x}_{y}.png", tiles, cache=cache
        )

    def download_kyukeisha_houkai(self, tiles, cache=True):
        """Download steep-slope collapse tiles (05_kyukeisyachihoukai)."""
        return self._download_base(
            KK_HOUKAI_URL, "kk_houkai-{z}_{x}_{y}.png", tiles, cache=cache
        )

    def download_hightide(self, tiles, cache=True):
        """Download high-tide inundation tiles (03_hightide_l2_shinsuishin)."""
        return self._download_base(
            HIGHTIDE_URL, "hightide-{z}_{x}_{y}.png", tiles, cache=cache
        )
| 2.296875 | 2 |
src/lib/Encryption.py | gamesguru/vault | 147 | 12774140 | import base64
import secrets
import string
from random import randint, choice

from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from Crypto import Random as CryptoRandom
class Encryption():
    """AES-CBC encrypt/decrypt helper.

    The secret key (bytes) — optionally prefixed with a one-shot salt — is
    hashed with SHA-256 to derive the 32-byte AES key.  Ciphertexts are
    returned base64-encoded, with the random IV stored in the first block.
    """

    def __init__(self, key):
        self.key = key  # Key in bytes
        self.salted_key = None  # Placeholder for optional salted key

    def digest_key(self):
        """
        Use SHA-256 over our key to get a proper-sized AES key
        """
        # Add optional salt to key
        key = self.key
        if self.salted_key:
            key = self.salted_key
        return SHA256.new(key).digest()

    def get_aes(self, IV):
        """
        AES instance (CBC mode) for the given initialization vector
        """
        return AES.new(self.digest_key(), AES.MODE_CBC, IV)

    def gen_salt(self, set_=True):
        """
        Generate a random salt of 8-12 printable characters (returned as bytes)
        """
        min_char = 8
        max_char = 12
        allchar = string.ascii_letters + string.punctuation + string.digits
        # Use the ``secrets`` CSPRNG rather than the ``random`` module:
        # salts are security-sensitive and ``random`` is not suitable for
        # cryptographic use.
        n_chars = min_char + secrets.randbelow(max_char - min_char + 1)
        salt = "".join(secrets.choice(allchar) for _ in range(n_chars)).encode()
        # Set the salt in the same instance if required
        if set_:
            self.set_salt(salt)
        return salt

    def set_salt(self, salt=None):
        """
        Add a salt to the secret key for this specific encryption or
        decryption; calling without an argument clears the salt.
        """
        if salt:
            self.salted_key = salt + self.key
        else:
            self.salted_key = None

    def encrypt(self, secret):
        """
        Encrypt a secret (bytes); returns base64-encoded IV + ciphertext
        """
        # generate a fresh random IV for every encryption
        IV = CryptoRandom.new().read(AES.block_size)
        # Retrieve AES instance
        aes = self.get_aes(IV)
        # PKCS#7-style padding: `padding` bytes, each with value `padding`
        padding = AES.block_size - len(secret) % AES.block_size
        secret += bytes([padding]) * padding
        # store the IV at the beginning and encrypt
        data = IV + aes.encrypt(secret)
        # The salt (if any) only applies to a single operation
        self.set_salt()
        # Return base 64 encoded bytes
        return base64.b64encode(data)

    def decrypt(self, enc_secret):
        """
        Decrypt a base64-encoded secret produced by :meth:`encrypt`

        Raises:
            ValueError: if the padding is malformed (wrong key/salt or
                corrupted ciphertext).
        """
        # Decode base 64
        enc_secret = base64.b64decode(enc_secret)
        # extract the IV from the beginning
        IV = enc_secret[:AES.block_size]
        # Retrieve AES instance
        aes = self.get_aes(IV)
        # Decrypt
        data = aes.decrypt(enc_secret[AES.block_size:])
        # pick the padding value from the end and validate every padding byte
        padding = data[-1]
        if data[-padding:] != bytes([padding]) * padding:
            raise ValueError("Invalid padding...")
        # Reset the one-shot salt
        self.set_salt()
        # Remove the padding and return the plaintext bytes
        return data[:-padding]
| 3.421875 | 3 |
setup.py | scottdraper8/maddress | 2 | 12774141 | import setuptools
# Read README.md into a variable so it can serve as the PyPI long description.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
# Package metadata; see https://setuptools.pypa.io for the available keywords.
setuptools.setup(
    name='maddress', # should match the package folder
    packages=['maddress'], # should match the package folder
    version='1.0.0-alpha', # important for updates
    license='MIT', # should match your chosen license
    description='Testing installation of Package',
    long_description=long_description, # loads your README.md
    long_description_content_type='text/markdown', # README.md is of type 'markdown'
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/scottdraper8/maddress',
    install_requires=[], # list all packages that your package uses
    keywords=['pypi', 'maddress', 'email', 'phone number', 'address', 'geolocation', 'data cleaning'], # descriptive metadata
    classifiers=[ # https://pypi.org/classifiers
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Documentation',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
    download_url="https://github.com/scottdraper8/maddress/archive/refs/tags/v1.0.0-alpha.tar.gz",
)
| 1.601563 | 2 |
irspack/utils/__init__.py | Random1992/irspack | 0 | 12774142 | <reponame>Random1992/irspack
import random
from typing import Optional, Tuple
import numpy as np
import pandas as pd
import scipy.sparse as sps
from irspack.definitions import InteractionMatrix
from irspack.utils._util_cpp import (
okapi_BM_25_weight,
remove_diagonal,
rowwise_train_test_split_by_fixed_n,
rowwise_train_test_split_by_ratio,
sparse_mm_threaded,
tf_idf_weight,
)
from irspack.utils.id_mapping import IDMappedRecommender
from irspack.utils.threading import get_n_threads
def rowwise_train_test_split(
    X: InteractionMatrix,
    test_ratio: float = 0.5,
    n_test: Optional[int] = None,
    ceil_n_test: bool = False,
    random_seed: Optional[int] = None,
) -> Tuple[InteractionMatrix, InteractionMatrix]:
    """Split the nonzero entries of ``X`` row-by-row into train/test matrices.

    Args:
        X: The source sparse interaction matrix.
        test_ratio: Fraction of each row's nonzeros that goes into the test
            matrix (ignored when ``n_test`` is given). Defaults to 0.5.
        n_test: When given, a fixed per-row number of nonzeros is used for the
            test matrix instead of a ratio.
        ceil_n_test: When splitting by ratio, passed through to the C++
            splitter; presumably rounds the per-row test count up instead of
            down — confirm against ``rowwise_train_test_split_by_ratio``.
        random_seed: Seed for the split. A random one is drawn when None.

    Returns:
        A tuple ``(X_train, X_test)`` in the original dtype; the two matrices
        sum back to the original matrix.
    """
    seed = (
        random.randint(-(2 ** 32), 2 ** 32 - 1) if random_seed is None else random_seed
    )
    # The C++ helpers operate on float64; cast in, then restore X's dtype.
    X_f64 = X.astype(np.float64)
    if n_test is not None:
        train_f64, test_f64 = rowwise_train_test_split_by_fixed_n(X_f64, seed, n_test)
    else:
        train_f64, test_f64 = rowwise_train_test_split_by_ratio(
            X_f64, seed, test_ratio, ceil_n_test
        )
    return train_f64.astype(X.dtype), test_f64.astype(X.dtype)
def df_to_sparse(
    df: pd.DataFrame,
    user_colname: str,
    item_colname: str,
    rating_colname: Optional[str] = None,
) -> Tuple[sps.csr_matrix, np.ndarray, np.ndarray]:
    """Convert a user/item interaction DataFrame into a CSR matrix.

    Args:
        df: DataFrame with one interaction per row.
        user_colname: Column holding user ids (rows of the result).
        item_colname: Column holding item ids (columns of the result).
        rating_colname: Optional column with interaction weights; when None,
            every interaction counts as 1. Duplicated (user, item) pairs are
            summed by the CSR constructor.

    Returns:
        ``(X, unique_user_ids, unique_item_ids)`` where ``X`` has shape
        ``(n_users, n_items)`` and the id arrays (sorted) label its
        rows/columns.
    """
    row, unique_user_ids = pd.factorize(df[user_colname], sort=True)
    col, unique_item_ids = pd.factorize(df[item_colname], sort=True)
    if rating_colname is None:
        data = np.ones(df.shape[0])
    else:
        # BUG FIX: np.asfarray was deprecated and removed in NumPy 2.0;
        # np.asarray with an explicit float64 dtype is the supported spelling.
        data = np.asarray(df[rating_colname].values, dtype=np.float64)
    return (
        sps.csr_matrix(
            (data, (row, col)), shape=(len(unique_user_ids), len(unique_item_ids))
        ),
        unique_user_ids,
        unique_item_ids,
    )
# Public API of irspack.utils. BUG FIX: df_to_sparse is defined (and public)
# in this module but was missing from __all__, so star-imports dropped it.
__all__ = [
    "rowwise_train_test_split",
    "sparse_mm_threaded",
    "okapi_BM_25_weight",
    "tf_idf_weight",
    "remove_diagonal",
    "get_n_threads",
    "IDMappedRecommender",
    "df_to_sparse",
]
| 2.28125 | 2 |
desktop-creator.py | mocchapi/Desktop-Creator | 0 | 12774143 | <reponame>mocchapi/Desktop-Creator
import configparser
from ast import literal_eval
# Startup banner printed before any import that might fail.
print('-------------------------------------------------------------------------------')
print('Desktop Creator v1.0 // made by <NAME> // MIT license')
print('-------------------------------------------------------------------------------')
# Guarded import: appJar is the only third-party dependency. If it cannot be
# imported, print detailed troubleshooting help instead of a bare traceback.
try:
    from appJar import gui
except BaseException as e:
    # Deliberately broad: any import failure should show the help text below.
    print('FATAL ERROR: appJar cannot be imported and the application cannot be started.')
    print(f'Exact exception: {e}')
    print('-------------------------------------------------------------------------------')
    print('If you are running this program as a stand-alone executable, try checking for updates at the github release page.')
    print('If there are no new updates, please open an issue on the github issues page.')
    print('You could also download the source code and run the desktop-creator.py found within and following the instructions in the next section.')
    print('The source code can be obtained at the github releases page, or by cloning the repository.')
    print('This is the github project page: "https://github.com/mocchapi/Desktop-Creator"')
    print('-------------------------------------------------------------------------------')
    print('If you are running this program using the desktop-creator.py with python 3, try the following:')
    print('Possible solution: Install the "appJar" package using "pip3 install appJar"')
    print('Possible solution: If appJar is installed and you are running this as root, and it works without root, try installing appJar under root instead of the current user by typing "sudo pip3 install appJar"')
    exit()
def ui(app):
    """Build the whole window: title, entry/option frames, the category
    property grid, and the bottom path/save bar."""
    # --- title bar ---
    app.setSticky('n')
    app.setStretch('none')
    app.addLabel('label_title','.desktop creator',0,0)
    app.addHorizontalSeparator(1,0,6)
    app.setStretch('both')
    app.setSticky('nesw')
    # --- main area: free-text entries ---
    app.startFrame('frame_main')
    app.setSticky('nesw')
    app.setStretch('column')
    app.startLabelFrame('frame_main_entries',label='Entries')
    app.setSticky('nsw')
    app.setStretch('column')
    app.addLabel('label_Name',' Name:',1)
    app.addEntry('entry_Name',1,1)
    app.addLabel('label_Comment',' Comment:',6)
    app.addEntry('entry_Comment',6,1)
    app.addLabel('label_Keywords',' Keywords:',8)
    app.addEntry('entry_Keywords',8,1)
    app.addLabel('label_Exec',' Executable: ',10,0)
    app.addFileEntry('entry_Exec',10,1)
    app.addLabel('label_Icon',' Icon:',15,0)
    app.addFileEntry('entry_Icon',15,1)
    app.stopLabelFrame()
    # --- main area: boolean options and (Not/Only)ShowIn tick boxes ---
    app.startLabelFrame('frame_main_options',label='Options')
    app.setSticky('nesw')
    app.setStretch('column')
    app.addNamedCheckBox('Terminal','box_Terminal')
    app.addNamedCheckBox('NoDisplay','box_NoDisplay')
    app.addNamedCheckBox('PrefersNonDefaultGPU','box_PrefersNonDefaultGPU')
    app.addLabel('label_custom','Custom dict:',18,0)
    app.addEntry('entry_custom',18,1)
    app.addTickOptionBox('NotShowIn',[''],20,0)
    app.addTickOptionBox('OnlyShowIn',[''],20,1)
    app.stopLabelFrame()
    # Freedesktop "main category" names, all unticked by default.
    categories = {'AudioVideo':False,'Audio':False,'Video':False,'Development':False,'Education':False,'Game':False,'Graphics':False,'Network':False,'Office':False,'Science':False,'Settings':False,'System':False,'Utility':False}
    app.addProperties('Categories',categories,0,3,1,40)
    app.stopFrame()
    # --- bottom bar: output path entry + save button ---
    app.setStretch('column')
    app.setSticky('esw')
    app.startFrame('frame_bot')
    app.setStretch('both')
    app.startFrame('frame_bot_path',0,0)
    app.addLabelEntry('Path:',0,1,2)
    app.stopFrame()
    app.setStretch('none')
    app.startFrame('frame_bot_buttons',0,1)
    app.addButton('Save',iobuttons,0,3)
    # BUG FIX: the default path used the misspelled extension ".dekstop";
    # update_default_filename (and desktop environments) expect ".desktop".
    app.setEntry('Path:','/usr/share/applications/unknown.desktop',callFunction=False)
    app.stopFrame()
    app.stopFrame()
def update_default_filename():
    """Regenerate the Path entry from the Name entry, but only while the user
    has not manually edited the path (tracked by the global
    ``filename_untouched``, maintained in touch_filename)."""
    if filename_untouched:
        name = sanitize_filename(app.getEntry('entry_Name'))
        # Fall back to "unknown" when the sanitized name contains no basic
        # lowercase letter or digit at all.
        # BUG FIX: the original hand-written character list was missing 'w',
        # so a name consisting only of w's was wrongly replaced by "unknown".
        base_chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
        if not any(ch in name for ch in base_chars):
            name = 'unknown'
        app.setEntry('Path:',f'/usr/share/applications/{name}.desktop',callFunction=False)
def strip_ending_chars(string, char):
    """Repeatedly drop the final character of *string* while it still ends
    with *char*, and return the result."""
    while string.endswith(char):
        string = string[:-1]
    return string
def dict_specific_key(dictionary, key):
    """Return the keys of *dictionary* whose value equals *key*, in
    insertion order."""
    return [entry for entry, value in dictionary.items() if value == key]
def sanitize_filename(string):
    """Make *string* safe for use as a file name: replace spaces and slashes,
    drop quotes/backticks/newlines, and strip trailing dots."""
    sanitizations = {' ':'_','/':'-',"'":'','"':'','`':'','\n':'','\\':'-'}
    for bad, replacement in sanitizations.items():
        string = string.replace(bad, replacement)
    # Trailing dots would produce odd names like "foo..desktop".
    while string.endswith('.'):
        string = string[:-1]
    return string
def audiovideo_check():
    """Tick the AudioVideo category whenever Audio or Video is ticked
    (freedesktop requires the parent category alongside either child)."""
    child_selected = app.getProperty('Categories','Audio') or app.getProperty('Categories','Video')
    if child_selected and not app.getProperty('Categories','AudioVideo'):
        app.setProperty('Categories','AudioVideo',value=True,callFunction=False)
def categories_updated():
    """Refresh the Not/OnlyShowIn tick boxes with the selected categories."""
    audiovideo_check()
    # appJar needs a non-empty option list, so fall back to [''].
    selected = dict_specific_key(app.getProperties('Categories'), True) or ['']
    app.changeOptionBox('NotShowIn', selected, callFunction=False)
    app.changeOptionBox('OnlyShowIn', selected, callFunction=True)
def optionbox_updated():
    """Keep NotShowIn and OnlyShowIn mutually exclusive: disable one box as
    soon as the other has a selection, re-enable both otherwise."""
    only_selected = dict_specific_key(app.getOptionBox('OnlyShowIn'), True)
    not_selected = dict_specific_key(app.getOptionBox('NotShowIn'), True)
    if not_selected and not only_selected:
        app.disableOptionBox('OnlyShowIn')
    elif only_selected and not not_selected:
        app.disableOptionBox('NotShowIn')
    else:
        app.enableOptionBox('NotShowIn')
        app.enableOptionBox('OnlyShowIn')
def touch_filename():
    """Mark the Path entry as user-edited; an emptied path re-enables
    auto-naming from the Name entry."""
    global filename_untouched
    # BUG FIX: the original tested `len(app.getEntry('Path:')) != ' '`,
    # comparing an int against the string ' ' -- always True, so the path
    # was permanently marked as touched and auto-naming never resumed.
    filename_untouched = app.getEntry('Path:') == ''
    update_default_filename()
def collect_entries():
    """Collect the non-empty text entries as .desktop key/value pairs.

    Returns a dict keyed by the .desktop key name (entry id minus the
    'entry_' prefix); empty entries are omitted.
    """
    entries = {}
    for entry_id in ('entry_Comment', 'entry_Name', 'entry_Icon', 'entry_Exec', 'entry_Keywords'):
        value = app.getEntry(entry_id)
        if value != '':
            entries[entry_id.replace('entry_', '')] = value
    # Keywords is a list-type key and must end with a ';' separator.
    # BUG FIX: replaces the original bare "except: pass" (which silently
    # swallowed every exception) with an explicit membership check.
    if 'Keywords' in entries and not entries['Keywords'].endswith(';'):
        entries['Keywords'] = f'{entries["Keywords"]};'
    return entries
def collect_options():
    """Collect checkbox states and the Not/OnlyShowIn selections as
    .desktop key/value pairs (booleans rendered as 'true'/'false')."""
    options = {}
    for box_id in ('box_Terminal', 'box_NoDisplay', 'box_PrefersNonDefaultGPU'):
        checked = app.getCheckBox(box_id)
        options[box_id.replace('box_', '')] = str(checked).lower()
    not_show = dict_specific_key(app.getOptionBox('NotShowIn'), True)
    only_show = dict_specific_key(app.getOptionBox('OnlyShowIn'), True)
    # The two lists are kept mutually exclusive by optionbox_updated;
    # NotShowIn takes precedence if both somehow carry selections.
    if not_show:
        options['NotShowIn'] = f'{";".join(not_show)};'
    elif only_show:
        options['OnlyShowIn'] = f'{";".join(only_show)};'
    return options
def collect_categories():
    """Return {'Categories': 'A;B;...;'} for the ticked categories, or an
    empty dict when none are selected."""
    selected = dict_specific_key(app.getProperties('Categories'), True)
    if not selected:
        return {}
    return {'Categories': f'{";".join(selected)};'}
def collect_all():
    """Merge the fixed header, entries, options, categories and the optional
    user-supplied custom dict into a single .desktop key/value mapping."""
    result = {'Type': 'Application', 'Version': '1.1'}
    result.update(collect_entries())
    result.update(collect_options())
    result.update(collect_categories())
    custom_text = app.getEntry('entry_custom')
    # Anything longer than a trivial string is parsed as a Python dict
    # literal (literal_eval accepts literals only, never executes code).
    if len(custom_text) > 5:
        result.update(literal_eval(custom_text))
    return result
def iobuttons(name):
    """Button callback: on 'Save', validate the form and write the
    .desktop file to the chosen path via configparser."""
    if name != 'Save':
        return
    if app.getEntry('entry_Name') == '':
        # Name is the only hard requirement.
        app.warningBox('Missing entry','The name field is required.')
        return
    if app.getEntry('entry_Exec') == '':
        # An empty executable is allowed, but only after confirmation.
        if not app.questionBox('Missing entry','The executable field is empty. Continue?'):
            return
    fulldict = collect_all()
    print(fulldict)
    try:
        path = app.getEntry('Path:')
        config = configparser.ConfigParser()
        # Preserve key casing -- .desktop keys are case-sensitive.
        config.optionxform=str
        config['Desktop Entry'] = fulldict
        with open(path,'w') as file:
            config.write(file)
        app.infoBox('Success',f'File {path} successfully written.')
    except PermissionError:
        app.warningBox('Error',f'Insufficient permissions to write {path}.\nTry running as Root.')
def loops_n_events():
    """Wire up the change callbacks and the polling event."""
    # Keep the suggested file path in sync with the Name entry.
    app.setEntryChangeFunction('entry_Name',update_default_filename)
    # Refresh the Not/OnlyShowIn boxes whenever a category is (un)ticked.
    app.setPropertiesChangeFunction('Categories',categories_updated)
    # Polled event: enforces Not/OnlyShowIn mutual exclusion.
    app.registerEvent(optionbox_updated)
    # Editing the path by hand disables auto-naming.
    app.setEntryChangeFunction('Path:',touch_filename)
    app.setPollTime(250)
if __name__ == '__main__':
    # While True, the Path entry is auto-generated from the Name entry;
    # touch_filename flips it once the user edits the path manually.
    filename_untouched = True
    app = gui('.desktop creator','600x430')
    app.setBg('ivory2',tint=True)
    app.setLabelFont(family='Open Sans')
    app.setFont(family='Open Sans')
    ui(app)
    loops_n_events()
    app.go()  # enters the appJar main loop; blocks until the window closes
| 2.578125 | 3 |
src/apps_script.py | Chimildic/goofy-hotkeys | 0 | 12774144 | from googleapiclient.discovery import build
from os import getenv
from auth import get_credentials
class AppsScript():
    """Thin wrapper around the Google Apps Script execution API for one script."""

    def __init__(self, id: str):
        # Service name and version come from the environment
        # (API_SERVICE_NAME / API_VERSION); *id* is the target script id.
        self._name = getenv("API_SERVICE_NAME")
        self._version = getenv("API_VERSION")
        self._id = id

    def run(self, function: str):
        """Execute *function* in the remote script and return the raw response."""
        payload = {"function": function}
        with build(self._name, self._version, credentials=get_credentials()) as service:
            request = service.scripts().run(scriptId=self._id, body=payload)
            return request.execute()
| 2.375 | 2 |
tests/test_objectid.py | hiroaki-yamamoto/mongoengine-goodjson | 64 | 12774145 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""ObjectID test."""
import json
from unittest import TestCase
from bson import ObjectId
from mongoengine.document import Document
from mongoengine.errors import ValidationError
from mongoengine.fields import StringField
from mongoengine_goodjson.fields import ObjectIDField
from mongoengine_goodjson.document import Document as JSONDoc
class NormalSchema(Document):
    """Normal document schema (plain mongoengine Document)."""

    # Serialized by the standard to_json as {"$oid": "..."} -- see
    # NormalDocumentTest.expected_dict.
    uid = ObjectIDField()
    name = StringField(required=True)
class CustomSchema(JSONDoc):
    """Schema based on mongoengine_goodjson's Document subclass."""

    # Serialized by goodjson's to_json as a bare string -- see
    # CustomSchemaTest.expected_dict.
    uid = ObjectIDField()
    name = StringField(required=True)
class NormalDocumentTest(TestCase):
    """JSON round-trip tests for the plain mongoengine Document."""

    def setUp(self):
        """Build a fixture document and the dict its JSON form should equal."""
        self.model = NormalSchema
        self.doc = self.model(
            uid=ObjectId(),
            name="Test",
        )
        # self.doc.save()
        # Standard mongoengine emits ObjectIds in extended-JSON form.
        self.expected_dict = {
            "uid": {"$oid": str(self.doc.uid)},
            "name": self.doc.name,
        }

    def test_encode(self):
        """Should be serialized."""
        dct = json.loads(self.doc.to_json())
        self.assertEqual(dct, self.expected_dict)

    def test_decode(self):
        """Should be deserialized."""
        doc = self.model.from_json(json.dumps(self.expected_dict))
        # In Python checking equivalence between document objects directly is
        # impossible, so compare the dicts produced by to_mongo instead.
        self.assertEqual(doc.uid, self.doc.uid)
        self.assertEqual(doc.to_mongo(), self.doc.to_mongo())
class NormalSchemaCastingTest(NormalDocumentTest):
    """Same round-trip, but uid is supplied as a str (valid ObjectId hex)."""

    def setUp(self):
        """Rebuild the fixture with a string uid; inherited tests re-run."""
        super().setUp()
        oid = str(ObjectId())
        self.doc = self.model(
            uid=oid,
            name="Test",
        )
        # self.doc.save()
        self.expected_dict = {"uid": {"$oid": oid}, "name": self.doc.name}
class NormalSchemaInvalidCastingTest(TestCase):
    """Serialization must fail when uid cannot be cast to an ObjectId."""

    def setUp(self):
        """Build a document whose uid is not a valid ObjectId string."""
        # Non-hex text, so ObjectId casting must fail on serialization.
        # (Dropped the redundant str(...) wrapper around the literal.)
        oid = "あばばばばばば"
        self.doc = NormalSchema(
            uid=oid,
            name="Test",
        )
        # self.doc.save()
        # NOTE(review): expected_dict is unused by test_encode; kept for
        # symmetry with the other casting tests -- confirm it can be removed.
        self.expected_dict = {"uid": {"$oid": oid}, "name": self.doc.name}

    def test_encode(self):
        """Serialization should fail with a ValidationError on 'uid'."""
        with self.assertRaises(ValidationError) as e:
            self.doc.to_json()
        self.assertEqual(e.exception.field_name, "uid")
class CustomSchemaTest(NormalDocumentTest):
    """Round-trip tests for the goodjson Document: uid serializes to a plain str."""

    def setUp(self):
        """Swap the model for CustomSchema and rebuild the fixtures."""
        super().setUp()
        self.model = CustomSchema
        self.doc = self.model(
            uid=ObjectId(),
            name="Test",
        )
        # self.doc.save()
        # goodjson emits the ObjectId as a bare string, not {"$oid": ...}.
        self.expected_dict = {
            "uid": str(self.doc.uid),
            "name": self.doc.name,
        }
class CustomSchemaCastingTest(CustomSchemaTest):
    """Same as CustomSchemaTest, but uid is supplied as a str."""

    def setUp(self):
        """Rebuild the fixture with a string uid; inherited tests re-run."""
        super().setUp()
        oid = str(ObjectId())
        self.doc = self.model(
            uid=oid,
            name="Test",
        )
        # self.doc.save()
        self.expected_dict = {"uid": oid, "name": self.doc.name}
class CustomSchemaInvalidCastingTest(TestCase):
    """goodjson serialization must also fail on an invalid ObjectId string."""

    def setUp(self):
        """Build a CustomSchema document with a non-castable uid."""
        # Non-hex text, so ObjectId casting must fail on serialization.
        # (Dropped the redundant str(...) wrapper around the literal.)
        oid = "あばばばばばば"
        self.doc = CustomSchema(
            uid=oid,
            name="Test",
        )
        # self.doc.save()
        # NOTE(review): expected_dict is unused by test_encode; kept for
        # symmetry with the other casting tests -- confirm it can be removed.
        self.expected_dict = {"uid": oid, "name": self.doc.name}

    def test_encode(self):
        """Serialization should fail with a ValidationError on 'uid'."""
        with self.assertRaises(ValidationError) as e:
            self.doc.to_json()
        self.assertEqual(e.exception.field_name, "uid")
| 2.6875 | 3 |
race/linearizer/client.py | andreycizov/python-race | 0 | 12774146 | <reponame>andreycizov/python-race
import socket
from typing import Optional
from dataclasses import dataclass
from race.linearizer.proto import Packet, pack_str, unpack_str
@dataclass
class TCPClient:
    """Blocking TCP client that exchanges length-delimited string packets
    (framing handled by pack_str/unpack_str from the proto module)."""

    host: str
    port: int
    client_socket: Optional[socket.socket] = None
    recv_buffer_size: int = 4096

    def init_socket(self) -> None:
        """Open the TCP connection; raises if a socket is already open."""
        if self.client_socket is not None:
            raise AssertionError
        new_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        new_socket.connect((self.host, self.port))
        self.client_socket = new_socket

    def connect(self) -> None:
        """Alias for init_socket."""
        self.init_socket()

    def close(self) -> None:
        """Close the connection; raises if no socket is open."""
        if self.client_socket is None:
            raise AssertionError
        self.client_socket.close()
        self.client_socket = None

    def send(self, packet: Packet) -> None:
        """Serialize *packet* and send it in full.

        BUG FIX: socket.send may transmit only part of the buffer; sendall
        retries until everything is written (or raises).
        """
        self.client_socket.sendall(pack_str(packet.into_str()))

    def recv(self) -> Packet:
        """Receive exactly one packet and return it.

        Raises ConnectionError if the peer closes the connection before a
        complete packet arrives (BUG FIX: the original looped forever on the
        empty reads recv() returns after EOF).
        """
        full_buffer = bytes()
        while True:
            chunk = self.client_socket.recv(self.recv_buffer_size)
            if not chunk:
                raise ConnectionError("socket closed before a full packet was received")
            full_buffer += chunk
            unpacked = unpack_str(full_buffer)
            if not unpacked:
                continue
            body_str, offset = unpacked
            # Exactly one packet is expected per recv(); trailing bytes
            # indicate a protocol error.
            leftover_size = len(full_buffer[offset:])
            if leftover_size:
                raise AssertionError(leftover_size)
            return Packet.from_str(body=body_str)
| 2.859375 | 3 |
Network.py | TheCrks/Online-Td-2 | 0 | 12774147 | <gh_stars>0
import socket
import pickle
class network:
    """TCP client for the game server: connects on construction and stores
    the initial payload the server sends as the player blob."""

    def __init__(self):
        self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # NOTE(review): "p address" looks like a redacted placeholder --
        # set this to the real server IP/hostname before use.
        self.server = "p address"
        self.port = 5555
        self.address = (self.server, self.port)
        self.player = self.connect()

    def getPlayer(self):
        """Return the decoded string the server sent on connect (or None)."""
        return self.player

    def connect(self):
        """Connect and return the server's first message, or None on failure."""
        try:
            self.client.connect(self.address)
            return self.client.recv(2048 * 1000).decode()
        except Exception:
            # Best-effort: preserve the original silent-failure contract
            # (returns None), but catch explicitly instead of a bare except.
            return None

    def sendStr(self, data):
        """Send *data* and return the server's unpickled reply, or None on a
        socket error (which is printed)."""
        try:
            self.client.send(str.encode(data))
            # SECURITY: pickle.loads on bytes received from the network can
            # execute arbitrary code -- only use against a trusted server.
            return pickle.loads(self.client.recv(2048 * 2048))
        except socket.error as e:
            print(e)
| 2.921875 | 3 |
Greedy/763-partition_labels.py | hscspring/TheAlgorithms-Python | 10 | 12774148 | from collections import Counter
def partition_labels(s: str) -> list:
    """Partition *s* into as many parts as possible so each letter appears
    in at most one part, and return the list of part sizes.

    Works by merging the [first, last]-occurrence intervals of characters,
    visited in order of first appearance.
    """
    # BUG FIX: the original indexed lst[0] unconditionally and raised
    # IndexError on an empty string; an empty input has no partitions.
    if not s:
        return []
    count = Counter(s)
    first = {}
    last = {}
    for i, c in enumerate(s):
        first.setdefault(c, i)
        last[c] = i
    # Characters in order of first appearance (dicts preserve insertion order).
    intervals = [(c, first[c], last[c]) for c in first]
    res = []
    total = count[intervals[0][0]]
    prev = intervals[0][2]
    for c, start, end in intervals[1:]:
        if start < prev:
            # Overlaps the current partition: absorb the character and
            # extend the partition's right bound if needed.
            total += count[c]
            prev = max(prev, end)
        else:
            # Disjoint: close the current partition and start a new one.
            res.append(total)
            total = count[c]
            prev = end
    res.append(total)
    return res
def standard_solution(s: str) -> list:
    """Canonical greedy solution: keep extending the current partition to
    cover the last occurrence of every character seen so far; close it when
    the scan reaches that bound."""
    last_index = {ch: pos for pos, ch in enumerate(s)}
    sizes = []
    start = 0
    end = 0
    for pos, ch in enumerate(s):
        end = max(end, last_index[ch])
        if pos == end:
            sizes.append(end - start + 1)
            start = pos + 1
    return sizes
# Demo: both implementations should print the same partition sizes
# ([9, 7, 8] for the classic LeetCode 763 example).
s = "ababcbacadefegdehijhklij"
res = partition_labels(s)
print(res)
res = standard_solution(s)
print(res)
| 3.203125 | 3 |
1281_subtract_product_sum.py | kannan5/LeetCode | 0 | 12774149 | <gh_stars>0
# Url https://leetcode.com/problems/subtract-the-product-and-sum-of-digits-of-an-integer/
class Solution:
    def subtractProductAndSum(self, n):
        """Return (product of decimal digits of n) - (sum of decimal digits)."""
        product, total = 1, 0
        while n:
            n, digit = divmod(n, 10)
            product *= digit
            total += digit
        return product - total
if __name__ == '__main__':
    # Quick manual check: digit product 24 minus digit sum 9 -> 15.
    a = Solution()
    print(a.subtractProductAndSum(234))
| 3.734375 | 4 |
backend/ql_library/users/tests/test_views.py | radekwlsk/ql-library | 1 | 12774150 | <gh_stars>1-10
from unittest.mock import patch
from rest_framework import status
from rest_framework.test import APIRequestFactory, force_authenticate
from test_plus.test import TestCase
from .. import serializers, views
from .factories import UserFactory
class BaseUserTestCase(TestCase):
    """Shared fixtures for the user API view tests."""

    def setUp(self):
        """Create the request factory, an in-memory superuser and the view class."""
        self.factory = APIRequestFactory()
        # UserFactory.build() constructs the user without saving it to the DB.
        self.admin_user = UserFactory.build(is_superuser=True, is_active=True)
        self.view = views.UserViewSet
class TestUserViewSet(BaseUserTestCase):
    """Tests for the UserViewSet list and retrieve endpoints."""

    def test_serializer_list_of_users(self):
        """GET /users/ returns every user, serialized with UserSerializer."""
        view = self.view.as_view({"get": "list"})
        users = UserFactory.build_batch(5)
        # NOTE(review): this mutates the class attribute, so the patched
        # queryset leaks into tests that run afterwards -- consider
        # patch.object(views.UserViewSet, "queryset", users) instead.
        views.UserViewSet.queryset = users
        request = self.factory.get("/users/")
        force_authenticate(request, user=self.admin_user)
        response = view(request)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get("count"), len(users))
        self.assertEqual(
            response.data.get("results"),
            serializers.UserSerializer(users, many=True).data,
        )

    @patch.object(views.UserViewSet, "get_object")
    def test_serializer_detail_of_users(self, mock_views):
        """GET /users/<pk>/ returns the single user supplied by get_object."""
        view = self.view.as_view({"get": "retrieve"})
        # Bypass DB lookup: the view's get_object returns our built user.
        mock_views.return_value = self.admin_user
        request = self.factory.get("/users/1/")
        force_authenticate(request, user=self.admin_user)
        response = view(request, pk=1)
        self.assertTrue(mock_views.called)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(
            response.data, serializers.UserSerializer(self.admin_user).data
        )
| 2.515625 | 3 |