| max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
|---|---|---|---|---|---|---|
satchmo/apps/product/urls/category.py
|
predatell/satchmo
| 1
|
12777851
|
from django.conf.urls import url
from product.views import CategoryView, CategoryIndexView
urlpatterns = [
url(r'^(?P<parent_slugs>([-\w]+/)*)?(?P<slug>[-\w]+)/$', CategoryView.as_view(), name='satchmo_category'),
url(r'^$', CategoryIndexView.as_view(), name='satchmo_category_index'),
]
| 1.625
| 2
|
gammapy/modeling/sampling.py
|
QRemy/gammapy
| 0
|
12777852
|
<gh_stars>0
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""MCMC sampling helper functions using ``emcee``."""
import logging
import numpy as np
__all__ = ["uniform_prior", "run_mcmc", "plot_trace", "plot_corner"]
log = logging.getLogger(__name__)
# TODO: so far only works with a uniform prior on parameters
# as there is no way yet to enter min,mean,max in parameters for normal prior
# lnprob() uses a uniform prior. hard coded for now.
def uniform_prior(value, umin, umax):
"""Uniform prior distribution."""
if umin <= value <= umax:
return 0.0
else:
return -np.inf
def normal_prior(value, mean, sigma):
"""Normal prior distribution."""
return -0.5 * (2 * np.pi * sigma) - (value - mean) ** 2 / (2.0 * sigma)
def par_to_model(dataset, pars):
"""Update model in dataset with a list of free parameters factors"""
for i, p in enumerate(dataset.parameters.free_parameters):
p.factor = pars[i]
def ln_uniform_prior(dataset):
"""LogLike associated with prior and data/model evaluation.
Return probability of parameter values according to prior knowledge.
Parameter limits should be enforced here through uniform prior distributions
"""
logprob = 0
for par in dataset.parameters.free_parameters:
logprob += uniform_prior(par.value, par.min, par.max)
return logprob
def lnprob(pars, dataset):
"""Estimate the likelihood of a model including prior on parameters."""
# Update model parameters factors inplace
for factor, par in zip(pars, dataset.parameters.free_parameters):
par.factor = factor
lnprob_priors = ln_uniform_prior(dataset)
# dataset.stat_sum() returns the Cash statistic value (a fit statistic to be minimised)
# emcee maximises the log-likelihood, so we need -dataset.stat_sum()
total_lnprob = -dataset.stat_sum() + lnprob_priors
return total_lnprob
def run_mcmc(dataset, nwalkers=8, nrun=1000, threads=1):
"""Run the MCMC sampler.
Parameters
----------
dataset : `~gammapy.modeling.Dataset`
Dataset
nwalkers : int
Number of walkers
nrun : int
Number of steps each walker takes
threads : (optional)
Number of threads or processes to use
Returns
-------
sampler : `emcee.EnsembleSampler`
sampler object containing the trace of all walkers.
"""
import emcee
dataset.parameters.autoscale() # Autoscale parameters
pars = [par.factor for par in dataset.parameters.free_parameters]
ndim = len(pars)
# Initialize walkers in a ball of relative size 0.5% in all dimensions,
# which assumes the parameters are already close to a previous best-fit solution
# TODO: the spread of 0.5% below is valid if a pre-fit of the model has been obtained.
# currently the run_mcmc() doesn't know the status of previous fit.
spread = 0.5 / 100
p0var = np.array([spread * pp for pp in pars])
p0 = emcee.utils.sample_ball(pars, p0var, nwalkers)
labels = []
for par in dataset.parameters.free_parameters:
labels.append(par.name)
if np.isnan(par.min) and np.isnan(par.max):
log.warning(
f"Missing prior for parameter: {par.name}.\nMCMC will likely fail!"
)
log.info(f"Free parameters: {labels}")
sampler = emcee.EnsembleSampler(
nwalkers, ndim, lnprob, args=[dataset], threads=threads
)
log.info(f"Starting MCMC sampling: nwalkers={nwalkers}, nrun={nrun}")
for idx, result in enumerate(sampler.sample(p0, iterations=nrun)):
if idx % (nrun / 4) == 0:
log.info("{:5.0%}".format(idx / nrun))
log.info("100% => sampling completed")
return sampler
def plot_trace(sampler, dataset):
"""
Plot the trace of all walkers at every step.
Parameters
----------
sampler : `emcee.EnsembleSampler`
Sampler object containing the trace of all walkers
dataset : `~gammapy.modeling.Dataset`
Dataset
"""
import matplotlib.pyplot as plt
labels = [par.name for par in dataset.parameters.free_parameters]
fig, axes = plt.subplots(len(labels), sharex=True)
for idx, ax in enumerate(axes):
ax.plot(sampler.chain[:, :, idx].T, "-k", alpha=0.2)
ax.set_ylabel(labels[idx])
plt.xlabel("Nrun")
plt.show()
def plot_corner(sampler, dataset, nburn=0):
"""Corner plot for each parameter explored by the walkers.
Parameters
----------
sampler : `emcee.EnsembleSampler`
Sampler object containing the trace of all walkers
dataset : `~gammapy.modeling.Dataset`
Dataset
nburn : int
Number of initial steps to discard as part of the burn-in phase
"""
from corner import corner
labels = [par.name for par in dataset.parameters.free_parameters]
samples = sampler.chain[:, nburn:, :].reshape((-1, len(labels)))
corner(samples, labels=labels, quantiles=[0.16, 0.5, 0.84], show_titles=True)
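For orientation, a minimal usage sketch of how these helpers chain together; `dataset` stands for an already-configured gammapy dataset with free model parameters and finite min/max limits (its construction is omitted and assumed here):

```python
# Hedged sketch: `dataset` is assumed to be a prepared gammapy Dataset whose free
# model parameters carry min/max limits (used as uniform priors by lnprob above).
sampler = run_mcmc(dataset, nwalkers=8, nrun=1000)  # emcee.EnsembleSampler
plot_trace(sampler, dataset)                        # per-parameter walker traces
plot_corner(sampler, dataset, nburn=200)            # posterior corner plot after burn-in
```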
| 2.171875
| 2
|
5/cvicenie/sections/module_4.py
|
sevo/FLP-2020
| 0
|
12777853
|
#
# Section 4: Somewhat harder exercises
#
# This section covers regular expressions, input and output, the use of
# higher-order functions and slightly more advanced loops.
#
from collections import defaultdict
import random, re
# 42. Sentence Splitter
# Given a text file, this program separates its sentences based
# on a set of rules and then returns the result.
def split_sentences( filename = 'data/text-42.md' ):
pass
# 43 Helper: Load Words
# Returns an array of all the words in a file
def load_words( filename ):
pass
# 43. Find Anagram
# Finds all the anagrams in a text file and returns a list
# with all the groups of anagrams
# Adapted from answer at
# http://stackoverflow.com/questions/8286554/find-anagrams-for-a-list-of-words
def find_anagrams( filename = 'data/words-43.md' ):
pass
# 44 Helper: Checks whether a string of square brackets
# is syntactically correct. This means, whether every opening bracket ( '[' )
# is properly closed.
# Example:
# [] True
# [][] True
# []][[] False
#
def validate_brackets( string ):
pass
# 44. Analyze brackets
# This function generates a random string with `n` opening brackets ( '[' ) and `n`
# closing brackets ( ']' ) in a random order. After that, it checks whether
# the generated string comprises matched pairs of opening/closing brackets.
# Finally, it prints the output to the console as in this example.
# Example:
#
# [] OK ][ NOT OK
# [][] OK ][][ NOT OK
# [[][]] OK []][[] NOT OK
#
def analyze_rand_brackets():
pass
# 45 Helper:
# Checks whether the list has a word starting with
# a specific letter and returns its position;
# returns False if there are no words that match
# the criteria.
def has_word_starting_with( letter, iterable ):
pass
# 45. This function generates the longest possible
# sequence of Pokemon names in which the final letter
# of each preceding name is the first letter of the
# following one.
# Example:
#
# banette -> emboar -> relicant -> tirtuga -> audino ...
#
def words_domino( filename = 'data/pokemons-list.md' ):
pass
# 46. Alternade
# Given a word list, this function takes each word and tries to make two
# smaller words using all the letters of that word.
# Example:
#
# 'board': makes 'bad' and 'or'
# 'waists': makes 'wit' and 'ass'
#
def alternade( filename = 'data/words-43.md' ):
pass
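For reference, one possible implementation of the exercise-44 helper described above (the exercises themselves are intentionally left as `pass` stubs; this is just a sketch, not the official solution):

```python
def validate_brackets_sketch(string):
    # A bracket string is valid iff the running count of '[' minus ']' never
    # goes negative and ends at zero.
    depth = 0
    for ch in string:
        if ch == '[':
            depth += 1
        elif ch == ']':
            depth -= 1
            if depth < 0:
                return False
    return depth == 0

assert validate_brackets_sketch('[][]') is True
assert validate_brackets_sketch('[]][[]') is False
```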
| 4.3125
| 4
|
services/ATM-machine/client/client.py
|
HackerDom/ctfcup-2021-AD
| 0
|
12777854
|
import socket
from enum import IntEnum
from typing import Dict, List
from base64 import b64decode, b64encode
UTF_8 = 'utf-8'
class Stage(IntEnum):
START = 1
TRANSFER = 2
CHECKID = 3
CHECK = 4
SHOW = 5
SEND = 6
class BaseMsg:
def get_bytes(self) -> bytes:
return str(self).encode()
class Transfer(BaseMsg):
def __init__(self):
self.to: str = ''
self.from_: str = ''
self.value: float = -1
self.comment: str = ''
def __str__(self) -> str:
return f'transfer {self.to} {self.from_} {self.value} {self.comment}'
class CheckId(BaseMsg):
def __init__(self):
self.id: int = -1
def __str__(self) -> str:
return f'checkid {self.id}'
class Check(BaseMsg):
def __init__(self):
self.encrypt_bytes: bytes = b''
def __str__(self) -> str:
return f'check {self.encrypt_bytes}'
def get_bytes(self) -> bytes:
return 'check '.encode() + self.encrypt_bytes
class Show(BaseMsg):
def __init__(self):
self.offset: int = -1
self.limit: int = -1
def __str__(self) -> str:
return f'show {self.offset} {self.limit}'
class Sender:
def __init__(self, stage: Stage, msg: BaseMsg):
self.prev_stage: Stage = stage
self.msg: BaseMsg = msg
self.host = '0.0.0.0'
self.port = 5051
@staticmethod
def print_transfer_answer(data: bytes):
split_data = data.strip(b'\x00').split(b'\n')
print(f'id: {split_data[0].decode(UTF_8)} {b64encode(split_data[1]).decode(UTF_8)}')
@staticmethod
def print_check_id_answer(data: bytes):
print(data.strip(b'\x00').decode(UTF_8))
@staticmethod
def print_check_answer(data: bytes):
print(data.strip(b'\x00').decode(UTF_8))
@staticmethod
def print_show_answer(data: bytes):
split_data = data.strip(b'\x00').split('separator'.encode())
print('encrypted transactions:')
for el in split_data:
print(b64encode(el).decode(UTF_8))
def handle_message(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.connect((self.host, self.port))
sock.sendall(self.msg.get_bytes())
data: bytes = sock.recv(1488)
if self.prev_stage == Stage.TRANSFER:
self.print_transfer_answer(data)
elif self.prev_stage == Stage.CHECKID:
self.print_check_id_answer(data)
elif self.prev_stage == Stage.CHECK:
self.print_check_answer(data)
elif self.prev_stage == Stage.SHOW:
self.print_show_answer(data)
class MachineGun:
START_COMMANDS: List[str] = ['transfer', 'checkid', 'check', 'show']
STRING_TO_STAGE: Dict[str, Stage] = {
'transfer': Stage.TRANSFER,
'checkid': Stage.CHECKID,
'check': Stage.CHECK,
'show': Stage.SHOW
}
def __init__(self):
self.stage: Stage = Stage.START
self.prev_stage: Stage = Stage.START
self.msg = None
@staticmethod
def _validate_string(input_msg: str) -> str:
res = input(input_msg)
while res == '':
res = input(input_msg)
return res
@staticmethod
def _validate_num(input_msg: str, num_type: type):
value = -1
while value <= 0:
try:
value = num_type(input(input_msg))
except ValueError:
print('enter the number > 0')
value = -1
return value
@staticmethod
def _validate_bytes(input_msg: str) -> bytes:
res = input(input_msg)
while res == '':
res = input(input_msg)
return b64decode(res)
def handle_start(self):
possible_commands = "; ".join(self.START_COMMANDS)
command = input(f'possible commands: {possible_commands}\n')
while command not in self.START_COMMANDS:
command = input(f'possible commands: {possible_commands}\n')
self.prev_stage = self.stage
self.stage = self.STRING_TO_STAGE[command]
def handle_transfer(self):
transfer = Transfer()
transfer.to = self._validate_string('to:\n')
transfer.from_ = self._validate_string('from:\n')
transfer.value = self._validate_num('value:\n', float)
transfer.comment = self._validate_string('comment:\n')
self.prev_stage = self.stage
self.stage = Stage.SEND
self.msg = transfer
def handle_check_id(self):
check_id = CheckId()
check_id.id = self._validate_num('id:\n', int)
self.prev_stage = self.stage
self.stage = Stage.SEND
self.msg = check_id
def handle_check(self):
check = Check()
check.encrypt_bytes = self._validate_bytes('encrypt bytes in base64:\n')
self.prev_stage = self.stage
self.stage = Stage.SEND
self.msg = check
def handle_show(self):
show = Show()
show.offset = self._validate_num('offset:\n', int)
show.limit = self._validate_num('limit:\n', int)
self.prev_stage = self.stage
self.stage = Stage.SEND
self.msg = show
def run(self):
while True:
try:
if self.stage == Stage.START:
self.handle_start()
elif self.stage == Stage.TRANSFER:
self.handle_transfer()
elif self.stage == Stage.CHECKID:
self.handle_check_id()
elif self.stage == Stage.CHECK:
self.handle_check()
elif self.stage == Stage.SHOW:
self.handle_show()
elif self.stage == Stage.SEND:
sender = Sender(self.prev_stage, self.msg)
sender.handle_message()
self.stage = Stage.START
self.prev_stage = Stage.START
except Exception:
print('kernel panic')
self.stage = Stage.START
def main():
try:
gun = MachineGun()
gun.run()
except KeyboardInterrupt:
exit(0)
if __name__ == '__main__':
main()
# thread_pool = ThreadPool(processes=7)
#
#
# def main():
# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# addr = ('0.0.0.0', 5051)
# sock.connect(addr)
# sock.sendall('transfer a v 100 '.encode() + ('a'*3).encode()) # 'transfer <from> <to> <value> <comment>'
# answ = sock.recv(1488).strip(b'\x00').split(b'\n') # '\n' is the separator
# _id = int(answ[0].decode('utf-8')) # transaction id
# print(_id)
# print(answ[1]) # [int(x) for x in answ[1]]
# sock.sendall('checkid '.encode() + str(_id).encode()) # 'checkid <id>'
# print(sock.recv(1488).strip(b'\x00').decode('utf-8')) # transaction body, or "not found"
# sock.sendall('show 0 1'.encode()) # 'show'
# transactions = sock.recv(1488).strip(b'\x00').split(b'\n') # list of transactions
# print(answ[1] in transactions) # check that the transaction exists in the db
# sock.sendall('check '.encode() + transactions[0][:-1] + b'x\23') # 'check <ciphertext>'
# print(sock.recv(1488).strip(b'\x00').decode('utf-8')) # "ok" or "error"
# sock.close()
# for _ in range(100):
# thread_pool.apply_async(main)
# thread_pool.close()
# thread_pool.join()
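For completeness, a minimal non-interactive sketch of driving the classes above directly, bypassing the `MachineGun` prompt loop (field values are made up, and a service listening on 0.0.0.0:5051 is assumed, as in `Sender.__init__`):

```python
# Build a transfer message and send it; Sender prints the parsed answer.
msg = Transfer()
msg.to = 'alice'
msg.from_ = 'bob'
msg.value = 100.0
msg.comment = 'rent'
Sender(Stage.TRANSFER, msg).handle_message()  # prints "id: <id> <base64 blob>"
```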
| 2.6875
| 3
|
reportParsing/auditReport_1_auditor.py
|
ypspy/disclosureSimilarity
| 0
|
12777855
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 13 15:19:08 2020
@author: user
"""
from bs4 import BeautifulSoup
import os
import glob
import pandas as pd
import numpy as np
from tqdm import tqdm
# 1. Change to the working directory
os.chdir("C:\data\\") # change to the working directory
# 2. List the paths of the required documents in the target folders
pathList = []
for path in tqdm([".\A001_1999\\", ".\A001_2000\\", ".\A001_2001\\",
".\A001_2002\\", ".\A001_2003\\", ".\A001_2004\\",
".\A001_2005\\", ".\A001_2006\\", ".\A001_2007\\",
".\A001_2008\\", ".\A001_2009\\", ".\A001_2010\\",
".\A001_2011\\", ".\A001_2012\\", ".\A001_2013\\",
".\A001_2014\\", ".\A001_2015\\", ".\A001_2016\\",
".\A001_2017\\", ".\A001_2018\\", ".\A001_2019\\",
".\A001_2020\\",
]):
path1 = path + "* 감사보고서_감*.*" # 필요한 Keyword 입력
pathSep = glob.glob(path1)
path2 = path + "* 연결감사보고서_감*.*" # 필요한 Keyword 입력
pathCon = glob.glob(path2)
pathList = pathList + pathSep + pathCon
# 3. Remove files flagged as "duplicated" during data collection
pathList = [x for x in pathList if "duplicated" not in x]
# 4. Keep only December fiscal year-ends
pathList = [x for x in pathList if ".12)" in x]
# Tabulate the collected data using the path list information
PathListDf = pd.DataFrame(pathList)
df = pd.DataFrame([x.split("_") for x in pathList])
# Generate Unique Key
df["path"] = PathListDf[0]
df["con"] = df[6].str.contains("연결")
df['con'] = np.where(df['con']==True, "C", "S")
df['amend'] = df[6].str.contains("정정")
df['amend'] = np.where(df['amend']==True, "A", "B")
df["key"] = df[2] + df[6].str.slice(stop=10) + df["con"] \
+ df["amend"] + df[5] + df[8] + df[10]
# sort by Entity
df = df.sort_values(by=[10, 5, "con", 2, 6, "amend"],
ascending=[True, True, True, False, False, True])
# Remove duplicates
df["duplc"] = df.duplicated(subset=["key"], keep=False)
isTrue = df[df["duplc"] == True]
df = df.drop_duplicates(subset=["key"])
df = df.drop([0, 1, 14, "duplc"], axis=1)
# Path out
pathListOut = df["path"].tolist()
result = []
for file in tqdm(pathListOut, desc="Main Loop"):
html = open(file, "r", encoding="utf-8")
soup = BeautifulSoup(html, "lxml")
html.close()
firmName = ''
for i in soup.find_all('p'):
pText = ''.join(i.text.split())
if "회계법인" in pText:
firmName = pText
if "감사반" in pText:
firmName = pText
for i in soup.find_all('td'):
pText = ''.join(i.text.split())
if "회계법인" in pText:
firmName = pText
if "감사반" in pText:
firmName = pText
if firmName == '':
firmName = "누락"
result.append(firmName)
df["GAAP"] = result
df = df[["key", 10, 5, "GAAP"]]
os.chdir("C:\data\\financials\\")
df.to_csv("auditReport_1_auditor.txt")
| 2.09375
| 2
|
mytrade/form/fields.py
|
hellwen/mytrade
| 0
|
12777856
|
import time
import datetime
import itertools
from wtforms import fields  # , widgets
from wtforms.validators import ValidationError
try:
from wtforms.fields import _unset_value as unset_value
except ImportError:
from wtforms.utils import unset_value
from .widgets import (
DateTimePickerWidget,
TimePickerWidget,
Select2Widget,
Select2TagsWidget,
InlineFieldListWidget,
InlineFormWidget,
AjaxSelect2Widget,
XEditableWidget,
)
from mytrade.utils import _
"""
An understanding of WTForms's Custom Widgets is helpful for understanding this code: http://wtforms.simplecodes.com/docs/0.6.2/widgets.html#custom-widgets
"""
class DateTimeField(fields.DateTimeField):
"""
Allows modifying the datetime format of a DateTimeField using form_args.
"""
widget = DateTimePickerWidget()
def __init__(self, label=None, validators=None, format=None, **kwargs):
"""
Constructor
:param label:
Label
:param validators:
Field validators
:param format:
Format for text to date conversion. Defaults to '%Y-%m-%d %H:%M:%S'
:param kwargs:
Any additional parameters
"""
super(DateTimeField, self).__init__(label, validators, **kwargs)
self.format = format or '%Y-%m-%d %H:%M:%S'
class TimeField(fields.Field):
"""
A text field which stores a `datetime.time` object.
Accepts time string in multiple formats: 20:10, 20:10:00, 10:00 am, 9:30pm, etc.
"""
widget = TimePickerWidget()
def __init__(self, label=None, validators=None, formats=None,
default_format=None, widget_format=None, **kwargs):
"""
Constructor
:param label:
Label
:param validators:
Field validators
:param formats:
Supported time formats, as an enumerable.
:param default_format:
Default time format. Defaults to '%H:%M:%S'
:param kwargs:
Any additional parameters
"""
super(TimeField, self).__init__(label, validators, **kwargs)
self.formats = formats or ('%H:%M:%S', '%H:%M',
'%I:%M:%S%p', '%I:%M%p',
'%I:%M:%S %p', '%I:%M %p')
self.default_format = default_format or '%H:%M:%S'
def _value(self):
if self.raw_data:
return u' '.join(self.raw_data)
elif self.data is not None:
return self.data.strftime(self.default_format)
else:
return u''
def process_formdata(self, valuelist):
if valuelist:
date_str = u' '.join(valuelist)
if date_str.strip():
for format in self.formats:
try:
timetuple = time.strptime(date_str, format)
self.data = datetime.time(timetuple.tm_hour,
timetuple.tm_min,
timetuple.tm_sec)
return
except ValueError:
pass
raise ValueError(_('Invalid time format'))
else:
self.data = None
class Select2Field(fields.SelectField):
"""
`Select2 <https://github.com/ivaynberg/select2>`_ styled select widget.
You must include select2.js, form-x.x.x.js and select2 stylesheet for it to work.
"""
widget = Select2Widget()
def __init__(self, label=None, validators=None, coerce=str,
choices=None, allow_blank=False, blank_text=None,
**kwargs):
super(Select2Field, self).__init__(
label, validators, coerce, choices, **kwargs
)
self.allow_blank = allow_blank
self.blank_text = blank_text or ' '
def iter_choices(self):
if self.allow_blank:
yield (u'__None', self.blank_text, self.data is None)
if self.choices:
for value, label in self.choices:
yield (value, label, self.coerce(value) == self.data)
def process_data(self, value):
if value is None:
self.data = None
else:
try:
self.data = self.coerce(value)
except (ValueError, TypeError):
self.data = None
def process_formdata(self, valuelist):
if valuelist:
if valuelist[0] == '__None':
self.data = None
else:
try:
self.data = self.coerce(valuelist[0])
except ValueError:
raise ValueError(self.gettext(u'Invalid Choice: could not coerce'))
def pre_validate(self, form):
if self.allow_blank and self.data is None:
return
super(Select2Field, self).pre_validate(form)
class Select2TagsField(fields.StringField):
"""`Select2 <http://ivaynberg.github.com/select2/#tags>`_ styled text field.
You must include select2.js, form-x.x.x.js and select2 stylesheet for it to work.
"""
widget = Select2TagsWidget()
def __init__(self, label=None, validators=None, save_as_list=False, coerce=str, **kwargs):
"""Initialization
:param save_as_list:
If `True` then populate ``obj`` using list else string
"""
self.save_as_list = save_as_list
self.coerce = coerce
super(Select2TagsField, self).__init__(label, validators, **kwargs)
def process_formdata(self, valuelist):
if self.save_as_list:
self.data = [self.coerce(v.strip()) for v in valuelist[0].split(',') if v.strip()]
else:
self.data = self.coerce(valuelist[0])
def _value(self):
if isinstance(self.data, (list, tuple)):
return u','.join(v for v in self.data)
elif self.data:
return self.data
else:
return u''
class InlineFieldList(fields.FieldList):
widget = InlineFieldListWidget()
def __init__(self, *args, **kwargs):
super(InlineFieldList, self).__init__(*args, **kwargs)
def __call__(self, **kwargs):
# Create template
meta = getattr(self, 'meta', None)
if meta:
template = self.unbound_field.bind(form=None, name='', _meta=meta)
else:
template = self.unbound_field.bind(form=None, name='')
# Small hack to remove separator from FormField
if isinstance(template, fields.FormField):
template.separator = ''
template.process(None)
return self.widget(self,
template=template,
check=self.display_row_controls,
**kwargs)
def display_row_controls(self, field):
return True
def process(self, formdata, data=None):
res = super(InlineFieldList, self).process(formdata, data)
# Postprocess - contribute flag
if formdata:
for f in self.entries:
key = 'del-%s' % f.id
f._should_delete = key in formdata
return res
def validate(self, form, extra_validators=tuple()):
"""
Validate this FieldList.
Note that FieldList validation differs from normal field validation in
that FieldList validates all its enclosed fields first before running any
of its own validators.
"""
self.errors = []
# Run validators on all entries within
for subfield in self.entries:
if not self.should_delete(subfield) and not subfield.validate(form):
self.errors.append(subfield.errors)
chain = itertools.chain(self.validators, extra_validators)
self._run_validation_chain(form, chain)
return len(self.errors) == 0
def should_delete(self, field):
return getattr(field, '_should_delete', False)
def populate_obj(self, obj, name):
values = getattr(obj, name, None)
try:
ivalues = iter(values)
except TypeError:
ivalues = iter([])
candidates = itertools.chain(ivalues, itertools.repeat(None))
_fake = type(str('_fake'), (object, ), {})
output = []
for field, data in zip(self.entries, candidates):
if not self.should_delete(field):
fake_obj = _fake()
fake_obj.data = data
field.populate_obj(fake_obj, 'data')
output.append(fake_obj.data)
setattr(obj, name, output)
class InlineFormField(fields.FormField):
"""
Inline version of the ``FormField`` widget.
"""
widget = InlineFormWidget()
class InlineModelFormField(fields.FormField):
"""
Customized ``FormField``.
Excludes model primary key from the `populate_obj` and
handles `should_delete` flag.
"""
widget = InlineFormWidget()
def __init__(self, form_class, pk, form_opts=None, **kwargs):
super(InlineModelFormField, self).__init__(form_class, **kwargs)
self._pk = pk
self.form_opts = form_opts
def get_pk(self):
return getattr(self.form, self._pk).data
def populate_obj(self, obj, name):
for name, field in self.form._fields.items():
if name != self._pk:
field.populate_obj(obj, name)
class ListEditableFieldList(fields.FieldList):
"""
Modified FieldList to allow for alphanumeric primary keys.
Used in the editable list view.
"""
widget = XEditableWidget()
def __init__(self, *args, **kwargs):
super(ListEditableFieldList, self).__init__(*args, **kwargs)
# min_entries = 1 is required for the widget to determine the type
self.min_entries = 1
def _extract_indices(self, prefix, formdata):
offset = len(prefix) + 1
for name in formdata:
# selects only relevant field (not CSRF, other fields, etc)
if name.startswith(prefix):
# exclude offset (prefix-), remaining text is the index
yield name[offset:]
def _add_entry(self, formdata=None, data=unset_value, index=None):
assert not self.max_entries or len(self.entries) < self.max_entries, \
'You cannot have more than max_entries entries in this FieldList'
if index is None:
index = self.last_index + 1
self.last_index = index
# '%s-%s' instead of '%s-%d' to allow alphanumeric
name = '%s-%s' % (self.short_name, index)
id = '%s-%s' % (self.id, index)
# support both wtforms 1 and 2
meta = getattr(self, 'meta', None)
if meta:
field = self.unbound_field.bind(
form=None, name=name, prefix=self._prefix, id=id, _meta=meta
)
else:
field = self.unbound_field.bind(
form=None, name=name, prefix=self._prefix, id=id
)
field.process(formdata, data)
self.entries.append(field)
return field
def populate_obj(self, obj, name):
# return data from first item, instead of a list of items
setattr(obj, name, self.data.pop())
class AjaxSelectField(fields.SelectFieldBase):
"""
Ajax Model Select Field
"""
widget = AjaxSelect2Widget()
separator = ','
def __init__(self, loader, label=None, validators=None, allow_blank=False, blank_text=u'', **kwargs):
super(AjaxSelectField, self).__init__(label, validators, **kwargs)
self.loader = loader
self.allow_blank = allow_blank
self.blank_text = blank_text
def _get_data(self):
if self._formdata:
model = self.loader.get_one(self._formdata)
if model is not None:
self._set_data(model)
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def _format_item(self, item):
value = self.loader.format(self.data)
return (value[0], value[1], True)
def process_formdata(self, valuelist):
if valuelist:
if self.allow_blank and valuelist[0] == u'__None':
self.data = None
else:
self._data = None
self._formdata = valuelist[0]
def pre_validate(self, form):
if not self.allow_blank and self.data is None:
raise ValidationError(self.gettext(u'Not a valid choice'))
class AjaxSelectMultipleField(AjaxSelectField):
"""
Ajax-enabled model multi-select field.
"""
widget = AjaxSelect2Widget(multiple=True)
def __init__(self, loader, label=None, validators=None, default=None, **kwargs):
if default is None:
default = []
super(AjaxSelectMultipleField, self).__init__(loader, label, validators, default=default, **kwargs)
self._invalid_formdata = False
def _get_data(self):
formdata = self._formdata
if formdata:
data = []
# TODO: Optimize?
for item in formdata:
model = self.loader.get_one(item) if item else None
if model:
data.append(model)
else:
self._invalid_formdata = True
self._set_data(data)
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def process_formdata(self, valuelist):
self._formdata = set()
for field in valuelist:
for n in field.split(self.separator):
self._formdata.add(n)
def pre_validate(self, form):
if self._invalid_formdata:
raise ValidationError(self.gettext(u'Not a valid choice'))
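A hedged usage sketch of two of the fields above, processing form data outside a web request; it assumes werkzeug is available for `MultiDict` and that this module's own widget imports resolve (nothing is rendered, only data processing is exercised):

```python
from werkzeug.datastructures import MultiDict
from wtforms import Form

class TradeForm(Form):
    opened_at = DateTimeField('Opened at')              # default '%Y-%m-%d %H:%M:%S'
    tags = Select2TagsField('Tags', save_as_list=True)  # comma-separated input

form = TradeForm(formdata=MultiDict({'opened_at': '2024-01-02 09:30:00',
                                     'tags': 'fx,swing'}))
print(form.opened_at.data)  # datetime.datetime(2024, 1, 2, 9, 30)
print(form.tags.data)       # ['fx', 'swing']
```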
| 3
| 3
|
novel_tools/processors/transformers/path_transformer.py
|
ALMSIVI/novel_tools
| 1
|
12777857
|
from pathlib import Path
from novel_tools.framework import Processor
from novel_tools.common import NovelData, ACC, FieldMetadata
class PathTransformer(Processor, ACC):
"""
Given `in_dir`, this transformer will replace all `Path` fields with the paths relative to its `in_dir`.
"""
@staticmethod
def required_fields() -> list[FieldMetadata]:
return [
FieldMetadata('in_dir', 'Path',
description='The parent directory for all the novel data.'),
FieldMetadata('fields', 'list[str]', default=['source'],
description='A list of fields of type `Path` to transform.')
]
def __init__(self, args):
args = self.extract_fields(args)
self.in_dir = args['in_dir']
self.fields = args['fields']
def process(self, data: NovelData) -> NovelData:
for field in self.fields:
path = data.get(field, None)
if isinstance(path, Path):
data.set(**{field: path.relative_to(self.in_dir)})
return data
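A hypothetical usage sketch; the dict-style constructor argument and the keyword access on `NovelData` are assumptions based only on how `__init__` and `process` use them above:

```python
from pathlib import Path

# Paths below are placeholders.
transformer = PathTransformer({'in_dir': Path('/books/my_novel')})
# A NovelData whose 'source' field is Path('/books/my_novel/vol1/ch01.txt')
# would come back from transformer.process(data) with
# data.get('source') == Path('vol1/ch01.txt').
```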
| 2.578125
| 3
|
prediction/views.py
|
enisteper1/AWS-Deployed-ML
| 0
|
12777858
|
<gh_stars>0
from django.shortcuts import render
from django.http import HttpResponse
from datetime import datetime
from prediction.models import Data
from .forms import DataForm
from .titanic_automated_prediction import predict_person
# Create your views here.
def main(request):
if request.method == "POST":
form = DataForm(request.POST)
else:
form = DataForm()
try:
# Wrap in try/except to avoid errors when the form has not been submitted yet.
# Run predict_person to determine whether the passenger survived.
ml_pred = predict_person(passengerid=int(request.POST["PassengerId"]), pclass=int(request.POST["Pclass"]), name=request.POST["Name"], sex=request.POST["Sex"],
age=float(request.POST["Age"]), sibsp=int(request.POST["SibSp"]), parch=int(request.POST["Parch"]), ticket=request.POST["Ticket"],
fare=float(request.POST["Fare"]), cabin=request.POST["Cabin"], embarked=request.POST["Embarked"])
# If the model predicts survival, return the "survived" page
if ml_pred:
return render(request, "prediction/main_survived.html", {"form": form})
# Otherwise return the "not survived" page
else:
return render(request, "prediction/main_not_survived.html", {"form": form})
except Exception as ex:
# At initial page load this raises an exception because the POST data is empty, so the basic HTML page is returned.
print(ex)
return render(request, "prediction/main.html", {"form": form})
| 2.65625
| 3
|
troposphere/openstack/heat.py
|
jpvowen/troposphere
| 1
|
12777859
|
# -*- coding: utf-8 -*-
"""
Openstack Heat
--------------
Due to the strange nature of the OpenStack compatibility layer, some values
that should be integers fail to validate and need to be represented as
strings. For this reason, we duplicate the AWS::AutoScaling::AutoScalingGroup
and change these types.
"""
# Copyright (c) 2012-2013, <NAME> <<EMAIL>>
# Copyright (c) 2014, <NAME> <<EMAIL>>
# All rights reserved.
#
# See LICENSE file for full license.
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
from troposphere import AWSObject
from troposphere.validators import integer
# ----------------------------------------------------------------------------
# Class: AWSAutoScalingGroup
# ----------------------------------------------------------------------------
class AWSAutoScalingGroup(AWSObject):
"""Fix issues with OpenStack compatability layer.
Due to the strange nature of the OpenStack compatability layer, some
values that should be integers fail to validate and need to be
represented as strings. For this reason, we duplicate the
AWS::AutoScaling::AutoScalingGroup and change these types.
"""
resource_type = "AWS::AutoScaling::AutoScalingGroup"
props = {
'AvailabilityZones': (list, True),
'Cooldown': (integer, False),
'DesiredCapacity': (basestring, False),
'HealthCheckGracePeriod': (integer, False),
'HealthCheckType': (basestring, False),
'LaunchConfigurationName': (basestring, True),
'LoadBalancerNames': (list, False),
'MaxSize': (basestring, True),
'MinSize': (basestring, True),
'Tags': (list, False),
'VPCZoneIdentifier': (list, False),
}
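An illustrative usage sketch (resource name and values are placeholders) following standard troposphere conventions; note that `MinSize`/`MaxSize` are deliberately passed as strings, matching the props above, and that this module targets Python 2 (it uses `basestring`):

```python
from troposphere import Template

t = Template()
t.add_resource(AWSAutoScalingGroup(
    "WebServerGroup",
    AvailabilityZones=["nova"],
    LaunchConfigurationName="web-launch-config",
    MinSize="1",
    MaxSize="4",
))
print(t.to_json())
```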
| 1.484375
| 1
|
ixbrl_parse/dataframe.py
|
cybermaggedon/ixbrl-parse
| 1
|
12777860
|
import pandas as pd
def values_to_df(values):
data = []
for n, v in values.items():
data.append([
n.localname, v.to_value().get_value(), v.unit
])
return pd.DataFrame(
data,
columns = ["name", "value", "unit"]
)
def instance_to_df(inst):
columns = ["name", "value", "unit", "entity", "scheme", "start", "end",
"instant"]
columnset = set(columns)
data = []
for c in inst.contexts.values():
for v in c.values.values():
row = {
"name": v.name.localname,
"value": str(v.to_value().get_value()),
"unit": v.to_value().get_unit(),
}
if c.entity:
row["entity"] = c.entity.id
row["scheme"] = c.entity.scheme
if c.period:
row["start"] = c.period.start
row["end"] = c.period.end
if c.instant:
row["instant"] = c.instant.instant
for dim in c.dimensions:
d = dim.dimension.localname
v = dim.value.localname
if d not in columnset:
columns.append(d)
columnset.add(d)
row[d] = v
data.append(row)
return pd.DataFrame(data, columns=columns)
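A self-contained smoke test of `values_to_df` using stand-in objects that mimic only the attribute accesses the function relies on (`n.localname`, `v.to_value().get_value()`, `v.unit`); they are not the real ixbrl-parse types:

```python
class _FakeName:
    def __init__(self, localname):
        self.localname = localname

class _FakeValue:
    def __init__(self, value, unit):
        self._value, self.unit = value, unit
    def to_value(self):
        return self  # exposes get_value() like the real typed-value object
    def get_value(self):
        return self._value

values = {_FakeName("Turnover"): _FakeValue(125000, "GBP")}
print(values_to_df(values))
#        name   value unit
# 0  Turnover  125000  GBP
```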
| 2.71875
| 3
|
geoscreens/labelstudio/core.py
|
GiscardBiamby/geo
| 1
|
12777861
|
import json
import sys
from copy import deepcopy
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple, Union, cast
from label_studio_sdk import Client, Project
from requests import Response
from tqdm.contrib.bells import tqdm
def get_labelstudio_export_from_api(
project: Project, export_type: str, download_all_tasks: str = "true"
) -> Union[List[Dict], Response]:
response = project.make_request(
method="GET",
url=f"/api/projects/{project.id}/export?exportType={export_type}&download_all_tasks={download_all_tasks}",
timeout=500,
)
if response.headers["Content-Type"] == "application/zip":
return response
else:
export = response.json()
return export
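A hedged usage sketch; the URL, API key, and project id are placeholders, and a reachable Label Studio instance is assumed:

```python
from label_studio_sdk import Client
from requests import Response

ls = Client(url="http://localhost:8080", api_key="<your-token>")
project = ls.get_project(1)
export = get_labelstudio_export_from_api(project, export_type="JSON")
if isinstance(export, Response):   # zip payload
    print("zip export:", len(export.content), "bytes")
else:                              # list of task dicts
    print(len(export), "tasks exported")
```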
| 2.234375
| 2
|
app.py
|
GeethZin/Biosphere
| 0
|
12777862
|
import flask
import pyodbc
# Initializes app and database connection
app = flask.Flask('biosphere', template_folder='templates')
db_conn = conn = pyodbc.connect(
'Driver={SQL Server};'
'Server=DESKTOP-QR078NF\SQLEXPRESS;'
'Database=BIOSPHERE;'
'Trusted_Connection=yes;'
)
# Function to handle the root path '/'
@app.route('/')
@app.route('/home')
def home():
my_user = {'first': 'Luciano', 'last': 'Santos'}
return flask.render_template('home.html', user=my_user)
# given a result row, extracts and returns the species data
def extract_species(row):
species = {}
species['id'] = row[0]
species['genus'] = row[1]
species['species'] = row[2]
species['subspecies'] = row[3]
species['name'] = species['genus'] + ' ' + species['species']
if species['subspecies'] is not None:
species['name'] += ' ' + species['subspecies']
return species
# Function to handle the species path '/species'
@app.route('/species', defaults={'id': None})
@app.route('/species/<id>')
def species(id):
cursor = db_conn.cursor()
if id is None:
cursor.execute('SELECT * FROM Bio.Species')
all_species = []
for row in cursor:
data = extract_species(row)
all_species.append(data)
return flask.render_template('species.html', species=all_species)
else:
cursor.execute('SELECT * FROM Bio.Species WHERE sp_id=' + id)
row = cursor.fetchone()
if row is None:
return flask.render_template('error.html', message='Species not found!')
data = extract_species(row)
return flask.render_template('species_detail.html', species=data)
# given a result row, extracts and returns the author data
def extract_author(row):
author = {}
author['id'] = row[0]
author['first_name'] = row[1]
author['middle_name'] = row[2]
author['last_name'] = row[3]
author['birthdate'] = row[4]
author['name'] = author['first_name'] + ' '
if author['middle_name'] is not None:
author['name'] += author['middle_name'] + ' '
author['name'] += author['last_name']
return author
@app.route('/authors', defaults={'id': None})
@app.route('/authors/<id>')
def authors(id):
cursor = db_conn.cursor()
if id is None:
cursor.execute('SELECT * FROM Bio.Author')
all_authors = []
for row in cursor:
data = extract_author(row)
all_authors.append(data)
return flask.render_template('authors.html', authors=all_authors)
else:
cursor.execute('SELECT * FROM Bio.Author WHERE au_id=' + id)
all_authors = []
row = cursor.fetchone()
if row is None:
return flask.render_template('error.html', message='Author not found!')
data = extract_author(row)
return flask.render_template('author_detail.html', author=data)
# given a result row, extracts and returns the species data
def extract_species1(row):
species = {}
species['id'] = row[0]
species['genus'] = row[1]
species['species'] = row[2]
species['subspecies'] = row[3]
species['name'] = species['genus'] + ' ' + species['species']
if species['subspecies'] is not None:
species['name'] += ' ' + species['subspecies']
return species
# Function to handle the species path '/species1'
@app.route('/species1', defaults={'id': None})
@app.route('/species1/<id>')
def speciescom(id):
cursor = db_conn.cursor()
if id is None:
cursor.execute('SELECT * FROM Bio.Species')
all_species = []
for row in cursor:
data = extract_species1(row)
all_species.append(data)
return flask.render_template('species1.html', species=all_species)
else:
cursor.execute('SELECT * FROM Bio.Species WHERE sp_id=' + id)
row = cursor.fetchone()
if row is None:
return flask.render_template('error.html', message='Species not found!')
data = extract_species1(row)
return flask.render_template('species_detail.html', species=data)
def extract_publication(row):
publication = {}
publication['id'] = row[0]
publication['year'] = row[1]
publication['title'] = row[2]
publication['startPubli'] = row[3]
publication['endPubli'] = row[4]
publication['fname'] = row[5]
publication['lname'] = row[6]
publication['name'] = publication['title']
return publication
def extract_publication2(row):
publication = {}
publication['id'] = row[0]
publication['year'] = row[1]
publication['title'] = row[2]
publication['start'] = row[3]
publication['end'] = row[4]
publication['fname'] = row[5]
publication['lname'] = row[6]
return publication
@app.route('/publication', defaults={'id': None})
@app.route('/publication/<id>')
def publication(id):
cursor = db_conn.cursor()
if id is None:
cursor.execute("SELECT * FROM Bio.Publication")
all_publication = []
for row in cursor:
data = extract_publication(row)
all_publication.append(data)
return flask.render_template('publication.html', publication=all_publication)
else:
cursor.execute("""\
SELECT p.pu_id, p.pu_year, p.pu_title, p.pu_page_start, p.pu_page_end, a.au_fname, a.au_lname
FROM Bio.Publication p, Bio.Au_Writes_Pu w, Bio.Author a
WHERE p.pu_id = w.pu_id AND w.au_id = a.au_id and p.pu_id =
""" + id)
all_publication = []
row = cursor.fetchone()
if row is None:
return flask.render_template('error.html', message='Publication not found!')
data = extract_publication2(row)
return flask.render_template('publication_details.html', publication=data)
# Starts listening for requests...
app.run(port=8080, use_reloader=True)
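Note that the id lookups above build SQL by string concatenation, which is open to SQL injection; a safer sketch using pyodbc's `?` placeholders (shown for illustration, not as a drop-in patch to the routes above):

```python
def fetch_species(species_id):
    # Parameterized query: the driver binds the value instead of splicing it
    # into the SQL text.
    cursor = db_conn.cursor()
    cursor.execute('SELECT * FROM Bio.Species WHERE sp_id = ?', (species_id,))
    return cursor.fetchone()
```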
| 2.734375
| 3
|
classes/basic_cfg.py
|
A26mike/Arma-Python-server-manager
| 0
|
12777863
|
<reponame>A26mike/Arma-Python-server-manager
class BasicCFG:
"""BasicCFG [CFG Calculator for undocumented ]
Args:
uploadSpeed ([int]): [In MB/s]
socket_init ([int]): [description]
socket_min ([int]): [description]
maxPacketSize (int, optional): [ISP MTU settings ]. Defaults to 1400.
"""
def __init__(self, uploadSpeed, socket_init, socket_min, maxPacketSize = 1400):
self.maxPacketSize = maxPacketSize
self.uploadSpeed = uploadSpeed
self.socket_int = socket_init
self.socket_min = socket_min
def mbits_to_bits (self, uploadSpeed):
bits = 10000000 * uploadSpeed
return bits
def mbits_to_bytes(self,uploadSpeed):
bytes = 125000 * uploadSpeed
return bytes
def print_arma_cfg(self):
"""print_arma_arma_cfg [Writes the Server basic.cfg]
"""
cfg_dict = {
"maxPacketSize_Sockets": self.maxPacketSize,
"initBandwidth_Sockets": self.mbits_to_bytes(self.socket_int),
"MinBandwidth_Sockets": self.mbits_to_bytes(self.socket_min),
"MaxBandwidth_Sockets": self.mbits_to_bytes(self.uploadSpeed),
"MinBandwidth_global": self.mbits_to_bits(self.uploadSpeed),
"MaxBandwidth_global": self.mbits_to_bytes(self.uploadSpeed)
}
arma_cfg = f"""
class sockets
{{
maxPacketSize = {cfg_dict.get("maxPacketSize_Sockets")};
initBandwidth = {cfg_dict.get("initBandwidth_Sockets")}; //{int(cfg_dict.get("initBandwidth_Sockets" ) / 125000)} mb/s
MinBandwidth = {cfg_dict.get("MinBandwidth_Sockets")}; //(64 kbit)
MaxBandwidth = {cfg_dict.get("MaxBandwidth_Sockets")}; //(16 Mbit) 250x minBandwith
}};
MinBandwidth = {cfg_dict.get("MinBandwidth_global")};
// MaxBandwidth = {cfg_dict.get("MaxBandwidth_global")}; // Broken do not use
MaxMsgSend = 2048; // Maximum number of messages that can be sent in one simulation cycle. Increasing this value can decrease lag on high upload bandwidth servers. Default: 128
MaxSizeGuaranteed = 512; // Maximum size of guaranteed packet in bytes (without headers). Small messages are packed to larger frames. Guaranteed messages are used for non-repetitive events like shooting. Default: 512
MaxSizeNonguaranteed = 256; // Maximum size of non-guaranteed packet in bytes (without headers). Non-guaranteed messages are used for repetitive updates like soldier or vehicle position. Increasing this value may improve bandwidth requirement, but it may increase lag. Default: 256
MinErrorToSend = 0.003; // Minimal error to send updates across network. Using a smaller value can make units observed by binoculars or sniper rifle to move smoother. Default: 0.001
MinErrorToSendNear = 0.02; // Minimal error to send updates across network for near units. Using larger value can reduce traffic sent for near units. Used to control client to server traffic as well. Default: 0.01
MaxCustomFileSize = 0; // (bytes) Users with custom face or custom sound larger than this size are kicked when trying to connect.
"""
f = open("basic.cfg", "w")
f.write(arma_cfg)
f.close()
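An illustrative usage sketch (the numbers are placeholders): a 50 Mb/s upload link with 2 Mb/s initial and 1 Mb/s minimum socket bandwidth, written out to `basic.cfg` in the current directory:

```python
cfg = BasicCFG(uploadSpeed=50, socket_init=2, socket_min=1)
cfg.print_arma_cfg()  # writes basic.cfg next to the script
```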
| 2.46875
| 2
|
Basic_ML/Quantum_Tic_Tac_Toe/tic_tac_toe.py
|
jrclimer/Projects
| 27
|
12777864
|
import itertools
import copy
import re
import math
import random
import shelve
class board(object):
def __init__(self,humans=0,AI1=None,AI2=None):
self.board = {(1,1):[],\
(1,2):[],\
(1,3):[],\
(2,1):[],\
(2,2):[],\
(2,3):[],\
(3,1):[],\
(3,2):[],\
(3,3):[]}
self.turn = 1
self.score = 0
self.end = False
self.humans = humans
self.AI1 = AI1
self.AI2 = AI2
def _possible_moves(self):
moves = []
for key,val in self.board.iteritems():
if val != 'X' and val != 'O':
moves.append(key)
return moves
def playermove(self,square1,square2):
moves = self._possible_moves()
if square1 not in moves:
raise Exception('invalid move')
self.board[square1].append(self.turn)
moves.remove(square1)
if square2 not in moves:
raise Exception('invalid move')
self.board[square2].append(self.turn)
def _find_circuit(self,start,current,traversed,board_copy):
traversed.append(current)
circuit = None
while board_copy[current]:
entry = board_copy[current].pop(0)
for key,val in board_copy.iteritems():
try:
if entry in val:
if key == start:
return traversed
circuit = self._find_circuit(start,key,traversed,board_copy)
if circuit:
return circuit
except:
pass
return False
def _prompt_circuit(self,choice1,choice2):
choice = raw_input("Choose between %s and %s: " % (str(choice1)[:3]+str(choice1)[4:],str(choice2)[:3]+str(choice2)[4:]))
while choice != (str(choice1)[:3]+str(choice1)[4:]) and choice != (str(choice2)[:3]+str(choice2)[4:]):
print "invalid choice"
choice = raw_input("Choose between %s and %s: " % (str(choice1),str(choice2)))
choice = (int(choice[1]),int(choice[3]))
return choice
def _ai_circuit(self,choice1,choice2,AI):
choice = AI.break_circuit(choice1,choice2)
return choice
def _break_circuit(self,square,turn):
self.board[square].remove(turn)
for entry in self.board[square]:
for key,val in self.board.iteritems():
try:
if entry in val and key != square:
pair = key
self._break_circuit(pair,entry)
except:
pass
if turn % 2 == 1:
self.board[square] = 'X'
elif turn % 2 == 0:
self.board[square] = 'O'
def _evaluate_board(self):
if (self.board[(1,1)]=='X' and self.board[(1,2)]=='X' and self.board[(1,3)]=='X'):
self.score += 1
self.end = True
if (self.board[(2,1)]=='X' and self.board[(2,2)]=='X' and self.board[(2,3)]=='X'):
self.score += 1
self.end = True
if (self.board[(3,1)]=='X' and self.board[(3,2)]=='X' and self.board[(3,3)]=='X'):
self.score += 1
self.end = True
if (self.board[(1,1)]=='X' and self.board[(2,1)]=='X' and self.board[(3,1)]=='X'):
self.score += 1
self.end = True
if (self.board[(1,2)]=='X' and self.board[(2,2)]=='X' and self.board[(3,2)]=='X'):
self.score += 1
self.end = True
if (self.board[(1,3)]=='X' and self.board[(2,3)]=='X' and self.board[(3,3)]=='X'):
self.score += 1
self.end = True
if (self.board[(1,1)]=='X' and self.board[(2,2)]=='X' and self.board[(3,3)]=='X'):
self.score += 1
self.end = True
if (self.board[(1,3)]=='X' and self.board[(2,2)]=='X' and self.board[(3,1)]=='X'):
self.score += 1
self.end = True
if (self.board[(1,1)]=='O' and self.board[(1,2)]=='O' and self.board[(1,3)]=='O'):
self.score += -1
self.end = True
if (self.board[(2,1)]=='O' and self.board[(2,2)]=='O' and self.board[(2,3)]=='O'):
self.score += -1
self.end = True
if (self.board[(3,1)]=='O' and self.board[(3,2)]=='O' and self.board[(3,3)]=='O'):
self.score += -1
self.end = True
if (self.board[(1,1)]=='O' and self.board[(2,1)]=='O' and self.board[(3,1)]=='O'):
self.score += -1
self.end = True
if (self.board[(1,2)]=='O' and self.board[(2,2)]=='O' and self.board[(3,2)]=='O'):
self.score += -1
self.end = True
if (self.board[(1,3)]=='O' and self.board[(2,3)]=='O' and self.board[(3,3)]=='O'):
self.score += -1
self.end = True
if (self.board[(1,1)]=='O' and self.board[(2,2)]=='O' and self.board[(3,3)]=='O'):
self.score += -1
self.end = True
if (self.board[(1,3)]=='O' and self.board[(2,2)]=='O' and self.board[(3,1)]=='O'):
self.score += -1
self.end = True
filled_squares = 0
for key,val in self.board.iteritems():
if val =='X' or val =='O':
filled_squares += 1
if filled_squares >= 8:
self.end = True
def display_board(self):
s1 = [' ',' ',' ',' ',' ',' ',' ',' ',' ']
s2 = [' ',' ',' ',' ',' ',' ',' ',' ',' ']
s3 = [' ',' ',' ',' ',' ',' ',' ',' ',' ']
s4 = [' ',' ',' ',' ',' ',' ',' ',' ',' ']
s5 = [' ',' ',' ',' ',' ',' ',' ',' ',' ']
s6 = [' ',' ',' ',' ',' ',' ',' ',' ',' ']
s7 = [' ',' ',' ',' ',' ',' ',' ',' ',' ']
s8 = [' ',' ',' ',' ',' ',' ',' ',' ',' ']
s9 = [' ',' ',' ',' ',' ',' ',' ',' ',' ']
for s,b in zip([s1,s2,s3,s4,s5,s6,s7,s8,s9],[self.board[(1,1)],self.board[(1,2)],self.board[(1,3)],\
self.board[(2,1)],self.board[(2,2)],self.board[(2,3)],self.board[(3,1)],self.board[(3,2)],self.board[(3,3)]]):
if b == 'X':
for i in range(len(s)):
s[i] = 'X'
elif b == 'O':
for i in range(len(s)):
s[i] = 'O'
else:
for i in range(len(b)):
s[i] = str(b[i])
print 'turn %i' % self.turn
print '%s %s %s | %s %s %s | %s %s %s' % (s1[0],s1[1],s1[2],s2[0],s2[1],s2[2],s3[0],s3[1],s3[2])
print '%s %s %s | %s %s %s | %s %s %s' % (s1[3],s1[4],s1[5],s2[3],s2[4],s2[5],s3[3],s3[4],s3[5])
print '%s %s %s | %s %s %s | %s %s %s' % (s1[6],s1[7],s1[8],s2[6],s2[7],s2[8],s3[6],s3[7],s3[8])
print '---------------------'
print '%s %s %s | %s %s %s | %s %s %s' % (s4[0],s4[1],s4[2],s5[0],s5[1],s5[2],s6[0],s6[1],s6[2])
print '%s %s %s | %s %s %s | %s %s %s' % (s4[3],s4[4],s4[5],s5[3],s5[4],s5[5],s6[3],s6[4],s6[5])
print '%s %s %s | %s %s %s | %s %s %s' % (s4[6],s4[7],s4[8],s5[6],s5[7],s5[8],s6[6],s6[7],s6[8])
print '---------------------'
print '%s %s %s | %s %s %s | %s %s %s' % (s7[0],s7[1],s7[2],s8[0],s8[1],s8[2],s9[0],s9[1],s9[2])
print '%s %s %s | %s %s %s | %s %s %s' % (s7[3],s7[4],s7[5],s8[3],s8[4],s8[5],s9[3],s9[4],s9[5])
print '%s %s %s | %s %s %s | %s %s %s' % (s7[6],s7[7],s7[8],s8[6],s8[7],s8[8],s9[6],s9[7],s9[8])
def _prompt_move(self):
moves = self._possible_moves()
square1 = raw_input("Choose your first move in format (row,column): ")
while square1 not in moves:
while not re.match('\([1-9],[1-9]\)',square1):
print "invalid choice"
square1 = raw_input("Choose your first move in format (row,column): ")
square1 = (int(square1[1]),int(square1[3]))
if square1 not in moves:
print "invalid choice"
square1 = raw_input("Choose your first move in format (row,column): ")
moves.remove(square1)
square2 = raw_input("Choose your second move in format (row,column): ")
while square2 not in moves:
while not re.match('\([1-9],[1-9]\)',square2):
print "invalid choice"
square2 = raw_input("Choose your second move in format (row,column): ")
square2 = (int(square2[1]),int(square2[3]))
if square2 not in moves:
print "invalid choice"
square2 = raw_input("Choose your second move in format (row,column): ")
return square1,square2
def _ai_move(self,AI):
moves = self._possible_moves()
square1,square2 = AI.choose_move(moves)
return square1,square2
def play_game(self):
if self.humans == 1:
turn = raw_input("Enter '1' to go first or '2' to go second: ")
while turn != "1" and turn != "2":
print "invalid choice"
turn = raw_input("Enter '1' to go first or '2' to go second: ")
turn = int(turn)-1
while self.end == False:
if self.humans >= 1:
self.display_board()
if self.humans == 1 and (self.turn-turn)%2 == 1:
square1,square2 = self._prompt_move()
self.AI1.opponent_move(square1,square2)
elif self.humans == 1 and (self.turn-turn)%2 == 0:
square1,square2 = self._ai_move(self.AI1)
elif self.humans == 2:
square1,square2 = self._prompt_move()
elif self.humans == 0 and self.turn%2 == 1:
square1,square2 = self._ai_move(self.AI1)
self.AI2.opponent_move(square1,square2)
else:
square1,square2 = self._ai_move(self.AI2)
self.AI1.opponent_move(square1,square2)
self.playermove(square1,square2)
circuit1 = self._find_circuit(square1,square1,[],copy.deepcopy(self.board))
if circuit1:
if self.humans == 1 and (self.turn-turn)%2 == 1:
choice = self._prompt_circuit(square1,square2)
self.AI1.opponent_break_circuit(choice)
elif self.humans == 1 and (self.turn-turn)%2 == 0:
choice = self._ai_circuit(square1,square2,self.AI1)
elif self.humans == 2:
choice = self._prompt_circuit(square1,square2)
elif self.humans == 0 and self.turn%2 == 1:
choice = self._ai_circuit(square1,square2,self.AI1)
self.AI2.opponent_break_circuit(choice)
else:
choice = self._ai_circuit(square1,square2,self.AI2)
self.AI1.opponent_break_circuit(choice)
self._break_circuit(choice,self.turn)
else:
circuit2 = self._find_circuit(square2,square2,[],copy.deepcopy(self.board))
if circuit2:
if self.humans == 1 and (self.turn-turn)%2 == 1:
choice = self._prompt_circuit(square1,square2)
self.AI1.opponent_break_circuit(choice)
elif self.humans == 1 and (self.turn-turn)%2 == 0:
choice = self._ai_circuit(square1,square2,self.AI1)
elif self.humans == 2:
choice = self._prompt_circuit(square1,square2)
elif self.humans == 0 and self.turn%2 == 1:
choice = self._ai_circuit(square1,square2,self.AI1)
self.AI2.opponent_break_circuit(choice)
else:
choice = self._ai_circuit(square1,square2,self.AI2)
self.AI1.opponent_break_circuit(choice)
self._break_circuit(choice,self.turn)
self._evaluate_board()
self.turn += 1
if self.humans >= 1:
self.display_board()
if self.score > 0:
print "Player 1 wins"
elif self.score < 0:
print "Player 2 wins"
else:
print "Tie game"
if self.humans == 1 and turn == 1:
self.AI1.backpropogate(-self.score)
self.AI1.reset()
elif self.humans == 1 and turn == 2:
self.AI1.backpropogate(self.score)
self.AI1.reset()
elif self.humans == 0:
self.AI1.backpropogate(self.score)
self.AI2.backpropogate(-self.score)
self.AI1.reset()
self.AI2.reset()
self.AI1,self.AI2 = self.AI2,self.AI1
self.reset()
def _simulate_game(self):
pass
def reset(self):
self.board = {(1,1):[],
(1,2):[],
(1,3):[],
(2,1):[],
(2,2):[],
(2,3):[],
(3,1):[],
(3,2):[],
(3,3):[]}
self.turn = 1
self.score = 0
self.end = False
class monte_carlo_tree(object):
def __init__(self):
self.entries = {}
self.entries['total_games'] = 0
def choose_move(self,valid_moves,pointer,path):
nodes = []
max_upper_bound = -float("inf")
total_visits = 0
for tuple in itertools.combinations(valid_moves,2):
t_pointer = re.sub(r'\D','',str(tuple))
t_pointer2 = re.sub(r'\D','',str((tuple[1],tuple[0])))
try:
score = self.entries[pointer+t_pointer][0]
visits = self.entries[pointer+t_pointer][1]
total_visits += visits
nodes.append(t_pointer)
except:
try:
score = self.entries[pointer+t_pointer2][0]
visits = self.entries[pointer+t_pointer2][1]
total_visits += visits
nodes.append(t_pointer2)
except:
self.entries[pointer+t_pointer] = (0,0)
nodes.append(t_pointer)
if self.entries['total_games'] == 0:
choice = random.choice(nodes)
choice1 = (int(choice[0]),int(choice[1]))
choice2 = (int(choice[2]),int(choice[3]))
path.append(pointer+choice)
pointer = pointer+choice
return choice1,choice2,pointer,path
for n in nodes:
score = self.entries[pointer+n][0]
visits = self.entries[pointer+n][1]
upper_bound = float(score)/(visits+1.) + (2*math.log(total_visits+1.)/(visits+1.))**0.5
if upper_bound > max_upper_bound:
choice = n
choice1 = (int(n[0]),int(n[1]))
choice2 = (int(n[2]),int(n[3]))
max_upper_bound = upper_bound
path.append(pointer+choice)
pointer = pointer+choice
return choice1,choice2,pointer,path
def opponent_move(self,square1,square2,pointer):
tuple = (square1,square2)
t_pointer = re.sub(r'\D','',str(tuple))
t_pointer2 = re.sub(r'\D','',str((tuple[1],tuple[0])))
try:
n = self.entries[pointer+t_pointer]
pointer += t_pointer
except:
try:
n = self.entries[pointer+t_pointer2]
pointer += t_pointer2
except:
self.entries[pointer+t_pointer] = (0,0)
pointer += t_pointer
return pointer
def break_circuit(self,choice1,choice2,pointer,path):
t_pointer1 = re.sub(r'\D','',str(choice1))+'c'
t_pointer2 = re.sub(r'\D','',str(choice2))+'c'
max_upper_bound = -float("inf")
total_visits = 0
for t_pointer in (t_pointer1,t_pointer2):
try:
score = self.entries[pointer+t_pointer][0]
visits = self.entries[pointer+t_pointer][1]
total_visits += visits
except:
self.entries[pointer+t_pointer] = (0,0)
if self.entries['total_games'] == 0:
n = random.choice([t_pointer1,t_pointer2])
choice = (int(n[0]),int(n[1]))
path.append(pointer+n)
pointer = pointer+n
return choice,pointer,path
for t_pointer in (t_pointer1,t_pointer2):
score = self.entries[pointer+t_pointer][0]
visits = self.entries[pointer+t_pointer][1]
upper_bound = float(score)/(visits+1.) + (2*math.log(total_visits+1.)/(visits+1.))**0.5
if upper_bound > max_upper_bound:
n = t_pointer
choice = (int(t_pointer[0]),int(t_pointer[1]))
max_upper_bound = upper_bound
path.append(pointer+n)
pointer = pointer+n
return choice,pointer,path
def opponent_break_circuit(self,choice,pointer):
t_pointer = re.sub(r'\D','',str(choice))+'c'
try:
n = self.entries[pointer+t_pointer]
pointer += t_pointer
except:
self.entries[pointer+t_pointer] = (0,0)
pointer += t_pointer
return pointer
def backpropogate(self,result,path):
for p in path:
score = self.entries[p][0]
visits = self.entries[p][1]
if result > 0:
score += 1
elif result < 0:
score -= 1
visits += 1
self.entries[p] = (score,visits)
def reset(self):
self.entries['total_games'] += 1
class dumb_ai(object):
def __init__(self):
self.wins = 0
self.games = 0
def choose_move(self,valid_moves):
square1 = random.choice(valid_moves)
valid_moves.remove(square1)
square2 = random.choice(valid_moves)
return square1,square2
def opponent_move(self,square1,square2):
pass
def break_circuit(self,choice1,choice2):
choices = [choice1,choice2]
choice = random.choice(choices)
return choice
def opponent_break_circuit(self,choice):
pass
def backpropogate(self,result):
self.games += 1
if result > 0:
self.wins += 1
def reset(self):
pass
class mcts_ai(object):
def __init__(self,mcts):
self.mcts = mcts
self.wins = 0
self.games = 0
self.path = []
self.pointer = ''
def choose_move(self,valid_moves):
square1,square2,self.pointer,self.path = self.mcts.choose_move(valid_moves,self.pointer,self.path)
return square1,square2
def opponent_move(self,square1,square2):
self.pointer = self.mcts.opponent_move(square1,square2,self.pointer)
def break_circuit(self,choice1,choice2):
choice,self.pointer,self.path = self.mcts.break_circuit(choice1,choice2,self.pointer,self.path)
return choice
def opponent_break_circuit(self,choice):
self.pointer = self.mcts.opponent_break_circuit(choice,self.pointer)
def backpropogate(self,result):
self.games += 1
if result > 0:
self.wins += 1
self.mcts.backpropogate(result,self.path)
def reset(self):
self.mcts.reset()
self.path = []
self.pointer = ''
mcts = monte_carlo_tree()
AI1 = mcts_ai(mcts)
entries = shelve.open('mcts.shelve')
mcts.entries = entries
try:
test = mcts.entries['total_games']
except:
mcts.entries['total_games'] = 0
for j in range(1000):
AI2 = dumb_ai()
game = board(humans=0,AI1=AI1,AI2=AI2)
for i in range(5000):
game.play_game()
print "games played: %i" % (mcts.entries['total_games'])
AI2 = mcts_ai(mcts)
game = board(humans=0,AI1=AI1,AI2=AI2)
for i in range(2500):
game.play_game()
print "games played: %i" % (mcts.entries['total_games'])
AI1.wins = 0
AI2 = dumb_ai()
game = board(humans=0,AI1=AI1,AI2=AI2)
for i in range(100):
print "round %i" % (i+1)
game.play_game()
with open("scores.txt","a") as f:
f.write("mcts AI wins out of 100 games after %i iterations: %i\n" % (mcts.entries['total_games'],AI1.wins))
f.write("random choice AI wins out of 100 games after %i iterations: %i\n" % (mcts.entries['total_games'],AI2.wins))
entries.sync()
entries.close()
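For reference, the node-selection rule used in `monte_carlo_tree.choose_move` and `break_circuit` above is a UCB1-style upper confidence bound; isolated as a standalone helper it reads:

```python
import math

def ucb1(score, visits, total_visits):
    # Exploitation term plus an exploration bonus that shrinks as a node is
    # visited more often; identical to the expression used in the tree code above.
    return score / (visits + 1.0) + (2 * math.log(total_visits + 1.0) / (visits + 1.0)) ** 0.5

print(ucb1(score=3, visits=5, total_visits=20))
```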
| 3.890625
| 4
|
python/test/cuda/test_large_blocks.py
|
daniel-falk/nnabla-ext-cuda
| 103
|
12777865
|
<gh_stars>100-1000
# Copyright 2017,2018,2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
@pytest.mark.parametrize("m", [1, 2, 3])
def test_cuda_large_blocks(cuda_test_opts, m):
if cuda_test_opts.disable_test_large_blocks:
pytest.skip('`--disable-test-large-blocks` is passed')
CUDA_THREAD_PER_BLOCK = 512
CUDA_MAX_BLOCKS = 65536
size = CUDA_MAX_BLOCKS * CUDA_THREAD_PER_BLOCK * m + 3
print("Variable size:", size)
x = np.zeros((size,), np.float32)
v = nn.Variable(x.shape)
v.d = x
from nnabla.ext_utils import get_extension_context
with nn.context_scope(get_extension_context('cuda')):
y = F.relu(v)
y.forward()
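    # The size above intentionally exceeds CUDA_MAX_BLOCKS * CUDA_THREAD_PER_BLOCK, so a
    # single 1-D launch cannot assign one thread per element; the CUDA kernels are expected
    # to cover the remaining elements (e.g. with a grid-stride loop). The "+ 3" leaves a
    # remainder that is not a multiple of the block size.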
| 2
| 2
|
Python/keithley-2400_mpps.py
|
jmball/simple_solar_simulator
| 0
|
12777866
|
import argparse
import time
import numpy as np
import pyvisa
# Parse folder path, file name, and measurement parameters from command line
# arguments. Remember to include the "python" keyword before the call to the
# python file from the command line, e.g. python example.py "arg1" "arg2".
# Folder paths must use forward slashes to separate subfolders.
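# Example invocation (hypothetical values, for illustration only):
# python keithley-2400_mpps.py "C:/data/mppt/" "cell1_mpp.txt" 0.85 1 100 180 0.09 1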
parser = argparse.ArgumentParser(
description='Measure and save max power point tracking data')
parser.add_argument(
'folder_path',
metavar='folder_path',
type=str,
help='Absolute path to the folder containing max P stabilisation data')
parser.add_argument(
'file_name',
metavar='file_name',
type=str,
help='Name of the file to save the data to')
parser.add_argument(
'V_start',
metavar='V_start',
type=float,
help='Seed voltage for maximum power point tracker (V)')
parser.add_argument(
'nplc',
metavar='nplc',
type=float,
help='Integration filter in number of power line cycles (NPLC)')
parser.add_argument(
't_settling', metavar='t_settling', type=float, help='Settling delay (ms)')
parser.add_argument(
't_track',
metavar='t_track',
type=float,
help='Time to track maximum power point for (s)')
parser.add_argument('A', metavar='A', type=float, help='Device area (cm^2)')
parser.add_argument(
'num_of_suns',
metavar='num_of_suns',
type=float,
help='Number of suns equivalent illumination intensity')
args = parser.parse_args()
# Assign argparse arguments to variables
folderpath = args.folder_path
filename = args.file_name
V_start = args.V_start
A = args.A
nplc = args.nplc
t_settling = args.t_settling
t_track = args.t_track
suns = args.num_of_suns
V_range = np.absolute(V_start)
# Set current measurement range to 10 times SQ limit for 0.5 eV
# bandgap for the given area
I_range = 10 * 0.065 * A
# Assign the VISA resource to a variable
rm = pyvisa.ResourceManager()
keithley2400 = rm.open_resource('GPIfdf8:f53e:61e4::18::INSTR')
keithley2400.query('*IDN?')
keithley2400.write('*RST')
keithley2400.encoding = 'latin-1'
# Disable the output
keithley2400.write('OUTP OFF')
# Enable 4-wire sense
keithley2400.write(':SYST:RSEN 1')
# Don't auto-off source after measurement
keithley2400.write(':SOUR:CLE:AUTO OFF')
# Set source mode to voltage
keithley2400.write(':SOUR:FUNC VOLT')
# Set output-off mode to high impedance
keithley2400.write(':OUTP:SMOD HIMP')
# Set the voltage range
keithley2400.write(':SOUR:VOLT:RANG {}'.format(V_range))
# Set the current range
keithley2400.write(':SOUR:CURR:RANG {}'.format(I_range))
# Set the delay
keithley2400.write(':SOUR:DEL {}'.format(t_settling))
# Set the integration filter
keithley2400.write(':SENS:CURR:NPLC {}'.format(nplc))
# Disable autozero
keithley2400.write(':SYST:AZER OFF')
def track_max_power(V, t_track):
"""Maximum power point stabilizer.
Holding at a fixed voltage (V), measure the power output for a fixed
amount of time (t_track), taking as many measurements as possible.
Parameters
----------
V : float
Seed voltage for the maximum power point tracker (V)
t_track : float
Time to track the maximum power point for (s)
Returns
-------
ts : list of float
Timestamps for every measurement (UTC)
    Vs : list of float
        Voltages (V)
    Is : list of float
        Currents (A)
    Js : list of float
        Current densities (mA / cm^2)
    Ps : list of float
        Powers (W)
    PCEs : list of float
        Power conversion efficiencies (%)
"""
# Initialise empty lists for storing data
ts = []
Vs = []
Is = []
Js = []
Ps = []
PCEs = []
    # Turn on the Keithley output at the seed voltage and measure in the dark for 3 s
keithley2400.write(':SOUR:VOLT {}'.format(V))
keithley2400.write('OUTP ON')
# Start timing
t_start = time.time()
t = time.time()
    # Measure the current at the seed voltage in the dark for 3 s
while t - t_start < 3:
ts.append(t - t_start)
data = keithley2400.query(':MEAS:CURR?') # Measure the current
data = data.split(',')
data = [float(item) for item in data]
Vs.append(data[0])
Is.append(data[1])
Js.append(data[1] * 1000 / A)
Ps.append(data[0] * data[1])
PCEs.append(np.absolute(data[0] * data[1] * 1000 / (suns * A)))
t = time.time()
# Open the shutter of the solar simulator
keithley2400.write(':SOUR2:TTL 0')
# Measure at V in the light for t_track
i = len(Vs) - 1
while t - t_start < t_track + 3:
ts.append(t - t_start)
data = keithley2400.query(':MEAS:CURR?') # Measure the current
data = data.split(',')
data = [float(item) for item in data]
Vs.append(data[0])
Is.append(data[1])
Js.append(data[1] * 1000 / A)
Ps.append(data[0] * data[1])
PCEs.append(np.absolute(data[0] * data[1] * 1000 / (suns * A)))
t = time.time()
i += 1
return ts, Vs, Is, Js, Ps, PCEs
# Turn off display
keithley2400.write(':DISP:ENAB 0')
# Manually reset zero reference values
keithley2400.write(':SYST:AZER ONCE')
# Track max power
mppt_results = track_max_power(V_start, t_track)
# Disable output
keithley2400.write('OUTP OFF')
# Close shutter
keithley2400.write(':SOUR2:TTL 1')
# Turn the display back on
keithley2400.write(':DISP:ENAB 1')
# Format and save the results
np.savetxt(
folderpath + filename,
np.transpose(np.array(mppt_results)),
fmt='%.9f',
delimiter='\t',
newline='\r\n',
header='Time (s)\tV\tI (A)\tJ (mA/cm^2)\tP (W)\tPCE (%)',
comments='')
# Close the VISA resource
keithley2400.close()
| 2.921875
| 3
|
Advanced/Exams/2020_10_Exam/2_checkmate.py
|
tankishev/Python
| 2
|
12777867
|
<reponame>tankishev/Python
# You will be given a chess board (8x8). On the board there will be 3 types of symbols:
# • "." – empty square
# • "Q" – a queen
# • "K" – the king
# Your job is to find which queens can capture the king and print them. A queen can move
# diagonally, horizontally and vertically (essentially all the moves the other pieces can make,
# except the knight's). Beware that some queens may stand in the way of other queens and stop
# them from capturing the king. For more clarification see the examples.
# Input
# • 8 lines – the state of the board (each square separated by single space)
# Output
# • The positions of the queens that can capture the king as lists
# • If the king cannot be captured, print: "The king is safe!"
# • The order of output does not matter
def get_cell(cell: tuple, matrix: list) -> str:
row, col = cell
return str(matrix[row][col])
def is_valid_cell(cell: tuple) -> bool:
    return min(cell) >= 0 and max(cell) < 8
def possible_moves(cell: tuple, matrix: list) -> tuple:
retval = []
row, col = cell
for i in range(row - 1, -1, -1):
j = col - (row - i)
if is_valid_cell((i, j)):
if get_cell((i, j), matrix) == 'Q':
break
retval.append((i, j))
for i in range(row - 1, -1, -1):
j = col
if is_valid_cell((i, j)):
if get_cell((i, j), matrix) == 'Q':
break
retval.append((i, j))
for i in range(row - 1, -1, -1):
j = col + (row - i)
if is_valid_cell((i, j)):
if get_cell((i, j), matrix) == 'Q':
break
retval.append((i, j))
for i in range(row + 1, 8):
j = col - (row - i)
if is_valid_cell((i, j)):
if get_cell((i, j), matrix) == 'Q':
break
retval.append((i, j))
for i in range(row + 1, 8):
j = col
if is_valid_cell((i, j)):
if get_cell((i, j), matrix) == 'Q':
break
retval.append((i, j))
for i in range(row + 1, 8):
j = col + (row - i)
if is_valid_cell((i, j)):
if get_cell((i, j), matrix) == 'Q':
break
retval.append((i, j))
for j in range(col - 1, -1, -1):
i = row
if is_valid_cell((i, j)):
if get_cell((i, j), matrix) == 'Q':
break
retval.append((i, j))
for j in range(col + 1, 8):
i = row
if is_valid_cell((i, j)):
if get_cell((i, j), matrix) == 'Q':
break
retval.append((i, j))
return tuple(retval)
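# An alternative, more compact sketch (not part of the original solution): walk the eight
# queen directions with direction vectors instead of eight unrolled loops. It relies on the
# is_valid_cell and get_cell helpers defined above.
def possible_moves_compact(cell: tuple, matrix: list) -> tuple:
    row, col = cell
    directions = ((-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1))
    retval = []
    for d_row, d_col in directions:
        i, j = row + d_row, col + d_col
        while is_valid_cell((i, j)):
            if get_cell((i, j), matrix) == 'Q':
                break  # another queen blocks the rest of this line
            retval.append((i, j))
            i, j = i + d_row, j + d_col
    return tuple(retval)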
def take_king(moves: tuple, matrix: list) -> bool:
for cell in moves:
if get_cell(cell, matrix) == 'K':
return True
return False
def main() -> None:
retval = []
matrix = [[el for el in input().split()] for _ in range(8)]
for row in range(8):
for col in range(8):
if get_cell((row, col), matrix) == 'Q':
moves = possible_moves((row, col), matrix)
if take_king(moves, matrix):
retval.append([row, col])
if retval:
for el in retval:
print(el)
else:
print('The king is safe!')
if __name__ == '__main__':
main()
| 4.15625
| 4
|
imagelib/image/image_writers/__init__.py
|
susautw/imagelib
| 0
|
12777868
|
__all__ = ['ImageWriter', 'BlockingImageWriter', 'ThreadingImageWriter']
from .image_writer import ImageWriter
from .blocking_image_writer import BlockingImageWriter
from .threading_image_writer import ThreadingImageWriter
| 1.523438
| 2
|
lib/color.py
|
runblood/get_mysql_stats
| 2
|
12777869
|
#!/usr/local/bin/python3.6
#-*- coding: utf-8 -*-
#Author WangJiang@2019 15810438848 <EMAIL>
#All rights reserved
################################################################################################################
from colorama import init, Fore, Back, Style
################################################################################################################
### Output class
class Cls_Color:
    ### Initialisation
def __init__(self):
"""
Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
Style: DIM, NORMAL, BRIGHT, RESET_ALL
"""
        init(autoreset=True)  # initialise colorama and automatically reset colour settings after each print
    ### Red font
def fore_red(self, content):
return Fore.RED + content
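# Usage sketch (assumed call site, not part of the original module):
#   color = Cls_Color()
#   print(color.fore_red("connection failed"))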
| 2.8125
| 3
|
local/pre-computing.py
|
Ririkoo/DanmakuAnime
| 0
|
12777870
|
<gh_stars>0
# -*- coding: utf-8 -*-
from multiprocessing.dummy import Pool as ThreadPool
from bilisupport import DANMAKULIST,EPISODEINFO,DANMAKURES,OTHERINFO
import os
import re
import requests
import numpy as np
from datetime import datetime
from zhon.hanzi import punctuation
from bs4 import BeautifulSoup
punctuation+='!?'
def danmaku_cleanning(dstr):
if dstr in ['???','。。。','???']:
return dstr
if len(set(dstr))==1:
return dstr[:3]
if dstr[:3] == "233":
return "23333"
dstr = dstr.lower()
dstr = re.sub(r"[%s]+" %punctuation, "",dstr)
return dstr
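# Illustrative behaviour (hypothetical inputs):
#   danmaku_cleanning("23333333!!") -> "23333"   (any comment starting with "233" is normalised)
#   danmaku_cleanning("哈哈哈哈")     -> "哈哈哈"    (a single repeated character is truncated to 3)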
def compute_alldanmaku_info(sid,graininess):
sid = int(sid)
episode_list = EPISODEINFO.find({"sid":sid})
cid_list = [episode['cid'] for episode in episode_list]
time_danmaku = {}
for cid in cid_list:
all_danmaku = DANMAKULIST.find({"cid":cid})
for danmaku in all_danmaku:
if(graininess=='min'):
time = round(danmaku['time']/60)
elif (graininess=='s'):
time = round(danmaku['time'])
if(time not in time_danmaku.keys()):
time_danmaku[time] = 1
else:
time_danmaku[time] += 1
time_danmaku_sort = sorted(time_danmaku.items(),key= lambda x:x[0])
danmaku_res=[{
'sid': int(sid),
'cid': -1,
'time': int(x[0]),
'danmaku_count': int(x[1]),
'graininess': graininess
} for x in time_danmaku_sort]
DANMAKURES.insert_many(danmaku_res)
def compute_otherinfo(sid):
episode_list = EPISODEINFO.find({"sid":sid})
cid_list = [episode['cid'] for episode in episode_list]
danmaku_text = []
len_cnt_dict = {'1-10':0,'10-20':0,'20-30':0,'>30':0}
danmaku_cnt_dict={}
for cid in cid_list:
all_danmaku = DANMAKULIST.find({"cid":cid})
for danmaku in all_danmaku:
danmaku_text.append(danmaku['text'])
for danmaku in danmaku_text:
length = len(danmaku)
if (length<=10):
len_cnt_dict['1-10']+=1
elif (length>10 and length<=20):
len_cnt_dict['10-20']+=1
elif (length>20 and length<=30):
len_cnt_dict['20-30']+=1
else:
len_cnt_dict['>30']+=1
danmaku = danmaku_cleanning(danmaku)
if danmaku not in danmaku_cnt_dict.keys():
danmaku_cnt_dict[danmaku]=1
else:
danmaku_cnt_dict[danmaku]+=1
sort_danmaku_cnt = sorted(danmaku_cnt_dict.items(),key=lambda x:x[1],reverse=True)[:20]
other_info={
'sid': int(sid),
'topwords': [item[0] for item in sort_danmaku_cnt],
'topwords_fre': [item[1] for item in sort_danmaku_cnt],
'length_distribution': list(len_cnt_dict.values())
}
OTHERINFO.insert(other_info)
def compute_senttime(sid):
episode_list = EPISODEINFO.find({"sid":sid})
cid_list = [episode['cid'] for episode in episode_list]
time_list = [0]*24
for cid in cid_list:
all_danmaku = DANMAKULIST.find({"cid":cid})
for danmaku in all_danmaku:
time_list[datetime.fromtimestamp(danmaku['date']).hour]+=1
OTHERINFO.update({"sid":sid},{"$set":{"senttime":time_list}})
def compute_emotion(sid):
sid = int(sid)
positive_dict = open('positive_dict.txt','r').read().split('\n')
nagtive_dict = open('nagtive_dict.txt','r').read().split('\n')
episode_list = EPISODEINFO.find({"sid":sid})
cid_list = [episode['cid'] for episode in episode_list]
    positive_count = 0  # number of danmaku classified as positive
    negative_count = 0  # number of danmaku classified as negative
time_danmaku = {}
for cid in cid_list:
all_danmaku = DANMAKULIST.find({"cid":cid})
for danmaku in all_danmaku:
# Calculate Emotion
text = danmaku['text']
is_inDict = False
for item in positive_dict:
if text in item:
score = 1.0
is_inDict = True
for item in nagtive_dict:
if text in item:
score = 0
is_inDict = True
if not is_inDict:
cal_response = requests.get("http://172.16.17.32:5000/senti/"+str(text))
score = cal_response.json()['score']
if score > 0.5:
positive_count += 1
elif score < 0.5:
negative_count += 1
# Calculate Time-based Emotion second level
time = round(danmaku['time'])
if(time not in time_danmaku.keys()):
time_danmaku[time] = [score]
else:
time_danmaku[time].append(score)
for t in time_danmaku:
time_danmaku[t] = sum(time_danmaku[t])/len(time_danmaku[t])
time_danmaku_sort = sorted(time_danmaku.items(),key= lambda x:x[0])
for x in time_danmaku_sort:
DANMAKURES.update({'sid': int(sid),'cid': -1,'time': int(x[0]),'graininess': 's'},
{"$set":{"emotion_value":x[1]}})
OTHERINFO.update({"sid":sid},{"$set":{
"positive_danmaku": int(positive_count),
"negative_danmaku": int(negative_count)
}})
if __name__ == "__main__":
# Computing all danmaku_info
for line in open('kyoani_sid.csv', 'r'):
sid = int(line.split(',')[0])
print('Precomputing',sid)
compute_emotion(sid)
# compute_senttime(sid)
#print('Graininess: min')
#compute_alldanmaku_info(line.split(',')[0],graininess='min')
#print('Graininess: s')
#compute_alldanmaku_info(line.split(',')[0],graininess='s')
| 2.46875
| 2
|
tests/test_main.py
|
diogobaeder/convertfrom
| 0
|
12777871
|
from unittest import TestCase
from unittest.mock import patch
from nose.tools import istest
from convertfrom.main import convert, main
class EntryPointTest(TestCase):
@istest
@patch('convertfrom.main.sys')
@patch('convertfrom.main.print')
@patch('convertfrom.main.convert')
def prints_converted_result(self, mock_convert, mock_print, mock_sys):
mock_sys.argv = ['convertfrom', '2a', 'to', 'b']
mock_convert.return_value = '2b'
main()
mock_print.assert_called_once_with('2b')
mock_convert.assert_called_once_with(['2a', 'to', 'b'])
@istest
@patch('convertfrom.main.convert')
def exits_on_exceptions(self, mock_convert):
exception = RuntimeError('oops')
mock_convert.side_effect = exception
with self.assertRaises(SystemExit):
main()
class ConvertTest(TestCase):
@istest
def converts_10m_to_1000cm(self):
result = convert(['10m', 'to', 'cm'])
self.assertEqual(result, '1000.0cm')
@istest
def converts_1m_to_100cm(self):
result = convert(['1m', 'to', 'cm'])
self.assertEqual(result, '100.0cm')
@istest
def converts_1m_to_1000mm(self):
result = convert(['1m', 'to', 'mm'])
self.assertEqual(result, '1000.0mm')
@istest
def converts_200cm_to_2m(self):
result = convert(['200cm', 'to', 'm'])
self.assertEqual(result, '2.0m')
@istest
def converts_1meter_to_100cm(self):
result = convert(['1meter', 'to', 'cm'])
self.assertEqual(result, '100.0cm')
@istest
def converts_1_meter_to_100cm(self):
result = convert(['1', 'meter', 'to', 'cm'])
self.assertEqual(result, '100.0cm')
@istest
def converts_1_meter_to_1m(self):
result = convert(['1', 'meter', 'to', 'meters'])
self.assertEqual(result, '1.0meters')
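# These tests use nose-style @istest decorators, so a typical invocation would be
# something like `nosetests tests/test_main.py` (an assumption about the project's test runner).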
| 2.625
| 3
|
others/jacobian.py
|
raghuramshankar/kalman-filter-localization
| 3
|
12777872
|
<gh_stars>1-10
import sympy as sp
x, y, psi, v, dpsi, T = sp.symbols('x y psi v dpsi T')
state = sp.Matrix([x,
y,
psi,
v,
dpsi])
F = sp.Matrix([[x + (v/dpsi) * (sp.sin(T * dpsi + psi) - sp.sin(psi))],
[y + (v/dpsi) * (sp.cos(psi) - sp.cos(T * dpsi + psi))],
[T * dpsi + psi],
[v],
[dpsi]])
jF = F.jacobian(state)
# print(F[0], '\n', F[1], '\n', F[2], '\n', F[3], '\n',F[4])
# for i in range(0, 5):
# print(jF[i, 0], ' ', jF[i, 1], ' ', jF[i, 2],
# ' ', jF[i, 3], ' ', jF[i, 4], ' ')
h = sp.Matrix([sp.sqrt(x), sp.sqrt(y)])
jH = h.jacobian(state)
print(jF)
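# A minimal follow-up sketch (not in the original script): turn the symbolic Jacobian into
# a numerical function, e.g. for use in an EKF predict step. The sample values below are assumptions.
jF_func = sp.lambdify((x, y, psi, v, dpsi, T), jF, 'numpy')
# Example: Jacobian at x=0, y=0, psi=0.1 rad, v=5 m/s, dpsi=0.01 rad/s, T=0.1 s
# print(jF_func(0.0, 0.0, 0.1, 5.0, 0.01, 0.1))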
| 2.046875
| 2
|
segmentation/rescore.py
|
PRHLT/docClasifIbPRIA22
| 0
|
12777873
|
<filename>segmentation/rescore.py
import glob, os, re
import numpy as np
import math
def read_results(paths:list, LOG:bool=False) -> dict:
res = {}
min_pag = 1500
for path in paths:
# print("Reading results from : ", path)
f = open(path, "r")
lines = f.readlines()
f.close()
lines = lines[1:]
for line in lines:
fname, _, *probs = line.strip().split(" ")
probs = [float(x) for x in probs]
ini, fin = fname.split("_")[1].split("-")
ini, fin = int(ini), int(fin)
min_pag = min(ini, min_pag)
prob_max = np.max(probs)
if LOG:
prob_max = math.log(prob_max)
if (ini, fin) in res:
raise Exception(f"Group {ini}-{fin} duplicated?")
res[(ini,fin)] = prob_max
return res, min_pag-1
def read_files_segment_IMF(p:str, min_pag:int=0, LOG:bool=False) -> list:
files = glob.glob(os.path.join(p, "*"))
res = []
for file in files:
nbest = int(file.split("/")[-1])
f = open(file, "r")
lines = f.readlines()
f.close()
lines = [x.strip() for x in lines]
prob = lines[0]
segm = lines[2:]
res_segm = []
prob = float(prob.split("#P(s|Z)=")[-1])
for line in segm:
_, ini, fin, *_ = line.split()
ini, fin = int(ini)+min_pag, int(fin)+min_pag
res_segm.append((ini,fin))
if LOG:
prob = math.log(prob)
res.append((nbest, file, prob, res_segm))
res.sort()
return res
def read_results_inf(p:str, LOG:bool=True) -> dict:
res = {}
f = open(p, "r")
lines = f.readlines()
f.close()
lines = lines[1:]
for line in lines:
line = re.sub(' +', ' ', line)
s = line.strip().split(" ")
nb_pos, errs, _, err_porc, *_ = s
# print(nb_pos, errs, err_porc)
nb_pos, errs, err_porc = int(nb_pos), int(errs), float(err_porc)
res[nb_pos] = (errs, err_porc)
return res
def main(pathsegm_IMF:str, paths_results:list, path_results_inf:str):
LOG = False
if LOG:
LOGPROB_str = "LOGPROB_"
else:
LOGPROB_str = ""
results, min_pag = read_results(paths_results, LOG)
files_segm = read_files_segment_IMF(pathsegm_IMF, min_pag, LOG)
res_inf = read_results_inf(path_results_inf)
# print(f"Groups start at {min_pag}")
rescored = []
for nbest, file, prob, segm in files_segm:
# print(nbest, file)
prob_classifier = 0 if LOG else 1
for ini, fin in segm:
# print(ini, fin)
prob_segm = results.get((ini,fin), None)
# print(prob_segm, ini, fin)
if prob_segm is None:
raise Exception(f"{ini}-{fin} group not found")
if LOG:
prob_classifier += prob_segm
else:
prob_classifier *= prob_segm
# print(nbest, prob, prob_classifier)
p = prob+prob_classifier if LOG else prob*prob_classifier
errs, err_porc = res_inf[nbest]
rescored.append((p, nbest, file, prob, segm, prob_classifier, errs, err_porc))
# exit()
rescored.sort()
    print("{: >30} {: >30} {: >30} {: >30} {: >30} {: >30}".format(f"{LOGPROB_str}rescored", f"{LOGPROB_str}probSegm", f"{LOGPROB_str}probText", f"nbest_probSegm", "#Errs", "Err(%)"))
for rscore, nbest, file, prob, segm, prob_classifier, errs, err_porc in rescored[::-1]:
# print(rscore, prob, prob_classifier, nbest)
print("{: >30} {: >30} {: >30} {: >30} {: >30} {: >30}".format(rscore, prob, prob_classifier, nbest, errs, err_porc))
if __name__ == "__main__":
path_results_inf = "results.inf"
pathsegm_IMF = "JMBD4950-NB"
work_dir = "works_tr49_te50_groups_6classes"
paths_results = [f"../{work_dir}/work_128_numFeat2048/results.txt", f"../{work_dir}/work_128_numFeat2048/results_prod.txt"]
main(pathsegm_IMF, paths_results, path_results_inf)
| 2.546875
| 3
|
applications/extract_test_cold_questions.py
|
zhenv5/PyStack
| 7
|
12777874
|
<filename>applications/extract_test_cold_questions.py
try:
import cPickle as pickle
except Exception as e:
import pickle
import pandas as pd
import os.path
import random
def process_ques_asker(cate_name):
asker_df = pd.read_csv(os.path.join(cate_name,"QuestionId_AskerId.csv"))
ques_asker_dict = {k:v for k,v in zip(asker_df["QuestionId"],asker_df["AskerId"])}
asker_ques_dict = {}
for a,q in zip(asker_df["AskerId"],asker_df["QuestionId"]):
if a in asker_ques_dict:
asker_ques_dict[a].append(q)
else:
asker_ques_dict[a] = [q]
return ques_asker_dict,asker_ques_dict
def new_questions_new_askers_for_test(cate_name,least_number_of_answers_per_question = 5,least_number_of_questions_per_asker = 2):
test_file = os.path.join(cate_name,"new_questions_new_askers_ques.pkl")
if False:
if os.path.isfile(test_file):
with open(test_file,"rb") as f:
ques_set = pickle.load(f)
print("# questions for Test (new questions new askers): %d" % len(ques_set))
return ques_set
asker_ques_w_bestAnser_dict = {}
ques_asker_dict,asker_ques_dict = process_ques_asker(cate_name)
ques_answerers_dict,answerer_questions_dict = process_ques_answerer(cate_name)
filterd_ques_answerers = dict(filter(lambda x: len(x[1]) >= least_number_of_answers_per_question, ques_answerers_dict.iteritems()))
print("# questions having at least %d answers: %d" % (least_number_of_answers_per_question,len(filterd_ques_answerers)))
filterd_asker_questions = dict(filter(lambda x: len(x[1]) == 1, asker_ques_dict.iteritems()))
print("# askers having asked %d questions: %d" % (1,len(filterd_asker_questions)))
df = pd.read_csv(os.path.join(cate_name,"QuestionId_AcceptedAnswerId.csv"))
for q,u in zip(df["QuestionId"],df["AcceptedAnswerId"]):
a = ques_asker_dict[q]
if a in filterd_asker_questions:
# a has asked more than a specific number of questions
if q in filterd_ques_answerers:
# using the last asked question for test
# q has more than a specific number of answers
if a not in asker_ques_w_bestAnser_dict:
asker_ques_w_bestAnser_dict[a] = [(q,u)]
else:
asker_ques_w_bestAnser_dict[a].append((q,u))
ques_set =set([v[-1][0] for k,v in asker_ques_w_bestAnser_dict.iteritems()])
print("==== # askers for Test (new questions new asker) : %d" % len(asker_ques_w_bestAnser_dict))
print("==== # questions for Test (new questions new askers): %d" % len(ques_set))
with open(test_file,"wb") as f:
pickle.dump(ques_set,f)
return ques_set
def new_questions_old_askers_for_test(cate_name,least_number_of_answers_per_question = 5,least_number_of_questions_per_asker = 2):
test_file = os.path.join(cate_name,"new_questions_old_askers_ques.pkl")
if False:
if os.path.isfile(test_file):
with open(test_file,"rb") as f:
ques_set = pickle.load(f)
print("# questions for Test (new questions old askers) : %d" % len(ques_set))
return ques_set
asker_ques_w_bestAnser_dict = {}
ques_asker_dict,asker_ques_dict = process_ques_asker(cate_name)
ques_answerers_dict,answerer_questions_dict = process_ques_answerer(cate_name)
filterd_ques_answerers = dict(filter(lambda x: len(x[1]) >= least_number_of_answers_per_question, ques_answerers_dict.iteritems()))
print("# questions having at least %d answers: %d" % (least_number_of_answers_per_question,len(filterd_ques_answerers)))
filterd_asker_questions = dict(filter(lambda x: len(x[1]) >= 2, asker_ques_dict.iteritems()))
print("# askers having asked at least %d questions: %d" % (least_number_of_questions_per_asker,len(filterd_asker_questions)))
df = pd.read_csv(os.path.join(cate_name,"QuestionId_AcceptedAnswerId.csv"))
for q,u in zip(df["QuestionId"],df["AcceptedAnswerId"]):
a = ques_asker_dict[q]
if a in filterd_asker_questions:
# a has asked more than a specific number of questions
if q in filterd_ques_answerers:
# using the last asked question for test
# q has more than a specific number of answers
if a not in asker_ques_w_bestAnser_dict:
asker_ques_w_bestAnser_dict[a] = [(q,u)]
else:
asker_ques_w_bestAnser_dict[a].append((q,u))
# corresponding askers should have at least 2 questions which have best answerers
ques_set =set([v[-1][0] for k,v in asker_ques_w_bestAnser_dict.iteritems() if len(v) > 1])
#ques_set =set([v[-1][0] for k,v in asker_ques_w_bestAnser_dict.iteritems()])
print("=== # questions for Test (new questions old askers): %d" % len(ques_set))
with open(test_file,"wb") as f:
pickle.dump(ques_set,f)
return ques_set
def process_ques_answerer(cate_name):
file_name = os.path.join(cate_name,"QuestionId_AnswererId.csv")
df = pd.read_csv(file_name)
ques_users_dict = {}
user_ques_dict = {}
for q,u in zip(df["QuestionId"],df["AnswererId"]):
if q in ques_users_dict:
ques_users_dict[q].append(u)
else:
ques_users_dict[q] = [u]
if u in user_ques_dict:
user_ques_dict[u].append(q)
else:
user_ques_dict[u] = [q]
return ques_users_dict,user_ques_dict
def process_ques_bestAnswerer(cate_name):
df = pd.read_csv(os.path.join(cate_name,"QuestionId_AcceptedAnswererId.csv"))
return {k:v for k,v in zip(df["QuestionId"],df["AcceptedAnswererId"])}
def remove_best_answerers(cate_name,test_rate = 0.15):
# building dataset for resolved questions
file_name = os.path.join(cate_name,"QuestionId_AnswererId.csv")
df = pd.read_csv(file_name)
ques = df["QuestionId"]
users = df["AnswererId"]
asker_df = pd.read_csv(os.path.join(cate_name,"QuestionId_AskerId.csv"))
ques_asker_dict = {k:v for k,v in zip(asker_df["QuestionId"],asker_df["AskerId"])}
asker_ques_dict = {}
for a,q in zip(asker_df["AskerId"],asker_df["QuestionId"]):
if a in asker_ques_dict:
asker_ques_dict[a].append(q)
else:
asker_ques_dict[a] = [q]
ques_users_dict = {}
user_ques_dict = {}
for q,u in zip(ques,users):
if q in ques_users_dict:
ques_users_dict[q].append(u)
else:
ques_users_dict[q] = [u]
if u in user_ques_dict:
user_ques_dict[u].append(q)
else:
user_ques_dict[u] = [q]
print("# instances in dataset: %d" % len(ques))
ques_users_dict = dict((k,len(v)) for k,v in ques_users_dict.iteritems())
user_ques_dict = dict((k,len(v)) for k,v in user_ques_dict.iteritems())
asker_ques_dict = dict((k,len(v)) for k,v in asker_ques_dict.iteritems())
print("# users answered more than 1 question: %d" % len(filter(lambda x: x > 1,user_ques_dict.values())))
print("# questions having more than 1 answer: %d" % len(filter(lambda x: x > 1,ques_users_dict.values())))
print("# askers having more than 1 question: %d (%d)" % (len(filter(lambda x: x > 1, asker_ques_dict.values())),len(asker_ques_dict)))
ba_file_name = os.path.join(cate_name,"QuestionId_AcceptedAnswererId.csv")
df_ba = pd.read_csv(ba_file_name)
ques_ba = df_ba["QuestionId"]
users_ba = df_ba["AcceptedAnswererId"]
deleted_instances = set()
for q,u in zip(ques_ba,users_ba):
askerID = ques_asker_dict[q]
if (ques_users_dict.get(q,0) > 1) and (user_ques_dict.get(u,0) > 1) and (asker_ques_dict.get(askerID,0) > 1):
deleted_instances.add((q,u))
ques_users_dict[q] = ques_users_dict[q] - 1
user_ques_dict[u] = user_ques_dict[u] - 1
asker_ques_dict[askerID] = asker_ques_dict[askerID] - 1
print("# questions having best answers: %d" % len(ques_ba))
num_test = int(len(ques_ba)*test_rate)
num = 0
for (q,u) in deleted_instances:
for i,(q1,u1) in enumerate(zip(df["QuestionId"],df["AnswererId"])):
if (q == q1) and (u == u1):
#if random.random() > test_rate:
# break
df.drop(df.index[i],inplace = True)
num += 1
break
if num >= num_test:
break
print("# questions with best answer removed: %d (%0.4f)" % (num,num*1.0/len(ques_ba)))
print("# instances for training: %d (%0.4f)" % (len(df["QuestionId"]),len(df["QuestionId"])*1.0/len(ques)))
df.to_csv(os.path.join(cate_name,"QuestionId_AnswererId_Train.csv"),index = True, columns = ["QuestionId","AnswerId","AnswererId","Score"])
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-i","--input",default= "../dataset/bitcoin", help = "input category name")
parser.add_argument("-r","--test_rate",default = 0.15, type = float, help = "test rate")
args = parser.parse_args()
new_questions_new_askers_for_test(args.input)
new_questions_old_askers_for_test(args.input)
#remove_best_answerers(args.input,args.test_rate)
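    # Example invocation (illustrative values):
    #   python extract_test_cold_questions.py -i ../dataset/bitcoin -r 0.15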
| 2.6875
| 3
|
letcon/src/utils/__init__.py
|
llFireHawkll/letcon2020-ml-workshop
| 3
|
12777875
|
<filename>letcon/src/utils/__init__.py
'''
File: __init__.py
Project: utils
File Created: Tuesday, 18th August 2020 12:26:46 am
Author: <NAME> (<EMAIL>)
-----
Last Modified: Tuesday, 18th August 2020 12:26:46 am
Modified By: <NAME> (<EMAIL>)
-----
Copyright 2020 <NAME>
'''
| 1.304688
| 1
|
lunchbot/services.py
|
vekerdyb/lunchbot
| 2
|
12777876
|
import os
import boto3
from slackclient import SlackClient
from lunchbot import logging
logger = logging.getLogger(__name__)
class Slack(object):
client = None
@staticmethod
def get_client():
if Slack.client is not None:
logger.debug("Using cached Slack client")
return Slack.client
logger.debug("Creating fresh Slack client")
slack_token = os.environ["SLACK_API_TOKEN"]
Slack.client = SlackClient(slack_token)
return Slack.client
class Dynamo(object):
table = None
@staticmethod
def get_table():
if Dynamo.table is not None:
logger.debug("Using cached DynamoDB client")
return Dynamo.table
logger.debug("Creating fresh DynamoDB client")
dynamo_resource = boto3.resource("dynamodb")
Dynamo.table = dynamo_resource.Table(os.environ["DYNAMODB_TABLE"])
return Dynamo.table
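# Usage sketch (assumed call sites, not part of this module):
#   Slack.get_client().api_call("chat.postMessage", channel="#lunch", text="Lunch is here!")
#   Dynamo.get_table().put_item(Item={"id": "2019-01-01", "response": "yes"})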
| 2.171875
| 2
|
PPIG/Linear_VAE/linear_gae/train.py
|
ComputeSuda/PPICT
| 0
|
12777877
|
from __future__ import division
from __future__ import print_function
from evaluation import get_roc_score, clustering_latent_space
from input_data import load_adj_feature, load_data
from kcore import compute_kcore, expand_embedding
from model import *
from optimizer import OptimizerAE, OptimizerVAE
from preprocessing import *
import numpy as np
import os
import scipy.sparse as sp
import tensorflow as tf
import time
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
flags = tf.app.flags
FLAGS = flags.FLAGS
# Select graph dataset
flags.DEFINE_string('dataset', 'Cross-talk', 'Name of the graph dataset')
# Select machine learning task to perform on graph
flags.DEFINE_string('task', 'link_prediction', 'Name of the learning task')
# Model
flags.DEFINE_string('model', 'linear_vae', 'Name of the model')
# Model parameters
flags.DEFINE_float('dropout', 0., 'Dropout rate (1 - keep probability).')
flags.DEFINE_integer('epochs', 1000, 'Number of epochs in training.')
flags.DEFINE_boolean('features', True, 'Include node features or not in encoder')
flags.DEFINE_float('learning_rate', 0.05, 'Initial learning rate (with Adam)')
flags.DEFINE_integer('hidden', 64, 'Number of units in GCN hidden layer(s).')
flags.DEFINE_integer('dimension', 128, 'Dimension of encoder output, i.e. \
embedding dimension')
# Experimental setup parameters
flags.DEFINE_integer('nb_run', 1, 'Number of model run + test')
flags.DEFINE_float('prop_val', 5., 'Proportion of edges in validation set \
(for Link Prediction task)')
flags.DEFINE_float('prop_test', 10., 'Proportion of edges in test set \
(for Link Prediction task)')
flags.DEFINE_boolean('validation', False, 'Whether to report validation \
results at each epoch (for \
Link Prediction task)')
flags.DEFINE_boolean('verbose', True, 'Whether to print comments details.')
flags.DEFINE_boolean('kcore', False, 'Whether to run k-core decomposition \
and use the framework. False = model \
will be trained on the entire graph')
flags.DEFINE_integer('k', 2, 'Which k-core to use. Higher k => smaller graphs\
and faster (but maybe less accurate) training')
flags.DEFINE_integer('nb_iterations', 10, 'Number of fix point iterations in \
algorithm 2 of IJCAI paper. See \
kcore.py file for details')
# Lists to collect average results
if FLAGS.task == 'link_prediction':
mean_roc = []
mean_ap = []
if FLAGS.kcore:
mean_time_kcore = []
mean_time_train = []
mean_time_expand = []
mean_core_size = []
mean_time = []
# Load graph dataset
if FLAGS.verbose:
print("Loading data...")
if FLAGS.dataset == 'Cross-talk':
adj_init, features_init = load_adj_feature('../Cross-talk/Fegs_1.npy',
'../Cross-talk/Cross-talk_Matrix.txt')
else:
adj_init, features_init = load_data(FLAGS.dataset)
print(type(adj_init), type(features_init))
# The entire training+test process is repeated FLAGS.nb_run times
for i in range(FLAGS.nb_run):
if FLAGS.task == 'link_prediction':
if FLAGS.verbose:
print("Masking test edges...")
# Edge Masking for Link Prediction: compute Train/Validation/Test set
adj, val_edges, val_edges_false, test_edges, test_edges_false = \
mask_test_edges(adj_init, FLAGS.prop_test, FLAGS.prop_val)
elif FLAGS.task == 'node_clustering':
adj_tri = sp.triu(adj_init)
adj = adj_tri + adj_tri.T
else:
raise ValueError('Undefined task!')
# Start computation of running times
t_start = time.time()
# Degeneracy Framework / K-Core Decomposition
if FLAGS.kcore:
if FLAGS.verbose:
print("Starting k-core decomposition of the graph")
# Save adjacency matrix of un-decomposed graph
# (needed to embed nodes that are not in k-core, after GAE training)
adj_orig = adj
# Get the (smaller) adjacency matrix of the k-core subgraph,
# and the corresponding nodes
adj, nodes_kcore = compute_kcore(adj, FLAGS.k)
# Get the (smaller) feature matrix of the nb_core graph
if FLAGS.features:
features = features_init[nodes_kcore, :]
# Flag to compute k-core decomposition's running time
t_core = time.time()
elif FLAGS.features:
features = features_init
# Preprocessing and initialization
if FLAGS.verbose:
print("Preprocessing and Initializing...")
# Compute number of nodes
num_nodes = adj.shape[0]
# If features are not used, replace feature matrix by identity matrix
if not FLAGS.features:
features = sp.identity(adj.shape[0])
# Preprocessing on node features
features = sparse_to_tuple(features)
num_features = features[2][1]
features_nonzero = features[1].shape[0]
# Define placeholders
placeholders = {
'features': tf.sparse_placeholder(tf.float32),
'adj': tf.sparse_placeholder(tf.float32),
'adj_orig': tf.sparse_placeholder(tf.float32),
'dropout': tf.placeholder_with_default(0., shape=())
}
# Create model
model = None
# Linear Graph Variational Autoencoder
model = LinearModelVAE(placeholders, num_features, num_nodes,
features_nonzero)
# Optimizer
pos_weight = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()
norm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0]
- adj.sum()) * 2)
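    # Note (added for clarity): pos_weight is roughly (number of non-edges) / (number of edges),
    # which up-weights the rare positive (edge) entries in the reconstruction loss, while norm
    # rescales the mean cross-entropy so losses are comparable across graphs of different densities.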
with tf.name_scope('optimizer'):
# Optimizer for Non-Variational Autoencoders
opt = OptimizerVAE(preds=model.reconstructions,
labels=tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'],
validate_indices=False), [-1]),
model=model,
num_nodes=num_nodes,
pos_weight=pos_weight,
norm=norm)
# Normalization and preprocessing on adjacency matrix
adj_norm = preprocess_graph(adj)
adj_label = sparse_to_tuple(adj + sp.eye(adj.shape[0]))
# Initialize TF session
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Model training
if FLAGS.verbose:
print("Training...")
for epoch in range(FLAGS.epochs):
# Flag to compute running time for each epoch
t = time.time()
# Construct feed dictionary
feed_dict = construct_feed_dict(adj_norm, adj_label, features,
placeholders)
feed_dict.update({placeholders['dropout']: FLAGS.dropout})
# Weights update
outs = sess.run([opt.opt_op, opt.cost, opt.accuracy],
feed_dict=feed_dict)
# Compute average loss
avg_cost = outs[1]
if FLAGS.verbose:
# Display epoch information
print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(avg_cost),
"time=", "{:.5f}".format(time.time() - t))
# Validation, for Link Prediction
if not FLAGS.kcore and FLAGS.validation and FLAGS.task == 'link_prediction':
feed_dict.update({placeholders['dropout']: 0})
emb = sess.run(model.z_mean, feed_dict=feed_dict)
feed_dict.update({placeholders['dropout']: FLAGS.dropout})
val_roc, val_ap = get_roc_score(val_edges, val_edges_false, emb)
print("val_roc=", "{:.5f}".format(val_roc), "val_ap=", "{:.5f}".format(val_ap))
# Flag to compute Graph AE/VAE training time
t_model = time.time()
# Compute embedding
# Get embedding from model
emb = sess.run(model.z_mean, feed_dict=feed_dict)
# If k-core is used, only part of the nodes from the original
# graph are embedded. The remaining ones are projected in the
# latent space via the expand_embedding heuristic
if FLAGS.kcore:
if FLAGS.verbose:
print("Propagation to remaining nodes...")
# Project remaining nodes in latent space
emb = expand_embedding(adj_orig, emb, nodes_kcore, FLAGS.nb_iterations)
# Compute mean running times for K-Core, GAE Train and Propagation steps
mean_time_expand.append(time.time() - t_model)
mean_time_train.append(t_model - t_core)
mean_time_kcore.append(t_core - t_start)
# Compute mean size of K-Core graph
# Note: size is fixed if task is node clustering, but will vary if
# task is link prediction due to edge masking
mean_core_size.append(len(nodes_kcore))
# Compute mean total running time
mean_time.append(time.time() - t_start)
print(type(emb))
np.save('../Cross-talk/Cross_talk_gcn_features128_FEGS.npy', emb)
# Test model
if FLAGS.verbose:
print("Testing model...")
# Link Prediction: classification edges/non-edges
# Get ROC and AP scores
roc_score, ap_score = get_roc_score(test_edges, test_edges_false, emb)
# Report scores
mean_roc.append(roc_score)
mean_ap.append(ap_score)
###### Report Final Results ######
print("\nTest results for", FLAGS.model,
"model on", FLAGS.dataset, "on", FLAGS.task, "\n",
"___________________________________________________\n")
if FLAGS.task == 'link_prediction':
print("AUC scores\n", mean_roc)
print("Mean AUC score: ", np.mean(mean_roc),
"\nStd of AUC scores: ", np.std(mean_roc), "\n \n")
print("AP scores\n", mean_ap)
print("Mean AP score: ", np.mean(mean_ap),
"\nStd of AP scores: ", np.std(mean_ap), "\n \n")
else:
print("Adjusted MI scores\n", mean_mutual_info)
print("Mean Adjusted MI score: ", np.mean(mean_mutual_info),
"\nStd of Adjusted MI scores: ", np.std(mean_mutual_info), "\n \n")
print("Total Running times\n", mean_time)
print("Mean total running time: ", np.mean(mean_time),
"\nStd of total running time: ", np.std(mean_time), "\n \n")
| 2.03125
| 2
|
Incident-Response/Tools/cyphon/cyphon/aggregator/filters/tests/test_models.py
|
sn0b4ll/Incident-Playbook
| 1
|
12777878
|
# -*- coding: utf-8 -*-
# Copyright 2017-2019 ControlScan, Inc.
#
# This file is part of Cyphon Engine.
#
# Cyphon Engine is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Cyphon Engine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cyphon Engine. If not, see <http://www.gnu.org/licenses/>.
"""
Tests the Filter class.
"""
# third party
from django.test import TestCase
from django.core.exceptions import ValidationError
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
# local
from aggregator.filters.models import Filter
from target.followees.models import Followee
from target.locations.models import Location
from target.searchterms.models import SearchTerm
from tests.fixture_manager import get_fixtures
class FilterModelsTestCase(TestCase):
"""
Base class for testing Filter class and related classes.
"""
fixtures = get_fixtures(['followees', 'filters', 'reservoirs'])
class FilterManagerTestCase(FilterModelsTestCase):
"""
Tests the FilterManager class.
"""
def test_enabled_filters_by_type(self):
"""
Tests the _find_enabled_filters_by_type method.
"""
followee_type = ContentType.objects.get_for_model(Followee)
followees = Filter.objects._find_enabled_filters_by_type(followee_type)
self.assertEqual(len(followees), 2)
location_type = ContentType.objects.get_for_model(Location)
locations = Filter.objects._find_enabled_filters_by_type(location_type)
self.assertEqual(len(locations), 3)
term_type = ContentType.objects.get_for_model(SearchTerm)
terms = Filter.objects._find_enabled_filters_by_type(term_type)
self.assertEqual(len(terms), 1)
def test_get_oldest_time_last_used(self):
"""
Tests the _get_oldest_time_last_used method.
"""
oldest_time = Filter.objects._get_oldest_time_last_used()
self.assertEqual(str(oldest_time), "2000-01-01 05:00:00+00:00")
def test_create_timeframe(self):
"""
Tests the _get_timeframe method.
"""
timeframe = Filter.objects._create_timeframe()
self.assertEqual(str(timeframe.start), "2000-01-01 05:00:00+00:00")
def test_create_reservoir_query(self):
"""
Tests the create_reservoir_query method.
"""
query = Filter.objects.create_reservoir_query()
self.assertEqual(len(query.accounts), 4)
self.assertEqual(len(query.locations), 3)
self.assertEqual(len(query.searchterms), 1)
class FilterTestCase(FilterModelsTestCase):
"""
Tests the Filter class.
"""
def setUp(self):
followee_type = ContentType.objects.get_for_model(Followee)
location_type = ContentType.objects.get_for_model(Location)
srchterm_type = ContentType.objects.get_for_model(SearchTerm)
self.followee_filter = Filter(
content_type=followee_type,
object_id=1,
last_used="2015-01-01T12:00:00+05:00"
)
self.location_filter = Filter(
content_type=location_type,
object_id=1,
last_used="2015-01-01T12:00:00+05:00"
)
self.srchterm_filter = Filter(
content_type=srchterm_type,
object_id=1,
last_used="2015-01-01T12:00:00+05:00"
)
def test_str(self):
"""
Tests the __str__ method.
"""
actual = str(self.location_filter)
expected = 'Point <Point> (location)'
self.assertEqual(actual, expected)
def test_filter_type(self):
"""
Tests the filter_type method.
"""
actual = self.followee_filter.filter_type
expected = 'followee'
self.assertEqual(actual, expected)
def test_create_followee_filter(self):
"""
Test case for a Followee Filter.
"""
try:
self.followee_filter.full_clean()
except ValidationError:
self.fail("Followee filter raised ValidationError unexpectedly")
def test_create_location_filter(self):
"""
Test case for a Location Filter.
"""
try:
self.location_filter.full_clean()
except ValidationError:
self.fail("Location filter raised ValidationError unexpectedly")
def test_create_searchterm_filter(self):
"""
Test case for a SearchTerm Filter.
"""
try:
self.srchterm_filter.full_clean()
except ValidationError:
self.fail("SearchTerm filter raised ValidationError unexpectedly")
def test_create_invalid_filter(self):
"""
Test case for an invalid Filter.
"""
user_type = ContentType.objects.get_for_model(User)
new_filter = Filter(
content_type=user_type,
object_id=1,
last_used="2015-01-01T12:00:00+05:00"
)
with self.assertRaises(ValidationError):
new_filter.full_clean()
| 1.90625
| 2
|
XGBoost/XGBoost_v3/gbtree_xrh.py
|
Xinrihui/Statistical-Learning-Method
| 2
|
12777879
|
<reponame>Xinrihui/Statistical-Learning-Method<filename>XGBoost/XGBoost_v3/gbtree_xrh.py<gh_stars>1-10
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import time
import os
import pickle
from sklearn import datasets
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from lib.updater_colmaker_xrh import *
from lib.activation_xrh import *
from lib.sparse_vector_xrh import *
from lib.utils_xrh import *
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import roc_curve
class XGBoost:
"""
    A simplified XGBoost model.
    Supported tasks:
    1. Regression (squared-error loss)
    2. Binary classification (cross-entropy loss)
    3. Multi-class classification (log loss with softmax)
    ref:
    1. XGBoost: A Scalable Tree Boosting System
    2. https://github.com/dmlc/xgboost/
    Implementation details:
    (1) The sample data is split into per-feature blocks, sorted by feature value within each block
    (2) The exact greedy algorithm is used to find the best split point
    (3) Sparsity-aware split finding: rows whose feature value is missing are skipped when searching
        for the best split, which improves performance on high-dimensional sparse features
    Not yet implemented:
    (4) Approximate split finding: only the quantiles of the feature values are considered as split
        candidates, instead of every feature value
    (5) Computing quantiles requires loading and sorting all values in memory; when memory is not
        sufficient, a quantile-estimation algorithm such as the weighted quantile sketch can be used
    (6) The best-split search can be parallelised with multiple threads
    Author: xrh
    Date: 2021-05-29
"""
def __init__(self,
use_pretrained=False, model_path='models/xgboost.model',
print_log=False,
max_iter=10,
max_depth=3,
objective='binary:logistic',
num_class=None,
base_score=0.0,
gama=0.1,
reg_lambda=1.0,
min_child_weight=0,
tree_method='exact',
sketch_eps=0.3,
missing={np.nan, 0}
):
"""
        :param print_log: print the CART trees during training
        :param max_iter: maximum number of boosting iterations
        :param max_depth: maximum depth of each CART regression tree
        :param objective: objective function
               (1) reg:squarederror: squared loss, used for regression
               (2) binary:logistic: logistic regression for binary classification, outputs probabilities
               (3) multi:softmax: softmax objective for multi-class classification; num_class (number of classes) must also be set
        :param num_class: number of classes for multi-class classification
        :param base_score: initial prediction score F_0 for all instances
        :param gama: coefficient of the total number of leaves T in the loss function, controls model complexity
        :param reg_lambda: strength of the L2 regularisation in the objective
        :param min_child_weight: when searching for the best split, a candidate split is discarded if min_child_weight < Min(HL, HR)
        :param tree_method: algorithm used to grow the trees; one of:
               (1) 'exact': split nodes with the exact greedy algorithm
               (2) 'approx': split nodes with the approximate algorithm
        :param sketch_eps: bucket step size, defined as (sum of second-order gradients over the interval) * sketch_eps;
               range (0, 1), default 0.3, i.e. roughly 3 buckets per feature; only used when tree_method='approx'
        :param missing: a set of values that are treated as missing data; default {np.nan, 0}
"""
self.model_path = model_path
if not use_pretrained:
self.print_log = print_log
            # maximum number of boosting iterations
            self.max_iter = max_iter
            # maximum depth of each CART regression tree
            self.max_depth = max_depth
            # objective function
            self.objective = objective
            # number of classes for multi-class classification
            self.num_class = num_class
            # initial prediction score F_0 for all instances
            self.base_score = base_score
            # coefficient of the total number of leaves T in the loss function, controls model complexity
            self.gama = gama
            # strength of the L2 regularisation in the objective
            self.reg_lambda = reg_lambda
            # a candidate split is discarded if min_child_weight < Min(HL, HR)
            self.min_child_weight = min_child_weight
            # algorithm used to split nodes when growing trees
            self.tree_method = tree_method
            # bucket step size
            self.sketch_eps = sketch_eps
            # set of values treated as missing
            self.missing = missing
            self.G = []  # collection of weak classifiers
        else:  # load a pre-trained model
self.load()
def save(self):
"""
        Save the trained model.
:return:
"""
save_dict = {}
save_dict['model_path'] = self.model_path
save_dict['print_log'] = self.print_log
save_dict['max_iter'] = self.max_iter
save_dict['max_depth'] = self.max_depth
save_dict['objective'] = self.objective
save_dict['num_class'] = self.num_class
save_dict['base_score'] = self.base_score
save_dict['gama'] = self.gama
save_dict['reg_lambda'] = self.reg_lambda
save_dict['min_child_weight'] = self.min_child_weight
save_dict['tree_method'] = self.tree_method
save_dict['sketch_eps'] = self.sketch_eps
save_dict['missing'] = self.missing
save_dict['G'] = self.G
with open(self.model_path, 'wb') as f:
pickle.dump(save_dict, f)
print("Save model successful!")
def load(self):
"""
        Load a pre-trained model.
:return:
"""
with open(self.model_path, 'rb') as f:
save_dict = pickle.load(f)
self.model_path = save_dict['model_path']
self.print_log = save_dict['print_log']
self.max_iter = save_dict['max_iter']
self.max_depth = save_dict['max_depth']
self.objective = save_dict['objective']
self.num_class = save_dict['num_class']
self.base_score = save_dict['base_score']
self.gama = save_dict['gama']
self.reg_lambda = save_dict['reg_lambda']
self.min_child_weight = save_dict['min_child_weight']
self.tree_method = save_dict['tree_method']
self.sketch_eps = save_dict['sketch_eps']
self.missing = save_dict['missing']
self.G = save_dict['G']
print("Load model successful!")
def init_y_predict(self, F_0, N):
"""
        Initialise y_predict.
        :param F_0:
        :param N: number of samples
:return:
"""
y_predict = None
if self.objective == "reg:squarederror": # 回归
y_predict = np.array([F_0] * N)
elif self.objective == "binary:logistic": # 二分类
y_predict = np.array([Activation.sigmoid(F_0)] * N)
elif self.objective == "multi:softmax":
y_predict = np.transpose([Activation.softmax(F_0)] * N) # shape : (K, N)
return y_predict
def cal_g_h(self, y, y_predict):
"""
        Compute the first- and second-order gradients of the loss function.
        g - first-order gradient of the loss w.r.t. the score function F, analogous to the residual r in GBDT
        h - second-order gradient of the loss w.r.t. the score function F
        ref: https://www.cnblogs.com/nxf-rabbit75/p/10440805.html
:param y:
:param y_predict:
:return:
"""
g, h = 0, 0
        if self.objective == "reg:squarederror":  # regression
            g = y_predict - y
            h = np.ones_like(g)
        elif self.objective == "binary:logistic":  # binary classification
            g = y_predict - y
            h = y_predict * (1 - y_predict)
        elif self.objective == "multi:softmax":  # multi-class classification
            g = (y_predict - y)
            # The implementation below follows the xgboost source code
            # ref:
            # https://github.com/dmlc/xgboost/blob/master/src/objective/multiclass_obj.cu
            # class SoftmaxMultiClassObj
            # -> void GetGradient(
            h = 2 * y_predict * (1 - y_predict)
            h[h < 1e-16] = 1e-16  # h must stay strictly positive
return g, h
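    # Worked example of the gradients above (illustrative, standard results):
    #   squared error  L = 1/2 (y_hat - y)^2      ->  g = y_hat - y,  h = 1
    #   binary logistic with p = sigmoid(F)       ->  g = p - y,      h = p * (1 - p)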
def update_y_predict(self, F):
"""
        Update y_predict for the current iteration.
:param F:
:return:
"""
y_predict = None
if self.objective == "reg:squarederror": # 回归
y_predict = F
elif self.objective == "binary:logistic": # 二分类
y_predict = Activation.sigmoid(F)
elif self.objective == "multi:softmax":
y_predict = Activation.softmax(F)
return y_predict
def model_error_rate(self, y, y_predict):
"""
        Compute the error rate of the current final classifier (the weighted combination of all weak classifiers so far).
        For regression the error rate is the mean squared error (MSE = mean_squared_error).
:param y:
:param y_predict:
:return:
"""
N = len(y)
error_rate = None
if self.objective == "reg:squarederror": # 回归
error_rate = np.average(np.square(y_predict - y)) # error_rate 为 平均平方误差损失 ( mean_squared_error )
elif self.objective == "binary:logistic": # 二分类
y_predict[y_predict >= 0.5] = 1 # 概率 大于 0.5 被标记为 正例
y_predict[y_predict < 0.5] = 0 # 概率 小于 0.5 被标记为 负例
err_arr = np.ones(N, dtype=int)
err_arr[y_predict == y] = 0
error_rate = np.mean(err_arr) # loss 为 分类错误率
elif self.objective == "multi:softmax": # 多分类
y_label = np.argmax(y_predict, axis=0) # 取 概率最大的 作为 预测的标签
err_arr = np.ones(N, dtype=int)
err_arr[y_label == y] = 0
error_rate = np.mean(err_arr) # 计算训练误差
return error_rate
def fit(self, X, y, learning_rate, error_rate_threshold=0.01, print_error_rate=True):
"""
        Fit the model on the training data.
        :param X: feature data, shape=(N_sample, N_feature)
        :param y: label data, shape=(N_sample,)
        :param learning_rate: learning rate
        :param error_rate_threshold: stopping criterion; training stops once the error rate of the current
                ensemble of base classifiers on the training set drops below this threshold;
                for regression the error rate is the MSE (mean squared loss)
        :param print_error_rate: compute and print the model's training error rate during training
:return:
"""
N = np.shape(X)[0] # 样本的个数
X_DMatrix = DMatrix(X, missing=self.missing)
if self.objective == "multi:softmax": # 多分类
F_0 = np.array([self.base_score] * self.num_class) # shape : (K,)
self.G.append(F_0)
F = np.transpose([F_0] * N) # 对 F_0 进行复制, F shape : (K, N)
y_predict = self.init_y_predict(F_0, N) # shape : (K, N)
y_one_hot = Utils.convert_to_one_hot(x=y, class_num=self.num_class).T # shape: (K,N)
for m in range(1, self.max_iter+1): # 进行 第 m 轮迭代
DT_list = []
for k in range(self.num_class): # 依次训练 K 个 二分类器
print('======= train No.{} 2Classifier ======='.format(k))
g, h = self.cal_g_h(y_one_hot[k], y_predict[k]) # y_predict[k] shape:(N,)
# 训练 用于 2分类的 回归树
RT = Builder(gama=self.gama,
reg_lambda=self.reg_lambda,
max_depth=self.max_depth,
min_child_weight=self.min_child_weight,
tree_method=self.tree_method,
sketch_eps=self.sketch_eps,
print_log=self.print_log)
RT.fit(X_DMatrix, g, h)
f_m = RT.inference(X) # shape:(N,)
DT_list.append(RT)
F[k] = F[k] + learning_rate * f_m # F shape : (K, N), F[k] shape:(N,)
y_predict = self.update_y_predict(F)
self.G.append((learning_rate, DT_list)) # 存储 基分类器
# 计算 当前 所有弱分类器加权 得到的 最终分类器 的 分类错误率
if print_error_rate:
y_predict_copy = y_predict.copy() # y_predict 下一轮迭代 还要使用, 不能被修改
error_rate = self.model_error_rate(y, y_predict_copy)
print('round:{}, error_rate :{}'.format(m, error_rate))
print('======================')
if error_rate < error_rate_threshold: # 错误率 已经小于 阈值, 则停止训练
break
elif self.objective in ('binary:logistic', 'reg:squarederror'): # 二分类 or 回归
F_0 = self.base_score
self.G.append(F_0)
F = np.array([self.base_score] * N) # shape: (N, )
y_predict = self.init_y_predict(F_0, N)
for m in range(1, self.max_iter+1): # 进行 第 m 轮迭代
g, h = self.cal_g_h(y, y_predict)
RT = Builder(gama=self.gama,
reg_lambda=self.reg_lambda,
max_depth=self.max_depth,
min_child_weight=self.min_child_weight,
tree_method=self.tree_method,
sketch_eps=self.sketch_eps,
print_log=self.print_log)
RT.fit(X_DMatrix, g, h)
f_m = RT.inference(X) # 第 m 颗树
self.G.append((learning_rate, RT)) # 存储 基分类器
F = F + learning_rate * f_m
y_predict = self.update_y_predict(F)
if print_error_rate:
y_predict_copy = y_predict.copy() # y_predict 下一轮迭代 还要使用, 不能被修改
error_rate = self.model_error_rate(y, y_predict_copy)
print('')
print('round:{}, error_rate :{}'.format(m, error_rate))
print('======================')
if error_rate < error_rate_threshold: # 错误率 已经小于 阈值, 则停止训练
break
self.save()
def predict_prob(self, X):
"""
        Predict on test data and return the predicted probabilities (for regression, the regression values).
        :param X: feature data, shape=(N_sample, N_feature)
:return:
"""
N = np.shape(X)[0]
y_prob = None # 输出的标签
F_0 = self.G[0] # 最终分类器
if self.objective == "multi:softmax":
F = np.transpose([F_0] * N) # shape : (K, N)
for alpha, DT_list in self.G[1:]:
for k in range(self.num_class):
DT = DT_list[k]
f_m = DT.inference(X)
F[k] += alpha * f_m # F[k] shape:(N,)
y_prob = Activation.softmax(F) # F shape:(K,N) ; y_prob shape:(K,N)
elif self.objective in ("reg:squarederror", "binary:logistic"):
F = F_0 # 第一个 存储的是 初始化情况
for alpha, RT in self.G[1:]:
f_m = RT.inference(X)
F += alpha * f_m
if self.objective == "reg:squarederror": # 回归
y_prob = F
elif self.objective == "binary:logistic": # 二分类
y_prob = Activation.sigmoid(F)
return y_prob
def predict(self, X):
"""
        Predict on test data and return the predicted labels.
        :param X: feature data, shape=(N_sample, N_feature)
:return:
"""
N = np.shape(X)[0]
y_predict = None # 输出的标签
F_0 = self.G[0] # 最终分类器
if self.objective == "multi:softmax":
F = np.transpose([F_0] * N) # shape : (K, N)
for alpha, DT_list in self.G[1:]:
for k in range(self.num_class):
DT = DT_list[k]
f_m = DT.inference(X)
F[k] += alpha * f_m # F[k] shape:(N,)
prob = Activation.softmax(F) # F shape:(K,N) ; prob shape:(K,N)
y_predict = np.argmax(prob, axis=0) # y_predict shape:(N,)
elif self.objective in ("reg:squarederror", "binary:logistic"):
F = F_0 # 第一个 存储的是 初始化情况
for alpha, RT in self.G[1:]:
f_m = RT.inference(X)
F += alpha * f_m
if self.objective == "reg:squarederror": # 回归
y_predict = F
elif self.objective == "binary:logistic": # 二分类
y_predict = Activation.sigmoid(F)
y_predict[y_predict >= 0.5] = 1 # 概率 大于 0.5 被标记为 正例
y_predict[y_predict < 0.5] = 0 # 概率 小于 0.5 被标记为 负例
return y_predict
def score(self, X, y):
"""
        Evaluate the model on a test set and return the accuracy; only applicable to classification tasks.
        :param X: feature data, shape=(N_sample, N_feature)
        :param y: label data, shape=(N_sample,)
        :return: accuracy
"""
N = X.shape[0]
y_predict = self.predict(X)
err_arr = np.ones(N, dtype=int)
err_arr[y_predict == y] = 0
error_rate = np.mean(err_arr)
accuracy = 1 - error_rate
return accuracy
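# A minimal usage sketch (hypothetical data and parameter values, for illustration only):
#
#   clf = XGBoost(objective='binary:logistic', max_iter=5, max_depth=3)
#   clf.fit(X_train, y_train, learning_rate=0.3)
#   y_pred = clf.predict(X_test)
#   acc = clf.score(X_test, y_test)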
class EvaluateModel:
def tow_classify_evaluate(self, y_true, y_pred, y_prob):
"""
        Model evaluation.
        :param y_true: ground-truth labels of the samples
        :param y_pred: labels predicted by the model
        :param y_prob: probabilities predicted by the model
:return:
"""
        # 1. Accuracy
print('test dataset accuracy: {} '.format(accuracy_score(y_true, y_pred)))
print('====================')
        # 2. Precision
# print(precision_score(y_true, y_pred, average='macro')) #
# print(precision_score(y_true, y_pred, average='micro')) #
# print(precision_score(y_true, y_pred, average='weighted')) #
print('pos-1 precision: ', precision_score(y_true, y_pred, average='binary'))
precision_list = precision_score(y_true, y_pred, average=None)
print('neg-0 precision:{}, pos-1 precision:{} '.format(precision_list[0], precision_list[1]))
print('====================')
        # 3. Recall
# print(recall_score(y_true, y_pred, average='macro')) #
# print(recall_score(y_true, y_pred, average='micro')) #
# print(recall_score(y_true, y_pred, average='weighted')) #
print('pos-1 recall: ', recall_score(y_true, y_pred, average='binary'))
recall_list = recall_score(y_true, y_pred, average=None)
print('neg-0 recall:{}, pos-1 recall:{} '.format(recall_list[0], recall_list[1]))
print('====================')
# 4. F1-score
# print(f1_score(y_true, y_pred, average='macro'))
# print(f1_score(y_true, y_pred, average='micro'))
# print(f1_score(y_true, y_pred, average='weighted'))
print('pos-1 f1_score: ', f1_score(y_true, y_pred, average='binary'))
f1_score_list = f1_score(y_true, y_pred, average=None)
print('neg-0 f1_score:{}, pos-1 f1_score:{} '.format(f1_score_list[0], f1_score_list[1]))
print('====================')
        # 5. Plot the precision-recall curve
precision, recall, thresholds = precision_recall_curve(y_true, y_prob)
# disp = PrecisionRecallDisplay(precision=precision, recall=recall)
# disp.plot()
plt.plot(recall, precision, label="GDBT_2Classifier(xrh)", color='navy') #
plt.title(' Precision-Recall curve ')
# plt.ylim([0.0, 1.05]) # Y 轴的取值范围
# plt.xlim([0.0, 1.0]) # X 轴的取值范围
plt.xlabel("recall")
plt.ylabel("precision")
plt.legend(loc=(0, -.38), prop=dict(size=14)) # 图例
plt.show()
        # 6. ROC curve
fpr, tpr, _ = roc_curve(y_true, y_prob)
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.plot(fpr, tpr, label="GDBT_2Classifier(xrh)", color='darkorange') #
# plt.xlim( [0.0, 1.0] )
# plt.ylim( [0.0, 1.05] )
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc=(0, -.38), prop=dict(size=14)) # 图例
plt.show()
class Test:
def test_boston_regress_dataset(self):
"""
利用 boston房价 数据集
测试 xgboost 回归
:return:
"""
# 加载sklearn自带的波士顿房价数据集
dataset = load_boston()
# 提取特征数据和目标数据
X = dataset.data
y = dataset.target
# 将数据集以9:1的比例随机分为训练集和测试集,为了重现随机分配设置随机种子,即random_state参数
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=188)
# 输出特征重要性列表
start = time.time()
print('start create model')
clf = XGBoost(
objective='reg:squarederror',
max_iter=100,
max_depth=3,
gama=1.0,
min_child_weight=1,
reg_lambda=1.0,
print_log=False
)
clf.fit(X_train, y_train, learning_rate=0.1, print_error_rate=True)
print(' model complete ')
# end time
end = time.time()
print('time span:', end - start)
y_pred_test = clf.predict(X_test)
print('by xrh , the squared_error:', mean_squared_error(y_test, y_pred_test)) #
def loadData_2classification(self, fileName, n=1000):
"""
加载文件
将 数据集 的标签 转换为 二分类的标签
:param fileName:要加载的文件路径
:param n: 返回的数据集的规模
:return: 数据集和标签集
"""
# 存放数据及标记
dataArr = []
labelArr = []
# 读取文件
fr = open(fileName)
cnt = 0 # 计数器
# 遍历文件中的每一行
for line in fr.readlines():
if cnt == n:
break
# 获取当前行,并按“,”切割成字段放入列表中
# strip:去掉每行字符串首尾指定的字符(默认空格或换行符)
# split:按照指定的字符将字符串切割成每个字段,返回列表形式
curLine = line.strip().split(',')
# 将每行中除标记外的数据放入数据集中(curLine[0]为标记信息)
# 在放入的同时将原先字符串形式的数据转换为整型
# 此外将数据进行了二值化处理,大于128的转换成1,小于的转换成0,方便后续计算
dataArr.append([int(int(num) > 128) for num in curLine[1:]])
# 将标记信息放入标记集中
# 转换成二分类任务
# 标签0设置为1,反之为0
# 显然这会导致 正负 样本的 分布不均衡, 1 的样本很少(10%), 而0 的很多
if int(curLine[0]) == 0:
labelArr.append(1)
else:
labelArr.append(0)
# if int(curLine[0]) <= 5:
# labelArr.append(1)
# else:
# labelArr.append(0)
cnt += 1
fr.close()
# return the data and the labels
return dataArr, labelArr
def test_Mnist_dataset_2classification(self, n_train, n_test):
"""
将 Mnist (手写数字) 数据集 转变为 二分类 数据集
测试模型, 并对 模型效果做出评估
:param n_train: 使用训练数据集的规模
:param n_test: 使用测试数据集的规模
:return:
"""
Mnist_dir = '../../dataset/Mnist'
# 获取训练集
trainDataList, trainLabelList = self.loadData_2classification(os.path.join(Mnist_dir, 'mnist_train.csv'), n=n_train)
print('train data, row num:{} , column num:{} '.format(len(trainDataList), len(trainDataList[0])))
trainDataArr = np.array(trainDataList)
trainLabelArr = np.array(trainLabelList)
# start time
print('start training model....')
start = time.time()
model_path = 'models/xgboost_two_classify.model'
clf = XGBoost(
model_path=model_path,
objective='binary:logistic',
max_iter=20,
max_depth=3,
gama=0.5,
reg_lambda=0.5,
print_log=True
)
clf.fit(trainDataArr, trainLabelArr, learning_rate=1.0)
# end time
end = time.time()
print('training cost time :', end - start)
# load the test set
testDataList, testLabelList = self.loadData_2classification(os.path.join(Mnist_dir, 'mnist_test.csv'), n=n_test)
print('test data, row num:{} , column num:{} '.format(len(testDataList), len(testDataList[0])))
testDataArr = np.array(testDataList)
testLabelArr = np.array(testLabelList)
clf2 = XGBoost(use_pretrained=True, model_path=model_path)
print('test dataset accuracy: {} '.format(clf2.score(testDataArr, testLabelArr)))
# model evaluation
y_prob = clf2.predict_prob(testDataArr)
y_pred = clf2.predict(testDataArr)
y_true = testLabelArr
eval_model = EvaluateModel()
# eval_model.two_classify_evaluate(y_true=y_true,y_pred=y_pred,y_prob=y_prob)
def test_Higgs_dataset(self, n=10000):
"""
:return:
"""
Higgs_dataset_path = '../../dataset/higgs/kaggle'
data = np.loadtxt(os.path.join(Higgs_dataset_path, 'training.csv'), delimiter=',', skiprows=1, max_rows=n,
converters={32: lambda x: int(x == 's'.encode('utf-8'))})
# max_rows sets the number of rows to read
# converters converts the last column
X = data[:, 1:31]
y = data[:, 32]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
# Tianqi Chen's xgboost parameters
# param1 = {'objective': 'binary:logistic', "eta": 0.1, "max_depth": 3, "nthread": 16}
# num_round = 120
# Accuracy:0.8335
start = time.time()
clf = XGBoost(
objective='binary:logistic',
max_iter=20,
max_depth=3,
gama=0.5,
reg_lambda=0.5,
print_log=False
)
clf.fit(X_train, y_train, learning_rate=1.0)
end = time.time()
ypred = clf.predict(X_test)
print("Accuracy:{}".format(accuracy_score(y_test, ypred)))
print('training cost time :', end - start)
def loadData(self, fileName, n=1000):
"""
加载文件
:param fileName:要加载的文件路径
:param n: 返回的数据集的规模
:return: 数据集和标签集
"""
# 存放数据及标记
dataArr = []
labelArr = []
# 读取文件
fr = open(fileName)
cnt = 0 # 计数器
# 遍历文件中的每一行
for line in fr.readlines():
if cnt == n:
break
# 获取当前行,并按“,”切割成字段放入列表中
# strip:去掉每行字符串首尾指定的字符(默认空格或换行符)
# split:按照指定的字符将字符串切割成每个字段,返回列表形式
curLine = line.strip().split(',')
# 将每行中除标记外的数据放入数据集中(curLine[0]为标记信息)
# 在放入的同时将原先字符串形式的数据转换为整型
# 此外将数据进行了二值化处理,大于128的转换成1,小于的转换成0,方便后续计算
dataArr.append([int(int(num) > 128) for num in curLine[1:]])
# 将标记信息放入标记集中
labelArr.append(int(curLine[0]))
cnt += 1
fr.close()
# return the data and the labels
return dataArr, labelArr
def test_Mnist_dataset(self, n_train, n_test):
"""
Mnist (手写数字) 数据集
测试 模型的 多分类
:param n_train: 使用训练数据集的规模
:param n_test: 使用测试数据集的规模
:return:
"""
Mnist_dir = '../../dataset/Mnist'
# 获取训练集
trainDataList, trainLabelList = self.loadData(os.path.join(Mnist_dir, 'mnist_train.csv'), n=n_train)
print('train data, row num:{} , column num:{} '.format(len(trainDataList), len(trainDataList[0])))
trainDataArr = np.array(trainDataList)
trainLabelArr = np.array(trainLabelList)
# start time
print('start training model....')
start = time.time()
model_path = 'models/xgboost_multi_classify.model'
# clf = XGBoost(model_path=model_path,
# objective="multi:softmax",
# num_class=10,
# max_iter=20,
# max_depth=3,
# gama=0,
# reg_lambda=0,
# print_log=True
# )
# clf.fit(trainDataArr, trainLabelArr, learning_rate=1.0)
# end time
end = time.time()
print('training cost time :', end - start)
# load the test set
testDataList, testLabelList = self.loadData(os.path.join(Mnist_dir, 'mnist_test.csv'), n=n_test)
print('test data, row num:{} , column num:{} '.format(len(testDataList), len(testDataList[0])))
testDataArr = np.array(testDataList)
testLabelArr = np.array(testLabelList)
clf2 = XGBoost(use_pretrained=True, model_path=model_path)
print('test dataset accuracy: {} '.format(clf2.score(testDataArr, testLabelArr)))
def test_iris_dataset(self):
# use the iris dataset, which has three classes; y takes values 0, 1, 2
X, y = datasets.load_iris(return_X_y=True)  # contains 150 records
# split the dataset in two: 80% of the data for training, 20% for testing
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=188)
clf = XGBoost(
objective="multi:softmax",
num_class=3,
max_iter=3,
max_depth=2,
print_log=False)
clf.fit(X_train, y_train, learning_rate=0.5)
clf = XGBoost(use_pretrained=True)
print('test dataset accuracy: {} '.format(clf.score(X_test, y_test)))
if __name__ == '__main__':
test = Test()
# test.test_boston_regress_dataset()
test.test_Mnist_dataset_2classification(6000, 1000)
# test.test_Mnist_dataset_2classification(60000, 10000)
# test.test_Higgs_dataset()
# test.test_tiny_multiclassification_dataset()
# test.test_Mnist_dataset(6000,1000)
# test.test_Mnist_dataset(60000, 10000)
# test.test_iris_dataset()
| 2.40625
| 2
|
docs/reorder_divs.py
|
ccmaymay/concrete
| 15
|
12777880
|
#!/usr/bin/env python3
# This file reads in "HTML" from a call to
# thrift --gen html ....
# and does some post-processing to it.
# 1. It wraps struct headers + div definitions
# in a div.
# 2. It reorders struct divs to be alphabetically
# ordered.
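# Typical invocation (illustrative file name; fileinput.input() reads the files
# named on the command line, or stdin when none are given, and the fixed HTML
# is written to stdout):
#     ./reorder_divs.py gen-html/SomeModule.html > SomeModule.reordered.html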
from bs4 import BeautifulSoup
import fileinput
import logging
def make_nice_lines():
looking_for_tag = False
have_ending_tag = False
char_buff = []
logging.basicConfig()
logger = logging.getLogger('make_nice_lines')
logger.setLevel(logging.INFO)
for line in fileinput.input():
logger.debug("original line: " + line)
for char in line:
if char == '<' and len(char_buff) > 0:
yield ''.join(char_buff)
char_buff = []
char_buff.append(char)
if char == '<':
looking_for_tag = True
elif char == '/':
if looking_for_tag:
have_ending_tag = True
elif char == '>':
looking_for_tag = False
if have_ending_tag:
yield ''.join(char_buff)
char_buff = []
have_ending_tag = False
else:
pass
yield ''.join(char_buff)
def add_divs():
added_html = []
main_pane = False
in_h2 = False
logging.basicConfig()
logger = logging.getLogger('reorder_div')
logger.setLevel(logging.INFO)
for line in make_nice_lines():
logger.debug("getting line::" + line)
stripped_line = line.strip()
if stripped_line.startswith('<div class="container-fluid"'):
logger.debug("finding main pane")
main_pane = True
added_html.append(line)
continue
if not main_pane:
added_html.append(line)
continue
# otherwise, we're in the main pane
if stripped_line.startswith('<h2 id'):
logger.debug("Found h2: " + stripped_line)
in_h2 = True
h2_id = stripped_line.split('"')[1]
added_html.append('<div id="' + h2_id +'_div">')
added_html.append(line)
continue
if in_h2:
if stripped_line.startswith('<hr'):
in_h2 = False
added_html.append('</div>')
added_html.append(line)
continue
# elif stripped_line.startsWith('<h2'):
else:
added_html.append(line)
else:
added_html.append(line)
return added_html
def main():
doc_with_divs = add_divs()
bs_doc = BeautifulSoup(''.join(doc_with_divs), 'html.parser')
structs = bs_doc.select('#Structs_div .definition')
sorted_structs = sorted(structs, key = lambda x : x.select('h3')[0].get_text().strip())
sd_dv = bs_doc.select('#Structs_div')
if len(sd_dv) == 0:
page = bs_doc.select('title')
import sys
print ("Generated page", page[0].get_text(),
"has no struct definitions. This is probably okay, but you may want to verify.",
file=sys.stderr)
print (bs_doc.prettify())
return 0
div_children = sd_dv[0].contents
if not(len(div_children) - 2 == len(sorted_structs)):
raise Exception("length of div children (%s) != length of struct defs (%s)" % (len(div_children) - 2, len(sorted_structs)))
for i in range(2, len(sorted_structs)+2):
div_children[i] = sorted_structs[i-2]
print (bs_doc.prettify())
return 0
if __name__ == '__main__':
main()
| 2.9375
| 3
|
turtle2.py
|
agam21-meet/meet2019y1lab1
| 0
|
12777881
|
<reponame>agam21-meet/meet2019y1lab1<filename>turtle2.py
import turtle
turtle.penup()
turtle.goto(-200,-100)
turtle.pendown()
turtle.goto(-200,-100+200)
turtle.goto(-200+50,-100)
turtle.goto(-200+100,-100+200)
turtle.goto(-200+100,-100)
turtle.penup()
turtle.goto(-200+150,-100+200)
turtle.pendown()
turtle.goto(-200+150,-100)
turtle.goto(-200+250,-100)
turtle.penup()
turtle.goto(-200+250,0)
turtle.pendown()
turtle.goto(-200+150,0)
turtle.penup()
turtle.goto(-200+250,100)
turtle.pendown()
turtle.goto(-200+150,100)
turtle.mainloop()
| 2.421875
| 2
|
misc/var_calcs.py
|
dmitbor/pointless-war
| 0
|
12777882
|
import math
def two_point_distance(x1, y1, x2, y2):
"""
Calculates distance between two given points.
x1 - X Value of Point 1
y1 - Y Value of Point 1
x2 - X Value of Point 2
y2 - Y Value of Point 2
"""
return math.fabs(math.hypot(x2 - x1, y2 - y1))
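# Illustrative check: two_point_distance(0, 0, 3, 4) returns 5.0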
def get_closest_enemy(search_squads, my_loc, my_fac, max_range=0):
to_return = None
cur_dist = -1
for squad in search_squads:
if squad.squad_faction is not my_fac:
sqd_ld_loc = squad.squad_leader.get_xy()
distance = two_point_distance(my_loc[0], my_loc[1], sqd_ld_loc[0], sqd_ld_loc[1])
if max_range == 0 or distance <= max_range:
if cur_dist == -1 or distance < cur_dist:
for soldier in squad.squad_members:
if soldier.is_alive():
distance = two_point_distance(my_loc[0], my_loc[1], soldier.get_x(), soldier.get_y())
if (cur_dist == -1 or distance < cur_dist) and (max_range == 0 or distance <= max_range):
to_return = soldier
cur_dist = distance
return to_return
| 3.5625
| 4
|
tests/__init__.py
|
francois-vincent/docker_orchestrator
| 0
|
12777883
|
# encoding: utf-8
import sys
| 1.0625
| 1
|
Educational Round #82 (Div 2)/A.py
|
julianferres/Codeforces
| 4
|
12777884
|
from collections import Counter
t = int(input())
for _ in range(t):
s = input()
firstOne = -1
lastOne = -1
for i in range(len(s)):
if s[i] == '1':
if firstOne == -1:
firstOne = i
else:
lastOne = i
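# answer: the number of '0's strictly between the first and the last '1' (0 if there are fewer than two '1's)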
if firstOne > -1 and lastOne > -1:
print(Counter(s[firstOne:lastOne])['0'])
else:
print(0)
| 3.28125
| 3
|
dstk/tests/test_preprocessing.py
|
joseph-jnl/ds-toolkit
| 0
|
12777885
|
import numpy as np
import pandas as pd
from dstk.preprocessing import (onehot_encode,
mark_binary,
nan_to_binary,
num_to_str)
# Create test data
df = pd.DataFrame()
df['numeric1'] = [0, 1, 0, 0, 1, 1]
df['numeric2'] = [1.0, 3.4, 5.4, 2.3, 3.1, 4.1]
df['numericNaN'] = [1, 2, 3, None, 3, None]
df['cat1'] = ['a', 'a', 'b', 'c', 'c', 'a']
df['catNaN'] = ['A', 'B', None, None, 'B', 'C']
# Test for num_to_str function
def test_numtostr():
# Test for converting column type to object
test = num_to_str(df, ['numeric1'])
assert test['numeric1'].dtype == 'O'
def test_numtostr_inplace():
# Test for converting column to object in place
df2 = df.copy()
num_to_str(df2, ['numeric1'], inplace=True)
assert df2['numeric1'].dtype == 'O'
# Tests for nan_to_binary function
def test_nantobinary_inplaceTrue():
# Test for converting dataframe in place
df2 = df.copy()
nan_to_binary(df2, ['numericNaN'], inplace=True)
assert df2['binary#numericNaN'].tolist() == [0, 0, 0, 1, 0, 1]
def test_nantobinary_featureselect():
# Test for converting specified features
test = nan_to_binary(df, ['numericNaN'])
assert test['binary#numericNaN'].tolist() == [0, 0, 0, 1, 0, 1]
def test_nantobinary_auto():
# Test for auto converting columns with NaN > threshold
test = nan_to_binary(df)
assert test['binary#catNaN'].tolist() == [0, 0, 1, 1, 0, 0]
def test_nantobinary_threshold():
# Test for auto converting columns with NaN > specified threshold
test = nan_to_binary(df, threshold=0.5, inplace=False)
assert test.loc[2, 'catNaN'] == None
# Tests for markbinary function
def test_markbinary_inplaceFalse():
# Test for not transforming df in place
test = mark_binary(df, inplace=False)
assert test.columns.tolist()[0] == 'binary#numeric1'
def test_markbinary_inplaceTrue():
# Test for transforming df in place
df2 = df.copy()
mark_binary(df2, inplace=True)
assert df2.columns.tolist()[0] == 'binary#numeric1'
def test_markbinary_inplaceTrue_selectfeature():
# Test for selecting specific features to mark
df2 = df.copy()
mark_binary(df2, ['numeric1'], inplace=True)
assert df2.columns.tolist()[0] == 'binary#numeric1'
# Tests for onehotencode wrapper
def test_onehot_checkprefix():
# Test whether prefixes are created correctly
test = onehot_encode(df)
assert test.columns.tolist() == ['numeric1',
'numeric2',
'numericNaN',
'binary#cat1_b',
'binary#cat1_c',
'binary#catNaN_B',
'binary#catNaN_C',
'binary#catNaN_nan']
def test_onehot_selectfeature():
# Test whether subselection of features is correct
test = onehot_encode(df, features=['cat1'])
assert test.columns.tolist() == ['numeric1',
'numeric2',
'numericNaN',
'catNaN',
'binary#cat1_b',
'binary#cat1_c']
def test_onehot_retainNaNs():
# Test whether nans are retained
test = onehot_encode(df, impute='retain')
assert np.isnan(test['binary#catNaN_B']).tolist() == [
False, False, True, True, False, False]
def test_onehot_modeimputeNaNs():
# Test mode imputing NaNs
test = onehot_encode(df, impute='mode')
assert test['binary#catNaN_B'].tolist() == [0, 1, 1, 1, 1, 0]
def test_onehot_trackNaNs():
# Test whether nans are tracked in separate column
test = onehot_encode(df)
assert test['binary#catNaN_nan'].tolist() == [0, 0, 1, 1, 0, 0]
def test_onehot_drop_zerovar():
# Test whether zero variance columns are dropped
df['cat2'] = ['a', 'a', 'a', 'a', 'a', 'a']
test = onehot_encode(df)
assert test.columns.tolist() == ['numeric1',
'numeric2',
'numericNaN',
'binary#cat1_b',
'binary#cat1_c',
'binary#catNaN_B',
'binary#catNaN_C',
'binary#catNaN_nan']
| 2.9375
| 3
|
src/tasks/santa_fe_trail/implements/field.py
|
technote-space/genetic-algorithms-py
| 3
|
12777886
|
import copy
from typing import MutableMapping
from .field_flags import FieldFlags
from .helper import Helper
class Field:
"""
Description:
------------
Field
"""
__ate: int
__field: MutableMapping[int, FieldFlags]
__foods: int
def __init__(self) -> None:
self.__ate = 0
self.__field = copy.copy(Helper.get_field())
self.__foods = Helper.get_count()
@property
def width(self) -> int:
return Helper.get_width()
@property
def height(self) -> int:
return Helper.get_height()
@property
def rest(self) -> int:
return self.__foods - self.__ate
@property
def is_finished(self) -> bool:
return self.rest <= 0
def __set_flag(self, x: int, y: int, flag: FieldFlags) -> None:
self.__field[Helper.position_to_index(x, y, self.width)] = flag
def get_flag(self, x: int, y: int) -> FieldFlags:
index = Helper.position_to_index(x, y, self.width)
if index in self.__field:
return self.__field[index]
return FieldFlags.NONE
def __add_flag(self, x: int, y: int, flag: FieldFlags) -> None:
# noinspection PyTypeChecker
self.__set_flag(x, y, self.get_flag(x, y) | flag)
def check(self, x: int, y: int, flag: FieldFlags) -> bool:
return (self.get_flag(x, y) & flag) == flag
def on_visited(self, x: int, y: int) -> None:
if not self.check(x, y, FieldFlags.VISITED):
self.__add_flag(x, y, FieldFlags.VISITED)
if self.check(x, y, FieldFlags.FOOD):
self.__ate += 1
def get_fitness(self) -> float:
return self.__ate / self.__foods
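# Illustrative usage (assumes Helper supplies a populated trail):
#   field = Field()
#   field.on_visited(0, 0)   # marks the cell as visited and eats any food there
#   print(field.get_fitness())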
| 2.703125
| 3
|
tests/test_0231-indexform.py
|
BioGeek/awkward-1.0
| 519
|
12777887
|
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import absolute_import
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
def test():
for itype in ["i8", "u8", "i32", "u32", "i64"]:
form = ak.forms.ListOffsetForm(itype, ak.forms.EmptyForm())
assert form.offsets == itype
| 1.695313
| 2
|
src/tensorforce/tensorforce/agents/__init__.py
|
linus87/drl_shape_optimization
| 17
|
12777888
|
# Copyright 2018 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from tensorforce.agents.agent import Agent
from tensorforce.agents.constant_agent import ConstantAgent
from tensorforce.agents.drl_agent import DRLAgent
from tensorforce.agents.random_agent import RandomAgent
from tensorforce.agents.ddpg_agent import DDPGAgent
from tensorforce.agents.dqfd_agent import DQFDAgent
from tensorforce.agents.dqn_agent import DQNAgent
from tensorforce.agents.dqn_nstep_agent import DQNNstepAgent
from tensorforce.agents.naf_agent import NAFAgent
from tensorforce.agents.ppo_agent import PPOAgent
from tensorforce.agents.trpo_agent import TRPOAgent
from tensorforce.agents.vpg_agent import VPGAgent
agents = dict(
constant=ConstantAgent, ddpg=DDPGAgent, dqfd=DQFDAgent, dqn=DQNAgent, dqn_nstep=DQNNstepAgent,
naf=NAFAgent, ppo=PPOAgent, random=RandomAgent, trpo=TRPOAgent, vpg=VPGAgent
)
__all__ = [
'Agent', 'agents', 'ConstantAgent', 'DDPGAgent', 'DQFDAgent', 'DQNAgent', 'DQNNstepAgent',
'DRLAgent', 'NAFAgent', 'PPOAgent', 'RandomAgent', 'TRPOAgent', 'VPGAgent'
]
| 1.53125
| 2
|
bin/run_twitter_client.py
|
kubor/chem_bot
| 2
|
12777889
|
<filename>bin/run_twitter_client.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from chem_bot import twitter as T
def _main():
config = T.Config()
config.load()
api = T.oauth(config)
stream = T.Streamer(api, config)
stream.statuses.filter(track=config.query, follow=config.filter_follow)
# stream.statuses.filter(track='twitter')
if __name__ == '__main__':
_main()
| 2.15625
| 2
|
surveymonty/exceptions.py
|
andrewkshim/surveymonty
| 15
|
12777890
|
<reponame>andrewkshim/surveymonty
"""
surveymonty.exceptions
----------------------
"""
class SurveyMontyError(Exception):
"""Base exception."""
pass
class SurveyMontyAPIError(SurveyMontyError):
"""Error for non-2xx API responses."""
def __init__(self, resp, *args):
super(SurveyMontyAPIError, self).__init__(resp.content, *args)
self.response = resp
| 2.5625
| 3
|
tftpy/context/server.py
|
jcarswell/tftpy
| 0
|
12777891
|
import logging
import time
from io import IOBase
from typing import Callable
from typing import Any
from .base import Context
from tftpy.states import Start
logger = logging.getLogger('tftpy.context.server')
class Server(Context):
"""The context for the server."""
def __init__(self, host: str, port: int, timeout: int, root: str,
dyn_file_func: Callable[[str,str,int],Any] = None,
upload_open: Callable[[str,Context],IOBase]=None,
**kwargs) -> None:
"""Prepare the server context to process data from a client
Args:
host (str): The requesting client's IP
port (int): The requesting client's port
timeout (int): socket timeout
root (str): server root path
dyn_file_func (Callable[[str,str,int],Any], optional): A dynamic function to read data from
upload_open (Callable[[str,Context],IOBase], optional): A dynamic function to write data to
"""
super().__init__(host, port, timeout, **kwargs)
# At this point we have no idea if this is a download or an upload. We
# need to let the start state determine that.
self.state = Start(self)
self.root = root
self.dyn_file_func = dyn_file_func
self.upload_open = upload_open
def start(self, buffer: bytes = None) -> None:
"""Start the state cycle. Note that the server context receives an
initial packet in its start method. Also note that the server does not
loop on cycle(), as it expects the TftpServer object to manage
that.
Args:
buffer (bytes, optional): Data received from the client.
Should be either a read or write request
"""
logger.debug("In tftpy.contex.server.start")
self.metrics.start_time = self.last_update = time.time()
pkt = self.factory.parse(buffer)
logger.debug(f"tftpy.contex.server.start() - factory returned a {pkt}")
# Call handle once with the initial packet. This should put us into
# the download or the upload state.
self.state = self.state.handle(pkt,self.host,self.port)
def end(self, *args, **kwargs) -> None:
"""Finish up the context."""
super().end(*args, **kwargs)
self.metrics.end_time = time.time()
logger.debug(f"Set metrics.end_time to {self.metrics.end_time}")
self.metrics.compute()
| 2.5625
| 3
|
admin/c2cgeoportal_admin/views/layer_groups.py
|
vvmruder/c2cgeoportal
| 0
|
12777892
|
from functools import partial
from pyramid.view import view_defaults
from pyramid.view import view_config
from c2cgeoform.schema import GeoFormSchemaNode
from c2cgeoform.views.abstract_views import ListField
from deform.widget import FormWidget
from c2cgeoportal_admin.schemas.treegroup import children_schema_node
from c2cgeoportal_admin.schemas.metadata import metadatas_schema_node
from c2cgeoportal_admin.schemas.treeitem import parent_id_node
from c2cgeoportal_admin.views.treeitems import TreeItemViews
from c2cgeoportal_commons.models.main import LayerGroup, TreeGroup
_list_field = partial(ListField, LayerGroup)
base_schema = GeoFormSchemaNode(LayerGroup, widget=FormWidget(fields_template='layer_group_fields'))
base_schema.add(children_schema_node())
base_schema.add(metadatas_schema_node.clone())
base_schema.add_unique_validator(LayerGroup.name, LayerGroup.id)
base_schema.add(parent_id_node(TreeGroup))
@view_defaults(match_param='table=layer_groups')
class LayerGroupsViews(TreeItemViews):
_list_fields = TreeItemViews._list_fields + \
[_list_field('is_expanded')] + \
TreeItemViews._extra_list_fields
_id_field = 'id'
_model = LayerGroup
_base_schema = base_schema
def _base_query(self, query=None):
return super()._base_query(
self._request.dbsession.query(LayerGroup).distinct())
@view_config(route_name='c2cgeoform_index',
renderer='../templates/index.jinja2')
def index(self):
return super().index()
@view_config(route_name='c2cgeoform_grid',
renderer='fast_json')
def grid(self):
return super().grid()
@view_config(route_name='c2cgeoform_item',
request_method='GET',
renderer='../templates/edit.jinja2')
def view(self):
return super().edit()
@view_config(route_name='c2cgeoform_item',
request_method='POST',
renderer='../templates/edit.jinja2')
def save(self):
return super().save()
@view_config(route_name='c2cgeoform_item',
request_method='DELETE',
renderer='fast_json')
def delete(self):
return super().delete()
@view_config(route_name='c2cgeoform_item_duplicate',
request_method='GET',
renderer='../templates/edit.jinja2')
def duplicate(self):
return super().duplicate()
| 1.726563
| 2
|
noise-removal/test/test_noise_removal_client.py
|
audo-ai/audoai-python
| 1
|
12777893
|
import os
import wave
from io import BytesIO, BufferedIOBase
from tempfile import NamedTemporaryFile
import pytest
from audoai.noise_removal import NoiseRemovalClient
@pytest.fixture()
def noise_removal() -> NoiseRemovalClient:
api_key = os.environ['AUDO_API_KEY']
base_url = os.environ['AUDO_BASE_URL']
noise_removal = NoiseRemovalClient(api_key, base_url)
return noise_removal
@pytest.fixture()
def silence_fileobject() -> BufferedIOBase:
channels = 1
sample_rate = 44100
seconds = 4.0
wav_data = BytesIO(bytes())
with wave.open(wav_data, "wb") as wf:
wf.setparams((1, channels, sample_rate, 0, "NONE", "not compressed"))
wf.writeframes(b"\0" * int(sample_rate * seconds * channels))
wav_data.seek(0)
return wav_data
def test_process_fileobject(
noise_removal: NoiseRemovalClient,
silence_fileobject: BufferedIOBase
):
output = noise_removal.process(silence_fileobject, "wav", output_extension="mp3")
assert output.url
with NamedTemporaryFile(suffix=".mp3") as temp:
output.save(temp.name)
# Invalid extension
with pytest.raises(ValueError):
with NamedTemporaryFile(suffix=".wav") as temp:
output.save(temp.name)
def test_process_filename(
noise_removal: NoiseRemovalClient,
silence_fileobject: BufferedIOBase
):
with NamedTemporaryFile(suffix=".wav") as temp:
temp.write(silence_fileobject.read())
output = noise_removal.process(temp.name)
assert output.url
def test_process_url(
noise_removal: NoiseRemovalClient
):
input_url = "http://dl5.webmfiles.org/big-buck-bunny_trailer.webm"
output = noise_removal.process(input_url, output_extension="mp4")
assert output.url
def test_process_invalid(
noise_removal: NoiseRemovalClient,
silence_fileobject: BufferedIOBase
):
with pytest.raises((OSError, TypeError)):
noise_removal.process('invalid-arg')
| 2.171875
| 2
|
NER/rule_base_ner.py
|
SatoMichi/Information_Extraction_Basic
| 0
|
12777894
|
import re
from janome.tokenizer import Tokenizer
# rules : {boolean function(word):label}
# txt: str
def rule_base_ner(rules,txt):
tokenizer = Tokenizer()
tokens = tokenizer.tokenize(txt)
history = []
for t in tokens:
word = t.surface
for rule,label in rules.items():
if rule(word) and word not in history:
tag1 = "<"+label+">"
tag2 = "</"+label+">"
txt = re.sub(word,tag1+word+tag2,txt)
break
history.append(word)
return txt
if __name__ == "__main__":
rules = { lambda x: x=="男":"People",
lambda x: x=="蟋蟀":"Organism",
lambda x: x[-1]=="門":"Location",
lambda x: x[-2:]=="大路":"Location",
lambda x: x=="一" or x=="二" or x=="三":"Number",}
txt = """広い門の下には、この男のほかに誰もいない。ただ、所々丹塗の剥はげた、大きな円柱に、蟋蟀が一匹とまっている。
羅生門が、朱雀大路にある以上は、この男のほかにも、雨やみをする市女笠や揉烏帽子が、
もう二三人はありそうなものである。それが、この男のほかには誰もいない"""
result = rule_base_ner(rules,txt)
print(txt)
print(result)
| 2.96875
| 3
|
test/test_wordcounter.py
|
williezh/linuxtools
| 13
|
12777895
|
<reponame>williezh/linuxtools
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division, unicode_literals
import os, sys
from unittest import TestCase, main
from collections import Counter
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
from wordcounter import WordCounter
class WordCounterMultiprocessesTest(TestCase):
def test_result(self):
f1 = 'tmp1.txt'
f2 = 'tmp2.txt'
words = ['你', '我', '它', '她', '他']
amounts = [20000, 3000, 1, 50000, 6666]
c = Counter(dict(zip(words, amounts)))
result = '\n'.join(['{}: {}'.format(i, j) for i, j in c.most_common()])
s = '\n'.join(['{}\n'.format(i)*j for i,j in zip(words, amounts)])
with open(f1, 'wb') as f:
f.write(s.encode('utf-8'))
ws = [WordCounter(f1, f2, i,None,1) for i in [0, 1, None]]
for w in ws:
w.run()
self.assertEqual(c, w.counter)
self.assertEqual(result, w.result)
if __name__ == '__main__':
main()
| 2.53125
| 3
|
output/models/ms_data/additional/test72597_xsd/test72597.py
|
tefra/xsdata-w3c-tests
| 1
|
12777896
|
<filename>output/models/ms_data/additional/test72597_xsd/test72597.py
from dataclasses import dataclass, field
from typing import List, Optional
__NAMESPACE__ = "foo"
@dataclass
class A:
part: List["A.Part"] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "foo",
"min_occurs": 1,
}
)
@dataclass
class Part:
value: str = field(
default="",
metadata={
"required": True,
}
)
number: Optional[int] = field(
default=None,
metadata={
"type": "Attribute",
}
)
number2: Optional[int] = field(
default=None,
metadata={
"type": "Attribute",
}
)
@dataclass
class Root:
class Meta:
name = "root"
namespace = "foo"
a: Optional[A] = field(
default=None,
metadata={
"name": "A",
"type": "Element",
"required": True,
}
)
| 2.328125
| 2
|
trio_cdp/generated/runtime.py
|
vcalvert/trio-chrome-devtools-protocol
| 37
|
12777897
|
<reponame>vcalvert/trio-chrome-devtools-protocol
# DO NOT EDIT THIS FILE!
#
# This code is generated off of PyCDP modules. If you need to make
# changes, edit the generator and regenerate all of the modules.
from __future__ import annotations
import typing
from ..context import get_connection_context, get_session_context
import cdp.runtime
from cdp.runtime import (
BindingCalled,
CallArgument,
CallFrame,
ConsoleAPICalled,
CustomPreview,
EntryPreview,
ExceptionDetails,
ExceptionRevoked,
ExceptionThrown,
ExecutionContextCreated,
ExecutionContextDescription,
ExecutionContextDestroyed,
ExecutionContextId,
ExecutionContextsCleared,
InspectRequested,
InternalPropertyDescriptor,
ObjectPreview,
PrivatePropertyDescriptor,
PropertyDescriptor,
PropertyPreview,
RemoteObject,
RemoteObjectId,
ScriptId,
StackTrace,
StackTraceId,
TimeDelta,
Timestamp,
UniqueDebuggerId,
UnserializableValue
)
async def add_binding(
name: str,
execution_context_id: typing.Optional[ExecutionContextId] = None
) -> None:
'''
If executionContextId is empty, adds binding with the given name on the
global objects of all inspected contexts, including those created later,
bindings survive reloads.
If executionContextId is specified, adds binding only on global object of
given execution context.
Binding function takes exactly one argument, this argument should be string,
in case of any other input, function throws an exception.
Each binding function call produces Runtime.bindingCalled notification.
**EXPERIMENTAL**
:param name:
:param execution_context_id: *(Optional)*
'''
session = get_session_context('runtime.add_binding')
return await session.execute(cdp.runtime.add_binding(name, execution_context_id))
async def await_promise(
promise_object_id: RemoteObjectId,
return_by_value: typing.Optional[bool] = None,
generate_preview: typing.Optional[bool] = None
) -> typing.Tuple[RemoteObject, typing.Optional[ExceptionDetails]]:
'''
Add handler to promise with given promise object id.
:param promise_object_id: Identifier of the promise.
:param return_by_value: *(Optional)* Whether the result is expected to be a JSON object that should be sent by value.
:param generate_preview: *(Optional)* Whether preview should be generated for the result.
:returns: A tuple with the following items:
0. **result** – Promise result. Will contain rejected value if promise was rejected.
1. **exceptionDetails** – *(Optional)* Exception details if stack trace is available.
'''
session = get_session_context('runtime.await_promise')
return await session.execute(cdp.runtime.await_promise(promise_object_id, return_by_value, generate_preview))
async def call_function_on(
function_declaration: str,
object_id: typing.Optional[RemoteObjectId] = None,
arguments: typing.Optional[typing.List[CallArgument]] = None,
silent: typing.Optional[bool] = None,
return_by_value: typing.Optional[bool] = None,
generate_preview: typing.Optional[bool] = None,
user_gesture: typing.Optional[bool] = None,
await_promise: typing.Optional[bool] = None,
execution_context_id: typing.Optional[ExecutionContextId] = None,
object_group: typing.Optional[str] = None
) -> typing.Tuple[RemoteObject, typing.Optional[ExceptionDetails]]:
'''
Calls function with given declaration on the given object. Object group of the result is
inherited from the target object.
:param function_declaration: Declaration of the function to call.
:param object_id: *(Optional)* Identifier of the object to call function on. Either objectId or executionContextId should be specified.
:param arguments: *(Optional)* Call arguments. All call arguments must belong to the same JavaScript world as the target object.
:param silent: *(Optional)* In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides ```setPauseOnException```` state.
:param return_by_value: *(Optional)* Whether the result is expected to be a JSON object which should be sent by value.
:param generate_preview: **(EXPERIMENTAL)** *(Optional)* Whether preview should be generated for the result.
:param user_gesture: *(Optional)* Whether execution should be treated as initiated by user in the UI.
:param await_promise: *(Optional)* Whether execution should ````await``` for resulting value and return once awaited promise is resolved.
:param execution_context_id: *(Optional)* Specifies execution context which global object will be used to call function on. Either executionContextId or objectId should be specified.
:param object_group: *(Optional)* Symbolic group name that can be used to release multiple objects. If objectGroup is not specified and objectId is, objectGroup will be inherited from object.
:returns: A tuple with the following items:
0. **result** – Call result.
1. **exceptionDetails** – *(Optional)* Exception details.
'''
session = get_session_context('runtime.call_function_on')
return await session.execute(cdp.runtime.call_function_on(function_declaration, object_id, arguments, silent, return_by_value, generate_preview, user_gesture, await_promise, execution_context_id, object_group))
async def compile_script(
expression: str,
source_url: str,
persist_script: bool,
execution_context_id: typing.Optional[ExecutionContextId] = None
) -> typing.Tuple[typing.Optional[ScriptId], typing.Optional[ExceptionDetails]]:
'''
Compiles expression.
:param expression: Expression to compile.
:param source_url: Source url to be set for the script.
:param persist_script: Specifies whether the compiled script should be persisted.
:param execution_context_id: *(Optional)* Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page.
:returns: A tuple with the following items:
0. **scriptId** – *(Optional)* Id of the script.
1. **exceptionDetails** – *(Optional)* Exception details.
'''
session = get_session_context('runtime.compile_script')
return await session.execute(cdp.runtime.compile_script(expression, source_url, persist_script, execution_context_id))
async def disable() -> None:
'''
Disables reporting of execution contexts creation.
'''
session = get_session_context('runtime.disable')
return await session.execute(cdp.runtime.disable())
async def discard_console_entries() -> None:
'''
Discards collected exceptions and console API calls.
'''
session = get_session_context('runtime.discard_console_entries')
return await session.execute(cdp.runtime.discard_console_entries())
async def enable() -> None:
'''
Enables reporting of execution contexts creation by means of ``executionContextCreated`` event.
When the reporting gets enabled the event will be sent immediately for each existing execution
context.
'''
session = get_session_context('runtime.enable')
return await session.execute(cdp.runtime.enable())
async def evaluate(
expression: str,
object_group: typing.Optional[str] = None,
include_command_line_api: typing.Optional[bool] = None,
silent: typing.Optional[bool] = None,
context_id: typing.Optional[ExecutionContextId] = None,
return_by_value: typing.Optional[bool] = None,
generate_preview: typing.Optional[bool] = None,
user_gesture: typing.Optional[bool] = None,
await_promise: typing.Optional[bool] = None,
throw_on_side_effect: typing.Optional[bool] = None,
timeout: typing.Optional[TimeDelta] = None
) -> typing.Tuple[RemoteObject, typing.Optional[ExceptionDetails]]:
'''
Evaluates expression on global object.
:param expression: Expression to evaluate.
:param object_group: *(Optional)* Symbolic group name that can be used to release multiple objects.
:param include_command_line_api: *(Optional)* Determines whether Command Line API should be available during the evaluation.
:param silent: *(Optional)* In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides ```setPauseOnException```` state.
:param context_id: *(Optional)* Specifies in which execution context to perform evaluation. If the parameter is omitted the evaluation will be performed in the context of the inspected page.
:param return_by_value: *(Optional)* Whether the result is expected to be a JSON object that should be sent by value.
:param generate_preview: **(EXPERIMENTAL)** *(Optional)* Whether preview should be generated for the result.
:param user_gesture: *(Optional)* Whether execution should be treated as initiated by user in the UI.
:param await_promise: *(Optional)* Whether execution should ````await``` for resulting value and return once awaited promise is resolved.
:param throw_on_side_effect: **(EXPERIMENTAL)** *(Optional)* Whether to throw an exception if side effect cannot be ruled out during evaluation.
:param timeout: **(EXPERIMENTAL)** *(Optional)* Terminate execution after timing out (number of milliseconds).
:returns: A tuple with the following items:
0. **result** – Evaluation result.
1. **exceptionDetails** – *(Optional)* Exception details.
'''
session = get_session_context('runtime.evaluate')
return await session.execute(cdp.runtime.evaluate(expression, object_group, include_command_line_api, silent, context_id, return_by_value, generate_preview, user_gesture, await_promise, throw_on_side_effect, timeout))
async def get_heap_usage() -> typing.Tuple[float, float]:
'''
Returns the JavaScript heap usage.
It is the total usage of the corresponding isolate not scoped to a particular Runtime.
**EXPERIMENTAL**
:returns: A tuple with the following items:
0. **usedSize** – Used heap size in bytes.
1. **totalSize** – Allocated heap size in bytes.
'''
session = get_session_context('runtime.get_heap_usage')
return await session.execute(cdp.runtime.get_heap_usage())
async def get_isolate_id() -> str:
'''
Returns the isolate id.
**EXPERIMENTAL**
:returns: The isolate id.
'''
session = get_session_context('runtime.get_isolate_id')
return await session.execute(cdp.runtime.get_isolate_id())
async def get_properties(
object_id: RemoteObjectId,
own_properties: typing.Optional[bool] = None,
accessor_properties_only: typing.Optional[bool] = None,
generate_preview: typing.Optional[bool] = None
) -> typing.Tuple[typing.List[PropertyDescriptor], typing.Optional[typing.List[InternalPropertyDescriptor]], typing.Optional[typing.List[PrivatePropertyDescriptor]], typing.Optional[ExceptionDetails]]:
'''
Returns properties of a given object. Object group of the result is inherited from the target
object.
:param object_id: Identifier of the object to return properties for.
:param own_properties: *(Optional)* If true, returns properties belonging only to the element itself, not to its prototype chain.
:param accessor_properties_only: **(EXPERIMENTAL)** *(Optional)* If true, returns accessor properties (with getter/setter) only; internal properties are not returned either.
:param generate_preview: **(EXPERIMENTAL)** *(Optional)* Whether preview should be generated for the results.
:returns: A tuple with the following items:
0. **result** – Object properties.
1. **internalProperties** – *(Optional)* Internal object properties (only of the element itself).
2. **privateProperties** – *(Optional)* Object private properties.
3. **exceptionDetails** – *(Optional)* Exception details.
'''
session = get_session_context('runtime.get_properties')
return await session.execute(cdp.runtime.get_properties(object_id, own_properties, accessor_properties_only, generate_preview))
async def global_lexical_scope_names(
execution_context_id: typing.Optional[ExecutionContextId] = None
) -> typing.List[str]:
'''
Returns all let, const and class variables from global scope.
:param execution_context_id: *(Optional)* Specifies in which execution context to lookup global scope variables.
:returns:
'''
session = get_session_context('runtime.global_lexical_scope_names')
return await session.execute(cdp.runtime.global_lexical_scope_names(execution_context_id))
async def query_objects(
prototype_object_id: RemoteObjectId,
object_group: typing.Optional[str] = None
) -> RemoteObject:
'''
:param prototype_object_id: Identifier of the prototype to return objects for.
:param object_group: *(Optional)* Symbolic group name that can be used to release the results.
:returns: Array with objects.
'''
session = get_session_context('runtime.query_objects')
return await session.execute(cdp.runtime.query_objects(prototype_object_id, object_group))
async def release_object(
object_id: RemoteObjectId
) -> None:
'''
Releases remote object with given id.
:param object_id: Identifier of the object to release.
'''
session = get_session_context('runtime.release_object')
return await session.execute(cdp.runtime.release_object(object_id))
async def release_object_group(
object_group: str
) -> None:
'''
Releases all remote objects that belong to a given group.
:param object_group: Symbolic object group name.
'''
session = get_session_context('runtime.release_object_group')
return await session.execute(cdp.runtime.release_object_group(object_group))
async def remove_binding(
name: str
) -> None:
'''
This method does not remove binding function from global object but
unsubscribes current runtime agent from Runtime.bindingCalled notifications.
**EXPERIMENTAL**
:param name:
'''
session = get_session_context('runtime.remove_binding')
return await session.execute(cdp.runtime.remove_binding(name))
async def run_if_waiting_for_debugger() -> None:
'''
Tells inspected instance to run if it was waiting for debugger to attach.
'''
session = get_session_context('runtime.run_if_waiting_for_debugger')
return await session.execute(cdp.runtime.run_if_waiting_for_debugger())
async def run_script(
script_id: ScriptId,
execution_context_id: typing.Optional[ExecutionContextId] = None,
object_group: typing.Optional[str] = None,
silent: typing.Optional[bool] = None,
include_command_line_api: typing.Optional[bool] = None,
return_by_value: typing.Optional[bool] = None,
generate_preview: typing.Optional[bool] = None,
await_promise: typing.Optional[bool] = None
) -> typing.Tuple[RemoteObject, typing.Optional[ExceptionDetails]]:
'''
Runs script with given id in a given context.
:param script_id: Id of the script to run.
:param execution_context_id: *(Optional)* Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page.
:param object_group: *(Optional)* Symbolic group name that can be used to release multiple objects.
:param silent: *(Optional)* In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides ```setPauseOnException```` state.
:param include_command_line_api: *(Optional)* Determines whether Command Line API should be available during the evaluation.
:param return_by_value: *(Optional)* Whether the result is expected to be a JSON object which should be sent by value.
:param generate_preview: *(Optional)* Whether preview should be generated for the result.
:param await_promise: *(Optional)* Whether execution should ````await``` for resulting value and return once awaited promise is resolved.
:returns: A tuple with the following items:
0. **result** – Run result.
1. **exceptionDetails** – *(Optional)* Exception details.
'''
session = get_session_context('runtime.run_script')
return await session.execute(cdp.runtime.run_script(script_id, execution_context_id, object_group, silent, include_command_line_api, return_by_value, generate_preview, await_promise))
async def set_async_call_stack_depth(
max_depth: int
) -> None:
'''
Enables or disables async call stacks tracking.
:param max_depth: Maximum depth of async call stacks. Setting to ```0``` will effectively disable collecting async call stacks (default).
'''
session = get_session_context('runtime.set_async_call_stack_depth')
return await session.execute(cdp.runtime.set_async_call_stack_depth(max_depth))
async def set_custom_object_formatter_enabled(
enabled: bool
) -> None:
'''
**EXPERIMENTAL**
:param enabled:
'''
session = get_session_context('runtime.set_custom_object_formatter_enabled')
return await session.execute(cdp.runtime.set_custom_object_formatter_enabled(enabled))
async def set_max_call_stack_size_to_capture(
size: int
) -> None:
'''
**EXPERIMENTAL**
:param size:
'''
session = get_session_context('runtime.set_max_call_stack_size_to_capture')
return await session.execute(cdp.runtime.set_max_call_stack_size_to_capture(size))
async def terminate_execution() -> None:
'''
Terminate current or next JavaScript execution.
Will cancel the termination when the outer-most script execution ends.
**EXPERIMENTAL**
'''
session = get_session_context('runtime.terminate_execution')
return await session.execute(cdp.runtime.terminate_execution())
| 1.71875
| 2
|
scripts/exercicios/ex010.py
|
RuanBarretodosSantos/python
| 0
|
12777898
|
<filename>scripts/exercicios/ex010.py
real = float(input('Quantos reais você tem ? R$ '))
dolar = real / 5.16
euro = real / 5.77
print(f'Com {real} você consegue comprar {dolar:.2f} dólares ou {euro:.2f} euros')
| 2.28125
| 2
|
src/main.py
|
TRex22/picam
| 1
|
12777899
|
# Notes:
# For fixing multi-press See: https://raspberrypi.stackexchange.com/questions/28955/unwanted-multiple-presses-when-using-gpio-button-press-detection
# Supported file types: https://picamera.readthedocs.io/en/release-1.10/api_camera.html#picamera.camera.PiCamera.capture
# 'jpeg' - Write a JPEG file
# 'png' - Write a PNG file
# 'gif' - Write a GIF file
# 'bmp' - Write a Windows bitmap file
# 'yuv' - Write the raw image data to a file in YUV420 format
# 'rgb' - Write the raw image data to a file in 24-bit RGB format
# 'rgba' - Write the raw image data to a file in 32-bit RGBA format
# 'bgr' - Write the raw image data to a file in 24-bit BGR format
# 'bgra' - Write the raw image data to a file in 32-bit BGRA format
# 'raw' - Deprecated option for raw captures; the format is taken from the deprecated raw_format attribute
# available_exposure_compensations = [-25, -20, -15, -10, -5, 0, 5, 10, 15, 20, 25]
# HQ Camera on-sensor defective pixel correction (DPC)
# https://www.raspberrypi.org/forums/viewtopic.php?f=43&t=277768
# 0 - All DPC disabled.
# 1 - Enable mapped on-sensor DPC.
# 2 - Enable dynamic on-sensor DPC.
# 3 - Enable mapped and dynamic on-sensor DPC.
# The default is (3). It would be useful to get feedback from users who do astrophotography if disabling DPC actually makes a difference or not.
# Note that this does not disable the ISP defective pixel correction that will still be active, so you will likely only see changes in the RAW image.
# 8MP pi camera v2.1
# width = 3280
# height = 2464
# 12MP Pi HQ camera
# width = 4056
# height = 3040
VERSION = "0.0.35"
# Modules
import document_handler
import camera_handler
################################################################################
## Config ##
################################################################################
config = {
"colour_profile_path": "/home/pi/Colour_Profiles/imx477/Raspberry Pi High Quality Camera Lumariver 2860k-5960k Neutral Look.json",
"dcim_path": 'home/pi/DCIM',
"dcim_images_path_raw": '/home/pi/DCIM/images/raw',
"dcim_original_images_path": '/home/pi/DCIM/images/original',
"dcim_hdr_images_path": '/home/pi/DCIM/images/hdr',
"dcim_videos_path": '/home/pi/DCIM/videos',
"dcim_tmp_path": '/home/pi/DCIM/tmp',
"filetype": '.dng',
"bpp": 12,
"format": 'jpeg',
"video_format": 'h264', # mjpeg, h264 TODO: Make into an option
"bayer": True,
"delay_time": 0,
"delay_times": [0, 1, 2, 5, 10], # in seconds
"short_delay": False,
"short_delay_time": 0.05,
"min_fps": 0.005, # mode 3 for HQ cam is between 0.005 and 10 fps
"max_fps": 40, # 40, # Possibly equal to screen_fps
"screen_fps": 40, # 120 fps at 1012x760
"default_screen_fps": 60,
"capture_timeout": 1000, # Must be greater than max exposure in seconds
"screen_w": 1280, # 1024 # 1012 # 320 screen res # Needs to be 4:3
"screen_h": 960, # 768 #760 # 240 screen res # Needs to be 4:3
"overlay_w": 320,
"overlay_h": 240,
"width": 4056, # Image width
"height": 3040, # Image height
"video_width": 4056,
"video_height": 3040,
"annotate_text_size": 48, # 6 to 160, inclusive. The default is 32
"exposure_mode": 'auto',
"default_exposure_mode": 'auto',
"default_zoom": (0.0, 0.0, 1.0, 1.0),
"set_zoom": '1x',
"max_zoom": (0.4, 0.4, 0.2, 0.2),
"max_zoom_2": (0.4499885557335775, 0.4499885557335775, 0.09999237048905166, 0.09999237048905166),
"max_zoom_3": (0.5, 0.5, 0.05, 0.05),
"available_exposure_modes": [
"auto", # default has to be first in the list
"off",
"verylong",
"fixedfps",
"antishake",
"night",
"nightpreview",
"backlight",
"spotlight",
"sports",
"snow",
"beach",
"fireworks"
],
"available_isos": [0, 5, 10, 25, 50, 100, 200, 320, 400, 500, 640, 800, 1600], # 0 is auto / 3200, 6400
"iso": 5, #0, # 800 / should shift to 0 - auto
"default_iso": 5,
"available_shutter_speeds": [0, 100, 500, 1000, 1500, 2000, 4000, 8000, 3000, 16667, 33333, 66667, 125000, 250000, 500000, 1000000], # 1/10000, 1/2000, 1/1000, ...
"available_long_shutter_speeds": [0, 1000000, 2000000, 3000000, 4000000, 5000000, 10000000, 15000000, 20000000, 25000000, 30000000, 35000000, 40000000, 200000000],
"take_long_shutter_speed": False,
"shutter_speed": 0,
"long_shutter_speed": 0,
"default_shutter_speed": 0,
"available_awb_mode": ['auto', 'off', 'sunlight', 'cloudy', 'shade', 'tungsten', 'fluorescent', 'incandescent', 'flash', 'horizon'],
"awb_mode": 'auto',
"default_awb_mode": 'auto', # "awb_gains": 0.0 - 8.0 (),
"dpc": 0, # 0 - 3, default is 3 and 0 is disabled
"default_dpc": 0,
"raw_convert": True,
"available_dpc_options": [0, 1, 2, 3], #https://www.raspberrypi.org/forums/viewtopic.php?f=43&t=277768
"current_menu_items": ["auto", "shutter_speed", "iso", "hdr2", "delay_time", "long_shutter_speed", "sub_menu"],
"available_menu_items": ["auto", "shutter_speed", "iso", "hdr2", "delay_time", "long_shutter_speed", "sub_menu"],
"available_sub_menu_items": ["sub_menu", "exposure_mode", "awb_mode", "hdr", "video", "resolution", "encoding", "dpc - star eater", "raw_convert", "fom"],
"menu_item": "auto",
"default_menu_item": "auto",
"hdr": False,
"preview": True,
"fom": False,
"default_fom": True,
"fom_overlay_x_padding": 50, # in pixels
"fom_overlay_y_padding": 50, # in pixels
"hdr2": False,
"preview_mode": "built-in", # "built-in" "continuous_shot"
"default_preview_mode": 'built-in',
"video": False,
"recording": False,
"encoding": False, # TODO
"gpio": {
"button_1": 27,
"button_2": 23,
"button_3": 22,
"button_4": 17,
"bouncetime": 450
}
}
document_handler.check_for_folders(config)
################################################################################
# Main Loop #
################################################################################
# Begin Camera start-up
camera, overlay = camera_handler.start_camera(config) # Runs main camera loop
message = input("Press enter to quit\n\n") # Run until someone presses enter
print('Stop camera!')
camera_handler.stop_camera(camera, overlay, config)
| 1.882813
| 2
|
Algorithm/BOJ/Gold/1915가장큰정사각형.py
|
Nyapy/FMTG
| 0
|
12777900
|
<gh_stars>0
import sys
sys.stdin = open("1915.txt")
n, m = map(int, sys.stdin.readline().split())
array = [list(map(int, sys.stdin.readline().rstrip())) for _ in range(n)]
dp = [[0 for _ in range(m)] for _ in range(n)]
ans = 0
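# dp[i][j]: side length of the largest all-1 square whose bottom-right corner is at (i, j)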
for i in range(n):
if array[i][0] == 1:
dp[i][0] = 1
ans = 1
for j in range(m):
if array[0][j] == 1:
dp[0][j] = 1
ans = 1
for i in range(1,n):
for j in range(1,m):
if array[i][j] == 1:
dp[i][j] = min(dp[i-1][j]+1,dp[i][j-1]+1,dp[i-1][j-1]+1)
if dp[i][j] > ans:
ans = dp[i][j]
print(ans**2)
| 2.0625
| 2
|
ores/scoring/models/rev_id_scorer.py
|
elukey/ores
| 69
|
12777901
|
import time
from revscoring import Datasource, Feature, Model
from revscoring.datasources.revision_oriented import revision
from revscoring.scoring import ModelInfo
from revscoring.scoring.statistics import Classification
def process_last_two_in_rev_id(rev_id):
last_two = str(rev_id)[-2:]
if len(last_two) == 1:
return "0" + last_two
else:
return last_two
last_two_in_rev_id = Datasource(
"revision.last_two_in_rev_id",
process_last_two_in_rev_id,
depends_on=[revision.id]
)
def process_reversed_last_two_in_rev_id(last_two):
return int("".join(reversed(last_two)))
reversed_last_two_in_rev_id = Feature(
"revision.reversed_last_two_in_rev_id",
process_reversed_last_two_in_rev_id,
returns=int,
depends_on=[last_two_in_rev_id]
)
def process_delay():
return 0.0
delay = Feature("delay", process_delay, returns=float)
class RevIdScorer(Model):
"""
Implements a basic, testing scorer that predicts whether a revision ID's
reversed last two digits are greater than 50.
E.g. 974623 = 32 and 23754929 = 92
"""
def __init__(self, version=None):
super().__init__([reversed_last_two_in_rev_id, delay], version=version)
self.info = ModelInfo()
self.info['version'] = version
self.info['type'] = "RevIDScorer"
self.info['behavior'] = "Returns the last two digits in a rev_id " + \
"as a score."
self.info['statistics'] = self.calculate_statistics()
def score(self, feature_values):
last_two_in_rev_id, delay = feature_values
time.sleep(delay)
probability = last_two_in_rev_id / 100
if probability > 0.5:
prediction = True
else:
prediction = False
return {
'prediction': prediction,
'probability': {
True: probability,
False: 1 - probability
}
}
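# Illustrative example: feature values (32, 0.0) give
# {'prediction': False, 'probability': {True: 0.32, False: 0.68}}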
def calculate_statistics(self):
"Jam some data through to generate statistics"
rev_ids = range(0, 100, 1)
feature_values = zip(rev_ids, [0] * 100)
scores = [self.score(f) for f in feature_values]
labels = [s['prediction'] for s in scores]
statistics = Classification(labels, threshold_ndigits=1, decision_key='probability')
score_labels = list(zip(scores, labels))
statistics.fit(score_labels)
return statistics
@classmethod
def from_config(cls, config, name, section_key='scorer_models'):
section = config[section_key][name]
return cls(**{k: v for k, v in section.items() if k != "class"})
| 2.640625
| 3
|
rest_framework/signals.py
|
blackjackgg/drf-with-history-track
| 1
|
12777902
|
# -*- coding: utf-8 -*-
# create the signals
import datetime
import dictdiffer
from django.dispatch import Signal
from django.apps import apps as django_apps
import json
def format_value(value):
"""格式化数据"""
if isinstance(value, datetime.datetime):
return value.strftime('%Y-%m-%d %H:%M:%S')
return value
def show_change(olddict, newdict):
"""比较两个字典 返回如 [{'field': 'data.sex', 'old': '\xe7\x94\xb7', 'new': '\xe5\xa5\xb3'}, {'field': 'template', 'old': '', 'new': '11'}] """
changelist = []
listchangedict = {}
from dictdiffer.utils import dot_lookup
for diff in list(dictdiffer.diff(olddict, newdict)):
changedict = {}
diff = list(diff)
# print("Rawdifff", diff)
# print(diff,"difffffffffffffff")
if diff[0] == "change":
# reformat the entry here
changedict["field"] = diff[1]
changedict["old"] = format_value(diff[2][0])
changedict["new"] = format_value(diff[2][1])
changelist.append(changedict)
try:
if isinstance(diff[1], list):
changename = ".".join(diff[1][0:-1])
listchangedict[changename] = {"old": dot_lookup(olddict, changename),
"new": dot_lookup(newdict, changename)}
except Exception as e:
print(e)
if diff[0] == "remove" or diff[0] == "add":
changename = diff[1]
listchangedict[changename] = {"old": dot_lookup(olddict, changename),
"new": dot_lookup(newdict, changename)}
for key, value in listchangedict.items():
tmpdict = {"field": key, "old": value["old"], "new": value["new"]}
if isinstance(value["new"], list) and isinstance(value["old"], list):
if value["new"] and (isinstance(value["new"][0], dict) or isinstance(value["new"][0], list)): # 排除掉字典类的
continue
if value["old"] and (isinstance(value["old"][0], dict) or isinstance(value["old"][0], list)): # 排除掉字典类的
continue
changelist.append(tmpdict)
# print("changelist", changelist)
return changelist
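# Illustrative sketch (not part of the original module): what show_change returns for
# a small nested update (the order of the entries may vary).
# >>> old = {"data": {"sex": "male"}, "template": ""}
# >>> new = {"data": {"sex": "female"}, "template": "11"}
# >>> show_change(old, new)
# [{'field': 'data.sex', 'old': 'male', 'new': 'female'}, {'field': 'template', 'old': '', 'new': '11'}]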
api_created = Signal()
api_updated = Signal(providing_args=["old_data", "new_data", ])
def save_history(instance, user, action="--", field_name="--"):
"""保存到数据库"""
HISmodel = django_apps.get_model(app_label='track_actions', model_name="History", require_ready=False)
if HISmodel:
try:
history = HISmodel(
table_name=str(instance._meta.db_table),
user=user,
instance_id=instance.id,
action=action,
field_name=field_name
)
history.save()
except ValueError as e:
print(e)
except Exception as e:
print(e)
def created(sender, **kwargs):
print("create")
print(sender, kwargs)
def formate_bool(change):  # format boolean values for the change log ("是" = yes, "否" = no)
bool_list = {True: "是", False: "否"}
new_data = bool_list.get(change.get("new"), change.get("new"))
old_data = bool_list.get(change.get("old"), change.get("old"))
change.update(**{"new": new_data, "old": old_data})
return change
def formate_chioce(option, change):  # format single-choice (choices) fields for the change log
new_data = option.get(change.get("new"), change.get("new"))
old_data = option.get(change.get("old"), change.get("old"))
change.update(**{"new": new_data, "old": old_data})
return change
def formate_mutichioce(option, change):  # format multi-choice fields for the change log
new_data = []
old_data = []
for ii in option:
if ii['id'] in change.get("new", []):
new_data.append(ii['name'])
if ii['id'] in change.get("old", []):
old_data.append(ii['name'])
change.update(**{"new": ",".join(new_data), "old": ",".join(old_data)})
return change
def loop_zh_name(ser, field_name, change={}):
"""循环ser获得中文名 选项名 键的类型"""
from django.db.models import ManyToManyField, NullBooleanField, BooleanField
try:
if "." in field_name: # 这里只支持两层嵌套 不断循环
all_list = field_name.split('.')
model1_str = all_list[0]
field_name1 = all_list[1::]  # field_name1 covers everything after the first segment
field_name1 = len(field_name) > 1 and ".".join(field_name1) or field_name1[0]
ser1 = ser.__dict__["_fields"].get(model1_str) # 这里获取的不对
# check whether field_name1 has another nesting level; extract directly if not, otherwise continue the loop
if "." in field_name1:
res = [False, field_name1, ser1, field_name1]
else:
zh_name = ser1.Meta.model._meta.get_field(field_name1).verbose_name
zh_name = u"%s-%s" % (ser1.Meta.model._meta.verbose_name, zh_name)
# res = [True, zh_name, ser1, field_name1]
# format the returned change according to the field type
field = ser1.Meta.model._meta.get_field(field_name1)
if hasattr(field, "choices") and field.choices != []: # 格式化单选
option = dict(field.choices)
change = formate_chioce(option, change)
elif isinstance(field, (NullBooleanField, BooleanField)):
change = formate_bool(change)
elif isinstance(field, (ManyToManyField,)):  # format multi-choice fields
BaseMultiChoices = django_apps.get_model(app_label='chestpain', model_name="BaseMultiChoices",
require_ready=False)
option = BaseMultiChoices.objects.filter(choices_type=field.name).values("id", "name")
change = formate_mutichioce(option, change)
change.update(field=zh_name)
res = [True, zh_name, change]
return res
else:
zh_name = ser.Meta.model._meta.get_field(field_name).verbose_name
return [True, zh_name, change]
except Exception as e:
print("error2", e) # 出错这个内容不保存
return [True, field_name, {}]
def get_zh_name(ser, field_name, change={}):
while True:
res = loop_zh_name(ser, field_name, change)
is_end = res[0]
if is_end:
return res[2]
else:
ser = res[2]
field_name = res[3]
def map_nullfalse(str):
if str in ["null", "", "Null", "NULL", "None", "none", None]:
return "未填写"
return str
def updated(sender, **kwargs):
old_data = kwargs.get("old_data")
new_data = kwargs.get("new_data")
instance = kwargs.get("instance")
current_request = kwargs.get("current_request")
change = show_change(old_data, new_data)
# sender is the serializer; try to resolve the field name via the serializer, otherwise fall back to the English name
# print("changelist", change)
wronglist = []
for index, item in enumerate(change):
field_name = item['field']
# nested structures are resolved separately below
new_item = get_zh_name(sender, field_name, item)
item.update(**new_item)
if not new_item:
wronglist.append(index)
for num, i in enumerate(wronglist):
change.pop(i - num)
# resolve the verbose (Chinese) field name
# print("change-----",change)
# resolve single-choice options
# resolve multi-choice options
# change = reform_change(change)
try:
# if the History table has been created, record the change
if change:
for i in change:
# print("field", i["field"], isinstance(i["field"], list), type(i["field"]))
if "last_modified" in i["field"] or u"最近更新时间" in i["field"] or isinstance(i["field"],
list) or u"创建时间" in i[
"field"] or (not i['old'] and not i["new"]) or i['old'] == i['new'] or "id" in i["field"] or "ID" in \
i["field"]:
continue
changestr = u"由%s更新为%s" % (str(map_nullfalse(i["old"])), str(map_nullfalse(i["new"])))
save_history(instance, user=current_request.user, action=changestr, field_name=i["field"])
except Exception as e:
print(e)
api_created.connect(created)  # register the signal handler
api_updated.connect(updated)  # register the signal handler
| 2.34375
| 2
|
dist_helper.py
|
ymwdalex/Segmented-shape-symbolic-time-series-representation-
| 2
|
12777903
|
#!/usr/bin/env python
#########################################################
# #
# Segmented Shape-Symbolic Time series Representation #
# #
# __author__ = "<NAME>"
# __copyright__ = "Copyright 2013, Target-holding B.V."
# __license__ = "FreeBSD"
# __version__ = "1.0.1"
# __email__ = "<EMAIL>"
#
#########################################################
#----------------------------------------------------------
#
# dist_helper: this module provide some distance measurement function and matching function
#
# History
# 2014-01-07: create the file
#
# TRICK labels: show some parts which are not easy to understand
#
# The code follows the code style PEP 8
#--------------------------------------------------------
import operator
import numpy as np
import sys
from ssstsr_publib import *
# lookup table: from number to symbol
num_to_sym_tbl = {0:'a', 1:'b', 2:'e', 3:'c', 4:'f', 5:'d', 6:'g'}
# inverse lookup table: from symbol to number
sym_to_num_tbl = {'a':0, 'b':1, 'e':2, 'c':3, 'f':4, 'd':5, 'g':6}
#---------------------------------------------------
# build distance table: the distance between different symbols
# use a dictionary since lookups are O(1)
#---------------------------------------------------
def get_distance_table(n):
x=np.linspace(0,1,n)
shape_abs_val = [shape_flat, shape_linear, shape_linear, shape_leftparab, shape_leftparab, shape_rightparab, shape_rightparab]
shape_sgn = np.array([1,1,-1,1,-1,1,-1])
shapes = []
for i in range(len(shape_abs_val)):
shapes.append((shape_abs_val[i])(x) * shape_sgn[i])
distMat = {}
for i in range(len(shape_abs_val)):
sym1 = num_to_sym_tbl[i]
for j in range(len(shape_abs_val)):
sym2 = num_to_sym_tbl[j]
if i == j:
distMat[(sym1,sym2)] = float(0)
else:
# Euclidean distance of n points
# distMat[(sym1,sym2)] = np.sqrt(np.sum(np.power((shapes[i] - shapes[j]), 2))) / n
# Manhattan distance
distMat[(sym1,sym2)] = np.sum(np.abs(shapes[i] - shapes[j])) / n
return distMat
#---------------------------------------------------
# distance_table version A: the real four-level distances between the standard shapes
# 0: distance between exactly the same shape
# 0.0079728: different shapes with the same monotonic trend; both increase or both decrease
# 0.0316069: different shapes where one of the shapes is flat
# 0.0612027: different shapes with opposite monotonic trends; one increases while the other decreases
#---------------------------------------------------
distance_table = get_distance_table(50000)
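# Illustrative sketch (not part of the original module): distances are looked up per
# symbol pair; the approximate level values are the ones listed in the comment above.
# >>> distance_table[('a', 'a')]                                # identical shapes
# 0.0
# >>> distance_table[('a', 'b')] == distance_table[('b', 'a')]  # symmetric by construction
# True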
def print_distance_table():
for i in range(len(num_to_sym_tbl)):
sym1 = num_to_sym_tbl[i]
for j in range(len(num_to_sym_tbl)):
sym2 = num_to_sym_tbl[j]
sys.stdout.write("(" + sym1 + "," + sym2 + "):" + str(distance_table[(sym1,sym2)]) + "\t")
print ""
#"{0:.2f}".format(13.949999999999999)
#---------------------------------------------------
# distance_table version B: assign four distance levels (0,1,2,3) to the different shapes
# 0: distance between exactly the same shape
# 1: different shapes with the same monotonic trend; both increase or both decrease
# 2: different shapes where one of the shapes is flat
# 3: different shapes with opposite monotonic trends; one increases while the other decreases
#---------------------------------------------------
# use number as symbolic representation
# distance_table = { (0,0):0, (0,1):2, (0,2):2, (0,3):2, (0,4):2, (0,5):2, (0,6):2,
# (1,0):2, (1,1):0, (1,2):3, (1,3):1, (1,4):3, (1,5):1, (1,6):3,
# (2,0):2, (2,1):3, (2,2):0, (2,3):3, (2,4):1, (2,5):3, (2,6):1,
# (3,0):2, (3,1):1, (3,2):3, (3,3):0, (3,4):3, (3,5):1, (3,6):3,
# (4,0):2, (4,1):3, (4,2):1, (4,3):3, (4,4):0, (4,5):3, (4,6):1,
# (5,0):2, (5,1):1, (5,2):3, (5,3):1, (5,4):3, (5,5):0, (5,6):3,
# (6,0):2, (6,1):3, (6,2):1, (6,3):3, (6,4):1, (6,5):3, (6,6):0}
# use string as symbolic representation
# distance_table = { ('a','a'):0, ('a','b'):2, ('a','e'):2, ('a','c'):2, ('a','f'):2, ('a','d'):2, ('a','g'):2,
# ('b','a'):2, ('b','b'):0, ('b','e'):3, ('b','c'):1, ('b','f'):3, ('b','d'):1, ('b','g'):3,
# ('e','a'):2, ('e','b'):3, ('e','e'):0, ('e','c'):3, ('e','f'):1, ('e','d'):3, ('e','g'):1,
# ('c','a'):2, ('c','b'):1, ('c','e'):3, ('c','c'):0, ('c','f'):3, ('c','d'):1, ('c','g'):3,
# ('f','a'):2, ('f','b'):3, ('f','e'):1, ('f','c'):3, ('f','f'):0, ('f','d'):3, ('f','g'):1,
# ('d','a'):2, ('d','b'):1, ('d','e'):3, ('d','c'):1, ('d','f'):3, ('d','d'):0, ('d','g'):3,
# ('g','a'):2, ('g','b'):3, ('g','e'):1, ('g','c'):3, ('g','f'):1, ('g','d'):3, ('g','g'):0}
sym_avg_dist = sum(v for (_, v) in distance_table.items()) / float(len(distance_table))
#---------------------------------
# calculate the Levenshtein distance of two strings
# input:
# @ two strings
# output:
# @ Levenshtein distance (int)
# example:
# >>> edit_dist('eeba', 'abac')
# 3
# >>> edit_dist('abc', 'cba')
# 2
# >>> edit_dist('cbc', 'eba')
# 2
# >>> edit_dist('recoginze', 'recognize')
# 2
# >>> edit_dist('sailn', 'failing')
# 3
#---------------------------------
def edit_dist(s1, s2):
len1 = len(s1)
len2 = len(s2)
# for all i and j, d[i,j] will hold the Levenshtein distance between
# the first i characters of s and the first j characters of t;
# note that d has (m+1)*(n+1) values
matrix = [[i+j for j in range(len2 + 1)] for i in range(len1 + 1)]
for row in range(len1):
for col in range(len2):
substitute_cost = (s1[row] != s2[col])
matrix[row+1][col+1] = min(matrix[row+1][col]+1, # delete
matrix[row][col+1]+1, # insert
matrix[row][col]+substitute_cost) # substitution
return matrix[len1][len2]
#---------------------------------
# Needleman-Wunsch distance: the substitution costs are arbitrary (taken from distance_table)
# the insert and delete costs are the same, also called the "gap cost"
# this function calculates the Needleman-Wunsch distance of two strings;
# the gap cost is sym_avg_dist and the substitution cost comes from distance_table
# input:
# @ two strings
# output:
# @ Needleman-Wunsch distance with float type
#---------------------------------
def nw_dist(s1, s2):
len1 = len(s1)
len2 = len(s2)
matrix = [[(i+j)*sym_avg_dist for j in range(len2 + 1)] for i in range(len1 + 1)]
for row in range(len1):
for col in range(len2):
if s1[row] == s2[col]:
# if new characters are the same
matrix[row+1][col+1] = min(matrix[row+1][col]+sym_avg_dist, # delete
matrix[row][col+1]+sym_avg_dist, # insert
matrix[row][col]) # substitution
else:
matrix[row+1][col+1] = min(matrix[row+1][col]+sym_avg_dist, # delete
matrix[row][col+1]+sym_avg_dist, # insert
matrix[row][col]+distance_table[(s1[row], s2[col])]) # substitution
return matrix[len1][len2]
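# Illustrative sketch (not part of the original module): identical strings have
# distance 0, and each unmatched trailing symbol costs one gap (sym_avg_dist).
# >>> nw_dist('abc', 'abc')
# 0.0
# >>> nw_dist('abc', 'ab') == sym_avg_dist
# True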
#---------------------------------
# leven_match: approximate substring matching of the keyword inside the word
# input:
# @ keyword, word: symbol strings; best_n: number of best matches to return
# output:
# @ four parallel lists for the best_n matches: start positions, matched substrings,
#   their distances, and the edit scripts that produced them
#---------------------------------
def leven_match(keyword, word, best_n=1):
len_keyword = len(keyword)
len_word = len(word)
if best_n <= 0 or best_n > len_word-len_keyword+1:
raise ValueError, "best_n must be greater or equal than 1 and less equal than maximum number of possible substrings!"
# initial value of edit distance
matrix = [[0 for j in range(len_word + 1)] for i in range(len_keyword + 1)]
for j in range(len_keyword + 1):
matrix[j][0] = j
# the direction of backward searching
dir = [[0 for j in range(len_word + 1)] for i in range(len_keyword + 1)]
for row in range(len_keyword):
for col in range(len_word):
# if keyword[row] == word[col]:
# # if new characters are the same
# complist = np.array([
# matrix[row][col], # substitution
# matrix[row+1][col]+1, # delete
# matrix[row][col+1]+1 # insert
# ])
# else:
# complist = np.array([
# matrix[row][col]+1, # substitution, go diagonal
# matrix[row+1][col]+1, # delete, go right
# matrix[row][col+1]+1 # insert, go down
# ])
# TRICK: the cost of substitution is on the first place of the list, because
# when backward search, we prefer the substring has equal length
if keyword[row] == word[col]:
# if new characters are the same
complist = np.array([
matrix[row][col], # substitution
matrix[row+1][col]+sym_avg_dist, # delete
matrix[row][col+1]+sym_avg_dist # insert
])
else:
complist = np.array([
matrix[row][col]+distance_table[(keyword[row], word[col])], # substitution
matrix[row+1][col]+sym_avg_dist, # delete
matrix[row][col+1]+sym_avg_dist # insert
])
matrix[row+1][col+1] = np.min(complist)
dir[row+1][col+1] = np.argmin(complist) + 1
# get the best_n minimal distances
lenvdist_list = matrix[len_keyword]
matching_words = []
min_dist_list = np.argsort(lenvdist_list)[0:best_n]
row_end = len_keyword
action = []
matching_pos = []
for col_end in min_dist_list:
search_record = []
# backward search three direction
col = col_end
row = row_end
while row != 0:
search_record.append(dir[row][col])
# reverse operation of substitute, delete and insert
if dir[row][col] == 1:
# substitution, diagonal
col = col - 1
row = row - 1
elif dir[row][col] == 2:
# delete, go left
col = col - 1
elif dir[row][col] == 3:
# insert, go up
row = row - 1
else:
raise ValueError, "it is odd: the backward search direction must be in [1,2,3]"
# TRICK: pay attention to the index of the word
matching_words.append(word[col:col_end])
matching_pos.append(col)
# editing action
action_dict = {1:'s', 2:'i', 3:'d'}
action.append("".join([action_dict[i] for i in search_record[::-1]]))
return matching_pos, matching_words, [lenvdist_list[i] for i in min_dist_list], action
#---------------------------------
# calculate the Hamming distance of two strings
# input:
# @ normalized: normalized by the length of the string if normalized is true
# output:
# @ hamming distance with float type
#---------------------------------
def hamming_dist(str1, str2, normalized=True):
len_str1 = len(str1)
len_str2 = len(str2)
if len_str1 != len_str2:
raise ValueError, "the length of two strings must be the same!"
dist = 0
for i in range(len_str1):
try:
str_dist = distance_table[(str1[i], str2[i])]
except KeyError:
#TODO: use logging
# print "invalid shape symbol (must be in a-g):\t" + str1[i] + "\t" + str2[i]
return sys.float_info.max
dist = dist + str_dist
if normalized:
dist = dist / float(len_str1)
return dist
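# Illustrative sketch (not part of the original module): identical strings have
# distance 0; a single differing position contributes its distance_table entry.
# >>> hamming_dist('abc', 'abc')
# 0.0
# >>> hamming_dist('abc', 'abd', normalized=False) == distance_table[('c', 'd')]
# True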
#---------------------------------
# hamming_match: look up the first substring whose distance to the keyword is less than or equal to the threshold
# input:
# @ dist_thresh: default value is 0
# output:
# @ the position of the first matching substring; if there is no match, return -1
#---------------------------------
def hamming_match(keyword, word, dist_thresh=0.0):
len_keyword = len(keyword)
len_str = len(word)
if len_keyword > len_str:
raise ValueError, "the length of the keyword should be less than the string!"
for i in range(len_str-len_keyword+1):
dist = hamming_dist(keyword, word[i:i+len_keyword])
if dist <= dist_thresh:
return i
# elif dist == sys.float_info.max:
# print "invalid shape symbol (must be in a-g) in the position:\t" + str(i)
# no matching
return -1
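# Illustrative sketch (not part of the original module): an exact occurrence of the
# keyword has distance 0 and is returned at its start index.
# >>> hamming_match('ab', 'ccab')
# 2
# >>> hamming_match('ab', 'cccc')
# -1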
#---------------------------------
# hamming_match_all: look up all substrings whose distance to the keyword is less than or equal to the threshold
# output: a list of matching substring start indices
#---------------------------------
def hamming_match_all(keyword, word, dist_thresh=0.0):
len_keyword = len(keyword)
len_str = len(word)
if len_keyword > len_str:
raise ValueError, "the length of the keyword should be less than the string!"
matching_pos = []
for i in range(len_str-len_keyword+1):
dist = hamming_dist(keyword, word[i:i+len_keyword])
if dist <= dist_thresh:
matching_pos.append(i)
# elif dist == sys.float_info.max:
# print "invalid shape symbol (must be in a-g) in the position:\t" + str(i)
return matching_pos
#---------------------------------
# hamming_match_best_effort: look up the best_n substrings closest to the keyword
# output: the matching start positions and their distances
#---------------------------------
def hamming_match_best_effort(keyword, word, best_n=1):
len_keyword = len(keyword)
len_str = len(word)
if len_keyword > len_str:
raise ValueError, "the length of the keyword must be less than the string!"
if best_n <= 0 or best_n > len_str-len_keyword+1:
raise ValueError, "best_n must be greater or equal than 1 and less equal than maximum number of possible substrings!"
# store all distance
distlist = []
for i in range(len_str-len_keyword+1):
distlist.append(hamming_dist(keyword, word[i:i+len_keyword]))
# get sorted index
vals = np.array(distlist)
matching_pos = np.argsort(vals)[0:best_n]
# return best_n matching result: positions and distances
return matching_pos, [distlist[i] for i in matching_pos]
| 2.53125
| 3
|
manhattan/record.py
|
cartlogic/manhattan
| 1
|
12777904
|
from __future__ import absolute_import, division, print_function
log_version = 1
class Record(object):
base_fields = ('timestamp', 'vid', 'site_id')
fields = ()
def __init__(self, **kwargs):
for field in self.base_fields + self.fields:
setattr(self, field, kwargs.get(field, ''))
def to_list(self):
return ([str(log_version), self.key] +
[getattr(self, field) for field in
self.base_fields + self.fields])
@staticmethod
def from_list(vals):
version = vals[0]
record_type = vals[1]
rest = vals[2:]
assert int(version) == log_version
cls = _record_types[record_type]
kwargs = {field: val for field, val
in zip(cls.base_fields + cls.fields, rest)}
return cls(**kwargs)
class PageRecord(Record):
key = 'page'
fields = ('ip', 'method', 'url', 'user_agent', 'referer')
class PixelRecord(Record):
key = 'pixel'
fields = ()
class GoalRecord(Record):
key = 'goal'
fields = ('name', 'value', 'value_type', 'value_format')
class SplitRecord(Record):
key = 'split'
fields = ('test_name', 'selected')
_record_types = {cls.key: cls for cls in
(PageRecord, PixelRecord, GoalRecord, SplitRecord)}
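# Illustrative sketch (not part of the original module): records round-trip through
# their list form.
# >>> r = GoalRecord(timestamp='1400000000', vid='v1', site_id='s1',
# ...                name='signup', value='1', value_type='', value_format='')
# >>> Record.from_list(r.to_list()).name
# 'signup'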
| 2.40625
| 2
|
setup.py
|
rapatchi/SFMergeUtility
| 0
|
12777905
|
<reponame>rapatchi/SFMergeUtility
from setuptools import setup, find_packages
setup(
name='SFMergeUtility',
version='0.1',
packages=find_packages(exclude=['tests*']),
license='MIT',
description='SFMergeUtility',
long_description=open('README.md').read(),
install_requires=[],
url='',
author='SFDev',
author_email='SF<PASSWORD>'
)
| 1.023438
| 1
|
cross2sheet/main.py
|
jaylorch/cross2sheet
| 10
|
12777906
|
<filename>cross2sheet/main.py
#!/usr/bin/python
import argparse
import urllib.request
from cross2sheet.excel import save_xlsx
from cross2sheet.html14 import parse_html_grid
from cross2sheet.htmltable import parse_html_table
from cross2sheet.transforms import autonumber, outside_bars, pad
def read(string):
if '://' in string:
req=urllib.request.urlopen(string)
data=req.read()
req.close()
return data
else:
with open(string,'rb') as f:
return f.read()
class NotRecognized(Exception):
pass
class ReadFailed(Exception):
pass
def read_image(data,args):
try:
from cross2sheet.image import ImageGrid
except ImportError as e:
if e.name in ('cv2','numpy'):
raise NotRecognized('Image detection disabled because the module %s was not found.'%e.name)
else:
raise e
try:
img=ImageGrid(data)
except ValueError:
raise NotRecognized
grid=img.grid()
if args.detect_background:
grid.features.extend(img.read_background(args.color_levels))
if args.detect_bars:
grid.features.extend(img.read_bars())
if args.autonumber_cells_with_text:
grid.features.extend(img.autonumber_if_text_found())
if args.ocr_text:
grid.features.extend(img.read_text_ocr())
if args.autonumber is None:
args.autonumber=not (args.autonumber_cells_with_text or args.ocr_text)
return grid
def read_html(data,args):
try:
data=data.decode()
except UnicodeDecodeError:
raise NotRecognized
if '<div class="bk"' in data:
return parse_html_grid(data)
elif '<table' in data:
if not args.color_attribute:
raise NotRecognized('HTML contains a table, but --color-attribute not specified. Try specifying --color-attribute and --color-value-dark.')
return parse_html_table(data,styleattr=args.color_attribute,styledict={args.color_value_dark:0})
else:
raise NotRecognized
def read_data(data,args):
errors = []
for fn in (read_image,read_html):
try:
return fn(data,args)
except NotRecognized as e:
errors.extend(e.args)
msg='Error: file format not recognized. If the grid is an HTML file, try taking a screenshot. If the grid is an image file, try converting it to PNG format.'
if errors:
msg='%s\nThe following warnings were encountered:\n%s'%(msg,'\n'.join(errors))
raise ReadFailed(msg)
def process(grid,args):
if args.print_testdata:
from cross2sheet.test.test import print_tests
url = args.input_file_or_url
url = url.replace('http://web.mit.edu/puzzle/www/','')
print_tests(url,grid)
if args.autonumber:
grid.features.extend(autonumber(grid))
if args.outer_border:
grid.features.extend(outside_bars(grid))
pad(grid,*args.padding)
def save(grid,args):
save_xlsx(grid,args.output_file,text_in_cells=args.number_in_cell,text_in_comments=args.number_in_comment)
def boolean_arg(s):
sl=s.lower()
if sl in ['y','yes','t','true','1','on']:
return True
elif sl in ['n','no','f','false','0','off']:
return False
else:
raise ValueError('Unrecognized value %s'%s)
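# Illustrative sketch (not part of the original module): how toggle values are parsed.
# >>> boolean_arg('Yes')
# True
# >>> boolean_arg('off')
# False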
class ToggleAction(argparse.Action):
def __init__(self,*args,**kwargs):
kwargs.setdefault('metavar','{y,n}')
super().__init__(*args,**kwargs)
def __call__(self,parser,namespace,values,option_string=None):
setattr(namespace,self.dest,boolean_arg(values))
if __name__=='__main__':
parser=argparse.ArgumentParser(description='Convert a crossword to a spreadsheet.')
parser.add_argument('input_file_or_url',type=str)
parser.add_argument('output_file',type=str)
parser.add_argument('--padding',type=int,nargs=2,default=(1,3),metavar=('ROWS','COLS'),help='The number of blank rows and columns to add on the top and left of the grid')
parser.add_argument('--detect-background',action=ToggleAction,default=True,help='Determines whether to detect the background color of the cells.')
# Detecting bars in crosswords without them appears to be
# harmless, as bars just get added between dark squares
parser.add_argument('--detect-bars',action=ToggleAction,default=True,help='Determines whether to detect the cell border is thick or thin.')
parser.add_argument('--autonumber',action=ToggleAction,help='Determines whether clue numbers will automatically be added in the cells that would be expected to have them under the usual crossword numbering conventions.')
parser.add_argument('--autonumber-cells-with-text',action=ToggleAction,default=False,help='Determines whether clue numbers will be added in sequential order in cells that appear to have text in them. (This is not too reliable.)')
parser.add_argument('--ocr-text',action=ToggleAction,default=False,help="Determines whether to use the 'tesseract' program to recognize clue numbers. (This is very unreliable.)")
parser.add_argument('--number-in-comment',action=ToggleAction,default=True,help='Determines whether to write the clue numbers in comments. (A triangle will appear in the corner of the cell, and hovering over it will reveal the clue number.)')
parser.add_argument('--number-in-cell',action=ToggleAction,default=True,help='Determines whether to write the clue numbers in the spreadsheet cells.')
parser.add_argument('--outer-border',action=ToggleAction,default=True,help='Determines whether to draw a border around the outside of the grid.')
parser.add_argument('--color-levels',type=int,default=2,help='(image input) The number of different levels of color to distinguish')
parser.add_argument('--color-attribute',type=str,help='(HTML table input) The name of the attribute that determines whether the cell is light or dark.')
parser.add_argument('--color-value-dark',type=str,help='(HTML table input) The value of the above attribute when the cell is dark.')
parser.add_argument('--print-testdata',action=ToggleAction,default=False,help='Print unit test data for this crossword.')
args=parser.parse_args()
data=read(args.input_file_or_url)
try:
grid=read_data(data,args)
except ReadFailed as e:
import sys
print(e.args[0],file=sys.stderr)
sys.exit(65)
process(grid,args)
save(grid,args)
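# Illustrative usage sketch (not part of the original module; the input URL and flag
# value are hypothetical):
#   python cross2sheet/main.py https://example.org/puzzle.png puzzle.xlsx --ocr-text y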
| 2.84375
| 3
|
scripts/problem0003.py
|
Joel301/Project_Euler
| 0
|
12777907
|
<reponame>Joel301/Project_Euler
#! python3
#-*- coding: utf-8 -*-
"""
Euler description from https://projecteuler.net/
Problem 0003
The prime factors of 13195 are 5, 7, 13 and 29.
What is the largest prime factor of the number 600851475143 ?
"""
def primeFactors(number= 13195):
p=2
while number >= p*p:
if number % p == 0:
yield p
number = number // p
else:
p=p+1
yield int(number)
return
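# Illustrative sketch (not part of the original script): the generator yields the
# prime factors in ascending order, so the largest factor is the last element of compute().
# >>> list(primeFactors(13195))
# [5, 7, 13, 29]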
#main function
def compute():
ans = [x for x in primeFactors(600851475143)]
return ans
if __name__ == "__main__":
print(compute())
| 3.359375
| 3
|
tests/test_titles.py
|
openstack/api-wg
| 33
|
12777908
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glob
import os
import docutils.core
import testtools
class TestTitles(testtools.TestCase):
def _get_title(self, section_tree):
section = {
'subtitles': [],
}
for node in section_tree:
if node.tagname == 'title':
section['name'] = node.rawsource
elif node.tagname == 'section':
subsection = self._get_title(node)
section['subtitles'].append(subsection['name'])
return section
def _get_titles(self, spec):
titles = {}
for node in spec:
if node.tagname == 'section':
# Note subsection subtitles are thrown away
section = self._get_title(node)
titles[section['name']] = section['subtitles']
return titles
def _check_titles(self, filename, expect, actual):
missing_sections = [x for x in expect.keys() if x not in actual.keys()]
extra_sections = [x for x in actual.keys() if x not in expect.keys()]
msgs = []
if len(missing_sections) > 0:
msgs.append("Missing sections: %s" % missing_sections)
if len(extra_sections) > 0:
msgs.append("Extra sections: %s" % extra_sections)
for section in expect.keys():
missing_subsections = [x for x in expect[section]
if x not in actual[section]]
# extra subsections are allowed
if len(missing_subsections) > 0:
msgs.append("Section '%s' is missing subsections: %s"
% (section, missing_subsections))
if len(msgs) > 0:
self.fail("While checking '%s':\n %s"
% (filename, "\n ".join(msgs)))
def test_template(self):
filenames = glob.glob("guidelines/*")
for filename in filenames:
if filename.endswith('~'):
continue
if os.path.isdir(filename):
continue
self.assertTrue(
filename.endswith(".rst") or filename.endswith(".json"),
"guideline file must use 'rst' or 'json'"
"extension: {filename}".format(filename=filename))
with open(filename) as f:
data = f.read()
docutils.core.publish_doctree(data)
| 2.359375
| 2
|
lights/admin.py
|
and-dmitry/demolighting
| 0
|
12777909
|
from django.contrib import admin
from . import models
@admin.register(models.Lamp)
class LampAdmin(admin.ModelAdmin):
list_display = ('name', 'is_on', 'brightness')
ordering = ('name',)
@admin.register(models.WorkingPeriod)
class WorkingPeriodAdmin(admin.ModelAdmin):
list_display = ('lamp', 'brightness', 'start', 'end')
ordering = ('-start',)
| 1.859375
| 2
|
lizardanalysis/version.py
|
JojoReikun/ClimbingLizardDLCAnalysis
| 1
|
12777910
|
<gh_stars>1-10
"""
LizardDLCAnalysis Toolbox
© <NAME>
© <NAME>
Licensed under MIT License
"""
__version__ = '0.1'
VERSION = __version__
| 1.023438
| 1
|
test/service/test_response.py
|
NoopDog/azul
| 0
|
12777911
|
import json
from typing import (
Any,
Dict,
List,
Optional,
)
import unittest
from unittest import (
mock,
)
import urllib.parse
from more_itertools import (
one,
)
import requests
from app_test_case import (
LocalAppTestCase,
)
from azul import (
cached_property,
config,
)
from azul.indexer import (
BundleFQID,
)
from azul.indexer.document import (
null_str,
)
from azul.indexer.index_service import (
IndexService,
)
from azul.logging import (
configure_test_logging,
)
from azul.service.hca_response_v5 import (
FileSearchResponse,
KeywordSearchResponse,
)
from azul.types import (
JSON,
)
from service import (
WebServiceTestCase,
)
from service.test_pagination import (
parse_url_qs,
)
# noinspection PyPep8Naming
def setUpModule():
configure_test_logging()
class TestResponse(WebServiceTestCase):
maxDiff = None
@classmethod
def bundles(cls) -> List[BundleFQID]:
return super().bundles() + [
BundleFQID('fa5be5eb-2d64-49f5-8ed8-bd627ac9bc7a', '2019-02-14T192438.034764Z'),
BundleFQID('d0e17014-9a58-4763-9e66-59894efbdaa8', '2018-10-03T144137.044509Z'),
BundleFQID('e0ae8cfa-2b51-4419-9cde-34df44c6458a', '2018-12-05T230917.591044Z'),
BundleFQID('411cd8d5-5990-43cd-84cc-6c7796b8a76d', '2018-10-18T204655.866661Z'),
BundleFQID('412cd8d5-5990-43cd-84cc-6c7796b8a76d', '2018-10-18T204655.866661Z'),
BundleFQID('ffac201f-4b1c-4455-bd58-19c1a9e863b4', '2019-10-09T170735.528600Z'),
]
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._setup_indices()
@classmethod
def tearDownClass(cls):
cls._teardown_indices()
super().tearDownClass()
def get_hits(self, entity_type: str, entity_id: str):
"""
Fetches hits from ES instance searching for a particular entity ID
"""
body = {
"query": {
"term": {
"entity_id.keyword": entity_id
}
}
}
# Tests are assumed to only ever run with the azul dev index
results = self.es_client.search(index=config.es_index_name(catalog=self.catalog,
entity_type=entity_type,
aggregate=True),
body=body)
return self._index_service.translate_fields(catalog=self.catalog,
doc=[results['hits']['hits'][0]['_source']],
forward=False)
@cached_property
def _index_service(self):
return IndexService()
def test_key_search_files_response(self):
"""
This method tests the KeywordSearchResponse object for the files entity type.
It will make sure the functionality works as appropriate by asserting the
apiResponse attribute is the same as expected.
"""
# Still need a way to test the response.
keyword_response = KeywordSearchResponse(
# the entity_id is hardcoded, but corresponds to the bundle above
hits=self.get_hits('files', '0c5ac7c0-817e-40d4-b1b1-34c3d5cfecdb'),
entity_type='files',
catalog=self.catalog
).return_response().to_json()
expected_response = {
"hits": [
{
"bundles": [
{
"bundleUuid": "aaa96233-bf27-44c7-82df-b4dc15ad4d9d",
"bundleVersion": "2018-11-02T113344.698028Z"
}
],
"cellLines": [
],
"cellSuspensions": [
{
"organ": ["pancreas"],
"organPart": ["islet of Langerhans"],
"selectedCellType": [None],
"totalCells": 1
}
],
"donorOrganisms": [
{
"biologicalSex": ["female"],
"disease": ['normal'],
"developmentStage": [None],
"genusSpecies": ["Australopithecus"],
"id": ["DID_scRSq06"],
"donorCount": 1,
"organismAge": ["38"],
"organismAgeUnit": ["year"],
"organismAgeRange": [{"gte": 1198368000.0, "lte": 1198368000.0}]
}
],
"entryId": "0c5ac7c0-817e-40d4-b1b1-34c3d5cfecdb",
"files": [
{
"content_description": [None],
"format": "fastq.gz",
"name": "SRR3562915_1.fastq.gz",
"sha256": "77337cb51b2e584b5ae1b99db6c163b988cbc5b894dda2f5d22424978c3bfc7a",
"size": 195142097,
"uuid": "7b07f99e-4a8a-4ad0-bd4f-db0d7a00c7bb",
"version": "2018-11-02T113344.698028Z"
}
],
"organoids": [
],
"projects": [
{
"laboratory": ["<NAME>"],
"projectShortname": ["Single of human pancreas"],
"projectTitle": ["Single cell transcriptome patterns."]
}
],
"protocols": [
{
"libraryConstructionApproach": ["Smart-seq2"],
"nucleicAcidSource": ["single cell"],
},
{
"instrumentManufacturerModel": ["Illumina NextSeq 500"],
"pairedEnd": [True],
}
],
"samples": [
{
"sampleEntityType": ["specimens"],
"effectiveOrgan": ['pancreas'],
"disease": ["normal"],
"id": ["DID_scRSq06_pancreas"],
"organ": ["pancreas"],
"organPart": ["islet of Langerhans"],
"preservationMethod": [None],
"source": [
"specimen_from_organism"
]
}
],
"specimens": [
{
"disease": ["normal"],
"id": ["DID_scRSq06_pancreas"],
"organ": ["pancreas"],
"organPart": ["islet of Langerhans"],
"preservationMethod": [None],
"source": [
"specimen_from_organism"
]
}
]
}
]
}
self.assertElasticsearchResultsEqual(keyword_response, expected_response)
def test_key_search_samples_response(self):
"""
KeywordSearchResponse for the specimens endpoint should return file type summaries instead of files
"""
keyword_response = KeywordSearchResponse(
# the entity_id is hardcoded, but corresponds to the bundle above
hits=self.get_hits('samples', 'a21dc760-a500-4236-bcff-da34a0e873d2'),
entity_type='samples',
catalog=self.catalog
).return_response().to_json()
expected_response = {
"hits": [
{
"cellLines": [
],
"cellSuspensions": [
{
"organ": ["pancreas"],
"organPart": ["islet of Langerhans"],
"selectedCellType": [None],
"totalCells": 1
}
],
"donorOrganisms": [
{
"biologicalSex": ["female"],
"disease": ['normal'],
"developmentStage": [None],
"genusSpecies": ["Australopithecus"],
"id": ["DID_scRSq06"],
"donorCount": 1,
"organismAge": ["38"],
"organismAgeUnit": ["year"],
"organismAgeRange": [{"gte": 1198368000.0, "lte": 1198368000.0}]
}
],
"entryId": "a21dc760-a500-4236-bcff-da34a0e873d2",
"fileTypeSummaries": [
{
"count": 2,
"fileType": "fastq.gz",
"totalSize": 385472253
}
],
"organoids": [
],
"projects": [
{
"laboratory": ["<NAME>"],
"projectShortname": ["Single of human pancreas"],
"projectTitle": ["Single cell transcriptome patterns."]
}
],
"protocols": [
{
"instrumentManufacturerModel": ["Illumina NextSeq 500"],
"pairedEnd": [True],
},
{
"libraryConstructionApproach": ["Smart-seq2"],
"nucleicAcidSource": ["single cell"],
}
],
"samples": [
{
"sampleEntityType": "specimens",
"effectiveOrgan": "pancreas",
"id": "DID_scRSq06_pancreas",
"disease": ["normal"],
"organ": "pancreas",
"organPart": ["islet of Langerhans"],
"preservationMethod": None,
"source": "specimen_from_organism",
}
],
"specimens": [
{
"disease": ["normal"],
"id": ["DID_scRSq06_pancreas"],
"organ": ["pancreas"],
"organPart": ["islet of Langerhans"],
"preservationMethod": [None],
"source": [
"specimen_from_organism",
]
}
]
}
]
}
self.assertElasticsearchResultsEqual(keyword_response, expected_response)
path = "/index/files"
query = "?size=5&search_after=cbb998ce-ddaf-34fa-e163-d14b399c6b34&search_after_uid=meta%2332"
@property
def paginations(self):
return [
{
"count": 2,
"order": "desc",
"pages": 1,
"size": 5,
"sort": "entryId",
"total": 2
},
{
"count": 2,
"order": "desc",
"pages": 1,
"next": self.base_url + self.path + self.query,
"size": 5,
"sort": "entryId",
"total": 2
}
]
def test_file_search_response(self):
"""
n=0: Test the FileSearchResponse object, making sure the functionality works as appropriate by asserting the
apiResponse attribute is the same as expected.
n=1: Tests the FileSearchResponse object, using 'next' pagination.
"""
hits = [
{
"bundles": [
{
"bundleUuid": "aaa96233-bf27-44c7-82df-b4dc15ad4d9d",
"bundleVersion": "2018-11-02T113344.698028Z"
}
],
"cellLines": [
],
"cellSuspensions": [
{
"organ": ["pancreas"],
"organPart": ["islet of Langerhans"],
"selectedCellType": [None],
"totalCells": 1
}
],
"donorOrganisms": [
{
"biologicalSex": ["female"],
"disease": ['normal'],
"developmentStage": [None],
"genusSpecies": ["Australopithecus"],
"id": ["DID_scRSq06"],
"donorCount": 1,
"organismAge": ["38"],
"organismAgeUnit": ["year"],
"organismAgeRange": [{"gte": 1198368000.0, "lte": 1198368000.0}]
}
],
"entryId": "0c5ac7c0-817e-40d4-b1b1-34c3d5cfecdb",
"files": [
{
"content_description": [None],
"format": "fastq.gz",
"name": "SRR3562915_1.fastq.gz",
"sha256": "77337cb51b2e584b5ae1b99db6c163b988cbc5b894dda2f5d22424978c3bfc7a",
"size": 195142097,
"uuid": "7b07f99e-4a8a-4ad0-bd4f-db0d7a00c7bb",
"version": "2018-11-02T113344.698028Z"
}
],
"organoids": [
],
"projects": [
{
"laboratory": ["<NAME>"],
"projectShortname": ["Single of human pancreas"],
"projectTitle": ["Single cell transcriptome patterns."]
}
],
"protocols": [
{
"libraryConstructionApproach": ["Smart-seq2"],
"nucleicAcidSource": ["single cell"],
},
{
"instrumentManufacturerModel": ["Illumina NextSeq 500"],
"pairedEnd": [True],
}
],
"samples": [
{
"sampleEntityType": ["specimens"],
"effectiveOrgan": ['pancreas'],
"disease": ["normal"],
"id": ["DID_scRSq06_pancreas"],
"organ": ["pancreas"],
"organPart": ["islet of Langerhans"],
"preservationMethod": [None],
"source": [
"specimen_from_organism",
]
}
],
"specimens": [
{
"disease": ["normal"],
"id": ["DID_scRSq06_pancreas"],
"organ": ["pancreas"],
"organPart": ["islet of Langerhans"],
"preservationMethod": [None],
"source": [
"specimen_from_organism",
]
}
]
}
]
responses = [
{
"hits": hits,
"pagination": {
"count": 2,
"order": "desc",
"pages": 1,
"next": None,
"previous": None,
"size": 5,
"sort": "entryId",
"total": 2
},
"termFacets": {}
},
{
"hits": hits,
"pagination": {
"count": 2,
"order": "desc",
"pages": 1,
"next": self.base_url + self.path + self.query,
"previous": None,
"size": 5,
"sort": "entryId",
"total": 2
},
"termFacets": {}
}
]
for n in 0, 1:
with self.subTest(n=n):
filesearch_response = FileSearchResponse(
hits=self.get_hits('files', '0c5ac7c0-817e-40d4-b1b1-34c3d5cfecdb'),
pagination=self.paginations[n],
facets={},
entity_type='files',
catalog=self.catalog
).return_response().to_json()
self.assertElasticsearchResultsEqual(filesearch_response, responses[n])
def test_file_search_response_file_summaries(self):
"""
Test non-'files' entity type passed to FileSearchResponse will give file summaries
"""
filesearch_response = FileSearchResponse(
hits=self.get_hits('samples', 'a21dc760-a500-4236-bcff-da34a0e873d2'),
pagination=self.paginations[0],
facets={},
entity_type='samples',
catalog=self.catalog
).return_response().to_json()
for hit in filesearch_response['hits']:
self.assertTrue('fileTypeSummaries' in hit)
self.assertFalse('files' in hit)
facets_populated = {
"organ": {
"doc_count": 21,
"untagged": {
"doc_count": 0
},
"myTerms": {
"doc_count_error_upper_bound": 0,
"sum_other_doc_count": 0,
"buckets": [
{
"key": "silver",
"doc_count": 11
},
{
"key": "teal",
"doc_count": 10
}
]
}
},
"disease": {
"doc_count": 21,
"untagged": {
"doc_count": 12
},
"myTerms": {
"doc_count_error_upper_bound": 0,
"sum_other_doc_count": 0,
"buckets": [
{
"key": "silver",
"doc_count": 9
}
]
}
}
}
def test_file_search_response_add_facets(self):
"""
Test adding facets to FileSearchResponse with missing values in one facet
and no missing values in the other
null term should not appear if there are no missing values
"""
facets = FileSearchResponse.add_facets(self.facets_populated)
expected_output = {
"organ": {
"terms": [
{
"term": "silver",
"count": 11
},
{
"term": "teal",
"count": 10
}
],
"total": 21,
"type": "terms"
},
"disease": {
"terms": [
{
"term": "silver",
"count": 9
},
{
"term": None,
"count": 12
}
],
"total": 21,
"type": "terms"
}
}
self.assertElasticsearchResultsEqual(facets, expected_output)
def _params(self, filters: Optional[JSON] = None, **params: Any) -> Dict[str, Any]:
return {
**({} if filters is None else {'filters': json.dumps(filters)}),
'catalog': self.catalog,
**params
}
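# Illustrative sketch (not part of the original tests): _params JSON-encodes the
# filters and merges any extra query parameters, e.g.
# self._params({'organ': {'is': ['brain']}}, size=2)
# -> {'filters': '{"organ": {"is": ["brain"]}}', 'catalog': self.catalog, 'size': 2}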
def test_sorting_details(self):
for entity_type in 'files', 'samples', 'projects', 'bundles':
with self.subTest(entity_type=entity_type):
base_url = self.base_url
url = base_url + "/index/" + entity_type
response = requests.get(url, params=self._params())
response.raise_for_status()
response_json = response.json()
# Verify default sort field is set correctly
self.assertEqual(response_json['pagination']["sort"], self.app_module.sort_defaults[entity_type][0])
# Verify all fields in the response that are lists of primitives are sorted
for hit in response_json['hits']:
self._verify_sorted_lists(hit)
def test_transform_request_with_file_url(self):
base_url = self.base_url
for entity_type in ('files', 'bundles'):
with self.subTest(entity_type=entity_type):
url = base_url + f"/index/{entity_type}"
response = requests.get(url, params=self._params())
response.raise_for_status()
response_json = response.json()
for hit in response_json['hits']:
if entity_type == 'files':
self.assertEqual(len(hit['files']), 1)
else:
self.assertGreater(len(hit['files']), 0)
for file in hit['files']:
self.assertIn('url', file.keys())
actual_url = urllib.parse.urlparse(file['url'])
actual_query_vars = {k: one(v) for k, v in urllib.parse.parse_qs(actual_url.query).items()}
expected_base_url = urllib.parse.urlparse(base_url)
self.assertEqual(expected_base_url.netloc, actual_url.netloc)
self.assertEqual(expected_base_url.scheme, actual_url.scheme)
self.assertIsNotNone(actual_url.path)
self.assertEqual(self.catalog, actual_query_vars['catalog'])
self.assertIsNotNone(actual_query_vars['version'])
def test_projects_key_search_response(self):
"""
Test building response for projects
Response should include project detail fields that do not appear for other entity type responses
"""
keyword_response = KeywordSearchResponse(
hits=self.get_hits('projects', 'e8642221-4c2c-4fd7-b926-a68bce363c88'),
entity_type='projects',
catalog=self.catalog
).return_response().to_json()
expected_response = {
"hits": [
{
"cellLines": [
],
"cellSuspensions": [
{
"organ": ["pancreas"],
"organPart": ["islet of Langerhans"],
"selectedCellType": [None],
"totalCells": 1
}
],
"donorOrganisms": [
{
"biologicalSex": ["female"],
"disease": ['normal'],
"developmentStage": [None],
"genusSpecies": ["Australopithecus"],
"id": ["DID_scRSq06"],
"donorCount": 1,
"organismAge": ["38"],
"organismAgeUnit": ["year"],
"organismAgeRange": [{"gte": 1198368000.0, "lte": 1198368000.0}]
}
],
"entryId": "e8642221-4c2c-4fd7-b926-a68bce363c88",
"fileTypeSummaries": [
{
"count": 2,
"fileType": "fastq.gz",
"totalSize": 385472253
}
],
"organoids": [
],
"projects": [
{
"arrayExpressAccessions": [None],
"geoSeriesAccessions": [None],
"insdcProjectAccessions": [None],
"insdcStudyAccessions": [None],
"contributors": [
{
"contactName": "<NAME>",
"correspondingContributor": None,
"email": "<EMAIL>",
"institution": "University",
"laboratory": None,
"projectRole": None
},
{
"contactName": "Matthew,,Green",
"correspondingContributor": False,
"email": "<EMAIL>",
"institution": "Farmers Trucks",
"laboratory": "<NAME>",
"projectRole": "Human Cell Atlas wrangler"
},
{
"contactName": "Laura,,Huerta",
"correspondingContributor": False,
"email": "<EMAIL>",
"institution": "Farmers Trucks",
"laboratory": "<NAME>",
"projectRole": "external curator"
}
],
"laboratory": ["<NAME>"],
"projectDescription": "As organisms age, cells accumulate genetic and epigenetic changes "
"that eventually lead to impaired organ function or catastrophic "
"failure such as cancer. Here we describe a single-cell "
"transcriptome analysis of 2544 human pancreas cells from donors, "
"spanning six decades of life. We find that islet cells from older "
"donors have increased levels of disorder as measured both by noise "
"in the transcriptome and by the number of cells which display "
"inappropriate hormone expression, revealing a transcriptional "
"instability associated with aging. By analyzing the spectrum of "
"somatic mutations in single cells from previously-healthy donors, "
"we find a specific age-dependent mutational signature "
"characterized by C to A and C to G transversions, indicators of "
"oxidative stress, which is absent in single cells from human brain "
"tissue or in a tumor cell line. Cells carrying a high load of such "
"mutations also express higher levels of stress and senescence "
"markers, including FOS, JUN, and the cytoplasmic superoxide "
"dismutase SOD1, markers previously linked to pancreatic diseases "
"with substantial age-dependent risk, such as type 2 diabetes "
"mellitus and adenocarcinoma. Thus, our single-cell approach "
"unveils gene expression changes and somatic mutations acquired in "
"aging human tissue, and identifies molecular pathways induced by "
"these genetic changes that could influence human disease. Also, "
"our results demonstrate the feasibility of using single-cell "
"RNA-seq data from primary cells to derive meaningful insights into "
"the genetic processes that operate on aging human tissue and to "
"determine which molecular mechanisms are coordinated with these "
"processes. Examination of single cells from primary human pancreas "
"tissue",
"projectShortname": "Single of human pancreas",
"projectTitle": "Single cell transcriptome patterns.",
"publications": [
{
"publicationTitle": "Single-Cell Analysis of Human Pancreas Reveals "
"Transcriptional Signatures of Aging and Somatic Mutation "
"Patterns.",
"publicationUrl": "https://www.ncbi.nlm.nih.gov/pubmed/28965763"
}
],
"supplementaryLinks": [
"https://www.ebi.ac.uk/gxa/sc/experiments/E-GEOD-81547/Results"
],
"matrices": {},
"contributorMatrices": {}
}
],
"protocols": [
{
"libraryConstructionApproach": ["Smart-seq2"],
"nucleicAcidSource": ["single cell"],
},
{
"instrumentManufacturerModel": ["Illumina NextSeq 500"],
"pairedEnd": [True],
}
],
"samples": [
{
"sampleEntityType": ["specimens"],
"effectiveOrgan": ["pancreas"],
"disease": ["normal"],
"id": ["DID_scRSq06_pancreas"],
"organ": ["pancreas"],
"organPart": ["islet of Langerhans"],
"preservationMethod": [None],
"source": [
"specimen_from_organism"
]
}
],
"specimens": [
{
"disease": ["normal"],
"id": ["DID_scRSq06_pancreas"],
"organ": ["pancreas"],
"organPart": ["islet of Langerhans"],
"preservationMethod": [None],
"source": [
"specimen_from_organism"
]
}
]
}
]
}
self.assertElasticsearchResultsEqual(keyword_response, expected_response)
def test_projects_file_search_response(self):
"""
Test building response for projects
Response should include project detail fields that do not appear for other entity type responses
"""
keyword_response = FileSearchResponse(
hits=self.get_hits('projects', 'e8642221-4c2c-4fd7-b926-a68bce363c88'),
pagination=self.paginations[0],
facets=self.facets_populated,
entity_type='projects',
catalog=self.catalog
).return_response().to_json()
expected_response = {
"hits": [
{
"cellLines": [
],
"cellSuspensions": [
{
"organ": ["pancreas"],
"organPart": ["islet of Langerhans"],
"selectedCellType": [None],
"totalCells": 1
}
],
"donorOrganisms": [
{
"biologicalSex": ["female"],
"disease": ['normal'],
"developmentStage": [None],
"genusSpecies": ["Australopithecus"],
"id": ["DID_scRSq06"],
"donorCount": 1,
"organismAge": ["38"],
"organismAgeUnit": ["year"],
"organismAgeRange": [{"gte": 1198368000.0, "lte": 1198368000.0}]
}
],
"entryId": "e8642221-4c2c-4fd7-b926-a68bce363c88",
"fileTypeSummaries": [
{
"count": 2,
"fileType": "fastq.gz",
"totalSize": 385472253
}
],
"organoids": [
],
"projects": [
{
"arrayExpressAccessions": [None],
"geoSeriesAccessions": [None],
"insdcProjectAccessions": [None],
"insdcStudyAccessions": [None],
"contributors": [
{
"contactName": "Matthew,,Green",
"correspondingContributor": False,
"email": "<EMAIL>",
"institution": "Farmers Trucks",
"laboratory": "John Dear",
"projectRole": "Human Cell Atlas wrangler"
},
{
"contactName": "<NAME>",
"correspondingContributor": None,
"email": "<EMAIL>",
"institution": "University",
"laboratory": None,
"projectRole": None
},
{
"contactName": "Laura,,Huerta",
"correspondingContributor": False,
"email": "<EMAIL>",
"institution": "Farmers Trucks",
"laboratory": "John Dear",
"projectRole": "external curator"
}
],
"laboratory": ["John Dear"],
"projectDescription": "As organisms age, cells accumulate genetic and epigenetic changes "
"that eventually lead to impaired organ function or catastrophic "
"failure such as cancer. Here we describe a single-cell "
"transcriptome analysis of 2544 human pancreas cells from donors, "
"spanning six decades of life. We find that islet cells from older "
"donors have increased levels of disorder as measured both by noise "
"in the transcriptome and by the number of cells which display "
"inappropriate hormone expression, revealing a transcriptional "
"instability associated with aging. By analyzing the spectrum of "
"somatic mutations in single cells from previously-healthy donors, "
"we find a specific age-dependent mutational signature "
"characterized by C to A and C to G transversions, indicators of "
"oxidative stress, which is absent in single cells from human brain "
"tissue or in a tumor cell line. Cells carrying a high load of such "
"mutations also express higher levels of stress and senescence "
"markers, including FOS, JUN, and the cytoplasmic superoxide "
"dismutase SOD1, markers previously linked to pancreatic diseases "
"with substantial age-dependent risk, such as type 2 diabetes "
"mellitus and adenocarcinoma. Thus, our single-cell approach "
"unveils gene expression changes and somatic mutations acquired in "
"aging human tissue, and identifies molecular pathways induced by "
"these genetic changes that could influence human disease. Also, "
"our results demonstrate the feasibility of using single-cell "
"RNA-seq data from primary cells to derive meaningful insights into "
"the genetic processes that operate on aging human tissue and to "
"determine which molecular mechanisms are coordinated with these "
"processes. Examination of single cells from primary human pancreas "
"tissue",
"projectShortname": "Single of human pancreas",
"projectTitle": "Single cell transcriptome patterns.",
"publications": [
{
"publicationTitle": "Single-Cell Analysis of Human Pancreas Reveals "
"Transcriptional Signatures of Aging and Somatic Mutation "
"Patterns.",
"publicationUrl": "https://www.ncbi.nlm.nih.gov/pubmed/28965763"
}
],
"supplementaryLinks": [
'https://www.ebi.ac.uk/gxa/sc/experiments/E-GEOD-81547/Results'
],
"matrices": {},
"contributorMatrices": {}
}
],
"protocols": [
{
"libraryConstructionApproach": ["Smart-seq2"],
"nucleicAcidSource": ["single cell"],
},
{
"instrumentManufacturerModel": ["Illumina NextSeq 500"],
"pairedEnd": [True],
}
],
"samples": [
{
"sampleEntityType": ["specimens"],
"effectiveOrgan": ["pancreas"],
"disease": ["normal"],
"id": ["DID_scRSq06_pancreas"],
"organ": ["pancreas"],
"organPart": ["islet of Langerhans"],
"preservationMethod": [None],
"source": [
"specimen_from_organism"
]
}
],
"specimens": [
{
"disease": ["normal"],
"id": ["DID_scRSq06_pancreas"],
"organ": ["pancreas"],
"organPart": ["islet of Langerhans"],
"preservationMethod": [None],
"source": [
"specimen_from_organism"
]
}
]
}
],
"pagination": {
"count": 2,
"order": "desc",
"pages": 1,
"next": None,
"previous": None,
"size": 5,
"sort": "entryId",
"total": 2
},
"termFacets": {
"disease": {
"terms": [
{
"count": 9,
"term": "silver"
},
{
"count": 12,
"term": None
}
],
"total": 21,
"type": "terms"
},
"organ": {
"terms": [
{
"count": 11,
"term": "silver"
},
{
"count": 10,
"term": "teal"
}
],
"total": 21,
"type": "terms"
}
}
}
self.assertElasticsearchResultsEqual(keyword_response, expected_response)
def test_project_accessions_response(self):
"""
This method tests the KeywordSearchResponse object for the projects entity type,
specifically making sure the accessions fields are present in the response.
"""
keyword_response = KeywordSearchResponse(
hits=self.get_hits('projects', '627cb0ba-b8a1-405a-b58f-0add82c3d635'),
entity_type='projects',
catalog=self.catalog
).return_response().to_json()
expected_response = {
"hits": [
{
"cellLines": [
],
"cellSuspensions": [
{
"organ": ["brain"],
"organPart": ["amygdala"],
"selectedCellType": [None],
"totalCells": 10000
}
],
"donorOrganisms": [
{
"biologicalSex": ["male"],
"disease": ['H syndrome'],
"developmentStage": ["human adult stage"],
"genusSpecies": ["Homo sapiens"],
"id": ["donor_ID_1"],
"donorCount": 1,
"organismAge": ["20"],
"organismAgeUnit": ["year"],
"organismAgeRange": [{"gte": 630720000.0, "lte": 630720000.0}]
}
],
"entryId": "627cb0ba-b8a1-405a-b58f-0add82c3d635",
"fileTypeSummaries": [
{
"count": 1,
"fileType": "bai",
"totalSize": 2395616
},
{
"count": 1,
"fileType": "bam",
"totalSize": 55840108
},
{
"count": 1,
"fileType": "csv",
"totalSize": 665
},
{
"count": 1,
"fileType": "unknown",
"totalSize": 2645006
},
{
"count": 2,
"fileType": "mtx",
"totalSize": 6561141
},
{
"count": 3,
"fileType": "fastq.gz",
"totalSize": 44668092
},
{
"count": 3,
"fileType": "h5",
"totalSize": 5573714
},
{
"count": 4,
"fileType": "tsv",
"totalSize": 15872628
}
],
"organoids": [
],
"projects": [
{
"contributors": [
{
"contactName": "John,D,Doe. ",
"correspondingContributor": False,
"email": "<EMAIL>",
"institution": "EMBL-EBI",
"laboratory": "Department of Biology",
"projectRole": "principal investigator"
}
],
"arrayExpressAccessions": ["E-AAAA-00"],
"geoSeriesAccessions": ["GSE00000"],
"insdcProjectAccessions": ["SRP000000"],
"insdcStudyAccessions": ["PRJNA000000"],
"laboratory": ["Department of Biology"],
"projectDescription": "Contains a small file set from the dataset: 4k PBMCs from a "
"Healthy Donor, a Single Cell Gene Expression Dataset by Cell "
"Ranger 2.1.0. Peripheral blood mononuclear cells (PBMCs) were "
"taken from a healthy donor (same donor as pbmc8k). PBMCs are "
"primary cells with relatively small amounts of RNA (~1pg "
"RNA/cell). Data/Analysis can be found here "
"https://support.10xgenomics.com/single-cell-gene-expression/datasets"
"/2.1.0/pbmc4k and all data is licensed under the creative commons "
"attribution license (https://creativecommons.org/licenses/by/4.0/). "
"This test also contains extensive metadata for browser testing. "
"Metadata is fabricated.",
"projectShortname": "staging/10x/2019-02-14T18:29:38Z",
"projectTitle": "10x 1 Run Integration Test",
"publications": [
{
"publicationTitle": "A title of a publication goes here.",
"publicationUrl": "https://europepmc.org"
}
],
"supplementaryLinks": [None],
"matrices": {},
"contributorMatrices": {}
}
],
"protocols": [
{
"workflow": ['cellranger_v1.0.2']
},
{
"libraryConstructionApproach": ["10X v2 sequencing"],
"nucleicAcidSource": [None],
},
{
"instrumentManufacturerModel": ["Illumina HiSeq 2500"],
"pairedEnd": [False],
}
],
"samples": [
{
"sampleEntityType": ["specimens"],
"effectiveOrgan": ["brain"],
"disease": ["H syndrome"],
"id": ["specimen_ID_1"],
"organ": ["brain"],
"organPart": ["amygdala"],
"preservationMethod": [None],
"source": [
"specimen_from_organism"
]
}
],
"specimens": [
{
"disease": ["H syndrome"],
"id": ["specimen_ID_1"],
"organ": ["brain"],
"organPart": ["amygdala"],
"preservationMethod": [None],
"source": [
"specimen_from_organism"
]
}
]
}
]
}
self.assertElasticsearchResultsEqual(keyword_response, expected_response)
def test_cell_suspension_response(self):
"""
Test KeywordSearchResponse contains the correct selectedCellType value
"""
keyword_response = KeywordSearchResponse(
hits=self.get_hits('projects', '250aef61-a15b-4d97-b8b4-54bb997c1d7d'),
entity_type='projects',
catalog=self.catalog
).return_response().to_json()
cell_suspension = one(keyword_response['hits'][0]['cellSuspensions'])
self.assertEqual(["Plasma cells"], cell_suspension['selectedCellType'])
def test_cell_line_response(self):
"""
Test KeywordSearchResponse contains the correct cell_line and sample field values
"""
keyword_response = KeywordSearchResponse(
hits=self.get_hits('projects', 'c765e3f9-7cfc-4501-8832-79e5f7abd321'),
entity_type='projects',
catalog=self.catalog
).return_response().to_json()
expected_cell_lines = {
'id': ['cell_line_Day7_hiPSC-CM_BioRep2', 'cell_line_GM18517'],
'cellLineType': ['primary', 'stem cell-derived'],
'modelOrgan': ['blood (parent_cell_line)', 'blood (child_cell_line)'],
}
cell_lines = one(one(keyword_response['hits'])['cellLines'])
self.assertElasticsearchResultsEqual(cell_lines, expected_cell_lines)
expected_samples = {
'sampleEntityType': ['cellLines'],
'effectiveOrgan': ['blood (child_cell_line)'],
'id': ['cell_line_Day7_hiPSC-CM_BioRep2'],
'cellLineType': ['stem cell-derived'],
'modelOrgan': ['blood (child_cell_line)'],
}
samples = one(one(keyword_response['hits'])['samples'])
self.assertElasticsearchResultsEqual(samples, expected_samples)
def test_file_response(self):
"""
Test KeywordSearchResponse contains the correct file field values
"""
keyword_response = KeywordSearchResponse(
hits=self.get_hits('files', '4015da8b-18d8-4f3c-b2b0-54f0b77ae80a'),
entity_type='files',
catalog=self.catalog
).return_response().to_json()
expected_file = {
'content_description': ['RNA sequence'],
'format': 'fastq.gz',
'name': 'Cortex2.CCJ15ANXX.SM2_052318p4_D8.unmapped.1.fastq.gz',
'sha256': '709fede4736213f0f71ae4d76719fd51fa402a9112582a4c52983973cb7d7e47',
'size': 22819025,
'uuid': 'a8b8479d-cfa9-4f74-909f-49552439e698',
'version': '2019-10-09T172251.560099Z'
}
file = one(one(keyword_response['hits'])['files'])
self.assertElasticsearchResultsEqual(file, expected_file)
def test_filter_with_none(self):
"""
Test response when using a filter with a None value
"""
test_data_values = [["year"], [None], ["year", None]]
for test_data in test_data_values:
with self.subTest(test_data=test_data):
url = self.base_url + "/index/samples"
params = self._params(size=10,
filters={'organismAgeUnit': {'is': test_data}})
response = requests.get(url, params=params)
response.raise_for_status()
response_json = response.json()
organism_age_units = {
oau
for hit in response_json['hits']
for donor in hit['donorOrganisms']
for oau in donor['organismAgeUnit']
}
# Assert that the organismAgeUnits values found in the response only match what was filtered for
self.assertEqual(organism_age_units, set(test_data))
def test_filter_by_projectId(self):
"""
Test response when using a projectId filter
"""
test_data_sets = [
{
'id': '627cb0ba-b8a1-405a-b58f-0add82c3d635',
'title': '10x 1 Run Integration Test'
},
{
'id': '250aef61-a15b-4d97-b8b4-54bb997c1d7d',
'title': 'Bone marrow plasma cells from hip replacement surgeries'
}
]
for test_data in test_data_sets:
for entity_type in 'files', 'samples', 'projects', 'bundles':
with self.subTest(entity_type=entity_type):
url = self.base_url + "/index/" + entity_type
params = self._params(size=2,
filters={'projectId': {'is': [test_data['id']]}})
response = requests.get(url, params=params)
response.raise_for_status()
response_json = response.json()
for hit in response_json['hits']:
for project in hit['projects']:
if entity_type == 'projects':
self.assertEqual(test_data['title'], project['projectTitle'])
else:
self.assertIn(test_data['title'], project['projectTitle'])
for term in response_json['termFacets']['project']['terms']:
self.assertEqual(term['projectId'], [test_data['id']])
def test_translated_facets(self):
"""
Test that response facets values are correctly translated back to the
correct data types and that the translated None value is not present.
"""
url = self.base_url + "/index/samples"
params = self._params(size=10, filters={})
response = requests.get(url, params=params)
response.raise_for_status()
response_json = response.json()
facets = response_json['termFacets']
paired_end_terms = {term['term'] for term in facets['pairedEnd']['terms']}
self.assertEqual(paired_end_terms, {'true', 'false'})
preservation_method_terms = {term['term'] for term in facets['preservationMethod']['terms']}
self.assertEqual(preservation_method_terms, {None})
model_organ_part_terms = {term['term'] for term in facets['modelOrganPart']['terms']}
self.assertEqual(model_organ_part_terms, {None})
for facet in facets.values():
for term in facet['terms']:
self.assertNotEqual(term['term'], null_str.to_index(None))
def test_sample(self):
"""
        Test that sample(s) in the response contain values matching those in the source cellLine/organoid/specimen
"""
for entity_type in 'projects', 'samples', 'files', 'bundles':
with self.subTest(entity_type=entity_type):
url = self.base_url + "/index/" + entity_type
response = requests.get(url, params=self._params())
response.raise_for_status()
response_json = response.json()
if entity_type == 'samples':
for hit in response_json['hits']:
for sample in hit['samples']:
sample_entity_type = sample['sampleEntityType']
for key, val in sample.items():
if key not in ['sampleEntityType', 'effectiveOrgan']:
if isinstance(val, list):
for one_val in val:
self.assertIn(one_val, hit[sample_entity_type][0][key])
else:
self.assertIn(val, hit[sample_entity_type][0][key])
def test_bundles_outer_entity(self):
entity_type = 'bundles'
url = self.base_url + "/index/" + entity_type
response = requests.get(url, params=self._params())
response.raise_for_status()
response = response.json()
indexed_uuids = set(self.bundles())
self.assertEqual(len(self.bundles()), len(indexed_uuids))
hits_uuids = {
(one(hit['bundles'])['bundleUuid'], one(hit['bundles'])['bundleVersion'])
for hit in response['hits']
}
self.assertEqual(len(response['hits']), len(hits_uuids))
self.assertSetEqual(indexed_uuids, hits_uuids)
def test_ranged_values(self):
test_hits = [
[
{
"biologicalSex": [
"male",
"female"
],
"developmentStage": [None],
"disease": ['normal'],
"genusSpecies": [
"Homo sapiens"
],
"id": [
"HPSI0314i-hoik",
"HPSI0214i-wibj",
"HPSI0314i-sojd",
"HPSI0214i-kucg"
],
"donorCount": 4,
"organismAge": [
"45-49",
"65-69"
],
"organismAgeRange": [
{
"gte": 2049840000.0,
"lte": 2175984000.0
},
{
"gte": 1419120000.0,
"lte": 1545264000.0
}
],
"organismAgeUnit": [
"year"
]
}
],
[
{
"biologicalSex": [
"male",
"female"
],
"developmentStage": [None],
"disease": ['normal'],
"genusSpecies": [
"Homo sapiens"
],
"id": [
"HPSI0314i-hoik",
"HPSI0214i-wibj",
"HPSI0314i-sojd",
"HPSI0214i-kucg"
],
"donorCount": 4,
"organismAge": [
"40-44",
"55-59"
],
"organismAgeRange": [
{
"gte": 1734480000.0,
"lte": 1860624000.0
},
{
"gte": 1261440000.0,
"lte": 1387584000.0
}
],
"organismAgeUnit": [
"year"
]
}
]
]
url = self.base_url + '/index/projects'
for relation, range_value, expected_hits in [('contains', (1419130000, 1545263000), test_hits[:1]),
('within', (1261430000, 1545265000), test_hits),
('intersects', (1860623000, 1900000000), test_hits[1:]),
('contains', (1860624000, 2049641000), []),
('within', (1734490000, 1860623000), []),
('intersects', (1860624100, 2049641000), [])]:
with self.subTest(relation=relation, value=range_value):
params = self._params(filters={'organismAgeRange': {relation: [range_value]}},
order='desc',
sort='entryId')
response = requests.get(url, params=params)
actual_value = [hit['donorOrganisms'] for hit in response.json()['hits']]
self.assertElasticsearchResultsEqual(expected_hits, actual_value)
def test_ordering(self):
sort_fields = [
('cellCount', lambda hit: hit['cellSuspensions'][0]['totalCells']),
('donorCount', lambda hit: hit['donorOrganisms'][0]['donorCount'])
]
url = self.base_url + '/index/projects'
for sort_field, accessor in sort_fields:
responses = {
order: requests.get(url, params=self._params(filters={},
order=order,
sort=sort_field))
for order in ['asc', 'desc']
}
hit_sort_values = {}
for order, response in responses.items():
response.raise_for_status()
hit_sort_values[order] = [accessor(hit) for hit in response.json()['hits']]
self.assertEqual(hit_sort_values['asc'], sorted(hit_sort_values['asc']))
self.assertEqual(hit_sort_values['desc'], sorted(hit_sort_values['desc'], reverse=True))
def test_missing_field_sorting(self):
"""
Test that sorting by a field that doesn't exist in all hits produces
        results with the hits missing the field placed at the end of an
        ascending sort and at the beginning of a descending sort.
"""
ascending_values = [
['induced pluripotent'],
['induced pluripotent'],
['primary', 'stem cell-derived'],
            None,  # The last 4 hits don't have any 'cellLines' inner entities,
            None,  # so for the purposes of this test we use None to represent
            None,  # a hit that has no 'cellLineType'.
            None
]
def extract_cell_line_types(response_json):
# For each hit yield the 'cellLineType' value or None if not present
for hit in response_json['hits']:
if hit['cellLines']:
yield one(hit['cellLines'])['cellLineType']
else:
yield None
for ascending in (True, False):
with self.subTest(ascending=ascending):
url = self.base_url + '/index/projects'
params = self._params(size=15,
filters={},
sort='cellLineType',
order='asc' if ascending else 'desc')
response = requests.get(url, params=params)
response.raise_for_status()
response_json = response.json()
actual_values = list(extract_cell_line_types(response_json))
expected = ascending_values if ascending else list(reversed(ascending_values))
self.assertEqual(actual_values, expected)
def test_multivalued_field_sorting(self):
"""
        Test that sorting by a multi-valued field responds with hits that are
        correctly sorted based on the first value of each multi-valued field, and
        that each multi-valued field itself is sorted low to high regardless of
        the search sort order
"""
for order, reverse in (('asc', False), ('desc', True)):
with self.subTest(order=order, reverse=reverse):
url = self.base_url + "/index/projects"
params = self._params(size=15,
filters={},
sort='laboratory',
order=order)
response = requests.get(url, params=params)
response.raise_for_status()
response_json = response.json()
laboratories = []
for hit in response_json['hits']:
laboratory = one(hit['projects'])['laboratory']
self.assertEqual(laboratory, sorted(laboratory))
laboratories.append(laboratory[0])
self.assertGreater(len(laboratories), 1)
self.assertEqual(laboratories, sorted(laboratories, reverse=reverse))
def test_disease_facet(self):
"""
Verify the values of the different types of disease facets
"""
url = self.base_url + "/index/projects"
test_data = {
# disease specified in donor, specimen, and sample (the specimen)
'627cb0ba-b8a1-405a-b58f-0add82c3d635': {
'sampleDisease': [{'term': 'H syndrome', 'count': 1}],
'donorDisease': [{'term': 'H syndrome', 'count': 1}],
'specimenDisease': [{'term': 'H syndrome', 'count': 1}],
},
# disease specified in donor only
'250aef61-a15b-4d97-b8b4-54bb997c1d7d': {
'sampleDisease': [{'term': None, 'count': 1}],
'donorDisease': [{'term': 'isolated hip osteoarthritis', 'count': 1}],
'specimenDisease': [{'term': None, 'count': 1}],
},
# disease specified in donor and specimen, not in sample (the cell line)
'c765e3f9-7cfc-4501-8832-79e5f7abd321': {
'sampleDisease': [{'term': None, 'count': 1}],
'donorDisease': [{'term': 'normal', 'count': 1}],
'specimenDisease': [{'term': 'normal', 'count': 1}]
}
}
self._assert_term_facets(test_data, url)
def _assert_term_facets(self, project_term_facets: JSON, url: str) -> None:
for project_id, term_facets in project_term_facets.items():
with self.subTest(project_id=project_id):
params = self._params(filters={'projectId': {'is': [project_id]}})
response = requests.get(url, params=params)
response.raise_for_status()
response_json = response.json()
actual_term_facets = response_json['termFacets']
for facet, terms in term_facets.items():
self.assertEqual(actual_term_facets[facet]['terms'], terms)
def test_organism_age_facet(self):
"""
Verify the terms of the organism age facet
"""
url = self.base_url + "/index/projects"
test_data = {
# This project has one donor organism
'627cb0ba-b8a1-405a-b58f-0add82c3d635': {
'organismAge': [
{
'term': {
'value': '20',
'unit': 'year'
},
'count': 1
}
],
'organismAgeUnit': [
{
'term': 'year',
'count': 1
}
],
'organismAgeValue': [
{
'term': '20',
'count': 1
}
],
},
# This project has multiple donor organisms
'2c4724a4-7252-409e-b008-ff5c127c7e89': {
'organismAge': [
{
'term': {
'value': '40-44',
'unit': 'year'
},
'count': 1
},
{
'term': {
'value': '55-59',
'unit': 'year'
},
'count': 1
}
],
'organismAgeUnit': [
{
'term': 'year',
'count': 1
}
],
'organismAgeValue': [
{
'term': '40-44',
'count': 1
},
{
'term': '55-59',
'count': 1
}
]
},
# This project has one donor but donor has no age
'c765e3f9-7cfc-4501-8832-79e5f7abd321': {
'organismAge': [
{
'term': None,
'count': 1
}
],
'organismAgeUnit': [
{
'term': None,
'count': 1
}
],
'organismAgeValue': [
{
'term': None,
'count': 1
}
],
}
}
self._assert_term_facets(test_data, url)
def test_organism_age_facet_search(self):
"""
Verify filtering by organism age
"""
url = self.base_url + "/index/projects"
test_cases = [
(
'627cb0ba-b8a1-405a-b58f-0add82c3d635',
{
'is': [
{
'value': '20',
'unit': 'year'
}
]
}
),
(
'c765e3f9-7cfc-4501-8832-79e5f7abd321',
{
'is': [
None
]
}
),
(
None,
{
'is': [
{}
]
}
),
(
None,
{
'is': [
{
'value': None,
'unit': 'weeks'
}
]
}
)
]
for project_id, filters in test_cases:
with self.subTest(filters=filters):
response = requests.get(url, params=dict(catalog=self.catalog,
filters=json.dumps({'organismAge': filters})))
if project_id is None:
                    self.assertEqual(response.status_code, 400)
else:
response.raise_for_status()
response = response.json()
hit = one(response['hits'])
self.assertEqual(hit['entryId'], project_id)
donor_organism = one(hit['donorOrganisms'])
age = one(one(filters.values()))
self.assertEqual(donor_organism['organismAge'],
[None if age is None else age['value']])
self.assertEqual(donor_organism['organismAgeUnit'],
[None if age is None else age['unit']])
def test_pagination_search_after_search_before(self):
"""
Test search_after and search_before values when using sorting on a field containing None values
"""
url = self.base_url + "/index/samples"
params = self._params(size=3, filters={}, sort='workflow', order='asc')
response = requests.get(url + '?' + urllib.parse.urlencode(params))
response.raise_for_status()
response_json = response.json()
first_page_next = parse_url_qs(response_json['pagination']['next'])
expected_entry_ids = [
'58c60e15-e07c-4875-ac34-f026d6912f1c',
'195b2621-ec05-4618-9063-c56048de97d1',
'2d8282f0-6cbb-4d5a-822c-4b01718b4d0d',
]
self.assertEqual(expected_entry_ids, [h['entryId'] for h in response_json['hits']])
        # NOTE: The sort field `workflow` is an `analysis_protocol` field and
        # does not exist in all bundles. Because the last row on this page of
        # results has no `analysis_protocol` or `workflow` field, the
        # `search_after` field has the value `null` (the JSON representation of
        # `None`). If the last row did have a `workflow` field with the value
        # `None`, `search_after` would be the translated `None` (`"~null"`).
self.assertIsNotNone(response_json['pagination']['next'])
self.assertIsNone(response_json['pagination']['previous'])
self.assertEqual(first_page_next['search_after'], 'null')
self.assertEqual(first_page_next['search_after_uid'], 'doc#2d8282f0-6cbb-4d5a-822c-4b01718b4d0d')
response = requests.get(response_json['pagination']['next'])
response.raise_for_status()
response_json = response.json()
second_page_next = parse_url_qs(response_json['pagination']['next'])
second_page_previous = parse_url_qs(response_json['pagination']['previous'])
expected_entry_ids = [
'308eea51-d14b-4036-8cd1-cfd81d7532c3',
'73f10dad-afc5-4d1d-a71c-4a8b6fff9172',
'79682426-b813-4f69-8c9c-2764ffac5dc1',
]
self.assertEqual(expected_entry_ids, [h['entryId'] for h in response_json['hits']])
self.assertEqual(second_page_next['search_after'], 'null')
self.assertEqual(second_page_next['search_after_uid'], 'doc#79682426-b813-4f69-8c9c-2764ffac5dc1')
self.assertEqual(second_page_previous['search_before'], 'null')
self.assertEqual(second_page_previous['search_before_uid'], 'doc#308eea51-d14b-4036-8cd1-cfd81d7532c3')
class TestSortAndFilterByCellCount(WebServiceTestCase):
maxDiff = None
@classmethod
def bundles(cls) -> List[BundleFQID]:
return super().bundles() + [
# 2 bundles from 1 project with 7738 total cells across 2 cell suspensions
BundleFQID('97f0cc83-f0ac-417a-8a29-221c77debde8', '2019-10-14T195415.397406Z'),
BundleFQID('8c90d4fe-9a5d-4e3d-ada2-0414b666b880', '2019-10-14T195415.397546Z'),
# other bundles
BundleFQID('fa5be5eb-2d64-49f5-8ed8-bd627ac9bc7a', '2019-02-14T192438.034764Z'),
BundleFQID('411cd8d5-5990-43cd-84cc-6c7796b8a76d', '2018-10-18T204655.866661Z'),
BundleFQID('ffac201f-4b1c-4455-bd58-19c1a9e863b4', '2019-10-09T170735.528600Z'),
]
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._setup_indices()
@classmethod
def tearDownClass(cls):
cls._teardown_indices()
super().tearDownClass()
def _count_total_cells(self, response_json):
"""
Return the number of cell suspension inner entities and total cell count
per hit.
"""
return [
(
len(hit['cellSuspensions']),
sum([cs['totalCells'] for cs in hit['cellSuspensions']])
)
for hit in response_json['hits']
]
def test_sorting_by_cell_count(self):
"""
Verify sorting by 'cellCount' sorts the documents based on the total
number of cells in each document, using the sum of total cells when a
document contains more than one cell suspension inner entity.
"""
ascending_results = [
(1, 1),
(1, 349),
(1, 6210),
(2, 7738),
(1, 10000)
]
for ascending in (True, False):
with self.subTest(ascending=ascending):
url = self.base_url + '/index/projects'
params = {
'catalog': self.catalog,
'sort': 'cellCount',
'order': 'asc' if ascending else 'desc'
}
response = requests.get(url, params=params)
response.raise_for_status()
response_json = response.json()
actual_results = self._count_total_cells(response_json)
expected = ascending_results if ascending else list(reversed(ascending_results))
self.assertEqual(actual_results, expected)
def test_filter_by_cell_count(self):
"""
Verify filtering by 'cellCount' filters the documents based on the total
number of cells in each document, using the sum of total cells when a
document contains more than one cell suspension inner entity.
"""
url = self.base_url + "/index/projects"
params = {
'catalog': self.catalog,
'filters': json.dumps({
'cellCount': {
'within': [
[
6000,
9000
]
]
}
})
}
response = requests.get(url, params=params)
response.raise_for_status()
response_json = response.json()
actual_results = self._count_total_cells(response_json)
expected_results = [
(1, 6210),
(2, 7738)
]
self.assertEqual(actual_results, expected_results)
class TestProjectMatrices(WebServiceTestCase):
maxDiff = None
@classmethod
def bundles(cls) -> List[BundleFQID]:
return super().bundles() + [
# An analysis bundle that has two files with a 'dcp2' submitter_id
BundleFQID('f0731ab4-6b80-4eed-97c9-4984de81a47c', '2019-07-23T062120.663434Z'),
# A contributor-generated matrix bundle for the same project
BundleFQID('1ec111a0-7481-571f-b35a-5a0e8fca890a', '2020-10-07T11:11:17.095956Z')
]
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._setup_indices()
@classmethod
def tearDownClass(cls):
cls._teardown_indices()
super().tearDownClass()
@property
def params(self):
return {
'filters': json.dumps({'projectId': {'is': ['091cf39b-01bc-42e5-9437-f419a66c8a45']}}),
'catalog': self.catalog,
'size': 20
}
def test_contributor_matrix_files(self):
"""
Verify the files endpoint returns all the files from both the analysis
and CGM bundles.
"""
url = self.base_url + '/index/files'
response = requests.get(url, params=self.params)
response.raise_for_status()
response_json = response.json()
expected_files = [
# files from the analysis bundle
'13eab62e-0038-4997-aeab-aa3192cc090e.zarr/.zattrs',
'BoneMarrow_CD34_2_IGO_07861_2_S2_L001_R1_001.fastq.gz',
'BoneMarrow_CD34_2_IGO_07861_2_S2_L001_R2_001.fastq.gz',
'empty_drops_result.csv',
'merged-cell-metrics.csv.gz',
'merged-gene-metrics.csv.gz',
'merged.bam',
'sparse_counts.npz',
'sparse_counts_col_index.npy',
'sparse_counts_row_index.npy',
"matrix.csv.zip",
# files from the contributor-generated matrices bundle
'4d6f6c96-2a83-43d8-8fe1-0f53bffd4674.BaderLiverLandscape-10x_cell_type_2020-03-10.csv',
'4d6f6c96-2a83-43d8-8fe1-0f53bffd4674.HumanLiver.zip',
]
self.assertEqual(len(expected_files), len(response_json['hits']))
actual_files = [one(hit['files'])['name'] for hit in response_json['hits']]
self.assertEqual(sorted(expected_files), sorted(actual_files))
def test_matrices_tree(self):
"""
Verify the projects endpoint includes a valid 'matrices' and
'contributorMatrices' tree inside the projects inner-entity.
"""
url = self.base_url + '/index/projects'
response = requests.get(url, params=self.params)
response.raise_for_status()
response_json = response.json()
hit = one(response_json['hits'])
self.assertEqual('091cf39b-01bc-42e5-9437-f419a66c8a45', hit['entryId'])
matrices = {
'genusSpecies': {
'Homo sapiens': {
'developmentStage': {
'human adult stage': {
'libraryConstructionApproach': {
'10X v2 sequencing': {
'organ': {
'blood': [
{
'name': 'matrix.csv.zip',
'url': self.base_url + '/fetch/dss/files/'
'535d7a99-9e4f-406e-a478-32afdf78a522'
'?version=2019-07-23T064742.317855Z'
'&catalog=test'
}
],
'hematopoietic system': [
{
'name': 'sparse_counts.npz',
'url': self.base_url + '/fetch/dss/files/'
'787084e4-f61e-4a15-b6b9-56c87fb31410'
'?version=2019-07-23T064557.057500Z'
'&catalog=test'
},
{
'name': 'merged-cell-metrics.csv.gz',
'url': self.base_url + '/fetch/dss/files/'
'9689a1ab-02c3-48a1-ac8c-c1e097445ed8'
'?version=2019-07-23T064556.193221Z'
'&catalog=test'
}
]
}
}
}
}
}
}
}
}
self.assertEqual(matrices, one(hit['projects'])['matrices'])
contributor_matrices = {
'organ': {
'liver': {
'genusSpecies': {
'Homo sapiens': {
'developmentStage': {
'human adult stage': {
'library': {
'10X v2 sequencing': [
{
'name': '4d6f6c96-2a83-43d8-8fe1-0f53bffd4674.'
'BaderLiverLandscape-10x_cell_type_2020-03-10.csv',
'url': self.base_url + '/fetch/dss/files/'
'0d8607e9-0540-5144-bbe6-674d233a900e'
'?version=2020-10-20T15%3A53%3A50.322559Z'
'&catalog=test'
}
],
'Smart-seq2': [
{
'name': '4d6f6c96-2a83-43d8-8fe1-0f53bffd4674.'
'BaderLiverLandscape-10x_cell_type_2020-03-10.csv',
'url': self.base_url + '/fetch/dss/files/'
'0d8607e9-0540-5144-bbe6-674d233a900e'
'?version=2020-10-20T15%3A53%3A50.322559Z'
'&catalog=test'
}
]
}
}
}
},
'Mus musculus': {
'developmentStage': {
'adult': {
'library': {
'10X v2 sequencing': [
{
'name': '4d6f6c96-2a83-43d8-8fe1-0f53bffd4674.HumanLiver.zip',
'url': self.base_url + '/fetch/dss/files/'
'7c3ad02f-2a7a-5229-bebd-0e729a6ac6e5'
'?version=2020-10-20T15%3A53%3A50.322559Z'
'&catalog=test'
}
]
}
}
}
}
}
}
}
}
self.assertEqual(contributor_matrices, one(hit['projects'])['contributorMatrices'])
class TestResponseSummary(WebServiceTestCase):
maxDiff = None
@classmethod
def bundles(cls) -> List[BundleFQID]:
return super().bundles() + [
BundleFQID('dcccb551-4766-4210-966c-f9ee25d19190', '2018-10-18T204655.866661Z'),
BundleFQID('94f2ba52-30c8-4de0-a78e-f95a3f8deb9c', '2019-04-03T103426.471000Z') # an imaging bundle
]
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._setup_indices()
@classmethod
def tearDownClass(cls):
cls._teardown_indices()
super().tearDownClass()
def test_summary_response(self):
"""
Verify the /index/summary response with two sequencing bundles and
one imaging bundle that has no cell suspension.
- bundle=aaa96233…, fileCount=2, donorCount=1, totalCellCount=1.0, organType=pancreas, labCount=1
- bundle=dcccb551…, fileCount=19, donorCount=4, totalCellCount=6210.0, organType=Brain, labCount=1
- bundle=94f2ba52…, fileCount=227, donorCount=1, totalCellCount=0, organType=brain, labCount=(None counts as 1)
"""
url = self.base_url + "/index/summary"
response = requests.get(url, params=dict(catalog=self.catalog))
response.raise_for_status()
summary_object = response.json()
self.assertEqual(summary_object['fileCount'], 2 + 19 + 227)
self.assertEqual(summary_object['labCount'], 1 + 1 + 1)
self.assertEqual(summary_object['donorCount'], 1 + 4 + 1)
self.assertEqual(summary_object['totalCellCount'], 1.0 + 6210.0 + 0)
file_counts_expected = {
'tiff': 221,
'json': 6,
'fastq.gz': 5,
'tsv': 4,
'h5': 3,
'pdf': 3,
'mtx': 2,
'bai': 1,
'bam': 1,
'csv': 1,
'unknown': 1
}
file_counts_actual = {summary['fileType']: summary['count'] for summary in summary_object['fileTypeSummaries']}
self.assertEqual(file_counts_actual, file_counts_expected)
self.assertEqual(set(summary_object['organTypes']), {'Brain', 'brain', 'pancreas'})
self.assertEqual(summary_object['cellCountSummaries'], [
# 'brain' from the imaging bundle is not represented in cellCountSummaries as these values are tallied
# from the cell suspensions and the imaging bundle does not have any cell suspensions
{'organType': ['Brain'], 'countOfDocsWithOrganType': 1, 'totalCellCountByOrgan': 6210.0},
{'organType': ['pancreas'], 'countOfDocsWithOrganType': 1, 'totalCellCountByOrgan': 1.0},
])
def test_summary_filter_none(self):
for use_filter, labCount in [(False, 3), (True, 2)]:
with self.subTest(use_filter=use_filter, labCount=labCount):
url = self.base_url + '/index/summary'
params = dict(catalog=self.catalog)
if use_filter:
params['filters'] = json.dumps({"organPart": {"is": [None]}})
response = requests.get(url, params=params)
response.raise_for_status()
summary_object = response.json()
self.assertEqual(summary_object['labCount'], labCount)
class TestUnpopulatedIndexResponse(WebServiceTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.index_service.create_indices(cls.catalog)
@classmethod
def tearDownClass(cls):
cls.index_service.delete_indices(cls.catalog)
super().tearDownClass()
def test_empty_response(self):
url = self.base_url + "/index/projects"
response = requests.get(url)
response.raise_for_status()
response = response.json()
self.assertEqual([], response['hits'])
self.assertEqual({None}, set(response['pagination'].values()))
self.assertEqual({}, response['termFacets'])
class TestPortalIntegrationResponse(LocalAppTestCase):
@classmethod
def lambda_name(cls) -> str:
return "service"
maxDiff = None
# Mocked DB content for the tests
_portal_integrations_db = [
{
"portal_id": "9852dece-443d-42e8-869c-17b9a86d447e",
"integrations": [
{
"integration_id": "b87b7f30-2e60-4ca5-9a6f-00ebfcd35f35",
"integration_type": "get_manifest",
"entity_type": "file",
"manifest_type": "full",
},
{
"integration_id": "977854a0-2eea-4fec-9459-d4807fe79f0c",
"integration_type": "get",
"entity_type": "project",
"entity_ids": ["c4077b3c-5c98-4d26-a614-246d12c2e5d7"]
}
]
},
{
"portal_id": "f58bdc5e-98cd-4df4-80a4-7372dc035e87",
"integrations": [
{
"integration_id": "e8b3ca4f-bcf5-42eb-b58c-de6d7e0fe138",
"integration_type": "get",
"entity_type": "project",
"entity_ids": ["c4077b3c-5c98-4d26-a614-246d12c2e5d7"]
},
{
"integration_id": "dbfe9394-a326-4574-9632-fbadb51a7b1a",
"integration_type": "get",
"entity_type": "project",
"entity_ids": ["90bd6933-40c0-48d4-8d76-778c103bf545"]
},
{
"integration_id": "f13ddf2d-d913-492b-9ea8-2de4b1881c26",
"integration_type": "get",
"entity_type": "project",
"entity_ids": ["cddab57b-6868-4be4-806f-395ed9dd635a"]
},
{
"integration_id": "224b1d42-b939-4d10-8a8f-2b2ac304b813",
"integration_type": "get",
"entity_type": "project",
# NO entity_ids field
}
]
}
]
def _mock_portal_crud(self, operation):
operation(self._portal_integrations_db)
def _get_integrations(self, params: dict) -> dict:
url = self.base_url + '/integrations'
response = requests.get(url, params=params)
response.raise_for_status()
return response.json()
@classmethod
def _extract_integration_ids(cls, response_json):
return [
integration['integration_id']
for portal in response_json
for integration in portal['integrations']
]
@mock.patch('azul.portal_service.PortalService._crud')
def test_integrations(self, portal_crud):
"""
Verify requests specifying `integration_type` and `entity_type` only return integrations matching those types
"""
test_cases = [
('get_manifest', 'file', ['b87b7f30-2e60-4ca5-9a6f-00ebfcd35f35']),
('get', 'bundle', []),
(
'get',
'project',
[
'977854a0-2eea-4fec-9459-d4807fe79f0c',
'e8b3ca4f-bcf5-42eb-b58c-de6d7e0fe138',
'dbfe9394-a326-4574-9632-fbadb51a7b1a',
'f13ddf2d-d913-492b-9ea8-2de4b1881c26',
'224b1d42-b939-4d10-8a8f-2b2ac304b813'
]
)
]
portal_crud.side_effect = self._mock_portal_crud
with mock.patch.object(type(config), 'dss_deployment_stage', 'prod'):
for integration_type, entity_type, expected_integration_ids in test_cases:
params = dict(integration_type=integration_type, entity_type=entity_type)
with self.subTest(**params):
response_json = self._get_integrations(params)
found_integration_ids = self._extract_integration_ids(response_json)
self.assertEqual(len(expected_integration_ids), len(found_integration_ids))
self.assertEqual(set(expected_integration_ids), set(found_integration_ids))
self.assertTrue(all(isinstance(integration.get('entity_ids', []), list)
for portal in response_json
for integration in portal['integrations']))
@mock.patch('azul.portal_service.PortalService._crud')
def test_integrations_by_entity_ids(self, portal_crud):
"""
Verify requests specifying `entity_ids` only return integrations matching those entity_ids
"""
# 224b1d42-b939-4d10-8a8f-2b2ac304b813 must appear in every test since it lacks the entity_ids field
test_cases = [
# One project entity id specified by one integration
(
'cddab57b-6868-4be4-806f-395ed9dd635a',
[
'f13ddf2d-d913-492b-9ea8-2de4b1881c26',
'224b1d42-b939-4d10-8a8f-2b2ac304b813'
]
),
# Two project entity ids specified by two different integrations
(
'cddab57b-6868-4be4-806f-395ed9dd635a, 90bd6933-40c0-48d4-8d76-778c103bf545',
[
'f13ddf2d-d913-492b-9ea8-2de4b1881c26',
'dbfe9394-a326-4574-9632-fbadb51a7b1a',
'224b1d42-b939-4d10-8a8f-2b2ac304b813'
]
),
# One project entity id specified by two different integrations
(
'c4077b3c-5c98-4d26-a614-246d12c2e5d7',
[
'977854a0-2eea-4fec-9459-d4807fe79f0c',
'e8b3ca4f-bcf5-42eb-b58c-de6d7e0fe138',
'224b1d42-b939-4d10-8a8f-2b2ac304b813'
]
),
# Blank entity id, to match integrations lacking the entity_id field
(
'',
[
'224b1d42-b939-4d10-8a8f-2b2ac304b813'
]
),
# No entity id, accepting all integrations
(
None,
[
'f13ddf2d-d913-492b-9ea8-2de4b1881c26',
'dbfe9394-a326-4574-9632-fbadb51a7b1a',
'977854a0-2eea-4fec-9459-d4807fe79f0c',
'e8b3ca4f-bcf5-42eb-b58c-de6d7e0fe138',
'224b1d42-b939-4d10-8a8f-2b2ac304b813'
]
)
]
portal_crud.side_effect = self._mock_portal_crud
with mock.patch.object(type(config), 'dss_deployment_stage', 'prod'):
for entity_ids, integration_ids in test_cases:
params = dict(integration_type='get', entity_type='project')
if entity_ids is not None:
params['entity_ids'] = entity_ids
with self.subTest(**params):
response_json = self._get_integrations(params)
found_integration_ids = self._extract_integration_ids(response_json)
self.assertEqual(set(integration_ids), set(found_integration_ids))
if __name__ == '__main__':
unittest.main()
| 2.09375
| 2
|
examples/learning/reinforcement/upswing/_model/double_pendulum.py
|
JonathanLehner/korali
| 43
|
12777912
|
<filename>examples/learning/reinforcement/upswing/_model/double_pendulum.py
#!/usr/bin/env python3
## Copyright (c) 2018 CSE-Lab, ETH Zurich, Switzerland. All rights reserved.
## Distributed under the terms of the MIT license.
##
## Created by <NAME> (<EMAIL>).
import math
from math import sin, cos
import numpy as np, sys
from scipy.integrate import ode
class DoublePendulum:
def __init__(self):
self.dt = 0.01
self.step=0
# x, th1, th2, xdot, th1dot, th2dot
self.u = np.asarray([0, 0, 0, 0, 0, 0])
self.F=0
self.t=0
self.ODE = ode(self.system).set_integrator('dopri5')
def reset(self):
self.u = np.random.uniform(-0.05, 0.05, 6)
self.u[1] += math.pi # start from bottom
self.u[2] += math.pi
self.step = 0
self.F = 0
self.t = 0
def isFailed(self):
return (abs(self.u[0])>7.0)
def isOver(self): # is episode over
return self.isFailed()
def isTruncated(self): # check that cause for termination is time limits
return (abs(self.u[0])<=7.0)
"""Based on 'Control Design and Analysis for
Underactuated Tobotic Systems [Xin, Liu] (Chapter14)"""
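    # Sketch of the dynamics implemented in system() below, written in the
    # standard manipulator form:  M(q)*qddot + C(q, qdot)*qdot + G(q) = B*F,
    # with q = [x, th1, th2] and B = [1, 0, 0] selecting the cart force F;
    # system() recovers qddot via np.linalg.solve(M, B*F - G - C @ qdot).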
@staticmethod
def system(t, y, fact): #dynamics function
#mc: mass cart
#comi: center of mass of link i (at li/2)
#li: length of links
#g: gravitational constant
mc, com1, com2, l1, l2, g = 1.0, 0.5, 0.5, 1.0, 1.0, 9.81
# internal params
lc1, lc2 = 0.5*l1, 0.5*l2
J1 = com1*lc1*lc1/3
J2 = com2*lc2*lc2/3
a0 = com1 + com2 + mc
a1 = J1+com1*lc1*lc1+com2*lc2*lc2
a2 = J2+com2*lc2*lc2
a3 = com2*l1*lc2
b1 = (com1*lc1+com2*l1)*g
b2 = com2*lc2*g
# simplify
x = y[0]
th1 = y[1]
th2 = y[2]
xdot = y[3]
th1dot = y[4]
th2dot = y[5]
qdot = np.array([xdot, th1dot, th2dot])
B = np.array([1, 0, 0])
M = np.array([
[a0, b1/g*cos(th1), b2/g*cos(th2)],
[b1/g*cos(th1), a1, a3*cos(th1-th2)],
[b2/g*cos(th2), a3*cos(th1-th2), a2]])
C = np.array([
[0.0, -b1/g*th1dot*sin(th1), -b2/g*th2dot*sin(th2)],
[0.0, 0.0, a3*th2dot*sin(th1-th2)],
[0.0, -a3*th1dot*sin(th1-th2), 0]])
G = np.array([0.0, -b1*sin(th1), -b2*sin(th2)])
RHS = np.linalg.solve(M, B*fact-G-np.matmul(C,qdot))
# xdot, th1dot, th2dot, xdotdot, th1dotdot, th2dotdot
return np.concatenate((qdot,RHS))
def wrapToNPiPi(self, rad):
return (rad + np.pi) % (2 * np.pi) - np.pi
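        # e.g. wrapToNPiPi(1.5 * np.pi) -> -0.5 * np.pi (up to float rounding);
        # results always lie in [-pi, pi)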
def advance(self, action):
self.F = action[0]
self.u += self.dt*self.system(self.t, self.u, self.F)
#self.ODE.set_initial_value(self.u, self.t).set_f_params(self.F)
#self.u = self.ODE.integrate(self.t + self.dt)
self.u[1] = self.wrapToNPiPi(self.u[1])
self.u[2] = self.wrapToNPiPi(self.u[2])
self.t = self.t + self.dt
self.step = self.step + 1
if self.isOver():
return 1
else:
return 0
def getState(self):
state = np.zeros(7)
state[0] = np.copy(self.u[0])
state[1] = np.copy(self.u[1])
state[2] = np.copy(self.u[2])
state[3] = np.copy(self.u[3])
state[4] = np.copy(self.u[4])
        state[5] = np.copy(self.u[5])
state[6] = cos(state[1])+cos(state[2])
# maybe transform state
# ..
# ..
        assert np.all(np.isfinite(state)), "State is not finite: {}".format(state)
return state
def getReward(self):
th1 = np.copy(self.u[1])
th2 = np.copy(self.u[2])
# return 2 + cos(th1) + cos(th2) - 200.*float(self.isFailed())
return 2 + cos(th1) + cos(th2)
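        # Reward peaks at 4 with both links upright (th1 = th2 = 0) and is 0
        # when both hang straight down (th1 = th2 = pi).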
if __name__ == '__main__':
print("init..")
dp = DoublePendulum()
dp.reset()
state = dp.getState()
print("state:")
print(state)
dp.advance([1.0, 0.0, 0.0])
state = dp.getState()
print("state after one step:")
print(state)
print("exit.. BYE!")
| 2.828125
| 3
|
ungit.py
|
jakebruce/qutico-8
| 0
|
12777913
|
#! /usr/bin/env python3
import os
import sys
def error(msg):
print(msg)
print(f"Usage: {sys.argv[0]} FILE.gitp8")
print(" Converts FILE.gitp8 in merge-friendly format to FILE.p8 in pico-8 format.")
sys.exit(1)
if len(sys.argv) != 2:
error("Exactly 1 argument required.")
if not os.path.exists(sys.argv[1]):
error(f"{sys.argv[1]} does not exist.")
if not sys.argv[1].endswith(".gitp8"):
error(f"{sys.argv[1]} does not have extension .gitp8")
fname=sys.argv[1]
base=fname.split(".gitp8")[0]
with open(fname, "r") as f:
lines=f.read().splitlines()
labels = ["__lua__", "__gfx__", "__gff__", "__label__", "__map__", "__sfx__", "__music__"]
def get_segment(lines, label):
segment=[]
reading=False
for line in lines:
if line == label:
reading=True
elif line in labels:
reading=False
if reading:
segment.append(line)
return segment
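# reserialize() below rebuilds pico-8's row-major character grid from the
# merge-friendly layout: each line after the section label holds one w-wide,
# h-tall block, and block i is written back into a CW x CH grid at block
# column i % BW and block row i // BW.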
def reserialize(lines, w, h, L):
if not lines:
return lines
lbl = lines[0]
lines = lines[1:]
CW = L
CH = len(lines)*w*h//L
serialized = [['z' for _ in range(CW)] for __ in range(CH)]
BW = CW // w
BH = CH // h
for i in range(len(lines)):
bx = i % BW
by = i // BW
for dx in range(w):
for dy in range(h):
serialized[by*h+dy][bx*w+dx] = lines[i][dy*w+dx]
serialized = ["".join(l) for l in serialized]
return [lbl] + serialized
hdr = lines[:2]
lua = get_segment(lines, "__lua__")
gfx = get_segment(lines, "__gfx__")
gff = get_segment(lines, "__gff__")
lbl = get_segment(lines, "__label__")
map = get_segment(lines, "__map__")
sfx = get_segment(lines, "__sfx__")
msc = get_segment(lines, "__music__")
gfx = reserialize(gfx, 8, 8, 128)
gff = reserialize(gff, 2, 1, 256)
map = reserialize(map, 2, 1, 256)
if os.path.exists(base+".p8"):
if input(f"{base}.p8 exists. Overwrite? (y/n) ") not in ["y", "Y"]:
print("Aborted.")
sys.exit(1)
with open(base+".p8", "w") as f:
for lines in [hdr, lua, gfx, gff, lbl, map, sfx, msc]:
if lines:
f.write("\n".join(lines))
f.write("\n")
| 2.8125
| 3
|
func/Functions.py
|
cviaai/unsupervised-heartbeat-anomaly-detection
| 2
|
12777914
|
<reponame>cviaai/unsupervised-heartbeat-anomaly-detection<gh_stars>1-10
import pandas as pd
import numpy as np
import sys, os
import time
import biosppy
import matplotlib.pyplot as plt
import scipy
from sliding.ecg_slider import ECGSlider
from sliding.slider import Slider
from statistic.wasserstein_distance import WassersteinDistance, WassersteinDistanceDeviation
from transform.indexed_transform import IndexedTransformer
from transform.interpolate import SplineInterpolate
from transform.pca import PCATransformer
from transform.scale import ScaleTransform
from transform.series_to_curve import CurveProjection, IndicesWindow
from transform.transformer import SequentialTransformer
from transform.triangle_pattern import TrianglePattern
from tqdm import tqdm
# Additional helper functions are defined here
dist = WassersteinDistance()
# Rescale one distribution so that its mean and std match another's
def std_mean_change(was_dev,was):
new_data=[]
for i in was_dev:
new_data.append(np.mean(was)+(i-np.mean(was_dev))*\
(np.std(was)/np.std(was_dev)))
return new_data
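# In effect: new_x = mean(was) + (x - mean(was_dev)) * std(was) / std(was_dev)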
# Chunk a time series into a given number of roughly equal parts
def chunkIt(seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
return out
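# Example: chunkIt([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4, 5]]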
def recall(df,new_list,sig_series):
if len(new_list)==0 :
recall=('arrhythmia not detected')
else:
start_ind=sig_series.index[0]
last_ind=sig_series.index[-1]
new_list=(new_list.flatten()).tolist()
T_p=[]
Tp_Fn=[]
for i in (df).tolist():
if int(i)> int(start_ind) and int(i)<int(last_ind):
if int(i) in new_list:
T_p.append(i)
Tp_Fn.append(i)
if len(Tp_Fn)==0:
recall=('wrong detection')
else:
recall=(len(T_p)/len(Tp_Fn))
print(len(T_p),len(Tp_Fn))
return recall
def specifity(df,new_list,sig_series):
new_list=(new_list.flatten()).tolist()
start_ind=sig_series.index[0]
last_ind=sig_series.index[-1]
T_n=[]
F_p=[]
for i in (df).tolist():
if int(i)> int(start_ind) and int(i)<int(last_ind):
if int(i) not in new_list:
T_n.append(i)
if int(i) in new_list:
F_p.append(i)
d=len(T_n)+len(F_p)
if d==0:
spec=('no such data')
else:
spec=len(T_n)/d
return spec,len(T_n),len(F_p)
def precision(df_Norm,df,new_list,sig_series):
new_list=(new_list.flatten()).tolist()
start_ind=sig_series.index[0]
last_ind=sig_series.index[-1]
T_p=[]
F_p=[]
for i in (df_Norm).tolist():
if int(i)> int(start_ind) and int(i)<int(last_ind):
if int(i) in new_list:
F_p.append(i)
for i in (df).tolist():
if int(i)> int(start_ind) and int(i)<int(last_ind):
if int(i) in new_list:
T_p.append(i)
d=len(T_p)+len(F_p)
if d==0:
prec='no such data'
else:
prec=len(T_p)/d
return prec, len(T_p),len(F_p)
def F_score(recall,precision):
F_score=2*recall*precision/(recall+precision)
return F_score
def accuracy(df_Norm,df,new_list,sig_series):
new_list=(new_list.flatten()).tolist()
start_ind=sig_series.index[0]
last_ind=sig_series.index[-1]
T_p=[]
F_p=[]
T_n=[]
F_n=[]
for i in (df_Norm).tolist():
if int(i)> int(start_ind) and int(i)<int(last_ind):
if int(i) in new_list:
F_p.append(i)
else:
T_n.append(i)
for i in (df).tolist():
if int(i)> int(start_ind) and int(i)<int(last_ind):
if int(i) in new_list:
T_p.append(i)
else:
F_n.append(i)
d=len(T_p)+len(T_n)+len(F_n)+len(F_p)
if d==0:
accur='no such data'
else:
accur=(len(T_p)+len(T_n))/d
return accur
def AUC_score(FPR,TPR):
AUC=1/2 - FPR/2 + TPR/2
return AUC
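# For a single operating point this equals (1 + TPR - FPR) / 2, i.e. the area
# under the two-segment ROC curve through (FPR, TPR).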
# Create a new time series containing only the segments labeled as arrhythmia
def index_to_series(list_ind,data):
list_ind=(np.array(list_ind).flatten())
arrhythmia_series=[]
for i in list_ind:
for j in i:
arrhythmia_series.append(data[j])
return arrhythmia_series
# For the dataset: list all file names under a root directory
def names(root):
names=[]
for path, subdirs, files in os.walk(root):
for name in files:
names.append(os.path.join(path, name))
names=sorted(names)
return names
# For the dataset: pull one batch and return an ECG signal with its name and target
def data(batch_gen):
dataiter = iter(batch_gen)
ecg,name,target_name= dataiter.next()
ecg=ecg.numpy()
for i in ecg:
ecg_new=i
return ecg_new,name,target_name
# Calculation of curves (one per sliding window, plus the cumulative curve up to each window)
def curves_calculation(signal,p,n_components,size):
smooth_transform = SequentialTransformer(
ScaleTransform(0, 1),
SplineInterpolate(0.01)
)
curve_transform = SequentialTransformer(
CurveProjection(
window=IndicesWindow.range(size=size, step=5),
step=1
),
PCATransformer(n_components)
)
smooth_data = smooth_transform(signal)
window_curve=curve_transform(signal)
dist = WassersteinDistance(p)
dist_dev = WassersteinDistanceDeviation(p)
slider = ECGSlider(smooth_data, 6, 200).iterator()
curves=[]
total_curve=[]
for index, window_data in (slider):
window_curve = curve_transform(window_data)
curves.append(window_curve)
total_curve.append(curve_transform(smooth_data[:index[-1]]))
return curves,total_curve
#annotations for files
def annotations(file_number):
data_f = pd.read_csv('annotations/'+str(file_number)+'annotations.txt')
r=data_f.iloc[:,0]
rr=r.str.split(expand=True)
rr.columns=['time', 'sample','type','sub','chan','Num','Aux']
data_arrhythmia=rr.loc[(rr['type'] != 'N')&(rr['type'] != '· ') ]
data_normal=rr.loc[(rr['type'] == 'N')|(rr['type'] == '· ') ]
return data_arrhythmia,data_normal,rr
# True arrhythmia labels
def true_labels(data_arrhythmia,sig_series):
start_ind=sig_series.index[0]
last_ind=sig_series.index[-1]
True_labels=[]
for i in (data_arrhythmia['sample']).tolist():
if int(i)> int(start_ind) and int(i)<int(last_ind):
True_labels.append(i)
return True_labels
def arrhythmia_index(res,sig_series,pad):
indexes=[]
ind=[]
for k,i in enumerate(res[0].index):
for j in res[0].index[1:]:
if (j-i)==1:
indexes.append(np.arange(res[0][i]+sig_series.index[0],res[0][j]+sig_series.index[0]+pad))
ind.append(i)
ind.append(j)
ind=np.unique(ind)
result =list(set(list(res[0].index))-set(ind.tolist()))
for i in result:
indexes.append(np.arange(res[0][i],res[0][i]+pad))
return indexes
def arrhythmia_index_check(res,sig_series,pad):
indexes=np.array([])
ind=[]
for k,i in enumerate(res[0].index):
for j in res[0].index[1:]:
if (j-i)==1:
indexes=np.append((np.arange(res[0][i]+sig_series.index[0],res[0][j]+sig_series.index[0]+pad)),indexes)
ind.append(i)
ind.append(j)
ind=np.unique(ind)
result =list(set(list(res[0].index))-set(ind.tolist()))
for i in result:
indexes=np.append((np.arange(res[0][i],res[0][i]+pad)),indexes)
return indexes
| 2.359375
| 2
|
wildfire/multiprocessing.py
|
Ferrumofomega/goes
| 1
|
12777915
|
"""Utilities for multiprocessing."""
from contextlib import contextmanager
import logging
import time
from dask.distributed import Client, LocalCluster, progress
from dask_jobqueue import PBSCluster
import numpy as np
_logger = logging.getLogger(__name__)
def map_function(function, function_args, pbs=False, **cluster_kwargs):
"""Parallize `function` over `function_args` across available CPUs.
Utilizes dask.distributed.Client.map which follows the implementation of built-in
`map`. See https://docs.python.org/3/library/functions.html#map and
https://distributed.dask.org/en/latest/client.html.
Examples
--------
```
def add(x, y):
return x + y
xs = [1, 2, 3, 4]
ys = [11, 12, 13, 14]
map_function(add, [xs, ys]) => [12, 14, 16, 18]
```
Parameters
----------
function : function | method
function_args : list
If `function` takes multiple args, follow implementation of `map`. Namely, if
f(x1, x2) => y, then `function_args` should be `[all_x1, all_x2]`.
pbs : bool, optional
Whether or not to create a PBS job over whose cluster to parallize, by default
False.
Returns
-------
list
"""
_logger.info(
"Running %s in parallel with args of shape %s",
function.__name__,
np.shape(function_args),
)
with dask_client(pbs=pbs, **cluster_kwargs) as client:
if len(np.shape(function_args)) == 1:
function_args = [function_args]
futures = client.map(function, *function_args)
progress(futures)
return_values = client.gather(futures)
return return_values
@contextmanager
def dask_client(pbs=False, **cluster_kwargs):
"""Context manager surrounding a dask client. Handles closing upon completion.
Examples
--------
```
with dask_client() as client:
client.do_something()
```
Parameters
----------
pbs: bool, optional
Whether or not dask should submit a PBS job over whose cluster to operate.
**cluster_kwargs:
Arguments to either `PBSCluster` or `LocalCluster` which are pretty much the
same. Some usefule arguments include:
- n_workers
- cores
- interface
- memory
- walltime
"""
if pbs:
cluster = PBSCluster(**cluster_kwargs)
if "n_workers" not in cluster_kwargs:
cluster.scale(1)
else:
cluster = LocalCluster(processes=False, **cluster_kwargs)
client = Client(cluster)
client.wait_for_workers(n_workers=1)
time.sleep(5)
try:
_logger.info("Dask Cluster: %s\nDask Client: %s", cluster, client)
yield client
finally:
client.close()
cluster.close()
_logger.info("Closed client and cluster")
def flatten_array(arr):
"""Flatten an array by 1 dimension."""
shape = np.array(arr).shape
if len(shape) == 1:
return arr
return [item for list_1d in arr for item in list_1d]
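# Example: flatten_array([[1, 2], [3, 4]]) -> [1, 2, 3, 4]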
| 2.75
| 3
|
examples/random_dataguy.py
|
redfungus/webtraversallibrary
| 41
|
12777916
|
<reponame>redfungus/webtraversallibrary<gh_stars>10-100
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Defines a subset of all active elements (menu items) and clicks randomly on those.
"""
from random import choice
from typing import List
import webtraversallibrary as wtl
from webtraversallibrary.actions import Click
from .util import parse_cli_args
@wtl.single_tab
def policy(_, view: wtl.View) -> wtl.Action:
menu_actions = view.actions.by_type(Click).by_score("menu")
return choice(menu_actions)
def menu_classifier_func(elements: wtl.Elements, _) -> List[wtl.PageElement]:
# The condition here is completely hard-coded for the given page.
return [elem for elem in elements if elem.location.x < 10 and elem.location.y < 200 and elem.metadata["tag"] == "a"]
if __name__ == "__main__":
cli_args = parse_cli_args()
workflow = wtl.Workflow(config=wtl.Config(cli_args.config), policy=policy, url=cli_args.url, output=cli_args.output)
workflow.classifiers.add(wtl.ActiveElementFilter(action=Click))
workflow.classifiers.add(
wtl.ElementClassifier(
name="menu",
action=Click,
subset="is_active", # Consider only active elements
highlight=True,
callback=menu_classifier_func,
)
)
workflow.run()
workflow.quit()
| 2.1875
| 2
|
2020/1/main.py
|
klrkdekira/adventofcode
| 1
|
12777917
|
with open('input') as file:
prev = []
found_twins = False
found_triplets = False
for val in map(int, map(lambda i: i.strip(), file)):
for x in prev:
if not found_twins and x + val == 2020:
print('twins', x * val)
found_twins = True
for y in prev:
if not found_triplets and x + y + val == 2020:
print('triplets', x * y * val)
found_triplets = True
if found_triplets:
break
if found_twins and found_triplets:
break
prev.append(val)
| 3.375
| 3
|
samples/awsCall/awsCall.py
|
aws-samples/cloudwatch-custom-widgets-samples
| 12
|
12777918
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
# CloudWatch Custom Widget sample: call any read-only AWS API and return raw results in JSON
import boto3
import json
import os
import re
DOCS = """
## Make an AWS Call
Calls any (read-only) AWS API and displays the result as JSON.
### Widget parameters
Param | Description
---|---
**service** | The name of the AWS service to call, e.g. **EC2** or **CloudWatch**
**api** | The API name to call
**params** | The parameters to pass to the API
### Example parameters
``` yaml
service: EC2
api: describeInstances
params:
Filters:
- Name: instance-state-name
Values:
- running
```"""
def lambda_handler(event, context):
if 'describe' in event:
return DOCS
service = event.get('service', 'cloudwatch').lower()
apiRaw = event.get('api', 'list_dashboards')
api = re.sub(r'(?<!^)(?=[A-Z])', '_', apiRaw).lower() # Convert to snakecase in case it's in CamelCase
region = event.get('region', os.environ['AWS_REGION'])
params = event.get('params', {})
client = boto3.client(service)
try:
apiFunc = getattr(client, api)
result = apiFunc(**params)
return json.dumps(result, sort_keys=True, default=str)
except AttributeError:
return f"api '{api}' not found for service '{service}'"
| 2.46875
| 2
|
testlogging/tests/test_handler.py
|
freeekanayaka/testlogging
| 0
|
12777919
|
import time
from testtools import TestResult
from logging import (
Formatter,
Logger,
INFO,
)
from six import b
from mimeparse import parse_mime_type
from testtools import TestCase
from testlogging import SubunitHandler
from testlogging.testing import StreamResultDouble
class SubunitHandlerTest(TestCase):
def setUp(self):
super(SubunitHandlerTest, self).setUp()
self.result = StreamResultDouble()
self.handler = SubunitHandler()
self.handler.setResult(self.result)
self.logger = Logger("test")
self.logger.addHandler(self.handler)
self.logger.setLevel(INFO)
def test_default(self):
"""The handler has sane defaults."""
self.logger.info("hello")
event = self.result.getEvent(0)
self.assertEqual("status", event.name)
self.assertIsNone(event.test_id)
self.assertEqual("test.log", event.file_name)
self.assertEqual(b("hello\n"), event.file_bytes)
_, _, parameters = parse_mime_type(event.mime_type)
self.assertEqual("python", parameters["language"])
self.assertEqual("default", parameters["format"])
self.assertAlmostEqual(
time.time(), time.mktime(event.timestamp.timetuple()), delta=5)
def test_decorated(self):
self.addCleanup(self.handler.setResult, self.result)
self.handler.setResult(TestResult())
error = self.assertRaises(RuntimeError, self.logger.info, "hello")
self.assertEqual("Not a stream result", str(error))
def test_format(self):
"""A custom formatter and format name can be specified."""
formatter = Formatter("[%(name)s:%(levelname)s] %(message)s")
self.handler.setFormatter(formatter, "myformat")
self.logger.info("hello")
event = self.result.getEvent(0)
self.assertEqual(b("[test:INFO] hello\n"), event.file_bytes)
_, _, parameters = parse_mime_type(event.mime_type)
self.assertEqual("python", parameters["language"])
self.assertEqual("myformat", parameters["format"])
def test_file_name(self):
"""A custom file name can be specified."""
self.handler.setFileName("my.log")
self.logger.info("hello")
event = self.result.getEvent(0)
self.assertEqual("my.log", event.file_name)
def test_test_id(self):
"""A custom test ID can be specified."""
self.handler.setTestId("my.test")
self.logger.info("hello")
event = self.result.getEvent(0)
self.assertEqual("my.test", event.test_id)
def test_close(self):
"""
When the handler is closed, an EOF packet is written.
"""
self.handler.close()
event = self.result.getEvent(0)
self.assertEqual(b(""), event.file_bytes)
self.assertTrue(event.eof)
| 2.421875
| 2
|
py/spider/miscellany/Airlines_xiecheng_seating.py
|
7134g/mySpiderAll
| 0
|
12777920
|
<filename>py/spider/miscellany/Airlines_xiecheng_seating.py
from copy import copy
from pprint import pprint
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import UnexpectedAlertPresentException, ElementNotVisibleException, TimeoutException
from selenium.webdriver import ChromeOptions, Chrome
from lxml import etree
import time
import traceback
import json
from _decorator import timer
MISS_ERROR = 3
THEAD_TASK_COUNT = 3
class SeleniumDriver:
def __init__(self):
self.options = ChromeOptions()
self.options.add_argument(
'user-agent=Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36')
        # self.options.add_argument('--headless')  # run without a visible browser window
self.options.add_argument('--no-sandbox')
self.options.add_argument('--disable-gpu')
self.options.add_argument('--log-level=3')
# self.options.add_experimental_option('excludeSwitches', ['enable-automation'])
# self.options.add_experimental_option('debuggerAddress', '127.0.0.1:9222')
prefs = {"profile.managed_default_content_settings.images": 2}
self.options.add_experimental_option("prefs", prefs) # 图片不加载
    # Chrome browser
def Chrome(self):
# PROXY_IP = get_proxy()
# PROXY_IP = proxyclient.get_ip()
# self.options.add_argument('--proxy-server=http://{}'.format(PROXY_IP))
driver = Chrome(executable_path="D:/python/Scripts/chromedriver.exe",chrome_options=self.options)
return driver
def user_form(driver, msg):
WebDriverWait(driver, 5, 0.1).until(EC.presence_of_element_located((By.XPATH, '//input[@value="M"]')))
surnames = driver.find_element("xpath", '//div[@data-field="SURNAME"]//input[@class="form-input"]').send_keys(msg['surnames'])
name = driver.find_element("css selector", "div[data-field='GIVEN_NAME'] input").send_keys(msg['name'])
born = driver.find_element("css selector", "div[data-field='BIRTHDAY'] input").send_keys(msg['born'])
gender = driver.find_element("css selector", "div[data-field='GENDER'] input[value='M']").click()
    # click the country field
country = driver.find_element("css selector", "div[data-field='NATIONALITY'] input").click()
WebDriverWait(driver, 5, 0.1).until(EC.element_to_be_clickable((By.CLASS_NAME, 'popover-fuzzylist')))
country_se = driver.find_element("xpath", "//div[@data-field='NATIONALITY']//ul/li[1]").click()
# button_js = """$().click()"""
# driver.execute_script(button_js)
passport = driver.find_element("css selector", "#cardNum_0_0").send_keys(msg['passport'])
passport_time = driver.find_element("css selector", "#input_card_limit_time_0_0").send_keys(msg['passport_time'])
phone = driver.find_element("css selector", "div[data-field='MOBILE'] input").send_keys(msg['phone'])
phone2 = driver.find_element("css selector", "#contactPhoneNum").send_keys(msg['phone'])
email = driver.find_element("css selector", "div[data-field='EMAIL'] input").send_keys(msg['email'])
@timer
def Do(driver, task_list, error_list, logger):
"""
:param task: dict
    :return: list -> [str, dict]
"""
end_task = 0
for task in task_list:
# print(task)
task["msg"] = "0000"
userinfo = task['user']
airline = task['company']
# https://flights.ctrip.com/international/search/oneway-tpe-osa?depdate=2019-08-09&cabin=y_s&adult=1&child=0&infant=0&directflight=1&airline=D7
url = "https://flights.ctrip.com/international/search/oneway-{start}-{end}?depdate={date}&cabin=y_s&adult=1&child=0&infant=0&directflight=1&airline={airline}".format(
start=task['start'],
end=task['end'],
date=task['date'],
airline=airline[:2])
driver.delete_all_cookies()
driver.get(url)
element = WebDriverWait(driver, 30, 0.1)
        driver.implicitly_wait(5)  # implicit wait
# print(driver.page_source)
try:
# element.until(EC.presence_of_element_located((By.CSS_SELECTOR,".loading.finish" )))
startRun = int(time.time())
while not (etree.HTML(driver.page_source).xpath('//div[@class="loading finish"]/@style')):
if int(time.time())-startRun>30:
error_list.append(task)
driver.quit()
            # # locate the airline
# airline_element = "//div[contains(@id, 'comfort-{}')]".format(airline)
# element.until(EC.presence_of_element_located((By.XPATH, airline_element)))
            tag_index = -1  # used to check whether this route exists
            # get the list of flight numbers on the current page
html = etree.HTML(driver.page_source)
place_list = html.xpath('//span[@class="plane-No"]/text()')
print(place_list)
for index, place in enumerate(place_list):
if task['company'] in place:
tag_index = index
break
if tag_index != -1:
                # price
# element.until(EC.presence_of_element_located((By.XPATH, '//div[@class="price-box"]/div')))
price_list = html.xpath('//div[contains(@id,"price_{}")]/text()'.format(tag_index))
# print(price_list)
# price_list = price[0]
print("当前价格{},价格区间{}-{}".format(price_list, task['min_price'], task['max_price']))
if not price_list:
raise TimeoutException
price = None
pay_index = 0
for price_index,temp_price in enumerate(price_list):
if (int(task['min_price']) < int(temp_price) < int(task['max_price'])):
                        price = temp_price  # got the price
                        pay_index = price_index  # got the index
break
else:
raise ElementNotVisibleException
if price:
                    # click "Book"
                    print("".join(["Start booking========", price]))
WebDriverWait(driver, 3, 0.1).until(EC.element_to_be_clickable((By.ID, '{}_{}'.format(tag_index,pay_index)))).click()
# print(driver.window_handles)
if len(driver.window_handles)>1:
driver.switch_to_window(driver.window_handles[1])
driver.close()
driver.switch_to_window(driver.window_handles[0])
                    # open the login dialog
try:
WebDriverWait(driver, 3, 0.1).until(EC.element_to_be_clickable((By.ID, 'nologin'))).click()
except:
# raise Exception("登录窗问题")
pass
try:
                        # personal information
user_form(driver, userinfo)
except:
# WebDriverWait(driver, 1, 0.1).until(EC.element_to_be_clickable((By.ID, 'outer')))
continue
                    # submit the order
element.until(EC.element_to_be_clickable((By.CLASS_NAME, 'btn-next'))).click()
# print(driver.page_source)
startRun = int(time.time())
while not ("护照:{}".format(task["user"]["passport"]) in driver.page_source ):
if int(time.time()) - startRun > 30:
driver.quit()
findStr = "{}/{}".format(task["user"]["surnames"],task["user"]["name"]).replace(" ","")
if ((findStr in driver.page_source) or ("目前该舱位已售完" in driver.page_source)) and (not "护照:{}".format(task["user"]["passport"]) in driver.page_source):
yield task
end_task = 1
break
# print(task_list)
if end_task:
re_task_list = []
for task_ in task_list:
# if task_["start"]==task['start'] and task_["end"]==task['end'] and task_["date"]==task['date'] and task_["company"]==task['company']:
if task_["ts"] == task['ts']:
task_["min_price"] = str(int(price)+1)
re_task_list.append(task_)
else:
re_task_list.append(task_)
task_list = re_task_list
continue
                    # get the order number
# try:
# WebDriverWait(driver, 3, 0.5).until(
# EC.element_to_be_clickable((By.CLASS_NAME, 'a.btn.btn-primary'))).click()
# except:
# try:
# pass
# WebDriverWait(driver, 15, 0.5).until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'div.notice')))
# except:
# traceback.print_exc()
# try:
# WebDriverWait(driver, 3, 0.5).until(
# EC.element_to_be_clickable((By.CLASS_NAME, 'a.btn.btn-primary'))).click()
# except:
# pass
order = str(driver.current_url).split('/')[-2]
# print(order)
# WebDriverWait(driver, 20, 0.5).until(EC.presence_of_element_located((By.CSS_SELECTOR, '.header-wrapper')))
                    # get cookies
js = 'window.open("https://flights.ctrip.com/online/orderdetail/index?oid={}");'.format(order)
driver.execute_script(js)
driver.switch_to_window(driver.window_handles[1])
startRun = int(time.time())
while (("没有查到您的订单信息" in driver.page_source) or ("繁忙" in driver.page_source)):
driver.refresh()
if int(time.time()) - startRun > 30:
error_list.append(task)
driver.quit()
# try:
# WebDriverWait(driver, 3, 0.1).until(EC.presence_of_element_located((By.XPATH, '//a[@data-ubt-v="pay-去支付"]')))
# except:
# pass
cookies = driver.get_cookies()
# print(cookies)
# if not cookies:
# driver.refresh()
# try:
# WebDriverWait(driver, 3, 0.1).until(
# EC.presence_of_element_located((By.XPATH, '//a[@data-ubt-t="btns-1005"]')))
# except:
# pass
# cookies = driver.get_cookies()
key_result = "".join(
[task["date"].replace('-', ''), task["start"], task["end"], task["company"]])
                    # data to return
result = [
key_result, {
"order": order,
"cookies": cookies,
"modele_name": "1.1.2",
"type": 2,
"parent": task['ts'],
"up": int(time.time() * 1000),
"baseTime":1200000,
},
price
]
# print(result)
driver.close()
yield result
driver.switch_to_window(driver.window_handles[0])
driver.delete_all_cookies()
            else:
                print("price not in the target range ========================>")
                task['msg'] = '0001'
                yield task
except UnexpectedAlertPresentException:
driver.switch_to.alert.accept()
driver.delete_all_cookies()
traceback.print_exc()
length = len(driver.window_handles)
if length > 1:
for index in range(1, length):
driver.switch_to_window(driver.window_handles[index])
driver.close()
driver.switch_to_window(driver.window_handles[0])
error_list.append(task)
except Exception:
            # if an alert popup is present, accept it
driver.delete_all_cookies()
if alert_is_present(driver):
driver.switch_to.alert.accept()
traceback.print_exc()
length = len(driver.window_handles)
if length>1:
for index in range(1,length):
driver.switch_to_window(driver.window_handles[index])
driver.close()
driver.switch_to_window(driver.window_handles[0])
error_list.append(task)
def alert_is_present(driver):
try:
alert = driver.switch_to.alert
alert.text
return alert
except:
return False
def main_ctrl(params, logger):
    # if params['type'] != 1:
    #     raise Exception('task type error')
    # if params["modele_name"] != 'plat.xiecheng.xiecheng_seating':
    #     raise Exception('module name error')
results = []
error = []
failure_task = []
failure_ts_list =set()
try:
driver = SeleniumDriver().Chrome()
driver.maximize_window()
for count in range(MISS_ERROR):
if count == 0:
for result in Do(driver, params, error, logger):
                    # log the result
if isinstance(result, list):
                        logger.info('{module_name}.py succeeded: {jsondata}'.format(
module_name=result[1]["modele_name"],
jsondata=json.dumps(result, ensure_ascii=False)))
results.append(result)
else:
failure_index = []
for index,failure in enumerate(params):
if result["ts"] == failure["ts"] and result["ts"] not in failure_ts_list:
failure_ts_list.add(result["ts"])
failure_index.append(index)
if result['msg'] == '0001':
if result["ts"] not in failure_ts_list:
failure_task.append(result)
else:
error.append(result)
                        # pop all tasks of this kind
# for index in failure_index[::-1]:
# params.pop(index)
# failure_task.append(result)
elif error:
repeat_task = copy(error)
error = []
for result in Do(driver, repeat_task, error, logger):
                    # log the result
if isinstance(result, list):
                        logger.info('{module_name}.py succeeded: {jsondata}'.format(
module_name=result[1]["modele_name"],
jsondata=json.dumps(result, ensure_ascii=False)))
results.append(result)
else:
failure_index = []
for index,failure in enumerate(params):
if result["ts"] == failure["ts"] and result["ts"] not in failure_ts_list:
failure_ts_list.add(result["ts"])
failure_index.append(index)
if result['msg'] == '0001':
if result["ts"] not in failure_ts_list:
failure_task.append(result)
else:
error.append(result)
# print("执行完毕,开始处理错误任务")
print("当前第{}次执行".format(count))
if len(results)>=len(params):
print("已达到任务包数量,停止回发")
except:
traceback.print_exc()
        logger.error('failed to start the browser driver, error: {tp}'.format(tp=traceback.format_exc()))
finally:
driver.quit()
print("全部任务处理完毕")
return results,failure_task
if __name__ == '__main__':
import os, psutil
import logmanage
process = psutil.Process(os.getpid())
print('Used Memory:', process.memory_info().rss / 1024 / 1024, 'MB')
    logger = logmanage.get_log("ctrip seat-booking test log")
params = [
{
'up': 1234567890,
'type': 1,
"modele_name": '1.1.1',
'start': 'tpe',
'end': 'osa',
'company': 'D7370',
'date': '2019-08-09',
'min_price': 0,
'max_price': 40000,
"ts": 123123,
'user': {
'surnames': 'LAO',
'name': 'WANG',
'gender': 'M',
'country': '中国大陆',
'passport': 'XS1245378',
'born': '1996-12-30',
'passport_time': '2029-11-11',
'phone': '16644663659',
'email': '<EMAIL>',
}
},
{
'up': 1234567890,
'type': 1,
'is_stop': 0,
"modele_name": '1.1.1',
'start': 'tpe',
'end': 'osa',
'company': 'D7370',
'date': '2019-08-09',
'min_price': 0,
'max_price': 40000,
"ts": 123123,
'user': {
'surnames': 'LAO',
'name': 'WANG',
'gender': 'M',
'country': '中国大陆',
'passport': 'XS1245378',
'born': '1996-12-30',
'passport_time': '2029-11-11',
'phone': '16644663659',
'email': '<EMAIL>',
}
},
]
result = main_ctrl(params,logger)
print(result)
print("数据长度:{}".format(len(result)))
| 2.3125
| 2
|
Step-3-DeepQLearning/main.py
|
kasey-/ArduinoDQNCar
| 4
|
12777921
|
import numpy as np
import gym
import gym_carsim
from gym import spaces
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
ENV_NAME = 'carsim-v0'
class WrapThreeFrames(gym.Wrapper):
    def __init__(self, env):
        super().__init__(env)  # initialise as a plain gym.Wrapper
self.observation_space = spaces.Box(low=0.0, high=1.0, shape=(9,))
self.past_obs = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
def shift_past_obs(self, new_obs):
self.past_obs = self.past_obs[3:]+new_obs
return self.past_obs
def reset(self):
obs = self.env.reset()
return self.shift_past_obs(obs)
def step(self, action):
obs, reward, done, info = self.env.step(action)
return self.shift_past_obs(obs), reward, done, info
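# A note on the wrapper above: it keeps a rolling window of the last three raw
# observations, so the agent sees a 9-dimensional state (this assumes the underlying
# carsim observation is 3 values wide) -- a lightweight stand-in for frame stacking.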
# Get the environment and extract the number of actions.
env = gym.make(ENV_NAME)
env = WrapThreeFrames(env)
np.random.seed(98283476)
env.seed(87518645)
nb_actions = env.action_space.n
# Next, we build a very simple model, regardless of the dueling architecture:
# if you enable the dueling network in DQN, DQN will build a dueling network based on your model automatically.
# Alternatively, you can build a dueling network yourself and turn off the dueling option in DQN.
model = Sequential()
model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(nb_actions, activation='sigmoid'))
print(model.summary())
# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
# even the metrics!
memory = SequentialMemory(limit=50000, window_length=1)
policy = BoltzmannQPolicy()
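# BoltzmannQPolicy samples actions with probability proportional to exp(Q / tau)
# instead of always taking the greedy argmax, which keeps some exploration going
# during training.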
# enable the dueling network
# you can specify the dueling_type to one of {'avg','max','naive'}
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=100,
enable_dueling_network=True, dueling_type='avg', target_model_update=1e-2, policy=policy)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])
# Okay, now it's time to learn something! Set visualize=True to watch the training,
# but this slows it down quite a lot. You can always safely abort the training
# prematurely using Ctrl + C.
dqn.fit(env, nb_steps=50000, visualize=False, verbose=2)
# After training is done, we save the final weights.
dqn.save_weights('duel_dqn_{}_weights.h5f'.format(ENV_NAME), overwrite=True)
#dqn.load_weights('duel_dqn_{}_weights.h5f'.format(ENV_NAME))
# Finally, evaluate our algorithm for 5 episodes.
print(dqn.test(env, nb_episodes=5, nb_max_episode_steps=10000, visualize=True))
| 2.25
| 2
|
pytimeNSW/pytimeNSW.py
|
MatthewBurke1995/PyTimeNSW
| 0
|
12777922
|
#!/usr/bin/env python
# encoding: utf-8
"""
pytimeNSW
~~~~~~~~~~~~~
An easy-to-use module for handling datetime needs from strings.
:copyright: (c) 2017 by <NAME> <<EMAIL>>
:license: MIT, see LICENSE for more details.
"""
import datetime
import calendar
from .filter import BaseParser
bp = BaseParser.main
dp = BaseParser.parse_diff
def parse(value):
return bp(value)
def count(value1, value2):
_val1, _val2 = parse(value1), parse(value2)
if type(_val1) == type(_val2):
return _val1 - _val2
else:
_val1 = _val1 if isinstance(_val1, datetime.datetime) else midnight(_val1)
_val2 = _val2 if isinstance(_val2, datetime.datetime) else midnight(_val2)
return _val1 - _val2
# max, min
_date = datetime.date.today()
_datetime = datetime.datetime.now()
_year = _date.year
_month = _date.month
_day = _date.day
_SEVEN_DAYS = datetime.timedelta(days=7)
_ONE_DAY = datetime.timedelta(days=1)
def today(year=None):
"""this day, last year"""
return datetime.date(int(year), _date.month, _date.day) if year else _date
def tomorrow(date=None):
"""tomorrow is another day"""
if not date:
return _date + datetime.timedelta(days=1)
else:
current_date = parse(date)
return current_date + datetime.timedelta(days=1)
def yesterday(date=None):
"""yesterday once more"""
if not date:
return _date - datetime.timedelta(days=1)
else:
current_date = parse(date)
return current_date - datetime.timedelta(days=1)
########################
# function method
########################
def daysrange(first=None, second=None, wipe=False):
"""
get all days between first and second
:param first: datetime, date or string
:param second: datetime, date or string
:return: list
"""
_first, _second = parse(first), parse(second)
(_start, _end) = (_second, _first) if _first > _second else (_first, _second)
days_between = (_end - _start).days
date_list = [_end - datetime.timedelta(days=x) for x in range(0, days_between + 1)]
if wipe and len(date_list) >= 2:
date_list = date_list[1:-1]
return date_list
def lastday(year=_year, month=_month):
"""
get the current month's last day
:param year: default to current year
:param month: default to current month
:return: month's last day
"""
last_day = calendar.monthrange(year, month)[1]
return datetime.date(year=year, month=month, day=last_day)
def midnight(arg=None):
"""
convert date to datetime as midnight or get current day's midnight
:param arg: string or date/datetime
:return: datetime at 00:00:00
"""
if arg:
_arg = parse(arg)
if isinstance(_arg, datetime.date):
return datetime.datetime.combine(_arg, datetime.datetime.min.time())
elif isinstance(_arg, datetime.datetime):
return datetime.datetime.combine(_arg.date(), datetime.datetime.min.time())
else:
return datetime.datetime.combine(_date, datetime.datetime.min.time())
def before(base=_datetime, diff=None):
"""
count datetime before `base` time
:param base: minuend -> str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
if not diff:
return _base
result_dict = dp(diff)
# weeks already convert to days in diff_parse function(dp)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year - _val))
elif unit == 'months':
if _base.month <= _val:
_month_diff = 12 - (_val - _base.month)
_base = _base.replace(year=_base.year - 1).replace(month=_month_diff)
else:
_base = _base.replace(month=_base.month - _val)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base - datetime.timedelta(**{unit: _val})
return _base
def after(base=_datetime, diff=None):
"""
count datetime after diff args
:param base: str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
result_dict = dp(diff)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year + _val))
elif unit == 'months':
if _base.month + _val <= 12:
_base = _base.replace(month=_base.month + _val)
else:
_month_diff = (_base.month + _val) - 12
_base = _base.replace(year=_base.year + 1).replace(month=_month_diff)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base + datetime.timedelta(**{unit: _val})
return _base
def _datetime_to_date(arg):
"""
convert datetime/str to date
:param arg:
:return:
"""
_arg = parse(arg)
if isinstance(_arg, datetime.datetime):
_arg = _arg.date()
return _arg
# Monday to Monday -> 00:00:00 to 00:00:00 month 1st - next month 1st
def this_week(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
return _arg - datetime.timedelta(days=_arg.weekday()), _arg + datetime.timedelta(
days=6 - _arg.weekday()) if clean else _arg + datetime.timedelta(days=6 - _arg.weekday()) + _ONE_DAY
def last_week(arg=_date, clean=False):
this_week_tuple = this_week(arg)
return this_week_tuple[0] - _SEVEN_DAYS, this_week_tuple[1] - _SEVEN_DAYS if clean \
else this_week_tuple[1] - _SEVEN_DAYS + _ONE_DAY
def next_week(arg=_date, clean=False):
this_week_tuple = this_week(arg)
return this_week_tuple[0] + _SEVEN_DAYS, this_week_tuple[1] + _SEVEN_DAYS if clean \
else this_week_tuple[1] + _SEVEN_DAYS + _ONE_DAY
def this_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
return datetime.date(_arg.year, _arg.month, 1), lastday(_arg.year, _arg.month) if clean \
else lastday(_arg.year, _arg.month) + _ONE_DAY
def last_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
this_month_first_day = datetime.date(_arg.year, _arg.month, 1)
last_month_last_day = this_month_first_day - _ONE_DAY
last_month_first_day = datetime.date(last_month_last_day.year, last_month_last_day.month, 1)
return last_month_first_day, last_month_last_day if clean else this_month_first_day
def next_month(arg=_date, clean=False):
_arg = _datetime_to_date(arg)
this_month_last_day = lastday(_arg.year, _arg.month)
next_month_first_day = this_month_last_day + _ONE_DAY
next_month_last_day = lastday(next_month_first_day.year, next_month_first_day.month)
return next_month_first_day, next_month_last_day if clean else next_month_last_day + _ONE_DAY
######################
# festival
######################
def newyear(year=None):
january_first = datetime.date(int(year), 1, 1) if year else datetime.date(_year, 1, 1)
weekday_seq = january_first.weekday()
if is_weekend(january_first):
return datetime.date(january_first.year, 1, (8 - weekday_seq)%7)
else:
return january_first
def valentine(year=None):
return datetime.date(int(year), 2, 14) if year else datetime.date(_year, 2, 14)
def fool(year=None):
return datetime.date(int(year), 4, 1) if year else datetime.date(_year, 4, 1)
def christmas(year=None):
december_25 = datetime.date(int(year), 12, 25) if year else datetime.date(_year, 12, 25)
weekday_seq = december_25.weekday()
if is_weekend(december_25):
return datetime.date(december_25.year, 12, 27)
else:
return december_25
def boxing(year=None):
return datetime.date(int(year), 12, 26) if year else datetime.date(_year, 12, 26)
def mother(year=None):
"""
the 2nd Sunday in May
:param year: int
:return: Mother's day
"""
may_first = datetime.date(_year, 5, 1) if not year else datetime.date(int(year), 5, 1)
weekday_seq = may_first.weekday()
return datetime.date(may_first.year, 5, (14 - weekday_seq))
def father(year=None):
"""
the 1st Sunday in September
:param year: int
:return: Father's day
"""
september_first = datetime.date(_year, 9, 1) if not year else datetime.date(int(year), 9, 1)
weekday_seq = september_first.weekday()
return datetime.date(september_first.year, 9, (7 - weekday_seq))
def halloween(year=None):
return lastday(month=10) if not year else lastday(year, 10)
def goodfri(year=None):
return yesterday(eastersat(year))
def eastersat(year=None):
return yesterday(eastersun(year))
def eastersun(year=None):
"""
1900 - 2099 limit
:param year: int
:return: Easter day
"""
y = int(year) if year else _year
n = y - 1900
a = n % 19
q = n // 4
b = (7 * a + 1) // 19
m = (11 * a + 4 - b) % 29
w = (n + q + 31 - m) % 7
d = 25 - m - w
if d > 0:
return datetime.date(y, 4, d)
else:
return datetime.date(y, 3, (31 + d))
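# A quick sanity check for the computation above (values assume the 1900-2099
# approximation implemented here):
#   eastersun(2019) -> datetime.date(2019, 4, 21)
#   eastersun(2021) -> datetime.date(2021, 4, 4)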
def eastermon(year=None):
return tomorrow(eastersun(year))
def easter(year=None):
return goodfri(year), eastersat(year), eastersun(year), eastermon(year)
def anzac(year=None):
return datetime.date(int(year), 4, 25) if year else datetime.date(_year, 4, 25)
def australia(year=None):
return datetime.date(int(year), 1, 26) if year else datetime.date(_year, 1, 26)
def queen(year=None): #check later
"""
the 2nd Monday in June
:param year: int
:return: Queen's birthday
"""
june_eight = datetime.date(_year, 6, 8) if not year else datetime.date(int(year), 6, 8)
weekday_seq = june_eight.weekday()
return datetime.date(june_eight.year, 6, 7 + (8 - weekday_seq)%7)
def labour(year=None):
"""
the 1st Monday in October
:param year: int
:return: Labour day
"""
october_first = datetime.date(_year, 10, 1) if not year else datetime.date(int(year), 10, 1)
weekday_seq = october_first.weekday()
return datetime.date(october_first.year, 10, (8 - weekday_seq)%7)
def family(year=None):
year = year if year else _year
family_day = {2011: datetime.date(2011,10,10), 2012: datetime.date(2012,10,8), 2013: datetime.date(2013,9,30),
2014: datetime.date(2014,9,29), 2015: datetime.date(2015,9,28), 2016: datetime.date(2016,9,26),
2017: datetime.date(2017,9,25), 2018: datetime.date(2018,10,8), 2019: datetime.date(2019,9,30) }
return family_day.get(year)
def canberra(year=None):
"""
the 2nd monday of March
:param year: int
:return: Canberra day
"""
    march_eight = datetime.date(_year, 3, 8) if not year else datetime.date(int(year), 3, 8)
weekday_seq = march_eight.weekday()
return datetime.date(march_eight.year, 3, 7 + (8 - weekday_seq)%7)
def public_holidays(year):
"""
returns a list of datetime objects that correspond to NSW public holidays
:param year: int
:return: list of datetime objects
"""
year = year if year else _year
return [i for i in easter(year)] + [newyear(year), australia(year),anzac(year), queen(year),
labour(year), christmas(year), boxing(year)]
def public_holidays_can(year):
"""
    returns a list of datetime objects that correspond to ACT (Canberra) public holidays
:param year: int
:return: list of datetime objects
"""
year = year if year else _year
return [i for i in easter(year)] + [newyear(year), australia(year),anzac(year), queen(year),
labour(year), christmas(year), boxing(year), family(year), canberra(year)]
def is_public(date_):
"""
"""
if type(date_) == datetime.date:
pass
elif type(date_) == datetime.datetime:
date_ = date_.date()
else:
date_ = parse(date_)
year = date_.year
return (date_ in public_holidays(year))
def is_public_can(date_):
if type(date_) == datetime.date:
pass
elif type(date_) == datetime.datetime:
date_ = date_.date()
else:
date_ = parse(date_)
year = date_.year
return (date_ in public_holidays_can(year))
def is_weekend(date_):
if type(date_) == datetime.date:
pass
elif type(date_) == datetime.datetime:
date_ = date_.date()
else:
date_ = parse(date_)
return (date_.weekday() >=5 )
if __name__ == '__main__':
# _time_filter('2015-01-03')
# print(calendar.monthrange(2015, 10))
print(bp('2015-01-03'))
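    # A few more usage sketches (hedged: outputs assume the NSW rules defined above
    # and that the parser accepts 'YYYY-MM-DD' strings as used here):
    # print(public_holidays(2019))      # list of datetime.date objects for 2019
    # print(is_public('2019-12-25'))    # True, Christmas Day 2019 falls on a weekday
    # print(is_weekend('2019-12-28'))   # True, a Saturday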
| 3.71875
| 4
|
ensemble/__init__.py
|
wwyf/pyml
| 0
|
12777923
|
from pyml.tree.regression import DecisionTreeRegressor
from pyml.metrics.pairwise import euclidean_distance
import numpy as np
# TODO: use squared error, absolute error, or Huber loss?
class GradientBoostingRegression():
def __init__(self,
learning_rate=0.1,
base_estimator=DecisionTreeRegressor,
n_estimators=500,
random_state=None
):
self.estimators = []
self.n_estimators = n_estimators
self.base_estimator = base_estimator
self.learning_rate = learning_rate
self.parameters = {
'f' : [],
'lr' : []
}
        # key='f' : a list of estimators
        # key='lr' : a list of learning rates
def optimizer(self, X, Y, watch=False):
"""
训练一次
"""
cur_Y_pred = self.predict(X)
# print('cur_Y_pred : ', cur_Y_pred)
# 计算cost
cost = euclidean_distance(cur_Y_pred, Y)
# 计算残差 or 计算梯度
d_fx = cur_Y_pred - Y
# print('d_fx : ', d_fx)
# 梯度取负数
d_fx = - d_fx
# 计算学习率,这里默认为初始化参数
lr = self.learning_rate
# 创建一个新回归器,去拟合梯度
new_estimator = self.base_estimator()
new_estimator.fit(X,d_fx)
self.parameters['f'].append(new_estimator)
self.parameters['lr'].append(lr)
return cost
def fit(self, X, Y, watch=False):
init_estimator = self.base_estimator()
init_estimator.fit(X,Y)
self.parameters['f'].append(init_estimator)
self.parameters['lr'].append(1)
for i in range(self.n_estimators):
cost = self.optimizer(X,Y)
if i % 10 == 0:
print('train {}/{} current cost : {}'.format(i,self.n_estimators,cost))
def predict(self, X_pred):
"""
Parameters
-------------
X_pred : 2d array-like shape(n_samples, n_feature)
Returns
--------------
pre_Y : 1d array-like shape(n_samples,)
"""
# the number of features should be consistent.
total_num = X_pred.shape[0]
Y_pred = np.zeros((total_num))
for cur_estimator, lr in zip(self.parameters['f'], self.parameters['lr']):
Y_pred += cur_estimator.predict(X_pred) * lr
return Y_pred
if __name__ == '__main__':
mini_train_X = np.array([
[1,2,3,4,5,6,7,8],
[2,3,4,5,6,7,8,9],
[3,4,5,6,7,8,9,10],
[4,5,6,7,8,9,10,11],
[5,6,7,8,9,10,11,12],
[6,7,8,9,10,11,12,13],
[7,8,9,10,11,12,13,14]
])
mini_train_Y = np.array([
1.5,2.5,3.5,4.5,5.5,6.5,7.5
])
mini_test_X = np.array([
[2,3,4,5,6,7.5,8,9],
[4,5,6,7.5,8,9,10,11]
])
mini_standard_out_Y = np.array([
2.5,4.5
])
rgs = GradientBoostingRegression()
rgs.fit(mini_train_X,mini_train_Y)
print(rgs.predict(mini_test_X))
| 2.765625
| 3
|
tests/test_set5.py
|
svkirillov/cryptopals-python3
| 0
|
12777924
|
class TestSet5:
def test_challenge33(self):
from cryptopals.set5.challenge33 import challenge33
assert challenge33(), "The result does not match the expected value"
def test_challenge34(self):
from cryptopals.set5.challenge34 import challenge34
assert challenge34(), "The result does not match the expected value"
def test_challenge35(self):
from cryptopals.set5.challenge35 import challenge35
assert challenge35(), "The result does not match the expected value"
| 2.734375
| 3
|
examples/readme.py
|
elsholz/PyMarkAuth
| 2
|
12777925
|
from pymarkauth import MarkDown
with MarkDown('../README.md') as doc:
sec = doc.section("PyMarkAuth")
sec.paragraphs(
'With PyMarkAuth you can author markdown code simply from python code.'
' To view the source code that generated this readme, take a look at the examples directory!',
)
subsec = sec.section("Here's a list of features")
subsec.unordered_list([
'lines and paragraphs',
'nested sections',
'text styling',
'links and images',
'nested ordered lists and unordered lists',
'code blocks',
doc.italics('future features:'),
doc.ordered_list([
'Tables',
'List of emojies (github specific)',
'programming language enum'
])
])
sec.text('Heres some code:')
sec.code(
'for x in range(10):',
' print(f"Hello, World! {x}")',
language='python'
)
subsec = sec.section('You can also add images and links')
subsec.image(source='logo/logo.svg')
subsec.newline()
subsec.text('You can go to python.org from ')
subsec.link(target='https://python.org', display_text='here.')
| 3.046875
| 3
|
src/gausskernel/dbmind/tools/ai_server/service/datafactory/collector/agent_collect.py
|
Yanci0/openGauss-server
| 360
|
12777926
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#############################################################################
# Copyright (c): 2021, Huawei Tech. Co., Ltd.
# FileName : agent_collect.py
# Version :
# Date : 2021-4-7
# Description : Receives and stores agent data.
#############################################################################
try:
import sys
import os
from flask import request, Response
from flask_restful import Resource
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../../"))
from common.logger import CreateLogger
from service.datafactory.storage.insert_data_to_database import SaveData
except ImportError as err:
sys.exit("agent_collect.py: Failed to import module: %s." % str(err))
LOGGER = CreateLogger("debug", "server.log").create_log()
class ResponseTuple:
"""
This class is used for generating a response tuple.
"""
@staticmethod
def success(result=None):
if result is None:
return {"status": "success"}, 200
return {"status": "success", "result": result}
@staticmethod
def error(msg="", status_code=400):
return {"status": "error", "msg": msg}, status_code
class Source(Resource):
"""
This class is used for acquiring metric data from agent and save data
in sqlite database.
"""
def __init__(self):
pass
@staticmethod
def post():
content = request.json
client_ip = request.remote_addr
LOGGER.info("Successfully received request from: %s." % client_ip)
try:
insert_db = SaveData(LOGGER)
insert_db.run(content)
return ResponseTuple.success()
except Exception as e:
return ResponseTuple.error(msg=str(e), status_code=Response.status_code)
@staticmethod
def get():
return ResponseTuple.success(result="Server service is normal.")
@staticmethod
def delete():
return ResponseTuple.error(status_code=400)
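# Hedged usage sketch: the surrounding service would typically register this resource
# on a flask_restful Api, e.g.
#     api.add_resource(Source, '/sink')
# (the '/sink' route is illustrative only), so that agents POST their metric payloads
# to that endpoint and SaveData writes them into the sqlite database.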
| 2.375
| 2
|
posting.py
|
syjang/autoblog
| 1
|
12777927
|
from selenium import webdriver
from realtimekeyword import getNaverRealtimekeyword
import time
from bs4 import BeautifulSoup
class TistoryPostingBot:
def __init__(self,driver, dir, id,password):
self.id = id
self.dir =dir
self.password = password
self.driver = driver
return
def login(self):
dir = self.dir + '/manage'
driver = self.driver
driver.get(dir)
driver.find_element_by_id("loginId").send_keys(self.id)
driver.find_element_by_id("loginPw").send_keys(self.password)
driver.find_element_by_class_name("btn_login").click()
return True
def writePost(self, title, text ,istag =False):
dir = self.dir + '/admin/entry/post/'
driver = self.driver
driver.get(dir)
driver.find_element_by_id("titleBox").send_keys(title)
if istag == False:
driver.switch_to_frame(driver.find_element_by_id("tx_canvas_wysiwyg"))
driver.find_element_by_class_name("tx-content-container").send_keys((text))
driver.switch_to_default_content()
else:
driver.find_element_by_id("tx_switchertoggle").click()
for t in text:
driver.find_element_by_id("tx_canvas_source").send_keys((t))
time.sleep(2)
#post btn
driver.find_element_by_xpath("//*[@id=\"tistoryFoot\"]/div/button[3]").click()
#popup close
window_after = driver.window_handles[1]
driver.switch_to_window(window_after)
driver.find_element_by_id("btnSubmit").click()
| 2.59375
| 3
|
flskweb/app/forms.py
|
yunpochen/yunpochenex
| 0
|
12777928
|
#form.py
from flask_wtf import FlaskForm
from wtforms import StringField , PasswordField , SubmitField , BooleanField  # fields used to build the forms
from wtforms.validators import DataRequired, Length , Email , EqualTo , ValidationError  # validators for the forms; ValidationError flags duplicate input
from app.models import User
class RegisterForm(FlaskForm):
    username = StringField('Username', validators = [DataRequired(), Length(min = 6 , max = 20)])  # length check
    password = PasswordField('Password', validators = [DataRequired(), Length(min = 8 , max = 20)])  # length check
    confirm = PasswordField('Repeat Password', validators = [DataRequired(), EqualTo('password')])  # must match the password field
    email = StringField('E-MAIL', validators = [DataRequired(), Email()])  # validated as an e-mail address
submit = SubmitField('Register')
    def validate_username(self, username):  # raise ValidationError on duplicate input
user = User.query.filter_by(username = username.data).first()
if user:
            raise ValidationError('Username already taken')
def validate_email(self, email):
mail = User.query.filter_by(email = email.data).first()
if mail:
            raise ValidationError('email already taken')
class LoginForm(FlaskForm):
    username = StringField('Username', validators = [DataRequired(), Length(min = 6 , max = 20)])  # length check
    password = PasswordField('Password', validators = [DataRequired(), Length(min = 8 , max = 20)])  # length check
remember = BooleanField('Remember')
submit = SubmitField('sign in')
class PasswordResetRequestForm(FlaskForm):
email = StringField('E-MAIL', validators = [DataRequired(), Email()])
submit = SubmitField('sent')
def validate_email(self, email):
mail = User.query.filter_by(email = email.data).first()
        if not mail:
            raise ValidationError('email does not exist')
| 2.984375
| 3
|
model.py
|
yvesscherrer/stanzatagger
| 0
|
12777929
|
# was stanza.models.pos.model
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence, pack_sequence, PackedSequence
from biaffine import BiaffineScorer
from hlstm import HighwayLSTM
from dropout import WordDropout
from char_model import CharacterModel
class Tagger(nn.Module):
def __init__(self, args, vocab, emb_matrix=None):
super().__init__()
self.vocab = vocab
self.args = args
self.use_pretrained = emb_matrix is not None
self.use_char = args['char_emb_dim'] > 0
self.use_word = args['word_emb_dim'] > 0
self.share_hid = args['pos_emb_dim'] < 1
self.unsaved_modules = []
def add_unsaved_module(name, module):
self.unsaved_modules += [name]
setattr(self, name, module)
# input layers
input_size = 0
if self.use_word:
# frequent word embeddings
self.word_emb = nn.Embedding(len(vocab['word']), self.args['word_emb_dim'], padding_idx=0)
input_size += self.args['word_emb_dim']
if not self.share_hid:
# pos embeddings
self.pos_emb = nn.Embedding(len(vocab['pos']), self.args['pos_emb_dim'], padding_idx=0)
if self.use_char:
self.charmodel = CharacterModel(args, vocab, bidirectional=args['char_bidir'])
self.trans_char = nn.Linear(self.charmodel.num_dir * self.args['char_hidden_dim'], self.args['transformed_dim'], bias=False)
input_size += self.args['transformed_dim']
if self.use_pretrained:
# pretrained embeddings, by default this won't be saved into model file
add_unsaved_module('pretrained_emb', nn.Embedding.from_pretrained(torch.from_numpy(emb_matrix), freeze=True))
self.trans_pretrained = nn.Linear(emb_matrix.shape[1], self.args['transformed_dim'], bias=False)
input_size += self.args['transformed_dim']
# recurrent layers
self.taggerlstm = HighwayLSTM(input_size, self.args['tag_hidden_dim'], self.args['tag_num_layers'], batch_first=True, bidirectional=True, dropout=self.args['dropout'], rec_dropout=self.args['tag_rec_dropout'], highway_func=torch.tanh)
self.drop_replacement = nn.Parameter(torch.randn(input_size) / np.sqrt(input_size))
self.taggerlstm_h_init = nn.Parameter(torch.zeros(2 * self.args['tag_num_layers'], 1, self.args['tag_hidden_dim']))
self.taggerlstm_c_init = nn.Parameter(torch.zeros(2 * self.args['tag_num_layers'], 1, self.args['tag_hidden_dim']))
# classifiers
self.pos_hid = nn.Linear(self.args['tag_hidden_dim'] * 2, self.args['deep_biaff_hidden_dim'])
self.pos_clf = nn.Linear(self.args['deep_biaff_hidden_dim'], len(vocab['pos']))
self.pos_clf.weight.data.zero_()
self.pos_clf.bias.data.zero_()
if self.share_hid:
clf_constructor = lambda insize, outsize: nn.Linear(insize, outsize)
else:
self.feats_hid = nn.Linear(self.args['tag_hidden_dim'] * 2, self.args['composite_deep_biaff_hidden_dim'])
clf_constructor = lambda insize, outsize: BiaffineScorer(insize, self.args['pos_emb_dim'], outsize)
self.feats_clf = nn.ModuleList()
for l in vocab['feats'].lens():
if self.share_hid:
self.feats_clf.append(clf_constructor(self.args['deep_biaff_hidden_dim'], l))
self.feats_clf[-1].weight.data.zero_()
self.feats_clf[-1].bias.data.zero_()
else:
self.feats_clf.append(clf_constructor(self.args['composite_deep_biaff_hidden_dim'], l))
# criterion
self.crit = nn.CrossEntropyLoss(ignore_index=0) # ignore padding
self.drop = nn.Dropout(args['dropout'])
self.worddrop = WordDropout(args['word_dropout'])
def forward(self, word, word_mask, wordchars, wordchars_mask, pos, feats, pretrained, word_orig_idx, sentlens, wordlens):
def pack(x):
return pack_padded_sequence(x, sentlens, batch_first=True)
def get_batch_sizes(sentlens):
b = []
for i in range(max(sentlens)):
c = len([x for x in sentlens if x > i])
b.append(c)
return torch.tensor(b)
def pad(x):
return pad_packed_sequence(PackedSequence(x, batch_sizes), batch_first=True)[0]
inputs = []
if self.use_word:
word_emb = self.word_emb(word)
word_emb = pack(word_emb)
inputs += [word_emb]
batch_sizes = word_emb.batch_sizes
else:
batch_sizes = get_batch_sizes(sentlens)
if self.use_pretrained:
pretrained_emb = self.pretrained_emb(pretrained)
pretrained_emb = self.trans_pretrained(pretrained_emb)
pretrained_emb = pack(pretrained_emb)
inputs += [pretrained_emb]
if self.use_char:
char_reps = self.charmodel(wordchars, wordchars_mask, word_orig_idx, sentlens, wordlens)
char_reps = PackedSequence(self.trans_char(self.drop(char_reps.data)), char_reps.batch_sizes)
inputs += [char_reps]
lstm_inputs = torch.cat([x.data for x in inputs], 1)
lstm_inputs = self.worddrop(lstm_inputs, self.drop_replacement)
lstm_inputs = self.drop(lstm_inputs)
lstm_inputs = PackedSequence(lstm_inputs, inputs[0].batch_sizes)
lstm_outputs, _ = self.taggerlstm(lstm_inputs, sentlens, hx=(self.taggerlstm_h_init.expand(2 * self.args['tag_num_layers'], word.size(0), self.args['tag_hidden_dim']).contiguous(), self.taggerlstm_c_init.expand(2 * self.args['tag_num_layers'], word.size(0), self.args['tag_hidden_dim']).contiguous()))
lstm_outputs = lstm_outputs.data
pos_hid = F.relu(self.pos_hid(self.drop(lstm_outputs)))
pos_pred = self.pos_clf(self.drop(pos_hid))
preds = [pad(pos_pred).max(2)[1]]
pos = pack(pos).data
loss = self.crit(pos_pred.view(-1, pos_pred.size(-1)), pos.view(-1))
if self.share_hid:
feats_hid = pos_hid
clffunc = lambda clf, hid: clf(self.drop(hid))
else:
feats_hid = F.relu(self.feats_hid(self.drop(lstm_outputs)))
# TODO: self.training is never set, but check if this is a bug
#if self.training: pos_emb = self.pos_emb(pos) else:
pos_emb = self.pos_emb(pos_pred.max(1)[1])
clffunc = lambda clf, hid: clf(self.drop(hid), self.drop(pos_emb))
feats_preds = []
feats = pack(feats).data
for i in range(len(self.vocab['feats'])):
feats_pred = clffunc(self.feats_clf[i], feats_hid)
loss += self.crit(feats_pred.view(-1, feats_pred.size(-1)), feats[:, i].view(-1))
feats_preds.append(pad(feats_pred).max(2, keepdim=True)[1])
preds.append(torch.cat(feats_preds, 2))
return loss, preds
if __name__ == "__main__":
print("This file cannot be used on its own.")
print("To launch the tagger, use tagger.py instead of model.py")
| 2.1875
| 2
|
heat/engine/resources/openstack/barbican/order.py
|
maestro-hybrid-cloud/heat
| 0
|
12777930
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
class Order(resource.Resource):
support_status = support.SupportStatus(version='2014.2')
default_client_name = 'barbican'
entity = 'orders'
PROPERTIES = (
NAME, PAYLOAD_CONTENT_TYPE, MODE, EXPIRATION,
ALGORITHM, BIT_LENGTH, TYPE, REQUEST_TYPE, SUBJECT_DN,
SOURCE_CONTAINER_REF, CA_ID, PROFILE, REQUEST_DATA,
PASS_PHRASE
) = (
'name', 'payload_content_type', 'mode', 'expiration',
'algorithm', 'bit_length', 'type', 'request_type', 'subject_dn',
'source_container_ref', 'ca_id', 'profile', 'request_data',
'pass_phrase'
)
ATTRIBUTES = (
STATUS, ORDER_REF, SECRET_REF, PUBLIC_KEY, PRIVATE_KEY,
CERTIFICATE, INTERMEDIATES, CONTAINER_REF
) = (
'status', 'order_ref', 'secret_ref', 'public_key', 'private_key',
'certificate', 'intermediates', 'container_ref'
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Human readable name for the secret.'),
),
PAYLOAD_CONTENT_TYPE: properties.Schema(
properties.Schema.STRING,
_('The type/format the secret data is provided in.'),
),
EXPIRATION: properties.Schema(
properties.Schema.STRING,
_('The expiration date for the secret in ISO-8601 format.'),
constraints=[
constraints.CustomConstraint('iso_8601'),
],
),
ALGORITHM: properties.Schema(
properties.Schema.STRING,
_('The algorithm type used to generate the secret.'),
),
BIT_LENGTH: properties.Schema(
properties.Schema.INTEGER,
_('The bit-length of the secret.'),
),
MODE: properties.Schema(
properties.Schema.STRING,
_('The type/mode of the algorithm associated with the secret '
'information.'),
),
TYPE: properties.Schema(
properties.Schema.STRING,
_('The type of the order.'),
constraints=[
constraints.AllowedValues([
'key', 'asymmetric', 'certificate'
]),
],
required=True,
support_status=support.SupportStatus(version='5.0.0'),
),
REQUEST_TYPE: properties.Schema(
properties.Schema.STRING,
_('The type of the certificate request.'),
support_status=support.SupportStatus(version='5.0.0'),
),
SUBJECT_DN: properties.Schema(
properties.Schema.STRING,
_('The subject of the certificate request.'),
support_status=support.SupportStatus(version='5.0.0'),
),
SOURCE_CONTAINER_REF: properties.Schema(
properties.Schema.STRING,
_('The source of certificate request.'),
support_status=support.SupportStatus(version='5.0.0'),
),
CA_ID: properties.Schema(
properties.Schema.STRING,
_('The identifier of the CA to use.'),
support_status=support.SupportStatus(version='5.0.0'),
),
PROFILE: properties.Schema(
properties.Schema.STRING,
_('The profile of certificate to use.'),
support_status=support.SupportStatus(version='5.0.0'),
),
REQUEST_DATA: properties.Schema(
properties.Schema.STRING,
_('The content of the CSR.'),
support_status=support.SupportStatus(version='5.0.0'),
),
PASS_PHRASE: properties.Schema(
properties.Schema.STRING,
            _('The passphrase of the created key.'),
support_status=support.SupportStatus(version='5.0.0'),
),
}
attributes_schema = {
STATUS: attributes.Schema(
_('The status of the order.'),
type=attributes.Schema.STRING
),
ORDER_REF: attributes.Schema(
_('The URI to the order.'),
type=attributes.Schema.STRING
),
SECRET_REF: attributes.Schema(
_('The URI to the created secret.'),
type=attributes.Schema.STRING
),
CONTAINER_REF: attributes.Schema(
_('The URI to the created container.'),
support_status=support.SupportStatus(version='5.0.0'),
type=attributes.Schema.STRING
),
PUBLIC_KEY: attributes.Schema(
_('The payload of the created public key, if available.'),
support_status=support.SupportStatus(version='5.0.0'),
type=attributes.Schema.STRING
),
PRIVATE_KEY: attributes.Schema(
_('The payload of the created private key, if available.'),
support_status=support.SupportStatus(version='5.0.0'),
type=attributes.Schema.STRING
),
CERTIFICATE: attributes.Schema(
_('The payload of the created certificate, if available.'),
support_status=support.SupportStatus(version='5.0.0'),
type=attributes.Schema.STRING
),
INTERMEDIATES: attributes.Schema(
_('The payload of the created intermediates, if available.'),
support_status=support.SupportStatus(version='5.0.0'),
type=attributes.Schema.STRING
),
}
def handle_create(self):
info = dict((k, v) for k, v in self.properties.items()
if v is not None)
order = self.client().orders.create(**info)
order_ref = order.submit()
self.resource_id_set(order_ref)
        # NOTE(pshchelo): order_ref is a HATEOAS reference, i.e. a string,
        # and need not be fixed re LP bug #1393268
return order_ref
def check_create_complete(self, order_href):
order = self.client().orders.get(order_href)
if order.status == 'ERROR':
reason = order.error_reason
code = order.error_status_code
msg = (_("Order '%(name)s' failed: %(code)s - %(reason)s")
% {'name': self.name, 'code': code, 'reason': reason})
raise exception.Error(msg)
return order.status == 'ACTIVE'
def _resolve_attribute(self, name):
client = self.client()
order = client.orders.get(self.resource_id)
if name in (
self.PUBLIC_KEY, self.PRIVATE_KEY, self.CERTIFICATE,
self.INTERMEDIATES):
container = client.containers.get(order.container_ref)
secret = getattr(container, name)
return secret.payload
return getattr(order, name)
# TODO(ochuprykov): remove this method when bug #1485619 will be fixed
def _show_resource(self):
order = self.client().orders.get(self.resource_id)
info = order._get_formatted_entity()
return dict(zip(info[0], info[1]))
def resource_mapping():
return {
'OS::Barbican::Order': Order,
}
| 1.679688
| 2
|
zero/patrickstar_utils/utils.py
|
Wesley-Jzy/ColossalAI-Benchmark
| 39
|
12777931
|
import os
import torch
from zero.common.utils import CONFIG, get_gpu_memory_mb, print_log
from torch.distributed import init_process_group
def init_w_ps(builder):
from patrickstar.runtime import initialize_engine
config = CONFIG.copy()
rank = int(os.environ['RANK'])
world_size = int(os.environ['WORLD_SIZE'])
host = os.environ['MASTER_ADDR']
port = int(os.environ['MASTER_PORT'])
init_process_group(rank=rank, world_size=world_size, init_method=f'tcp://{host}:{port}', backend='nccl')
torch.cuda.set_device(rank)
if CONFIG.get('gpu_mem_fraction', None) is not None:
torch.cuda.set_per_process_memory_fraction(CONFIG['gpu_mem_fraction'])
print_log(f'Set max GPU mem: {get_gpu_memory_mb() * CONFIG["gpu_mem_fraction"]:.2f} MB')
build_data, build_model, build_loss, _, build_scheduler = builder()
train_data, test_data = build_data()
criterion = build_loss()
model, optimizer = initialize_engine(model_func=build_model, local_rank=rank, config=config)
lr_scheduler = build_scheduler(len(train_data), optimizer)
return model, train_data, test_data, criterion, optimizer, None, lr_scheduler
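# Hedged usage sketch: a benchmark entry point would typically call
#     model, train_data, test_data, criterion, optimizer, _, lr_scheduler = init_w_ps(my_builder)
# where `my_builder` (an illustrative name, not defined here) returns the
# (build_data, build_model, build_loss, _, build_scheduler) tuple unpacked above,
# and then runs its training loop with the returned PatrickStar engine.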
| 2.09375
| 2
|
tools.py
|
vinx13/WikiCrawler
| 0
|
12777932
|
import DbHelper
def singleton(_class):
"""
    a decorator that is used to implement the singleton pattern;
Usage:
@singleton
class Foo:
pass
"""
instances = {}
def _getInstance(*args, **kwargs):
if _class not in instances:
instances[_class] = _class(*args, **kwargs)
return instances[_class]
return _getInstance
def enum(*sequential):
"""
pseudo Enum type; Usage: EnumType=enum('key1','key2',...,)
"""
enums = dict(zip(sequential, range(len(sequential))))
return type('Enum', (), enums)
def checkDB(fun):
def _fun(self, *args):
if self.db is None:
self.db = DbHelper.DbHelper()
return fun(self, *args)
return _fun
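# Hedged usage sketch for checkDB: it lazily creates a DbHelper the first time a
# decorated method runs, e.g.
#
#     class Crawler:
#         db = None
#         @checkDB
#         def save(self, page):
#             self.db.insert(page)  # self.db is guaranteed to be set here
#
# (Crawler, save and insert are illustrative names only, not part of this module.)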
| 3.28125
| 3
|
src/markdown_exec/pycon.py
|
pawamoy/markdown-exec
| 0
|
12777933
|
"""Formatter for executing `pycon` code."""
from __future__ import annotations
import textwrap
from typing import Any
from markdown.core import Markdown
from markdown_exec.python import run_python
from markdown_exec.rendering import add_source, markdown
def format_pycon( # noqa: WPS231
code: str,
md: Markdown,
html: bool,
source: str,
tabs: tuple[str, str],
**options: Any,
) -> str:
"""Execute `pycon` code and return HTML.
Parameters:
code: The code to execute.
md: The Markdown instance.
html: Whether to inject output as HTML directly, without rendering.
source: Whether to show source as well, and where.
tabs: Titles of tabs (if used).
**options: Additional options passed from the formatter.
Returns:
HTML contents.
"""
markdown.mimic(md)
python_lines = []
for line in code.split("\n"):
if line.startswith(">>> "):
python_lines.append(line[4:])
python_code = "\n".join(python_lines)
extra = options.get("extra", {})
output = run_python(python_code, html, **extra)
if source:
source_code = textwrap.indent(python_code, ">>> ")
output = add_source(source=source_code, location=source, output=output, language="pycon", tabs=tabs, **extra)
return markdown.convert(output)
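# Illustration of the stripping logic above: given a ``pycon`` block such as
#
#     >>> print("hello")
#     hello
#
# only the lines starting with ">>> " are kept (with the prefix removed), so
# run_python() receives the plain line print("hello"); expected-output lines like
# "hello" are dropped before execution.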
| 2.828125
| 3
|
docker/turtlebot2i_deep_qlearning/turtlebot2i_deep_qlearning/dqn/respawnGoal.py
|
EricssonResearch/tnmt
| 0
|
12777934
|
#!/usr/bin/env python
import rospy
import random
import time
import os
from gazebo_msgs.srv import SpawnModel, DeleteModel
from gazebo_msgs.msg import ModelStates
from geometry_msgs.msg import Pose
import pdb;
class Respawn():
def __init__(self):
self.modelPath = os.path.dirname(os.path.realpath(__file__)) + "/goal_square/goal_box/model.sdf"
self.f = open(self.modelPath, 'r')
self.model = self.f.read()
self.stage = 2
self.goal_position = Pose()
self.init_goal_x = 0.6
self.init_goal_y = 0.0
self.goal_position.position.x = self.init_goal_x
self.goal_position.position.y = self.init_goal_y
self.modelName = 'goal'
self.obstacle_1 = -1.0, -1.0
self.obstacle_2 = -1.0, 1.0
self.obstacle_3 = 1.0, -1.0
self.obstacle_4 = 1.0, 1.0
self.human_1 = 1.74723, -1.88126
self.human_2 = -2.21807, 2.24004
self.last_goal_x = self.init_goal_x
self.last_goal_y = self.init_goal_y
self.last_index = 0
self.sub_model = rospy.Subscriber('gazebo/model_states', ModelStates, self.checkModel)
self.check_model = False
self.index = 0
def checkModel(self, model):
self.check_model = False
for i in range(len(model.name)):
if model.name[i] == "goal":
self.check_model = True
def respawnModel(self):
while True:
if not self.check_model:
rospy.wait_for_service('gazebo/spawn_sdf_model')
spawn_model_prox = rospy.ServiceProxy('gazebo/spawn_sdf_model', SpawnModel)
spawn_model_prox(self.modelName, self.model, 'robotos_name_space', self.goal_position, "world")
rospy.loginfo("Goal position : %.1f, %.1f", self.goal_position.position.x,
self.goal_position.position.y)
break
else:
#print("Goal Model did not spawn")
pass
def deleteModel(self):
while True:
if self.check_model:
rospy.wait_for_service('gazebo/delete_model')
del_model_prox = rospy.ServiceProxy('gazebo/delete_model', DeleteModel)
del_model_prox(self.modelName)
break
else:
pass
def getPosition(self, position_check=False, delete=False):
if delete:
self.deleteModel()
while position_check:
goal_x = random.randrange(-26, 27) / 10.0 # (-12, 13)
goal_y = random.randrange(-26, 27) / 10.0 # (-12, 13)
if abs(goal_x - self.obstacle_1[0]) <= 0.4 and abs(goal_y - self.obstacle_1[1]) <= 0.4:
position_check = True
elif abs(goal_x - self.obstacle_2[0]) <= 0.4 and abs(goal_y - self.obstacle_2[1]) <= 0.4:
position_check = True
elif abs(goal_x - self.obstacle_3[0]) <= 0.4 and abs(goal_y - self.obstacle_3[1]) <= 0.4:
position_check = True
elif abs(goal_x - self.obstacle_4[0]) <= 0.4 and abs(goal_y - self.obstacle_4[1]) <= 0.4:
position_check = True
elif abs(goal_x - self.human_1[0]) <= 0.4 and abs(goal_y - self.human_1[1]) <= 0.4:
position_check = True
elif abs(goal_x - self.human_2[0]) <= 0.4 and abs(goal_y - self.human_2[1]) <= 0.4:
position_check = True
elif abs(goal_x - 0.0) <= 0.4 and abs(goal_y - 0.0) <= 0.4:
position_check = True
else:
position_check = False
if abs(goal_x - self.last_goal_x) < 1 and abs(goal_y - self.last_goal_y) < 1:
position_check = True
self.goal_position.position.x = goal_x
self.goal_position.position.y = goal_y
time.sleep(0.5)
self.respawnModel()
# Setting up the last goal position
self.last_goal_x = self.goal_position.position.x
self.last_goal_y = self.goal_position.position.y
return self.goal_position.position.x, self.goal_position.position.y
| 2.59375
| 3
|
test/test_digital_signature_transaction.py
|
signingtoday/signingtoday-sdk-python
| 0
|
12777935
|
# coding: utf-8
"""
Signing Today Web
*Signing Today* is the perfect Digital Signature Gateway. Whenever in Your workflow You need to add one or more Digital Signatures to Your document, *Signing Today* is the right choice. You prepare Your documents, *Signing Today* takes care of all the rest: send invitations (`signature tickets`) to signers, collects their signatures, send You back the signed document. Integrating *Signing Today* in Your existing applications is very easy. Just follow these API specifications and get inspired by the many examples presented hereafter. # noqa: E501
The version of the OpenAPI document: 2.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import signing_today_client
from signing_today_client.models.digital_signature_transaction import DigitalSignatureTransaction # noqa: E501
from signing_today_client.rest import ApiException
class TestDigitalSignatureTransaction(unittest.TestCase):
"""DigitalSignatureTransaction unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test DigitalSignatureTransaction
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = signing_today_client.models.digital_signature_transaction.DigitalSignatureTransaction() # noqa: E501
if include_optional :
return DigitalSignatureTransaction(
id = 'd9b4df92-cf85-48dc-a2de-955f518a2992',
domain = 'bit4id',
title = 'Real estate sales contract transaction',
replaces = '0d563aea-e39b-42a5-ad9b-b4d06b379696',
replaced_by = 'a4f0cd5b-e49a-4692-a846-4455110eda74',
created_by_user = 'e6419924-fd1d-4c42-9fa2-88023461f5df',
created_at = '2007-04-02T19:30:10Z',
documents = [
signing_today_client.models.document.Document(
_instance_id = 1,
id = 1,
plain_document_uuid = '737dc132-a3f0-11e9-a2a3-2a2ae2dbcce4',
filled_document_uuid = '192db8d8-4128-11ea-b77f-2e728ce88125',
signed_document_uuid = '2a126504-4128-11ea-b77f-2e728ce88125',
status = 'signed',
forms = [
signing_today_client.models.fillable_form.FillableForm(
_instance_id = 1,
id = 2,
document_id = 3,
type = 'SignatureForm',
position_x = 100.00,
position_y = 58.14,
width = 10.1,
height = 5.66,
page = 1,
signer_id = 1,
to_fill = True,
filled = False,
invisible = True,
extra_data = {"signatureRequestId":1}, )
],
signature_requests = [
signing_today_client.models.signature_request.SignatureRequest(
_instance_id = 1,
id = 3,
reason = 'As the Buyer',
description = 'The proponent',
signer_id = 2,
sign_profile = 'PAdES',
with_timestamp = True,
declinable = False,
restrictions = [
signing_today_client.models.signature_restriction.SignatureRestriction(
rule = '0',
operator = '0',
value = '0', )
],
extra_data = {"st_ticketUrl":"http://signing.today/ticket/8bd4aead-ad37-42bc-b3b0-22ce3d1c9e79"}, )
],
signer_groups = [
signing_today_client.models.signers_group.SignersGroup(
_instance_id = 1,
signers = [
signing_today_client.models.signer.Signer(
_instance_id = 1,
id = 1,
name = 'Adam',
surname = 'Smith',
email = '<EMAIL>',
phone = '+013392213450',
role = 'buyer',
user_uuid = '737dc132-a3f0-11e9-a2a3-2a2ae2dbcce4',
template_label = 'Buyer', )
], )
], )
],
published_at = '2007-04-05T11:10:42Z',
expires_at = '2007-04-25T12:00Z',
resources = [
signing_today_client.models.lf_resource.LFResource(
id = '05a80817-a3a5-48fe-83c0-0df0f48a2a26',
domain = 'bit4id',
type = 'PDFResource',
dst_uuid = 'd9b4df92-cf85-48dc-a2de-955f518a2992',
title = 'Sales Contract',
filename = 'contract.pdf',
url = 'https://storage.myapi.com/resource/05a80817-a3a5-48fe-83c0-0df0f48a2a26',
size = 1024,
created_at = '2007-04-02T19:30:10Z',
mimetype = 'application/pdf',
pages = 3,
extra_data = {"toSign":true}, )
],
signatures = [
signing_today_client.models.signature.Signature(
_instance_id = 1,
document_id = 3,
signature_request_id = 2,
signed_at = '2021-10-17T07:26Z',
declined_reason = 'Reason of declination',
status = 'signed',
extra_data = {"st_uuid":"d6ebb1ed-73a4-45ba-b33a-7db8a6cdd882"}, )
],
status = 'published',
error_message = '0',
deleted_at = '2007-04-02T19:30:10Z',
tags = [
'important'
],
template = False,
public_template = False,
extra_data = {"st_uuid":"d6ebb1ed-73a4-45ba-b33a-7db8a6cdd882"},
visible_to = [
'737dc132-a3f0-11e9-a2a3-2a2ae2dbcce4'
],
cc_groups = [
'Marketing'
],
cc_users = [
'7<PASSWORD>-a<PASSWORD>-<PASSWORD>'
],
urgent = False,
updated_at = '2007-04-02T19:30:10Z'
)
else :
return DigitalSignatureTransaction(
)
def testDigitalSignatureTransaction(self):
"""Test DigitalSignatureTransaction"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| 2.078125
| 2
|
code/displayData.py
|
Tobiaskri/Heart-Rate-Measurement
| 0
|
12777936
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
def display(in_signal):
return 0
| 2.046875
| 2
|
tests/functional/test_cookiecutter.py
|
cjolowicz/cutty
| 1
|
12777937
|
"""Functional tests for the cookiecutter CLI."""
from pathlib import Path
import pytest
from cutty.projects.projectconfig import PROJECT_CONFIG_FILE
from cutty.util.git import Repository
from tests.functional.conftest import RunCutty
from tests.functional.conftest import RunCuttyError
from tests.util.files import project_files
from tests.util.files import template_files
from tests.util.git import move_repository_files_to_subdirectory
from tests.util.git import updatefile
EXTRA = Path("post_gen_project")
def test_help(runcutty: RunCutty) -> None:
"""It exits with a status code of zero."""
runcutty("cookiecutter", "--help")
def test_default(runcutty: RunCutty, template: Path) -> None:
"""It generates a project."""
runcutty("cookiecutter", str(template))
assert template_files(template) == project_files("example") - {EXTRA}
def test_input(runcutty: RunCutty, template: Path) -> None:
"""It generates a project."""
runcutty("cookiecutter", str(template), input="foobar\n\n\n")
assert Path("foobar", "README.md").read_text() == "# foobar\n"
def test_no_repository(runcutty: RunCutty, template: Path) -> None:
"""It does not create a git repository for the project."""
runcutty("cookiecutter", str(template))
assert not Path("example", ".git").is_dir()
def test_no_cutty_json(runcutty: RunCutty, template: Path) -> None:
"""It does not create a cutty.json file."""
runcutty("cookiecutter", str(template))
assert not Path("example", PROJECT_CONFIG_FILE).is_file()
def test_no_input(runcutty: RunCutty, template: Path) -> None:
"""It does not prompt for variables."""
runcutty("cookiecutter", "--no-input", str(template))
assert template_files(template) == project_files("example") - {EXTRA}
def test_extra_context(runcutty: RunCutty, template: Path) -> None:
"""It allows setting variables on the command-line."""
runcutty("cookiecutter", str(template), "project=awesome")
assert template_files(template) == project_files("awesome") - {EXTRA}
def test_extra_context_invalid(runcutty: RunCutty, template: Path) -> None:
"""It raises an exception if additional arguments cannot be parsed."""
with pytest.raises(Exception):
runcutty("cookiecutter", str(template), "invalid")
def test_checkout(runcutty: RunCutty, template: Path) -> None:
"""It uses the specified revision of the template."""
initial = Repository.open(template).head.commit.id
updatefile(template / "{{ cookiecutter.project }}" / "LICENSE")
runcutty("cookiecutter", f"--checkout={initial}", str(template))
assert not Path("example", "LICENSE").exists()
def test_output_dir(runcutty: RunCutty, template: Path, tmp_path: Path) -> None:
"""It generates the project under the specified directory."""
outputdir = tmp_path / "outputdir"
runcutty("cookiecutter", f"--output-dir={outputdir}", str(template))
assert template_files(template) == project_files(outputdir / "example") - {EXTRA}
def test_directory(runcutty: RunCutty, template: Path, tmp_path: Path) -> None:
"""It uses the template in the given subdirectory."""
directory = "a"
move_repository_files_to_subdirectory(template, directory)
runcutty("cookiecutter", f"--directory={directory}", str(template))
assert template_files(template / "a") == project_files("example") - {EXTRA}
def test_overwrite(runcutty: RunCutty, template: Path) -> None:
"""It overwrites existing files."""
readme = Path("example", "README.md")
readme.parent.mkdir()
readme.touch()
runcutty("cookiecutter", "--overwrite-if-exists", str(template))
assert readme.read_text() == "# example\n"
def test_skip(runcutty: RunCutty, template: Path) -> None:
"""It skips existing files."""
readme = Path("example", "README.md")
readme.parent.mkdir()
readme.touch()
runcutty(
"cookiecutter",
"--overwrite-if-exists",
"--skip-if-file-exists",
str(template),
)
assert readme.read_text() == ""
def test_empty_template(emptytemplate: Path, runcutty: RunCutty) -> None:
"""It exits with a non-zero status code."""
with pytest.raises(RunCuttyError):
runcutty("cookiecutter", str(emptytemplate))
| 2.375
| 2
|
tests/test_util.py
|
EarthObservationSimulator/orbits
| 4
|
12777938
|
"""Unit tests for orbitpy.util module.
"""
import unittest
import numpy as np
from numpy.core.numeric import tensordot
from instrupy.util import Orientation
from instrupy import Instrument
from orbitpy.util import OrbitState, SpacecraftBus, Spacecraft
import orbitpy.util
import propcov
from util.spacecrafts import spc1_json, spc2_json, spc3_json
class TestOrbitState(unittest.TestCase):
def test_date_from_dict(self):
x = OrbitState.date_from_dict({"@type":"JULIAN_DATE_UT1", "jd":2459270.75})
self.assertIsInstance(x, propcov.AbsoluteDate)
y = OrbitState.date_from_dict({"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0})
self.assertIsInstance(y, propcov.AbsoluteDate)
self.assertEqual(x, y)
def test_state_from_dict(self):
x = OrbitState.state_from_dict({"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6867, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25})
self.assertIsInstance(x, propcov.OrbitState)
cart_state = x.GetCartesianState().GetRealArray()
y = OrbitState.state_from_dict({"@type": "CARTESIAN_EARTH_CENTERED_INERTIAL", "x": cart_state[0], "y": cart_state[1], "z": cart_state[2], "vx": cart_state[3], "vy": cart_state[4], "vz": cart_state[5]})
self.assertIsInstance(y, propcov.OrbitState)
self.assertEqual(x, y)
def test_date_to_dict(self): #@TODO
pass
def test_state_to_dict(self): #@TODO
pass
def test_get_julian_date(self): #@TODO
pass
def test_get_cartesian_earth_centered_inertial_state(self): #@TODO
pass
def test_get_keplerian_earth_centered_inertial_state(self): #@TODO
pass
def test_from_dict(self):
# Julian date, Cartesian state
o = OrbitState.from_dict({"date":{"@type":"JULIAN_DATE_UT1", "jd":2459270.75},
"state":{"@type": "CARTESIAN_EARTH_CENTERED_INERTIAL", "x": 6878.137, "y": 0, "z": 0, "vx": 0, "vy": 7.6126, "vz": 0},
"@id": 123})
self.assertIsInstance(o, OrbitState)
self.assertEqual(o._id, 123)
self.assertEqual(o.date, propcov.AbsoluteDate.fromJulianDate(2459270.75))
self.assertEqual(o.state, propcov.OrbitState.fromCartesianState(propcov.Rvector6([6878.137,0,0,0,7.6126,0])))
# Gregorian date, Keplerian state
o = OrbitState.from_dict({"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0},
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25},
})
self.assertIsInstance(o, OrbitState)
self.assertIsNone(o._id)
self.assertEqual(o.date, propcov.AbsoluteDate.fromGregorianDate(2021, 2, 25, 6 ,0, 0))
self.assertEqual(o.state, propcov.OrbitState.fromKeplerianState(6878.137, 0.001, np.deg2rad(45), np.deg2rad(35), np.deg2rad(145), np.deg2rad(-25)))
def test_to_dict(self): #@TODO test Keplerian state output
# Input: Julian date, Cartesian state
o = OrbitState.from_dict({"date":{"@type":"JULIAN_DATE_UT1", "jd":2459270.75},
"state":{"@type": "CARTESIAN_EARTH_CENTERED_INERTIAL", "x": 6878.137, "y": 0, "z": 0, "vx": 0, "vy": 7.6126, "vz": 0},
})
d = o.to_dict()
self.assertEqual(d["date"]["@type"], "JULIAN_DATE_UT1")
self.assertEqual(d["date"]["jd"], 2459270.75)
self.assertEqual(d["state"]["@type"], "CARTESIAN_EARTH_CENTERED_INERTIAL")
self.assertAlmostEqual(d["state"]["x"], 6878.137)
self.assertEqual(d["state"]["y"], 0)
self.assertEqual(d["state"]["z"], 0)
self.assertEqual(d["state"]["vx"], 0)
self.assertEqual(d["state"]["vy"], 7.6126)
self.assertEqual(d["state"]["vz"], 0)
self.assertIsNone(d["@id"])
# Input: Gregorian date, Keplerian state
o = OrbitState.from_dict({"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0},
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25},
"@id": "123"})
d = o.to_dict()
date = o.get_julian_date()
state = o.get_cartesian_earth_centered_inertial_state()
self.assertEqual(d["date"]["@type"], "JULIAN_DATE_UT1")
self.assertEqual(d["date"]["jd"], date)
self.assertEqual(d["state"]["@type"], "CARTESIAN_EARTH_CENTERED_INERTIAL")
self.assertAlmostEqual(d["state"]["x"], state[0])
self.assertEqual(d["state"]["y"], state[1])
self.assertEqual(d["state"]["z"], state[2])
self.assertEqual(d["state"]["vx"], state[3])
self.assertEqual(d["state"]["vy"], state[4])
self.assertEqual(d["state"]["vz"], state[5])
self.assertEqual(d["@id"], "123")
class TestSpacecraftBus(unittest.TestCase):
def test_from_json(self):
# typical case
o = SpacecraftBus.from_json('{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", \
"convention": "REF_FRAME_ALIGNED", "@id": "abc"}, "@id":123}')
self.assertEqual(o.name, "BlueCanyon")
self.assertEqual(o.mass, 20)
self.assertEqual(o.volume, 0.5)
self.assertEqual(o.orientation, Orientation.from_dict({"referenceFrame":"Nadir_pointing", "convention": "REF_FRAME_ALIGNED", "@id": "abc"}))
self.assertIsNone(o.solarPanelConfig)
self.assertEqual(o._id, 123)
# check default orientation
o = SpacecraftBus.from_json('{"name": "Microsat", "mass": 100, "volume": 1}')
self.assertEqual(o.name, "Microsat")
self.assertEqual(o.mass, 100)
self.assertEqual(o.volume, 1)
self.assertEqual(o.orientation, Orientation.from_dict({"referenceFrame":"Nadir_pointing", "convention": "REF_FRAME_ALIGNED"}))
self.assertIsNone(o.solarPanelConfig)
self.assertIsNone(o._id)
# side look orientation
o = SpacecraftBus.from_json('{"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle":-10}, "@id":123}')
self.assertIsNone(o.name)
self.assertIsNone(o.mass)
self.assertIsNone(o.volume)
self.assertEqual(o.orientation, Orientation.from_dict({"referenceFrame":"Nadir_pointing", "convention": "SIDE_LOOK", "sideLookAngle":-10}))
self.assertIsNone(o.solarPanelConfig)
self.assertEqual(o._id, 123)
# Euler rotation specification, ECI frame
o = SpacecraftBus.from_json('{"orientation":{"referenceFrame": "EARTH_CENTERED_INERTIAL", "convention": "XYZ","xRotation":10,"yRotation":-10.4,"zRotation":20.78}}')
self.assertIsNone(o.name)
self.assertIsNone(o.mass)
self.assertIsNone(o.volume)
self.assertEqual(o.orientation, Orientation.from_dict({"referenceFrame":"EARTH_CENTERED_INERTIAL", "convention": "XYZ","xRotation":10,"yRotation":-10.4,"zRotation":20.78}))
self.assertIsNone(o.solarPanelConfig)
self.assertIsNone(o._id)
def test_to_dict(self):
# typical case
o = SpacecraftBus.from_json('{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", \
"convention": "REF_FRAME_ALIGNED", "@id": "abc"}, "@id":123}')
o_dict = o.to_dict()
self.assertEqual(o_dict['name'], 'BlueCanyon')
self.assertEqual(o_dict['mass'], 20)
self.assertEqual(o_dict['volume'], 0.5)
self.assertIsNone(o_dict['solarPanelConfig'])
self.assertEqual(o_dict['orientation']['eulerAngle1'], 0)
self.assertEqual(o_dict['orientation']['eulerAngle2'], 0)
self.assertEqual(o_dict['orientation']['eulerAngle3'], 0)
self.assertEqual(o_dict['orientation']['eulerSeq1'], 1)
self.assertEqual(o_dict['orientation']['eulerSeq2'], 2)
self.assertEqual(o_dict['orientation']['eulerSeq3'], 3)
self.assertEqual(o_dict['orientation']['@id'], 'abc')
self.assertEqual(o_dict['@id'], 123)
def test___eq__(self):
# typical case, note that "@id" can be different.
o1 = SpacecraftBus.from_json('{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", \
"convention": "REF_FRAME_ALIGNED", "@id": "abc"}, "@id":123}')
o2 = SpacecraftBus.from_json('{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", \
"convention": "REF_FRAME_ALIGNED", "@id": "abc"}, "@id":"abc"}')
self.assertEqual(o1, o2)
o2 = SpacecraftBus.from_json('{"name": "BlueCanyon", "mass": 10, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", \
"convention": "REF_FRAME_ALIGNED", "@id": "abc"}, "@id":123}')
self.assertNotEqual(o1, o2)
# Equivalent orientation specifications in different input format
o1 = SpacecraftBus.from_json('{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", \
"convention": "REF_FRAME_ALIGNED"}, "@id":123}')
o2 = SpacecraftBus.from_json('{"name": "BlueCanyon", "mass": 20, "volume": 0.5, "orientation":{"referenceFrame": "NADIR_POINTING", \
"convention": "XYZ","xRotation":0,"yRotation":0,"zRotation":0}, "@id":123}')
self.assertEqual(o1, o2)
class TestSpacecraft(unittest.TestCase):
def test_from_json(self):
spc1 = Spacecraft.from_json(spc1_json)
spc2 = Spacecraft.from_json(spc2_json)
spc3 = Spacecraft.from_json(spc3_json)
# typical case 1 instrument
self.assertEqual(spc1.name, "Mars")
self.assertEqual(spc1.spacecraftBus, SpacecraftBus.from_json('{"name": "BlueCanyon", "mass": 20, "volume": 0.5, \
"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} \
}'))
self.assertEqual(spc1.instrument, [Instrument.from_json('{"name": "Alpha", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, \
"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, \
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":5 }, \
"maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, \
"numberDetectorRows":5, "numberDetectorCols":10, "@id":"bs1", "@type":"Basic Sensor"}')])
self.assertEqual(spc1.orbitState, OrbitState.from_json('{"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25}}'))
self.assertEqual(spc1._id, "sp1")
# no instruments
self.assertEqual(spc2.name, "Jupyter")
self.assertEqual(spc2.spacecraftBus, SpacecraftBus.from_json('{"name": "BlueCanyon", "mass": 20, "volume": 0.5, \
"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} \
}'))
self.assertIsNone(spc2.instrument)
self.assertEqual(spc2.orbitState, OrbitState.from_json('{"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25}}'))
self.assertEqual(spc2._id, 12)
# 3 instruments with multiple modes, no spacecraft id assignment
self.assertEqual(spc3.name, "Saturn")
self.assertEqual(spc3.spacecraftBus, SpacecraftBus.from_json('{"name": "BlueCanyon", "mass": 20, "volume": 0.5, \
"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} \
}'))
self.assertEqual(len(spc3.instrument), 3)
# 1st instrument
self.assertEqual(spc3.instrument[0].get_id(), 'bs1')
self.assertEqual(spc3.instrument[0].get_mode_id()[0], '0')
# 2nd instrument
self.assertIsNotNone(spc3.instrument[1].get_id())
self.assertIsNotNone(spc3.instrument[1].get_mode_id()[0])
# 3rd instrument
self.assertEqual(spc3.instrument[2].get_id(), 'bs3')
self.assertEqual(spc3.instrument[2].get_mode_id()[0], 0)
self.assertEqual(spc3.instrument[2].get_mode_id()[1], 1)
self.assertIsNotNone(spc3.instrument[2].get_mode_id()[2])
self.assertEqual(spc3.orbitState, OrbitState.from_json('{"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25}}'))
self.assertIsNotNone(spc3._id)
def test_get_instrument(self):
spc1 = Spacecraft.from_json(spc1_json)
spc2 = Spacecraft.from_json(spc2_json)
spc3 = Spacecraft.from_json(spc3_json)
# spc1 has 1 instrument with id 'bs1'
self.assertEqual(spc1.get_instrument(sensor_id='bs1'), spc1.instrument[0])
self.assertEqual(spc1.get_instrument(), spc1.instrument[0]) # no sensor_id specification
self.assertIsNone(spc1.get_instrument('bs2')) # wrong sensor_id
# spc2 has no instruments
self.assertIsNone(spc2.get_instrument())
# spc3 has three instruments
self.assertEqual(spc3.get_instrument(sensor_id='bs1'), spc3.instrument[0])
self.assertEqual(spc3.get_instrument(), spc3.instrument[0])
self.assertEqual(spc3.get_instrument(sensor_id='bs3'), spc3.instrument[2])
def test_add_instrument(self): #TODO
pass
def test_add_to_list(self): #TODO
pass
def test_get_id(self): #TODO
pass
def test_to_dict(self): #TODO
pass
'''
def test___eq__(self):
o1 = Spacecraft.from_json('{"@id": "sp1", "name": "Spock", \
"spacecraftBus":{"name": "BlueCanyon", "mass": 20, "volume": 0.5, \
"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} \
}, \
"instrument": {"name": "Alpha", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, \
"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, \
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":5 }, \
"maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, \
"numberDetectorRows":5, "numberDetectorCols":10, "@id":"bs1", "@type":"Basic Sensor"}, \
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25} \
} \
}')
o2 = Spacecraft.from_json('{"@id": "sp1", "name": "Spock", \
"spacecraftBus":{"name": "BlueCanyon", "mass": 20, "volume": 0.5, \
"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} \
}, \
"instrument": {"name": "Alpha", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, \
"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, \
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":5 }, \
"maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, \
"numberDetectorRows":5, "numberDetectorCols":10, "@id":"bs1", "@type":"Basic Sensor"}, \
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25} \
} \
}')
self.assertEqual(o1, o2)
# spacecraft bus different (orientation)
o2 = Spacecraft.from_json('{"@id": "sp1", "name": "Spock", \
"spacecraftBus":{"name": "BlueCanyon", "mass": 20, "volume": 0.5, \
"orientation":{"referenceFrame":"Nadir_pointing", "convention": "SIDE_LOOK", "sideLookAngle":-1} \
}, \
"instrument": {"name": "Alpha", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, \
"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, \
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":5 }, \
"maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, \
"numberDetectorRows":5, "numberDetectorCols":10, "@id":"bs1", "@type":"Basic Sensor"}, \
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25} \
} \
}')
self.assertNotEqual(o1, o2)
# instrument different (fieldOfViewGeometry)
o2 = Spacecraft.from_json('{"@id": "sp1", "name": "Spock", \
"spacecraftBus":{"name": "BlueCanyon", "mass": 20, "volume": 0.5, \
"orientation":{"referenceFrame":"Nadir_pointing", "convention": "SIDE_LOOK", "sideLookAngle":-1} \
}, \
"instrument": {"name": "Alpha", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, \
"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, \
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":15 }, \
"maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, \
"numberDetectorRows":5, "numberDetectorCols":10, "@id":"bs1", "@type":"Basic Sensor"}, \
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25} \
} \
}')
self.assertNotEqual(o1, o2)
# orbitState different (date)
o2 = Spacecraft.from_json('{"@id": "sp1", "name": "Spock", \
"spacecraftBus":{"name": "BlueCanyon", "mass": 20, "volume": 0.5, \
"orientation":{"referenceFrame":"Nadir_pointing", "convention": "SIDE_LOOK", "sideLookAngle":-1} \
}, \
"instrument": {"name": "Alpha", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, \
"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, \
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":15 }, \
"maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, \
"numberDetectorRows":5, "numberDetectorCols":10, "@id":"bs1", "@type":"Basic Sensor"}, \
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":3, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25} \
} \
}')
self.assertNotEqual(o1, o2)
'''
class TestUtilModuleFunction(unittest.TestCase):
def test_helper_extract_spacecraft_params(self):
# 1 instrument, 1 mode
o1 = Spacecraft.from_json('{"@id": "sp1", "name": "Mars", \
"spacecraftBus":{"name": "BlueCanyon", "mass": 20, "volume": 0.5, \
"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} \
}, \
"instrument": {"name": "Alpha", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, \
"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, \
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":5 }, \
"maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, \
"numberDetectorRows":5, "numberDetectorCols":10, "@id":"bs1", "@type":"Basic Sensor"}, \
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25} \
} \
}')
# no instruments
o2 = Spacecraft.from_json('{"@id": 12, "name": "Jupyter", \
"spacecraftBus":{"name": "BlueCanyon", "mass": 20, "volume": 0.5, \
"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} \
}, \
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25} \
} \
}')
# 3 instruments with multiple modes, no spacecraft id assignment
o3 = Spacecraft.from_json('{"name": "Saturn", \
"spacecraftBus":{"name": "BlueCanyon", "mass": 20, "volume": 0.5, \
"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} \
}, \
"instrument": [ \
{ "name": "Alpha", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, \
"orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, \
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":5 }, \
"maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, \
"numberDetectorRows":5, "numberDetectorCols":10, "@id":"bs1", "@type":"Basic Sensor" \
}, \
{ "name": "Beta", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, \
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":5 }, \
"maneuver":{"maneuverType": "SINGLE_ROLL_ONLY", "A_rollMin":10, "A_rollMax":15}, \
"mode": [{"@id":101, "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}} \
], \
"numberDetectorRows":5, "numberDetectorCols":10, "@type":"Basic Sensor" \
}, \
{ "name": "Gamma", "mass":10, "volume":12.45, "dataRate": 40, "bitsPerPixel": 8, "power": 12, \
"fieldOfViewGeometry": {"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":10 }, \
"maneuver":{"maneuverType": "Double_Roll_Only", "A_rollMin":10, "A_rollMax":15, "B_rollMin":-15, "B_rollMax":-10}, \
"mode": [{"@id":0, "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}}, \
{"@id":1, "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": 25}}, \
{ "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_LOOK", "sideLookAngle": -25}} \
], \
"numberDetectorRows":5, "numberDetectorCols":10, "@id": "bs3", "@type":"Basic Sensor" \
} \
], \
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25} \
} \
}')
# single sc tests
x = orbitpy.util.helper_extract_spacecraft_params([o1])
self.assertEqual(len(x), 1)
self.assertEqual(x[0].sc_id, 'sp1')
self.assertEqual(x[0].instru_id,'bs1')
self.assertEqual(x[0].mode_id, '0')
self.assertAlmostEqual(x[0].sma, 6878.136999999998)
self.assertAlmostEqual(x[0].fov_height, 5.0)
self.assertAlmostEqual(x[0].fov_width, 5.0)
self.assertAlmostEqual(x[0].for_height, 15.0)
self.assertAlmostEqual(x[0].for_width, 15.0)
# spacecraft with no instruments
x = orbitpy.util.helper_extract_spacecraft_params([o2])
self.assertEqual(len(x), 1)
self.assertEqual(x[0].sc_id, 12)
self.assertIsNone(x[0].instru_id)
self.assertIsNone(x[0].mode_id)
self.assertAlmostEqual(x[0].sma, 6878.136999999998)
self.assertIsNone(x[0].fov_height)
self.assertIsNone(x[0].fov_width)
self.assertIsNone(x[0].for_height)
self.assertIsNone(x[0].for_width)
x = orbitpy.util.helper_extract_spacecraft_params([o3])
self.assertEqual(len(x), 8)
self.assertIsNotNone(x[0].sc_id)
self.assertIsNotNone(x[1].sc_id)
self.assertIsNotNone(x[2].sc_id)
self.assertIsNotNone(x[3].sc_id)
self.assertIsNotNone(x[4].sc_id)
self.assertIsNotNone(x[5].sc_id)
self.assertIsNotNone(x[6].sc_id)
self.assertIsNotNone(x[7].sc_id)
self.assertEqual(x[0].instru_id,'bs1')
self.assertIsNotNone(x[1].instru_id)
self.assertEqual(x[2].instru_id,'bs3')
self.assertEqual(x[3].instru_id,'bs3')
self.assertEqual(x[4].instru_id,'bs3')
self.assertEqual(x[5].instru_id,'bs3')
self.assertEqual(x[6].instru_id,'bs3')
self.assertEqual(x[7].instru_id,'bs3')
self.assertEqual(x[0].mode_id, '0')
self.assertEqual(x[1].mode_id, 101)
self.assertEqual(x[2].mode_id, 0)
self.assertEqual(x[3].mode_id, 0)
self.assertEqual(x[4].mode_id, 1)
self.assertEqual(x[5].mode_id, 1)
self.assertIsNotNone(x[6].mode_id)
self.assertIsNotNone(x[7].mode_id)
self.assertAlmostEqual(x[0].sma, 6878.136999999998)
self.assertAlmostEqual(x[1].sma, 6878.136999999998)
self.assertAlmostEqual(x[2].sma, 6878.136999999998)
self.assertAlmostEqual(x[3].sma, 6878.136999999998)
self.assertAlmostEqual(x[4].sma, 6878.136999999998)
self.assertAlmostEqual(x[5].sma, 6878.136999999998)
self.assertAlmostEqual(x[6].sma, 6878.136999999998)
self.assertAlmostEqual(x[7].sma, 6878.136999999998)
self.assertAlmostEqual(x[0].fov_height, 5.0)
self.assertAlmostEqual(x[1].fov_height, 5.0)
self.assertAlmostEqual(x[2].fov_height, 5.0)
self.assertAlmostEqual(x[3].fov_height, 5.0)
self.assertAlmostEqual(x[4].fov_height, 5.0)
self.assertAlmostEqual(x[5].fov_height, 5.0)
self.assertAlmostEqual(x[6].fov_height, 5.0)
self.assertAlmostEqual(x[0].fov_width, 5)
self.assertAlmostEqual(x[1].fov_width, 5)
self.assertAlmostEqual(x[2].fov_width, 10.0)
self.assertAlmostEqual(x[3].fov_width, 10.0)
self.assertAlmostEqual(x[4].fov_width, 10.0)
self.assertAlmostEqual(x[5].fov_width, 10.0)
self.assertAlmostEqual(x[6].fov_width, 10.0)
self.assertAlmostEqual(x[7].fov_width, 10.0)
self.assertAlmostEqual(x[0].for_height, 15.0)
self.assertAlmostEqual(x[1].for_height, 5.0)
self.assertAlmostEqual(x[2].for_height, 5.0)
self.assertAlmostEqual(x[3].for_height, 5.0)
self.assertAlmostEqual(x[4].for_height, 5.0)
self.assertAlmostEqual(x[5].for_height, 5.0)
self.assertAlmostEqual(x[6].for_height, 5.0)
self.assertAlmostEqual(x[7].for_height, 5.0)
self.assertAlmostEqual(x[0].for_width, 15.0)
self.assertAlmostEqual(x[1].for_width, 10.0)
self.assertAlmostEqual(x[2].for_width, 15.0)
self.assertAlmostEqual(x[3].for_width, 15.0)
self.assertAlmostEqual(x[4].for_width, 15.0)
self.assertAlmostEqual(x[5].for_width, 15.0)
self.assertAlmostEqual(x[6].for_width, 15.0)
self.assertAlmostEqual(x[7].for_width, 15.0)
# test multiple spacecraft list, test first and last element of the resultant list
x = orbitpy.util.helper_extract_spacecraft_params([o1, o2, o3])
self.assertEqual(len(x), 10)
self.assertEqual(x[0].sc_id, 'sp1')
self.assertEqual(x[0].instru_id,'bs1')
self.assertEqual(x[0].mode_id, '0')
self.assertAlmostEqual(x[0].sma, 6878.136999999998)
self.assertAlmostEqual(x[0].fov_height, 5.0)
self.assertAlmostEqual(x[0].fov_width, 5.0)
self.assertAlmostEqual(x[0].for_height, 15.0)
self.assertAlmostEqual(x[0].for_width, 15.0)
self.assertEqual(x[1].sc_id, 12)
self.assertIsNotNone(x[2].sc_id)
self.assertEqual(x[3].sc_id, x[2].sc_id)
self.assertEqual(x[4].sc_id, x[2].sc_id)
self.assertEqual(x[5].sc_id, x[2].sc_id)
self.assertEqual(x[6].sc_id, x[2].sc_id)
self.assertEqual(x[7].sc_id, x[2].sc_id)
self.assertEqual(x[8].sc_id, x[2].sc_id)
self.assertEqual(x[9].sc_id, x[2].sc_id)
self.assertEqual(x[9].instru_id,'bs3')
self.assertIsNotNone(x[9].mode_id)
self.assertAlmostEqual(x[9].sma, 6878.136999999998)
self.assertAlmostEqual(x[9].fov_height, 5.0)
self.assertAlmostEqual(x[9].fov_width, 10.0)
self.assertAlmostEqual(x[9].for_height, 5.0)
self.assertAlmostEqual(x[9].for_width, 15.0)
def test_extract_auxillary_info_from_state_file(self): # TODO
pass
class TestGroundStation(unittest.TestCase): #TODO
pass
class TestUtilFunctions(unittest.TestCase):
def test_dictionary_list_to_object_list(self): #TODO
pass
def test_object_list_to_dictionary_list(self): #TODO
pass
def test_initialize_object_list(self): #TODO
pass
def test_add_to_list(self): #TODO
pass
class TestOutputInfoUtility(unittest.TestCase): #TODO
pass
| 2.640625
| 3
|
tests/spec2k/extract_timings.py
|
kapkic/native_client
| 1
|
12777939
|
#!/usr/bin/python2
# Copyright (c) 2011 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script produces csv data from multiple benchmarking runs with the
# spec2k harness.
#
# A typical usage would be
#
# export SPEC_RUN_REPETITIONS=3
# ./run_all.sh RunTimedBenchmarks SetupPnaclX8664Opt ref > ../timings.setting1
# [change the compiler settings]
# ./run_all.sh RunTimedBenchmarks SetupPnaclX8664Opt ref > ../timings.setting2
#
# tests/spec2k/extract_timings.py time.inline time.noinline time.lowinline
#
# which produces output like:
# name , inline , noinline , lowinline
# ammp , 250.47 , 263.83 , 262.20
# art , 222.12 , 219.36 , 259.28
# bzip2 , 179.05 , 194.05 , missing
# crafty , 60.24 , 73.33 , missing
# ...
#
# Alternatively, if your data already has the form:
#
# <benchmark> <setting> <value>
#
# You can run the tool like so:
# tests/spec2k/extract_timings.py < <data-file>
import sys
# The names of the individual settings, derived from the filenames, in the
# order they were given on the command-line
SETTINGS = []
# dictionary of dictionaries accessed like so:
# BENCHMARKS['benchmark']['setting']
BENCHMARKS = {}
def AddDataPoint(benchmark, setting, v):
if setting not in SETTINGS:
# TODO: linear search is slightly inefficient
SETTINGS.append(setting)
values = BENCHMARKS.get(benchmark, {})
values[setting] = v
BENCHMARKS[benchmark] = values
def ExtractResults(name, inp):
for line in inp:
if not line.startswith('RESULT'):
continue
tokens = line.split()
# NOTE: the line we care about looks like this:
# 'RESULT runtime_equake: pnacl.opt.x8664= [107.36,116.28,116.4] secs'
assert tokens[0] == 'RESULT'
assert tokens[1].endswith(':')
assert tokens[2].endswith('=')
assert tokens[3].startswith('[')
assert tokens[3].endswith(']')
benchmark = tokens[1][:-1].split('_')[-1]
data = tokens[3][1:][:-1].split(',')
data = [float(d) for d in data]
m = min(data)
AddDataPoint(benchmark, name, m)
# Note: we are intentionally not using the csv module
# as it does not provide nicely formatted output
def DumpRow(row):
sys.stdout.write('%-20s' % row[0])
for val in row[1:]:
if type(val) == str:
sys.stdout.write(', %10s' % val)
else:
sys.stdout.write(', %10.2f' % val)
sys.stdout.write('\n')
def DumpCsv():
row = ['name'] + SETTINGS
DumpRow(row)
for k in sorted(BENCHMARKS.keys()):
row = [k]
values = BENCHMARKS[k]
for s in SETTINGS:
if s in values:
row.append(values[s])
else:
row.append('missing')
DumpRow(row)
if len(sys.argv) > 1:
for f in sys.argv[1:]:
setting = f.split('.')[-1]
fin = open(f)
ExtractResults(setting, fin)
fin.close()
else:
for line in sys.stdin:
tokens = line.split()
if not tokens: continue
assert len(tokens) == 3
AddDataPoint(tokens[0], tokens[1], float(tokens[2]))
DumpCsv()
| 2.359375
| 2
|
chumpy/np_tensordot.py
|
Syze/chumpy
| 5
|
12777940
|
<reponame>Syze/chumpy
# Up to numpy 1.13, the numpy implementation of tensordot could be
# reinterpreted using chumpy. With numpy 1.14 the implementation started using
# ufunc.multiply.reduce which can't be understood by chumpy. This is the
# chumpy-compatible implementation of tensordot from numpy 1.13.3.
#
# i.e.
#
# import inspect
# with open('np_tensordot.py', 'w') as f:
# f.write(''.join(inspect.getsourcelines(np.tensordot)[0]))
"""
Copyright (c) 2005-2017, NumPy Developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the NumPy Developers nor the names of any
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
def tensordot(a, b, axes=2):
"""
Compute tensor dot product along specified axes for arrays >= 1-D.
Given two tensors (arrays of dimension greater than or equal to one),
`a` and `b`, and an array_like object containing two array_like
objects, ``(a_axes, b_axes)``, sum the products of `a`'s and `b`'s
elements (components) over the axes specified by ``a_axes`` and
``b_axes``. The third argument can be a single non-negative
integer_like scalar, ``N``; if it is such, then the last ``N``
dimensions of `a` and the first ``N`` dimensions of `b` are summed
over.
Parameters
----------
a, b : array_like, len(shape) >= 1
Tensors to "dot".
axes : int or (2,) array_like
* integer_like
If an int N, sum over the last N axes of `a` and the first N axes
of `b` in order. The sizes of the corresponding axes must match.
* (2,) array_like
Or, a list of axes to be summed over, first sequence applying to `a`,
second to `b`. Both elements array_like must be of the same length.
See Also
--------
dot, einsum
Notes
-----
Three common use cases are:
* ``axes = 0`` : tensor product :math:`a\\otimes b`
* ``axes = 1`` : tensor dot product :math:`a\\cdot b`
* ``axes = 2`` : (default) tensor double contraction :math:`a:b`
When `axes` is integer_like, the sequence for evaluation will be: first
the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and
Nth axis in `b` last.
When there is more than one axis to sum over - and they are not the last
(first) axes of `a` (`b`) - the argument `axes` should consist of
two sequences of the same length, with the first axis to sum over given
first in both sequences, the second axis second, and so forth.
Examples
--------
A "traditional" example:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
>>> c.shape
(5, 2)
>>> c
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> # A slower but equivalent way of computing the same...
>>> d = np.zeros((5,2))
>>> for i in range(5):
... for j in range(2):
... for k in range(3):
... for n in range(4):
... d[i,j] += a[k,n,i] * b[n,k,j]
>>> c == d
array([[ True, True],
[ True, True],
[ True, True],
[ True, True],
[ True, True]], dtype=bool)
An extended example taking advantage of the overloading of + and \\*:
>>> a = np.array(range(1, 9))
>>> a.shape = (2, 2, 2)
>>> A = np.array(('a', 'b', 'c', 'd'), dtype=object)
>>> A.shape = (2, 2)
>>> a; A
array([[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]]])
array([[a, b],
[c, d]], dtype=object)
>>> np.tensordot(a, A) # third argument default is 2 for double-contraction
array([abbcccdddd, aaaaabbbbbbcccccccdddddddd], dtype=object)
>>> np.tensordot(a, A, 1)
array([[[acc, bdd],
[aaacccc, bbbdddd]],
[[aaaaacccccc, bbbbbdddddd],
[aaaaaaacccccccc, bbbbbbbdddddddd]]], dtype=object)
>>> np.tensordot(a, A, 0) # tensor product (result too long to incl.)
array([[[[[a, b],
[c, d]],
...
>>> np.tensordot(a, A, (0, 1))
array([[[abbbbb, cddddd],
[aabbbbbb, ccdddddd]],
[[aaabbbbbbb, cccddddddd],
[aaaabbbbbbbb, ccccdddddddd]]], dtype=object)
>>> np.tensordot(a, A, (2, 1))
array([[[abb, cdd],
[aaabbbb, cccdddd]],
[[aaaaabbbbbb, cccccdddddd],
[aaaaaaabbbbbbbb, cccccccdddddddd]]], dtype=object)
>>> np.tensordot(a, A, ((0, 1), (0, 1)))
array([abbbcccccddddddd, aabbbbccccccdddddddd], dtype=object)
>>> np.tensordot(a, A, ((2, 1), (1, 0)))
array([acccbbdddd, aaaaacccccccbbbbbbdddddddd], dtype=object)
"""
try:
iter(axes)
except:
axes_a = list(range(-axes, 0))
axes_b = list(range(0, axes))
else:
axes_a, axes_b = axes
try:
na = len(axes_a)
axes_a = list(axes_a)
except TypeError:
axes_a = [axes_a]
na = 1
try:
nb = len(axes_b)
axes_b = list(axes_b)
except TypeError:
axes_b = [axes_b]
nb = 1
a, b = asarray(a), asarray(b)
as_ = a.shape
nda = a.ndim
bs = b.shape
ndb = b.ndim
equal = True
if na != nb:
equal = False
else:
for k in range(na):
if as_[axes_a[k]] != bs[axes_b[k]]:
equal = False
break
if axes_a[k] < 0:
axes_a[k] += nda
if axes_b[k] < 0:
axes_b[k] += ndb
if not equal:
raise ValueError("shape-mismatch for sum")
# Move the axes to sum over to the end of "a"
# and to the front of "b"
notin = [k for k in range(nda) if k not in axes_a]
newaxes_a = notin + axes_a
N2 = 1
for axis in axes_a:
N2 *= as_[axis]
newshape_a = (-1, N2)
olda = [as_[axis] for axis in notin]
notin = [k for k in range(ndb) if k not in axes_b]
newaxes_b = axes_b + notin
N2 = 1
for axis in axes_b:
N2 *= bs[axis]
newshape_b = (N2, -1)
oldb = [bs[axis] for axis in notin]
at = a.transpose(newaxes_a).reshape(newshape_a)
bt = b.transpose(newaxes_b).reshape(newshape_b)
res = dot(at, bt)
return res.reshape(olda + oldb)
| 2.171875
| 2
|
modules/pyxmpp2/expdict.py
|
gthreepwood/yats
| 0
|
12777941
|
<filename>modules/pyxmpp2/expdict.py
#
# (C) Copyright 2003-2011 <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""Dictionary with item expiration."""
from __future__ import absolute_import, division
__docformat__ = "restructuredtext en"
import time
import threading
import logging
logger = logging.getLogger("pyxmpp2.expdict")
_NO_DEFAULT = object()
class ExpiringDictionary(dict):
"""An extension to standard Python dictionary objects which implements item
expiration.
Each item in ExpiringDictionary has its expiration time assigned, after
which the item is removed from the mapping.
:Ivariables:
- `_timeouts`: a dictionary with timeout values and timeout callback for
stored objects.
- `_default_timeout`: the default timeout value (in seconds from now).
- `_lock`: access synchronization lock.
:Types:
- `_timeouts`: `dict`
- `_default_timeout`: `float`
- `_lock`: :std:`threading.RLock`"""
__slots__ = ['_timeouts', '_default_timeout', '_lock']
def __init__(self, default_timeout = 300.0):
"""Initialize an `ExpiringDictionary` object.
:Parameters:
- `default_timeout`: default timeout value (in seconds) for stored
objects.
:Types:
- `default_timeout`: `float`
"""
dict.__init__(self)
self._timeouts = {}
self._default_timeout = default_timeout
self._lock = threading.RLock()
def __delitem__(self, key):
with self._lock:
logger.debug("expdict.__delitem__({0!r})".format(key))
del self._timeouts[key]
return dict.__delitem__(self, key)
def __getitem__(self, key):
with self._lock:
logger.debug("expdict.__getitem__({0!r})".format(key))
self._expire_item(key)
return dict.__getitem__(self, key)
def pop(self, key, default = _NO_DEFAULT):
with self._lock:
self._expire_item(key)
del self._timeouts[key]
if default is not _NO_DEFAULT:
return dict.pop(self, key, default)
else:
return dict.pop(self, key)
def __setitem__(self, key, value):
logger.debug("expdict.__setitem__({0!r}, {1!r})".format(key, value))
return self.set_item(key, value)
def set_item(self, key, value, timeout = None, timeout_callback = None):
"""Set item of the dictionary.
:Parameters:
- `key`: the key.
- `value`: the object to store.
- `timeout`: timeout value for the object (in seconds from now).
- `timeout_callback`: function to be called when the item expires.
The callback should accept none, one (the key) or two (the key
and the value) arguments.
:Types:
- `key`: any hashable value
- `value`: any python object
- `timeout`: `int`
- `timeout_callback`: callable
"""
with self._lock:
logger.debug("expdict.__setitem__({0!r}, {1!r}, {2!r}, {3!r})"
.format(key, value, timeout, timeout_callback))
if not timeout:
timeout = self._default_timeout
self._timeouts[key] = (time.time() + timeout, timeout_callback)
return dict.__setitem__(self, key, value)
def expire(self):
"""Do the expiration of dictionary items.
Remove items that expired by now from the dictionary.
:Return: time, in seconds, when the next item expires or `None`
:returntype: `float`
"""
with self._lock:
logger.debug("expdict.expire. timeouts: {0!r}"
.format(self._timeouts))
next_timeout = None
for k in self._timeouts.keys():
ret = self._expire_item(k)
if ret is not None:
if next_timeout is None:
next_timeout = ret
else:
next_timeout = min(next_timeout, ret)
return next_timeout
def clear(self):
with self._lock:
self._timeouts.clear()
dict.clear(self)
def _expire_item(self, key):
"""Do the expiration of a dictionary item.
Remove the item if it has expired by now.
:Parameters:
- `key`: key to the object.
:Types:
- `key`: any hashable value
"""
(timeout, callback) = self._timeouts[key]
now = time.time()
if timeout <= now:
item = dict.pop(self, key)
del self._timeouts[key]
if callback:
try:
callback(key, item)
except TypeError:
try:
callback(key)
except TypeError:
callback()
return None
else:
return timeout - now
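# A minimal usage sketch (hypothetical; `some_object` stands in for any value):
#
#   cache = ExpiringDictionary(default_timeout=300.0)
#   cache.set_item("session-1", some_object, timeout=60.0,
#                  timeout_callback=lambda key: logger.debug("%r expired", key))
#   next_check = cache.expire()  # seconds until the next item expires, or None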
# vi: sts=4 et sw=4
| 2.125
| 2
|
semi_supervised_learning/bayesian_gan_resgmcmc.py
|
gaoliyao/Replica_Exchange_Stochastic_Gradient_MCMC
| 21
|
12777942
|
import os
import sys
import argparse
import json
import time
import numpy as np
from math import ceil
from PIL import Image
import tensorflow as tf
from tensorflow.contrib import slim
from bgan_util import AttributeDict
from bgan_util import print_images, MnistDataset, CelebDataset, Cifar10, Cifar100, SVHN, ImageNet
from bgan_models import BDCGAN
def get_session():
if tf.get_default_session() is None:
print "Creating new session"
tf.reset_default_graph()
_SESSION = tf.InteractiveSession()
else:
print "Using old session"
_SESSION = tf.get_default_session()
return _SESSION
def get_gan_labels(lbls):
# add class 0 which is the "fake" class
if lbls is not None:
labels = np.zeros((lbls.shape[0], lbls.shape[1] + 1))
labels[:, 1:] = lbls
else:
labels = None
return labels
def get_supervised_batches(dataset, size, batch_size, class_ids):
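# Builds a pool of roughly `size` labelled examples, drawn about evenly from
# `class_ids`, shuffles it once, and then yields mini-batches of `batch_size`
# sampled from that pool indefinitely.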
def batchify_with_size(sampled_imgs, sampled_labels, size):
rand_idx = np.random.choice(range(sampled_imgs.shape[0]), size, replace=False)
imgs_ = sampled_imgs[rand_idx]
lbls_ = sampled_labels[rand_idx]
rand_idx = np.random.choice(range(imgs_.shape[0]), batch_size, replace=True)
imgs_ = imgs_[rand_idx]
lbls_ = lbls_[rand_idx]
return imgs_, lbls_
labeled_image_batches, lblss = [], []
num_passes = int(ceil(float(size) / batch_size))
for _ in xrange(num_passes):
for class_id in class_ids:
labeled_image_batch, lbls = dataset.next_batch(int(ceil(float(batch_size)/len(class_ids))),
class_id=class_id)
labeled_image_batches.append(labeled_image_batch)
lblss.append(lbls)
labeled_image_batches = np.concatenate(labeled_image_batches)
lblss = np.concatenate(lblss)
if size < batch_size:
labeled_image_batches, lblss = batchify_with_size(labeled_image_batches, lblss, size)
shuffle_idx = np.arange(lblss.shape[0]); np.random.shuffle(shuffle_idx)
labeled_image_batches = labeled_image_batches[shuffle_idx]
lblss = lblss[shuffle_idx]
while True:
i = np.random.randint(max(1, size/batch_size))
yield (labeled_image_batches[i*batch_size:(i+1)*batch_size],
lblss[i*batch_size:(i+1)*batch_size])
def get_test_batches(dataset, batch_size):
try:
test_imgs, test_lbls = dataset.test_imgs, dataset.test_labels
except:
test_imgs, test_lbls = dataset.get_test_set()
all_test_img_batches, all_test_lbls = [], []
test_size = test_imgs.shape[0]
i = 0
while (i+1)*batch_size <= test_size:
all_test_img_batches.append(test_imgs[i*batch_size:(i+1)*batch_size])
all_test_lbls.append(test_lbls[i*batch_size:(i+1)*batch_size])
i += 1
return all_test_img_batches, all_test_lbls
def get_test_accuracy(session, dcgan, all_test_img_batches, all_test_lbls):
# only need this function because bdcgan has a fixed batch size for *everything*
# test_size is in number of batches
all_d_logits, all_d1_logits, all_s_logits = [], [], []
for test_image_batch, test_lbls in zip(all_test_img_batches, all_test_lbls):
test_d_logits, test_d1_logits, test_s_logits = session.run([dcgan.test_D_logits, dcgan.test_D1_logits, dcgan.test_S_logits], feed_dict={dcgan.test_inputs: test_image_batch})
all_d_logits.append(test_d_logits)
all_d1_logits.append(test_d1_logits)
all_s_logits.append(test_s_logits)
test_d_logits = np.concatenate(all_d_logits)
test_d1_logits = np.concatenate(all_d1_logits)
test_s_logits = np.concatenate(all_s_logits)
test_lbls = np.concatenate(all_test_lbls)
not_fake = np.where(np.argmax(test_d_logits, 1) > 0)[0]
not_fake1 = np.where(np.argmax(test_d1_logits, 1) > 0)[0]
if len(not_fake) < 1000:
print "WARNING: not enough samples for SS results"
return -1, -1, -1
if len(not_fake1) < 1000:
print "WARNING: not enough samples for SS results"
return -1, -1, -1
semi_sup_acc = (100. * np.sum(np.argmax(test_d_logits[not_fake], 1) == np.argmax(test_lbls[not_fake], 1) + 1))\
/ len(not_fake)
semi_sup1_acc = (100. * np.sum(np.argmax(test_d1_logits[not_fake1], 1) == np.argmax(test_lbls[not_fake1], 1) + 1))\
/ len(not_fake1)
sup_acc = (100. * np.sum(np.argmax(test_s_logits, 1) == np.argmax(test_lbls, 1)))\
/ test_lbls.shape[0]
return sup_acc, semi_sup_acc, semi_sup1_acc
def get_test_variance(session, dcgan, dataset, batch_size, z_dim, fileNameV):
# estimates the sample standard deviation of the two discriminators' losses
# over repeated mini-batches; used to compute the bias correction for swaps
d_losses, d1_losses = [], []
if hasattr(dataset, "supervised_batches"):
# implement own data feeder if data doesn't fit in memory
supervised_batches = dataset.supervised_batches(args.N, batch_size)
else:
supervised_batches = get_supervised_batches(dataset, args.N, batch_size, range(dataset.num_classes))
ENOUGH_INTR_NUM = 1000000
for i in range(ENOUGH_INTR_NUM):
labeled_image_batch, labels = supervised_batches.next()
batch_z = np.random.uniform(-1, 1, [batch_size, z_dim])
image_batch, _ = dataset.next_batch(batch_size, class_id=None)
d_loss, d1_loss = session.run([dcgan.d_loss_semi, dcgan.d1_loss_semi], feed_dict={dcgan.labeled_inputs: labeled_image_batch, dcgan.labels: get_gan_labels(labels), dcgan.inputs: image_batch, dcgan.z: batch_z})
d_losses.append(d_loss)
d1_losses.append(d1_loss)
if len(d_losses) > args.repeats:
break
with open(fileNameV, 'a') as f_variance:
f_variance.write("%.2f %.2f \n" % (float(np.std(d_losses)), float(np.std(d1_losses))))
return float(np.std(d_losses)), float(np.std(d1_losses))
def b_dcgan(dataset, dataset_get_variance, args):
fileNameV = "variance"
fileNameAcc = "accuracy"
if args.N == 1000:
fileNameV += "1000"
fileNameAcc += "1000"
if args.N == 2000:
fileNameV += "2000"
fileNameAcc += "2000"
if args.N == 4000:
fileNameV += "4000"
fileNameAcc += "4000"
if args.N == 8000:
fileNameV += "8000"
fileNameAcc += "8000"
fileNameV += args.fileName
fileNameAcc += args.fileName
fileNameV += ".txt"
fileNameAcc += ".txt"
f_variance = open(fileNameV, "wb")
f_variance.write("Low, High Variance here: \n")
f_accuracy = open(fileNameAcc, "wb")
f_variance.close()
f_accuracy.close()
corrections = [[], []]
mv_corrections = []
mv_corrections.append(sys.float_info.max)
mv_corrections.append(sys.float_info.max)
z_dim = args.z_dim
x_dim = dataset.x_dim
batch_size = args.batch_size
dataset_size = dataset.dataset_size
session = get_session()
if args.random_seed is not None:
tf.set_random_seed(args.random_seed)
# due to how much the TF code sucks all functions take fixed batch_size at all times
dcgan = BDCGAN(x_dim, z_dim, dataset_size, batch_size=batch_size, J=args.J, M=args.M,
lr=args.lr, optimizer=args.optimizer, gen_observed=args.gen_observed,
num_classes=dataset.num_classes if args.semi_supervised else 1)
dcgan.set_parallel_chain_params(args.invT, args.Tgap, args.LRgap, args.Egap, args.anneal, args.lr_anneal)
print "Starting session"
session.run(tf.global_variables_initializer())
print "Starting training loop"
num_train_iter = args.train_iter
if hasattr(dataset, "supervised_batches"):
# implement own data feeder if data doesn't fit in memory
supervised_batches = dataset.supervised_batches(args.N, batch_size)
else:
supervised_batches = get_supervised_batches(dataset, args.N, batch_size, range(dataset.num_classes))
test_image_batches, test_label_batches = get_test_batches(dataset, batch_size)
optimizer_dict = {"semi_d": dcgan.d_optim_semi_adam,
"semi_d1": dcgan.d1_optim_semi_adam,
"sup_d": dcgan.s_optim_adam,
"gen": dcgan.g_optims_adam,
"gen1": dcgan.g1_optims_adam}
base_learning_rate = args.lr # for now we use same learning rate for Ds and Gs
base_learning_rate1 = args.lr / args.LRgap # for now we use same learning rate for Ds and Gs
lr_decay_rate = 3.0 # args.lr_decay
zero_lr = 0.0
swap_count = 0
for train_iter in range(num_train_iter):
if train_iter == 5000:
print "Switching to user-specified optimizer"
optimizer_dict = {"semi_d": dcgan.d_optim_semi,
"semi_d1": dcgan.d1_optim_semi,
"sup_d": dcgan.s_optim,
"gen": dcgan.g_optims,
"gen1": dcgan.g1_optims}
learning_rate = base_learning_rate * np.exp(-lr_decay_rate *
min(1.0, (train_iter*batch_size)/float(dataset_size)))
learning_rate1 = base_learning_rate1 * np.exp(-lr_decay_rate *
min(1.0, (train_iter*batch_size)/float(dataset_size)))
batch_z = np.random.uniform(-1, 1, [batch_size, z_dim])
image_batch, _ = dataset.next_batch(batch_size, class_id=None)
if args.semi_supervised:
labeled_image_batch, labels = supervised_batches.next()
_, d_loss = session.run([optimizer_dict["semi_d"], dcgan.d_loss_semi], feed_dict={dcgan.labeled_inputs: labeled_image_batch,
dcgan.labels: get_gan_labels(labels),
dcgan.inputs: image_batch,
dcgan.z: batch_z,
dcgan.d_semi_learning_rate: learning_rate})
_, d1_loss = session.run([optimizer_dict["semi_d1"], dcgan.d1_loss_semi], feed_dict={dcgan.labeled_inputs: labeled_image_batch,
dcgan.labels: get_gan_labels(labels),
dcgan.inputs: image_batch,
dcgan.z: batch_z,
dcgan.d1_semi_learning_rate: learning_rate1})
_, s_loss = session.run([optimizer_dict["sup_d"], dcgan.s_loss], feed_dict={dcgan.inputs: labeled_image_batch,
dcgan.lbls: labels})
bias = (mv_corrections[0] + mv_corrections[1]) * args.bias_multi
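# Replica-exchange style acceptance test: the high-temperature discriminator is
# copied into the low-temperature chain with probability
# exp((d1_loss - d_loss + bias) * invT * (1 - Tgap)); `bias` is the
# variance-based correction maintained in mv_corrections.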
if np.log(np.random.uniform(0, 1)) < (d1_loss - d_loss + bias) * (args.invT - args.invT*args.Tgap) and args.baseline == 0:
swap_count += 1
print "Copy Iter %i, Copy Count %i" % (train_iter, swap_count)
print "Disc1 (high temperature) loss = %.2f, Gen loss = %s" % (d1_loss, ", ".join(["%.2f" % gl for gl in g1_losses]))
print "Disc loss = %.2f, Gen loss = %s" % (d_loss, ", ".join(["%.2f" % g for g in g_losses]))
with open(fileNameAcc, 'a') as f_accuracy:
f_accuracy.write("Copy Iter %i, Copy Count %i \n" % (train_iter, swap_count))
f_accuracy.write("Disc1 (high temperature) loss = %.2f, Gen loss = %s \n" % (d1_loss, ", ".join(["%.2f" % gl for gl in g1_losses])))
f_accuracy.write("Disc loss = %.2f, Gen loss = %s \n" % (d_loss, ", ".join(["%.2f" % g for g in g_losses])))
print("Copy status of the second discriminator to the first one")
s_acc, ss_acc, ss1_acc= get_test_accuracy(session, dcgan, test_image_batches, test_label_batches)
print "Semi-sup classification acc before copy: %.2f" % (ss_acc)
with open(fileNameAcc, 'a') as f_accuracy:
f_accuracy.write("Copy status of the second discriminator to the first one \n")
f_accuracy.write("Semi-sup classification acc before copy: %.2f \n" % (ss_acc))
dcgan.copy_discriminator(session)
# get test set performance on real labels only for both GAN-based classifier and standard one
s_acc, ss_acc, ss1_acc= get_test_accuracy(session, dcgan, test_image_batches, test_label_batches)
print "Semi-sup 1 (high temperature) classification acc before copy: %.2f" % (ss1_acc)
print "Semi-sup classification acc after copy: %.2f" % (ss_acc)
with open(fileNameAcc, 'a') as f_accuracy:
f_accuracy.write("Semi-sup 1 (high temperature) classification acc before copy: %.2f \n" % (ss1_acc))
f_accuracy.write("Semi-sup classification acc after copy: %.2f \n" % (ss_acc))
g_losses = []
g1_losses = []
for gi in xrange(dcgan.num_gen):
# compute g_sample loss
batch_z = np.random.uniform(-1, 1, [batch_size, z_dim])
for m in range(dcgan.num_mcmc):
_, g_loss = session.run([optimizer_dict["gen"][gi*dcgan.num_mcmc+m], dcgan.generation["g_losses"][gi*dcgan.num_mcmc+m]],
feed_dict={dcgan.z: batch_z, dcgan.g_learning_rate: learning_rate})
_, g1_loss = session.run([optimizer_dict["gen1"][gi*dcgan.num_mcmc+m], dcgan.generation["g1_losses"][gi*dcgan.num_mcmc+m]],
feed_dict={dcgan.z: batch_z, dcgan.g_learning_rate: learning_rate1})
g_losses.append(g_loss)
g1_losses.append(g1_loss)
if train_iter > 0 and train_iter % args.n_save == 0:
print "Iter %i" % train_iter
print "Disc1 (high temperature) loss = %.2f, Gen loss = %s" % (d1_loss, ", ".join(["%.2f" % gl for gl in g1_losses]))
# print "Disc1 (high temperature) loss = %.2f" % (d1_loss)
print "Disc loss = %.2f, Gen loss = %s" % (d_loss, ", ".join(["%.2f" % g for g in g_losses]))
with open(fileNameAcc, 'a') as f_accuracy:
f_accuracy.write("Iter %i \n" % train_iter)
f_accuracy.write("Disc1 (high temperature) loss = %.2f, Gen loss = %s \n" % (d1_loss, ", ".join(["%.2f" % gl for gl in g1_losses])))
f_accuracy.write("Disc loss = %.2f, Gen loss = %s \n" % (d_loss, ", ".join(["%.2f" % g for g in g_losses])))
# collect samples
if args.save_samples: # saving samples
all_sampled_imgs = []
for gi in xrange(dcgan.num_gen):
_imgs, _ps = [], []
for _ in range(10):
sample_z = np.random.uniform(-1, 1, size=(batch_size, z_dim))
sampled_imgs, sampled_probs = session.run([dcgan.generation["gen_samplers"][gi*dcgan.num_mcmc],
dcgan.generation["d_probs"][gi*dcgan.num_mcmc]],
feed_dict={dcgan.z: sample_z})
_imgs.append(sampled_imgs)
_ps.append(sampled_probs)
sampled_imgs = np.concatenate(_imgs); sampled_probs = np.concatenate(_ps)
all_sampled_imgs.append([sampled_imgs, sampled_probs[:, 1:].sum(1)])
s_acc, ss_acc, ss1_acc= get_test_accuracy(session, dcgan, test_image_batches, test_label_batches)
d_std, d1_std = get_test_variance(session, dcgan, dataset_get_variance, batch_size, z_dim, fileNameV)
print "Semi-sup 1 (high temperature) classification before correction acc: %.2f" % (ss1_acc)
print "Semi-sup classification acc: %.2f" % (ss_acc)
with open(fileNameAcc, 'a') as f_accuracy:
f_accuracy.write("Semi-sup 1 (high temperature) classification before correction acc: %.2f \n" % (ss1_acc))
f_accuracy.write("Semi-sup classification acc: %.2f \n" % (ss_acc))
f_accuracy.write("============================================ \n")
# moving window average
corrections[0].append(0.5 * d_std**2)
corrections[1].append(0.5 * d1_std**2)
# exponential smoothing average
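# i.e. mv <- (1 - alpha) * mv + alpha * (0.5 * std**2), seeded with the first
# observation on the initial update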
if mv_corrections[0] == sys.float_info.max:
mv_corrections[0] = 0.5 * d_std**2
else:
mv_corrections[0] = (1 - args.alpha) * mv_corrections[0] + args.alpha * 0.5 * d_std ** 2
if mv_corrections[1] == sys.float_info.max:
mv_corrections[1] = 0.5 * d1_std**2
else:
mv_corrections[1] = (1 - args.alpha) * mv_corrections[1] + args.alpha * 0.5 * d1_std ** 2
# print "Sup classification acc: %.2f" % (s_acc)
print "Semi-sup 1 (high temperature) classification acc: %.2f" % (ss1_acc)
print "Semi-sup classification acc: %.2f" % (ss_acc)
with open(fileNameAcc, 'a') as f_accuracy:
f_accuracy.write("Semi-sup 1 (high temperature) classification acc: %.2f \n" % (ss1_acc))
f_accuracy.write("Semi-sup classification acc: %.2f \n" % (ss_acc))
f_accuracy.write("============================================ \n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Script to run Bayesian GAN experiments')
parser.add_argument('--out_dir',
type=str,
required=True,
help="location of outputs (root location, which exists)")
parser.add_argument('--fileName',
type=str,
required=True,
help="location of outputs (root location, which exists)")
parser.add_argument('--n_save',
type=int,
default=100,
help="every n_save iteration save samples and weights")
parser.add_argument('--z_dim',
type=int,
default=100,
help='dim of z for generator')
parser.add_argument('--gen_observed',
type=int,
default=1000,
help='number of data "observed" by generator')
parser.add_argument('--data_path',
type=str,
default='./datasets/',
help='path to where the datasets live')
parser.add_argument('--dataset',
type=str,
default="mnist",
help='dataset name, e.g. mnist')
parser.add_argument('--batch_size',
type=int,
default=64,
help="minibatch size")
parser.add_argument('--prior_std',
type=float,
default=1.0,
help="NN weight prior std.")
parser.add_argument('--numz',
type=int,
dest="J",
default=1,
help="number of samples of z to integrate it out")
parser.add_argument('--num_mcmc',
type=int,
dest="M",
default=1,
help="number of MCMC NN weight samples per z")
parser.add_argument('--N',
type=int,
default=128,
help="number of supervised data samples")
parser.add_argument('--semi_supervised',
action="store_true",
help="do semi-supervised learning")
parser.add_argument('--train_iter',
type=int,
default=50000,
help="number of training iterations")
parser.add_argument('--wasserstein',
action="store_true",
help="wasserstein GAN")
parser.add_argument('--ml_ensemble',
type=int,
default=0,
help="if specified, an ensemble of --ml_ensemble ML DCGANs is trained")
parser.add_argument('--save_samples',
action="store_true",
help="wether to save generated samples")
parser.add_argument('--save_weights',
action="store_true",
help="wether to save weights")
parser.add_argument('--random_seed',
type=int,
default=None,
help="random seed")
parser.add_argument('--lr',
type=float,
default=0.003,
help="learning rate")
parser.add_argument('--lr_decay',
type=float,
default=1.003,
help="learning rate")
parser.add_argument('--optimizer',
type=str,
default="sgd",
help="optimizer --- 'adam' or 'sgd'")
parser.add_argument('--gpu',
type=str,
default="0",
help="GPU number")
# Parallel chain hyperparameters
parser.add_argument('-chains',
default=2,
type=int,
help='Total number of chains')
parser.add_argument('-types',
default='greedy',
type=str,
help='swap type: "greedy" (low-T chain copies the high-T chain) or "swap" (the two chains swap)')
parser.add_argument('-invT',
default=1,
type=float,
help='Inverse temperature for high temperature chain')
parser.add_argument('-Tgap',
default=1,
type=float,
help='Temperature gap between chains')
parser.add_argument('-LRgap',
default=1,
type=float,
help='Learning rate gap between chains')
parser.add_argument('-Egap',
default=1.025,
type=float,
help='Energy gap between partitions')
parser.add_argument('-anneal',
default=1.0,
type=float,
help='simulated annealing factor')
parser.add_argument('-lr_anneal',
default=0.992,
type=float,
help='lr simulated annealing factor')
parser.add_argument('-bias_multi',
default=5.0,
type=float,
help='multiplier for bias')
parser.add_argument('-alpha',
default=0.1,
type=float,
help='Constant for exponential smoothness')
parser.add_argument('-repeats',
default=60,
type=float,
help='Number of samples to estimate sample std')
parser.add_argument('-baseline',
default=0,
type=int,
help='Baseline (1) and Two Chain (0)')
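    # Hypothetical example invocation (the script name and the values below are purely
    # illustrative, not taken from the repository; all flags exist in the parser above):
    #   python bgan_semi_sup.py --out_dir ./results --fileName acc.txt --dataset cifar \
    #       --semi_supervised --N 4000 --train_iter 20000 -chains 2 -invT 1.0 -types greedy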
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu
if args.random_seed is not None:
#np.random.seed(args.random_seed)
np.random.seed(2222)
tf.set_random_seed(args.random_seed)
if not os.path.exists(args.out_dir):
print "Creating %s" % args.out_dir
os.makedirs(args.out_dir)
args.out_dir = os.path.join(args.out_dir, "bgan_%s_%i" % (args.dataset, int(time.time())))
os.makedirs(args.out_dir)
import pprint
with open(os.path.join(args.out_dir, "hypers.txt"), "w") as hf:
hf.write("Hyper settings:\n")
hf.write("%s\n" % (pprint.pformat(args.__dict__)))
celeb_path = os.path.join(args.data_path, "celebA")
cifar_path = os.path.join(args.data_path, "cifar10")
cifar100_path = os.path.join(args.data_path, "cifar100")
svhn_path = os.path.join(args.data_path, "svhn")
mnist_path = os.path.join(args.data_path, "mnist") # can leave empty, data will self-populate
imagenet_path = os.path.join(args.data_path, args.dataset)
if args.dataset == "mnist":
dataset = MnistDataset(mnist_path)
elif args.dataset == "celeb":
dataset = CelebDataset(celeb_path)
elif args.dataset == "cifar100":
dataset = Cifar100(cifar100_path)
dataset_get_variance = Cifar100(cifar100_path)
elif args.dataset == "cifar":
dataset = Cifar10(cifar_path)
dataset_get_variance = Cifar10(cifar_path)
elif args.dataset == "svhn":
dataset = SVHN(svhn_path)
dataset_get_variance = SVHN(svhn_path)
elif "imagenet" in args.dataset:
num_classes = int(args.dataset.split("_")[-1])
dataset = ImageNet(imagenet_path, num_classes)
else:
raise RuntimeError("invalid dataset %s" % args.dataset)
### main call
b_dcgan(dataset, dataset_get_variance, args)
| 2.328125
| 2
|
jssmanifests/models.py
|
aysiu/manana
| 9
|
12777943
|
<filename>jssmanifests/models.py
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User, Group
from reports.models import BusinessUnit
from manifests.models import Manifest
from datetime import datetime, timedelta
from jssmanifests.jsshelper import fetch_account_sites
from guardian.shortcuts import assign_perm
import jss
from string import atoi
# Create your models here.
try:
BUSINESS_UNITS_ENABLED = settings.BUSINESS_UNITS_ENABLED
except AttributeError:
BUSINESS_UNITS_ENABLED = False
try:
JSS_MAIN_SITE_NAME = settings.JSS_MAIN_SITE_NAME
except AttributeError:
JSS_MAIN_SITE_NAME = 'Full Site Access'
## Helper functions
# This should live in jsshelper, but that would introduce a circular dependency.
# So ...
def sync_sites():
# Sync time
# This is a little icky
if JSSSite.objects.count() > 0:
sync_obj = JSSSite.objects.earliest('last_refresh')
try:
site_cache = timedelta(seconds=settings.JSS_SITE_CACHE_TIME)
except AttributeError:
site_cache = timedelta(seconds=300)
# Check freshness
now = datetime.now()
delta = now - sync_obj.last_refresh
if delta < site_cache:
return
jss_connection = jss.JSS(user=settings.JSS_READONLY_USER,
password=<PASSWORD>.<PASSWORD>,
url=settings.JSS_URL,
ssl_verify=settings.JSS_VERIFY_CERT)
jss_sites = jss_connection.Site()
jss_site_dict = {}
for jss_site in jss_sites:
jss_site_dict[ jss_site['id'] ] = jss_site
local_sites = JSSSite.objects.filter(jsssiteid__exact=jss_site['id'])
if local_sites.count() > 1:
# If there is more than one matching id, throw *all*
# away, and start again
local_sites.delete() ## XXX this needs testing
if local_sites.count() == 1:
site = local_sites[0]
if site.jsssitename != jss_site['name']:
site.jsssitename = jss_site['name']
site.save()
site.businessunit.name = site.jsssitename
site.businessunit.save()
else:
group, created = Group.objects.get_or_create(
name = 'JSS Site Access: %s' % jss_site['name'] )
if BUSINESS_UNITS_ENABLED:
bu, created = BusinessUnit.objects.get_or_create( name = jss_site['name'] )
site = JSSSite(jsssiteid = jss_site['id'],
jsssitename = jss_site['name'],
businessunit = bu,
group = group )
else:
site = JSSSite(jsssiteid = jss_site['id'],
jsssitename = jss_site['name'],
group = group )
site.save()
assign_perm('can_view_jsssite', group, site)
seen_full_site = False
for local_site in JSSSite.objects.all():
# Try not to remove the full site, hey ?
if local_site.jsssiteid < 0 and seen_full_site:
raise ValueError('Can only have one full site (i.e. with a negative jsssiteid)')
elif local_site.jsssiteid < 0:
seen_full_site = True
local_site.jsssitename = settings.JSS_MAIN_SITE_NAME
if BUSINESS_UNITS_ENABLED:
bu, created = BusinessUnit.objects.get_or_create(
name = JSS_MAIN_SITE_NAME )
local_site.businessunit = bu
local_site.save()
continue
local_site_id = '%d' % local_site.jsssiteid
if not jss_site_dict.has_key( local_site_id ):
# Should we also delete the business unit and group ?
# (Not sure; currently not, as this seems safest, but it may
# not be what people want/expect; perhaps this should be an
# option in the future)
local_site.delete()
if not seen_full_site:
group, created = Group.objects.get_or_create(
name = 'JSS Site Access: %s' % settings.JSS_MAIN_SITE_NAME )
if BUSINESS_UNITS_ENABLED:
bu, created = BusinessUnit.objects.get_or_create(
name = settings.JSS_MAIN_SITE_NAME )
site = JSSSite(jsssiteid = -1,
jsssitename = settings.JSS_MAIN_SITE_NAME,
businessunit = bu,
group = group )
else:
site = JSSSite(jsssiteid = -1,
jsssitename = settings.JSS_MAIN_SITE_NAME,
group = group )
site.save()
assign_perm('can_view_jsssite', group, site)
return
class JSSSite(models.Model):
jsssiteid = models.IntegerField('JSS Site ID')
    jsssitename = models.CharField('JSS Site Name', max_length=1024)
# Um ... this might be currently pointless
last_refresh = models.DateTimeField(auto_now=True)
# /Um
#
# Allow business units to be used, if enabled (and cope if not)
#
businessunit = models.ForeignKey(BusinessUnit, null=True,
blank=True, default=None,
verbose_name = 'Business Unit' )
# We are using groups to model permissions, so make sure each
# site has a group
group = models.OneToOneField(Group, verbose_name = 'Related Group')
class Meta:
permissions = (
('can_view_jsssite', 'Can view JSS Site'),
('can_edit_jsssite', 'Can edit JSS Site'),
)
verbose_name = 'JSS Site'
verbose_name_plural = 'JSS Sites'
def __unicode__(self):
return '%s (JSS Site %d)' % (self.jsssitename, self.jsssiteid)
class JSSComputerAttributeType(models.Model):
label = models.CharField('Type Label', max_length=1024)
    # This is the XPath expression used to retrieve data from a JSS computer
    # record (i.e. data fetched via a Computer JSS record),
    # e.g. //computer/general/site/name/text()
computer_xpath = models.CharField('XPath expression', max_length=1024)
# The following two variables are for use in order to give options
    # to a user. The idea is that we will query the JSS using the
# api_path to get an XML result. The api_xpath then lets us extract
# suitable values to present to the user to choose on a per-mapping basis
api_path = models.CharField('API URI (api path to this object)',
max_length=1024,
blank=True)
api_xpath = models.CharField(
'API Xpath (expression to extract data from the API object)',
max_length=1024,blank=True)
# For some JSS items, we need a key to pick out the correct value(s) to
# use - e.g. with an extension attribute we need to know the attribute
    # name and then pull out the value for comparison. This flag says
# whether or not a key is required; the default is that a key is not
# required
xpath_needs_key = models.BooleanField(
'Key required for data extraction from xpath',
default=False)
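    # Purely illustrative (hypothetical) values for the fields above, reusing the
    # site-name xpath from the example given earlier:
    #   computer_xpath  = "//computer/general/site/name/text()"
    #   api_path        = "sites"
    #   api_xpath       = "//site/name/text()"
    #   xpath_needs_key = False
    # so a mapping UI could offer the list of site names as candidate values.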
class Meta:
verbose_name = 'JSS Computer Attribute Type'
verbose_name_plural = 'JSS Computer Attribute Types'
def __unicode__(self):
return self.label
def dump_debug_xml(self,manifest):
if not manifest.has_key('jss_attribute_types'):
manifest['jss_attribute_types']=[]
if not manifest.has_key('jss_attribute_type_sets'):
manifest['jss_attribute_type_sets']={}
choices = self.jsscomputerattributemapping_set.all()
manifest['jss_attribute_types'].append("%s = %s choices" %
(self.label, choices.count() ) )
manifest['jss_attribute_type_sets'][self.label]=[]
choices_list = manifest['jss_attribute_type_sets'][self.label]
for ch in choices:
choices_list.append('%s' % (ch,) )
def get_data(self, computer, key):
rv = computer.xpath(self.computer_xpath, key=key)
return rv
class JSSComputerAttributeMapping(models.Model):
MANIFEST_ELEMENTS = [
('c', 'Catalog'),
('m', 'Manifest'),
('p', 'Package'),
]
PACKAGE_ACTIONS = [
('managed_installs', 'Managed installs'),
('managed_uninstalls', 'Managed uninstalls'),
('managed_updates', 'Managed updates'),
('optional_installs', 'Optional installs'),
]
jss_computer_attribute_type = \
models.ForeignKey(JSSComputerAttributeType,
verbose_name = 'Computer Attribute Type')
jss_computer_attribute_type.short_description = 'Computer Attribute Type'
jss_computer_attribute_key = models.CharField('Attribute Key',
max_length=1024, blank=True)
jss_computer_attribute_value = models.CharField('Attribute Value',
max_length=1024)
manifest_element_type = models.CharField('Manifest Element',
            choices=MANIFEST_ELEMENTS, max_length=1)
catalog_name = models.CharField('Catalog Name', max_length=1024,
blank=True)
package_name = models.CharField('Package Name',
max_length=1024, blank=True)
package_action = models.CharField('Package Action',
choices=PACKAGE_ACTIONS, blank=True, max_length=256)
manifest_name = models.CharField('Manifest Name', max_length=1024,
blank=True)
remove_from_xml = models.BooleanField('Remove from Manifest')
priority = models.IntegerField('Priority', default = 0)
jsssite = models.ForeignKey(JSSSite, verbose_name= 'JSS Site')
# This is to let people temporarily enable and disable mappings
enabled = models.BooleanField('Mapping enabled', default=True)
class Meta:
verbose_name = 'JSS Computer Attribute Mapping'
verbose_name_plural = 'JSS Computer Attribute Mappings'
permissions = (
('can_view_jsscomputerattributemapping', 'Can view JSS Computer Attribute Mappings'),
)
def __unicode__(self):
if self.jss_computer_attribute_type.xpath_needs_key:
return '%s: If %s matches %s then %s %s (applies to site %s)' \
% ( self.jss_computer_attribute_type.label,
self.jss_computer_attribute_key,
self.jss_computer_attribute_value,
self.action(),
self.mapping_description(),
self.jsssite.jsssitename)
return '%s: If %s matches %s then %s %s (applies to site %s)' \
% ( self.jss_computer_attribute_type.label,
self.jss_computer_attribute_type.label,
self.jss_computer_attribute_value,
self.action(),
self.mapping_description(),
self.jsssite.jsssitename )
def action(self):
if self.remove_from_xml:
return 'remove'
return 'add'
def mapping_description(self):
if self.manifest_element_type == 'c':
type = 'catalog'
element = self.catalog_name
if self.manifest_element_type == 'm':
type = 'manifest'
element = self.manifest_name
if self.manifest_element_type == 'p':
type = 'package'
element = '%s to %s' % ( self.package_name, self.package_action)
return '%s: %s' % (type, element)
def is_in_site(self,site_id):
# Every mapping *should* have a site, but just to be sure:
try:
our_siteid = self.jsssite.jsssiteid
except AttributeError:
return False # If no site set, we do not belong to it
if our_siteid < 0:
# i.e. the full JSS Site (which has id -1 in this app)
return True
site_id = atoi(site_id) # Convert to int to be sure
return (site_id == our_siteid)
def computer_match(self,computer):
# Check that we are in the correct site
site_id = computer.findtext('general/site/id')
if not self.is_in_site(site_id):
return False
        elements = self.jss_computer_attribute_type.get_data(computer, self.jss_computer_attribute_key)
for value in elements:
if value == self.jss_computer_attribute_value:
return True
return False
def apply_mapping(self,manifest):
if not self.enabled:
return
if self.manifest_element_type == 'c':
self.update_manifest_catalog(manifest)
if self.manifest_element_type == 'm':
self.update_manifest_manifest(manifest)
if self.manifest_element_type == 'p':
self.update_manifest_package(manifest)
return
    # Question: should we always remove, then add (so that priorities have
    # a real effect)?
    def _update_list(self, lst, name):
        if self.remove_from_xml:
            while lst.count(name) > 0:
                lst.remove(name)
            return
        if lst.count(name) <= 0:
            lst.append(name)
        return
def update_manifest_catalog(self, manifest):
if not manifest.has_key('catalogs'):
manifest['catalogs'] = []
self._update_list(manifest['catalogs'], self.catalog_name)
return
def update_manifest_manifest(self, manifest):
if not manifest.has_key('included_manifests'):
manifest['included_manifests'] = []
self._update_list(manifest['included_manifests'], self.manifest_name)
return
def update_manifest_package(self, manifest):
if not manifest.has_key(self.package_action):
manifest[self.package_action] = []
self._update_list(manifest[self.package_action], self.package_name)
return
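    # Sketch of the effect of apply_mapping on a manifest dict (hypothetical values):
    #   before: {'catalogs': ['production']}
    #   a mapping with manifest_element_type='c', catalog_name='testing' and
    #   remove_from_xml=False yields {'catalogs': ['production', 'testing']};
    #   with remove_from_xml=True it would instead strip 'testing' if present.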
#
# A glue class for permissions modelling
#
class JSSUser(models.Model):
user = models.OneToOneField(User)
sites = models.ManyToManyField(JSSSite,blank=True,
verbose_name='JSS Site(s)')
    # We assume that user.name == JSS username, but looking up
    # information from the JSS (at least in version 9.72) via userid
    # gives a richer set of data.
    # Sigh; one gets different information from calling account with
    # name=%s and with userid=%s:
    # the former seems not to contain the 'Group access' string (even
    # when the user is set up for group access), only 'Full Access'
    # or 'Site Access'.
    # It also doesn't provide the group membership info.
jssuserid = models.IntegerField('JSS User ID')
last_site_refresh = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.user.username
class Meta:
verbose_name = 'JSS User'
verbose_name_plural = 'JSS Users'
def site_permissions(self):
try:
user_cache = timedelta(seconds=settings.JSS_USER_CACHE_TIME)
except AttributeError:
user_cache = timedelta(seconds=300)
# Check freshness
now = datetime.now()
delta = now - self.last_site_refresh
if delta < user_cache:
return
# Step 0: Update sites with the JSS
sync_sites()
siteids = fetch_account_sites(self.user.username,self.jssuserid)
# Step 1: Revoke membership of all *JSS* related groups
        # This could probably be more efficient (i.e. look at
# what needs to change and only change that)
for group in self.user.groups.filter(jsssite__isnull=False):
group.user_set.remove(self.user)
for site in self.sites.all():
self.sites.remove(site)
# If the user has full site access, add them to all JSS Related groups
if siteids == []:
for group in Group.objects.filter(jsssite__isnull=False):
group.user_set.add(self.user)
for site in JSSSite.objects.all():
self.sites.add(site)
# If the user has some site access, add them to the right groups
if siteids is not None:
for sid in siteids:
jsssite = JSSSite.objects.filter(jsssiteid=sid)[0]
jsssite.group.user_set.add(self.user)
self.sites.add(jsssite)
# Add them to the full site too
jsssite = JSSSite.objects.filter(jsssiteid__lt=0)[0]
jsssite.group.user_set.add(self.user)
self.sites.add(jsssite)
        # Update timestamp (as a marker)
self.save()
| 2.015625
| 2
|
leetcode/dp/removeInvalidParentheses.py
|
BennyJane/algorithm_mad
| 0
|
12777944
|
from typing import List
# Classic problem: several solution approaches are compared; plenty of techniques involved
# 301. Remove Invalid Parentheses (Hard)
# https://leetcode-cn.com/problems/remove-invalid-parentheses/
"""
Approaches:
Brute force: enumerate the valid subsequences together with how many characters each
    one deletes, then keep only those with the minimum number of deletions
Brute force: first compute the minimum number of characters to delete, then find the
    valid subsequences of the target length and count them
Brute force:
"""
class Solution:
    # The input is short (length at most 25), so a brute-force / backtracking search is feasible
def removeInvalidParentheses(self, s: str) -> List[str]:
        # Count how many left and right parentheses need to be removed
left_rm, right_rm = 0, 0
        # Simulate stack operations with two counters
        # TODO this single pass both checks whether the parentheses are valid and
        # counts the minimum number of invalid parentheses that must be removed
for c in s:
if c == "(":
left_rm += 1
elif c == ")": # 弹出右括号,需要考虑左括号数量
if left_rm == 0:
right_rm += 1
if left_rm > 0:
left_rm -= 1
        # Backtracking
valid_expressions = set()
n = len(s)
def dfs(index, leftRemove, rightRemove, exp: List[str]):
if index == n:
if leftRemove == 0 and rightRemove == 0 and self.isValid(exp):
valid_expressions.add("".join(exp))
return
cur = s[index]
            # Case 1: delete the current character
if cur == "(" and leftRemove > 0:
dfs(index + 1, leftRemove - 1, rightRemove, exp)
if cur == ")" and rightRemove > 0:
dfs(index + 1, leftRemove, rightRemove - 1, exp)
            # Case 2: keep the current character
exp.append(cur)
if cur != "(" and cur != ")":
dfs(index + 1, leftRemove, rightRemove, exp)
elif cur == "(":
dfs(index + 1, leftRemove, rightRemove, exp)
elif cur == ")":
dfs(index + 1, leftRemove, rightRemove, exp)
            # backtrack: undo the choice
exp.pop()
dfs(0, left_rm, right_rm, [])
return list(valid_expressions)
def isValid(self, word: str):
count = 0
for c in word:
if c == "(":
count += 1
if c == ")":
if count == 0:
return False
else:
count -= 1
return count == 0
class Solution1:
def removeInvalidParentheses(self, s: str) -> List[str]:
res = []
lremove, rremove = 0, 0
for c in s:
if c == '(':
lremove += 1
elif c == ')':
if lremove == 0:
rremove += 1
else:
lremove -= 1
def isValid(str):
cnt = 0
for c in str:
if c == '(':
cnt += 1
elif c == ')':
cnt -= 1
if cnt < 0:
return False
return cnt == 0
def helper(s, start, lcount, rcount, lremove, rremove):
if lremove == 0 and rremove == 0:
if isValid(s):
res.append(s)
return
for i in range(start, len(s)):
                # FIXME pruning: among consecutive identical characters, only one of them (the last) needs to be considered
if i > start and s[i] == s[i - 1]:
continue
                # If the remaining characters cannot supply the required number of removals, stop early
if lremove + rremove > len(s) - i:
break
                # Try removing one left parenthesis
                if lremove > 0 and s[i] == '(':
                    helper(s[:i] + s[i + 1:], i, lcount, rcount, lremove - 1, rremove)
                # Try removing one right parenthesis
                if rremove > 0 and s[i] == ')':
                    helper(s[:i] + s[i + 1:], i, lcount, rcount, lremove, rremove - 1)
                # TODO useless operation????
                # Count the parentheses already present in the current string
if s[i] == '(':
lcount += 1
                elif s[i] == ')':  # FIXME no need to consider the '(' count here
if lcount > 0:
lcount -= 1
else:
rcount += 1
                # If the current count of ')' exceeds the count of '(', the prefix is invalid; stop.
if rcount > lcount:
break
helper(s, 0, 0, 0, lremove, rremove)
return res
# Breadth-first search: each level removes one more parenthesis; stop at the first level that yields any valid string
class Solution3:
def removeInvalidParentheses(self, s: str) -> List[str]:
def isValid(s):
count = 0
for c in s:
if c == '(':
count += 1
elif c == ')':
count -= 1
if count < 0:
return False
return count == 0
ans = []
currSet = set([s])
while True:
for ss in currSet:
if isValid(ss):
ans.append(ss)
if len(ans) > 0:
return ans
nextSet = set()
for ss in currSet:
for i in range(len(ss)):
                    if i > 0 and ss[i] == ss[i - 1]:  # skip duplicate removals within the same run
continue
if ss[i] == '(' or ss[i] == ')':
nextSet.add(ss[:i] + ss[i + 1:])
currSet = nextSet
return ans
# Enumerate sub-states (bitmasks over the positions of '(' and ')' to remove)
class Solution4:
def removeInvalidParentheses(self, s: str) -> List[str]:
def checkValid(str, lmask, left, rmask, right):
pos1, pos2 = 0, 0
cnt = 0
for i in range(len(str)):
if pos1 < len(left) and i == left[pos1]:
if lmask & (1 << pos1) == 0:
cnt += 1
pos1 += 1
elif pos2 < len(right) and i == right[pos2]:
if rmask & (1 << pos2) == 0:
cnt -= 1
if cnt < 0:
return False
pos2 += 1
return cnt == 0
def recoverStr(lmask, left, rmask, right):
pos1, pos2 = 0, 0
res = ""
for i in range(len(s)):
if pos1 < len(left) and i == left[pos1]:
if lmask & (1 << pos1) == 0:
res += s[i]
pos1 += 1
elif pos2 < len(right) and i == right[pos2]:
if rmask & (1 << pos2) == 0:
res += s[i]
pos2 += 1
else:
res += s[i]
return res
def countBit(x):
res = 0
while x != 0:
x = x & (x - 1)
res += 1
return res
lremove, rremove = 0, 0
left, right = [], []
ans = []
cnt = set()
for i in range(len(s)):
if s[i] == '(':
left.append(i)
lremove += 1
elif s[i] == ')':
right.append(i)
if lremove == 0:
rremove += 1
else:
lremove -= 1
m, n = len(left), len(right)
maskArr1, maskArr2 = [], []
for i in range(1 << m):
if countBit(i) != lremove:
continue
maskArr1.append(i)
for i in range(1 << n):
if countBit(i) != rremove:
continue
maskArr2.append(i)
for mask1 in maskArr1:
for mask2 in maskArr2:
if checkValid(s, mask1, left, mask2, right):
cnt.add(recoverStr(mask1, left, mask2, right))
return [val for val in cnt]
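# Illustrative quick check (not part of the original file): for "()())(" the minimal
# removals are one '(' and one ')', and the answer set is {"(())", "()()"}.
if __name__ == "__main__":
    s = "()())("
    print(sorted(Solution().removeInvalidParentheses(s)))
    print(sorted(Solution3().removeInvalidParentheses(s)))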
| 3.703125
| 4
|
CodeStomp/AmyCare/fit/migrations/0005_auto_20201123_1627.py
|
mayank712jindal/Code-Innovation-Series-ChitkaraUniversity
| 0
|
12777945
|
# Generated by Django 3.1.3 on 2020-11-23 10:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fit', '0004_auto_20201123_1625'),
]
operations = [
migrations.AlterField(
model_name='disease',
name='med1',
field=models.CharField(blank=True, default='', max_length=60),
),
migrations.AlterField(
model_name='disease',
name='med2',
field=models.CharField(blank=True, default='', max_length=60),
),
migrations.AlterField(
model_name='disease',
name='med3',
field=models.CharField(blank=True, default='', max_length=60),
),
migrations.AlterField(
model_name='disease',
name='med4',
field=models.CharField(blank=True, default='', max_length=60),
),
migrations.AlterField(
model_name='disease',
name='med5',
field=models.CharField(blank=True, default='', max_length=60),
),
migrations.AlterField(
model_name='disease',
name='med6',
field=models.CharField(blank=True, default='', max_length=60),
),
]
| 1.515625
| 2
|
sgit/commit.py
|
russelldavis/SublimeGit
| 310
|
12777946
|
<gh_stars>100-1000
# coding: utf-8
from functools import partial
import sublime
from sublime_plugin import WindowCommand, TextCommand, EventListener
from .util import find_view_by_settings, noop, get_setting
from .cmd import GitCmd
from .helpers import GitStatusHelper
from .status import GIT_WORKING_DIR_CLEAN
GIT_COMMIT_VIEW_TITLE = "COMMIT_EDITMSG"
GIT_COMMIT_VIEW_SYNTAX = 'Packages/SublimeGit/syntax/SublimeGit Commit Message.tmLanguage'
GIT_NOTHING_STAGED = u'No changes added to commit. Use s on files/sections in the status view to stage changes.'
GIT_COMMIT_TEMPLATE = u"""{old_msg}
# Please enter the commit message for your changes. Lines starting
# with '#' will be ignored, and an empty message aborts the commit.
{status}"""
GIT_AMEND_PUSHED = (u"It is discouraged to rewrite history which has already been pushed. "
u"Are you sure you want to amend the commit?")
CUT_LINE = u"------------------------ >8 ------------------------\n"
CUT_EXPLANATION = u"# Do not touch the line above.\n# Everything below will be removed.\n"
class GitCommit(object):
windows = {}
class GitCommitWindowCmd(GitCmd, GitStatusHelper):
@property
def is_verbose(self):
return get_setting('git_commit_verbose', False)
def get_commit_template(self, repo, add=False, amend=False):
cmd = ['commit', '--dry-run', '--status',
'--all' if add else None,
'--amend' if amend else None,
'--verbose' if self.is_verbose else None]
exit, stdout, stderr = self.git(cmd, cwd=repo)
stderr = stderr.strip()
if stderr:
for line in stderr.splitlines():
stdout += "# %s\n" % line
old_msg = ''
if amend:
old_msg = self.git_lines(['rev-list', '--format=%B', '--max-count=1', 'HEAD'], cwd=repo)
old_msg = "%s\n" % "\n".join(old_msg[1:])
if self.is_verbose and CUT_LINE not in stdout:
comments = []
other = []
for line in stdout.splitlines():
if line.startswith('#'):
comments.append(line)
else:
other.append(line)
status = "\n".join(comments)
status += "\n# %s" % CUT_LINE
status += CUT_EXPLANATION
status += "\n".join(other)
else:
status = stdout
return GIT_COMMIT_TEMPLATE.format(status=status, old_msg=old_msg)
def show_commit_panel(self, content):
panel = self.window.get_output_panel('git-commit')
panel.run_command('git_panel_write', {'content': content})
self.window.run_command('show_panel', {'panel': 'output.git-commit'})
class GitCommitCommand(WindowCommand, GitCommitWindowCmd):
"""
Documentation coming soon.
"""
def run(self, add=False):
repo = self.get_repo()
if not repo:
return
staged = self.has_staged_changes(repo)
dirty = self.has_unstaged_changes(repo)
if not add and not staged:
return sublime.error_message(GIT_NOTHING_STAGED)
elif add and (not staged and not dirty):
return sublime.error_message(GIT_WORKING_DIR_CLEAN)
view = find_view_by_settings(self.window, git_view='commit', git_repo=repo)
if not view:
view = self.window.new_file()
view.set_name(GIT_COMMIT_VIEW_TITLE)
view.set_syntax_file(GIT_COMMIT_VIEW_SYNTAX)
view.set_scratch(True)
view.settings().set('git_view', 'commit')
view.settings().set('git_repo', repo)
GitCommit.windows[view.id()] = (self.window, add, False)
self.window.focus_view(view)
template = self.get_commit_template(repo, add=add)
view.run_command('git_commit_template', {'template': template})
class GitCommitAmendCommand(GitCommitWindowCmd, WindowCommand):
"""
Documentation coming soon.
"""
def run(self):
repo = self.get_repo()
if not repo:
return
unpushed = self.git_exit_code(['diff', '--exit-code', '--quiet', '@{upstream}..'], cwd=repo)
if unpushed == 0:
if not sublime.ok_cancel_dialog(GIT_AMEND_PUSHED, 'Amend commit'):
return
view = find_view_by_settings(self.window, git_view='commit', git_repo=repo)
if not view:
view = self.window.new_file()
view.set_name(GIT_COMMIT_VIEW_TITLE)
view.set_syntax_file(GIT_COMMIT_VIEW_SYNTAX)
view.set_scratch(True)
view.settings().set('git_view', 'commit')
view.settings().set('git_repo', repo)
GitCommit.windows[view.id()] = (self.window, False, True)
self.window.focus_view(view)
template = self.get_commit_template(repo, amend=True)
view.run_command('git_commit_template', {'template': template})
class GitCommitTemplateCommand(TextCommand):
def is_visible(self):
return False
def run(self, edit, template=''):
if self.view.size() > 0:
self.view.erase(edit, sublime.Region(0, self.view.size()))
self.view.insert(edit, 0, template)
self.view.sel().clear()
self.view.sel().add(sublime.Region(0))
class GitCommitEventListener(EventListener):
_lpop = False
def mark_pedantic(self, view):
if view.settings().get('git_view') == 'commit' or view.file_name() == 'COMMIT_EDITMSG':
# Header lines should be a max of 50 chars
view.erase_regions('git-commit.header')
firstline = view.line(view.text_point(0, 0))
if firstline.end() > 50 and not view.substr(firstline).startswith('#'):
view.add_regions('git-commit.header', [sublime.Region(50, firstline.end())], 'invalid', 'dot')
# The second line should be empty
view.erase_regions('git-commit.line2')
secondline = view.line(view.text_point(1, 0))
if secondline.end() - secondline.begin() > 0 and not view.substr(secondline).startswith('#'):
view.add_regions('git-commit.line2', [secondline], 'invalid', 'dot')
# Other lines should be at most 72 chars
view.erase_regions('git-commit.others')
for l in view.lines(sublime.Region(view.text_point(2, 0), view.size())):
if view.substr(l).startswith('#'):
break
if l.end() - l.begin() > 72:
view.add_regions('git-commit.others', [sublime.Region(l.begin() + 72, l.end())], 'invalid', 'dot')
def on_activated(self, view):
if sublime.version() < '3000' and get_setting('git_commit_pedantic') is True:
self.mark_pedantic(view)
def on_modified(self, view):
if sublime.version() < '3000' and get_setting('git_commit_pedantic') is True:
self.mark_pedantic(view)
def on_modified_async(self, view):
if get_setting('git_commit_pedantic') is True:
self.mark_pedantic(view)
def on_activated_async(self, view):
if get_setting('git_commit_pedantic') is True:
self.mark_pedantic(view)
def on_close(self, view):
if view.settings().get('git_view') == 'commit' and view.id() in GitCommit.windows:
message = view.substr(sublime.Region(0, view.size()))
window, add, amend = GitCommit.windows[view.id()]
repo = view.settings().get('git_repo')
window.run_command('git_commit_perform', {'message': message, 'add': add, 'amend': amend, 'repo': repo})
class GitCommitPerformCommand(WindowCommand, GitCommitWindowCmd):
def run(self, repo, message, add=False, amend=False):
cmd = ['commit', '--cleanup=strip',
'--all' if add else None,
'--amend' if amend else None,
'--verbose' if self.is_verbose else None, '-F', '-']
exit, stdout, stderr = self.git(cmd, stdin=message, cwd=repo)
self.show_commit_panel(stdout if exit == 0 else stderr)
self.window.run_command('git_status', {'refresh_only': True})
def is_visible(self):
return False
class GitCommitSaveCommand(TextCommand):
def is_visible(self):
return False
def run(self, edit):
if self.view.settings().get('git_view') == 'commit' and self.view.id() in GitCommit.windows:
return
self.view.run_command('save')
class GitQuickCommitCommand(WindowCommand, GitCommitWindowCmd):
"""
Quickly commit changes with a one-line commit message.
If there are any staged changes, only those changes will be added. If there
    are no staged changes, any changed files that git knows about will be added
in the commit.
If the working directory is clean, an error will be shown indicating it.
After entering the commit message, press enter to commit, or esc to cancel.
An empty commit message will also result in the commit being cancelled.
"""
def run(self):
repo = self.get_repo()
if not repo:
return
staged = self.has_staged_changes(repo)
dirty = self.has_unstaged_changes(repo)
if not staged and not dirty:
sublime.error_message(GIT_WORKING_DIR_CLEAN.capitalize())
return
self.window.show_input_panel("Commit message:", '', partial(self.on_commit_message, repo), noop, noop)
def on_commit_message(self, repo, msg=None):
if not msg:
msg = ''
cmd = ['commit', '-F', '-'] if self.has_staged_changes(repo) else ['commit', '-a', '-F', '-']
stdout = self.git_string(cmd, stdin=msg, cwd=repo)
self.show_commit_panel(stdout)
self.window.run_command('git_status', {'refresh_only': True})
class GitQuickCommitCurrentFileCommand(TextCommand, GitCmd, GitStatusHelper):
"""
Documentation coming soon.
"""
def run(self, edit):
filename = self.view.file_name()
if not filename:
sublime.error_message("Cannot commit a file which has not been saved.")
return
repo = self.get_repo()
if not repo:
return
if not self.file_in_git(repo, filename):
if sublime.ok_cancel_dialog("The file %s is not tracked by git. Do you want to add it?" % filename, "Add file"):
exit, stdout, stderr = self.git(['add', '--force', '--', filename], cwd=repo)
if exit == 0:
sublime.status_message('Added %s' % filename)
else:
sublime.error_message('git error: %s' % stderr)
else:
return
self.view.window().show_input_panel("Commit message:", '', partial(self.on_commit_message, repo, filename), noop, noop)
def on_commit_message(self, repo, filename, msg=None):
if not msg:
msg = ''
# run command
cmd = ['commit', '-F', '-', '--only', '--', filename]
stdout = self.git_string(cmd, stdin=msg, cwd=repo)
# show output panel
panel = self.view.window().get_output_panel('git-commit')
panel.run_command('git_panel_write', {'content': stdout})
self.view.window().run_command('show_panel', {'panel': 'output.git-commit'})
# update status if necessary
self.view.window().run_command('git_status', {'refresh_only': True})
| 2.296875
| 2
|
oled_ip.py
|
zlite/OLED_IP
| 0
|
12777947
|
# For use with I2C OLED screens.
# This requires the Adafruit Circuit Python OLED library, which supersedes earlier Adafruit OLED libraries
# Install it with `pip install adafruit-circuitpython-ssd1306`
import time
from subprocess import check_output
from board import SCL, SDA
import busio
from PIL import Image, ImageDraw, ImageFont
import adafruit_ssd1306
def get_ip():
cmd = "hostname -I | cut -d\' \' -f1"
return check_output(cmd, shell=True).decode("utf-8").strip()
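# Note (illustrative): get_ip() returns the first address reported by `hostname -I`,
# e.g. "192.168.1.42" (example value), or an empty string while no network is up,
# which is what the search loop below relies on.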
# Create the I2C interface.
i2c = busio.I2C(SCL, SDA)
# Create the SSD1306 OLED class.
# The first two parameters are the pixel width and pixel height. Change these
# to the right size for your display!
disp = adafruit_ssd1306.SSD1306_I2C(128, 32, i2c)
# Clear display.
disp.fill(0)
disp.show()
# Create blank image for drawing.
# Make sure to create image with mode '1' for 1-bit color.
width = disp.width
height = disp.height
image = Image.new("1", (width, height))
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Draw a black filled box to clear the image.
draw.rectangle((0, 0, width, height), outline=0, fill=0)
# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
padding = -2
top = padding
bottom = height - padding
# Move left to right keeping track of the current x position for drawing shapes.
x = 0
# Load default font.
font = ImageFont.load_default()
no_IP = True
draw.text((x, top + 0), "Starting search for WiFi", font=font, fill=255)
disp.image(image)
disp.show()
time.sleep(1)
while no_IP:
# Clear display.
draw.rectangle((0, 0, width, height), outline=0, fill=0)
disp.fill(0)
disp.show()
ip_addr = get_ip()
if ip_addr:
draw.text((x, top + 0), "IP: " + ip_addr, font=font, fill=255)
no_IP = False
else:
draw.text((x, top + 0), "Searching for WiFi", font=font, fill=255)
disp.image(image)
disp.show()
time.sleep(1)
| 2.859375
| 3
|
goodadmin/migrations/0004_stockpick_stockcode.py
|
waynezh86/tango_with_django_project
| 0
|
12777948
|
<reponame>waynezh86/tango_with_django_project
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-05-28 03:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('goodadmin', '0003_auto_20180525_2115'),
]
operations = [
migrations.AddField(
model_name='stockpick',
name='StockCode',
field=models.CharField(default='0000', max_length=50),
),
]
| 1.414063
| 1
|
scripts/plot_conv.py
|
wordsworthgroup/libode
| 11
|
12777949
|
import numpy as np
import matplotlib.pyplot as plt
#plt.rc('font', family='serif')
#plt.rc('text', usetex=True)
sol1err = np.fromfile('../out/sol1err')
sol2err = np.fromfile('../out/sol2err')
L2err = np.sqrt(sol2err**2 + sol1err**2)
h = np.fromfile('../out/h')
x = np.sort(h)
fig, ax = plt.subplots(1,1)
# draw reference lines of slope i on the log-log plot, anchored at the first data point
for i in range(1,10):
    hh = np.logspace(np.log10(min(h)), np.log10(max(h)), 2500)
    b = np.log10(L2err[0]/(10**(i*np.log10(h[0]))))
    y = 10**(i*np.log10(hh) + b)
    mask = (y > min(L2err))
    hh = hh[mask]
    y = y[mask]
    ax.loglog(hh, y, ':', label=r'$\propto (\Delta t)^{%d}$' % i)
    ax.text(min(hh), min(y), str(i), ha='right', va='bottom')
ax.loglog(h, L2err, 'k.', label='results')
ax.set_xlabel(r'step size $(\Delta t)$')
ax.set_ylabel('$l_2$ error')
ax.legend()
ax.set_title('Convergence Test')
plt.tight_layout()
plt.show()
| 2.625
| 3
|
task1/task.py
|
garncarz/prague-transport-2017
| 0
|
12777950
|
<filename>task1/task.py
import re
import subprocess
from main import cache, celery
def dict_to_stdin(d):
s = '%d\n' % d['citiesCount']
for offer in d['costOffers']:
s += '%d %d %d\n' % (offer['from'], offer['to'], offer['price'])
return s.encode()
def stdout_to_dict(b):
d = {}
m = re.split(r'[ \t\n\r:]+', b.decode())
d['feasible'] = True if m[0] == '1' else False
d['totalCost'] = int(m[1])
d['depotId'] = int(m[2])
d['recommendedOffers'] = []
for i in range(3, len(m) - 1, 3):
d['recommendedOffers'].append({
'from': int(m[i]),
'to': int(m[i + 1]),
'price': int(m[i + 2]),
})
return d
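# Illustrative example of the wire format (hypothetical values): for
#   {'citiesCount': 3, 'costOffers': [{'from': 0, 'to': 1, 'price': 5}]}
# dict_to_stdin() produces b"3\n0 1 5\n", and a solver reply of b"1 5 0\n0 1 5\n"
# parses back to {'feasible': True, 'totalCost': 5, 'depotId': 0,
#                 'recommendedOffers': [{'from': 0, 'to': 1, 'price': 5}]}.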
def _solve(_input):
p = subprocess.Popen(['task1/find_hub'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
p.stdin.write(dict_to_stdin(_input))
p.stdin.close()
p.wait()
return stdout_to_dict(p.stdout.read())
@celery.app.task
def solve(_input):
key = {'task': 1, 'input': _input}
result = cache.get(key)
if not result:
result = _solve(_input)
cache.set(key, result)
return result
| 2.78125
| 3
|