| max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | score | int_score |
|---|---|---|---|---|---|---|
services/engine/webs/api/models/result.py
|
huang-zp/crawloop
| 19
|
12778151
|
# -*- coding: utf-8 -*-
"""
Stores crawl results.
"""
from sqlalchemy import Column, BigInteger, String, TIMESTAMP, func, Integer, Text
from sqlalchemy.dialects.postgresql import JSONB
from webs.api.models import db
class Result(db.Model):
__tablename__ = 'results'
id = Column(BigInteger, primary_key=True, autoincrement=True)
subtask_id = Column(Integer, nullable=False, index=True) # id of the parent subtask
url_id = Column(Integer, nullable=False, index=True) # url id
url_address = Column(String(1024), nullable=False) # url address
http_code = Column(Integer) # HTTP status code
title = Column(Text) # page title
content = Column(Text) # page content
text = Column(Text) # page body text
current_url = Column(String(1024)) # final URL the site responded with
redirect_chain = Column(JSONB) # redirect chain
response_headers = Column(JSONB) # response headers
har_uuid = Column(String(128)) # HAR of the site interaction
screenshot_id = Column(String(128)) # screenshot id
cookies = Column(JSONB) # cookies
finished_at = Column(TIMESTAMP) # finish time
wappalyzer_results = Column(JSONB) # site fingerprint (Wappalyzer)
callback_failure_msg = Column(Text) # callback failure message
favicon_md5 = Column(String(50)) # favicon MD5 hash
favicon_link = Column(String(1024)) # favicon URL
response_time = Column(Integer) # site response time
load_complete_time = Column(Integer) # page load complete time
charset = Column(String(256)) # page charset
create_time = Column(TIMESTAMP, server_default=func.now(), index=True)
update_time = Column(TIMESTAMP, server_default=func.now(), onupdate=func.now(), index=True)
def __repr__(self):
return f'<Result-{self.id}>'
def as_dict(self):
from webs.api.models.db_proxy import task_model_proxy
task_obj = task_model_proxy.query_task_obj_by_subtask(self.subtask_id)
return {
'result_id': self.id,
'subtask_id': self.subtask_id,
'task_id': task_obj.id if task_obj else None,
'customer_id': task_obj.customer_id if task_obj else None,
'url_id': self.url_id,
'url_address': self.url_address,
'http_code': self.http_code,
'title': self.title,
'content': self.content,
'text': self.text,
'current_url': self.current_url,
'redirect_chain': self.redirect_chain,
'response_headers': self.response_headers,
'har_uuid': self.har_uuid,
'screenshot_id': self.screenshot_id,
'cookies': self.cookies,
'favicon_md5': self.favicon_md5,
'favicon_link': self.favicon_link,
'wappalyzer_results': self.wappalyzer_results,
'response_time': self.response_time,
'load_complete_time': self.load_complete_time,
'charset': self.charset,
'finished_at': self.finished_at.strftime("%Y-%m-%d %H:%M:%S") if self.finished_at else None
}
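# Illustrative usage (a sketch, assuming the standard Flask-SQLAlchemy session):
#   result = Result(subtask_id=1, url_id=2, url_address="https://example.com")
#   db.session.add(result)
#   db.session.commit()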
| 2.28125
| 2
|
apps/panel/migrations/0004_log.py
|
ivall/IVmonitor
| 190
|
12778152
|
# Generated by Django 3.0.7 on 2021-02-05 09:15
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('panel', '0003_auto_20210205_0955'),
]
operations = [
migrations.CreateModel(
name='Log',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(max_length=4)),
('time', models.DateTimeField(default=django.utils.timezone.now)),
('monitor_object', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='panel.MonitorObject')),
],
),
]
| 1.679688
| 2
|
ibis_substrait/proto/substrait/plan_pb2.py
|
gforsyth/ibis-substrait
| 14
|
12778153
|
<reponame>gforsyth/ibis-substrait
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from ..substrait import algebra_pb2 as substrait_dot_algebra__pb2
from ..substrait.extensions import extensions_pb2 as substrait_dot_extensions_dot_extensions__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x14substrait/plan.proto\x12\tsubstrait\x1a\x17substrait/algebra.proto\x1a%substrait/extensions/extensions.proto"c\n\x07PlanRel\x12"\n\x03rel\x18\x01 \x01(\x0b2\x0e.substrait.RelH\x00R\x03rel\x12(\n\x04root\x18\x02 \x01(\x0b2\x12.substrait.RelRootH\x00R\x04rootB\n\n\x08rel_type"\xe3\x02\n\x04Plan\x12O\n\x0eextension_uris\x18\x01 \x03(\x0b2(.substrait.extensions.SimpleExtensionURIR\rextensionUris\x12P\n\nextensions\x18\x02 \x03(\x0b20.substrait.extensions.SimpleExtensionDeclarationR\nextensions\x120\n\trelations\x18\x03 \x03(\x0b2\x12.substrait.PlanRelR\trelations\x12X\n\x13advanced_extensions\x18\x04 \x01(\x0b2\'.substrait.extensions.AdvancedExtensionR\x12advancedExtensions\x12,\n\x12expected_type_urls\x18\x05 \x03(\tR\x10expectedTypeUrlsB+\n\x12io.substrait.protoP\x01\xaa\x02\x12Substrait.Protobufb\x06proto3')
_PLANREL = DESCRIPTOR.message_types_by_name['PlanRel']
_PLAN = DESCRIPTOR.message_types_by_name['Plan']
PlanRel = _reflection.GeneratedProtocolMessageType('PlanRel', (_message.Message,), {'DESCRIPTOR': _PLANREL, '__module__': 'substrait.plan_pb2'})
_sym_db.RegisterMessage(PlanRel)
Plan = _reflection.GeneratedProtocolMessageType('Plan', (_message.Message,), {'DESCRIPTOR': _PLAN, '__module__': 'substrait.plan_pb2'})
_sym_db.RegisterMessage(Plan)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'\n\x12io.substrait.protoP\x01\xaa\x02\x12Substrait.Protobuf'
_PLANREL._serialized_start = 99
_PLANREL._serialized_end = 198
_PLAN._serialized_start = 201
_PLAN._serialized_end = 556
| 1.179688
| 1
|
CodingInterview2/28_01_SymmetricalBinaryTree/symmetrical_binary_tree.py
|
hscspring/TheAlgorithms-Python
| 10
|
12778154
|
<reponame>hscspring/TheAlgorithms-Python
"""
Interview Question 28: Symmetrical Binary Tree
Problem: Implement a function to determine whether a binary tree is symmetrical.
A binary tree is symmetrical if it looks the same as its mirror image.
"""
class BinaryTreeNode:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def connect_binarytree_nodes(parent: BinaryTreeNode,
left: BinaryTreeNode,
right: BinaryTreeNode) -> BinaryTreeNode:
if parent:
parent.left = left
parent.right = right
return parent
def print_node(node: BinaryTreeNode):
if node:
print("node value: ", node.val)
if node.left:
print("left child value: ", node.left.val)
else:
print("left child null")
if node.right:
print("right child value: ", node.right.val)
else:
print("right child null")
else:
print("node is null")
def print_tree(root: BinaryTreeNode):
print_node(root)
if root:
if root.left:
print_tree(root.left)
if root.right:
print_tree(root.right)
def is_symmetrical(bt: BinaryTreeNode) -> bool:
"""
Whether the given BinaryTree is symmetrical.
Parameters
-----------
bt: BinaryTreeNode
Returns
---------
out: bool
Notes
------
The tree is symmetrical iff its preorder traversal equals its mirrored
(right-first) preorder traversal, with None recorded for absent children.
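Examples
---------
(illustrative, using the helpers defined above)
>>> root = connect_binarytree_nodes(BinaryTreeNode(1), BinaryTreeNode(2), BinaryTreeNode(2))
>>> is_symmetrical(root)
True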
"""
if not bt:
return True
pre, spre = [], []
preorder(bt, pre)
symmetrical_preorder(bt, spre)
return pre == spre
def preorder(bt: BinaryTreeNode, res: list) -> list:
if not bt:
res.append(None)
return res
res.append(bt.val)
preorder(bt.left, res)
preorder(bt.right, res)
return res
def symmetrical_preorder(bt: BinaryTreeNode, res: list) -> list:
if not bt:
res.append(None)
return res
res.append(bt.val)
symmetrical_preorder(bt.right, res)
symmetrical_preorder(bt.left, res)
return res
def is_symmetrical_recursion(bt: BinaryTreeNode) -> bool:
return is_symmetrical_recursion_core(bt, bt)
def is_symmetrical_recursion_core(bt1, bt2) -> bool:
if not bt1 and not bt2:
return True
if not bt1 or not bt2:
return False
if bt1.val != bt2.val:
return False
return (is_symmetrical_recursion_core(bt1.left, bt2.right) and
is_symmetrical_recursion_core(bt1.right, bt2.left))
if __name__ == '__main__':
# 5
# 5 5
# 5 5
tree = BinaryTreeNode(5)
connect_binarytree_nodes(tree, BinaryTreeNode(5), BinaryTreeNode(5))
connect_binarytree_nodes(tree.left, BinaryTreeNode(5), None)
connect_binarytree_nodes(tree.right, BinaryTreeNode(5), None)
# tree = BinaryTreeNode(7)
# connect_binarytree_nodes(tree, BinaryTreeNode(7), BinaryTreeNode(7))
# connect_binarytree_nodes(tree.left, BinaryTreeNode(7), BinaryTreeNode(7))
# connect_binarytree_nodes(tree.right, BinaryTreeNode(7), None)
res = []
preorder(tree, res)
print(res)
ress = []
symmetrical_preorder(tree, ress)
print(ress)
print(is_symmetrical(tree))
print(is_symmetrical_recursion(tree))
| 3.875
| 4
|
setup.py
|
jneight/pydup
| 1
|
12778155
|
<reponame>jneight/pydup
# coding=utf-8
from setuptools import setup, find_packages
setup(
name='pydup',
version='0.11',
install_requires=[],
url='https://github.com/jneight/pydup',
description='Simple implementation of LSH Algorithm',
packages=find_packages(),
include_package_data=True,
license='Apache 2.0',
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
],
author='<NAME>',
author_email='<EMAIL>'
)
| 1.390625
| 1
|
app/core/tests/test_admin.py
|
avinashgundala/recipe-app-api
| 1
|
12778156
|
<gh_stars>1-10
from django.test import TestCase,Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTest(TestCase):
"""testing admin site interface"""
def setUp(self):
"""setting up superuser and user for admin page access"""
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email='<EMAIL>',
name='admin',
password='<PASSWORD>'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='<EMAIL>',
name='test',
password='<PASSWORD>'
)
def test_users_listed(self):
"""testing new user is listed in userlist"""
url = reverse('admin:core_user_changelist')
response = self.client.get(url)
self.assertContains(response, self.user.name)
self.assertContains(response, self.user.email)
def test_user_change(self):
"""testing user change"""
url = reverse('admin:core_user_change', args=[self.user.id])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_user_add(self):
"""testing user add page"""
url = reverse('admin:core_user_add')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
| 2.640625
| 3
|
spatialpooch/_vector.py
|
achapkowski/spatial-pooch
| 1
|
12778157
|
import os
import importlib.util
import pooch
from pooch import Unzip
from ._spooch import SPATIALPOOCH as _GOODBOY
###########################################################################
allowed_formats = {
"pandas" : False,
"numpy" : False,
"string" : True,
"sedf" : False,
"arcgis" : False
}
###########################################################################
if importlib.util.find_spec('numpy') is not None:
import numpy as np
allowed_formats['numpy'] = True
if importlib.util.find_spec('pandas') is not None:
import pandas as pd
allowed_formats['pandas'] = True
if importlib.util.find_spec('arcgis') is not None:
from arcgis.features import GeoAccessor, GeoSeriesAccessor
allowed_formats['arcgis'] = True
###########################################################################
#--------------------------------------------------------------------------
def _fetch(data, f, **kwargs):
"""downloads the data and returns it in the requested format"""
data = _GOODBOY.fetch(fname=data, processor=Unzip())
if f is None:
f = 'string'
if str(f) == 'string':
return data
elif str(f) == 'arcgis' and allowed_formats['arcgis']:
for item in data:
if str(item).lower().endswith(".shp"):
return pd.DataFrame.spatial.from_featureclass(item)
elif str(item).lower().endswith('.gdb') and 'dataset' in kwargs:
fc = os.path.join(item, kwargs['dataset'])
return pd.DataFrame.spatial.from_featureclass(fc)
return data
#--------------------------------------------------------------------------
def fetch_beach_access_data(f=None):
"""gets the data in the proper format"""
data = _fetch(data="vector/Public_Access_Information.zip", f=f)
return data
#--------------------------------------------------------------------------
def fetch_shipping_lanes_data(f=None):
"""gets the data in the proper format"""
return _fetch(data="vector/Shipping_Lanes.zip", f=f)
#--------------------------------------------------------------------------
def fetch_crime_shp_data(f=None):
"""gets the data in the proper format"""
return _fetch(data="vector/Crime.zip", f=f)
#--------------------------------------------------------------------------
def fetch_family_resource_centers_data(f=None):
"""gets the data in the proper format"""
return _fetch(data="vector/Family_Resource_Centers.zip", f=f)
| 2.1875
| 2
|
leetcode/contest/week_143_1103.py
|
JamesCao2048/CodingQuestions
| 1
|
12778158
|
# Distribute candies to people
# Easy
class Solution(object):
def distributeCandies(self, candies, num_people):
"""
:type candies: int
:type num_people: int
:rtype: List[int]
"""
if num_people <= 0 or candies < 0:
raise Exception("Invalid input")
result = []
for i in range(num_people):
result.append(0)
if candies == 0:
return result
rd, last_candy = self.getRound(candies, num_people)
# candies handed out over the full rounds: person i receives (i+1) + num_people*(r-1) in round r
for i in range(num_people):
result[i] += (i + 1) * (rd - 1) + num_people * (rd - 1) * (rd - 2) // 2
# hand out the last (possibly partial) round
index = 0
while last_candy > (index + 1) + num_people * (rd - 1):
give = (index + 1) + num_people * (rd - 1)
result[index] += give
last_candy -= give
index += 1
result[index] += last_candy
return result
def getRound(self, candies, num_people):
rd = 0
cur_candy_add = num_people * (1 + num_people) // 2
cur_candy_sum = 0
while cur_candy_sum < candies:
cur_candy_sum += cur_candy_add
cur_candy_add += num_people * num_people
rd += 1
last_candy = candies - (cur_candy_sum - cur_candy_add + num_people * num_people)
return rd, last_candy
| 3.640625
| 4
|
tests/test_chatbot.py
|
jvm123/botstory
| 0
|
12778159
|
import sys
import os
import unittest
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from botstory.botclass import BotClass
class TestChatbot(unittest.TestCase):
def test_chatbot(self):
chatbot = BotClass()
# Check whether the bot is able to respond to a simple phrase from conversations.json
self.assertEqual(chatbot.process_query("Thank you."), "You're welcome.")
self.assertEqual(chatbot.process_query("thank you"), "You're welcome.")
| 3.125
| 3
|
apero/recipes/spirou/cal_preprocess_spirou.py
|
njcuk9999/apero-drs
| 1
|
12778160
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# CODE DESCRIPTION HERE
Created on 2019-03-05 16:38
@author: ncook
Version 0.0.1
"""
import numpy as np
import os
from apero import core
from apero import lang
from apero.core import constants
from apero.science import preprocessing as pp
from apero.io import drs_image
from apero.io import drs_fits
from apero.core.instruments.spirou import file_definitions
# =============================================================================
# Define variables
# =============================================================================
__NAME__ = 'cal_preprocess_spirou.py'
__INSTRUMENT__ = 'SPIROU'
# Get constants
Constants = constants.load(__INSTRUMENT__)
# Get version and author
__version__ = Constants['DRS_VERSION']
__author__ = Constants['AUTHORS']
__date__ = Constants['DRS_DATE']
__release__ = Constants['DRS_RELEASE']
# Get Logging function
WLOG = core.wlog
# Get the text types
TextEntry = lang.drs_text.TextEntry
# Raw prefix
RAW_PREFIX = file_definitions.raw_prefix
# =============================================================================
# Define functions
# =============================================================================
# All recipe code goes in _main
# Only change the following from here:
# 1) function calls (i.e. main(arg1, arg2, **kwargs)
# 2) fkwargs (i.e. fkwargs=dict(arg1=arg1, arg2=arg2, **kwargs)
# 3) config_main outputs value (i.e. None, pp, reduced)
# Everything else is controlled from recipe_definition
def main(directory=None, files=None, **kwargs):
"""
Main function for cal_preprocess_spirou.py
:param directory: string, the night name sub-directory
:param files: list of strings or string, the list of files to process
:param kwargs: any additional keywords
:type directory: str
:type files: list[str]
:keyword debug: int, debug level (0 for None)
:returns: dictionary of the local space
:rtype: dict
"""
# assign function calls (must add positional)
fkwargs = dict(directory=directory, files=files, **kwargs)
# ----------------------------------------------------------------------
# deal with command line inputs / function call inputs
recipe, params = core.setup(__NAME__, __INSTRUMENT__, fkwargs)
# solid debug mode option
if kwargs.get('DEBUG0000', False):
return recipe, params
# ----------------------------------------------------------------------
# run main bulk of code (catching all errors)
llmain, success = core.run(__main__, recipe, params)
# ----------------------------------------------------------------------
# End Message
# ----------------------------------------------------------------------
return core.end_main(params, llmain, recipe, success, outputs='None')
def __main__(recipe, params):
# ----------------------------------------------------------------------
# Main Code
# ----------------------------------------------------------------------
# Get hot pixels for corruption check
hotpixels = pp.get_hot_pixels(params)
# get skip parameter
skip = params['SKIP_DONE_PP']
# ----------------------------------------------------------------------
# Loop around input files
# ----------------------------------------------------------------------
# get files
infiles = params['INPUTS']['FILES'][1]
# Number of files
num_files = len(infiles)
# storage for output files
output_names = []
# loop around number of files
for it in range(num_files):
# ------------------------------------------------------------------
# add level to recipe log
log1 = recipe.log.add_level(params, 'num', it)
# ------------------------------------------------------------------
# print file iteration progress
core.file_processing_update(params, it, num_files)
# get this iteration's file
file_instance = infiles[it]
# ------------------------------------------------------------------
# Fix the spirou header
# ------------------------------------------------------------------
# certain keys may not be in some spirou files
file_instance = drs_fits.fix_header(params, recipe, file_instance)
# ------------------------------------------------------------------
# identification of file drs type
# ------------------------------------------------------------------
# identify this iterations file type
cond, infile = pp.drs_infile_id(params, recipe, file_instance)
# ------------------------------------------------------------------
# if it wasn't found skip this file, if it was print a message
if cond:
eargs = [infile.name]
WLOG(params, 'info', TextEntry('40-010-00001', args=eargs))
else:
eargs = [infile.filename]
WLOG(params, 'info', TextEntry('40-010-00002', args=eargs))
continue
# get data from file instance
image = np.array(infile.data)
# ------------------------------------------------------------------
# Get out file and check skip
# ------------------------------------------------------------------
# get the output drs file
oargs = [params, recipe, infile, recipe.outputs['PP_FILE'], RAW_PREFIX]
found, outfile = pp.drs_outfile_id(*oargs)
# construct out filename
outfile.construct_filename(params, infile=infile)
# if we didn't find the output file we should log this error
if not found:
eargs = [outfile.name]
WLOG(params, 'error', TextEntry('00-010-00003', args=eargs))
if skip:
if os.path.exists(outfile.filename):
wargs = [infile.filename]
WLOG(params, 'info', TextEntry('40-010-00012', args=wargs))
continue
# ----------------------------------------------------------------------
# Check for pixel shift and/or corrupted files
# ----------------------------------------------------------------------
# storage
snr_hotpix, rms_list = [], []
# do this iteratively: if there is a shift we need to re-work out the QC
for iteration in range(2):
# get pass condition
cout = pp.test_for_corrupt_files(params, image, hotpixels)
snr_hotpix, rms_list = cout[0], cout[1]
shiftdx, shiftdy = cout[2], cout[3]
# use dx/dy to shift the image back to where the engineering flat
# is located
if shiftdx != 0 or shiftdy != 0:
# log process
wmsg = TextEntry('40-010-00013', args=[shiftdx, shiftdy])
WLOG(params, '', wmsg)
# shift image
image = np.roll(image, [shiftdy], axis=0)
image = np.roll(image, [shiftdx], axis=1)
# work out QC here
qargs = [snr_hotpix, infile, rms_list]
qc_params, passed = pp.quality_control(params, *qargs, log=False)
# if passed break
if passed:
break
# ------------------------------------------------------------------
# Quality control to check for corrupt files
# ------------------------------------------------------------------
# re-calculate qc
qargs = [snr_hotpix, infile, rms_list]
qc_params, passed = pp.quality_control(params, *qargs, log=True)
# update recipe log
log1.add_qc(params, qc_params, passed)
if not passed:
# end log here
log1.end(params)
# go to next iteration
continue
# ------------------------------------------------------------------
# correct image
# ------------------------------------------------------------------
# correct for the top and bottom reference pixels
WLOG(params, '', TextEntry('40-010-00003'))
image = pp.correct_top_bottom(params, image)
# correct by a median filter from the dark amplifiers
WLOG(params, '', TextEntry('40-010-00004'))
image = pp.median_filter_dark_amps(params, image)
# correct for the 1/f noise
WLOG(params, '', TextEntry('40-010-00005'))
image = pp.median_one_over_f_noise(params, image)
# ------------------------------------------------------------------
# calculate mid observation time
# ------------------------------------------------------------------
mout = drs_fits.get_mid_obs_time(params, infile.header)
mid_obs_time, mid_obs_method = mout
# ------------------------------------------------------------------
# rotate image
# ------------------------------------------------------------------
# rotation to match HARPS orientation (expected by DRS)
image = drs_image.rotate_image(image, params['RAW_TO_PP_ROTATION'])
# ------------------------------------------------------------------
# Save rotated image
# ------------------------------------------------------------------
# define header keys for output file
# copy keys from input file
outfile.copy_original_keys(infile)
# add version
outfile.add_hkey('KW_PPVERSION', value=params['DRS_VERSION'])
# add dates
outfile.add_hkey('KW_DRS_DATE', value=params['DRS_DATE'])
outfile.add_hkey('KW_DRS_DATE_NOW', value=params['DATE_NOW'])
# add process id
outfile.add_hkey('KW_PID', value=params['PID'])
# add input filename
outfile.add_hkey_1d('KW_INFILE1', values=[infile.basename],
dim1name='infile')
# add qc parameters
outfile.add_qckeys(qc_params)
# add dprtype
outfile.add_hkey('KW_DPRTYPE', value=outfile.name)
# add the shift that was used to correct the image
outfile.add_hkey('KW_PPSHIFTX', value=shiftdx)
outfile.add_hkey('KW_PPSHIFTY', value=shiftdy)
# add mid observation time
outfile.add_hkey('KW_MID_OBS_TIME', value=mid_obs_time.mjd)
outfile.add_hkey('KW_MID_OBSTIME_METHOD', value=mid_obs_method)
# ------------------------------------------------------------------
# copy data
outfile.data = image
# ------------------------------------------------------------------
# log that we are saving rotated image
wargs = [outfile.filename]
WLOG(params, '', TextEntry('40-010-00009', args=wargs))
# ------------------------------------------------------------------
# write the fits image to file
outfile.write_file()
# add to output files (for indexing)
recipe.add_output_file(outfile)
# index this file
core.end_main(params, None, recipe, success=True, outputs='pp',
end=False)
# ------------------------------------------------------------------
# append to output storage in p
# ------------------------------------------------------------------
output_names.append(outfile.filename)
# ------------------------------------------------------------------
# update recipe log file
# ------------------------------------------------------------------
log1.end(params)
# ----------------------------------------------------------------------
# End of main code
# ----------------------------------------------------------------------
return core.return_locals(params, dict(locals()))
# =============================================================================
# Start of code
# =============================================================================
if __name__ == "__main__":
# run main with no arguments (get from command line - sys.argv)
ll = main()
# =============================================================================
# End of code
# =============================================================================
| 1.867188
| 2
|
examen_2_sim02/p5/p5.py
|
Munoz-Rojas-Adriana/Computacion_para_Ingenieria
| 0
|
12778161
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 17 00:39:13 2022
@author: ACER
"""
class Vehiculo:
def __init__(self, color, marca):
self.color = color
self.marca = marca
def mostrarse(self):
print(f"la marca {self.marca} y color {self.color}")
class Auto(Vehiculo):
def __init__(self, color, marca, maxVelocidad):
super().__init__(color, marca)
self.maxVelocidad = maxVelocidad
class Bicicleta(Vehiculo):
def __init__(self, color, marca, tipoFreno):
super().__init__(color, marca)
self.tipoFreno = tipoFreno
class Persona:
def __init__(self, nombre, ci, vehiculo):
self.nombre = nombre
self.ci = ci
self.vehiculo = vehiculo
def mostrarDatos(self):
print(f"persona {self.nombre} tiene como vehiculo {self.vehiculo.mostrarse()}")
# how all these classes are used
vici_phoenix = Bicicleta("negro", "Phoenix", "Tacos")
carlos = Persona("<NAME>", 75757, vici_phoenix)
carlos.mostrarDatos()
| 3.15625
| 3
|
picking/algorithms/pso.py
|
mattianeroni/IndustryAlgorithms
| 1
|
12778162
|
from typing import Dict, List, Tuple, Union, Callable, Set, cast
import random
import math
import time
def _bra (lst : List[int], beta : float = 0.3) -> int:
"""
The extraction of an item from a list, using a biased randomisation based
on a quasi-geometric distribution (i.e. f(x) = (1-beta)^x).
:param beta: The parameter of the quasi-geometric distribution.
:return: The extracted element.
"""
return lst[int(math.log(random.random(), 1 - beta)) % len(lst)]
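# Illustrative behaviour (hypothetical list): _bra([7, 1, 9], beta=0.3) returns the
# first element most often, with quasi-geometrically decaying odds for later positions.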
def _triangular (lst : List[int]) -> int:
"""
The extraction of an item from a list, using a triangular distribution.
:return: The extracted element.
"""
return lst[int(len(lst) - len(lst)*random.random()/2) % len(lst)]
def _make_negative_exp (max_iter : int = 1000, max_v : float = 1.0, min_v : float = 0.5) -> Callable[[int],float]:
"""
This method generates an exponential function used to increase the weight
given to the current position of the particle.
As the number of iterations increases, the particles become more and more static.
***Note*** : The lower the weight, the greater the relevance given to
the current position of the particles. Hence, for a low weight, the particles are
more static. As the number of iterations without improvement increases, the
mobility of the particles increases too.
:param max_iter: The maximum number of iterations
:param max_v: The maximum value the weight of the current position must assume
:param min_v: The minimum value the weight of the current position must assume
:return: A callable function which represents the exponential needed.
def negative_exp (x : int) -> float
:param x: The current iteration without improvement.
:return: The weight of the current position of the particles.
"""
alpha = math.log(max_v + min_v)/max_iter
def negative_exp (x : int) -> float:
return math.exp(alpha * x) - min_v
return negative_exp
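# Worked example: f = _make_negative_exp(1000, 1.0, 0.5) gives f(0) == 0.5 and
# f(1000) ≈ 1.0, i.e. the weight grows from min_v to max_v as stagnation accumulates.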
def _negative_exp (x : int, alpha : float) -> float:
"""
This method returns the negative exponential according to the equation
f(x) = e^(-alpha*x)
:param x: The input.
:param alpha: The parameter of the exponential (the higher is alpha, the
faster is the decrease).
:return: The output f(x).
"""
return math.exp(-alpha*x)
def _compute_distance (lst : List[int], distances : List[List[int]]) -> int:
"""
Given a picking list and a distance matrix, this method calculates the
distance ran to complete the picking list.
:param lst: The picking list
:param distances: The distance matrix
:return: The distance ran.
"""
return sum(distances[lst[i]][lst[i+1]] for i in range(len(lst) - 1)) + distances[lst[-1]][0] + distances[0][lst[0]]
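# Worked example: for a distance matrix d (hypothetical), _compute_distance([2, 5, 3], d)
# evaluates d[2][5] + d[5][3] + d[3][0] + d[0][2], i.e. the closed tour through the depot (node 0).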
def _two_opt (lst : List[int], i : int, j : int) -> List[int]:
"""
This method, given two cutting positions i and j, makes a 2-Opt on the
starting list.
:param lst: The starting list.
:param i: First cutting point.
:param j: Second cutting point.
:return: The new list.
"""
return lst[:min(i,j)] + list(reversed(lst[min(i,j):max(i,j)])) + lst[max(i,j):]
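# Worked example: _two_opt([0, 1, 2, 3, 4], 1, 3) -> [0, 2, 1, 3, 4]
# (the segment between the two cut points is reversed)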
def _greedy (lst : List[int], distances : List[List[int]]) -> List[int]:
"""
This method returns a purely greedy solution.
:param lst: The list of nodes to visit.
:param distances: The distance matrix.
:return: The nodes in the order in which they should be visited.
"""
c_node = 0
sol : List[int] = []
options = list(lst)
while len(options) > 0:
options = sorted(options, key=lambda i: distances[c_node][i])
c_node = options.pop(0)
sol.append (c_node)
return sol
class Particle (object):
"""
An instance of this class represents a particle used in this algorithm.
"""
def __init__(self, *,
distances : Dict[int, Dict[int,int]],
picking_list : List[int],
paths : Dict[int,Dict[int, Set[int]]],
greediness : float = 0.1,
beta : float = 0.7,
check_paths : float = 0.1,
deepsearch : float = 0.05,
fulldeepsearch : float = 0.5,
max_depth : int = 2500,
) -> None:
'''
:param distances: The distance matrix
:param picking_list: The picking list.
:param paths: The nodes in between two others
:param greediness: The importance given to the greedy solution. The random intention is given a weight
equal to (1 - greediness).
:param beta: The parameter of the geometric.
:param check_paths: The probability to include the nodes between node i and j, when going from i to j.
:param deepsearch: Probability to do deep search.
:param fulldeepsearch: Probability to do full deep search.
:param max_depth: Maximum number of iterations in case of deep search
:attr current: The current solution.
:attr intention: The current intention.
:attr pbest: The current personal best found so far.
:attr vcurrent: The cost of the current.
:attr vintention: The cost of the intention.
:attr vpbest: The cost of the personal best.
:attr greedy: The greedy solution.
:attr vgreedy: The cost of the greedy solution.
:attr explorations: The number of solutions explored up to now.
'''
# set parameters
self.distances = dict(distances)
self.picking_list = list(picking_list)
self.paths = dict(paths)
self.greediness = greediness
self.beta = beta
self.check_paths = check_paths
self.deepsearch = deepsearch
self.fulldeepsearch = fulldeepsearch
self.max_depth = max_depth
# starting solutions
self.current = list(picking_list)
random.shuffle(self.current)
self.pbest = list(self.current)
self.intention = list(self.current)
random.shuffle(self.intention)
# evaluate solutions (i.e., distances)
self.vpbest, self.vcurrent, self.vintention = cast(int,float("inf")), 0, 0
self.update_dist ()
# greedy solution
self.greedy = _greedy (picking_list, distances)
self.vgreedy = _compute_distance (self.greedy, distances)
# The number of solutions explored
self.explorations : int = 0
def update_dist (self) -> None:
"""
This method updates the cost of the solutions kept in memory, i.e. current, intention, and pbest.
"""
self.vcurrent, self.vintention = 0, 0
for i in range(len(self.picking_list) - 1):
self.vcurrent += self.distances[self.current[i]][self.current[i+1]]
self.vintention += self.distances[self.intention[i]][self.intention[i+1]]
self.vcurrent += self.distances[0][self.current[0]]
self.vintention += self.distances[0][self.intention[0]]
self.vcurrent += self.distances[self.current[-1]][0]
self.vintention += self.distances[self.intention[-1]][0]
if self.vcurrent < self.vpbest:
self.vpbest, self.pbest = self.vcurrent, list(self.current)
def move (self, gbest : List[int], vgbest : int) -> Tuple[List[int], int]:
"""
This method represents the movement of the particle that explores a new solution.
:param gbest: The global best of the whole swarm.
:param vgbest: The cost of the gbest.
:return: the personal best and its cost.
"""
# Reset the current -> !!! To remove if we want to consider it in the
# construction process.
self.current = []
# Initialize variables used in the construction process
nodes : Set[int] = set(self.picking_list)
c_node : int = 0
n_node : int
options : List[Tuple[int,float]]
# Construct node-by-node a new solution
while len(nodes) > 0:
options = []
if c_node == 0:
options = [(self.intention[0], 1.0 - self.greediness),
(self.greedy[0], self.greediness),
(self.pbest[0], 1.0),
(gbest[0], 1.0)
]
else:
options = [(sol[sol.index(c_node) + 1], w)
for sol, w in ((self.intention, 1.0 - self.greediness), (self.greedy, self.greediness),(self.pbest, 1.0), (gbest, 1.0))
if sol.index(c_node) != len(sol) - 1 and sol[sol.index(c_node) + 1] in nodes]
if len(options) == 0:
n_node = random.choice(list(nodes))
elif len (options) == 1:
n_node = options[0][0]
else:
n_node = _bra (sorted(options, key=lambda i: self.distances[c_node][i[0]]/i[1]), self.beta)[0]
nodes.remove (n_node)
# Eventually include before the new node the nodes on the shortest path
# between the last visited node and the new one.
r = random.random()
if r < self.check_paths:
in_middle = [i for i in nodes if i in self.paths[c_node][n_node]]
while len(in_middle) > 0:
in_middle = sorted (in_middle, key=lambda i: self.distances[c_node][i])
c_node = in_middle.pop(0)
self.current.append (c_node)
nodes.remove (c_node)
# Add the new node to the solution
self.current.append (n_node)
c_node = n_node
# Update the number of solutions explored
self.explorations += 1
# Shuffle the intention
random.shuffle(self.intention)
# Update the personal best if needed, the cost of the current
# and the cost of the new intention
self.update_dist ()
# Eventually do a deepsearch
r = random.random()
if len(self.picking_list) > 3 and r < self.deepsearch:
r2 = random.random ()
if r2 < self.fulldeepsearch:
self.deep_search(list(self.current), full=True)
else:
self.deep_search(list(self.current), full=False)
if self.vcurrent < self.vpbest:
self.pbest, self.vpbest = list(self.current), self.vcurrent
return self.pbest, self.vpbest
def deep_search(self, lst : List[int], full : bool = False, starting_depth : int = 0) -> None:
"""
This method does a deepsearch via 2-Opt in the neighbourhood of the
current solution.
:param lst: The picking list.
:param full: If TRUE every time there is an improvement and the maximum depth has
not been reached the deepsearch goes on.
:param starting_depth: Used in case of full == TRUE to control the depth.
"""
edges = [(i,j) for i in range(0,len(lst)-2) for j in range(i+2,len(lst))]
random.shuffle(edges)
self.explorations += len(edges)
for i, j in edges:
sol = _two_opt (lst, i, j)
cost = _compute_distance (sol, self.distances)
if cost < self.vcurrent:
self.current, self.vcurrent = list(sol), cost
if full is True and starting_depth < self.max_depth:
starting_depth += 1
self.deep_search(sol, True, starting_depth)
class Mattia_PSO:
"""
An instance of this class represents the Particle Swarm Optimization published by <NAME>,
Zammori in 2021: a hybrid PSO for the TSP.
A solution is generated node by node, selecting from four possibilities, namely: the
current solution, the particle best, the overall best, and the intention; the latter
is a random sequence.
Say the sequence generated so far is 1-3-4 and the alternatives are:
1-2-3-4-5; 5-4-3-2-1; 3-2-1-5-4; 5-4-1-2-3
The "suggested nodes" are then (5, 3, nan, 1); since 3 is already in, (5, 1) remain.
The choice depends, in a probabilistic way, on the corrected distances from 4 to 5 and
from 4 to 1: the less the better. A distance is corrected with the weight used to give
more importance to the current solution, then to the best, and so on.
This is the basic generation scheme. The solution may then be shaken, using a
first-level or deep-level 2-Opt procedure.
"""
def __init__ (self,*,
distances : Dict[int, Dict[int,int]],
picking_list : List[int],
paths : Dict[int, Dict[int, Set[int]]],
era : int = 10_000,
particles : int = 40,
max_noimp : int = 1000,
print_every : int = 100,
finalsearch : bool = True,
particle_data : Dict[str, Union[int, float, Callable[[int], float], Dict[str,float], Tuple[float,float], List[int], List[List[int]]]]
) -> None:
"""
Initialize.
:param distances: The distance matrix.
:param era: The number of iterations.
:param particles: The number of particles.
:param max_noimp: The maximum number of iterations without getting any improvement.
:param print_every: The number of iterations between a log and the next one.
:attr history: The history of the best solutions found by the algorithm.
:attr computations: The number of solutions explored before finding the best.
"""
self.era = era
self.max_noimp = max_noimp
self.print_every = print_every
self.finalsearch = finalsearch
particle_data["distances"] = distances
particle_data["picking_list"] = picking_list
particle_data["paths"] = paths
self.particle_data = particle_data
self.swarm : List[Particle] = [Particle(**particle_data) for _ in range(particles)]
self.history : List[int]
self.computations : int = 0
self.computational_time : float = 0.0
def reset(self):
particles = len(self.swarm)
self.swarm = [Particle(**self.particle_data) for _ in range(particles)]
self.history = []
self.computations = 0
def run (self, verbose : bool = False) -> Tuple[List[int], int]:
"""
This is the method to execute the algorithm.
It finally returns the best solution found and its cost.
:return: gbest, vgbest
"""
# Initialize starting time
start = time.time()
# Initialize the best starting position
gbest : List[int]
vgbest : int = cast(int, float("inf"))
for particle in self.swarm:
if particle.vpbest < vgbest:
gbest, vgbest = list(particle.pbest), particle.vpbest
new_vgbest : int = vgbest
new_gbest : List[int] = list(gbest)
self.history = [vgbest]
# Iterations
noimp = 0
for i in range(self.era):
for particle in self.swarm:
pbest, vpbest = particle.move (gbest, vgbest)
if vpbest < new_vgbest:
new_gbest, new_vgbest = list(pbest), vpbest
if new_vgbest < vgbest:
gbest, vgbest = new_gbest, new_vgbest
noimp = 0
self.computations = sum(p.explorations for p in self.swarm)
else:
noimp += 1
if noimp > self.max_noimp:
break
self.history.append(vgbest)
if i % self.print_every == 0 and verbose is True:
print('Epoch', i, ' Best: ', vgbest)
# Final deepsearch
if self.finalsearch is True:
for particle in self.swarm:
particle.deep_search (list(particle.current), True, 0)
if particle.vcurrent < particle.vpbest:
particle.pbest, particle.vpbest = list(particle.current), particle.vcurrent
if particle.vpbest < vgbest:
gbest, vgbest = list(particle.current), particle.vcurrent
self.computations = sum(p.explorations for p in self.swarm)
# Set computational time
self.computational_time = time.time() - start
return gbest, vgbest
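# Illustrative usage (a sketch; `distances`, `picking_list` and `paths` are
# hypothetical inputs shaped as documented above):
#   pso = Mattia_PSO(distances=distances, picking_list=picking_list,
#                    paths=paths, particle_data={})
#   best_route, best_cost = pso.run(verbose=True)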
| 3.78125
| 4
|
simplepipreqs/simplepipreqs.py
|
Atharva-Gundawar/simplepipreqs
| 1
|
12778163
|
<filename>simplepipreqs/simplepipreqs.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pathlib import Path
import subprocess
from yarg import json2package
from yarg.exceptions import HTTPError
import requests
import argparse
import os
import sys
import json
import threading
import itertools
import time
try:
from pip._internal.operations import freeze
except ImportError: # pip < 10.0
from pip.operations import freeze
def get_installed_packages(pip_version: str = "pip"):
installed_with_versions = []
installed = []
stdout, stderr = subprocess.Popen(
[pip_version, "freeze"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
for i in stdout.splitlines():
installed_with_versions.append(i.decode("utf-8"))
installed.append(i.decode("utf-8").split('==')[0])
return installed_with_versions, installed
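# Illustrative output (hypothetical environment):
#   get_installed_packages("pip3") -> (['requests==2.28.1', ...], ['requests', ...])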
def get_version_info(module: str, pypi_server: str = "https://pypi.python.org/pypi/", proxy=None):
try:
response = requests.get(
"{0}{1}/json".format(pypi_server, module), proxies=proxy)
if response.status_code == 200:
if hasattr(response.content, 'decode'):
data = json2package(response.content.decode())
else:
data = json2package(response.content)
elif response.status_code >= 300:
raise HTTPError(status_code=response.status_code,
reason=response.reason)
except HTTPError:
return None
return str(module) + '==' + str(data.latest_release_id)
def get_project_imports(directory: str = os.curdir):
modules = []
for path, subdirs, files in os.walk(directory):
for name in files:
if name.endswith('.py'):
# print(path)
with open(os.path.join(path, name)) as f:
contents = f.readlines()
for lines in contents:
words = lines.split(' ')
if 'import' == words[0] or 'from' == words[0]:
line_module = words[1].split('.')[0].split(',')
for module in line_module:
module = module.split('\n')[0]
if module and module not in modules:
modules.append(module)
# print('found {} in {}'.format(module,name))
elif name.endswith('.ipynb'):
with open(str(Path(os.path.join(path, name)).absolute())) as f:
contents = f.readlines()
listToStr = ' '.join([str(elem) for elem in contents])
contents = json.loads(listToStr)
# contents = json.loads(Path(os.path.join(path, name)).absolute().read_text())
for cell in contents["cells"]:
for line in cell["source"]:
words = line.split(' ')
if 'import' == words[0] or 'from' == words[0]:
line_module = words[1].split('.')[0].split(',')
for module in line_module:
module = module.split('\n')[0]
if module and module not in modules:
modules.append(module)
# print('found {} in {}'.format(module, name))
return modules
def init(args):
done_imports = False
def animate_imports():
for c in itertools.cycle(['|', '/', '-', '\\']):
if done_imports:
break
print('Getting imports ' + c, end="\r")
sys.stdout.flush()
time.sleep(0.1)
t_imports = threading.Thread(target=animate_imports)
print()
t_imports.start()
output_text = []
modules = get_project_imports(
) if args['path'] is None else get_project_imports(args['path'])
installed_with_versions, installed = get_installed_packages(
"pip3") if args['version'] is None else get_installed_packages(args['version'])
done_imports = True
time.sleep(0.2)
done_versions = False
def animate_versions():
for c in itertools.cycle(['|', '/', '-', '\\']):
if done_versions:
print("\033[A \033[A")
break
print('Getting versions ' + c, end="\r")
sys.stdout.flush()
time.sleep(0.1)
t_versions = threading.Thread(target=animate_versions)
t_versions.start()
for mod in modules:
if mod in installed:
mod_info = get_version_info(mod)
if mod_info:
output_text.append(mod_info)
done_versions = True
time.sleep(0.2)
print('\nGenerating requirements.txt ... ')
if args['path']:
with open(args['path'] + "/requirements.txt", 'w') as f:
f.write("\n".join(map(str, list(set(output_text)))))
print("Successfully created/updated requirements.txt")
else:
with open("requirements.txt", 'w') as f:
f.write("\n".join(map(str, list(set(output_text)))))
print("Successfully created/updated requirements.txt")
print()
def main():
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--version", type=str, help="Pip version")
ap.add_argument("-p", "--path", type=str, help="Path to target directory")
args = vars(ap.parse_args())
try:
init(args)
except KeyboardInterrupt:
sys.exit(0)
if __name__ == '__main__':
main()
| 2.390625
| 2
|
nicos_virt_mlz/treff/devices/detector.py
|
ebadkamil/nicos
| 12
|
12778164
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# <NAME> <<EMAIL>>
#
# *****************************************************************************
"""VTREFF detector image based on McSTAS simulation."""
from nicos.core import Attach, Override, Readable
from nicos.devices.generic import Slit
from nicos.devices.mcstas import McStasSimulation as BaseSimulation
from nicos_mlz.treff.devices import MirrorSample
class McStasSimulation(BaseSimulation):
parameter_overrides = {
'mcstasprog': Override(default='treff_fast'),
}
attached_devices = {
'sample': Attach('Mirror sample', MirrorSample),
's1': Attach('Slit 1', Slit),
's2': Attach('Slit 2', Slit),
'sample_x': Attach('Sample position x', Readable),
'sample_y': Attach('Sample position y', Readable),
'sample_z': Attach('Sample position z', Readable),
'beamstop': Attach('Beam stop position', Readable),
'omega': Attach('Sample omega rotation', Readable),
'chi': Attach('Sample chi rotation', Readable),
'phi': Attach('Sample phi rotation', Readable),
'detarm': Attach('Position detector arm', Readable),
}
def _prepare_params(self):
params = []
sample = self._attached_sample
params.append('s1_width=%s' % self._attached_s1.width.read(0))
params.append('s1_height=%s' % self._attached_s1.height.read(0))
params.append('s2_width=%s' % self._attached_s2.width.read(0))
params.append('s2_height=%s' % self._attached_s2.height.read(0))
params.append('sample_x=%s' % self._attached_sample_x.read(0))
sample_y = self._attached_sample_y
params.append('sample_y=%s' % (sample_y.read(0) + sample_y.offset +
sample._misalignments['sample_y']))
params.append('sample_z=%s' % self._attached_sample_z.read(0))
params.append('beamstop_pos=%s' % self._attached_beamstop.read(0))
omega = self._attached_omega
params.append('omega=%s' % (
omega.read(0) + omega.offset + sample._misalignments['omega']))
chi = self._attached_chi
params.append('chi=%s' % (
chi.read(0) + chi.offset + sample._misalignments['chi']))
params.append('phi=%s' % self._attached_phi.read(0))
detarm = self._attached_detarm
params.append('detarm=%s' % (
detarm.read(0) + detarm.offset + sample._misalignments['detarm']))
params.append('mirror_length=%s' % self._attached_sample.length)
params.append('mirror_thickness=%s' % self._attached_sample.thickness)
params.append('mirror_height=%s' % self._attached_sample.height)
params.append('mirror_m=%s' % self._attached_sample.m)
params.append('mirror_alfa=%s' % self._attached_sample.alfa)
params.append('mirror_wav=%s' % self._attached_sample.waviness)
if self._attached_sample.rflfile:
params.append('rflfile=%s' %
self._attached_sample.getReflectivityFile())
else:
params.append('rflfile=0')
return params
| 1.898438
| 2
|
tests/zq_crawler/test_yahoo.py
|
feng-zhe/ZheQuant-brain-python
| 2
|
12778165
|
<gh_stars>1-10
'''
Unit Tests for yahoo.py
'''
import unittest
import json
import random
from datetime import datetime
from datetime import timedelta
import pytz
from zq_crawler.yahoo import *
# Unit test class
class TestYahooCrawler(unittest.TestCase):
'''
Test case for yahoo crawler
'''
# test response string
_rsp_str = '{"chart":{"result":[{"meta":{"currency":"CNY","symbol":"600497.SS",\
"exchangeName":"SHH","instrumentType":"EQUITY","firstTradeDate":1082424600,\
"gmtoffset":28800,"timezone":"CST","exchangeTimezoneName":"Asia/Shanghai",\
"currentTradingPeriod":{"pre":{"timezone":"CST","end":1511746200,"start":1511746200,\
"gmtoffset":28800},"regular":{"timezone":"CST","end":1511766000,"start":1511746200,\
"gmtoffset":28800},"post":{"timezone":"CST","end":1511766000,"start":1511766000,\
"gmtoffset":28800}},"dataGranularity":"1d","validRanges":["1d","5d","1mo","3mo",\
"6mo","1y","2y","5y","10y","ytd","max"]},"timestamp":[1510709400,1510795800,\
1510882200,1511141400,1511227800,1511314200,1511400600,1511487000,1511746200],\
"indicators":{"quote":[{"low":[null,6.489999771118164,6.099999904632568,6.03000020980835,\
6.119999885559082,6.170000076293945,6.230000019073486,6.190000057220459,6.449999809265137],\
"volume":[null,34039227,53016969,28656684,39235021,41324595,63648648,52108224,54005417],\
"close":[null,6.510000228881836,6.199999809265137,6.239999771118164,6.179999828338623,\
6.269999980926514,6.309999942779541,6.5,6.510000228881836],"open":[null,6.650000095367432,\
6.46999979019165,6.199999809265137,6.210000038146973,6.269999980926514,6.289999961853027,\
6.25,6.489999771118164],"high":[null,6.679999828338623,6.539999961853027,6.260000228881836,\
6.260000228881836,6.28000020980835,6.480000019073486,6.519999980926514,6.670000076293945]}],\
"unadjclose":[{"unadjclose":[null,6.510000228881836,6.199999809265137,6.239999771118164,\
6.179999828338623,6.269999980926514,6.309999942779541,6.5,6.510000228881836]}],\
"adjclose":[{"adjclose":[null,6.510000228881836,6.199999809265137,6.239999771118164,\
6.179999828338623,6.269999980926514,6.309999942779541,6.5,6.510000228881836]}]}}],\
"error":null}}'
def test_validate_response(self):
'''
Test response validation
'''
self.assertTrue(validate_response(self._rsp_str))
def test_extract_stock_data(self):
'''
Test extracting data from response
'''
act_docs = extract_stock_data(self._rsp_str)
tzinfo = pytz.timezone('Asia/Shanghai')
exp_docs = [
{
'code' : '600497.SS',
# set to close time because we use close time
'date' : datetime.fromtimestamp(1510795800, tz=tzinfo)\
.replace(hour=15, minute=0, second=0, microsecond=0),
'volume' : 34039227,
'open' : 6.65,
'close' : 6.51,
'low' : 6.49,
'high' : 6.68
},
{
'code' : '600497.SS',
'date' : datetime.fromtimestamp(1510882200, tz=tzinfo)\
.replace(hour=15, minute=0, second=0, microsecond=0),
'volume' : 53016969,
'open' : 6.47,
'close' : 6.20,
'low' : 6.10,
'high' : 6.54
},
{
'code' : '600497.SS',
'date' : datetime.fromtimestamp(1511141400, tz=tzinfo)\
.replace(hour=15, minute=0, second=0, microsecond=0),
'volume' : 28656684,
'open' : 6.20,
'close' : 6.24,
'low' : 6.03,
'high' : 6.26
},
{
'code' : '600497.SS',
'date' : datetime.fromtimestamp(1511227800, tz=tzinfo)\
.replace(hour=15, minute=0, second=0, microsecond=0),
'volume' : 39235021,
'open' : 6.21,
'close' : 6.18,
'low' : 6.12,
'high' : 6.26
},
{
'code' : '600497.SS',
'date' : datetime.fromtimestamp(1511314200, tz=tzinfo)\
.replace(hour=15, minute=0, second=0, microsecond=0),
'volume' : 41324595,
'open' : 6.27,
'close' : 6.27,
'low' : 6.17,
'high' : 6.28
},
{
'code' : '600497.SS',
'date' : datetime.fromtimestamp(1511400600, tz=tzinfo)\
.replace(hour=15, minute=0, second=0, microsecond=0),
'volume' : 63648648,
'open' : 6.29,
'close' : 6.31,
'low' : 6.23,
'high' : 6.48
},
{
'code' : '600497.SS',
'date' : datetime.fromtimestamp(1511487000, tz=tzinfo)\
.replace(hour=15, minute=0, second=0, microsecond=0),
'volume' : 52108224,
'open' : 6.25,
'close' : 6.50,
'low' : 6.19,
'high' : 6.52
},
{
'code' : '600497.SS',
'date' : datetime.fromtimestamp(1511746200, tz=tzinfo)\
.replace(hour=15, minute=0, second=0, microsecond=0),
'volume' : 54005417,
'open' : 6.49,
'close' : 6.51,
'low' : 6.45,
'high' : 6.67
}
]
self.assertEqual(act_docs, exp_docs)
def test_request_data(self):
'''
Test requesting data from the internet.
Requires a working internet connection and the expected response format.
'''
tzinfo = pytz.timezone('Asia/Shanghai')
start = datetime(2017, 11, 18, tzinfo=tzinfo)
end = datetime(2017, 11, 22, tzinfo=tzinfo)
rsp = request_data(start, end, '600497.SS')
self.assertIsNotNone(rsp)
self.assertTrue(validate_response(rsp))
def test_one_attempt(self):
'''
Go through one attempt to crawl data (without updating the database).
Assumes there is no 15-day continuous market closure;
otherwise the test may fail.
'''
tzinfo = pytz.timezone('Asia/Shanghai')
month = random.randint(1, 11)
day = random.randint(1, 20)
start = datetime(2017, month, day, tzinfo=tzinfo)
end = start + timedelta(days=15)
rsp = request_data(start, end, '600497.SS')
self.assertIsNotNone(rsp)
self.assertTrue(validate_response(rsp))
docs = extract_stock_data(rsp)
self.assertNotEqual(len(docs), 0)
if __name__ == '__main__':
unittest.main()
| 2.65625
| 3
|
squarelet_auth/mixins.py
|
MuckRock/squarelet-auth
| 0
|
12778166
|
# Django
from django.contrib.auth import login
# Third Party
import requests
# SquareletAuth
from squarelet_auth.users.utils import squarelet_update_or_create
from squarelet_auth.utils import squarelet_post
class MiniregMixin:
"""A mixin to expose miniregister functionality to a view"""
minireg_source = "Default"
field_map = {}
def _create_squarelet_user(self, form, data):
"""Create a corresponding user on squarelet"""
generic_error = (
"Sorry, something went wrong with the user service. "
"Please try again later"
)
try:
resp = squarelet_post("/api/users/", data=data)
except requests.exceptions.RequestException:
form.add_error(None, generic_error)
raise
if resp.status_code // 100 != 2:
try:
error_json = resp.json()
except ValueError:
form.add_error(None, generic_error)
else:
for field, errors in error_json.items():
for error in errors:
form.add_error(self.field_map.get(field, field), error)
finally:
resp.raise_for_status()
return resp.json()
def miniregister(self, form, full_name, email):
"""Create a new user from their full name and email"""
full_name = full_name.strip()
user_json = self._create_squarelet_user(
form, {"name": full_name, "preferred_username": full_name, "email": email}
)
user, _ = squarelet_update_or_create(user_json["uuid"], user_json)
login(self.request, user, backend="squarelet_auth.backends.SquareletBackend")
return user
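# Illustrative usage (a sketch; `RegistrationView` and its form are hypothetical):
#   class RegistrationView(MiniregMixin, FormView):
#       minireg_source = "Newsletter"
#       def form_valid(self, form):
#           self.miniregister(form, form.cleaned_data["name"], form.cleaned_data["email"])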
| 2.265625
| 2
|
yasha/constants.py
|
alextremblay/yasha
| 0
|
12778167
|
ENCODING = 'utf-8'
EXTENSION_FILE_FORMATS = ('.py', '.yasha', '.j2ext', '.jinja-ext')
| 1.140625
| 1
|
opts.py
|
YBZh/Label-Propagation-with-Augmented-Anchors
| 18
|
12778168
|
import argparse
def opts():
parser = argparse.ArgumentParser(description='Train alexnet on the cub200 dataset',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--data_path_source', type=str, default='',
help='Root of train data set of the source domain')
parser.add_argument('--data_path_source_t', type=str, default='',
help='Root of train data set of the target domain')
parser.add_argument('--data_path_target', type=str, default='',
help='Root of the test data set')
parser.add_argument('--src', type=str, default='amazon',
help='choose between amazon | dslr | webcam')
parser.add_argument('--src_t', type=str, default='webcam',
help='choose between amazon | dslr | webcam')
parser.add_argument('--tar', type=str, default='webcam',
help='choose between amazon | dslr | webcam')
parser.add_argument('--num_classes', type=int, default=31,
help='number of classes of data used to fine-tune the pre-trained model')
# Optimization options
parser.add_argument('--epochs', '-e', type=int, default=200, help='Number of epochs to train')
parser.add_argument('--batch_size', type=int, default=64, help='Batch size of the source data.')
parser.add_argument('--lr', '--learning_rate', type=float, default=0.01, help='The Learning Rate.')
parser.add_argument('--lrw', type=float, default=1.0, help='The Learning Rate.')
parser.add_argument('--momentum', '-m', type=float, default=0.9, help='Momentum.')
parser.add_argument('--weight_decay', '-wd', type=float, default=0.0001, help='Weight decay (L2 penalty).')
parser.add_argument('--schedule', type=str, default='rev', help='rev | constant')
parser.add_argument('--gamma', type=float, default=0.75, help='2.25 (visda) and 0.75 (others).')
# checkpoints
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--resume', type=str, default='', help='Checkpoints path to resume(default none)')
parser.add_argument('--pretrained_checkpoint', type=str, default='', help='Pretrained checkpoint to resume (default none)')
parser.add_argument('--test_only', '-t', action='store_true', help='Test only flag')
#### graph
parser.add_argument('--dis_gra', type=str, default='l2', help='dis for graph')
parser.add_argument('--cor', type=float, default=1.0, help='cor in the computation of l2 distance')
parser.add_argument('--TopkGraph', action='store_true', help='full graph 2 topk graph')
parser.add_argument('--graphk', type=int, default=10, help='KNN graph')
parser.add_argument('--AlphaGraph', type=float, default=0.5, help='level for propagation.')
    parser.add_argument('--noise_level', type=float, default=0.1, help='noise level added to the graph')
    parser.add_argument('--noise_flag', action='store_true', help='whether to add noise to the graph')
# Architecture
parser.add_argument('--arch', type=str, default='resnet101', help='Model name')
    parser.add_argument('--img_process_t', type=str, default='simple', help='image preprocessing for the target domain')
    parser.add_argument('--img_process_s', type=str, default='simple', help='image preprocessing for the source domain')
parser.add_argument('--flag', type=str, default='original', help='flag for different settings')
parser.add_argument('--type', type=str, default='type1', help='type1 | type2 | type3')
parser.add_argument('--dis', type=str, default='cross_entropy', help='cross_entropy | kl | l1')
parser.add_argument('--pretrained', action='store_true', help='whether using pretrained model')
parser.add_argument('--per_category', type=int, default=4, help='number of domains')
parser.add_argument('--fea_dim', type=int, default=2048, help='feature dim')
parser.add_argument('--uniform_type_s', type=str, default='soft', help='hard | soft | none')
parser.add_argument('--uniform_type_t', type=str, default='soft', help='hard | soft | none')
parser.add_argument('--dsbn', action='store_true', help='whether use domain specific bn')
parser.add_argument('--fixbn', action='store_true', help='whether fix the ImageNet pretrained BN layer')
parser.add_argument('--OurMec', action='store_true', help='whether use our cross entropy style MEC | original mec')
parser.add_argument('--OurPseudo', action='store_true', help='whether use cluster label for cross entropy directly | tangs')
parser.add_argument('--category_mean', action='store_true', help='Only True for visda, acc calculated over categories')
parser.add_argument('--clufrq_dec', action='store_true', help='whether decrease the cluster freq.')
parser.add_argument('--threed', action='store_true', help='ori + aug + grey | ori + grey.')
parser.add_argument('--only_lrw', action='store_true', help='lrw weight | lamda')
parser.add_argument('--niter', type=int, default=500, help='iteration of clustering')
parser.add_argument('--pseudo_type', type=str, default='cluster', help='cluster (spherical_kmeans cluster) or lp (label propagation)')
    parser.add_argument('--l2_process', action='store_true', help='whether to L2-normalize features')
    parser.add_argument('--spherical_kmeans', action='store_true', help='whether to use spherical k-means for clustering')
parser.add_argument('--entropy_weight', action='store_true', help='whether adopt the prediction entropy of LP prediction as weight')
parser.add_argument('--S4LP', type=str, default='all', help='all | cluster | center')
parser.add_argument('--LPSolver', type=str, default='Itera', help='Itera | CloseF')
parser.add_argument('--LPType', type=str, default='lgc', help='lgc | hmn | parw | omni')
parser.add_argument('--alpha', type=float, default=0.99, help='hyper-parameter.')
parser.add_argument('--lamb', type=float, default=1.0, help='hyper-parameter')
parser.add_argument('--NC4LP', type=int, default=3, help='number of clusters for each category in clustering')
    parser.add_argument('--LPIterNum', type=int, default=15, help='number of label propagation iterations')
parser.add_argument('--LPIterationType', type=str, default='add', help='replace | add')
    parser.add_argument('--min_num_cate', type=int, default=3, help='lowest number of images in each class')
parser.add_argument('--filter_low', action='store_true', help='filter the samples with low prediction confidence')
parser.add_argument('--cos_threshold', type=float, default=0.05, help='hyper-parameter.')
    parser.add_argument('--weight_type', type=str, default='cas_ins', help='weighting scheme for pseudo-labels')
parser.add_argument('--graph_gama', type=int, default=1, help='for graph construction, follow manifold-based search')
parser.add_argument('--dis_margin', type=float, default=1.0, help='hyper-parameter.')
parser.add_argument('--moving_weight', type=float, default=0.7, help='hyper-parameter.')
# i/o
parser.add_argument('--log', type=str, default='./checkpoints', help='Log folder')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
    parser.add_argument('--test_freq', default=10, type=int,
                        help='test frequency (default: 10)')
parser.add_argument('--cluster_freq', default=1, type=int,
help='clustering frequency (default: 1)')
parser.add_argument('--print_freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
    parser.add_argument('--score_frep', default=300, type=int,
                        metavar='N', help='score logging frequency (default: 300)')
args = parser.parse_args()
args.data_path_source_t = args.data_path_source
args.data_path_target = args.data_path_source
args.src_t = args.tar
args.log = args.log + '_' + args.src + '2' + args.tar + '_' + args.arch + '_' + args.flag + '_' + args.type + '_' + \
args.dis + '_' + args.uniform_type_s + '_' + args.pseudo_type + str(args.lrw) + '_' + str(args.cos_threshold) + args.dis_gra
return args
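# Hypothetical usage sketch (flag values are illustrative, not from this repo):
#   args = opts()  # e.g. invoked as: python main.py --src amazon --tar webcam --arch resnet101
#   print(args.log)  # -> './checkpoints_amazon2webcam_resnet101_original_type1_...'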
| 2.734375
| 3
|
rcsb/workflow/targets/ProteinTargetSequenceExecutionWorkflow.py
|
rcsb/py-rcsb_workflow
| 0
|
12778169
|
<filename>rcsb/workflow/targets/ProteinTargetSequenceExecutionWorkflow.py
##
# File: ProteinTargetSequenceExecutionWorkflow.py
# Author: <NAME>
# Date: 25-Jun-2021
#
# Updates:
#
##
"""
Execution workflow for protein target data ETL operations.
"""
__docformat__ = "google en"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__license__ = "Apache 2.0"
import logging
import os
import platform
import resource
import time
from rcsb.utils.taxonomy.TaxonomyProvider import TaxonomyProvider
from rcsb.workflow.targets.ProteinTargetSequenceWorkflow import ProteinTargetSequenceWorkflow
from rcsb.utils.config.ConfigUtil import ConfigUtil
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s]-%(module)s.%(funcName)s: %(message)s")
logger = logging.getLogger()
HERE = os.path.abspath(os.path.dirname(__file__))
class ProteinTargetSequenceExecutionWorkflow(object):
def __init__(self):
self.__mockTopPath = None
configPath = os.path.join(HERE, "exdb-config-example.yml")
configName = "site_info_remote_configuration"
self.__cfgOb = ConfigUtil(configPath=configPath, defaultSectionName=configName, mockTopPath=self.__mockTopPath)
self.__cachePath = os.path.join(HERE, "CACHE")
#
self.__remotePrefix = None
self.__startTime = time.time()
logger.info("Starting at %s", time.strftime("%Y %m %d %H:%M:%S", time.localtime()))
def resourceCheck(self):
unitS = "MB" if platform.system() == "Darwin" else "GB"
rusageMax = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
logger.info("Maximum resident memory size %.4f %s", rusageMax / 10 ** 6, unitS)
endTime = time.time()
logger.info("Completed at %s (%.4f seconds)\n", time.strftime("%Y %m %d %H:%M:%S", time.localtime()), endTime - self.__startTime)
def cacheTaxonomy(self):
"""Cache NCBI taxonomy database files"""
ok = False
try:
tU = TaxonomyProvider(cachePath=self.__cachePath, useCache=False, cleanup=False)
ok = tU.testCache()
except Exception as e:
logger.exception("Failing with %s", str(e))
return ok
def fetchUniProtTaxonomy(self):
"""Reload UniProt taxonomy mapping"""
ok = False
try:
ptsW = ProteinTargetSequenceWorkflow(self.__cfgOb, self.__cachePath)
ok = ptsW.reloadUniProtTaxonomy()
except Exception as e:
logger.exception("Failing with %s", str(e))
return ok
def updateUniProtTaxonomy(self):
"""Test case - initialize the UniProt taxonomy provider (from scratch ~3482 secs)"""
ok = False
try:
ptsW = ProteinTargetSequenceWorkflow(self.__cfgOb, self.__cachePath)
ok = ptsW.updateUniProtTaxonomy()
except Exception as e:
logger.exception("Failing with %s", str(e))
return ok
def fetchProteinEntityData(self):
"""Export RCSB protein entity sequence FASTA, taxonomy, and sequence details"""
ok = False
try:
ptsW = ProteinTargetSequenceWorkflow(self.__cfgOb, self.__cachePath)
ok = ptsW.exportRCSBProteinEntityFasta()
except Exception as e:
logger.exception("Failing with %s", str(e))
return ok
def fetchChemicalReferenceMappingData(self):
"""Export RCSB chemical reference identifier mapping details"""
ok = False
try:
ptsW = ProteinTargetSequenceWorkflow(self.__cfgOb, self.__cachePath)
ok = ptsW.exportRCSBChemRefMapping()
except Exception as e:
logger.exception("Failing with %s", str(e))
return ok
def fetchLigandNeighborMappingData(self):
"""Export RCSB ligand neighbor mapping details"""
ok = False
try:
ptsW = ProteinTargetSequenceWorkflow(self.__cfgOb, self.__cachePath)
ok = ptsW.exportRCSBLigandNeighborMapping()
except Exception as e:
logger.exception("Failing with %s", str(e))
return ok
def exportFasta(self):
"""Export FASTA target files (and load Pharos from source)"""
ok = False
try:
ptsW = ProteinTargetSequenceWorkflow(self.__cfgOb, self.__cachePath)
ok = ptsW.exportTargetsFasta(useCache=True, addTaxonomy=True, reloadPharos=True, fromDbPharos=True, resourceNameList=["sabdab", "card", "drugbank", "chembl", "pharos"])
except Exception as e:
logger.exception("Failing with %s", str(e))
return ok
def createSearchDatabases(self):
"""Create search databases"""
ok = False
try:
ptsW = ProteinTargetSequenceWorkflow(self.__cfgOb, self.__cachePath)
ok = ptsW.createSearchDatabases(resourceNameList=["sabdab", "card", "drugbank", "chembl", "pharos", "pdbprent"], addTaxonomy=True, timeOutSeconds=3600, verbose=False)
except Exception as e:
logger.exception("Failing with %s", str(e))
return ok
def searchDatabases(self):
"""Search sequence databases"""
ok = False
try:
ptsW = ProteinTargetSequenceWorkflow(self.__cfgOb, self.__cachePath)
ok1 = ptsW.search(
referenceResourceName="pdbprent", resourceNameList=["sabdab", "card", "drugbank", "chembl", "pharos"], identityCutoff=0.95, sensitivity=4.5, timeOutSeconds=1000
)
ok2 = ptsW.search(referenceResourceName="pdbprent", resourceNameList=["card"], identityCutoff=0.95, sensitivity=4.5, timeOutSeconds=1000, useBitScore=True)
ok = ok1 and ok2
except Exception as e:
logger.exception("Failing with %s", str(e))
return ok
def buildFeatures(self):
"""Build features from search results"""
ok = False
try:
ptsW = ProteinTargetSequenceWorkflow(self.__cfgOb, self.__cachePath)
ok = ptsW.buildFeatureData(referenceResourceName="pdbprent", resourceNameList=["sabdab", "card", "imgt"], useTaxonomy=True, backup=True, remotePrefix=self.__remotePrefix)
except Exception as e:
logger.exception("Failing with %s", str(e))
return ok
def buildActivityData(self):
"""Build features from search results"""
ok = False
try:
ptsW = ProteinTargetSequenceWorkflow(self.__cfgOb, self.__cachePath)
ok = ptsW.buildActivityData(referenceResourceName="pdbprent", resourceNameList=["chembl", "pharos"], backup=True, remotePrefix=self.__remotePrefix)
except Exception as e:
logger.exception("Failing with %s", str(e))
return ok
def buildCofactorData(self):
"""Build features from search results"""
ok = False
try:
ptsW = ProteinTargetSequenceWorkflow(self.__cfgOb, self.__cachePath)
ok = ptsW.buildCofactorData(referenceResourceName="pdbprent", resourceNameList=["chembl", "pharos", "drugbank"], backup=True, remotePrefix=self.__remotePrefix)
except Exception as e:
logger.exception("Failing with %s", str(e))
return ok
#
# --- --- --- ---
def fullWorkflow():
"""Entry point for the full targets sequence and cofactor update workflow."""
ptsWf = ProteinTargetSequenceExecutionWorkflow()
ok = True
    ok = ptsWf.cacheTaxonomy() and ok
    ok = ptsWf.fetchUniProtTaxonomy() and ok
ok = ptsWf.fetchProteinEntityData() and ok
ok = ptsWf.fetchChemicalReferenceMappingData() and ok
ok = ptsWf.fetchLigandNeighborMappingData() and ok
ok = ptsWf.exportFasta() and ok
ok = ptsWf.createSearchDatabases() and ok
ok = ptsWf.searchDatabases() and ok
ok = ptsWf.buildFeatures() and ok
ok = ptsWf.buildActivityData() and ok
ok = ptsWf.buildCofactorData() and ok
ptsWf.resourceCheck()
return ok
if __name__ == "__main__":
status = fullWorkflow()
print("Full workflow completion status (%r)", status)
| 2.046875
| 2
|
Module3_Data_for_ML/Linear_regression.py
|
EllieBrakoniecki/AICOREDATASCIENCE
| 0
|
12778170
|
#%%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model, metrics, preprocessing
from sklearn.model_selection import train_test_split
import itertools
import typing
class LinearRegression():
def __init__(self, n_features, optimiser):
np.random.seed(2)
self.w = np.random.randn(n_features)
self.b = np.random.randn()
self.optimiser = optimiser
def fit(self, X, y):
'''
Fit model to data
'''
losses = []
for epoch in range(self.optimiser.epochs):
y_pred = self.predict(X)
new_w, new_b = self.optimiser.step(self.w, self.b, X, y_pred, y)
self._update_params(new_w, new_b)
losses.append(LinearRegression.mse_loss(y_pred, y))
LinearRegression.plot_loss(losses)
print('Final cost:', losses[-1])
print('Weight values:', self.w)
print('Bias values:', self.b)
def predict(self, X):
'''
Calculate prediction
'''
y_pred = np.dot(X, self.w) + self.b
return y_pred
@staticmethod
def mse_loss(y_pred, y_true):
'''
Calculate mean squared error
'''
m = y_pred.size
errors = y_pred - y_true
mse = 1/m * np.dot(errors.T, errors)
return mse
@staticmethod
def plot_loss(losses):
'''
Plot losses
'''
plt.figure()
plt.ylabel('Cost')
plt.xlabel('Epoch')
plt.plot(losses)
plt.show()
def _update_params(self, w, b):
'''
Update parameters
'''
self.w = w
self.b = b
return w, b
def score(self, y_pred, y_true):
'''
Calculate R2 score
'''
u = np.dot((y_pred - y_true).T, (y_pred - y_true))
y_true_mean = np.full(y_true.shape, np.mean(y_true))
v = np.dot((y_true_mean - y_true).T, (y_true_mean - y_true))
R2 = 1 - u/v
return R2
class SGDOptimiser:
def __init__(self, alpha, epochs):
self.alpha = alpha
self.epochs = epochs
    def _calc_deriv(self, X, y_pred, y_true):
        '''
        Calculate derivative of mean squared error (loss) with respect to parameters
        '''
        m = y_pred.size
        errors = y_pred - y_true
        # dL/dw = (2/m) * X^T (y_pred - y_true); dL/db = (2/m) * sum(errors)
        dLdw = 2 / m * np.dot(X.T, errors)
        dLdb = 2 / m * np.sum(errors)
        return dLdw, dLdb
def step(self, w, b, X, y_pred, y_true):
'''
Calculate updated paramters to decrease mean square error
'''
dLdw, dLdb = self._calc_deriv(X, y_pred, y_true)
new_w = w - self.alpha * dLdw
new_b = b - self.alpha * dLdb
return new_w, new_b
class DataLoader:
def __init__(self, X, y):
idx = np.random.permutation(X.shape[0])
self.X = X[idx]
self.y = y[idx]
    def yield_data(self, n):
        # hand out the next n samples and remove them from the loader
        X_yield = self.X[:n]
        y_yield = self.y[:n]
        self.X = self.X[n:]
        self.y = self.y[n:]
        return X_yield, y_yield
    def add_data(self, X_new, y_new):
        self.X = np.append(self.X, X_new)
        self.y = np.append(self.y, y_new)
#%%
np.random.seed(2)
X, y = datasets.fetch_california_housing(return_X_y=True)
scaler = preprocessing.StandardScaler()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
X_test, X_val, y_test, y_val = train_test_split(X_test, y_test, test_size=0.5)
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
X_val = scaler.transform(X_val)
np.random.seed(2)
epochs = 1000
a = 0.001
optimiser = SGDOptimiser(alpha=a, epochs=epochs)
model = LinearRegression(optimiser=optimiser, n_features=X_train.shape[1])
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
score = model.score(y_pred,y_train)
print(score)
# %%
| 3.3125
| 3
|
src/LetterFrequency.py
|
abench/spectrum_of_line_codes
| 0
|
12778171
|
<reponame>abench/spectrum_of_line_codes
import sys
def LetterFrequency(fname):
    # one slot per possible byte value
    Freq = [0.0] * 256
    while True:
        b = fname.read(1)
        if len(b) == 0:  # end of file
            break
        Freq[b[0]] = Freq[b[0]] + 1
    return Freq
def Sum(values):
    s = 0
    for v in values:
        s = s + v
    return s
def PrintResult(freqs, total, stream):
    for i in range(len(freqs)):
        print(i, '[', chr(i), ']=', freqs[i] / total, file=stream)
def main():
    # read in binary mode so every byte value 0-255 is counted
    with open(sys.argv[1], 'rb') as f:
        Freq = LetterFrequency(f)
    total = Sum(Freq)
    PrintResult(Freq, total, sys.stdout)
if __name__ == '__main__':
    main()
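# Usage sketch (file name is illustrative):
#   python LetterFrequency.py some_file.bin
# prints the relative frequency of every byte value found in the file.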
| 2.65625
| 3
|
python-the-hard-way/27-memorizing-logic.py
|
Valka7a/python-playground
| 0
|
12778172
|
<filename>python-the-hard-way/27-memorizing-logic.py
# Exercise 27: Memorizing Logic
#The Truth Terms:
# and
# or
# not
# != (not equal)
# == (equal)
# >= (greater-than-equal)
# <= (less-than-equal)
# True
# False
# The Truth Tables
# NOT Table
#____________________________
#| NOT | TRUE? |
#----------------------------
#| not False | True |
#| not True | False |
#----------------------------
# OR Table AND Table
#____________________________ ________________________________
#| OR | TRUE? | | AND | TRUE? |
#---------------------------- ---------------------------------
#| True or False | True | | True and False | False |
#| True or True | True | | True and True | True |
#| False or True | True | | False and True | False |
#| False or False | False | | False and False | False |
#---------------------------- ---------------------------------
# NOT OR Table NOT AND Table
#____________________________________ _________________________________
#| NOT OR | TRUE? | | NOT AND | TRUE? |
#------------------------------------ ---------------------------------
#| not (True or False) | False | | not (True and False) | True |
#| not (True or True) | False | | not (True and True) | False |
#| not (False or True) | False | | not (False and True) | True |
#| not (False or False) | True | | not (False and False) | True |
#------------------------------------ ---------------------------------
# !=(Not Equal) Table ==(Equal) Table
#____________________ _____________________
#| != | TRUE? | | == | TRUE? |
#-------------------- ---------------------
#| 1 != 0 | True | | 1 == 0 | False |
#| 1 != 1 | False | | 1 == 1 | True |
#| 0 != 1 | True | | 0 == 1 | False |
#| 0 != 0 | False | | 0 == 0 | True |
#-------------------- ---------------------
| 4
| 4
|
app.py
|
edumoraisv/testegeekieo
| 0
|
12778173
|
#----------------------------------------------------------------------------#
# Imports
#----------------------------------------------------------------------------#
from flask import Flask, redirect, render_template, request, url_for
import logging
from logging import Formatter, FileHandler
from forms import *
import os
from geekie_api_client import GeekieAPIClient
from geekie_oauth import OAuthClient
#----------------------------------------------------------------------------#
# App Config.
#----------------------------------------------------------------------------#
app = Flask(__name__)
app.config.from_object("config")
app.config["geekie_api_client"] = GeekieAPIClient(
shared_secret=app.config.get("GEEKIE_API_SHARED_SECRET"),
)
#----------------------------------------------------------------------------#
# Controllers.
#----------------------------------------------------------------------------#
@app.route("/")
def home():
return render_template("pages/home.html")
@app.route("/who-am-i", methods=["POST"])
def who_am_i():
api_client = app.config.get("geekie_api_client")
remote_organization_id = api_client.who_am_i(request.form["organization_id"]).get(
"organization_id"
)
return redirect(url_for("show_organization", organization_id=remote_organization_id))
@app.route("/organizations/<organization_id>")
def show_organization(organization_id):
return render_template("pages/show_organization.html", organization_id=organization_id)
@app.route("/organizations/<organization_id>/members")
def list_organization_memberships(organization_id):
api_client = app.config.get("geekie_api_client")
api_response = api_client.get_all_memberships(organization_id)
memberships = api_response["results"]
oauth_params = {}
for membership in memberships:
oauth_client = OAuthClient(
shared_secret=app.config.get("GEEKIE_API_SHARED_SECRET"),
organization_id=organization_id,
user_id=membership["id"]
)
oauth_params[membership["id"]] = oauth_client.get_oauth_params()
return render_template(
"pages/members.html",
organization_id=organization_id,
memberships=memberships,
oauth_params=oauth_params,
)
@app.route("/organizations/<organization_id>/memberships", methods=["POST"])
def create_membership(organization_id):
api_client = app.config.get("geekie_api_client")
form_data = request.form
membership_data = {
"full_name": form_data["full_name"],
}
api_client.create_membership(
organization_id=organization_id,
membership_data=membership_data
)
return redirect(
url_for("list_organization_memberships", organization_id=organization_id)
)
@app.route("/organizations/<organization_id>/memberships/<membership_id>/edit", methods=["GET"])
def edit_membership(organization_id, membership_id):
api_client = app.config.get("geekie_api_client")
membership = api_client.get_membership(organization_id, membership_id)
return render_template(
"pages/edit_member.html",
organization_id=organization_id,
membership_id=membership_id,
membership=membership,
)
@app.route("/organizations/<organization_id>/memberships/<membership_id>", methods=["POST"])
def update_membership(organization_id, membership_id):
api_client = app.config.get("geekie_api_client")
form_data = request.form
membership_data = {
"content_group_ids": [],
"full_name": form_data["full_name"],
"roles": form_data["roles"].split(", "),
"tags": form_data["tags"].split(", "),
"deleted": form_data.get("deleted", "false"),
"external_id": form_data.get("external_id", ""),
}
api_client.update_membership(
organization_id=organization_id,
membership_id=membership_id,
membership_data=membership_data,
)
return redirect(
url_for("list_organization_memberships", organization_id=organization_id)
)
# Error handlers.
@app.errorhandler(500)
def internal_error(error):
#db_session.rollback()
return render_template("errors/500.html"), 500
@app.errorhandler(404)
def not_found_error(error):
return render_template("errors/404.html"), 404
if not app.debug:
file_handler = FileHandler("error.log")
file_handler.setFormatter(
Formatter("%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]")
)
app.logger.setLevel(logging.INFO)
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.info("errors")
#----------------------------------------------------------------------------#
# Launch.
#----------------------------------------------------------------------------#
# Default port:
if __name__ == "__main__":
app.run()
# Or specify port manually:
"""
if __name__ == "__main__":
port = int(os.environ.get("PORT", 5000))
app.run(host="0.0.0.0", port=port)
"""
| 2.0625
| 2
|
spam/forms.py
|
iamsushanth/sms-spam-detector
| 1
|
12778174
|
<reponame>iamsushanth/sms-spam-detector
from django import forms
class SearchForm(forms.Form):
q = forms.CharField(label='',widget=forms.Textarea(
attrs={
'class':'search-query form-control',
'placeholder':'Search'
}
))
| 2.234375
| 2
|
bin/train_word_vectors.py
|
ivigamberdiev/spaCy
| 12
|
12778175
|
<reponame>ivigamberdiev/spaCy
#!/usr/bin/env python
from __future__ import print_function, unicode_literals, division
import logging
from pathlib import Path
from collections import defaultdict
from gensim.models import Word2Vec
from preshed.counter import PreshCounter
import plac
import spacy
logger = logging.getLogger(__name__)
class Corpus(object):
def __init__(self, directory, min_freq=10):
self.directory = directory
self.counts = PreshCounter()
self.strings = {}
self.min_freq = min_freq
def count_doc(self, doc):
# Get counts for this document
for word in doc:
self.counts.inc(word.orth, 1)
return len(doc)
def __iter__(self):
for text_loc in iter_dir(self.directory):
with text_loc.open("r", encoding="utf-8") as file_:
text = file_.read()
yield text
def iter_dir(loc):
dir_path = Path(loc)
for fn_path in dir_path.iterdir():
if fn_path.is_dir():
for sub_path in fn_path.iterdir():
yield sub_path
else:
yield fn_path
@plac.annotations(
lang=("ISO language code"),
in_dir=("Location of input directory"),
out_loc=("Location of output file"),
n_workers=("Number of workers", "option", "n", int),
size=("Dimension of the word vectors", "option", "d", int),
window=("Context window size", "option", "w", int),
min_count=("Min count", "option", "m", int),
negative=("Number of negative samples", "option", "g", int),
nr_iter=("Number of iterations", "option", "i", int),
)
def main(
lang,
in_dir,
out_loc,
negative=5,
n_workers=4,
window=5,
size=128,
min_count=10,
nr_iter=2,
):
logging.basicConfig(
format="%(asctime)s : %(levelname)s : %(message)s", level=logging.INFO
)
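    # NOTE (version assumption): this targets the pre-4.0 gensim API
    # (`size=`, `model.iter`, `train(corpus)` without `total_examples`);
    # gensim >= 4.0 renamed these to `vector_size` and `epochs`.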
model = Word2Vec(
size=size,
window=window,
min_count=min_count,
workers=n_workers,
sample=1e-5,
negative=negative,
)
nlp = spacy.blank(lang)
corpus = Corpus(in_dir)
total_words = 0
total_sents = 0
for text_no, text_loc in enumerate(iter_dir(corpus.directory)):
with text_loc.open("r", encoding="utf-8") as file_:
text = file_.read()
total_sents += text.count("\n")
doc = nlp(text)
total_words += corpus.count_doc(doc)
logger.info(
"PROGRESS: at batch #%i, processed %i words, keeping %i word types",
text_no,
total_words,
len(corpus.strings),
)
model.corpus_count = total_sents
model.raw_vocab = defaultdict(int)
for orth, freq in corpus.counts:
if freq >= min_count:
model.raw_vocab[nlp.vocab.strings[orth]] = freq
model.scale_vocab()
model.finalize_vocab()
model.iter = nr_iter
model.train(corpus)
model.save(out_loc)
if __name__ == "__main__":
plac.call(main)
| 2.5
| 2
|
chapter_05/example_0001.py
|
yuchen352416/leetcode-example
| 0
|
12778176
|
<reponame>yuchen352416/leetcode-example
#!/usr/bin/python3
from typing import List
class Solution:
def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
"""
        description: merge two sorted arrays (nums2 into nums1 in place)
type nums1: List[int]
type m: int
type nums2: List[int]
type n: int
rtype: None
"""
if len(nums1) == 0:
for x in nums2[:n]:
nums1.append(x)
return
if len(nums2) == 0:
tempNums1 = nums1[:m]
nums1.clear()
for x in tempNums1:
nums1.append(x)
return
tempNums1 = nums1.copy()
i = 0
j = 0
nums1.clear()
while i < m or j < n:
if i < m and (j == n or tempNums1[i] < nums2[j]):
nums1.append(tempNums1[i])
i += 1
elif j < n:
nums1.append(nums2[j])
j += 1
        # A much shorter solution from a seasoned coder (admittedly hard to follow at first):
# nums1[m:m + n] = nums2
# nums1.sort()
if __name__ == '__main__':
    # a quick hello-world style sanity check
nums1 = [2, 0]
nums2 = [1]
m = 1
n = 1
Solution().merge(nums1, m, nums2, n)
print(nums1)
| 3.796875
| 4
|
core/base_page.py
|
zoltancsontos/pystack-framework
| 0
|
12778177
|
import os
from falcon import falcon
from settings.settings import SETTINGS
from chameleon import PageTemplateLoader
class BasePage(object):
"""
Generic base page object
"""
model = None
property_types = []
default_404 = SETTINGS['VIEWS']['DEFAULT_404_TEMPLATE']
templates_dir = 'templates/'
template = 'index.html'
data = {}
allowed_methods = ['GET']
group_access = SETTINGS['PERMISSIONS']['GROUPS']
def load_templates(self, base_dir=None):
"""
Loads the specified templates
Args:
base_dir: string|None
Returns:
"""
base_dir_path = base_dir if base_dir else self.templates_dir
app_path = os.path.abspath(base_dir_path)
return PageTemplateLoader(app_path)
def get_data(self, req):
"""
Method to override for data retrieval
Args:
req: object
Returns: mixed
"""
return self.data
def __forbidden_handler__(self, req, resp):
"""
Default forbidden case handler.
Explanation: As this is the BasePage super class
anything except GET should be forbidden you should use
BaseResource instead of page and create a proper REST api
Args:
req:
resp:
Returns:
"""
templates = self.load_templates(base_dir="/templates")
template = templates[self.default_404]
resp.status = falcon.HTTP_404
resp.content_type = "text/html"
data = {
'req': req
}
resp.body = (template(data=data))
def on_get(self, req, resp):
"""
Default HTTP GET method definition
Args:
req: object
resp: object
Returns:
"""
data = self.get_data(req)
templates = self.load_templates()
        try:
            template = templates[self.template]
        except ValueError:
            # template could not be loaded; render the 404 page and stop
            self.__forbidden_handler__(req, resp)
            return
resp.status = falcon.HTTP_200
resp.content_type = "text/html"
resp.body = (template(data=data))
def on_post(self, req, resp):
"""
Default POST http method handler
Args:
req:
resp:
Returns:
"""
self.__forbidden_handler__(req, resp)
def on_put(self, req, resp):
"""
Default PUT http method handler
Args:
req:
resp:
Returns:
"""
self.__forbidden_handler__(req, resp)
def on_delete(self, req, resp):
"""
Default DELETE http method handler
Args:
req:
resp:
Returns:
"""
self.__forbidden_handler__(req, resp)
def on_patch(self, req, resp):
"""
Default PATCH http method handler
Args:
req:
resp:
Returns:
"""
self.__forbidden_handler__(req, resp)
| 2.34375
| 2
|
src/load_predicate_embedding.py
|
heindorf/www19-fair-classification
| 4
|
12778178
|
<reponame>heindorf/www19-fair-classification
# -----------------------------------------------------------------------------
# WWW 2019 Debiasing Vandalism Detection Models at Wikidata
#
# Copyright (c) 2019 <NAME>, <NAME>, <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
import collections
import logging
import numpy as np
import pandas as pd
from scipy.sparse import vstack
from sklearn.feature_selection import SelectKBest
from sklearn.preprocessing import Binarizer
from load_csr_matrix import load_csr_matrix
from transformers import FrequencyTransformer
FILE_ITEM_PREDICATES = '../../data/item-properties/item-properties.bz2'
PATH_FEATURES = '../../data/features/'
PATH_TRAIN = PATH_FEATURES + 'training/' # noqa
PATH_VAL = PATH_FEATURES + 'validation/' # noqa
PATH_TEST = PATH_FEATURES + 'test/' # noqa
def load_matrices():
matrices = collections.OrderedDict()
path = PATH_TRAIN + 'embeddings/'
matrices['X_S_train'] = path + 'subjectOut'
matrices['X_P_train'] = path + 'predicate'
matrices['X_O_train'] = path + 'objectIn'
matrices['X_OO_train'] = path + 'objectOut'
path = PATH_VAL + 'embeddings/'
matrices['X_S_val'] = path + 'subjectOut'
matrices['X_P_val'] = path + 'predicate'
matrices['X_O_val'] = path + 'objectIn'
matrices['X_OO_val'] = path + 'objectOut'
path = PATH_TEST + 'embeddings/'
matrices['X_S_test'] = path + 'subjectOut'
matrices['X_P_test'] = path + 'predicate'
matrices['X_O_test'] = path + 'objectIn'
matrices['X_OO_test'] = path + 'objectOut'
for key, X in matrices.items():
logging.debug('load {}...'.format(key))
matrices[key] = load_csr_matrix(X)
meta = collections.OrderedDict()
meta['n_train'] = matrices['X_O_train'].shape[0]
meta['n_val'] = matrices['X_O_val'].shape[0]
meta['n_test'] = matrices['X_O_test'].shape[0]
data = collections.OrderedDict()
data['X_S_all'] = vstack([
matrices['X_S_train'],
matrices['X_S_val'],
matrices['X_S_test']
])
data['X_P_all'] = vstack([
matrices['X_P_train'],
matrices['X_P_val'],
matrices['X_P_test']
])
data['X_O_all'] = vstack([
matrices['X_O_train'],
matrices['X_O_val'],
matrices['X_O_test']
])
data['X_OO_all'] = vstack([
matrices['X_OO_train'],
matrices['X_OO_val'],
matrices['X_OO_test']
])
meta['X_S_all'] = np.array(
['S' + str(p) for p in range(data['X_S_all'].shape[1])])
meta['X_P_all'] = np.array(
['P' + str(p) for p in range(data['X_P_all'].shape[1])])
meta['X_O_all'] = np.array(
['O' + str(p) for p in range(data['X_O_all'].shape[1])])
meta['X_OO_all'] = np.array(
['OO' + str(p) for p in range(data['X_OO_all'].shape[1])])
return data, meta
def binarize_features(data):
encoder = Binarizer(threshold=0.5, copy=False)
for key, X in data.items():
data[key] = encoder.fit_transform(X)
def select_item_predicates_at_end_of_training_set(data, meta):
item_predicates = pd.read_csv(FILE_ITEM_PREDICATES, header=None)
item_predicates = item_predicates.values.flatten()
def _remove_attribute_predicates_from_X(X):
# mask = np.zeros((1, data['X_object_pred_all'].shape[1]))
# mask[0, item_predicates] = 1
# return X.multiply(mask).tocsr()
return X.tocsc()[:, item_predicates].tocsr()
for key, X in data.items():
logging.debug(key)
data[key] = _remove_attribute_predicates_from_X(X)
meta[key] = meta[key][item_predicates]
def count_nonzero(X, _):
return np.asarray((X != 0).sum(axis=0)).ravel()
def select_features(
data, meta, y, slice_fit, score_func=count_nonzero, k=100):
if y is None:
rand_X = next(iter(data.values()))
y = np.zeros(rand_X[slice_fit].shape[0])
logging.debug(slice_fit)
for key in data:
logging.debug(data[key].shape)
selector = SelectKBest(score_func=score_func, k=k)
selector = selector.fit(data[key][slice_fit], y[slice_fit])
data[key] = selector.transform(data[key])
meta[key] = meta[key][selector.get_support()]
def frequency_encoding(data, slice_fit):
# slice_fit = slice(0, n_train + n_val)
# convert to DataFrame
df_freq = pd.DataFrame()
df_freq['subjectPredEmbedFrequency'] = rows_to_str(data['X_S_all'])
df_freq['objectPredEmbedFrequency'] = rows_to_str(data['X_O_all'])
df_freq['objectOutPredEmbedFrequency'] = rows_to_str(data['X_OO_all'])
transformer = FrequencyTransformer()
transformer = transformer.fit(
df_freq[['subjectPredEmbedFrequency']][slice_fit])
df_freq[['subjectPredEmbedFrequency']] = transformer.transform(
df_freq[['subjectPredEmbedFrequency']])
transformer = FrequencyTransformer()
transformer = transformer.fit(
df_freq[['objectPredEmbedFrequency']][slice_fit])
df_freq[['objectPredEmbedFrequency']] = transformer.transform(
df_freq[['objectPredEmbedFrequency']])
transformer = FrequencyTransformer()
transformer = transformer.fit(
df_freq[['objectOutPredEmbedFrequency']][slice_fit])
df_freq[['objectOutPredEmbedFrequency']] = transformer.transform(
df_freq[['objectOutPredEmbedFrequency']])
return df_freq
def rows_to_str(array):
rows = array.tolil().rows
for i in range(len(rows)):
rows[i] = ','.join(str(elem) for elem in rows[i])
return rows
# ---------------------------------------------------------
# Internal Functions
# ---------------------------------------------------------
def _get_slice_fit(meta, use_test_set):
if use_test_set:
return slice(0, meta['n_train'] + meta['n_val'])
else:
return slice(0, meta['n_train'])
| 1.460938
| 1
|
Chapter 01/_aux/anomaly.py
|
bpbpublications/Time-Series-Forecasting-using-Deep-Learning
| 7
|
12778179
|
import matplotlib.pyplot as plt
import random
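# Sketch of what this script demonstrates: a linear trend plus Gaussian noise,
# with an amplified-noise anomaly window (indices 65-75) highlighted in red.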
if __name__ == '__main__':
random.seed(9)
length = 100
A = 5
B = .2
C = 1
trend = [A + B * i for i in range(length)]
noise = []
for i in range(length):
if 65 <= i <= 75:
noise.append(7 * C * random.gauss(0, 1))
plt.axvspan(i, i + 1, color = 'red', alpha = 0.1)
else:
noise.append(C * random.gauss(0, 1))
ts = [trend[i] + noise[i] for i in range(length)]
plt.plot(ts)
plt.xticks([])
plt.yticks([])
plt.show()
| 3.125
| 3
|
forest/distinguisher/regex_distinguisher.py
|
Marghrid/Forest
| 7
|
12778180
|
import random
import re
import time
from itertools import combinations
import z3
from forest.logger import get_logger
from forest.utils import check_conditions
from forest.visitor import ToZ3, RegexInterpreter
logger = get_logger('forest')
use_derivatives = True
# z3.set_param('smt.string_solver', 'z3str3')
class RegexDistinguisher:
def __init__(self):
self._toz3 = ToZ3()
self._printer = RegexInterpreter()
self.force_multi_distinguish = False
self.force_distinguish2 = False
def distinguish(self, programs):
logger.debug(f"Distinguishing {len(programs)}: "
f"{','.join(map(self._printer.eval, programs))}")
assert len(programs) >= 2
if not self.force_multi_distinguish and len(programs) == 2:
return self.distinguish2(programs[0], programs[1])
if self.force_distinguish2:
dist_input, keep_if_valid, keep_if_invalid, _ = \
self.distinguish2(programs[0], programs[1])
return dist_input, keep_if_valid, keep_if_invalid, programs[2:]
else:
return self.multi_distinguish(programs)
def distinguish2(self, r1, r2):
global use_derivatives
solver = z3.Solver()
solver.set('random_seed', 7)
solver.set('sat.random_seed', 7)
if use_derivatives:
try:
solver.set('smt.seq.use_derivatives', True)
solver.check()
            except:  # noqa: E722 -- option unsupported by this z3 version
                pass
z3_r1 = self._toz3.eval(r1[0])
z3_r2 = self._toz3.eval(r2[0])
dist = z3.String("distinguishing")
        ro_1 = z3.Bool("ro_1")
        solver.add(ro_1 == z3.InRe(dist, z3_r1))
        ro_2 = z3.Bool("ro_2")
solver.add(ro_2 == z3.InRe(dist, z3_r2))
solver.add(ro_1 != ro_2)
if solver.check() == z3.sat:
if len(r1[2][0]) == 0 and len(r2[2][0]) == 0:
dist_input = solver.model()[dist].as_string()
if solver.model()[ro_1]:
return dist_input, [r1], [r2], []
else:
return dist_input, [r2], [r1], []
# Find dist_input that respects conditions
r1_str = self._printer.eval(r1[0], captures=r1[2][1])
r1_conditions = list(map(lambda c: " ".join(map(str, c)), r1[2][0]))
r2_str = self._printer.eval(r2[0], captures=r2[2][1])
r2_conditions = list(map(lambda c: " ".join(map(str, c)), r2[2][0]))
while True:
dist_input = solver.model()[dist].as_string()
match = re.fullmatch(r1_str, dist_input)
if match is not None and check_conditions(r1_conditions, match):
break
match = re.fullmatch(r2_str, dist_input)
if match is not None and check_conditions(r2_conditions, match):
break
solver.add(dist != z3.StringVal(dist_input))
                if solver.check() != z3.sat:
return None, None, None, None
if solver.model()[ro_1]:
return dist_input, [r1], [r2], []
else:
return dist_input, [r2], [r1], []
else:
return None, None, None, None
def multi_distinguish(self, regexes):
start = time.time()
# Problem: cannot distinguish more than 4 regexes at once: it takes forever.
# Solution: use only 4 randomly selected regexes for the SMT maximization,
# and then add the others to the solution.
if len(regexes) <= 4:
selected_regexes = regexes
others = []
else:
random.seed('regex')
random.shuffle(regexes)
selected_regexes = regexes[:4]
others = regexes[4:]
solver = z3.Optimize()
z3_regexes = []
for regex in selected_regexes:
z3_regex = self._toz3.eval(regex)
z3_regexes.append(z3_regex)
dist = z3.String("distinguishing")
# solver.add(z3.Length(dist) <= 6)
ro_z3 = []
for i, z3_regex in enumerate(z3_regexes):
ro = z3.Bool(f"ro_{i}")
ro_z3.append(ro)
solver.add(ro == z3.InRe(dist, z3_regex))
# ro_z3[i] == true if dist matches regex[i].
big_or = []
for ro_i, ro_j in combinations(ro_z3, 2):
big_or.append(z3.Xor(ro_i, ro_j))
solver.add_soft(z3.Xor(ro_i, ro_j))
solver.add(z3.Or(big_or)) # at least one regex is distinguished
if solver.check() == z3.sat:
# print(solver.model())
print("took", round(time.time() - start, 2), "seconds")
keep_if_valid = []
keep_if_invalid = []
dist_input = str(solver.model()[dist]).strip('"')
for i, ro in enumerate(ro_z3):
if solver.model()[ro]:
keep_if_valid.append(selected_regexes[i])
else:
keep_if_invalid.append(selected_regexes[i])
smallest_regex = min(selected_regexes, key=lambda r: len(self._printer.eval(r)))
return dist_input, keep_if_valid, keep_if_invalid, others
else:
return None, None, None, None
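# Minimal standalone sketch of the z3 distinguishing idea used above
# (hypothetical regexes; guarded so it never runs on import):
if __name__ == '__main__':
    _solver = z3.Solver()
    _w = z3.String("w")
    _r1 = z3.Re("a")               # matches exactly "a"
    _r2 = z3.Star(z3.Re("a"))      # matches any number of "a"s
    _solver.add(z3.InRe(_w, _r1) != z3.InRe(_w, _r2))
    if _solver.check() == z3.sat:
        print(_solver.model()[_w])  # a distinguishing input, e.g. "" or "aa"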
| 2.515625
| 3
|
aws/context.py
|
robertcsapo/aws-lambda-python-local
| 0
|
12778181
|
import uuid
from datetime import date
import os
import humanize
class Context:
def __init__(self, function_name, function_version):
self.function_name = function_name
self.function_version = function_version
self.invoked_function_arn = "arn:aws:lambda:eu-north-1:000000000000:function:{}".format(self.function_name)
self.aws_request_id = uuid.uuid1()
self.log_group_name = "/aws/lambda/{}".format(self.function_name)
today = date.today()
self.log_stream_name = "{}/[{}]4459c970fa6d4c77aca62c95850fce54".format(today.strftime("%Y/%m/%d"), self.function_version)
        self.memory_limit_in_mb = self.memory()
    def memory(self):
        mem = int(os.popen("cat /sys/fs/cgroup/memory/memory.limit_in_bytes").read())
        self.memory_limit_in_mb = humanize.naturalsize(mem, gnu=True)
        return self.memory_limit_in_mb
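# Hypothetical local usage (function name and version are illustrative):
#   ctx = Context("my-function", "$LATEST")
#   print(ctx.invoked_function_arn, ctx.memory_limit_in_mb)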
| 2.75
| 3
|
spherov2/test/BoltTest.py
|
Cole1220/spherov2.py
| 1
|
12778182
|
# python3
#import sys
#sys.path.append('/spherov2/')
import time
from spherov2 import scanner
from spherov2.sphero_edu import EventType, SpheroEduAPI
from spherov2.types import Color
print("Testing Starting...")
print("Connecting to Bolt...")
toy = scanner.find_BOLT()
if toy is not None:
print("Connected.")
with SpheroEduAPI(toy) as droid:
print("Testing Start...")
droid.set_main_led(Color(r=0, g=255, b=0)) #Sets whole Matrix
droid.reset_aim()
droid.set_main_led(Color(r=0,g=0,b=255))
print("Luminosity: " + str(droid.get_luminosity()))
print("Accel: " + str(droid.get_acceleration()))
"""
print("Testing Main LED")
droid.set_main_led(Color(r=0, g=0, b=255)) #Sets whole Matrix
time.sleep(1)
print("Testing Front LED")
droid.set_front_led(Color(r=0, g=255, b=0)) #Sets front LED
time.sleep(1)
print("Testing Back LED")
droid.set_back_led(Color(r=255, g=0, b=0)) #Sets back LED
time.sleep(1)
print("Set Matrix Pixel")
droid.set_matrix_pixel(0, 0, Color(r=255, g=255, b=0)) #Set Matrix Pixel
time.sleep(1)
print("Set Matrix Line")
droid.set_matrix_line(1, 0, 1, 7, Color(r=255, g=0, b=255)) #Set Matrix Line
time.sleep(1)
print("Set Matrix Fill")
droid.set_matrix_fill(2, 0, 6, 6, Color(r=0, g=255, b=255)) #Set Matrix Box
time.sleep(2)
"""
droid.set_main_led(Color(r=255, g=0, b=0)) #Sets whole Matrix
print("Testing End...")
#droid.register_event(EventType.on_sensor_streaming_data, droid.SensorStreamingInfo) #how you would register to data (function name is custom)
| 2.8125
| 3
|
src/def_func.py
|
maokuntao/python-study
| 0
|
12778183
|
<reponame>maokuntao/python-study
'''
Function definition demo
Created on 2017-12-22
@author: taomaokun
'''
from my_lib import my_abs
# print(my_abs('A'))    # TypeError
# print(my_abs('-233')) # TypeError
print(my_abs(-233))
# A function name is just a reference to a function object, so it can be
# assigned to another variable, which effectively gives the function an "alias":
another_my_abs = my_abs
print(another_my_abs(-2.333))
| 3.71875
| 4
|
Swift-FHIR/fhir-parser/Python/mappings.py
|
technosoftgit/Smart_2_8_2_Swift4
| 0
|
12778184
|
<reponame>technosoftgit/Smart_2_8_2_Swift4
# Mappings for the FHIR class generator
# Which class names to map to resources and elements
classmap = {
'Any': 'Resource',
'boolean': 'bool',
'integer': 'int',
'positiveInt': 'int',
'unsignedInt': 'int',
'date': 'FHIRDate',
'dateTime': 'FHIRDate',
'instant': 'FHIRDate',
'time': 'FHIRDate',
'decimal': 'float',
'string': 'str',
'markdown': 'str',
'id': 'str',
'code': 'str', # for now we're not generating enums for these
'uri': 'str',
'oid': 'str',
'uuid': 'str',
'xhtml': 'str',
'base64Binary': 'str',
}
# Classes to be replaced with different ones at resource rendering time
replacemap = {
'Reference': 'FHIRReference', # `FHIRReference` adds dereferencing capabilities
}
# Some properties (in Conformance, Profile and Questionnaire currently) can be
# any (value) type and have the `value[x]` form - how to substitute is defined
# here
starexpandtypes = {
'integer',
'decimal',
'dateTime',
'date',
'instant',
'time',
'string',
'uri',
'boolean',
'code',
'base64Binary',
'Coding',
'CodeableConcept',
'Attachment',
'Identifier',
'Quantity',
'Range',
'Period',
'Ratio',
'HumanName',
'Address',
'ContactPoint',
'Timing',
'Signature',
'Reference',
}
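# Illustrative example of the expansion: a `value[x]` element becomes one
# concrete property per type above, e.g. `valueString`, `valueBoolean`,
# `valueQuantity`, and so on.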
# Which class names are native to the language (or can be treated this way)
natives = ['bool', 'int', 'float', 'str', 'dict']
# Which classes are to be expected from JSON decoding
jsonmap = {
'str': 'str',
'int': 'int',
'bool': 'bool',
'float': 'float',
'FHIRDate': 'str',
}
jsonmap_default = 'dict'
# Properties that need to be renamed because of language keyword conflicts
reservedmap = {
'for': 'for_fhir',
'class': 'class_fhir',
'import': 'import_fhir',
'global': 'global_fhir',
'assert': 'assert_fhir',
'except': 'except_fhir',
}
| 2.140625
| 2
|
elosports/elo.py
|
Anjum48/Elo
| 0
|
12778185
|
<gh_stars>0
class Elo:
def __init__(self, k, home_advantage=100):
"""
:param k: Elo K-Factor
:param home_advantage: Home field advantage, Default=100
"""
self.ratingDict = {}
self.k = k
self.home_advantage = home_advantage
def add_player(self, name, rating=1500):
"""
:param name: Player name
:param rating: Initial rating. Default=1500
:return:
"""
self.ratingDict[name] = rating
def game_over(self, winner, loser, location):
"""
Update ratings after a game
:param winner: Name of the winning team
:param loser: Name of the losing team
:param location: Location of the winning team. 'H' if the played at home, 'A' for away, 'N' for neutral
:return:
"""
if location == 'H': # Home
result = self.expected_result(self.ratingDict[winner], self.ratingDict[loser], bias=self.home_advantage)
elif location == 'A': # Away
result = self.expected_result(self.ratingDict[winner], self.ratingDict[loser], bias=-self.home_advantage)
else: # Neutral venue
result = self.expected_result(self.ratingDict[winner], self.ratingDict[loser])
self.ratingDict[winner] += self.k * (1 - result) # score = 1 for win, minus expected score
self.ratingDict[loser] += self.k * (0 - (1 - result)) # score = 0 for loss, minus expected score
def expected_result(self, pr_a, pr_b, bias=0, names=False):
"""
See https://en.wikipedia.org/wiki/Elo_rating_system#Mathematical_details
:param pr_a: player A performance rating or names
:param pr_b: player B performance rating or names
:param bias: Bias number which adds a constant offset to the ratings. Positive bias factors favor player A
        :param names: Flag to indicate whether the inputs are names or performance ratings
:return: Expected score
"""
if names:
pr_a = self.ratingDict[pr_a]
pr_b = self.ratingDict[pr_b]
        # positive bias shifts the expectation in favor of player A
        exp = (pr_b - pr_a - bias) / 400.0
        return 1 / (1 + 10.0 ** exp)
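# Minimal usage sketch (team names and K-factor are hypothetical):
if __name__ == "__main__":
    elo = Elo(k=20)
    elo.add_player("TeamA")
    elo.add_player("TeamB")
    elo.game_over(winner="TeamA", loser="TeamB", location="H")
    print(elo.ratingDict)  # TeamA gains rating; TeamB loses the same amount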
| 3.515625
| 4
|
spider/spide.py
|
virusdefender/qdu_empty_classroot
| 3
|
12778186
|
<gh_stars>1-10
# coding=utf-8
import time
import json
import re
import requests
from thread_pool import ThreadPool
class Spider(object):
def __init__(self):
self.cookies = {}
self.r = re.compile(
u'<tr style="display:" id="tr\d+"[^>]*?>\s*<td>([^<]*?)</td>[\s\S]+?<tr align="center" >[\s\S]+?<tr align="center" >\s*'
u'<td[^>]*?>(?:<a href="#" title="[^"]*?"><font color="#\w+">|)([\s\S]*?)(?:</font></a>|)</td>\s*'
u'<td[^>]*?>(?:<a href="#" title="[^"]*?"><font color="#\w+">|)([\s\S]*?)(?:</font></a>|)</td>\s*'
u'<td[^>]*?>(?:<a href="#" title="[^"]*?"><font color="#\w+">|)([\s\S]*?)(?:</font></a>|)</td>\s*'
u'<td[^>]*?>(?:<a href="#" title="[^"]*?"><font color="#\w+">|)([\s\S]*?)(?:</font></a>|)</td>\s*'
u'<td[^>]*?>(?:<a href="#" title="[^"]*?"><font color="#\w+">|)[\s\S]*?(?:</font></a>|)</td>\s*'
u'<td[^>]*?>(?:<a href="#" title="[^"]*?"><font color="#\w+">|)([\s\S]*?)(?:</font></a>|)</td>\s*'
u'<td[^>]*?>(?:<a href="#" title="[^"]*?"><font color="#\w+">|)([\s\S]*?)(?:</font></a>|)</td>\s*'
u'<td[^>]*?>(?:<a href="#" title="[^"]*?"><font color="#\w+">|)([\s\S]*?)(?:</font></a>|)</td>\s*'
u'<td[^>]*?>(?:<a href="#" title="[^"]*?"><font color="#\w+">|)([\s\S]*?)(?:</font></a>|)</td>\s*'
u'<td[^>]*?>(?:<a href="#" title="[^"]*?"><font color="#\w+">|)[\s\S]*?(?:</font></a>|)</td>\s*'
u'<td[^>]*?>(?:<a href="#" title="[^"]*?"><font color="#\w+">|)([\s\S]*?)(?:</font></a>|)</td>\s*'
u'<td[^>]*?>(?:<a href="#" title="[^"]*?"><font color="#\w+">|)([\s\S]*?)(?:</font></a>|)</td>\s*'
u'<td[^>]*?>(?:<a href="#" title="[^"]*?"><font color="#\w+">|)([\s\S]*?)(?:</font></a>|)</td>')
def request(self, url, data):
return requests.post(url, data=data, cookies=self.cookies)
def craw(self, campus, building, week, week_day):
#time.sleep(3)
data = {"aid": campus, "buildingid": building, "room": "-1", "whichweek": week, "week": week_day}
try:
html = self.request("http://jw.qdu.edu.cn/academic/teacher/teachresource/roomschedule_week.jsdo",
data).content
# print html
except Exception as e:
print e
print campus, building, week, week_day
return None
content = self.r.findall(html)
rooms = []
for item in content:
l = []
for i in range(0, len(item)):
c = item[i].decode("gb2312")
if i == 0:
l.append(c)
else:
if c[0] == "&":
l.append(0)
else:
l.append(1)
rooms.append(l)
with open("data/" + campus + "." + building + "." + week + "." + week_day + ".json", "w") as f:
f.write(json.dumps(rooms))
print "finish: week:" + week + " week_day:" + week_day
return "success"
if __name__ == "__main__":
s = Spider()
s.cookies = {"JSESSIONID": "8B7DA565F71772D37B04170241A757A8.TAB2;"}
pool = ThreadPool(size=20)
pool.start()
for week in range(1, 21):
for week_day in range(1, 8):
print "start week:" + str(week) + " week_day:" + str(week_day)
            # Verify that the campus id and building id in info.py are correct,
            # then adjust the campus and building ids below according to info.py
pool.append_job(s.craw, "1709", "1783", str(week), str(week_day))
pool.join()
| 2.8125
| 3
|
tool.py
|
zhongguozhi2/myblog
| 0
|
12778187
|
import hashlib
import json
import sys
import time
from random import random
def custom_print(*args, sep=' ', end='\n', file=None):
    """
    A patched print that prefixes output with the caller's line number
    :param args: values to print
    :return:
    """
    # line number at the call site, taken from the caller's frame
    line = sys._getframe().f_back.f_lineno
    # module file name of the caller
    # file_name = sys._getframe(1).f_code.co_filename
    # sys.stdout.write(f'"{__file__}:{sys._getframe().f_lineno}" {x}\n')
    args = (str(arg) for arg in args)  # convert to str so numbers can be joined
    sys.stdout.write(f'{line}: \033[0;32m{" ".join(args)}\033[0m\n')
def create_digest(username):
KEY = b'xdf'
PERSON = b'xzz'
timestamp = time.time()
salt = str(random()).encode('utf-8')[:16]
digest = hashlib.blake2b((username + str(timestamp)).encode('utf-8'), key=KEY, salt=salt, person=PERSON).hexdigest()
return digest
# print(digest.hexdigest())
def postman_to_markdown(postmanfilename, postman_varname, postman_varname_global, markdowndocname=None):
with open(postmanfilename, 'r', encoding='UTF-8') as f1:
content = json.load(f1)
markdowndocname = content['info']['name'] + '接口文档.md'
with open(markdowndocname, 'w', encoding='UTF-8') as f:
f.write('# ' + content['info']['name'] + '\n')
for item in content['item']:
f.write('## ' + item['request']['method'] + ' ' + item['name'] + '\n')
f.write(item['request']['url']['raw'] + '\n')
try:
formdata = item['request']['body']['formdata']
except KeyError:
pass
else:
if formdata:
f.write('### ' + 'BODY formdata' + '\n')
f.write('参数名|参数值' + '\n')
f.write('---:|---:|' + '\n')
for i in formdata:
f.write(i['key'] + '|' + i['value'] + '\n')
with open(postman_varname, 'r', encoding='UTF-8') as f:
content = json.load(f)
with open(postman_varname_global, 'r', encoding='UTF-8') as f2:
content2 = json.load(f2)
key_values = {value['key']: value['value'] for value in content['values']}
key2_values = {value['key']: value['value'] for value in content2['values']}
key_values.update(key2_values)
with open(markdowndocname, 'r', encoding='UTF-8') as f1:
content1 = f1.read()
for k, v in key_values.items():
custom_print(k, v)
if k in content1:
custom_print(k)
content1 = content1.replace('{{' + k + '}}', v)
with open(markdowndocname, 'w', encoding='UTF-8') as f2:
f2.write(content1)
if __name__ == '__main__':
postman_to_markdown('logreport.postman_collection.json', 'logreport_outer_net.postman_environment.json', 'global.postman_environment.json')
| 2.6875
| 3
|
process_deposition_data.py
|
johnmgregoire/JCAPdepositionmonitor
| 1
|
12778188
|
# <NAME> and <NAME>
# Created: 6/05/2013
# Last Updated: 6/14/2013
# For JCAP
import numpy as np
from PyQt4 import QtCore
from dictionary_helpers import *
import date_helpers
import filename_handler
import datareader
# global dictionary holds all processed (z, x, y, rate) data for the experiment
DEP_DATA = []
zndec = 1
tndec = 0
radius1 = 28.
radius2 = 45.
""" does all of the data processing necessary for deposition plots """
class ProcessorThread(QtCore.QThread):
# transfers new line from reader to MainMenu
lineRead = QtCore.pyqtSignal(list)
# transfers new processed data to deposition graph
newData = QtCore.pyqtSignal(tuple)
srcError = QtCore.pyqtSignal(int)
def __init__(self, parent=None, filename='default.csv'):
super(ProcessorThread, self).__init__()
self.file = filename
self.rowBuffer = []
self.changeZ = False
self.running = True
self.reader = datareader.DataReader(parent=self, filename=self.file)
self.reader.lineRead.connect(self.newLineRead)
def run(self):
self.reader.start()
# initialize DATA_DICT column numbers used for data processing
try:
self.tcolnum = getCol('Src%d Motor Tilt Position' %int(filename_handler.FILE_INFO['Source']))
except IndexError:
self.srcError.emit(int(filename_handler.FILE_INFO['Source']))
self.zcolnum = getCol('Platen Zshift Motor 1 Position')
self.anglecolnum = getCol('Platen Motor Position')
        while self.running:
            pass  # busy-wait keeps the thread alive until end() is called
""" called whenever the reader sends a full line """
def newLineRead(self, newRow):
self.lineRead.emit(newRow)
self.processRow(newRow)
""" adds a new row to its own row buffer and processes the
data in the row buffer if the azimuth or z-value of the
instrument has changed """
def processRow(self, row):
if self.rowBuffer == []:
self.rowBuffer += [row]
else:
angle = round(float(row[self.anglecolnum]))
zval = round(float(row[self.zcolnum]), 2)
prevangle = round(float(self.rowBuffer[-1][self.anglecolnum]), 0)
prevz = round(float(self.rowBuffer[-1][self.zcolnum]), 2)
if (angle == prevangle and zval == prevz):
self.rowBuffer += [row]
elif (angle == prevangle):
self.processData(prevz, prevangle, radius1)
self.processData(prevz, prevangle, radius2)
# indicates that center point will need to be
# computed in next round of processing
self.changeZ = True
# reset row buffer
self.rowBuffer = [row]
else:
self.processData(zval, prevangle, radius1)
self.processData(zval, prevangle, radius2)
self.rowBuffer = [row]
""" processes all rates at the same angle and z-value
to produce a single (z, x, y, rate) data point """
def processData(self, z, angle, radius):
global DEP_DATA
rowRange = self.getRowRange()
# only one or two data points indicates a transitional angle
# that can be ignored - Savitzky Golay can be used in the future
if rowRange[1] - rowRange[0] <= 2:
pass
else:
# get only valid rows from buffer
dataArray = self.rowBuffer[rowRange[0]:(rowRange[1]+1)]
# transpose matrix so that each column in the
# spreadsheet becomes a row
dataArrayT = np.array(dataArray).T
timespan = self.getTimeSpan(dataArrayT)
depRates = self.getDepRates(timespan, dataArrayT)
# normalize based on drifting center point
rate0 = self.getXtalRate(3, dataArrayT).mean()
rate = rate0
if radius == radius1:
if angle == 0 or self.changeZ:
# plot center point along with first set
# of data for this z-value
DEP_DATA.append((z, 0.0, 0.0, rate))
self.newData.emit((z, 0.0, 0.0, rate))
self.changeZ = False
x = radius * np.cos(angle * np.pi/180.)
y = radius * np.sin(angle * np.pi/180.)
# rate1 corresponds to Xtal4 Rate
rate = rate0 * depRates[2]/depRates[1]
else:
x = radius * np.cos(angle * np.pi/180. + np.pi)
y = radius * np.sin(angle * np.pi/180. + np.pi)
# rate2 corresponds to Xtal2 Rate
rate = rate0 * depRates[0]/depRates[1]
# store data points for initializing new graph
DEP_DATA.append((z, x, y, rate))
            # indicate to existing graphs that there is
# new data to display
self.newData.emit((z, x, y, rate))
""" helper function to correct for instrument noise in measuring z-value """
def roundZ(self, zcol):
zrnd=np.round(zcol, decimals=zndec)
for i, zval in enumerate(zrnd):
if zval not in filename_handler.FILE_INFO['Z_mm']:
zrnd[i] = -1
return zrnd
""" helper function to correct for instrument noise in measuring tilt """
def roundT(self, tcol):
trnd=np.round(tcol, decimals=tndec)
for i, tval in enumerate(trnd):
if tval not in filename_handler.FILE_INFO['TiltDeg']:
trnd[i] = -1
return trnd
""" gets range of valid rows in row buffer based on
whether z and t values match experimental parameters """
def getRowRange(self):
data = np.array(self.rowBuffer)
datacols = data.T
zcol = map(float, datacols[self.zcolnum])
tcol = map(float, datacols[self.tcolnum])
inds_useful=np.where((self.roundZ(zcol)>=0)&(self.roundT(tcol)>=0))[0]
# if rowRange is nonzero, send it
if inds_useful.size:
return (inds_useful[0], inds_useful[-1])
# otherwise, send dummy rowRange to processData
return (0, 0)
""" gets time span of valid data set for given angle and z-value """
def getTimeSpan(self, dataArrayT):
datecol = getCol('Date')
timecol = getCol('Time')
datetimeTup = zip(dataArrayT[datecol], dataArrayT[timecol])
startStr = datetimeTup[0][0] + ' ' + datetimeTup[0][1]
endStr = datetimeTup[-1][0] + ' ' + datetimeTup[-1][1]
durationObj = date_helpers.dateObjFloat(endStr) - date_helpers.dateObjFloat(startStr)
return durationObj.total_seconds()
""" helper function to return column of Xtal rates from valid data set """
def getXtalRate(self, ratenum, dataArrayT):
rcolnum = getCol('Xtal%d Rate' % ratenum)
return np.array(map(float, dataArrayT[rcolnum]))
""" helper function to compute all deposition rates
as time-averaged Xtal rates """
def getDepRates(self, timespan, dataArrayT):
depRates = []
for x in range(2,5):
rateData = self.getXtalRate(x, dataArrayT)
rateDiff = rateData[-1] - rateData[0]
depRates += [rateDiff/timespan]
return depRates
""" re-initializes data sets and reader when a new
spreadsheet file is loaded """
def newFile(self, newfile):
global DEP_DATA
DEP_DATA = []
self.rowBuffer = []
if self.reader:
self.reader.end()
self.reader = datareader.DataReader(parent=self, filename=newfile)
self.reader.lineRead.connect(self.newLineRead)
self.reader.start()
# re-initialize DATA_DICT column numbers used for data processing
try:
self.tcolnum = getCol('Src%d Motor Tilt Position' %int(filename_handler.FILE_INFO['Source']))
except IndexError:
self.srcError.emit(int(filename_handler.FILE_INFO['Source']))
self.zcolnum = getCol('Platen Zshift Motor 1 Position')
self.anglecolnum = getCol('Platen Motor Position')
""" empties row buffer and kills reader when experiment has ended """
def onEndExperiment(self):
if self.rowBuffer:
angle = round(float(self.rowBuffer[0][self.anglecolnum]))
zval = round(float(self.rowBuffer[0][self.zcolnum]), 1)
self.processData(zval, angle, radius1)
self.processData(zval, angle, radius2)
self.rowBuffer = []
if self.reader:
self.reader.end()
self.reader = None
""" kills both the reader and data processor threads;
called when application exits """
def end(self):
if self.reader:
self.reader.end()
self.running = False
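A minimal standalone sketch of the time-averaged rate computation performed by getDepRates above, using made-up readings (the column values and timespan here are hypothetical, not from a real run):
import numpy as np

timespan = 120.0                      # seconds, as returned by getTimeSpan
xtal2 = np.array([10.0, 10.4, 10.9])  # hypothetical 'Xtal2 Rate' column
dep_rate = (xtal2[-1] - xtal2[0]) / timespan
print(dep_rate)                       # 0.0075 units per second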
| 2.171875
| 2
|
contas/forms.py
|
Setti7/itaipu
| 1
|
12778189
|
<reponame>Setti7/itaipu<filename>contas/forms.py
from django import forms
from django.contrib.auth import (
password_validation,
)
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import send_mail
from django.forms import widgets
from django.template import loader
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from django.utils.translation import gettext_lazy as _
from django.contrib.auth.forms import UserCreationForm
from contas.models import Residente, Visitante, Chacara
from itaipu.settings import REGISTRATION_EMAIL
class AssociarResidenteForm(forms.Form):
token = forms.CharField(max_length=8)
new_password1 = forms.CharField(
label=_("New password"),
widget=forms.PasswordInput,
strip=False,
help_text=password_validation.password_validators_help_text_html(),
)
new_password2 = forms.CharField(
label=_("New password confirmation"),
strip=False,
widget=forms.PasswordInput,
)
email = forms.EmailField(max_length=254)
field_order = ['token', 'email', 'new_password1', 'new_password2']
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
'invalid_token': 'Esse token é inválido.',
'email_not_unique': "Esse email já está em uso.",
'account_already_activated': 'Esse token já foi utilizado.<br>Caso tenha esquecido a senha, vá para a página de '
'login e clique em "Esqueceu a senha?".',
}
email_template_name = 'contas/associar-residente-email.html'
subject = 'Parque Itaipu - Ativação da Conta'
def __init__(self, request, *args, **kwargs):
self.request = request
super().__init__(*args, **kwargs)
def clean_email(self):
email = self.cleaned_data['email']
if Residente.objects.filter(email=email).exists():
raise forms.ValidationError(
self.error_messages['email_not_unique'],
code='email_not_unique',
)
return email
def clean(self):
cleaned_data = super().clean()
# Token validation
token = cleaned_data.get('token')
qs = Residente.objects.filter(token=token)
if not qs.exists():
error = forms.ValidationError(
self.error_messages['invalid_token'],
code='invalid_token',
)
self.add_error('token', error)
self.user = None
else:
self.user = qs[0]
# Active user validation
if self.user.is_active:
error = forms.ValidationError(
self.error_messages['account_already_activated'],
code='account_already_activated',
)
self.add_error('token', error)
# Password validation
        password1 = cleaned_data.get('new_password1')
        password2 = cleaned_data.get('new_password2')
if password1 and password2:
if password1 != password2:
error = forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
self.add_error('new_password2', error)
password_validation.validate_password(password2, self.user)
return cleaned_data
def save(self, commit=True):
        password = self.cleaned_data["new_password1"]
self.user.set_password(password)
self.user.email = self.cleaned_data['email']
if commit:
self.user.save()
current_site = get_current_site(self.request)
context = {
'email': self.user.email,
'domain': current_site.domain,
'site_name': current_site.name,
'email_uidb64': urlsafe_base64_encode(force_bytes(self.user.email)).decode(),
'user': self.user,
'token_uidb64': urlsafe_base64_encode(force_bytes(self.cleaned_data['token'])).decode(),
'protocol': 'https' if self.request.is_secure() else 'http',
}
body = loader.render_to_string(self.email_template_name, context)
send_mail(
subject=self.subject,
message=None,
html_message=body,
from_email=REGISTRATION_EMAIL,
recipient_list=[self.user.email]
)
return self.user
class EditarVisitanteForm(forms.ModelForm):
    # Editable fields
data = forms.DateField(label='Data', input_formats=['%d/%m/%Y', '%Y-%m-%d'],
widget=widgets.DateInput(format='%d/%m/%Y'))
# Hidden
form_id = forms.IntegerField(min_value=0, max_value=999999, widget=forms.HiddenInput)
nomeres = forms.CharField(max_length=50, required=False)
foto = forms.ImageField(required=False)
class Meta:
model = Visitante
fields = ['nome', 'data', 'form_id', 'nomeres', 'foto']
def __init__(self, nomeres, *args, **kwargs):
self.nomeres = nomeres
super().__init__(*args, **kwargs)
def save(self, commit=True):
v = super().save(commit=False)
nome = self.cleaned_data.get('nome')
data = self.cleaned_data.get('data')
foto = self.cleaned_data.get('foto')
pk = self.cleaned_data.get('form_id')
nomeres = self.nomeres
v = Visitante.objects.get(pk=pk)
if commit:
v.nome = nome
v.foto = foto
v.data = data
v.agendado = True
v.nomeres = nomeres
v.save()
return v
class NovoVisitanteForm(forms.ModelForm):
    # Editable fields
data = forms.DateField(label='Data', input_formats=['%d/%m/%Y', '%Y-%m-%d'],
widget=widgets.DateInput(format='%d/%m/%Y'))
class Meta:
model = Visitante
fields = ['nome', 'data']
def __init__(self, user, *args, **kwargs):
self.chacara = user.chacara
self.nomeres = user.nome
super().__init__(*args, **kwargs)
def save(self, commit=True):
v = super().save(commit=False)
nome = self.cleaned_data.get('nome')
data = self.cleaned_data.get('data')
chacara = self.chacara
nomeres = self.nomeres
if commit:
v = Visitante.objects.create(nome=nome, chacara=chacara, nomeres=nomeres, data=data, agendado=True)
v.save()
return v
class EditarTelefoneForm(forms.ModelForm):
class Meta:
model = Chacara
fields = ['telefone']
class EditarResidenteForm(forms.ModelForm):
# Hidden
form_id = forms.IntegerField(min_value=0, max_value=999999, widget=forms.HiddenInput)
class Meta:
model = Residente
fields = ['nome', 'status', 'token', 'email', 'form_id']
class NovoResidenteForm(UserCreationForm):
STATUS_CHOICES = (
('P', 'Proprietário'),
('C', 'Caseiro'),
)
status = forms.ChoiceField(choices=STATUS_CHOICES)
class Meta:
model = Residente
        fields = ['nome', 'status', 'email', 'password1', 'password2']
def __init__(self, chac_id, status, *args, **kwargs):
super().__init__(*args, **kwargs)
self.chac_id = chac_id
self.status = status
if self._meta.model.USERNAME_FIELD in self.fields:
self.fields[self._meta.model.USERNAME_FIELD].widget.attrs.update({'autofocus': True})
def clean_status(self):
self.error_messages['caseiro_not_authorized'] = 'Caseiros só podem criar outros caseiros.'
status = self.cleaned_data.get("status")
if self.status == 'C' and status != 'C':
raise forms.ValidationError(
self.error_messages['caseiro_not_authorized'],
code='caseiro_not_authorized',
)
return status
def save(self, commit=True):
user = super().save(commit=False)
        user.set_password(self.cleaned_data["password1"])
user.chacara = self.chac_id
user.is_active = True
if commit:
user.save()
return user
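A standalone sketch of the activation rules that AssociarResidenteForm.clean() enforces above, with no Django dependency; the helper name and the row dicts are hypothetical illustrations only:
def validate_activation(residentes, token, pw1, pw2):
    """Mirror the three checks: token exists, account inactive, passwords match."""
    errors = []
    matches = [r for r in residentes if r["token"] == token]
    if not matches:
        errors.append("invalid_token")
    elif matches[0]["is_active"]:
        errors.append("account_already_activated")
    if pw1 and pw2 and pw1 != pw2:
        errors.append("password_mismatch")
    return errors

rows = [{"token": "ABC12345", "is_active": False}]
assert validate_activation(rows, "ABC12345", "s3cret!", "s3cret!") == []
assert validate_activation(rows, "WRONG000", "a", "b") == ["invalid_token", "password_mismatch"]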
| 2.1875
| 2
|
pyPLS/pls.py
|
ocloarec/pyPLS
| 1
|
12778190
|
from __future__ import print_function
import numpy as np
from ._PLSbase import plsbase as pls_base
from .utilities import nanmatprod, isValid
from .engines import pls as pls_engine
class pls(pls_base):
"""
This is the classic multivariate NIPALS PLS algorithm.
Parameters:
X: {N, P} array like
a table of N observations (rows) and P variables (columns) - The explanatory variables,
Y: {N, Q} array like
a table of N observations (rows) and Q variables (columns) - The dependent variables,
a: int
the number of PLS component to be fitted
scaling: float, optional
        A number typically between 0.0 and 1.0 corresponding to the scaling; typical examples are
0.0 corresponds to mean centring
0.5 corresponds to Pareto scaling
1.0 corresponds to unit variance scaling
cvfold: int, optional
the number of folds in the cross-validation - default is 7
Returns
-------
    out : a pls object with the attributes and methods below
Attributes:
W : PLS weights table
T : PLS scores table
P : PLS loadings table
C : PLS score regression coefficients
B : PLS regression coefficients
Yhat: model predicted Y
Yhatcv: cross-validation predicted Y
R2Y: Determination coefficients of Y
        Q2Ycol: Cross-validation parameters per column of Y
Q2Ycum: Cumulative cross validation parameter
Methods:
scores(n), loadings(n), weights(n)
n: int
component id
return the scores of the nth component
predict(Xnew)
Xnew: array like
            new observation with the same number of variables as X
return predicted Y
"""
def __init__(self, X, Y, ncp=1, cvfold=None, scaling=0):
pls_base.__init__(self, X, Y, ncp=ncp, scaling=scaling, cvfold=cvfold)
self.model = "pls"
missingValues = False
if self.missingValuesInX or self.missingValuesInY:
            # TODO: For now missing values in both X and Y are dealt with the same way -> Improve this
missingValues = True
self.T, self.U, self.P, self.W, self.C, self.B = pls_engine(self.X, self.Y, self.ncp, missing_values=missingValues)
self.Wstar = self.W @ np.linalg.inv(self.P.T @ self.W)
self.Yhat = self.predict(self.X, preprocessing=False)
self.R2Y, self.R2Ycol = self._calculateR2Y(self.Yhat)
self.cross_validation(ncp=ncp)
self.R2X = np.sum(np.square(self.T @ self.P.T))/self.SSX
def predict(self, Xnew, preprocessing=True, statistics=False, **kwargs):
Xnew, nnew, pxnew = isValid(Xnew, forPrediction=True)
if preprocessing:
Xnew = (Xnew - self.Xbar)
Xnew /= np.power(self.Xstd, self.scaling)
assert pxnew == self.px, "New observations do not have the same number of variables!!"
if statistics:
That = Xnew @ self.W
Xpred = That @ self.P.T
Xres = Xnew - Xpred
Xnew2 = np.square(Xres)
if np.isnan(Xnew2).any():
ssrx = np.nansum(Xnew2, axis=0)
else:
ssrx = np.sum(Xnew2, axis=0)
stats = {'That':That, 'ESS':ssrx}
if self.B is not None:
# Yhat = Xnew @ self.B
if self.missingValuesInX:
Yhat = nanmatprod(Xnew, self.B)
else:
Yhat = Xnew @ self.B
if preprocessing:
Yhat = Yhat * np.power(self.Ystd, self.scaling) + self.Ybar
else:
Yhat = None
if statistics:
return Yhat, stats
else:
return Yhat
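A minimal usage sketch for the class above, assuming the pyPLS package is importable and that the engine accepts dense NumPy arrays; the synthetic data below is illustrative only:
import numpy as np
from pyPLS.pls import pls

rng = np.random.RandomState(0)
X = rng.randn(40, 10)
Y = X[:, :2] @ np.array([[1.0], [0.5]]) + 0.1 * rng.randn(40, 1)

model = pls(X, Y, ncp=2, cvfold=7, scaling=0.0)  # mean centring, 2 components
print(model.R2Y)             # fraction of Y variance explained by the fit
Yhat = model.predict(X)      # predict for observations with the same variables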
| 2.859375
| 3
|
yoloface.py
|
dsp-c01/patrol_and_greet
| 0
|
12778191
|
<filename>yoloface.py<gh_stars>0
# *******************************************************************
#
# Author : <NAME>, 2018
# Email : <EMAIL>
# Github : https://github.com/sthanhng
#
# BAP, AI Team
# Face detection using the YOLOv3 algorithm
#
# Description : yoloface.py
# The main code of the Face detection using the YOLOv3 algorithm
#
# *******************************************************************
# Usage example: python yoloface.py --image samples/outside_000001.jpg \
# --output-dir outputs/
# python yoloface.py --video samples/subway.mp4 \
# --output-dir outputs/
# python yoloface.py --src 1 --output-dir outputs/
import argparse
import sys
import os
from utils import *
import math
import time
import cv2
import numpy as np
from age_gender_ssrnet.SSRNET_model import SSR_net_general, SSR_net
#####################################################################
parser = argparse.ArgumentParser()
parser.add_argument('--model-cfg', type=str, default='./models/face-yolov3-tiny.cfg',
help='path to config file')
parser.add_argument('--model-weights', type=str,
default='./models/face-yolov3-tiny_41000.weights',
help='path to weights of model')
parser.add_argument('--image', type=str, default='',
help='path to image file')
parser.add_argument('--video', type=str, default='',
help='path to video file')
parser.add_argument('--src', type=int, default=0,
help='source of the camera')
parser.add_argument('--output-dir', type=str, default='outputs/',
help='path to the output directory')
args = parser.parse_args()
#####################################################################
# print the arguments
print('----- info -----')
print('[i] The config file: ', args.model_cfg)
print('[i] The weights of model file: ', args.model_weights)
print('[i] Path to image file: ', args.image)
print('[i] Path to video file: ', args.video)
print('###########################################################\n')
# Give the configuration and weight files for the model and load the network
# using them.
net = cv2.dnn.readNetFromDarknet(args.model_cfg, args.model_weights)
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
######################## Agender model parameter ##################################
# Setup global parameters
face_size = 64
face_padding_ratio = 0.10
# Default parameters for SSR-Net
stage_num = [3, 3, 3]
lambda_local = 1
lambda_d = 1
# Initialize gender net
gender_net = SSR_net_general(face_size, stage_num, lambda_local, lambda_d)()
gender_net.load_weights('age_gender_ssrnet/ssrnet_gender_3_3_3_64_1.0_1.0.h5')
# Initialize age net
age_net = SSR_net(face_size, stage_num, lambda_local, lambda_d)()
age_net.load_weights('age_gender_ssrnet/ssrnet_age_3_3_3_64_1.0_1.0.h5')
################ from agender #######################
def predictAgeGender(faces):
# Convert faces to N,64,64,3 blob
blob = np.empty((len(faces), face_size, face_size, 3))
for i, face_bgr in enumerate(faces):
blob[i, :, :, :] = cv2.resize(face_bgr, (64, 64))
blob[i, :, :, :] = cv2.normalize(blob[i, :, :, :], None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
# Predict gender and age
genders = gender_net.predict(blob)
ages = age_net.predict(blob)
# Construct labels
labels = ['{},{}'.format('Male' if (gender >= 0.5) else 'Female', int(age)) for (gender, age) in zip(genders, ages)]
return labels
def collectFaces(frame, face_boxes):
faces = []
# Process faces
for i, box in enumerate(face_boxes):
# Convert box coordinates from resized frame_bgr back to original frame
box_orig = [
int(round(box[0] * width_orig / width)),
int(round(box[1] * height_orig / height)),
int(round(box[2] * width_orig / width)),
int(round(box[3] * height_orig / height)),
]
# Extract face box from original frame
face_bgr = frame[
max(0, box_orig[1]):min(box_orig[3] + 1, height_orig - 1),
max(0, box_orig[0]):min(box_orig[2] + 1, width_orig - 1),
:
]
faces.append(face_bgr)
return faces
########################################################################
def _main():
global width, height, height_orig, width_orig
wind_name = 'face detection using YOLOv3'
cv2.namedWindow(wind_name, cv2.WINDOW_NORMAL)
cap = cv2.VideoCapture(args.src)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
while True:
        has_frame, frame = cap.read()
        # Stop the program if reached end of video; check before touching frame
        if not has_frame:
            print('[i] ==> Done processing!!!')
            cv2.waitKey(1000)
            break
        start_time = time.time()
        # source = frame.copy()
        ############## initial parameter of agender input type ##################
        height_orig, width_orig = frame.shape[:2]
        area = width * height
        width = int(math.sqrt(area * width_orig / height_orig))
        height = int(math.sqrt(area * height_orig / width_orig))
        #########################################################################
# Create a 4D blob from a frame.
blob = cv2.dnn.blobFromImage(frame, 1 / 255, (IMG_WIDTH, IMG_HEIGHT),
[0, 0, 0], 1, crop=False)
# Sets the input to the network
net.setInput(blob)
# Runs the forward pass to get output of the output layers
outs = net.forward(get_outputs_names(net))
# Remove the bounding boxes with low confidence
faces = post_process(frame, outs, CONF_THRESHOLD, NMS_THRESHOLD)
if len(faces) > 0:
#####################################
# convert to agender input type
face = collectFaces(frame, faces)
# Get age and gender
labels = predictAgeGender(face)
for (x1, y1, x2, y2) in faces:
cv2.rectangle(frame, (x1, y1), (x2, y2), color=(0, 255, 0), lineType=8)
# Draw labels
for (label, box) in zip(labels, faces):
cv2.putText(frame, label, org=(box[0], box[1] - 10), fontFace=cv2.FONT_HERSHEY_PLAIN,
fontScale=1, color=COLOR_BLUE, thickness=1, lineType=cv2.LINE_AA)
######################################
# source = source[faces[0][1]-20:faces[0][3]+20, faces[0][0]-20:faces[0][2]+20]
print('[i] ==> # detected faces: {}'.format(len(faces)))
print('#' * 60)
end_time = time.time()
        # initialize the set of information we'll be displaying on the frame
info = [
('FPS', '{:.2f}'.format(1/(end_time-start_time)))
]
for (i, (txt, val)) in enumerate(info):
text = '{}: {}'.format(txt, val)
cv2.putText(frame, text, (5, (i * 20) + 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLOR_RED, 2)
# cv2.imshow("source", source)
cv2.imshow(wind_name, frame)
key = cv2.waitKey(1)
if key == 27 or key == ord('q'):
print('[i] ==> Interrupted by user!')
break
cap.release()
cv2.destroyAllWindows()
print('==> All done!')
print('***********************************************************')
if __name__ == '__main__':
width = 480
height = 340
_main()
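A standalone check of the area-preserving resize arithmetic used at the top of _main, with hypothetical source dimensions: the recomputed (width, height) keep width*height roughly constant while matching the source aspect ratio:
import math

width, height = 480, 340                # target area, as set in __main__
width_orig, height_orig = 1920, 1080    # hypothetical camera resolution
area = width * height
w = int(math.sqrt(area * width_orig / height_orig))
h = int(math.sqrt(area * height_orig / width_orig))
print(w, h, w * h)                      # ~538 x 302, area close to 480*340
print(w / h, width_orig / height_orig)  # aspect ratios agree (~1.78)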
| 2.015625
| 2
|
tests/test_parameter.py
|
lukasz-migas/SimpleParam
| 0
|
12778192
|
<reponame>lukasz-migas/SimpleParam
"""Test Parameter class"""
import operator
import pytest
import simpleparam as param
class TestParameterSetup(object):
"""Test Parameter class"""
@staticmethod
def test_creation_float():
"""Test Parameter - correct initilization"""
value = 1.0
num_a = param.Parameter(value=value)
assert num_a.value == value
@staticmethod
def test_creation_doc():
"""Test Parameter - correct initilization"""
value = 42.01
doc = "I am a parameter"
num_a = param.Parameter(value=value, doc=doc)
assert num_a.value == value
assert num_a.doc == doc
@staticmethod
def test_allow_none():
"""Test Parameter - correct initilization"""
value = None
num_a = param.Parameter(value=value, allow_None=True)
assert num_a.value == value
@staticmethod
def test_kind():
"""Test Parameter - correct initilization"""
value = 11.01474
num_a = param.Parameter(value=value)
assert num_a.kind == "Parameter"
@staticmethod
def test_set_kind():
"""Test Parameter - correct initilization"""
value = 11.01474
num_a = param.Parameter(value=value)
num_a.kind = "Number"
assert num_a.kind == "Number"
@staticmethod
def test_setting_wrong():
"""Test Parameter - correct initilization"""
with pytest.raises(ValueError) as __:
value = 11.01474
num_a = param.Parameter(value=value, allow_None="False")
class TestParameterOperations(object):
"""Test Parameter class operations"""
@staticmethod
def test_add():
"""Test Parameter - correct initilization"""
value = 42.01
num_a = param.Parameter(value=value)
assert num_a.value == value
new_value = value + 1
num_a.value = num_a.__add__(1)
assert num_a.value == new_value
@staticmethod
def test_sub():
"""Test Parameter - correct initilization"""
value = 42.01
num_a = param.Parameter(value=value)
assert num_a.value == value
new_value = value - 1
num_a.value = num_a.__sub__(1)
assert num_a.value == new_value
@staticmethod
def test_div():
"""Test Parameter - correct initilization"""
value = 42.0
num_a = param.Parameter(value=value)
assert num_a.value == value
new_value = value / 2
num_a.value = num_a.__truediv__(2)
assert num_a.value == new_value
@staticmethod
def test_mul():
"""Test Parameter - correct initilization"""
value = 42.01
num_a = param.Parameter(value=value)
assert num_a.value == value
new_value = value * 2
num_a.value = num_a.__mul__(2)
assert num_a.value == new_value
@staticmethod
def test_pow():
"""Test Parameter - correct initilization"""
value = 42.01
num_a = param.Parameter(value=value)
assert num_a.value == value
new_value = value ** 2
num_a.value = num_a.__pow__(2)
assert num_a.value == new_value
@staticmethod
def test_floordiv():
"""Test Parameter - correct initilization"""
value = 42.01
num_a = param.Parameter(value=value)
assert num_a.value == value
new_value = value // 2
num_a.value = num_a.__floordiv__(2)
assert num_a.value == new_value
@staticmethod
def test_mod():
"""Test Parameter - correct initilization"""
value = 42.01
num_a = param.Parameter(value=value)
assert num_a.value == value
new_value = value % 2
num_a.value = num_a.__mod__(2)
assert num_a.value == new_value
@staticmethod
def test_rshift():
"""Test Parameter - correct initilization"""
value = 42
num_a = param.Parameter(value=value)
assert num_a.value == value
new_value = operator.rshift(value, 1)
num_a.value = num_a.__rshift__(1)
assert num_a.value == new_value
@staticmethod
def test_lshift():
"""Test Parameter - correct initilization"""
value = 42
num_a = param.Parameter(value=value)
assert num_a.value == value
new_value = operator.lshift(value, 1)
num_a.value = num_a.__lshift__(1)
assert num_a.value == new_value
@staticmethod
def test_lt():
"""Test Parameter - correct initilization"""
value = 42.01
num_a = param.Parameter(value=value)
assert num_a.value == value
assert num_a.value.__lt__(100)
@staticmethod
def test_gt():
"""Test Parameter - correct initilization"""
value = 42.01
num_a = param.Parameter(value=value)
assert num_a.value == value
assert num_a.value.__gt__(1)
@staticmethod
def test_abs():
"""Test Parameter - correct initilization"""
value = -42.01
num_a = param.Parameter(value=value)
assert num_a.__abs__() == abs(value)
@staticmethod
def test_neg():
"""Test Parameter - correct initilization"""
value = -42.01
num_a = param.Parameter(value=value)
assert num_a.__neg__() == -value
@staticmethod
def test_pos():
"""Test Parameter - correct initilization"""
value = -42.01
num_a = param.Parameter(value=value)
assert num_a.__pos__() == +value
@staticmethod
def test_setting_wrong():
"""Test Parameter - correct initilization"""
with pytest.raises(ValueError) as __:
value = 11.01474
num_a = param.Parameter(value=value, allow_None="False")
del num_a.value
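A minimal usage sketch distilled from the tests above, assuming simpleparam is installed; it relies on the dunder methods exercised by TestParameterOperations returning plain values (an assumption inferred from the assertions, not from package docs):
import simpleparam as param

p = param.Parameter(value=2.0, doc="example parameter")
p.value = p + 1        # __add__ returns the updated plain value (assumed)
assert p.value == 3.0
assert abs(param.Parameter(value=-1.5)) == 1.5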
| 2.90625
| 3
|
dissononce/processing/impl/cipherstate.py
|
dineshks1/dissononce
| 34
|
12778193
|
<filename>dissononce/processing/impl/cipherstate.py
class CipherState(object):
def __init__(self, cipher):
"""
:param cipher:
:type cipher: dissononce.cipher.Cipher
"""
self._cipher = cipher
self._key = None
self._nonce = 0
@property
def cipher(self):
return self._cipher
def initialize_key(self, key):
self._key = key
self.set_nonce(0)
def has_key(self):
return self._key is not None
def set_nonce(self, nonce):
"""
SetNonce(nonce): Sets n = nonce.
This function is used for handling out-of-order transport messages
:param nonce:
:type nonce: int
:return:
:rtype:
"""
self._nonce = nonce
def rekey(self):
self._key = self._cipher.rekey(self._key)
def encrypt_with_ad(self, ad, plaintext):
"""
EncryptWithAd(ad, plaintext):
If k is non-empty returns ENCRYPT(k, n++, ad, plaintext). Otherwise returns plaintext.
:param ad:
:type ad: bytes
:param plaintext:
:type plaintext: bytes
:return:
:rtype: bytes
"""
if self._key is None:
return plaintext
result = self._cipher.encrypt(self._key, self._nonce, ad, plaintext)
self._nonce += 1
return result
def decrypt_with_ad(self, ad, ciphertext):
"""
DecryptWithAd(ad, ciphertext):
If k is non-empty returns DECRYPT(k, n++, ad, ciphertext). Otherwise returns ciphertext.
If an authentication failure occurs in DECRYPT() then n is not incremented
and an error is signaled to the caller.
:param ad:
:type ad: bytes
:param ciphertext:
:type ciphertext: bytes
:return: bytes
:rtype:
"""
if self._key is None:
return ciphertext
result = self._cipher.decrypt(self._key, self._nonce, ad, ciphertext)
self._nonce += 1
return result
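A usage sketch for CipherState with a toy cipher (not part of dissononce; a real application would pass one of the package's Cipher implementations, e.g. a ChaCha20-Poly1305 cipher). The XOR scheme below exists only to make the nonce bookkeeping visible:
class ToyCipher:
    """Hypothetical stand-in mixing a key byte and the nonce into a XOR pad."""
    def encrypt(self, key, nonce, ad, plaintext):
        return bytes(b ^ key[0] ^ (nonce & 0xFF) for b in plaintext)
    def decrypt(self, key, nonce, ad, ciphertext):
        return bytes(b ^ key[0] ^ (nonce & 0xFF) for b in ciphertext)
    def rekey(self, key):
        return key

state = CipherState(ToyCipher())
state.initialize_key(b"\x07" * 32)
ct = state.encrypt_with_ad(b"", b"hello")   # consumes n = 0, then n becomes 1
state.set_nonce(0)                          # rewind so decryption reuses n = 0
assert state.decrypt_with_ad(b"", ct) == b"hello"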
| 2.78125
| 3
|
ymir/command/tests/unit/test_cmd_export.py
|
phoenix-xhuang/ymir
| 0
|
12778194
|
import os
import shutil
from typing import List, Tuple
import unittest
from google.protobuf import json_format
from mir.commands import exporting
from mir.protos import mir_command_pb2 as mirpb
from mir.tools import hash_utils, mir_storage_ops
from mir.tools.code import MirCode
from tests import utils as test_utils
class TestCmdExport(unittest.TestCase):
# life cycle
def __init__(self, methodName: str) -> None:
super().__init__(methodName=methodName)
self._test_root = test_utils.dir_test_root(self.id().split('.')[-3:])
self._assets_location = os.path.join(self._test_root, 'assets_location')
self._dest_root = os.path.join(self._test_root, 'export_dest')
self._gt_root = os.path.join(self._dest_root, 'gt_dir')
self._mir_root = os.path.join(self._test_root, 'mir-repo')
def setUp(self) -> None:
self.__prepare_dirs()
test_utils.prepare_labels(mir_root=self._mir_root, names=['freshbee', 'type1', 'person', 'airplane,aeroplane'])
self.__prepare_mir_repo()
self.__prepare_assets()
return super().setUp()
def tearDown(self) -> None:
self.__deprepare_dirs()
return super().tearDown()
# private: prepare env
def __prepare_dirs(self):
test_utils.remake_dirs(self._test_root)
test_utils.remake_dirs(self._assets_location)
test_utils.remake_dirs(self._dest_root)
test_utils.remake_dirs(self._mir_root)
def __deprepare_dirs(self):
if os.path.isdir(self._test_root):
shutil.rmtree(self._test_root)
def __prepare_assets(self):
'''
        copy all assets from the project to assets_location; assumes that `self._assets_location` has already been created
'''
image_paths = ['tests/assets/2007_000032.jpg', 'tests/assets/2007_000243.jpg']
sha1sum_path_pairs = [(hash_utils.sha1sum_for_file(image_path), image_path)
for image_path in image_paths] # type: List[Tuple[str, str]]
for sha1sum, image_path in sha1sum_path_pairs:
shutil.copyfile(image_path, os.path.join(self._assets_location, sha1sum))
def __prepare_mir_repo(self):
'''
        creates the mir repo; assumes that `self._mir_root` has already been created
'''
test_utils.mir_repo_init(self._mir_root)
test_utils.mir_repo_create_branch(self._mir_root, 'a')
# metadatas
metadatas_dict = {
'attributes': {
'430df22960b0f369318705800139fcc8ec38a3e4': {
'assetType': 'AssetTypeImageJpeg',
'width': 500,
'height': 281,
'imageChannels': 3
},
'a3008c032eb11c8d9ffcb58208a36682ee40900f': {
'assetType': 'AssetTypeImageJpeg',
'width': 500,
'height': 333,
'imageChannels': 3
}
}
}
mir_metadatas = mirpb.MirMetadatas()
json_format.ParseDict(metadatas_dict, mir_metadatas)
# annotations
annotations_dict = {
'task_annotations': {
'a': {
'image_annotations': {
'430df22960b0f369318705800139fcc8ec38a3e4': {
'annotations': [{
'index': 0,
'box': {
'x': 104,
'y': 78,
'w': 272,
'h': 105
},
'class_id': 3,
'score': 1,
'anno_quality': 0.95,
'tags': {'fake tag name': 'fake tag data'},
}, {
'index': 1,
'box': {
'x': 133,
'y': 88,
'w': 65,
'h': 36
},
'class_id': 3,
'score': 1,
'anno_quality': 0.95,
'tags': {'fake tag name': 'fake tag data'},
}, {
'index': 2,
'box': {
'x': 195,
'y': 180,
'w': 19,
'h': 50
},
'class_id': 2,
'score': 1,
'anno_quality': 0.95,
'tags': {'fake tag name': 'fake tag data'},
}, {
'index': 3,
'box': {
'x': 26,
'y': 189,
'w': 19,
'h': 95
},
'class_id': 2,
'score': 1,
'anno_quality': 0.95,
'tags': {'fake tag name': 'fake tag data'},
}],
},
'a3008c032eb11c8d9ffcb58208a36682ee40900f': {
'annotations': [{
'index': 0,
'box': {
'x': 181,
'y': 127,
'w': 94,
'h': 67
},
'class_id': 3,
'score': 1,
'anno_quality': 0.95,
'tags': {'fake tag name': 'fake tag data'},
}],
},
}
}
},
'image_cks': {
'a3008c032eb11c8d9ffcb58208a36682ee40900f': {
'cks': {
'weather': 'sunny',
},
'image_quality': 0.5
},
'430df22960b0f369318705800139fcc8ec38a3e4': {
'cks': {
'weather': 'sunny',
},
'image_quality': 0.3
}
}
}
mir_annotations = mirpb.MirAnnotations()
json_format.ParseDict(annotations_dict, mir_annotations)
# tasks
task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeImportData,
task_id='a',
message='test_tools_data_exporter_branch_a')
# save and commit
mir_datas = {
mirpb.MirStorage.MIR_METADATAS: mir_metadatas,
mirpb.MirStorage.MIR_ANNOTATIONS: mir_annotations,
}
mir_storage_ops.MirStorageOps.save_and_commit(mir_root=self._mir_root,
mir_branch='a',
his_branch='master',
mir_datas=mir_datas,
task=task)
def test_normal_00(self):
# normal case: voc:raw
fake_args = type('', (), {})()
fake_args.mir_root = self._mir_root
fake_args.asset_dir = self._dest_root
fake_args.annotation_dir = self._dest_root
fake_args.gt_dir = self._gt_root
fake_args.media_location = self._assets_location
fake_args.src_revs = 'a@a'
fake_args.dst_rev = ''
fake_args.format = 'voc'
fake_args.asset_format = 'raw'
fake_args.in_cis = 'person'
fake_args.work_dir = ''
runner = exporting.CmdExport(fake_args)
result = runner.run()
self.assertEqual(MirCode.RC_OK, result)
# normal case: voc:lmdb
fake_args = type('', (), {})()
fake_args.mir_root = self._mir_root
fake_args.asset_dir = self._dest_root
fake_args.annotation_dir = self._dest_root
fake_args.gt_dir = self._gt_root
fake_args.media_location = self._assets_location
fake_args.src_revs = 'a@a'
fake_args.dst_rev = ''
fake_args.format = 'voc'
fake_args.asset_format = 'lmdb'
fake_args.in_cis = 'person'
fake_args.work_dir = ''
runner = exporting.CmdExport(fake_args)
result = runner.run()
self.assertEqual(MirCode.RC_OK, result)
# abnormal case: no asset_dir, annotation_dir, media_location
fake_args = type('', (), {})()
fake_args.mir_root = self._mir_root
fake_args.asset_dir = ''
fake_args.annotation_dir = ''
fake_args.gt_dir = ''
fake_args.media_location = ''
fake_args.src_revs = 'a@a'
fake_args.dst_rev = '' # too fast, default task_id will be the same as previous one
fake_args.format = 'voc'
fake_args.asset_format = 'raw'
fake_args.in_cis = 'person'
fake_args.work_dir = ''
runner = exporting.CmdExport(fake_args)
result = runner.run()
self.assertNotEqual(MirCode.RC_OK, result)
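A note on the idiom used repeatedly above: `type('', (), {})()` builds an anonymous object whose attributes can be assigned freely, a lightweight stand-in for argparse's namespace. A minimal sketch with hypothetical attribute values:
args = type('', (), {})()
args.format = 'voc'
assert args.format == 'voc'

# an equivalent, arguably clearer alternative:
from argparse import Namespace
args = Namespace(format='voc', asset_format='raw')
assert args.asset_format == 'raw'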
| 2.109375
| 2
|
quizmake/__main__.py
|
jnguyen1098/quizmake
| 1
|
12778195
|
<filename>quizmake/__main__.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Initialization."""
import sys
from . import core
if __name__ == "__main__":
sys.exit(core.main(sys.argv))
| 1.90625
| 2
|
joelib/physics/jethead.py
|
Joefdez/joelib
| 1
|
12778196
|
from numpy import *
import joelib.constants.constants as cts
from joelib.physics.synchrotron_afterglow import *
from scipy.stats import binned_statistic
from scipy.interpolate import interp1d
from tqdm import tqdm
class jetHeadUD(adiabatic_afterglow):
###############################################################################################
# Methods for initializing the cells in the jet head
###############################################################################################
def __init__(self, EE, Gam0, nn, epE, epB, pp, DD, steps, evolution, nlayers, joAngle, shell_type='thin', Rb=1.):#, obsAngle=0.0):
self.nlayers = nlayers # Number of layers for the partition
#self.nn1 = nn1 # Number of cells in the first layer
self.__totalCells() # obtain self.ncells
self.joAngle = joAngle # Jet opening angle
#self.obsAngle = obsAngle # Angle of jet axis with respect to line of sight
        self.angExt = 2.*pi*(1.-cos(joAngle))                  # Solid angle covered by the jet head
self.cellSize = self.angExt/self.ncells # Angular size of each cell
self.__makeCells() # Generate the cells: calculate the angular positions of the shells
adiabatic_afterglow.__init__(self, EE, Gam0, nn, epE, epB, pp, DD, steps, evolution, shell_type, Rb)
self.ee = EE/self.ncells # Energy per cell
def __makeCells(self):
"""
This method generates the individual cells: positions of borders between cells
and angular positions of the cells themselves.
"""
self.layer = array([])
self.thetas = array([])
self.phis = array([])
self.cthetas = array([])
self.cphis = array([])
fac1 = arange(0,self.nlayers+1)/float(self.nlayers) # Numerical factor for use during execution
self.thetas = 2.*arcsin(fac1*sin(self.joAngle/4.)) # Calculate the propagation angle with respect to jet axis
for ii in range(self.nlayers): # Loop over layers and populate the arrays
num = self.cellsInLayer(ii)
self.phis = append(self.phis, arange(0,num+1)*2.*pi/num) # Phi value of the edges
self.layer = append(self.layer,ones(num)*(ii+1)) # Layer on which the cells are
self.cthetas = append(self.cthetas,ones(num)*0.5*(self.thetas[ii]+self.thetas[ii+1])) # Central theta values of the cells
self.cphis = append(self.cphis,(arange(0,num)+0.5)*2.*pi/num ) # Central phi values of the cells
#num = int(round(self.cellsInLayer(ii)/2))
#self.layer = append(self.layer,ones(num+1)*(ii+1)) # Layer on which the phi edges are
#self.phis = append(self.phis, arange(0,num+1)*2.*pi/num) # Phi value of the edges
#self.cthetas = append(self.cthetas,ones(num)*0.5*(self.thetas[ii]+self.thetas[ii+1])) # Central theta values
#self.cphis = append(self.cphis,(arange(0,num)+0.5)*pi/num ) # Central phi values
def __totalCells(self):
tot = 0
for ii in range(0,self.nlayers):
tot = tot + self.cellsInLayer(ii)
#tot = tot + int(round(self.cellsInLayer(ii)/2))
self.ncells = tot
###############################################################################################
# Methods used by initializers and for getting different physics and general methods not used by initializers
###############################################################################################
def cellsInLayer(self, ii):
"""
Return number of cells in layer ii
"""
return (2*ii+1)
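    # Added note: with cellsInLayer(ii) = 2*ii + 1 the total over the layers is
    # sum_{ii=0}^{nlayers-1} (2*ii + 1) = nlayers**2, so __totalCells yields
    # self.ncells == self.nlayers**2 (e.g. nlayers = 10 gives 100 cells).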
def obsangle(self, theta_obs):
"""
        Return the cosine of the observer angle for the different shockwave segments,
        for an observer at an angle theta_obs with respect to the jet axis
        (contained in the yz plane)
"""
#u_obs_x, u_obs_y, u_obs_z = 0., sin(theta_obs), cos(theta_obs)
u_obs_y, u_obs_z = sin(theta_obs), cos(theta_obs)
#seg_x =
seg_y = sin(self.cthetas)*sin(self.cphis)
seg_z = cos(self.cthetas)
#return arccos(u_obs_x*seg_x + u_obs_y*seg_y + u_obs_z*seg_z)
return u_obs_y*seg_y + u_obs_z*seg_z
def obsangle_cj(self, theta_obs):
"""
        Return the cosine of the observer angle for the different shockwave
        segments in the counter jet, for an observer at an angle theta_obs with respect to the jet axis
        (contained in the yz plane)
"""
#u_obs_x, u_obs_y, u_obs_z = 0., sin(theta_obs), cos(theta_obs)
u_obs_y, u_obs_z = sin(theta_obs), cos(theta_obs)
#seg_x =
seg_y = sin(pi-self.cthetas)*sin(self.cphis)
seg_z = cos(pi-self.cthetas)
#return arccos(u_obs_x*seg_x + u_obs_y*seg_y + u_obs_z*seg_z)
return u_obs_y*seg_y + u_obs_z*seg_z
def dopplerFactor(self, cosa, beta):
"""
Calculate the doppler factors of the different jethead segments
        cosa -> cosine of the observation angle, obtained using obsangle
"""
return (1.-beta)/(1.-beta*cosa)
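    # Added note: this factor is normalised so that an on-axis segment
    # (cosa = 1) gives exactly 1. A hypothetical numerical check:
    #   beta, cosa = 0.99, 0.5
    #   (1. - beta) / (1. - beta * cosa)   # ~0.0198, strong off-axis de-boosting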
def light_curve_adiabatic(self, theta_obs, obsFreqs, tt0, ttf, num, Rb):
if type(obsFreqs)==float:
obsFreqs = array([obsFreqs])
calpha = self.obsangle(theta_obs)
alpha = arccos(calpha)
calpha_cj = self.obsangle_cj(theta_obs)
alpha_cj = arccos(calpha_cj)
if self.evolution == 'adiabatic':
max_Tobs = max(obsTime_offAxis_UR(self.RRs, self.TTs, self.Betas, max(alpha)))/cts.sTd
max_Tobs_cj = max(obsTime_offAxis_UR(self.RRs, self.TTs, self.Betas, max(alpha_cj)))/cts.sTd
elif self.evolution == 'peer':
max_Tobs = max(obsTime_offAxis_General(self.RRs, self.TTs, max(alpha)))/cts.sTd
max_Tobs_cj = max(obsTime_offAxis_General(self.RRs, self.TTs, max(alpha_cj)))/cts.sTd
if (ttf>max_Tobs or ttf>max_Tobs_cj):
print("ttf larger than maximum observable time. Adjusting value.")
ttf = min(max_Tobs, max_Tobs_cj)
lt0 = log10(tt0*cts.sTd) # Convert to seconds and then logspace
ltf = log10(ttf*cts.sTd) # Convert to seconds and then logspace
tts = logspace(lt0, ltf+(ltf-lt0)/num, num) # Timeline on which the flux is evaluated.
light_curve = zeros([len(obsFreqs), num])
light_curve_RS = zeros([len(obsFreqs), num])
light_curve_CJ = zeros([len(obsFreqs), num])
for ii in tqdm(range(self.ncells)):
#for ii in range(self.ncells):
onAxisTint = interp1d(self.RRs, self.TTs)
ttobs = obsTime_offAxis_UR(self.RRs, self.TTs, self.Betas, alpha[ii])
ttobs_cj = obsTime_offAxis_UR(self.RRs, self.TTs, self.Betas, alpha_cj[ii])
filTM = where(tts<=max(ttobs))[0]
filTm = where(tts[filTM]>=min(ttobs))[0]
filTM_cj = where(tts<=max(ttobs_cj))[0]
filTm_cj = where(tts[filTM_cj]>=min(ttobs_cj))[0]
Rint = interp1d(ttobs, self.RRs)
Robs = Rint(tts[filTM][filTm])
GamObs = self.GamInt(Robs)
BetaObs = sqrt(1.-GamObs**(-2.))
#if self.evolution == 'adiabatic':
# onAxisTobs = obsTime_onAxis_adiabatic(Robs, BetaObs)
#elif self.evolution == 'peer':
# onAxisTobs = obsTime_onAxis_integrated(Robs, GamObs, BetaObs)
onAxisTobs = onAxisTint(Robs)
# Forward shock stuff
Bfield = sqrt(32.*pi*self.nn*self.epB*cts.mp)*cts.cc*GamObs
gamMobs, nuMobs = minGam(GamObs, self.epE, self.epB, self.nn, self.pp, Bfield)
gamCobs, nuCobs = critGam(GamObs, self.epE, self.epB, self.nn, self.pp, Bfield, onAxisTobs)
Fnuobs = fluxMax(Robs, GamObs, self.nn, Bfield, self.DD)
# Reverse shock stuff
nuM_RS, nuC_RS, Fnu_RS = params_tt_RS(self, onAxisTobs, Rb)
# Counter-jet stuff
Rint_cj = interp1d(ttobs_cj, self.RRs)
Robs_cj = Rint_cj(tts[filTM_cj][filTm_cj])
GamObs_cj = self.GamInt(Robs_cj)
BetaObs_cj = sqrt(1.-GamObs_cj**(-2.))
#onAxisTint = interp1d(self.RRs, self.TTs)
#if self.evolution == 'adiabatic':
# onAxisTobs = obsTime_onAxis_adiabatic(Robs, BetaObs)
#elif self.evolution == 'peer':
# onAxisTobs = obsTime_onAxis_integrated(Robs, GamObs, BetaObs)
onAxisTobs_cj = onAxisTint(Robs_cj)
Bfield_cj = sqrt(32.*pi*self.nn*self.epB*cts.mp)*cts.cc*GamObs_cj
gamMobs_cj, nuMobs_cj = minGam(GamObs_cj, self.epE, self.epB, self.nn, self.pp, Bfield_cj)
gamCobs_cj, nuCobs_cj = critGam(GamObs_cj, self.epE, self.epB, self.nn, self.pp, Bfield_cj, onAxisTobs_cj)
Fnuobs_cj = fluxMax(Robs_cj, GamObs_cj, self.nn, Bfield_cj, self.DD)
dopFacs = self.dopplerFactor(calpha[ii], BetaObs)
afac = self.cellSize/maximum(self.cellSize*ones(num)[filTM][filTm], 2.*pi*(1.-cos(1./GamObs)))
dopFacs_cj = self.dopplerFactor(calpha_cj[ii], BetaObs_cj)
afac_cj = self.cellSize/maximum(self.cellSize*ones(num)[filTM_cj][filTm_cj], 2.*pi*(1.-cos(1./GamObs_cj)))
for freq in obsFreqs:
fil1, fil2 = where(gamMobs<=gamCobs)[0], where(gamMobs>gamCobs)[0]
fil3, fil4 = where(nuM_RS<=nuC_RS)[0], where(nuM_RS>nuC_RS)[0]
fil5, fil6 = where(nuMobs_cj<=nuCobs_cj)[0], where(nuMobs_cj>nuCobs_cj)[0]
                freqs = freq/dopFacs  # Calculate the rest-frame frequencies corresponding to the observed frequency
freqs_cj = freq/dopFacs_cj
light_curve[obsFreqs==freq, filTM[filTm][fil1]] = light_curve[obsFreqs==freq, filTM[filTm][fil1]] + (
afac[fil1] * dopFacs[fil1]**3. * FluxNuSC_arr(self, nuMobs[fil1], nuCobs[fil1], Fnuobs[fil1], freqs[fil1]))*calpha[ii]
#light_curve[obsFreqs==freq, filTM[filTm][fil2]] = light_curve[obsFreqs==freq, filTM[filTm][fil2]] + (
# afac[fil2] * dopFacs[fil2]**3. * FluxNuFC_arr(self, nuMobs[fil2], nuCobs[fil2], Fnuobs[fil2], freqs[fil2]))*calpha[ii]
light_curve_RS[obsFreqs==freq, filTM[filTm][fil3]] = light_curve_RS[obsFreqs==freq, filTM[filTm][fil3]] + (
afac[fil3] * dopFacs[fil3]**3. * FluxNuSC_arr(self, nuM_RS[fil3], nuC_RS[fil3], Fnu_RS[fil3], freqs[fil3]))*calpha[ii]
#light_curve_RS[obsFreqs==freq, filTM[filTm][fil4]] = light_curve_RS[obsFreqs==freq, filTM[filTm][fil4]] + (
# afac[fil4] * dopFacs[fil4]**3. * FluxNuFC_arr(self, nuM_RS[fil4], nuC_RS[fil4], Fnu_RS[fil4], freqs[fil4]))*calpha[ii]
light_curve_CJ[obsFreqs==freq, filTM_cj[filTm_cj][fil5]] = light_curve_CJ[obsFreqs==freq, filTM_cj[filTm_cj][fil5]] + (
afac_cj[fil5] * dopFacs_cj[fil5]**3. * FluxNuSC_arr(self, nuMobs_cj[fil5], nuCobs_cj[fil5], Fnuobs_cj[fil5], freqs_cj[fil5]))*calpha_cj[ii]
#return tts, 2.*light_curve, 2.*light_curve_RS
return tts, light_curve, light_curve_RS, light_curve_CJ
def light_curve_peer(self, theta_obs, obsFreqs, tt0, ttf, num, Rb):
if type(obsFreqs)==float:
obsFreqs = array([obsFreqs])
calpha = self.obsangle(theta_obs)
alpha = arccos(calpha)
calpha_cj = self.obsangle_cj(theta_obs)
alpha_cj = arccos(calpha_cj)
if self.evolution == 'adiabatic':
max_Tobs = max(obsTime_offAxis_UR(self.RRs, self.TTs, self.Betas, max(alpha)))/cts.sTd
max_Tobs_cj = max(obsTime_offAxis_UR(self.RRs, self.TTs, self.Betas, max(alpha_cj)))/cts.sTd
elif self.evolution == 'peer':
max_Tobs = max(obsTime_offAxis_General(self.RRs, self.TTs, max(alpha)))/cts.sTd
max_Tobs_cj = max(obsTime_offAxis_General(self.RRs, self.TTs, max(alpha_cj)))/cts.sTd
if (ttf>max_Tobs or ttf>max_Tobs_cj):
print("ttf larger than maximum observable time. Adjusting value. ")
ttf = min(max_Tobs, max_Tobs_cj)
lt0 = log10(tt0*cts.sTd) # Convert to seconds and then logspace
ltf = log10(ttf*cts.sTd) # Convert to seconds and then logspace
tts = logspace(lt0, ltf+(ltf-lt0)/num, num) # Timeline on which the flux is evaluated.
light_curve = zeros([len(obsFreqs), num])
light_curve_RS = zeros([len(obsFreqs), num])
light_curve_CJ = zeros([len(obsFreqs), num])
for ii in tqdm(range(self.ncells)):
#for ii in range(self.ncells):
onAxisTint = interp1d(self.RRs, self.TTs)
ttobs = obsTime_offAxis_General(self.RRs, self.TTs, alpha[ii])
ttobs_cj = obsTime_offAxis_General(self.RRs, self.TTs, alpha_cj[ii])
#ttobs = obsTime_offAxis_UR(self.RRs, self.TTs, self.Betas, alpha[ii])
filTM = where(tts<=max(ttobs))[0]
filTm = where(tts[filTM]>=min(ttobs))[0]
            filTM_cj = where(tts<=max(ttobs_cj))[0]
            filTm_cj = where(tts[filTM_cj]>=min(ttobs_cj))[0]
#print(len(tts[filT]))
Rint = interp1d(ttobs, self.RRs)
Robs = Rint(tts[filTM][filTm])
GamObs = self.GamInt(Robs)
BetaObs = sqrt(1.-GamObs**(-2.))
#if self.evolution == 'adiabatic':
# onAxisTobs = obsTime_onAxis_adiabatic(Robs, BetaObs)
#elif self.evolution == 'peer':
# onAxisTobs = obsTime_onAxis_integrated(Robs, GamObs, BetaObs)
onAxisTobs = onAxisTint(Robs)
Rint_cj = interp1d(ttobs_cj, self.RRs)
            Robs_cj = Rint_cj(tts[filTM_cj][filTm_cj])
GamObs_cj = self.GamInt(Robs_cj)
BetaObs_cj = sqrt(1.-GamObs_cj**(-2.))
onAxisTobs_cj = onAxisTint(Robs_cj)
# Forward shock stuff
#gamMobs, gamCobs = self.gamMI(Robs), self.gamCI(Robs)
#nuMobs, nuCobs = self.nuMI(Robs), self.nuCI(Robs)
#Fnuobs = self.FnuMI(Robs)
#Bfield = sqrt(32.*pi*cts.mp*self.nn*self.epB*GamObs*(GamObs-1.))*cts.cc
Bfield = Bfield_modified(GamObs, BetaObs, self.nn, self.epB)
gamMobs, nuMobs = minGam_modified(GamObs, self.epE, self.epB, self.nn, self.pp, Bfield, self.Xp)
gamCobs, nuCobs = critGam_modified(GamObs, self.epE, self.epB, self.nn, self.pp, Bfield, onAxisTobs)
Fnuobs = fluxMax_modified(Robs, GamObs, self.nn, Bfield, self.DD, self.PhiP)
Bfield_cj = Bfield_modified(GamObs_cj, BetaObs_cj, self.nn, self.epB)
gamMobs_cj, nuMobs_cj = minGam_modified(GamObs_cj, self.epE, self.epB, self.nn, self.pp, Bfield_cj, self.Xp)
gamCobs_cj, nuCobs_cj = critGam_modified(GamObs_cj, self.epE, self.epB, self.nn, self.pp, Bfield_cj, onAxisTobs_cj)
Fnuobs_cj = fluxMax_modified(Robs_cj, GamObs_cj, self.nn, Bfield_cj, self.DD, self.PhiP)
# Reverse shock stuff
nuM_RS, nuC_RS, Fnu_RS = params_tt_RS(self, onAxisTobs, Rb)
dopFacs = self.dopplerFactor(calpha[ii], BetaObs)
afac = self.cellSize/maximum(self.cellSize*ones(num)[filTM][filTm], 2.*pi*(1.-cos(1./GamObs)))
dopFacs_cj = self.dopplerFactor(calpha_cj[ii], BetaObs_cj)
            afac_cj = self.cellSize/maximum(self.cellSize*ones(num)[filTM_cj][filTm_cj], 2.*pi*(1.-cos(1./GamObs_cj)))
for freq in obsFreqs:
fil1, fil2 = where(gamMobs<=gamCobs)[0], where(gamMobs>gamCobs)[0]
fil3, fil4 = where(nuM_RS<=nuC_RS)[0], where(nuM_RS>nuC_RS)[0]
fil5, fil6 = where(nuMobs_cj<=nuCobs_cj)[0], where(nuMobs_cj>nuCobs_cj)[0]
                freqs = freq/dopFacs  # Calculate the rest-frame frequencies corresponding to the observed frequency
freqs_cj = freq/dopFacs_cj
light_curve[obsFreqs==freq, filTM[filTm][fil1]] = light_curve[obsFreqs==freq, filTM[filTm][fil1]] + (
self.cellSize * (GamObs[fil1]*(1.-BetaObs[fil1]*calpha[ii]))**(-3.) * FluxNuSC_arr(self, nuMobs[fil1], nuCobs[fil1], Fnuobs[fil1], freqs[fil1]))#*calpha[ii]
#light_curve[obsFreqs==freq, filTM[filTm][fil2]] = light_curve[obsFreqs==freq, filTM[filTm][fil2]] + (
# afac[fil2] * dopFacs[fil2]**3. * FluxNuFC_arr(self, nuMobs[fil2], nuCobs[fil2], Fnuobs[fil2], freqs[fil2]))*calpha[ii]
light_curve_RS[obsFreqs==freq, filTM[filTm][fil3]] = light_curve_RS[obsFreqs==freq, filTM[filTm][fil3]] + (
self.cellSize * (GamObs[fil3]*(1.-BetaObs[fil3]*calpha[ii]))**(-3.) * FluxNuSC_arr(self, nuM_RS[fil3], nuC_RS[fil3], Fnu_RS[fil3], freqs[fil3]))#*calpha[ii]
#light_curve_RS[obsFreqs==freq, filTM[filTm][fil4]] = light_curve_RS[obsFreqs==freq, filTM[filTm][fil4]] + (
# afac[fil4] * dopFacs[fil4]**3. * FluxNuFC_arr(self, nuM_RS[fil4], nuC_RS[fil4], Fnu_RS[fil4], freqs[fil4]))*calpha[ii]
                light_curve_CJ[obsFreqs==freq, filTM_cj[filTm_cj][fil5]] = light_curve_CJ[obsFreqs==freq, filTM_cj[filTm_cj][fil5]] + (
                                    self.cellSize * (GamObs_cj[fil5]*(1.-BetaObs_cj[fil5]*calpha_cj[ii]))**(-3.) * FluxNuSC_arr(self, nuMobs_cj[fil5], nuCobs_cj[fil5], Fnuobs_cj[fil5], freqs_cj[fil5]))#*calpha[ii]
#return tts, 2.*light_curve, 2.*light_curve_RS
return tts, light_curve, light_curve_RS, light_curve_CJ
def lightCurve_interp(self, theta_obs, obsFreqs, tt0, ttf, num, Rb):
if self.evolution == "adiabatic":
tts, light_curve, light_curve_RS, light_curve_CJ = self.light_curve_adiabatic(theta_obs, obsFreqs, tt0, ttf, num, Rb)
elif self.evolution == "peer":
tts, light_curve, light_curve_RS,light_curve_CJ = self.light_curve_peer(theta_obs, obsFreqs, tt0, ttf, num, Rb)
return tts, light_curve, light_curve_RS, light_curve_CJ
def skymap(self, theta_obs, tt_obs, freq, nx, ny, xx0, yy0):
calpha = zeros([2*self.ncells])
alpha = zeros([2*self.ncells])
calpha[:self.ncells] = self.obsangle(theta_obs)
calpha[self.ncells:] = self.obsangle_cj(theta_obs)
alpha = arccos(calpha)
TTs, RRs, Gams, Betas = zeros(2*self.ncells), zeros(2*self.ncells), zeros(2*self.ncells), zeros(2*self.ncells)
#nuMs, nuCs, fluxes = zeros(2.*self.ncells), zeros(2.*self.ncells), zeros(2.*self.ncells)
fluxes = zeros(2*self.ncells)
im_xxs, im_yys = zeros(2*self.ncells), zeros(2*self.ncells)
im_xxs[:self.ncells] = -1.*cos(theta_obs)*sin(self.cthetas)*sin(self.cphis) + sin(theta_obs)*cos(self.cthetas)
im_yys[:self.ncells] = sin(self.cthetas)*cos(self.cphis)
im_xxs[self.ncells:] = -1.*cos(theta_obs)*sin(pi-self.cthetas)*sin(self.cphis) + sin(theta_obs)*cos(pi-self.cthetas)
im_yys[self.ncells:] = sin(pi-self.cthetas)*cos(self.cphis)
        indices = where(im_yys>0)[0]
if self.evolution == 'adiabatic':
Tint = interp1d(self.RRs, self.TTs)
for ii in tqdm(indices):#tqdm(range(self.ncells)):
ttobs = obsTime_offAxis_UR(self.RRs, self.TTs, self.Betas, alpha[ii])
ttobs_cj = obsTime_offAxis_UR(self.RRs, self.TTs, self.Betas, alpha[ii+self.ncells])
Rint = interp1d(ttobs, self.RRs)
Rint_cj = interp1d(ttobs_cj, self.RRs)
RRs[ii] = Rint(tt_obs)
RRs[ii+self.ncells] = Rint_cj(tt_obs)
TTs[ii], TTs[ii+self.ncells] = Tint(RRs[ii]), Tint(RRs[ii+self.ncells])
Gams[ii], Gams[ii+self.ncells] = self.GamInt(RRs[ii]), self.GamInt(RRs[ii+self.ncells])
Betas = sqrt(1.-Gams**(-2.))
Bf = (32.*pi*self.nn*self.epB*cts.mp)**(1./2.) * Gams*cts.cc
gamM, nuM = minGam(Gams, self.epE, self.epB, self.nn, self.pp, Bf)
gamC, nuC = critGam(Gams, self.epE, self.epB, self.nn, self.pp, Bf, TTs)
fMax = fluxMax(RRs, Gams, self.nn, Bf, self.DD)
dopFacs = self.dopplerFactor(calpha, sqrt(1.-Gams**(-2)))
afac = self.cellSize/maximum(self.cellSize*ones(len(Gams)), 2.*pi*(1.-cos(1./Gams)))
obsFreqs = freq/dopFacs
fluxes = (self.DD**2./(calpha*self.cellSize*RRs**2.)) *afac * dopFacs**3. * FluxNuSC_arr(self, nuM, nuC, fMax, obsFreqs)*1./calpha
#fluxes = afac * dopFacs**3. * FluxNuSC_arr(self, nuM, nuC, fMax, obsFreqs)*calpha
elif self.evolution == 'peer':
Tint = interp1d(self.RRs, self.TTs)
for ii in tqdm(range(self.ncells)):
ttobs = obsTime_offAxis_General(self.RRs, self.TTs, alpha[ii])
ttobs_cj = obsTime_offAxis_General(self.RRs, self.TTs, alpha[ii+self.ncells])
Rint, Rint_cj = interp1d(ttobs, self.RRs), interp1d(ttobs_cj, self.RRs)
RRs[ii], RRs[ii+self.ncells] = Rint(tt_obs), Rint_cj(tt_obs)
TTs[ii], TTs[ii+self.ncells] = Tint(RRs[ii]), Tint(RRs[ii+self.ncells])
Gams[ii], Gams[ii+self.ncells] = self.GamInt(RRs[ii]), self.GamInt(RRs[ii+self.ncells])
Betas = sqrt(1.-Gams**(-2.))
Bf = Bfield_modified(Gams, Betas, self.nn, self.epB)
gamM, nuM = minGam_modified(Gams, self.epE, self.epB, self.nn, self.pp, Bf, self.Xp)
gamC, nuC = critGam_modified(Gams, self.epE, self.epB, self.nn, self.pp, Bf, TTs)
fMax = fluxMax_modified(RRs, Gams, self.nn, Bf, self.DD, self.PhiP)
dopFacs = self.dopplerFactor(calpha, sqrt(1.-Gams**(-2)))
obsFreqs = freq/dopFacs
#fluxes = (self.DD/self.cellSize*RRs)**2. * self.cellSize * (Gams*(1.-Betas*calpha))**(-3.) * FluxNuSC_arr(self, nuM, nuC, fMax, obsFreqs)*1./calpha
fluxes = (self.DD**2./(calpha*self.cellSize*RRs**2.)) * self.cellSize * (Gams*(1.-Betas*calpha))**(-3.) * FluxNuSC_arr(self, nuM, nuC, fMax, obsFreqs)
#fluxes = self.cellSize * (Gams*(1.-Betas*calpha))**(-3.) * FluxNuSC_arr(self, nuM, nuC, fMax, obsFreqs)#*calpha
im_xxs = RRs*im_xxs
im_yys = RRs*im_yys
return im_xxs, im_yys, fluxes, RRs, Gams, calpha, TTs
class jetHeadGauss(jetHeadUD):
def __init__(self, EE, Gam0, nn, epE, epB, pp, DD, steps, evolution, nlayers, joAngle, coAngle, shell_type='thin', Rb=1.):
# In this case, EE refers to the total energy and Gam0 to the central Gam0 value
self.coAngle = coAngle
jetHeadUD.__init__(self, EE, Gam0, nn, epE, epB, pp, DD, steps, evolution, nlayers, joAngle, shell_type, Rb)
self.__energies_and_LF()
if self.evolution == 'adiabatic':
self.cell_Rds = (3./(4.*pi) * 1./(cts.cc**2.*cts.mp) *
self.cell_EEs/(self.nn*self.cell_Gam0s**2.))**(1./3.)
self.cell_Tds = self.cell_Rds/(cts.cc*self.cell_Beta0s) * (1.-self.cell_Beta0s)
#self.cell_Tds = self.cell_Rds/(2.*cts.cc*self.cell_Gam0s**2.)
#self.Rd/(2.*self.Gam0**2 * cts.cc)
elif self.evolution == 'peer':
self.cell_Rds = (3./(4.*pi) * 1./(cts.cc**2.*cts.mp) *
self.cell_EEs/(self.nn*self.cell_Gam0s**2.))**(1./3.)
self.cell_Tds = self.cell_Rds/(cts.cc*self.cell_Beta0s) * (1.-self.cell_Beta0s)
print("Calculating dynamical evolution")
self.__evolve()
print("Calculating reverse shock parmeters")
self.__peakParamsRS_struc()
def __energies_and_LF(self):
#AngFacs = exp(-1.*self.cthetas**2./(2.*self.coAngle**2.))
        self.cell_EEs = self.EE * exp(-1.*self.cthetas**2./(self.coAngle**2.))  # Just for testing
#self.cell_EEs = self.EE * exp(-1.*self.cthetas**2./(self.coAngle**2.))
self.cell_Gam0s = 1.+(self.Gam0-1)*exp(-1.*self.cthetas**2./(2.*self.coAngle**2.))
self.cell_Beta0s = sqrt(1.-(self.cell_Gam0s)**(-2.))
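        # Added note: with these Gaussian profiles the energy per cell falls to
        # exp(-1) ~ 0.37 of the core value at cthetas = coAngle, while
        # (Gam0 - 1) falls to exp(-0.5) ~ 0.61 of its on-axis value there.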
def __evolve(self):
if self.evolution == 'peer':
self.RRs, self.Gams, self.Betas = self.evolve_relad_struct()
self.TTs = self.obsTime_onAxis_struct()
self.Bfield = Bfield_modified(self.Gams, self.Betas, self.nn, self.epB)
elif self.evolution == 'adiabatic':
self.RRs, self.Gams, self.Betas = self.evolve_ad_struct()
self.TTs = self.obsTime_onAxis_struct()
self.Bfield = (32.*pi*cts.mp*self.epB*self.nn)**(1./2.)*self.Gams*cts.cc
def __peakParamsRS_struc(self):
RSpeak_nuM_struc = zeros(self.ncells)
RSpeak_nuC_struc = zeros(self.ncells)
RSpeak_Fnu_struc = zeros(self.ncells)
if self.shell_type=='thin':
print("Setting up thin shell")
for ii in tqdm(range(self.ncells)):
#self.RSpeak_nuM = 9.6e14 * epE**2. * epB**(1./2.) * nn**(1./2) * Gam0**2.
#self.RSpeak_nuC = 4.0e16 * epB**(-3./2.) * EE**(-2./3.) * nn**(-5./6.) * Gam0**(4./3.)
#self.RSpeak_Fnu = 5.2 * DD**(-2.) * epB**(1./2.) * EE * nn**(1./2.) * Gam0
Rd, Td = self.cell_Rds[ii], self.cell_Tds[ii]
#print Rd
if self.evolution == 'peer':
#print shape(self.RRs), shape(self.Gams)
GamsInt = interp1d(self.RRs[:], self.Gams[:,ii])
Gam0 = GamsInt(Rd)
Beta0 = sqrt(1.-Gam0**(-2.))
Bf = Bfield_modified(Gam0, Beta0, self.nn, self.epB)
gamM, nuM = minGam_modified(Gam0, self.epE, self.epB, self.nn, self.pp, Bf, self.Xp)
gamC, nuC = critGam_modified(Gam0, self.epE, self.epB, self.nn, self.pp, Bf, Td)
Fnu = fluxMax_modified(Rd, Gam0, self.nn, Bf, self.DD, self.PhiP)
elif self.evolution == 'adiabatic':
GamsInt = interp1d(self.RRs[:,ii], self.Gams[:,ii])
Gam0 = GamsInt(Rd)
Bf = (32.*pi*cts.mp*self.epB*self.nn)**(1./2.)*Gam0*cts.cc
gamM, nuM = minGam(Gam0, self.epE, self.epB, self.nn, self.pp, Bf)
gamC, nuC = critGam(Gam0, self.epE, self.epB, self.nn, self.pp, Bf, Td)
Fnu = fluxMax(Rd, Gam0, self.nn, Bf, self.DD)
#print Rd, max(self.RRs[:,ii]), min(self.RRs[:,ii]), self.cell_Gam0s[ii], self.cthetas[ii]
#gamM = self.epE*(self.pp-2.)/(self.pp-1.) * cts.mp/cts.me * Gam0
#gamC = 3.*cts.me/(16.*self.epB*cts.sigT*cts.mp*cts.cc*Gam0**3.*Td*self.nn)
#nuM = Gam0*gamM**2.*cts.qe*(32.*pi*cts.mp*self.epB*self.nn)**(1./2.)*Gam0*cts.cc/(2.*pi*cts.me*cts.cc)
#nuC = Gam0*gamC**2.*cts.qe*(32.*pi*cts.mp*self.epB*self.nn)**(1./2.)*Gam0*cts.cc/(2.*pi*cts.me*cts.cc)
#Fnu = self.nn**(3./2.)*Rd**3. * cts.sigT * cts.cc**3. *cts.me* (32.*pi*cts.mp*self.epB
# )**(1./2.)*Gam0**2./(9.*cts.qe*self.DD**2.)
#RSpeak_nuM_struc[ii] = nuM/(self.cell_Gam0s[ii]**2.)
#RSpeak_nuC_struc[ii] = nuC
#RSpeak_Fnu_struc[ii] = self.cell_Gam0s[ii] * Fnu
RSpeak_nuM_struc[ii] = nuM/(Gam0**2)
RSpeak_nuC_struc[ii] = nuC
RSpeak_Fnu_struc[ii] = Gam0*Fnu
self.RSpeak_nuM_struc = RSpeak_nuM_struc #self.Rb**(1./2.)*RSpeak_nuM_struc
self.RSpeak_nuC_struc = RSpeak_nuC_struc #self.Rb**(-3./2.)*RSpeak_nuC_struc
self.RSpeak_Fnu_struc = RSpeak_Fnu_struc #self.Rb**(1./2.)*RSpeak_Fnu_struc
def evolve_relad_struct(self):
"""
        Evolution following Pe'er 2012. Adiabatic expansion into a cold, uniform ISM using conservation of energy in relativistic form. This solution
        transitions smoothly from the ultra-relativistic to the Newtonian regime. Modified for a structured jet
"""
Gam0 = self.Gam0
Rl = self.Rd * Gam0**(2./3.)
RRs = logspace(log10(self.Rd/1000.), log10(Rl)+3., self.steps+1) #10
#MMs = 4.*pi * cts.mp*self.nn*RRs**3./3.#4./3. *pi*cts.mp*self.nn*RRs**3.
MMs = 4./3. * pi*RRs**3. * self.nn * cts.mp
#Gams[0,:] = self.cell_Gam0s
#print("Calculating Gamma as a function of R for each cell")
print("Calculating dynamical evolution for each layer")
#for ii in tqdm(range(1,len(self.Betas))):
# Gams[ii,:] = rk4(dgdm_struc, self, log10(MMs[ii-1]), Gams[ii-1,:], (log10(MMs[ii])-log10(MMs[ii-1])))
for ii in tqdm(range(self.nlayers)):
# Set up initial conditions for the layer
#GamEv[0] = Gams[0,self.layer==ii+1][0]
MM0 = self.cell_EEs[self.layer==ii+1][0]/(self.cell_Gam0s[self.layer==ii+1][0]*cts.cc**2.)
#Gams = zeros(len(RRs))
GamEv = zeros([len(RRs)])
GamEv[0] = self.cell_Gam0s[self.layer==ii+1][0]
# Calculate dynamical evolution of the layer
for jj in range(1, len(GamEv)):
GamEv[jj] = rk4(dgdm_mod, MM0, log10(MMs[jj-1]), GamEv[jj-1], (log10(MMs[jj])-log10(MMs[jj-1])))
# Share the values with the rest of the cells of the layer
if ii==0:
Gams = array([GamEv,]).T
else:
GamEv = array([GamEv]*self.cellsInLayer(ii)).T
#Gams = column_stack((Gams, GamEv))
Gams = concatenate([Gams, GamEv], axis=1)
Betas = sqrt(1.-1./Gams**2.)
#Betas[-1] = 0.0
#print(shape(Gams))
return RRs, Gams, Betas
def evolve_ad_struct(self):
"""
        Evolution following simple energy conservation for an adiabatically expanding relativistic shell. Same scaling as the
        Blandford-McKee blastwave solution. This calculation is only valid in the ultrarelativistic phase.
"""
Gam = self.Gam0
GamSD = 1.021
Rsd = Gam**(2./3.) *self.Rd / GamSD # Radius at Lorentz factor=1.005 -> after this point use Sedov-Taylor scaling
Rl = self.Rd * self.Gam0**(2./3.)
#RRs = logspace(log10(self.Rd/100.), log10(Rl), self.steps+1) #10
RRs = zeros([self.steps+1, self.ncells])
Gams = zeros([self.steps+1, self.ncells])
Betas = zeros([self.steps+1, self.ncells])
Gams[0,:] = self.cell_Gam0s
for ii in range(self.ncells):
RRs[:,ii] = logspace(log10(self.cell_Rds[ii]/100.), log10(0.9999*self.cell_Rds[ii] * self.cell_Gam0s[ii]**(2./3.)), self.steps+1) # All start at same point
Gams[RRs[:,ii]<=self.cell_Rds[ii],ii] = self.cell_Gam0s[ii]
Gams[RRs[:,ii]>self.cell_Rds[ii], ii] = (self.cell_Rds[ii]/RRs[RRs[:,ii]>self.cell_Rds[ii],ii])**(3./2.) * self.cell_Gam0s[ii]
#Gams[RRs>=Rsd] = 1./sqrt( 1.-(Rsd/RRs[RRs>=Rsd])**(6.)*(1.-1./(Gams[(RRs>jet.Rd) & (RRs<Rsd)][-1]**2.)))
            #Gams[RRs>=jet.Rd] = odeint(jet.dgdr, jet.Gam0, RRs[RRs>=jet.Rd])[:,0]
Betas[RRs[:,ii]<=self.cell_Rds[ii],ii] = sqrt(1.-(1./self.cell_Gam0s[ii])**2.)
Betas[RRs[:,ii]>self.cell_Rds[ii], ii] = sqrt(1.-(1./Gams[RRs[:,ii]>self.cell_Rds[ii], ii])**2.)
Betas[-1,:] = 0.
#Gams[Gams<=1.] = 1.
return RRs, Gams, Betas
def obsTime_onAxis_struct(self):
"""
On-axis observer times calculated for each individual cell
"""
print("Calculating on-axis observerd time for each cell")
#for ii in tqdm(range(1,len(self.Betas))):
if self.evolution == "adiabatic":
for layer in range(self.nlayers):
if layer==0:
TTs = obsTime_onAxis_adiabatic(self.RRs[:, layer],self.Betas[:, layer])
else:
layerTime = obsTime_onAxis_adiabatic(self.RRs[:, self.layer==layer+1][:,0], self.Betas[:, self.layer==layer+1][:,0])
for cell in range(self.cellsInLayer(layer)):
TTs = column_stack((TTs, layerTime))
elif self.evolution == "peer":
for layer in tqdm(range(self.nlayers)):
if layer==0:
TTs = obsTime_onAxis_integrated(self.RRs, self.Gams[:, layer], self.Betas[:, layer])
TTs = array([TTs,]).T
else:
layerTime = obsTime_onAxis_integrated(self.RRs, self.Gams[:, self.layer==layer+1][:,0],
self.Betas[:, self.layer==layer+1][:,0])
#TTs = column_stack((TTs, layerTime))
layerTime = array([layerTime]*self.cellsInLayer(layer)).T
TTs = concatenate([TTs, layerTime], axis=1)
return TTs
def params_tt_RS(self, tt, ii, Rb):
        if isinstance(tt, float): tt = array([tt])
fil1, fil2 = where(tt<=self.cell_Tds[ii])[0], where(tt>self.cell_Tds[ii])[0]
#print ii, len(tt)
nuM = zeros(len(tt))
nuC = zeros(len(tt))
fluxMax = zeros(len(tt))
#print len(nuM), len(nuC), len()
nuM[fil1] = self.RSpeak_nuM_struc[ii]*(tt[fil1]/self.cell_Tds[ii])**(6.)
nuC[fil1] = self.RSpeak_nuC_struc[ii]*(tt[fil1]/self.cell_Tds[ii])**(-2.)
fluxMax[fil1] = self.RSpeak_Fnu_struc[ii]*(tt[fil1]/self.cell_Tds[ii])**(3./2.) # Returns fluxes in Jy
nuM[fil2] = self.RSpeak_nuM_struc[ii]*(tt[fil2]/self.cell_Tds[ii])**(-54./35.)
nuC[fil2] = self.RSpeak_nuC_struc[ii]*(tt[fil2]/self.cell_Tds[ii])**(4./35.)
fluxMax[fil2] = self.RSpeak_Fnu_struc[ii]*(tt[fil2]/self.cell_Tds[ii])**(-34./35.) # Returns fluxes in Jy
return Rb**(1./2.)*nuM, Rb**(-3./2.)*nuC, Rb**(1./2.)*fluxMax
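    # Illustrative scaling check for params_tt_RS (values hypothetical): for a
    # cell with cell_Tds[ii] = 100 s, evaluating at tt = 200 s falls on the
    # post-deceleration branch, so the peak quantities scale by
    # (200/100)**(-54./35.), (200/100)**(4./35.) and (200/100)**(-34./35.) for
    # nuM, nuC and fluxMax respectively, before the Rb**(1./2.), Rb**(-3./2.)
    # and Rb**(1./2.) magnetisation corrections are applied.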
def light_curve_adiabatic(self, theta_obs, obsFreqs, tt0, ttf, num, Rb):
        if isinstance(obsFreqs, float):
obsFreqs = array([obsFreqs])
calpha = self.obsangle(theta_obs)
alpha = arccos(calpha)
        # Observer angle for the counter-jet
calpha_cj = self.obsangle_cj(theta_obs)
alpha_cj = arccos(calpha_cj)
Tfil = self.TTs[:,-1]== max(self.TTs[:,-1])
        max_Tobs = self.RRs[Tfil, -1]/(self.Betas[Tfil,-1]*cts.cc) * (1.-self.Betas[Tfil,-1]*cos(max(alpha)))/cts.sTd # convert to days for comparison with ttf
#max_Tobs_oa = max(self.TTs[:,-1])
#max_Tobs = max(obsTime_offAxis(self, self.RRs, self.TTs[:,alpha==max(alpha)][:,0], max(alpha)))/cts.sTd
if ttf>max_Tobs:
print("ttf larger than maximum observable time. Adjusting value. ")
ttf = max_Tobs
lt0 = log10(tt0*cts.sTd) # Convert to seconds and then logspace
ltf = log10(ttf*cts.sTd) # Convert to seconds and then logspace
tts = logspace(lt0, ltf+(ltf-lt0)/num, num) # Timeline on which the flux is evaluated.
light_curve = zeros([len(obsFreqs), num])
light_curve_RS = zeros([len(obsFreqs), num])
light_curve_CJ = zeros([len(obsFreqs), num])
for ii in tqdm(range(self.ncells)):
#for ii in range(self.ncells):
ttobs = obsTime_offAxis_UR(self.RRs[:,ii], self.TTs[:,ii], self.Betas[:,ii], alpha[ii])
RRs = self.RRs[:,ii]
filTM = where(tts<=max(ttobs))[0]
filTm = where(tts[filTM]>=min(ttobs))[0]
Rint = interp1d(ttobs, RRs)
Gamint = interp1d(RRs, self.Gams[:,ii])
Robs = Rint(tts[filTM][filTm])
GamObs = Gamint(Robs)
BetaObs = sqrt(1.-GamObs**(-2.))
dopFacs = self.dopplerFactor(calpha[ii], sqrt(1.-GamObs**(-2)))
afac = self.cellSize/maximum(self.cellSize*ones(num)[filTM][filTm], 2.*pi*(1.-cos(1./GamObs)))
onAxisTobs = dopFacs*tts[filTM][filTm]
# Forward shock stuff
Bfield = sqrt(32.*pi*self.nn*self.epB*cts.mp)*cts.cc*GamObs
gamMobs, nuMobs = minGam(GamObs, self.epE, self.epB, self.nn, self.pp, Bfield)
gamCobs, nuCobs = critGam(GamObs, self.epE, self.epB, self.nn, self.pp, Bfield, onAxisTobs)
Fnuobs = fluxMax(Robs, GamObs, self.nn, Bfield, self.DD)
#Reverse shock stuff
nuM_RS, nuC_RS, Fnu_RS = self.params_tt_RS(onAxisTobs, ii, Rb)
# Counter jet stuff
ttobs_cj = obsTime_offAxis_UR(self.RRs[:,ii], self.TTs[:,ii], self.Betas[:,ii], alpha_cj[ii])
filTM_cj = where(tts<=max(ttobs_cj))[0]
            filTm_cj = where(tts[filTM_cj]>=min(ttobs_cj))[0]
            Rint_cj = interp1d(ttobs_cj, RRs)
            #Gamint = interp1d(RRs, self.Gams[:,ii])
            Robs_cj = Rint_cj(tts[filTM_cj][filTm_cj])
GamObs_cj = Gamint(Robs_cj)
if len(GamObs_cj)==0: continue
BetaObs_cj = sqrt(1.-GamObs_cj**(-2.))
dopFacs_cj = self.dopplerFactor(calpha_cj[ii], sqrt(1.-GamObs_cj**(-2)))
afac_cj = self.cellSize/maximum(self.cellSize*ones(num)[filTM_cj][filTm_cj], 2.*pi*(1.-cos(1./GamObs_cj)))
onAxisTobs_cj = dopFacs_cj*tts[filTM_cj][filTm_cj]
Bfield_cj = sqrt(32.*pi*self.nn*self.epB*cts.mp)*cts.cc*GamObs_cj
gamMobs_cj, nuMobs_cj = minGam(GamObs_cj, self.epE, self.epB, self.nn, self.pp, Bfield_cj)
gamCobs_cj, nuCobs_cj = critGam(GamObs_cj, self.epE, self.epB, self.nn, self.pp, Bfield_cj, onAxisTobs_cj)
Fnuobs_cj = fluxMax(Robs_cj, GamObs_cj, self.nn, Bfield_cj, self.DD)
for freq in obsFreqs:
fil1, fil2 = where(gamMobs<=gamCobs)[0], where(gamMobs>gamCobs)[0]
fil3, fil4 = where(nuM_RS<=nuC_RS)[0], where(nuM_RS>nuC_RS)[0]
fil5, fil6 = where(nuMobs_cj<=nuCobs_cj)[0], where(nuMobs_cj>nuCobs_cj)[0]
                freqs = freq/dopFacs # Calculate the rest-frame frequencies corresponding to the observed frequency
freqs_cj = freq/dopFacs_cj
#print shape(freqs), shape(freqs[fil1]), shape(nuMobs[fil1]), shape(nuCobs[fil1]), shape(Fnuobs[fil1]), shape(afac[fil1]), shape(calpha)
#print shape(light_curve[obsFreqs==freq, filT]), shape([fil1])
#print fil1
light_curve[obsFreqs==freq, filTM[filTm][fil1]] = light_curve[obsFreqs==freq, filTM[filTm][fil1]] + (
afac[fil1] * dopFacs[fil1]**3. * FluxNuSC_arr(self, nuMobs[fil1], nuCobs[fil1], Fnuobs[fil1], freqs[fil1]))*calpha[ii]
#light_curve[obsFreqs==freq, filTM[filTm][fil2]] = light_curve[obsFreqs==freq, filTM[filTm][fil2]] + (
# afac[fil2] * dopFacs[fil2]**3. * FluxNuFC_arr(self, nuMobs[fil2], nuCobs[fil2], Fnuobs[fil2], freqs[fil2]))*calpha[ii]
light_curve_RS[obsFreqs==freq, filTM[filTm][fil3]] = light_curve_RS[obsFreqs==freq, filTM[filTm][fil3]] + (
afac[fil3] * dopFacs[fil3]**3. * FluxNuSC_arr(self, nuM_RS[fil3], nuC_RS[fil3], Fnu_RS[fil3], freqs[fil3]))*calpha[ii]
#light_curve_RS[obsFreqs==freq, filTM[filTm][fil4]] = light_curve_RS[obsFreqs==freq, filTM[filTm][fil4]] + (
# afac[fil4] * dopFacs[fil4]**3. * FluxNuFC_arr(self, nuM_RS[fil4], nuC_RS[fil4], Fnu_RS[fil4], freqs[fil4]))*calpha[ii]
light_curve_CJ[obsFreqs==freq, filTM_cj[filTm_cj][fil5]] = light_curve_CJ[obsFreqs==freq, filTM_cj[filTm_cj][fil5]] + (
afac_cj[fil5] * dopFacs_cj[fil5]**3. * FluxNuSC_arr(self, nuMobs_cj[fil5], nuCobs_cj[fil5], Fnuobs_cj[fil5], freqs_cj[fil5]))*calpha_cj[ii]
return tts, light_curve, light_curve_RS, light_curve_CJ
#return tts, 2.*light_curve, 2.*light_curve_RS
def light_curve_peer(self, theta_obs, obsFreqs, tt0, ttf, num, Rb):
        if isinstance(obsFreqs, float):
obsFreqs = array([obsFreqs])
calpha = self.obsangle(theta_obs)
alpha = arccos(calpha)
        # Observer angle for the counter-jet
calpha_cj = self.obsangle_cj(theta_obs)
alpha_cj = arccos(calpha_cj)
Tfil = self.TTs[:,-1]== max(self.TTs[:,-1])
        max_Tobs = max(obsTime_offAxis_General(self.RRs, self.TTs[:,-1], max(alpha)))/cts.sTd # convert to days for comparison with ttf
if ttf>max_Tobs:
print("ttf larger than maximum observable time. Adjusting value.")
ttf = max_Tobs
lt0 = log10(tt0*cts.sTd) # Convert to seconds and then logspace
ltf = log10(ttf*cts.sTd) # Convert to seconds and then logspace
tts = logspace(lt0, ltf+(ltf-lt0)/num, num) # Timeline on which the flux is evaluated.
light_curve = zeros([len(obsFreqs), num])
light_curve_RS = zeros([len(obsFreqs), num])
light_curve_CJ = zeros([len(obsFreqs), num])
RRs = self.RRs
for ii in tqdm(range(self.ncells)):
ttobs = obsTime_offAxis_General(self.RRs, self.TTs[:,ii], alpha[ii])
filTM = where(tts<=max(ttobs))[0]
filTm = where(tts[filTM]>=min(ttobs))[0]
Rint = interp1d(ttobs, RRs)
Gamint = interp1d(RRs, self.Gams[:,ii])
Robs = Rint(tts[filTM][filTm])
GamObs = Gamint(Robs)
BetaObs = sqrt(1.-GamObs**(-2.))
if len(GamObs)==0: continue
onAxisTint = interp1d(RRs, self.TTs[:,ii])
#onAxisTobs = obsTime_onAxis_integrated(Robs, GamObs, BetaObs)
onAxisTobs = onAxisTint(Robs)
#Bfield = sqrt(32.*pi*cts.mp*self.nn*self.epB*GamObs*(GamObs-1.))*cts.cc
#gamMobs, nuMobs = minGam_modified(GamObs, self.epE, self.epB, self.nn, self.pp, Bfield)
#gamCobs, nuCobs = critGam_modified(GamObs, self.epE, self.epB, self.nn, self.pp, Bfield, onAxisTobs)
Bfield = Bfield_modified(GamObs, BetaObs, self.nn, self.epB)
gamMobs, nuMobs = minGam_modified(GamObs, self.epE, self.epB, self.nn, self.pp, Bfield, self.Xp)
gamCobs, nuCobs = critGam_modified(GamObs, self.epE, self.epB, self.nn, self.pp, Bfield, onAxisTobs)
#nuMobs, nuCobs = GamObs*nuMobs, GamObs*nuCobs
Fnuobs = fluxMax_modified(Robs, GamObs, self.nn, Bfield, self.DD, self.PhiP)
#Reverse shock stuff
nuM_RS, nuC_RS, Fnu_RS = self.params_tt_RS(onAxisTobs, ii, Rb)
dopFacs = self.dopplerFactor(calpha[ii], sqrt(1.-GamObs**(-2)))
# Counter jet stuff
ttobs_cj = obsTime_offAxis_General(self.RRs, self.TTs[:,ii], alpha_cj[ii])
filTM_cj = where(tts<=max(ttobs_cj))[0]
filTm_cj = where(tts[filTM_cj]>=min(ttobs_cj))[0]
Rint_cj = interp1d(ttobs_cj, RRs)
#Gamint = interp1d(RRs, self.Gams[:,ii])
Robs_cj = Rint(tts[filTM_cj][filTm_cj])
GamObs_cj = Gamint(Robs_cj)
if len(GamObs_cj)==0: continue
BetaObs_cj = sqrt(1.-GamObs_cj**(-2.))
onAxisTobs_cj = onAxisTint(Robs_cj)
Bfield_cj = Bfield_modified(GamObs_cj, BetaObs_cj, self.nn, self.epB)
gamMobs_cj, nuMobs_cj = minGam_modified(GamObs_cj, self.epE, self.epB, self.nn, self.pp, Bfield_cj, self.Xp)
gamCobs_cj, nuCobs_cj = critGam_modified(GamObs_cj, self.epE, self.epB, self.nn, self.pp, Bfield_cj, onAxisTobs_cj)
Fnuobs_cj = fluxMax_modified(Robs_cj, GamObs_cj, self.nn, Bfield_cj, self.DD, self.PhiP)
dopFacs_cj = self.dopplerFactor(calpha_cj[ii], sqrt(1.-GamObs_cj**(-2)))
#nuMobs = nuMobs/dopFacs
#nuCobs = nuCobs/dopFacs
#nuMobs_cj = nuMobs_cj/dopFacs_cj
#nuCobs_cj = nuCobs_cj/dopFacs_cj
for freq in obsFreqs:
fil1, fil2 = where(gamMobs<=gamCobs)[0], where(gamMobs>gamCobs)[0]
fil3, fil4 = where(nuM_RS<=nuC_RS)[0], where(nuM_RS>nuC_RS)[0]
                freqs = freq/dopFacs # Calculate the rest-frame frequencies corresponding to the observed frequency
light_curve[obsFreqs==freq, filTM[filTm][fil1]] = light_curve[obsFreqs==freq, filTM[filTm][fil1]] + (
self.cellSize*(GamObs[fil1]*(1.-BetaObs[fil1]*calpha[ii]))**(-3.) * FluxNuSC_arr(self, nuMobs[fil1], nuCobs[fil1], Fnuobs[fil1], freqs[fil1]))#*calpha[ii]
#light_curve[obsFreqs==freq, filTM[filTm][fil2]] = light_curve[obsFreqs==freq, filTM[filTm][fil2]] + (
# (GamObs[fil2]*(1.-BetaObs[fil2]*calpha[fil2][ii]))**(-3.) * FluxNuFC_arr(self, nuMobs[fil2], nuCobs[fil2], Fnuobs[fil2], freqs[fil2]))#*calpha[ii]
light_curve_RS[obsFreqs==freq, filTM[filTm][fil3]] = light_curve_RS[obsFreqs==freq, filTM[filTm][fil3]] + (
self.cellSize*(GamObs[fil3]*(1.-BetaObs[fil3]*calpha[ii]))**(-3.) * FluxNuSC_arr(self, nuM_RS[fil3], nuC_RS[fil3], Fnu_RS[fil3], freqs[fil3]))#*calpha[ii]
#light_curve_RS[obsFreqs==freq, filTM[filTm][fil4]] = light_curve_RS[obsFreqs==freq, filTM[filTm][fil4]] + (
# (GamObs[fil4]*(1.-BetaObs[fil4]*calpha[fil4][ii]))**(-3.)* FluxNuFC_arr(self, nuM_RS[fil4], nuC_RS[fil4], Fnu_RS[fil4], freqs[fil4]))#*calpha[ii]
fil5, fil6 = where(nuMobs_cj<=nuCobs_cj)[0], where(nuMobs_cj>nuCobs_cj)[0]
freqs_cj = freq/dopFacs_cj
light_curve_CJ[obsFreqs==freq, filTM_cj[filTm_cj][fil5]] = light_curve_CJ[obsFreqs==freq, filTM_cj[filTm_cj][fil5]] + (
self.cellSize*(GamObs_cj[fil5]*(1.-BetaObs_cj[fil5]*calpha_cj[ii]))**(-3.) * FluxNuSC_arr(self,
nuMobs_cj[fil5], nuCobs_cj[fil5], Fnuobs_cj[fil5], freqs_cj[fil5]))#*calpha[ii]
return tts, light_curve, light_curve_RS, light_curve_CJ
#return tts, 2.*light_curve, 2.*light_curve_RS
def lightCurve_interp(self, theta_obs, obsFreqs, tt0, ttf, num, Rb):
if self.evolution == "adiabatic":
tts, light_curve, light_curve_RS, light_curve_CJ = self.light_curve_adiabatic(theta_obs, obsFreqs, tt0, ttf, num, Rb)
elif self.evolution == "peer":
tts, light_curve, light_curve_RS, light_curve_CJ = self.light_curve_peer(theta_obs, obsFreqs, tt0, ttf, num, Rb)
return tts, light_curve, light_curve_RS, light_curve_CJ
def skymap(self, theta_obs, tt_obs, freq, nx, ny, xx0, yy0):
calpha = zeros([2*self.ncells])
alpha = zeros([2*self.ncells])
calpha[:self.ncells] = self.obsangle(theta_obs)
calpha[self.ncells:] = self.obsangle_cj(theta_obs)
alpha = arccos(calpha)
TTs, RRs, Gams, Betas = zeros(2*self.ncells), zeros(2*self.ncells), zeros(2*self.ncells), zeros(2*self.ncells)
#nuMs, nuCs, fluxes = zeros(2.*self.ncells), zeros(2.*self.ncells), zeros(2.*self.ncells)
fluxes = zeros(2*self.ncells)
im_xxs, im_yys = zeros(2*self.ncells), zeros(2*self.ncells)
im_xxs[:self.ncells] = -1.*cos(theta_obs)*sin(self.cthetas)*sin(self.cphis) + sin(theta_obs)*cos(self.cthetas)
im_yys[:self.ncells] = sin(self.cthetas)*cos(self.cphis)
im_xxs[self.ncells:] = -1.*cos(theta_obs)*sin(pi-self.cthetas)*sin(self.cphis) + sin(theta_obs)*cos(pi-self.cthetas)
im_yys[self.ncells:] = sin(pi-self.cthetas)*cos(self.cphis)
if self.evolution == 'adiabatic':
for ii in tqdm(range(self.ncells)):
Tint = interp1d(self.RRs[:,ii], self.TTs[:,ii])
ttobs = obsTime_offAxis_UR(self.RRs[:,ii], self.TTs[:,ii], self.Betas[:,ii], alpha[ii])
ttobs_cj = obsTime_offAxis_UR(self.RRs[:,ii], self.TTs[:,ii], self.Betas[:,ii], alpha[ii+self.ncells])
Rint = interp1d(ttobs, self.RRs[:,ii])
Rint_cj = interp1d(ttobs_cj, self.RRs[:,ii])
RRs[ii] = Rint(tt_obs)
RRs[ii+self.ncells] = Rint_cj(tt_obs)
TTs[ii], TTs[ii+self.ncells] = Tint(RRs[ii]), Tint(RRs[ii+self.ncells])
GamInt = interp1d(self.RRs[:,ii], self.Gams[:,ii])
Gams[ii], Gams[ii+self.ncells] = GamInt(RRs[ii]), GamInt(RRs[ii+self.ncells])
Betas = sqrt(1.-Gams**(-2.))
Bf = (32.*pi*self.nn*self.epB*cts.mp)**(1./2.) * Gams*cts.cc
gamM, nuM = minGam(Gams, self.epE, self.epB, self.nn, self.pp, Bf)
gamC, nuC = critGam(Gams, self.epE, self.epB, self.nn, self.pp, Bf, TTs)
flux = fluxMax(RRs, Gams, self.nn, Bf, self.DD)
#fluxMax[Gams<=2] = 0.
dopFacs = self.dopplerFactor(calpha, sqrt(1.-Gams**(-2)))
afac = self.cellSize/maximum(self.cellSize, 2.*pi*(1.-cos(1./Gams)))
obsFreqs = freq/dopFacs
fluxes = (self.DD**2./(abs(calpha)*self.cellSize*RRs**2.)) * afac * dopFacs**3. * FluxNuSC_arr(self, nuM, nuC, flux, obsFreqs)
elif self.evolution == 'peer':
for ii in tqdm(range(self.ncells)):
Tint = interp1d(self.RRs, self.TTs[:,ii])
ttobs = obsTime_offAxis_General(self.RRs, self.TTs[:,ii], alpha[ii])
ttobs_cj = obsTime_offAxis_General(self.RRs, self.TTs[:,ii], alpha[ii+self.ncells])
Rint, Rint_cj = interp1d(ttobs, self.RRs), interp1d(ttobs_cj, self.RRs)
RRs[ii], RRs[ii+self.ncells] = Rint(tt_obs), Rint_cj(tt_obs)
TTs[ii], TTs[ii+self.ncells] = Tint(RRs[ii]), Tint(RRs[ii+self.ncells])
GamInt = interp1d(self.RRs, self.Gams[:,ii])
Gams[ii], Gams[ii+self.ncells] = GamInt(RRs[ii]), GamInt(RRs[ii+self.ncells])
Betas = sqrt(1.-Gams**(-2.))
Bf = Bfield_modified(Gams, Betas, self.nn, self.epB)
gamM, nuM = minGam_modified(Gams, self.epE, self.epB, self.nn, self.pp, Bf, self.Xp)
gamC, nuC = critGam_modified(Gams, self.epE, self.epB, self.nn, self.pp, Bf, TTs)
flux = fluxMax_modified(RRs, Gams, self.nn, Bf, self.DD, self.PhiP)
#fluxMax[Gams<=5] = 0.
#nuM, nuC = nuM/Gams, nuC/Gams
dopFacs = self.dopplerFactor(calpha, Betas)
obsFreqs = freq/dopFacs
#afac = self.cellSize/maximum(self.cellSize*ones(self.ncells), 2.*pi*(1.-cos(1./Gams)))
fluxes = (self.DD**2./(abs(calpha)*self.cellSize*RRs**2.)) *self.cellSize* (Gams*(1.-Betas*calpha))**(-3.) * FluxNuSC_arr(self, nuM, nuC, flux, obsFreqs)
#fluxes = (Gams*(1.-Betas*calpha))**(-3.) * FluxNuSC_arr(self, nuM, nuC, fluxMax, obsFreqs)*1./calpha
fluxes2 = self.cellSize*(Gams*(1.-Betas*calpha))**(-3.)*FluxNuSC_arr(self, nuM, nuC, flux, obsFreqs)
im_xxs = RRs*im_xxs
im_yys = RRs*im_yys
return im_xxs, im_yys, fluxes, fluxes2, RRs, Gams, calpha, TTs
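# --- Illustrative usage sketch (not part of the original source) ---
# The jet class is defined earlier in this module; its name and constructor
# arguments below are hypothetical and only show the call pattern of the
# light-curve interface above.
#
#   jet = StructuredJet(...)                        # hypothetical constructor
#   tts, fs, rs, cj = jet.lightCurve_interp(
#       theta_obs=0.3,                              # observer angle [rad]
#       obsFreqs=array([3.e9, 1.e14]),              # observing frequencies [Hz]
#       tt0=0.1, ttf=100., num=200, Rb=1.)          # times [days], steps, RS magnetisation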
| 2.078125
| 2
|
example.py
|
reening/pysflow
| 4
|
12778197
|
<reponame>reening/pysflow
from binascii import unhexlify
from pprint import pprint
from sflow import decode
# Example datagram taken from http://packetlife.net/captures/protocol/sflow/
raw = '0000000500000001ac15231100000001000001a6673f36a00000000100000002' +\
'0000006c000021280000040c0000000100000001000000580000040c00000006' +\
'0000000005f5e100000000010000000300000000018c6e9400009b9e00029062' +\
'0001f6c400000000000000000000000000000000005380600000a0de0000218a' +\
'000008d7000000000000000000000000'
data = unhexlify(raw)
pprint(decode(data))
| 2.671875
| 3
|
RabbitMqUdn/client/quorum-queue-test.py
|
allensanborn/ChaosTestingCode
| 73
|
12778198
|
#!/usr/bin/env python
import pika
import sys
import time
import datetime
import subprocess
import random
import threading
import requests
import json
from command_args import get_args, get_mandatory_arg, get_optional_arg, is_true, get_optional_arg_validated
from RabbitPublisher import RabbitPublisher
from MultiTopicConsumer import MultiTopicConsumer
from QueueStats import QueueStats
from ChaosExecutor import ChaosExecutor
from printer import console_out
from MessageMonitor import MessageMonitor
from ConsumerManager import ConsumerManager
from BrokerManager import BrokerManager
def main():
print("quorum-queue-test.py")
args = get_args(sys.argv)
count = -1 # no limit
tests = int(get_mandatory_arg(args, "--tests"))
actions = int(get_mandatory_arg(args, "--actions"))
in_flight_max = int(get_optional_arg(args, "--in-flight-max", 10))
grace_period_sec = int(get_mandatory_arg(args, "--grace-period-sec"))
cluster_size = get_optional_arg(args, "--cluster", "3")
queue = get_mandatory_arg(args, "--queue")
sac_enabled = is_true(get_mandatory_arg(args, "--sac"))
chaos_mode = get_optional_arg(args, "--chaos-mode", "mixed")
chaos_min_interval = int(get_optional_arg(args, "--chaos-min-interval", "30"))
chaos_max_interval = int(get_optional_arg(args, "--chaos-max-interval", "120"))
prefetch = int(get_optional_arg(args, "--pre-fetch", "10"))
rmq_version = get_optional_arg_validated(args, "--rmq-version", "3.8-beta", ["3.7", "3.8-beta", "3.8-alpha"])
for test_number in range(1, tests+1):
print("")
console_out(f"TEST RUN: {str(test_number)} of {tests}--------------------------", "TEST RUNNER")
setup_complete = False
while not setup_complete:
broker_manager = BrokerManager()
broker_manager.deploy(cluster_size, True, rmq_version, False)
initial_nodes = broker_manager.get_initial_nodes()
console_out(f"Initial nodes: {initial_nodes}", "TEST RUNNER")
print_mod = in_flight_max * 5
queue_name = queue + "_" + str(test_number)
mgmt_node = broker_manager.get_random_init_node()
queue_created = False
qc_ctr = 0
            while not queue_created and qc_ctr < 20:
qc_ctr += 1
if sac_enabled:
queue_created = broker_manager.create_quorum_sac_queue(mgmt_node, queue_name, cluster_size, 0)
else:
queue_created = broker_manager.create_quorum_queue(mgmt_node, queue_name, cluster_size, 0)
if queue_created:
setup_complete = True
else:
time.sleep(5)
time.sleep(10)
msg_monitor = MessageMonitor("qqt", test_number, print_mod, True, False)
publisher = RabbitPublisher(1, test_number, broker_manager, in_flight_max, 120, print_mod)
publisher.configure_sequence_direct(queue_name, count, 0, 1)
consumer_manager = ConsumerManager(broker_manager, msg_monitor, "TEST RUNNER", False)
consumer_manager.add_consumers(1, test_number, queue_name, prefetch)
chaos = ChaosExecutor(initial_nodes)
if chaos_mode == "partitions":
chaos.only_partitions()
elif chaos_mode == "nodes":
chaos.only_kill_nodes()
monitor_thread = threading.Thread(target=msg_monitor.process_messages)
monitor_thread.start()
consumer_manager.start_consumers()
pub_thread = threading.Thread(target=publisher.start_publishing)
pub_thread.start()
console_out("publisher started", "TEST RUNNER")
for action_num in range(1, actions+1):
wait_sec = random.randint(chaos_min_interval, chaos_max_interval)
console_out(f"waiting for {wait_sec} seconds before next action", "TEST RUNNER")
time.sleep(wait_sec)
console_out(f"execute chaos action {str(action_num)}/{actions} of test {str(test_number)}", "TEST RUNNER")
chaos.execute_chaos_action()
subprocess.call(["bash", "../cluster/cluster-status.sh"])
time.sleep(60)
console_out("repairing cluster", "TEST RUNNER")
chaos.repair()
console_out("repaired cluster", "TEST RUNNER")
publisher.stop_publishing()
console_out("starting grace period for consumer to catch up", "TEST RUNNER")
ctr = 0
while True:
ms_since_last_msg = datetime.datetime.now() - msg_monitor.get_last_msg_time()
if msg_monitor.get_unique_count() >= publisher.get_pos_ack_count() and len(publisher.get_msg_set().difference(msg_monitor.get_msg_set())) == 0:
break
elif ctr > grace_period_sec and ms_since_last_msg.total_seconds() > 15:
break
time.sleep(1)
ctr += 1
confirmed_set = publisher.get_msg_set()
lost_msgs = confirmed_set.difference(msg_monitor.get_msg_set())
console_out("RESULTS------------------------------------", "TEST RUNNER")
if len(lost_msgs) > 0:
console_out(f"Lost messages count: {len(lost_msgs)}", "TEST RUNNER")
for msg in lost_msgs:
console_out(f"Lost message: {msg}", "TEST RUNNER")
console_out(f"Confirmed count: {publisher.get_pos_ack_count()} Received count: {msg_monitor.get_receive_count()} Unique received: {msg_monitor.get_unique_count()}", "TEST RUNNER")
success = True
        if msg_monitor.get_out_of_order():
console_out("FAILED TEST: OUT OF ORDER MESSAGES", "TEST RUNNER")
success = False
if len(lost_msgs) > 0:
console_out("FAILED TEST: LOST MESSAGES", "TEST RUNNER")
success = False
        if success:
console_out("TEST OK", "TEST RUNNER")
console_out("RESULTS END------------------------------------", "TEST RUNNER")
try:
consumer_manager.stop_all_consumers()
pub_thread.join()
except Exception as e:
console_out("Failed to clean up test correctly: " + str(e), "TEST RUNNER")
console_out(f"TEST {str(test_number)} COMPLETE", "TEST RUNNER")
if __name__ == '__main__':
main()
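# Illustrative invocation (argument values are hypothetical; --tests, --actions,
# --grace-period-sec, --queue and --sac are mandatory, the rest have defaults):
#   python quorum-queue-test.py --tests 3 --actions 5 --grace-period-sec 300 \
#       --queue quorum_test --sac false --cluster 3 --rmq-version 3.8-beta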
| 2.015625
| 2
|
src/titiler/mosaic/titiler/mosaic/__init__.py
|
kalxas/titiler
| 0
|
12778199
|
"""titiler.mosaic"""
__version__ = "0.6.0"
from . import errors, factory # noqa
from .factory import MosaicTilerFactory # noqa
| 1.054688
| 1
|
initadmin.py
|
fga-eps-mds/2017.2-SiGI-Op_API
| 6
|
12778200
|
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'sigi_op.settings'
import django
django.setup()
from django.contrib.auth.management.commands.createsuperuser import get_user_model
if get_user_model().objects.filter(username='admin'):
print("Super user already created")
else:
get_user_model()._default_manager.db_manager('default').create_superuser(username='admin', email='<EMAIL>', password='<PASSWORD>')
print("Super user created")
| 2.15625
| 2
|
atalaya/parameters.py
|
jacr13/Atalaya
| 0
|
12778201
|
<reponame>jacr13/Atalaya
import json
from os.path import join as pjoin
class Parameters:
"""Class that loads hyperparameters from a json file.
From :
- https://github.com/cs230-stanford/cs230-code-examples/blob/master/pytorch/vision/utils.py
Example:
```
    params = Parameters(path=json_path)
print(params.learning_rate)
params.learning_rate = 0.5 # change the value of learning_rate in params
```
"""
def __init__(self, params=None, path=None):
if params is not None:
self.__dict__.update(params)
elif path is not None:
self.update(path)
else:
raise Exception("params and path at None ! One of them must be not None.")
def save(self, path):
"""Saves parameters to a json file"""
with open(pjoin(path, "params.json"), "w") as f:
json.dump(self.__dict__, f, indent=4)
    def update(self, path):
        """Loads parameters from a json file"""
        with open(pjoin(path, "params.json")) as f:
            params = json.load(f)
        # Keep any attribute that currently holds this path pointing at the
        # path rather than at whatever value was serialised in the file; the
        # original unguarded reverse lookup raised ValueError on a fresh load.
        values = list(self.__dict__.values())
        if path in values:
            key = list(self.__dict__.keys())[values.index(path)]
            params[key] = path
        self.__dict__.update(params)
@property
def dict(self):
"""Gives dict-like access to Params instance by `params.dict['learning_rate']"""
return self.__dict__
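# Illustrative usage (the directory path is hypothetical):
#   params = Parameters(params={"learning_rate": 0.01})
#   params.save("/tmp/experiment")                # writes /tmp/experiment/params.json
#   restored = Parameters(path="/tmp/experiment")
#   print(restored.dict["learning_rate"])         # 0.01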
| 3.21875
| 3
|
GitHubScripts/merge_data.py
|
bdqnghi/sstubs_bug_miner
| 0
|
12778202
|
import os
import shutil
path = "dataset"
split_path = "dataset_splits"
all_paths = []
for folder in os.listdir(split_path):
folder_path = os.path.join(split_path, folder)
print(folder_path)
for project_folder in os.listdir(folder_path):
# print(project_folder)
project_folder_path = os.path.join(folder_path, project_folder)
try:
shutil.move(project_folder_path, path)
except Exception as e:
print(e)
| 2.328125
| 2
|
tkinterUI/historyPage.py
|
moreviraj2000/license-detection-project
| 0
|
12778203
|
import os
import sqlite3
from tkinter import *
from tkinter import simpledialog
from tkinter import ttk
from PIL import Image, ImageTk
from DetailsPage import DetailsPage
import constants
from datetime import datetime
import tkinter.filedialog
from tkinter import messagebox
import xlwt
class HistoryPage(Frame):
def __init__(self, master):
Frame.__init__(self, master, padx=20, bg=constants.colors['main']['bg'])
self.grid(row=1, column=1)
self.style = ttk.Style()
self.style.map('TCombobox', fieldbackground=[('readonly', 'gray90')])
self.style.map('TCombobox', selectbackground=[('readonly', 'gray90')])
self.style.map('TCombobox', selectforeground=[('readonly', 'black')])
self.style.map('Treeview', background=[('selected', 'gray70')])
self.searchBar()
self.dbtable()
self.downloadBar()
# connect database
cwd = os.getcwd()
parDir = cwd.replace('tkinterUI', 'realtest.db')
self.db = sqlite3.connect(parDir)
self.cur = self.db.cursor()
self.cur.execute("SELECT rowid,* FROM realtest")
self.searchedEntries = self.entries = self.cur.fetchall()
self.resetTree()
# resizable
self.rowconfigure(1, weight=1)
self.columnconfigure(0, weight=1)
self.columnconfigure(1, weight=1)
self.columnconfigure(2, weight=1)
self.columnconfigure(3, weight=1)
self.columnconfigure(4, weight=1)
def __del__(self):
self.db.commit()
self.db.close()
def searchBar(self):
# First Row (search)
self.searchby = StringVar(value='Reg. Number')
self.entryVar = StringVar(value='Enter Query')
searchComboVals = ('Reg. Number','Date','Time','Vehicle','Address',)
label = Label(self, text='Search by ', padx=10, pady=10)
comboBox = ttk.Combobox(self, textvariable=self.searchby, state="readonly", justify='center')
comboBox['values'] = searchComboVals
entryBox = ttk.Entry(self, textvariable=self.entryVar, width=40, justify='center')
searchBut = ttk.Button(self, text='Search', command=self.searchTree)
resetButton = ttk.Button(self, text='Reset', command=self.resetTree)
entryBox.bind('<Button-1>', self.OnSingleClickEntry)
entryBox.bind("<Return>",lambda _:self.searchTree())
comboBox.bind("<FocusIn>", lambda _: comboBox.selection_range(0, 0))
comboBox.current(0)
# grid
label.grid(row=0, column=0, sticky=N + E + S + W, pady=(15, 2), padx=(0, 2))
comboBox.grid(row=0, column=1, sticky=N + E + S + W, pady=(15, 2), padx=2)
entryBox.grid(row=0, column=2, sticky=N + E + S + W, pady=(15, 2), padx=2)
searchBut.grid(row=0, column=3, sticky=N + E + S + W, pady=(15, 2), padx=2)
resetButton.grid(row=0, column=4, sticky=N + E + S + W, pady=(15, 2), padx=(2, 0))
def dbtable(self):
# treeview
self.table = ttk.Treeview(self, height=30, selectmode='browse')
verscrlbar = ttk.Scrollbar(self, orient="vertical", command=self.table.yview)
self.table.configure(xscrollcommand=verscrlbar.set)
self.table["columns"] = ("1", "2", "3", "4", "5")
self.table['show'] = 'headings'
self.table.column("1", width=30, anchor='c')
self.table.column("2", width=120, anchor='c')
self.table.column("3", width=220, anchor='c')
self.table.column("4", width=230, anchor='c')
self.table.column("5", width=300, anchor='c')
# Assigning the heading names to the
# respective columns
self.table.heading("1", text="Id")
self.table.heading("2", text="Number")
self.table.heading("3", text="TimeStamp")
self.table.heading("4", text="Vehicle")
self.table.heading("5", text="Address")
self.table.bind("<Double-1>", self.OnDoubleClick)
self.table.grid(row=1, column=0, columnspan=5, sticky=N + E + S + W)
verscrlbar.grid(row=1, column=5, sticky=N + E + S + W)
def downloadBar(self):
# download frame
downloadFrame = Frame(self, bg=constants.colors['main']['bg'])
self.downloadType = StringVar(value='Number Plate Image')
self.downloadWhat = StringVar(value='Selected Row')
downloadLabel = Label(downloadFrame, text='Download the ', padx=10, pady=10)
downCombo = ttk.Combobox(downloadFrame, textvariable=self.downloadType, state="readonly", justify='center')
downCombo['values'] = ('Number Plate Image','Captured Image','Data as Excel')
downCombo.current(0)
ofLabel = Label(downloadFrame, text=' of ', padx=10, pady=10)
whatCombo = ttk.Combobox(downloadFrame, textvariable=self.downloadWhat, state="readonly", justify='center')
whatCombo['values'] = ('Selected Row','Searched Rows','All Rows',)
whatCombo.current(0)
downloadBut = ttk.Button(downloadFrame, text='Download', command=self.download)
downCombo.bind("<FocusIn>", lambda _: downCombo.selection_range(0, 0))
whatCombo.bind("<FocusIn>", lambda _: whatCombo.selection_range(0, 0))
# pack
downloadLabel.pack(side=LEFT, fill=X, expand=True, pady=(2, 2), padx=(0, 2))
downCombo.pack(side=LEFT, fill=X, expand=True, pady=(2, 2), padx=2)
ofLabel.pack(side=LEFT, fill=X, expand=True, pady=(2, 2), padx=2)
whatCombo.pack(side=LEFT, fill=X, expand=True, pady=(2, 2), padx=2)
downloadBut.pack(side=LEFT, fill=X, expand=True, pady=(2, 2), padx=(2, 0))
downloadFrame.grid(row=2, column=0, columnspan=5, sticky=N + E + S + W, pady=(2, 15), padx=(2, 2))
def OnSingleClickEntry(self, event):
if self.entryVar.get() == 'Enter Query':
self.entryVar.set('')
def OnDoubleClick(self, event):
id = self.table.selection()[0]
DetailsPage(self.master, id=id, cur=self.cur)
def updateTable(self, entries):
self.table.delete(*self.table.get_children())
self.table.tag_configure('odd',background='gray90')
self.table.tag_configure('even', background='snow')
FirstWhite = 0 if len(entries)%2 == 0 else 1
for entry in reversed(entries):
self.table.insert("", 'end', text="", iid=entry[0], values=(
entry[0], entry[1], entry[2], entry[3],
entry[4]), tags = ('even',) if entry[0]%2 == FirstWhite else ('odd',))
# resets tree to full data
resetTree = lambda self: self.updateTable(entries=self.entries)
def searchTree(self):
# searches and updates table
columnMap = {
'Vehicle': 'name',
'Reg. Number': 'numPlate',
'Date': 'timeStamp',
'Time': 'timeStamp',
'Address': 'address',
}
column = self.searchby.get()
query = self.entryVar.get()
if column == 'Time':
query = f"SELECT rowid,* FROM realtest WHERE {columnMap[column]} LIKE '{query}% | %'"
elif column == 'Date':
query = f"SELECT rowid,* FROM realtest WHERE {columnMap[column]} LIKE '% | {query}%'"
else:
query = f"SELECT rowid,* FROM realtest WHERE {columnMap[column]} LIKE '%{query}%'"
self.cur.execute(query)
self.searchedEntries = self.cur.fetchall()
self.updateTable(entries=self.searchedEntries)
def download(self):
# ifelse for selecting the number of rows to download
if self.downloadWhat.get() == 'Selected Row':
id = self.table.selection()
if not id :
tkinter.messagebox.showerror(title='Row Selection Expected', message='No Row Selected')
return None
self.cur.execute(f"SELECT rowid,* FROM realtest WHERE rowid = {int(id[0])}")
dList = [self.cur.fetchone()]
elif self.downloadWhat.get() == 'All Rows' :
dList = self.entries
else:#Searched Row
dList = self.searchedEntries
# ask save location
dirname = tkinter.filedialog.askdirectory(parent=self, initialdir="/",title='Please select location to save file ')
if not dirname:
return
# excel save code
if self.downloadType.get() == 'Data as Excel':
# ask for file name to save
fileName = simpledialog.askstring(title="Excel File Name",
prompt="Enter the name to save the excel file :")
if not fileName:
return
excel = xlwt.Workbook()
            sheet = excel.add_sheet("VInLP export")  # second arg of add_sheet is cell_overwrite_ok, not a timestamp
style = xlwt.easyxf('font: bold 1, color blue; borders: left thick, right thin, top thin, bottom thin; pattern: pattern solid, fore_color white;')
tl = xlwt.easyxf('font: bold 1, color blue; border: left thick, top thick, right thin, bottom thick')
t = xlwt.easyxf('font: bold 1, color blue; border: left thin, top thick, right thin, bottom thick')
tr = xlwt.easyxf('font: bold 1, color blue; border: left thin, top thick, right thick, bottom thick')
r = xlwt.easyxf('border: left thin,right thick')
br = xlwt.easyxf('border: left thin, right thick, bottom thick')
b = xlwt.easyxf('border: left thin,right thin, bottom thick')
bl = xlwt.easyxf('border: left thick, right thin, bottom thick')
l = xlwt.easyxf('border: left thick,right thin')
m = xlwt.easyxf('border: left thin,right thin')
sheet.write(0, 0, 'Id', tl)
sheet.write(0, 1, 'Registration Number', t)
sheet.write(0, 2, 'Date', t)
sheet.write(0, 3, 'Time', t)
sheet.write(0, 4, 'Vehicle', t)
sheet.write(0, 5, 'Address', tr)
sheet.col(0).width = int(4 * 260)
sheet.col(1).width = int(17 * 260)
sheet.col(2).width = int(11 * 260)
sheet.col(3).width = int(12 * 260)
sheet.col(4).width = int(30 * 260)
sheet.col(5).width = int(35 * 260)
sheet.write(1, 0, '', l)
sheet.write(1, 1, '', m)
sheet.write(1, 2, '', m)
sheet.write(1, 3, '', m)
sheet.write(1, 4, '', m)
sheet.write(1, 5, '', r)
for index, row in enumerate(dList):
time, date = row[2].split(' | ')
sheet.write(index+2, 0, row[0], l)
sheet.write(index+2, 1, row[1], m)
sheet.write(index+2, 2, date, m)
sheet.write(index+2, 3, time, m)
sheet.write(index+2, 4, row[3], m)
sheet.write(index+2, 5, row[4], r)
index = len(dList) + 1
sheet.write(index, 0,style=bl)
sheet.write(index, 1, style=b)
sheet.write(index, 2, style=b)
sheet.write(index, 3, style=b)
sheet.write(index, 4, style=b)
sheet.write(index, 5, style=br)
excel.save(f'{dirname}/{fileName}.xls')
# saving images
else:
for row in dList:
print(row[0])
with open(f'{dirname}/{row[1]}.png', 'wb') as file:
file.write(row[6] if self.downloadType.get() == 'Captured Image' else row[7])
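# Illustrative wiring sketch (window setup is hypothetical; the page expects
# realtest.db one directory above tkinterUI/):
#   root = Tk()
#   page = HistoryPage(root)
#   root.mainloop()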
| 2.375
| 2
|
ali/ali/__init__.py
|
makefu/ali-orders
| 3
|
12778204
|
from core import run_casper,save_db,load_db
from datetime import datetime,timedelta
import logging
log = logging.getLogger('ali-module')
list_js="ali/get_order_list.js"
order_js="ali/get_order.js"
confirm_js="ali/confirm_order.js"
login_js="ali/login.js"
order_url="http://trade.aliexpress.com/order_detail.htm?orderId=%s"
def get_order_list(full=False):
    if not full: ret = run_casper(list_js)
    else: ret = run_casper(list_js, ["full"])
    return ret
def get_order(ident):
""" calculate for an order
"payment-time": "2014-07-11 01:32:35",
"protection-reminder": {
"hours": 3,
"days": 14,
"seconds": 50,
"minutes": 50
},
run_casper raises exception if get_order failed.
"""
ret = run_casper(order_js,[ident])
rem = ret['protection-reminder']
if rem:
        now = datetime.now()
        #payment_time=datetime.strptime(ret["payment-time"],"%Y-%m-%d %H:%M:%S")
        prot_secs = rem["hours"]*60*60 + rem["minutes"]*60 + rem["seconds"]
        protection_timeout = timedelta(days=rem["days"], seconds=prot_secs)
        ret['protection-timeout'] = (now + protection_timeout).strftime("%Y-%m-%d %H:%M:%S")
del(ret['protection-reminder'])
ret['type']='aliexpress'
return ret
def get_order_link(ident):
return order_url % (ident)
def confirm_order(ident):
try:
log.info("confirm order status: %s" %run_casper(confirm_js,[ident]))
except Exception as e:
log.error("could not confirm order %s"%ident)
log.error(e)
raise
def login():
return run_casper(login_js,[])
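# Illustrative usage (the order id is a placeholder; return shapes depend on
# the CasperJS scripts):
#   login()
#   orders = get_order_list(full=True)
#   detail = get_order('61234567890123')      # adds a 'protection-timeout' field
#   print(get_order_link('61234567890123'))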
| 2.140625
| 2
|
posts/AmericaByTrain/arrow.py
|
capecchi/capecchi.github.io
| 0
|
12778205
|
# Amtrak Recursive ROute Writer (ARROW)
# cont - does not write the initial .npz file; relies on existing partials
def main(newdata=False,
cont=False,
newredund=False,
arrive=True):
import json
import numpy as np
import os
import route_builder
import glob
import find_redundancy
local = 'F:/Python34/America_By_Train/'
rb = local+'route_builder/'
direc = 'C:/Users/Owner/Documents/GitHub/capecchi.github.io/posts/AmericaByTrain/'
if newdata or not os.path.isfile(local+'endpts.npz'):
with open(direc+'amtrak.geojson') as f:
data = json.load(f)
feats = data['features']
index = np.arange(len(feats))
strt = []
end = []
for i in index:
cc = feats[i]['geometry']['coordinates']
strt.append(cc[0])
end.append(cc[-1])
#NEED route GPS endpoints to look for
fcoords = local
#fraarcid
stpaulid = 182592 #keep east pt
stpaul_iarr_cid = 182614 #mark eastern segment as redundant so we only search west
portland_cid = 266301 #block southern route to Portland
seattleid = 241310 #keep south pt
laid = 211793 #keep south pt
palmspringsid = 263261 #keep west pt
neworleansid_end = 178659 #keep east pt NOTE does not connect to neworleans_start
neworleansid_start = 243859 #keep south or east pt
phillyid = 204870 #keep north pt
dcid = 164103 #keep south pt
chicagoid = 253079 #keep north pt
eb_block = np.array([],dtype=int)
cs_block = np.array([],dtype=int)
sl_block = np.array([],dtype=int)
cr_block = np.array([],dtype=int)
cl_block = np.array([],dtype=int)
for i in index:
cid = feats[i]['properties']['FRAARCID']
coords = feats[i]['geometry']['coordinates']
c1 = coords[0]
c2 = coords[-1]
if cid == stpaulid:
if c1[0] > c2[0]: stpaul = c1
else: stpaul = c2
if cid == stpaul_iarr_cid or cid == portland_cid:
eb_block = np.append(eb_block,i)
if cid == seattleid:
if c1[1] < c2[1]: seattle = c1
else: seattle = c2
if cid == laid:
if c1[1] < c2[1]: la = c1
else: la = c2
if cid == seattleid or cid == portland_cid or cid == 189128\
or cid == 244148 or cid == 254149:
cs_block = np.append(cs_block,i)
if cid == palmspringsid:
if c1[0] < c2[0]: palmsprings = c1
else: palmsprings = c2
if cid == neworleansid_end:
if c1[0] > c2[0]: neworleans_end = c1
else: neworleans_end = c2
if cid == 263258 or cid == 266284 or cid == 178673:
sl_block = np.append(sl_block,i)
if cid == neworleansid_start:
if c1[0] > c2[0]: neworleans_start = c1
else: neworleans_start = c2
if cid == phillyid:
if c1[1] > c2[1]: philly = c1
else: philly = c2
if cid == 243812 or cid == 204623 or cid == 169919 or cid == 169921\
or cid == 125491 or cid == 164053 or cid == 275062 or cid == 261822:
cr_block = np.append(cr_block,i)
if cid == dcid:
if c1[1] < c2[1]: dc = c1
else: dc = c2
if cid == chicagoid:
if c1[1] > c2[1]: chicago = c1
else: chicago = c2
if cid == 252822 or cid == 164114 or cid == 252939 or cid == 152297\
or cid == 197933 or cid == 197961 or cid == 192650 or cid == 192649\
or cid == 253070 or cid == 256677 or cid == 193489 or cid == 266257\
or cid == 266676:
cl_block = np.append(cl_block,i)
cid = [feats[i]['properties']['FRAARCID'] for i in index]
if newredund:
#Identify redundant track segments
fraarcid = [feats[i]['properties']['FRAARCID'] for i in index]
iredund = np.array([],dtype=int)
np.save(local+'redundant',iredund)
redundant = find_redundancy.main(index,strt,end,fraarcid,local)
#SAVE STUFF
np.savez(local+'endpts',index=index,strt=strt,end=end,cid=cid,
stpaul=stpaul,seattle=seattle,la=la,palmsprings=palmsprings,
neworleans_end=neworleans_end,neworleans_start=neworleans_start,
philly=philly,dc=dc,chicago=chicago,eb_block=eb_block,
cs_block=cs_block,sl_block=sl_block,cr_block=cr_block,cl_block=cl_block)
print('saved endpts arrays and city GPS coords')
else:
f=np.load(local+'endpts.npz')
index = f['index']
strt = f['strt']
end = f['end']
cid = f['cid']
stpaul = f['stpaul']
eb_block = f['eb_block']
seattle = f['seattle']
la = f['la']
cs_block = f['cs_block']
palmsprings = f['palmsprings']
neworleans_end = f['neworleans_end']
sl_block = f['sl_block']
neworleans_start = f['neworleans_start']
philly = f['philly']
cr_block = f['cr_block']
dc = f['dc']
chicago = f['chicago']
cl_block = f['cl_block']
#EMPIRE BUILDER
if 1:
print('finding EMPIRE BUILDER routes')
ptA = [stpaul]
iredund = np.load(local+'redundant.npy')
#for i in eb_block: iredund = np.append(iredund,i)
iarr = np.array([],dtype=int)
if not cont: np.savez(rb+'partial',ptA=ptA,iarr=iarr)
partials = glob.glob(rb+'*.npz')
while len(partials) > 0:
level = 0
with np.load(partials[0]) as f:
ptA = f['ptA']
iarr = f['iarr']
os.remove(partials[0])
route_builder.main(ptA,iarr,seattle,rb+'empire_builder',level,\
iredund,arrive=arrive)
partials = glob.glob(rb+'*.npz')
#COAST STARLIGHT
if 0:
print('finding COAST STARLIGHT routes')
ptA = [seattle]
ptB = la
iredund = np.load(local+'redundant.npy')
for i in cs_block:
iredund = np.append(iredund,i)
iarr = np.array([],dtype=int)
if not cont: np.savez(rb+'partial',ptA=ptA,iarr=iarr)
partials = glob.glob(rb+'*.npz')
while len(partials) > 0:
level = 0
with np.load(partials[0]) as f:
ptA = f['ptA']
iarr = f['iarr']
os.remove(partials[0])
route_builder.main(ptA,iarr,ptB,rb+'coast_starlight',level,\
iredund,arrive=arrive)
partials = glob.glob(rb+'*.npz')
#SUNSET LIMITED
if 0:
print('finding SUNSET LIMITED routes')
ptA = [palmsprings]
ptB = neworleans_end
iredund = np.load(local+'redundant.npy')
for i in sl_block:
iredund = np.append(iredund,i)
iarr = np.array([],dtype=int)
if not cont: np.savez(rb+'partial',ptA=ptA,iarr=iarr)
partials = glob.glob(rb+'*.npz')
while len(partials) > 0:
level = 0
with np.load(partials[0]) as f:
ptA = f['ptA']
iarr = f['iarr']
os.remove(partials[0])
route_builder.main(ptA,iarr,ptB,rb+'sunset_limited',\
level,iredund,arrive=arrive)
partials = glob.glob(rb+'*.npz')
#CRESCENT
if 0:
print('finding CRESCENT routes')
ptA = [neworleans_start]
ptB = philly
iredund = np.load(local+'redundant.npy')
for i in cr_block:
iredund = np.append(iredund,i)
iarr = np.array([],dtype=int)
if not cont: np.savez(rb+'partial',ptA=ptA,iarr=iarr)
partials = glob.glob(rb+'*.npz')
while len(partials) > 0:
level = 0
with np.load(partials[0]) as f:
ptA = f['ptA']
iarr = f['iarr']
os.remove(partials[0])
route_builder.main(ptA,iarr,ptB,rb+'crescent',level,iredund,arrive=arrive)
partials = glob.glob(rb+'*.npz')
#CAPITOL LIMITED
if 0:
print('finding CAPITOL LIMITED routes')
ptA = [dc]
ptB = chicago
iredund = np.load(local+'redundant.npy')
for i in cl_block:
iredund = np.append(iredund,i)
iarr = np.array([],dtype=int)
if not cont: np.savez(rb+'partial',ptA=ptA,iarr=iarr)
partials = glob.glob(rb+'*.npz')
while len(partials) > 0:
level = 0
with np.load(partials[0]) as f:
ptA = f['ptA']
iarr = f['iarr']
os.remove(partials[0])
route_builder.main(ptA,iarr,ptB,rb+'capitol_limited',level,\
iredund,arrive=arrive)
partials = glob.glob(rb+'*.npz')
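# Illustrative invocations (per the header flags; paths are machine-specific):
#   main(newdata=True, newredund=True)   # rebuild endpts.npz and the redundancy list
#   main(cont=True)                      # resume from existing partial .npz files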
| 2.21875
| 2
|
urbanairship/reports/experiments.py
|
tirkarthi/python-library
| 0
|
12778206
|
<gh_stars>0
from typing import Dict, Any
from urbanairship import Airship
class ExperimentReport(object):
def __init__(self, airship: Airship) -> None:
"""Access reporting related to A/B Tests (experiments)
:param airship: An urbanairship.Airship instance.
"""
self.airship = airship
def get_overview(self, push_id: str) -> Dict[str, Any]:
"""Returns statistics and metadata about an experiment (A/B Test).
:param push_id: A UUID representing an A/B test of the requested experiment.
:returns: JSON from the API
"""
url = self.airship.urls.get("reports_url") + "experiment/overview/{0}".format(
push_id
)
response = self.airship._request("GET", None, url, version=3)
return response.json()
def get_variant(self, push_id: str, variant_id: str) -> Dict[str, Any]:
"""Returns statistics and metadata about a specific variant in an experiment (A/B Test).
:param push_id: A UUID representing an A/B test of the requested experiment.
        :param variant_id: An integer representing the variant requested.
:returns: JSON from the API
"""
url = self.airship.urls.get("reports_url") + "experiment/detail/{0}/{1}".format(
push_id, variant_id
)
response = self.airship._request("GET", None, url, version=3)
return response.json()
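# Illustrative usage (credentials and the push id are placeholders):
#   import urbanairship as ua
#   airship = ua.Airship('app_key', 'master_secret')
#   report = ExperimentReport(airship)
#   overview = report.get_overview('0a3ca8a6-0000-0000-0000-000000000000')
#   variant = report.get_variant('0a3ca8a6-0000-0000-0000-000000000000', '0')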
| 2.796875
| 3
|
models.py
|
iperez319/dog-tinder
| 0
|
12778207
|
from google.appengine.ext import ndb
class Dog(ndb.Model):
name = ndb.StringProperty()
breed = ndb.StringProperty()
gender = ndb.StringProperty()
age = ndb.StringProperty()
size = ndb.StringProperty()
socialLevel = ndb.StringProperty()
activityLevel = ndb.StringProperty()
profilePic = ndb.BlobProperty()
ownerEmail = ndb.StringProperty()
class UserProfile(ndb.Model):
name = ndb.StringProperty()
email = ndb.StringProperty()
dogs = ndb.KeyProperty(Dog, repeated=True)
city = ndb.StringProperty()
state = ndb.StringProperty()
age = ndb.IntegerProperty()
sex = ndb.StringProperty(choices=["Female", "Male", "Prefer not to say"])
profilePic = ndb.BlobProperty()
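# Illustrative usage sketch (values are hypothetical; requires an App Engine
# runtime or a testbed-backed ndb context):
#   dog = Dog(name='Rex', breed='Beagle', gender='Male', ownerEmail='a@b.com')
#   dog_key = dog.put()
#   owner = UserProfile(name='Ana', email='a@b.com', dogs=[dog_key])
#   owner.put()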
| 2.546875
| 3
|
aiokts/manage.py
|
ktsstudio/aiokts
| 6
|
12778208
|
import argparse
import inspect
import logging
import logging.config
import os
import pkgutil
import sys
from aiokts.managecommands import Command
from aiokts.store import Store
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(CURRENT_DIR)
class BaseManage(object):
commands_package_path = None
store_cls = Store
_modules = {}
_commands = None
def __init__(self):
self._logger = None
assert self.commands_package_path is not None, \
'Must specify path to where commands are'
self.commands_package_path = os.path.abspath(
os.path.join(
os.path.dirname(inspect.getfile(self.__class__)),
self.commands_package_path))
self.logger.debug('Commands path: %s', self.commands_package_path)
@property
def commands(self):
if self._commands is None:
self._commands = ['help']
for loader, name, ispkg in \
pkgutil.iter_modules([self.commands_package_path]):
if not ispkg:
self._commands.append(name)
self._modules[name] = loader.find_module(name)
return self._commands
@property
def config(self):
return {}
@property
def debug(self):
return self.config.get('debug', False)
def help(self):
print('Available commands:\n - %s' % ('\n - '.join(self.commands)))
def run(self):
args = self._parse_manage_arguments()
command = None
try:
command = args.command
if command not in self.commands:
logging.error('Command %s not found' % command)
self.help()
return 1
if command == 'help':
self.help()
return 0
self._run_command(command, *args.opts)
except Exception:
self.logger.exception('Exception while running command %s',
command)
return 2
except BaseException:
self.logger.exception('Exception while running command %s',
command)
return 3
    def _run_command(self, command, *args):
        module = self._modules[command].load_module(command)
        if hasattr(module, 'main'):
            module.main(*args)
            return
cmd_cls = None
for name, cls in module.__dict__.items():
if isinstance(cls, type) and issubclass(cls, Command)\
and cls.__module__ == module.__name__:
cmd_cls = cls
break
assert cmd_cls is not None, \
"Couldn't find Command in command {}".format(command)
cmd = cmd_cls(self)
cmd.run(*args)
def _parse_manage_arguments(self):
parser = argparse.ArgumentParser()
parser.add_argument('command', help='command to execute')
parser.add_argument('opts', nargs=argparse.REMAINDER, default=None)
args = parser.parse_args()
return args
@property
def logger(self):
if self._logger is None:
self._logger = logging.getLogger('Manager')
return self._logger
def main(manager_cls):
manage = manager_cls()
exit(manage.run())
if __name__ == '__main__':
    main(BaseManage)
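# Illustrative subclass sketch (module layout is hypothetical): a concrete
# manager points commands_package_path at a package of command modules, each
# providing either a main(*args) function or a Command subclass.
#
#   class MyManage(BaseManage):
#       commands_package_path = 'commands'   # relative to this file
#
#   if __name__ == '__main__':
#       main(MyManage)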
| 2.375
| 2
|
compressor/simple/seven.py
|
httpwg/compression-test
| 11
|
12778209
|
#!/usr/bin/env python
"""
Serialise ASCII as seven bits.
Yes, I threw up a bit too.
"""
from bitarray import bitarray
def encode(text):
ba = bitarray()
out = bitarray()
ba.fromstring(text)
s = 0
while s < len(ba):
byte = ba[s:s+8]
out.extend(byte[1:8])
s += 8
# print out
return out.tobytes()
def decode(bits):
ba = bitarray()
out = bitarray()
ba.frombytes(bits)
s = 0
while s < len(ba):
seven = ba[s:s+7]
out.append(0)
out.extend(seven)
s += 7
return out.tostring()[:-1].encode('ascii')
if __name__ == "__main__":
import sys
instr = sys.argv[1].strip().encode('ascii')
print "before: %s" % len(instr)
f = encode(instr)
print "after: %s" % len(f)
g = decode(f)
assert instr == g, "\n%s\n%s" % (repr(instr), repr(g))
| 3.671875
| 4
|
setup.py
|
andsor/pyggcq
| 1
|
12778210
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup file for ggcq.
This file was generated with PyScaffold 1.2, a tool that easily
puts up a scaffold for your new Python project. Learn more under:
http://pyscaffold.readthedocs.org/
"""
import inspect
import os
import sys
from distutils.cmd import Command
import setuptools
from setuptools import setup
from setuptools.command.test import test as TestCommand
from distutils.extension import Extension
import versioneer
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
# Change these settings according to your needs
MAIN_PACKAGE = "ggcq"
DESCRIPTION = (
"Scientific Python Package for G/G/c Queueing Simulation"
)
LICENSE = "apache"
URL = "http://github.com/andsor/pyggcq"
AUTHOR = "<NAME>"
EMAIL = "<EMAIL>"
# Add here all kinds of additional classifiers as defined under
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics',
]
# Add here console scripts like ['hello_world = devs.module:function']
CONSOLE_SCRIPTS = []
# Versioneer configuration
versioneer.VCS = 'git'
versioneer.versionfile_source = os.path.join(MAIN_PACKAGE, '_version.py')
versioneer.versionfile_build = os.path.join(MAIN_PACKAGE, '_version.py')
versioneer.tag_prefix = 'v' # tags are like v1.2.0
versioneer.parentdir_prefix = MAIN_PACKAGE + '-'
class Tox(TestCommand):
user_options = [
('tox-args=', 'a', "Arguments to pass to tox"),
]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = None
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import tox
import shlex
errno = tox.cmdline(
args=shlex.split(self.tox_args) if self.tox_args else None
)
sys.exit(errno)
class ToxAutoDocs(Tox):
def finalize_options(self):
Tox.finalize_options(self)
if self.tox_args is None:
self.tox_args = ''
self.tox_args += ' -e autodocs '
def sphinx_builder():
try:
from sphinx.setup_command import BuildDoc
except ImportError:
class NoSphinx(Command):
user_options = []
def initialize_options(self):
raise RuntimeError("Sphinx documentation is not installed, "
"run: pip install sphinx")
return NoSphinx
class BuildSphinxDocs(BuildDoc):
def run(self):
if self.builder == "doctest":
import sphinx.ext.doctest as doctest
# Capture the DocTestBuilder class in order to return the total
# number of failures when exiting
ref = capture_objs(doctest.DocTestBuilder)
BuildDoc.run(self)
errno = ref[-1].total_failures
sys.exit(errno)
else:
BuildDoc.run(self)
return BuildSphinxDocs
class ObjKeeper(type):
instances = {}
def __init__(cls, name, bases, dct):
cls.instances[cls] = []
def __call__(cls, *args, **kwargs):
cls.instances[cls].append(super(ObjKeeper, cls).__call__(*args,
**kwargs))
return cls.instances[cls][-1]
def capture_objs(cls):
from six import add_metaclass
module = inspect.getmodule(cls)
name = cls.__name__
keeper_class = add_metaclass(ObjKeeper)(cls)
setattr(module, name, keeper_class)
cls = getattr(module, name)
return keeper_class.instances[cls]
def get_install_requirements(path):
content = open(os.path.join(__location__, path)).read()
    return [req for req in content.split("\n") if req != '']
def read(fname):
return open(os.path.join(__location__, fname)).read()
def setup_package():
# Assemble additional setup commands
cmdclass = versioneer.get_cmdclass()
cmdclass['docs'] = sphinx_builder()
cmdclass['doctest'] = sphinx_builder()
cmdclass['test'] = Tox
cmdclass['autodocs'] = ToxAutoDocs
# Some helper variables
version = versioneer.get_version()
docs_path = os.path.join(__location__, "docs")
docs_build_path = os.path.join(docs_path, "_build")
install_reqs = get_install_requirements("requirements.txt")
extra_doc_reqs = get_install_requirements("requirements-doc.txt")
command_options = {
'docs': {'project': ('setup.py', MAIN_PACKAGE),
'version': ('setup.py', version.split('-', 1)[0]),
'release': ('setup.py', version),
'build_dir': ('setup.py', docs_build_path),
'config_dir': ('setup.py', docs_path),
'source_dir': ('setup.py', docs_path)},
'doctest': {'project': ('setup.py', MAIN_PACKAGE),
'version': ('setup.py', version.split('-', 1)[0]),
'release': ('setup.py', version),
'build_dir': ('setup.py', docs_build_path),
'config_dir': ('setup.py', docs_path),
'source_dir': ('setup.py', docs_path),
'builder': ('setup.py', 'doctest')},
'test': {'test_suite': ('setup.py', 'tests')},
}
setup(name=MAIN_PACKAGE,
version=version,
url=URL,
description=DESCRIPTION,
author=AUTHOR,
author_email=EMAIL,
license=LICENSE,
long_description=read('README.rst'),
classifiers=CLASSIFIERS,
test_suite='tests',
packages=setuptools.find_packages(exclude=['tests', 'tests.*']),
install_requires=install_reqs,
setup_requires=['six', 'setuptools_git>=1.1'],
cmdclass=cmdclass,
tests_require=['tox'],
command_options=command_options,
entry_points={'console_scripts': CONSOLE_SCRIPTS},
extras_require={
'docs': extra_doc_reqs,
},
include_package_data=True, # include everything in source control
# but exclude these files
exclude_package_data={'': ['.gitignore']},
)
if __name__ == "__main__":
setup_package()
| 1.765625
| 2
|
utils/mp4-dash-clone.py
|
kahache/video_packaging_platform
| 8
|
12778211
|
<reponame>kahache/video_packaging_platform
#!/usr/bin/env python3
__author__ = '<NAME> (<EMAIL>)'
__copyright__ = 'Copyright 2011-2012 Axiomatic Systems, LLC.'
###
# NOTE: this script needs Bento4 command line binaries to run
# You must place the 'mp4info' and 'mp4encrypt' binaries
# in a directory named 'bin/<platform>' at the same level as where
# this script is.
# <platform> depends on the platform you're running on:
# Mac OSX --> platform = macosx
# Linux x86 --> platform = linux-x86
# Windows --> platform = win32
### Imports
import sys
import os
import os.path as path
from optparse import OptionParser
import urllib.request, urllib.error, urllib.parse
import shutil
import json
from xml.etree import ElementTree
from subprocess import check_output, CalledProcessError
# constants
DASH_NS_URN_COMPAT = 'urn:mpeg:DASH:schema:MPD:2011'
DASH_NS_URN = 'urn:mpeg:dash:schema:mpd:2011'
DASH_NS_COMPAT = '{'+DASH_NS_URN_COMPAT+'}'
DASH_NS = '{'+DASH_NS_URN+'}'
MARLIN_MAS_NS_URN = 'urn:marlin:mas:1-0:services:schemas:mpd'
MARLIN_MAS_NS = '{'+MARLIN_MAS_NS_URN+'}'
def Bento4Command(name, *args, **kwargs):
cmd = [path.join(Options.exec_dir, name)]
for kwarg in kwargs:
arg = kwarg.replace('_', '-')
cmd.append('--'+arg)
if not isinstance(kwargs[kwarg], bool):
cmd.append(kwargs[kwarg])
cmd += args
#print cmd
try:
return check_output(cmd)
except CalledProcessError as e:
#print e
raise Exception("binary tool failed with error %d" % e.returncode)
def Mp4Info(filename, **args):
return Bento4Command('mp4info', filename, **args)
def GetTrackIds(mp4):
track_ids = []
json_info = Mp4Info(mp4, format='json')
info = json.loads(json_info, strict=False)
for track in info['tracks']:
track_ids.append(track['id'])
return track_ids
def ProcessUrlTemplate(template, representation_id, bandwidth, time, number):
    result = template
    if representation_id is not None: result = result.replace('$RepresentationID$', representation_id)
if number is not None:
nstart = result.find('$Number')
if nstart >= 0:
nend = result.find('$', nstart+1)
if nend >= 0:
var = result[nstart+1 : nend]
if 'Number%' in var:
value = var[6:] % (int(number))
else:
value = number
result = result.replace('$'+var+'$', value)
if bandwidth is not None: result = result.replace('$Bandwidth$', bandwidth)
if time is not None: result = result.replace('$Time$', time)
result = result.replace('$$', '$')
return result
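# Illustrative self-check (not part of the original tool): demonstrates how the
# printf-style $Number%...$ padding in a SegmentTemplate URL is expanded.
def _demo_process_url_template():
    url = ProcessUrlTemplate('$RepresentationID$/seg-$Number%05d$.m4s',
                             representation_id='video_1', bandwidth='800000',
                             time=None, number='7')
    assert url == 'video_1/seg-00007.m4s'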
class DashSegmentBaseInfo:
def __init__(self, xml):
self.initialization = None
self.type = None
for type in ['SegmentBase', 'SegmentTemplate', 'SegmentList']:
e = xml.find(DASH_NS+type)
if e is not None:
self.type = type
# parse common elements
# type specifics
if type == 'SegmentBase' or type == 'SegmentList':
init = e.find(DASH_NS+'Initialization')
if init is not None:
self.initialization = init.get('sourceURL')
if type == 'SegmentTemplate':
self.initialization = e.get('initialization')
self.media = e.get('media')
self.timescale = e.get('timescale')
self.startNumber = e.get('startNumber')
# segment timeline
st = e.find(DASH_NS+'SegmentTimeline')
if st is not None:
self.segment_timeline = []
entries = st.findall(DASH_NS+'S')
for entry in entries:
item = {}
s_t = entry.get('t')
if s_t is not None:
item['t'] = int(s_t)
s_d = entry.get('d')
if s_d is not None:
item['d'] = int(s_d)
s_r = entry.get('r')
if s_r is not None:
item['r'] = int(s_r)
else:
item['r'] = 0
self.segment_timeline.append(item)
break
class DashRepresentation:
def __init__(self, xml, parent):
self.xml = xml
self.parent = parent
self.init_segment_url = None
self.segment_urls = []
self.segment_base = DashSegmentBaseInfo(xml)
self.duration = 0
# parse standard attributes
self.bandwidth = xml.get('bandwidth')
self.id = xml.get('id')
# compute the segment base type
node = self
self.segment_base_type = None
while node is not None:
if node.segment_base.type in ['SegmentTemplate', 'SegmentList']:
self.segment_base_type = node.segment_base.type
break
node = node.parent
# compute the init segment URL
self.ComputeInitSegmentUrl()
def SegmentBaseLookup(self, field):
node = self
while node is not None:
if field in node.segment_base.__dict__:
return node.segment_base.__dict__[field]
node = node.parent
return None
def AttributeLookup(self, field):
node = self
while node is not None:
if field in node.__dict__:
return node.__dict__[field]
node = node.parent
return None
    def ComputeInitSegmentUrl(self):
        # walk up the hierarchy until an initialization attribute is found
        self.initialization = None
        node = self
        while node is not None:
            if node.segment_base.initialization is not None:
                self.initialization = node.segment_base.initialization
                break
            node = node.parent
        if self.initialization is not None:
            self.init_segment_url = ProcessUrlTemplate(self.initialization, representation_id=self.id, bandwidth=self.bandwidth, time=None, number=None)
def GenerateSegmentUrls(self):
if self.segment_base_type == 'SegmentTemplate':
return self.GenerateSegmentUrlsFromTemplate()
else:
return self.GenerateSegmentUrlsFromList()
def GenerateSegmentUrlsFromTemplate(self):
media = self.SegmentBaseLookup('media')
if media is None:
print('WARNING: no media attribute found for representation')
return
timeline = self.SegmentBaseLookup('segment_timeline')
if timeline is None:
start = self.SegmentBaseLookup('startNumber')
if start is None:
current_number = 1
else:
current_number = int(start)
while True:
url = ProcessUrlTemplate(media, representation_id=self.id, bandwidth=self.bandwidth, time="0", number=str(current_number))
current_number += 1
yield url
else:
current_number = 1
current_time = 0
for s in timeline:
if 't' in s:
current_time = s['t']
for _ in range(1+s['r']):
url = ProcessUrlTemplate(media, representation_id=self.id, bandwidth=self.bandwidth, time=str(current_time), number=str(current_number))
current_number += 1
current_time += s['d']
yield url
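    # Illustrative note on the timeline loop above: an entry like {'t': 0, 'd': 9000, 'r': 2}
    # yields three segment URLs at times 0, 9000 and 18000, since 'r' is the number of
    # *additional* repeats of a segment of duration 'd'.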
def GenerateSegmentUrlsFromList(self):
segs = self.xml.find(DASH_NS+'SegmentList').findall(DASH_NS+'SegmentURL')
for seg in segs:
media = seg.get('media')
if media is not None:
yield media
def __str__(self):
result = "Representation: "
return result
class DashAdaptationSet:
def __init__(self, xml, parent):
self.xml = xml
self.parent = parent
self.segment_base = DashSegmentBaseInfo(xml)
self.representations = []
for r in self.xml.findall(DASH_NS+'Representation'):
self.representations.append(DashRepresentation(r, self))
def __str__(self):
result = 'Adaptation Set:\n' + '\n'.join([str (r) for r in self.representations])
return result
class DashPeriod:
def __init__(self, xml, parent):
self.xml = xml
self.parent = parent
self.segment_base = DashSegmentBaseInfo(xml)
self.adaptation_sets = []
for s in self.xml.findall(DASH_NS+'AdaptationSet'):
self.adaptation_sets.append(DashAdaptationSet(s, self))
def __str__(self):
result = 'Period:\n' + '\n'.join([str(s) for s in self.adaptation_sets])
return result
class DashMPD:
def __init__(self, url, xml):
self.url = url
self.xml = xml
self.parent = None
self.periods = []
self.segment_base = DashSegmentBaseInfo(xml)
self.type = xml.get('type')
for p in self.xml.findall(DASH_NS+'Period'):
self.periods.append(DashPeriod(p, self))
# compute base URL (note: we'll just use the MPD URL for now)
self.base_urls = [url]
base_url = self.xml.find(DASH_NS+'BaseURL')
if base_url is not None:
self.base_urls = [base_url.text]
def __str__(self):
result = "MPD:\n" + '\n'.join([str(p) for p in self.periods])
return result
def ParseMpd(url, xml):
mpd_tree = ElementTree.XML(xml)
if mpd_tree.tag.startswith(DASH_NS_COMPAT):
global DASH_NS
global DASH_NS_URN
DASH_NS = DASH_NS_COMPAT
DASH_NS_URN = DASH_NS_URN_COMPAT
if Options.verbose:
print('@@@ Using backward compatible namespace')
mpd = DashMPD(url, mpd_tree)
if not (mpd.type is None or mpd.type == 'static'):
raise Exception('Only static MPDs are supported')
return mpd
def MakeNewDir(dir, is_warning=False):
if path.exists(dir):
if is_warning:
print('WARNING: ', end=' ')
else:
print('ERROR: ', end=' ')
print('directory "'+dir+'" already exists')
if not is_warning:
sys.exit(1)
else:
os.mkdir(dir)
def OpenURL(url):
if url.startswith("file://"):
return open(url[7:], 'rb')
else:
return urllib.request.urlopen(url)
def ComputeUrl(base_url, url):
if url.startswith('http://') or url.startswith('https://'):
raise Exception('Absolute URLs are not supported')
if base_url.startswith('file://'):
return path.join(path.dirname(base_url), url)
else:
return urllib.parse.urljoin(base_url, url)
class Cloner:
def __init__(self, root_dir):
self.root_dir = root_dir
self.track_ids = []
self.init_filename = None
def CloneSegment(self, url, path_out, is_init):
while path_out.startswith('/'):
path_out = path_out[1:]
target_dir = path.join(self.root_dir, path_out)
if Options.verbose:
print('Cloning', url, 'to', path_out)
        try:
            os.makedirs(path.dirname(target_dir))
        except OSError:
            # ignore the error only if the directory already exists
            if not path.exists(path.dirname(target_dir)):
                raise
data = OpenURL(url)
outfile_name = path.join(self.root_dir, path_out)
use_temp_file = False
if Options.encrypt:
use_temp_file = True
outfile_name_final = outfile_name
outfile_name += '.tmp'
outfile = open(outfile_name, 'wb')
try:
shutil.copyfileobj(data, outfile)
outfile.close()
if Options.encrypt:
if is_init:
self.track_ids = GetTrackIds(outfile_name)
self.init_filename = outfile_name
#shutil.copyfile(outfile_name, outfile_name_final)
args = ["--method", "MPEG-CENC"]
for t in self.track_ids:
args.append("--property")
args.append(str(t)+":KID:"+Options.kid.encode('hex'))
for t in self.track_ids:
args.append("--key")
args.append(str(t)+":"+Options.key.encode('hex')+':random')
args += [outfile_name, outfile_name_final]
if not is_init:
args += ["--fragments-info", self.init_filename]
if Options.verbose:
print('mp4encrypt '+(' '.join(args)))
Bento4Command("mp4encrypt", *args)
finally:
if use_temp_file and not is_init:
os.unlink(outfile_name)
def Cleanup(self):
if (self.init_filename):
os.unlink(self.init_filename)
def main():
# determine the platform binary name
platform = sys.platform
if platform.startswith('linux'):
platform = 'linux-x86'
elif platform.startswith('darwin'):
platform = 'macosx'
# parse options
parser = OptionParser(usage="%prog [options] <file-or-http-url> <output-dir>\n")
parser.add_option('', '--quiet', dest="verbose",
action='store_false', default=True,
help="Be quiet")
parser.add_option('', "--encrypt", metavar='<KID:KEY>',
dest='encrypt', default=None,
help="Encrypt the media, with KID and KEY specified in Hex (32 characters each)")
parser.add_option('', "--exec-dir", metavar="<exec_dir>",
dest="exec_dir", default=path.join(SCRIPT_PATH, 'bin', platform),
help="Directory where the Bento4 executables are located")
global Options
(Options, args) = parser.parse_args()
if len(args) != 2:
parser.print_help()
sys.exit(1)
# process arguments
mpd_url = args[0]
output_dir = args[1]
if Options.encrypt:
if len(Options.encrypt) != 65:
raise Exception('Invalid argument for --encrypt option')
Options.kid = bytes.fromhex(Options.encrypt[:32])
Options.key = bytes.fromhex(Options.encrypt[33:])
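        # Illustrative example: --encrypt 000102030405060708090a0b0c0d0e0f:00112233445566778899aabbccddeeff
        # (32 hex digits of KID, a ':', then 32 hex digits of key; 65 characters total)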
# create the output dir
MakeNewDir(output_dir, True)
# load and parse the MPD
if Options.verbose: print("Loading MPD from", mpd_url)
try:
mpd_xml = OpenURL(mpd_url).read().decode('utf-8')
except Exception as e:
print("ERROR: failed to load MPD:", e)
sys.exit(1)
if Options.verbose: print("Parsing MPD")
    # normalize the British spelling 'Initialisation' used by some MPDs
    mpd_xml = mpd_xml.replace('nitialisation', 'nitialization')
mpd = ParseMpd(mpd_url, mpd_xml)
ElementTree.register_namespace('', DASH_NS_URN)
ElementTree.register_namespace('mas', MARLIN_MAS_NS_URN)
cloner = Cloner(output_dir)
for period in mpd.periods:
for adaptation_set in period.adaptation_sets:
for representation in adaptation_set.representations:
# compute the base URL
base_url = representation.AttributeLookup('base_urls')[0]
if Options.verbose:
print('Base URL = '+base_url)
# process the init segment
if Options.verbose:
print('### Processing Initialization Segment')
url = ComputeUrl(base_url, representation.init_segment_url)
cloner.CloneSegment(url, representation.init_segment_url, True)
# process all segment URLs
if Options.verbose:
                    print('### Processing Media Segments for Representation', representation.id)
for seg_url in representation.GenerateSegmentUrls():
url = ComputeUrl(base_url, seg_url)
try:
cloner.CloneSegment(url, seg_url, False)
except (urllib.error.HTTPError, urllib.error.URLError, IOError):
# move to the next representation
break
# cleanup the init segment
cloner.Cleanup()
# modify the MPD if needed
if Options.encrypt:
for p in mpd.xml.findall(DASH_NS+'Period'):
for s in p.findall(DASH_NS+'AdaptationSet'):
cp = ElementTree.Element(DASH_NS+'ContentProtection', schemeIdUri='urn:uuid:5E629AF5-38DA-4063-8977-97FFBD9902D4')
cp.tail = s.tail
cids = ElementTree.SubElement(cp, MARLIN_MAS_NS+'MarlinContentIds')
cid = ElementTree.SubElement(cids, MARLIN_MAS_NS+'MarlinContentId')
                cid.text = 'urn:marlin:kid:'+Options.kid.hex()
s.insert(0, cp)
# write the MPD
xml_tree = ElementTree.ElementTree(mpd.xml)
xml_tree.write(path.join(output_dir, path.basename(urllib.parse.urlparse(mpd_url).path)), encoding="UTF-8", xml_declaration=True)
###########################
SCRIPT_PATH = path.abspath(path.dirname(__file__))
if __name__ == '__main__':
main()
| 1.9375
| 2
|
examples/multi-apps/app/libs/logging.py
|
luohu1/flask-example
| 0
|
12778212
|
# coding: utf-8
import logging
import logging.handlers
import sys
from flask.logging import default_handler
default_formatter = '%(asctime)s %(process)d,%(threadName)s %(filename)s:%(lineno)d [%(levelname)s] %(message)s'
def configure_logging(app):
if app.debug:
handler = logging.StreamHandler(sys.stdout)
else:
filename = app.config['LOGFILE']
handler = logging.handlers.TimedRotatingFileHandler(filename, when='D')
handler.setLevel(logging.INFO)
handler.setFormatter(logging.Formatter(default_formatter))
app.logger.addHandler(handler)
app.logger.removeHandler(default_handler)
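# Illustrative usage (the app setup and LOGFILE value are assumptions, not from this module):
#   from flask import Flask
#   app = Flask(__name__)
#   app.config['LOGFILE'] = '/var/log/app.log'  # only needed when app.debug is False
#   configure_logging(app)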
| 2.375
| 2
|
audb/core/info.py
|
audeering/audb
| 1
|
12778213
|
import typing
import pandas as pd
import audformat
from audb.core import define
from audb.core.api import (
dependencies,
latest_version,
)
from audb.core.load import (
database_cache_folder,
load_header,
)
def author(
name: str,
*,
version: str = None,
cache_root: str = None,
) -> str:
"""Author(s) of database.
Args:
name: name of database
version: version of database
cache_root: cache folder where databases are stored.
If not set :meth:`audb.default_cache_root` is used
Returns:
author(s) of database
Example:
>>> author('emodb', version='1.1.1')
'<NAME>, <NAME>, <NAME>, <NAME>, <NAME>'
""" # noqa: E501
db = header(name, version=version, cache_root=cache_root)
return db.author
def bit_depths(
name: str,
*,
version: str = None,
cache_root: str = None,
) -> typing.Set[int]:
"""Media bit depth.
Args:
name: name of database
version: version of database
cache_root: cache folder where databases are stored.
If not set :meth:`audb.default_cache_root` is used
Returns:
bit depths
Example:
>>> bit_depths('emodb', version='1.1.1')
{16}
"""
deps = dependencies(name, version=version, cache_root=cache_root)
df = deps()
return set(df[df.type == define.DependType.MEDIA].bit_depth)
def channels(
name: str,
*,
version: str = None,
cache_root: str = None,
) -> typing.Set[int]:
"""Media channels.
Args:
name: name of database
version: version of database
cache_root: cache folder where databases are stored.
If not set :meth:`audb.default_cache_root` is used
Returns:
channel numbers
Example:
>>> channels('emodb', version='1.1.1')
{1}
"""
deps = dependencies(name, version=version, cache_root=cache_root)
df = deps()
return set(df[df.type == define.DependType.MEDIA].channels)
def description(
name: str,
*,
version: str = None,
cache_root: str = None,
) -> str:
"""Description of database.
Args:
name: name of database
version: version of database
cache_root: cache folder where databases are stored.
If not set :meth:`audb.default_cache_root` is used
Returns:
description of database
Example:
>>> desc = description('emodb', version='1.1.1')
>>> desc.split('.')[0] # show first sentence
'Berlin Database of Emotional Speech'
"""
db = header(name, version=version, cache_root=cache_root)
return db.description
def duration(
name: str,
*,
version: str = None,
cache_root: str = None,
) -> pd.Timedelta:
"""Total media duration.
Args:
name: name of database
version: version of database
cache_root: cache folder where databases are stored.
If not set :meth:`audb.default_cache_root` is used
Returns:
duration
Example:
>>> duration('emodb', version='1.1.1')
Timedelta('0 days 00:24:47.092187500')
"""
deps = dependencies(name, version=version, cache_root=cache_root)
df = deps()
return pd.to_timedelta(
df[df.type == define.DependType.MEDIA].duration.sum(),
unit='s',
)
def formats(
name: str,
*,
version: str = None,
cache_root: str = None,
) -> typing.Set[str]:
"""Media formats.
Args:
name: name of database
version: version of database
cache_root: cache folder where databases are stored.
If not set :meth:`audb.default_cache_root` is used
Returns:
format
Example:
>>> formats('emodb', version='1.1.1')
{'wav'}
"""
deps = dependencies(name, version=version, cache_root=cache_root)
df = deps()
return set(df[df.type == define.DependType.MEDIA].format)
def header(
name: str,
*,
version: str = None,
cache_root: str = None,
) -> audformat.Database:
r"""Load header of database.
Args:
name: name of database
version: version of database
cache_root: cache folder where databases are stored.
If not set :meth:`audb.default_cache_root` is used
Returns:
database object without table data
Example:
>>> db = header('emodb', version='1.1.1')
>>> db.name
'emodb'
"""
if version is None:
version = latest_version(name)
db_root = database_cache_folder(name, version, cache_root)
db, _ = load_header(db_root, name, version)
return db
def languages(
name: str,
*,
version: str = None,
cache_root: str = None,
) -> typing.List[str]:
"""Languages of database.
Args:
name: name of database
version: version of database
cache_root: cache folder where databases are stored.
If not set :meth:`audb.default_cache_root` is used
Returns:
languages of database
Example:
>>> languages('emodb', version='1.1.1')
['deu']
"""
db = header(name, version=version, cache_root=cache_root)
return db.languages
def license(
name: str,
*,
version: str = None,
cache_root: str = None,
) -> str:
"""License of database.
Args:
name: name of database
version: version of database
cache_root: cache folder where databases are stored.
If not set :meth:`audb.default_cache_root` is used
Returns:
license of database
Example:
>>> license('emodb', version='1.1.1')
'CC0-1.0'
"""
db = header(name, version=version, cache_root=cache_root)
return db.license
def license_url(
name: str,
*,
version: str = None,
cache_root: str = None,
) -> str:
"""License URL of database.
Args:
name: name of database
version: version of database
cache_root: cache folder where databases are stored.
If not set :meth:`audb.default_cache_root` is used
Returns:
license URL of database
Example:
>>> license_url('emodb', version='1.1.1')
'https://creativecommons.org/publicdomain/zero/1.0/'
"""
db = header(name, version=version, cache_root=cache_root)
return db.license_url
def media(
name: str,
*,
version: str = None,
cache_root: str = None,
) -> typing.Dict:
"""Audio and video media of database.
Args:
name: name of database
version: version of database
cache_root: cache folder where databases are stored.
If not set :meth:`audb.default_cache_root` is used
Returns:
media of database
Example:
>>> media('emodb', version='1.1.1')
microphone:
{type: other, format: wav, channels: 1, sampling_rate: 16000}
"""
db = header(name, version=version, cache_root=cache_root)
return db.media
def meta(
name: str,
*,
version: str = None,
cache_root: str = None,
) -> typing.Dict:
"""Meta information of database.
Args:
name: name of database
version: version of database
cache_root: cache folder where databases are stored.
If not set :meth:`audb.default_cache_root` is used
Returns:
meta information of database
Example:
>>> meta('emodb', version='1.1.1')
pdf:
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.130.8506&rep=rep1&type=pdf
"""
db = header(name, version=version, cache_root=cache_root)
return db.meta
def organization(
name: str,
*,
version: str = None,
cache_root: str = None,
) -> str:
"""Organization responsible for database.
Args:
name: name of database
version: version of database
cache_root: cache folder where databases are stored.
If not set :meth:`audb.default_cache_root` is used
Returns:
organization responsible for database
Example:
>>> organization('emodb', version='1.1.1')
'audEERING'
"""
db = header(name, version=version, cache_root=cache_root)
return db.organization
def raters(
name: str,
*,
version: str = None,
cache_root: str = None,
) -> typing.Dict:
"""Raters contributed to database.
Args:
name: name of database
version: version of database
cache_root: cache folder where databases are stored.
If not set :meth:`audb.default_cache_root` is used
Returns:
raters of database
Example:
>>> raters('emodb', version='1.1.1')
gold:
{type: human}
"""
db = header(name, version=version, cache_root=cache_root)
return db.raters
def sampling_rates(
name: str,
*,
version: str = None,
cache_root: str = None,
) -> typing.Set[int]:
"""Media sampling rates.
Args:
name: name of database
version: version of database
cache_root: cache folder where databases are stored.
If not set :meth:`audb.default_cache_root` is used
Returns:
sampling rates
Example:
>>> sampling_rates('emodb', version='1.1.1')
{16000}
"""
deps = dependencies(name, version=version, cache_root=cache_root)
df = deps()
return set(df[df.type == define.DependType.MEDIA].sampling_rate)
def schemes(
name: str,
*,
version: str = None,
cache_root: str = None,
) -> typing.Dict:
"""Schemes of database.
Args:
name: name of database
version: version of database
cache_root: cache folder where databases are stored.
If not set :meth:`audb.default_cache_root` is used
Returns:
schemes of database
Example:
>>> list(schemes('emodb', version='1.1.1'))
['confidence', 'duration', 'emotion', 'speaker', 'transcription']
"""
db = header(name, version=version, cache_root=cache_root)
return db.schemes
def source(
name: str,
*,
version: str = None,
cache_root: str = None,
) -> str:
"""Source of database.
Args:
name: name of database
version: version of database
cache_root: cache folder where databases are stored.
If not set :meth:`audb.default_cache_root` is used
Returns:
source of database
Example:
>>> source('emodb', version='1.1.1')
'http://emodb.bilderbar.info/download/download.zip'
"""
db = header(name, version=version, cache_root=cache_root)
return db.source
def splits(
name: str,
*,
version: str = None,
cache_root: str = None,
) -> typing.Dict:
"""Splits of database.
Args:
name: name of database
version: version of database
cache_root: cache folder where databases are stored.
If not set :meth:`audb.default_cache_root` is used
Returns:
splits of database
Example:
>>> splits('emodb', version='1.1.1')
"""
db = header(name, version=version, cache_root=cache_root)
return db.splits
def tables(
name: str,
*,
version: str = None,
cache_root: str = None,
) -> typing.Dict:
"""Tables of database.
Args:
name: name of database
version: version of database
cache_root: cache folder where databases are stored.
If not set :meth:`audb.default_cache_root` is used
Returns:
tables of database
Example:
>>> list(tables('emodb', version='1.1.1'))
['emotion', 'files']
"""
db = header(name, version=version, cache_root=cache_root)
return db.tables
def usage(
name: str,
*,
version: str = None,
cache_root: str = None,
) -> str:
"""Usage of database.
Args:
name: name of database
version: version of database
cache_root: cache folder where databases are stored.
If not set :meth:`audb.default_cache_root` is used
Returns:
usage of database
Example:
>>> usage('emodb', version='1.1.1')
'unrestricted'
"""
db = header(name, version=version, cache_root=cache_root)
return db.usage
| 2.484375
| 2
|
tool/border_binaries_finder/utils.py
|
MageWeiG/karonte
| 1
|
12778214
|
import string
# Defines
CMP_SUCCS = ["strcmp", "memcmp", "strncmp", "strlcmp", "strcasecmp", "strncasecmp", "strstr"]
NETWORK_KEYWORDS = ["QUERY_STRING", "username", "HTTP_", "REMOTE_ADDR", "boundary=", "Content-Type", "Content-Length", "http_", "http", "HTTP", "query", "remote", "user-agent", "soap", "index."]
CASE_SENS_NETWORK_KEYWORDS = ["GET", "POST", "PUT", "DELETE", "HEAD"]
MIN_STR_LEN = 3
STR_LEN = 255
ALLOWED_CHARS = string.digits + string.ascii_letters + '-/_'
EXTENDED_ALLOWED_CHARS = ALLOWED_CHARS + "%,.;+=_)(*&^%$#@!~`|<>{}[]"
DEFAULT_PICKLE_DIR = '/tmp/karonte/pickles/parser/'
def populate_symbol_table(p):
"""
Populate a binary symbol table, if present
:param p: angr project
:return: None
"""
buckets = p.loader.main_object.hashtable.buckets + p.loader.main_object.hashtable.chains
symtab = p.loader.main_object.hashtable.symtab
names = [symtab.get_symbol(n).name for n in buckets]
names = list(set([str(n) for n in names if n]))
for name in names:
        # iterating forces the loader to populate its symbol table
        for _ in p.loader.find_all_symbols(name):
            pass
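# Illustrative usage (the binary path is an assumption):
#   import angr
#   p = angr.Project('/path/to/firmware_binary', auto_load_libs=False)
#   populate_symbol_table(p)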
| 2.125
| 2
|
src/carim_discord_bot/__init__.py
|
schana/carim-discord-bot
| 14
|
12778215
|
VERSION = '2.2.5'
| 1.132813
| 1
|
practicas/diccionarios.py
|
7junior7/python_comands
| 2
|
12778216
|
#********************************************************DICTIONARIES********************************************************
# Dictionaries in Python are data types very similar to JSON documents: they let us build a collection
# of values with identifiers (keys) that we define ourselves.
# syntax: dicc = {"key": "value"}
producto = {"Tipo": "Laptop", "Marca": "Asus", "Precio": 350.9, "Modelo": "mod01b148s"}
print(producto)  # prints all the data
print(producto["Marca"])  # prints the laptop's brand
producto["Tipo"] = "PC"
print(producto)
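# A couple of further illustrative examples (the "Color" key is hypothetical):
print(producto.get("Color", "N/A"))  # .get() returns a default instead of raising KeyError
for clave, valor in producto.items():  # iterate over key/value pairs
    print(clave, "->", valor)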
| 3.234375
| 3
|
Practice/Python/Basic Data Types/List_Comprehensions.py
|
alexanderbauer89/HackerRank
| 1
|
12778217
|
def print_list_comprehensions(x, y, z, n):
print([[a, b, c] for a in range(0, x + 1)
for b in range(0, y + 1)
for c in range(0, z + 1)
           if a + b + c != n])
if __name__ == '__main__':
x = int(input())
y = int(input())
z = int(input())
n = int(input())
print_list_comprehensions(x, y, z, n)
| 3.6875
| 4
|
Scripts/Cogs/setup.py
|
Mahas1/BotMan.py-rewritten
| 0
|
12778218
|
import json
from discord.ext import commands
import discord
import os
with open('config.json') as configFile:
configs = json.load(configFile)
prefix = configs.get('prefix_list')[0]
class Setup(commands.Cog, description='Used to set up the bot for welcome messages, mute/unmute etc.'):
def __init__(self, bot):
self.bot = bot
@commands.command(name='setup', description='Used to set the bot up, for welcome messages, mute roles, etc.\n'
'Recommended to set the bot up as early as possible when it joins a '
'server.')
@commands.guild_only()
async def setup_welcome(self, ctx):
embed = discord.Embed(title='You can setup preferences for your server with these commands.',
timestamp=ctx.message.created_at,
color=discord.Color.random())
embed.add_field(name='Set channel for welcome messages',
value=f'`{prefix}setwelcomechannel [channel]`\nExample: `{prefix}setwelcomechannel #welcome`\n'
f'__**What you\'d see:**__\n'
f'{ctx.author.mention} has joined **{ctx.guild.name}**! Say hi!\n'
f'{ctx.author.mention} has left **{ctx.guild.name}**. Until Next time!',
inline=False)
embed.add_field(name='Set default reason when kicking/banning members',
value=f'`{prefix}setkickreason [reason]`\nExample: `{prefix}setkickreason Being a jerk`\n'
f'__**What the kicked member would see**__:\n'
f'You have been kicked from **{ctx.guild.name}** for **Being a jerk**.',
inline=False)
embed.add_field(name='Set the mute role for this server',
value=f'`{prefix}setmuterole [role]`\nExample: `{prefix}setmuterole muted` '
f'(muted must be an actual role).\n'
f'You can create a mute role by `{prefix}createmuterole [role name]`',
inline=False)
embed.add_field(name='Set the default Member role for this server',
value=f'`{prefix}setmemberrole [role]`\nExample: `{prefix}setmemberrole Member`'
f' (Member must be an actual role).',
inline=False)
embed.set_footer(text=f'Command requested by {ctx.author.name}')
await ctx.send(embed=embed)
@commands.command(name='setwelcomechannel', description="Used to set the channel welcome messages arrive. "
"See description of the `setup` command for more info.")
@commands.has_permissions(administrator=True)
@commands.guild_only()
async def set_welcome_channel(self, ctx, channel: discord.TextChannel):
channel_id = channel.id
if not os.path.exists(f'./configs/guild{ctx.guild.id}.json'):
with open(f'./configs/guild{ctx.guild.id}.json', 'w') as jsonFile:
json.dump({}, jsonFile)
with open(f'./configs/guild{ctx.guild.id}.json', 'r') as jsonFile:
data = json.load(jsonFile)
data['welcome_channel'] = channel_id
with open(f'./configs/guild{ctx.guild.id}.json', 'w') as jsonFile:
json.dump(data, jsonFile)
await ctx.send(f'Welcome channel set to {channel.mention} successfully.')
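    # Illustrative example: after running `setwelcomechannel #welcome`, the per-guild
    # config file ./configs/guild<guild_id>.json would contain something like
    # {"welcome_channel": 123456789012345678} (the channel id shown is made up).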
@commands.command(name='setkickreason', description='Used to set the default kick/ban reason '
'in a case where no reason is given.\n'
'Check the description of the `setup` command '
'for more information.')
@commands.has_permissions(administrator=True)
@commands.guild_only()
async def set_kick_reason(self, ctx, *, reason):
if not os.path.exists(f'./configs/guild{ctx.guild.id}.json'):
with open(f'./configs/guild{ctx.guild.id}.json', 'w') as jsonFile:
json.dump({}, jsonFile)
with open(f'./configs/guild{ctx.guild.id}.json', 'r') as jsonFile:
data = json.load(jsonFile)
data['default_kick_ban_reason'] = str(reason)
with open(f'./configs/guild{ctx.guild.id}.json', 'w') as jsonFile:
json.dump(data, jsonFile)
await ctx.send(f'Default kick/ban reason set to **{reason}** successfully.')
@commands.command(name='setmemberrole', description='Used to set the role which is given to every member upon '
'joining. '
'Check description of `setup` command for more info.')
@commands.has_permissions(administrator=True)
@commands.guild_only()
async def set_member_role(self, ctx, role: discord.Role):
if not os.path.exists(f'./configs/guild{ctx.guild.id}.json'):
with open(f'./configs/guild{ctx.guild.id}.json', 'w') as jsonFile:
json.dump({}, jsonFile)
with open(f'./configs/guild{ctx.guild.id}.json', 'r') as jsonFile:
data = json.load(jsonFile)
data['member_role'] = role.id
with open(f'./configs/guild{ctx.guild.id}.json', 'w') as jsonFile:
json.dump(data, jsonFile)
await ctx.send(f'Member role set to **{role.name}** successfully.')
@commands.command(name='setmuterole', description='Sets the role assigned to muted people. '
'Use `createmuterole` for creating a muted role and '
'automatically setting permissions to every channel.')
@commands.has_permissions(administrator=True)
@commands.guild_only()
async def set_mute_role(self, ctx, role: discord.Role):
if not os.path.exists(f'./configs/guild{ctx.guild.id}.json'):
with open(f'./configs/guild{ctx.guild.id}.json', 'w') as jsonFile:
json.dump({}, jsonFile)
with open(f'./configs/guild{ctx.guild.id}.json', 'r') as jsonFile:
data = json.load(jsonFile)
data['mute_role'] = role.id
with open(f'./configs/guild{ctx.guild.id}.json', 'w') as jsonFile:
json.dump(data, jsonFile)
await ctx.send(f'Mute role set to **{role.name}** successfully.')
@commands.command(name='createmuterole', description='Creates a mute role, and sets messaging permissions to '
'every channel.\n '
'the `rolename` argument is optional. (Defaults to "Muted")')
@commands.has_permissions(manage_roles=True)
@commands.guild_only()
async def create_mute_role(self, ctx, rolename=None):
if rolename is None:
rolename = 'Muted'
guild = ctx.guild
mutedRole = await guild.create_role(name=rolename) # creating the role
for channel in guild.channels:
await channel.set_permissions(mutedRole, speak=False, send_messages=False, use_slash_commands=False)
# setting permissions for each channel
await ctx.send(f'Created role **{mutedRole}** and set permissions accordingly.')
await Setup.set_mute_role(self, ctx, mutedRole)
def setup(bot):
bot.add_cog(Setup(bot))
| 2.734375
| 3
|
nmosquery/__init__.py
|
bbc/nmos-query
| 1
|
12778219
|
VALID_TYPES = ["flows", "sources", "nodes", "devices", "senders", "receivers"]
| 1.078125
| 1
|
oxasl/basil.py
|
physimals/oxasl
| 1
|
12778220
|
#!/usr/bin/env python
"""
OXASL - Bayesian model fitting for ASL
The BASIL module is a little more complex than the other Workspace based
modules because of the number of options available and the need for flexibility
in how the modelling steps are run.
The main function is ``basil`` which performs model fitting on ASL data
in the Workspace ``asldata`` attribute.
wsp = Workspace()
wsp.asldata = AslImage("asldata.nii.gz", tis=[1.6,])
wsp.infertiss = True
basil(wsp.sub("basil"))
basil.finalstep.mean_ftiss.save("mean_ftiss.nii.gz")
Because of the number of options possible for the modelling process, the
workspace attribute ``basil_options`` can be set as a dictionary of extra
options relevant only to Basil:
wsp = Workspace()
wsp.asldata = AslImage("asldata.nii.gz", tis=[1.6,])
wsp.basil_options = {"infertiss" : True, "spatial" : True}
basil(wsp.sub("basil"))
basil.finalstep.mean_ftiss.save("mean_ftiss.nii.gz")
All options specified in basil_options are either consumed by Basil or,
if not recognized, passed directly to the model.
Copyright (c) 2008-2020 University of Oxford
"""
import sys
import math
import numpy as np
import scipy.ndimage
from fsl.wrappers import LOAD
from fsl.data.image import Image
from oxasl import __version__, __timestamp__, AslImage, Workspace, image, reg
from oxasl.options import AslOptionParser, OptionCategory, OptionGroup, GenericOptions
def basil(wsp, prefit=True, **kwargs):
"""
For oxasl_deblur compatibility
"""
run(wsp, prefit, **kwargs)
def run(wsp, prefit=True, **kwargs):
"""
Run BASIL modelling on ASL data in a workspace
:param wsp: Workspace object
:param prefit: If True, run a pre-fitting step using the mean over repeats of the ASL data
Required workspace attributes
-----------------------------
- ``asldata`` : AslImage object
Optional workspace attributes
-----------------------------
- ``mask`` : Brain mask (fsl.Image)
- ``wp`` : If True, use 'white paper' mode (Alsop et al) - modifies some defaults and infers tissue component only
- ``infertiss`` : If True, infer tissue component (default: True)
- ``inferbat`` : If True, infer bolus arrival time (default: False)
- ``infertau`` : If True, infer bolus duration (default: False)
- ``inferart`` : If True, infer arterial component (default: False)
- ``infert1`` : If True, infer T1 (default: False)
- ``inferpc`` : If True, infer PC (default: False)
- ``t1``: Assumed/initial estimate for tissue T1 (default: 1.65 in white paper mode, 1.3 otherwise)
- ``t1b``: Assumed/initial estimate for blood T1 (default: 1.65)
- ``bat``: Assumed/initial estimate for bolus arrival time (s) (default 0 in white paper mode, 1.3 for CASL, 0.7 otherwise)
- ``t1im`` : T1 map as Image
- ``pgm`` : Grey matter partial volume map as Image
- ``pwm`` : White matter partial volume map as Image
- ``initmvn`` : MVN structure to use as initialization as Image
- ``spatial`` : If True, include final spatial VB step (default: False)
- ``onestep`` : If True, do all inference in a single step (default: False)
- ``basil_options`` : Optional dictionary of additional options for underlying model
"""
wsp.log.write("\nRunning BASIL Bayesian modelling on ASL data in '%s' data space\n" % wsp.ifnone("image_space", "native"))
# Single or Multi TI setup
if wsp.asldata.ntis == 1:
# Single TI data - don't try to infer arterial component of bolus duration, we don't have enough info
wsp.log.write(" - Operating in Single TI mode - no arterial component, fixed bolus duration\n")
wsp.inferart = False
wsp.infertau = False
batsd_default = 0.1
else:
# For multi TI/PLD data, set a more liberal prior for tissue ATT since we should be able to
# determine this from the data. NB this leaves the arterial BAT alone.
batsd_default = 1
if wsp.wp:
# White paper mode - this overrides defaults, but can be overwritten by command line
# specification of individual parameters
wsp.log.write(" - Analysis in white paper mode: T1 default=1.65, BAT default=0, voxelwise calibration\n")
t1_default = 1.65
bat_default = 0.0
else:
t1_default = 1.3
if wsp.asldata.casl:
bat_default = 1.3
else:
bat_default = 0.7
if wsp.t1 is None:
wsp.t1 = t1_default
if wsp.t1b is None:
wsp.t1b = 1.65
if wsp.bat is None:
wsp.bat = bat_default
if wsp.batsd is None:
wsp.batsd = batsd_default
if wsp.infertiss is None:
wsp.infertiss = True
# if we are doing CASL then fix the bolus duration, unless explicitly told us otherwise
if wsp.infertau is None:
wsp.infertau = not wsp.asldata.casl
# Pick up extra BASIL options
wsp.basil_options = dict(wsp.ifnone("basil_options", {}))
mask_policy = wsp.ifnone("basil_mask", "default")
if mask_policy in ("default", "dilated"):
wsp.log.write(" - Using pipeline analysis mask\n")
# Two possible locations for compatibility
if wsp.rois is not None and wsp.rois.mask is not None:
mask = wsp.rois.mask
else:
mask = wsp.mask
if mask_policy == "dilated":
# Use 3x3x3 kernel for compatibility with fslmaths default
wsp.log.write(" - Dilating mask for Basil analysis\n")
struct = scipy.ndimage.generate_binary_structure(3, 3)
            mask = Image(scipy.ndimage.binary_dilation(mask.data, structure=struct).astype(int), header=mask.header)
elif mask_policy == "none":
wsp.log.write(" - Not using mask for Basil - will fit every voxel\n")
mask = Image(np.ones(wsp.asldata.data.shape[:3]), header=wsp.asldata.header)
else:
raise ValueError("Unrecognized mask policy: %s" % mask_policy)
# If we only have one volume, set a nominal noise prior as it is not possible to
# estimate from the data
if wsp.asldata.nvols / wsp.asldata.ntc == 1:
wsp.log.write(" - Restricting noise prior as only one ASL volume\n")
wsp.basil_options["prior-noise-stddev"] = 1.0
if prefit and max(wsp.asldata.rpts) > 1:
# Initial BASIL run on mean data
wsp.log.write(" - Doing initial fit on mean at each TI\n\n")
init_wsp = wsp.sub("init")
main_wsp = wsp.sub("main")
basil_fit(init_wsp, wsp.asldata.mean_across_repeats(), mask=mask)
wsp.basil_options["continue-from-mvn"] = wsp.init.finalstep.finalMVN
main_wsp.initmvn = wsp.basil_options["continue-from-mvn"]
else:
main_wsp = wsp
# Main run on full ASL data
wsp.log.write("\n - Doing fit on full ASL data\n\n")
basil_fit(main_wsp, wsp.asldata, mask=mask)
wsp.finalstep = main_wsp.finalstep
def basil_fit(wsp, asldata, mask=None):
"""
Run Bayesian model fitting on ASL data
See ``basil`` for details of workspace attributes used
:param wsp: Workspace object
:param asldata: AslImage object to use as input data
"""
if len(asldata.tes) > 1:
steps = basil_steps_multite(wsp, asldata, mask)
else:
steps = basil_steps(wsp, asldata, mask)
prev_result = None
wsp.asldata_diff = asldata.diff().reorder("rt")
wsp.basil_mask = mask
for idx, step in enumerate(steps):
step_wsp = wsp.sub("step%i" % (idx+1))
desc = "Step %i of %i: %s" % (idx+1, len(steps), step.desc)
if prev_result is not None:
desc += " - Initialise with step %i" % idx
step_wsp.log.write(desc + " ")
result = step.run(prev_result, log=wsp.log, fsllog=wsp.fsllog,
fabber_corelib=wsp.fabber_corelib, fabber_libs=wsp.fabber_libs,
fabber_coreexe=wsp.fabber_coreexe, fabber_exes=wsp.fabber_exes)
for key, value in result.items():
if key == "modelfit":
# Treat model fit specially - make it an AslImage and also output a mean
# across repeats version for comparison
value = wsp.asldata_diff.derived(value.data, header=value.header)
modelfit_mean = value.mean_across_repeats()
setattr(step_wsp, "modelfit_mean", modelfit_mean)
setattr(step_wsp, key, value)
if step_wsp.logfile is not None and step_wsp.savedir is not None:
step_wsp.set_item("logfile", step_wsp.logfile, save_fn=str)
prev_result = result
wsp.finalstep = step_wsp
wsp.log.write("\nEnd\n")
def _calc_slicedt(wsp, options):
"""
Calculate the slicedt for basil given that we may be quantifying in
a space other than the usual ASL space
We do this by generating a slice time offset image and transforming it
to quantification space. Since this could be rotated wrt to the asl data
we may need to warn if the resulting image has significant slice time variation
across X or Y axes
"""
img_space = wsp.ifnone("image_space", "native")
if img_space != "native":
asldata = options["data"]
        _x, _y, z, _t = np.indices(list(asldata.data.shape[:3]) + [asldata.ntis,])
        tis_arr = np.array(asldata.tis) + (z.astype(np.float32) * options["slicedt"])
        tis_img = Image(tis_arr, header=options["data"].header)
        wsp.tiimg = reg.change_space(wsp, tis_img, wsp.ifnone("image_space", "native"))
del options["slicedt"]
ti_idx = 1
while "ti%i" % ti_idx in options:
del options["ti%i" % ti_idx]
ti_idx += 1
options["tiimg"] = wsp.tiimg
def basil_steps(wsp, asldata, mask=None):
"""
Get the steps required for a BASIL run
This is separated for the case where an alternative process wants to run
the actual modelling, or so that the steps can be checked prior to doing
an actual run.
Arguments are the same as the ``basil`` function. No workspace is required.
"""
if asldata is None:
raise ValueError("Input ASL data is None")
wsp.log.write("BASIL v%s\n" % __version__)
asldata.summary(log=wsp.log)
asldata = asldata.diff().reorder("rt")
# Default Fabber options for VB runs and spatial steps. Note that attributes
# which are None (e.g. sliceband) are not passed to Fabber
options = {
"data" : asldata,
"model" : "aslrest",
"disp" : "none",
"exch" : "mix",
"method" : "vb",
"noise" : "white",
"allow-bad-voxels" : True,
"max-iterations" : 20,
"convergence" : "trialmode",
"max-trials" : 10,
"save-mean" : True,
"save-mvn" : True,
"save-std" : True,
"save-model-fit" : True,
"save-residuals" : wsp.ifnone("output_residuals", False),
}
if mask is not None:
options["mask"] = mask
# We choose to pass TIs (not PLDs). The asldata object ensures that
# TIs are correctly derived from PLDs, when these are specified, by adding
# the bolus duration.
for idx, ti in enumerate(asldata.tis):
options["ti%i" % (idx+1)] = ti
options["rpt%i" % (idx+1)] = asldata.rpts[idx]
# Bolus duration - use a single value where possible as cannot infer otherwise
taus = getattr(asldata, "taus", [1.8,])
if min(taus) == max(taus):
options["tau"] = taus[0]
else:
for idx, tau in enumerate(taus):
options["tau%i" % (idx+1)] = tau
# Other asl data parameters
for attr in ("casl", "slicedt", "sliceband"):
if getattr(asldata, attr, None) is not None:
options[attr] = getattr(asldata, attr)
_calc_slicedt(wsp, options)
if wsp.noiseprior:
# Use an informative noise prior
if wsp.noisesd is None:
snr = wsp.ifnone("snr", 10)
wsp.log.write(" - Using SNR of %f to set noise std dev\n" % snr)
            # Estimate the signal magnitude FIXME diffdata_mean is always 3D?
if wsp.diffdata_mean.ndim > 3:
datamax = np.amax(wsp.diffdata_mean.data, 3)
else:
datamax = wsp.diffdata_mean.data
brain_mag = np.mean(datamax.data[mask.data != 0])
# this will correspond to whole brain CBF (roughly) - about 0.5 of GM
noisesd = math.sqrt(brain_mag * 2 / snr)
else:
noisesd = wsp.noisesd
wsp.log.write(" - Using a prior noise sd of: %f\n" % noisesd)
options["prior-noise-stddev"] = noisesd
# Add Basil-specific options defined on the workspace
options.update(wsp.ifnone("basil_options", {}))
# Additional optional workspace arguments
for attr in ("t1", "t1b", "bat", "FA", "pwm", "pgm", "batsd"):
value = getattr(wsp, attr)
if value is not None:
options[attr] = value
# Options for final spatial step
prior_type_spatial = "M"
prior_type_mvs = "A"
options_svb = {
"method" : "spatialvb",
"param-spatial-priors" : "N+",
"convergence" : "maxits",
"max-iterations": 20,
}
wsp.log.write("Model (in fabber) is : %s\n" % options["model"])
wsp.log.write("Dispersion model option is %s\n" % options["disp"])
wsp.log.write("Compartment exchange model option is %s\n" % options["exch"])
inferdisp = options["disp"] != "none"
inferexch = options["exch"] != "mix"
# Partial volume correction
pvcorr = "pgm" in options or "pwm" in options
if pvcorr:
if not wsp.infertiss:
raise ValueError("ERROR: PV correction is not compatible with --artonly option (there is no tissue component)")
options["incpve"] = True
if "pgm" not in options or "pwm" not in options:
raise ValueError("Only one partial volume map (GM / WM) was supplied for PV correctioN")
# Need a spatial step with more iterations for the PV correction
wsp.spatial = True
options_svb["max-iterations"] = 200
# Ignore partial volumes below 0.1
pgm_img = options.pop("pgm")
pwm_img = options.pop("pwm")
pgm = np.copy(pgm_img.data)
pwm = np.copy(pwm_img.data)
pgm[pgm < 0.1] = 0
pgm[pgm > 1] = 1
pwm[pwm < 0.1] = 0
pwm[pwm > 1] = 1
pgm = Image(pgm, header=pgm_img.header)
pwm = Image(pwm, header=pwm_img.header)
# Set general parameter inference and inclusion
if wsp.infertiss:
options["inctiss"] = True
if wsp.inferbat:
options["incbat"] = True
options["inferbat"] = True # Infer in first step
if wsp.inferart:
options["incart"] = True
if wsp.inferpc:
options["incpc"] = True
if wsp.infertau:
options["inctau"] = True
if wsp.infert1:
options["inct1"] = True
# Keep track of the number of spatial priors specified by name
spriors = 1
if wsp.initmvn:
# we are being supplied with an initial MVN
wsp.log.write("Initial MVN being loaded %s\n" % wsp.initmvn.name)
options["continue-from-mvn"] = wsp.initmvn
# T1 image prior
if wsp.t1im is not None:
spriors = _add_prior(options, spriors, "T_1", type="I", image=wsp.t1im)
# BAT image prior
if wsp.batim is not None:
# With a BAT image prior we must include BAT even if we are not inferring it
# (in this case the image prior will be treated as ground truth)
spriors = _add_prior(options, spriors, "delttiss", type="I", image=wsp.batim)
options["incbat"] = True
steps = []
components = ""
### --- TISSUE MODULE ---
if wsp.infertiss:
components += " Tissue "
options["infertiss"] = True
step_desc = "VB - %s" % components
if not wsp.onestep:
steps.append(FabberStep(wsp, options, step_desc))
# setup spatial priors ready
spriors = _add_prior(options_svb, spriors, "ftiss", type=prior_type_spatial)
### --- ARTERIAL MODULE ---
if wsp.inferart:
components += " Arterial "
options["inferart"] = True
step_desc = "VB - %s" % components
if not wsp.onestep:
steps.append(FabberStep(wsp, options, step_desc))
# setup spatial priors ready
spriors = _add_prior(options_svb, spriors, "fblood", type=prior_type_mvs)
### --- BOLUS DURATION MODULE ---
if wsp.infertau:
components += " Bolus duration "
options["infertau"] = True
step_desc = "VB - %s" % components
if not wsp.onestep:
steps.append(FabberStep(wsp, options, step_desc))
### --- MODEL EXTENSIONS MODULE ---
    # Add variable dispersion and/or exchange parameters and/or pre-capillary
if inferdisp or inferexch or wsp.inferpc:
if inferdisp:
components += " dispersion"
options["inferdisp"] = True
if inferexch:
components += " exchange"
options["inferexch"] = True
if wsp.inferpc:
components += " pre-capiliary"
options["inferpc"] = True
step_desc = "VB - %s" % components
if not wsp.onestep:
steps.append(FabberStep(wsp, options, step_desc))
### --- T1 MODULE ---
if wsp.infert1:
components += " T1 "
options["infert1"] = True
step_desc = "VB - %s" % components
if not wsp.onestep:
steps.append(FabberStep(wsp, options, step_desc))
### --- PV CORRECTION MODULE ---
if pvcorr:
# setup ready for PV correction, which has to be done with spatial priors
components += " PVE"
options["pvcorr"] = True
# set the image priors for the PV maps
spriors = _add_prior(options, spriors, "pvgm", type="I", image=pgm)
spriors = _add_prior(options, spriors, "pvwm", type="I", image=pwm)
spriors = _add_prior(options, spriors, "fwm", type="M")
if steps:
            # Add initialisation step for PV correction - ONLY if we have something to init from
steps.append(PvcInitStep(wsp, {"data" : asldata, "mask" : mask, "pgm" : pgm, "pwm" : pwm}, "PVC initialisation"))
### --- SPATIAL MODULE ---
if wsp.spatial:
step_desc = "Spatial VB - %s" % components
options.update(options_svb)
del options["max-trials"]
if not wsp.onestep:
steps.append(FabberStep(wsp, options, step_desc))
### --- SINGLE-STEP OPTION ---
if wsp.onestep:
steps.append(FabberStep(wsp, options, step_desc))
if not steps:
raise ValueError("No steps were generated - no parameters were set to be inferred")
return steps
def basil_steps_multite(wsp, asldata, mask=None, **kwargs):
"""
Get the steps required for a BASIL run on multi-TE data
This is separated for the case where an alternative process wants to run
the actual modelling, or so that the steps can be checked prior to doing
an actual run.
Arguments are the same as the ``basil`` function.
"""
if asldata is None:
raise ValueError("Input ASL data is None")
wsp.log.write("BASIL v%s\n" % __version__)
asldata.summary(log=wsp.log)
asldata = asldata.diff().reorder("rt")
# Default Fabber options for VB runs and spatial steps. Note that attributes
# which are None (e.g. sliceband) are not passed to Fabber
options = {
"data" : asldata,
"model" : "asl_multite",
"method" : "vb",
"noise" : "white",
"allow-bad-voxels" : True,
"max-iterations" : 20,
"convergence" : "trialmode",
"max-trials" : 10,
"save-mean" : True,
"save-mvn" : True,
"save-std" : True,
"save-model-fit" : True,
}
if mask is not None:
options["mask"] = mask
# We choose to pass TIs (not PLDs). The asldata object ensures that
# TIs are correctly derived from PLDs, when these are specified, by adding
# the bolus duration.
_list_option(options, asldata.tis, "ti")
# Pass multiple TEs
_list_option(options, asldata.tes, "te")
# Bolus duration must be constant for multi-TE model
if min(asldata.taus) != max(asldata.taus):
raise ValueError("Multi-TE model does not support variable bolus durations")
else:
options["tau"] = asldata.taus[0]
# Repeats must be constant for multi-TE model
if min(asldata.rpts) != max(asldata.rpts):
raise ValueError("Multi-TE model does not support variable repeats")
else:
options["repeats"] = asldata.rpts[0]
# Other asl data parameters
for attr in ("casl", "slicedt", "sliceband"):
if getattr(asldata, attr, None) is not None:
options[attr] = getattr(asldata, attr)
# Keyword arguments override options
options.update(kwargs)
# Additional optional workspace arguments
for attr in ("t1", "t1b", "t2", "t2b"):
value = getattr(wsp, attr)
if value is not None:
if attr.startswith("t2"):
# Model expects T2 in seconds not ms
options[attr] = float(value) / 1000
else:
options[attr] = value
# Options for final spatial step
prior_type_spatial = "M"
prior_type_mvs = "A"
options_svb = {
"method" : "spatialvb",
"param-spatial-priors" : "N+",
"convergence" : "maxits",
"max-iterations": 20,
}
wsp.log.write("Model (in fabber) is : %s\n" % options["model"])
# Set general parameter inference and inclusion
if not wsp.infertiss:
wsp.log.write("WARNING: infertiss=False but ftiss is always inferred in multi-TE model\n")
if not wsp.inferbat:
wsp.log.write("WARNING: inferbat=False but BAT is always inferred in multi-TE model\n")
if wsp.inferart:
wsp.log.write("WARNING: inferart=True but multi-TE model does not support arterial component\n")
if wsp.infertau:
options["infertau"] = True
if wsp.infert1:
options["infert1"] = True
if wsp.infert2:
options["infert2"] = True
# Keep track of the number of spatial priors specified by name
spriors = 1
if wsp.initmvn:
# we are being supplied with an initial MVN
wsp.log.write("Initial MVN being loaded %s\n" % wsp.initmvn.name)
options["continue-from-mvn"] = wsp.initmvn
# T1 image prior
if wsp.t1im:
spriors = _add_prior(options, spriors, "T_1", type="I", image=wsp.t1im)
# BAT image prior
if wsp.batim is not None:
# With a BAT image prior we must include BAT even if we are not inferring it
# (in this case the image prior will be treated as ground truth)
spriors = _add_prior(options, spriors, "delttiss", type="I", image=wsp.batim)
options["incbat"] = True
steps = []
components = ""
### --- TISSUE MODULE ---
#if wsp.infertiss:
if True:
components += " Tissue"
### Inference options
if wsp.infertau:
components += " Bolus duration"
options["infertau"] = True
if wsp.infert1:
components += " T1"
options["infert1"] = True
if wsp.infertexch:
components += " Exchange time"
options["infertexch"] = True
step_desc = "VB - %s" % components
if not wsp.onestep:
steps.append(FabberStep(wsp, options, step_desc))
# Setup spatial priors ready
spriors = _add_prior(options_svb, spriors, "ftiss", type=prior_type_spatial)
### --- SPATIAL MODULE ---
if wsp.spatial:
step_desc = "Spatial VB - %s" % components
options.update(options_svb)
del options["max-trials"]
if not wsp.onestep:
steps.append(FabberStep(wsp, options, step_desc))
### --- SINGLE-STEP OPTION ---
if wsp.onestep:
steps.append(FabberStep(wsp, options, step_desc))
if not steps:
raise ValueError("No steps were generated - no parameters were set to be inferred")
return steps
def _list_option(options, values, name):
for idx, value in enumerate(values):
options["%s%i" % (name, idx+1)] = value
def _add_prior(options, prior_idx, param, **kwargs):
options["PSP_byname%i" % prior_idx] = param
for key, value in kwargs.items():
options["PSP_byname%i_%s" % (prior_idx, key)] = value
return prior_idx + 1
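# Illustrative example: _add_prior(opts, 1, "ftiss", type="M") sets
# opts["PSP_byname1"] = "ftiss" and opts["PSP_byname1_type"] = "M" and returns 2,
# so the next prior added by name gets index 2.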
class Step(object):
"""
A step in the Basil modelling process
"""
def __init__(self, wsp, options, desc):
self.options = dict(options)
self.desc = desc
# Need to convert all images to target image space
for key in list(options.keys()):
poss_img = self.options[key]
if isinstance(poss_img, Image):
image_space = wsp.ifnone("image_space", "native")
self.options[key] = reg.change_space(wsp, poss_img, image_space, mask=(key == 'mask'))
class FabberStep(Step):
"""
A Basil step which involves running Fabber
"""
def run(self, prev_output, log=sys.stdout, fsllog=None, **kwargs):
"""
Run Fabber, initialising it from the output of a previous step
"""
if prev_output is not None:
self.options["continue-from-mvn"] = prev_output["finalMVN"]
from .wrappers import fabber
ret = fabber(self.options, output=LOAD, progress_log=log, log=fsllog, **kwargs)
log.write("\n")
return ret
class PvcInitStep(Step):
"""
A Basil step which initialises partial volume correction
"""
def run(self, prev_output, log=sys.stdout, fsllog=None, **kwargs):
"""
Update the MVN from a previous step to include initial estimates
for PVC parameters
"""
log.write("Initialising partial volume correction...\n")
        # set the initial GM and WM values using a simple PV correction
wm_cbf_ratio = 0.4
# Modified pvgm map
temp_pgm = np.copy(self.options["pgm"].data)
temp_pgm[temp_pgm < 0.2] = 0.2
        # First part of the correction: pseudo WM CBF term
prev_ftiss = prev_output["mean_ftiss"].data
wm_cbf_term = (prev_ftiss * wm_cbf_ratio) * self.options["pwm"].data
gmcbf_init = (prev_ftiss - wm_cbf_term) / temp_pgm
wmcbf_init = gmcbf_init * wm_cbf_ratio
mvn = prev_output["finalMVN"]
gmcbf_init = Image(gmcbf_init, header=mvn.header)
wmcbf_init = Image(wmcbf_init, header=mvn.header)
# HACK: This seems to be required to get the fslpy decorators to write
# the temporary file correctly
mask = Image(self.options["mask"].data, header=self.options["mask"].header)
# load these into the MVN
mvn = prev_output["finalMVN"]
from .wrappers import mvntool
params = prev_output["paramnames"]
mvn = mvntool(mvn, params.index("ftiss")+1, output=LOAD, mask=mask, write=True, valim=gmcbf_init, var=0.1, log=fsllog)["output"]
mvn = mvntool(mvn, params.index("fwm")+1, output=LOAD, mask=mask, write=True, valim=wmcbf_init, var=0.1, log=fsllog)["output"]
log.write("DONE\n")
return {"finalMVN" : mvn, "gmcbf_init" : gmcbf_init, "wmcbf_init" : wmcbf_init}
class BasilOptions(OptionCategory):
"""
BASIL option category
"""
def __init__(self):
OptionCategory.__init__(self, "basil")
def groups(self, parser):
groups = []
group = OptionGroup(parser, "BASIL options")
group.add_option("--infertau", help="Infer bolus duration", action="store_true", default=False)
group.add_option("--inferart", help="Infer macro vascular (arterial) signal component (not supported for multi-TE data)", action="store_true", default=False)
group.add_option("--inferpc", help="Infer pre-capillary signal component (not supported for multi-TE data)", action="store_true", default=False)
group.add_option("--infert1", help="Include uncertainty in T1 values", action="store_true", default=False)
group.add_option("--infertexch", help="Infer exchange time (multi-TE data only)", action="store_true", default=False)
group.add_option("--artonly", help="Remove tissue component and infer only arterial component (not supported for multi-TE data)", action="store_true", default=False)
group.add_option("--fixbat", help="Fix bolus arrival time", action="store_false", default=True)
group.add_option("--batsd", help="Bolus arrival time standard deviation (s) - default 1.0 for multi-PLD, 0.1 otherwise", type=float)
group.add_option("--spatial", help="Add step that implements adaptive spatial smoothing on CBF", action="store_true", default=False)
group.add_option("--fast", help="Faster analysis (1=faster, 2=single step", type=int, default=0)
group.add_option("--noiseprior", help="Use an informative prior for the noise estimation", action="store_true", default=False)
group.add_option("--noisesd", help="Set a custom noise std. dev. for the nosie prior", type=float)
group.add_option("--basil-mask", help="Masking policy to use for Basil model fitting. Does not affect analysis mask used in rest of pipeline. 'dilate' means dilate the default analysis mask. 'none' means use no masking",
type="choice", choices=["default", "dilated", "none"])
group.add_option("--basil-options", "--fit-options", help="File containing additional options for model fitting step", type="optfile")
groups.append(group)
group = OptionGroup(parser, "Model options")
group.add_option("--disp", help="Model for label dispersion", default="none")
group.add_option("--exch", help="Model for tissue exchange (residue function)", default="mix")
groups.append(group)
group = OptionGroup(parser, "Partial volume correction / CBF estimation (enforces --spatial)")
group.add_option("--pgm", help="Gray matter PV map", type="image")
group.add_option("--pwm", help="White matter PV map", type="image")
groups.append(group)
group = OptionGroup(parser, "Special options")
group.add_option("--t1im", help="Voxelwise T1 tissue estimates", type="image")
group.add_option("--batim", "--attim", help="Voxelwise BAT (ATT) estimates in seconds", type="image")
groups.append(group)
return groups
def main():
"""
Entry point for BASIL command line application
"""
try:
parser = AslOptionParser(usage="basil -i <ASL input file> [options...]", version=__version__)
parser.add_category(image.AslImageOptions())
parser.add_category(BasilOptions())
parser.add_category(GenericOptions())
options, _ = parser.parse_args(sys.argv)
if not options.output:
options.output = "basil"
if not options.asldata:
sys.stderr.write("Input file not specified\n")
parser.print_help()
sys.exit(1)
asldata = AslImage(options.asldata, **parser.filter(options, "image"))
wsp = Workspace(savedir=options.output, **vars(options))
wsp.asldata = asldata
# Deal with --artonly
if wsp.artonly:
wsp.infertiss = False
wsp.inferart = True
# Adjust number of iterations based on fast option
if not wsp.fast:
num_iter, num_trials, onestep = 20, 10, False
elif wsp.fast == 1:
num_iter, num_trials, onestep = 5, 2, False
elif wsp.fast == 2:
num_iter, num_trials, onestep = 10, 5, True
else:
raise ValueError("Not a valid option for fast: %s" % str(wsp.fast))
wsp.max_iterations = num_iter
wsp.max_trials = num_trials
wsp.onestep = onestep
        # Run BASIL processing
basil(wsp)
except ValueError as exc:
sys.stderr.write("\nERROR: " + str(exc) + "\n")
sys.stderr.write("Use --help for usage information\n")
sys.exit(1)
if __name__ == "__main__":
main()
| 2.5625
| 3
|
src/controls/array_control.py
|
furbrain/CVExplorer
| 0
|
12778221
|
from typing import Optional, TYPE_CHECKING
import wx
if TYPE_CHECKING:
from gui.pane import FunctionPane
# noinspection PyPep8Naming
class ArrayControl(wx.ComboBox):
# noinspection PyShadowingBuiltins
def __init__(self, parent, id):
from functions import Function
choices = list(Function.get_all_vars().keys())
super().__init__(parent, id, choices=choices, value=choices[0])
def get_pane(self, window: wx.Window) -> "FunctionPane":
from gui.pane import FunctionPane
parent = window.GetParent()
if isinstance(parent, FunctionPane):
return parent
if parent is None:
raise ValueError("Could not find a FunctionPane parent for element")
return self.get_pane(parent)
def SetValue(self, value: Optional[str]):
if value is None:
self.SetSelection(0)
else:
super().SetValue(value)
    def GetValue(self):
        # Evaluate the typed expression against the variables exposed by the
        # top-level MainFrame, so the entry resolves to a live object rather
        # than a plain string.
        from gui.gui import MainFrame
        frame: MainFrame = self.GetTopLevelParent()
        return eval(super().GetValue(), frame.get_vars(self))
def GetCode(self):
return super().GetValue()
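
# Intended use (a sketch): the combo box text is a Python expression; GetValue()
# evaluates it (e.g. a hypothetical entry "frame0[::2]") against the variables
# exposed by MainFrame, while GetCode() returns the raw text for code generation.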
| 2.453125
| 2
|
memwatch.py
|
Ezibenroc/memwatch
| 0
|
12778222
|
import sys
import csv
import datetime
import time
import argparse
from subprocess import Popen, PIPE
class Watcher:
    """Samples /proc/meminfo while a command runs and writes the values to CSV."""

def __init__(self, cmd, time_interval, filename):
self.cmd = cmd
self.time_interval = time_interval
self.filename = filename
self.outputfile = open(filename, 'w')
self.writer = csv.writer(self.outputfile)
self.meminfo_keys = list(self.parse_meminfo().keys())
self.writer.writerow(['timestamp'] + self.meminfo_keys)
@staticmethod
def parse_meminfo():
with open('/proc/meminfo') as meminfo:
lines = meminfo.readlines()
result = {}
for line in lines:
name, value = line.split(':')
value = value.strip()
if value.endswith('kB'):
value = int(value[:-2])*1000
else:
value = int(value)
result[name] = value
return result
def add_measure(self):
meminfo = self.parse_meminfo()
timestamp = str(datetime.datetime.now())
self.writer.writerow([timestamp] + [meminfo[k] for k in self.meminfo_keys])
return meminfo['MemAvailable']
def run_and_watch(self):
min_mem = self.add_measure()
max_mem = min_mem
proc = Popen(self.cmd, stdout=PIPE, stderr=PIPE)
while proc.poll() is None:
time.sleep(self.time_interval)
new_mem = self.add_measure()
if new_mem > max_mem:
max_mem = new_mem
if new_mem < min_mem:
min_mem = new_mem
stdout, stderr = proc.communicate()
sys.stdout.write(stdout.decode())
sys.stderr.write(stderr.decode())
sys.stderr.write(f'Memory consumption: {(max_mem - min_mem)*1e-9:.3f} GB\n')
self.outputfile.flush()
sys.exit(proc.returncode)
def main(args):
parser = argparse.ArgumentParser(description='Monitoring of a command memory consumption')
parser.add_argument('--time_interval', '-t', type=int, default=1,
help='Period of the measures, in seconds')
parser.add_argument('--output', '-o', type=str, default='/tmp/memwatch.csv',
help='Output file for the measures')
parser.add_argument('command', type=str,
help='Command line to execute')
args = parser.parse_args(args)
watcher = Watcher(cmd=args.command.split(), time_interval=args.time_interval, filename=args.output)
watcher.run_and_watch()
if __name__ == '__main__':
main(sys.argv[1:])
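
# Usage sketch (assuming this file is saved as memwatch.py):
#   python memwatch.py -t 1 -o /tmp/mem.csv 'sleep 10'
# Note that the command string is split on whitespace, so arguments that
# themselves contain spaces are not supported by this simple parser.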
| 2.6875
| 3
|
mobtick/models.py
|
proteus2171/test
| 0
|
12778223
|
from django.db import models
# Create your models here.
class ticket(models.Model):
    timestamp = models.DateField(auto_now_add=True, auto_now=False)
    tech = models.CharField(max_length=50)
    site = models.CharField(max_length=50)
    user = models.CharField(max_length=50)
    issue = models.CharField(max_length=200)
    tickid = models.AutoField(primary_key=True)
    complete = models.BooleanField()
    reqact = models.CharField(max_length=200, blank=True)
    def __str__(self):
        return self.site
| 2.078125
| 2
|
tests/derivate/linear_equation_derivate_test.py
|
cenkbircanoglu/clustering
| 23
|
12778224
|
from unittest import TestCase
from similarityPy.derivate.linear_equation_derivate import LinearEquationDerivate
from tests import test_logger
__author__ = 'cenk'
class LinearEquationDerivateTest(TestCase):
def setUp(self):
pass
def test_algorithm(self):
test_logger.debug("LinearEquationDerivateTest - test_algorithm Starts")
"""
This data symbolise "y=2x + 1"
"""
data = [2, 1]
linear_equation_derivate = LinearEquationDerivate.calculate(data)
expected_result = [2]
        self.assertEqual(expected_result, linear_equation_derivate)
linear_equation_derivate = LinearEquationDerivate.calculate_equation(data)
expected_result = "2"
        self.assertEqual(expected_result, linear_equation_derivate)
        data = [3, 4, 5, 6, 2, 1]  # symbolises y = 3x^5 + 4x^4 + 5x^3 + 6x^2 + 2x + 1
linear_equation_derivate = LinearEquationDerivate.calculate(data)
expected_result = [15, 16, 15, 12, 2]
        self.assertEqual(expected_result, linear_equation_derivate)
linear_equation_derivate = LinearEquationDerivate.calculate_equation(data)
expected_result = "15*x^4+ 16*x^3+ 15*x^2+ 12*x+ 2"
        self.assertEqual(expected_result, linear_equation_derivate)
test_logger.debug("LinearEquationDerivateTest - test_algorithm Ends")
def derivative(f):
    """Computes the numerical derivative of a function via a central difference."""
    def df(x, h=0.1e-5):
        return (f(x + h / 2) - f(x - h / 2)) / h
    return df

def g(x):
    return x * x * x

dg = derivative(g)
print(dg(3))  # approximately 27, the derivative of x**3 at x = 3
| 3.671875
| 4
|
leetcode/LCP_40.py
|
zhaipro/acm
| 0
|
12778225
|
from typing import List


class Solution:
    # Method name and signature follow the original problem statement (LCP 40).
    def maxmiumScore(self, cards: List[int], cnt: int) -> int:
cards.sort()
r = sum(cards[-cnt:])
if r % 2 == 0:
return r
r0 = 0
r1 = 0
        # Option 1: swap the smallest chosen even card for the largest unchosen odd card.
        try:
            x0 = next(x for x in cards[-cnt:] if x % 2 == 0)
            y1 = next(x for x in cards[-cnt - 1::-1] if x % 2)
            r0 = r - x0 + y1
        except StopIteration:
            pass
        # Option 2: swap the smallest chosen odd card for the largest unchosen even card.
        try:
            x1 = next(x for x in cards[-cnt:] if x % 2)
            y0 = next(x for x in cards[-cnt - 1::-1] if x % 2 == 0)
            r1 = r - x1 + y0
        except StopIteration:
            pass
return max(r0, r1)
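
# Worked example (a sketch): with cards=[1, 2, 8, 9] and cnt=3, the top three
# cards give 2 + 8 + 9 = 19, which is odd; swapping the smallest chosen even
# card (2) for the largest unchosen odd card (1) gives 18, while the other
# swap fails because no unchosen even card exists, so the answer is 18.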
| 2.78125
| 3
|
backend/models.py
|
nhatnxn/layout_GateGCN
| 17
|
12778226
|
import torch
from vietocr.tool.config import Cfg
from vietocr.tool.predictor import Predictor
import configs as cf
from models.saliency.u2net import U2NETP
from backend.text_detect.craft_utils import get_detector
def load_text_detect():
text_detector = get_detector(cf.text_detection_weights_path, cf.device)
return text_detector
def load_saliency():
net = U2NETP(3, 1)
net = net.to(cf.device)
net.load_state_dict(torch.load(cf.saliency_weight_path, map_location=cf.device))
net.eval()
return net
def load_text_recognize():
config = Cfg.load_config_from_name("vgg_seq2seq")
config["cnn"]["pretrained"] = False
config["device"] = cf.device
config["predictor"]["beamsearch"] = False
detector = Predictor(config)
return detector
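
# A minimal sketch of loading the three models together (configuration values
# such as cf.device come from this project's configs module):
#   text_detector = load_text_detect()
#   saliency_net = load_saliency()
#   recognizer = load_text_recognize()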
| 2.171875
| 2
|
python/cartons_inventory/cartons.py
|
sdss/cartons_inventory
| 0
|
12778227
|
<reponame>sdss/cartons_inventory<filename>python/cartons_inventory/cartons.py
import csv
import inspect
import os
import numpy as np
import pandas as pd
from astropy.io import ascii
from sdssdb.peewee.sdss5db.targetdb import (Cadence, Carton, CartonToTarget,
Category, Instrument, Magnitude,
Mapper, Version)
import cartons_inventory
from cartons_inventory import log, main
Car = Carton.alias()
CarTar = CartonToTarget.alias()
Cad = Cadence.alias()
Inst = Instrument.alias()
Categ = Category.alias()
Map = Mapper.alias()
Mag = Magnitude.alias()
class CartonInfo(object):
"""Saves targetdb info for cartons.
This class takes basic information from a carton (``name``, ``plan``, and ``category_label``
at minimum) and at instantiation sets the carton dependent (as opposed to target dependent)
information of the carton. ``stage`` and ``active`` parameters can also be provided but
currently nothing is done with those. Carton dependent information is either taken from
input parameters of __init__ or by the assign_carton_info function that also set the
boolean in_targetdb to check the existence of the carton.
Then, function assign_target_info assigns target dependent information which can be
the magnitude placholders used for the different photometric system in the carton
(calculate_mag_placeholders=True), and/or python sets with the unique values found per
cadence, lambda, and instrument in the carton, along with ``priority`` and ``value``
ranges.
Finally, function process_cartons wraps all the functions of this class. Based on the value
of the ``origin`` parameter, takes as input a file from rsconfig or curstom, or takes a
selection criteria to search cartons in targetdb. With this function we can evaluate the
existence of a list of cartons, check their content, save a selection criteria as an input
file ready to be used by process_cartons, runs assign_target_info to get target parameter
set, ranges, and/or magnitude_placeholders, saves an output .csv file with the information
of each carton, or return a list of all the CartonInfo objects.
Parameters
----------
carton: str
Carton name in table targetdb.carton
plan: str
Plan in table targetdb.version
category_label: str
Label in targetdb.category table (e.g. science, standard_boss, guide)
stage: str
Robostrategy stage, could be srd, open, none, filler. Default is 'N/A'
active: str
``y`` or ``n`` to check if it is active in robostrategy. Default is 'N/A'
mapper_label: str
Label in targetdb.mapper (MWM or BHM)
program: str
Program in targetdb.program table
version_pk: int
        ID in targetdb.version table
tag: str
tag in targetdb.version table
mapper_pk: int
Mapper_pk in targetdb.carton table. 0 for MWM and 1 for BHM
category_pk: int
category_pk in targetdb.carton table (e.g. 0 for science)
in_targetdb: bool
        True if the carton/plan/category_label combination is found in targetdb, False if not.
sets_calculated: bool
True when in_targetdb is True and target dependent parameters value_min, value_max,
priority_min, priority_max, cadence_pk, cadence_label, lambda_eff, instrument_pk,
and instrument_label have been calculated for the carton using
assign_target_info(calculate_sets=True)
mag_placeholders_calculated: bool
        True when the magnitude placeholders used for the SDSS, TMASS, and GAIA photometric
        systems have been calculated. These are calculated using the check_mag_outliers function
"""
cfg = cartons_inventory.config
def __init__(self, carton, plan, category_label, stage='N/A', active='N/A'):
self.carton = carton
self.plan = plan
self.category_label = category_label
self.stage = stage
self.active = active
self.mapper_label, self.program, self.version_pk = [], [], []
self.tag, self.mapper_pk, self.category_pk = [], [], []
self.in_targetdb = False
self.sets_calculated = False
self.mag_placeholders_calculated = False
self.assign_carton_info()
def assign_carton_info(self):
"""Assigns carton dependent information for cartons in targetdb.
If the carton/plan/category_label combination in the CartonInfo object
is found in targetdb this function assigns attributes for carton dependent
parameters (parameters shared for all targets in the carton). These paraemters
are mapper_label, program, version_pk, tag, mapper_pk, and category_pk.
Finally it set in_targetdb attribute as True when found in the database.
"""
cfg = cartons_inventory.config
basic_info = (
Car
.select(Map.label.alias('mapper_label'), Car.version_pk.alias('version_pk'),
Car.category_pk.alias('category_pk'), Car.mapper_pk.alias('mapper_pk'),
Version.tag, Car.program)
.join(Version, on=(Version.pk == Car.version_pk))
.join(Categ, 'LEFT JOIN', Car.category_pk == Categ.pk)
.join(Map, 'LEFT JOIN', Car.mapper_pk == Map.pk)
.where(Car.carton == self.carton)
.where(Version.plan == self.plan)
.where(Categ.label == self.category_label).dicts()
)
if len(basic_info) > 0: # If the carton is in targetdb assigns carton info
res = basic_info[0]
carton_parameter_names = cfg['db_fields']['carton_dependent']
for parameter in carton_parameter_names:
setattr(self, parameter, res[parameter])
self.in_targetdb = True
if self.in_targetdb is False: # If not in targetdb still tries to get the Version info
query_version = (
Version
.select(Version.tag, Version.pk)
.where(Version.plan == self.plan).dicts()
)
if len(query_version) > 0:
ver_info = query_version[0]
self.tag = ver_info['tag']
self.version_pk = ver_info['pk']
def build_query_target(self):
"""Creates the query with the target dependet information of the carton."""
query_target = (
Car
.select(Inst.label.alias('instrument_label'), CarTar.cadence_pk.alias('cadence_pk'),
CarTar.lambda_eff, CarTar.instrument_pk.alias('instrument_pk'),
CarTar.priority, CarTar.value, Cad.label.alias('cadence_label'), Mag.g, Mag.r,
Mag.i, Mag.z, Mag.h, Mag.j, Mag.k, Mag.bp, Mag.rp, Mag.gaia_g)
.join(Version, on=(Version.pk == Car.version_pk))
.join(CarTar, on=(CarTar.carton_pk == Car.pk))
.join(Cad, 'LEFT JOIN', on=(Cad.pk == CarTar.cadence_pk))
.join(Inst, 'LEFT JOIN', CarTar.instrument_pk == Inst.pk)
.join(Mag, 'LEFT JOIN', CarTar.pk == Mag.carton_to_target_pk)
.where(Car.carton == self.carton)
.where((Version.plan == self.plan) & (Version.tag == self.tag))
)
return query_target
def return_target_dataframe(self):
"""Executes query from build_query_target and returns it in a Pandas DataFrame."""
if not self.in_targetdb:
            print(self.carton, "not in targetdb so we can't return the target dataframe")
return
target_query = self.build_query_target()
df = pd.DataFrame(list(target_query.dicts()))
return df
def assign_target_info(self, calculate_sets=True, calculate_mag_placeholders=False):
"""Assignt target dependent information for cartons in targetdb.
This function calls return_target_dataframe to get a Pandas DataFrame
with target dependent information for a carton. Then it sets different attributes
to the CartonInfo object depending on the values of calculate_sets and
calculate_mag_placeholders
Parameters
----------
calculate_sets : bool
If true this function assigns the attributes value_min, value_max,
priority_min, priority_max, cadence_pk, cadence_label, lambda_eff, instrument_pk,
and instrument_label, based on information from targetdb. It also sets the attribute
sets_calculated as True to keep record.
calculate_mag_placeholders : bool
If true this function assigns the attribute magnitude_placeholders using
check_mag_outliers function, and sets mag_placeholers_calculated=True to keep record.
magnitude_placeholres is a set with all the combination of photometric system
(SDSS, TMASS, GAIA) and mag placeholder used for that photometric system in that
carton (None, Invalid, 0.0, -9999.0, 999, 99.9).
"""
dataframe_created = False
if not self.in_targetdb:
            print('carton', self.carton, 'version_pk', self.version_pk,
                  'category_label', self.category_label, 'not found in database,',
                  "so we can't assign target info")
return
if calculate_sets:
if self.sets_calculated:
print('Sets already calculated for this carton')
else:
dataframe = self.return_target_dataframe()
dataframe_created = True
target_parameters = self.cfg['db_fields']
set_names = target_parameters['sets']
set_range_names = target_parameters['set_ranges']
for set_name in set_names:
setattr(self, set_name, main.set_or_none(dataframe[set_name]))
for set_name in set_range_names:
set_range = main.get_range(getattr(self, set_name))
setattr(self, set_name + '_min', set_range[0])
setattr(self, set_name + '_max', set_range[1])
self.sets_calculated = True
if calculate_mag_placeholders:
if self.mag_placeholders_calculated:
                print('Magnitude placeholders already calculated for this carton')
else:
if not dataframe_created:
dataframe = self.return_target_dataframe()
dataframe_created = True
bands = self.cfg['bands']
mags_names = [el for key in bands.keys() for el in bands[key]]
systems_names = [key for key in bands.keys() for el in bands[key]]
self.magnitude_placeholders = check_mag_outliers(dataframe, mags_names,
systems_names)
self.mag_placeholders_calculated = True
def check_existence(self, log, verbose=True):
"""Checks if the carton/plan/category_label from object is found in targetdb.
This function checks whether a carton exists or not in targetdb, to be used
when a list of cartons is used in process_cartons (i.e. ``origin`` rsconfig or custom)
or to check the existence of a single carton.
Parameters
----------
log : SDSSLogger
Log used to store information of cartons_inventory
verbose : bool
If true and if the carton is not found in the database the function will print
and save on log information to try to correct the input file from which the
carton/plan/category_label was taken (and stored in the object). If no carton
with that name is found in targetdb it will print the associated warning, and if
cartons with the same name but different plan or category_label are found a line
with input file format will be printed for each of those cartons so the user
can replace the line in the input file with one of the options proposed.
Returns
-------
cartons_aleternatives : Pandas DataFrame
A Pandas DataFrame that for each carton/plan/category_label combination not found
in targetdb has an entry for it and for all the alternative cartons found in targetdb
that have the same carton name but different plan or category. For each entry the
dataframe contains the columns carton, plan, category_label, stage, active, tag,
version_pk, and in_targetdb.
"""
df_data = {}
msg = ''
if self.in_targetdb is False:
colnames = ['carton', 'plan', 'category_label', 'stage',
'active', 'tag', 'version_pk', 'in_targetdb']
            # Collect the rows for the output dataframe directly in df_data.
            for colname in colnames:
                df_data[colname] = [getattr(self, colname)]
alternatives_info = (
Car
.select(Car.carton, Version.plan, Car.version_pk.alias('version_pk'),
Categ.label.alias('category_label'), Version.tag, Car.program)
.join(Version, on=(Version.pk == Car.version_pk))
.join(Categ, 'LEFT JOIN', Car.category_pk == Categ.pk)
.where(Car.carton == self.carton).dicts()
)
if len(alternatives_info) == 0:
                msg = 'Warning: Carton ' + self.carton + ' not in targetdb '\
                      'and there is no carton with that name'
else:
                msg = 'Carton ' + self.carton + ' not in targetdb; to avoid this you can '\
                      'replace the next\nline with the information that follows, '\
                      'filling in (stage) and (active) where applicable\n'
msg += '|' + self.carton.rjust(41) + ' | ' + self.plan.rjust(6) + ' | '\
+ self.category_label.rjust(20) + ' |'\
+ self.stage.rjust(6) + ' | ' + self.active.rjust(6) + ' | '\
+ '--> Replace this line\n'
for ind in range(len(alternatives_info)):
res = alternatives_info[ind]
res['stage'], res['active'] = 'N/A', 'N/A'
                    for colname in colnames[:-1]:
                        df_data[colname].append(res[colname])
                    df_data['in_targetdb'].append(True)
                    msg += '|' + res['carton'].rjust(41) + ' | ' + res['plan'].rjust(6) + ' | '\
                           + res['category_label'].rjust(20) + ' | N/A | N/A |\n'
if verbose is True and msg != '':
log.debug(msg)
print(msg)
df = pd.DataFrame(data=df_data)
return df
def visualize_content(self, log, width=140):
"""Logs and prints information from targetdb for a given carton."""
pars = cartons_inventory.config['db_fields']
log.info(' ')
log.info('#' * width)
print_centered_msg('CARTON DEPENDENT INFORMATION', width, log)
print_centered_msg(' ', width, log)
for par in ['carton'] + pars['input_dependent'] + ['in_targetdb']:
self.print_param(par, width, log)
for par in pars['carton_dependent']:
self.print_param(par, width, log)
log.info('#' * width)
if not self.in_targetdb:
print_centered_msg('Since the carton is not in targetdb', width, log)
print_centered_msg('this is all the information we can get', width, log)
log.info('#' * width)
return
        if not self.sets_calculated:
            print_centered_msg('The list of values per target parameter has', width, log)
            print_centered_msg('not been calculated for this carton; to do so', width, log)
            print_centered_msg('first run assign_target_info on this carton', width, log)
            print_centered_msg('using calculate_sets=True (default)', width, log)
log.info('#' * width)
else:
print_centered_msg('VALUES PER TARGET DEPENDENT PARAMETER', width, log)
print_centered_msg(' ', width, log)
for par in [el for el in pars['sets'] if el not in pars['set_ranges']]:
self.print_param(par, width, log)
for par in pars['set_ranges']:
self.print_range(par, width, log)
log.info('#' * width)
        if not self.mag_placeholders_calculated:
            print_centered_msg('The list of mag placeholders for each photometric', width, log)
            print_centered_msg('system has not been calculated for this carton yet;', width, log)
            print_centered_msg('to do so first run assign_target_info on this carton', width, log)
            print_centered_msg('using calculate_mag_placeholders=True (not default)', width, log)
log.info('#' * width)
else:
print_centered_msg('MAGNITUDE PLACEHOLDERS PER PHOTOMETRIC SYSTEM', width, log)
print_centered_msg(' ', width, log)
self.print_param('magnitude_placeholders', width, log)
log.info('#' * width)
def print_param(self, par, width, log):
"""logs a message with width=width containing a parameter from carton object."""
log.info('### ' + par + ': ' + str(getattr(self, par)).ljust(width - len(par) - 10) +
' ###')
def print_range(self, par, width, log):
"""logs a message with width=width containing the range of a parameter from the carton."""
left_msg = str(getattr(self, par + '_min'))
right_msg = str(getattr(self, par + '_max'))
log.info('### ' + par + ' range: ' + left_msg + ' to ' + right_msg +
' ' * (width - len(left_msg) - len(right_msg) - len(par) - 20) + ' ###')
def print_centered_msg(st, width, log):
"""Logs and prints string st with width=width in the log"""
left = round((width - len(st) - 7) / 2.0)
right = width - len(st) - 7 - left
log.info('###' + ' ' * left + st + ' ' * right + ' ###')
def gets_carton_info(carton_list_filename, header_length=1, delimiter='|'):
"""Get the necessary information from the input carton list file."""
cat = np.loadtxt(carton_list_filename, dtype='str',
skiprows=header_length, delimiter=delimiter)
cartons = [str.strip(cat[ind, 1]) for ind in range(len(cat))]
plans = [str.strip(cat[ind, 2]) for ind in range(len(cat))]
categories = [str.strip(cat[ind, 3]) for ind in range(len(cat))]
stages = [str.strip(cat[ind, 4]) for ind in range(len(cat))]
actives = [str.strip(cat[ind, 5]) for ind in range(len(cat))]
return cartons, plans, categories, stages, actives
def check_mag_outliers(datafr, bands, systems):
"""Returns a list with all the types of outliers found for each photometric system.
Parameters
----------
datafr : Pandas DataFrame
Containing the magnitudes from different photometric systems for the stars in a
given carton.
bands : strings list
Containing the bands to search each belonging to a given photometric system.
    systems : strings list
        Photometric system to which each listed band belongs. The options are
        'SDSS', 'TMASS', and 'GAIA'. The system to which a band belongs is defined by the
        index of the band in the list (i.e. band[ind] belongs to systems[ind])
Returns
-------
placeholders : set
A set of strings where each string starts with the photometric system,
then an underscore and finally the type of magnitude outlier that at least one
magnitude of the corresponding system has.
        The types of outliers are: None (for empty entries), Invalid (for NaN's and
        infinite values), and <<Number>> (for values brighter than -9, dimmer than 50,
        or equal to zero); in the latter case the number itself is returned as the outlier
        type. For example, if a carton contains stars with h=999.9, k=999.9, j=None, and
        bp=Inf, this function will return {'TMASS_999.9', 'TMASS_None', 'GAIA_Invalid'}.
"""
out_bands, out_systems = [], []
for ind_band in range(len(bands)):
maglist = datafr[bands[ind_band]]
nonempty_maglist = [el for el in maglist if el is not None]
magarr_filled = np.array(nonempty_maglist)
ind_valid = np.where(np.isfinite(magarr_filled))[0]
magarr_valid = magarr_filled[ind_valid]
ind_out = np.where((magarr_valid < -9) | (magarr_valid > 50) | (magarr_valid == 0))[0]
out_band = list(set([str(magarr_valid[indice]) for indice in ind_out]))
if len(maglist) > len(nonempty_maglist):
out_band.append('None')
if len(magarr_filled) > len(magarr_valid):
out_band.append('Invalid')
n_out = len(out_band)
out_bands = out_bands + out_band
out_systems = out_systems + [systems[ind_band]] * n_out
out = main.set_or_none([out_systems[idx] + '_' + out_bands[idx]
for idx in range(len(out_bands))])
return out
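
# Example for check_mag_outliers (a hypothetical sketch): for a carton dataframe
# df with 2MASS and Gaia bands, check_mag_outliers(df, ['h', 'j', 'bp'],
# ['TMASS', 'TMASS', 'GAIA']) could return a set such as {'TMASS_None',
# 'GAIA_Invalid'}, or whatever main.set_or_none yields when no outliers exist.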
def process_cartons(origin='rsconfig', files_folder='./files/', inputname=None,
delim='|', check_exists=False, verb=False, return_objects=False,
write_input=False, write_output=False, assign_sets=False,
assign_placeholders=False, visualize=False, overwrite=False,
all_cartons=False, cartons_name_pattern=None, versions='latest',
forced_versions=None, unique_version=None):
"""Get targetdb information for list of cartons or selection criteria and outputs .csv file.
Takes as input a file with a list of cartons from rsconfig (origin=``rsconfig``)
or custom (origin=``custom``) or a selection criteria to be applied on targetdb
(origin=``targetdb) in which case an input list file can also be created
(with write_input=True) for future use.
This function can be used to check the existence of the cartons (check_exist=True)
in which case it returns a dataframe with the alternative cartons information, or
it can be used to call assign_target_info to get the targetdb information of all
the cartons (check_exists=False) and store it in a .csv file and/or return the
CartonInfo objects.
The function also has provides the option of logging and printing the targetdb information
from all the cartons in a human readable way by using visualize=True.
Parameters
----------
origin : str
``rsconfig`` to use input list file from rsconfig, ``custom`` to use custom input list
of carton or ``targetdb`` to look for cartons in targetdb based on the ``all_cartons``,
``cartons_name_pattern``, ``versions``, ``forced_versions``, and ``unique_versions``
parameters.
files_folder : str
Main folder where input and output files would be stored. In this folder subfolders
rsconfig, custom, and targetdb are expected.
inputname : str or None
Name of input file to be searched in <<files_folder>>/<<origin>> folder
delim : str
Delimiter character to use when creating output .csv file
    check_exists : bool
        If true and origin is rsconfig or custom, the function looks for alternatives to
        cartons that exist in targetdb but have different values of plan or category_label
        than the carton object. In this case the function returns a dataframe with the
        original carton versions not found and the alternatives, and exits the function.
verb : bool
If True function logs and prints alternatives to replace the input lines
corresponding to carton/plan/category_label combinations not found in targetdb with lines
corresponding to the same carton but with existing plan/category_label combinations.
return_objects : bool
If True the function returns the CartonInfo objects.
write_input : bool
If True the function writes a file to be used then as input by
process_cartons with the cartons retrieved by the targetdb query.
write_output : bool
If True the function creates an output .csv file with the
information of each CartonInfo object.
assign_sets : bool
If True assign_target_info assigns the attributes for target dependent parameters for the
carton. For each parameter returns a python set with all the values present in the carton
targets or the range spanned by them.
assign_placeholders : bool
If True assign_target_info assigns magnitude placeholders found in targetdb for each
photometric system (SDSS, TMASS, GAIA) for each carton using check_mag_outliers.
visualize : bool
If True we log and print all the information found in targetdb for each carton in a human
readable way
overwrite : bool
If True enables that inputfile like and output file could be overwritten.
    all_cartons : bool
        If True and origin=targetdb, cartons with any name are taken from targetdb.
    cartons_name_pattern : str or None
        If set and origin=targetdb, only cartons whose name matches cartons_name_pattern
        are taken from targetdb. The string uses the * character as a wildcard.
    versions : str
        If origin=targetdb, sets the versions that will be taken for each carton name.
        If ``single``, only versions matching ``unique_version`` will be taken; if ``latest``,
        only the latest version of each carton will be taken; if ``all``, all versions of each
        carton are taken.
forced_versions: dict or None
If present, and origin=targetdb all cartons in this dictionary are forced to consider
only the version in the dictionary corresponding value, independent on the ``versions``
value.
    unique_version : Int or None
        If present, origin=targetdb, and versions=single, then only this version_pk will be
        considered for each carton.

    Returns
    -------
    output : Pandas DataFrame, list of CartonInfo, or None
        The alternatives dataframe when check_exists=True, the list of CartonInfo objects
        when return_objects=True, and None otherwise.
    """
cfg = cartons_inventory.config
# Check that we have a valid origin parameter
assert origin in ['targetdb', 'rsconfig', 'custom'], f'{origin!r} is not a valid'\
' option for origin parameter'
fullfolder = files_folder + origin + '/'
# If an input file is used check that it exists and that we are not trying to overwrite it
if origin in ['rsconfig', 'custom']:
assert write_input is False, 'write_input=True only available for origin=\'targetdb\''
assert inputname is not None, f'for origin={origin!r} an inputname has to be provided'
inputread_filename = fullfolder + inputname
        assert os.path.isfile(inputread_filename), 'file: ' + \
            os.path.realpath(inputread_filename) + '\n' + f' required for origin={origin!r}'\
            f' and inputname={inputname!r} but file doesn\'t exist'
outputbase_filename = fullfolder + 'Info_' + inputname.replace('.txt', '')
if origin == 'targetdb':
# First check if the input arguments are valid
        assert check_exists is False, 'check_exists=True option only valid for origin '\
            '\'rsconfig\' or \'custom\''
assert versions in ['latest', 'all', 'single'], f'{versions!r} is not a valid option'\
' for versions parameter'
assert forced_versions is None or type(forced_versions) == dict, 'if used, '\
f'forced_versions has to be type=dict not type={type(forced_versions)}'
        assert all_cartons is True or cartons_name_pattern is not None, 'cartons_name_pattern'\
            ' needed when all_cartons=False (e.g. cartons_name_pattern=\'bhm_rm_*\')'
assert versions != 'single' or type(unique_version) == int, 'If versions=\'single\' then'\
' unique version has to be an integer'
assert write_input is True or write_output is False, 'To create an output file'\
' an input file has to be created as well to help keep record'
# Then I calculate the base name for input and output files based on selection criteria
if all_cartons is True:
basename = 'Cartons_all'
if all_cartons is False:
basename = 'Cartons_sample'
        # ``versions`` can only be 'latest', 'all', or 'single' (asserted above).
        if versions != 'single':
            basename += '_Versions_' + versions
        else:
            basename += '_Version_' + str(unique_version)
if forced_versions is not None:
basename += '_and_forced'
inputwrite_filename = fullfolder + basename + '.txt'
outputbase_filename = fullfolder + 'Info_' + basename
if write_input is True and overwrite is False:
assert not os.path.isfile(inputwrite_filename), 'input file '\
f'{os.path.realpath(inputwrite_filename)}\n already exists and overwrite=False'
# If write_output set the final output_filename and check overwritting
if write_output is True:
        assert assign_sets is True or assign_placeholders is True, 'to create an output .csv '\
            'at least one of assign_sets or assign_placeholders has to be True'
if assign_sets is True and assign_placeholders is False:
output_filename = outputbase_filename + '_sets.csv'
if assign_sets is False and assign_placeholders is True:
            output_filename = outputbase_filename + '_magplaceholders.csv'
if assign_sets is True and assign_placeholders is True:
output_filename = outputbase_filename + '_all.csv'
if overwrite is False:
assert not os.path.isfile(output_filename), 'output file '\
f'{os.path.realpath(output_filename)}\n already exists and overwrite=False'
if origin in ['rsconfig', 'custom']:
cartons, plans, categories, stages, actives = gets_carton_info(inputread_filename)
if origin == 'targetdb':
if all_cartons is True:
pattern = '%%'
if all_cartons is False:
pattern = cartons_name_pattern.replace('*', '%')
cartons_list = (
Car
.select(Car.carton, Version.pk.alias('version_pk'), Version.plan,
Categ.label.alias('category_label'))
.join(Version, on=(Version.pk == Car.version_pk))
.join(Categ, 'LEFT JOIN', Car.category_pk == Categ.pk)
.where(Car.carton ** pattern)
.dicts()
)
# Here we look for the basic information of each carton/plan/category_label
# available in targetdb to then instantiate the objects with that information
# For each carton name we calculate the version_pk(s) that match the selection criteria
# according to the value of ``versions`` parameter (single, all, latest) and override
# the value if carton is present in forced_versions dictionary.
cart_results = pd.DataFrame(cartons_list)
cartons_unique = np.sort(list(set(cart_results['carton'])))
all_indices = []
for name in cartons_unique:
indcart = np.where(cart_results['carton'] == name)[0]
if forced_versions and name in forced_versions.keys():
inds = np.where((cart_results['carton'] == name) &
(cart_results['version_pk'] == forced_versions[name]))[0]
elif versions == 'single':
inds = np.where((cart_results['carton'] == name) &
(cart_results['version_pk'] == unique_version))[0]
elif versions == 'all':
inds = indcart
elif versions == 'latest':
max_version = np.max(cart_results['version_pk'][indcart])
inds = np.where((cart_results['carton'] == name) &
(cart_results['version_pk'] == max_version))[0]
all_indices += list(inds)
assert len(all_indices) > 0, 'There are no carton/version_pk pairs matching the selection'\
' criteria used'
carts_sel = cart_results.iloc[all_indices]
cartons = carts_sel['carton'].values.tolist()
plans = carts_sel['plan'].values.tolist()
categories = carts_sel['category_label'].values.tolist()
stages, actives = ['N/A'] * len(carts_sel), ['N/A'] * len(carts_sel)
# Here we start the corresponding log based on the origin, assign_sets,
# and assign_placeholders value
log.start_file_logger(f'./logs/origin_{origin}_sets_{assign_sets}'
f'_mags_{assign_placeholders}.log')
log.info('#' * 60)
print_centered_msg('STARTING CODE EXECUTION', 60, log)
log.info('#' * 60)
log.info('Ran process_cartons using the following arguments')
signature = inspect.signature(process_cartons)
# First thing we log is the parameters used in process_cartons function
for param in signature.parameters.keys():
arg = locals()[param]
log.info(f'{param}={arg}')
log.info(' ')
# Here we write an input-like file if requested
if origin == 'targetdb' and write_input is True:
data = np.transpose([cartons, plans, categories, stages, actives])
ascii.write(data, inputwrite_filename, format='fixed_width',
names=['carton', 'plan', 'category', 'stage', 'active'],
overwrite=overwrite)
log.info(f'Wrote file {inputwrite_filename}')
# If write_output then we prepare the .csv writer
if write_output is True:
fields = cfg['db_fields']
f = open(output_filename, 'w')
writer = csv.writer(f, delimiter=delim)
columns = ['carton'] + fields['input_dependent'] + fields['carton_dependent']
if assign_sets is True:
new_cols = [x for x in fields['sets'] if x not in fields['set_ranges']]
columns += new_cols
for col in fields['set_ranges']:
columns += [col + '_min', col + '_max']
if assign_placeholders is True:
columns += ['magnitude_placeholders']
writer.writerow(columns)
# Here we start the actual processing of the cartons
objects, diffs = [], []
for index in range(len(cartons)):
# First we instantiate the CartonInfo objects with the information we have
obj = CartonInfo(cartons[index], plans[index], categories[index],
stages[index], actives[index])
# If check_exists we run check_existence on the cartons and return the diff dataframe
if check_exists is True:
output = None
diff = obj.check_existence(log, verbose=verb)
if len(diff) > 0:
diffs.append(diff)
if index == len(cartons) - 1:
log.info('Ran check_existence to compare input file '
f'{inputname} with targetdb content')
if len(diffs) > 0:
output = pd.concat(diffs)
return output
continue
if obj.in_targetdb is False:
            log.debug(f'carton={obj.carton} plan={obj.plan} version_pk={obj.version_pk} '
                      f'category={obj.category_label} not found in targetdb')
# Here we assign sets and or mag placeholders info based on input arguments
# And we visualize and write in output .csv if it corresponds
if obj.in_targetdb is True:
if assign_sets is True or assign_placeholders is True:
obj.assign_target_info(calculate_sets=assign_sets,
calculate_mag_placeholders=assign_placeholders)
objects.append(obj)
log.info(f'Ran assign_target_info on carton {obj.carton}')
else:
objects.append(obj)
                log.info(f'Appending object for carton {obj.carton} '
                         'but without running assign_target_info')
if visualize is True:
obj.visualize_content(log)
if write_output is True:
curr_info = [getattr(obj, attr) for attr in columns]
writer.writerow(curr_info)
log.info(f'wrote row to output csv for carton={obj.carton}'
f' ({index + 1}/{len(cartons)})')
if write_output is True:
f.close()
log.info(f'Saved output file={output_filename}')
if return_objects is True:
return objects
| 2.796875
| 3
|
shadon/testsToken.py
|
subbc/devops_jkweb
| 0
|
12778228
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from shadon.tsetsHttp import testsHttp
from shadon.testsConfig import testsConfig
import os
class testsToken():
def __init__(self):
self.url = '/oauth/authorizationServer/accessToken'
self.mytestsConfig = testsConfig()
self.mytestsConfig.getConfig()
self.path = os.path.dirname(__file__) + "/../config/" + self.mytestsConfig.env + "/"
self.grant='client_credentials'
pass
    def setGrant(self, grant):
        global localgrant
        localgrant = grant
        # Remove any cached token so the next getToken() refreshes it with the
        # new grant type (the original existence check here was inverted).
        if os.path.exists(self.path + 'token.txt'):
            os.remove(self.path + 'token.txt')
    def getToken(self):
        global apiToken
        if not os.path.exists(self.path + 'token.txt'):
            self.setToken(localgrant)
        file = open(self.path + 'token.txt', 'r')
        value = file.read()
        apiToken = eval(value)
        file.close()
        return apiToken
def setToken(self,grant):
myhttp = testsHttp()
myhttp.set_url(self.url)
self.data = {"grant_type": "client_credentials", "client_id": self.mytestsConfig.client_id,"client_secret": self.mytestsConfig.client_secret}
if grant == 'password':
self.mytestsConfig.grant_type = self.mytestsConfig.getFile('password', 'grant_type')
self.mytestsConfig.username = self.mytestsConfig.getFile('password', 'username')
self.mytestsConfig.password = self.mytestsConfig.getFile('password', 'password')
self.data = {"grant_type": "password", "client_id": self.mytestsConfig.client_id,"client_secret": self.mytestsConfig.client_secret,"username":self.mytestsConfig.username,"password":self.mytestsConfig.password}
myhttp.set_data(self.data)
        tokenInfo = myhttp.post().json()
        # Create the cache directory if it does not exist
        if not os.path.exists(self.path):
            os.makedirs(self.path)
        # Write the token data to the cache file
        file = open(self.path + 'token.txt', 'w')
        file.write(str(tokenInfo))
        file.close()
if __name__ == "__main__":
shadon = testsToken()
shadon.setToken('<PASSWORD>')
print(shadon.getToken())
| 2.265625
| 2
|
finchan/__main__.py
|
msgroup/finchan
| 3
|
12778229
|
<reponame>msgroup/finchan
# -*- coding: utf-8 -*-
# This file is part of finchan.
# Copyright (C) 2017-present qytz <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import asyncio
import logging
import logging.config
import click
import uvloop
from .env import Env
from .exts import ExtManager
from .options import load_configs
from .dispatcher import get_dispatcher
@click.command()
@click.option(
"-v", "--verbose", count=True, help="Count output level, can set multipule times."
)
@click.option("-c", "--config", help="Specify config file.")
def main(verbose=0, config=None):
"""Console script for finchan
Copyright (C) 2017-present qytz <<EMAIL>>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Project url: https://github.com/qytz/finchan
"""
env = Env()
# asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
env.verbose = verbose
if not config:
# first find the configs in current directory
conf_path = ".finchan_configs"
if not os.path.exists(conf_path):
            # then use the final default one
conf_path = os.path.expanduser("~/.finchan/configs")
else:
conf_path = config
try:
env.options = load_configs(conf_path)
except (SyntaxError, TypeError) as e:
print("Parse configure file failed, please check: %s" % e)
return
work_dir = os.path.expanduser(env.options.get("work_dir", "~/.finchan"))
os.makedirs(work_dir, exist_ok=True)
os.makedirs(os.path.join(work_dir, "logs"), exist_ok=True)
os.chdir(work_dir)
env.set_work_dir(work_dir)
log_config = env.options.get("log_config", {})
# patch the log filter parameters
if "filters" in log_config and "finchan" in log_config["filters"]:
log_config["filters"]["finchan"]["env"] = env
logging.config.dictConfig(log_config)
root_logger = logging.getLogger()
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter("%(asctime)s %(tracktime)s %(levelname)-8s %(message)s")
)
if verbose > 0:
handler.setLevel("DEBUG")
else:
handler.setLevel("INFO")
root_logger.addHandler(handler)
root_logger.info("Run in %s mode", env.options["run_mode"])
if env.options["run_mode"] == "backtrack":
env.run_mode = "backtrack"
exts = env.options.get("enabled_backtrack_exts", [])
else:
env.run_mode = "live_track"
exts = env.options.get("enabled_live_exts", [])
dispatcher = get_dispatcher(env)
env.set_dispatcher(dispatcher)
extm_args = env.options["ext_manager"]
if not extm_args:
extm_args = {}
ext_manager = ExtManager(env, **extm_args)
env.set_ext_manager(ext_manager)
env.load_exts(exts)
return env.run()
if __name__ == "__main__":
main()
| 1.679688
| 2
|
scripts/test_velocity.py
|
done-jithinlal/ubiquity_motor
| 0
|
12778230
|
<reponame>done-jithinlal/ubiquity_motor
#!/usr/bin/env python
# VELOCITY can be positive (driving forward) or negative (driving backward)
VELOCITY = 0.2
# Initial turn angle (Z axis)
ANGLE = 0.0
import rospy
from geometry_msgs.msg import Twist,Point
from nav_msgs.msg import Odometry
rospy.init_node('slow_motion', anonymous=True)
last_t = None
last_pos = None
print ("""-------------------------------------------------------------
Odometer consistency check
-------------------------------------------------------------
""")
def odometry_callback(msg):
# Calculate velocity error (%)
global last_t
global last_pos
now = rospy.Time.now().to_sec()
cur_pos = msg.pose.pose.position
if last_pos:
pos_distance = ((cur_pos.x-last_pos.x)**2 + (cur_pos.y-last_pos.y)**2)**0.5
t_distance = VELOCITY*(now-last_t)
print "Velocity error: {}%".format(round(abs(msg.twist.twist.linear.x-VELOCITY)/VELOCITY*100,2)),\
" Position error: {}%".format(round(abs(pos_distance-t_distance)/t_distance*100,2))
last_pos = cur_pos
last_t = now
def slow_motion():
pub = rospy.Publisher('cmd_vel', Twist, queue_size=10)
rospy.Subscriber('odom',Odometry,odometry_callback)
vel_msg = Twist()
initial = True
vel_msg.linear.x = VELOCITY
vel_msg.angular.z = ANGLE
while not rospy.is_shutdown():
# Publish the velocity
pub.publish(vel_msg)
if initial:
vel_msg.angular.z = 0.0
initial = False
rospy.sleep(0.05)
if __name__ == '__main__':
if abs(VELOCITY)<0.000001:
print ("VELOCITY must be different from zero")
else:
try:
slow_motion()
except rospy.ROSInterruptException:
pass
| 2.75
| 3
|
quickq/model.py
|
valleau-lab/quickq
| 0
|
12778231
|
<gh_stars>0
"""Keras based deepchem DNN model.
The native deepchem DNN has been changed to pytorch, and tensorflow is
desired. Here we create a Deepchem simple dense Neural network.
"""
import os
from typing import Iterable, Union, List
import deepchem.models
import deepchem.data
import numpy
import tensorflow.keras as ks
import tensorflow as tf
try:
    from collections.abc import Sequence as SequenceCollection
except ImportError:
    from collections import Sequence as SequenceCollection
class DCDNN(deepchem.models.KerasModel):
"""Adapted from deepchem RobustMultitaskRegressor.
Parameters
----------
n_features : int
size of feature vector
    layer_sizes : iterable
        Neuron counts for the DNN. The length of the iterable determines the
        layer count, and the values the number of neurons in each of those
        layers. Alternative to specifying neuron and layer count.
    neuron_count : int
        Number of neurons in each hidden layer, alternative to specifying layer_sizes
    layer_count : int
        Number of layers with neuron_count, alternative to specifying layer_sizes
    weight_init_stddevs : iterable of float or float
Standard deviation of random weight initialization for each or all
layers
bias_init_consts : iterable of float or float
value of bias initialization for each or all layers
weight_decay_penalty : float
Value of weight regularization
weight_decay_penalty_type : str
Type of regularization eg. "l2"
dropouts : iterable of float or float
Dropout rates to use for each or all layers.
activation_fns : iterable of callable or callable
tensorflow activation functions to use for each or all layers.
"""
def __init__(
self,
n_features: int,
layer_sizes: Union[List[int], int] = None,
neuron_count: int = None,
layer_count: int = None,
weight_init_stddevs: Union[List[float], float] = 0.02,
bias_init_consts: Union[List[float], float] = 1.0,
weight_decay_penalty: float = 0.0,
weight_decay_penalty_type: str = "l2",
dropouts: Union[List[float], float] = 0.0,
activation_fns: Union[List[callable], callable] = tf.nn.relu,
**kwargs
):
if layer_sizes is not None:
assert neuron_count is None, 'Cannot specify both layer_sizes and neuron_count.'
assert layer_count is None, 'Cannot specify both layer_sizes and layer_count.'
else:
if neuron_count is None or layer_count is None:
raise TypeError(
'Must specify neuron and layer count if layer_sizes not specified.'
)
layer_sizes = [neuron_count]*layer_count
self.n_features = n_features
n_layers = len(layer_sizes)
if not isinstance(weight_init_stddevs, SequenceCollection):
weight_init_stddevs = [weight_init_stddevs] * n_layers
if not isinstance(bias_init_consts, SequenceCollection):
bias_init_consts = [bias_init_consts] * n_layers
if not isinstance(dropouts, SequenceCollection):
dropouts = [dropouts] * n_layers
if not isinstance(activation_fns, SequenceCollection) or type(activation_fns) == str:
activation_fns = [activation_fns] * n_layers
if weight_decay_penalty != 0.0:
if weight_decay_penalty_type == 'l1':
regularizer = ks.regularizers.l1(weight_decay_penalty)
else:
regularizer = ks.regularizers.l2(weight_decay_penalty)
else:
regularizer = None
def build():
# begin with the input
features = ks.Input(shape=(n_features,))
prev_layer = features
# add the DNN layers
for size, weight_stddev, bias_const, dropout, activation_fn in zip(
layer_sizes, weight_init_stddevs, bias_init_consts, dropouts,
activation_fns
):
if size == 0:
continue
layer = ks.layers.Dense(
size,
activation=activation_fn,
kernel_initializer=ks.initializers.TruncatedNormal(
stddev=weight_stddev
),
bias_initializer=tf.constant_initializer(value=bias_const),
kernel_regularizer=regularizer
)(prev_layer)
if dropout > 0.0:
layer = ks.layers.Dropout(rate=dropout)(layer)
prev_layer = layer
# add the output layer
output = ks.layers.Dense(1)(prev_layer)
model = ks.Model(inputs=features, outputs=output)
return model
model = build()
# init the deepchem wrapper
super().__init__(
model, deepchem.models.losses.L2Loss(), output_types=['prediction'], **kwargs
)
return
def default_generator(
self,
dataset: deepchem.data.Dataset,
epochs: int = 1,
mode: str = 'fit',
deterministic: bool = True,
pad_batches: bool = False
):
"""Default data generator for the model.
Wraps the dataset iterbatches to produce data of the correct form for
this class.
Parameters
----------
dataset : deepchem.data.Dataset
dataset to iterate
epochs : int
Number of passes through the data
mode : str
ignored
deterministic : bool, default True
Whether to produce deterministic target values
pad_batches : bool, default False
Whether to pad the last batch.
"""
for epoch in range(epochs):
for (X_b, y_b, w_b, ids_b) in dataset.iterbatches(
batch_size=self.batch_size,
deterministic=deterministic,
pad_batches=pad_batches
):
yield ([X_b], [y_b], [w_b])
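
# A minimal usage sketch (hyperparameter values are illustrative only):
#   model = DCDNN(n_features=1024, neuron_count=128, layer_count=3,
#                 dropouts=0.1, learning_rate=1e-3)
#   model.fit(train_dataset, nb_epoch=10)
# where train_dataset is a deepchem.data.Dataset of featurized samples and
# learning_rate is forwarded to deepchem's KerasModel via **kwargs.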
| 2.78125
| 3
|
src/cacofonix/main.py
|
jonathanj/cacofonix
| 5
|
12778232
|
<filename>src/cacofonix/main.py
import click
import datetime
from fs import open_fs
from collections import OrderedDict
from typing import Optional, List, Tuple, TextIO
from . import _yaml
from ._app import Application
from ._cli import (
iso8601date,
validate_fragment_type,
validate_section,
split_issues,
compose_interactive,
guess_version)
from ._prompt import (
print_formatted_yaml_text,
prompt_confirm)
from ._config import Config
from ._util import (
pluralize,
string_escape)
from ._effects import make_effects
from ._log import setup_logging
pass_app = click.make_pass_decorator(Application)
@click.group()
@click.option('--dry-run', '-n', 'dry_run',
is_flag=True,
default=False,
help='''Perform a dry run.''')
@click.option('--log-level',
default='ERROR',
type=click.Choice([
'DEBUG',
'INFO',
'WARNING',
'ERROR']))
@click.option('--config',
required=True,
type=click.File())
@click.version_option()
@click.pass_context
def cli(ctx, config: TextIO, dry_run: bool, log_level: str):
"""
Compose and compile change fragments into changelogs.
New changes will be integrated into an existing changelog.
"""
setup_logging(log_level)
root_fs = open_fs('.')
config = Config.parse(config)
ctx.obj = Application(
config=config,
effects=make_effects(root_fs, config, dry_run))
if dry_run:
echo_warning('Performing a dry run, no changes will be made!')
@cli.command()
@pass_app
def list_types(app: Application):
"""
List known fragment types.
"""
for fragment_type in app.config.available_fragment_types():
if fragment_type:
echo_out(fragment_type)
@cli.command()
@pass_app
def list_sections(app: Application):
"""
List known sections.
"""
for section in app.config.available_sections():
if section:
echo_out(section)
@cli.command()
@pass_app
def list_versions(app: Application):
"""
List all versions tracked by this tool.
"""
for version in app.known_versions():
echo = echo_warning_out if version.prerelease else echo_out
echo(str(version))
@cli.command()
@click.option('-t', '--type', 'fragment_type',
callback=validate_fragment_type,
help='Fragment type, should match a value from `list-types`')
@click.option('-s', '--section',
callback=validate_section,
help='Section type, should match a value from `list-sections`')
@click.option('-i', '--issue', 'issues',
multiple=True,
callback=split_issues,
help='''Related issue, should be formatted as issue_number or
issue_number:issue_url, can be specified multiple times''')
@click.option('-f', '--feature-flag', 'feature_flags',
multiple=True,
help='Required feature flag, can be specified multiple times')
@click.option('-d', '--description',
help='Description of the change')
@click.option('--edit',
is_flag=True,
default=None,
help='Complete the changelog fragment in EDITOR')
@click.option('--interactive / --no-interactive',
is_flag=True,
help='Complete the changelog fragment interactively')
@pass_app
def compose(app: Application, interactive: bool, **kw):
"""
Compose a new change fragment.
Preset values can be given as options with the unspecified value being
completed interactively or via a text editor.
"""
def _validate(yaml_text):
try:
app.validate_fragment_text(yaml_text)
return True
except Exception as e:
echo_error('Oops! There was a problem with your change data.')
echo(str(e))
return False
def _compose(fragment_type: str,
section: Optional[str],
issues: List[Tuple[str, str]],
feature_flags: List[str],
description: str,
edit: bool):
change_fragment_data = OrderedDict([
('type', fragment_type),
('section', section),
('issues', dict(issues)),
('feature_flags', list(feature_flags)),
('description', _yaml.literal_str(
string_escape(description or ''))),
])
yaml_text = _yaml.dump(change_fragment_data)
echo_info('\nOkay, this is your change:\n')
print_formatted_yaml_text(yaml_text)
edit = kw.get('edit')
if interactive:
if edit is None:
edit = prompt_confirm('Open it in your editor?')
else:
if not _validate(yaml_text):
raise SystemExit(2)
if edit:
while True:
yaml_text = click.edit(
yaml_text,
require_save=False,
extension='.yaml')
if not yaml_text:
echo_error('Aborting composition!')
raise SystemExit(2)
if _validate(yaml_text):
break
else:
if not prompt_confirm('Open it in your editor?'):
raise SystemExit(2)
else:
continue
fragment_filename = app.create_new_fragment(yaml_text)
echo_success('Wrote new fragment {}'.format(fragment_filename))
if interactive:
config = app.config
kw = compose_interactive(
available_sections=config.available_sections(),
available_fragment_types=config.available_fragment_types(),
**kw)
return _compose(**kw)
@cli.command()
@click.option('--draft',
is_flag=True,
help='Do not perform any permanent actions.')
@click.option('--version', 'project_version',
default=None,
callback=guess_version,
help='Version to stamp in the changelog.')
@click.option('--date', 'version_date',
callback=iso8601date,
help='ISO8601 date for the changelog, defaults to today.')
@click.option('--archive / --no-archive', 'archive_fragments',
is_flag=True,
default=None,
help='Archive fragments after writing a new changelog.')
@click.option('--confirm / --no-confirm', 'confirm_write',
is_flag=True,
default=True,
help='Confirm before writing the changelog')
@pass_app
def compile(app: Application,
draft: bool,
project_version: Tuple[Optional[str], str],
version_date: datetime.date,
archive_fragments: Optional[bool],
confirm_write: bool):
"""
Compile change fragments into a changelog.
The existing changelog will be updated with the new changes, and the old
change fragments discarded.
"""
version_guess, version_number = project_version
if version_guess is not None:
echo('Guessed version {} via {}'.format(
version_number, version_guess))
new_fragments = list(app.find_new_fragments())
with open_fs('temp://') as tmp_fs:
n = len(app.compile_fragment_files(tmp_fs, new_fragments))
echo('Found {} new changelog fragments'.format(n))
changelog = app.render_changelog(
fs=tmp_fs,
version=version_number,
version_date=version_date)
if draft:
echo_info(
'Showing a draft changelog -- no changes will be made!\n')
echo_out(changelog)
return
echo_info('This is the changelog to be added:\n')
echo_out(changelog)
if confirm_write:
if not prompt_confirm('Merge this with the existing changelog?'):
echo_info('Aborting at user request')
raise SystemExit(2)
app.merge_with_existing_changelog(changelog)
echo_success('Wrote changelog.')
if n:
if archive_fragments is None:
archive_fragments = prompt_confirm(
'Archive {} {}?'.format(
n, pluralize(n, 'fragment', 'fragments')),
default=True)
if archive_fragments:
n, not_removed = app.archive_fragments(
found_fragments=new_fragments,
version=version_number,
version_date=version_date,
version_author=app.effects.git_user())
if not_removed:
echo_error('Could not archive the following:')
for name in not_removed:
echo(name)
else:
echo_info(
'Archived {} {} as version {}.'.format(
n,
pluralize(n, 'fragment', 'fragments'),
version_number))
def echo_partial(**kw):
"""
Partially applied version of `click.secho`.
"""
return lambda msg: click.secho(msg, **kw)
echo = echo_partial(err=True)
echo_out = echo_partial()
echo_error = echo_partial(fg='red', err=True)
echo_info = echo_partial(fg='yellow', err=True)
echo_warning = echo_partial(fg='bright_yellow', err=True)
echo_warning_out = echo_partial(fg='bright_yellow')
echo_success = echo_partial(fg='green', err=True)
def main():
cli()
if __name__ == '__main__':
main()
| 2.140625
| 2
|
taskobra/orm/relationships/system_component.py
|
manistal/taskobra
| 0
|
12778233
|
<reponame>manistal/taskobra
# Libraries
from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy.orm import relationship
# Taskobra
from taskobra.orm.base import ORMBase
class SystemComponent(ORMBase):
__tablename__ = "SystemComponent"
system_id = Column(Integer, ForeignKey("System.unique_id"), primary_key=True)
component_id = Column(Integer, ForeignKey("Component.unique_id"), primary_key=True)
count = Column(Integer, default=1)
_system = relationship("System")
_component = relationship("Component")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if "count" not in kwargs:
self.count = 1
@property
def component(self):
return self._component
@component.setter
def component(self, component: "taskobra.orm.Component"):
del self.component
self._component = component
self._component.system_components.append(self)
@component.deleter
def component(self):
if self._component:
self._component.system_components.remove(self)
del self._component
@property
def system(self):
return self._system
@system.setter
def system(self, system: "taskobra.orm.System"):
del self.system
self._system = system
self._system.system_components.append(self)
@system.deleter
def system(self):
if self._system:
self._system.system_components.remove(self)
del self._system
def __repr__(self):
return f"<SystemComponent({self.system.name}: {self.component})>"
| 2.453125
| 2
|
helpers/team_manipulator.py
|
enterstudio/the-blue-alliance
| 0
|
12778234
|
import logging
from google.appengine.api import search
from helpers.cache_clearer import CacheClearer
from helpers.location_helper import LocationHelper
from helpers.manipulator_base import ManipulatorBase
from helpers.search_helper import SearchHelper
class TeamManipulator(ManipulatorBase):
"""
Handle Team database writes.
"""
@classmethod
def getCacheKeysAndControllers(cls, affected_refs):
return CacheClearer.get_team_cache_keys_and_controllers(affected_refs)
@classmethod
def postDeleteHook(cls, teams):
        """
        To run after the team has been deleted.
        """
for team in teams:
SearchHelper.remove_team_location_index(team)
@classmethod
def postUpdateHook(cls, teams, updated_attr_list, is_new_list):
"""
To run after models have been updated
"""
for (team, updated_attrs) in zip(teams, updated_attr_list):
# Disabled due to unreliability. 2017-01-24 -fangeugene
# try:
# LocationHelper.update_team_location(team)
# except Exception, e:
# logging.error("update_team_location for {} errored!".format(team.key.id()))
# logging.exception(e)
try:
SearchHelper.update_team_location_index(team)
except Exception, e:
logging.error("update_team_location_index for {} errored!".format(team.key.id()))
logging.exception(e)
cls.createOrUpdate(teams, run_post_update_hook=False)
@classmethod
    def updateMerge(cls, new_team, old_team, auto_union=True):
"""
Given an "old" and a "new" Team object, replace the fields in the
"old" team that are present in the "new" team, but keep fields from
the "old" team that are null in the "new" team.
"""
attrs = [
"city",
"state_prov",
"country",
"postalcode",
"normalized_location", # Overwrite whole thing as one
"name",
"nickname",
"website",
"rookie_year",
"motto",
]
for attr in attrs:
if getattr(new_team, attr) is not None:
if getattr(new_team, attr) != getattr(old_team, attr):
setattr(old_team, attr, getattr(new_team, attr))
old_team.dirty = True
# Take the new tpid and tpid_year iff the year is newer than or equal to the old one
if (new_team.first_tpid_year is not None and new_team.first_tpid_year >= old_team.first_tpid_year):
old_team.first_tpid_year = new_team.first_tpid_year
old_team.first_tpid = new_team.first_tpid
old_team.dirty = True
return old_team
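# Illustrative example of the merge semantics above (explanatory note): if
# new_team.city is None the old city is kept, while a differing
# new_team.nickname overwrites the old one and sets old_team.dirty = True.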
| 2.140625
| 2
|
api/applications/tests/tests_create_application.py
|
django-doctor/lite-api
| 3
|
12778235
|
from parameterized import parameterized
from rest_framework import status
from rest_framework.reverse import reverse
from api.applications.enums import (
ApplicationExportType,
ApplicationExportLicenceOfficialType,
GoodsTypeCategory,
)
from api.applications.models import (
StandardApplication,
OpenApplication,
HmrcQuery,
BaseApplication,
ExhibitionClearanceApplication,
GiftingClearanceApplication,
F680ClearanceApplication,
)
from api.cases.enums import CaseTypeEnum, CaseTypeReferenceEnum
from lite_content.lite_api import strings
from api.staticdata.trade_control.enums import TradeControlActivity, TradeControlProductCategory
from test_helpers.clients import DataTestClient
class DraftTests(DataTestClient):
url = reverse("applications:applications")
def test_create_draft_standard_individual_export_application_successful(self):
"""
Ensure we can create a new standard individual export application draft
"""
data = {
"name": "Test",
"application_type": CaseTypeReferenceEnum.SIEL,
"export_type": ApplicationExportType.TEMPORARY,
"have_you_been_informed": ApplicationExportLicenceOfficialType.YES,
"reference_number_on_information_form": "123",
}
response = self.client.post(self.url, data, **self.exporter_headers)
response_data = response.json()
standard_application = StandardApplication.objects.get()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response_data["id"], str(standard_application.id))
self.assertEqual(StandardApplication.objects.count(), 1)
def test_create_draft_exhibition_clearance_application_successful(self):
"""
Ensure we can create a new Exhibition Clearance draft object
"""
self.assertEqual(ExhibitionClearanceApplication.objects.count(), 0)
data = {
"name": "Test",
"application_type": CaseTypeReferenceEnum.EXHC,
}
response = self.client.post(self.url, data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(ExhibitionClearanceApplication.objects.count(), 1)
def test_create_draft_gifting_clearance_application_successful(self):
"""
        Ensure we can create a new Gifting Clearance draft object
"""
self.assertEqual(GiftingClearanceApplication.objects.count(), 0)
data = {
"name": "Test",
"application_type": CaseTypeReferenceEnum.GIFT,
}
response = self.client.post(self.url, data, **self.exporter_headers)
application = GiftingClearanceApplication.objects.get()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(GiftingClearanceApplication.objects.count(), 1)
self.assertEqual(application.name, data["name"])
self.assertEqual(application.case_type.id, CaseTypeEnum.GIFTING.id)
def test_create_draft_f680_clearance_application_successful(self):
"""
        Ensure we can create a new F680 Clearance draft object
"""
self.assertEqual(F680ClearanceApplication.objects.count(), 0)
data = {
"name": "Test",
"application_type": CaseTypeReferenceEnum.F680,
}
response = self.client.post(self.url, data, **self.exporter_headers)
application = F680ClearanceApplication.objects.get()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(F680ClearanceApplication.objects.count(), 1)
self.assertEqual(application.name, data["name"])
self.assertEqual(application.case_type.id, CaseTypeEnum.F680.id)
def test_create_draft_open_application_successful(self):
"""
Ensure we can create a new open application draft object
"""
data = {
"name": "Test",
"application_type": CaseTypeReferenceEnum.OIEL,
"export_type": ApplicationExportType.TEMPORARY,
"goodstype_category": GoodsTypeCategory.MILITARY,
"contains_firearm_goods": True,
}
response = self.client.post(self.url, data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(OpenApplication.objects.count(), 1)
def test_create_draft_hmrc_query_successful(self):
"""
Ensure we can create a new HMRC query draft object
"""
data = {
"name": "Test",
"application_type": CaseTypeReferenceEnum.CRE,
"organisation": self.organisation.id,
}
response = self.client.post(self.url, data, **self.hmrc_exporter_headers)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(HmrcQuery.objects.count(), 1)
def test_create_draft_hmrc_query_failure(self):
"""
Ensure that a normal exporter cannot create an HMRC query
"""
data = {
"application_type": CaseTypeReferenceEnum.CRE,
"organisation": self.organisation.id,
}
response = self.client.post(self.url, data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(HmrcQuery.objects.count(), 0)
@parameterized.expand(
[
[{}],
[{"application_type": CaseTypeReferenceEnum.SIEL, "export_type": ApplicationExportType.TEMPORARY}],
[{"name": "Test", "export_type": ApplicationExportType.TEMPORARY}],
[{"name": "Test", "application_type": CaseTypeReferenceEnum.SIEL}],
[{"application_type": CaseTypeReferenceEnum.EXHC}],
[{"name": "Test"}],
]
)
def test_create_draft_failure(self, data):
"""
Ensure we cannot create a new draft object with POST data that is missing required properties
Applications require: application_type, export_type & name
Exhibition clearances require: application_type & name
Above is a mixture of invalid combinations for these cases
"""
response = self.client.post(self.url, data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(BaseApplication.objects.count(), 0)
def test_create_no_application_type_failure(self):
"""
Ensure that we cannot create a new application without
        providing an application_type.
"""
data = {}
response = self.client.post(self.url, data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
response.json()["errors"]["application_type"][0], strings.Applications.Generic.SELECT_A_LICENCE_TYPE
)
@parameterized.expand(
[(CaseTypeEnum.SICL.reference, StandardApplication), (CaseTypeEnum.OICL.reference, OpenApplication)]
)
def test_trade_control_application(self, case_type, model):
data = {
"name": "Test",
"application_type": case_type,
"trade_control_activity": TradeControlActivity.OTHER,
"trade_control_activity_other": "other activity type",
"trade_control_product_categories": [key for key, _ in TradeControlProductCategory.choices],
}
response = self.client.post(self.url, data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
application_id = response.json()["id"]
application = model.objects.get(id=application_id)
self.assertEqual(application.trade_control_activity, data["trade_control_activity"])
self.assertEqual(application.trade_control_activity_other, data["trade_control_activity_other"])
self.assertEqual(
set(application.trade_control_product_categories), set(data["trade_control_product_categories"])
)
@parameterized.expand(
[
(
CaseTypeEnum.SICL.reference,
"trade_control_activity",
strings.Applications.Generic.TRADE_CONTROL_ACTIVITY_ERROR,
),
(
CaseTypeEnum.SICL.reference,
"trade_control_activity_other",
strings.Applications.Generic.TRADE_CONTROL_ACTIVITY_OTHER_ERROR,
),
(
CaseTypeEnum.SICL.reference,
"trade_control_product_categories",
strings.Applications.Generic.TRADE_CONTROl_PRODUCT_CATEGORY_ERROR,
),
(
CaseTypeEnum.OICL.reference,
"trade_control_activity",
strings.Applications.Generic.TRADE_CONTROL_ACTIVITY_ERROR,
),
(
CaseTypeEnum.OICL.reference,
"trade_control_activity_other",
strings.Applications.Generic.TRADE_CONTROL_ACTIVITY_OTHER_ERROR,
),
(
CaseTypeEnum.OICL.reference,
"trade_control_product_categories",
strings.Applications.Generic.TRADE_CONTROl_PRODUCT_CATEGORY_ERROR,
),
]
)
def test_trade_control_application_failure(self, case_type, missing_field, expected_error):
data = {
"name": "Test",
"application_type": case_type,
"trade_control_activity": TradeControlActivity.OTHER,
"trade_control_activity_other": "other activity type",
"trade_control_product_categories": [key for key, _ in TradeControlProductCategory.choices],
}
data.pop(missing_field)
response = self.client.post(self.url, data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
errors = response.json()["errors"]
self.assertEqual(errors[missing_field], [expected_error])
| 2.296875
| 2
|
run_terminate_appstream_fleet_autoscale.py
|
HardBoiledSmith/johanna
| 64
|
12778236
|
<reponame>HardBoiledSmith/johanna<filename>run_terminate_appstream_fleet_autoscale.py
#!/usr/bin/env python3
from env import env
from run_common import AWSCli
from run_common import print_message
from run_common import print_session
options, args = dict(), list()
if __name__ == "__main__":
from run_common import parse_args
options, args = parse_args()
def terminate_appstream_fleet_autoscale(settings):
aws_cli = AWSCli(settings['AWS_REGION'])
fleet_name = settings['FLEET_NAME']
print_message(f'terminate fleet autoscale for: {fleet_name}')
fleet_path = f"fleet/{settings['FLEET_NAME']}"
cc = ['cloudwatch', 'delete-alarms']
cc += ['--alarm-names', 'scale-out-utilization-policy']
aws_cli.run(cc, ignore_error=True)
cc = ['cloudwatch', 'delete-alarms']
cc += ['--alarm-names', 'scale-in-utilization-policy']
aws_cli.run(cc, ignore_error=True)
cc = ['application-autoscaling', 'deregister-scalable-target']
cc += ['--service-namespace', 'appstream']
cc += ['--resource-id', fleet_path]
cc += ['--scalable-dimension', 'appstream:fleet:DesiredCapacity']
aws_cli.run(cc, ignore_error=True)
################################################################################
#
# start
#
################################################################################
print_session('terminate appstream autoscaling setting for stack & fleet')
appstream = env['appstream']
target_name = None
region = options.get('region')
is_target_exists = False
if len(args) > 1:
target_name = args[1]
for settings in appstream.get('STACK', list()):
if target_name and settings['NAME'] != target_name:
continue
if region and settings['AWS_REGION'] != region:
continue
is_target_exists = True
terminate_appstream_fleet_autoscale(settings)
if is_target_exists is False:
mm = list()
if target_name:
mm.append(target_name)
if region:
mm.append(region)
mm = ' in '.join(mm)
print(f'appstream autoscale: {mm} is not found in config.json')
| 2.34375
| 2
|
lcm/lcm/nf_pm/serializers/create_thresho_id_request.py
|
onap/vfc-gvnfm-vnflcm
| 1
|
12778237
|
<reponame>onap/vfc-gvnfm-vnflcm
# Copyright (c) 2019, CMCC Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rest_framework import serializers
from lcm.nf_pm.counst import THRESHOLDTYPE
class ThresholdCriteriaSerializer(serializers.Serializer):
performanceMetric = serializers.CharField(help_text="Defines the performance metric associated with the"
"threshold, as specified in an external measurement"
"specification.", required=True, allow_null=False)
thresholdType = serializers.ChoiceField(help_text="Type of threshold", required=True, allow_null=False,
choices=THRESHOLDTYPE)
simpleThresholdDetails = serializers.CharField(help_text="Details of a simple threshold",
required=False, allow_null=True)
class CreateThresholdRequestSerializer(serializers.Serializer):
objectInstanceId = serializers.CharField(help_text="Identifier of the VNF instance associated with this "
"threshold.", required=True, allow_null=False)
criteria = ThresholdCriteriaSerializer(help_text="Criteria that define this threshold.",
required=True, allow_null=False)
| 1.765625
| 2
|
sphere_SA_population/sphere_SA_population.py
|
trevorgokey/misc
| 0
|
12778238
|
<filename>sphere_SA_population/sphere_SA_population.py<gh_stars>0
#!/usr/bin/env python3
import numpy as np
from matplotlib import animation
from matplotlib import rc
import matplotlib.pyplot as plt
def cart2sph(x, y, z):
hxy = np.hypot(x, y)
r = np.hypot(hxy, z)
phi = np.arctan2(y, x)
theta = np.arctan2(hxy,z)
return np.array([r,theta,phi])
def sph2cart(r,theta,phi):
rsin_theta = r * np.sin(theta)
x = rsin_theta * np.cos(phi)
y = rsin_theta * np.sin(phi)
z = r * np.cos(theta)
return np.array([x, y, z])
def read_coordinates(filename):
if type(filename) == str:
return np.loadtxt(filename)
return np.vstack([np.loadtxt(f) for f in filename])
def update_vis(i,self):
batch=self.batch_size
iterations=self.iterations
polar = self.polar
N = self.N
hit = self.hit
theta,phi = np.random.random((batch,2)).T
theta = np.arccos(1 - 2*theta)
phi = self.phi_min + (self.phi_max - self.phi_min)*phi
for (t,p) in zip(theta,phi):
d = self.__class__.arclen(t,p,polar[1],polar[2], self.shell_radius)
if (d < self.point_radius).any():
hit += 1
if self.verbose:
XYZ = sph2cart(self.shell_radius,t,p)
self.hitout.write("H {:8.3f} {:8.3f} {:8.3f}\n".format(*XYZ))
x,y = self.__class__.cart_project_onto_disc(
np.atleast_2d(XYZ), self.visual_2d_clip)
self.hitx_data.extend(x)
self.hity_data.extend(y)
else:
XYZ = sph2cart(self.shell_radius,t,p)
if self.verbose:
x,y = self.__class__.cart_project_onto_disc(
np.atleast_2d(XYZ), self.visual_2d_clip)
self.missout.write("H {:8.3f} {:8.3f} {:8.3f}\n".format(*XYZ))
self.missx_data.extend(x)
self.missy_data.extend(y)
self.hitout.flush()
self.missout.flush()
self.vis_hit.set_data(self.hitx_data, self.hity_data)
self.vis_miss.set_data(self.missx_data, self.missy_data)
self.ax.set_xlim(
min(self.hitx_data + self.missx_data),
max(self.hitx_data + self.missx_data)
)
self.ax.set_ylim(
min(self.hity_data + self.missy_data),
max(self.hity_data + self.missy_data)
)
N += batch
outstr = "r={:8.2f} {:12.8f} N={:d} hit%={:10.6e} iter={:8d}/{:8d}\n"
if not self.quiet:
print(outstr.format(
self.shell_radius,
hit/N * 4*np.pi*self.shell_radius,
N,
hit/N,
i,
iterations),
end='')
self.N = N
self.hit = hit
return [self.vis_hit, self.vis_miss]
class SphereSAPopulation:
def __init__(self, crd, **kwargs):
"""
"""
self.visual=False
self.visual_2d_clip=10.0
        self.quiet=True
        self.verbose=False
self.batch_size = 1
self.iterations=10000
self.crd = crd
self.theta_min = 0
self.theta_max = np.pi
self.phi_min = 0.0
self.phi_max = 2.0*np.pi
self.point_radius=1.0
self.shell_radius=1.0
for k,v in kwargs.items():
if v is not None:
self.__dict__[k] = v
if not self.quiet:
print(self.__dict__)
@staticmethod
def arclen(t, p, data_theta, data_phi, r):
central_angle = np.arccos(
np.cos(data_theta)*np.cos(t) +
np.sin(data_theta)*np.sin(t)*np.cos(abs(p - data_phi)))
d = r * central_angle
return d
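    # Explanatory note: arclen applies the spherical law of cosines. For the
    # sample point (t, p) and a data point (t2, p2) on a sphere of radius r,
    # the central angle is
    #     sigma = arccos(cos(t)*cos(t2) + sin(t)*sin(t2)*cos(|p - p2|))
    # and the arc length is r * sigma; theta is the polar angle measured from
    # +z, matching cart2sph above.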
def run(self):
if self.visual:
return self.run_visual()
polar = cart2sph(*self.crd.T)
batch=self.batch_size
iterations=self.iterations
hit = 0
i = 0
N = 0
while i < iterations:
theta,phi = np.random.random((batch,2)).T
theta = np.arccos(1 - 2*theta)
phi = self.phi_min + (self.phi_max - self.phi_min)*phi
for (t,p) in zip(theta,phi):
d = __class__.arclen(t,p,polar[1],polar[2], self.shell_radius)
if (d < self.point_radius).any():
hit += 1
N += batch
i += 1
outstr = "r={:8.2f} {:12.8f} N={:d} hit%={:10.6e} iter={:8d}/{:8d}\n"
if not self.quiet:
print(outstr.format(
self.shell_radius,
hit/N * 4*np.pi*self.shell_radius,
N,
hit/N,
i,
iterations),
end='')
print(outstr.format(
self.shell_radius,
hit/N * 4*np.pi*self.shell_radius,
N,
hit/N,
i,
iterations),
end='')
@staticmethod
def cart_project_onto_disc(crd, clip=10.0):
x = crd[:,0] / (1-crd[:,2])
y = crd[:,1] / (1-crd[:,2])
mag = np.sqrt((x**2 + y**2))
maxmag = clip
mask = mag > maxmag
x[mask] = x[mask] / mag[mask] * maxmag
y[mask] = y[mask] / mag[mask] * maxmag
return x,y
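    # Explanatory note: cart_project_onto_disc is a stereographic projection
    # from the north pole -- a unit-sphere point (x, y, z) maps to
    # (x/(1-z), y/(1-z)) in the plane. Points whose projected radius exceeds
    # `clip` are pulled back onto the clip circle so the 2D plot stays bounded.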
def run_visual(self):
self.fig = plt.figure(figsize=(10, 10), dpi=120)
rc("font", **{"size": 12})
self.ax = self.fig.add_subplot(111)
self.missx_data = []
self.missy_data = []
self.hitx_data = []
self.hity_data = []
if self.verbose:
self.hitout = open('hit.xyz','w')
self.missout = open('miss.xyz','w')
self.vis_miss = self.ax.plot([], [], 'r.', ms=1)[0]
# self.vis_miss = self.ax.scatter([0], [0], ',', ms=1, c='r')[0]
self.vis_hit = self.ax.plot([], [], 'g.', ms=5)[0]
#self.crd /= np.atleast_2d(np.linalg.norm(self.crd,axis=1)*self.shell_radius).T
x,y = self.cart_project_onto_disc(self.crd, self.visual_2d_clip)
self.vis_data = self.ax.plot(x,y, 'k,', ms=1.0,alpha=.5)[0]
#ax.set_ylim(-20, 20)
self.polar = cart2sph(*self.crd.T)
self.hit = 0
self.N = 0
update = 1
ani = animation.FuncAnimation(self.fig,
update_vis, fargs=(self,),
interval=update, blit=False, frames=self.iterations, repeat=False)
plt.show()
if not self.quiet:
print("Press any key to abort")
input()
if self.verbose:
self.hitout.close()
self.missout.close()
def main():
import argparse
parser = argparse.ArgumentParser(description='MC integration of a spherical shell')
parser.add_argument(
'filename',
metavar='filename',
type=str,
nargs='+',
help='input filename containing coordinates'
)
#parser.add_argument('--theta-min', type=float)
#parser.add_argument('--theta-max', type=float)
#parser.add_argument('--phi-min', type=float)
#parser.add_argument('--phi-max', type=float)
parser.add_argument('--point-radius', type=float)
parser.add_argument('--shell-radius', type=float)
parser.add_argument('--iterations', type=int)
parser.add_argument('--batch-size', type=int)
parser.add_argument('--visual', action="store_true")
parser.add_argument('--quiet', action="store_true")
parser.add_argument('--verbose', action="store_true")
    parser.add_argument('--visual-2d-clip', type=float)
args = parser.parse_args()
crd = read_coordinates(args.filename)
obj = SphereSAPopulation( crd, **args.__dict__)
obj.run()
if __name__ == "__main__":
main()
| 2.703125
| 3
|
hummingbot/strategy/dev_0_hello_world/start.py
|
cardosofede/hummingbot
| 542
|
12778239
|
<reponame>cardosofede/hummingbot
#!/usr/bin/env python
from hummingbot.strategy.dev_0_hello_world.dev_0_hello_world_config_map import dev_0_hello_world_config_map
from hummingbot.strategy.dev_0_hello_world import HelloWorldStrategy
def start(self):
try:
exchange = dev_0_hello_world_config_map.get("exchange").value.lower()
trading_pair = dev_0_hello_world_config_map.get("trading_pair").value
asset = dev_0_hello_world_config_map.get("asset").value
self._initialize_markets([(exchange, [trading_pair])])
exchange = self.markets[exchange]
self.strategy = HelloWorldStrategy(exchange=exchange,
trading_pair=trading_pair,
asset=asset,
)
except Exception as e:
self.notify(str(e))
self.logger().error("Unknown error during initialization.", exc_info=True)
| 2.28125
| 2
|
Cimple_Compiler.py
|
Triantafullenia-Doumani/Cimple-Compiler
| 0
|
12778240
|
# <NAME> 4191
# <NAME> 4052
import sys
SINGLE_TOKENS_LIST = [",", ";", "+", "-", "*", "/", ")", "(", "[", "]", "{", "}", ">", "<", "="]
VARLIST = []
AUTO = [
[4, 3, 5, 5, 5, 2, 5, 5, 5, 5, 5, 5, 5, 0, 7, 8, 5, 5, 5],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 6, 1],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 6, 6],
[-1, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
[4, 4, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2],
[4, 3, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 5],
[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
[5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 5, 5, 5, 5, 5],
[5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 5, 5, 5, 5],
]
class Entities:
    def __init__(self, name, value, parMode, offset, type, startQuad, framelength):
        self.name = name
        self.type = type
        self.offset = offset
        self.startQuad = startQuad
        self.framelength = framelength
        self.value = value
        self.parMode = parMode
        self.arguments = []  # per-instance list of argument modes
class Scope:
    def __init__(self, nestingLevel):
        self.nestingLevel = nestingLevel
        self.entity = []  # per-instance entity list
        self.offset = 12
class Arguments:
def __init__(self, parMode, varType):
self.parMode = parMode
self.varType = varType
class Buffers:
def __init__(self, string_buffer, counter, temp, filename, state):
self.word_buffer = string_buffer
self.charType = string_buffer
self.char_buffer = string_buffer
self.assigment_buffer = string_buffer
self.temp_counter = counter
self.T1_place = temp
self.input_file_name = filename
self.state = state
class Quads:
def __init__(self):
self.quad_list = []
self.quad_list_for_c = []
class Flag:
def __init__(self, flag):
self.sub = flag
self.program_includes_fun_or_prod = flag
def main(argv):
global BUFFERS
global QUADS
global FLAG
global input
global tokenType
global tokenString
global line
global scopes
global level
tokenType = ""
tokenString = ""
scopes = []
line = 1
level = 0
BUFFERS = Buffers("", 0, "T_0", argv[0], 0)
QUADS = Quads()
FLAG = Flag(0)
input = open(argv[0], "r")
program()
###################################### INTERMEDIATE CODE #########################################
def create_int_file():
int_file_name = BUFFERS.input_file_name.replace(".ci", ".int")
int_file = open(int_file_name, "w")
for quad in QUADS.quad_list:
int_file.write(str(quad) + "\n")
int_file.close()
def create_c_file():
c_file_name = BUFFERS.input_file_name.replace(".ci", ".c")
c_file = open(c_file_name, "w")
c_file.write("#include <stdio.h> \n")
c_file.write("\nvoid main() \n{\n")
c_file.write("int ")
var_len = len(VARLIST)
for var in range(var_len - 1):
c_file.write(str(VARLIST[var]) + ",")
if len(VARLIST) > 1:
c_file.write(str(VARLIST[-1]) + "; \n")
for line in QUADS.quad_list_for_c:
c_file.write(str(line))
c_file.write("}")
c_file.close()
# returns the number of the next quad
def nextquad():
return len(QUADS.quad_list)
# generates a new quad
def genquad(op, x, y, z):
label = nextquad()
new_quad = [label, op, x, y, z]
QUADS.quad_list.append(new_quad)
return new_quad
# creates and returns a new temporary variable
# the temporary variables are of the form T_1, T_2, T_3 ...
def newtemp():
# def __init__(self, name, value, parMode, offset, type, startQuad, framelength):
new_temp = 'T_%s' % BUFFERS.temp_counter
addNewTempVar(new_temp)
BUFFERS.temp_counter += 1
return new_temp
# creates a blank list of labels
def emptylist():
new_quad = ["_", "_", "_", "_", "_"]
return new_quad
# creates a list of labels containing only x
def makelist(x):
new_quad = [x, "_", "_", "_", "_"]
return new_quad
# creates a list of labels from the merge of list 1 and list 2
def merge(list1, list2):
new_list = list1 + list2
return new_list
# the list consists of indices in quads whose last end is not is completed
# The backpatch visits these quads one by one and completes them with the z tag
def backpatch(pointers_list, z):
for i in pointers_list:
for q in range(1, len(QUADS.quad_list)):
if (QUADS.quad_list[q][0] == i):
QUADS.quad_list[q][4] = z
def backpatch_c(z):
for x in range(len(QUADS.quad_list_for_c)):
string = str(QUADS.quad_list_for_c[x])
QUADS.quad_list_for_c[x] = string.replace("null", str(z))
def addNewScope():
    global level
    scopes.append(Scope(len(scopes))) #maria
    level = len(scopes)
def addNewVar(name):
ent = Entities(name,None,None,scopes[-1].offset, "Var",None,None)
scopes[-1].entity.append(ent)
scopes[-1].offset += 4
#obj = scopes[-1]
#print(obj.entity[-1].name," ",obj.entity[-1].type, " ",obj.entity[-1].offset)
def addNewTempVar(name):
ent = Entities(name,None,None,scopes[-1].offset,"tempVar",None,None)
scopes[-1].entity.append(ent)
scopes[-1].offset += 4
#obj = scopes[-1]
#print(obj.entity[-1].name," ",obj.entity[-1].type, " ",obj.entity[-1].offset)
def addNewPar(name,parMode):
ent = Entities(name,None,parMode,scopes[-1].offset, "Par",None,None)
scopes[-1].entity.append(ent)
scopes[-1].offset += 4
#obj = scopes[-1]
#print(obj.entity[-1].name," ",obj.entity[-1].type, " ",obj.entity[-1].offset," ",obj.entity[-1].parMode)
def addNewFunction(name):
ent = Entities(name,None,None,None,"Function",None,None)
scopes[-1].entity.append(ent)
def addArgument(parMode):
scopes[-2].entity[-1].arguments.append(parMode)
obj = scopes[-2]
print(obj.entity[-1].name," ",obj.entity[-1].type, " ",obj.entity[-1].offset," ",obj.entity[-1].parMode)
def removeScope():
global level
print("Scope : " , level,"\n")
obj = scopes[-1]
for ent in obj.entity:
if(ent.type == "Var"):
print(ent.name," ",ent.type, " ",ent.offset)
elif(ent.type == "tempVar"):
print(ent.name," ",ent.type, " ",ent.offset)
elif(ent.type == "Par"):
print(ent.name," ",ent.type, " ",ent.offset," ",ent.parMode)
elif(ent.type == "Function" or ent.type == "procedure"):
print(ent.name," ",ent.type, " ",ent.startQuad)
print("\n")
scopes.pop(level - 1)
level = level - 1
###################################### GRAMMAR ANALYSIS #########################################
def program():
global tokenType
global program_name
lex()
if (tokenString == "program"):
lex()
addNewScope()
if (tokenType == "idtk"):
program_name = tokenString
block()
else:
print("Syntax Error line: " + str(line) + "\nProgram name expected")
exit()
else:
print("Syntax Error line: " + str(line) + "\nThe keyword ' program' expected")
exit()
def block():
lex()
declarations()
subprograms()
statements()
def declarations():
if (tokenType == "BracesOpentk"):
lex()
while (tokenString == "declare"):
while (1):
lex()
if (tokenType == "idtk"):
if (tokenString not in VARLIST):
VARLIST.append(tokenString)
addNewVar(tokenString)
else:
print("ERROR line: " + str(line) + "\nYou can't declare the same id multiple times" + tokenString)
exit()
else:
print("Syntax Error in line: " + str(
line) + "\nExpected ID not " + tokenType + " ( " + tokenString + " )")
exit()
lex()
if (tokenType == "commatk"):
continue
if (tokenType == "semicolontk"):
lex()
break
else:
print("Syntax Error in line: " + str(line) + "\nWrong syntax of declaration")
exit()
continue
def subprograms():
global BUFFERS
if (tokenType == "BracesOpentk" and FLAG.sub == 0):
genquad("begin_block", program_name, "_", "_")
line_c = "L_" + str(len(QUADS.quad_list) - 1) + ":\n"
QUADS.quad_list_for_c.append(line_c)
FLAG.sub = 1
elif (tokenType == "BracesOpentk"):
lex()
while (tokenString == "function" or tokenString == "procedure"):
FLAG.program_includes_fun_or_prod = 1
FLAG.sub = 1
lex()
if (tokenType != "idtk"):
print("Syntax Error in line: " + str(
line) + "\nExpected ID not " + tokenType + " ( " + tokenString + " ) after function/procedure")
exit()
function_name = tokenString
addNewFunction(function_name) #maria
addNewScope()
genquad("begin_block", function_name, "_", "_")
line_c = "L_" + str(len(QUADS.quad_list) - 1)
QUADS.quad_list_for_c.append(line_c)
lex()
if (tokenType != "ParenthesesOpentk"):
print("Syntax Error in line: " + str(line) + "\nExpected to open Parentheses")
exit()
formalparlist()
scopes[-2].entity[-1].startQuad = len(QUADS.quad_list) #maria
block()
removeScope()
genquad("end_block", function_name, "_", "_")
FLAG.sub = 0
if (tokenType == "BracesOpentk" and FLAG.sub == 0):
genquad("begin_block", program_name, "_", "_")
line_c = "L_" + str(len(QUADS.quad_list) - 1) + ":\n"
QUADS.quad_list_for_c.append(line_c)
FLAG.sub = 1
# 1 or more statements
def statements():
if (tokenType == "BracesOpentk"):
lex()
while (1):
if (tokenType == "BracesOpentk"):
lex()
while (1):
statem = tokenString
statement()
if (tokenType == "BracesClosetk"):
lex()
if (tokenType != "semicolontk"):
print("Syntax Error in line: " + str(
line) + "\nStatement " + statem + " must finish with semicolon not " + tokenString)
exit()
break
else:
continue
else:
statem = tokenString
if statement():
# if(tokenType == "semicolontk"):
# print("Syntax Error in line: " + str(line) + " Duplicate semicolon \n")
# exit()
break
if (tokenString == "}"):
lex()
return
# one statement
def statement():
if (tokenString == 'if'):
ifStat()
if (tokenString == "while"):
whileStat()
elif (tokenString == "switchcase"):
switchcaseStat()
elif (tokenString == "forcase"):
forcaseStat()
elif (tokenString == "incase"):
incaseStat()
elif (tokenString == "call"):
callStat()
elif (tokenString == "return"):
returnStat()
elif (tokenString == "input"):
inputStat()
elif (tokenString == "print"):
printStat()
elif (tokenType == "idtk"):
assignStat()
else:
return 1
def incaseStat():
lex()
w = newtemp()
iquad = nextquad()
# genquad(":=","1","_",w)
# active_case_flag = 1
while (tokenString == "case"):
lex()
if (tokenType == "ParenthesesOpentk"):
lex()
C_place = condition()
backpatch_c(nextquad() + 1)
genquad("jump", "_", "_", "_")
line_c = "L_" + str(len(QUADS.quad_list) - 1) + ": goto L_null ; // ( jump,_,_,null )\n"
QUADS.quad_list_for_c.append(line_c)
if (tokenType == "ParenthesesClosetk"):
lex()
backpatch(C_place[1], nextquad())
# genquad(":=","0","_",w)
statements()
# line_c = "L_"+str(len(QUADS.quad_list) -1)+": if "+str(w)+" == 0 goto L_"+str(iquad)+" // ( jump,_,_,"+str(iquad)+")\n"
# QUADS.quad_list_for_c.append(line_c)
backpatch(C_place[0], nextquad())
backpatch_c(nextquad())
else:
print("Syntax Error in line: " + str(
line) + "\nExpected ')' to close the expression in IncaseStat() not " + tokenString)
exit()
else:
print("Syntax Error in line: " + str(line) + "\nExpected '(' after ID in IncaseStat() not " + tokenString)
exit()
if (check_statement_to_finish_with_semicolon() == 0):
print("Syntax Error in line: " + str(line) + "\nStatement incase must finish with semicolon not " + tokenString)
exit()
genquad("=", w, "0", iquad)
line_c = "L_" + str(len(QUADS.quad_list) - 1) + ": if " + str(w) + " == 0 goto L_" + str(
iquad) + "; // ( jump,_,_," + str(iquad) + ") \n"
QUADS.quad_list_for_c.append(line_c)
def forcaseStat():
lex()
fquad = nextquad()
while (tokenString == "case"):
lex()
if (tokenType == "ParenthesesOpentk"):
lex()
C_place = condition()
backpatch_c(nextquad() + 1)
genquad("jump", "_", "_", "_")
line_c = "L_" + str(len(QUADS.quad_list) - 1) + ": goto L_null ; // ( jump,_,_,null )\n"
QUADS.quad_list_for_c.append(line_c)
if (tokenType != "ParenthesesClosetk"):
print("Syntax Error in line: " + str(
line) + "\nExpected ')'' to close the expression in forcaseStat() not " + tokenString)
exit()
else:
lex()
backpatch(C_place[1], nextquad())
statements()
genquad("jump", "_", "_", fquad)
line_c = "L_" + str(len(QUADS.quad_list) - 1) + ": goto L_" + str(fquad) + " ; // ( jump,_,_," + str(
fquad) + ")\n"
QUADS.quad_list_for_c.append(line_c)
backpatch(C_place[0], nextquad())
backpatch_c(nextquad())
else:
print("Syntax Error in line: " + str(line) + "\nExpected '('' to open case not " + tokenString)
exit()
if (tokenString != "default"):
print("Syntax Error in line: " + str(line) + "\nYou must have 'default' case at forcase")
exit()
lex()
statements()
if (check_statement_to_finish_with_semicolon() == 0):
print(
"Syntax Error in line: " + str(line) + "\nStatement forcase must finish with semicolon not " + tokenString)
exit()
def switchcaseStat():
lex()
exit_list = emptylist()
pointers_list = []
while (tokenString == "case"):
lex()
if (tokenType == "ParenthesesOpentk"):
lex()
C_place = condition()
backpatch_c(nextquad() + 1)
genquad("jump", "_", "_", "_")
line_c = "L_" + str(len(QUADS.quad_list) - 1) + ": goto L_null ; // ( jump,_,_,null )\n"
QUADS.quad_list_for_c.append(line_c)
if (tokenType != "ParenthesesClosetk"):
print("Syntax Error in line: " + str(
line) + "\nExpected ')'' to close the expression in switchcaseStat() not " + tokenString)
exit()
else:
lex()
backpatch(C_place[1], nextquad())
statements()
e = makelist(nextquad())
genquad("jump", "_", "_", "_")
line_c = "L_" + str(len(QUADS.quad_list) - 1) + ": goto L_null //( jump,_,_, null)\n"
QUADS.quad_list_for_c.append(line_c)
exit_list = merge(exit_list, e)
backpatch(C_place[0], nextquad())
# backpatch_c(nextquad())
else:
print("Syntax Error in line: " + str(line) + "\nExpected '('' to open case not " + tokenString)
exit()
if (tokenString != "default"):
print("Syntax Error in line: " + str(line) + "\nYou must have 'default' case at switchcase")
exit()
lex()
statement()
backpatch(exit_list, nextquad())
backpatch_c(nextquad())
# while statement
def whileStat():
lex()
pointers_list = []
bquad = nextquad()
if (tokenType == "ParenthesesOpentk"):
lex()
C_place = condition()
backpatch_c(nextquad() + 1)
genquad("jump", "_", "_", "_")
line_c = "L_" + str(len(QUADS.quad_list) - 1) + ": goto L_null ; // ( jump,_,_,null )\n"
QUADS.quad_list_for_c.append(line_c)
if (tokenType != "ParenthesesClosetk"):
print("Syntax Error in line: " + str(
line) + "\nExpected ')'' to close the expression in WhileStat() not " + tokenString)
exit()
else:
lex()
print(C_place[1] + "ddd")
backpatch(C_place[1], nextquad())
statements()
backpatch_c(nextquad() + 1)
else:
print("Syntax Error in line: " + str(line) + "\nExpected '('' after ID in whileStat() not " + tokenString)
exit()
if (check_statement_to_finish_with_semicolon() == 0):
print("Syntax Error in line: " + str(line) + "\nStatement while must finish with semicolon not " + tokenString)
exit()
genquad("jump", "_", "_", bquad)
backpatch(C_place[0], nextquad())
line_c = "L_" + str(len(QUADS.quad_list) - 1) + ": goto L_" + str(bquad) + " ; // ( jump,_,_," + str(bquad) + ")\n"
QUADS.quad_list_for_c.append(line_c)
# assignment statement
def assignStat():
ID = tokenString
lex()
if (tokenType != "assignmenttk"):
print("Syntax Error in line: " + str(line) + "\nWrong syntax of assignment")
exit()
lex()
E_place = expression()
if (check_statement_to_finish_with_semicolon() == 0):
print("Syntax Error in line: " + str(
line) + "\nStatement assignment must finish with semicolon not " + tokenString)
exit()
genquad(":=", ID, "_", E_place)
line_c = "L_" + str(len(QUADS.quad_list) - 1) + ": " + str(ID) + " = " + str(E_place) + " ; // (:= ," + str(
ID) + ",_," + str(E_place) + ")\n"
QUADS.quad_list_for_c.append(line_c)
# if statement
def ifStat():
lex()
if (tokenType == "ParenthesesOpentk"):
lex()
C_place = condition()
backpatch_c(nextquad() + 1)
genquad("jump", "_", "_", "_")
line_c = "L_" + str(len(QUADS.quad_list) - 1) + ": goto L_null ; // ( jump,_,_,null )\n"
QUADS.quad_list_for_c.append(line_c)
if (tokenType != "ParenthesesClosetk"):
print("Syntax Error in line: " + str(
line) + "\nExpected ')'' to close the expression in IfStat() not " + tokenString)
exit()
lex()
backpatch(C_place[1], nextquad())
statements()
backpatch(C_place[0], nextquad())
backpatch_c(nextquad())
# ifList = makelist(nextquad())
elsepart()
# backpatch(ifList,nextquad())
else:
print("Syntax Error in line: " + str(line) + "\nExpected '('' after ID in IfStat() not " + tokenString)
exit()
def elsepart():
if (tokenString == "else"):
lex()
statements()
else:
pass
# call statement
def callStat():
lex()
if (tokenType != "idtk"):
print("Syntax Error in line: " + str(
line) + "\nExpected ID not " + tokenType + " ( " + tokenString + " ) after 'call' statement")
exit()
called_function_name = tokenString
lex()
if (tokenType == "ParenthesesOpentk"):
actualparlist()
else:
print("Syntax Error in line: " + str(
line) + "\nExpected '('' after to start actualparlist in call() not " + tokenString)
exit()
lex()
if (check_statement_to_finish_with_semicolon() == 0):
print("Syntax Error in line: " + str(line) + "\nStatement else must fiish with semicolon not " + tokenString)
exit()
genquad("call", "", "_", called_function_name)
# input statement
def inputStat():
lex()
if (tokenType == "ParenthesesOpentk"):
lex()
if (tokenType != "idtk"):
print("Syntax Error in line: " + str(line) + "\nExpected keyword inside 'input'")
exit()
ID_place = tokenString
genquad("inp", ID_place, "_", "_")
input = ': scanf("%f", &' + str(ID_place) + ")"
line_c = "L_" + str(len(QUADS.quad_list) - 1) + input + " ;// ( inp," + str(ID_place) + "_,_,)\n"
QUADS.quad_list_for_c.append(line_c)
lex()
if (tokenType != "ParenthesesClosetk"):
print("Syntax Error in line: " + str(line) + "\nExpected ') to close the expression 'input(ID)'")
exit()
else:
print("Syntax Error in line: " + str(line) + "\nWrong syntax of input(ID)")
exit()
lex()
if (check_statement_to_finish_with_semicolon() == 0):
print("Syntax Error in line: " + str(line) + "\nStatement input must finish with semicolon not " + tokenString)
exit()
# print statement
def printStat():
lex()
if (tokenType != "ParenthesesOpentk"):
print("Syntax Error in line: " + str(line) + "\nWrong syntax of print()")
exit()
lex()
E_place = expression()
genquad("out", E_place, "_", "_")
line_c = "L_" + str(len(QUADS.quad_list) - 1) + ": printf(" + str(E_place) + "); // ( out," + str(
E_place) + "_,_,)\n"
QUADS.quad_list_for_c.append(line_c)
lex()
if (check_statement_to_finish_with_semicolon() == 0):
print("Syntax Error in line: " + str(line) + "\nStatement print must finish with semicolon not " + tokenString)
exit()
# return statement
def returnStat():
lex()
if (tokenType != "ParenthesesOpentk"):
print("Syntax Error in line: " + str(line) + "\nWrong syntax of return() - Does not open")
exit()
lex()
E_place = expression()
lex()
if (check_statement_to_finish_with_semicolon() == 0):
print("Syntax Error in line: " + str(line) + "\nStatement return must finish with semicolon not " + tokenString)
exit()
genquad("retv", E_place, "_", "_")
line_c = "L_" + str(len(QUADS.quad_list) - 1) + ": return " + str(E_place) + "; // ( retv," + str(
E_place) + "_,_,)\n"
QUADS.quad_list_for_c.append(line_c)
def check_statement_to_finish_with_semicolon():
if (tokenType != "semicolontk"):
return 0
else:
lex()
return 1
def formalparlist():
lex()
while (formalparitem() == 1):
if (tokenType == "commatk"):
lex()
continue
elif (tokenType == "ParenthesesClosetk"):
lex()
break
else:
print("Syntax Error in line: " + str(line) + "\nWrong syntax of formalparlist )'")
exit()
def formalparitem():
if (tokenString == "in"):
lex()
if (tokenType != "idtk"):
print("Syntax Error in line: " + str(line) + "\nExpected ID after 'inout' or 'in' )'")
exit()
else:
genquad("par", tokenString, "CV", "_")
lex()
return 1
elif (tokenString == "inout"):
lex()
if (tokenType != "idtk"):
print("Syntax Error in line: " + str(line) + "\nExpected ID after 'inout' or 'in' )'")
exit()
else:
genquad("par", tokenString, "REF", "_")
lex()
return 1
else:
return 0
def actualparlist():
lex()
while (actualparitem() == 1):
if (tokenType == "ParenthesesClosetk"):
break
lex()
while (tokenType == "commatk"):
lex()
continue
def actualparitem():
if (tokenString == "in"):
argument = tokenString
lex()
E_place = expression()
addNewPar(E_place,"cv")
addArgument(argument)
genquad("par", E_place, "CV", "_")
return 1
elif (tokenString == "inout"):
argument = tokenString
lex()
if (tokenType != "idtk"):
print("Syntax Error in line: " + str(line) + "\nExpected ID after 'inout')'")
exit()
else:
genquad("par", tokenString, "REF", "_")
lex()
addNewPar(tokenString,"ref")
addArgument(argument)
return 1
else:
return 0
def condition():
BT_place = boolterm()
C_place = BT_place
if (tokenType == "ParenthesesClosetk"):
# backpatch_c(nextquad()+ 1)
# genquad("jump", "_", "_", "_")
# line_c = "L_"+str(len(QUADS.quad_list) - 1)+": goto L_null ; // ( jump,_,_,null )\n"
# QUADS.quad_list_for_c.append(line_c)
return C_place
lex()
while (tokenString == "or"):
lex()
backpatch(C_place[0], nextquad())
BT_place = boolterm()
C_place[0] = BT_place[0]
C_place[1] = merge(C_place[1], BT_place[1])
lex()
return C_place
def boolterm():
BF_place = boolfactor()
BT_place = BF_place
while (tokenString == "and"):
backpatch(BT_place[1], nextquad())
BF_place = boolfactor()
BT_place[1] = BF_place[1]
BT_place[0] = merge(BT_place[0], BF_place[0])
return BT_place
def boolfactor():
    BF_place = [[], []]  # a list made up of 2 lists (the first for false, the second for true)
if (tokenString == "not"):
lex()
if (tokenType != "bracketOpentk"):
print("Syntax Error in line: " + str(line) + "\nExpected '[' before condition")
exit()
lex()
C_place = condition()
if (tokenType != "bracketClosetk"):
print("Syntax Error in line: " + str(line) + "\nExpected ']' before closing")
exit()
BF_place[1] = C_place[0]
BF_place[0] = C_place[1]
return BF_place
elif (tokenType == "bracketOpentk"):
C_place = condition()
if (tokenType != "bracketClosetk"):
print("Syntax Error in line: " + str(line) + "\nExpected ']' before closing")
exit()
return C_place
else:
E1_place = expression()
RO_place = REL_OP()
E2_place = expression()
BF_place[1] = makelist(nextquad())
genquad(RO_place, E1_place, E2_place, "_")
BF_place[0] = makelist(nextquad())
# genquad("jump", "_", "_", "_")
line_c = "L_" + str(len(QUADS.quad_list) - 1) + ": if (" + str(E1_place) + " " + str(RO_place) + " " + str(
E2_place) + ") goto L_null // (" + str(RO_place) + "," + str(E1_place) + "," + str(E2_place) + ",null )\n"
QUADS.quad_list_for_c.append(line_c)
return BF_place
def expression():
OS_place = optional_sign()
T1_place = term()
while (1):
if (tokenType == "semicolontk"):
break
if (tokenType == "idtk"):
break
if (ADD_OP() == 1):
while (ADD_OP() == 1):
op = tokenString
lex()
T2_place = term()
w = newtemp()
genquad(op, T1_place, T2_place, w)
line_c = "L_" + str(len(QUADS.quad_list) - 1) + ": " + str(w) + " = " + str(T1_place) + " " + str(
op) + " " + str(T2_place) + "; //(" + str(op) + "," + str(T1_place) + "," + str(T2_place) + str(
w) + ")\n"
QUADS.quad_list_for_c.append(line_c)
T1_place = w
if (tokenType == "ParenthesesClosetk"):
break
lex()
if (tokenString in SINGLE_TOKENS_LIST):
break
if (tokenType != "ParenthesesClosetk"):
print("Syntax Error in line: " + str(line) + "\nWrong syntax of expresion")
exit()
return T1_place
def term():
F1_place = factor()
while (MUL_OP() == 1 or ADD_OP() == 1):
op = tokenString
lex()
F2_place = factor()
w = newtemp()
genquad(op, F1_place, F2_place, w)
line_c = "L_" + str(len(QUADS.quad_list) - 1) + ": " + str(w) + " = " + str(F1_place) + " " + str(
op) + " " + str(F2_place) + "; // (" + str(op) + "," + str(F1_place) + ",_," + str(F2_place) + "," + str(
w) + ")\n"
QUADS.quad_list_for_c.append(line_c)
F1_place = w
return F1_place
def factor():
if (tokenType == "numbertk"):
T = tokenString
lex()
return T
elif (tokenType == "idtk"):
ID_place = idtail()
return ID_place
else: # EXPRESSION
if (tokenType != "ParenthesesOpentk"):
print("Syntax Error in line: " + str(line) + "\nWrong syntax of expresion")
exit()
E_place = expression()
if (tokenType != "ParenthesesClosetk"):
print("Syntax Error in line: " + str(line) + "\nWrong syntax of expresion")
exit()
return E_place
def idtail():
global w
if (tokenString in VARLIST):
T = tokenString
lex()
return T
else:
var = tokenString
called_function_name = tokenString
lex()
if (tokenType == "ParenthesesOpentk"):
actualparlist()
w = newtemp()
genquad("par", w, "RET", "_")
genquad("call", "_", "_", called_function_name)
if (tokenType != "ParenthesesClosetk"):
print("Syntax Error in line: " + str(line) + "\nWrong syntax of actualparlist, does not close")
exit()
lex()
return w
# else:
# print("Error in line: " + str(line) + "\nVariable is not defined : "+ var)
# exit()
return var
# symbols + and - (are optional)
def optional_sign():
if (ADD_OP() == 1):
lex()
else:
pass
def ADD_OP():
if (tokenString == "+" or tokenString == "-"):
return 1
return 0
def MUL_OP():
if (tokenString == "*" or tokenString == "/"):
return 1
return 0
def REL_OP():
if (tokenString == "="):
relop_buffer = tokenString
lex()
return relop_buffer
elif (tokenType == "lesstk"):
relop_buffer = tokenString
lex()
if (tokenType == "greatertk" or tokenString == "="):
relop_buffer += tokenString
lex()
return relop_buffer
elif (tokenType == "greatertk"):
relop_buffer = tokenString
lex()
if (tokenString == "="):
relop_buffer += tokenString
lex()
return relop_buffer
else:
print("Syntax Error in line: " + str(
line) + "\nYou must have REL_OP between expressions in boolfactor() not " + tokenType)
exit()
###################################### LEXICAL ANALYSIS #########################################
def newSymbol():
global char
global line
char = input.read(1)
if (char == '\n'):
line += 1
def lex():
global char
global tokenString
global line
while (True):
if (add_char_to_buffer() == 1):
break
BUFFERS.state = AUTO[BUFFERS.state][BUFFERS.charType]
if (BUFFERS.state == -1):
potential_num()
check_state()
break
elif (BUFFERS.state == -2):
potential_ID_or_Keyword()
check_state()
break
check_state()
def check_if_EOF_after_dot():
newSymbol()
while (char == "\n" or char == "\t" or char == " "):
newSymbol()
if not char:
print("COMPILE SUCCESSFUL COMPLETE!\n")
genquad("halt", "_", "_", "_")
genquad("end_block", program_name, "_", "_")
if FLAG.program_includes_fun_or_prod == 0:
line_c = "L_" + str(len(QUADS.quad_list) - 1) + ":\n"
QUADS.quad_list_for_c.append(line_c)
create_c_file()
create_int_file()
input.close()
exit(1)
else:
print("ERROR line: " + str(line) + "\nProgram must finish with '.'")
exit()
def check_if_programm_ends_with_dot():
if (BUFFERS.char_buffer == '.'):
print("COMPILE SUCCESSFUL COMPLETE!\n")
genquad("halt", "_", "_", "_")
genquad("end_block", program_name, "_", "_")
if FLAG.program_includes_fun_or_prod == 0:
line_c = "L_" + str(len(QUADS.quad_list) - 1 + ": \n")
QUADS.quad_list_for_c.append(line_c)
create_c_file()
input.close()
create_int_file()
exit(1)
else:
print("ERROR line: " + str(line) + "\nProgram must finish with '.'")
exit()
def add_char_to_buffer():
global char
global comment_state
global tokenType
global tokenString
if (BUFFERS.char_buffer in SINGLE_TOKENS_LIST):
BUFFERS.charType = find_char_type(BUFFERS.char_buffer)
tokenString = BUFFERS.char_buffer
BUFFERS.char_buffer = ''
return 1
if (BUFFERS.assigment_buffer == ":="):
tokenType = "assignmenttk"
tokenString = ":="
BUFFERS.assigment_buffer = ""
return 1
newSymbol()
    BUFFERS.charType = find_char_type(char)
if (BUFFERS.charType == 13):
cross_comment()
if (BUFFERS.charType == 12):
check_if_EOF_after_dot()
if not char:
check_if_programm_ends_with_dot()
if (BUFFERS.charType == -1):
print("ERROR line:" + str(line) + "\nChar:" + char + " is not belongs to alphabet")
exit()
if (BUFFERS.assigment_buffer == ":" and char != "="):
print("ERROR line: " + str(line) + "\nCharacter '=' must exist after character ':' ")
exit()
elif (BUFFERS.assigment_buffer == ':' and char == '='):
BUFFERS.assigment_buffer += char
BUFFERS.state = 5
check_state()
return 0
elif (BUFFERS.charType == 0 or BUFFERS.charType == 1):
BUFFERS.word_buffer += char # word_buffer only stores numbers , strings and ':'
elif (BUFFERS.charType == 5):
BUFFERS.assigment_buffer = ":"
if (BUFFERS.charType != 18):
BUFFERS.char_buffer = char
return 0
def check_state():
global BUFFERS
global tokenString
if (BUFFERS.state == 6): # error
print("ERROR line: " + str(line) + "\nBecause of: " + BUFFERS.word_buffer)
exit()
elif (BUFFERS.state == 5): # OK
tokenString = BUFFERS.word_buffer
BUFFERS.word_buffer = ''
def cross_comment():
global line
global charType
global char
global tokenType
comment_line = line
newSymbol()
while (char != "#"):
if not char:
print("ERROR line :" + str(comment_line) + "\nWrong syntax of comment")
exit()
newSymbol()
        BUFFERS.charType = find_char_type(char)
def potential_num():
global BUFFERS
global tokenType
if not (BUFFERS.word_buffer.isnumeric()):
print("ERROR line :" + str(line) + "\nKeyword cant start with number : ( " + word_buffer + " )")
exit()
    if (int(BUFFERS.word_buffer) >= -4294967295 and int(BUFFERS.word_buffer) <= 4294967295):
        tokenType = "numbertk"
BUFFERS.state = 5
else:
print("ERROR line: " + str(line) + "\nNumber is not between -(2^32-1) and (2^32-1)")
exit()
def potential_ID_or_Keyword():
global tokenType
global BUFFERS
if (BUFFERS.word_buffer in ID_words):
tokenType = "keywordtk"
BUFFERS.state = 5
elif (len(BUFFERS.word_buffer) < 30):
tokenType = "idtk"
BUFFERS.state = 5
else:
print("ERROR line: " + str(line) + "\nThe length of the string is more than 30")
exit()
def find_char_type(c):
global tokenType
if (c.isalpha()):
tokenType = ""
return 0
elif (c.isdigit()):
tokenType = ""
return 1
elif (c == '+' or c == '-' or c == '*' or c == '/'):
tokenType = "arithmetictk"
return 2
elif (c == ';'):
tokenType = "semicolontk"
return 3
elif (c == ','):
tokenType = "commatk"
return 4
    elif (c == ':'):
        tokenType = ""
        return 5
elif (c == '['):
tokenType = "bracketOpentk"
return 6
elif (c == ']'):
tokenType = "bracketClosetk"
return 7
elif (c == '{'):
tokenType = "BracesOpentk"
return 8
elif (c == '}'):
tokenType = "BracesClosetk"
return 9
elif (c == '('):
tokenType = "ParenthesesOpentk"
return 10
elif (c == ')'):
tokenType = "ParenthesesClosetk"
return 11
elif (c == '.'):
tokenType = "dottk"
return 12
elif (c == '#'):
tokenType = "commenttk"
return 13
elif (c == '<'):
tokenType = "lesstk"
return 14
elif (c == '>'):
tokenType = "greatertk"
return 15
elif (c == '='):
tokenType = ""
return 16
elif (not c):
tokenType = ""
return 17
elif (c == " " or c == '\n' or c == '\t'):
tokenType = "whiteSpacetk"
return 18
else:
tokenType = ""
return -1 # char is not in alphabet
ID_words = ["program", "if", "switchcase", "not", "function", "input", "declare",
"else", "forcase", "and", "procedure", "print", "while", "incase", "or",
"call", "case", "default", "return", "in", "inout"]
if __name__ == "__main__":
main(sys.argv[1:])
# STRUCTURE OF AUTO ARRAY
# LETTERS - NUMBERS - (+-*/) - ; - , - : - [ - ] - { - } - ( - ) - . - # - < - > - = - EOF - (white spaces)
# start = 0
# rem = 1
# asgn = 2
# dig = 3
# idk = 4
# OK = 5
# ERROR = 6
# smaller = 7
# larger = 8
# -1 pn
# -2 pik
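# Worked example (illustrative): starting in state 0 (start), reading a letter
# (column 0) moves the automaton to state 4 (idk); further letters or digits
# keep it in state 4; any other character yields -2, which makes lex() call
# potential_ID_or_Keyword() to classify the buffered word.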
| 2.296875
| 2
|
tests/integration/test_rerun.py
|
JoshKarpel/condormap
| 21
|
12778241
|
# Copyright 2018 HTCondor Team, Computer Sciences Department,
# University of Wisconsin-Madison, WI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from pathlib import Path
import pytest
import htmap
TIMEOUT = 300
@pytest.mark.timeout(TIMEOUT)
def test_rerun(mapped_doubler):
m = mapped_doubler.map([1])
m.wait()
m.rerun()
assert list(m) == [2]
@pytest.mark.timeout(TIMEOUT)
def test_load_then_rerun(mapped_doubler):
m = mapped_doubler.map([1], tag="load-then-rerun")
m.wait()
loaded = htmap.load("load-then-rerun")
loaded.rerun()
assert list(loaded) == [2]
@pytest.mark.timeout(TIMEOUT)
def test_rerun_out_of_range_component_raises(mapped_doubler):
m = mapped_doubler.map([1], tag="load-then-rerun")
m.wait()
with pytest.raises(htmap.exceptions.CannotRerunComponents):
m.rerun([5])
@pytest.fixture(scope="function")
def sleepy_doubler_that_writes_a_file():
@htmap.mapped
def sleepy_double(x):
time.sleep(1)
r = x * 2
p = Path("foo")
p.write_text("hi")
htmap.transfer_output_files(p)
return r
return sleepy_double
@pytest.mark.timeout(TIMEOUT)
def test_rerun_removes_current_output_file(sleepy_doubler_that_writes_a_file):
m = sleepy_doubler_that_writes_a_file.map([1], tag="load-then-rerun")
m.wait()
assert m.get(0) == 2
m.rerun()
with pytest.raises(htmap.exceptions.OutputNotFound):
m[0]
@pytest.mark.timeout(TIMEOUT)
def test_rerun_removes_current_user_output_file(sleepy_doubler_that_writes_a_file):
m = sleepy_doubler_that_writes_a_file.map([1], tag="load-then-rerun")
m.wait()
assert (m.output_files.get(0) / "foo").read_text() == "hi"
m.rerun()
with pytest.raises(FileNotFoundError):
(m.output_files[0] / "foo").read_text()
| 2.125
| 2
|
py/caesar_cipher.py
|
sti320a/security_tools
| 0
|
12778242
|
#! python3
def generate_cryptogram(text: str, keynum: int) -> str:
encrypted = ''
for char in text:
encrypted += chr(ord(char) + keynum)
return encrypted
def try_decrypt(text: str) -> list:
res = []
for keynum in range(1, 27):
res.append(generate_cryptogram(text, -keynum))
return res
if __name__ == '__main__':
print(generate_cryptogram('test', 1))
try_decrypt('uftu')
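# Illustrative example: with keynum=1 every code point is shifted by one, so
# generate_cryptogram('test', 1) returns 'uftu', and try_decrypt('uftu')
# brute-forces shifts 1..26, so one of the 26 candidates is the plaintext.
# Note the shift is applied to the raw code point without wrapping at 'z',
# so shifted output may fall outside the letters.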
| 3.828125
| 4
|
tests/unit/test_modulegraph/testpkg-packages/pkg/__init__.py
|
hawkhai/pyinstaller
| 9,267
|
12778243
|
<reponame>hawkhai/pyinstaller
""" pkg.init """
| 0.757813
| 1
|
bagou/exceptions.py
|
toxinu/django-bagou
| 4
|
12778244
|
<reponame>toxinu/django-bagou
# -*- coding: utf-8 -*-
class BagouException(Exception):
pass
class BagouChannelException(Exception):
pass
| 1.054688
| 1
|
configs/distiller/cwd/cwd_psp_r101-d8_distill_psp_r18_d8_512_1024_80k_cityscapes.py
|
pppppM/mmsegmentation-distiller
| 35
|
12778245
|
<filename>configs/distiller/cwd/cwd_psp_r101-d8_distill_psp_r18_d8_512_1024_80k_cityscapes.py
_base_ = [
'../../_base_/datasets/cityscapes.py',
'../../_base_/default_runtime.py', '../../_base_/schedules/schedule_80k.py'
]
find_unused_parameters=True
weight=5.0
tau=1.0
distiller = dict(
type='SegmentationDistiller',
teacher_pretrained = 'pretrained_model/pspnet_r101b-d8_512x1024_80k_cityscapes_20201226_170012-3a4d38ab.pth',
distill_cfg = [ dict(student_module = 'decode_head.conv_seg',
teacher_module = 'decode_head.conv_seg',
output_hook = True,
methods=[dict(type='ChannelWiseDivergence',
name='loss_cwd',
student_channels = 19,
teacher_channels = 19,
tau = tau,
                                       weight = weight,
)
]
),
]
)
student_cfg = 'configs/pspnet/pspnet_r18-d8_512x1024_80k_cityscapes.py'
teacher_cfg = 'configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py'
| 1.382813
| 1
|
src/pyhn/urls.py
|
knownsec/PyHackerNews
| 8
|
12778246
|
#!/usr/bin/env python
from django.conf.urls import patterns, include, url
urlpatterns = patterns(
'',
url(r'^$', 'pyhn.apps.news.views.index.index', name='index'),
url(
r'^social/', include('social.apps.django_app.urls', namespace='social')
),
url(r'^news/', include('pyhn.apps.news.urls', namespace='news')),
url(r'^accounts/', include('pyhn.apps.account.urls', namespace='account')),
url(
r'^user/(?P<user_id>\d+)/', 'pyhn.apps.account.views.user_profile',
name='profile'
),
)
| 1.953125
| 2
|
crimsobot/utils/games.py
|
the-garlic-os/crimsoBOT
| 0
|
12778247
|
import random
from collections import Counter
from datetime import datetime
from typing import List, Tuple, Union
import discord
from discord import Embed
from discord.ext import commands
from crimsobot.models.currency_account import CurrencyAccount
from crimsobot.models.guess_statistic import GuessStatistic
from crimsobot.utils import tools as c
DiscordUser = Union[discord.User, discord.Member]
def get_crimsoball_answer(ctx: commands.Context) -> str: # function to give first answer a ctx to work with
# don't know if this is any better than just putting it
# inside of the crimsoball command
answer_list = [
'{} haha ping'.format(ctx.message.author.mention),
'ye!',
'**no**',
'what do you think?',
'*perhaps*',
'OMAN',
"i can't answer this, you need an adult",
'absolutely!\n\n\n`not`',
'of course!',
'according to quantum superposition, the answer was both yes and no before you asked.',
"is the sky blue?\n\n(is it? i don't know. i don't have eyes.)",
"i can't be bothered with this right now.",
'funny you should ask--',
'fine, sure, whatever',
'<:xok:551174281367650356>',
'ask seannerz. ping him now and ask.',
'ehhhh sure',
'hmmmm. no.',
'uhhhhhhhhh',
'<:uhhhh:495249068789071882>',
'eat glass!',
'it is important that you stop bothering me.',
'you CANNOT be serious',
'sure? how would i know?',
'what heck',
'random_response', # leave this alone
]
return random.choice(answer_list)
def emojistring() -> str:
    emojis = []
    with open(c.clib_path_join('games', 'emojilist.txt'), encoding='utf-8', errors='ignore') as f:
        for line in f:
            emojis.append(line.replace('\n', ''))
    emoji_string = random.sample(''.join(emojis), random.randint(3, 5))
    return ' '.join(emoji_string)
def tally(ballots: List[str]) -> Tuple[str, int]:
counter = Counter(sorted(ballots))
winner = counter.most_common(1)[0]
return winner
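# Worked example: tally(['b', 'a', 'a']) returns ('a', 2) -- the most common
# ballot with its count; sorting first makes ties resolve alphabetically.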
def winner_list(winners: List[str]) -> str:
if len(winners) > 1:
winners_ = ', '.join(winners[:-1])
winners_ = winners_ + ' & ' + winners[-1] # winner, winner & winner
else:
winners_ = winners[0]
return winners_
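# Worked example: winner_list(['A', 'B', 'C']) -> 'A, B & C', while a single
# winner comes back unchanged: winner_list(['A']) -> 'A'.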
def get_story() -> str:
    with open(
        c.clib_path_join('games', 'madlibs.txt'),
        encoding='utf-8',
        errors='ignore'
    ) as f:
        story = f.readlines()
    story = [line[:-1] for line in story]
    story = [line.replace('\\n', '\n') for line in story]
    return random.choice(story)
def get_keys(format_string: str) -> List[str]:
"""format_string is a format string with embedded dictionary keys.
Return a set containing all the keys from the format string."""
keys = []
end = 0
repetitions = format_string.count('{')
for _ in range(repetitions):
start = format_string.find('{', end) + 1 # pass the '{'
end = format_string.find('}', start)
key = format_string[start:end]
keys.append(key) # may add duplicates
# find indices of marked tags (to be used more than once)
ind = [i for i, s in enumerate(keys) if '#' in s]
# isolate the marked tags and keep one instance each
mults = []
for ele in ind:
mults.append(keys[ele])
mults = list(set(mults))
# delete all marked tags from original list
for ele in sorted(ind, reverse=True):
del keys[ele]
# ...and add back one instance each
keys = keys + mults
return keys
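# Worked example: get_keys('{noun} ate {food#1}, then more {food#1}') returns
# ['noun', 'food#1'] -- unmarked keys in order, plus one instance of each
# '#'-marked tag appended at the end.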
async def win(discord_user: DiscordUser, amount: float) -> None:
account = await CurrencyAccount.get_by_discord_user(discord_user) # type: CurrencyAccount
account.add_to_balance(amount)
await account.save()
async def daily(discord_user: DiscordUser, lucky_number: int) -> Embed:
# fetch account
account = await CurrencyAccount.get_by_discord_user(discord_user) # type: CurrencyAccount
# get current time
now = datetime.utcnow()
# arbitrary "last date collected" and reset time (midnight UTC)
reset = datetime(1969, 7, 20, 0, 0, 0) # ymd required but will not be used
last = account.ran_daily_at
    # check if dates are same; if so, gotta wait
    if last and last.strftime('%Y-%m-%d') == now.strftime('%Y-%m-%d'):
        # timedelta normalization makes (reset - now).seconds count the time
        # remaining until the next midnight UTC
        hours = (reset - now).seconds / 3600
        minutes = (hours - int(hours)) * 60
title = 'Patience...'
award_string = 'Daily award resets at midnight UTC, {}h{}m from now.'.format(int(hours), int(minutes + 1))
thumb = 'clock'
color = 'orange'
# if no wait, then check if winner or loser
else:
winning_number = random.randint(1, 100)
if winning_number == lucky_number:
daily_award = 500
title = 'JACKPOT!'
wrong = '' # they're not wrong!
thumb = 'moneymouth'
color = 'green'
else:
daily_award = 10
title_choices = [
'*heck*',
'*frick*',
'*womp womp*',
'**😩**',
'Aw shucks.',
'Why even bother?',
]
title = random.choice(title_choices)
wrong = 'The winning number this time was **{}**, but no worries:'.format(winning_number)
thumb = 'crimsoCOIN'
color = 'yellow'
# update daily then save
account.ran_daily_at = now
await account.save()
        # update their balance now (will reopen and re-close the user)
        await win(discord_user, daily_award)
        award_string = '{} You have been awarded your daily **\u20A2{:.2f}**!'.format(wrong, daily_award)
# the embed to return
embed = c.crimbed(
title=title,
descr=award_string,
thumb_name=thumb,
color_name=color,
)
return embed
async def check_balance(discord_user: DiscordUser) -> float:
account = await CurrencyAccount.get_by_discord_user(discord_user) # type: CurrencyAccount
return account.get_balance()
def guess_economy(n: int) -> Tuple[float, float]:
""" input: integer
output: float, float"""
# winnings for each n=0,...,20
winnings = [0, 7, 2, 4, 7, 11, 15, 20, 25, 30, 36, 42, 49, 56, 64, 72, 80, 95, 120, 150, 200]
# variables for cost function
const = 0.0095 # dampener multiplier
sweet = 8 # sweet spot for guess
favor = 1.3 # favor to player (against house) at sweet spot
# conditionals
if n > 2:
cost = winnings[n] / n - (-const * (n - sweet) ** 2 + favor)
else:
cost = 0.00
return winnings[n], cost
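# Worked example: guess_economy(8) == (25, 1.825). At the sweet spot n=8 the
# dampener term -const*(n - sweet)**2 vanishes, so cost = 25/8 - 1.3 = 1.825.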
async def guess_luck(discord_user: DiscordUser, n: int, won: bool) -> None:
stats = await GuessStatistic.get_by_discord_user(discord_user) # type: GuessStatistic
stats.plays += 1
stats.add_to_expected_wins(n)
if won:
stats.wins += 1
await stats.save()
# async def guess_luck_balance(discord_user: DiscordUser) -> Tuple[float, int]:
# stats = await GuessStatistic.get_by_discord_user(discord_user) # type: GuessStatistic
# return stats.luck_index, stats.plays
async def guess_stat_embed(user: DiscordUser) -> Embed:
"""Return a big ol' embed of Guessmoji! stats"""
s = await GuessStatistic.get_by_discord_user(user)
if s.plays == 0:
embed = c.crimbed(
title='HOW—',
descr="You haven't played GUESSMOJI! yet!",
thumb_name='weary',
footer='Play >guess [n] today!',
)
else:
embed = c.crimbed(
title='GUESSMOJI! stats for {}'.format(user),
descr=None,
thumb_name='crimsoCOIN',
footer='Stat tracking as of {d.year}-{d.month:02d}-{d.day:02d}'.format(d=s.created_at),
)
ess = '' if s.plays == 1 else 's'
ess2 = '' if s.wins == 1 else 's'
# list of tuples (name, value) for embed.add_field
field_list = [
(
'Gameplay',
'**{}** game{ess} played, **{}** win{ess2}'.format(s.plays, s.wins, ess=ess, ess2=ess2)
),
(
'Luck index (expected: 100)',
'**{:.3f}**'.format(100 * s.luck_index)
),
]
for field in field_list:
embed.add_field(name=field[0], value=field[1], inline=False)
return embed
def guesslist() -> str:
output = [' n · cost · payout',
'·························']
for i in range(2, 21):
spc = '\u2002' if i < 10 else ''
w, c = guess_economy(i)
output.append('{}{:>d} · \u20A2{:>5.2f} · \u20A2{:>6.2f}'.format(spc, i, c, w))
return '\n'.join(output)
| 2.609375
| 3
|
custom_components/afvalinfo/location/venlo.py
|
reindrich/home-assistant-config
| 0
|
12778248
|
from ..const.const import (
MONTH_TO_NUMBER,
SENSOR_LOCATIONS_TO_URL,
_LOGGER,
)
from datetime import datetime, date
from bs4 import BeautifulSoup
import urllib.request
import urllib.error
class VenloAfval(object):
def get_date_from_afvaltype(self, tableRows, afvaltype):
try:
for row in tableRows:
garbageDate = row.find("td")
garbageType = row.find("span")
if garbageDate and garbageType:
garbageDate = row.find("td").string
garbageType = row.find("span").string
#Does the afvaltype match...
if garbageType == afvaltype:
day = garbageDate.split()[1]
month = MONTH_TO_NUMBER[garbageDate.split()[2]]
year = str(
datetime.today().year
if datetime.today().month <= int(month)
else datetime.today().year + 1
)
garbageDate = year + "-" + month + "-" + day
if datetime.strptime(garbageDate, '%Y-%m-%d').date() >= date.today():
return garbageDate
# if nothing was found
return ""
except Exception as exc:
_LOGGER.error("Error occurred while splitting data: %r", exc)
return ""
def get_data(self, city, postcode, street_number):
_LOGGER.debug("Updating Waste collection dates")
try:
url = SENSOR_LOCATIONS_TO_URL["venlo"][0].format(
postcode, street_number
)
req = urllib.request.Request(url=url)
f = urllib.request.urlopen(req)
html = f.read().decode("utf-8")
soup = BeautifulSoup(html, "html.parser")
html = soup.find("div", {"class": "trash-removal-calendar"})
tableRows = html.findAll("tr")
# Place all possible values in the dictionary even if they are not necessary
waste_dict = {}
# GFT
waste_dict["gft"] = self.get_date_from_afvaltype(tableRows, "GFT")
# Restafval
waste_dict["restafval"] = self.get_date_from_afvaltype(tableRows, "Restafval/PMD")
# PMD
waste_dict["pbd"] = self.get_date_from_afvaltype(tableRows, "Restafval/PMD")
return waste_dict
except urllib.error.URLError as exc:
_LOGGER.error("Error occurred while fetching data: %r", exc.reason)
return False
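# A minimal usage sketch (the postcode and street number below are
# hypothetical placeholders, not real address data):
#
#     dates = VenloAfval().get_data('venlo', '5911AB', '1')
#     if dates:
#         print(dates['gft'], dates['restafval'], dates['pbd'])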
| 2.875
| 3
|
cogs/Games.py
|
YeetVegetabales/NOVA
| 7
|
12778249
|
import discord
import aiohttp
import random
import asyncio
import json
import io
import re
import akinator
import time
import asyncpraw
import requests
import urllib3
import urllib
import itertools
import textwrap
from time import perf_counter
from aiotrivia import TriviaClient, AiotriviaException
from discord.ext import commands
from secrets import *
from contextlib import suppress
from async_timeout import timeout
from big_lists import *
from PIL import Image, ImageDraw, ImageSequence, ImageFont
class games(commands.Cog):
"""Play games in your server"""
def __init__(self, client, reddit):
self.client = client
self.trivia = TriviaClient()
self.aki = akinator.Akinator()
self.coin = "<:coin:781367758612725780>"
self.reddit = reddit
        # the asyncpraw client is injected through setup() at the bottom of
        # this file; no second client needs to be constructed here
@commands.command(aliases=['mm'])
async def mastermind(self, ctx):
"""You have 5 tries to guess a 4 digit code. Can you do it?"""
        # sample four distinct digits (note: range(9) means only digits 0-8
        # can appear) and keep both list and string forms of the code
        part = random.sample(list(map(str, list(range(9)))), 4)
        code = [int(x) for x in part]
        human_code = "".join(str(x) for x in code)
embed = discord.Embed(title='Welcome to Mastermind', color=0x5643fd, timestamp=ctx.message.created_at,
description='Mastermind is a logic and guessing game where you have to find a four-digit '
'code in only five tries. Type out four numbers to begin guessing!\n\n'
'<:redx:732660210132451369> ``The number you guessed is incorrect``\n'
'<:ticknull:732660186057015317> ``The number you guessed is in the code, '
'but not '
'in the right spot``\n'
'<:tickgreen:732660186560462958> ``You have the right digit and in the '
'correct spot``')
await ctx.send(embed=embed)
i = 0
while i < 5:
try:
result = ""
msg = await self.client.wait_for('message', timeout=60, check=lambda m: m.author == ctx.author)
r = [int(x) for x in msg.content]
if len(msg.content) != 4:
await ctx.send('Please only guess four-digit numbers.')
continue
                # score by position so repeated digits in a guess are judged
                # at their own index rather than at the first occurrence
                for pos, element in enumerate(r):
                    if element in code:
                        if pos == code.index(element):
                            result += "<:tickgreen:732660186560462958>"
                        else:
                            result += "<:ticknull:732660186057015317>"
                    else:
                        result += "<:redx:732660210132451369>"
await ctx.send(result)
if r == code:
await ctx.send(f"<a:party:773063086109753365> That's the right code. You win! "
f"<a:party:773063086109753365>\nYou cracked the code in **{i + 1}** tries.")
break
i += 1
except ValueError:
await ctx.send(f'{ctx.message.author.mention}, that is not a valid code! Please try again '
f'with actual numbers.')
continue
except asyncio.TimeoutError:
await ctx.send(f'{ctx.message.author.mention},'
f' you took too long to guess! The correct code was **{human_code}**.')
break
else:
await ctx.send(f"{ctx.message.author.mention}, you ran out of tries! The correct code was "
f"**{human_code}**.")
@commands.command()
async def fight(self, ctx, member: discord.Member = None):
"""Fight other members to the death."""
if member is None:
return await ctx.send('<:redx:732660210132451369> You must have someone '
'to fight in order to run this command!')
user_mention = member
user = member.display_name
auth_mention = ctx.message.author.mention
auth = ctx.message.author.display_name
weapon_list = ['Russian AK-47', 'revolver', 'crossbow', 'Sniper-AWP', 'SCAR-20', 'sword', 'knife', 'shotgun',
'spear',
'desert eagle', 'steel axe', 'trebuchet', 'Marksmen rifle', 'Hunting rifle', 'slingshot',
'nuclear bomb', 'trident', 'torpedo', 'cannon', 'catapult', 'nerf gun', 'land mine',
'grenade', 'M-16', 'lead-pipe', 'Glock-17', 'Burst-AUG', 'P-90', 'double-barrel shotgun',
'sawed-off shotgun', 'FAMAS', '.22 caliber rifle', 'hammer', 'bottle of bleach', 'tide-pod']
healing_list = ['band-aid', 'first aid kit', 'bottle of alcohol', 'bottle of essential oils', 'flu vaccine',
'plague mask',
'gas mask', 'magic potion', "witch's spell", 'bottle of cough syrup']
try:
await ctx.send(f"**{auth}** has entered the arena and challenged **{user}** to a duel.\n"
f"{user_mention.mention} do you accept?\n``yes|no``")
msg = await self.client.wait_for('message', check=lambda m: m.author == user_mention, timeout=15)
if msg.content.lower() == 'yes':
await ctx.send(f'Fight has begun!')
auth_health = 100
user_health = 100
await ctx.send(embed=discord.Embed(description=f'{auth}: <:heart:775889971931512842>'
f'{auth_health}\n'
f'{user}: <:heart:775889971931512842>{user_health}',
color=0x5643fd))
while user_health > 0 and auth_health > 0:
try:
await asyncio.sleep(2)
await ctx.send(f"{user_mention.mention} it is now your turn. Would you like to ``attack``, "
f"``heal``, or ``end``?")
msg = await self.client.wait_for('message', check=lambda m: m.author == user_mention,
timeout=15)
if msg.content.lower() == 'attack':
weapon = random.choice(weapon_list)
damage = random.randint(25, 50)
after = auth_health - damage
auth_health -= damage
await ctx.send(f"{user_mention.mention} did **{damage}** "
f"damage to {auth} with a "
f"{weapon}.\n{auth} has <:heart:775889971931512842>{after} health"
f" remaining.")
elif msg.content.lower() == 'heal':
if user_health > 99:
await ctx.send("Well that did nothing, "
"you can't heal if you already have full health.")
else:
heal = random.choice(healing_list)
points = random.randint(25, 50)
after = user_health + points
user_health += points
await ctx.send(f"After deciding to heal, {user} gained **{points}** health by "
f"using a {heal}. \nTheir total health is now "
f"<:heart:775889971931512842>{after}")
elif msg.content.lower() == 'end':
await ctx.send(f'{user} has ended the match. <:owner:730864906429136907>{auth}'
f'<:owner:730864906429136907> is the winner!')
await ctx.send(embed=discord.Embed(description=f'{auth}: <:heart:775889971931512842>'
f'{auth_health}\n'
f'{user}: <:heart:775889971931512842>'
f'{user_health}',
color=0x5643fd))
break
if auth_health < 1:
await asyncio.sleep(2)
await ctx.send(f'{auth} has lost all of their health. <:owner:730864906429136907>'
f'{user_mention.mention}<:owner:730864906429136907> wins!')
await ctx.send(embed=discord.Embed(description=f'{auth}: <:heart:775889971931512842>'
f'0\n'
f'{user}: <:heart:775889971931512842>'
f'{user_health}',
color=0x5643fd))
break
await asyncio.sleep(2)
await ctx.send(f"{auth_mention} now it's your turn. Would you like to `attack`, `heal`, or"
f" `end`?")
msg = await self.client.wait_for('message', check=lambda m: m.author == ctx.message.author,
timeout=15)
if msg.content.lower() == 'attack':
weapon = random.choice(weapon_list)
damage = random.randint(25, 50)
after = user_health - damage
user_health -= damage
await ctx.send(f"{auth_mention} did **{damage}** "
f"damage to {user} with a "
f"{weapon}.\n{user} has <:heart:775889971931512842>{after} health"
f" remaining.")
elif msg.content.lower() == 'heal':
if auth_health > 99:
await ctx.send("Well that did nothing, "
"you can't heal if you already have full health.")
else:
heal = random.choice(healing_list)
points = random.randint(25, 50)
after = auth_health + points
auth_health += points
await ctx.send(f"After deciding to heal, {auth} gained **{points}** health by "
f"using a {heal}. \nTheir total health is now "
f"<:heart:775889971931512842>{after}")
elif msg.content.lower() == 'end':
await ctx.send(f'{auth} has ended the match. <:owner:730864906429136907>{user}'
f'<:owner:730864906429136907> is the winner!')
await ctx.send(embed=discord.Embed(description=f'{auth}: <:heart:775889971931512842>'
f'{auth_health}\n'
f'{user}: <:heart:775889971931512842>'
f'{user_health}',
color=0x5643fd))
break
if user_health < 1:
await asyncio.sleep(2)
await ctx.send(f'{user} has lost all of their health. <:owner:730864906429136907>'
f'{auth_mention}<:owner:730864906429136907> wins!')
await ctx.send(embed=discord.Embed(description=f'{auth}: <:heart:775889971931512842>'
f'{auth_health}\n'
f'{user}: <:heart:775889971931512842>'
f'0',
color=0x5643fd))
break
continue
except asyncio.TimeoutError:
await ctx.send('<:redx:732660210132451369> You took too long to respond! '
'The fight was abandoned.')
elif msg.content.lower() == 'no':
return await ctx.send(f'**{user}** has declined the match. Better luck next time :/')
else:
return await ctx.send(f"<:redx:732660210132451369> {user_mention.mention}, "
f"you didn't respond with yes or no so the match "
f"was cancelled.")
except asyncio.TimeoutError:
await ctx.send('<:redx:732660210132451369> You took too long to respond! The fight was abandoned.')
@commands.command()
async def trivia(self, ctx, difficulty: str = None):
"""Test out your knowledge with trivia questions from nizcomix#7532"""
difficulty = difficulty or random.choice(['easy', 'medium', 'hard'])
try:
question = await self.trivia.get_random_question(difficulty)
except AiotriviaException:
return await ctx.send(embed=discord.Embed(title='That is not a valid sort.',
description='Valid sorts are ``easy``, ``medium``, and ``hard``.',
color=0xFF0000))
answers = question.responses
d = difficulty.capitalize()
random.shuffle(answers)
final_answers = '\n'.join([f"{index}. {value}" for index, value in enumerate(answers, 1)])
await ctx.send(embed=discord.Embed(
title=f"{question.question}", description=f"\n{final_answers}\n\nQuestion about: **{question.category}"
f"**\nDifficulty: **{d}**",
color=0x5643fd))
answer = answers.index(question.answer) + 1
try:
while True:
msg = await self.client.wait_for('message', timeout=15, check=lambda m: m.author == ctx.message.author)
if str(answer) in msg.content:
return await ctx.send(embed=discord.Embed(description=f"{answer} was correct ({question.answer})",
color=0x32CD32, title='Correct!'))
                else:
                    return await ctx.send(embed=discord.Embed(description=f"Unfortunately **{msg.content}** was wrong. "
                                                                          f"The correct answer was ``{question.answer}``.",
                                                              title='Incorrect', color=0xFF0000))
except asyncio.TimeoutError:
embed = discord.Embed(title='Time expired', color=0xFF0000,
description=f"The correct answer was {question.answer}")
await ctx.send(embed=embed)
@commands.command(aliases=['aki'])
async def akinator(self, ctx):
"""Let NOVA guess a person of your choice."""
answers = ["y", "yes", "n", "no", "0", "1", "2", "3", "4", "i", "idk", "i dont know",
"i don't know", "pn", "probably not", "probably", "p"]
embed = discord.Embed(title="Welcome to Akinator",
description="""Think of any character, they can be fictional or a real person.
You will be asked questions about this character and it is your job
to respond with one of the five acceptable answers:\n
**• yes**
**• no**
**• idk**
**• probably**
**• probably not**\n
Reply with **stop** to end the game.""",
color=0x5643fd,
timestamp=ctx.message.created_at)
embed.set_thumbnail(url="https://imgur.com/Hkny5Fz.jpg")
await ctx.send(embed=embed)
try:
self.aki.start_game()
await ctx.send(self.aki.answer("idk"))
questions = 0
while self.aki.progression <= 80:
ms = await self.client.wait_for("message", check=lambda m: m.author == ctx.author, timeout=60)
if ms.content.lower() in answers:
ques = self.aki.answer(ms.content)
await ctx.send(f"**{ctx.message.author.display_name}:**\n{ques}")
questions += 1
continue
elif ms.content.lower() == "stop":
await ctx.send("The game has ended. Thanks for playing!")
return
else:
continue
self.aki.win()
embed = discord.Embed(title=f"It's {self.aki.first_guess['name']}", color=0x5643fd,
timestamp=ctx.message.created_at,
description=f"**Description:** {self.aki.first_guess['description']}\n\n"
f"I made this guess in **{questions}** tries.\n\n"
f"**Was I correct?**\nyes/no")
embed.set_image(url=self.aki.first_guess['absolute_picture_path'])
await ctx.send(embed=embed)
try:
correct = await self.client.wait_for('message', check=lambda c: c.author == ctx.author, timeout=60)
if correct.content.lower() == "yes" or correct.content.lower() == "y" or correct.content == ":flushed:":
await ctx.send("<a:party:773063086109753365> Congratulations <a:party:773063086109753365>")
elif correct.content.lower() == "no" or correct.content.lower() == "n":
try:
second_guess = self.aki.guesses[1]
embed = discord.Embed(title=f"My second guess is {second_guess['name']}", color=0x5643fd,
timestamp=ctx.message.created_at,
description=f"**Description:** {second_guess['description']}\n\n"
f"I made this guess in **{questions}** tries.\n\n"
f"**Was I correct?**\nyes/no")
embed.set_image(url=second_guess['absolute_picture_path'])
await ctx.send(embed=embed)
m = await self.client.wait_for('message', check=lambda c: c.author == ctx.author, timeout=60)
if m.content.lower() == "yes" or m.content.lower() == "y" or m.content == ":flushed:":
await ctx.send("<a:party:773063086109753365> Congratulations <a:party:773063086109753365>")
else:
await ctx.send("Welp, better luck next time.")
except IndexError:
await ctx.send("Welp, better luck next time.")
except asyncio.TimeoutError:
await ctx.send("You took too long to respond so the game was abandoned")
except asyncio.TimeoutError:
await ctx.send("You took too long to respond so the game was abandoned")
@commands.command(aliases=['type'])
async def typing(self, ctx):
"""Test your typing skills with this fun and interactive game."""
sentence = random.choice(sentences)
word_count = len(sentence.split())
embed = discord.Embed(title="Welcome to Typing Test", color=0x5643fd, timestamp=ctx.message.created_at,
description="The game will be starting in `5` seconds. Get ready!")
embed.add_field(name="Directions", value="You will be sent a random sentence and it is yo"
"ur duty to type back the "
"sentence as quick as possible with as few mistakes as possible.",
inline=False)
embed.add_field(name="Rules", value="Be warned: punctuation, capitalization, and spelling DO matter.",
inline=False)
await ctx.send(embed=embed)
await asyncio.sleep(5)
await ctx.send("**3...**")
await asyncio.sleep(1)
await ctx.send("**2...**")
await asyncio.sleep(1)
await ctx.send("**1...**")
await asyncio.sleep(1)
await ctx.send("**GO**")
await asyncio.sleep(1)
await ctx.send(sentence)
try:
start = perf_counter()
msg = await self.client.wait_for('message', timeout=60, check=lambda x: x.author == ctx.author)
user_characters = list(msg.content)
characters = list(sentence)
maximum = range(0, len(characters))
correct = 0
for indexer in maximum:
try:
if user_characters[indexer] == characters[indexer]:
correct += 1
except IndexError:
pass
accuracy = correct / len(characters) * 100
stop = perf_counter()
total = round(stop - start)
part_of_minute = total / 60
await ctx.send(f"<:clock:738186842343735387> Time: `{total}` seconds\n"
f"<:star:737736250718421032> Speed: `{round(word_count / part_of_minute)}` WPM\n"
f"<:license:738176207895658507> Accuracy: `{round(accuracy)}`%")
except asyncio.TimeoutError:
await ctx.send("You took over a minute to send your sentence back so the process was abandoned.")
except ZeroDivisionError:
await ctx.send("Lmao you are so bad at typing that you got a zero percent accuracy.")
@commands.command(aliases=['gr'])
async def guessreddit(self, ctx, subreddit=None):
"""Look at two reddit posts and decide which one got more upvotes"""
try:
subreddit_list = ["holup", "dankmemes", "memes"]
listed = ", ".join(str(sub) for sub in subreddit_list)
if subreddit is None:
await ctx.send(f"Here is the list of currently available subs you can choose to play from:\n\n"
f"`{listed}`\n\nSend which subreddit you would like to use into chat.")
msg = await self.client.wait_for("message", check=lambda x: x.author == ctx.message.author, timeout=60)
if msg.content in subreddit_list:
subreddit = msg.content
else:
return await ctx.send("That subreddit is not available for this game.\n"
"Try again with a different sub.")
if subreddit not in subreddit_list:
return await ctx.send(f"That subreddit is not available for this game. "
f"\nThe current available subreddits are `{listed}`.")
ms = await ctx.send("<a:loading:743537226503421973> Please wait while the game is loading... "
"<a:loading:743537226503421973>")
posts = []
emojis = ["1️⃣", "2️⃣"]
sub = await self.reddit.subreddit(subreddit, fetch=True)
async for submission in sub.top("day", limit=50):
if not submission.stickied:
posts.append(str(submission.id))
random.shuffle(posts)
final_ids = random.sample(posts, 2)
post1 = await self.reddit.submission(id=final_ids[0])
post2 = await self.reddit.submission(id=final_ids[1])
await ms.delete()
embed1 = discord.Embed(title="Image 1", color=0x5643fd)
embed1.set_image(url=post1.url)
embed1.set_footer(text=f"r/{subreddit}")
await ctx.send(embed=embed1)
embed2 = discord.Embed(title="Image 2", color=0x5643fd)
embed2.set_image(url=post2.url)
embed2.set_footer(text=f"r/{subreddit}")
await ctx.send(embed=embed2)
msg = await ctx.send("Can you figure out which post got more upvotes?\n"
"React with 1️⃣ or 2️⃣ to make your guess.")
await msg.add_reaction("1️⃣")
await msg.add_reaction("2️⃣")
score1 = "{:,}".format(post1.score)
score2 = "{:,}".format(post2.score)
reaction, user = await self.client.wait_for('reaction_add', check=lambda r, u: str(
r.emoji) in emojis and u.id == ctx.author.id and r.message.id == msg.id,
timeout=60)
if int(post1.score) > int(post2.score) and str(reaction.emoji) == emojis[0]:
await ctx.send(f"Congratulations! `1` was the correct answer with <:upvote:751314607808839803>"
f" `{score1}` upvotes.\nImage 2 "
f"only had <:upvote:751314607808839803> `{score2}` upvotes.")
elif int(post1.score) < int(post2.score) and str(reaction.emoji) == emojis[1]:
await ctx.send(f"Congratulations! `2` was the correct answer with <:upvote:751314607808839803> "
f"`{score2}` upvotes.\nImage 1 "
f"only had <:upvote:751314607808839803> `{score1}` upvotes.")
elif int(post1.score) > int(post2.score) and str(reaction.emoji) == emojis[1]:
await ctx.send(f"Unfortunately, `2` was the incorrect answer.\nImage 1 had <:upvote:751314607808839803>"
f" `{score1}` upvotes "
f"while Image 2 had <:upvote:751314607808839803> `{score2}` upvotes.")
elif int(post1.score) < int(post2.score) and str(reaction.emoji) == emojis[0]:
await ctx.send(f"Unfortunately, `1` was the incorrect answer.\n"
f"Image 2 had <:upvote:751314607808839803> `{score2}` upvotes "
f"while Image 1 only had <:upvote:751314607808839803> `{score1}` upvotes.")
else:
await ctx.send("You did not react with the correct emojis so the game was cancelled.")
except asyncio.TimeoutError:
await ctx.send("You never reacted with a guess so the game was cancelled.")
@commands.command()
async def captionary(self, ctx):
"""A fun game based on captioning different gifs."""
game_master = ctx.message.author.id
random.shuffle(gif_links)
random.shuffle(inspiration)
gifs = gif_links[:20]
embed = discord.Embed(title="Captionary", color=0x5643fd, timestamp=ctx.message.created_at,
description="Captionary is a fun game based on submitting captions for different gifs."
"There are anywhere between 5 and 20 rounds and players submit their best"
"captions to be voted on.")
embed.add_field(name='**Commands**',
value='➤ `caption` - submit your caption\n'
'➤ `!inspire` - get a free caption idea\n'
'➤ `!stop` - used by the game master to end the game', inline=False)
embed.add_field(name='**Game Master**',
value=f'{ctx.message.author.mention} is the game master for this match! '
f'This user holds the power to end the game at any time using the `!stop` command.')
embed.set_image(url='https://imgur.com/qUPbXKI.jpg')
await ctx.send(embed=embed)
await ctx.send(f"{ctx.message.author.mention} as the game master, you get to choose the round length."
f"\nYou can choose any number between **30** and **120**.")
try:
waiter = await self.client.wait_for("message", check=lambda x: x.author.id == game_master, timeout=30)
if 29 < int(waiter.content) < 121:
round_time = int(waiter.content)
await ctx.send(f"Round time has been set at **{round_time}**")
elif 30 > int(waiter.content) or 120 < int(waiter.content):
round_time = 60
await ctx.send(
f"That is not a number between 30 and 120 so the round time has been set at `60`.")
else:
round_time = 60
await ctx.send(f"That is not a number between 30 and 120 so the round time has been set at `60`.")
except asyncio.TimeoutError:
round_time = 60
await ctx.send(f"{ctx.message.author.display_name} never responded so the round time has been set at `60`.")
pass
except ValueError:
round_time = 60
await ctx.send(f"You did not respond with a number so the round "
f"time has been set at `60`.")
pass
await ctx.send(f"{ctx.message.author.mention} additionally, you get to choose how many rounds will be played.\n"
f"You may choose any number between **5** and **20**.")
try:
waiter = await self.client.wait_for("message", check=lambda x: x.author.id == game_master, timeout=30)
            if 4 < int(waiter.content) < 21:
                total_rounds = int(waiter.content) + 1
                await ctx.send(f"The number of rounds has been set at **{total_rounds - 1}**")
elif 5 > int(waiter.content) or 20 < int(waiter.content):
total_rounds = 11
await ctx.send(f"That is not a number between **5** and **20** so the number of rounds "
f"has been set at `10`.")
else:
total_rounds = 11
await ctx.send(f"That is not a number between **5** and **20** so the number of rounds "
f"has been set at `10`.")
except asyncio.TimeoutError:
total_rounds = 11
await ctx.send(f"{ctx.message.author.display_name}"
f" never responded so the number of rounds has been set at `10`.")
pass
await asyncio.sleep(2)
await ctx.send("Get Ready! The game will start in **15 seconds**.")
await asyncio.sleep(15)
rounds = 1
gif_index = 0
players = []
# game loop
while rounds < total_rounds:
end_time = time.time() + round_time
await ctx.send(f"**ROUND {rounds}/{total_rounds}**")
gif_link = await ctx.send(gifs[gif_index])
if rounds == 1:
pass
elif len(players) == 0 and rounds != 1:
await ctx.send("There were no players for this round so the game has ended.")
break
else:
await ctx.send(" ".join(player.mention for player in players))
try:
round_players = []
round_answers = []
# round loop
while time.time() < end_time:
msg = await self.client.wait_for("message", check=lambda x: x.author != ctx.bot, timeout=120)
if "!caption" in msg.content:
if msg.author.id in round_players:
await ctx.send("You've already given a caption for this round!")
if msg.author not in players:
players.append(msg.author)
if msg.author.id not in round_players:
await msg.add_reaction('<:tickgreen:732660186560462958>')
round_players.append(msg.author)
round_answers.append(msg.content)
else:
continue
elif "!inspire" in msg.content:
await ctx.send(random.choice(inspiration))
continue
elif "!stop" == msg.content and msg.author.id == game_master:
return await ctx.send("Thanks for playing, this game has ended!")
except asyncio.TimeoutError:
pass
# end loop here
rounds += 1
gif_index += 1
await ctx.send("Thanks for playing!")
@commands.command()
async def race(self, ctx, member: discord.Member = None):
"""See who can be the fastest in this quick-paced game."""
progress = "<:loading_filled:730823516059992204>"
if member is None:
return await ctx.send('<:redx:732660210132451369> You must mention someone to play against!')
player_1 = "🐕 | "
player_2 = "🐈 | "
accept_or_decline = ["<:tickgreen:732660186560462958>", "<:redx:732660210132451369>"]
emojis = ["🐕", "🐈"]
msg = await ctx.send(f"{member.mention}\n\n{ctx.message.author.display_name} wants to race!\n"
f"React with <:tickgreen:732660186560462958> or <:redx:732660210132451369>"
f"to accept or decline.")
await msg.add_reaction("<:tickgreen:732660186560462958>")
await msg.add_reaction("<:redx:732660210132451369>")
try:
reaction, user = await self.client.wait_for("reaction_add", timeout=60, check=lambda r, u: str(r.emoji) in accept_or_decline and u.id == member.id and r.message.id == msg.id)
if str(reaction.emoji) == accept_or_decline[0]:
delete_dis = await ctx.send(f"{member.mention} has accepted! The game will begin in **5** seconds.")
await asyncio.sleep(5)
await msg.delete()
await delete_dis.delete()
await ctx.send(f"**Player 1 - {ctx.message.author.display_name}**")
player_1_progression = await ctx.send(player_1)
await ctx.send(f"**Player 2 - {member.display_name}**")
player_2_progression = await ctx.send(player_2)
msg2 = await ctx.send("GO! React with your animal to win.")
await msg2.add_reaction("🐕")
await msg2.add_reaction("🐈")
while len(player_1) < 156 and len(player_2) < 156:
reaction, user = await self.client.wait_for("reaction_add", timeout=60, check=lambda r, u: str(
r.emoji) in emojis and u.id == ctx.message.author.id or member.id and r.message.id == msg.id)
if str(reaction.emoji) == emojis[0] and user.id == ctx.message.author.id:
player_1 += progress
await player_1_progression.edit(content=player_1)
await asyncio.sleep(1)
elif str(reaction.emoji) == emojis[1] and user.id == member.id:
player_2 += progress
await player_2_progression.edit(content=player_2)
await asyncio.sleep(1)
if len(player_1) > len(player_2):
return await ctx.send(f"<:owner:730864906429136907>{ctx.message.author.display_name} "
f"is the winner!\n"
f"Thanks to {member.display_name} for playing.")
if len(player_1) < len(player_2):
return await ctx.send(f"<:owner:730864906429136907>{member.display_name} is the winner!\n"
f"Thanks to {ctx.message.author.display_name} for playing.")
elif str(reaction.emoji) == accept_or_decline[1]:
return await ctx.send(f"{member.mention} has declined. Better luck next time!")
except asyncio.TimeoutError:
return await ctx.send(f"The game has timed out due to inactivity.")
def setup(client):
client.add_cog(games(client, reddit=asyncpraw.Reddit(client_id=reddit_client_id,
client_secret=reddit_client_secret,
username=reddit_username,
password=<PASSWORD>,
user_agent=reddit_user_agent)))
| 2.921875
| 3
|
bci_framework/default_extensions/Neuropathic_pain_Neurofeedback/main.py
|
UN-GCPDS/bci-framework-
| 0
|
12778250
|
"""
================================
Neuropathic pain - Neurofeedback
================================
"""
import logging
from typing import TypeVar
from bci_framework.extensions.stimuli_delivery import StimuliAPI, Feedback, DeliveryInstance
from bci_framework.extensions.stimuli_delivery.utils import Widgets as w
from bci_framework.extensions import properties as prop
from browser import document, html, timer
Ts = TypeVar('Time in seconds')
Tm = TypeVar('Time in milliseconds')
TM = TypeVar('Time in minutes')
bands = {
'alpha': [[1, 5], 'increase'],
'beta': [[5, 10], 'decrease'],
'teta': [[10, 15], 'decrease'],
}
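# Each entry maps a band name to [band edge pair, desired modulation]; the
# edges above are the values shipped with this extension (they differ from
# the textbook EEG bands, e.g. alpha is usually ~8-12 Hz) and the direction
# tells the feedback whether that band's power should be driven up or down.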
########################################################################
class NPNeurofeedback(StimuliAPI):
""""""
# ----------------------------------------------------------------------
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.add_stylesheet('styles.css')
self.show_cross()
self.show_synchronizer()
self.feedback = Feedback(self, 'PowerBandNeuroFeedback')
self.feedback.on_feedback(self.on_input_feedback)
self.bci_stimuli <= html.DIV(id='stimuli')
self.dashboard <= w.label(
'NeuropathicPain - Neurofeedback', 'headline4'
)
self.dashboard <= html.BR()
self.dashboard <= w.subject_information(
paradigm='NeuropathicPain - Neurofeedback'
)
self.dashboard <= w.slider(
label='Baseline acquisition:',
min=0,
value=0.1,
max=5,
step=0.1,
unit='m',
id='baseline_duration',
)
self.dashboard <= w.slider(
label='Sesion duration:',
min=5,
value=10,
max=30,
step=0.1,
unit='m',
id='sesion_duration',
)
self.dashboard <= w.slider(
label='Window analysis:',
min=0.5,
max=2,
value=1,
step=0.1,
unit='s',
id='window_analysis',
)
self.dashboard <= w.slider(
label='Sliding data:',
min=0.1,
max=2,
value=1,
unit='s',
step=0.1,
id='sliding_data',
)
self.dashboard <= w.select(
'Analysis Function',
[['Fourier', 'fourier'], ['Welch', 'welch']],
value='fourier',
id='method',
)
self.dashboard <= w.switch(
label='Record EEG',
checked=False,
id='record',
)
self.dashboard <= w.toggle_button(
[
('Start session', self.start),
('Stop session', self.stop_session),
],
id='run',
)
# self.dashboard <= w.slider(
# label='Test feedback:',
# min=-1,
# max=1,
# value=0,
# step=0.1,
# id='test',
# on_change=self.test_feedback,
# )
# # ----------------------------------------------------------------------
# @DeliveryInstance.both
# def test_feedback(self, value):
# """Test the feedback stimuli."""
# self.on_input_feedback(
# **{
# 'feedback': value,
# }
# )
# ----------------------------------------------------------------------
    def on_input_feedback(self, **feedback: float) -> None:
        """Asynchronous method to receive the feedback process value.
        `feedback` is a dictionary with the keys:
        * `feedback`: The feedback value, a number between -1 and 1.
        * `baseline`: The frozen baseline value.
        """
f = feedback['feedback']
# logging.warning(f'FEEDBACK: {f}')
plot = self.BandFeedback.neurofeedback(f)
# document.select_one('#stimuli').clear()
# self.update_plot(plot)
# @DeliveryInstance.remote
# def update_plot(self, plot):
document.select_one('#stimuli').style = {
'background-image': f'url(data:image/png;base64,{plot})',
}
# ----------------------------------------------------------------------
def start(self) -> None:
"""Start the session.
A session comprises a baseline calculation and a neurofeedback trial.
"""
if w.get_value('record'):
self.start_record()
self.build_trials()
self.show_counter(5)
timer.set_timeout(self.start_session, 5000)
# ----------------------------------------------------------------------
def start_session(self) -> None:
"""Execute the session pipeline."""
logging.warning('START_SESSION')
self.run_pipeline(
self.pipeline_trial, self.trials, callback='stop_session'
)
# ----------------------------------------------------------------------
def stop_session(self) -> None:
"""Stop pipeline execution."""
document.select_one('#stimuli').style = {'display': 'none'}
self.stop_analyser()
w.get_value('run').off()
if w.get_value('record'):
timer.set_timeout(self.stop_record, 2000)
# ----------------------------------------------------------------------
def build_trials(self) -> None:
"""Define the session and single session pipeline."""
baseline_duration = w.get_value('baseline_duration') * 60
sesion_duration = w.get_value('sesion_duration') * 60
baseline_packages = baseline_duration // w.get_value('sliding_data')
logging.warning(f'BP: {baseline_packages}')
self.trials = [
{
'method': w.get_value('method'),
'window_analysis': w.get_value('window_analysis'),
'sliding_data': w.get_value('sliding_data') * prop.SAMPLE_RATE,
'baseline_packages': baseline_packages,
},
]
self.pipeline_trial = [
['stop_analyser', 100],
['configure_analyser', 1000],
['baseline', baseline_duration * 1000],
['session', sesion_duration * 1000],
['stop_analyser', 1000],
]
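        # Each pipeline entry appears to be [method name, stage duration in ms]
        # (an assumption from how the durations are scaled above): the runner
        # calls the named method and holds that stage for the given time.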
# ----------------------------------------------------------------------
def configure_analyser(
self,
method,
window_analysis: Ts,
sliding_data: int,
baseline_packages: int,
) -> None:
"""Send the configuration values to the generator."""
data = {
'status': 'on',
'method': method,
'window_analysis': window_analysis,
'sliding_data': sliding_data,
'baseline_packages': baseline_packages,
'channels': list(prop.CHANNELS.values()),
'target_channels': list(prop.CHANNELS.values()),
'sample_rate': int(prop.SAMPLE_RATE),
'bands': bands,
}
logging.warning(f'CONFIG: {data}')
self.feedback.write(data)
# ----------------------------------------------------------------------
def baseline(self) -> None:
"""Acquire data to use in the zero location."""
self.show_cross()
self.send_marker('Start baseline')
document.select_one('#stimuli').style = {'display': 'none'}
# ----------------------------------------------------------------------
def session(self) -> None:
"""Neurofeedback activity."""
self.hide_cross()
self.send_marker('End baseline')
self.feedback.write({'command': 'freeze_baseline'}) # zero location
document.select_one('#stimuli').style = {'display': 'block'}
# ----------------------------------------------------------------------
def stop_analyser(self) -> None:
"""Stop feedback values generation."""
self.feedback.write(
{
'status': 'off',
}
)
if __name__ == '__main__':
NPNeurofeedback(python=('feedback.py', 'BandFeedback'))
| 2.359375
| 2
|