class BitSet:
    """Fixed-width bit set backed by a single integer."""

    def __init__(self, size):
        # Start with all `size` bits set to 1.
        self.bits = (1 << size) - 1

    def get(self, index):
        # Return the bit (0 or 1) at `index`.
        return (self.bits >> index) & 1

    def set(self, index, value):
        # Clear the bit at `index`, then set it again if `value` is truthy.
        mask = 1 << index
        self.bits &= ~mask
        if value:
            self.bits |= mask

    def flip(self, index):
        # Toggle the bit at `index`.
        self.bits ^= (1 << index)
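# A minimal usage sketch (illustrative only; note the constructor starts with
# every bit set, so get() returns 1 for any index inside the original width):
if __name__ == "__main__":
    bs = BitSet(8)       # bits = 0b11111111
    print(bs.get(3))     # 1
    bs.set(3, 0)         # clear bit 3
    print(bs.get(3))     # 0
    bs.flip(3)           # toggle it back on
    print(bs.get(3))     # 1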
|
import datetime
from django.test import TestCase
from custom.icds_reports.reports.bihar_api import get_mother_details
from custom.icds_reports.tasks import update_bihar_api_table
from datetime import date
from mock import patch
@patch('custom.icds_reports.utils.aggregation_helpers.distributed.bihar_api_demographics.BiharApiDemographicsHelper.bihar_state_id',
'st1')
class BiharAPIMotherTest(TestCase):
def test_file_content(self):
update_bihar_api_table(date(2017, 5, 1))
data, count = get_mother_details(
month=date(2017, 5, 1).strftime("%Y-%m-%d"),
state_id='st1',
last_ccs_case_id=''
)
for case in data:
if case['ccs_case_id'] == '08d215e7-81c7-4ad3-9c7d-1b27f0ed4bb5':
ccs_case_details = case
break
self.assertEqual(
{
"household_id": 'b6a55583-e07d-4367-ae5c-f3ff22f85271',
"person_id": "cc75916b-a71e-4c4d-a537-5c7bef95b12f",
"ccs_case_id": "08d215e7-81c7-4ad3-9c7d-1b27f0ed4bb5",
"married": 1,
"husband_name": "test_husband_name",
"husband_id": "b1e7f7d8-149e-4ffc-a876-2a70a469edbc",
"last_preg_year": 12,
"is_pregnant": 1,
"preg_reg_date": datetime.date(2017, 4, 12),
"tt_1": datetime.date(2017, 5, 1),
'tt_2': datetime.date(2017, 5, 2),
"tt_booster": datetime.date(2017, 5, 3),
"hb": 2,
"add": datetime.date(2017, 6, 1),
"last_preg_tt": None,
"lmp": datetime.date(2016, 10, 2)
},
ccs_case_details
)
|
from django.urls import path
from .consumers import StudentWaitingConsumer, WorkshopControlConsumer, StudentWorkshopConsumer
"""
Websocket Session UrlPatterns are used for websocket connections for users which do not have a user
e.g. a student which is using a code to access
"""
websocket_urlpatterns = [
path("waiting/", StudentWaitingConsumer),
path("student_workshop/", StudentWorkshopConsumer),
path("manage_workshop/<int:id>/", WorkshopControlConsumer)
]
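# A minimal sketch of how these patterns are typically wired into the ASGI
# application (assumes Channels 2, where consumer classes are passed directly;
# under Channels 3+ each entry would be e.g. StudentWaitingConsumer.as_asgi()):
from channels.routing import ProtocolTypeRouter, URLRouter
from channels.sessions import SessionMiddlewareStack

application = ProtocolTypeRouter({
    # HTTP falls back to Django's regular handling by default.
    "websocket": SessionMiddlewareStack(
        URLRouter(websocket_urlpatterns)
    ),
})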
|
# -*- coding: utf-8 -*-
"""
.. module:: skimpy
:platform: Unix, Windows
:synopsis: Simple Kinetic Models in Python
.. moduleauthor:: SKiMPy team
[---------]
Copyright 2017 Laboratory of Computational Systems Biotechnology (LCSB),
Ecole Polytechnique Federale de Lausanne (EPFL), Switzerland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pytfa.io.json import load_json_model
from skimpy.io.yaml import load_yaml_model
from skimpy.analysis.oracle.load_pytfa_solution import load_fluxes, \
load_concentrations, load_equilibrium_constants
from skimpy.sampling.simple_parameter_sampler import SimpleParameterSampler
from skimpy.core import *
from skimpy.mechanisms import *
from scipy.linalg import eigvals as eigenvalues
from sympy import Symbol
from skimpy.core.parameters import ParameterValues,ParameterValuePopulation
import pandas as pd
import numpy as np
import pickle
from skimpy.core.parameters import load_parameter_population
from sys import argv
#sys.path.append("..")
NCPU = 36
CONCENTRATION_SCALING = 1e6  # 1 mol to 1 µmol
TIME_SCALING = 1  # 1 hr
# Parameters of the E. coli cell
DENSITY = 1105  # g/L
GDW_GWW_RATIO = 0.38  # g dry weight per g wet weight (~62% water)
exp_id = 'fdp1'
path_to_kmodel = '../models/kinetic/kin_varma_fdp1_curated.yml'
path_to_tmodel = f'../models/thermo/varma_{exp_id}'
path_to_samples = f'../steady_state_samples/samples_{exp_id}.csv'
path_to_param_names_km = '../Models/parameter_names_km.pkl'
with open(path_to_param_names_km, 'rb') as input_file:
parameter_names_km = pickle.load(input_file)
tmodel = load_json_model(path_to_tmodel)
kmodel = load_yaml_model(path_to_kmodel)
kmodel.prepare()
kmodel.compile_jacobian(sim_type=QSSA)
print('kmodel compiled')
# Load gan parameters and names as dataframe
#path_to_GAN_parameters = f'../gray_box_data/{exp_id}/sample_best.npy'
#path_to_max_eig = f'../gray_box_data/{exp_id}/sample_best_max_eig.csv'
# Load ORACLE parameters
path_to_ORACLE_parameters = f'../gan_input/{exp_id}/all_km_{exp_id}.npy'
path_to_max_eig = f'../../../../skimpy/projects/kinvarma/small/kinetics/output/maximal_eigenvalues_{exp_id}.csv'
param = np.load(path_to_ORACLE_parameters)
eig = pd.read_csv(path_to_max_eig).iloc[:,1].values
idx_r = np.where(eig<=-9)[0]
idx_r = np.random.choice(idx_r, 1000)
param = param[idx_r,:]
#param = np.exp(param) # GAN parameters are generated in log scale
parameter_set = pd.DataFrame(param)
parameter_set.columns = parameter_names_km
##########################
# to load .hdf5 file though skimpy
'''
from skimpy.core.parameters import load_parameter_population
path_to_parameters = '/home/skimpy/work/Models_2/parameters_sample_id_0.hdf5'
param_pop = load_parameter_population(path_to_parameters)
'''
# Load steady-state fluxes and concentrations
samples = pd.read_csv(path_to_samples, header=0, index_col=0).iloc[0,0:]
flux_series = load_fluxes(samples, tmodel, kmodel,
density=DENSITY,
ratio_gdw_gww=GDW_GWW_RATIO,
concentration_scaling=CONCENTRATION_SCALING,
time_scaling=TIME_SCALING)
conc_series = load_concentrations(samples, tmodel, kmodel,
concentration_scaling=CONCENTRATION_SCALING)
# Fetch equilibrium constants
k_eq = load_equilibrium_constants(samples, tmodel, kmodel,
concentration_scaling=CONCENTRATION_SCALING,
in_place=True)
symbolic_concentrations_dict = {Symbol(k):v for k,v in conc_series.items()}
sampling_parameters = SimpleParameterSampler.Parameters(n_samples=1)
sampler = SimpleParameterSampler(sampling_parameters)
sampler._compile_sampling_functions(kmodel, symbolic_concentrations_dict, [])
model_param = kmodel.parameters
#idx_to_solve = np.random.randint(0,len(parameter_set.index),10)
stable_percent = 0
store_max_J = []
# to iterate over .hdf5 file sets
# for j in param_pop._index:
param_pop = []
for j in range(len(parameter_set.index)):
    if j % 100 == 0:
        # Progress log (stable_percent is never updated in this script, so the
        # reported percentage stays at 0).
        print(f'curr. set processed : {j}, stable % : {stable_percent*100/(j+0.001)}')
    # Load the j-th Km sample on top of the equilibrium constants.
    param_val = parameter_set.loc[j]
    param_val = ParameterValues(param_val, kmodel)
    kmodel.parameters = k_eq
    kmodel.parameters = param_val
    parameter_sample = {v.symbol: v.value for k, v in kmodel.parameters.items()}
    # Set all vmax/flux parameters to 1.
    # TODO Generalize into Flux and Saturation parameters
    for this_reaction in kmodel.reactions.values():
        vmax_param = this_reaction.parameters.vmax_forward
        parameter_sample[vmax_param.symbol] = 1
    # Calculate the Vmax's from the steady-state fluxes
    kmodel.flux_parameter_function(
        kmodel,
        parameter_sample,
        symbolic_concentrations_dict,
        flux_series
    )
    # Add the steady-state concentrations that double as model parameters.
    for c in conc_series.index:
        if c in model_param:
            c_sym = kmodel.parameters[c].symbol
            parameter_sample[c_sym] = conc_series[c]
    this_param_sample = ParameterValues(parameter_sample, kmodel)
    param_pop.append(this_param_sample)
param_pop2 = ParameterValuePopulation(param_pop, kmodel = kmodel)
param_pop2.save(path_to_ORACLE_parameters.replace('npy', 'hdf5'))
print(path_to_ORACLE_parameters.replace('npy', 'hdf5'))
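# Optional sanity check (a minimal sketch, not part of the original pipeline):
# re-load the population that was just written with load_parameter_population,
# as in the commented .hdf5 example above, and report how many parameter sets
# it contains using the same _index attribute referenced there.
reloaded = load_parameter_population(path_to_ORACLE_parameters.replace('npy', 'hdf5'))
print(f'parameter sets re-loaded: {len(reloaded._index)}')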
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mindinsight_summary.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from . import mindinsight_anf_ir_pb2 as mindinsight__anf__ir__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mindinsight_summary.proto',
package='mindinsight',
syntax='proto2',
serialized_options=b'\370\001\001',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x19mindinsight_summary.proto\x12\x0bmindinsight\x1a\x18mindinsight_anf_ir.proto\"\xc3\x01\n\x05\x45vent\x12\x11\n\twall_time\x18\x01 \x02(\x01\x12\x0c\n\x04step\x18\x02 \x01(\x03\x12\x11\n\x07version\x18\x03 \x01(\tH\x00\x12,\n\tgraph_def\x18\x04 \x01(\x0b\x32\x17.mindinsight.GraphProtoH\x00\x12\'\n\x07summary\x18\x05 \x01(\x0b\x32\x14.mindinsight.SummaryH\x00\x12\'\n\x07\x65xplain\x18\x06 \x01(\x0b\x32\x14.mindinsight.ExplainH\x00\x42\x06\n\x04what\"\x82\x04\n\rLossLandscape\x12\x33\n\tlandscape\x18\x01 \x01(\x0b\x32 .mindinsight.LossLandscape.Point\x12\x36\n\tloss_path\x18\x02 \x01(\x0b\x32#.mindinsight.LossLandscape.LossPath\x12\x35\n\x08metadata\x18\x03 \x01(\x0b\x32#.mindinsight.LossLandscape.Metadata\x12;\n\x11\x63onvergence_point\x18\x04 \x01(\x0b\x32 .mindinsight.LossLandscape.Point\x1av\n\x05Point\x12#\n\x01x\x18\x01 \x01(\x0b\x32\x18.mindinsight.TensorProto\x12#\n\x01y\x18\x02 \x01(\x0b\x32\x18.mindinsight.TensorProto\x12#\n\x01z\x18\x03 \x01(\x0b\x32\x18.mindinsight.TensorProto\x1aO\n\x08LossPath\x12\x11\n\tintervals\x18\x01 \x03(\x05\x12\x30\n\x06points\x18\x02 \x01(\x0b\x32 .mindinsight.LossLandscape.Point\x1aG\n\x08Metadata\x12\x15\n\rdecomposition\x18\x01 \x01(\t\x12\x0c\n\x04unit\x18\x02 \x01(\t\x12\x16\n\x0estep_per_epoch\x18\x03 \x01(\x05\"\xf6\x04\n\x07Summary\x12)\n\x05value\x18\x01 \x03(\x0b\x32\x1a.mindinsight.Summary.Value\x1aQ\n\x05Image\x12\x0e\n\x06height\x18\x01 \x02(\x05\x12\r\n\x05width\x18\x02 \x02(\x05\x12\x12\n\ncolorspace\x18\x03 \x02(\x05\x12\x15\n\rencoded_image\x18\x04 \x02(\x0c\x1a\xf0\x01\n\tHistogram\x12\x36\n\x07\x62uckets\x18\x01 \x03(\x0b\x32%.mindinsight.Summary.Histogram.bucket\x12\x11\n\tnan_count\x18\x02 \x01(\x03\x12\x15\n\rpos_inf_count\x18\x03 \x01(\x03\x12\x15\n\rneg_inf_count\x18\x04 \x01(\x03\x12\x0b\n\x03max\x18\x05 \x01(\x01\x12\x0b\n\x03min\x18\x06 \x01(\x01\x12\x0b\n\x03sum\x18\x07 \x01(\x01\x12\r\n\x05\x63ount\x18\x08 \x01(\x03\x1a\x34\n\x06\x62ucket\x12\x0c\n\x04left\x18\x01 \x02(\x01\x12\r\n\x05width\x18\x02 \x02(\x01\x12\r\n\x05\x63ount\x18\x03 \x02(\x03\x1a\xf9\x01\n\x05Value\x12\x0b\n\x03tag\x18\x01 \x02(\t\x12\x16\n\x0cscalar_value\x18\x03 \x01(\x02H\x00\x12+\n\x05image\x18\x04 \x01(\x0b\x32\x1a.mindinsight.Summary.ImageH\x00\x12*\n\x06tensor\x18\x08 \x01(\x0b\x32\x18.mindinsight.TensorProtoH\x00\x12\x33\n\thistogram\x18\t \x01(\x0b\x32\x1e.mindinsight.Summary.HistogramH\x00\x12\x34\n\x0eloss_landscape\x18\n \x01(\x0b\x32\x1a.mindinsight.LossLandscapeH\x00\x42\x07\n\x05value\"\xeb\x07\n\x07\x45xplain\x12\x11\n\tsample_id\x18\x01 \x01(\x05\x12\x12\n\nimage_path\x18\x02 \x01(\t\x12\x1a\n\x12ground_truth_label\x18\x03 \x03(\x05\x12\x31\n\tinference\x18\x04 \x01(\x0b\x32\x1e.mindinsight.Explain.Inference\x12\x35\n\x0b\x65xplanation\x18\x05 \x03(\x0b\x32 .mindinsight.Explain.Explanation\x12\x31\n\tbenchmark\x18\x06 \x03(\x0b\x32\x1e.mindinsight.Explain.Benchmark\x12/\n\x08metadata\x18\x07 \x01(\x0b\x32\x1d.mindinsight.Explain.Metadata\x12\x0e\n\x06status\x18\x08 \x01(\t\x12%\n\x03hoc\x18\t \x03(\x0b\x32\x18.mindinsight.Explain.Hoc\x1a\x9c\x02\n\tInference\x12\x19\n\x11ground_truth_prob\x18\x01 \x03(\x02\x12\x17\n\x0fpredicted_label\x18\x02 \x03(\x05\x12\x16\n\x0epredicted_prob\x18\x03 \x03(\x02\x12\x1c\n\x14ground_truth_prob_sd\x18\x04 \x03(\x02\x12#\n\x1bground_truth_prob_itl95_low\x18\x05 \x03(\x02\x12\"\n\x1aground_truth_prob_itl95_hi\x18\x06 \x03(\x02\x12\x19\n\x11predicted_prob_sd\x18\x07 \x03(\x02\x12 \n\x18predicted_prob_itl95_low\x18\x08 \x03(\x02\x12\x1f\n\x17predicted_prob_itl95_hi\x18\t 
\x03(\x02\x1aJ\n\x0b\x45xplanation\x12\x16\n\x0e\x65xplain_method\x18\x01 \x01(\t\x12\r\n\x05label\x18\x02 \x01(\x05\x12\x14\n\x0cheatmap_path\x18\x03 \x01(\t\x1ag\n\tBenchmark\x12\x18\n\x10\x62\x65nchmark_method\x18\x01 \x01(\t\x12\x16\n\x0e\x65xplain_method\x18\x02 \x01(\t\x12\x13\n\x0btotal_score\x18\x03 \x01(\x02\x12\x13\n\x0blabel_score\x18\x04 \x03(\x02\x1aK\n\x08Metadata\x12\r\n\x05label\x18\x01 \x03(\t\x12\x16\n\x0e\x65xplain_method\x18\x02 \x03(\t\x12\x18\n\x10\x62\x65nchmark_method\x18\x03 \x03(\t\x1a%\n\x08HocLayer\x12\x0c\n\x04prob\x18\x01 \x01(\x02\x12\x0b\n\x03\x62ox\x18\x02 \x03(\x05\x1aP\n\x03Hoc\x12\r\n\x05label\x18\x01 \x01(\x05\x12\x0c\n\x04mask\x18\x02 \x01(\t\x12,\n\x05layer\x18\x03 \x03(\x0b\x32\x1d.mindinsight.Explain.HocLayerB\x03\xf8\x01\x01'
,
dependencies=[mindinsight__anf__ir__pb2.DESCRIPTOR,])
_EVENT = _descriptor.Descriptor(
name='Event',
full_name='mindinsight.Event',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='wall_time', full_name='mindinsight.Event.wall_time', index=0,
number=1, type=1, cpp_type=5, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='step', full_name='mindinsight.Event.step', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='version', full_name='mindinsight.Event.version', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='graph_def', full_name='mindinsight.Event.graph_def', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='summary', full_name='mindinsight.Event.summary', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='explain', full_name='mindinsight.Event.explain', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='what', full_name='mindinsight.Event.what',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=69,
serialized_end=264,
)
_LOSSLANDSCAPE_POINT = _descriptor.Descriptor(
name='Point',
full_name='mindinsight.LossLandscape.Point',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='x', full_name='mindinsight.LossLandscape.Point.x', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='y', full_name='mindinsight.LossLandscape.Point.y', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='z', full_name='mindinsight.LossLandscape.Point.z', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=509,
serialized_end=627,
)
_LOSSLANDSCAPE_LOSSPATH = _descriptor.Descriptor(
name='LossPath',
full_name='mindinsight.LossLandscape.LossPath',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='intervals', full_name='mindinsight.LossLandscape.LossPath.intervals', index=0,
number=1, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='points', full_name='mindinsight.LossLandscape.LossPath.points', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=629,
serialized_end=708,
)
_LOSSLANDSCAPE_METADATA = _descriptor.Descriptor(
name='Metadata',
full_name='mindinsight.LossLandscape.Metadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='decomposition', full_name='mindinsight.LossLandscape.Metadata.decomposition', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='unit', full_name='mindinsight.LossLandscape.Metadata.unit', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='step_per_epoch', full_name='mindinsight.LossLandscape.Metadata.step_per_epoch', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=710,
serialized_end=781,
)
_LOSSLANDSCAPE = _descriptor.Descriptor(
name='LossLandscape',
full_name='mindinsight.LossLandscape',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='landscape', full_name='mindinsight.LossLandscape.landscape', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='loss_path', full_name='mindinsight.LossLandscape.loss_path', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metadata', full_name='mindinsight.LossLandscape.metadata', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='convergence_point', full_name='mindinsight.LossLandscape.convergence_point', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_LOSSLANDSCAPE_POINT, _LOSSLANDSCAPE_LOSSPATH, _LOSSLANDSCAPE_METADATA, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=267,
serialized_end=781,
)
_SUMMARY_IMAGE = _descriptor.Descriptor(
name='Image',
full_name='mindinsight.Summary.Image',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='height', full_name='mindinsight.Summary.Image.height', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='width', full_name='mindinsight.Summary.Image.width', index=1,
number=2, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='colorspace', full_name='mindinsight.Summary.Image.colorspace', index=2,
number=3, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='encoded_image', full_name='mindinsight.Summary.Image.encoded_image', index=3,
number=4, type=12, cpp_type=9, label=2,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=838,
serialized_end=919,
)
_SUMMARY_HISTOGRAM_BUCKET = _descriptor.Descriptor(
name='bucket',
full_name='mindinsight.Summary.Histogram.bucket',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='left', full_name='mindinsight.Summary.Histogram.bucket.left', index=0,
number=1, type=1, cpp_type=5, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='width', full_name='mindinsight.Summary.Histogram.bucket.width', index=1,
number=2, type=1, cpp_type=5, label=2,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='count', full_name='mindinsight.Summary.Histogram.bucket.count', index=2,
number=3, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1110,
serialized_end=1162,
)
_SUMMARY_HISTOGRAM = _descriptor.Descriptor(
name='Histogram',
full_name='mindinsight.Summary.Histogram',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='buckets', full_name='mindinsight.Summary.Histogram.buckets', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='nan_count', full_name='mindinsight.Summary.Histogram.nan_count', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pos_inf_count', full_name='mindinsight.Summary.Histogram.pos_inf_count', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='neg_inf_count', full_name='mindinsight.Summary.Histogram.neg_inf_count', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max', full_name='mindinsight.Summary.Histogram.max', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='min', full_name='mindinsight.Summary.Histogram.min', index=5,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sum', full_name='mindinsight.Summary.Histogram.sum', index=6,
number=7, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='count', full_name='mindinsight.Summary.Histogram.count', index=7,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_SUMMARY_HISTOGRAM_BUCKET, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=922,
serialized_end=1162,
)
_SUMMARY_VALUE = _descriptor.Descriptor(
name='Value',
full_name='mindinsight.Summary.Value',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='tag', full_name='mindinsight.Summary.Value.tag', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='scalar_value', full_name='mindinsight.Summary.Value.scalar_value', index=1,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='image', full_name='mindinsight.Summary.Value.image', index=2,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='tensor', full_name='mindinsight.Summary.Value.tensor', index=3,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='histogram', full_name='mindinsight.Summary.Value.histogram', index=4,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='loss_landscape', full_name='mindinsight.Summary.Value.loss_landscape', index=5,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='value', full_name='mindinsight.Summary.Value.value',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=1165,
serialized_end=1414,
)
_SUMMARY = _descriptor.Descriptor(
name='Summary',
full_name='mindinsight.Summary',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='mindinsight.Summary.value', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_SUMMARY_IMAGE, _SUMMARY_HISTOGRAM, _SUMMARY_VALUE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=784,
serialized_end=1414,
)
_EXPLAIN_INFERENCE = _descriptor.Descriptor(
name='Inference',
full_name='mindinsight.Explain.Inference',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='ground_truth_prob', full_name='mindinsight.Explain.Inference.ground_truth_prob', index=0,
number=1, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='predicted_label', full_name='mindinsight.Explain.Inference.predicted_label', index=1,
number=2, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='predicted_prob', full_name='mindinsight.Explain.Inference.predicted_prob', index=2,
number=3, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='ground_truth_prob_sd', full_name='mindinsight.Explain.Inference.ground_truth_prob_sd', index=3,
number=4, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='ground_truth_prob_itl95_low', full_name='mindinsight.Explain.Inference.ground_truth_prob_itl95_low', index=4,
number=5, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='ground_truth_prob_itl95_hi', full_name='mindinsight.Explain.Inference.ground_truth_prob_itl95_hi', index=5,
number=6, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='predicted_prob_sd', full_name='mindinsight.Explain.Inference.predicted_prob_sd', index=6,
number=7, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='predicted_prob_itl95_low', full_name='mindinsight.Explain.Inference.predicted_prob_itl95_low', index=7,
number=8, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='predicted_prob_itl95_hi', full_name='mindinsight.Explain.Inference.predicted_prob_itl95_hi', index=8,
number=9, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1757,
serialized_end=2041,
)
_EXPLAIN_EXPLANATION = _descriptor.Descriptor(
name='Explanation',
full_name='mindinsight.Explain.Explanation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='explain_method', full_name='mindinsight.Explain.Explanation.explain_method', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='label', full_name='mindinsight.Explain.Explanation.label', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='heatmap_path', full_name='mindinsight.Explain.Explanation.heatmap_path', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2043,
serialized_end=2117,
)
_EXPLAIN_BENCHMARK = _descriptor.Descriptor(
name='Benchmark',
full_name='mindinsight.Explain.Benchmark',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='benchmark_method', full_name='mindinsight.Explain.Benchmark.benchmark_method', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='explain_method', full_name='mindinsight.Explain.Benchmark.explain_method', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='total_score', full_name='mindinsight.Explain.Benchmark.total_score', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='label_score', full_name='mindinsight.Explain.Benchmark.label_score', index=3,
number=4, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2119,
serialized_end=2222,
)
_EXPLAIN_METADATA = _descriptor.Descriptor(
name='Metadata',
full_name='mindinsight.Explain.Metadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='label', full_name='mindinsight.Explain.Metadata.label', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='explain_method', full_name='mindinsight.Explain.Metadata.explain_method', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='benchmark_method', full_name='mindinsight.Explain.Metadata.benchmark_method', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2224,
serialized_end=2299,
)
_EXPLAIN_HOCLAYER = _descriptor.Descriptor(
name='HocLayer',
full_name='mindinsight.Explain.HocLayer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='prob', full_name='mindinsight.Explain.HocLayer.prob', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='box', full_name='mindinsight.Explain.HocLayer.box', index=1,
number=2, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2301,
serialized_end=2338,
)
_EXPLAIN_HOC = _descriptor.Descriptor(
name='Hoc',
full_name='mindinsight.Explain.Hoc',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='label', full_name='mindinsight.Explain.Hoc.label', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='mask', full_name='mindinsight.Explain.Hoc.mask', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='layer', full_name='mindinsight.Explain.Hoc.layer', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2340,
serialized_end=2420,
)
_EXPLAIN = _descriptor.Descriptor(
name='Explain',
full_name='mindinsight.Explain',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='sample_id', full_name='mindinsight.Explain.sample_id', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='image_path', full_name='mindinsight.Explain.image_path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='ground_truth_label', full_name='mindinsight.Explain.ground_truth_label', index=2,
number=3, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='inference', full_name='mindinsight.Explain.inference', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='explanation', full_name='mindinsight.Explain.explanation', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='benchmark', full_name='mindinsight.Explain.benchmark', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metadata', full_name='mindinsight.Explain.metadata', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='status', full_name='mindinsight.Explain.status', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='hoc', full_name='mindinsight.Explain.hoc', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_EXPLAIN_INFERENCE, _EXPLAIN_EXPLANATION, _EXPLAIN_BENCHMARK, _EXPLAIN_METADATA, _EXPLAIN_HOCLAYER, _EXPLAIN_HOC, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1417,
serialized_end=2420,
)
_EVENT.fields_by_name['graph_def'].message_type = mindinsight__anf__ir__pb2._GRAPHPROTO
_EVENT.fields_by_name['summary'].message_type = _SUMMARY
_EVENT.fields_by_name['explain'].message_type = _EXPLAIN
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['version'])
_EVENT.fields_by_name['version'].containing_oneof = _EVENT.oneofs_by_name['what']
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['graph_def'])
_EVENT.fields_by_name['graph_def'].containing_oneof = _EVENT.oneofs_by_name['what']
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['summary'])
_EVENT.fields_by_name['summary'].containing_oneof = _EVENT.oneofs_by_name['what']
_EVENT.oneofs_by_name['what'].fields.append(
_EVENT.fields_by_name['explain'])
_EVENT.fields_by_name['explain'].containing_oneof = _EVENT.oneofs_by_name['what']
_LOSSLANDSCAPE_POINT.fields_by_name['x'].message_type = mindinsight__anf__ir__pb2._TENSORPROTO
_LOSSLANDSCAPE_POINT.fields_by_name['y'].message_type = mindinsight__anf__ir__pb2._TENSORPROTO
_LOSSLANDSCAPE_POINT.fields_by_name['z'].message_type = mindinsight__anf__ir__pb2._TENSORPROTO
_LOSSLANDSCAPE_POINT.containing_type = _LOSSLANDSCAPE
_LOSSLANDSCAPE_LOSSPATH.fields_by_name['points'].message_type = _LOSSLANDSCAPE_POINT
_LOSSLANDSCAPE_LOSSPATH.containing_type = _LOSSLANDSCAPE
_LOSSLANDSCAPE_METADATA.containing_type = _LOSSLANDSCAPE
_LOSSLANDSCAPE.fields_by_name['landscape'].message_type = _LOSSLANDSCAPE_POINT
_LOSSLANDSCAPE.fields_by_name['loss_path'].message_type = _LOSSLANDSCAPE_LOSSPATH
_LOSSLANDSCAPE.fields_by_name['metadata'].message_type = _LOSSLANDSCAPE_METADATA
_LOSSLANDSCAPE.fields_by_name['convergence_point'].message_type = _LOSSLANDSCAPE_POINT
_SUMMARY_IMAGE.containing_type = _SUMMARY
_SUMMARY_HISTOGRAM_BUCKET.containing_type = _SUMMARY_HISTOGRAM
_SUMMARY_HISTOGRAM.fields_by_name['buckets'].message_type = _SUMMARY_HISTOGRAM_BUCKET
_SUMMARY_HISTOGRAM.containing_type = _SUMMARY
_SUMMARY_VALUE.fields_by_name['image'].message_type = _SUMMARY_IMAGE
_SUMMARY_VALUE.fields_by_name['tensor'].message_type = mindinsight__anf__ir__pb2._TENSORPROTO
_SUMMARY_VALUE.fields_by_name['histogram'].message_type = _SUMMARY_HISTOGRAM
_SUMMARY_VALUE.fields_by_name['loss_landscape'].message_type = _LOSSLANDSCAPE
_SUMMARY_VALUE.containing_type = _SUMMARY
_SUMMARY_VALUE.oneofs_by_name['value'].fields.append(
_SUMMARY_VALUE.fields_by_name['scalar_value'])
_SUMMARY_VALUE.fields_by_name['scalar_value'].containing_oneof = _SUMMARY_VALUE.oneofs_by_name['value']
_SUMMARY_VALUE.oneofs_by_name['value'].fields.append(
_SUMMARY_VALUE.fields_by_name['image'])
_SUMMARY_VALUE.fields_by_name['image'].containing_oneof = _SUMMARY_VALUE.oneofs_by_name['value']
_SUMMARY_VALUE.oneofs_by_name['value'].fields.append(
_SUMMARY_VALUE.fields_by_name['tensor'])
_SUMMARY_VALUE.fields_by_name['tensor'].containing_oneof = _SUMMARY_VALUE.oneofs_by_name['value']
_SUMMARY_VALUE.oneofs_by_name['value'].fields.append(
_SUMMARY_VALUE.fields_by_name['histogram'])
_SUMMARY_VALUE.fields_by_name['histogram'].containing_oneof = _SUMMARY_VALUE.oneofs_by_name['value']
_SUMMARY_VALUE.oneofs_by_name['value'].fields.append(
_SUMMARY_VALUE.fields_by_name['loss_landscape'])
_SUMMARY_VALUE.fields_by_name['loss_landscape'].containing_oneof = _SUMMARY_VALUE.oneofs_by_name['value']
_SUMMARY.fields_by_name['value'].message_type = _SUMMARY_VALUE
_EXPLAIN_INFERENCE.containing_type = _EXPLAIN
_EXPLAIN_EXPLANATION.containing_type = _EXPLAIN
_EXPLAIN_BENCHMARK.containing_type = _EXPLAIN
_EXPLAIN_METADATA.containing_type = _EXPLAIN
_EXPLAIN_HOCLAYER.containing_type = _EXPLAIN
_EXPLAIN_HOC.fields_by_name['layer'].message_type = _EXPLAIN_HOCLAYER
_EXPLAIN_HOC.containing_type = _EXPLAIN
_EXPLAIN.fields_by_name['inference'].message_type = _EXPLAIN_INFERENCE
_EXPLAIN.fields_by_name['explanation'].message_type = _EXPLAIN_EXPLANATION
_EXPLAIN.fields_by_name['benchmark'].message_type = _EXPLAIN_BENCHMARK
_EXPLAIN.fields_by_name['metadata'].message_type = _EXPLAIN_METADATA
_EXPLAIN.fields_by_name['hoc'].message_type = _EXPLAIN_HOC
DESCRIPTOR.message_types_by_name['Event'] = _EVENT
DESCRIPTOR.message_types_by_name['LossLandscape'] = _LOSSLANDSCAPE
DESCRIPTOR.message_types_by_name['Summary'] = _SUMMARY
DESCRIPTOR.message_types_by_name['Explain'] = _EXPLAIN
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Event = _reflection.GeneratedProtocolMessageType('Event', (_message.Message,), {
'DESCRIPTOR' : _EVENT,
'__module__' : 'mindinsight_summary_pb2'
# @@protoc_insertion_point(class_scope:mindinsight.Event)
})
_sym_db.RegisterMessage(Event)
LossLandscape = _reflection.GeneratedProtocolMessageType('LossLandscape', (_message.Message,), {
'Point' : _reflection.GeneratedProtocolMessageType('Point', (_message.Message,), {
'DESCRIPTOR' : _LOSSLANDSCAPE_POINT,
'__module__' : 'mindinsight_summary_pb2'
# @@protoc_insertion_point(class_scope:mindinsight.LossLandscape.Point)
})
,
'LossPath' : _reflection.GeneratedProtocolMessageType('LossPath', (_message.Message,), {
'DESCRIPTOR' : _LOSSLANDSCAPE_LOSSPATH,
'__module__' : 'mindinsight_summary_pb2'
# @@protoc_insertion_point(class_scope:mindinsight.LossLandscape.LossPath)
})
,
'Metadata' : _reflection.GeneratedProtocolMessageType('Metadata', (_message.Message,), {
'DESCRIPTOR' : _LOSSLANDSCAPE_METADATA,
'__module__' : 'mindinsight_summary_pb2'
# @@protoc_insertion_point(class_scope:mindinsight.LossLandscape.Metadata)
})
,
'DESCRIPTOR' : _LOSSLANDSCAPE,
'__module__' : 'mindinsight_summary_pb2'
# @@protoc_insertion_point(class_scope:mindinsight.LossLandscape)
})
_sym_db.RegisterMessage(LossLandscape)
_sym_db.RegisterMessage(LossLandscape.Point)
_sym_db.RegisterMessage(LossLandscape.LossPath)
_sym_db.RegisterMessage(LossLandscape.Metadata)
Summary = _reflection.GeneratedProtocolMessageType('Summary', (_message.Message,), {
'Image' : _reflection.GeneratedProtocolMessageType('Image', (_message.Message,), {
'DESCRIPTOR' : _SUMMARY_IMAGE,
'__module__' : 'mindinsight_summary_pb2'
# @@protoc_insertion_point(class_scope:mindinsight.Summary.Image)
})
,
'Histogram' : _reflection.GeneratedProtocolMessageType('Histogram', (_message.Message,), {
'bucket' : _reflection.GeneratedProtocolMessageType('bucket', (_message.Message,), {
'DESCRIPTOR' : _SUMMARY_HISTOGRAM_BUCKET,
'__module__' : 'mindinsight_summary_pb2'
# @@protoc_insertion_point(class_scope:mindinsight.Summary.Histogram.bucket)
})
,
'DESCRIPTOR' : _SUMMARY_HISTOGRAM,
'__module__' : 'mindinsight_summary_pb2'
# @@protoc_insertion_point(class_scope:mindinsight.Summary.Histogram)
})
,
'Value' : _reflection.GeneratedProtocolMessageType('Value', (_message.Message,), {
'DESCRIPTOR' : _SUMMARY_VALUE,
'__module__' : 'mindinsight_summary_pb2'
# @@protoc_insertion_point(class_scope:mindinsight.Summary.Value)
})
,
'DESCRIPTOR' : _SUMMARY,
'__module__' : 'mindinsight_summary_pb2'
# @@protoc_insertion_point(class_scope:mindinsight.Summary)
})
_sym_db.RegisterMessage(Summary)
_sym_db.RegisterMessage(Summary.Image)
_sym_db.RegisterMessage(Summary.Histogram)
_sym_db.RegisterMessage(Summary.Histogram.bucket)
_sym_db.RegisterMessage(Summary.Value)
Explain = _reflection.GeneratedProtocolMessageType('Explain', (_message.Message,), {
'Inference' : _reflection.GeneratedProtocolMessageType('Inference', (_message.Message,), {
'DESCRIPTOR' : _EXPLAIN_INFERENCE,
'__module__' : 'mindinsight_summary_pb2'
# @@protoc_insertion_point(class_scope:mindinsight.Explain.Inference)
})
,
'Explanation' : _reflection.GeneratedProtocolMessageType('Explanation', (_message.Message,), {
'DESCRIPTOR' : _EXPLAIN_EXPLANATION,
'__module__' : 'mindinsight_summary_pb2'
# @@protoc_insertion_point(class_scope:mindinsight.Explain.Explanation)
})
,
'Benchmark' : _reflection.GeneratedProtocolMessageType('Benchmark', (_message.Message,), {
'DESCRIPTOR' : _EXPLAIN_BENCHMARK,
'__module__' : 'mindinsight_summary_pb2'
# @@protoc_insertion_point(class_scope:mindinsight.Explain.Benchmark)
})
,
'Metadata' : _reflection.GeneratedProtocolMessageType('Metadata', (_message.Message,), {
'DESCRIPTOR' : _EXPLAIN_METADATA,
'__module__' : 'mindinsight_summary_pb2'
# @@protoc_insertion_point(class_scope:mindinsight.Explain.Metadata)
})
,
'HocLayer' : _reflection.GeneratedProtocolMessageType('HocLayer', (_message.Message,), {
'DESCRIPTOR' : _EXPLAIN_HOCLAYER,
'__module__' : 'mindinsight_summary_pb2'
# @@protoc_insertion_point(class_scope:mindinsight.Explain.HocLayer)
})
,
'Hoc' : _reflection.GeneratedProtocolMessageType('Hoc', (_message.Message,), {
'DESCRIPTOR' : _EXPLAIN_HOC,
'__module__' : 'mindinsight_summary_pb2'
# @@protoc_insertion_point(class_scope:mindinsight.Explain.Hoc)
})
,
'DESCRIPTOR' : _EXPLAIN,
'__module__' : 'mindinsight_summary_pb2'
# @@protoc_insertion_point(class_scope:mindinsight.Explain)
})
_sym_db.RegisterMessage(Explain)
_sym_db.RegisterMessage(Explain.Inference)
_sym_db.RegisterMessage(Explain.Explanation)
_sym_db.RegisterMessage(Explain.Benchmark)
_sym_db.RegisterMessage(Explain.Metadata)
_sym_db.RegisterMessage(Explain.HocLayer)
_sym_db.RegisterMessage(Explain.Hoc)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
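# A minimal usage sketch (not part of the generated file): build an Event that
# carries a single scalar Summary.Value and round-trip it through
# serialization. Field names follow the descriptors defined above; the scalar
# tag and value are arbitrary illustrative data.
if __name__ == "__main__":
    event = Event()
    event.wall_time = 0.0          # required double
    event.step = 1
    value = event.summary.value.add()
    value.tag = "loss"
    value.scalar_value = 0.25
    data = event.SerializeToString()
    print(Event.FromString(data))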
|
from PyQt4.QtGui import *


class _ListItem(QListWidgetItem):
    """List item that carries an arbitrary value alongside its display text."""

    def __init__(self, value):
        super(_ListItem, self).__init__()
        self._value = value

    def get_value(self):
        return self._value


class BaseListWidget(QListWidget):
    """QListWidget wrapper that stores a (name, value) pair per item."""

    def __init__(self, parent=None):
        super(BaseListWidget, self).__init__(parent)

    def add_item(self, name, value):
        # Display `name` in the list while keeping `value` retrievable later.
        item = _ListItem(value)
        item.setText(name)
        self.addItem(item)

    def get_current_item(self):
        # Return the value of the selected item, or None if nothing is selected.
        if self.currentItem() is None:
            return None
        return self.currentItem().get_value()

    def remove_current_item(self):
        item = self.currentItem()
        if item:
            self.takeItem(self.row(item))
|
from flask import Flask, render_template, flash, url_for, redirect, request, session
from wtforms import Form, BooleanField, TextField, PasswordField, validators
from passlib.hash import sha256_crypt
from MySQLdb import escape_string as thwart
import gc
from functools import wraps
from content_management import Content
from db_connect import connection
APP_CONTENT = Content()
app = Flask(__name__)
def login_required(f):
@wraps(f)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return f(*args, **kwargs)
else:
flash("Please login.")
return redirect(url_for('login_page'))
return wrap
@app.route("/", methods = ["GET", "POST"])
def main():
error = ''
try:
c, conn = connection()
if request.method == "POST":
data = c.execute("SELECT * FROM users WHERE username = ('{0}')".format(thwart(request.form['username'])))
data = c.fetchone()[2]
if sha256_crypt.verify(request.form['password'], data):
session['logged_in'] = True
session['username'] = request.form['username']
flash("You are now logged in")
return redirect(url_for("dashboard"))
else:
error = "Invalid credentials, try again."
gc.collect()
return render_template("main.html", error = error)
except Exception as e:
flash(e)
error = "Invalid credentials, try again."
return render_template("main.html", error = error)
@app.route("/dashboard/", methods = ["GET", "POST"])
@login_required
def dashboard():
return render_template("dashboard.html", APP_CONTENT = APP_CONTENT)
@app.route("/about/")
def about_page():
return render_template("about.html", APP_CONTENT = APP_CONTENT)
@app.route('/login/', methods=["GET","POST"])
def login_page():
error = ''
try:
c, conn = connection()
if request.method == "POST":
data = c.execute("SELECT * FROM users WHERE username = ('{0}')".format(thwart(request.form['username'])))
data = c.fetchone()[2]
if sha256_crypt.verify(request.form['password'], data):
session['logged_in'] = True
session['username'] = request.form['username']
flash("You are now logged in")
return redirect(url_for("dashboard"))
else:
error = "Invalid credentials, try again."
gc.collect()
return render_template("login.html", error = error)
except Exception as e:
#flash(e)
error = "Invalid credentials, try again."
return render_template("login.html", error = error)
class RegistrationForm(Form):
username = TextField('Username', [validators.Length(min=4, max=20)])
email = TextField('Email Address', [validators.Length(min=6, max=50)])
password = PasswordField('New Password', [
validators.Required(),
validators.EqualTo('confirm', message='Passwords must match')
])
confirm = PasswordField('Repeat Password')
accept_tos = BooleanField('I accept the Terms of Service and Privacy Notice', [validators.Required()])
@app.route('/register/', methods=["GET","POST"])
def register_page():
try:
form = RegistrationForm(request.form)
if request.method == "POST" and form.validate():
username = form.username.data
email = form.email.data
password = sha256_crypt.encrypt((str(form.password.data)))
c, conn = connection()
x = c.execute("SELECT * FROM users WHERE username = ('{0}')".format(thwart(username)))
if int(x) > 0:
flash("That username is already taken, please choose another.")
return render_template('register.html', form = form)
else:
c.execute("INSERT INTO users (username, password, email, tracking) VALUES ('{0}','{1}','{2}','{3}')".format(thwart(username), thwart(password), thwart(email), thwart("/dashboard/")))
conn.commit()
flash("Thanks for registering!")
c.close()
conn.close()
gc.collect()
session['logged_in'] = True
session['username'] = username
return redirect(url_for('dashboard'))
return render_template("register.html", form = form)
except Exception as e:
return(str(e))
@app.route("/logout/")
@login_required
def logout():
session.clear()
gc.collect()
return redirect(url_for('main'))
@app.errorhandler(404)
def page_not_found(e):
return render_template("404.html")
@app.errorhandler(405)
def method_not_allowed(e):
return render_template("405.html")
@app.errorhandler(500)
def int_server_error(e):
return render_template("500.html", error = e)
if __name__ == "__main__":
app.run()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import configparser
class Config:
config_parser = configparser.ConfigParser()
default_config_path = 'config/config.ini'
def __init__(self, config_path = default_config_path):
"""
Constructor for Config
:param config_path:
Configuration File Path
"""
self.config_parser.read(config_path)
@classmethod
def get_config_dimensions(cls):
"""
        Return a tuple with the ideal image width and height
        :return: (width, height) tuple of the ideal dimensions
"""
return int(cls.config_parser['ideal_image_dimensions']['width']), int(cls.config_parser['ideal_image_dimensions']['height'])
@classmethod
def get_images_directory_path(cls):
"""
Return directory path where all the images are present
:return: images' directory path
"""
return str(cls.config_parser['directory_path']['path'])
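# Minimal usage sketch (assumes a config/config.ini file with the sections read
# above, i.e. [ideal_image_dimensions] width/height and [directory_path] path;
# the file name below is only illustrative):
#
#     cfg = Config('config/config.ini')
#     width, height = Config.get_config_dimensions()
#     images_dir = Config.get_images_directory_path()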
|
print(f"Loading {__file__}...")
import numpy as np
from ophyd import (
EpicsSignal,
EpicsSignalRO,
EpicsMotor,
Device,
Signal,
PseudoPositioner,
PseudoSingle,
)
from ophyd.utils.epics_pvs import set_and_wait
from ophyd.pseudopos import pseudo_position_argument, real_position_argument
from ophyd.positioner import PositionerBase
from ophyd import Component as Cpt
from scipy.interpolate import InterpolatedUnivariateSpline
import functools
import math
from pathlib import Path
"""
For organization, this file will define objects for the machine. This will
include the undulator (and energy axis) and front end slits.
"""
# Constants
ANG_OVER_EV = 12.3984
# Signals
ring_current = EpicsSignalRO("SR:C03-BI{DCCT:1}I:Real-I", name="ring_current")
# Setup undulator
class InsertionDevice(Device, PositionerBase):
gap = Cpt(EpicsMotor, "-Ax:Gap}-Mtr", kind="hinted", name="")
brake = Cpt(
EpicsSignal,
"}BrakesDisengaged-Sts",
write_pv="}BrakesDisengaged-SP",
kind="omitted",
add_prefix=("read_pv", "write_pv", "suffix"),
)
# These are debugging values, not even connected to by default
elev = Cpt(EpicsSignalRO, "-Ax:Elev}-Mtr.RBV", kind="omitted")
taper = Cpt(EpicsSignalRO, "-Ax:Taper}-Mtr.RBV", kind="omitted")
tilt = Cpt(EpicsSignalRO, "-Ax:Tilt}-Mtr.RBV", kind="omitted")
elev_u = Cpt(EpicsSignalRO, "-Ax:E}-Mtr.RBV", kind="omitted")
def set(self, *args, **kwargs):
set_and_wait(self.brake, 1)
return self.gap.set(*args, **kwargs)
def stop(self, *, success=False):
return self.gap.stop(success=success)
@property
def settle_time(self):
return self.gap.settle_time
@settle_time.setter
def settle_time(self, val):
self.gap.settle_time = val
@property
def timeout(self):
return self.gap.timeout
@timeout.setter
def timeout(self, val):
self.gap.timeout = val
@property
def egu(self):
return self.gap.egu
@property
def limits(self):
return self.gap.limits
@property
def low_limit(self):
return self.gap.low_limit
@property
def high_limit(self):
return self.gap.high_limit
def move(self, *args, moved_cb=None, **kwargs):
if moved_cb is not None:
@functools.wraps(moved_cb)
def inner_move(status, obj=None):
if obj is not None:
obj = self
return moved_cb(status, obj=obj)
else:
inner_move = None
return self.set(*args, moved_cb=inner_move, **kwargs)
@property
def position(self):
return self.gap.position
@property
def moving(self):
return self.gap.moving
def subscribe(self, callback, *args, **kwargs):
@functools.wraps(callback)
def inner(obj, **kwargs):
return callback(obj=self, **kwargs)
return self.gap.subscribe(inner, *args, **kwargs)
# Setup energy axis
class Energy(PseudoPositioner):
# Synthetic axis
energy = Cpt(PseudoSingle)
# Real motors
u_gap = Cpt(InsertionDevice, "SR:C5-ID:G1{IVU21:1")
bragg = Cpt(
EpicsMotor,
"XF:05IDA-OP:1{Mono:HDCM-Ax:P}Mtr",
add_prefix=(),
read_attrs=["user_readback"],
)
c2_x = Cpt(
EpicsMotor,
"XF:05IDA-OP:1{Mono:HDCM-Ax:X2}Mtr",
add_prefix=(),
read_attrs=["user_readback"],
)
epics_d_spacing = EpicsSignal("XF:05IDA-CT{IOC:Status01}DCMDspacing.VAL")
epics_bragg_offset = EpicsSignal("XF:05IDA-CT{IOC:Status01}BraggOffset.VAL")
# Motor enable flags
move_u_gap = Cpt(Signal, None, add_prefix=(), value=True)
move_c2_x = Cpt(Signal, None, add_prefix=(), value=True)
harmonic = Cpt(Signal, None, add_prefix=(), value=0, kind="config")
selected_harmonic = Cpt(Signal, None, add_prefix=(), value=0)
# Experimental
detune = Cpt(Signal, None, add_prefix=(), value=0)
def energy_to_positions(self, target_energy, undulator_harmonic, u_detune):
"""Compute undulator and mono positions given a target energy
Paramaters
----------
target_energy : float
Target energy in keV
undulator_harmonic : int, optional
The harmonic in the undulator to use
uv_mistune : float, optional
Amount to 'mistune' the undulator in keV. Will settings
such that the peak of the undulator spectrum will be at
`target_energy + uv_mistune`.
Returns
-------
bragg : float
The angle to set the monocromotor
"""
# Set up constants
Xoffset = self._xoffset
d_111 = self._d_111
delta_bragg = self._delta_bragg
C2Xcal = self._c2xcal
T2cal = self._t2cal
etoulookup = self.etoulookup
# Calculate Bragg RBV
BraggRBV = (
np.arcsin((ANG_OVER_EV / target_energy) / (2 * d_111)) / np.pi * 180
- delta_bragg
)
# Calculate C2X
Bragg = BraggRBV + delta_bragg
T2 = Xoffset * np.sin(Bragg * np.pi / 180) / np.sin(2 * Bragg * np.pi / 180)
dT2 = T2 - T2cal
C2X = C2Xcal - dT2
# Calculate undulator gap
        # TODO make this more sophisticated to stay a fixed distance
# off the peak of the undulator energy
ugap = float(
etoulookup((target_energy + u_detune) / undulator_harmonic)
) # in mm
ugap *= 1000 # convert to um
return BraggRBV, C2X, ugap
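    # Worked sketch of the Bragg-angle part of the calculation above (numbers are
    # illustrative, not calibration values): with ANG_OVER_EV = 12.3984,
    # d_111 = 3.1287 and target_energy = 10 keV,
    #   arcsin((12.3984 / 10) / (2 * 3.1287)) * 180 / pi  ~  11.43 degrees,
    # from which delta_bragg is subtracted to obtain the set point.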
    def undulator_energy(self, harmonic=3):
        """Return the current energy peak of the undulator at the given harmonic
        Parameters
        ----------
        harmonic : int, optional
            The harmonic to use, defaults to 3
        """
        ugap = self.u_gap.get().readback
        utoelookup = self.utoelookup
        fundamental = float(utoelookup(ugap))
        energy = fundamental * harmonic
        return energy
def __init__(
self,
*args,
xoffset=None,
d_111=None,
delta_bragg=None,
C2Xcal=None,
T2cal=None,
**kwargs,
):
self._xoffset = xoffset
self._d_111 = d_111
self._delta_bragg = delta_bragg
self._c2xcal = C2Xcal
self._t2cal = T2cal
super().__init__(*args, **kwargs)
# calib_path = '/nfs/xf05id1/UndulatorCalibration/'
calib_path = Path(__file__).parent
# calib_file = "../data/SRXUgapCalibration20170612.txt"
calib_file = "../data/20210912_SRXUgapCalibration.txt"
# with open(os.path.join(calib_path, calib_file), 'r') as f:
with open(calib_path / calib_file, "r") as f:
next(f)
uposlistIn = []
elistIn = []
for line in f:
num = [float(x) for x in line.split()]
uposlistIn.append(num[0])
elistIn.append(num[1])
self.etoulookup = InterpolatedUnivariateSpline(elistIn, uposlistIn)
self.utoelookup = InterpolatedUnivariateSpline(uposlistIn, elistIn)
self.u_gap.gap.user_readback.name = self.u_gap.name
def crystal_gap(self):
"""
Return the current physical gap between first and second crystals
"""
C2X = self.c2_x.get().user_readback
bragg = self.bragg.get().user_readback
T2cal = self._t2cal
delta_bragg = self._delta_bragg
d_111 = self._d_111
c2x_cal = self._c2xcal
Bragg = np.pi / 180 * (bragg + delta_bragg)
dT2 = c2x_cal - C2X
T2 = dT2 + T2cal
XoffsetVal = T2 / (np.sin(Bragg) / np.sin(2 * Bragg))
return XoffsetVal
@pseudo_position_argument
def forward(self, p_pos):
energy = p_pos.energy
harmonic = int(self.harmonic.get())
if harmonic < 0 or ((harmonic % 2) == 0 and harmonic != 0):
raise RuntimeError(
f"The harmonic must be 0 or odd and positive, you set {harmonic}. "
"Set `energy.harmonic` to a positive odd integer or 0."
)
detune = self.detune.get()
if energy <= 4.4:
raise ValueError(
"The energy you entered is too low ({} keV). "
"Minimum energy = 4.4 keV".format(energy)
)
if energy > 25.0:
if (energy < 4400.0) or (energy > 25000.0):
# Energy is invalid
raise ValueError(
"The requested photon energy is invalid ({} keV). "
"Values must be in the range of 4.4 - 25 keV".format(energy)
)
else:
# Energy is in eV
energy = energy / 1000.0
# harmonic cannot be None, it is an undesired datatype
# Previously, we were finding the harmonic with the highest flux, this
# was always done during energy change since harmonic was returned to
# None
# Here, we are programming it in
# if harmonic is None:
if harmonic < 3:
harmonic = 3
# Choose the right harmonic
braggcal, c2xcal, ugapcal = self.energy_to_positions(
energy, harmonic, detune
)
# Try higher harmonics until the required gap is too small
while True:
braggcal, c2xcal, ugapcal = self.energy_to_positions(
energy, harmonic + 2, detune
)
if ugapcal < self.u_gap.low_limit:
break
harmonic += 2
self.selected_harmonic.put(harmonic)
# Compute where we would move everything to in a perfect world
bragg, c2_x, u_gap = self.energy_to_positions(energy, harmonic, detune)
# Sometimes move the crystal gap
if not self.move_c2_x.get():
c2_x = self.c2_x.position
# Sometimes move the undulator
if not self.move_u_gap.get():
u_gap = self.u_gap.position
return self.RealPosition(bragg=bragg, c2_x=c2_x, u_gap=u_gap)
@real_position_argument
def inverse(self, r_pos):
bragg = r_pos.bragg
e = ANG_OVER_EV / (
2 * self._d_111 * math.sin(math.radians(bragg + self._delta_bragg))
)
return self.PseudoPosition(energy=float(e))
@pseudo_position_argument
def set(self, position):
return super().set([float(_) for _ in position])
def synch_with_epics(self):
self.epics_d_spacing.put(self._d_111)
self.epics_bragg_offset.put(self._delta_bragg)
def retune_undulator(self):
self.detune.put(0.0)
self.move(self.energy.get()[0])
# Recalibrated 2021-09-08
cal_data_2021cycle3 = {
"d_111": 3.128666195523328,
"delta_bragg": 0.2167556062528753,
"C2Xcal": 3.6,
"T2cal": 15.0347755916,
"xoffset": 24.65,
}
energy = Energy(prefix="", name="energy", **cal_data_2021cycle3)
energy.wait_for_connection()
energy.synch_with_epics()
energy.value = 1.0
# Setup front end slits (primary slits)
class SRXSlitsFE(Device):
top = Cpt(EpicsMotor, "3-Ax:T}Mtr")
bot = Cpt(EpicsMotor, "4-Ax:B}Mtr")
inb = Cpt(EpicsMotor, "3-Ax:I}Mtr")
out = Cpt(EpicsMotor, "4-Ax:O}Mtr")
fe = SRXSlitsFE("FE:C05A-OP{Slt:", name="fe")
|
#
# File:
# labelbar.py
#
# Synopsis:
# Demonstrates labelbars and labelbar resource settings.
#
# Category:
# Labelbar
#
# Author:
# Mary Haley
#
# Date of initial publication:
# March, 2005
#
# Description:
# This example illustrates the effects of setting values
# for various labelbar resources.
#
# Effects illustrated:
# Labelbar fonts, sizes, orientation, alignment, and fill patterns.
#
# Output:
# Four labelbar visualizations are produced showing:
# 1.) default settings
# 2.) changing the font and alignment
# 3.) changing the size, orientation and fill pattern
# 4.) using lots of user-specified labels
#
# Notes:
#
#
# Import Ngl support functions.
#
from __future__ import print_function
import Ngl
wkres = Ngl.Resources()
wkres.wkColorMap = "default"
wks_type = "png"
wks = Ngl.open_wks(wks_type,"labelbar",wkres)
labels = ["One","Two","Three","Four","Five","Six"]
#
# Generate a labelbar with the default settings.
#
lb = Ngl.labelbar_ndc(wks,5,labels,0.3,0.9)
Ngl.frame(wks)
#
# Change the font and alignment of the labels.
#
rlist = Ngl.Resources()
rlist.lbLabelFont = "Times-Bold"
rlist.lbLabelAlignment = "InteriorEdges"
lb = Ngl.labelbar_ndc(wks,5,labels,0.3,0.9,rlist)
Ngl.frame(wks)
#
# Change the size and orientation, and set the fill to solid fill.
#
del rlist.lbLabelFont
rlist.vpWidthF = 0.85
rlist.vpHeightF = 0.20
rlist.lbMonoFillPattern = 21
rlist.lbFillPattern = "SolidFill"
rlist.lbOrientation = "Horizontal"
rlist.lbLabelAlignment = "ExternalEdges"
lb = Ngl.labelbar_ndc(wks,5,labels,0.1,0.2,rlist)
Ngl.frame(wks)
#
# Do a lot of labels. Notice how the labelbar labels are automatically
# adjusted and not every one is shown. To turn this off, set
# lbAutoStride to False, but then your labels will run into each other.
# You could set lbLabelAngleF to -45 or 90 to get slanted labels.
#
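# For example (not drawn in this script), the stride and label angle could be
# controlled with:
#   rlist.lbAutoStride  = False
#   rlist.lbLabelAngleF = -45.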
lotta_labels = ["AL","AR","AZ","CA","CO","CT","DE","FL","GA","IA","ID","IL",\
"IN","KS","KY","LA","MA","MD","ME","MI","MN","MO","MS","MT",\
"NC","ND","NE","NH","NJ","NM","NV","NY","OH","OK","OR","PA",\
"RI","SC","SD","TN","TX","UT","VA","VT","WA","WI","WV","WY"]
rlist.lbLabelAlignment = "InteriorEdges"
rlist.lbLabelFontHeightF = 0.014
lb = Ngl.labelbar_ndc(wks,len(lotta_labels),lotta_labels,0.1,0.5,rlist)
Ngl.frame(wks)
Ngl.end()
|
import os
import re
_shaders_dir = os.path.dirname(os.path.realpath(__file__))
# from opengl language specification:
# "Each number sign (#) can be preceded in its line only by spaces or horizontal tabs.
# It may also be followed by spaces and horizontal tabs, preceding the directive.
# Each directive is terminated by a new-line."
_include_pragma_regex = re.compile(r'^[ \t]*#[ \t]*pragma[ \t]+include[ \t]+"(?P<include_path>.*?)"', flags=re.MULTILINE)
# TODO insert #line directives before/after includes? (specifically "#line line source-string-number")
def read_shader_file(shader_file_name):
shader_file_path = os.path.join(_shaders_dir, shader_file_name)
shader_text = ''
with open(shader_file_path, 'r') as shader_file:
for line in shader_file.readlines():
include_match = _include_pragma_regex.match(line)
if include_match:
include_path = include_match.group('include_path')
shader_text += '\n'
shader_text += read_shader_file(include_path)
shader_text += '\n'
else:
shader_text += line
return shader_text
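# Minimal usage sketch (the file name is hypothetical; it would live next to this
# module and may itself contain lines such as `#pragma include "common.glsl"`,
# which are expanded recursively by read_shader_file above):
#
#     fragment_source = read_shader_file('example.frag')
#     print(fragment_source)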
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# /*******************************************************
# * Copyright (C) 2013-2014 CloudRunner.io <info@cloudrunner.io>
# *
# * Proprietary and confidential
# * This file is part of CloudRunner Server.
# *
# * CloudRunner Server can not be copied and/or distributed
# * without the express permission of CloudRunner.io
# *******************************************************/
from cloudrunner_server.api.tests import base
from mock import MagicMock
class TestEvents(base.BaseRESTTestCase):
def test_get(self):
sse_data = """id: 10
retry: 1000
event: 1234567
data: 1234567
"""
self.aero.check = MagicMock(return_value=10)
resp = self.app.get('/rest/status/get?1234567', headers={
'Cr-Token': 'PREDEFINED_TOKEN',
'Cr-User': 'testuser',
'Last-Event-Id': '100'})
self.assertEqual(resp.status_int, 200, resp.status_int)
self.assertEqual(resp.content_type, 'text/event-stream',
resp.content_type)
self.assertEqual(resp.body, sse_data)
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy
class AccountsAppConfig(AppConfig):
name = 'learningprogress.accounts'
verbose_name = ugettext_lazy('Accounts')
|
"""
Standard definitions that don't change.
"""
ESI_KEY_DELETED_BY_EVEOAUTH = 0
ESI_KEY_REPLACED_BY_OWNER = 1
ESI_KEY_DELETED_BY_SYSADMIN = 2
ESI_KEY_REMOVAL_REASON = (
(ESI_KEY_DELETED_BY_EVEOAUTH, "The Test Auth app was deleted by the user on the eve website."),
(ESI_KEY_REPLACED_BY_OWNER,"The character's owner replaced this key with a newer key."),
(ESI_KEY_DELETED_BY_SYSADMIN,"This key was deleted by a sysadmin, for whatever reason."),
)
ESI_SCOPE_DEFAULT = 0
ESI_SCOPE_ALLIED = 1
ESI_SCOPE_CHOICES = (
(ESI_SCOPE_DEFAULT, "Default Scopes"),
(ESI_SCOPE_ALLIED, "Allied Scopes"),
)
|
import cp2110
import threading
import time
import json
d = cp2110.CP2110Device()
d.set_uart_config(cp2110.UARTConfig(
baud=9600,
parity=cp2110.PARITY.NONE,
flow_control=cp2110.FLOW_CONTROL.DISABLED,
data_bits=cp2110.DATA_BITS.EIGHT,
stop_bits=cp2110.STOP_BITS.SHORT))
d.enable_uart()
last_p = ""
#https://stackabuse.com/how-to-print-colored-text-in-python/
def colorize_print(p):
global last_p
for i in range(len(p)):
if len(last_p) > i and last_p[i] != p[i]:
print("\033[0;31;40m" + p[i], end='')
else:
print("\033[0;37;40m" + p[i], end='')
print("")
last_p = p
def handle_data(data):
if "raw_value" in data:
print("Values: " + json.dumps(data, default=str))
if "deviceid" in data:
print("DeviceID: " +data["deviceid"])
buf = bytearray(b'')
dat = {}
def decode_data(data):
global buf, dat
for b in data:
if bytes([b]) == b'\xab':
if buf[:2] == b'\xab\xcd':
if len(buf) == 21 and buf[2] == 0x12:
if buf[11] == 0:
dat["raw_value"] = float(buf[6:11])
else:
dat["raw_value"] = float(buf[6:12])
dat["mode"] = buf[4]
dat["range"] = buf[5]
dat["ol"] = bool(buf[14] & 0b00000100)
dat["hold"] = bool(buf[14] & 0b00000001)
dat["rel"] = bool(buf[15] & 0b00000001)
dat["min"] = bool(buf[16] & 0b00000001)
dat["max"] = bool(buf[16] & 0b00000010)
dat["raw"] = buf
dat["raw_text"] = buf.decode("cp1252")
handle_data(dat)
if len(buf) == 26 and buf[2] == 0x17:
dat["deviceid"] = buf[4:-2].decode("cp1252")
dat["raw"] = buf
dat["raw_text"] = buf.decode("cp1252")
handle_data(dat)
#Debug
#colorize_print(buf.decode("cp1252") + "\t" + str(buf) + "\t" + str(measurement["value"]) + "\t" + str(measurement["mode"]) + "\t" + str(measurement["range"]))
buf = bytearray(b'')
dat = {}
buf.append(b)
def read_from_port(dev):
while True:
rv = d.read(64)
if len(rv) > 0:
decode_data(rv)
def requestDeviceID():
d.write(b'\xab\xcd\x04\x58\x00\x01\xd4')
d.write(b'\xab\xcd\x04\x5a\x00\x01\xd6')
def actionHold():
d.write(b'\xab\xcd\x04\x46\x00\x01\xc2')
d.write(b'\xab\xcd\x04\x5a\x00\x01\xd6')
def actionBacklight():
d.write(b'\xab\xcd\x04\x47\x00\x01\xc3')
d.write(b'\xab\xcd\x04\x5a\x00\x01\xd6')
def actionSelect():
d.write(b'\xab\xcd\x04\x48\x00\x01\xc4')
d.write(b'\xab\xcd\x04\x5a\x00\x01\xd6')
def actionManualRange():
d.write(b'\xab\xcd\x04\x49\x00\x01\xc5')
d.write(b'\xab\xcd\x04\x5a\x00\x01\xd6')
def actionAutoRange():
d.write(b'\xab\xcd\x04\x4a\x00\x01\xc6')
d.write(b'\xab\xcd\x04\x5a\x00\x01\xd6')
def actionMinMax():
d.write(b'\xab\xcd\x04\x4b\x00\x01\xc7')
d.write(b'\xab\xcd\x04\x5a\x00\x01\xd6')
def actionExitMinMax():
d.write(b'\xab\xcd\x04\x4c\x00\x01\xc8')
d.write(b'\xab\xcd\x04\x5a\x00\x01\xd6')
def actionRel():
d.write(b'\xab\xcd\x04\x4d\x00\x01\xc9')
d.write(b'\xab\xcd\x04\x5a\x00\x01\xd6')
def actionDVal():
d.write(b'\xab\xcd\x04\x4e\x00\x01\xca')
d.write(b'\xab\xcd\x04\x5a\x00\x01\xd6')
def actionQVal():
d.write(b'\xab\xcd\x04\x4f\x00\x01\xcb')
d.write(b'\xab\xcd\x04\x5a\x00\x01\xd6')
def actionRVal():
d.write(b'\xab\xcd\x04\x51\x00\x01\xcd')
d.write(b'\xab\xcd\x04\x5a\x00\x01\xd6')
def actionExitDQR():
d.write(b'\xab\xcd\x04\x50\x00\x01\xcc')
d.write(b'\xab\xcd\x04\x5a\x00\x01\xd6')
thread = threading.Thread(target=read_from_port, args=(d,))
thread.start()
time.sleep(1)
requestDeviceID()
while True:
time.sleep(5)
|
# Copyright (c) 2011-2013 Kunal Mehta. All rights reserved.
# Use of this source code is governed by a BSD License found in README.md.
from django.test import TestCase
from huxley.accounts.constants import *
from huxley.accounts.models import *
class HuxleyUserTest(TestCase):
def test_authenticate(self):
""" Tests that the function correctly authenticates and returns a
user, or returns an error message. """
kunal = HuxleyUser.objects.create(username='kunal', email='kunal@lol.lol')
kunal.set_password('kunalmehta')
kunal.save()
user, error = HuxleyUser.authenticate('kunal', '')
self.assertIsNone(user)
self.assertEqual(error, AuthenticationErrors.MISSING_FIELDS)
user, error = HuxleyUser.authenticate('', 'kunalmehta')
self.assertIsNone(user)
self.assertEqual(error, AuthenticationErrors.MISSING_FIELDS)
user, error = HuxleyUser.authenticate('roflrofl', 'roflrofl')
self.assertIsNone(user)
self.assertEqual(error, AuthenticationErrors.INVALID_LOGIN)
user, error = HuxleyUser.authenticate('kunal', 'kunalmehta')
self.assertEqual(user, kunal)
self.assertIsNone(error)
def test_change_password(self):
""" Tests that the function correctly changes a user's password, or
returns an error message. """
user = HuxleyUser.objects.create(username='adavis', email='lol@lol.lol')
user.set_password('mr_davis')
success, error = user.change_password('', 'lololol', 'lololol')
self.assertFalse(success)
self.assertEquals(ChangePasswordErrors.MISSING_FIELDS, error)
success, error = user.change_password('mr_davis', '', 'lololol')
self.assertFalse(success)
self.assertEquals(ChangePasswordErrors.MISSING_FIELDS, error)
success, error = user.change_password('mr_davis', 'lololol', '')
self.assertFalse(success)
self.assertEquals(ChangePasswordErrors.MISSING_FIELDS, error)
success, error = user.change_password('mr_davis', 'lololol', 'roflrofl')
self.assertFalse(success)
self.assertEquals(ChangePasswordErrors.MISMATCHED_PASSWORDS, error)
success, error = user.change_password('mr_davis', 'lol', 'lol')
self.assertFalse(success)
self.assertEquals(ChangePasswordErrors.PASSWORD_TOO_SHORT, error)
success, error = user.change_password('mr_davis', 'lololol<', 'lololol<')
self.assertFalse(success)
self.assertEquals(ChangePasswordErrors.INVALID_CHARACTERS, error)
success, error = user.change_password('roflrofl', 'lololol', 'lololol')
self.assertFalse(success)
self.assertEquals(ChangePasswordErrors.INCORRECT_PASSWORD, error)
success, error = user.change_password('mr_davis', 'lololol', 'lololol')
self.assertTrue(success)
self.assertTrue(user.check_password('lololol'))
|
item = float(input('Enter the product price: '))
desc = int(input('Enter the discount percentage: '))
preco = float(item - (item * desc / 100))
print('With a {}% discount, the product will cost R${:.2f}!'.format(desc, preco))
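# Worked example: for a price of 100.00 and a 10% discount,
# 100.00 - (100.00 * 10 / 100) = 90.00, so the script prints R$90.00.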
|
# -*- coding: utf-8 -*-
import shutil
from oss2 import defaults, exceptions, models, utils, xml_utils
from oss2.compat import to_string, to_unicode, urlparse, urlquote
from . import http
class _Base(object):
def __init__(self, auth, endpoint, is_cname, session, connect_timeout,
app_name='', enable_crc=False, loop=None):
self.auth = auth
self.endpoint = _normalize_endpoint(endpoint.strip())
self.session = session or http.Session(loop=loop)
self.timeout = defaults.get(connect_timeout, defaults.connect_timeout)
self.app_name = app_name
self.enable_crc = enable_crc
self._make_url = _UrlMaker(self.endpoint, is_cname)
async def _do(self, method, bucket_name, key, **kwargs):
key = to_string(key)
req = http.Request(method, self._make_url(bucket_name, key),
app_name=self.app_name,
**kwargs)
self.auth._sign_request(req, bucket_name, key)
resp = await self.session.do_request(req, timeout=self.timeout)
return resp
async def _parse_result(self, resp, parse_func, klass):
result = klass(resp)
body = await resp.read()
parse_func(result, body)
return result
async def __aenter__(self):
await self.session._aio_session.__aenter__()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.session._aio_session.__aexit__(exc_type, exc_val, exc_tb)
async def close(self):
await self.session._aio_session.close()
class Service(_Base):
def __init__(self, auth, endpoint,
session=None,
connect_timeout=None,
app_name='',
loop=None):
super().__init__(auth, endpoint, False, session, connect_timeout,
app_name=app_name, loop=loop)
async def list_buckets(self, prefix='', marker='', max_keys=100):
"""根据前缀罗列用户的Bucket。
:param str prefix: 只罗列Bucket名为该前缀的Bucket,空串表示罗列所有的Bucket
:param str marker: 分页标志。首次调用传空串,后续使用返回值中的next_marker
:param int max_keys: 每次调用最多返回的Bucket数目
:return: 罗列的结果
:rtype: oss2.models.ListBucketsResult
"""
resp = await self._do('GET', '', '',
params={'prefix': prefix,
'marker': marker,
'max-keys': str(max_keys)})
return await self._parse_result(resp, xml_utils.parse_list_buckets, models.ListBucketsResult)
class Bucket(_Base):
"""用于Bucket和Object操作的类,诸如创建、删除Bucket,上传、下载Object等。
用法(假设Bucket属于杭州区域) ::
>>> import oss2
>>> auth = oss2.Auth('your-access-key-id', 'your-access-key-secret')
>>> bucket = oss2.Bucket(auth, 'http://oss-cn-hangzhou.aliyuncs.com', 'your-bucket')
>>> bucket.put_object('readme.txt', 'content of the object')
<oss2.models.PutObjectResult object at 0x029B9930>
:param auth: 包含了用户认证信息的Auth对象
:type auth: oss2.Auth
:param str endpoint: 访问域名或者CNAME
:param str bucket_name: Bucket名
:param bool is_cname: 如果endpoint是CNAME则设为True;反之,则为False。
:param session: 会话。如果是None表示新开会话,非None则复用传入的会话
:type session: oss2.Session
:param float connect_timeout: 连接超时时间,以秒为单位。
:param str app_name: 应用名。该参数不为空,则在User Agent中加入其值。
注意到,最终这个字符串是要作为HTTP Header的值传输的,所以必须要遵循HTTP标准。
"""
ACL = 'acl'
CORS = 'cors'
LIFECYCLE = 'lifecycle'
LOCATION = 'location'
LOGGING = 'logging'
REFERER = 'referer'
WEBSITE = 'website'
LIVE = 'live'
COMP = 'comp'
STATUS = 'status'
VOD = 'vod'
SYMLINK = 'symlink'
STAT = 'stat'
BUCKET_INFO = 'bucketInfo'
def __init__(self, auth, endpoint, bucket_name,
is_cname=False,
session=None,
connect_timeout=None,
app_name='',
enable_crc=False,
loop=None):
super().__init__(auth, endpoint, is_cname, session, connect_timeout,
app_name, enable_crc, loop=loop)
self.bucket_name = bucket_name.strip()
def sign_url(self, method, key, expires, headers=None, params=None):
"""生成签名URL。
常见的用法是生成加签的URL以供授信用户下载,如为log.jpg生成一个5分钟后过期的下载链接::
>>> bucket.sign_url('GET', 'log.jpg', 5 * 60)
'http://your-bucket.oss-cn-hangzhou.aliyuncs.com/logo.jpg?OSSAccessKeyId=YourAccessKeyId\&Expires=1447178011&Signature=UJfeJgvcypWq6Q%2Bm3IJcSHbvSak%3D'
:param method: HTTP方法,如'GET'、'PUT'、'DELETE'等
:type method: str
:param key: 文件名
:param expires: 过期时间(单位:秒),链接在当前时间再过expires秒后过期
:param headers: 需要签名的HTTP头部,如名称以x-oss-meta-开头的头部(作为用户自定义元数据)、
Content-Type头部等。对于下载,不需要填。
:type headers: 可以是dict,建议是oss2.CaseInsensitiveDict
:param params: 需要签名的HTTP查询参数
:return: 签名URL。
"""
key = to_string(key)
req = http.Request(method, self._make_url(self.bucket_name, key),
headers=headers,
params=params)
return self.auth._sign_url(req, self.bucket_name, key, expires)
def sign_rtmp_url(self, channel_name, playlist_name, expires):
"""生成RTMP推流的签名URL。
常见的用法是生成加签的URL以供授信用户向OSS推RTMP流。
:param channel_name: 直播频道的名称
:param expires: 过期时间(单位:秒),链接在当前时间再过expires秒后过期
:param playlist_name: 播放列表名称,注意与创建live channel时一致
:param params: 需要签名的HTTP查询参数
:return: 签名URL。
"""
url = self._make_url(self.bucket_name, 'live').replace(
'http://', 'rtmp://').replace('https://',
'rtmp://') + '/' + channel_name
params = {}
params['playlistName'] = playlist_name
return self.auth._sign_rtmp_url(url, self.bucket_name, channel_name, playlist_name, expires, params)
async def list_objects(self, prefix='', delimiter='', marker='', max_keys=100):
"""根据前缀罗列Bucket里的文件。
:param str prefix: 只罗列文件名为该前缀的文件
:param str delimiter: 分隔符。可以用来模拟目录
:param str marker: 分页标志。首次调用传空串,后续使用返回值的next_marker
:param int max_keys: 最多返回文件的个数,文件和目录的和不能超过该值
:return: :class:`ListObjectsResult <oss2.models.ListObjectsResult>`
"""
resp = await self.__do_object('GET', '',
params={'prefix': prefix,
'delimiter': delimiter,
'marker': marker,
'max-keys': str(max_keys),
'encoding-type': 'url'})
return await self._parse_result(resp, xml_utils.parse_list_objects, models.ListObjectsResult)
async def put_object(self, key, data,
headers=None,
progress_callback=None):
"""上传一个普通文件。
用法 ::
>>> bucket.put_object('readme.txt', 'content of readme.txt')
>>> with open(u'local_file.txt', 'rb') as f:
>>> bucket.put_object('remote_file.txt', f)
:param key: 上传到OSS的文件名
:param data: 待上传的内容。
:type data: bytes,str或file-like object
:param headers: 用户指定的HTTP头部。可以指定Content-Type、Content-MD5、x-oss-meta-开头的头部等
:type headers: 可以是dict,建议是oss2.CaseInsensitiveDict
:param progress_callback: 用户指定的进度回调函数。可以用来实现进度条等功能。参考 :ref:`progress_callback` 。
:return: :class:`PutObjectResult <oss2.models.PutObjectResult>`
"""
headers = utils.set_content_type(http.CaseInsensitiveDict(headers), key)
if progress_callback:
data = utils.make_progress_adapter(data, progress_callback)
if self.enable_crc:
data = utils.make_crc_adapter(data)
resp = await self.__do_object('PUT', key, data=data, headers=headers)
result = models.PutObjectResult(resp)
if self.enable_crc and result.crc is not None:
utils.check_crc('put', data.crc, result.crc)
return result
async def put_object_from_file(self, key, filename,
headers=None,
progress_callback=None):
"""上传一个本地文件到OSS的普通文件。
:param str key: 上传到OSS的文件名
:param str filename: 本地文件名,需要有可读权限
:param headers: 用户指定的HTTP头部。可以指定Content-Type、Content-MD5、x-oss-meta-开头的头部等
:type headers: 可以是dict,建议是oss2.CaseInsensitiveDict
:param progress_callback: 用户指定的进度回调函数。参考 :ref:`progress_callback`
:return: :class:`PutObjectResult <oss2.models.PutObjectResult>`
"""
headers = utils.set_content_type(http.CaseInsensitiveDict(headers), filename)
with open(to_unicode(filename), 'rb') as f:
return await self.put_object(key, f, headers=headers, progress_callback=progress_callback)
async def append_object(self, key, position, data,
headers=None,
progress_callback=None,
init_crc=None):
"""追加上传一个文件。
:param str key: 新的文件名,或已经存在的可追加文件名
:param int position: 追加上传一个新的文件, `position` 设为0;追加一个已经存在的可追加文件, `position` 设为文件的当前长度。
`position` 可以从上次追加的结果 `AppendObjectResult.next_position` 中获得。
:param data: 用户数据
:type data: str、bytes、file-like object或可迭代对象
:param headers: 用户指定的HTTP头部。可以指定Content-Type、Content-MD5、x-oss-开头的头部等
:type headers: 可以是dict,建议是oss2.CaseInsensitiveDict
:param progress_callback: 用户指定的进度回调函数。参考 :ref:`progress_callback`
:return: :class:`AppendObjectResult <oss2.models.AppendObjectResult>`
:raises: 如果 `position` 和当前文件长度不一致,抛出 :class:`PositionNotEqualToLength <oss2.exceptions.PositionNotEqualToLength>` ;
如果当前文件不是可追加类型,抛出 :class:`ObjectNotAppendable <oss2.exceptions.ObjectNotAppendable>` ;
还会抛出其他一些异常
"""
headers = utils.set_content_type(http.CaseInsensitiveDict(headers), key)
if progress_callback:
data = utils.make_progress_adapter(data, progress_callback)
if self.enable_crc and init_crc is not None:
data = utils.make_crc_adapter(data, init_crc)
resp = await self.__do_object('POST', key,
data=data,
headers=headers,
params={'append': '', 'position': str(position)})
result = models.AppendObjectResult(resp)
if self.enable_crc and result.crc is not None and init_crc is not None:
utils.check_crc('append', data.crc, result.crc)
return result
async def get_object(self, key,
byte_range=None,
headers=None,
progress_callback=None,
process=None):
"""下载一个文件。
用法 ::
>>> result = await bucket.get_object('readme.txt')
>>> print(result.read())
'hello world'
:param key: 文件名
:param byte_range: 指定下载范围。参见 :ref:`byte_range`
:param headers: HTTP头部
:type headers: 可以是dict,建议是oss2.CaseInsensitiveDict
:param progress_callback: 用户指定的进度回调函数。参考 :ref:`progress_callback`
:param process: oss文件处理,如图像服务等。指定后process,返回的内容为处理后的文件。
:return: file-like object
:raises: 如果文件不存在,则抛出 :class:`NoSuchKey <oss2.exceptions.NoSuchKey>` ;还可能抛出其他异常
"""
headers = http.CaseInsensitiveDict(headers)
range_string = _make_range_string(byte_range)
if range_string:
headers['range'] = range_string
params = None
if process:
params = {'x-oss-process': process}
resp = await self.__do_object('GET', key, headers=headers, params=params)
return models.GetObjectResult(resp, progress_callback, self.enable_crc)
async def get_object_to_file(self, key, filename,
byte_range=None,
headers=None,
progress_callback=None,
process=None):
"""下载一个文件到本地文件。
:param key: 文件名
:param filename: 本地文件名。要求父目录已经存在,且有写权限。
:param byte_range: 指定下载范围。参见 :ref:`byte_range`
:param headers: HTTP头部
:type headers: 可以是dict,建议是oss2.CaseInsensitiveDict
:param progress_callback: 用户指定的进度回调函数。参考 :ref:`progress_callback`
:param process: oss文件处理,如图像服务等。指定后process,返回的内容为处理后的文件。
:return: 如果文件不存在,则抛出 :class:`NoSuchKey <oss2.exceptions.NoSuchKey>` ;还可能抛出其他异常
"""
with open(to_unicode(filename), 'wb') as f:
result = await self.get_object(key, byte_range=byte_range, headers=headers,
progress_callback=progress_callback,
process=process)
if result.content_length is None:
shutil.copyfileobj(result, f)
else:
utils.copyfileobj_and_verify(result, f, result.content_length, request_id=result.request_id)
return result
async def head_object(self, key, headers=None):
"""获取文件元信息。
HTTP响应的头部包含了文件元信息,可以通过 `RequestResult` 的 `headers` 成员获得。
用法 ::
>>> result = await bucket.head_object('readme.txt')
>>> print(result.content_type)
text/plain
:param key: 文件名
:param headers: HTTP头部
:type headers: 可以是dict,建议是oss2.CaseInsensitiveDict
:return: :class:`HeadObjectResult <oss2.models.HeadObjectResult>`
:raises: 如果Bucket不存在或者Object不存在,则抛出 :class:`NotFound <oss2.exceptions.NotFound>`
"""
resp = await self.__do_object('HEAD', key, headers=headers)
return models.HeadObjectResult(resp)
async def get_object_meta(self, key):
"""获取文件基本元信息,包括该Object的ETag、Size(文件大小)、LastModified,并不返回其内容。
HTTP响应的头部包含了文件基本元信息,可以通过 `GetObjectMetaResult` 的 `last_modified`,`content_length`,`etag` 成员获得。
:param key: 文件名
:return: :class:`GetObjectMetaResult <oss2.models.GetObjectMetaResult>`
:raises: 如果文件不存在,则抛出 :class:`NoSuchKey <oss2.exceptions.NoSuchKey>` ;还可能抛出其他异常
"""
resp = await self.__do_object('GET', key, params={'objectMeta': ''})
return models.GetObjectMetaResult(resp)
    async def object_exists(self, key):
        """Return True if the object exists, False otherwise. Raise an exception if the bucket does not exist or another error occurs."""
        # If this were implemented with head_object, the HTTP HEAD request has no response body, only headers,
        # so on a 404 we could not tell a NoSuchBucket error from a NoSuchKey error.
        #
        # Before 2.2.0 this was implemented with get_object and an if-modified-since header set to 24 hours in the
        # future: if the object existed the server returned 304 (NotModified), otherwise NoSuchKey. However,
        # get_object is affected by back-to-origin rules; with a 404 back-to-origin configured it would give the
        # wrong answer.
        #
        # The current implementation uses get_object_meta to decide whether the object exists.
        try:
            # get_object_meta is a coroutine in this async port, so it must be awaited
            # for NoSuchKey to be raised (and caught) here.
            await self.get_object_meta(key)
        except exceptions.NoSuchKey:
            return False
        return True
async def copy_object(self, source_bucket_name, source_key, target_key, headers=None):
"""拷贝一个文件到当前Bucket。
:param str source_bucket_name: 源Bucket名
:param str source_key: 源文件名
:param str target_key: 目标文件名
:param headers: HTTP头部
:type headers: 可以是dict,建议是oss2.CaseInsensitiveDict
:return: :class:`PutObjectResult <oss2.models.PutObjectResult>`
"""
headers = http.CaseInsensitiveDict(headers)
headers['x-oss-copy-source'] = '/' + source_bucket_name + '/' + urlquote(source_key, '')
resp = await self.__do_object('PUT', target_key, headers=headers)
return models.PutObjectResult(resp)
async def update_object_meta(self, key, headers):
"""更改Object的元数据信息,包括Content-Type这类标准的HTTP头部,以及以x-oss-meta-开头的自定义元数据。
用户可以通过 :func:`head_object` 获得元数据信息。
:param str key: 文件名
:param headers: HTTP头部,包含了元数据信息
:type headers: 可以是dict,建议是oss2.CaseInsensitiveDict
:return: :class:`RequestResult <oss2.models.RequestResults>`
"""
return await self.copy_object(self.bucket_name, key, key, headers=headers)
async def delete_object(self, key):
"""删除一个文件。
:param str key: 文件名
:return: :class:`RequestResult <oss2.models.RequestResult>`
"""
resp = await self.__do_object('DELETE', key)
return models.RequestResult(resp)
async def restore_object(self, key):
"""restore an object
如果是第一次针对该object调用接口,返回RequestResult.status = 202;
如果已经成功调用过restore接口,且服务端仍处于解冻中,抛异常RestoreAlreadyInProgress(status=409)
如果已经成功调用过restore接口,且服务端解冻已经完成,再次调用时返回RequestResult.status = 200,且会将object的可下载时间延长一天,最多延长7天。
如果object不存在,则抛异常NoSuchKey(status=404);
对非Archive类型的Object提交restore,则抛异常OperationNotSupported(status=400)
也可以通过调用head_object接口来获取meta信息来判断是否可以restore与restore的状态
代码示例::
>>> meta = await bucket.head_object(key)
>>> if meta.resp.headers['x-oss-storage-class'] == oss2.BUCKET_STORAGE_CLASS_ARCHIVE:
>>> bucket.restore_object(key)
>>> while True:
>>> meta = await bucket.head_object(key)
>>> if meta.resp.headers['x-oss-restore'] == 'ongoing-request="true"':
>>> time.sleep(5)
>>> else:
>>> break
:param str key: object name
:return: :class:`RequestResult <oss2.models.RequestResult>`
"""
resp = await self.__do_object('POST', key, params={'restore': ''})
return models.RequestResult(resp)
async def put_object_acl(self, key, permission):
"""设置文件的ACL。
:param str key: 文件名
:param str permission: 可以是oss2.OBJECT_ACL_DEFAULT、oss2.OBJECT_ACL_PRIVATE、oss2.OBJECT_ACL_PUBLIC_READ或
oss2.OBJECT_ACL_PUBLIC_READ_WRITE。
:return: :class:`RequestResult <oss2.models.RequestResult>`
"""
resp = await self.__do_object('PUT', key, params={'acl': ''}, headers={'x-oss-object-acl': permission})
return models.RequestResult(resp)
async def get_object_acl(self, key):
"""获取文件的ACL。
:return: :class:`GetObjectAclResult <oss2.models.GetObjectAclResult>`
"""
resp = await self.__do_object('GET', key, params={'acl': ''})
return await self._parse_result(resp, xml_utils.parse_get_object_acl, models.GetObjectAclResult)
async def batch_delete_objects(self, key_list):
"""批量删除文件。待删除文件列表不能为空。
:param key_list: 文件名列表,不能为空。
:type key_list: list of str
:return: :class:`BatchDeleteObjectsResult <oss2.models.BatchDeleteObjectsResult>`
"""
if not key_list:
raise models.ClientError('key_list should not be empty')
data = xml_utils.to_batch_delete_objects_request(key_list, False)
resp = await self.__do_object('POST', '',
data=data,
params={'delete': '', 'encoding-type': 'url'},
headers={'Content-MD5': utils.content_md5(data)})
return await self._parse_result(resp, xml_utils.parse_batch_delete_objects, models.BatchDeleteObjectsResult)
async def init_multipart_upload(self, key, headers=None):
"""初始化分片上传。
返回值中的 `upload_id` 以及Bucket名和Object名三元组唯一对应了此次分片上传事件。
:param str key: 待上传的文件名
:param headers: HTTP头部
:type headers: 可以是dict,建议是oss2.CaseInsensitiveDict
:return: :class:`InitMultipartUploadResult <oss2.models.InitMultipartUploadResult>`
"""
headers = utils.set_content_type(http.CaseInsensitiveDict(headers), key)
resp = await self.__do_object('POST', key, params={'uploads': ''}, headers=headers)
return await self._parse_result(resp, xml_utils.parse_init_multipart_upload, models.InitMultipartUploadResult)
async def upload_part(self, key, upload_id, part_number, data, progress_callback=None, headers=None):
"""上传一个分片。
:param str key: 待上传文件名,这个文件名要和 :func:`init_multipart_upload` 的文件名一致。
:param str upload_id: 分片上传ID
:param int part_number: 分片号,最小值是1.
:param data: 待上传数据。
:param progress_callback: 用户指定进度回调函数。可以用来实现进度条等功能。参考 :ref:`progress_callback` 。
:param headers: 用户指定的HTTP头部。可以指定Content-MD5头部等
:type headers: 可以是dict,建议是oss2.CaseInsensitiveDict
:return: :class:`PutObjectResult <oss2.models.PutObjectResult>`
"""
if progress_callback:
data = utils.make_progress_adapter(data, progress_callback)
if self.enable_crc:
data = utils.make_crc_adapter(data)
resp = await self.__do_object('PUT', key,
params={'uploadId': upload_id, 'partNumber': str(part_number)},
headers=headers,
data=data)
result = models.PutObjectResult(resp)
if self.enable_crc and result.crc is not None:
utils.check_crc('put', data.crc, result.crc)
return result
async def complete_multipart_upload(self, key, upload_id, parts, headers=None):
"""完成分片上传,创建文件。
:param str key: 待上传的文件名,这个文件名要和 :func:`init_multipart_upload` 的文件名一致。
:param str upload_id: 分片上传ID
:param parts: PartInfo列表。PartInfo中的part_number和etag是必填项。其中的etag可以从 :func:`upload_part` 的返回值中得到。
:type parts: list of `PartInfo <oss2.models.PartInfo>`
:param headers: HTTP头部
:type headers: 可以是dict,建议是oss2.CaseInsensitiveDict
:return: :class:`PutObjectResult <oss2.models.PutObjectResult>`
"""
data = xml_utils.to_complete_upload_request(sorted(parts, key=lambda p: p.part_number))
resp = await self.__do_object('POST', key,
params={'uploadId': upload_id},
data=data,
headers=headers)
return models.PutObjectResult(resp)
async def abort_multipart_upload(self, key, upload_id):
"""取消分片上传。
:param str key: 待上传的文件名,这个文件名要和 :func:`init_multipart_upload` 的文件名一致。
:param str upload_id: 分片上传ID
:return: :class:`RequestResult <oss2.models.RequestResult>`
"""
resp = await self.__do_object('DELETE', key,
params={'uploadId': upload_id})
return models.RequestResult(resp)
async def list_multipart_uploads(self,
prefix='',
delimiter='',
key_marker='',
upload_id_marker='',
max_uploads=1000):
"""罗列正在进行中的分片上传。支持分页。
:param str prefix: 只罗列匹配该前缀的文件的分片上传
:param str delimiter: 目录分割符
:param str key_marker: 文件名分页符。第一次调用可以不传,后续设为返回值中的 `next_key_marker`
:param str upload_id_marker: 分片ID分页符。第一次调用可以不传,后续设为返回值中的 `next_upload_id_marker`
:param int max_uploads: 一次罗列最多能够返回的条目数
:return: :class:`ListMultipartUploadsResult <oss2.models.ListMultipartUploadsResult>`
"""
resp = await self.__do_object('GET', '',
params={'uploads': '',
'prefix': prefix,
'delimiter': delimiter,
'key-marker': key_marker,
'upload-id-marker': upload_id_marker,
'max-uploads': str(max_uploads),
'encoding-type': 'url'})
return await self._parse_result(resp, xml_utils.parse_list_multipart_uploads, models.ListMultipartUploadsResult)
async def upload_part_copy(self, source_bucket_name, source_key, byte_range,
target_key, target_upload_id, target_part_number,
headers=None):
"""分片拷贝。把一个已有文件的一部分或整体拷贝成目标文件的一个分片。
:param byte_range: 指定待拷贝内容在源文件里的范围。参见 :ref:`byte_range`
:param headers: HTTP头部
:type headers: 可以是dict,建议是oss2.CaseInsensitiveDict
:return: :class:`PutObjectResult <oss2.models.PutObjectResult>`
"""
headers = http.CaseInsensitiveDict(headers)
headers['x-oss-copy-source'] = '/' + source_bucket_name + '/' + source_key
range_string = _make_range_string(byte_range)
if range_string:
headers['x-oss-copy-source-range'] = range_string
resp = await self.__do_object('PUT', target_key,
params={'uploadId': target_upload_id,
'partNumber': str(target_part_number)},
headers=headers)
return models.PutObjectResult(resp)
async def list_parts(self, key, upload_id,
marker='', max_parts=1000):
"""列举已经上传的分片。支持分页。
:param str key: 文件名
:param str upload_id: 分片上传ID
:param str marker: 分页符
:param int max_parts: 一次最多罗列多少分片
:return: :class:`ListPartsResult <oss2.models.ListPartsResult>`
"""
resp = await self.__do_object('GET', key,
params={'uploadId': upload_id,
'part-number-marker': marker,
'max-parts': str(max_parts)})
return await self._parse_result(resp, xml_utils.parse_list_parts, models.ListPartsResult)
async def put_symlink(self, target_key, symlink_key, headers=None):
"""创建Symlink。
:param str target_key: 目标文件,目标文件不能为符号连接
:param str symlink_key: 符号连接类文件,其实质是一个特殊的文件,数据指向目标文件
:return: :class:`RequestResult <oss2.models.RequestResult>`
"""
headers = headers or {}
headers['x-oss-symlink-target'] = urlquote(target_key, '')
resp = await self.__do_object('PUT', symlink_key, headers=headers, params={Bucket.SYMLINK: ''})
return models.RequestResult(resp)
async def get_symlink(self, symlink_key):
"""获取符号连接文件的目标文件。
:param str symlink_key: 符号连接类文件
:return: :class:`GetSymlinkResult <oss2.models.GetSymlinkResult>`
:raises: 如果文件的符号链接不存在,则抛出 :class:`NoSuchKey <oss2.exceptions.NoSuchKey>` ;还可能抛出其他异常
"""
resp = await self.__do_object('GET', symlink_key, params={Bucket.SYMLINK: ''})
return models.GetSymlinkResult(resp)
async def create_bucket(self, permission=None, input=None):
"""创建新的Bucket。
:param str permission: 指定Bucket的ACL。可以是oss2.BUCKET_ACL_PRIVATE(推荐、缺省)、oss2.BUCKET_ACL_PUBLIC_READ或是
oss2.BUCKET_ACL_PUBLIC_READ_WRITE。
:param input: :class:`BucketCreateConfig <oss2.models.BucketCreateConfig>` object
"""
if permission:
headers = {'x-oss-acl': permission}
else:
headers = None
data = self.__convert_data(models.BucketCreateConfig, xml_utils.to_put_bucket_config, input)
resp = await self.__do_bucket('PUT', headers=headers, data=data)
return models.RequestResult(resp)
async def delete_bucket(self):
"""删除一个Bucket。只有没有任何文件,也没有任何未完成的分片上传的Bucket才能被删除。
:return: :class:`RequestResult <oss2.models.RequestResult>`
":raises: 如果试图删除一个非空Bucket,则抛出 :class:`BucketNotEmpty <oss2.exceptions.BucketNotEmpty>`
"""
resp = await self.__do_bucket('DELETE')
return models.RequestResult(resp)
async def put_bucket_acl(self, permission):
"""设置Bucket的ACL。
:param str permission: 新的ACL,可以是oss2.BUCKET_ACL_PRIVATE、oss2.BUCKET_ACL_PUBLIC_READ或
oss2.BUCKET_ACL_PUBLIC_READ_WRITE
"""
resp = await self.__do_bucket('PUT', headers={'x-oss-acl': permission}, params={Bucket.ACL: ''})
return models.RequestResult(resp)
async def get_bucket_acl(self):
"""获取Bucket的ACL。
:return: :class:`GetBucketAclResult <oss2.models.GetBucketAclResult>`
"""
resp = await self.__do_bucket('GET', params={Bucket.ACL: ''})
return await self._parse_result(resp, xml_utils.parse_get_bucket_acl, models.GetBucketAclResult)
async def put_bucket_cors(self, input):
"""设置Bucket的CORS。
:param input: :class:`BucketCors <oss2.models.BucketCors>` 对象或其他
"""
data = self.__convert_data(models.BucketCors, xml_utils.to_put_bucket_cors, input)
resp = await self.__do_bucket('PUT', data=data, params={Bucket.CORS: ''})
return models.RequestResult(resp)
async def get_bucket_cors(self):
"""获取Bucket的CORS配置。
:return: :class:`GetBucketCorsResult <oss2.models.GetBucketCorsResult>`
"""
resp = await self.__do_bucket('GET', params={Bucket.CORS: ''})
return await self._parse_result(resp, xml_utils.parse_get_bucket_cors, models.GetBucketCorsResult)
async def delete_bucket_cors(self):
"""删除Bucket的CORS配置。"""
resp = await self.__do_bucket('DELETE', params={Bucket.CORS: ''})
return models.RequestResult(resp)
async def put_bucket_lifecycle(self, input):
"""设置生命周期管理的配置。
:param input: :class:`BucketLifecycle <oss2.models.BucketLifecycle>` 对象或其他
"""
data = self.__convert_data(models.BucketLifecycle, xml_utils.to_put_bucket_lifecycle, input)
resp = await self.__do_bucket('PUT', data=data, params={Bucket.LIFECYCLE: ''})
return models.RequestResult(resp)
async def get_bucket_lifecycle(self):
"""获取生命周期管理配置。
:return: :class:`GetBucketLifecycleResult <oss2.models.GetBucketLifecycleResult>`
:raises: 如果没有设置Lifecycle,则抛出 :class:`NoSuchLifecycle <oss2.exceptions.NoSuchLifecycle>`
"""
resp = await self.__do_bucket('GET', params={Bucket.LIFECYCLE: ''})
return await self._parse_result(resp, xml_utils.parse_get_bucket_lifecycle, models.GetBucketLifecycleResult)
async def delete_bucket_lifecycle(self):
"""删除生命周期管理配置。如果Lifecycle没有设置,也返回成功。"""
resp = await self.__do_bucket('DELETE', params={Bucket.LIFECYCLE: ''})
return models.RequestResult(resp)
async def get_bucket_location(self):
"""获取Bucket的数据中心。
:return: :class:`GetBucketLocationResult <oss2.models.GetBucketLocationResult>`
"""
resp = await self.__do_bucket('GET', params={Bucket.LOCATION: ''})
return await self._parse_result(resp, xml_utils.parse_get_bucket_location, models.GetBucketLocationResult)
async def put_bucket_logging(self, input):
"""设置Bucket的访问日志功能。
:param input: :class:`BucketLogging <oss2.models.BucketLogging>` 对象或其他
"""
data = self.__convert_data(models.BucketLogging, xml_utils.to_put_bucket_logging, input)
resp = await self.__do_bucket('PUT', data=data, params={Bucket.LOGGING: ''})
return models.RequestResult(resp)
async def get_bucket_logging(self):
"""获取Bucket的访问日志功能配置。
:return: :class:`GetBucketLoggingResult <oss2.models.GetBucketLoggingResult>`
"""
resp = await self.__do_bucket('GET', params={Bucket.LOGGING: ''})
return await self._parse_result(resp, xml_utils.parse_get_bucket_logging, models.GetBucketLoggingResult)
async def delete_bucket_logging(self):
"""关闭Bucket的访问日志功能。"""
resp = await self.__do_bucket('DELETE', params={Bucket.LOGGING: ''})
return models.RequestResult(resp)
async def put_bucket_referer(self, input):
"""为Bucket设置防盗链。
:param input: :class:`BucketReferer <oss2.models.BucketReferer>` 对象或其他
"""
data = self.__convert_data(models.BucketReferer, xml_utils.to_put_bucket_referer, input)
resp = await self.__do_bucket('PUT', data=data, params={Bucket.REFERER: ''})
return models.RequestResult(resp)
async def get_bucket_referer(self):
"""获取Bucket的防盗链配置。
:return: :class:`GetBucketRefererResult <oss2.models.GetBucketRefererResult>`
"""
resp = await self.__do_bucket('GET', params={Bucket.REFERER: ''})
return await self._parse_result(resp, xml_utils.parse_get_bucket_referer, models.GetBucketRefererResult)
async def get_bucket_stat(self):
"""查看Bucket的状态,目前包括bucket大小,bucket的object数量,bucket正在上传的Multipart Upload事件个数等。
:return: :class:`GetBucketStatResult <oss2.models.GetBucketStatResult>`
"""
resp = await self.__do_bucket('GET', params={Bucket.STAT: ''})
return await self._parse_result(resp, xml_utils.parse_get_bucket_stat, models.GetBucketStatResult)
async def get_bucket_info(self):
"""获取bucket相关信息,如创建时间,访问Endpoint,Owner与ACL等。
:return: :class:`GetBucketInfoResult <oss2.models.GetBucketInfoResult>`
"""
resp = await self.__do_bucket('GET', params={Bucket.BUCKET_INFO: ''})
return await self._parse_result(resp, xml_utils.parse_get_bucket_info, models.GetBucketInfoResult)
async def put_bucket_website(self, input):
"""为Bucket配置静态网站托管功能。
:param input: :class:`BucketWebsite <oss2.models.BucketWebsite>`
"""
data = self.__convert_data(models.BucketWebsite, xml_utils.to_put_bucket_website, input)
resp = await self.__do_bucket('PUT', data=data, params={Bucket.WEBSITE: ''})
return models.RequestResult(resp)
async def get_bucket_website(self):
"""获取Bucket的静态网站托管配置。
:return: :class:`GetBucketWebsiteResult <oss2.models.GetBucketWebsiteResult>`
:raises: 如果没有设置静态网站托管,那么就抛出 :class:`NoSuchWebsite <oss2.exceptions.NoSuchWebsite>`
"""
resp = await self.__do_bucket('GET', params={Bucket.WEBSITE: ''})
return await self._parse_result(resp, xml_utils.parse_get_bucket_websiste, models.GetBucketWebsiteResult)
async def delete_bucket_website(self):
"""关闭Bucket的静态网站托管功能。"""
resp = await self.__do_bucket('DELETE', params={Bucket.WEBSITE: ''})
return models.RequestResult(resp)
async def create_live_channel(self, channel_name, input):
"""创建推流直播频道
:param str channel_name: 要创建的live channel的名称
:param input: LiveChannelInfo类型,包含了live channel中的描述信息
:return: :class:`CreateLiveChannelResult <oss2.models.CreateLiveChannelResult>`
"""
data = self.__convert_data(models.LiveChannelInfo, xml_utils.to_create_live_channel, input)
resp = await self.__do_object('PUT', channel_name, data=data, params={Bucket.LIVE: ''})
return await self._parse_result(resp, xml_utils.parse_create_live_channel, models.CreateLiveChannelResult)
async def delete_live_channel(self, channel_name):
"""删除推流直播频道
:param str channel_name: 要删除的live channel的名称
"""
resp = await self.__do_object('DELETE', channel_name, params={Bucket.LIVE: ''})
return models.RequestResult(resp)
async def get_live_channel(self, channel_name):
"""获取直播频道配置
:param str channel_name: 要获取的live channel的名称
:return: :class:`GetLiveChannelResult <oss2.models.GetLiveChannelResult>`
"""
resp = await self.__do_object('GET', channel_name, params={Bucket.LIVE: ''})
return await self._parse_result(resp, xml_utils.parse_get_live_channel, models.GetLiveChannelResult)
async def list_live_channel(self, prefix='', marker='', max_keys=100):
"""列举出Bucket下所有符合条件的live channel
param: str prefix: list时channel_id的公共前缀
param: str marker: list时指定的起始标记
param: int max_keys: 本次list返回live channel的最大个数
return: :class:`ListLiveChannelResult <oss2.models.ListLiveChannelResult>`
"""
resp = await self.__do_bucket('GET', params={Bucket.LIVE: '',
'prefix': prefix,
'marker': marker,
'max-keys': str(max_keys)})
return await self._parse_result(resp, xml_utils.parse_list_live_channel, models.ListLiveChannelResult)
async def get_live_channel_stat(self, channel_name):
"""获取live channel当前推流的状态
param str channel_name: 要获取推流状态的live channel的名称
return: :class:`GetLiveChannelStatResult <oss2.models.GetLiveChannelStatResult>`
"""
resp = await self.__do_object('GET', channel_name, params={Bucket.LIVE: '', Bucket.COMP: 'stat'})
return await self._parse_result(resp, xml_utils.parse_live_channel_stat, models.GetLiveChannelStatResult)
async def put_live_channel_status(self, channel_name, status):
"""更改live channel的status,仅能在“enabled”和“disabled”两种状态中更改
param str channel_name: 要更改status的live channel的名称
param str status: live channel的目标status
"""
resp = await self.__do_object('PUT', channel_name, params={Bucket.LIVE: '', Bucket.STATUS: status})
return models.RequestResult(resp)
async def get_live_channel_history(self, channel_name):
"""获取live channel中最近的最多十次的推流记录,记录中包含推流的起止时间和远端的地址
param str channel_name: 要获取最近推流记录的live channel的名称
return: :class:`GetLiveChannelHistoryResult <oss2.models.GetLiveChannelHistoryResult>`
"""
resp = await self.__do_object('GET', channel_name, params={Bucket.LIVE: '', Bucket.COMP: 'history'})
return await self._parse_result(resp, xml_utils.parse_live_channel_history, models.GetLiveChannelHistoryResult)
async def post_vod_playlist(self, channel_name, playlist_name, start_time=0, end_time=0):
"""根据指定的playlist name以及startTime和endTime生成一个点播的播放列表
param str channel_name: 要生成点播列表的live channel的名称
param str playlist_name: 要生成点播列表m3u8文件的名称
param int start_time: 点播的起始时间,Unix Time格式,可以使用int(time.time())获取
param int end_time: 点播的结束时间,Unix Time格式,可以使用int(time.time())获取
"""
key = channel_name + "/" + playlist_name
resp = await self.__do_object('POST', key, params={Bucket.VOD: '',
'startTime': str(start_time),
'endTime': str(end_time)})
return models.RequestResult(resp)
async def _get_bucket_config(self, config):
"""获得Bucket某项配置,具体哪种配置由 `config` 指定。该接口直接返回 `RequestResult` 对象。
通过read()接口可以获得XML字符串。不建议使用。
:param str config: 可以是 `Bucket.ACL` 、 `Bucket.LOGGING` 等。
:return: :class:`RequestResult <oss2.models.RequestResult>`
"""
return await self.__do_bucket('GET', params={config: ''})
async def __do_object(self, method, key, **kwargs):
return await self._do(method, self.bucket_name, key, **kwargs)
async def __do_bucket(self, method, **kwargs):
return await self._do(method, self.bucket_name, '', **kwargs)
def __convert_data(self, klass, converter, data):
if isinstance(data, klass):
return converter(data)
else:
return data
def _normalize_endpoint(endpoint):
if not endpoint.startswith('http://') and not endpoint.startswith('https://'):
return 'http://' + endpoint
else:
return endpoint
_ENDPOINT_TYPE_ALIYUN = 0
_ENDPOINT_TYPE_CNAME = 1
_ENDPOINT_TYPE_IP = 2
def _make_range_string(range):
if range is None:
return ''
start = range[0]
last = range[1]
if start is None and last is None:
return ''
return 'bytes=' + _range(start, last)
def _range(start, last):
def to_str(pos):
if pos is None:
return ''
else:
return str(pos)
return to_str(start) + '-' + to_str(last)
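# Illustrative outputs (assumed inputs): _make_range_string((0, 99)) returns 'bytes=0-99',
# _make_range_string((100, None)) returns 'bytes=100-', and _make_range_string(None) returns ''.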
def _determine_endpoint_type(netloc, is_cname, bucket_name):
if utils.is_ip_or_localhost(netloc):
return _ENDPOINT_TYPE_IP
if is_cname:
return _ENDPOINT_TYPE_CNAME
if utils.is_valid_bucket_name(bucket_name):
return _ENDPOINT_TYPE_ALIYUN
else:
return _ENDPOINT_TYPE_IP
class _UrlMaker(object):
def __init__(self, endpoint, is_cname):
p = urlparse(endpoint)
self.scheme = p.scheme
self.netloc = p.netloc
self.is_cname = is_cname
def __call__(self, bucket_name, key):
self.type = _determine_endpoint_type(self.netloc, self.is_cname, bucket_name)
key = urlquote(key, '')
if self.type == _ENDPOINT_TYPE_CNAME:
return '{0}://{1}/{2}'.format(self.scheme, self.netloc, key)
if self.type == _ENDPOINT_TYPE_IP:
if bucket_name:
return '{0}://{1}/{2}/{3}'.format(self.scheme, self.netloc, bucket_name, key)
else:
return '{0}://{1}/{2}'.format(self.scheme, self.netloc, key)
if not bucket_name:
assert not key
return '{0}://{1}'.format(self.scheme, self.netloc)
return '{0}://{1}.{2}/{3}'.format(self.scheme, bucket_name, self.netloc, key)
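# Illustrative URLs produced by _UrlMaker (assumed endpoint and bucket values):
#   aliyun-style endpoint: http://my-bucket.oss-cn-hangzhou.aliyuncs.com/my-key
#   CNAME endpoint:        http://img.example.com/my-key
#   IP/localhost endpoint: http://127.0.0.1/my-bucket/my-key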
|
def test_login(app):
    # Placeholder test; no assertions yet.
    pass
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from torch import cat, Tensor
from torch.nn import Module, GRU, Dropout, LayerNorm
__author__ = 'An Tran'
__docformat__ = 'reStructuredText'
__all__ = ['SublayerConnection']
class SublayerConnection(Module):
def __init__(self,
size: int,
dropout: float):
"""SublayerConnection module.
A residule connection followed by a layer norm
"""
super(SublayerConnection, self).__init__()
self.norm : Module = LayerNorm(size)
self.dropout : Module = Dropout(dropout)
def forward(self, x, sublayer):
"""Apply residual connection to any sublayer with the same size """
return x + self.dropout(sublayer(self.norm(x)))
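# Minimal usage sketch (illustrative, not part of the original module): wrap any
# shape-preserving callable with the pre-norm residual connection defined above.
if __name__ == '__main__':
    import torch
    layer = SublayerConnection(size=16, dropout=0.1)
    x = torch.rand(2, 5, 16)                     # (batch, time, features)
    out = layer(x, sublayer=lambda h: h * 2.0)   # any callable preserving the last dim
    print(out.shape)                             # torch.Size([2, 5, 16])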
# EOF
|
# Copyright (c) 2018, NVIDIA CORPORATION.
"""
Helper functions for parameterized docstring
"""
import functools
import string
import re
_regex_whitespaces = re.compile(r'^\s+$')
def _only_spaces(s):
return bool(_regex_whitespaces.match(s))
_wrapopts = {
'width': 78,
'replace_whitespace': False,
}
def docfmt(**kwargs):
"""Format docstring.
    Similar to saving the result of ``__doc__.format(**kwargs)`` as the
function's docstring.
"""
kwargs = {k: v.lstrip() for k, v in kwargs.items()}
def outer(fn):
buf = []
formatsiter = string.Formatter().parse(fn.__doc__)
for literal, field, fmtspec, conv in formatsiter:
assert conv is None
assert not fmtspec
buf.append(literal)
if field is not None:
# get indentation
lines = literal.rsplit('\n', 1)
if _only_spaces(lines[-1]):
indent = ' ' * len(lines[-1])
valuelines = kwargs[field].splitlines(True)
# first line
buf.append(valuelines[0])
# subsequent lines are indented
buf.extend([indent + ln for ln in valuelines[1:]])
else:
buf.append(kwargs[field])
fn.__doc__ = ''.join(buf)
return fn
return outer
def docfmt_partial(**kwargs):
return functools.partial(docfmt, **kwargs)
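# Minimal usage sketch (illustrative; `_double` and its docstring are made up):
# docfmt substitutes `{field}` markers in a docstring and re-indents multi-line
# values to match the indentation of the line the marker sits on.
if __name__ == '__main__':
    _params = "x : int\n    value to double"

    @docfmt(params=_params)
    def _double(x):
        """Double a value.

        Parameters
        ----------
        {params}
        """
        return 2 * x

    print(_double.__doc__)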
|
from .jobManager import JobManager
class ClusterJobManager(JobManager):
"""
Helper class to enqueue and dequeue jobs to the cluster job queue.
"""
def __init__(self):
JobManager.__init__(self, 'CLUSTER_JOBSQUEUE_CONNECTIONSTRING')
|
# -*- coding: utf-8 -*-
"""
@FileName: model_test.py
@author: Meihua Peng
@time: $(2018.5.30) $(10:00)
"""
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
import tensorflow as tf
import pandas as pd
# tf.cond = tf
import keras
from keras import optimizers, backend
from keras.models import Sequential
from keras.layers import core, convolutional, pooling
#from keras.layers import Dense, Dropout, Flatten
from keras.layers import Flatten, Dense, Lambda, Dropout
from sklearn import model_selection
from data import generate_samples, preprocess
from weights_logger_callback import WeightsLogger
local_project_path = '/home/wales/Udacity/behavioral-cloning-master-track2/behavioral-cloning-master/'
local_data_path = os.path.join(local_project_path, 'data/')
local_image_path = os.path.join(local_data_path, 'IMG/')
if __name__ == '__main__':
# Read the data
df = pd.io.parsers.read_csv(os.path.join(local_data_path, 'driving_log.csv'))
# Split data into training and validation sets
df_train, df_valid = model_selection.train_test_split(df, test_size=.2)
# Model architecture
model = Sequential()
model.add(convolutional.Conv2D(16, (3, 3), input_shape=(38, 128, 3), activation='relu'))
model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
model.add(convolutional.Conv2D(32, (3, 3), activation='relu'))
model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
model.add(convolutional.Conv2D(64, (3, 3), activation='relu'))
model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(500, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(20, activation='relu'))
model.add(Dense(1))
model.compile(optimizer=optimizers.Adam(lr=1e-04), loss='mean_squared_error')
# print(local_image_path)
history = model.fit_generator(
generate_samples(df_train, local_image_path, augment=False),
samples_per_epoch=df_train.shape[0],
nb_epoch=10,
validation_data=generate_samples(df_valid, local_image_path, augment=False),
callbacks=[WeightsLogger(root_path=local_project_path)],
nb_val_samples = df_valid.shape[0]
)
with open(os.path.join(local_project_path, 'model.json'), 'w') as file:
file.write(model.to_json())
backend.clear_session()
|
"""
Implementation of Adaptive Embedding from :cite:https://arxiv.org/abs/1809.10853
Based on https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py
"""
import torch
import torch.nn as nn
class AdaptiveEmbedding(nn.Module):
def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
                 sparse=False):
"""
Args:
n_token (int): Overall size of the dictionary of tokens to look up
d_embed (int): Size of the largest embedding dimension. The
other groups are factors of `div_val` smaller.
d_proj (int): Size of the output embedding dimension, what all
embeddings are projected to and concatenated. Usually the
same as `d_embed`.
cutoffs (list[int]): The end of each of the groups of tokens
with common embedding dimensions, not including the final
group which always ends at `n_token`.
div_val (int): The factor to reduce each group's embedding
dimension by.
sparse (bool): Whether to make our embeddings sparse or not
Properties:
n_token, d_embed, d_proj, cutoffs, div_val: same as in args
emb_layers (nn.ModuleList[nn.Embedding]): All the embeddings
emb_projs (nn.ModuleList[nn.Linear]): All the projection layers
to `d_proj`
"""
        super(AdaptiveEmbedding, self).__init__()
        self.n_token = n_token
self.d_embed = d_embed
self.d_proj = d_proj
self.div_val = div_val
self.emb_scale = d_proj ** 0.5
self.cutoffs = [0] + cutoffs + [n_token]
self.emb_layers = nn.ModuleList()
self.emb_projs = nn.ModuleList()
if div_val == 1:
# We just need the one embedding, everything will be the same size
self.emb_layers.append(
nn.Embedding(n_token, d_embed, sparse=sparse)
)
self.emb_projs.append(
nn.Linear(d_embed, d_proj, bias=False)
)
else:
for i in range(len(self.cutoffs) - 1):
start_inc, end_exc = self.cutoffs[i], self.cutoffs[i+1]
d_embed_i = d_embed // (div_val ** i)
self.emb_layers.append(
nn.Embedding(end_exc - start_inc, d_embed_i, sparse=sparse)
)
self.emb_projs.append(
nn.Linear(d_embed_i, d_proj, bias=False)
)
def forward(self, inp):
if self.div_val == 1:
embed = self.emb_layers[0](inp)
            embed = self.emb_projs[0](embed)
else:
param = next(self.parameters())
inp_flat = inp.view(-1)
embed_flat = torch.zeros([inp_flat.size(0), self.d_proj],
dtype=param.dtype, device=param.device)
for i in range(len(self.cutoffs) - 1):
start_inc, end_exc = self.cutoffs[i], self.cutoffs[i+1]
# Get all the elements in the input that fall in this range
mask_i = (inp_flat >= start_inc) & (inp_flat < end_exc)
indices_i = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
# There are no elements, go to the next group
continue
                # Convert the overall indices into indices for this embedding
inp_i = inp_flat.index_select(0, indices_i) - start_inc
# Get the corresponding embedding
embed_i = self.emb_layers[i](inp_i)
embed_i = self.emb_projs[i](embed_i)
# Copy back to the main embedding array
embed_flat.index_copy_(0, indices_i, embed_i)
embed = embed_flat.view(*inp.size(), self.d_proj)
embed.mul_(self.emb_scale)
return embed
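# Minimal usage sketch (illustrative): with div_val=4, tokens below the first cutoff
# get d_embed-dimensional embeddings, the next group d_embed//4, the last d_embed//16,
# and every group is projected back to d_proj before being returned.
if __name__ == '__main__':
    emb = AdaptiveEmbedding(n_token=10000, d_embed=512, d_proj=512,
                            cutoffs=[2000, 6000], div_val=4)
    tokens = torch.randint(0, 10000, (8, 32))   # (batch, seq_len)
    print(emb(tokens).shape)                    # torch.Size([8, 32, 512])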
|
from .. import _image_size
import numpy as np
from scipy.spatial.distance import cdist, pdist
class ImageCrop():
""" """
def __init__(self,
ndim,
crop_array=None,
single_im_size=_image_size,
):
_shape = (ndim, 2)
self.ndim = ndim
self.array = np.zeros(_shape, dtype=np.int32)
if crop_array is None:
self.array[:,1] = np.array(single_im_size)
else:
self.update(crop_array)
def update(self,
crop_array,
):
_arr = np.array(crop_array, dtype=np.int32)
if np.shape(_arr) == np.shape(self.array):
self.array = _arr
return
def to_slices(self):
return tuple([slice(_s[0], _s[1]) for _s in self.array])
def inside(self, coords):
"""Check whether given coordinate is in this crop"""
_coords = np.array(coords)
if len(np.shape(_coords)) == 1:
_coords = _coords[np.newaxis,:]
elif len(np.shape(_coords)) > 2:
raise IndexError("Only support single or multiple coordinates")
# find kept spots
_masks = [(_coords[:,_d] >= self.array[_d,0]) *\
(_coords[:,_d] <= self.array[_d,1])
for _d in range(self.ndim)]
        _mask = np.prod(_masks, axis=0).astype(bool)
return _mask
def distance_to_edge(self, coord):
"""Check distance of a coordinate to the edge of this crop"""
_coord = np.array(coord)[:self.ndim]
return np.min(np.abs(_coord[:,np.newaxis] - self.array))
def crop_coords(self, coords):
""" """
_coords = np.array(coords)
_mask = self.inside(coords)
_cropped_coords = _coords[_mask] - self.array[:,0][np.newaxis,:]
return _cropped_coords
def overlap(self, crop2):
# find overlaps
_llim = np.max([self.array[:,0], crop2.array[:,0]], axis=0)
_rlim = np.min([self.array[:,1], crop2.array[:,1]], axis=0)
if (_llim > _rlim).any():
return None
else:
return ImageCrop(len(_llim), np.array([_llim, _rlim]).transpose())
def relative_overlap(self, crop2):
_overlap = self.overlap(crop2)
if _overlap is not None:
_overlap.array = _overlap.array - self.array[:,0][:, np.newaxis]
return _overlap
class ImageCrop_3d(ImageCrop):
""" """
def __init__(self,
crop_array=None,
single_im_size=_image_size,
):
super().__init__(3, crop_array, single_im_size)
def crop_spots(self, spots_3d):
""" """
_spots = spots_3d.copy()
_coords = _spots[:,1:4]
_mask = self.inside(_coords)
_cropped_spots = _spots[_mask].copy()
_cropped_spots[:,1:4] = np.array(_cropped_spots[:,1:4]) - self.array[:,0][np.newaxis,:]
return _cropped_spots
def overlap(self, crop2):
_returned_crop = super().overlap(crop2)
if _returned_crop is None:
return None
else:
return ImageCrop_3d(_returned_crop.array)
class Spots3D(np.ndarray):
"""Class for fitted spots in 3D"""
def __new__(cls,
input_array,
bits=None,
pixel_sizes=None,
#info=None,
copy_data=True):
# Input array is an already formed ndarray instance
# We first cast to be our class type
if copy_data:
input_array = np.array(input_array).copy()
if len(np.shape(input_array)) == 1:
obj = np.asarray([input_array]).view(cls)
elif len(np.shape(input_array)) == 2:
obj = np.asarray(input_array).view(cls)
else:
raise IndexError('Spots3D class only creating 2D-array')
# add the new attribute to the created instance
if isinstance(bits, (int, np.int32)):
obj.bits = np.ones(len(obj), dtype=np.int32) * int(bits)
elif bits is not None and np.size(bits) == 1:
obj.bits = np.ones(len(obj), dtype=np.int32) * int(bits[0])
elif bits is not None and len(bits) == len(obj):
obj.bits = np.array(bits, dtype=np.int32)
else:
obj.bits = bits
obj.pixel_sizes = np.array(pixel_sizes)
#obj.info = info
# Finally, we must return the newly created object:
return obj
# def __str__(self):
# """Spots3D object with dimension"""
# return ""
def __getitem__(self, key):
"""Modified getitem to allow slicing of bits as well"""
#print(f" getitem {key}, {type(key)}")
new_obj = super().__getitem__(key)
# if slice, slice bits as well
if hasattr(self, 'bits') and getattr(self, 'bits') is not None:
if isinstance(key, slice) or isinstance(key, np.ndarray):
setattr(new_obj, 'bits', getattr(self, 'bits')[key] )
#print(new_obj, type(new_obj))
return new_obj
def __setitem__(self, key, value):
#print(f" setitem {key}, {type(key)}")
return super().__setitem__(key, value)
def __array_finalize__(self, obj):
"""
Reference: https://numpy.org/devdocs/user/basics.subclassing.html
"""
if obj is None:
return
else:
if hasattr(obj, 'shape') and len(getattr(obj, 'shape')) != 2:
obj = np.array(obj)
# other attributes
setattr(self, 'bits', getattr(obj, 'bits', None))
setattr(self, 'pixel_sizes', getattr(obj, 'pixel_sizes', None))
#print(f"**finalizing, {obj}, {type(obj)}")
return obj
def to_coords(self):
""" convert into 3D coordinates in pixels """
return np.array(self[:,1:4])
def to_positions(self, pixel_sizes=None):
""" convert into 3D spatial positions"""
_saved_pixel_sizes = getattr(self, 'pixel_sizes', None)
if _saved_pixel_sizes is not None and _saved_pixel_sizes.any():
return self.to_coords() * np.array(_saved_pixel_sizes)[np.newaxis,:]
elif pixel_sizes is None:
raise ValueError('pixel_sizes not given')
else:
return self.to_coords() * np.array(pixel_sizes)[np.newaxis,:]
def to_intensities(self):
""" """
return np.array(self[:,0])
# scoring spot Tuple
class SpotTuple():
"""Tuple of coordinates"""
def __init__(self,
spots_tuple:Spots3D,
bits:np.ndarray=None,
pixel_sizes:np.ndarray or list=None,
spots_inds=None,
tuple_id=None,
):
# add spot Tuple
self.spots = spots_tuple[:].copy()
# add information for bits
if isinstance(bits, int):
self.bits = np.ones(len(self.spots), dtype=np.int32) * int(bits)
elif bits is not None and np.size(bits) == 1:
self.bits = np.ones(len(self.spots), dtype=np.int32) * int(bits[0])
elif bits is not None:
self.bits = np.array(bits[:len(self.spots)], dtype=np.int32)
elif spots_tuple.bits is not None:
self.bits = spots_tuple.bits[:len(self.spots)]
else:
self.bits = bits
if pixel_sizes is None:
self.pixel_sizes = getattr(self.spots, 'pixel_sizes', None)
else:
self.pixel_sizes = np.array(pixel_sizes)
self.spots_inds = spots_inds
self.tuple_id = tuple_id
def dist_internal(self):
_self_coords = self.spots.to_positions(self.pixel_sizes)
return pdist(_self_coords)
def intensities(self):
return self.spots.to_intensities()
def intensity_mean(self):
return np.mean(self.spots.to_intensities())
def centroid_spot(self):
self.centroid = np.mean(self.spots, axis=0, keepdims=True)
self.centroid.pixel_sizes = self.pixel_sizes
return self.centroid
def dist_centroid_to_spots(self, spots:Spots3D):
"""Calculate distance from tuple centroid to given spots"""
if not hasattr(self, 'centroid'):
_cp = self.centroid_spot()
else:
_cp = getattr(self, 'centroid')
_centroid_coords = _cp.to_positions(pixel_sizes=self.pixel_sizes)
_target_coords = spots.to_positions(pixel_sizes=self.pixel_sizes)
return cdist(_centroid_coords, _target_coords)[0]
def dist_to_spots(self,
spots:Spots3D):
_self_coords = self.spots.to_positions(pixel_sizes=self.pixel_sizes)
_target_coords = spots.to_positions(pixel_sizes=self.pixel_sizes)
return cdist(_self_coords, _target_coords)
def dist_chromosome(self):
pass
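# Minimal usage sketch (illustrative; coordinates and pixel sizes are made up):
#   crop = ImageCrop_3d(crop_array=[[0, 10], [0, 256], [0, 256]])
#   spots = Spots3D(np.array([[100., 5., 20., 30.], [90., 50., 10., 10.]]),
#                   pixel_sizes=[200, 108, 108])
#   kept = crop.crop_spots(spots)      # spots inside the crop, shifted to its origin
#   pair = SpotTuple(spots, bits=1)
#   print(pair.dist_internal())        # pairwise distances in physical units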
|
#
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
from pyasn1.codec.der.decoder import decode as der_decode
from pyasn1.codec.der.encoder import encode as der_encode
from pyasn1_modules import pem
from pyasn1_modules import rfc5280
from pyasn1_modules import rfc8209
try:
import unittest2 as unittest
except ImportError:
import unittest
class CertificateTestCase(unittest.TestCase):
cert_pem_text = """\
MIIBiDCCAS+gAwIBAgIEAk3WfDAKBggqhkjOPQQDAjAaMRgwFgYDVQQDDA9ST1VU
RVItMDAwMEZCRjAwHhcNMTcwMTAxMDUwMDAwWhcNMTgwNzAxMDUwMDAwWjAaMRgw
FgYDVQQDDA9ST1VURVItMDAwMEZCRjAwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC
AARzkbq7kqDLO+EOWbGev/shTgSpHgy6GxOafTjZD3flWqBbjmlWeOD6FpBLVdnU
9cDfxYiV7lC8T3XSBaJb02/1o2MwYTALBgNVHQ8EBAMCB4AwHQYDVR0OBBYEFKtN
kQ9VyucaIV7zyv46zEW17sFUMBMGA1UdJQQMMAoGCCsGAQUFBwMeMB4GCCsGAQUF
BwEIAQH/BA8wDaAHMAUCAwD78KECBQAwCgYIKoZIzj0EAwIDRwAwRAIgB7e0al+k
8cxoNjkDpIPsfIAC0vYInUay7Cp75pKzb7ECIACRBUqh9bAYnSck6LQi/dEc8D2x
OCRdZCk1KI3uDDgp
"""
def setUp(self):
self.asn1Spec = rfc5280.Certificate()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.cert_pem_text)
asn1Object, rest = der_decode(substrate, asn1Spec=self.asn1Spec)
assert not rest
assert asn1Object.prettyPrint()
assert der_encode(asn1Object) == substrate
extn_list = [ ]
for extn in asn1Object['tbsCertificate']['extensions']:
extn_list.append(extn['extnID'])
if extn['extnID'] in rfc5280.certificateExtensionsMap.keys():
extnValue, rest = der_decode(extn['extnValue'],
asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
assert der_encode(extnValue) == extn['extnValue']
if extn['extnID'] == rfc5280.id_ce_extKeyUsage:
assert rfc8209.id_kp_bgpsec_router in extnValue
assert rfc5280.id_ce_extKeyUsage in extn_list
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
|
# Created by Jennifer Langford on 3/24/22 for CMIT235 - Week 1 Assignment
# This is an ongoing effort weekly for the duration of this course.
# The program will be complete at the end of the course.
mySubList1 = [[1, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6], [2, 8, 4, 6, 7, 8, 9, 10, 11, 12, 12, 13]]
mySubList2 = [[0, -1, 3, 4, 4, 6, -2, 3, 1, 0, -20, -2], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 15]]
mySubList3 = [[2, 2, 3, 4, 5, 6, 6, 5, 4, 3, 2, 1], [-1, -3, 44, 22, 4, 7, 7, 8, 9, 10, 11, 20]]
|
#!/usr/bin/env python
import xml.etree.ElementTree as XET
'''
Author: Pengjia Zhu (zhupengjia@gmail.com)
'''
class odsread:
'''
read ods file
Input:
- filename: ods filepath
'''
def __init__(self,filename):
from zipfile import ZipFile
ziparchive = ZipFile(filename, "r")
self.xmldata = ziparchive.read("content.xml")
ziparchive.close()
self.xmltree=XET.fromstring(self.xmldata)
def __tag2str(self,tag):
return tag.split("}")[-1]
def __findtag(self,tree,tag):
for branch in list(tree):
if self.__tag2str(branch.tag)==tag:
return branch
else:
branch2=self.__findtag(branch,tag)
if isinstance(branch2,XET.Element):
return branch2
def __findtagall(self,treein,tagin):
def taginteg(tree,tag):
for branch in list(tree):
if self.__tag2str(branch.tag)==tag:
branchall.append(branch)
else:
taginteg(branch,tag)
branchall=[]
taginteg(treein,tagin)
return branchall
def __getattrib(self,tree,name):
for attrib in tree.attrib:
if "}"+name in attrib:
return tree.get(attrib)
return False
def __findtreewithattrib(self,trees,attribname,attribvalue):
for tree in trees:
if self.__getattrib(tree,attribname)==attribvalue:
return tree
return False
def parse(self,tablename):
'''
parse the ods table
input:
- tablename: string
'''
tables=self.__findtagall(self.xmltree,"table")
table=self.__findtreewithattrib(tables,"name",tablename)
rows=self.__findtagall(table,"table-row")
self.values,self.hrefs=[],[] #value and link
for row in rows:
cells=self.__findtagall(row,"table-cell")
self.values.append([])
self.hrefs.append([])
for cell in cells:
if self.__getattrib(cell,"number-columns-repeated"):
repeat=int(self.__getattrib(cell,"number-columns-repeated"))
                else:
                    repeat = 1
                if repeat > 500:
                    repeat = 1
for i in range(repeat):
text=cell.itertext()
self.values[-1].append("".join(text))
hreftag=self.__findtag(cell,"a")
if hreftag!=None:
hrefkey = [k for k in hreftag.attrib.keys() if "href" in k][0]
self.hrefs[-1].append(hreftag.attrib[hrefkey])
else:
self.hrefs[-1].append(None)
def getvalue(self,row,col):
'''
get value with row and col number
input:
- row: row number
- col: col number
output:
- value
'''
if row<len(self.values):
if col<len(self.values[row]):
return self.values[row][col]
return False
def getvalbyrow(self,row):
'''
        get values for a specific row
input:
- row: row number
output:
- value list
'''
if row<len(self.values):
return self.values[row]
return False
def getvalbycol(self,col):
'''
        get values for a specific col
        input:
            - col: col number
output:
- value list
'''
vals=[]
for rows in self.values:
if col<len(rows):
vals.append(rows[col])
else:
vals.append("")
return vals
if __name__ == '__main__':
bcmconstfile="/home/pzhu/work/run record/bcm calibration.ods"
ods=odsread(bcmconstfile)
ods.parse("bcm")
for col in ods.getvalbycol(14):
print(col)
|
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
# Although this looks like an Autotools package, it's not one. Refer to:
# https://github.com/flame/blis/issues/17
# https://github.com/flame/blis/issues/195
# https://github.com/flame/blis/issues/197
class Blis(Package):
"""BLIS is a portable software framework for instantiating high-performance
BLAS-like dense linear algebra libraries. The framework was designed to
isolate essential kernels of computation that, when optimized, immediately
enable optimized implementations of most of its commonly used and
computationally intensive operations. BLIS is written in ISO C99 and
available under a new/modified/3-clause BSD license. While BLIS exports a
new BLAS-like API, it also includes a BLAS compatibility layer which gives
application developers access to BLIS implementations via traditional BLAS
routine calls. An object-based API unique to BLIS is also available."""
homepage = "https://github.com/flame/blis"
url = "https://github.com/flame/blis/archive/0.4.0.tar.gz"
git = "https://github.com/flame/blis.git"
version('develop', branch='master')
version('0.4.0', sha256='9c7efd75365a833614c01b5adfba93210f869d92e7649e0b5d9edc93fc20ea76')
version('0.3.2', sha256='b87e42c73a06107d647a890cbf12855925777dc7124b0c7698b90c5effa7f58f')
version('0.3.1', sha256='957f28d47c5cf71ffc62ce8cc1277e17e44d305b1c2fa8506b0b55617a9f28e4')
version('0.3.0', sha256='d34d17df7bdc2be8771fe0b7f867109fd10437ac91e2a29000a4a23164c7f0da')
version('0.2.2', sha256='4a7ecb56034fb20e9d1d8b16e2ef587abbc3d30cb728e70629ca7e795a7998e8')
depends_on('python@2.7:2.8,3.4:', type=('build', 'run'))
variant(
'threads', default='none',
description='Multithreading support',
values=('pthreads', 'openmp', 'none'),
multi=False
)
variant(
'blas', default=True,
description='BLAS compatibility',
)
variant(
'cblas', default=False,
description='CBLAS compatibility',
)
variant(
'shared', default=True,
description='Build shared library',
)
variant(
'static', default=True,
description='Build static library',
)
# TODO: add cpu variants. Currently using auto.
    # If on KNL, should the default be memkind?
    # BLIS has its own API but can be made compatible with BLAS;
# enabling CBLAS automatically enables BLAS.
provides('blas', when="+blas")
provides('blas', when="+cblas")
phases = ['configure', 'build', 'install']
def configure(self, spec, prefix):
config_args = []
config_args.append("--enable-threading=" +
spec.variants['threads'].value)
if '+cblas' in spec:
config_args.append("--enable-cblas")
else:
config_args.append("--disable-cblas")
if '+blas' in spec:
config_args.append("--enable-blas")
else:
config_args.append("--disable-blas")
if '+shared' in spec:
config_args.append("--enable-shared")
else:
config_args.append("--disable-shared")
if '+static' in spec:
config_args.append("--enable-static")
else:
config_args.append("--disable-static")
# FIXME: add cpu isa variants.
config_args.append("auto")
configure("--prefix=" + prefix,
*config_args)
def build(self, spec, prefix):
make()
@run_after('build')
@on_package_attributes(run_tests=True)
def check(self):
make('check')
def install(self, spec, prefix):
make('install')
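    # Example spec (illustrative): `spack install blis threads=openmp +cblas`
    # builds an OpenMP-threaded BLIS with the CBLAS compatibility layer enabled.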
|
# -*- coding: utf-8 -*-
import logging
from abc import ABCMeta
from abc import abstractmethod
from copy import deepcopy
from random import getrandbits
from sklearn.base import BaseEstimator
from pipesnake.base.utils import _check_input
from pipesnake.base.utils import _check_transformer_type
from pipesnake.utils import to_snake
__all__ = [
'Transformer',
'Pipe',
]
class Transformer(BaseEstimator):
"""Transformer abstract class freely inspired by Scikit-Learn.
See `TransformerMixin <https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py>`_
for more details, the main differences are the parameters type and the returns.
    Note that this inherits from sklearn BaseEstimator to simplify parameter management.
"""
__metaclass__ = ABCMeta
def __init__(self, name=None, sklearn_output=False, **kwargs):
"""
        :param name: provide a name for this transformer
:param sklearn_output: if True produces outputs compatible with sklearn Pipeline
"""
self.name = name
if name is None:
# if no name is provided name will be assigned as the class name with a random hex
self.name = to_snake(self.__class__.__name__) + '_{:02x}'.format(getrandbits(16))
self.sklearn_output = sklearn_output
@abstractmethod
def fit_x(self, x):
"""Fit the Transformer parameters for `x`.
This function is intended for implementing the
computation of the parameters needed for transform function.
Args:
:param x: a Pandas Dataframe of shape [n_samples, n_features] the dataset
Returns:
:return self:
Raises:
`NotImplementedError`: function not implemented
"""
raise NotImplementedError()
@abstractmethod
def fit_y(self, y):
"""Fit the Transformer parameters for `y`.
This function is intended for implementing the
computation of the parameters needed for transform function.
Args:
:param y: a Pandas Dataframe of shape [n_samples] the target
Returns:
:return self:
Raises:
`NotImplementedError`: function not implemented
"""
raise NotImplementedError()
def fit(self, x, y=None):
"""Fit the Transformer parameters.
        If `x` or `y` is not a Pandas DataFrame, no fit will be performed.
Note: `y=None` as default is to be compliant with scikit-learn
Returns:
:return self:
"""
self.logging('fitting...', level=logging.DEBUG)
if _check_input(x, self.logging):
self.fit_x(x)
if _check_input(y, self.logging):
self.fit_y(y)
return self
@abstractmethod
def transform_x(self, x):
"""Transform `x`.
This function is intended to implement the actual transformation on
x.
Args:
:param x: a Pandas Dataframe of shape [n_samples, n_features] the dataset
Returns:
:return x_new: the new transformed x
Raises:
`NotImplementedError`: function not implemented
"""
raise NotImplementedError()
@abstractmethod
def transform_y(self, y):
"""Transform `y`.
This function is intended to implement the actual transformation on
y.
Args:
:param y: a Pandas Dataframe of shape [n_samples] the target
Returns:
:return y_new: the new transformed y
Raises:
`NotImplementedError`: function not implemented
"""
raise NotImplementedError()
def transform(self, x, y=None):
"""Transform `x` and `y`.
        If `x` or `y` is not a Pandas DataFrame, the original value will be returned.
Note: `y=None` as default is to be compliant with scikit-learn
Returns:
:return x_new: the new transformed x
:return y_new: the new transformed y
.. todo:: This can be parallelized in computation on x and y
"""
self.logging('transforming...', level=logging.DEBUG)
_x = x
if _check_input(x, self.logging):
_x = self.transform_x(x)
if self.sklearn_output:
return _x
_y = y
if _check_input(y, self.logging):
_y = self.transform_y(y)
return _x, _y
def fit_transform(self, x, y=None):
"""Apply `fit` and `transform` functions.
Note: `y=None` as default is to be compliant with scikit-learn
Args:
:param x: a Pandas Dataframe of shape [n_samples, n_features] the dataset
:param y: a Pandas Dataframe of shape [n_samples] the target
Returns:
:return x_new: the new transformed x
:return y_new: the new transformed y
"""
return self.fit(x, y).transform(x, y)
def fit_transform_x(self, x):
"""Apply `fit` and `transform` functions only on `x`.
Args:
:param x: a Pandas Dataframe of shape [n_samples, n_features] the dataset
Returns:
:return x_new: the new transformed x
"""
return self.fit_x(x).transform_x(x)
def fit_transform_y(self, y):
"""Apply `fit` and `transform` functions only on `y`.
Args:
:param y: a Pandas Dataframe of shape [n_samples] the target
Returns:
:return y_new: the new transformed y
"""
return self.fit_y(y).transform_y(y)
@abstractmethod
def inverse_transform_x(self, x):
"""Inverse transform `x`.
This function is intended to implement the inverse
transform to get back to the original x.
Args:
:param x: a Pandas Dataframe of shape [n_samples, n_features] the dataset
Returns:
:return x_org: the original inverse transformed x
Raises:
`NotImplementedError`: function not implemented
"""
raise NotImplementedError()
@abstractmethod
def inverse_transform_y(self, y):
"""Inverse transform `y`.
This function is intended to implement the inverse
transform to get back to the original y.
Args:
:param y: a Pandas Dataframe of shape [n_samples] the target
Returns:
:return y_org: the original inverse transformed y
Raises:
`NotImplementedError`: function not implemented
"""
raise NotImplementedError()
def inverse_transform(self, x, y=None):
"""Inverse transform `x` and `y`.
This function is intended to implement the inverse
transform to get back to the original x and y.
Note: `y=None` as default is to be compliant with scikit-learn
Returns:
:return x_org: the new inverse transformed x
:return y_org: the new inverse transformed y
"""
self.logging('inverse transforming...', level=logging.DEBUG)
_x = x
if _check_input(x, self.logging):
_x = self.inverse_transform_x(x)
if self.sklearn_output:
return _x
_y = y
if _check_input(y, self.logging):
_y = self.inverse_transform_y(y)
return _x, _y
def logging(self, msg, level=logging.INFO):
"""Helper function to log info related to Transformer
Args:
:param msg: the message to log
:param level: logging level enum (https://docs.python.org/2/library/logging.html#logging-levels)
"""
logging.log(level, '[{}] : {}'.format(self.name, msg))
class Pipe(object):
"""Pipe abstract class to apply a list of transformers.
This provides all basic wiring to deal with list of transformers.
    :param transformers: a list of transformers
"""
    def __init__(self, transformers=None):
        self.transformers = transformers if transformers is not None else []
def __len__(self):
return len(self.transformers)
def __iter__(self):
return iter(self.transformers)
    def __str__(self):
        return '[' + ', '.join(str(c) for c in self.transformers) + ']'
def __getitem__(self, index):
return self.transformers[index]
def __setitem__(self, index, transformer):
assert _check_transformer_type(transformer, self.logging), 'Mismatched type: expected Transformer got {0}'.format(
type(transformer))
self.transformers[index] = transformer
def __delitem__(self, index):
del self.transformers[index]
def copy(self, transformers):
"""Copy a list of :class:`Transformer` objects to this object."""
del self.transformers[:]
for t in transformers:
self.transformers.append(t)
def clone(self):
"""Clone this object."""
return deepcopy(self)
def append(self, transformer):
assert _check_transformer_type(transformer, self.logging), 'Mismatched type: expected Transformer got {0}'.format(
type(transformer))
self.transformers.append(transformer)
def extend(self, transformers):
for t in transformers:
self.append(t)
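# Minimal example subclass (illustrative only, not part of the library): shows which
# abstract hooks a concrete Transformer must implement. It scales every column of `x`
# by a constant and passes `y` through unchanged.
class _ScaleTransformer(Transformer):
    def __init__(self, factor=2.0, **kwargs):
        super(_ScaleTransformer, self).__init__(**kwargs)
        self.factor = factor
    def fit_x(self, x):
        return self                     # stateless: nothing to learn from x
    def fit_y(self, y):
        return self                     # stateless: nothing to learn from y
    def transform_x(self, x):
        return x * self.factor
    def transform_y(self, y):
        return y
    def inverse_transform_x(self, x):
        return x / self.factor
    def inverse_transform_y(self, y):
        return y
# e.g. x2, y2 = _ScaleTransformer(factor=3.0).fit_transform(x_df, y_df) for pandas
# inputs (x_df and y_df are hypothetical DataFrames).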
|
#!/usr/bin/env python
# Copyright (c) 2014, Stanford University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
## Class: ModulestoreExtractor
## Author: Alex Kindel
## Date: 8 February 2016
## Converts modulestore mongo structure to SQL.
##
## On 25 January 2016, Lagunita (OpenEdX @ Stanford) switched from a single modulestore (one Mongo database)
## to a split modulestore (three Mongo databases). This class provides an interface to explicitly handle
## either modulestore case.
import argparse
from collections import namedtuple
import datetime
import logging
import math
import os.path
import re
import sys
import pymongo as mng
from pymysql_utils1 import MySQLDB
#from collections import defaultdict, namedtuple
# Data type for AY_[quarter][year] date ranges
Quarter = namedtuple('Quarter', ['start_date', 'end_date', 'quarter'])
# Class variables for determining internal status of course
SU_ENROLLMENT_DOMAIN = "shib:https://idp.stanford.edu/"
INTERNAL_ORGS = ['ohsx', 'ohs', 'oli']
TEST_COURSE_NAME_PATTERN = re.compile(r'[Ss]and[bB]ox|TESTTEST|/[tT][eE][sS][tT]/|[tT]esting|' +\
'DisplayTest|GSB-test|Test_course|JosephTest|LoadTest|' +\
'ABTest|EmailTest|LiveTest|TestEDUC2000C|EXP1/Experimental_Assessment_Test|' +\
'Stanford/shib_only|SPCS_test_course1|VPTL/TEST_|eqptest|wikitest|' +\
'Wadhwani_test_course|Test/stv_Medicine|Test_VPTL1|SANDBOX|G3U/IVHT/Feb2016|' +\
'^Demo[s]*/|/Demo[s]*$|/Sdemo|Medicine/Demo/Anesthesia_Illustrated|^DemoX|' +\
'DEMO-MedStats|SampleUniversity|MonX/AB123/Fall201|^MonX')
# When courses are created, OpenEdx gives them a run date
# of 2030. Only when they are finalized do they
# get a real run-date. So create a list with valid
# academic years: 2012 up to current year + 5:
VALID_AYS = [ay for ay in range(2012, datetime.datetime.today().year + 6)]
class ModulestoreExtractor(MySQLDB):
# Max number of collection records
# to import before doing a bulk
# import:
BULK_INSERT_NUM_ROWS = 10000
# For logging: print how many rows have
# been ingested every REPORT_EVERY_N_ROWS
# rows:
REPORT_EVERY_N_ROWS = 10000
def __init__(self, split=True, old=True, edxproblem=True, courseinfo=True, edxvideo=True, verbose=False):
'''
Get interface to modulestore backup.
Note: This class presumes modulestore was recently loaded to mongod.
Class also presumes that mongod is running on localhost:27017.
'''
# FIXME: don't presume modulestore is recently loaded and running
self.msdb = mng.MongoClient().modulestore
if verbose:
self.setupLogging(logging.INFO, logFile=None)
else:
self.setupLogging(logging.WARN, logFile=None)
# Need to handle Split and Old modulestore cases
self.split = split
self.old = old
# Switch for updating EdxProblem and CourseInfo separately (useful for testing)
self.update_EP = edxproblem
self.update_CI = courseinfo
self.update_EV = edxvideo
# Initialize MySQL connection from config file
home = os.path.expanduser('~')
dbFile = home + "/.ssh/mysql_user"
if not os.path.isfile(dbFile):
sys.exit("MySQL user credentials not found: " + dbFile)
dbuser = None #@UnusedVariable
dbpass = None #@UnusedVariable
with open(dbFile, 'r') as f:
dbuser = f.readline().rstrip()
dbpass = f.readline().rstrip()
MySQLDB.__init__(self, db="Edx", user=dbuser, passwd=dbpass)
def __buildEmptyEdxProblemTable(self):
'''
Reset EdxProblem table and rebuild.
'''
# Build table drop and table create queries
dropOldTableQuery = """DROP TABLE IF EXISTS `EdxProblem`;"""
emptyEdxProblemTableQuery = """
CREATE TABLE IF NOT EXISTS `EdxProblem` (
`problem_id` VARCHAR(32) DEFAULT NULL,
`problem_display_name` VARCHAR(100) DEFAULT NULL,
`course_display_name` VARCHAR(100) DEFAULT NULL,
`problem_text` LONGTEXT,
`date` VARCHAR(50) DEFAULT NULL,
`weight` DECIMAL DEFAULT NULL,
`revision` VARCHAR(10) DEFAULT NULL,
`max_attempts` INT DEFAULT NULL,
`trackevent_hook` VARCHAR(200) DEFAULT NULL,
`vertical_uri` VARCHAR(200) DEFAULT NULL,
`problem_idx` INT DEFAULT NULL,
`sequential_uri` VARCHAR(200) DEFAULT NULL,
`vertical_idx` INT DEFAULT NULL,
`chapter_uri` VARCHAR(200) DEFAULT NULL,
`sequential_idx` INT DEFAULT NULL,
`chapter_idx` INT DEFAULT NULL,
`staff_only` tinyint(4) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8;
"""
# Execute table definition queries
self.execute(dropOldTableQuery)
self.execute(emptyEdxProblemTableQuery)
def __buildEmptyEdxVideoTable(self):
        '''
        Reset EdxVideo table and rebuild.
        '''
dropOldTableQuery = """DROP TABLE IF EXISTS `EdxVideo`;"""
emptyEdxVideoTableQuery = """
CREATE TABLE IF NOT EXISTS `EdxVideo` (
`video_id` VARCHAR(32) DEFAULT NULL,
`video_display_name` VARCHAR(100) DEFAULT NULL,
`course_display_name` VARCHAR(100) DEFAULT NULL,
`video_uri` TEXT DEFAULT NULL,
`video_code` TEXT DEFAULT NULL,
`trackevent_hook` VARCHAR(200) DEFAULT NULL,
`vertical_uri` VARCHAR(200) DEFAULT NULL,
`video_idx` INT DEFAULT NULL,
`sequential_uri` VARCHAR(200) DEFAULT NULL,
`vertical_idx` INT DEFAULT NULL,
`chapter_uri` VARCHAR(200) DEFAULT NULL,
`sequential_idx` INT DEFAULT NULL,
`chapter_idx` INT DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8;
"""
# Execute table definition queries
self.execute(dropOldTableQuery)
self.execute(emptyEdxVideoTableQuery)
def __buildEmptyCourseInfoTable(self):
'''
Reset CourseInfo table and rebuild.
'''
# Build table drop and table definition queries
dropOldTableQuery = """DROP TABLE IF EXISTS `CourseInfo`;"""
emptyCourseInfoTableQuery = """
CREATE TABLE IF NOT EXISTS `CourseInfo` (
`course_display_name` varchar(255) DEFAULT NULL,
`course_catalog_name` varchar(255) DEFAULT NULL,
`academic_year` int(11) DEFAULT NULL,
`quarter` varchar(7) DEFAULT NULL,
# `num_quarters` int(11) DEFAULT NULL, # NOTE: num_quarters field deprecated 5 May 2016
`is_internal` int DEFAULT NULL,
`enrollment_start` datetime DEFAULT NULL,
`start_date` datetime DEFAULT NULL,
`enrollment_end` datetime DEFAULT NULL,
`end_date` datetime DEFAULT NULL,
`grade_policy` text DEFAULT NULL,
`certs_policy` text DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8;
"""
# Execute table definition queries
self.execute(dropOldTableQuery)
self.execute(emptyCourseInfoTableQuery)
def export(self):
'''
Client method builds tables and loads various modulestore cases to MySQL.
We reload both tables from scratch each time since the tables are relatively small.
'''
self.__buildEmptyEdxProblemTable() if self.update_EP else None
self.__buildEmptyCourseInfoTable() if self.update_CI else None
self.__buildEmptyEdxVideoTable() if self.update_EV else None
if self.split and self.update_EP:
self.logInfo("About to ingest problem defs from new-type modulestore...")
self.__extractSplitEdxProblem()
self.logInfo("Done ingesting problem defs from new-type modulestore...")
if self.split and self.update_CI:
self.logInfo("About to ingest course defs from new-type modulestore...")
self.__extractSplitCourseInfo()
self.logInfo("Done ingesting course defs from new-type modulestore...")
if self.split and self.update_EV:
self.logInfo("About to ingest video defs from new-type modulestore...")
self.__extractSplitEdxVideo()
self.logInfo("Done ingesting video defs from new-type modulestore...")
if self.old and self.update_EP:
self.logInfo("About to ingest problem defs from old-type modulestore...")
self.__extractOldEdxProblem()
self.logInfo("Done ingesting problem defs from old-type modulestore...")
if self.old and self.update_CI:
self.logInfo("About to ingest course defs from old-type modulestore...")
self.__extractOldCourseInfo()
self.logInfo("Done ingesting course defs from old-type modulestore...")
if self.old and self.update_EV:
self.logInfo("About to ingest video defs from old-type modulestore...")
self.__extractOldEdxVideo()
self.logInfo("Done ingesting video defs from old-type modulestore...")
@staticmethod
def __resolveResourceURI(problem):
'''
Extract resource URI as identifier for EdxTrackEvent hook.
'''
tag = problem["_id"]["tag"]
org = problem["_id"]["org"]
course = problem["_id"]["course"]
category = problem["_id"]["category"]
name = problem["_id"]["name"]
uri = "%s://%s/%s/%s/%s" % (tag, org, course, category, name)
return uri
@staticmethod
def __resolveTimestamp(problem):
'''
Convert published_date array from modulestore to python datetime object.
'''
        # FIXME: This conversion is known to be incorrect and needs to be fixed.
dtarr = problem['metadata'].get('published_date', False)
if not dtarr:
return None
dtstr = '-'.join(map(str, dtarr[:6]))
date = datetime.datetime.strptime(dtstr, "%Y-%m-%d-%H-%M-%S")
return date
def __resolveCDN(self, module):
'''
Extract course display name from old-style modulestore.
'''
org = module["_id"]["org"]
course = module["_id"]["course"]
definition = self.msdb.modulestore.find({"_id.category": "course",
"_id.org": org,
"_id.course": course})
try:
name = definition[0]["_id"]["name"]
except IndexError:
self.logError('Course display name for course %s not found.' % str(course))
cdn = '<Not found in Modulestore>'
return cdn
cdn = "%s/%s/%s" % (org, course, name)
return cdn
def __locateModuleInParent(self, resource_uri):
'''
Given URI for a vertical, return the URI of the encapsulating sequential
and an integer for what order in the sequence the vertical occurred.
'''
if not resource_uri:
return None, -2
try:
parent_module = self.msdb.modulestore.find({"definition.children": resource_uri}).next()
except StopIteration:
# print resource_uri
return None, -2
parent_module_uri = self.__resolveResourceURI(parent_module)
order = parent_module['definition']['children'].index(resource_uri) + 1 # Use 1-indexing
return parent_module_uri, order
def __extractOldEdxProblem(self):
'''
Extract problem data from old-style MongoDB modulestore.
Inserts data into EdxProblem. Since pulling from the old
part of modulestore is slow: takes about 8hrs, we have table
EdxOldModstoreProblemArchive. It holds all the old data.
If that table is present and not empty, we copy from it
to the EdxProblem table and are done. Else we run the
actual extraction.
'''
try:
self.logInfo('Checking whether EdxOldModstoreProblemArchive exists and non-empty...')
it = self.query('SELECT COUNT(*) FROM EdxOldModstoreProblemArchive')
num_rows = it.next()
            self.logInfo('Found %s rows in EdxOldModstoreProblemArchive.' % num_rows)
if num_rows > 0:
self.logInfo("Copying rows from EdxOldModstoreProblemArchive to EdxProblem...")
self.execute('INSERT INTO EdxProblem SELECT * FROM EdxOldModstoreProblemArchive')
self.logInfo("Done copying rows from EdxOldModstoreProblemArchive to EdxProblem.")
return
except ValueError:
self.logInfo("EdxOldModstoreProblemArchive not present; extracting 'old-modulestore' problem defs from Mongodb.")
problems = self.msdb.modulestore.find({"_id.category": "problem"}).batch_size(20)
col_names = ['problem_id',
'problem_display_name',
'course_display_name',
'problem_text',
'date',
'weight',
'revision',
'max_attempts',
                     'trackevent_hook',
                     'vertical_uri',
                     'problem_idx',
                     'sequential_uri',
                     'vertical_idx',
                     'chapter_uri',
                     'sequential_idx',
                     'chapter_idx',
                     'staff_only'
                     ]
table = []
num_pulled = 0
for problem in problems:
# Each row in the table is a dictionary
cdn = self.__resolveCDN(problem)
# Is this a test course?
if self.is_test_name(cdn):
continue
# Reconstruct URI for problem
problem_uri = self.__resolveResourceURI(problem)
# Get URI for enclosing vertical and location of problem therein
vertical_uri, problem_idx = self.__locateModuleInParent(problem_uri)
# Get URI for enclosing sequential and location of vertical therein
sequential_uri, vertical_idx = self.__locateModuleInParent(vertical_uri)
# Staff-only indicator
if not vertical_uri:
staff_only = False
else:
staff_name = vertical_uri.split('/')[5]
staff_only = self.msdb.modulestore.find({"_id.name": staff_name}).next()['metadata']\
.get('visible_to_staff_only', False)
# URI for enclosing chapter and location of sequential
chapter_uri, sequential_idx = self.__locateModuleInParent(sequential_uri)
# URI for course and location of chapter
course_uri, chapter_idx = self.__locateModuleInParent(chapter_uri) #@UnusedVariable
data = (problem['_id'].get('name', 'False'), # problem_id
problem['metadata'].get('display_name', 'False'), # problem_display_name
cdn, # course_display_name
problem['definition'].get('data', 'False'), # problem_text
self.__resolveTimestamp(problem), # date
problem['metadata'].get('weight', -1), # weight
problem['_id'].get('revision', 'False'), # revision
problem['metadata'].get('max_attempts', -1), # max_attempts
problem_uri, # trackevent_hook
vertical_uri, # vertical_uri
problem_idx, # problem_idx
sequential_uri, # sequential_uri
vertical_idx, # vertical_idx
chapter_uri, # chapter_uri
sequential_idx, # sequential_idx
chapter_idx, # chapter_idx
staff_only # staff_only
)
table.append(data)
if len(table) >= ModulestoreExtractor.BULK_INSERT_NUM_ROWS:
self.__loadToSQL('EdxProblem', col_names, table)
num_pulled += len(table)
if num_pulled > ModulestoreExtractor.REPORT_EVERY_N_ROWS:
self.logInfo("Ingested %s rows of old-modulestore problems." % num_pulled)
table = []
if len(table) > 0:
self.__loadToSQL('EdxProblem', col_names, table)
num_pulled += len(table)
self.logInfo("Ingested %s rows of old-modulestore problems." % num_pulled)
def __extractSplitEdxProblem(self):
'''
Extract problem data from Split MongoDB modulestore.
SQL load method expects a list of dicts mapping column names to data.
'''
table = []
# Get a course generator and iterate through
courses = self.msdb['modulestore.active_versions'].find()
col_names = ['problem_id',
'problem_display_name',
'course_display_name',
'problem_text',
'date',
'weight',
'revision',
'max_attempts',
'trackevent_hook'
]
num_pulled = 0
for course in courses:
cdn = "%s/%s/%s" % (course['org'], course['course'], course['run'])
if self.is_test_name(cdn):
continue
cid = course['versions'].get('published-branch', None)
if not cid:
continue
# Retrieve course structure from published branch and filter out non-problem blocks
try:
structure = self.msdb['modulestore.structures'].find({"_id": cid, "blocks.block_type": "problem"}).next()
except StopIteration:
continue
for block in filter(lambda b: b['block_type'] == 'problem', structure['blocks']):
try:
definition = self.msdb['modulestore.definitions'].find({"_id": block['definition']}).next()
except StopIteration:
continue
# Construct data dict and append to table list
data = (block['block_id'], # problem_id
block['fields'].get('display_name', "NA"), # problem_display_name
cdn, # course_display_name
definition['fields']['data'], # problem_text
# TODO: Test the below on real course data from split modulestore
# TODO: Add context metadata
False, # date
-1, # weight
False, # revision
-1, # max_attempts
False # trackeventhook
)
table.append(data)
if len(table) >= ModulestoreExtractor.BULK_INSERT_NUM_ROWS:
self.__loadToSQL('EdxProblem', col_names, table)
num_pulled += len(table)
if num_pulled > ModulestoreExtractor.REPORT_EVERY_N_ROWS:
self.logInfo("Ingested %s rows of new-modulestore problems." % num_pulled)
table = []
if len(table) > 0:
self.__loadToSQL('EdxProblem', col_names, table)
num_pulled += len(table)
self.logInfo("Ingested %s rows of new-modulestore problems." % num_pulled)
def __extractOldEdxVideo(self):
'''
Extract video metadata from old MongoDB modulestore.
More or less identical to EdxProblem extract, but with different metadata.
Since pulling from the old part of modulestore is slow: takes about 8hrs,
we have table EdxOldModstoreVideoArchive. It holds all the old data.
If that table is present and not empty, we copy from it to the EdxVideo
table and are done. Else we run the actual extraction.
'''
try:
self.logInfo('Checking whether EdxOldModstoreVideoArchive exists and non-empty...')
it = self.query('SELECT COUNT(*) FROM EdxOldModstoreVideoArchive')
num_rows = it.next()
            self.logInfo('Found %s rows in EdxOldModstoreVideoArchive.' % num_rows)
if num_rows > 0:
self.logInfo("Copying rows from EdxOldModstoreVideoArchive to EdxProblem...")
self.execute('INSERT INTO EdxVideo SELECT * FROM EdxOldModstoreVideoArchive')
self.logInfo("Done copying rows from EdxOldModstoreVideoArchive to EdxProblem.")
return
except ValueError:
self.logInfo("EdxOldModstoreVideoArchive not present; extracting 'old-modulestore' video defs from Mongodb.")
table = []
num_pulled = 0
videos = self.msdb.modulestore.find({"_id.category": "video"}).batch_size(20)
col_names = ['video_id',
'video_display_name',
'course_display_name',
'video_uri',
'video_code',
'trackevent_hook',
'vertical_uri',
                     'video_idx',
                     'sequential_uri',
                     'vertical_idx',
                     'chapter_uri',
'sequential_idx',
'chapter_idx'
]
for video in videos:
cdn = self.__resolveCDN(video)
# Is this a test course?
if self.is_test_name(cdn):
continue
video_uri = self.__resolveResourceURI(video)
vertical_uri, problem_idx = self.__locateModuleInParent(video_uri)
sequential_uri, vertical_idx = self.__locateModuleInParent(vertical_uri)
chapter_uri, sequential_idx = self.__locateModuleInParent(sequential_uri)
course_uri, chapter_idx = self.__locateModuleInParent(chapter_uri) #@UnusedVariable
# Identifiers:
data = (video['_id'].get('name', 'NA'), # video_id
video['metadata'].get('display_name', 'NA'), # video_display_name
cdn, # course_display_name
video['metadata'].get('html5_sources', 'NA'), # video_uri
video['metadata'].get('youtube_id_1_0', 'NA'), # video_code
# Context
video_uri, # trackevent_hook
vertical_uri, # vertical_uri
problem_idx, # problem_idx
sequential_uri, # sequential_uri
vertical_idx, # vertical_idx
chapter_uri, # chapter_uri
sequential_idx, # sequential_idx
chapter_idx # chapter_idx
)
table.append(data)
if len(table) >= ModulestoreExtractor.BULK_INSERT_NUM_ROWS:
self.__loadToSQL('EdxVideo', col_names, table)
num_pulled += len(table)
if num_pulled > ModulestoreExtractor.REPORT_EVERY_N_ROWS:
self.logInfo("Ingested %s rows of old-modulestore videos." % num_pulled)
table = []
if len(table) > 0:
self.__loadToSQL('EdxVideo', col_names, table)
num_pulled += len(table)
self.logInfo("Ingested %s rows of old-modulestore videos." % num_pulled)
def __extractSplitEdxVideo(self):
'''
Extract video metadata from Split MongoDB modulestore.
More or less identical to EdxProblem extract, but with different metadata.
'''
table = []
num_pulled = 0
# Get a course generator and iterate through
courses = self.msdb['modulestore.active_versions'].find()
        # Only the identifier fields below are extracted from the split modulestore;
        # the context columns (vertical/sequential/chapter) are left at their defaults.
        col_names = ['video_id',
                     'video_display_name',
                     'course_display_name',
                     'video_uri',
                     'video_code'
                     ]
for course in courses:
cdn = "%s/%s/%s" % (course['org'], course['course'], course['run'])
if self.is_test_name(cdn):
continue
cid = course['versions'].get('published-branch', None)
if not cid:
continue
# Retrieve course structure from published branch and filter out non-problem blocks
try:
structure = self.msdb['modulestore.structures'].find({"_id": cid, "blocks.block_type": "video"}).next()
except StopIteration:
continue # Some courses don't have any video content
for block in filter(lambda b: b['block_type'] == 'video', structure['blocks']):
try:
definition = self.msdb['modulestore.definitions'].find({"_id": block['definition']}).next() #@UnusedVariable
except StopIteration:
continue
data = (block['block_id'], # video_id'
block['fields'].get('display_name', 'NA'), # video_display_name
cdn, # course_display_name
block['fields'].get('html5_sources', 'NA'), # video_uri
block['fields'].get('youtube_id_1_0', 'NA') # video_code
)
table.append(data)
if len(table) >= ModulestoreExtractor.BULK_INSERT_NUM_ROWS:
self.__loadToSQL('EdxVideo', col_names, table)
num_pulled += len(table)
if num_pulled > ModulestoreExtractor.REPORT_EVERY_N_ROWS:
self.logInfo("Ingested %s rows of new-modulestore videos." % num_pulled)
table = []
if len(table) > 0:
self.__loadToSQL('EdxVideo', col_names, table)
num_pulled += len(table)
self.logInfo("Ingested %s rows of new-modulestore videos." % num_pulled)
@staticmethod
def inRange(date, quarter):
'''
        Return the quarter name if `date` falls within the given quarter's date range, else False.
        '''
        if not isinstance(quarter, Quarter):
            raise TypeError('Function inRange expects a Quarter instance for the quarter argument.')
msdb_time = lambda timestamp: datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%SZ")
if (msdb_time(date) >= msdb_time(quarter.start_date)) and (msdb_time(date) <= msdb_time(quarter.end_date)):
return quarter.quarter
else:
return False
@staticmethod
def genQuartersForAY(ay):
'''
Return date ranges for quarters for a given academic year.
'''
ayb = str(int(ay) + 1) # academic years bleed over into the next calendar year
AYfall = Quarter('%s-09-01T00:00:00Z' % ay, '%s-11-30T00:00:00Z' % ay, 'fall')
AYwinter = Quarter('%s-12-01T00:00:00Z' % ay, '%s-02-28T00:00:00Z' % ayb, 'winter')
AYspring = Quarter('%s-03-01T00:00:00Z' % ayb, '%s-05-31T00:00:00Z' % ayb, 'spring')
AYsummer = Quarter('%s-06-01T00:00:00Z' % ayb, '%s-08-31T00:00:00Z' % ayb, 'summer')
return AYfall, AYwinter, AYspring, AYsummer
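    # Illustrative example (values follow directly from the code above): genQuartersForAY('2015')
    # yields fall 2015-09-01..2015-11-30, winter 2015-12-01..2016-02-28,
    # spring 2016-03-01..2016-05-31, and summer 2016-06-01..2016-08-31.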
def __lookupAYDataFromDates(self, start_date, end_date):
'''
Return course calendar data from hardcoded lookup table.
'''
if start_date == '0000-00-00T00:00:00Z':
return 0, 'NA', 0
end_date = '0000-00-00T00:00:00Z' if not end_date else end_date
if start_date.count(':') < 2:
start_date = start_date[:-1] + ":00Z"
# Quick functions to parse out month/year/academic year
month = lambda x: int(x[5:7])
year = lambda x: int(x[:4])
# Generate quarters given start date and determine starting quarter
start_ay = str(year(start_date)) if month(start_date) >= 9 else str(year(start_date) - 1)
sFall, sWinter, sSpring, sSummer = self.genQuartersForAY(start_ay)
start_quarter = self.inRange(start_date, sFall) or\
self.inRange(start_date, sWinter) or\
self.inRange(start_date, sSpring) or\
self.inRange(start_date, sSummer)
# Calculate number of quarters
months_passed = (year(end_date) - year(start_date)) * 12 + (month(end_date) - month(start_date))
n_quarters = int(math.ceil(months_passed / 4))
if n_quarters == 0:
n_quarters = 1 # Round up to one quarter minimum
if n_quarters < 0:
n_quarters = 0 # Self-paced courses have no quarters
return int(start_ay), start_quarter, n_quarters
@staticmethod
def isInternal(course_struct, block_struct):
'''
Return 1 or 0 indicating whether course is internal
because it is offered inside Stanford or by other
internal organizations. The two parameters are messy
JSON extracted by the new and old course info extract
methods.
'''
# Be extremely defensive: be prepared for any field
# not being present.
# Check the organization that offers the course:
course_metadata = course_struct.get('metadata', None)
if course_metadata is not None:
enroll_domain = course_metadata.get('enrollment_domain', 'NA')
else:
enroll_domain = None
course_org = course_struct.get('org', None)
if (enroll_domain == SU_ENROLLMENT_DOMAIN) or (course_org in INTERNAL_ORGS):
return 1
# Now the explicit prohibitions: by_invitation and isprivate:
if block_struct is not None:
# Likely a new-type modulestore record:
block_fields = block_struct.get('fields', None)
if block_fields is not None:
# If field 'ispublic' is absent, we assume
# that course is public:
if not block_fields.get('ispublic', True):
return 1
# If the invitation_only field is absent,
# we assume public
if block_fields.get('invitation_only', False):
return 1
else:
if not course_struct.get('ispublic', True):
return 1
if course_struct.get('invitation_only', False):
return 1
        # None of the private-course criteria hold:
return 0
def __extractOldCourseInfo(self):
'''
Extract course metadata from old-style MongoDB modulestore.
Inserts all into CourseInfo table.
'''
table = []
num_pulled = 0
col_names = ['course_display_name',
'course_catalog_name',
'academic_year',
'quarter',
'is_internal',
'enrollment_start',
'start_date',
'enrollment_end',
'end_date',
'grade_policy',
'certs_policy'
]
# Iterate through all 'course' type documents in modulestore
courses = self.msdb.modulestore.find({"_id.category": "course"})
for course in courses:
# For old modstore format, there was no inner
# JSON structure called 'block'. Set to None
# to make the is_internal() method work for
# both old and new module store:
# #*******
# try:
# if course['_id']['course'] == '115':
# print("In old.")
# except KeyError:
# pass
# #*******
block = None
cdn = self.__resolveCDN(course)
# Is this a test course?
if self.is_test_name(cdn):
continue
start_date = course['metadata'].get('start', '0000-00-00T00:00:00Z')
end_date = course['metadata'].get('end', '0000-00-00T00:00:00Z')
academic_year, quarter, num_quarters = self.__lookupAYDataFromDates(start_date, end_date) #@UnusedVariable
# The following would check for start/end dates in the far
# future. But we decided rather to have the courses in CourseInfo
# and filter 'bad' dates during queries:
#if academic_year not in VALID_AYS and\
# course['_id']['name'] != 'SelfPaced' and\
# academic_year != 0: # Happens when start_date is not entered, as e.g. for Law courses and Medicine
# continue
try:
grade_policy = str(course['definition']['data'].get('grading_policy', 'NA').get('GRADER', 'NA'))
certs_policy = str(course['definition']['data'].get('grading_policy', 'NA').get('GRADE_CUTOFFS', 'NA'))
except AttributeError:
grade_policy = 'NA'
certs_policy = 'NA'
data = (cdn, # course_display_name
course['metadata']['display_name'], # course_catalog_name
academic_year, # academic_year
quarter, # quarter
self.isInternal(course,block), # is_internal field
course['metadata'].get('enrollment_start', '0000-00-00T00:00:00Z'), # enrollment_start
start_date, # start_date'
course['metadata'].get('enrollment_end', '0000-00-00T00:00:00Z'), # enrollment_end
end_date, # end_date
grade_policy, # grade_policy
certs_policy # certs_policy
)
table.append(data)
if len(table) >= ModulestoreExtractor.BULK_INSERT_NUM_ROWS:
self.__loadToSQL('CourseInfo', col_names, table)
num_pulled += len(table)
if num_pulled > ModulestoreExtractor.REPORT_EVERY_N_ROWS:
self.logInfo("Ingested %s rows of old-modulestore course info." % num_pulled)
table = []
if len(table) > 0:
self.__loadToSQL('CourseInfo', col_names, table)
num_pulled += len(table)
self.logInfo("Ingested %s rows of old-modulestore course info." % num_pulled)
def __extractSplitCourseInfo(self):
'''
Extract course metadata from Split MongoDB modulestore.
Inserts results in table CourseInfo.
'''
table = []
num_pulled = 0
col_names = ['course_display_name',
'course_catalog_name',
'start_date',
'end_date',
'academic_year',
'quarter',
'is_internal',
'enrollment_start',
'enrollment_end',
'grade_policy',
'certs_policy'
]
# Get all most recent versions of 'course' type documents from modulestore
courses = self.msdb['modulestore.active_versions'].find()
for course in courses:
# #*******
# try:
# if course['course'] == '115':
# print("In new.")
# except KeyError:
# pass
# #*******
cdn = "course-V%s:%s+%s+%s" %\
(course['schema_version'],course['org'], course['course'], course['run'])
if self.is_test_name(cdn):
continue
cid = course['versions'].get('published-branch', None)
if not cid:
continue # Ignore if not a 'published' course
# Get this course block and corresponding definition document from modulestore
try:
structure = self.msdb['modulestore.structures'].find({"_id": cid, "blocks.block_type": "course"}).next()
except StopIteration:
# No record found in structures:
continue
try:
block = filter(lambda b: b['block_type'] == 'course', structure['blocks'])[0]
except IndexError:
self.logError('No course block found for course %s' % str(course))
continue
try:
definition = self.msdb['modulestore.definitions'].find({"_id": block['definition']}).next()
except StopIteration:
continue
datestr = lambda d: datetime.datetime.strftime(d, "%Y-%m-%dT%H:%M:%SZ")
start_date = block['fields'].get('start', '0000-00-00T00:00:00Z')
end_date = block['fields'].get('end', '0000-00-00T00:00:00Z')
start_date = datestr(start_date) if type(start_date) is datetime.datetime else start_date
end_date = datestr(end_date) if type(end_date) is datetime.datetime else end_date
academic_year, quarter, num_quarters = self.__lookupAYDataFromDates(start_date, end_date) #@UnusedVariable
# We decided rather to have all courses in CourseInfo,
# even ones with 'bad' start/end dates:
#if academic_year not in VALID_AYS:
# continue
enrollment_start = block['fields'].get('enrollment_start', '0000-00-00T00:00:00Z')
enrollment_end = block['fields'].get('enrollment_end', '0000-00-00T00:00:00Z')
enrollment_start = datestr(enrollment_start) if type(enrollment_start) is datetime.datetime else enrollment_start
enrollment_end = datestr(enrollment_end) if type(enrollment_end) is datetime.datetime else enrollment_end
try:
grade_policy = str(definition['fields'].get('grading_policy').get('GRADER', 'NA'))
certs_policy = ("minimum_grade_credit: %s certificate_policy: " %\
block['fields'].get('minimum_grade_credit', 'NA')) +\
str(definition['fields'].get('grading_policy').get('GRADE_CUTOFFS', 'NA'))
except AttributeError:
grade_policy = 'NA'
certs_policy = 'NA'
data = (cdn, # course_display_name
block['fields']['display_name'], # course_catalog_name
academic_year, # academic_year
quarter, # quarter
self.isInternal(course,block), # is_internal
enrollment_start, # enrollment_start
start_date, # start_date
enrollment_end, # enrollment_end
end_date, # end_date
grade_policy, # grade_policy
certs_policy # certs_policy
)
table.append(data)
if len(table) >= ModulestoreExtractor.BULK_INSERT_NUM_ROWS:
self.__loadToSQL('CourseInfo', col_names, table)
num_pulled += len(table)
if num_pulled > ModulestoreExtractor.REPORT_EVERY_N_ROWS:
self.logInfo("Ingested %s rows of new-modulestore course info." % num_pulled)
table = []
if len(table) > 0:
self.__loadToSQL('CourseInfo', col_names, table)
num_pulled += len(table)
self.logInfo("Ingested %s rows of new-modulestore course info." % num_pulled)
# ----------------------- Utilities -------------------
#-------------------------
# is_test_name
#--------------
def is_test_name(self, course_display_name):
'''
Given a course name, return True if
it appears to be the name of a test
course.
'''
if TEST_COURSE_NAME_PATTERN.search(course_display_name) is not None:
return True
else:
return False
#-------------------------
# __loadToSQL
#--------------
def __loadToSQL(self, table_name, columns, arr_of_tuples):
'''
Build columns tuple and list of row tuples for MySQLDB bulkInsert operation, then execute.
We hold tables in memory to minimize query load on the receiving database.
'''
self.bulkInsert(table_name, columns, arr_of_tuples)
#-------------------------
# setupLogging
#--------------
def setupLogging(self, loggingLevel, logFile=None):
# Set up logging:
self.logger = logging.getLogger('newEvalIntake')
self.logger.setLevel(loggingLevel)
# Create file handler if requested:
if logFile is not None:
handler = logging.FileHandler(logFile)
else:
# Create console handler:
handler = logging.StreamHandler()
handler.setLevel(loggingLevel)
# Add the handler to the logger
self.logger.addHandler(handler)
#-------------------------
# logInfo
#--------------
def logInfo(self, msg):
self.logger.info(msg)
#-------------------------
# logError
#--------------
def logError(self, msg):
self.logger.error('***** ' + msg)
#-------------------------
# logWarn
#--------------
def logWarn(self, msg):
self.logger.warning(msg)
if __name__ == '__main__':
description_str = '''Pulls data from a local copy of modulestore,
and fills tables CourseInfo, EdxProblem, and EdxVideo.'''
parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),
formatter_class=argparse.RawTextHelpFormatter,
description=description_str)
parser.add_argument('-v', '--verbose',
help='print operational info to log.',
dest='verbose',
action='store_true');
parser.add_argument('to_load',
action='store',
nargs='*',
default='all',
choices=['courseinfo',
'edxproblem',
'edxvideo',
'all'
],
help='which table to extract from modulestore. \n' +\
'Use any or none of these. If none: extract all three.\n')
args = parser.parse_args();
# Ensure that tables-to-load info is an array:
if args.to_load == 'all' or 'all' in args.to_load:
to_load = ['courseinfo', 'edxproblem', 'edxvideo']
elif type(args.to_load) == list:
to_load = args.to_load
else:
to_load = [args.to_load]
extractor = ModulestoreExtractor(edxproblem=True if 'edxproblem' in to_load else False,
edxvideo=True if 'edxvideo' in to_load else False,
courseinfo=True if 'courseinfo' in to_load else False,
verbose=args.verbose)
extractor.export()
|
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
from textwrap import dedent
from reusable_components import Section, Chapter
from tutorial import styles
from tutorial import tools
examples = {
example: tools.load_example('tutorial/examples/table/{}'.format(example))
for example in ['simple.py']
}
layout = html.Div([
dcc.Markdown(dedent('''
# Dash DataTable
''')),
html.Iframe(
src="https://ghbtns.com/github-btn.html?user=plotly&repo=dash-table&type=star&count=true&size=large",
width="160px",
height="30px",
style={'border': 'none'}
),
dcc.Markdown(dedent('''
> **New! Released on November 2, 2018**
>
> Dash DataTable is an interactive table component designed for
> viewing, editing, and exploring large datasets.
>
> DataTable is rendered with standard, semantic HTML `<table/>` markup,
> which makes it accessible, responsive, and easy to style.
>
> This component was written from scratch in React.js specifically
> for the Dash community. Its API was designed to be ergonomic
> and its behavior is completely customizable through its properties.
>
> 7 months in the making, this is the most complex Dash
> component that Plotly has written, all from the ground-up
> using React and TypeScript. DataTable was designed with a
> featureset that allows Dash users to create complex,
> spreadsheet-driven applications with no compromises.
> We're excited to continue to work with users and companies
> that [invest in DataTable's future](https://plot.ly/products/consulting-and-oem/).
>
> DataTable is in `Alpha`. This is more of a statement on the
> DataTable API rather than on its features.
> The table currently works beautifully and is already
> used in production at F500 companies. However, we
> expect to make a few more breaking changes to its API and
> behavior within the next couple of months.
> Once the community feels good about its API, we'll lock it down
> and we'll commit to reducing the frequency of breaking changes.
> Please subscribe to [dash-table#207](https://github.com/plotly/dash-table/issues/207)
> and the [CHANGELOG.md](https://github.com/plotly/dash-table/blob/master/CHANGELOG.md) to stay up-to-date with any breaking changes.
>
> So, check out DataTable and let us know what you think.
> Or even better, share your DataTable Dash apps
> on the [community forum](https://community.plot.ly/t/show-and-tell-community-thread/7554)!
>
> -- chriddyp
''')),
Section('Quickstart', [
dcc.SyntaxHighlighter(
'''pip install dash=={}'''.format(dash.__version__),
customStyle=styles.code_container
),
dcc.SyntaxHighlighter(
examples['simple.py'][0],
language='python',
customStyle=styles.code_container
),
html.Div(examples['simple.py'][1], className='example-container'),
]),
Section('Dash DataTable User Guide', [
Chapter('Part 1. Sizing',
'/datatable/sizing',
'''
All about sizing the DataTable. Examples include:
- Setting the width and the height of the table
- Responsive table design
- Setting the widths of individual columns
- Handling long text
- Fixing rows and columns
'''),
Chapter('Part 2. Styling',
'/datatable/style',
'''
The style of the DataTable is highly customizable. This chapter
includes examples for:
- Conditional formatting
- Displaying multiple rows of headers
- Highlighting rows, columns, and cells
- Styling the table as a list view
- Changing the colors (including a dark theme!)
The sizing API for the table has been particularly tricky for
us to nail down, so be sure to read this chapter to understand the nuances,
limitations, and the APIs that we're exploring.
'''),
Chapter('Part 3. Sorting, Filtering, Selecting, and Paging',
'/datatable/interactivity',
'''
The DataTable is interactive. This chapter demonstrates the
interactive features of the table and how to wire up these
interactions to Python callbacks. These actions include:
- Paging
- Selecting Rows
- Sorting Columns
- Filtering Data
'''),
Chapter([html.Span('Part 4. Sorting, Filtering, and Paging '), html.I('with Python')],
'/datatable/callbacks',
'''
In Part 3, the paging, sorting, and filtering was done entirely
clientside (in the browser). This means that you need to
load all of the data into the table up-front. If your data is large,
then this can be prohibitively slow.
In this chapter, you'll learn how to write your own filtering,
sorting, and paging backends in Python with Dash.
We'll do the data processing with Pandas but you could write your
own routines with SQL or even generate the data on the fly!
'''),
Chapter([html.Span('Part 5. Typing ')],
'/datatable/typing',
'''
In this chapter, you'll learn how to configure the table to
- assign the column type
- change the data presentation
- change the data formatting
- validate or coerce user data input
- apply default behavior for valid and invalid data
'''),
Chapter('Part 6. Editable Tables',
'/datatable/editable',
'''
The DataTable is editable. Like a spreadsheet, it can be used
as an input for controlling models with a variable number
of inputs.
This chapter includes recipes for:
- Determining which cell has changed
- Filtering out null values
- Adding or removing columns
- Adding or removing rows
- Ensuring that a minimum set of rows are visible
- Running Python computations on certain columns or cells
'''),
Chapter('Part 7. Rendering Cells as Dropdowns',
'/datatable/dropdowns',
'''
Cells can be rendered as editable Dropdowns. This is our first
stake in bringing a full typing system to the table.
Rendering cells as dropdowns introduces some complexity in the
markup and so there are a few limitations that you should be aware
of.
'''),
Chapter('Part 8. Virtualization',
'/datatable/virtualization',
'''
Examples using DataTable virtualization.
'''),
Chapter('Part 9. Filtering Syntax',
'/datatable/filtering',
'''
An explanation and examples of filtering syntax for both frontend
and backend filtering in the DataTable.
'''),
Chapter('Part 10. Table Reference',
'/datatable/reference',
'''
The full list of Table properties and their settings.
''')
]),
Section('Roadmap, Sponsorships, and Contact', dcc.Markdown(dedent(
'''
Immediately, we're working on stability, virtualization, and
a first-class data type system.
Check out [our roadmap project board](https://github.com/orgs/plotly/projects/12)
to see what's coming next.
Many thanks to all of our customers who have sponsored the
development of this table. Interested in steering the roadmap?
[Get in touch](https://plot.ly/products/consulting-and-oem/)
'''
)))
])
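
# A minimal, hedged sketch of the kind of app the Quickstart section above
# loads from 'tutorial/examples/table/simple.py'. The helper name, columns and
# data below are illustrative only and are not the actual example file.
def _minimal_datatable_app():
    import dash_table  # provided by the dash-table package

    df = pd.DataFrame({'State': ['CA', 'NY'], 'Population': [39, 19]})
    app = dash.Dash(__name__)
    app.layout = dash_table.DataTable(
        id='table',
        columns=[{'name': col, 'id': col} for col in df.columns],
        data=df.to_dict('records'),
    )
    return app


if __name__ == '__main__':
    _minimal_datatable_app().run_server(debug=True)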
|
#!/usr/bin/env python3
import sys
import time
import random
from optparse import OptionParser
from rgbmatrix import RGBMatrix, RGBMatrixOptions
from matrixdemos.scripts.utils import *
description = "A simple demo of twinkling stars"
parser = OptionParser(description=description)
parser.add_option("-s", "--speed",
help="set the animation speed multiplier as a float", type=float,
dest="speed", default=1)
(options, args) = parser.parse_args()
_options = RGBMatrixOptions()
_options.drop_privileges = False
_options.rows = 32
_options.chain_length = 1
_options.parallel = 1
_options.hardware_mapping = 'adafruit-hat'
matrix = RGBMatrix(options=_options)
STAR_COLOR = (190, 200, 255)
BACKGROUND_COLOR = (1, 3, 5)
SPEED = options.speed
if SPEED <= 0:
sys.stderr.write("ERR: Animation speed must exceed zero\n")
sys.exit()
if SPEED > 20:
sys.stderr.write("ERR: Animation speed must be below 20\n")
sys.exit()
STAR_COUNT = 40
class Star:
def __init__(self, start=False):
self.pos = (random.randint(0, matrix.width - 1), (random.randint(0, matrix.height - 1)))
self.fuzz_amount = 5 # Alpha can always be +/- this number
self.max_alpha = random.randint(30, 200)
self.alpha = random.randint(-100, (self.max_alpha - 5) if start else 0)
self.grow_speed = random.uniform(.4, .8)
self.fade_speed = random.uniform(.3, .8)
self.tolerance = 1 # Starts going down if max_alpha-alpha>=tolerance
self.going_up = True
self.dead = False
def draw(self, canvas):
fuzz = random.uniform(-self.fuzz_amount, self.fuzz_amount)
canvas.point(self.pos, color_fade(STAR_COLOR, BACKGROUND_COLOR, self.alpha + fuzz))
def update(self, time_passed):
if not time_passed:
return
if self.going_up:
self.alpha += (self.max_alpha - self.alpha) * (time_passed * self.grow_speed)
else:
self.alpha -= (self.alpha) * (time_passed * self.fade_speed)
if self.going_up and abs(self.max_alpha - self.alpha) <= self.tolerance:
self.going_up = False
if (not self.going_up) and self.alpha < 5:
self.dead = True
class StarSimulator:
def __init__(self):
self.image, self.canvas = new_canvas()
self.time_passed = 0
self.stars = []
def run(self):
for x in range(STAR_COUNT):
self.stars.append(Star(True))
while True:
self.draw()
self.update()
def draw(self):
self.canvas.rectangle(((0, 0), (matrix.width, matrix.height)), BACKGROUND_COLOR)
for star in self.stars:
star.draw(self.canvas)
matrix.SetImage(self.image)
def update(self):
dead_stars = []
for star in self.stars:
star.update(self.time_passed)
if star.dead:
dead_stars.append(star)
for dead in dead_stars:
self.stars.remove(dead)
self.stars.append(Star()) # And remember: a star lost is a star earned!
t1 = time.time()
time.sleep(.1)
self.time_passed = (time.time() - t1) * SPEED
def main():
try:
sim = StarSimulator()
sim.run()
except KeyboardInterrupt:
print()
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
"""
Create release notes for a new release of this GitHub repository.
"""
# Requires:
#
# * assumes current directory is within a repository clone
# * pyGithub (conda or pip install) - https://pygithub.readthedocs.io/
# * Github personal access token (https://github.com/settings/tokens)
#
# Github token access is needed or the GitHub API limit
# will likely interfere with making a complete report
# of the release.
import argparse
import datetime
import github
import logging
import os
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger('create_release_notes')
def findGitConfigFile():
"""
return full path to .git/config file
must be in current working directory or some parent directory
This is a simplistic search that could be improved by using
an open source package.
Needs testing for when things are wrong.
"""
path = os.getcwd()
for i in range(99):
config_file = os.path.join(path, ".git", "config")
if os.path.exists(config_file):
return config_file # found it!
# next, look in the parent directory
path = os.path.abspath(os.path.join(path, ".."))
msg = "Could not find .git/config file in any parent directory."
logger.error(msg)
raise ValueError(msg)
def parse_git_url(url):
"""
return (organization, repository) tuple from url line of .git/config file
"""
if url.startswith("git@"): # deal with git@github.com:org/repo.git
url = url.split(":")[1]
url = url[:-len(".git")] if url.endswith(".git") else url # drop the trailing ".git" suffix, if present
org, repo = url.split("/")[-2:]
return org, repo
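# Hedged illustration of the two URL forms handled above (org/repo names are
# hypothetical):
#   parse_git_url("git@github.com:example-org/example-repo.git")
#     -> ("example-org", "example-repo")
#   parse_git_url("https://github.com/example-org/example-repo.git")
#     -> ("example-org", "example-repo")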
def getRepositoryInfo():
"""
return (organization, repository) tuple from .git/config file
This is a simplistic search that could be improved by using
an open source package.
Needs testing for when things are wrong.
"""
config_file = findGitConfigFile()
with open(config_file, "r") as f:
for line in f.readlines():
line = line.strip()
if line.startswith("url"):
url = line.split("=")[-1].strip()
if url.find("github.com") < 0:
msg = "Not a GitHub repo: " + url
logger.error(msg)
raise ValueError(msg)
return parse_git_url(url)
def get_release_info(token, base_tag_name, head_branch_name, milestone_name):
"""mine the Github API for information about this release"""
organization_name, repository_name = getRepositoryInfo()
gh = github.Github(token) # GitHub Personal Access Token
user = gh.get_user(organization_name)
logger.debug(f"user: {user}")
repo = user.get_repo(repository_name)
logger.debug(f"repo: {repo}")
milestones = [
m
for m in repo.get_milestones(state="all")
if m.title == milestone_name
]
if len(milestones) == 0:
msg = f"Could not find milestone: {milestone_name}"
logger.error(msg)
raise ValueError(msg)
milestone = milestones[0]
logger.debug(f"milestone: {milestone}")
compare = repo.compare(base_tag_name, head_branch_name)
logger.debug(f"compare: {compare}")
commits = {c.sha: c for c in compare.commits}
logger.debug(f"# commits: {len(commits)}")
tags = {}
earliest = None
for t in repo.get_tags():
if t.commit.sha in commits:
tags[t.name] = t
elif t.name == base_tag_name:
# PyGitHub oddity:
# t.commit == commit
# t.commit.last_modified != commit.last_modified
commit = repo.get_commit(t.commit.sha)
dt = str2time(commit.last_modified)
earliest = min(dt, earliest or dt)
logger.debug(f"# tags: {len(tags)}")
pulls = {
p.number: p
for p in repo.get_pulls(state="closed")
if p.closed_at > earliest
}
logger.debug(f"# pulls: {len(pulls)}")
issues = {
i.number: i
for i in repo.get_issues(milestone=milestone, state="closed")
if (
(milestone is not None or i.closed_at > earliest)
and
i.number not in pulls
)
}
logger.debug(f"# issues: {len(issues)}")
return repo, milestone, tags, pulls, issues, commits
def parse_command_line():
"""command line argument parser"""
doc = __doc__.strip()
parser = argparse.ArgumentParser(description=doc)
help_text = "name of tag to start the range"
parser.add_argument('base', action='store', help=help_text)
help_text = "name of milestone"
parser.add_argument('milestone', action='store', help=help_text)
parser.add_argument(
'token',
action='store',
help=(
"personal access token "
"(see: https://github.com/settings/tokens)"))
help_text = "name of tag, branch, SHA to end the range"
help_text += ' (default="main")'
parser.add_argument(
"--head",
action='store',
dest='head',
nargs='?',
help = help_text,
default="main")
return parser.parse_args()
def str2time(time_string):
"""convert date/time string to datetime object
input string example: ``Tue, 20 Dec 2016 17:35:40 GMT``
"""
if time_string is None:
msg = f"need valid date/time string, not: {time_string}"
logger.error(msg)
raise ValueError(msg)
return datetime.datetime.strptime(
time_string,
"%a, %d %b %Y %H:%M:%S %Z")
def report(title, repo, milestone, tags, pulls, issues, commits):
print(f"## {title}")
print("")
print(f"* **date/time**: {datetime.datetime.now()}")
print("* **release**: ")
print("* **documentation**: [PDF]()")
if milestone is not None:
print(f"* **milestone**: [{milestone.title}]({milestone.url})")
print("")
print("section | quantity")
print("-"*5, " | ", "-"*5)
print(f"[New Tags](#tags) | {len(tags)}")
print(f"[Pull Requests](#pull-requests) | {len(pulls)}")
print(f"[Issues](#issues) | {len(issues)}")
print(f"[Commits](#commits) | {len(commits)}")
print("")
print("### Tags")
print("")
if len(tags) == 0:
print("-- none --")
else:
print("tag | date | name")
print("-"*5, " | ", "-"*5, " | ", "-"*5)
for k, tag in sorted(tags.items()):
commit = repo.get_commit(tag.commit.sha)
when = str2time(commit.last_modified).strftime("%Y-%m-%d")
print(f"[{tag.commit.sha[:7]}]({tag.commit.html_url}) | {when} | {k}")
print("")
print("### Pull Requests")
print("")
if len(pulls) == 0:
print("-- none --")
else:
print("pull request | date | state | title")
print("-"*5, " | ", "-"*5, " | ", "-"*5, " | ", "-"*5)
for k, pull in sorted(pulls.items()):
state = {True: "merged", False: "closed"}[pull.merged]
when = str2time(pull.last_modified).strftime("%Y-%m-%d")
print(f"[#{pull.number}]({pull.html_url}) | {when} | {state} | {pull.title}")
print("")
print("### Issues")
print("")
if len(issues) == 0:
print("-- none --")
else:
print("issue | date | title")
print("-"*5, " | ", "-"*5, " | ", "-"*5)
for k, issue in sorted(issues.items()):
if k not in pulls:
when = issue.closed_at.strftime("%Y-%m-%d")
print(f"[#{issue.number}]({issue.html_url}) | {when} | {issue.title}")
print("")
print("### Commits")
print("")
if len(commits) == 0:
print("-- none --")
else:
print("commit | date | message")
print("-"*5, " | ", "-"*5, " | ", "-"*5)
for k, commit in commits.items():
message = commit.commit.message.splitlines()[0]
when = commit.raw_data['commit']['committer']['date'].split("T")[0]
print(f"[{k[:7]}]({commit.html_url}) | {when} | {message}")
def main(base=None, head=None, milestone=None, token=None, debug=False):
if debug:
base_tag_name = base
head_branch_name = head
milestone_name = milestone
logger.setLevel(logging.DEBUG)
else:
cmd = parse_command_line()
base_tag_name = cmd.base
head_branch_name = cmd.head
milestone_name = cmd.milestone
token = cmd.token
logger.setLevel(logging.WARNING)
info = get_release_info(
token, base_tag_name, head_branch_name, milestone_name)
# repo, milestone, tags, pulls, issues, commits = info
report(milestone_name, *info)
if __name__ == '__main__':
main()
# NeXus - Neutron and X-ray Common Data Format
#
# Copyright (C) 2008-2022 NeXus International Advisory Committee (NIAC)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# For further information, see http://www.nexusformat.org
|
#!/usr/bin/env python3
# Small GUI program using GUIZero to change the
# brightness of a Pi-top/CEED if using Jessie and not pi-topOS
# to use this program you first have to install the code from
# https://github.com/rricharz/pi-top-install
# to get the brightness command
# while there it's worth installing the shutdown code too
# as it will shutdown the pi-top hub when you shutdown Jessie
# as of the latest Jessie guizero is pre-installed so no other
# software should need to be installed.
# if guizero is not installed then follow these instructions
# https://lawsie.github.io/guizero/
# this program is python 3 and needs to be made executable with
# chmod +x pi-top-brightness-gui.py
# might be good to change the name of the program to something shorter
from guizero import *
from subprocess import call
def brighter():
call(["brightness", "increase"])
def darker():
call(["brightness", "decrease"])
app = App(title="pi-top", height=50, width=145, layout="grid", bgcolor=None)
button1 = PushButton(app, brighter, text="Brighter", grid=[0,1])
button2 = PushButton(app, darker, text="Darker", grid=[0,0])
app.display()
|
#!/usr/bin/env python
import sys
from fetch_data import FetchData
from markov_python.cc_markov import MarkovChain
"""
Goofy first attempt at a Python application that uses the Codecademy
markov_python module to create fun/dumb/whatever responses based on
data pulled from various web locations.
Pretty lame, but I didn't want to spend much time on it...
Expects at least one URL on the command line for a source of text to
pull and search.
Example: run.py http://www.textfiles.com/sf/adams.txt http://www.textfiles.com/sf/alt3.txt
"""
def main(args):
mc = MarkovChain()
for a in args[1::]:
fd = FetchData(a)
mc.add_string(fd.fetch_data())
chain = mc.generate_text()
out = (" ").join(chain)
print(out)
if __name__ == "__main__":
main(sys.argv)
|
#!/home/tortes/anaconda3/envs/ts/bin/python
"""
Change list:
- Remove past action
- adjust step output: remove arrived
- Change python version to 3
- Add action space, observation space
8.17
- Change action space to discrete action
"""
import os
import rospy
import numpy as np
import math
from math import pi
import random
import gym
from geometry_msgs.msg import Twist, Point, Pose
from sensor_msgs.msg import LaserScan, PointCloud2, Imu, NavSatFix
from nav_msgs.msg import Odometry
from std_srvs.srv import Empty
from gazebo_msgs.srv import SpawnModel, DeleteModel
from rosgraph_msgs.msg import Clock
import sensor_msgs.point_cloud2 as pc2 # pcl lib
# from velodyne_msgs.msg import VelodyneScan, VelodynePacket
action_linear_max = 5. # m/s
action_angular_max = 2. # rad/s
EARTH_RADIUS = 6378137
# REALSENSE_MAX_POINT = 20000
REALSENSE_MAX_POINT = 5000
zero_point = (0,0,0)
diagonal_dis = math.sqrt(2) * 100
epi = 10**-6
goal_model_dir = os.path.join(os.path.split(os.path.realpath(__file__))[0], '..', 'models', 'Target_col', 'model.sdf')
# Velodyne Disabled
class Env():
def __init__(self, is_training):
self.position = Pose()
self.goal_position = Pose()
self.goal_position.position.x = 0.
self.goal_position.position.y = 0.
self.pub_cmd_vel = rospy.Publisher('cmd_vel', Twist, queue_size=10)
self.reset_proxy = rospy.ServiceProxy('gazebo/reset_simulation', Empty)
self.unpause_proxy = rospy.ServiceProxy('gazebo/unpause_physics', Empty)
self.pause_proxy = rospy.ServiceProxy('gazebo/pause_physics', Empty)
self.goal = rospy.ServiceProxy('/gazebo/spawn_sdf_model', SpawnModel)
self.del_model = rospy.ServiceProxy('/gazebo/delete_model', DeleteModel)
self.time = 0
self.roll = 0.
self.pitch = 0.
self.nav_yaw = 0.
self.extend_data = np.zeros(3*REALSENSE_MAX_POINT)
self.scan_data = []
self.nav_position = [9.083599620367968, -8.909992062367177]
self.sub_time = rospy.Subscriber('clock', Clock, self.getClock)
self.sub_imu = rospy.Subscriber('imu/data', Imu, self.getQuaternion)
self.sub_realsense = rospy.Subscriber('realsense/downsample', PointCloud2, self.getRealsense)
self.sub_lidar = rospy.Subscriber('scan', LaserScan, self.getLidar)
self.sub_navsat = rospy.Subscriber('navsat/fix', NavSatFix, self.getNavSat)
self.past_distance = 0.
self.nav_goal_distance = 0.
self.nav_rel_theta = 0.
self.nav_diff_angle = 0.
self.action_space()
self.observation_space()
if is_training:
self.threshold_arrive = 0.5
else:
self.threshold_arrive = 1.0
def getNavGoalDistance(self):
nav_goal_distance = math.hypot(self.goal_position.position.x - self.nav_position[0], self.goal_position.position.y - self.nav_position[1])
self.nav_past_distance = nav_goal_distance
return nav_goal_distance
def getClock(self, clock):
self.time = clock.clock.secs
# return clock.clock.secs
def getQuaternion(self, imu_data):
# roll, pitch, yaw
q_data = imu_data.orientation
eular_data = self.getEular(q_data)
self.orientation = q_data
self.roll = eular_data[0]
self.pitch = eular_data[1]
self.nav_yaw = eular_data[2]
def getRealsense(self, realsense_data):
rs_generator = pc2.read_points(realsense_data, skip_nans=True, field_names=("x","y","z"))
realsense_point_ = list(rs_generator)
rs_point_length = len(realsense_point_)
# sample or extend
if rs_point_length <= REALSENSE_MAX_POINT:
realsense_point_.extend([zero_point for _ in range(REALSENSE_MAX_POINT-rs_point_length)])
else:
selected_point = np.random.choice(np.arange(rs_point_length), REALSENSE_MAX_POINT, replace=True)
realsense_point_ = [realsense_point_[i] for i in selected_point]
extend_data_ = []
for point in realsense_point_:
extend_data_.extend([point[0],point[1],point[2]])
self.realsense_point = realsense_point_
self.extend_data = extend_data_
def getLidar(self, scan_raw_data):
scan_data_ = []
scan_length = len(scan_raw_data.ranges)
for i in range(scan_length):
if scan_raw_data.ranges[i] == float('Inf'):
scan_data_.append(30.)
elif np.isnan(scan_raw_data.ranges[i]):
scan_data_.append(0)
else:
scan_data_.append(scan_raw_data.ranges[i])
self.scan_data = scan_data_
def getNavSat(self, navsat_data):
# reference Longi:45 Lati:45
ref_longi = 45.0
ref_lati = 45.0
longitude = navsat_data.longitude
latitude = navsat_data.latitude
delta_longi = (longitude-ref_longi) * pi / 180
delta_lati = (latitude-ref_lati) * pi / 180
para_longi = 0.5 * (1-math.cos(delta_longi))
para_lati = math.cos(latitude*pi/180) * math.cos(latitude*pi/180)
if delta_longi >= 0:
para_symbol = 1
else:
para_symbol = -1
longitude_aff = para_symbol * EARTH_RADIUS * math.acos(1-2*para_lati*para_longi)
latitude_aff = EARTH_RADIUS * delta_lati
self.nav_position = [longitude_aff, latitude_aff]
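# The conversion above appears to turn the GPS fix into local planar
# coordinates relative to the reference point (45, 45): the east-west offset
# is the great-circle distance (spherical law of cosines in haversine form)
# between the fix and the point at the same latitude on the reference
# meridian, signed by the longitude delta, while the north-south offset is
# simply EARTH_RADIUS * delta_lati.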
def getGoalAngle(self):
rel_dis_x = round(self.goal_position.position.x - self.nav_position[0], 1)
rel_dis_y = round(self.goal_position.position.y - self.nav_position[1], 1)
# Calculate the angle between robot and target
if rel_dis_x > 0 and rel_dis_y > 0:
theta = math.atan(rel_dis_y / rel_dis_x)
elif rel_dis_x > 0 and rel_dis_y < 0:
theta = 2 * math.pi + math.atan(rel_dis_y / rel_dis_x)
elif rel_dis_x < 0 and rel_dis_y < 0:
theta = math.pi + math.atan(rel_dis_y / rel_dis_x)
elif rel_dis_x < 0 and rel_dis_y > 0:
theta = math.pi + math.atan(rel_dis_y / rel_dis_x)
elif rel_dis_x == 0 and rel_dis_y > 0:
theta = 1 / 2 * math.pi
elif rel_dis_x == 0 and rel_dis_y < 0:
theta = 3 / 2 * math.pi
elif rel_dis_y == 0 and rel_dis_x > 0:
theta = 0
else:
theta = math.pi
rel_theta = round(math.degrees(theta), 2)
diff_angle = abs(rel_theta - self.nav_yaw)
if diff_angle <= 180:
diff_angle = round(diff_angle, 2)
else:
diff_angle = round(360 - diff_angle, 2)
self.nav_rel_theta = rel_theta
self.nav_diff_angle = diff_angle
def getState(self):
# Get angle info
self.getGoalAngle()
extend_data = self.extend_data
roll = self.roll
pitch = self.pitch
yaw = self.nav_yaw
rel_theta = self.nav_rel_theta
diff_angle = self.nav_diff_angle
min_range = 0.3 # Collision range
done = False
arrive = False
# Add physical factors to depend done
done = self.is_done()
# Use only dist to depend arrive
current_distance = math.hypot(self.goal_position.position.x - self.nav_position[0], self.goal_position.position.y - self.nav_position[1])
if current_distance <= self.threshold_arrive:
arrive = True
print("Arrived!")
# Check data
assert len(extend_data) == 3 * REALSENSE_MAX_POINT
return extend_data, current_distance, roll, pitch, yaw, rel_theta, diff_angle, done, arrive
def setReward(self, done, arrive):
current_distance = math.hypot(self.goal_position.position.x - self.nav_position[0], self.goal_position.position.y - self.nav_position[1])
distance_rate = (self.past_distance - current_distance)
reward = 200.*distance_rate
self.past_distance = current_distance
# Time reward
# reward = reward - .5 * self.time
# Imu reward
if abs(self.roll) > 22.5:
# print("Alert! Roll angle is %.2f" % self.roll)
reward = reward - 1.
if abs(self.pitch) > 22.5:
# print("Alert! Pitch angle is %.2f" % self.pitch)
reward = reward - 1.
if done:
reward = -100.
self.pub_cmd_vel.publish(Twist())
if arrive:
reward = 2000.
self.pub_cmd_vel.publish(Twist())
arrive = False
return reward
def step(self, action):
linear_vel = self.action_space_discrete[action][0]
ang_vel = self.action_space_discrete[action][1]
# print(linear_vel, ang_vel)
vel_cmd = Twist()
vel_cmd.linear.x = linear_vel / 4
vel_cmd.angular.z = ang_vel
self.pub_cmd_vel.publish(vel_cmd)
# Update sensor data
# self.getSensor()
# Update state observation
realsense_data, rel_dis, roll, pitch, yaw, rel_theta, diff_angle, done, arrive = self.getState()
# Normalize the state
'''
Realsense: [0, 12] => [0,1]
LiDAR: [0, 30] => [0,1]
roll, pitch:[-180, 180] => [0,1]
'''
# scan_data = [i/30 for i in scan_data]
state = realsense_data + [rel_dis / diagonal_dis, (roll+180)/360, (pitch+180)/360, yaw / 360, rel_theta / 360, diff_angle / 180]
reward = self.setReward(done, arrive)
return np.asarray(state), reward, done or arrive, {}
def reset(self):
# Reset the env #
rospy.wait_for_service('/gazebo/delete_model')
self.del_model('target')
rospy.wait_for_service('gazebo/reset_simulation')
try:
self.reset_proxy()
except (rospy.ServiceException) as e:
print("gazebo/reset_simulation service call failed")
# Build the target
rospy.wait_for_service('/gazebo/spawn_sdf_model')
try:
goal_urdf = open(goal_model_dir, "r").read()
target = SpawnModel
target.model_name = 'target' # the same with sdf name
target.model_xml = goal_urdf
# Get goal position
self.goal_position.position.x, self.goal_position.position.y = self.goal_on_law()
self.goal(target.model_name, target.model_xml, 'namespace', self.goal_position, 'world')
# Affine Goal Position to NavSatFix(x -> -y, y->x)
goal_x, goal_y = self.goal_position.position.x, self.goal_position.position.y
self.goal_position.position.x = -goal_y
self.goal_position.position.y = goal_x # use the pre-assignment x so the swap is not corrupted
except (rospy.ServiceException) as e:
print("/gazebo/failed to build the target")
rospy.wait_for_service('/gazebo/unpause_physics')
# Get sensor data
# self.getSensor()
self.goal_distance = self.getNavGoalDistance()
realsense_data, rel_dis, roll, pitch, yaw, rel_theta, diff_angle, done, arrive = self.getState()
# scan_data = [i/30 for i in scan_data]
realsense_data = [i/12 for i in realsense_data]
# Normalize the state
state = realsense_data + [rel_dis / diagonal_dis, (roll+180)/360, (pitch+180)/360, yaw / 360, rel_theta / 360, diff_angle / 180]
return np.asarray(state)
def goal_on_law(self):
x_ = 0
y_ = 0
while True:
x_ = random.uniform(0.0, 10.0)
y_ = random.uniform(-10.0, 0.0)
dist1 = math.hypot(x_+0.6, y_+0.6)
dist2 = math.hypot(x_+0.6, y_-0.6)
dist3 = math.hypot(x_-0.6, y_-0.6)
if (dist1 > 0.2) or (dist2 > 0.2) or (dist3 > 0.2):
break
return x_, y_
def box_affine(self, p, threshold_affine):
# threshold_affine = 0.2
x, y, z = p[0], p[1], p[2]
if (x<threshold_affine and y<threshold_affine and z<threshold_affine):
k = threshold_affine / max(map(abs, (x,y,z)))
x, y, z = map(lambda x: x*k, (x,y,z))
return x, y, z
def ball_affine(self, p, threshold_affine):
# threshold_affine = 0.2
x, y, z = p[0], p[1], p[2]
point_dist = np.linalg.norm((x,y,z))
if (point_dist < threshold_affine):
k = point_dist / threshold_affine
x, y, z = map(lambda x: x/k, (x,y,z))
return x, y, z
def is_outbound(self):
x = self.nav_position[0]
y = self.nav_position[1]
# print(x,y)
if abs(x) > 13.5 or abs(y) > 13.5:
return True
return False
def is_done(self):
min_range = 1.2
if len(self.scan_data) == 0:
return False
# Roll Pitch error
if abs(self.roll) > 45 or abs(self.pitch) > 45:
# print("Roll/Pitch danger")
return True
# Collision error
if min_range > min(self.scan_data) > 0 and self.is_outbound():
# print("Collision")
return True
if self.time > 10000:
# print("Time exceed")
return True
return False
def getEular(self, quaternion):
x = quaternion.x
y = quaternion.y
z = quaternion.z
w = quaternion.w
# roll
sinr_cosp = 2.0*(w*x+y*z)
cosr_cosp = 1-2.0*(x*x+y*y)
roll = math.atan2(sinr_cosp, cosr_cosp)
# pitch
sinp = 2.0*(w*y-z*x)
if abs(sinp) > 1:
pitch = pi/2 if sinp > 0 else -pi/2 # Use pi/2 if out of range
else:
pitch = math.asin(sinp)
# yaw
siny_cosp = 2.0*(w*z + x*y)
cosy_cosp = 1-2.0*(y*y+z*z)
yaw = math.atan2(siny_cosp, cosy_cosp)
if yaw < 0:
yaw = yaw + 2 * pi # wrap negative yaw into [0, 2*pi) before converting to degrees
return roll*180/pi, pitch*180/pi, yaw*180/pi
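# Quick sanity check (hedged): the identity quaternion (x=0, y=0, z=0, w=1)
# yields roll=0, pitch=0, yaw=0 with the formulas above.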
def action_space(self):
action_space_discrete_ = []
linear_sample = np.arange(0,action_linear_max+epi,
action_linear_max/4.,
dtype=np.float32).tolist()
angular_sample = np.arange(-action_angular_max, action_angular_max+epi,
action_angular_max*2/10,
dtype=np.float32).tolist()
for linear_speed in linear_sample:
for angular_speed in angular_sample:
action_space_discrete_.append([linear_speed, angular_speed])
self.action_space_discrete = action_space_discrete_
self.action_dim = len(action_space_discrete_)
return gym.spaces.Discrete(len(action_space_discrete_))
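# With the module-level defaults above (action_linear_max = 5.0 m/s sampled in
# 5 steps, action_angular_max = 2.0 rad/s sampled in 11 steps) this yields a
# grid of 5 x 11 = 55 discrete actions.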
def observation_space(self):
self.state_dim = 6 + 3 * REALSENSE_MAX_POINT
return gym.spaces.Box(low=0, high=1, shape=(self.state_dim,), dtype=np.float32)
def seed(self, seed):
return seed
def render(self):
return 0
def close(self):
return 0
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import pickle
import yaml
import pathlib
import pandas as pd
import shutil
from qlib.contrib.backtest.account import Account
from qlib.contrib.backtest.exchange import Exchange
from qlib.contrib.online.user import User
from qlib.contrib.online.utils import load_instance
from qlib.contrib.online.utils import save_instance, init_instance_by_config
class UserManager:
def __init__(self, user_data_path, save_report=True):
"""
This module is designed to manage the users in the online system.
All users' data are assumed to be saved in user_data_path.
Parameter
user_data_path : string
data path that all users' data are saved in
variables:
data_path : string
data path that all users' data are saved in
users_file : string
A path of the file record the add_date of users
save_report : bool
whether to save report after each trading process
users : dict{}
[user_id]->User()
the python dict save instances of User() for each user_id
user_record : pd.Dataframe
user_id(string), add_date(string)
indicate the add_date for each users
"""
self.data_path = pathlib.Path(user_data_path)
self.users_file = self.data_path / "users.csv"
self.save_report = save_report
self.users = {}
self.user_record = None
def load_users(self):
"""
load all users' data into manager
"""
self.users = {}
self.user_record = pd.read_csv(self.users_file, index_col=0)
for user_id in self.user_record.index:
self.users[user_id] = self.load_user(user_id)
def load_user(self, user_id):
"""
return an instance of User() that represents a user to be processed
Parameter
user_id : string
:return
user : User()
"""
account_path = self.data_path / user_id
strategy_file = self.data_path / user_id / "strategy_{}.pickle".format(user_id)
model_file = self.data_path / user_id / "model_{}.pickle".format(user_id)
if user_id in self.users:
raise ValueError("User {} has been loaded".format(user_id))
else:
trade_account = Account(0)
trade_account.load_account(account_path)
strategy = load_instance(strategy_file)
model = load_instance(model_file)
user = User(account=trade_account, strategy=strategy, model=model)
return user
def save_user_data(self, user_id):
"""
save an instance of User() to the user data path
Parameter
user_id : string
"""
if not user_id in self.users:
raise ValueError("Cannot find user {}".format(user_id))
self.users[user_id].account.save_account(self.data_path / user_id)
save_instance(
self.users[user_id].strategy,
self.data_path / user_id / "strategy_{}.pickle".format(user_id),
)
save_instance(
self.users[user_id].model,
self.data_path / user_id / "model_{}.pickle".format(user_id),
)
def add_user(self, user_id, config_file, add_date):
"""
add the new user {user_id} into user data
will create a new folder named "{user_id}" in user data path
Parameter
user_id : string
config_file : str/pathlib.Path()
path of config file (init_cash, model and strategy are read from it)
add_date : string
date string marking when the user is added
"""
config_file = pathlib.Path(config_file)
if not config_file.exists():
raise ValueError("Cannot find config file {}".format(config_file))
user_path = self.data_path / user_id
if user_path.exists():
raise ValueError("User data for {} already exists".format(user_id))
with config_file.open("r") as fp:
config = yaml.safe_load(fp)
# load model
model = init_instance_by_config(config["model"])
# load strategy
strategy = init_instance_by_config(config["strategy"])
init_args = strategy.get_init_args_from_model(model, add_date)
strategy.init(**init_args)
# init Account
trade_account = Account(init_cash=config["init_cash"])
# save user
user_path.mkdir()
save_instance(model, self.data_path / user_id / "model_{}.pickle".format(user_id))
save_instance(strategy, self.data_path / user_id / "strategy_{}.pickle".format(user_id))
trade_account.save_account(self.data_path / user_id)
user_record = pd.read_csv(self.users_file, index_col=0)
user_record.loc[user_id] = [add_date]
user_record.to_csv(self.users_file)
def remove_user(self, user_id):
"""
remove user {user_id} in current user dataset
will delete the folder "{user_id}" in user data path
:param
user_id : string
"""
user_path = self.data_path / user_id
if not user_path.exists():
raise ValueError("Cannot find user data {}".format(user_id))
shutil.rmtree(user_path)
user_record = pd.read_csv(self.users_file, index_col=0)
user_record.drop([user_id], inplace=True)
user_record.to_csv(self.users_file)
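# A minimal, hedged usage sketch (paths, user id and date are hypothetical,
# and users.csv is assumed to already exist in the user data path):
#
#   manager = UserManager("./user_data")
#   manager.add_user("user_001", "./config/user_001.yaml", "2020-01-01")
#   manager.load_users()
#   manager.save_user_data("user_001")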
|
def addition(a, b):
return int(a) + int(b)
def subtraction(a, b):
return int(a) - int(b)
def multiplication(a, b):
return int(a) * int(b)
def division(a, b):
return round((int(a) / int(b)),9)
def squaring(a):
return int(a)**2
def squarerooting(a):
return round((int(a)**.5),8)
class Calculator:
result = 0
def __init__(self):
pass
def add(self, a, b):
self.result = addition(a, b)
return self.result
def subtract(self, a, b):
self.result = subtraction(a, b)
return self.result
def multiply(self, a, b):
self.result = multiplication(a, b)
return self.result
def divide(self, a, b):
self.result = division(a, b)
return self.result
def square(self, a):
self.result = squaring(a)
return self.result
def squareroot(self, a):
self.result = squarerooting(a)
return self.result
#class CSVStats(Calculator):
#data = []
#def __init__(self, data_file):
#self.data = CsvReader(data_file)
#pass
|
from django.conf.urls import url, include
from django.contrib import admin
from books.views import Mainpage
urlpatterns = [
url(r'^$', Mainpage.as_view(), name='mainpage'), # 主页
]
|
# -*- coding: utf-8 -*-
"""
Pytesting/test/test_worker.py
Generative test cases for the data processing Worker object
@author: Rupert.Thomas
Created 22/11/2019
Run tests (from the root folder using):
pytest
"""
import datetime
from string import printable # digits + ascii_letters + punctuation + whitespace
from hypothesis.strategies import text, dates
from hypothesis import given
from truth.truth import AssertThat
# Module under test
from app.core.worker import Worker
# Generate strings using all printable characters, except commas
@given(input_string=text(alphabet=[char for char in printable if char !=',']))
def test_worker_parseLineCSV_generative(input_string):
# given
worker = Worker()
# when
result = worker.parseLineCSV(input_string)
# then
AssertThat(result).IsNone()
# Generate dates within the four digit year range
@given(input_date=dates(min_value=datetime.date(1000, 1, 1), max_value=datetime.date(9999, 1, 1)))
def test_worker_parseDate1_generative(mocker, input_date):
# given
input_string = input_date.strftime(format="%d%b%Y")
worker = Worker()
# when
result = worker.parseDate(input_string)
print(input_string, result)
# then
AssertThat(result).IsInstanceOf(str)
AssertThat(result).HasSize(10)
AssertThat(result.split('-')).HasSize(3)
# Generate arbitrary text strings
@given(input_string=text())
def test_worker_parseDate2_generative(input_string):
# given
worker = Worker()
# when
result = worker.parseLineCSV(input_string)
# then
# returns None or a string
# Must not throw unhandled exception
if result is not None:
AssertThat(result).IsInstanceOf(str)
|
"""
This holds functionality to get commands, and parse commands
"""
from quick_netmiko import QuickNetmiko
from pyats_genie_command_parse import GenieCommandParse
def command_parse(python_dict, fifo_queue, thread_lock): # pylint: disable=inconsistent-return-statements
"""Function to get and parse commands from devices
:type python_dict: Dict
:param python_dict: A dictionary of connection data
:type fifo_queue: queue.Queue Object
:param fifo_queue: The FIFO queue
:type thread_lock: threading.Lock Object
:param thread_lock: The thread lock
:rtype: None
:returns: None, but it does put an item in the fifo_queue
"""
with thread_lock:
allowed_device_types = {'ios', 'iosxe', 'iosxr', 'nxos'}
if python_dict.get('device_type') not in allowed_device_types:
return None
command = python_dict.get('command')
netmiko_obj = QuickNetmiko(python_dict.get('device_ip_name'), python_dict.get('device_type'),
python_dict.get('username'), python_dict.get('password'))
command_result = netmiko_obj.send_commands(command)
genie_parse_obj = GenieCommandParse(python_dict.get('device_type'))
parse_result = genie_parse_obj.parse_string(command, command_result)
fifo_queue.put((parse_result, command_result))
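# A hedged usage sketch (device address, credentials and command are
# hypothetical):
#
#   import queue
#   import threading
#
#   fifo_queue = queue.Queue()
#   thread_lock = threading.Lock()
#   command_parse({'device_type': 'ios',
#                  'device_ip_name': '192.0.2.1',
#                  'username': 'admin',
#                  'password': 'secret',
#                  'command': 'show version'},
#                 fifo_queue, thread_lock)
#   parsed, raw = fifo_queue.get()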
|
r"""Implementation of the Dijkstra algorithm for finding shortest paths.
The algorithm is implemented using a priority queue (in particular heapq that
is provided in python). This should provide a complexity of
:math:`O(E \cdot \log(V))`, while many other implementations have the
complexity :math:`O(V \cdot E)`.
.. note::
This still has to be verified, as it has not been evaluated how efficient
the priority queue implementation is.
"""
import heapq
import math
from collections import OrderedDict
from dataclasses import dataclass
@dataclass
class DijkstraMetrics:
"""Holds Dijkstra data of a certain node."""
dist: int
first_hop_dist: int
path: str
hops: int
hash: int
def get_best_route(source,
destination,
graph,
direct_neighbor,
current_time,
suppressed_nodes=None,
suppressed_contacts=None,
hashes=None,
lookahead_time=None):
"""Return the best route from source to destination through the graph.
Args:
source (ContactIdentifier): Source node
destination (ContactIdentifier): Destination node
graph (ContactGraph): The graph that should be routed.
direct_neighbor (callable): A callable providing the adjacent nodes of
a given node - i.e. the neighbors - and the corresponding weight
(distance) of each neighbor. The signature is as follows:
.. function:: def direct_neighbor(graph, node, destination, distance, visited, suppressed_contacts, lookahead_time)
current_time (int): Time in the simulation when the calculation
is performed (in ms).
suppressed_nodes (list): A list of nodes that should not be considered
during the Dijkstra search run. Defaults to [].
suppressed_contacts (list): A list of contacts that should not be
considered during the Dijkstra search run. Defaults to [].
hashes (dict): A dictionary providing precomputed hash values in tuples
for all nodes of the graph object. Defaults to None. Hashes are
computed internally then.
lookahead_time (int): Time value that specifies a time window
(or rather a maximum time) only in which routes are searched.
This reduces the time necessary to find a shortest route.
Returns:
tuple: A tuple of the form `(<route>, <weight>)` with `<route>` being
the found shortest route and `<weight>` the cost of that route.
.. seealso::
https://www.geeksforgeeks.org/dijkstras-shortest-path-algorithm-using-priority_queue-stl/
Based on the provided pseudocode
"""
priority_queue = list()
metrics = dict()
visited = set()
if suppressed_nodes is None:
suppressed_nodes = []
if suppressed_contacts is None:
suppressed_contacts = []
if hashes is None:
hashes = OrderedDict()
for vertex in graph:
hashes[vertex] = hash(vertex)
# Set vertices visited that are in the suppressed list.
for node in suppressed_nodes:
visited.add(node)
# Return immediately with an empty route when source is destination
if source == destination:
return [], 0
# Set distance to source to 0 and add to priority queue
# Also set the (secondary) hop count to 0
metrics[source] = DijkstraMetrics(
dist=0, hops=0, first_hop_dist=0, path=None, hash=hashes[source])
heapq.heappush(priority_queue,
(current_time, 0, 0, hashes[source], source))
# Loop until the priority queue becomes empty
while priority_queue:
# Pop the vertex with the shortest distance from the priority queue
(dist_min_node, min_hop_count, min_first_hop_dist, hash_min,
min_node) = heapq.heappop(priority_queue)
# End the looping if we are evaluating the destination node
# We definitely found the best route
if min_node == destination:
break
# Iterate over all neighbors of the selected vertex
for neighbor, dist_neigh in direct_neighbor(
graph, min_node, destination, dist_min_node, visited,
suppressed_contacts, lookahead_time):
# Calculate the overall distance of the minimal path from the
# source through the min node to the neighbor
new_distance = dist_min_node + dist_neigh
if neighbor not in metrics:
if min_node == source:
hash_val = hashes[neighbor]
first_hop_dist_val = new_distance
else:
hash_val = hash_min
first_hop_dist_val = min_first_hop_dist
metrics[neighbor] = DijkstraMetrics(
dist=new_distance,
hops=min_hop_count + 1,
first_hop_dist=first_hop_dist_val,
path=min_node,
hash=hash_val)
# If not done yet, add the neighbor with the updated values to
# the priority queue (there might be more than one instance of
# one node in the queue, but the priority mechanism ensures
# that always the best one is evaluated)
heapq.heappush(priority_queue,
(new_distance, min_hop_count + 1,
first_hop_dist_val, hash_val, neighbor))
# If that distance is smaller than the distance that was previously
# calculated, then update the values and append the newly
# found path to the priority queue (we can end if we found the
# destination node, the found path will be the shortest)
elif ((metrics[neighbor].dist, metrics[neighbor].hops,
metrics[neighbor].first_hop_dist, metrics[neighbor].hash) >
(new_distance,
(min_hop_count + 1), min_first_hop_dist, hash_min)):
# Update the distance
metrics[neighbor].dist = new_distance
# Update the hop count to the new lower value
metrics[neighbor].hops = min_hop_count + 1
# Note the neighbor to be later able to reproduce the shortest
# path
metrics[neighbor].path = min_node
# Save the better first hop
metrics[neighbor].first_hop_dist = min_first_hop_dist
# Save the better first hash
metrics[neighbor].hash = hash_min
# If not done yet, add the neighbor with the updated values to
# the priority queue (there might be more than one instance of
# one node in the queue, but the priority mechanism ensures
# that always the best one is evaluated)
heapq.heappush(priority_queue,
(new_distance, min_hop_count + 1,
min_first_hop_dist, hash_min, neighbor))
# Check if route was found
if destination in metrics and metrics[destination].dist < math.inf:
# Best route found, now generate the path to return to the caller
node = destination
path_list = list()
while node != source:
path_list.append(node)
node = metrics[node].path
path_list.append(source)
# Return generated path (in the correct order source -> dest)
return list(reversed(path_list)), metrics[destination].dist
# No route from source to destination was found, return no route (i.e.
# None)
return None, math.inf
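
# A minimal, hedged sketch of a `direct_neighbor` callable that matches the
# call site above. It assumes `graph` is a plain dict mapping node ->
# {neighbor: weight}; the real project presumably passes its own ContactGraph
# and neighbor function, so this helper is illustrative only.
def _dict_direct_neighbor(graph, node, destination, distance, visited,
                          suppressed_contacts, lookahead_time):
    """Yield (neighbor, weight) pairs for `node`, skipping suppressed nodes."""
    for neighbor, weight in graph.get(node, {}).items():
        if neighbor in visited or neighbor in suppressed_contacts:
            continue
        yield neighbor, weight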
|
import notebooks
import os.path
tracking_uri = r'file:///' + os.path.join(os.path.dirname(notebooks.__file__),'model_simulate','mlruns')
import mlflow
mlflow.set_tracking_uri(tracking_uri)
|
"""Command line tool to handle Postgresql WAL archiving on Rackspace's cloudfiles.
pg_raxarchive
=============
``pg_raxarchive`` is a command line tool to handle Postgresql WAL archiving on
Rackspace's cloudfiles.
Quick help
----------
Install using pip then:
- Create a file ``/etc/pg_raxarchive.ini`` with rackspace credentials (see below).
- Run ``pg_raxarchive upload {path_to_file} {basename}`` to upload archive a file.
- Run ``pg_raxarchive download {basename} {path_to_file}`` to download an archived file.
- Run ``pg_raxarchive cleanup {filename}`` to remove WAL files older than {filename}.
Rackspace credential file follows pyrax format::
[rackspace_cloud]
username = YUOR_USERNAME_HERE
api_key = YOUR_API_KEY_HERE
You can customize the region and the container name using:
- ``pg_raxarchive --container CONTAINER_NAME ...``
- ``pg_raxarchive --region REGION_NAME ...``
By default ``pg_raxarchive`` expects to be running inside rackspace network. If this is
not your case, or you are willing to be billed for public bandwidth for some other reason, use ``pg_raxarchive --use-public``.
More
----
* Run ``pg_raxarchive --help`` to know more.
* Check the repository at https://github.com/duilio/pg_raxarchive/
"""
__version__ = '1.1'
__author__ = 'Maurizio Sambati'
import sys
import logging
import argparse
def main():
from archiver import PGRaxArchiver
parser = argparse.ArgumentParser()
parser.add_argument('--config', default='/etc/pg_raxarchive.ini')
parser.add_argument('--region', default='DFW')
parser.add_argument('--container', default='pg_archives')
parser.add_argument('--use-public', default=False, action='store_true')
parser.add_argument('-d', '--debug', dest='loglevel', default=logging.WARNING,
action='store_const', const=logging.DEBUG)
subparsers = parser.add_subparsers()
upload_parser = subparsers.add_parser('upload', help='Upload a file')
upload_parser.add_argument('srcpath', help='Full source path')
upload_parser.add_argument('filename', help='WAL filename')
upload_parser.add_argument('--disable-compression', dest='compress',
default=True, action='store_false')
upload_parser.set_defaults(cmd='upload')
download_parser = subparsers.add_parser('download', help='Download a file')
download_parser.add_argument('filename', help='WAL filename')
download_parser.add_argument('destpath', help='Full destination path')
download_parser.set_defaults(cmd='download')
cleanup_parser = subparsers.add_parser('cleanup', help='Cleanup archives')
cleanup_parser.add_argument('filename', help='Last file to keep')
cleanup_parser.set_defaults(cmd='cleanup')
args = parser.parse_args()
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
level=args.loglevel)
archiver = PGRaxArchiver(args.config, args.region, args.container, args.use_public)
if args.cmd == 'upload':
return archiver.upload(args.srcpath, args.filename, args.compress)
elif args.cmd == 'download':
return archiver.download(args.filename, args.destpath)
elif args.cmd == 'cleanup':
return archiver.cleanup(args.filename)
else:
raise RuntimeError('Unknown subcommand {}'.format(args.cmd))
if __name__ == '__main__':
sys.exit(main())
|
import pathlib
from typing import Union
from configparser import ConfigParser
from azure.identity import DefaultAzureCredential
class TradingCredentials():
def __init__(self, config_file: Union[str, pathlib.Path] = None) -> None:
"""Initializes the `TradingCredentials` object.
### Overview
----
This object helps interact with the `DefaultAzureCredential`
object which will handle authentication of the different Azure
resources.
### Parameters
----
config_file : Union[str, pathlib.Path] (optional, Default=None)
The location of your config file. If not provided, will
check the default location `config/config.ini`. Additionally,
this does assume you have a section, called `rbac_credentials`.
"""
# Read the file.
if not config_file:
config_folder = pathlib.Path(__file__).parents[2].joinpath('config/')
config_file = config_folder.joinpath('config.ini')
self.config = ConfigParser()
self.config.read(config_file)
self._subscription_id = None
self._tenant_id = None
self._client_id = None
self._client_secret = None
@property
def subscription_id(self) -> str:
"""Returns your Azure Subscription ID.
### Returns
----
str:
Your Azure Subscription ID.
"""
return self.config.get('rbac_credentials', 'subscription_id')
@property
def tenant_id(self) -> str:
"""Returns your Azure Tenant ID.
### Returns
----
str:
Your Azure Tenant ID.
"""
return self.config.get('rbac_credentials', 'tenant_id')
@property
def client_id(self) -> str:
"""Returns your Azure Client ID.
### Returns
----
str:
Your Azure Client ID.
"""
return self.config.get('rbac_credentials', 'client_id')
@property
def client_secret(self) -> str:
"""Returns your Azure Client Secret.
### Returns
----
str:
            Your Azure Client Secret.
"""
return self.config.get('rbac_credentials', 'client_secret')
@property
def azure_credentials(self) -> DefaultAzureCredential:
"""Returns the `DefaultAzureCredential` object used for authentication.
### Returns
----
DefaultAzureCredential:
            A non-authenticated instance of the `DefaultAzureCredential`
object.
"""
return DefaultAzureCredential()
def to_dict(self) -> dict:
"""Returns all the properties as a python dictionary.
### Returns
----
dict:
A dictionary of your azure credentials.
"""
return {
'tenant_id': self.tenant_id,
'client_id': self.client_id,
'client_secret': self.client_secret,
'subscription_id': self.subscription_id
}
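# A minimal usage sketch (illustrative only; assumes a config/config.ini file with an
# [rbac_credentials] section as described in the class docstring):
#
#   creds = TradingCredentials()
#   print(creds.to_dict())
#   credential = creds.azure_credentials  # DefaultAzureCredential instance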
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import, division, unicode_literals
import time
from boto import sqs, utils as boto_utils
from boto.sqs.message import Message
import requests
from mo_dots import coalesce, unwrap, wrap
import mo_json
from mo_json import value2json
from mo_kwargs import override
from mo_logs import Log, machine_metadata
from mo_logs.exceptions import Except, suppress_exception
import mo_math
from mo_threads import Thread, Till, Signal
from mo_times import timer
from mo_times.durations import Duration, SECOND
class Queue(object):
@override
def __init__(
self,
name,
region,
aws_access_key_id=None,
aws_secret_access_key=None,
debug=False,
kwargs=None
):
self.settings = kwargs
self.pending = []
if kwargs.region not in [r.name for r in sqs.regions()]:
Log.error("Can not find region {{region}} in {{regions}}", region=kwargs.region, regions=[r.name for r in sqs.regions()])
conn = sqs.connect_to_region(
region_name=unwrap(kwargs.region),
aws_access_key_id=unwrap(kwargs.aws_access_key_id),
aws_secret_access_key=unwrap(kwargs.aws_secret_access_key),
)
self.queue = conn.get_queue(name)
        if self.queue is None:
Log.error("Can not find queue with name {{queue}} in region {{region}}", queue=kwargs.name, region=kwargs.region)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def __len__(self):
attrib = self.queue.get_attributes("ApproximateNumberOfMessages")
return int(attrib['ApproximateNumberOfMessages'])
def add(self, message):
message = wrap(message)
m = Message()
m.set_body(value2json(message))
self.queue.write(m)
@property
def name(self):
return self.settings.name
def extend(self, messages):
for m in messages:
self.add(m)
def pop(self, wait=SECOND, till=None):
if till is not None and not isinstance(till, Signal):
Log.error("Expecting a signal")
m = self.queue.read(wait_time_seconds=mo_math.floor(wait.seconds))
if not m:
return None
self.pending.append(m)
output = mo_json.json2value(m.get_body())
return output
def pop_message(self, wait=SECOND, till=None):
"""
RETURN TUPLE (message, payload) CALLER IS RESPONSIBLE FOR CALLING message.delete() WHEN DONE
"""
if till is not None and not isinstance(till, Signal):
Log.error("Expecting a signal")
message = self.queue.read(wait_time_seconds=mo_math.floor(wait.seconds))
if not message:
return None
message.delete = lambda: self.queue.delete_message(message)
payload = mo_json.json2value(message.get_body())
return message, payload
def commit(self):
pending = self.pending
self.pending = []
for p in pending:
self.queue.delete_message(p)
def rollback(self):
if self.pending:
pending, self.pending = self.pending, []
for p in pending:
m = Message()
m.set_body(p.get_body())
self.queue.write(m)
for p in pending:
self.queue.delete_message(p)
if self.settings.debug:
Log.alert("{{num}} messages returned to queue", num=len(pending))
def close(self):
self.commit()
def capture_termination_signal(please_stop):
"""
WILL SIGNAL please_stop WHEN THIS AWS INSTANCE IS DUE FOR SHUTDOWN
"""
def worker(please_stop):
seen_problem = False
while not please_stop:
request_time = (time.time() - timer.START)/60 # MINUTES
try:
response = requests.get("http://169.254.169.254/latest/meta-data/spot/termination-time")
seen_problem = False
if response.status_code not in [400, 404]:
Log.alert("Shutdown AWS Spot Node {{name}} {{type}}", name=machine_metadata.name, type=machine_metadata.aws_instance_type)
please_stop.go()
except Exception as e:
e = Except.wrap(e)
if "Failed to establish a new connection: [Errno 10060]" in e or "A socket operation was attempted to an unreachable network" in e:
Log.note("AWS Spot Detection has shutdown, probably not a spot node, (http://169.254.169.254 is unreachable)")
return
elif seen_problem:
# IGNORE THE FIRST PROBLEM
Log.warning("AWS shutdown detection has more than one consecutive problem: (last request {{time|round(1)}} minutes since startup)", time=request_time, cause=e)
seen_problem = True
(Till(seconds=61) | please_stop).wait()
(Till(seconds=11) | please_stop).wait()
Thread.run("listen for termination", worker)
def get_instance_metadata(timeout=None):
if not isinstance(timeout, (int, float)):
timeout = Duration(timeout).seconds
output = wrap({k.replace("-", "_"): v for k, v in boto_utils.get_instance_metadata(timeout=coalesce(timeout, 5), num_retries=2).items()})
return output
def aws_retry(func):
def output(*args, **kwargs):
while True:
try:
return func(*args, **kwargs)
except Exception as e:
e = Except.wrap(e)
if "Request limit exceeded" in e:
Log.warning("AWS Problem", cause=e)
continue
else:
Log.error("Problem with call to AWS", cause=e)
return output
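# A minimal usage sketch of the retry decorator above (illustrative only; the wrapped
# function and its boto connection argument are hypothetical):
#
#   @aws_retry
#   def describe_instances(conn):
#       return conn.get_all_instances()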
# GET FROM AWS, IF WE CAN
def _get_metadata_from_from_aws(please_stop):
with suppress_exception:
ec2 = get_instance_metadata()
if ec2:
machine_metadata.aws_instance_type = ec2.instance_type
machine_metadata.name = ec2.instance_id
Thread.run("get aws machine metadata", _get_metadata_from_from_aws)
from . import s3
|
"""Checks to ensure the individual issues toml have all the required fields."""
import os
import sys
import tomlkit as toml
SEVERITIES = ("major", "minor", "critical")
CATEGORIES = (
"bug-risk",
"doc",
"style",
"antipattern",
"coverage",
"security",
"performance",
"typecheck",
)
GITHUB_WORKSPACE_PATH = os.environ.get("GITHUB_WORKSPACE")
def raise_issue(filepath, message, line=1, col=0):
"""
Print the issue to stdout.
    Since the checks here operate on whole fields, it isn't possible to get an
    exact line and column, so issues are reported on the last line of each file.
"""
with open(filepath) as fp:
line = len(fp.readlines())
print(f"{filepath}: {line}:{col}: {message}")
def main():
"""Validate the issue toml files."""
issues_dir = os.path.join(
GITHUB_WORKSPACE_PATH, os.environ.get("INPUT_ISSUES-PATH", "issues/")
)
issue_count = 0
for dir_path, _, filenames in os.walk(issues_dir):
for filename in filenames:
filepath = os.path.join(dir_path, filename)
with open(filepath) as fp:
try:
data = toml.loads(fp.read())
except Exception as exc:
# Can not decode toml file. Raise an issue.
# Details are in exc.
raise_issue(filepath, f"Error decoding toml: {str(exc)}")
issue_count += 1
continue
# Do not check this file if the issue is archived
if data.get("archived"):
continue
# Check for issue title:
title = data.get("title")
if not title:
raise_issue(filepath, f"Missing title in {filename}")
issue_count += 1
else:
if title.endswith("."):
raise_issue(
filepath, "Issue title should not have a period `.`"
)
issue_count += 1
# check for severity:
severity = data.get("severity")
if not severity:
raise_issue(filepath, "Missing severity field")
issue_count += 1
else:
if severity not in SEVERITIES:
raise_issue(filepath, f"severity should be one of {SEVERITIES}")
issue_count += 1
# check for category
category = data.get("category")
if not category:
raise_issue(filepath, "Missing category field")
issue_count += 1
else:
if category not in CATEGORIES:
raise_issue(filepath, f"category should be one of {CATEGORIES}")
issue_count += 1
# Check for description
description = data.get("description")
if not description:
raise_issue(filepath, "Missing description field")
issue_count += 1
else:
if not isinstance(description, str):
raise_issue(filepath, "Description is not a string")
issue_count += 1
# if issue is recommended: make sure it is a boolean value
recommended = data.get("recommended")
if recommended:
if not isinstance(recommended, bool):
raise_issue(
filepath, "`is_recommended` should have a boolean value"
)
issue_count += 1
# If any of autofix_available or autofix_title is present, check autofix attrs
if "autofix_title" in data or "autofix_available" in data:
if not isinstance(data.get("autofix_title"), str):
raise_issue(filepath, "Autofix title is not a string")
issue_count += 1
if not isinstance(data.get("autofix_available"), bool):
raise_issue(
filepath, "`autofix_available` should have a boolean value"
)
issue_count += 1
if issue_count == 0:
sys.exit(0)
else:
print(f"{issue_count} issues raised.")
sys.exit(1)
if __name__ == "__main__":
main()
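# A minimal example of an issue toml that passes the checks above (illustrative only):
#
#   title = "Unused variable detected"
#   severity = "minor"
#   category = "antipattern"
#   description = "The variable is assigned to but never used."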
|
import pandas as pd
from sklearn.gaussian_process.kernels import Matern
from sklearn.gaussian_process import GaussianProcessRegressor as GPR
from PyALE import ale
import matplotlib.pyplot as plt
import seaborn as sns
def plot_ale(log_name=None):
if log_name == 'Baseline':
iters = pd.read_csv('./logs/mbo/' + log_name + '/iterations/mbo_result.csv', header=None,
names=['reward_target', 'reward_distance', 'reward_crash', 'prob_crash', 'prob_target'])
design = pd.read_csv('./logs/mbo/' + log_name + '/initial_design/initial_design.csv', header=None,
names=['reward_target', 'reward_distance', 'reward_crash', 'prob_crash', 'prob_target'])
data = pd.concat((iters, design))
X = data.iloc[:, 0:3]
X.iloc[:, 1] = X.iloc[:, 1] * -1
X.iloc[:, 2] = X.iloc[:, 2] * -1
else:
iters = pd.read_csv('./logs/mbo/' + log_name + '/iterations/mbo_result.csv', header=None,
names=['reward_target', 'reward_distance', 'reward_teacher', 'reward_crash', 'prob_crash',
'prob_target'])
design = pd.read_csv('./logs/mbo/' + log_name + '/initial_design/initial_design.csv', header=None,
names=['reward_target', 'reward_distance', 'reward_teacher', 'reward_crash', 'prob_crash',
'prob_target'])
data = pd.concat((iters, design))
X = data.iloc[:, 0:4]
X.iloc[:, 1] = X.iloc[:, 1] * -1
X.iloc[:, 2] = X.iloc[:, 2] * -1
X.iloc[:, 3] = X.iloc[:, 3] * -1
if log_name == 'Baseline':
y = data.iloc[:, 4]
model = GPR(kernel=Matern(nu=1.5), n_restarts_optimizer=100)
model.fit(X, y)
df1 = ale(X=X, model=model, feature=['reward_target'], grid_size=50, include_CI=False, C=0.95,
y_name='probability of reaching the target')
df1['eff'] = df1['eff'].clip(-.6, .6)
df2 = ale(X=X, model=model, feature=['reward_distance'], grid_size=50, include_CI=False, C=0.95,
y_name='probability of reaching the target')
df2['eff'] = df2['eff'].clip(-.6, .6)
df3 = ale(X=X, model=model, feature=['reward_crash'], grid_size=50, include_CI=False, C=0.95,
y_name='probability of reaching the target')
df3['eff'] = df3['eff'].clip(-.6, .6)
y = data.iloc[:, 3] * (-1)
model = GPR(kernel=Matern(nu=1.5), n_restarts_optimizer=100)
model.fit(X, y)
df4 = ale(X=X, model=model, feature=['reward_target'], grid_size=50, include_CI=False, C=0.95,
y_name='probability of crashing')
df4['eff'] = df4['eff'].clip(-.6, .6)
df5 = ale(X=X, model=model, feature=['reward_distance'], grid_size=50, include_CI=False, C=0.95,
y_name='probability of crashing')
df5['eff'] = df5['eff'].clip(-.6, .6)
df6 = ale(X=X, model=model, feature=['reward_crash'], grid_size=50, include_CI=False, C=0.95,
y_name='probability of crashing')
df6['eff'] = df6['eff'].clip(-.6, .6)
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True, 'axes.edgecolor': 'black'})
for data in [df1, df2, df3]:
sns.lineplot(data=data, x=data.index, y=data.iloc[:, 0])
plt.xlabel('reward parameter')
plt.ylabel('Effect on probability of reaching the target during testing')
plt.xlim(-10, 10)
plt.ylim(-.62, .62)
plt.legend(loc=4, borderaxespad=0, labels=['target', 'distance', 'crash'],
title='reward parameter')
plt.savefig("art/plots/ale/" + log_name + "_target.png", dpi=100, transparent=True)
plt.show()
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True, 'axes.edgecolor': 'black'})
for data in [df4, df5, df6]:
sns.lineplot(data=data, x=data.index, y=data.iloc[:, 0])
plt.xlabel('reward parameter')
plt.ylabel('Effect on probability of crashing during training and testing')
plt.xlim(-10, 10)
plt.ylim(-.62, .62)
plt.legend(loc=4, borderaxespad=0, labels=['target', 'distance', 'crash'],
title='reward parameter')
plt.savefig("art/plots/ale/" + log_name + "_crashing.png", dpi=100, transparent=True)
plt.show()
else:
y = data.iloc[:, 5]
model = GPR(kernel=Matern(nu=1.5), n_restarts_optimizer=100)
model.fit(X, y)
df1 = ale(X=X, model=model, feature=['reward_target'], grid_size=50, include_CI=False, C=0.95,
y_name='probability of reaching the target')
df1['eff'] = df1['eff'].clip(-.6, .6)
df2 = ale(X=X, model=model, feature=['reward_distance'], grid_size=50, include_CI=False, C=0.95,
y_name='probability of reaching the target')
df2['eff'] = df2['eff'].clip(-.6, .6)
df3 = ale(X=X, model=model, feature=['reward_crash'], grid_size=50, include_CI=False, C=0.95,
y_name='probability of reaching the target')
df3['eff'] = df3['eff'].clip(-.6, .6)
df4 = ale(X=X, model=model, feature=['reward_teacher'], grid_size=50, include_CI=False, C=0.95,
y_name='probability of reaching the target')
df4['eff'] = df4['eff'].clip(-.6, .6)
y = data.iloc[:, 4] * (-1)
model = GPR(kernel=Matern(nu=1.5), n_restarts_optimizer=100)
model.fit(X, y)
df5 = ale(X=X, model=model, feature=['reward_target'], grid_size=50, include_CI=False, C=0.95,
y_name='probability of crashing')
df5['eff'] = df5['eff'].clip(-.6, .6)
df6 = ale(X=X, model=model, feature=['reward_distance'], grid_size=50, include_CI=False, C=0.95,
y_name='probability of crashing')
df6['eff'] = df6['eff'].clip(-.6, .6)
df7 = ale(X=X, model=model, feature=['reward_crash'], grid_size=50, include_CI=False, C=0.95,
y_name='probability of crashing')
df7['eff'] = df7['eff'].clip(-.6, .6)
df8 = ale(X=X, model=model, feature=['reward_teacher'], grid_size=50, include_CI=False, C=0.95,
y_name='probability of reaching the target')
df8['eff'] = df8['eff'].clip(-.6, .6)
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True, 'axes.edgecolor': 'black'})
for data in [df1, df2, df3, df4]:
sns.lineplot(data=data, x=data.index, y=data.iloc[:, 0])
plt.xlabel('reward parameter')
plt.ylabel('Effect on probability of reaching the target during testing')
plt.xlim(-10, 10)
plt.ylim(-.62, .62)
plt.legend(loc=4, borderaxespad=0, labels=['target', 'distance', 'crash', 'teacher'],
title='reward parameter')
plt.savefig("art/plots/ale/" + log_name + "_target.png", dpi=100, transparent=True)
plt.show()
plt.figure()
sns.set_style("whitegrid", {'axes.grid': True, 'axes.edgecolor': 'black'})
for data in [df5, df6, df7, df8]:
sns.lineplot(data=data, x=data.index, y=data.iloc[:, 0])
plt.xlabel('reward parameter')
plt.ylabel('Effect on probability of crashing during training and testing')
plt.xlim(-10, 10)
plt.ylim(-.62, .62)
plt.legend(loc=4, borderaxespad=0, labels=['target', 'distance', 'crash', 'teacher'],
title='reward parameter')
plt.savefig("art/plots/ale/" + log_name + "_crashing.png", dpi=100, transparent=True)
plt.show()
|
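# ANSI escape sequences of the form \033[<style>;<foreground>;<background>m set the
# terminal text style before printing: style 0 resets, 1 is bold, 4 is underline and
# 7 swaps foreground/background; foreground colors use codes 30-37 and background
# colors 40-47. A bare \033[m clears all formatting.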
print('\033[0;30;41mComo você está? ')
print('\033[4;33;44mComo você está?')
print('\033[1;35;43mComo você está?')
print('\033[30;42mComo você está?')
print('\033[mComo você está?')
print('\033[7;30mComo você está?')
|
# Copyright (c) 2013, omar jaber and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
columns, data = get_columns(), get_data()
return columns, data
def get_columns():
return [
_("Code/Rer") + ":Link/Request for Material:150",
_("Department") + ":Data:150",
_("Type") + ":Data:150",
_("Requested by") + ":Data:200",
_("Status") + ":Data:120",
_("Accepted by") + ":Data:150",
_("Approved by") + ":Data:150",
]
def get_data():
data=[]
rfm_list=frappe.db.sql("""select * from `tabRequest for Material` where docstatus=1""",as_dict=1)
for rfm in rfm_list:
accepted = rfm.request_for_material_accepter if rfm.status == 'Accepted' else ''
approved = ""
if rfm.status == 'Approved':
approved = rfm.request_for_material_approver
accepted = rfm.request_for_material_accepter
row = [
rfm.name,
rfm.department,
rfm.type,
rfm.requested_by,
rfm.status,
accepted,
approved
]
if rfm.status == "Approved" and not rfm.request_for_material_approver:
continue
if rfm.status == "Accepted" and not rfm.request_for_material_accepter:
continue
data.append(row)
return data
|
import json, time
start = time.time()
# Translate avro message to NGSI
def to_ngsi(ms):
#
msg_time = time.ctime(ms["header"]["time"])
id_string = "hmod.itcrowd.predator.uav.2:"+str(ms["header"]["time"])
entity = {
"id": id_string,
"type":"UAV_simple",
#"time" : str(msg_time),
}
value = dict()
# The modules from which data is provided
srcMod = {
"Navigation" : "location",
"Core" : "CPUUsage",
"CPU" : "CPUTime",
# More attributes here
}
# Type for each attribute value
types = {
"location" : "geo:json",
"CPUUsage" : "percent",
"CPUTime" : "sec",
# More types here
}
attr = ms["header"]["sourceModule"]
key = srcMod[attr]
# translate the message
# Special case for location attribute.
if attr == "Navigation":
m = ms
value[key] = {
"value": {"type": "Point",
"coordinates":[m['latitude'],m['longitude']]
},
"type": types[key]
}
value['elevation'] = {
"value": m['height'],
"type": "meter"
}
entity.update(value)
else:
value[key] = {
"value":m["value"],
"type":types[key]
}
entity.update(value)
return entity
# messages.txt file contains the consumed avro messages.
ngsi_data=[]
with open('messages.txt') as f:
for line in f:
ngsi_data.append(to_ngsi(json.loads(line)))
print(len(ngsi_data),'messages translated...')
end = time.time()
print(json.dumps(ngsi_data[1], indent=4))
print("Execution time: ", (end-start),'seconds.')
|
# Copyright (c) 2017 Weitian LI <liweitianux@live.com>
# MIT license
"""
Wrapper function to view FITS files using DS9.
"""
import subprocess
def ds9_view(filename, regfile=None, regformat="ciao", regsystem="physical",
cmap="he", binfactor=2, scale="linear", smooth=None):
"""
Wrapper function to view FITS files using DS9.
"""
cmd = [
"ds9", filename,
"-cmap", cmap,
"-bin", "factor", str(binfactor),
"-scale", scale,
]
if regfile:
cmd += [
"-regions", "format", regformat,
"-regions", "system", regsystem,
"-regions", regfile,
]
if smooth:
cmd += ["-smooth", "yes", "-smooth", "radius", str(smooth)]
subprocess.check_call(cmd)
|
import sys
import re
## Need to convert asxxxx syntax to rgbds syntax
module = sys.argv[1]
class Token:
def __init__(self, kind, value, line_nr):
self.kind = kind
self.value = value
self.line_nr = line_nr
def isA(self, kind, value=None):
return self.kind == kind and (value is None or value == self.value)
def __repr__(self):
return "[%s:%s:%d]" % (self.kind, self.value, self.line_nr)
class Tokenizer:
TOKEN_REGEX = re.compile('|'.join('(?P<%s>%s)' % pair for pair in [
('HEX', r'0x[0-9A-Fa-f]+'),
('ASSIGN', r'='),
('ALABEL', r'\d+\$'),
('NUMBER', r'\d+(\.\d*)?'),
('COMMENT', r';[^\n]*'),
('LABEL', r':+'),
('EXPR', r'#'),
('STRING', '"[^"]*"'),
('DIRECTIVE', r'\.[A-Za-z_][A-Za-z0-9_\.]*'),
('ID', r'[A-Za-z_][A-Za-z0-9_\.]*'),
('OP', r'[+\-*/,\(\)<>]'),
('NEWLINE', r'\n'),
('SKIP', r'[ \t]+'),
('MISMATCH', r'.'),
]))
def __init__(self, code):
self.__tokens = []
line_num = 1
for mo in self.TOKEN_REGEX.finditer(code):
kind = mo.lastgroup
value = mo.group()
if kind == 'MISMATCH':
print("ERR:", code.split("\n")[line_num-1])
raise RuntimeError("Syntax error on line: %d: %s\n%s", line_num, value)
elif kind == 'SKIP':
pass
else:
if kind == 'HEX':
value = "$" + value[2:]
if kind == 'ALABEL':
value = "._ANNO_" + value[:-1]
kind = "ID"
self.__tokens.append(Token(kind, value, line_num))
if kind == 'NEWLINE':
line_num += 1
self.__tokens.append(Token('NEWLINE', '\n', line_num))
def peek(self):
return self.__tokens[0]
def pop(self):
return self.__tokens.pop(0)
def expect(self, kind, value=None):
pop = self.pop()
if not pop.isA(kind, value):
if value is not None:
raise SyntaxError("%s != %s:%s" % (pop, kind, value))
raise SyntaxError("%s != %s" % (pop, kind))
def __bool__(self):
return bool(self.__tokens)
tok = Tokenizer(sys.stdin.read())
def processExpression():
while True:
t = tok.peek()
if t.isA('EXPR'):
tok.pop()
t = tok.peek()
if t.isA('OP', '<'):
sys.stdout.write('LOW')
tok.pop()
t = tok.peek()
if t.isA('OP', '>'):
sys.stdout.write('HIGH')
tok.pop()
t = tok.peek()
if t.isA('OP', '('):
tok.pop()
sys.stdout.write('(')
processExpression()
t = tok.pop()
assert t.isA('OP', ')')
sys.stdout.write(')')
if t.isA('ID') and t.value.startswith("b_"):
t.value = "BANK(%s)" % (t.value[1:])
if t.isA('ID') and t.value.startswith("___bank__"):
t.value = "BANK(%s)" % (t.value[8:])
if t.isA('NEWLINE') or t.isA('OP', ')') or t.isA('OP', ','):
break
sys.stdout.write(t.value)
tok.pop()
def processParameter():
t = tok.pop()
if t.isA('EXPR'):
processExpression()
elif t.isA('NEWLINE'):
return
elif t.isA('ID') or t.isA('NUMBER') or t.isA('HEX'):
sys.stdout.write(t.value)
elif t.isA('OP', '('):
sys.stdout.write("[")
processExpression()
t = tok.pop()
while not t.isA('OP', ')'):
sys.stdout.write(t.value)
t = tok.pop()
assert t.isA('OP', ')'), t
sys.stdout.write("]")
else:
raise Exception(t)
while tok:
start = tok.pop()
if start.isA('NEWLINE'):
pass
elif start.isA('COMMENT'):
print(start.value)
elif start.isA('DIRECTIVE'):
if start.value in {'.module', '.optsdcc', '.globl'}:
while not tok.pop().isA('NEWLINE'):
pass
elif start.value == '.pusharea':
print('PUSHS')
elif start.value == '.poparea':
print('POPS')
elif start.value == '.area':
area_name = tok.pop().value
if area_name == "_DATA":
print('SECTION "%s_%s", WRAM0' % (module, area_name))
elif area_name == "_INITIALIZED":
print('SECTION FRAGMENT "INITIALIZED", WRAM0')
elif area_name == "_DABS":
print('SECTION "%s_%s", SRAM' % (module, area_name))
elif area_name == "_HOME":
print('SECTION FRAGMENT "%s_%s", ROM0' % (module, area_name))
elif area_name == "_CODE":
print('SECTION FRAGMENT "%s_%s", ROM0' % (module, area_name))
elif area_name.startswith("_CODE_"):
print('SECTION FRAGMENT "%s_%s", ROMX, BANK[%d]' % (module, area_name, int(area_name[6:])))
elif area_name == "_CABS":
print('SECTION FRAGMENT "%s_%s", ROM0' % (module, area_name))
elif area_name == "_GSINIT":
print('SECTION FRAGMENT "GSINIT", ROMX, BANK[1]')
elif area_name == "_GSFINAL":
print('SECTION FRAGMENT "GSFINAL", ROMX, BANK[1]')
elif area_name == "_INITIALIZER":
print('SECTION FRAGMENT "INITIALIZER", ROMX, BANK[1]')
elif area_name == "_auto":
print('SECTION FRAGMENT "code_%s", ROMX' % (module))
elif area_name.startswith("VECTOR_"):
print('SECTION "%s", ROM0[$%04x]' % (area_name, int(area_name[7:], 16)))
else:
raise Exception(area_name)
while not tok.pop().isA('NEWLINE'):
pass
elif start.value == '.ds':
sys.stdout.write("ds ")
processExpression()
sys.stdout.write("\n")
elif start.value == '.ascii':
sys.stdout.write("db ")
sys.stdout.write(tok.pop().value)
sys.stdout.write("\n")
elif start.value == '.db' or start.value == '.byte':
sys.stdout.write("db ")
processParameter()
while tok.peek().isA('OP', ','):
sys.stdout.write(",")
tok.pop()
processParameter()
sys.stdout.write("\n")
elif start.value == '.dw':
sys.stdout.write("dw ")
processParameter()
while tok.peek().isA('OP', ','):
sys.stdout.write(",")
tok.pop()
processParameter()
sys.stdout.write("\n")
elif start.value == '.incbin':
sys.stdout.write("incbin ")
while not tok.peek().isA('NEWLINE'):
sys.stdout.write(tok.pop().value)
sys.stdout.write("\n")
tok.pop()
else:
raise Exception(start, tok.peek())
elif start.isA('ID'):
if tok.peek().isA('ASSIGN'):
tok.pop()
sys.stdout.write("%s = " % (start.value))
processExpression()
sys.stdout.write("\n")
elif tok.peek().isA('LABEL'):
print("%s%s" % (start.value, tok.pop().value))
elif start.isA('ID', 'ldhl'):
tok.expect('ID', 'sp')
tok.expect('OP', ',')
sys.stdout.write("ld hl, sp + ")
processParameter()
sys.stdout.write("\n")
elif start.isA('ID', 'lda'):
tok.expect('ID', 'hl')
tok.expect('OP', ',')
t = tok.pop()
assert t.isA('NUMBER') or t.isA('HEX')
tok.expect('OP', '(')
tok.expect('ID', 'sp')
tok.expect('OP', ')')
sys.stdout.write("ld hl, sp + %s\n" % (t.value))
elif start.isA('ID', 'jp') and tok.peek().isA('OP', '('):
tok.pop()
tok.expect('ID', 'hl')
tok.expect('OP', ')')
sys.stdout.write("jp hl\n")
else:
sys.stdout.write("%s " % (start.value))
if not tok.peek().isA('NEWLINE'):
processParameter()
if tok.peek().isA('OP', ','):
tok.pop()
sys.stdout.write(", ")
processParameter()
sys.stdout.write("\n")
tok.expect('NEWLINE')
else:
raise Exception(start)
|
import sqlite3
from datetime import date
class TalkThread(object):
def __init__(self, id, cmts):
self.id = id
self.comments = cmts
def add_comments(self, cmts):
self.comments.append(cmts)
class Comment(object):
def __init__(self, cmt):
self.comment_id = cmt[0]
self.reply_id = cmt[1]
self.date = cmt[2]
self.username = cmt[3]
self.post = cmt[4]
class TalkPage(object):
def __init__(self, page):
self.dbcon = sqlite3.connect('wiki/web/talk/talk.db')
self.cursor = self.dbcon.cursor()
def get_page_id(self, name):
self.cursor.execute("SELECT page_id FROM pages WHERE page_name = ?", (name,))
r = self.cursor.fetchone()
if r is None:
return None
return r[0]
def add_page(self, name):
self.cursor.execute("INSERT INTO pages (page_name) VALUES (?)", (name,))
self.dbcon.commit()
return self.cursor.execute("SELECT last_insert_rowid()").fetchone()[0]
def get_page_thread_ids(self, pgid):
self.cursor.execute("SELECT DISTINCT thread_id FROM thread WHERE page_id = ?", (pgid,))
ids = []
r = self.cursor.fetchall()
for result in r:
ids.append(result[0])
return ids
def new_thread_id(self):
self.cursor.execute("SELECT max(thread_id) FROM thread")
f = self.cursor.fetchone()[0]
if f is None:
return 1
else:
return f + 1
def thread_of_comment_id(self, cmnt_id):
self.cursor.execute("SELECT thread_id FROM thread WHERE comment_id = ?", (cmnt_id,))
return self.cursor.fetchone()[0]
def get_thread_comments(self, thread_id):
self.cursor.execute("""SELECT comment_id,reply_id,date,username,post FROM comment INNER JOIN
thread USING (comment_id) WHERE thread_id = ?""", (thread_id,))
comments = []
for cmt in self.cursor.fetchall():
comments.append(Comment(cmt))
return comments
def add_comment(self, thread_id, page_id, comment_id):
self.cursor.execute("INSERT INTO thread VALUES (?,?,?)", (thread_id,page_id,comment_id,))
self.dbcon.commit()
def comment_exists(self, id):
self.cursor.execute("SELECT comment_id FROM comment WHERE comment_id = ?", (id,))
r = self.cursor.fetchone()
if r is None:
return False
else:
return True
def get_threads(self, page_name):
page_id = self.get_page_id(page_name)
if page_id is None:
print("Page id does not exist, creating a new talk page")
page_id = self.add_page(page_name)
return []
ids = self.get_page_thread_ids(page_id)
threads = []
for tid in ids:
new_thr = TalkThread(tid, self.get_thread_comments(tid))
threads.append(new_thr)
return threads
# Save a comment to the database and return its comment id
# cmt_text: string of comment's text
# reply_to: id of reply comment, optional
def save_comment(self, cmt_text, reply_to=''):
d = date.today()
date_str = d.strftime("%m/%d/%y")
cmt_data = (reply_to, date_str, "User", cmt_text.rstrip(),)
self.cursor.execute("INSERT INTO comment (reply_id, date, username, post) VALUES (?,?,?,?)", cmt_data)
self.dbcon.commit()
return self.cursor.execute("SELECT last_insert_rowid()").fetchone()[0]
def post_comment(self, page_name, cmt_text, reply_to):
pg_id = self.get_page_id(page_name)
assert pg_id,"Page ID not found"
# New comments are in their own thread
if reply_to == '':
new_thr_id = self.new_thread_id()
cmt_id = self.save_comment(cmt_text)
self.add_comment(new_thr_id, pg_id, cmt_id)
else:
assert self.comment_exists(reply_to), "Invalid reply number: " + str(reply_to)
thr_id = self.thread_of_comment_id(reply_to)
cmt_id = self.save_comment(cmt_text, reply_to)
self.add_comment(thr_id, pg_id, cmt_id)
|
import sqlite3
from .db import DB
class CaseInfoTable(DB):
def __init__(self):
self.table_name = 'PB_case_details'
self.table_desc = 'Punjab patients discharged, ventilators, icu and death details. Table page 1 or 2'
self.cols = self.getcolumns()
def getcolumns(self):
cols = {
'date': 'DATE NOT NULL PRIMARY KEY',
'icu_patients_today': 'INT',
'icu_patients_today_districts': 'STRING',
'ventilator_patients_today': 'INT',
'ventilator_patients_today_districts': 'STRING',
'discharged_patients_today': 'INT',
'discharged_patients_today_districts': 'STRING',
'deaths_today': 'INT',
'deaths_today_districts': 'STRING'
}
return cols
|
# pure_collector.py
# import Python modules
import urllib3
# import third party modules
from prometheus_client.core import GaugeMetricFamily
# disable ceritificate warnings
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class PurestorageCollector:
""" Class that instantiate the collector's methods and properties to
retrieve metrics from Puretorage Flasharray.
It implements also a 'collect' method to allow Prometheus client REGISTRY
to work
Parameters:
fa: the authenticated API session to the Purestorage Flasharray
"""
def __init__(self, fa):
self.fa = fa
self._name = None
@property
def name(self):
"""(Self) -> str
it returns the FlashArray name if not already retrieved
"""
if self._name is None:
self._name = self.fa.get()['array_name']
return self._name
def array_hw(self):
"""(Self) -> iter
it creates metrics for: temperature, power and components status of
gauge type with array name and the hw component name as labels.
Metrics values can be iterated over.
"""
        temp = GaugeMetricFamily('array_temperature', 'Hardware components '
                                 'temperature', labels=['array', 'hw'])
        power = GaugeMetricFamily('array_power', 'Hardware components power '
                                  'consumption', labels=['array', 'hw'])
        status = GaugeMetricFamily('array_hw_status', 'Hardware '
                                   'components status', labels=['array', 'hw'])
fa_hw = self.fa.list_hardware()
for i in fa_hw:
metric = i['name'].replace('.', '_')
state = i['status']
if 'TMP' in metric and i['temperature']:
temp.add_metric([self.name, metric], i['temperature'])
if 'PWR' in metric and i['voltage']:
power.add_metric([self.name, metric], i['voltage'])
if state == 'ok' or state == 'not_installed':
status.add_metric([self.name, metric], 1)
else:
status.add_metric([self.name, metric], 0)
yield temp
yield power
yield status
def array_events(self):
"""(Self) -> iter
        it creates a metric for the number of open alerts (critical, warning and
        info) of gauge type, with the array name and the severity as labels.
Metrics values can be iterated over.
"""
        events = GaugeMetricFamily('purestorage_events_total', 'Number of open '
                                   'events', labels=['array', 'severity'])
fa_events = self.fa.list_messages(open=True)
ccounter = 0
wcounter = 0
icounter = 0
if len(fa_events) > 0:
for msg in fa_events:
severity = msg['current_severity']
if severity == 'critical':
ccounter += 1
if severity == 'warning':
wcounter += 1
if severity == 'info':
icounter += 1
events.add_metric([self.name, 'critical'], ccounter)
events.add_metric([self.name, 'warning'], wcounter)
events.add_metric([self.name, 'info'], icounter)
yield events
def array_space(self):
"""(Self) -> iter
        It dynamically creates array space metrics of gauge type with the array
        name as a label; metrics are created only if an integer or float value is
        returned. Metrics values can be iterated over.
"""
fa_space = self.fa._request('GET', 'array?space=true')
for m, v in fa_space[0].items():
if isinstance(v, (int, float)):
                array_space = GaugeMetricFamily(f'purestorage_array_space_{m}',
                                                'Overall space consumption for '
                                                'Flasharray', labels=['array'])
array_space.add_metric([self.name], v)
yield array_space
def array_perf(self):
"""(Self) -> iter
        It dynamically creates array performance metrics of gauge type with the
        array name as a label; metrics are created only if an integer or float
        value is returned. Metrics values can be iterated over.
"""
fa_perf = self.fa._request('GET', 'array?action=monitor')
for m, v in fa_perf[0].items():
if isinstance(v, (int, float)):
                array_perf = GaugeMetricFamily(f'purestorage_array_perf_{m}',
                                               'Overall performance metric for '
                                               'Flasharray', labels=['array'])
array_perf.add_metric([self.name], v)
yield array_perf
def vol_space(self):
"""(Self) -> iter
        It dynamically creates volume space metrics of gauge type with the array
        and volume names as labels; metrics are created only if an integer or
        float value is returned. Metrics values can be iterated over.
"""
v_space = self.fa._request('GET', 'volume?space=true')
# first iterate over metric, value to avoid duplicate metrics
for m, v in v_space[0].items():
if isinstance(v, (int, float)):
vol_space = GaugeMetricFamily(f'purestorage_vol_space_{m}',
'Vol space for Flasharray',
labels=['array', 'volume'])
# second iterate over volume dicts to populate the metric
for volume in v_space:
vol_name = volume['name'].replace('.', '_')
                    vol_space.add_metric([self.name, vol_name], volume[m])  # use this volume's own value
yield vol_space
def vol_perf(self):
"""(Self) -> iter
        It dynamically creates volume performance metrics of gauge type with the
        array and volume names as labels; metrics are created only if an integer
        or float value is returned. Metrics values can be iterated over.
"""
v_perf = self.fa._request('GET', 'volume?action=monitor')
# first iterate over metric, value to avoid duplicate metrics
for m, v in v_perf[0].items():
if isinstance(v, (int, float)):
vol_perf = GaugeMetricFamily(f'purestorage_vol_perf_{m}',
'Vol perf for Flasharray',
labels=['array', 'volume'])
# second iterate over volume dicts to populate the metric
for volume in v_perf:
vol_name = volume['name'].replace('.', '_')
                    vol_perf.add_metric([self.name, vol_name], volume[m])  # use this volume's own value
yield vol_perf
def collect(self):
"""(Self) -> iter
        Aggregates the collection of the Purestorage Flasharray collector
        metrics under the collect method for the Prometheus client REGISTRY
"""
yield from self.array_hw()
yield from self.array_events()
yield from self.array_space()
yield from self.array_perf()
yield from self.vol_space()
yield from self.vol_perf()
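# A minimal usage sketch (illustrative only; `fa` is assumed to be an authenticated
# Purestorage FlashArray REST session as described in the class docstring, and the
# port number is hypothetical):
#
#   from prometheus_client import start_http_server
#   from prometheus_client.core import REGISTRY
#
#   REGISTRY.register(PurestorageCollector(fa))
#   start_http_server(9491)  # metrics are then exposed at /metrics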
|
"""
EmailAlert - send alerts via email
"""
from __future__ import division
from builtins import str, object
import smtplib
import logging
class EmailAlert(object):
"""
A simple class to send alerts via email
"""
EMAIL_HEADER = "From: %s\r\nSubject: %s\r\nTo: %s\r\n\r\n"
def __init__(self, configDict):
self.serverName = configDict.get("smtpServer", "localhost")
self.fromAddr = configDict.get("fromAddr", "noreply@cern.ch")
self.toAddr = configDict.get("toAddr", "cms-service-production-admins@cern.ch")
if not isinstance(self.toAddr, (list, set)):
self.toAddr = [self.toAddr]
def send(self, subject, message):
"""
Send an email
:param subject: Email subject
:param message: Email body
"""
msg = self.EMAIL_HEADER % (self.fromAddr, subject, ", ".join(self.toAddr))
msg += message
try:
smtp = smtplib.SMTP(self.serverName)
smtp.sendmail(self.fromAddr, self.toAddr, msg)
except Exception as ex:
logging.exception("Error sending alert email.\nDetails: %s", str(ex))
try:
# clean up smtp connection
smtp.quit()
except UnboundLocalError:
# it means our client failed connecting to the SMTP server
pass
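# A minimal usage sketch (illustrative only; the SMTP server and addresses are
# hypothetical):
#
#   alert = EmailAlert({"smtpServer": "localhost",
#                       "fromAddr": "noreply@example.com",
#                       "toAddr": ["admins@example.com"]})
#   alert.send("Test subject", "Test body")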
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Shutdown handler.
Provides a Python 2.7 method which invokes a user-specified shutdown hook.
"""
import logging
import sys
import traceback
from google.appengine.api.logservice import logservice
from google.appengine.api.runtime import runtime as runtime_api
from google.appengine.runtime import request_environment
def Handle(environ):
"""Handles a shutdown request.
Args:
environ: A dict containing the environ for this request (i.e. for
os.environ).
Returns:
A tuple with:
A dict containing:
error: App Engine error code. Always 0 for OK.
response_code: The HTTP response code. Always 200.
logs: A list of tuples (timestamp_usec, severity, message) of log
entries. timestamp_usec is a long, severity is int and message is
str. Severity levels are 0..4 for Debug, Info, Warning, Error,
Critical.
    A tuple containing the result of sys.exc_info() if an exception occurred,
or None.
"""
response = {}
response['error'] = 0
response['response_code'] = 200
exc = None
try:
error = logservice.LogsBuffer()
request_environment.current_request.Init(error, environ)
getattr(runtime_api, '__BeginShutdown')()
except:
exc = sys.exc_info()
message = ''.join(traceback.format_exception(exc[0], exc[1],
exc[2].tb_next))
logging.info('Raised exception in shutdown handler:\n' + message)
finally:
request_environment.current_request.Clear()
response['logs'] = error.parse_logs()
return (response, exc)
|
def simple_gem5(data):
s = ""
s += "from __future__ import print_function\n"
s += "from __future__ import absolute_import\n"
s += "import m5\n"
s += "from m5.objects import *\n"
s += "system = System()\n"
# Set the clock fequency of the system (and all of its children)
s += "system.clk_domain = SrcClockDomain()\n"
s += "system.clk_domain.clock = '%.1fGHz'\n" %(data['clk'])
s += "system.clk_domain.voltage_domain = VoltageDomain()\n"
# Create a simple CPU
if data['cpu'] == 'Simple':
s += "system.cpu = TimingSimpleCPU()\n"
s += "system.mem_mode = 'timing'\n"
elif data['cpu'] == 'Out Order':
s += "system.cpu = DerivO3CPU()\n"
s += "system.mem_mode = 'timing'\n"
s += "system.cpu.createThreads()\n"
elif data['cpu'] == 'In Order':
s += "system.cpu = AtomicSimpleCPU()\n"
s += "system.mem_mode = 'atomic'\n"
s += "system.cpu.createThreads()\n"
s += "system.mem_ranges = [AddrRange('512MB')]\n"
# Create a memory bus, a system crossbar, in this case
s += "system.membus = SystemXBar()\n"
# Hook the CPU ports up to the membus
s += "system.cpu.icache_port = system.membus.cpu_side_ports\n"
s += "system.cpu.dcache_port = system.membus.cpu_side_ports\n"
# create the interrupt controller for the CPU and connect to the membus
s += "system.cpu.createInterruptController()\n"
# For x86 only, make sure the interrupts are connected to the memory
# Note: these are directly connected to the memory bus and are not cached
s += "if m5.defines.buildEnv['TARGET_ISA'] == \"x86\":\n"
s += " system.cpu.interrupts[0].pio = system.membus.mem_side_ports\n"
s += " system.cpu.interrupts[0].int_requestor = system.membus.cpu_side_ports\n"
s += " system.cpu.interrupts[0].int_responder = system.membus.mem_side_ports\n"
# Create a DDR3 memory controller and connect it to the membus
s += "system.mem_ctrl = MemCtrl()\n"
s += "system.mem_ctrl.dram = %s_8x8()\n" %(data['memory'])
s += "system.mem_ctrl.dram.range = system.mem_ranges[0]\n"
s += "system.mem_ctrl.port = system.membus.mem_side_ports\n"
# Connect the system up to the membus
s += "system.system_port = system.membus.cpu_side_ports\n"
# get ISA for the binary to run.
s += "isa = str(m5.defines.buildEnv['TARGET_ISA']).lower()\n"
# Default to running 'hello', use the compiled ISA to find the binary
# grab the specific path to the binary
s += "binary = os.path.join('%s')\n" %data['binary']
# Create a process for a simple "Hello World" application
s += "process = Process()\n"
# Set the command
# cmd is a list which begins with the executable (like argv)
s += "process.cmd = [binary]\n"
# Set the cpu to use the process as its workload and create thread contexts
s += "system.cpu.workload = process\n"
s += "system.cpu.createThreads()\n"
# set up the root SimObject and start the simulation
s += "root = Root(full_system = False, system = system)\n"
# instantiate all of the objects we've created above
s += "m5.instantiate()\n"
s += "print('Beginning simulation!')\n"
s += "exit_event = m5.simulate()\n"
s += "print('Exiting @ tick %i because %s' % (m5.curTick(), exit_event.getCause()))\n"
f = open("/content/gem5_code.py","w")
f.write(s)
f.close()
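# A minimal usage sketch (illustrative only; the keys mirror those read above and the
# binary path is hypothetical):
#
#   simple_gem5({'clk': 1.0,                # GHz
#                'cpu': 'Simple',           # or 'Out Order' / 'In Order'
#                'memory': 'DDR3_1600',     # expands to DDR3_1600_8x8()
#                'binary': 'tests/test-progs/hello/bin/x86/linux/hello'})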
|
import argparse
import torch
from rlkit.core import logger
from rlkit.samplers.rollout_functions import multitask_rollout
from rlkit.torch import pytorch_util as ptu
from rlkit.envs.vae_wrapper import VAEWrappedEnv
def simulate_policy(args):
data = torch.load(args.file)
policy = data['evaluation/policy']
env = data['evaluation/env']
print("Policy and environment loaded")
if args.gpu:
ptu.set_gpu_mode(True)
policy.to(ptu.device)
if isinstance(env, VAEWrappedEnv) and hasattr(env, 'mode'):
# env.mode(args.mode)
env.goal_sampling_mode = args.mode
if args.enable_render or hasattr(env, 'enable_render'):
# some environments need to be reconfigured for visualization
env.enable_render()
paths = []
while True:
paths.append(multitask_rollout(
env,
policy,
max_path_length=args.H,
render=not args.hide,
observation_key='observation',
desired_goal_key='desired_goal',
))
if hasattr(env, "log_diagnostics"):
env.log_diagnostics(paths)
if hasattr(env, "get_diagnostics"):
for k, v in env.get_diagnostics(paths).items():
logger.record_tabular(k, v)
logger.dump_tabular()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('file', type=str,
help='path to the snapshot file')
parser.add_argument('--H', type=int, default=300,
help='Max length of rollout')
parser.add_argument('--speedup', type=float, default=10,
help='Speedup')
parser.add_argument('--mode', default='video_env', type=str,
help='env mode')
parser.add_argument('--gpu', action='store_true')
parser.add_argument('--enable_render', action='store_true')
parser.add_argument('--hide', action='store_true')
args = parser.parse_args()
simulate_policy(args)
|
from damage_detector.detector import Detector
if __name__ == '__main__':
print("Enter image path: ")
image_path = input()
Detector.detect_scratches(image_path)
|
"""See :ref:`specs.projects.actors`.
"""
|
import sys
from datetime import datetime, timedelta, timezone, time
from django.conf import settings
from django.utils.translation import ugettext as _
from .models import InfusionChanged, SensorChanged, LastTriggerSet, TriggerTime
def process_nightscouts_api_response(response):
"""
    Process Nightscout's response and return the date and time of the last change of the infusion set and CGM sensor.
    The result is saved in the database; if the data is not in the response, it is read from the database (if present).
    :param response: response from Nightscout's API
:return: last change date and time
"""
if response.status_code == 200:
inf_date = None
sensor_date = None
response_text = response.json()
for set in response_text:
try:
if inf_date is None and (set["eventType"] == "Site Change" or set['notes'] == "Reservoir changed"):
inf_date = set["created_at"]
InfusionChanged.objects.update_or_create(id=1, defaults={"date": inf_date, })
elif sensor_date is None and (set["eventType"] == "Sensor Change" or set['notes'] == "Sensor changed"):
sensor_date = set['created_at']
SensorChanged.objects.update_or_create(id=1, defaults={"date": sensor_date, })
except KeyError:
pass
try:
inf_date = InfusionChanged.objects.get(id=1).date
except InfusionChanged.DoesNotExist:
print(_("warning: infusion set change has never been cached"))
sys.stdout.flush()
try:
sensor_date = SensorChanged.objects.get(id=1).date
except SensorChanged.DoesNotExist:
print(_("warning: CGM sensor change has never been cached"))
sys.stdout.flush()
return inf_date, sensor_date
def calculate_infusion(date):
"""
calculates next change of infusion set
:param date: datetime of previous change of infusion set
:return: time remains to next change
"""
infusion = timedelta(hours=settings.INFUSION_SET_ALERT_FREQUENCY)
infusion_alert_date = date + infusion
infusion_time_remains = infusion_alert_date - datetime.now(timezone.utc)
return infusion_time_remains
def calculate_sensor(date):
"""
calculates next change of CGM sensor
:param date: datetime of previous change of CGM sensor
:return: time remains to next change
"""
sensor = timedelta(hours=settings.SENSOR_ALERT_FREQUENCY)
sensor_alert_date = date + sensor
sensor_time_remains = sensor_alert_date - datetime.now(timezone.utc)
return sensor_time_remains
def get_sms_txt_infusion_set(time_remains):
"""
    add info about the next change of the infusion set to the SMS text
:param time_remains: timedelta to next change
:return: part of text for sms notification
"""
days = time_remains.days
if days < 0:
return _(".\n\n Your infusion set change has already passed")
hours = round(time_remains.seconds / 3600)
text = _(".\n\n Your infusion set should be changed in {} days and {} hours.").format(days, hours)
return text
def get_sms_txt_sensor(time_remains):
"""
    add info about the next change of the CGM sensor to the SMS text
:param time_remains: timedelta to next change
:return: part of text for sms notification
"""
days = time_remains.days
if days < 0:
return _("\n\n Your CGM sensor change has already passed")
hours = round(time_remains.seconds / 3600)
text = _("\n\n Your CGM sensor should be changed in {} days and {} hours.").format(days, hours)
return text
def not_today():
"""
    check if the trigger was not created today
:return: boolean
"""
now = datetime.now().date()
last, created = LastTriggerSet.objects.get_or_create(id=1, defaults={"date": now})
if created or last.date != now:
return True
else:
return False
def update_last_triggerset():
""" updates date in LastTriggerSet"""
LastTriggerSet.objects.update_or_create(id=1, defaults={"date": datetime.now().date()})
def get_trigger_model():
time_model, created = TriggerTime.objects.get_or_create(id=1, defaults={"time": time(16)})
return time_model
|
"""
Read a fastq file and a time file and split based on the time time stamp
Our time stamp file should have the following information:
sample ID\tStart Time
We assume that all times are contiguous, so if you want to include a gap you'll need to add that with a name
Time should be in one of the formats supported by `dateutil.parser <http://dateutil.readthedocs.io/en/stable/parser.html>`
e.g.:
- 2018-06-13T09:12:57Z
- 2018-06-13T19:12:57AEST
e.g.:
F6 2018-06-13T12:23:00AEST
B10 2018-06-13T14:41:00AEST
D8 2018-06-13T16:41:00AEST
C7 2018-06-13T18:42:00AEST
B12 2018-06-13T20:40:00AEST
G1 2018-06-14T08:56:00AEST
D10 2018-06-14T10:55:00AEST
"""
import os
import sys
import argparse
import re
from dateutil.parser import parse
from datetime import timedelta
from fastq import stream_fastq
def write_fastq(fqf, outs, outdir):
"""
Write the sequences to a set of fastq files
:param fqf: the input fastq file with the original sequences
:param outs: the sets of sequences for each id
:param outdir: the output directory to write the sequences to
:return:
"""
if not os.path.exists(outdir):
os.mkdir(outdir)
outputs = {}
for o in outs:
outputs[o] = open(os.path.join(outdir, o + ".fastq"), 'w')
remapped = {}
for o in outs:
for seqid in outs[o]:
remapped[seqid] = o
for seqid, header, seq, qualscores in stream_fastq(fqf):
if seqid not in remapped:
sys.stderr.write("Error: found sequence {} that we don't know where to write{}\n".format(seqid))
outputs[remapped[seqid]].write("@{}\n{}\n+\n{}\n".format(header, seq, qualscores))
for o in outputs:
outputs[o].close()
def split_fastq(fqf, times):
"""
Split the fastq file based on the times and dates
:param fqf: fastq file to parse
:param times: dictionary of times
:return: a dictionary of time ids and the list of sequences in that time
"""
seqs = {"other" : set(), "before" : set(), "after" : set()}
alltimes = []
for t in times:
seqs[t] = set()
alltimes.append(times[t][0])
alltimes.append(times[t][1])
earliest = min(alltimes)
latest = max(alltimes)
newest = None
newestseq = None
oldest = None
oldestseq = None
for seqid, header, seq, qualscores in stream_fastq(fqf):
m = re.search("start_time=([\w\:\-]+)", header)
if not m:
sys.stderr.write("No start time was detected in {}\n".format(header))
continue
try:
seqstart = parse(m.groups()[0])
except ValueError as v:
sys.stderr.write("Can't parse date time from: {}\n".format(m.groups()[0]))
continue
if seqstart < earliest:
seqs['before'].add(seqid)
continue
if seqstart > latest:
seqs['after'].add(seqid)
continue
if not newest or seqstart < newest:
newest = seqstart
newestseq = seqid
if not oldest or seqstart > oldest:
oldest = seqstart
oldestseq = seqid
added = False
for t in times:
if seqstart > times[t][0] and seqstart <= times[t][1]:
added = True
seqs[t].add(seqid)
break
if not added:
seqs['other'].add(seqid)
sys.stderr.write("Newest sequence: {} at {}\nOldest sequence: {} at {}\n".format(
newestseq, newest, oldestseq, oldest
))
return seqs
def parse_times(timefile, ztoffset):
"""
Parse the times from the time separation file
:param timefile: the file to parse
:param ztoffset: the difference from zulu time
:return: a dict of IDs and times
"""
times = {}
lastid = None
with open(timefile, 'r') as fin:
for l in fin:
p=l.strip().split("\t")
try:
starttime = parse(p[1])
except:
sys.stderr.write("Error: could not parse start time from {}\n".format(p[1]))
continue
if ztoffset:
                starttime = starttime + timedelta(hours=ztoffset)
times[p[0]] = [starttime, 0]
if lastid:
times[lastid][1] = starttime
lastid = p[0]
times[lastid][1] = times[lastid][0] + timedelta(hours=48)
for t in times:
sys.stderr.write("Time: {} From: {} To: {}\n".format(t, times[t][0], times[t][1]))
return times
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Deconvolute a fastq file")
parser.add_argument('-f', help='fastq file to read', required=True)
parser.add_argument('-t', help='timestamp file', required=True)
parser.add_argument('-z', help='offset from zulu time. This number will be added to Z so use -8 on the west coast of US. Default: use Z time', type=int, default=0)
parser.add_argument('-o', help='output directory to write fastq files to. If not provided sequences not written')
parser.add_argument('-v', help='verbose output', action="store_true")
args = parser.parse_args()
times = parse_times(args.t, args.z)
seqs = split_fastq(args.f, times)
if args.o:
write_fastq(args.f, seqs, args.o)
for t in seqs:
sys.stdout.write("{}\t{}\n".format(t, "; ".join(seqs[t])))
|
from colloidoscope.hoomd_sim_positions import convert_hoomd_positions, hooomd_sim_positions
from colloidoscope import DeepColloid
from colloidoscope.simulator import simulate
from colloidoscope.hoomd_sim_positions import read_gsd
import numpy as np
from magicgui import magicgui
from napari.layers import Image
import napari
# init magicgui parameters for sliders
@magicgui(
call_button='Simulate',
r={"widget_type": "Slider", 'max': 30},
xy_gauss={"widget_type": "Slider", 'max': 30},
z_gauss={"widget_type": "Slider", 'max': 30},
max_brightness={"widget_type": "Slider", 'max': 255, 'min':150},
min_brightness={"widget_type": "Slider", 'max': 150, 'min':50},
noise={"widget_type": "FloatSlider", 'max': 0.2},
layout='vertical',)
def update_simulation(layer:Image, label_layer:Image, r:int=6,
xy_gauss:int=2, z_gauss:int=5, max_brightness:int=255,
min_brightness:int=75, noise:float=0.01) -> Image:
if layer is not None:
assert isinstance(layer.data, np.ndarray) # it will be!
array = layer.data
canvas_size = array.shape
hoomd_positions = layer.metadata['hoomd_positions']
hoomd_diameters = layer.metadata['hoomd_diameters']
centers, diameters = convert_hoomd_positions(hoomd_positions, canvas_size, diameter=r*2, diameters=hoomd_diameters)
new_array, label_array = simulate(canvas_size, centers, r, xy_gauss, z_gauss, min_brightness, max_brightness,
noise, make_label=True, diameters=diameters, num_workers=10)
print(new_array.shape, new_array.max(), new_array.min(), r, centers.shape)
print(label_array.shape, label_array.max(), label_array.min(), r, centers.shape)
layer.data = new_array
if label_layer: label_layer.data = label_array*255
return
if __name__ == "__main__":
# dataset_path = '/home/ak18001/Data/HDD/Colloids'
dataset_path = '/home/wahab/Data/HDD/Colloids'
# dataset_path = '/mnt/storage/home/ak18001/scratch/Colloids'
dc = DeepColloid(dataset_path)
canvas_size = (32,128,128)
volfrac = 0.3
centers_path = f'{dataset_path}/Positions/poly/phi_{volfrac*1000:.0f}_poly.gsd'
canvas = np.zeros(canvas_size, dtype='uint8')
hoomd_positions, hoomd_diameters = read_gsd(centers_path, 1)
canvas_metadata = {'hoomd_diameters' : hoomd_diameters,
'hoomd_positions' : hoomd_positions}
viewer = napari.Viewer()
viewer.add_image(canvas, name="Simulated colloids", metadata=canvas_metadata)
viewer.add_image(canvas, name="Simulated labels", metadata=canvas_metadata, opacity=0.5, colormap='red')
# Add it to the napari viewer
viewer.window.add_dock_widget(update_simulation)
# update the layer dropdown menu when the layer list changes
viewer.layers.events.changed.connect(update_simulation.reset_choices)
# napari points tutorial: https://napari.org/tutorials/fundamentals/points.html
napari.run()
|
# -*- coding: utf-8 -*-
# Update by: https://github.com/CokeMine/ServerStatus-Hotaru
# Depends on the cross-platform psutil library
# Supported Python versions: 2.6 to 3.7
# Supported operating systems: Linux, Windows, OSX, Sun Solaris, FreeBSD, OpenBSD and NetBSD, both 32-bit and 64-bit architectures
import socket
import time
import json
import psutil
from collections import deque
SERVER = "127.0.0.1"
PORT = 35601
USER = "USER"
PASSWORD = "USER_PASSWORD"
INTERVAL = 1  # Update interval, in seconds
def check_interface(net_name):
net_name = net_name.strip()
invalid_name = ['lo', 'tun', 'kube', 'docker', 'vmbr', 'br-', 'vnet', 'veth']
return not any(name in net_name for name in invalid_name)
def get_uptime():
return int(time.time() - psutil.boot_time())
def get_memory():
mem = psutil.virtual_memory()
swap = psutil.swap_memory()
return int(mem.total / 1024.0), int(mem.used / 1024.0), int(swap.total / 1024.0), int(swap.used / 1024.0)
def get_hdd():
valid_fs = ['ext4', 'ext3', 'ext2', 'reiserfs', 'jfs', 'btrfs', 'fuseblk', 'zfs', 'simfs', 'ntfs', 'fat32', 'exfat',
'xfs']
disks = dict()
size = 0
used = 0
for disk in psutil.disk_partitions():
if disk.device not in disks and disk.fstype.lower() in valid_fs:
disks[disk.device] = disk.mountpoint
for disk in disks.values():
usage = psutil.disk_usage(disk)
size += usage.total
used += usage.used
return int(size / 1024.0 / 1024.0), int(used / 1024.0 / 1024.0)
def get_load():
try:
return round(psutil.getloadavg()[0], 1)
except Exception:
return -1.0
def get_cpu():
return psutil.cpu_percent(interval=INTERVAL)
class Network:
def __init__(self):
self.rx = deque(maxlen=10)
self.tx = deque(maxlen=10)
self._get_traffic()
def _get_traffic(self):
net_in = 0
net_out = 0
net = psutil.net_io_counters(pernic=True)
for k, v in net.items():
if check_interface(k):
net_in += v[1]
net_out += v[0]
self.rx.append(net_in)
self.tx.append(net_out)
def get_speed(self):
self._get_traffic()
avg_rx = 0
avg_tx = 0
queue_len = len(self.rx)
for x in range(queue_len - 1):
avg_rx += self.rx[x + 1] - self.rx[x]
avg_tx += self.tx[x + 1] - self.tx[x]
avg_rx = int(avg_rx / queue_len / INTERVAL)
avg_tx = int(avg_tx / queue_len / INTERVAL)
return avg_rx, avg_tx
def get_traffic(self):
queue_len = len(self.rx)
return self.rx[queue_len - 1], self.tx[queue_len - 1]
def get_network(ip_version):
if ip_version == 4:
host = 'ipv4.google.com'
elif ip_version == 6:
host = 'ipv6.google.com'
else:
return False
try:
socket.create_connection((host, 80), 2).close()
return True
except Exception:
return False
if __name__ == '__main__':
socket.setdefaulttimeout(30)
while True:
try:
print("Connecting...")
s = socket.create_connection((SERVER, PORT))
data = s.recv(1024).decode()
if data.find("Authentication required") > -1:
s.send((USER + ':' + PASSWORD + '\n').encode("utf-8"))
data = s.recv(1024).decode()
if data.find("Authentication successful") < 0:
print(data)
raise socket.error
else:
print(data)
raise socket.error
print(data)
if data.find('You are connecting via') < 0:
data = s.recv(1024).decode()
print(data)
timer = 0
check_ip = 0
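            # The server reports which IP family this connection uses; below we
            # periodically probe reachability of the *other* family.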
if data.find("IPv4") > -1:
check_ip = 6
elif data.find("IPv6") > -1:
check_ip = 4
else:
print(data)
raise socket.error
traffic = Network()
while True:
CPU = get_cpu()
NetRx, NetTx = traffic.get_speed()
NET_IN, NET_OUT = traffic.get_traffic()
Uptime = get_uptime()
Load = get_load()
MemoryTotal, MemoryUsed, SwapTotal, SwapUsed = get_memory()
HDDTotal, HDDUsed = get_hdd()
array = {}
if not timer:
array['online' + str(check_ip)] = get_network(check_ip)
timer = 150
else:
timer -= 1 * INTERVAL
array['uptime'] = Uptime
array['load'] = Load
array['memory_total'] = MemoryTotal
array['memory_used'] = MemoryUsed
array['swap_total'] = SwapTotal
array['swap_used'] = SwapUsed
array['hdd_total'] = HDDTotal
array['hdd_used'] = HDDUsed
array['cpu'] = CPU
array['network_rx'] = NetRx
array['network_tx'] = NetTx
array['network_in'] = NET_IN
array['network_out'] = NET_OUT
s.send(("update " + json.dumps(array) + "\n").encode("utf-8"))
except KeyboardInterrupt:
raise
except socket.error:
print("Disconnected...")
# keep on trying after a disconnect
if 's' in locals().keys():
del s
time.sleep(3)
except Exception as e:
print("Caught Exception:", e)
if 's' in locals().keys():
del s
time.sleep(3)
|
# Config
homeserver_url = "matrix.example.com"
access_token = ""
# # Import modules
import json
import requests
import sys
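# Usage (assuming this script is saved as send_message.py):
#   python send_message.py '!roomid:matrix.example.com' 'Hello from the CLI'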
if __name__ == "__main__":
room_id = sys.argv[1].replace("!", "%21").strip()
url = "https://%s/_matrix/client/r0/rooms/%s/send/m.room.message" %(homeserver_url, room_id)
headers = {
"Authorization": "Bearer %s" % access_token,
"Content-Type": "application/json"
}
data = {
"msgtype": "m.text",
"body": sys.argv[2]
}
requests.post(
url=url,
headers=headers,
data=json.dumps(data)
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Machine translation using Microsoft Translator API
"""
import sys
import os
import argparse
import uuid
import json
import requests
from requests.packages.urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
from logging import getLogger, StreamHandler, FileHandler, Formatter, DEBUG
import openpyxl
__author__ = 'Yuta OHURA <bultau@gmail.com>'
__status__ = 'development'
__version__ = '0.1'
__date__ = '17 May 2018'
class Translator:
"""
Main class of translation
"""
def __init__(self, api_key):
self._api_key = api_key
self._host = 'https://api.cognitive.microsofttranslator.com'
self._path = '/translate?api-version=3.0'
self._headers = {
'Ocp-Apim-Subscription-Key': self._api_key,
'Content-type': 'application/json',
'X-ClientTraceId': str(uuid.uuid4())
}
def translate(self, base_string, to_lang):
"""
Main function of translation
"""
req = requests.Session()
retries = Retry(total=5,
backoff_factor=1,
status_forcelist=[500, 502, 503, 504])
req.mount('https://', HTTPAdapter(max_retries=retries))
req.mount('http://', HTTPAdapter(max_retries=retries))
requestBody = [{
'Text': base_string,
}]
content = json.dumps(requestBody, ensure_ascii=False).encode('utf-8')
res = req.request('POST', self._host + self._path + '&to=' +
to_lang, data=content, headers=self._headers, timeout=30)
return res.text
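# A minimal usage sketch (hypothetical key and target language, independent of
# the CLI below):
#
#     trs = Translator('<your-azure-subscription-key>')
#     print(trs.translate('Hello, world', 'ja'))  # raw JSON response text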
if __name__ == '__main__':
logger = getLogger(__name__)
handler = StreamHandler(sys.stdout)
handler.setFormatter(Formatter('%(asctime)s %(message)s'))
handler.setLevel(DEBUG)
logger.setLevel(DEBUG)
logger.addHandler(handler)
p = argparse.ArgumentParser()
p.add_argument(
'-t', '--to', help='language which you want to translate to')
p.add_argument('-k', '--api-key', help='your API Key of Microsoft Azure')
p.add_argument(
'-s', '--input-string', help='character string which you want to translate', default=None)
p.add_argument(
'-f', '--input-file', help='input file which you want to translate', default=None)
args = p.parse_args()
trs = Translator(args.api_key)
if args.input_string is not None:
ret_json = trs.translate(args.input_string, args.to)
ret_dict = json.loads(ret_json)
logger.debug(json.dumps(ret_dict, indent=4, ensure_ascii=False))
print(ret_dict[0]['translations'][0]['text'])
elif args.input_file is not None:
wb = openpyxl.load_workbook(args.input_file)
sheet = wb['data']
for row in sheet.rows:
if row[0].row == 1:
continue
string = row[1].value
if string is not None:
try:
ret_json = trs.translate(string, args.to)
ret_dict = json.loads(ret_json)
logger.debug(json.dumps(json.loads(ret_json),
indent=4, ensure_ascii=False))
ret_text = ret_dict[0]['translations'][0]['text']
row[2].value = ret_text
except KeyError as e:
                    logger.error('error has occurred on line %s' %
str(row[0].row))
logger.error(e)
tmp_txt = os.path.splitext(args.input_file)
        wb.save(tmp_txt[0] + '_translated' + tmp_txt[1])
else:
logger.error('you must specify the string or file to translate')
sys.exit()
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests import common, Form
from odoo.tools import mute_logger
class TestDropship(common.TransactionCase):
def test_change_qty(self):
# enable the dropship and MTO route on the product
prod = self.env['product.product'].create({'name': 'Large Desk'})
dropshipping_route = self.env.ref('stock_dropshipping.route_drop_shipping')
mto_route = self.env.ref('stock.route_warehouse0_mto')
prod.write({'route_ids': [(6, 0, [dropshipping_route.id, mto_route.id])]})
# add a vendor
vendor1 = self.env['res.partner'].create({'name': 'vendor1'})
seller1 = self.env['product.supplierinfo'].create({
'name': vendor1.id,
'price': 8,
})
prod.write({'seller_ids': [(6, 0, [seller1.id])]})
# sell one unit of this product
cust = self.env['res.partner'].create({'name': 'customer1'})
so = self.env['sale.order'].create({
'partner_id': cust.id,
'partner_invoice_id': cust.id,
'partner_shipping_id': cust.id,
'order_line': [(0, 0, {
'name': prod.name,
'product_id': prod.id,
'product_uom_qty': 1.00,
'product_uom': prod.uom_id.id,
'price_unit': 12,
})],
'pricelist_id': self.env.ref('product.list0').id,
'picking_policy': 'direct',
})
so.action_confirm()
po = self.env['purchase.order'].search([('group_id', '=', so.procurement_group_id.id)])
po_line = po.order_line
# Check the qty on the P0
self.assertAlmostEqual(po_line.product_qty, 1.00)
# Update qty on SO and check PO
so.write({'order_line': [[1, so.order_line.id, {'product_uom_qty': 2.00}]]})
self.assertAlmostEqual(po_line.product_qty, 2.00)
# Create a new so line
sol2 = self.env['sale.order.line'].create({
'order_id': so.id,
'name': prod.name,
'product_id': prod.id,
'product_uom_qty': 3.00,
'product_uom': prod.uom_id.id,
'price_unit': 12,
})
# there is a new line
pol2 = po.order_line - po_line
# the first line is unchanged
self.assertAlmostEqual(po_line.product_qty, 2.00)
# the new line matches the new line on the so
self.assertAlmostEqual(pol2.product_qty, sol2.product_uom_qty)
def test_00_dropship(self):
# Create a vendor
supplier_dropship = self.env['res.partner'].create({'name': 'Vendor of Dropshipping test'})
# Create new product without any routes
drop_shop_product = self.env['product.product'].create({
'name': "Pen drive",
'type': "product",
'categ_id': self.env.ref('product.product_category_1').id,
'lst_price': 100.0,
'standard_price': 0.0,
'uom_id': self.env.ref('uom.product_uom_unit').id,
'uom_po_id': self.env.ref('uom.product_uom_unit').id,
'seller_ids': [(0, 0, {
'delay': 1,
'name': supplier_dropship.id,
'min_qty': 2.0
})]
})
# Create a sales order with a line of 200 PCE incoming shipment, with route_id drop shipping
so_form = Form(self.env['sale.order'])
so_form.partner_id = self.env['res.partner'].create({'name': 'My Test Partner'})
so_form.payment_term_id = self.env.ref('account.account_payment_term_end_following_month')
with mute_logger('odoo.tests.common.onchange'):
# otherwise complains that there's not enough inventory and
# apparently that's normal according to @jco and @sle
with so_form.order_line.new() as line:
line.product_id = drop_shop_product
line.product_uom_qty = 200
line.price_unit = 1.00
line.route_id = self.env.ref('stock_dropshipping.route_drop_shipping')
sale_order_drp_shpng = so_form.save()
# Confirm sales order
sale_order_drp_shpng.action_confirm()
# Check the sales order created a procurement group which has a procurement of 200 pieces
self.assertTrue(sale_order_drp_shpng.procurement_group_id, 'SO should have procurement group')
# Check a quotation was created to a certain vendor and confirm so it becomes a confirmed purchase order
purchase = self.env['purchase.order'].search([('partner_id', '=', supplier_dropship.id)])
self.assertTrue(purchase, "an RFQ should have been created by the scheduler")
purchase.button_confirm()
self.assertEqual(purchase.state, 'purchase', 'Purchase order should be in the approved state')
        self.assertEqual(len(purchase.ids), 1, 'There should be exactly one purchase order')
# Send the 200 pieces
purchase.picking_ids.move_lines.quantity_done = purchase.picking_ids.move_lines.product_qty
purchase.picking_ids.button_validate()
# Check one move line was created in Customers location with 200 pieces
move_line = self.env['stock.move.line'].search([
('location_dest_id', '=', self.env.ref('stock.stock_location_customers').id),
('product_id', '=', drop_shop_product.id)])
self.assertEqual(len(move_line.ids), 1, 'There should be exactly one move line')
|
"""Request handler classes for the extension"""
import base64
import json
import tornado.gen as gen
from notebook.base.handlers import APIHandler, app_log
from jupyterlab_ucaip.service import UCAIPService, ManagementService
handlers = {}
def _create_handler(req_type, handler):
class Handler(APIHandler):
"""Handles the request types sent from the frontend"""
if req_type == "GET":
@gen.coroutine
def get(self, _input=""):
args = {k: self.get_argument(k) for k in self.request.arguments}
try:
self.finish(json.dumps(handler(args)))
except Exception as e:
self._handle_exception(e)
elif req_type == "POST":
@gen.coroutine
def post(self, _input=""):
args = self.get_json_body()
try:
self.finish(json.dumps(handler(args)))
except Exception as e:
self._handle_exception(e)
def _handle_exception(self, e):
app_log.exception(str(e))
self.set_status(500, str(e))
self.finish({"error": {"message": str(e)}})
return Handler
def _handler(request_type, endpoint):
def decorator(func):
handlers[endpoint] = _create_handler(request_type, func)
return func
return decorator
@_handler("GET", "datasets")
def _list_datasets(_):
return UCAIPService.get().get_datasets()
@_handler("GET", "datasetDetails")
def _get_dataset_details(args):
return UCAIPService.get().get_dataset_details(args["datasetId"])
@_handler("GET", "models")
def _list_models(_):
return UCAIPService.get().get_models()
@_handler("GET", "modelEvaluation")
def _list_model_evaluations(args):
return UCAIPService.get().get_model_evaluation(args["modelId"])
@_handler("GET", "pipeline")
def _get_pipeline(args):
return UCAIPService.get().get_training_pipeline(args["pipelineId"])
@_handler("GET", "pipelines")
def _get_pipelines(_):
return UCAIPService.get().get_training_pipelines()
@_handler("POST", "getEndpoints")
def _get_endpoints(args):
return UCAIPService.get().get_endpoints(model_id=args["modelId"])
@_handler("POST", "checkDeploying")
def _check_deploying(args):
return UCAIPService.get().check_deploying(model_name=args["modelName"])
@_handler("POST", "deployModel")
def _deploy_model(args):
UCAIPService.get().deploy_model(model_id=args["modelId"])
return {"success": True}
@_handler("POST", "undeployModel")
def _undeploy_model(args):
UCAIPService.get().undeploy_model(deployed_model_id=args["deployedModelId"],
endpoint_id=args["endpointId"])
return {"success": True}
@_handler("POST", "deleteEndpoint")
def _delete_endpoint(args):
UCAIPService.get().delete_endpoint(endpoint_id=args["endpointId"])
return {"success": True}
@_handler("POST", "predict")
def _predict_tables(args):
return UCAIPService.get().predict_tables(endpoint_id=args["endpointId"],
instance=args["inputs"])
@_handler("GET", "tableInfo")
def _table_info(args):
return UCAIPService.get().get_table_specs(args["datasetId"])
@_handler("POST", "deleteDataset")
def _delete_dataset(args):
UCAIPService.get().dataset_client.delete_dataset(name=args["datasetId"])
return {"success": True}
@_handler("POST", "deleteModel")
def _delete_model(args):
UCAIPService.get().model_client.delete_model(name=args["modelId"])
return {"success": True}
@_handler("GET", "managedServices")
def _managed_services(_):
return ManagementService.get().get_managed_services()
@_handler("GET", "project")
def _project(_):
return ManagementService.get().get_project()
@_handler("POST", "createTablesDataset")
def _create_tables_dataset(args):
file_source = args.get("fileSource")
if file_source:
decoded = base64.decodebytes(file_source["data"])
UCAIPService.get().create_dataset_from_file(
display_name=args["displayName"],
file_name=file_source["name"],
file_data=decoded)
else:
UCAIPService.get().create_dataset(display_name=args["displayName"],
gcs_uri=args.get("gcsSource"),
bigquery_uri=args.get("bigquerySource"))
return {"success": True}
|
# This file is managed by the 'airflow' file bundle and updated automatically when `meltano upgrade` is run.
# To prevent any manual changes from being overwritten, remove the file bundle from `meltano.yml` or disable automatic updates:
# meltano config --plugin-type=files airflow set _update orchestrate/dags/meltano.py false
# If you want to define a custom DAG, create
# a new file under orchestrate/dags/ and Airflow
# will pick it up automatically.
import os
import logging
import subprocess
import json
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from datetime import timedelta
from pathlib import Path
logger = logging.getLogger(__name__)
DEFAULT_ARGS = {
"owner": "airflow",
"depends_on_past": False,
"email_on_failure": False,
"email_on_retry": False,
"catchup": False,
"retries": 1,
"retry_delay": timedelta(minutes=5),
"concurrency": 1,
}
project_root = os.getenv("MELTANO_PROJECT_ROOT", os.getcwd())
meltano_bin = ".meltano/run/bin"
if not Path(project_root).joinpath(meltano_bin).exists():
logger.warning(f"A symlink to the 'meltano' executable could not be found at '{meltano_bin}'. Falling back on expecting it to be in the PATH instead.")
meltano_bin = "meltano"
result = subprocess.run(
[meltano_bin, "schedule", "list", "--format=json"],
cwd=project_root,
stdout=subprocess.PIPE,
universal_newlines=True,
check=True,
)
schedules = json.loads(result.stdout)
for schedule in schedules:
logger.info(f"Considering schedule '{schedule['name']}': {schedule}")
if not schedule["cron_interval"]:
logger.info(
f"No DAG created for schedule '{schedule['name']}' because its interval is set to `@once`."
)
continue
args = DEFAULT_ARGS.copy()
if schedule["start_date"]:
args["start_date"] = schedule["start_date"]
dag_id = f"meltano_{schedule['name']}"
# from https://airflow.apache.org/docs/stable/scheduler.html#backfill-and-catchup
#
    # It is crucial to set `catchup` to False so that Airflow only creates a single job
    # at the tail end of the date window we want to extract data for.
    #
    # Because our extractors do not support date-window extraction, it serves no
    # purpose to enqueue date-chunked jobs for the complete extraction window.
dag = DAG(
dag_id, catchup=False, default_args=args, schedule_interval=schedule["interval"], max_active_runs=1
)
elt = BashOperator(
task_id="extract_load",
bash_command=f"cd {project_root}; {meltano_bin} elt {' '.join(schedule['elt_args'])}",
dag=dag,
env={
# inherit the current env
**os.environ,
**schedule["env"],
},
)
# register the dag
globals()[dag_id] = dag
logger.info(f"DAG created for schedule '{schedule['name']}'")
|
# Generated by Django 2.0.4 on 2018-05-05 11:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('source', '0005_auto_20180505_0833'),
]
operations = [
migrations.AlterField(
model_name='source',
name='jnl_cidade',
field=models.IntegerField(default=0, verbose_name='Cidade'),
),
migrations.AlterField(
model_name='source',
name='jnl_scielo',
field=models.IntegerField(default=0, verbose_name='Scielo'),
),
]
|
from pydgn.training.event.state import State
class EventHandler:
r"""
Interface that adheres to the publisher/subscribe pattern for training. It defines the main methods
that a subscriber should implement. Each subscriber can make use of the
:class:`~training.event.state.State` object that is passed to each method, so detailed knowledge
about that object is required.
This class defines a set of callbacks that should cover a sufficient number of use cases. These are meant to work
closely with the :class:`~training.callback.engine.TrainingEngine` object, which implements the overall training and
    evaluation process. This training engine is fairly general to accommodate a number of situations, so we expect we
won't need to change it much to deal with static graph problems.
We list below some pre/post conditions for each method that depend on the current implementation of the main
training engine :class:`~training.callback.engine.TrainingEngine`. These are clearly not strict conditions, but
they can help design new training engines with their own publisher/subscriber patterns or create subclasses
of :class:`~training.callback.engine.TrainingEngine` that require special modifications.
"""
ON_FETCH_DATA = "on_fetch_data"
ON_FIT_START = "on_fit_start"
ON_FIT_END = "on_fit_end"
ON_EPOCH_START = "on_epoch_start"
ON_EPOCH_END = "on_epoch_end"
ON_TRAINING_EPOCH_START = "on_training_epoch_start"
ON_TRAINING_EPOCH_END = "on_training_epoch_end"
ON_EVAL_EPOCH_START = "on_eval_epoch_start"
ON_EVAL_EPOCH_END = "on_eval_epoch_end"
ON_TRAINING_BATCH_START = "on_training_batch_start"
ON_TRAINING_BATCH_END = "on_training_batch_end"
ON_EVAL_BATCH_START = "on_eval_batch_start"
ON_EVAL_BATCH_END = "on_eval_batch_end"
ON_FORWARD = 'on_forward'
ON_BACKWARD = "on_backward"
ON_COMPUTE_METRICS = "on_compute_metrics"
def on_fetch_data(self, state: State):
"""
Load the next batch of data, possibly applying some kind of additional pre-processing not
included in the :mod:`~pydgn.data.transform` package.
Args:
state (:class:`~training.event.state.State`): object holding training information
Pre-condition:
The data loader is contained in ``state.loader_iterable`` and the minibatch ID (i.e., a counter) is stored
            in ``state.id_batch``
Post-condition:
The ``state`` object now has a field ``batch_input`` with the next batch of data
"""
pass
def on_fit_start(self, state: State):
"""
Initialize an object at the beginning of the training phase, e.g., the internals of an optimizer,
using the information contained in ``state``.
Args:
state (:class:`~training.event.state.State`): object holding training information
Pre-condition:
The following fields have been initialized:
* ``state.initial_epoch``: the initial epoch from which to start/resume training
* ``state.stop_training``: do/don't train the model
* ``state.optimizer_state``: the internal state of the optimizer (can be ``None``)
* ``state.scheduler_state``: the internal state of the scheduler (can be ``None``)
* ``state.best_epoch_results``: a dictionary with the best results computed so far (can be used when resuming training, either for early stopping or to keep some information about the last checkpoint).
"""
pass
def on_fit_end(self, state: State):
"""
Training has ended, free all resources, e.g., close Tensorboard writers.
Args:
state (:class:`~training.event.state.State`): object holding training information
"""
pass
def on_epoch_start(self, state: State):
"""
Initialize/reset some internal state at the start of a training/evaluation epoch.
Args:
state (:class:`~training.event.state.State`): object holding training information
Pre-condition:
* The following fields have been initialized:
* ``state.epoch``: the current epoch
* ``state.return_node_embeddings``: do/don't return node_embeddings for each graph at the end of the epoch
"""
pass
def on_epoch_end(self, state: State):
"""
Perform bookkeeping operations at the end of an epoch, e.g., early stopping, plotting, etc.
Args:
state (:class:`~training.event.state.State`): object holding training information
Pre-condition:
The following fields have been initialized:
* ``state.epoch_loss``: a dictionary containing the aggregated loss value across all minibatches
* ``state.epoch_score``: a dictionary containing the aggregated score value across all minibatches
Post-condition:
The following fields have been initialized:
* ``state.stop_training``: do/don't train the model
* ``state.optimizer_state``: the internal state of the optimizer (can be ``None``)
* ``state.scheduler_state``: the internal state of the scheduler (can be ``None``)
* ``state.best_epoch_results``: a dictionary with the best results computed so far (can be used when resuming training, either for early stopping or to keep some information about the last checkpoint).
"""
pass
def on_training_epoch_start(self, state: State):
"""
Initialize/reset some internal state at the start of a training epoch.
Args:
state (:class:`~training.event.state.State`): object holding training information
Pre-condition:
The following fields have been initialized:
* ``state.set``: it must be set to :const:`~pydgn.static.TRAINING`
"""
pass
def on_training_epoch_end(self, state: State):
"""
Initialize/reset some internal state at the end of a training epoch.
Args:
state (:class:`~training.event.state.State`): object holding training information
Post-condition:
The following fields have been initialized:
* ``state.epoch_loss``: a dictionary containing the aggregated loss value across all minibatches
* ``state.epoch_score``: a dictionary containing the aggregated score value across all minibatches
"""
pass
def on_eval_epoch_start(self, state: State):
"""
Initialize/reset some internal state at the start of an evaluation epoch.
Args:
state (:class:`~training.event.state.State`): object holding training information
Pre-condition:
The following fields have been initialized:
* ``state.set``: the dataset type (can be :const:`~pydgn.static.TRAINING`, :const:`~pydgn.static.VALIDATION` or :const:`~pydgn.static.TEST`)
"""
pass
def on_eval_epoch_end(self, state: State):
"""
Initialize/reset some internal state at the end of an evaluation epoch.
Args:
state (:class:`~training.event.state.State`): object holding training information
Post-condition:
The following fields have been initialized:
* ``state.epoch_loss``: a dictionary containing the aggregated loss value across all minibatches
* ``state.epoch_score``: a dictionary containing the aggregated score value across all minibatches
"""
pass
def on_training_batch_start(self, state: State):
"""
Initialize/reset some internal state before training on a new minibatch of data.
Args:
state (:class:`~training.event.state.State`): object holding training information
Pre-condition:
The following fields have been initialized:
* ``state.set``: it must be set to :const:`~pydgn.static.TRAINING`
* ``state.batch_input``: the input to be fed to the model
            * ``state.batch_targets``: the ground truth values to be fed to the model (if any, otherwise a dummy value can be used)
* ``state.batch_num_graphs``: the total number of graphs in the minibatch
* ``state.batch_num_nodes``: the total number of nodes in the minibatch
* ``state.batch_num_targets``: the total number of ground truth values in the minibatch
"""
pass
def on_training_batch_end(self, state: State):
"""
Initialize/reset some internal state after training on a new minibatch of data.
Args:
state (:class:`~training.event.state.State`): object holding training information
Pre-condition:
The following fields have been initialized:
* ``state.set``: it must be set to :const:`~pydgn.static.TRAINING`
* ``state.batch_num_graphs``: the total number of graphs in the minibatch
* ``state.batch_num_nodes``: the total number of nodes in the minibatch
* ``state.batch_num_targets``: the total number of ground truth values in the minibatch
* ``state.batch_loss``: a dictionary holding the loss of the minibatch
* ``state.batch_loss_extra``: a dictionary containing extra info, e.g., intermediate loss scores etc.
* ``state.batch_score``: a dictionary holding the score of the minibatch
"""
pass
def on_eval_batch_start(self, state: State):
"""
Initialize/reset some internal state before evaluating on a new minibatch of data.
Args:
state (:class:`~training.event.state.State`): object holding training information
Pre-condition:
The following fields have been initialized:
* ``state.set``: the dataset type (can be :const:`~pydgn.static.TRAINING`, :const:`~pydgn.static.VALIDATION` or :const:`~pydgn.static.TEST`)
* ``state.batch_input``: the input to be fed to the model
            * ``state.batch_targets``: the ground truth values to be fed to the model (if any, otherwise a dummy value can be used)
* ``state.batch_num_graphs``: the total number of graphs in the minibatch
* ``state.batch_num_nodes``: the total number of nodes in the minibatch
* ``state.batch_num_targets``: the total number of ground truth values in the minibatch
"""
pass
def on_eval_batch_end(self, state: State):
"""
Initialize/reset some internal state after evaluating on a new minibatch of data.
Args:
state (:class:`~training.event.state.State`): object holding training information
Pre-condition:
The following fields have been initialized:
* ``state.set``: the dataset type (can be :const:`~pydgn.static.TRAINING`, :const:`~pydgn.static.VALIDATION` or :const:`~pydgn.static.TEST`)
* ``state.batch_num_graphs``: the total number of graphs in the minibatch
* ``state.batch_num_nodes``: the total number of nodes in the minibatch
* ``state.batch_num_targets``: the total number of ground truth values in the minibatch
* ``state.batch_loss``: a dictionary holding the loss of the minibatch
* ``state.batch_loss_extra``: a dictionary containing extra info, e.g., intermediate loss scores etc.
* ``state.batch_score``: a dictionary holding the score of the minibatch
"""
pass
def on_forward(self, state: State):
"""
Feed the input data to the model.
Args:
state (:class:`~training.event.state.State`): object holding training information
Pre-condition:
The following fields have been initialized:
* ``state.batch_input``: the input to be fed to the model
            * ``state.batch_targets``: the ground truth values to be fed to the model (if any, otherwise a dummy value can be used)
Post-condition:
The following fields have been initialized:
            * ``state.batch_outputs``: the output produced by the model (a tuple of values)
"""
pass
def on_backward(self, state: State):
"""
Updates the parameters of the model using loss information.
Args:
state (:class:`~training.event.state.State`): object holding training information
Pre-condition:
The following fields have been initialized:
* ``state.batch_loss``: a dictionary holding the loss of the minibatch
"""
pass
def on_compute_metrics(self, state: State):
"""
Computes the metrics of interest using the output and ground truth information obtained so far.
The loss-related subscriber MUST be called before the score-related one
Args:
state (:class:`~training.event.state.State`): object holding training information
Pre-condition:
The following fields have been initialized:
* ``state.batch_input``: the input to be fed to the model
            * ``state.batch_targets``: the ground truth values to be fed to the model (if any, otherwise a dummy value can be used)
            * ``state.batch_outputs``: the output produced by the model (a tuple of values)
Post-condition:
The following fields have been initialized:
* ``state.batch_loss``: a dictionary holding the loss of the minibatch
* ``state.batch_loss_extra``: a dictionary containing extra info, e.g., intermediate loss scores etc.
* ``state.batch_score``: a dictionary holding the score of the minibatch
"""
pass
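# A minimal sketch of a concrete subscriber (illustrative only, not part of pydgn):
# override just the callbacks you need and rely on the fields that the docstrings
# above guarantee to be present.
class LossPrinter(EventHandler):
    """Prints the aggregated loss at the end of every epoch."""
    def on_epoch_end(self, state: State):
        # ``state.epoch`` and ``state.epoch_loss`` are guaranteed by the
        # pre-conditions of on_epoch_start/on_epoch_end documented above
        print(f"epoch {state.epoch}: loss={state.epoch_loss}")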
|
import ksl_env
import os
import pandas as pd
from Climate_Shocks.get_past_record import get_vcsn_record, event_def_path
import matplotlib.pyplot as plt
ksl_env.add_basgra_nz_path()
from basgra_python import run_basgra_nz
from supporting_functions.plotting import plot_multiple_results
from supporting_functions.woodward_2020_params import get_woodward_mean_full_params
from input_output_keys import matrix_weather_keys_pet
if __name__ == '__main__':
detrend = pd.read_csv(os.path.join(os.path.dirname(event_def_path), 'daily_percentiles_detrended_v2.csv'))
trend = pd.read_csv(os.path.join(os.path.dirname(event_def_path), 'daily_percentiles.csv'))
for d in [detrend, trend]:
d.loc[:, 'date'] = pd.to_datetime(d.loc[:, 'date'])
d.loc[:, 'month'] = d.loc[:, 'date'].dt.month
d.set_index('date', inplace=True)
data = {
'detrend': detrend,
'trend': trend,
}
out_vars = ['hot_per', 'cold_per', 'dry_per', 'wet_per', ]
plot_multiple_results(data, out_vars=out_vars)
vcsn2 = get_vcsn_record('detrended2')
vcsn2.loc[vcsn2.tmax >= 25, 'hotday'] = 1
vcsn2 = vcsn2.groupby('year').sum()
vcsn = get_vcsn_record()
vcsn.loc[vcsn.tmax >= 25, 'hotday'] = 1
vcsn = vcsn.groupby('year').sum()
data = {'trend': vcsn,
'detrend': vcsn2}
plot_multiple_results(data, out_vars=['hotday'])
data = {
'trend': get_vcsn_record(),
'detrend2': get_vcsn_record('detrended2')
}
diff = detrend - trend
diff.loc[:, 'month'] = detrend.loc[:, 'month']
dif2 = diff.groupby('month').mean()
diff.to_csv(r"C:\Users\Matt Hanson\Downloads\detrend-trend_dif_raw.csv")
dif2.to_csv(r"C:\Users\Matt Hanson\Downloads\detrend-trend_dif_monthly.csv")
out_vars = ['doy', 'pet', 'radn', 'tmax', 'tmin', 'rain']
plot_multiple_results(data, out_vars=out_vars, rolling=5,
main_kwargs={'alpha': 0.5})
|
"""CLI to get data from a MetaGenScope Server."""
from sys import stderr
import click
from requests.exceptions import HTTPError
from .utils import add_authorization
@click.group()
def get():
"""Get data from the server."""
pass
@get.command(name='orgs')
@add_authorization()
def get_orgs(uploader):
"""Get a list of organizations."""
try:
response = uploader.knex.get('/api/v1/organizations')
click.echo(response)
except HTTPError as exc:
print(f'{exc}', file=stderr)
@get.group()
def uuids():
"""Get UUIDs from the server."""
pass
def report_uuid(name, uuid):
"""Report a uuid to the user."""
click.echo(f'{name}\t{uuid}')
@uuids.command(name='samples')
@add_authorization()
@click.argument('sample_names', nargs=-1)
def sample_uuids(uploader, sample_names):
"""Get UUIDs for the given sample names."""
for sample_name in sample_names:
response = uploader.knex.get(f'/api/v1/samples/getid/{sample_name}')
report_uuid(response['data']['sample_name'],
response['data']['sample_uuid'])
@uuids.command(name='groups')
@add_authorization()
@click.argument('sample_group_names', nargs=-1)
def sample_group_uuids(uploader, sample_group_names):
"""Get UUIDs for the given sample groups."""
for sample_group_name in sample_group_names:
try:
response = uploader.knex.get(f'/api/v1/sample_groups/getid/{sample_group_name}')
report_uuid(response['data']['sample_group_name'],
response['data']['sample_group_uuid'])
except Exception: # pylint: disable=broad-except
print(f'Failed to get uuid for {sample_group_name}', file=stderr)
@uuids.command(name='orgs')
@add_authorization()
@click.argument('org_names', nargs=-1)
def org_uuids(uploader, org_names):
"""Get UUIDs for the given sample groups."""
for org_name in org_names:
try:
response = uploader.knex.get(f'/api/v1/organizations/getid/{org_name}')
report_uuid(response['data']['organization_name'],
response['data']['organization_uuid'])
except Exception: # pylint: disable=broad-except
print(f'Failed to get uuid for {org_name}', file=stderr)
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from .card_dialog import CardDialog
from .card_options import CardOptions
from .card_sample_helper import CardSampleHelper
from .channel_supported_cards import ChannelSupportedCards
__all__ = ["CardDialog", "CardOptions", "CardSampleHelper", "ChannelSupportedCards"]
|
# 1 DOF SYSTEM
import numpy as np
import matplotlib.pyplot as plt
import libraryTugas as lib
# 1. SYSTEMS PARAMETERS
#================
# a. Initial condition
x_init1, xDot_init1 = 0.1, 0 # [m], [m/s]
# b. System parameters
mass1, damp1, spring1 = 1, 1, 10 # [kg], [Ns/m], [N/m]
omega, forceMagnit = 2, 0.5 # [rad/s], [Newton]--> SYSTEM INPUT
# c. Time parameters
timeStart, timeStop, stepTime = 0, 30, 0.001 # [S]
# d. Define System MODEL!
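#    Equation of motion: mass1*xDotDot + damp1*xDot + spring1*x = forceMagnit*sin(omega*t)
#    with state vector y = [x, xDot]; systemFunction returns xDotDot for the RK4 solver.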
def systemFunction (y, t):
xDotDot = np.zeros((1,1), dtype = float)
xDotDot[0] = (forceMagnit*np.sin(omega*t)-damp1*float(y[1])-spring1*float(y[0]))/mass1
return xDotDot #*np.sin(omega*t)
# 2. INITIALIZE SIMULATION
# a. Simulation time
time = np.arange(timeStart, timeStop, stepTime, dtype = float)
# b. Initial Condition of THE state
y = np.array([[x_init1], [xDot_init1]], dtype = float)
# 3. SOLVING EQUATION OF MOTION USING RUNGE KUTTA 4th ORDER!!
position, velocity, acceleration = lib.rungeKutta4(y, time,
systemFunction, stepTime)
# 4. PLOTTING RESULTS!!
plt.figure(1)
plt.plot(time, position)
title = "position plot [step time = %1.6f s]" % stepTime
plt.title(title)
plt.ylabel('displacement [m]')
plt.xlabel('time [s]')
plt.grid(True)
plt.figure(2)
plt.plot(time, velocity)
title = "velocity plot [step time = %1.6f s]" % stepTime
plt.title(title)
plt.ylabel('velocity [m/s]')
plt.xlabel('time [s]')
plt.grid(True)
plt.figure(3)
plt.plot(time, acceleration)
title = "acceleration plot [step time = %1.6f s]" % stepTime
plt.title(title)
plt.ylabel('acceleration [m/s/s]')
plt.xlabel('time [s]')
plt.grid(True)
plt.show()
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : May 23, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : brush.tyler@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.PyQt.QtWidgets import QApplication
from ..info_model import TableInfo, VectorTableInfo, RasterTableInfo, DatabaseInfo
from ..html_elems import HtmlSection, HtmlParagraph, HtmlTable, HtmlTableHeader, HtmlTableCol
class PGDatabaseInfo(DatabaseInfo):
def connectionDetails(self):
tbl = [
(QApplication.translate("DBManagerPlugin", "Host:"), self.db.connector.host),
(QApplication.translate("DBManagerPlugin", "User:"), self.db.connector.user),
(QApplication.translate("DBManagerPlugin", "Database:"), self.db.connector.dbname)
]
return HtmlTable(tbl)
class PGTableInfo(TableInfo):
def __init__(self, table):
super(PGTableInfo, self).__init__(table)
self.table = table
def generalInfo(self):
ret = []
# if the estimation is less than 100 rows, try to count them - it shouldn't take long time
if self.table.rowCount is None and self.table.estimatedRowCount < 100:
# row count information is not displayed yet, so just block
# table signals to avoid double refreshing (infoViewer->refreshRowCount->tableChanged->infoViewer)
self.table.blockSignals(True)
self.table.refreshRowCount()
self.table.blockSignals(False)
tbl = [
(QApplication.translate("DBManagerPlugin", "Relation type:"),
QApplication.translate("DBManagerPlugin", "View") if self.table._relationType == 'v' else
QApplication.translate("DBManagerPlugin", "Materialized view") if self.table._relationType == 'm' else
QApplication.translate("DBManagerPlugin", "Table")),
(QApplication.translate("DBManagerPlugin", "Owner:"), self.table.owner)
]
if self.table.comment:
tbl.append((QApplication.translate("DBManagerPlugin", "Comment:"), self.table.comment))
tbl.extend([
(QApplication.translate("DBManagerPlugin", "Pages:"), self.table.pages),
(QApplication.translate("DBManagerPlugin", "Rows (estimation):"), self.table.estimatedRowCount)
])
# privileges
# has the user access to this schema?
schema_priv = self.table.database().connector.getSchemaPrivileges(
self.table.schemaName()) if self.table.schema() else None
if schema_priv is None:
pass
elif not schema_priv[1]: # no usage privileges on the schema
tbl.append((QApplication.translate("DBManagerPlugin", "Privileges:"),
QApplication.translate("DBManagerPlugin",
"<warning> This user doesn't have usage privileges for this schema!")))
else:
table_priv = self.table.database().connector.getTablePrivileges((self.table.schemaName(), self.table.name))
privileges = []
if table_priv[0]:
privileges.append("select")
if self.table.rowCount is not None and self.table.rowCount >= 0:
tbl.append((QApplication.translate("DBManagerPlugin", "Rows (counted):"),
self.table.rowCount if self.table.rowCount is not None else QApplication.translate(
"DBManagerPlugin", 'Unknown (<a href="action:rows/count">find out</a>)')))
if table_priv[1]:
privileges.append("insert")
if table_priv[2]:
privileges.append("update")
if table_priv[3]:
privileges.append("delete")
priv_string = u", ".join(privileges) if len(privileges) > 0 else QApplication.translate("DBManagerPlugin",
'<warning> This user has no privileges!')
tbl.append((QApplication.translate("DBManagerPlugin", "Privileges:"), priv_string))
ret.append(HtmlTable(tbl))
if schema_priv is not None and schema_priv[1]:
if table_priv[0] and not table_priv[1] and not table_priv[2] and not table_priv[3]:
ret.append(HtmlParagraph(
QApplication.translate("DBManagerPlugin", "<warning> This user has read-only privileges.")))
if not self.table.isView:
if self.table.rowCount is not None:
if abs(self.table.estimatedRowCount - self.table.rowCount) > 1 and \
(self.table.estimatedRowCount > 2 * self.table.rowCount
or self.table.rowCount > 2 * self.table.estimatedRowCount):
ret.append(HtmlParagraph(QApplication.translate("DBManagerPlugin",
"<warning> There's a significant difference between estimated and real row count. "
'Consider running <a href="action:vacuumanalyze/run">VACUUM ANALYZE</a>.')))
# primary key defined?
if not self.table.isView:
if len([fld for fld in self.table.fields() if fld.primaryKey]) <= 0:
ret.append(HtmlParagraph(
QApplication.translate("DBManagerPlugin", "<warning> No primary key defined for this table!")))
return ret
def getSpatialInfo(self):
ret = []
info = self.db.connector.getSpatialInfo()
if info is None:
return
tbl = [
(QApplication.translate("DBManagerPlugin", "Library:"), info[0]),
(QApplication.translate("DBManagerPlugin", "Scripts:"), info[3]),
("GEOS:", info[1]),
("Proj:", info[2])
]
ret.append(HtmlTable(tbl))
if info[1] is not None and info[1] != info[2]:
ret.append(HtmlParagraph(QApplication.translate("DBManagerPlugin",
"<warning> Version of installed scripts doesn't match version of released scripts!\n"
"This is probably a result of incorrect PostGIS upgrade.")))
if not self.db.connector.has_geometry_columns:
ret.append(HtmlParagraph(
QApplication.translate("DBManagerPlugin", "<warning> geometry_columns table doesn't exist!\n"
"This table is essential for many GIS applications for enumeration of tables.")))
elif not self.db.connector.has_geometry_columns_access:
ret.append(HtmlParagraph(QApplication.translate("DBManagerPlugin",
"<warning> This user doesn't have privileges to read contents of geometry_columns table!\n"
"This table is essential for many GIS applications for enumeration of tables.")))
return ret
def fieldsDetails(self):
tbl = []
# define the table header
header = (
"#", QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Type"),
QApplication.translate("DBManagerPlugin", "Length"), QApplication.translate("DBManagerPlugin", "Null"),
QApplication.translate("DBManagerPlugin", "Default"), QApplication.translate("DBManagerPlugin", "Comment"))
tbl.append(HtmlTableHeader(header))
# add table contents
for fld in self.table.fields():
char_max_len = fld.charMaxLen if fld.charMaxLen is not None and fld.charMaxLen != -1 else ""
is_null_txt = "N" if fld.notNull else "Y"
# make primary key field underlined
attrs = {"class": "underline"} if fld.primaryKey else None
name = HtmlTableCol(fld.name, attrs)
tbl.append((fld.num, name, fld.type2String(), char_max_len, is_null_txt, fld.default2String(), fld.getComment()))
return HtmlTable(tbl, {"class": "header"})
def triggersDetails(self):
if self.table.triggers() is None or len(self.table.triggers()) <= 0:
return None
ret = []
tbl = []
# define the table header
header = (
QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Function"),
QApplication.translate("DBManagerPlugin", "Type"), QApplication.translate("DBManagerPlugin", "Enabled"))
tbl.append(HtmlTableHeader(header))
# add table contents
for trig in self.table.triggers():
name = u'%(name)s (<a href="action:trigger/%(name)s/%(action)s">%(action)s</a>)' % {"name": trig.name,
"action": "delete"}
(enabled, action) = (QApplication.translate("DBManagerPlugin", "Yes"), "disable") if trig.enabled else (
QApplication.translate("DBManagerPlugin", "No"), "enable")
txt_enabled = u'%(enabled)s (<a href="action:trigger/%(name)s/%(action)s">%(action)s</a>)' % {
"name": trig.name, "action": action, "enabled": enabled}
tbl.append((name, trig.function, trig.type2String(), txt_enabled))
ret.append(HtmlTable(tbl, {"class": "header"}))
ret.append(HtmlParagraph(QApplication.translate("DBManagerPlugin",
'<a href="action:triggers/enable">Enable all triggers</a> / <a href="action:triggers/disable">Disable all triggers</a>')))
return ret
def rulesDetails(self):
if self.table.rules() is None or len(self.table.rules()) <= 0:
return None
tbl = []
# define the table header
header = (
QApplication.translate("DBManagerPlugin", "Name"), QApplication.translate("DBManagerPlugin", "Definition"))
tbl.append(HtmlTableHeader(header))
# add table contents
for rule in self.table.rules():
name = u'%(name)s (<a href="action:rule/%(name)s/%(action)s">%(action)s</a>)' % {"name": rule.name,
"action": "delete"}
tbl.append((name, rule.definition))
return HtmlTable(tbl, {"class": "header"})
def getTableInfo(self):
ret = TableInfo.getTableInfo(self)
# rules
rules_details = self.rulesDetails()
if rules_details is None:
pass
else:
ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'Rules'), rules_details))
return ret
class PGVectorTableInfo(PGTableInfo, VectorTableInfo):
def __init__(self, table):
VectorTableInfo.__init__(self, table)
PGTableInfo.__init__(self, table)
def spatialInfo(self):
return VectorTableInfo.spatialInfo(self)
class PGRasterTableInfo(PGTableInfo, RasterTableInfo):
def __init__(self, table):
RasterTableInfo.__init__(self, table)
PGTableInfo.__init__(self, table)
def spatialInfo(self):
return RasterTableInfo.spatialInfo(self)
|
from fastapi import HTTPException, status
InvalidCredentials = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail='Incorrect username or password',
headers={'WWW-Authenticate': 'Basic'},
)
UserAlreadyExists = HTTPException(
status_code=status.HTTP_409_CONFLICT,
detail='User already exists',
)
UserNotFound = HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail='User was not found',
)
FilmNotFound = HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail='Film was not found',
)
FilmAlreadyExists = HTTPException(
status_code=status.HTTP_409_CONFLICT,
detail='Film already exists',
)
InternalError = HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail='Some internal error occurred',
)
ForbiddenAction = HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail='You are not allowed to do this action',
)
InvalidReview = HTTPException(
status_code=status.HTTP_409_CONFLICT,
detail='Invalid review data',
)
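# A minimal usage sketch (hypothetical route, not part of this module): these
# predefined exceptions are simply raised inside FastAPI path operations.
if __name__ == '__main__':
    from fastapi import FastAPI
    app = FastAPI()
    @app.get('/films/{film_id}')
    def read_film(film_id: int):
        film = None  # a real lookup would go here
        if film is None:
            raise FilmNotFound
        return film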
|
#!/usr/bin/env python3
import os
import pandas as pd
import argparse
def get_tag(x, tag):
try:
return x[tag]
except:
return False
def main():
parser = argparse.ArgumentParser(
prog="check_argparse.py",
formatter_class=argparse.RawTextHelpFormatter,
description='''
Check each file in py-scripts, or user defined '''
)
parser.add_argument("--path", default='.')
parser.add_argument("--output", default='argparse_results')
args = parser.parse_args()
files = [f for f in os.listdir(args.path) if '.py' in f]
results = dict()
for file in files:
text = open(os.path.join(args.path, file)).read()
results_file = dict()
results_file['argparse'] = 'argparse.' in text
if results_file['argparse'] is True:
results_file['create_basic'] = 'create_basic_argparse' in text
results_file['create_bare'] = 'create_bare_argparse' in text
results_file['prog'] = 'prog=' in text
results_file['formatter_class'] = 'formatter_class=' in text
results_file['description'] = 'description=' in text
results_file['epilog'] = 'epilog=' in text
results_file['usage'] = 'usage=' in text
results[file] = results_file
df = pd.DataFrame(results.items())
df.columns = ['File', 'results']
df['argparse'] = [x['argparse'] for x in df['results']]
for tag in ['create_basic',
'create_bare',
'prog',
'formatter_class',
'description',
'epilog',
'usage']:
df[tag] = [get_tag(x, tag) for x in df['results']]
df['details'] = df['description'] + df['epilog'] + df['usage']
df.to_csv(args.output + '.csv', index=False)
if __name__ == "__main__":
main()
|
from setuptools import setup
setup(
name='bt_futu_store',
version='1.0',
description='Futu API store for backtrader',
url='',
author='Damon Yuan',
author_email='damon.yuan.dev@gmail.com',
license='MIT',
packages=['btfutu'],
install_requires=['backtrader', 'futu-api'],
)
|
import torch
import torch.nn as nn
class VGGishish(nn.Module):
def __init__(self, conv_layers, use_bn, num_classes):
'''
Mostly from
https://pytorch.org/vision/0.8/_modules/torchvision/models/vgg.html
'''
super().__init__()
layers = []
in_channels = 1
# a list of channels with 'MP' (maxpool) from config
for v in conv_layers:
if v == 'MP':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1, stride=1)
if use_bn:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
self.features = nn.Sequential(*layers)
self.avgpool = nn.AdaptiveAvgPool2d((5, 10))
self.flatten = nn.Flatten()
self.classifier = nn.Sequential(
nn.Linear(512 * 5 * 10, 4096),
nn.ReLU(True),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Linear(4096, num_classes)
)
# weight init
self.reset_parameters()
def forward(self, x):
# adding channel dim for conv2d (B, 1, F, T) <-
x = x.unsqueeze(1)
# backbone (B, 1, 5, 53) <- (B, 1, 80, 860)
x = self.features(x)
# adaptive avg pooling (B, 1, 5, 10) <- (B, 1, 5, 53) – if no MP is used as the end of VGG
x = self.avgpool(x)
# flatten
x = self.flatten(x)
# classify
x = self.classifier(x)
return x
def reset_parameters(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
if __name__ == '__main__':
num_classes = 309
inputs = torch.rand(3, 80, 848)
conv_layers = [64, 64, 'MP', 128, 128, 'MP', 256, 256, 256, 'MP', 512, 512, 512, 'MP', 512, 512, 512]
# conv_layers = [64, 'MP', 128, 'MP', 256, 256, 'MP', 512, 512, 'MP']
model = VGGishish(conv_layers, use_bn=False, num_classes=num_classes)
outputs = model(inputs)
print(outputs.shape)
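    # expected: torch.Size([3, 309]), i.e. (batch, num_classes)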
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: proto/messages.proto
# plugin: python-betterproto
from dataclasses import dataclass
import betterproto
class Target(betterproto.Enum):
UNKNOWN = 0
DOCKER_IMAGE = 1
USER_BACKGROUND_IMAGE = 2
DATA = 3
BASH_SCRIPT = 4
ANSIBLE_SCRIPT = 5
PLAIN_FILE = 6
class StartTransferRequestDirection(betterproto.Enum):
PHONE_TO_DEVICE = 0
DEVICE_TO_PHONE = 1
class StartTransferResponseStatus(betterproto.Enum):
UNKNOWN = 0
TRANSFER = 1
FINISHED = 2
FILE_NOT_FOUND = 3
HDD_FULL = 4
AUTH_FAILED = 5
ERROR = 6
@dataclass
class StartTransferRequest(betterproto.Message):
"""Message to trigger a data transfer. Limited to 185 Bytes for iOS."""
filename: str = betterproto.string_field(1)
hash: bytes = betterproto.bytes_field(2)
chunks: int = betterproto.int32_field(3)
target: "Target" = betterproto.enum_field(4)
direction: "StartTransferRequestDirection" = betterproto.enum_field(5)
@dataclass
class StartTransferResponse(betterproto.Message):
"""Response message to a trigger. Limited to 185 Bytes for iOS."""
filename: str = betterproto.string_field(1)
hash: bytes = betterproto.bytes_field(2)
chunks: int = betterproto.int32_field(3)
next_chunk: int = betterproto.int32_field(4)
target: "Target" = betterproto.enum_field(5)
status: "StartTransferResponseStatus" = betterproto.enum_field(6)
duration: float = betterproto.float_field(7)
size: int = betterproto.uint32_field(8)
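# A minimal round-trip sketch (illustrative only, not produced by the code
# generator): betterproto messages serialize via bytes() and parse via .parse().
if __name__ == "__main__":
    req = StartTransferRequest(
        filename="image.tar",
        hash=b"\x00" * 32,
        chunks=4,
        target=Target.DOCKER_IMAGE,
        direction=StartTransferRequestDirection.PHONE_TO_DEVICE,
    )
    wire = bytes(req)  # encode to the protobuf wire format
    decoded = StartTransferRequest().parse(wire)
    assert decoded.filename == "image.tar"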
|
#!/usr/bin/env python
"""Tests for `sentency` package."""
from sentency.regex import regexize_keywords
def test_regexize_keywords():
keywords = "abdominal aortic aneurysm\naneurysm abdominal aorta"
actual = regexize_keywords(keywords)
expected = "(?i)((abdominal.*aortic.*aneurysm)|(aneurysm.*abdominal.*aorta))"
assert actual == expected
def test_regexize_single():
keywords = "abdominal"
actual = regexize_keywords(keywords)
expected = "(?i)((abdominal))"
assert actual == expected
def test_regexize_case_sensitive():
keywords = "abdominal aortic aneurysm\naneurysm abdominal aorta"
actual = regexize_keywords(keywords, case_insensitive=False)
expected = "((abdominal.*aortic.*aneurysm)|(aneurysm.*abdominal.*aorta))"
assert actual == expected
def test_regexize_only_lines():
keywords = "abdominal\naneurysm"
actual = regexize_keywords(keywords, case_insensitive=False)
expected = "((abdominal)|(aneurysm))"
assert actual == expected
def test_regexize_single_group():
keywords = "abdominal aortic aneurysm"
actual = regexize_keywords(keywords, case_insensitive=False)
expected = "((abdominal.*aortic.*aneurysm))"
assert actual == expected
|
import csv
produceFile = open("produceSalesAltered.csv")
csvReader = csv.reader(produceFile)
produceData = list(csvReader) # Get a list of lists
produceTotals = {}
for csvLine in produceData[1:]:
# Extract data from current line
# [produceType, _, _, total] = csvLine
produceType = csvLine[0] # produce type is in position 0 of list
total = csvLine[3] # produce total is in position 3 of list
if produceType in produceTotals:
produceTotals[produceType] = produceTotals[produceType] + float(total)
else:
produceTotals[produceType] = float(total)
for produceType in sorted(produceTotals):
print('{0} - ${1:.2f}'.format(produceType, produceTotals[produceType]))
|
import os
import unittest
from recipe_scrapers.hundredandonecookbooks import HundredAndOneCookbooks
class TestHundredAndOneCookbooksScraper(unittest.TestCase):
def setUp(self):
# tests are run from tests.py
with open(os.path.join(
os.getcwd(),
'recipe_scrapers',
'tests',
'test_data',
'101cookbooks.testhtml'
)) as file_opened:
self.harvester_class = HundredAndOneCookbooks(file_opened, test=True)
def test_host(self):
self.assertEqual(
'101cookbooks.com',
self.harvester_class.host()
)
def test_title(self):
self.assertEqual(
self.harvester_class.title(),
"Nikki's Healthy Cookies Recipe"
)
def test_total_time(self):
self.assertEqual(
0,
self.harvester_class.total_time()
)
def test_ingredients(self):
self.assertCountEqual(
[
'3 large, ripe bananas, well mashed (about 1 1/2 cups)',
'1 teaspoon vanilla extract',
"1/4 cup coconut oil, barely warm - so it isn't solid (or alternately, olive oil)",
'2 cups rolled oats',
'2/3 cup almond meal',
'1/3 cup coconut, finely shredded & unsweetened',
'1/2 teaspoon cinnamon',
'1/2 teaspoon fine grain sea salt',
'1 teaspoon baking powder',
'6 - 7 ounces chocolate chips or dark chocolate bar chopped'
],
self.harvester_class.ingredients()
)
def test_instructions(self):
return self.assertEqual(
"Preheat oven to 350 degrees, racks in the top third.\nIn a large bowl combine the bananas, vanilla extract, and coconut oil. Set aside. In another bowl whisk together the oats, almond meal, shredded coconut, cinnamon, salt, and baking powder. Add the dry ingredients to the wet ingredients and stir until combined. Fold in the chocolate chunks/chips.The dough is a bit looser than a standard cookie dough, don't worry about it. Drop dollops of the dough, each about 2 teaspoons in size, an inch apart, onto a parchment (or Silpat) lined baking sheet. Bake for 12 - 14 minutes. I baked these as long as possible without burning the bottoms and they were perfect - just shy of 15 minutes seems to be about right in my oven.\nMakes about 3 dozen bite-sized cookies.\nPrint Recipe",
self.harvester_class.instructions()
)
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
# one off?, daily, weekly, monthly
class TaskType(models.Model):
typename=models.CharField(max_length=255)
typedescription=models.CharField(max_length=255, null=True, blank=True)
def __str__(self):
return self.typename
class Meta:
db_table='tasktype'
class Task(models.Model):
taskname=models.CharField(max_length=255)
tasktype=models.ForeignKey(TaskType, on_delete=models.DO_NOTHING)
user=models.ForeignKey(User, on_delete=models.DO_NOTHING)
taskentrydate=models.DateField()
taskurl=models.URLField(null=True, blank=True)
taskdescription=models.TextField()
def __str__(self):
return self.taskname
class Meta:
db_table='task'
class Comment(models.Model):
commenttitle=models.CharField(max_length=255)
commentdate=models.DateField()
task=models.ForeignKey(Task, on_delete=models.DO_NOTHING)
user=models.ManyToManyField(User)
commenttext=models.TextField()
def __str__(self):
return self.commenttitle
class Meta:
db_table='comment'
|
from dask.distributed import Client, progress
from dask import delayed
import dask.bag as db
import pandas as pd
from time import sleep
import json
import os
# create 8 parallel workers
client = Client(n_workers=8)
def computeIDF(documents):
import math
N = len(documents)
idfDict = dict.fromkeys(documents[0].keys(), 0)
for document in documents:
for word, val in document.items():
if val > 0:
idfDict[word] += 1
for word, val in idfDict.items():
idfDict[word] = math.log(N / float(val))
return idfDict
def computeTF(wordDict, bagOfWords):
tfDict = {}
bagOfWordsCount = len(bagOfWords)
for word, count in wordDict.items():
tfDict[word] = count / float(bagOfWordsCount)
return tfDict
def computeTFIDF(tfBagOfWords, idfs):
tfidf = {}
for word, val in tfBagOfWords.items():
tfidf[word] = val * idfs[word]
return tfidf
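# The helpers above implement the standard weighting:
#   tf(w, d)    = count(w, d) / |d|
#   idf(w)      = ln(N / df(w))        # N documents, df(w) = documents containing w
#   tfidf(w, d) = tf(w, d) * idf(w)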
data_dir = "../data/handout/data/Documents/"
df_list = []
for filename in os.listdir(data_dir):
if filename.endswith(".txt"):
df_list.append(db.read_text(data_dir+filename).to_dataframe(columns={"sentence"}))
# print(os.path.join(directory, filename))
continue
else:
continue
All_word_List = []
for item in df_list:
All_words_tmp = []
for index, row in item.iterrows():
result = row["sentence"].split( )
for _ in result:
if _ == "\ufeff" or _ == "\n" or len(_)==1:
continue
else:
All_words_tmp.append(_.strip("!:?,.;'").lower())
All_word_List.append(All_words_tmp)
uniqueWords = set(All_word_List[0]).union(set(All_word_List[1])).union(set(All_word_List[2])).union(set(All_word_List[3])).union(set(All_word_List[4])).union(set(All_word_List[5])).union(set(All_word_List[6])).union(set(All_word_List[7]))
# Per-document term counts over the shared vocabulary.
numOfWords_list = []
for words in All_word_List:
    numOfWords = dict.fromkeys(uniqueWords, 0)
    for word in words:
        numOfWords[word] += 1
    numOfWords_list.append(numOfWords)
# Term frequencies per document.
tf_list = [computeTF(counts, words) for counts, words in zip(numOfWords_list, All_word_List)]
# Inverse document frequencies over the whole corpus.
idfs = computeIDF(numOfWords_list)
# TF-IDF vectors per document.
tfidf_list = [computeTFIDF(tf, idfs) for tf in tf_list]
df = pd.DataFrame(tfidf_list)
# Collect the five highest-scoring terms of each document (printed cumulatively).
top_40_dict = []
for tfidf in tfidf_list:
    sorted_tfidf = sorted(tfidf.items(), key=lambda x: x[1], reverse=True)
    top_40_dict.extend(sorted_tfidf[:5])
    print(top_40_dict)
top_40_dict = dict(top_40_dict)
with open('sp4.json', 'w') as f:
json.dump(top_40_dict, f)
|
"""
Hacemos lo mismo que en el 9, pero en vez de sumar, multiplicamos.
"""
total = 1
input_values_correct = False
"""
Aquí vamos a controlar que introducimos el número de elementos
correcto, para luego pedir las veces especificadas para la multiplicación
total de lo que queremos conseguir
"""
while(input_values_correct == False):
elements = int(input("¿Cuántos números introduciremos? "))
if (elements < 1): print("Hay que introducir 1 ó más")
else : input_values_correct = True
counter = 0
"""
Vamos pidiendo lo necesario
"""
while (counter < elements):
total *= int(input(f"Introduce el número {counter + 1}: "))
counter += 1
print(f"La multiplicación total de {elements} números: {total}")
|
from django.contrib import admin
from mysite.bridie.models import User, Post, Comment
# Register your models here.
class UserAdmin(admin.ModelAdmin):
pass
class PostAdmin(admin.ModelAdmin):
pass
class CommentAdmin(admin.ModelAdmin):
pass
admin.site.register(User, UserAdmin)
admin.site.register(Post, PostAdmin)
admin.site.register(Comment, CommentAdmin)
|
'''
Created on Sep 6, 2015
@author: ace
'''
import argparse
import re
from django.core.management.base import BaseCommand, CommandError
from openpyxl import load_workbook
from account.models import Profile
from account.utils import format_phonenumber
from friend.models import PhoneContactRecord
class Command(BaseCommand):
help = 'Batch import Phone Contact from excel file'
def add_arguments(self, parser):
parser.add_argument('filepath',
type=argparse.FileType('rb'),
help='the filepath on the system to import')
def handle(self, *args, **options):
wb = load_workbook(options['filepath'], use_iterators = True)
ws = wb.worksheets[0]
row_counter = 1
newly_created_counter, updated_counter, err_rows = 0, 0, 0
'''
from_phone_num to_phone_num(comma separated)
1234567890 1234567891, 1234567892, 1234567893, 1234567894
1234567891 1234567895, 1234567896, 1234567897, 1234567898
1234567892 1234567895, 1234567896, 1234567897, 1234567898
'''
phone_num_profile_dict = dict( (profile.phone_num, profile) for profile in Profile.objects.all())
for row in ws.iter_rows(row_offset=1):
row_counter += 1
from_phone_num = str(row[0].value or '')
try:
from_phone_num = format_phonenumber(from_phone_num)
except Exception as e:
raise CommandError('Line: %d encounter error: %s' % (row_counter, e, ) )
from_profile = phone_num_profile_dict.get(from_phone_num)
if not from_profile:
self.stderr.write('Line: %d, no profile for from_phone_num: %s' % (row_counter, from_phone_num,) )
err_rows += 1
continue
# actually in this style means always newly_created
PhoneContactRecord.objects.filter(from_profile=from_profile).delete()
            to_phone_num_strs = re.split('[,， ]+', str(row[1].value or ''))
to_profiles = []
for to_phone_num in to_phone_num_strs:
if not to_phone_num:
continue
try:
to_phone_num = format_phonenumber(to_phone_num)
except Exception as e:
raise CommandError('Line: %d encounter error to_phone_number: %s, %s' % (row_counter, to_phone_num, e, ) )
if to_phone_num in phone_num_profile_dict:
to_profiles.append(phone_num_profile_dict[to_phone_num])
for to_profile in to_profiles:
defaults = {
'to_phone_num': to_profile.phone_num
}
try:
contact_record, newly_created = PhoneContactRecord.objects.get_or_create(from_profile=from_profile,
to_profile=to_profile, defaults=defaults)
except Exception as e:
raise CommandError('Line: %d encounter error to_profile: %s' % (row_counter, e, ) )
else:
if newly_created:
# actually in this style means always newly_created
newly_created_counter += 1
else:
for key, value in defaults.items():
setattr(contact_record, key, value)
contact_record.save()
updated_counter += 1
self.stdout.write('newly created: %d, updated: %d, total: %d, rows: %d, err_rows: %d' % (newly_created_counter,
updated_counter,
newly_created_counter+updated_counter,
row_counter,
err_rows, ) )
|
from typing import List
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
dp_i_0, dp_i_1 = 0, float('-inf')
for i in range(len(prices)):
dp_i_0 = max(dp_i_0, dp_i_1 + prices[i])
dp_i_1 = max(dp_i_1, -prices[i])
return dp_i_0
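# Quick sanity check (illustrative, not part of the original submission):
if __name__ == "__main__":
    # Best single-transaction profit for this price series is buy at 1, sell at 6.
    print(Solution().maxProfit([7, 1, 5, 3, 6, 4]))  # -> 5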
|
import future
import builtins
import past
import six
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd
import components as comp
from torch.distributions import multinomial, categorical
import math
import numpy as np
try:
from . import helpers as h
from . import ai
from . import scheduling as S
except:
import helpers as h
import ai
import scheduling as S
class WrapDom(object):
def __init__(self, a):
self.a = eval(a) if type(a) is str else a
def box(self, *args, **kargs):
return self.Domain(self.a.box(*args, **kargs))
def boxBetween(self, *args, **kargs):
return self.Domain(self.a.boxBetween(*args, **kargs))
def line(self, *args, **kargs):
return self.Domain(self.a.line(*args, **kargs))
class DList(object):
Domain = ai.ListDomain
class MLoss():
def __init__(self, aw):
self.aw = aw
def loss(self, dom, *args, lr = 1, **kargs):
if self.aw <= 0.0:
return 0
return self.aw * dom.loss(*args, lr = lr * self.aw, **kargs)
def __init__(self, *al):
if len(al) == 0:
al = [("Point()", 1.0), ("Box()", 0.1)]
self.al = [(eval(a) if type(a) is str else a, S.Const.initConst(aw)) for a,aw in al]
def getDiv(self, **kargs):
return 1.0 / sum(aw.getVal(**kargs) for _,aw in self.al)
def box(self, *args, **kargs):
m = self.getDiv(**kargs)
return self.Domain(ai.TaggedDomain(a.box(*args, **kargs), DList.MLoss(aw.getVal(**kargs) * m)) for a,aw in self.al)
def boxBetween(self, *args, **kargs):
m = self.getDiv(**kargs)
return self.Domain(ai.TaggedDomain(a.boxBetween(*args, **kargs), DList.MLoss(aw.getVal(**kargs) * m)) for a,aw in self.al)
def line(self, *args, **kargs):
m = self.getDiv(**kargs)
return self.Domain(ai.TaggedDomain(a.line(*args, **kargs), DList.MLoss(aw.getVal(**kargs) * m)) for a,aw in self.al)
def __str__(self):
return "DList(%s)" % h.sumStr("("+str(a)+","+str(w)+")" for a,w in self.al)
class Mix(DList):
def __init__(self, a="Point()", b="Box()", aw = 1.0, bw = 0.1):
super(Mix, self).__init__((a,aw), (b,bw))
class LinMix(DList):
def __init__(self, a="Point()", b="Box()", bw = 0.1):
super(LinMix, self).__init__((a,S.Complement(bw)), (b,bw))
class DProb(object):
def __init__(self, *doms):
if len(doms) == 0:
doms = [("Point()", 0.8), ("Box()", 0.2)]
div = 1.0 / sum(float(aw) for _,aw in doms)
self.domains = [eval(a) if type(a) is str else a for a,_ in doms]
self.probs = [ div * float(aw) for _,aw in doms]
def chooseDom(self):
return self.domains[np.random.choice(len(self.domains), p = self.probs)] if len(self.domains) > 1 else self.domains[0]
def box(self, *args, **kargs):
domain = self.chooseDom()
return domain.box(*args, **kargs)
def line(self, *args, **kargs):
domain = self.chooseDom()
return domain.line(*args, **kargs)
def __str__(self):
return "DProb(%s)" % h.sumStr("("+str(a)+","+str(w)+")" for a,w in zip(self.domains, self.probs))
class Coin(DProb):
def __init__(self, a="Point()", b="Box()", ap = 0.8, bp = 0.2):
super(Coin, self).__init__((a,ap), (b,bp))
class Point(object):
Domain = h.dten
def __init__(self, **kargs):
pass
def box(self, original, *args, **kargs):
return original
def line(self, original, other, *args, **kargs):
return (original + other) / 2
def boxBetween(self, o1, o2, *args, **kargs):
return (o1 + o2) / 2
def __str__(self):
return "Point()"
class PointA(Point):
def boxBetween(self, o1, o2, *args, **kargs):
return o1
def __str__(self):
return "PointA()"
class PointB(Point):
def boxBetween(self, o1, o2, *args, **kargs):
return o2
def __str__(self):
return "PointB()"
class NormalPoint(Point):
def __init__(self, w = None, **kargs):
self.epsilon = w
def box(self, original, w, *args, **kargs):
""" original = mu = mean, epsilon = variance"""
if not self.epsilon is None:
w = self.epsilon
inter = torch.randn_like(original, device = h.device) * w
return original + inter
def __str__(self):
return "NormalPoint(%s)" % ("" if self.epsilon is None else str(self.epsilon))
class MI_FGSM(Point):
def __init__(self, w = None, r = 20.0, k = 100, mu = 0.8, should_end = True, restart = None, searchable=False,**kargs):
self.epsilon = S.Const.initConst(w)
self.k = k
self.mu = mu
self.r = float(r)
self.should_end = should_end
self.restart = restart
self.searchable = searchable
def box(self, original, model, target = None, untargeted = False, **kargs):
if target is None:
untargeted = True
with torch.no_grad():
target = model(original).max(1)[1]
return self.attack(model, original, untargeted, target, **kargs)
def boxBetween(self, o1, o2, model, target = None, *args, **kargs):
return self.attack(model, (o1 - o2).abs() / 2, (o1 + o2) / 2, target, **kargs)
def attack(self, model, xo, untargeted, target, w, loss_function=ai.stdLoss, **kargs):
w = self.epsilon.getVal(c = w, **kargs)
x = nn.Parameter(xo.clone(), requires_grad=True)
gradorg = h.zeros(x.shape)
is_eq = 1
w = h.ones(x.shape) * w
for i in range(self.k):
if self.restart is not None and i % int(self.k / self.restart) == 0:
x = is_eq * (torch.rand_like(xo) * w + xo) + (1 - is_eq) * x
x = nn.Parameter(x, requires_grad = True)
model.optimizer.zero_grad()
out = model(x).vanillaTensorPart()
loss = loss_function(out, target)
loss.sum().backward(retain_graph=True)
with torch.no_grad():
oth = x.grad / torch.norm(x.grad, p=1)
gradorg *= self.mu
gradorg += oth
grad = (self.r * w / self.k) * ai.mysign(gradorg)
if self.should_end:
is_eq = ai.mulIfEq(grad, out, target)
x = (x + grad * is_eq) if untargeted else (x - grad * is_eq)
x = xo + torch.min(torch.max(x - xo, -w),w)
x.requires_grad_()
model.optimizer.zero_grad()
return x
    def boxBetween(self, o1, o2, model, target, *args, **kargs):
        # Note: this definition shadows the boxBetween defined earlier in this class.
        raise NotImplementedError("boxBetween is not yet supported by MI_FGSM")
def __str__(self):
return "MI_FGSM(%s)" % (("" if self.epsilon is None else "w="+str(self.epsilon)+",")
+ ("" if self.k == 5 else "k="+str(self.k)+",")
+ ("" if self.r == 5.0 else "r="+str(self.r)+",")
+ ("" if self.mu == 0.8 else "r="+str(self.mu)+",")
+ ("" if self.should_end else "should_end=False"))
class PGD(MI_FGSM):
def __init__(self, r = 5.0, k = 5, **kargs):
super(PGD,self).__init__(r=r, k = k, mu = 0, **kargs)
def __str__(self):
return "PGD(%s)" % (("" if self.epsilon is None else "w="+str(self.epsilon)+",")
+ ("" if self.k == 5 else "k="+str(self.k)+",")
+ ("" if self.r == 5.0 else "r="+str(self.r)+",")
+ ("" if self.should_end else "should_end=False"))
class IFGSM(PGD):
def __init__(self, k = 5, **kargs):
super(IFGSM, self).__init__(r = 1, k=k, **kargs)
def __str__(self):
return "IFGSM(%s)" % (("" if self.epsilon is None else "w="+str(self.epsilon)+",")
+ ("" if self.k == 5 else "k="+str(self.k)+",")
+ ("" if self.should_end else "should_end=False"))
class NormalAdv(Point):
def __init__(self, a="IFGSM()", w = None):
self.a = (eval(a) if type(a) is str else a)
self.epsilon = S.Const.initConst(w)
def box(self, original, w, *args, **kargs):
epsilon = self.epsilon.getVal(c = w, shape = original.shape[:1], **kargs)
assert (0 <= h.dten(epsilon)).all()
epsilon = torch.randn(original.size()[0:1], device = h.device)[0] * epsilon
return self.a.box(original, w = epsilon, *args, **kargs)
def __str__(self):
return "NormalAdv(%s)" % ( str(self.a) + ("" if self.epsilon is None else ",w="+str(self.epsilon)))
class InclusionSample(Point):
def __init__(self, sub, a="Box()", normal = False, w = None, **kargs):
self.sub = S.Const.initConst(sub) # sub is the fraction of w to use.
self.w = S.Const.initConst(w)
self.normal = normal
self.a = (eval(a) if type(a) is str else a)
def box(self, original, w, *args, **kargs):
w = self.w.getVal(c = w, shape = original.shape[:1], **kargs)
sub = self.sub.getVal(c = 1, shape = original.shape[:1], **kargs)
assert (0 <= h.dten(w)).all()
assert (h.dten(sub) <= 1).all()
assert (0 <= h.dten(sub)).all()
if self.normal:
inter = torch.randn_like(original, device = h.device)
else:
inter = (torch.rand_like(original, device = h.device) * 2 - 1)
inter = inter * w * (1 - sub)
return self.a.box(original + inter, w = w * sub, *args, **kargs)
def boxBetween(self, o1, o2, *args, **kargs):
w = (o2 - o1).abs()
return self.box( (o2 + o1)/2 , w = w, *args, **kargs)
def __str__(self):
return "InclusionSample(%s, %s)" % (str(self.sub), str(self.a) + ("" if self.epsilon is None else ",w="+str(self.epsilon)))
InSamp = InclusionSample
class AdvInclusion(InclusionSample):
def __init__(self, sub, a="IFGSM()", b="Box()", w = None, **kargs):
self.sub = S.Const.initConst(sub) # sub is the fraction of w to use.
self.w = S.Const.initConst(w)
self.a = (eval(a) if type(a) is str else a)
self.b = (eval(b) if type(b) is str else b)
def box(self, original, w, *args, **kargs):
w = self.w.getVal(c = w, shape = original.shape, **kargs)
sub = self.sub.getVal(c = 1, shape = original.shape, **kargs)
assert (0 <= h.dten(w)).all()
assert (h.dten(sub) <= 1).all()
assert (0 <= h.dten(sub)).all()
if h.dten(w).sum().item() <= 0.0:
inter = original
else:
inter = self.a.box(original, w = w * (1 - sub), *args, **kargs)
return self.b.box(inter, w = w * sub, *args, **kargs)
def __str__(self):
return "AdvInclusion(%s, %s, %s)" % (str(self.sub), str(self.a), str(self.b) + ("" if self.epsilon is None else ",w="+str(self.epsilon)))
class AdvDom(Point):
def __init__(self, a="IFGSM()", b="Box()"):
self.a = (eval(a) if type(a) is str else a)
self.b = (eval(b) if type(b) is str else b)
def box(self, original,*args, **kargs):
adv = self.a.box(original, *args, **kargs)
return self.b.boxBetween(original, adv.ub(), *args, **kargs)
def boxBetween(self, o1, o2, *args, **kargs):
original = (o1 + o2) / 2
adv = self.a.boxBetween(o1, o2, *args, **kargs)
return self.b.boxBetween(original, adv.ub(), *args, **kargs)
def __str__(self):
return "AdvDom(%s)" % (("" if self.width is None else "width="+str(self.width)+",")
+ str(self.a) + "," + str(self.b))
class BiAdv(AdvDom):
def box(self, original, **kargs):
adv = self.a.box(original, **kargs)
extreme = (adv.ub() - original).abs()
return self.b.boxBetween(original - extreme, original + extreme, **kargs)
def boxBetween(self, o1, o2, *args, **kargs):
original = (o1 + o2) / 2
adv = self.a.boxBetween(o1, o2, *args, **kargs)
extreme = (adv.ub() - original).abs()
return self.b.boxBetween(original - extreme, original + extreme, *args, **kargs)
def __str__(self):
return "BiAdv" + AdvDom.__str__(self)[6:]
class HBox(object):
Domain = ai.HybridZonotope
def domain(self, *args, **kargs):
return ai.TaggedDomain(self.Domain(*args, **kargs), self)
def __init__(self, w = None, tot_weight = 1, width_weight = 0, pow_loss = None, log_loss = False, searchable = True, cross_loss = True, **kargs):
self.w = S.Const.initConst(w)
self.tot_weight = S.Const.initConst(tot_weight)
self.width_weight = S.Const.initConst(width_weight)
self.pow_loss = pow_loss
self.searchable = searchable
self.log_loss = log_loss
self.cross_loss = cross_loss
def __str__(self):
return "HBox(%s)" % ("" if self.w is None else "w="+str(self.w))
def boxBetween(self, o1, o2, *args, **kargs):
batches = o1.size()[0]
num_elem = h.product(o1.size()[1:])
ei = h.getEi(batches, num_elem)
if len(o1.size()) > 2:
ei = ei.contiguous().view(num_elem, *o1.size())
return self.domain((o1 + o2) / 2, None, ei * (o2 - o1).abs() / 2).checkSizes()
def box(self, original, w, **kargs):
"""
This version of it is slow, but keeps correlation down the line.
"""
radius = self.w.getVal(c = w, **kargs)
batches = original.size()[0]
num_elem = h.product(original.size()[1:])
ei = h.getEi(batches,num_elem)
if len(original.size()) > 2:
ei = ei.contiguous().view(num_elem, *original.size())
return self.domain(original, None, ei * radius).checkSizes()
def line(self, o1, o2, **kargs):
w = self.w.getVal(c = 0, **kargs)
ln = ((o2 - o1) / 2).unsqueeze(0)
if not w is None and w > 0.0:
batches = o1.size()[0]
num_elem = h.product(o1.size()[1:])
ei = h.getEi(batches,num_elem)
if len(o1.size()) > 2:
ei = ei.contiguous().view(num_elem, *o1.size())
ln = torch.cat([ln, ei * w])
return self.domain((o1 + o2) / 2, None, ln ).checkSizes()
def loss(self, dom, target, *args, **kargs):
width_weight = self.width_weight.getVal(**kargs)
tot_weight = self.tot_weight.getVal(**kargs)
if self.cross_loss:
r = dom.ub()
inds = torch.arange(r.shape[0], device=h.device, dtype=h.ltype)
r[inds,target] = dom.lb()[inds,target]
tot = r.loss(target, *args, **kargs)
else:
tot = dom.loss(target, *args, **kargs)
if self.log_loss:
tot = (tot + 1).log()
if self.pow_loss is not None and self.pow_loss > 0 and self.pow_loss != 1:
tot = tot.pow(self.pow_loss)
ls = tot * tot_weight
if width_weight > 0:
ls += dom.diameter() * width_weight
return ls / (width_weight + tot_weight)
class Box(HBox):
def __str__(self):
return "Box(%s)" % ("" if self.w is None else "w="+str(self.w))
def box(self, original, w, **kargs):
"""
This version of it takes advantage of betas being uncorrelated.
Unfortunately they stay uncorrelated forever.
Counterintuitively, tests show more accuracy - this is because the other box
creates lots of 0 errors which get accounted for by the calcultion of the newhead in relu
which is apparently worse than not accounting for errors.
"""
radius = self.w.getVal(c = w, **kargs)
return self.domain(original, h.ones(original.size()) * radius, None).checkSizes()
def line(self, o1, o2, **kargs):
w = self.w.getVal(c = 0, **kargs)
return self.domain((o1 + o2) / 2, ((o2 - o1) / 2).abs() + h.ones(o2.size()) * w, None).checkSizes()
def boxBetween(self, o1, o2, *args, **kargs):
return self.line(o1, o2, **kargs)
class ZBox(HBox):
def __str__(self):
return "ZBox(%s)" % ("" if self.w is None else "w="+str(self.w))
def Domain(self, *args, **kargs):
return ai.Zonotope(*args, **kargs)
class HSwitch(HBox):
def __str__(self):
return "HSwitch(%s)" % ("" if self.w is None else "w="+str(self.w))
def Domain(self, *args, **kargs):
return ai.HybridZonotope(*args, customRelu = ai.creluSwitch, **kargs)
class ZSwitch(ZBox):
def __str__(self):
return "ZSwitch(%s)" % ("" if self.w is None else "w="+str(self.w))
def Domain(self, *args, **kargs):
return ai.Zonotope(*args, customRelu = ai.creluSwitch, **kargs)
class ZNIPS(ZBox):
def __str__(self):
return "ZSwitch(%s)" % ("" if self.w is None else "w="+str(self.w))
def Domain(self, *args, **kargs):
return ai.Zonotope(*args, customRelu = ai.creluNIPS, **kargs)
class HSmooth(HBox):
def __str__(self):
return "HSmooth(%s)" % ("" if self.w is None else "w="+str(self.w))
def Domain(self, *args, **kargs):
return ai.HybridZonotope(*args, customRelu = ai.creluSmooth, **kargs)
class HNIPS(HBox):
def __str__(self):
return "HSmooth(%s)" % ("" if self.w is None else "w="+str(self.w))
def Domain(self, *args, **kargs):
return ai.HybridZonotope(*args, customRelu = ai.creluNIPS, **kargs)
class ZSmooth(ZBox):
def __str__(self):
return "ZSmooth(%s)" % ("" if self.w is None else "w="+str(self.w))
def Domain(self, *args, **kargs):
return ai.Zonotope(*args, customRelu = ai.creluSmooth, **kargs)
# stochastic correlation
class HRand(WrapDom):
# domain must be an ai style domain like hybrid zonotope.
def __init__(self, num_correlated, a = "HSwitch()", **kargs):
super(HRand, self).__init__(Box())
self.num_correlated = num_correlated
self.dom = eval(a) if type(a) is str else a
def Domain(self, d):
with torch.no_grad():
out = d.abstractApplyLeaf('stochasticCorrelate', self.num_correlated)
out = self.dom.Domain(out.head, out.beta, out.errors)
return out
def __str__(self):
return "HRand(%s, domain = %s)" % (str(self.num_correlated), str(self.a))
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base_gate import BaseGate
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
class NaiveGate(BaseGate):
def __init__(self, d_model, num_expert, world_size, topk=2):
super().__init__(num_expert, world_size)
self.gate = nn.Linear(d_model, self.tot_expert)
self.gate.weight.name = "gate_" + self.gate.weight.name
self.gate.bias.name = "gate_" + self.gate.bias.name
self.top_k = topk
def forward(self, inp, return_all_scores=False):
gate = self.gate(inp)
gate_top_k_val, gate_top_k_idx = paddle.topk(
gate, k=self.top_k, axis=-1, largest=True, sorted=False)
if return_all_scores:
return gate_top_k_val, gate_top_k_idx, gate
return gate_top_k_val, gate_top_k_idx
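# Minimal usage sketch (illustrative values, single worker; not from the original source):
#   gate = NaiveGate(d_model=16, num_expert=4, world_size=1, topk=2)
#   scores, expert_idx = gate(paddle.randn([8, 16]))  # top-2 score and expert index per token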
|
def f(x,y):
x = [1]
z = x+y
a = [1,2]
b = a
print(f(a,b))
print(a)
print(b)
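# Expected output: None, then [1, 2] twice. f has no return statement, and rebinding
# x to [1] inside f does not affect a; a and b still reference the same list.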
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lib.rpn.generate_anchors import generate_anchors
import numpy as np
def generate_anchors_global(feat_stride, height, width, anchor_scales=(8,16,32), anchor_ratios=(0.5,1,2)):
anchors = generate_anchors(base_size=feat_stride, ratios=np.array(anchor_ratios), scales=np.array(anchor_scales))
# Enumerate all shifts
shift_x = np.arange(0, width) * feat_stride
shift_y = np.arange(0, height) * feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
# Enumerate all shifted anchors:
#
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (A*K, 4) shifted anchors
A = anchors.shape[0]
K = shifts.shape[0]
anchors = anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))
anchors = anchors.reshape((K * A, 4)).astype(np.float32, copy=False)
return anchors
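# Example (illustrative): for a 38x50 feature map with stride 16 and the default
# 3 scales x 3 ratios (9 anchors per cell), generate_anchors_global(16, 38, 50)
# returns an array of shape (38 * 50 * 9, 4) = (17100, 4).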
|
import pandas as pd
import sklearn
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.model_selection import cross_validate
def baseline_fun(
X_train, y_train, type="regression", metrics_1="accuracy", metrics_2="r2"
):
"""
Gives the scoring metrics of sklearn DummyRegressor and LinearRegression or sklearn DummyClassifier and LogisticRegression.
Parameters
----------
X_train : Pandas DataFrame
The train set dataframe.
y_train : Pandas DataFrame
The target of train set dataframe.
type: string
What kind of supervised machine learning to use, regression or classification:
If “regression”, then DummyRegressor and LinearRegression would be used.
If “classification", then DummyClassifier and LogisticRegression would be used.
metrics_1: string
What kind of score metrics to use for classification problem, the default one is accuracy.
Use sorted(sklearn.metrics.SCORERS.keys()) to get valid options.
metrics_2: string
What kind of score metrics to use for regression problem, the default one is r2
Use sorted(sklearn.metrics.SCORERS.keys()) to get valid options.
Return
-------
score : Pandas DataFrame
The DataFrame contains the mean of fit time, score time, training score and validation score by 5-fold cross validation for both two models.
Examples
--------
>>> from easysklearn.baseline_fun import baseline_fun
>>> import pandas as pd
>>> import sklearn.datasets as datasets
>>> iris = datasets.load_iris(return_X_y=True)
>>> X = pd.DataFrame(iris[0])
>>> y = pd.DataFrame(iris[1])
>>> baseline_fun(X, y, type = 'regression', metrics = 'neg_root_mean_squared_error')
DummyRegressor LinearRegression
fit_time 0.002573 0.003994
score_time 0.002200 0.002614
test_score -0.882971 -0.244363
train_score -0.790971 -0.209256
"""
# input test
if not isinstance(X_train, pd.DataFrame):
raise TypeError("Input X should be a data frame")
if not isinstance(y_train, pd.DataFrame):
raise TypeError("Input y should be a data frame")
if type not in ["regression", "classification"]:
raise TypeError(
"Please check what kind of supervised machine learning to use, regression or classification"
)
if metrics_1 not in sklearn.metrics.SCORERS.keys():
raise KeyError(
"Please check sklearn.metrics.SCORERS.keys() to get valid options"
)
if metrics_2 not in sklearn.metrics.SCORERS.keys():
raise KeyError(
"Please check sklearn.metrics.SCORERS.keys() to get valid options"
)
# fit data into the model
score = {}
if type == "regression":
dr_score = cross_validate(
DummyRegressor(),
X_train,
y_train,
return_train_score=True,
scoring=metrics_2,
)
lr_score = cross_validate(
LinearRegression(),
X_train,
y_train,
return_train_score=True,
scoring=metrics_2,
)
score["DummyRegressor"] = pd.DataFrame(dr_score).mean()
score["LinearRegression"] = pd.DataFrame(lr_score).mean()
if type == "classification":
dc_score = cross_validate(
DummyClassifier(),
X_train,
y_train,
return_train_score=True,
scoring=metrics_1,
)
lr_score = cross_validate(
LogisticRegression(),
X_train,
y_train,
return_train_score=True,
scoring=metrics_1,
)
score["DummyClassifier"] = pd.DataFrame(dc_score).mean()
score["LogisticRegression"] = pd.DataFrame(lr_score).mean()
return pd.DataFrame(score)
|
#!/usr/bin/env python3
import json
import sys
CHUNK_SIZE = 50
FILENAME_PREFIX = 'circles'
class Worker(object):
def __init__(self, src: list, destdir: str):
self.src = src
self.destdir = destdir
def save_file(self, counter: int):
filename = '%s-%d.json' % (FILENAME_PREFIX, counter)
with open('%s/%s' % (self.destdir, filename), 'w') as fp:
start_idx = counter * CHUNK_SIZE
end_idx = (counter + 1) * CHUNK_SIZE
json.dump(self.src[start_idx:end_idx], fp)
def main():
destdir = '.'
if len(sys.argv) >= 2:
destdir = sys.argv[1]
circles = json.load(sys.stdin)
worker = Worker(circles, destdir)
counter = 0
while counter * CHUNK_SIZE < len(circles):
worker.save_file(counter)
counter += 1
if __name__ == '__main__':
main()
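# Usage (assumed; the script's filename is not shown, call it split_circles.py):
#   python3 split_circles.py ./outdir < circles.json
# writes circles-0.json, circles-1.json, ... each holding up to CHUNK_SIZE (50) entries.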
|
# -*- coding: utf-8 -*-
"""
:author: Grey Li (李辉)
:url: http://greyli.com
:copyright: © 2018 Grey Li
:license: MIT, see LICENSE for more details.
"""
import os
import sys
import click
from flask import Flask, request, g, session, redirect, url_for, render_template, jsonify, flash
from flask_github import GitHub
from flask_sqlalchemy import SQLAlchemy
# sqlite URI compatible
WIN = sys.platform.startswith('win')
if WIN:
prefix = 'sqlite:///'
else:
prefix = 'sqlite:////'
app = Flask(__name__)
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
app.config['SECRET_KEY'] = os.getenv('SECRET_KEY', 'secret string')
# Flask-SQLAlchemy
app.config['SQLALCHEMY_DATABASE_URI'] = os.getenv('DATABASE_URL', prefix + os.path.join(app.root_path, 'data.db'))
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# GitHub-Flask
# Register your OAuth application on https://github.com/settings/applications/new
# You normally need to save these values as environment variables
app.config['GITHUB_CLIENT_ID'] = 'your_client_id'
app.config['GITHUB_CLIENT_SECRET'] = 'your_client_secret'
db = SQLAlchemy(app)
github = GitHub(app)
@app.cli.command()
@click.option('--drop', is_flag=True, help='Create after drop.')
def initdb(drop):
"""Initialize the database."""
if drop:
db.drop_all()
db.create_all()
click.echo('Initialized database.')
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(100))
access_token = db.Column(db.String(200))
@app.before_request
def before_request():
g.user = None
if 'user_id' in session:
g.user = User.query.get(session['user_id'])
@app.route('/')
def index():
if g.user:
is_login = True
response = github.get('user')
avatar = response['avatar_url']
username = response['name']
url = response['html_url']
return render_template('index.html', is_login=is_login, avatar=avatar, username=username, url=url)
is_login = False
return render_template('index.html', is_login=is_login)
@app.route('/star/helloflask')
def star():
github.put('user/starred/greyli/helloflask', headers={'Content-Length': '0'})
flash('Star success.')
return redirect(url_for('index'))
@github.access_token_getter
def token_getter():
user = g.user
if user is not None:
return user.access_token
@app.route('/callback/github')
@github.authorized_handler
def authorized(access_token):
if access_token is None:
flash('Login failed.')
return redirect(url_for('index'))
response = github.get('user', access_token=access_token)
username = response['login'] # get username
user = User.query.filter_by(username=username).first()
if user is None:
user = User(username=username, access_token=access_token)
db.session.add(user)
user.access_token = access_token # update access token
db.session.commit()
flash('Login success.')
# log the user in
# if you use flask-login, just call login_user() here.
session['user_id'] = user.id
return redirect(url_for('index'))
@app.route('/login')
def login():
if session.get('user_id', None) is None:
return github.authorize(scope='repo')
flash('Already logged in.')
return redirect(url_for('index'))
@app.route('/logout')
def logout():
session.pop('user_id', None)
flash('Goodbye.')
return redirect(url_for('index'))
@app.route('/user')
def get_user():
return jsonify(github.get('user'))
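# To try this locally (sketch, not from the original source): replace the placeholder
# GITHUB_CLIENT_ID / GITHUB_CLIENT_SECRET values above with your own OAuth app credentials,
# point FLASK_APP at this module, then run `flask initdb` followed by `flask run`.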
|